| code | repo_name | path | language | license | size |
|---|---|---|---|---|---|
| stringlengths 5–1M | stringlengths 5–109 | stringlengths 6–208 | stringclasses 1 value | stringclasses 15 values | int64 5–1M |
package com.typesafe.sbt
package packager
package archetypes
import Keys._
import sbt._
import sbt.Project.Initialize
import sbt.Keys.{ mappings, target, name, mainClass, normalizedName }
import linux.LinuxPackageMapping
import SbtNativePackager._
import com.typesafe.sbt.packager.linux.LinuxPackageMapping
/**
* This class contains the default settings for creating and deploying an archetypical Java application.
* A Java application archetype is defined as a project that has a main method and is run by placing
* all of its JAR files on the classpath and calling that main method.
*
* This doesn't create the best of distributions, but it can simplify the distribution of code.
*
* **NOTE: EXPERIMENTAL** This currently only supports debian upstart scripts.
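*
* A minimal usage sketch (a hypothetical `.sbt` snippet, not taken from this repository; it
* assumes sbt's `seq(...)` helper for splicing a `Seq[Setting[_]]` into a build):
* {{{
* import com.typesafe.sbt.SbtNativePackager._
* import com.typesafe.sbt.packager.archetypes.JavaServerAppPackaging
*
* seq(JavaServerAppPackaging.settings: _*)
* }}}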
*/
object JavaServerAppPackaging {
def settings: Seq[Setting[_]] =
JavaAppPackaging.settings ++
debianUpstartSettings
def debianUpstartSettings: Seq[Setting[_]] =
Seq(
debianUpstartScriptReplacements <<= (maintainer in Debian, packageSummary in Debian, normalizedName, sbt.Keys.version) map { (author, descr, name, version) =>
// TODO name-version is copied from UniversalPlugin. This should be consolidated into a setting (install location...)
val chdir = GenericPackageSettings.installLocation + "/" + name + "/bin"
JavaAppUpstartScript.makeReplacements(author = author, descr = descr, execScript = name, chdir = chdir)
},
debianMakeUpstartScript <<= (debianUpstartScriptReplacements, normalizedName, target in Universal) map makeDebianUpstartScript,
linuxPackageMappings in Debian <++= (debianMakeUpstartScript, normalizedName) map { (script, name) =>
for {
s <- script.toSeq
} yield LinuxPackageMapping(Seq(s -> ("/etc/init/" + name + ".conf"))).withPerms("0644")
},
// TODO - only make these if the upstart config exists...
debianMakePrermScript <<= (normalizedName, target in Universal) map makeDebianPrermScript,
debianMakePostinstScript <<= (normalizedName, target in Universal) map makeDebianPostinstScript)
private[this] final def makeDebianPrermScript(name: String, tmpDir: File): Option[File] = {
val scriptBits = JavaAppUpstartScript.generatePrerm(name)
val script = tmpDir / "tmp" / "bin" / "debian-prerm"
IO.write(script, scriptBits)
Some(script)
}
private[this] final def makeDebianPostinstScript(name: String, tmpDir: File): Option[File] = {
val scriptBits = JavaAppUpstartScript.generatePostinst(name)
val script = tmpDir / "tmp" / "bin" / "debian-postinst"
IO.write(script, scriptBits)
Some(script)
}
private[this] final def makeDebianUpstartScript(replacements: Seq[(String, String)], name: String, tmpDir: File): Option[File] =
if (replacements.isEmpty) None
else {
val scriptBits = JavaAppUpstartScript.generateScript(replacements)
val script = tmpDir / "tmp" / "bin" / (name + ".conf")
IO.write(script, scriptBits)
Some(script)
}
} | yanns/sbt-native-packager | src/main/scala/com/typesafe/sbt/packager/archetypes/JavaServerApplication.scala | Scala | bsd-2-clause | 3,032 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc
package symtab
abstract class SymbolTable extends scala.reflect.internal.SymbolTable
| scala/scala | src/compiler/scala/tools/nsc/symtab/SymbolTable.scala | Scala | apache-2.0 | 400 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.ast.convert.plannerQuery
import org.neo4j.cypher.internal.compiler.v2_3.ast.convert.plannerQuery.PatternConverters._
import org.neo4j.cypher.internal.compiler.v2_3.ast.rewriters.{LabelPredicateNormalizer, MatchPredicateNormalizerChain, PropertyPredicateNormalizer, addUniquenessPredicates}
import org.neo4j.cypher.internal.compiler.v2_3.helpers.UnNamedNameGenerator._
import org.neo4j.cypher.internal.compiler.v2_3.planner.logical.plans.{IdName, PatternLength, SimplePatternLength, VarPatternLength}
import org.neo4j.cypher.internal.compiler.v2_3.planner.{Predicate, QueryGraph}
import org.neo4j.cypher.internal.frontend.v2_3.ast._
import org.neo4j.cypher.internal.frontend.v2_3.{Rewriter, topDown}
object ExpressionConverters {
val normalizer = MatchPredicateNormalizerChain(PropertyPredicateNormalizer, LabelPredicateNormalizer)
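/**
 * Converts a pattern expression such as `(a)-[r:REL]->(b)` into a [[QueryGraph]]: label and
 * property predicates are normalized, relationship-uniqueness predicates are added, and the
 * named identifiers covered by the pattern become argument ids (the example pattern is
 * illustrative).
 */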
implicit class PatternExpressionConverter(val exp: PatternExpression) extends AnyVal {
def asQueryGraph: QueryGraph = {
val uniqueRels = addUniquenessPredicates.collectUniqueRels(exp.pattern)
val uniquePredicates = addUniquenessPredicates.createPredicatesFor(uniqueRels, exp.pattern.position)
val relChain: RelationshipChain = exp.pattern.element
val predicates: Vector[Expression] = relChain.fold(uniquePredicates.toVector) {
case pattern: AnyRef if normalizer.extract.isDefinedAt(pattern) => acc => acc ++ normalizer.extract(pattern)
case _ => identity
}
val rewrittenChain = relChain.endoRewrite(topDown(Rewriter.lift(normalizer.replace)))
val patternContent = rewrittenChain.destructed
val qg = QueryGraph(
patternRelationships = patternContent.rels.toSet,
patternNodes = patternContent.nodeIds.toSet
).addPredicates(predicates: _*)
qg.addArgumentIds(qg.coveredIds.filter(_.name.isNamed).toSeq)
}
}
implicit class PatternExpressionExtractor(val expression: Expression) extends AnyVal {
def extractPatternExpressions: Seq[PatternExpression] =
expression.treeFold(Seq.empty[PatternExpression]) {
case p: PatternExpression =>
(acc, _) => acc :+ p
}
}
implicit class PredicateConverter(val predicate: Expression) extends AnyVal {
def asPredicates: Set[Predicate] = {
predicate.treeFold(Set.empty[Predicate]) {
// n:Label
case p@HasLabels(Identifier(name), labels) =>
(acc, _) => acc ++ labels.map {
label: LabelName =>
Predicate(Set(IdName(name)), p.copy(labels = Seq(label))(p.position))
}
// and
case _: Ands =>
(acc, children) => children(acc)
case p: Expression =>
(acc, _) => acc + Predicate(p.idNames, p)
}.map(filterUnnamed).toSet
}
private def filterUnnamed(predicate: Predicate): Predicate = predicate match {
case Predicate(deps, e: PatternExpression) =>
Predicate(deps.filter(x => isNamed(x.name)), e)
case Predicate(deps, e@Not(_: PatternExpression)) =>
Predicate(deps.filter(x => isNamed(x.name)), e)
case Predicate(deps, ors@Ors(exprs)) =>
val newDeps = exprs.foldLeft(Set.empty[IdName]) { (acc, exp) =>
exp match {
case e: PatternExpression =>
acc ++ e.idNames.filter(x => isNamed(x.name))
case e@Not(_: PatternExpression) =>
acc ++ e.idNames.filter(x => isNamed(x.name))
case e if e.exists { case _: PatternExpression => true} =>
acc ++ (e.idNames -- unnamedIdNamesInNestedPatternExpressions(e))
case e =>
acc ++ e.idNames
}
}
Predicate(newDeps, ors)
case Predicate(deps, expr) if expr.exists { case _: PatternExpression => true} =>
Predicate(deps -- unnamedIdNamesInNestedPatternExpressions(expr), expr)
case p => p
}
private def unnamedIdNamesInNestedPatternExpressions(expression: Expression) = {
val patternExpressions = expression.treeFold(Seq.empty[PatternExpression]) {
case p: PatternExpression => (acc, _) => acc :+ p
}
val unnamedIdsInPatternExprs = patternExpressions.flatMap(_.idNames)
.filterNot(x => isNamed(x.name))
.toSet
unnamedIdsInPatternExprs
}
}
implicit class IdExtractor(val exp: Expression) extends AnyVal {
def idNames: Set[IdName] = exp.dependencies.map(id => IdName(id.name))
}
implicit class RangeConvertor(val length: Option[Option[Range]]) extends AnyVal {
def asPatternLength: PatternLength = length match {
case Some(Some(Range(Some(left), Some(right)))) => VarPatternLength(left.value.toInt, Some(right.value.toInt))
case Some(Some(Range(Some(left), None))) => VarPatternLength(left.value.toInt, None)
case Some(Some(Range(None, Some(right)))) => VarPatternLength(1, Some(right.value.toInt))
case Some(Some(Range(None, None))) => VarPatternLength.unlimited
case Some(None) => VarPatternLength.unlimited
case None => SimplePatternLength
}
}
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/ast/convert/plannerQuery/ExpressionConverters.scala | Scala | apache-2.0 | 5,934 |
package com.monsanto.arch.kamon.spray.can
import akka.actor.{ActorSystem, Props}
import akka.io.IO
import akka.testkit.{ImplicitSender, TestKit}
import com.monsanto.arch.kamon.spray.can.server.SprayServerMetrics
import com.typesafe.config.ConfigFactory
import kamon.Kamon
import kamon.metric.Entity
import org.scalatest.{BeforeAndAfterAll, Matchers, WordSpecLike}
import spray.can.Http
import spray.http.HttpResponse
import spray.httpx.RequestBuilding.Get
import spray.routing.HttpServiceActor
/** Integration test for the Kamon spray extension.
*
* @author Daniel Solano Gómez
*/
class KamonHttpSpec(_system: ActorSystem) extends TestKit(_system) with WordSpecLike with Matchers with ImplicitSender with BeforeAndAfterAll {
import KamonHttpSpec._
def this() = this(ActorSystem("kamon-http-spec"))
override protected def beforeAll(): Unit = {
Kamon.start()
super.beforeAll()
}
override protected def afterAll(): Unit = {
TestKit.shutdownActorSystem(_system, verifySystemShutdown = true)
Kamon.shutdown()
super.afterAll()
}
"The KamonHttp extension should work" in {
val service = system.actorOf(Props(new TestServiceActor))
IO(KamonHttp) ! Http.Bind(service, interface = "localhost", port = 0)
val bound = expectMsgType[Http.Bound]
val httpListener = lastSender
val entity = Entity(s"${bound.localAddress.getHostName}:${bound.localAddress.getPort}", SprayServerMetrics.category)
IO(KamonHttp) ! Get(s"http://localhost:${bound.localAddress.getPort}/")
val response = expectMsgType[HttpResponse]
response.entity.asString shouldBe ResponseText
// wait for metrics to update
Thread.sleep(RefreshIntervalMillis * 3)
val maybeMetrics = Kamon.metrics.find(entity)
maybeMetrics should be (defined)
val stats = maybeMetrics.get.asInstanceOf[SprayServerMetrics].stats
stats.uptime.toNanos should be > 0L
stats.maxOpenConnections shouldBe 1
stats.maxOpenRequests shouldBe 1
stats.totalConnections shouldBe 1
stats.totalRequests shouldBe 1
stats.openConnections should be <= 1L
stats.openRequests shouldBe 0
httpListener ! Http.Unbind
expectMsg(Http.Unbound)
}
}
object KamonHttpSpec {
val RefreshIntervalMillis = new KamonHttpSettings(ConfigFactory.load()).refreshInterval.toMillis
val ResponseText = "Hello"
class TestServiceActor extends HttpServiceActor {
override def receive = runRoute {
complete {
Thread.sleep(RefreshIntervalMillis)
ResponseText
}
}
}
}
| MonsantoCo/spray-kamon-metrics | src/test/scala/com/monsanto/arch/kamon/spray/can/KamonHttpSpec.scala | Scala | bsd-3-clause | 2,538 |
package com.dragisak.typelevel
import org.scalatest.WordSpec
import org.scalatest.Matchers._
import Nat._
import NatToInt._
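// NList encodes its length in its type: NList[Int, Nat2] is a two-element list of Int, so a
// size mismatch is a compile-time error rather than a runtime one.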
class NListSpec extends WordSpec {
"List" should {
"should compile if types match" in {
val a: NList[Int, Nat2] = 1 :: 2 :: NNil
}
"should have correct size" in {
val l = 1 :: 2 :: 3 :: NNil
toInt[l.size] should be(3)
}
"not compile if sizes mismatch" in {
"val a: NList[Int, Nat2] = 1 :: 2 :: 3 :: Nil" shouldNot typeCheck
}
}
}
| dragisak/type-level | src/test/scala/com/dragisak/typelevel/NListSpec.scala | Scala | apache-2.0 | 517 |
package se.lu.nateko.cp.meta.upload
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import java.net.URI
import scala.concurrent.Future
import se.lu.nateko.cp.meta.DataObjectDto
import se.lu.nateko.cp.meta.CpmetaJsonProtocol
import se.lu.nateko.cp.meta.utils.async.{ok, error, executeSequentially}
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.http.scaladsl.model.HttpRequest
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import scala.io.Source
import scala.util.Success
import scala.util.Failure
import spray.json._
import akka.Done
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model.HttpMethods
import akka.http.scaladsl.model.RequestEntity
import se.lu.nateko.cp.meta.core.crypto.Sha256Sum
object L3UpdateWorkbench extends CpmetaJsonProtocol{
implicit val system = ActorSystem("l3update_workbench")
import system.dispatcher
val uploadConfBase = new CpUploadClient.Config(
"???",
"meta.icos-cp.eu",
"data.icos-cp.eu"
//Some(Uri("http://127.0.0.1:9094")),
//Some(Uri("http://127.0.0.1:9010"))
)
def uploadClient(cpAuthToken: String) = new CpUploadClient(uploadConfBase.copy(cpauthToken = cpAuthToken))
private def sparqlUri = new java.net.URI(s"${uploadConfBase.metaBase}/sparql")
def updateDto(dto: DataObjectDto): DataObjectDto = {
val l3 = dto.specificInfo.left.getOrElse(???)
val l3updated = l3.copy(
spatial = Right(new URI("http://meta.icos-cp.eu/resources/latlonboxes/globalLatLonBox")),
variables = Some(Seq("emission")),
)
dto.copy(
submitterId = "CP",
specificInfo = Left(l3updated)
)
}
def updateEmissInventoriesMeta(): Unit = {
val token = Source.fromFile(new java.io.File("/home/oleg/token.txt")).getLines().mkString
val client = uploadClient(token)
val sparql = new SparqlHelper(sparqlUri)
sparql.emissionInventories
.flatMap{envs =>
executeSequentially(envs){uri =>
client.getUploadDto[DataObjectDto](uri).flatMap{dto =>
client.uploadSingleMeta(updateDto(dto)).andThen{
case Success(_) => println(s"Uploaded $uri")
}
}
}
}
.onComplete{
case Success(Done) => println("Done!")
case Failure(exception) => exception.printStackTrace()
}
}
def reingestLatestSpatialNetcdfs(): Unit = {
val token = Source.fromFile(new java.io.File("/home/oleg/token.txt")).getLines().mkString
val client = uploadClient(token)
val sparql = new SparqlHelper(sparqlUri)
sparql.latestSpatialNetcdfs
.flatMap{uris =>
executeSequentially(uris){uri =>
Future
.fromTry(Sha256Sum.fromBase64Url(uri.getPath.split("/").last))
.flatMap{hash =>
println("Re-ingesting " + hash.id)
client.reIngestObject(hash)
}
}
}
.onComplete{
case Success(Done) => println("Done!")
case Failure(exception) => exception.printStackTrace()
}
}
}
| ICOS-Carbon-Portal/meta | src/test/scala/se/lu/nateko/cp/meta/upload/L3UpdateWorkbench.scala | Scala | gpl-3.0 | 2,910 |
package org.jetbrains.plugins.scala.lang.typeInference
package generated
class TypeInferenceMethodCallTest extends TypeInferenceTestBase {
//This class was generated by build script, please don't change this
override def folderPath: String = super.folderPath + "methodCall/"
def testApplyArray() {doTest()}
def testApplyCall() {doTest()}
def testApplyList() {doTest()}
def testApplySeqCall() {doTest()}
def testCaseClassCall() {doTest()}
def testCharAt() {doTest()}
def testExplicitStringCharAt() {doTest()}
def testFunctionApply() {doTest()}
def testFunctionApplyReturnTypeAlsoHasApplyMethod() {doTest()}
def testFunctionApplyWithParams() {doTest()}
def testFunctionDirectCall() {doTest()}
def testFunctionLiteralApplyCall() {doTest()}
def testFunctionLiteralCall() {doTest()}
def testHashaSetCreation() {doTest()}
def testImplicitApply() {doTest()}
def testInfixIntegers() {doTest()}
def testLibraryList() {doTest()}
def testMacroCall() {doTest()}
def testMapApply() {doTest()}
def testObjectApplyCall() {doTest()}
def testOverloadedCall() {doTest()}
def testPrivateElementFromContext() {doTest()}
def testRecursive() {doTest()}
def testReduceLeft() {doTest()}
def testRepeated2() {doTest()}
def testSCL1760() {doTest()}
def testSimpleCall() {doTest()}
def testSimpleCall2() {doTest()}
def testSimpleStringCharAt() {doTest()}
def testSplit() {doTest()}
def testStaticCall() {doTest()}
def testSuperCall() {doTest()}
def testThisTypeCompound() {doTest()}
def testThisTypeSimple() {doTest()}
def testUpdateCall() {doTest()}
} | ilinum/intellij-scala | test/org/jetbrains/plugins/scala/lang/typeInference/generated/TypeInferenceMethodCallTest.scala | Scala | apache-2.0 | 1,637 |
object SCL4354 {
import java.util
import scala.collection.JavaConverters._
implicit def castingScalaListConversion(javaList: util.List[_]): {def asScalaListOf[T]: List[T]} = new {
def asScalaListOf[T]: List[T] = /*start*/javaList.asInstanceOf[util.List[T]].asScala.toList/*end*/
}
}
//List[T] | ilinum/intellij-scala | testdata/typeInference/bugs5/SCL4354.scala | Scala | apache-2.0 | 305 |
/*
* MandelActors - Mandelbrot fractal generator using actors
* Copyright (C) 2011 Jesper de Jong
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.jesperdj.mandelactors
// Ideas for sampling and reconstruction are reused from my project ScalaRay - https://github.com/jesperdj/scalaray
// and come from the book Physically Based Rendering - From Theory to Implementation - http://www.pbrt.org/
import scala.collection.immutable.Traversable
import scala.collection._
import scala.collection.generic._
import scala.collection.mutable.{ Builder, ListBuffer }
case class Sample (x: Float, y: Float)
//trait SampleBatch extends Traversable[Sample]
trait SampleBatch extends SerializableColl[Sample]
trait Sampler {
val rectangle: Rectangle
val samplesPerPixel: Int
//val batches: Traversable[SampleBatch]
val batches: SerializableColl[SampleBatch]
}
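// Usage sketch, kept as a comment: the exact Rectangle constructor is defined elsewhere in this
// project and is assumed here, and `evaluate` is a hypothetical per-sample callback.
//
// val sampler = new StratifiedSampler(rectangle, samplesPerPixelX = 2, samplesPerPixelY = 2,
// samplesPerBatch = 4096, jitter = true)
// for (batch <- sampler.batches; sample <- batch) evaluate(sample.x, sample.y)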
class StratifiedSampler (val rectangle: Rectangle, samplesPerPixelX: Int, samplesPerPixelY: Int, samplesPerBatch: Int, jitter: Boolean) extends Sampler
with Serializable {
val samplesPerPixel = samplesPerPixelX * samplesPerPixelY
private val numberOfBatches = ((rectangle.width * rectangle.height * samplesPerPixel) / samplesPerBatch.toFloat).ceil.toInt
//val batches = new Traversable[SampleBatch] {
val batches = new SerializableColl[SampleBatch] {
private class SampleBatchImpl (batchIndex: Int) extends SampleBatch {
override def foreach[U](f: Sample => U): Unit = {
val sampleIndex = batchIndex * samplesPerBatch
val samplesPerPY = rectangle.width * samplesPerPixel
val samplesPerSY = (sampleIndex % samplesPerPY) % samplesPerPixel
var py = rectangle.top + sampleIndex / samplesPerPY
var px = rectangle.left + (sampleIndex % samplesPerPY) / samplesPerPixel
var sy = samplesPerSY / samplesPerPixelX
var sx = samplesPerSY % samplesPerPixelX
val random = new scala.util.Random
0 until size foreach { _ =>
// Generate a sample
val (jx, jy) = if (jitter) (random.nextFloat, random.nextFloat) else (0.5f, 0.5f)
f(new Sample(px + ((sx + jx) / samplesPerPixelX), py + ((sy + jy) / samplesPerPixelY)))
// Move indices to the next sample
sx += 1
if (sx >= samplesPerPixelX) {
sx = 0; sy += 1
if (sy >= samplesPerPixelY) {
sy = 0; px += 1
if (px > rectangle.right) { px = rectangle.left; py += 1 }
}
}
}
}
override def size = if (batchIndex < numberOfBatches - 1) samplesPerBatch else {
// The last batch contains the remaining samples (can be less than samplesPerBatch)
rectangle.width * rectangle.height * samplesPerPixel - (numberOfBatches - 1) * samplesPerBatch
}
}
//def foreach[U](f: SampleBatch => U): Unit = for (batchIndex <- 0 until size) f(new SampleBatchImpl(batchIndex))
override def foreach[U](f: SampleBatch => U): Unit = for (batchIndex <- 0 until size) f(new SampleBatchImpl(batchIndex))
override val size = numberOfBatches
}
override def toString = "StratifiedSampler"
}
class SerializableColl[A](seq : A*) extends Traversable[A]
with GenericTraversableTemplate[A, SerializableColl]
with TraversableLike[A, SerializableColl[A]]
with Serializable {
override def companion = SerializableColl
def foreach[U](f: A => U) = util.Random.shuffle(seq.toSeq).foreach(f)
}
object SerializableColl extends TraversableFactory[SerializableColl] {
implicit def canBuildFrom[A]: CanBuildFrom[Coll, A, SerializableColl[A]] = new GenericCanBuildFrom[A]
def newBuilder[A] = new ListBuffer[A] mapResult (x => new SerializableColl(x:_*))
}
| jesperdj/mandelactors | src/main/scala/org/jesperdj/mandelactors/Sampler.scala | Scala | gpl-3.0 | 4,412 |
package im.actor.server.persist
import im.actor.server.models
import slick.driver.PostgresDriver.api._
object SexColumnType {
implicit val sexColumnType =
MappedColumnType.base[models.Sex, Int](_.toInt, models.Sex.fromInt)
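// Usage sketch: with this implicit in scope a Slick table can declare the column directly, e.g.
// `def sex = column[models.Sex]("sex")` (the column name is illustrative).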
}
| boneyao/actor-platform | actor-server/actor-persist/src/main/scala/im/actor/server/persist/SexColumnType.scala | Scala | mit | 233 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import scala.collection.immutable.TreeSet
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, CodeGenerator, ExprCode, FalseLiteral, GenerateSafeProjection, GenerateUnsafeProjection, Predicate => BasePredicate}
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util.TypeUtils
import org.apache.spark.sql.types._
object InterpretedPredicate {
def create(expression: Expression, inputSchema: Seq[Attribute]): InterpretedPredicate =
create(BindReferences.bindReference(expression, inputSchema))
def create(expression: Expression): InterpretedPredicate = new InterpretedPredicate(expression)
}
case class InterpretedPredicate(expression: Expression) extends BasePredicate {
override def eval(r: InternalRow): Boolean = expression.eval(r).asInstanceOf[Boolean]
override def initialize(partitionIndex: Int): Unit = {
super.initialize(partitionIndex)
expression.foreach {
case n: Nondeterministic => n.initialize(partitionIndex)
case _ =>
}
}
}
/**
* An [[Expression]] that returns a boolean value.
*/
trait Predicate extends Expression {
override def dataType: DataType = BooleanType
}
trait PredicateHelper {
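/**
 * Splits a conjunction into its constituent predicates, e.g. `(a AND b) AND c` becomes
 * `Seq(a, b, c)`; `splitDisjunctivePredicates` below does the same for `OR`.
 */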
protected def splitConjunctivePredicates(condition: Expression): Seq[Expression] = {
condition match {
case And(cond1, cond2) =>
splitConjunctivePredicates(cond1) ++ splitConjunctivePredicates(cond2)
case other => other :: Nil
}
}
protected def splitDisjunctivePredicates(condition: Expression): Seq[Expression] = {
condition match {
case Or(cond1, cond2) =>
splitDisjunctivePredicates(cond1) ++ splitDisjunctivePredicates(cond2)
case other => other :: Nil
}
}
// Substitute any known alias from a map.
protected def replaceAlias(
condition: Expression,
aliases: AttributeMap[Expression]): Expression = {
// Use transformUp to prevent infinite recursion when the replacement expression
// redefines the same ExprId,
condition.transformUp {
case a: Attribute =>
aliases.getOrElse(a, a)
}
}
/**
* Returns true if `expr` can be evaluated using only the output of `plan`. This method
* can be used to determine when it is acceptable to move expression evaluation within a query
* plan.
*
* For example consider a join between two relations R(a, b) and S(c, d).
*
* - `canEvaluate(EqualTo(a,b), R)` returns `true`
* - `canEvaluate(EqualTo(a,c), R)` returns `false`
* - `canEvaluate(Literal(1), R)` returns `true` as literals CAN be evaluated on any plan
*/
protected def canEvaluate(expr: Expression, plan: LogicalPlan): Boolean =
expr.references.subsetOf(plan.outputSet)
/**
* Returns true iff `expr` could be evaluated as a condition within a join.
*/
protected def canEvaluateWithinJoin(expr: Expression): Boolean = expr match {
// Non-deterministic expressions are not allowed as join conditions.
case e if !e.deterministic => false
case _: ListQuery | _: Exists =>
// A ListQuery defines the query which we want to search in an IN subquery expression.
// Currently the only way to evaluate an IN subquery is to convert it to a
// LeftSemi/LeftAnti/ExistenceJoin by `RewritePredicateSubquery` rule.
// It cannot be evaluated as part of a Join operator.
// An Exists shouldn't be pushed into a Join operator either.
false
case e: SubqueryExpression =>
// non-correlated subquery will be replaced as literal
e.children.isEmpty
case a: AttributeReference => true
case e: Unevaluable => false
case e => e.children.forall(canEvaluateWithinJoin)
}
}
@ExpressionDescription(
usage = "_FUNC_ expr - Logical not.")
case class Not(child: Expression)
extends UnaryExpression with Predicate with ImplicitCastInputTypes with NullIntolerant {
override def toString: String = s"NOT $child"
override def inputTypes: Seq[DataType] = Seq(BooleanType)
protected override def nullSafeEval(input: Any): Any = !input.asInstanceOf[Boolean]
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, c => s"!($c)")
}
override def sql: String = s"(NOT ${child.sql})"
}
/**
* Evaluates to `true` if `values` are returned in `query`'s result set.
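*
* For example, a filter such as `(a, b) IN (SELECT x, y FROM t)` is represented as
* `InSubquery(Seq(a, b), ListQuery(...))` (a sketch; the exact plan shape depends on the analyzer).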
*/
case class InSubquery(values: Seq[Expression], query: ListQuery)
extends Predicate with Unevaluable {
@transient private lazy val value: Expression = if (values.length > 1) {
CreateNamedStruct(values.zipWithIndex.flatMap {
case (v: NamedExpression, _) => Seq(Literal(v.name), v)
case (v, idx) => Seq(Literal(s"_$idx"), v)
})
} else {
values.head
}
override def checkInputDataTypes(): TypeCheckResult = {
if (values.length != query.childOutputs.length) {
TypeCheckResult.TypeCheckFailure(
s"""
|The number of columns in the left hand side of an IN subquery does not match the
|number of columns in the output of subquery.
|#columns in left hand side: ${values.length}.
|#columns in right hand side: ${query.childOutputs.length}.
|Left side columns:
|[${values.map(_.sql).mkString(", ")}].
|Right side columns:
|[${query.childOutputs.map(_.sql).mkString(", ")}].""".stripMargin)
} else if (!DataType.equalsStructurally(
query.dataType, value.dataType, ignoreNullability = true)) {
val mismatchedColumns = values.zip(query.childOutputs).flatMap {
case (l, r) if l.dataType != r.dataType =>
Seq(s"(${l.sql}:${l.dataType.catalogString}, ${r.sql}:${r.dataType.catalogString})")
case _ => None
}
TypeCheckResult.TypeCheckFailure(
s"""
|The data type of one or more elements in the left hand side of an IN subquery
|is not compatible with the data type of the output of the subquery
|Mismatched columns:
|[${mismatchedColumns.mkString(", ")}]
|Left side:
|[${values.map(_.dataType.catalogString).mkString(", ")}].
|Right side:
|[${query.childOutputs.map(_.dataType.catalogString).mkString(", ")}].""".stripMargin)
} else {
TypeUtils.checkForOrderingExpr(value.dataType, s"function $prettyName")
}
}
override def children: Seq[Expression] = values :+ query
override def nullable: Boolean = children.exists(_.nullable)
override def foldable: Boolean = children.forall(_.foldable)
override def toString: String = s"$value IN ($query)"
override def sql: String = s"(${value.sql} IN (${query.sql}))"
}
/**
* Evaluates to `true` if `list` contains `value`.
*/
// scalastyle:off line.size.limit
@ExpressionDescription(
usage = "expr1 _FUNC_(expr2, expr3, ...) - Returns true if `expr1` equals any valN.",
arguments = """
Arguments:
* expr1, expr2, expr3, ... - the arguments must be same type.
""",
examples = """
Examples:
> SELECT 1 _FUNC_(1, 2, 3);
true
> SELECT 1 _FUNC_(2, 3, 4);
false
> SELECT named_struct('a', 1, 'b', 2) _FUNC_(named_struct('a', 1, 'b', 1), named_struct('a', 1, 'b', 3));
false
> SELECT named_struct('a', 1, 'b', 2) _FUNC_(named_struct('a', 1, 'b', 2), named_struct('a', 1, 'b', 3));
true
""")
// scalastyle:on line.size.limit
case class In(value: Expression, list: Seq[Expression]) extends Predicate {
require(list != null, "list should not be null")
override def checkInputDataTypes(): TypeCheckResult = {
val mismatchOpt = list.find(l => !DataType.equalsStructurally(l.dataType, value.dataType,
ignoreNullability = true))
if (mismatchOpt.isDefined) {
TypeCheckResult.TypeCheckFailure(s"Arguments must be same type but were: " +
s"${value.dataType.catalogString} != ${mismatchOpt.get.dataType.catalogString}")
} else {
TypeUtils.checkForOrderingExpr(value.dataType, s"function $prettyName")
}
}
override def children: Seq[Expression] = value +: list
lazy val inSetConvertible = list.forall(_.isInstanceOf[Literal])
private lazy val ordering = TypeUtils.getInterpretedOrdering(value.dataType)
override def nullable: Boolean = children.exists(_.nullable)
override def foldable: Boolean = children.forall(_.foldable)
override def toString: String = s"$value IN ${list.mkString("(", ",", ")")}"
override def eval(input: InternalRow): Any = {
val evaluatedValue = value.eval(input)
if (evaluatedValue == null) {
null
} else {
var hasNull = false
list.foreach { e =>
val v = e.eval(input)
if (v == null) {
hasNull = true
} else if (ordering.equiv(v, evaluatedValue)) {
return true
}
}
if (hasNull) {
null
} else {
false
}
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val javaDataType = CodeGenerator.javaType(value.dataType)
val valueGen = value.genCode(ctx)
val listGen = list.map(_.genCode(ctx))
// inTmpResult has 3 possible values:
// -1 means no matches found and there is at least one value in the list evaluated to null
val HAS_NULL = -1
// 0 means no matches found and all values in the list are not null
val NOT_MATCHED = 0
// 1 means one value in the list is matched
val MATCHED = 1
val tmpResult = ctx.freshName("inTmpResult")
val valueArg = ctx.freshName("valueArg")
// All the blocks are meant to be inside a do { ... } while (false); loop.
// The evaluation of variables can be stopped when we find a matching value.
val listCode = listGen.map(x =>
s"""
|${x.code}
|if (${x.isNull}) {
| $tmpResult = $HAS_NULL; // ${ev.isNull} = true;
|} else if (${ctx.genEqual(value.dataType, valueArg, x.value)}) {
| $tmpResult = $MATCHED; // ${ev.isNull} = false; ${ev.value} = true;
| continue;
|}
""".stripMargin)
val codes = ctx.splitExpressionsWithCurrentInputs(
expressions = listCode,
funcName = "valueIn",
extraArguments = (javaDataType, valueArg) :: (CodeGenerator.JAVA_BYTE, tmpResult) :: Nil,
returnType = CodeGenerator.JAVA_BYTE,
makeSplitFunction = body =>
s"""
|do {
| $body
|} while (false);
|return $tmpResult;
""".stripMargin,
foldFunctions = _.map { funcCall =>
s"""
|$tmpResult = $funcCall;
|if ($tmpResult == $MATCHED) {
| continue;
|}
""".stripMargin
}.mkString("\n"))
ev.copy(code =
code"""
|${valueGen.code}
|byte $tmpResult = $HAS_NULL;
|if (!${valueGen.isNull}) {
| $tmpResult = $NOT_MATCHED;
| $javaDataType $valueArg = ${valueGen.value};
| do {
| $codes
| } while (false);
|}
|final boolean ${ev.isNull} = ($tmpResult == $HAS_NULL);
|final boolean ${ev.value} = ($tmpResult == $MATCHED);
""".stripMargin)
}
override def sql: String = {
val valueSQL = value.sql
val listSQL = list.map(_.sql).mkString(", ")
s"($valueSQL IN ($listSQL))"
}
}
/**
* Optimized version of In clause, when all filter values of In clause are
* static.
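*
* A sketch of the rewrite (typically performed by the optimizer's `OptimizeIn` rule once the
* list of literals is large enough; the threshold is configuration-dependent):
* {{{
*   In(attr, Seq(Literal(1), Literal(2), Literal(3)))  ==>  InSet(attr, Set(1, 2, 3))
* }}}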
*/
case class InSet(child: Expression, hset: Set[Any]) extends UnaryExpression with Predicate {
require(hset != null, "hset could not be null")
override def toString: String = s"$child INSET ${hset.mkString("(", ",", ")")}"
@transient private[this] lazy val hasNull: Boolean = hset.contains(null)
override def nullable: Boolean = child.nullable || hasNull
protected override def nullSafeEval(value: Any): Any = {
if (set.contains(value)) {
true
} else if (hasNull) {
null
} else {
false
}
}
@transient lazy val set: Set[Any] = child.dataType match {
case _: AtomicType => hset
case _: NullType => hset
case _ =>
// for structs use interpreted ordering to be able to compare UnsafeRows with non-UnsafeRows
TreeSet.empty(TypeUtils.getInterpretedOrdering(child.dataType)) ++ hset
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val setTerm = ctx.addReferenceObj("set", set)
val childGen = child.genCode(ctx)
val setIsNull = if (hasNull) {
s"${ev.isNull} = !${ev.value};"
} else {
""
}
ev.copy(code =
code"""
|${childGen.code}
|${CodeGenerator.JAVA_BOOLEAN} ${ev.isNull} = ${childGen.isNull};
|${CodeGenerator.JAVA_BOOLEAN} ${ev.value} = false;
|if (!${ev.isNull}) {
| ${ev.value} = $setTerm.contains(${childGen.value});
| $setIsNull
|}
""".stripMargin)
}
override def sql: String = {
val valueSQL = child.sql
val listSQL = hset.toSeq.map(Literal(_).sql).mkString(", ")
s"($valueSQL IN ($listSQL))"
}
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Logical AND.")
case class And(left: Expression, right: Expression) extends BinaryOperator with Predicate {
override def inputType: AbstractDataType = BooleanType
override def symbol: String = "&&"
override def sqlOperator: String = "AND"
override def eval(input: InternalRow): Any = {
val input1 = left.eval(input)
if (input1 == false) {
false
} else {
val input2 = right.eval(input)
if (input2 == false) {
false
} else {
if (input1 != null && input2 != null) {
true
} else {
null
}
}
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val eval1 = left.genCode(ctx)
val eval2 = right.genCode(ctx)
// The result should be `false`, if any of them is `false` whenever the other is null or not.
if (!left.nullable && !right.nullable) {
ev.copy(code = code"""
${eval1.code}
boolean ${ev.value} = false;
if (${eval1.value}) {
${eval2.code}
${ev.value} = ${eval2.value};
}""", isNull = FalseLiteral)
} else {
ev.copy(code = code"""
${eval1.code}
boolean ${ev.isNull} = false;
boolean ${ev.value} = false;
if (!${eval1.isNull} && !${eval1.value}) {
} else {
${eval2.code}
if (!${eval2.isNull} && !${eval2.value}) {
} else if (!${eval1.isNull} && !${eval2.isNull}) {
${ev.value} = true;
} else {
${ev.isNull} = true;
}
}
""")
}
}
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Logical OR.")
case class Or(left: Expression, right: Expression) extends BinaryOperator with Predicate {
override def inputType: AbstractDataType = BooleanType
override def symbol: String = "||"
override def sqlOperator: String = "OR"
override def eval(input: InternalRow): Any = {
val input1 = left.eval(input)
if (input1 == true) {
true
} else {
val input2 = right.eval(input)
if (input2 == true) {
true
} else {
if (input1 != null && input2 != null) {
false
} else {
null
}
}
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val eval1 = left.genCode(ctx)
val eval2 = right.genCode(ctx)
// The result should be `true`, if any of them is `true` whenever the other is null or not.
if (!left.nullable && !right.nullable) {
ev.isNull = FalseLiteral
ev.copy(code = code"""
${eval1.code}
boolean ${ev.value} = true;
if (!${eval1.value}) {
${eval2.code}
${ev.value} = ${eval2.value};
}""", isNull = FalseLiteral)
} else {
ev.copy(code = code"""
${eval1.code}
boolean ${ev.isNull} = false;
boolean ${ev.value} = true;
if (!${eval1.isNull} && ${eval1.value}) {
} else {
${eval2.code}
if (!${eval2.isNull} && ${eval2.value}) {
} else if (!${eval1.isNull} && !${eval2.isNull}) {
${ev.value} = false;
} else {
${ev.isNull} = true;
}
}
""")
}
}
}
abstract class BinaryComparison extends BinaryOperator with Predicate {
// Note that we need to give a superset of allowable input types since orderable types are not
// finitely enumerable. The allowable types are checked below by checkInputDataTypes.
override def inputType: AbstractDataType = AnyDataType
override def checkInputDataTypes(): TypeCheckResult = super.checkInputDataTypes() match {
case TypeCheckResult.TypeCheckSuccess =>
TypeUtils.checkForOrderingExpr(left.dataType, this.getClass.getSimpleName)
case failure => failure
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
if (CodeGenerator.isPrimitiveType(left.dataType)
&& left.dataType != BooleanType // java boolean doesn't support > or < operator
&& left.dataType != FloatType
&& left.dataType != DoubleType) {
// faster version
defineCodeGen(ctx, ev, (c1, c2) => s"$c1 $symbol $c2")
} else {
defineCodeGen(ctx, ev, (c1, c2) => s"${ctx.genComp(left.dataType, c1, c2)} $symbol 0")
}
}
protected lazy val ordering: Ordering[Any] = TypeUtils.getInterpretedOrdering(left.dataType)
}
object BinaryComparison {
def unapply(e: BinaryComparison): Option[(Expression, Expression)] = Some((e.left, e.right))
}
/** An extractor that matches both standard 3VL equality and null-safe equality. */
object Equality {
def unapply(e: BinaryComparison): Option[(Expression, Expression)] = e match {
case EqualTo(l, r) => Some((l, r))
case EqualNullSafe(l, r) => Some((l, r))
case _ => None
}
}
// TODO: although map type is not orderable, technically map type should be able to be used
// in equality comparison
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` equals `expr2`, or false otherwise.",
arguments = """
Arguments:
* expr1, expr2 - the two expressions must be of the same type, or can be cast to a common type,
and must be a type that can be used in equality comparison. Map type is not supported.
For complex types such array/struct, the data types of fields must be orderable.
""",
examples = """
Examples:
> SELECT 2 _FUNC_ 2;
true
> SELECT 1 _FUNC_ '1';
true
> SELECT true _FUNC_ NULL;
NULL
> SELECT NULL _FUNC_ NULL;
NULL
""")
case class EqualTo(left: Expression, right: Expression)
extends BinaryComparison with NullIntolerant {
override def symbol: String = "="
protected override def nullSafeEval(left: Any, right: Any): Any = ordering.equiv(left, right)
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, (c1, c2) => ctx.genEqual(left.dataType, c1, c2))
}
}
// TODO: although map type is not orderable, technically map type should be able to be used
// in equality comparison
@ExpressionDescription(
usage = """
expr1 _FUNC_ expr2 - Returns the same result as the EQUAL(=) operator for non-null operands,
but returns true if both are null, false if one of them is null.
""",
arguments = """
Arguments:
* expr1, expr2 - the two expressions must be of the same type, or can be cast to a common type,
and must be a type that can be used in equality comparison. Map type is not supported.
For complex types such array/struct, the data types of fields must be orderable.
""",
examples = """
Examples:
> SELECT 2 _FUNC_ 2;
true
> SELECT 1 _FUNC_ '1';
true
> SELECT true _FUNC_ NULL;
false
> SELECT NULL _FUNC_ NULL;
true
""")
case class EqualNullSafe(left: Expression, right: Expression) extends BinaryComparison {
override def symbol: String = "<=>"
override def nullable: Boolean = false
override def eval(input: InternalRow): Any = {
val input1 = left.eval(input)
val input2 = right.eval(input)
if (input1 == null && input2 == null) {
true
} else if (input1 == null || input2 == null) {
false
} else {
ordering.equiv(input1, input2)
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val eval1 = left.genCode(ctx)
val eval2 = right.genCode(ctx)
val equalCode = ctx.genEqual(left.dataType, eval1.value, eval2.value)
ev.copy(code = eval1.code + eval2.code + code"""
boolean ${ev.value} = (${eval1.isNull} && ${eval2.isNull}) ||
(!${eval1.isNull} && !${eval2.isNull} && $equalCode);""", isNull = FalseLiteral)
}
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is less than `expr2`.",
arguments = """
Arguments:
* expr1, expr2 - the two expressions must be of the same type, or can be cast to a common type,
and must be a type that can be ordered. For example, map type is not orderable, so it
is not supported. For complex types such array/struct, the data types of fields must
be orderable.
""",
examples = """
Examples:
> SELECT 1 _FUNC_ 2;
true
> SELECT 1.1 _FUNC_ '1';
false
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-07-30 04:17:52');
false
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-08-01 04:17:52');
true
> SELECT 1 _FUNC_ NULL;
NULL
""")
case class LessThan(left: Expression, right: Expression)
extends BinaryComparison with NullIntolerant {
override def symbol: String = "<"
protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.lt(input1, input2)
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is less than or equal to `expr2`.",
arguments = """
Arguments:
* expr1, expr2 - the two expressions must be of the same type, or can be cast to a common type,
and must be a type that can be ordered. For example, map type is not orderable, so it
is not supported. For complex types such array/struct, the data types of fields must
be orderable.
""",
examples = """
Examples:
> SELECT 2 _FUNC_ 2;
true
> SELECT 1.0 _FUNC_ '1';
true
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-07-30 04:17:52');
true
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-08-01 04:17:52');
true
> SELECT 1 _FUNC_ NULL;
NULL
""")
case class LessThanOrEqual(left: Expression, right: Expression)
extends BinaryComparison with NullIntolerant {
override def symbol: String = "<="
protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.lteq(input1, input2)
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is greater than `expr2`.",
arguments = """
Arguments:
* expr1, expr2 - the two expressions must be of the same type, or can be cast to a common type,
and must be a type that can be ordered. For example, map type is not orderable, so it
is not supported. For complex types such array/struct, the data types of fields must
be orderable.
""",
examples = """
Examples:
> SELECT 2 _FUNC_ 1;
true
> SELECT 2 _FUNC_ '1.1';
true
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-07-30 04:17:52');
false
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-08-01 04:17:52');
false
> SELECT 1 _FUNC_ NULL;
NULL
""")
case class GreaterThan(left: Expression, right: Expression)
extends BinaryComparison with NullIntolerant {
override def symbol: String = ">"
protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.gt(input1, input2)
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is greater than or equal to `expr2`.",
arguments = """
Arguments:
* expr1, expr2 - the two expressions must be of the same type, or can be cast to a common type,
and must be a type that can be ordered. For example, map type is not orderable, so it
is not supported. For complex types such array/struct, the data types of fields must
be orderable.
""",
examples = """
Examples:
> SELECT 2 _FUNC_ 1;
true
> SELECT 2.0 _FUNC_ '2.1';
false
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-07-30 04:17:52');
true
> SELECT to_date('2009-07-30 04:17:52') _FUNC_ to_date('2009-08-01 04:17:52');
false
> SELECT 1 _FUNC_ NULL;
NULL
""")
case class GreaterThanOrEqual(left: Expression, right: Expression)
extends BinaryComparison with NullIntolerant {
override def symbol: String = ">="
protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.gteq(input1, input2)
}
| michalsenkyr/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala | Scala | apache-2.0 | 26,194 |
import org.scalatest.Tag
package object sbtmarathon {
object FunctionalTest extends Tag("sbtmarathon.FunctionalTest")
} // end package object
| Tapad/sbt-marathon | marathon/src/test/scala/sbtmarathon/package.scala | Scala | bsd-3-clause | 145 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.experiments.yarn.appmaster
import com.typesafe.config.Config
import org.apache.gearpump.cluster.main.{Master, Worker}
import org.apache.gearpump.experiments.yarn.Constants._
import org.apache.gearpump.transport.HostPort
import org.apache.gearpump.util.Constants
/** Command to start a YARN container */
trait Command {
def get: String
override def toString: String = get
}
abstract class AbstractCommand extends Command {
protected def config: Config
def version: String
def classPath: Array[String] = {
Array(
s"conf",
s"pack/$version/conf",
s"pack/$version/lib/daemon/*",
s"pack/$version/lib/*"
)
}
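/**
 * Assembles the full container launch command, roughly
 * `<java> -cp <classPath>:$CLASSPATH <properties> <mainClazz> <cliOpts> 2>&1 | tee -a <logDir>/stderr`.
 */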
protected def buildCommand(
java: String, properties: Array[String], mainClazz: String, cliOpts: Array[String])
: String = {
val exe = config.getString(java)
s"$exe -cp ${classPath.mkString(":")}:" +
"$CLASSPATH " + properties.mkString(" ") +
s" $mainClazz ${cliOpts.mkString(" ")} 2>&1 | /usr/bin/tee -a ${LOG_DIR_EXPANSION_VAR}/stderr"
}
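/** Returns the runnable class name of a Scala `object` by dropping its trailing `$`. */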
protected def clazz(any: AnyRef): String = {
val name = any.getClass.getName
if (name.endsWith("$")) {
name.dropRight(1)
} else {
name
}
}
}
case class MasterCommand(config: Config, version: String, masterAddr: HostPort)
extends AbstractCommand {
def get: String = {
val masterArguments = Array(s"-ip ${masterAddr.host}", s"-port ${masterAddr.port}")
val properties = Array(
s"-D${Constants.GEARPUMP_CLUSTER_MASTERS}.0=${masterAddr.host}:${masterAddr.port}",
s"-D${Constants.GEARPUMP_HOSTNAME}=${masterAddr.host}",
s"-D${Constants.GEARPUMP_MASTER_RESOURCE_MANAGER_CONTAINER_ID}=${CONTAINER_ID}",
s"-D${Constants.GEARPUMP_HOME}=${LOCAL_DIRS}/${CONTAINER_ID}/pack/$version",
s"-D${Constants.GEARPUMP_LOG_DAEMON_DIR}=${LOG_DIR_EXPANSION_VAR}",
s"-D${Constants.GEARPUMP_LOG_APPLICATION_DIR}=${LOG_DIR_EXPANSION_VAR}")
buildCommand(MASTER_COMMAND, properties, clazz(Master), masterArguments)
}
}
case class WorkerCommand(config: Config, version: String, masterAddr: HostPort, workerHost: String)
extends AbstractCommand {
def get: String = {
val properties = Array(
s"-D${Constants.GEARPUMP_CLUSTER_MASTERS}.0=${masterAddr.host}:${masterAddr.port}",
s"-D${Constants.GEARPUMP_LOG_DAEMON_DIR}=${LOG_DIR_EXPANSION_VAR}",
s"-D${Constants.GEARPUMP_WORKER_RESOURCE_MANAGER_CONTAINER_ID}=${CONTAINER_ID}",
s"-D${Constants.GEARPUMP_HOME}=${LOCAL_DIRS}/${CONTAINER_ID}/pack/$version",
s"-D${Constants.GEARPUMP_LOG_APPLICATION_DIR}=${LOG_DIR_EXPANSION_VAR}",
s"-D${Constants.GEARPUMP_HOSTNAME}=$workerHost")
buildCommand(WORKER_COMMAND, properties, clazz(Worker), Array.empty[String])
}
}
case class AppMasterCommand(config: Config, version: String, args: Array[String])
extends AbstractCommand {
override val classPath = Array(
"conf",
s"pack/$version/conf",
s"pack/$version/dashboard",
s"pack/$version/lib/*",
s"pack/$version/lib/daemon/*",
s"pack/$version/lib/services/*",
s"pack/$version/lib/yarn/*"
)
def get: String = {
val properties = Array(
s"-D${Constants.GEARPUMP_HOME}=${LOCAL_DIRS}/${CONTAINER_ID}/pack/$version",
s"-D${Constants.GEARPUMP_FULL_SCALA_VERSION}=$version",
s"-D${Constants.GEARPUMP_LOG_DAEMON_DIR}=${LOG_DIR_EXPANSION_VAR}",
s"-D${Constants.GEARPUMP_LOG_APPLICATION_DIR}=${LOG_DIR_EXPANSION_VAR}",
s"-D${Constants.GEARPUMP_HOSTNAME}=${NODEMANAGER_HOST}")
val arguments = Array(s"") ++ args
buildCommand(APPMASTER_COMMAND, properties, clazz(YarnAppMaster),
arguments)
}
}
| manuzhang/incubator-gearpump | experiments/yarn/src/main/scala/org/apache/gearpump/experiments/yarn/appmaster/Command.scala | Scala | apache-2.0 | 4,478 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel.scala
import org.scalatest.{MustMatchers, FunSpec}
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith
import org.apache.camel.{Exchange,Processor,Predicate,RuntimeTransformException}
@RunWith(classOf[JUnitRunner])
class RouteSpec extends FunSpec with CamelSpec with MustMatchers with Preamble {
describe("Processor/DSL") {
it("should process in") {
def f(x: Int): Int = x+1
val e = processExchange(in(classOf[Int]) {f _}) { _.in = 1 }
e.in[Int] must equal(2)
}
it("should process in -> in") {
val e = processExchange(in(classOf[Int]) {1+} .toIn) { _.in = 1 }
e.in must equal(2)
}
it("should process in -> out") {
val e = processExchange(in(classOf[Int]) {1+} .toOut) { _.in = 1 }
e.out must equal(2)
}
it("should process out ->") {
val e = processExchange(out(classOf[Int]) {1+}) { _.out = 1 }
e.in must equal(2)
}
it("should process out -> in") {
val e = processExchange(out(classOf[Int]) {1+} .toIn) { _.out = 1 }
e.in must equal(2)
}
it("should process out -> out") {
val e = processExchange(out(classOf[Int]) {1+} .toOut) { _.out = 1 }
e.out must equal(2)
}
it("should not modify exchange when function returns Unit") {
def fn(i: Int) { }
val e = processExchange(in(classOf[Int]) {fn _}) { _.in = 1}
e.in must equal(1)
}
it("should raise exception when trying to set In when function returns Unit") {
def fn(i: Int) { }
a [RuntimeTransformException] should be thrownBy {
processExchange(in(classOf[Int]) {fn _} .toIn) { _.in = 1}
}
}
it("should raise exception when trying to set Out when function returns Unit") {
def fn(i: Int) { }
a [RuntimeTransformException] should be thrownBy {
processExchange(in(classOf[Int]) {fn _} .toOut) { _.in = 1}
}
}
}
describe("Predicate/DSL") {
it("should filter in") {
filterExchange(in(classOf[Int]) {1==}) { _.in = 1 } must equal(true)
}
it("should raise exception when trying to filter when function returns Unit") {
def fn(i: Int) { }
a [RuntimeTransformException] should be thrownBy {
filterExchange(in(classOf[Int]) {fn _}) { _.in = 1}
}
}
}
describe("PartialFunction/DSL") {
sealed trait AlgoType
case object LeafOne extends AlgoType
case object LeafTwo extends AlgoType
it("should leave message body if it's not in function domain") {
val p: Processor = in(classOf[AlgoType]) collect {
case LeafOne => LeafTwo
}
val e = processExchange(p) { _.in = LeafTwo }
e.in[AlgoType] must equal(LeafTwo)
}
it("should process body if it's in function domain") {
val p: Processor = in(classOf[AlgoType]) collect {
case LeafOne => LeafTwo
}
val e = processExchange(p) { _.in = LeafOne }
e.in[AlgoType] must equal(LeafTwo)
}
it("should filter") {
val p: Predicate = in(classOf[AlgoType]) collect {
case LeafOne => true
}
filterExchange(p) { _.in = LeafOne } must equal(true)
filterExchange(p) { _.in = LeafTwo } must equal(false)
}
}
def processExchange(p: Processor)(pre: Exchange => Unit) = {
val e = createExchange
pre(e)
p.process(e)
e
}
def filterExchange(f: Predicate)(pre: Exchange => Unit) = {
val e = createExchange
pre(e)
f.matches(e)
}
}
| YMartsynkevych/camel | components/camel-scala/src/test/scala/org/apache/camel/scala/routeSpec.scala | Scala | apache-2.0 | 4,288 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.tensorflow
import com.google.protobuf.ByteString
import com.spotify.scio.testing.ScioIOSpec
import org.tensorflow.proto.example._
import scala.jdk.CollectionConverters._
object TFSequenceExampleIOTest {
case class Record(i: Int, ss: Seq[String])
def toSequenceExample(r: Record): SequenceExample = {
val context = Features
.newBuilder()
.putFeature(
"i",
Feature
.newBuilder()
.setInt64List(Int64List.newBuilder().addValue(r.i).build())
.build()
)
.build()
val fs = r.ss.map { s =>
Feature
.newBuilder()
.setBytesList(
BytesList
.newBuilder()
.addValue(ByteString.copyFromUtf8(s))
.build()
)
.build()
}
val featureLists = FeatureLists
.newBuilder()
.putFeatureList("ss", FeatureList.newBuilder().addAllFeature(fs.asJava).build())
.build()
SequenceExample
.newBuilder()
.setContext(context)
.setFeatureLists(featureLists)
.build()
}
}
class TFSequenceExampleIOTest extends ScioIOSpec {
import TFSequenceExampleIOTest._
"TFSequenceExampleIO" should "work" in {
val xs = (1 to 100).map(x => toSequenceExample(Record(x, Seq(x.toString, x.toString))))
testTap(xs)(_.saveAsTfRecordFile(_))(".tfrecords")
testJobTest(xs)(TFSequenceExampleIO(_))(_.tfRecordSequenceExampleFile(_))(
_.saveAsTfRecordFile(_)
)
}
}
| spotify/scio | scio-tensorflow/src/test/scala/com/spotify/scio/tensorflow/TFSequenceExampleIOTest.scala | Scala | apache-2.0 | 2,084 |
package uk.gov.dvla.service.testing.dropwizard
import com.google.common.base.Strings
import com.google.common.collect.ImmutableMap
import com.massrelevance.dropwizard.ScalaApplication
import com.sun.jersey.api.client.Client
import io.dropwizard.Configuration
import io.dropwizard.cli.ServerCommand
import io.dropwizard.client.JerseyClientBuilder
import io.dropwizard.lifecycle.ServerLifecycleListener
import io.dropwizard.setup.{Bootstrap, Environment}
import net.sourceforge.argparse4j.inf.Namespace
import org.eclipse.jetty.server.{ServerConnector, Server}
import org.scalatest.{BeforeAndAfterAll, Suite}
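/**
 * Boots the Dropwizard application once for the whole suite and exposes `httpClient` for calls
 * against it. Usage sketch (the spec, config and application names are illustrative):
 * {{{
 * class MyServiceSpec extends WordSpec with DropwizardSpec[MyConfig] {
 *   val configPath = "src/test/resources/test.yml"
 *   val application = MyApplication
 * }
 * }}}
 */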
trait DropwizardSpec[C <: Configuration] extends Suite with BeforeAndAfterAll {
private var jettyServer: Option[Server] = None
val configPath: String
val application: ScalaApplication[C]
val port: Int = 6666
var httpClient: Client = _
def ifJettyNotRunning(block: => Unit) {
if (jettyServer.isEmpty) {
block
}
}
override protected def beforeAll(): Unit = ifJettyNotRunning {
System.setProperty("dw.server.applicationConnectors[0].port", port.toString)
val bootstrap = new Bootstrap[C](application) {
override def run(configuration: C,environment: Environment): Unit = {
println("Running Bootstrap")
environment.lifecycle.addServerLifecycleListener(new ServerLifecycleListener {
def serverStarted(server: Server) {
println("Starting servers")
jettyServer = Some(server)
}
})
super.run(configuration, environment)
httpClient = new JerseyClientBuilder(environment).build("testHttpClient")
}
}
application.initialize(bootstrap)
val command = new ServerCommand[C](application)
val file = ImmutableMap.builder[String, AnyRef]
if (!Strings.isNullOrEmpty(configPath)) {
file.put("file", configPath)
}
val namespace = new Namespace(file.build)
command.run(bootstrap, namespace)
}
override protected def afterAll(): Unit = {
val props = System.getProperties.propertyNames
while (props.hasMoreElements) {
val keyString: String = props.nextElement.asInstanceOf[String]
if (keyString.startsWith("dw.")) {
System.clearProperty(keyString)
}
}
jettyServer.foreach(_.stop())
}
}
| dvla/sdl-opensource | test-helpers/src/main/scala/uk.gov.dvla.service.testing.dropwizard/DropwizardSpec.scala | Scala | mit | 2,300 |
package dotty.tools.dotc.core
import dotty.tools.tasty.TastyFormat
/** The possible tags of a NameKind */
object NameTags extends TastyFormat.NameTags {
final val FLATTENED = 5 // A flat name, generated by Flatten
final val TRAITSETTER = 6 // A Scala-2 trait setter, generated by AugmentScala2Traits
final val OUTERSELECT = 13 // A name `<num>_outer`, used by the inliner to indicate an
// outer accessor that will be filled in by ExplicitOuter.
// <num> indicates the number of hops needed to select the outer field.
  final val PROTECTEDACCESSOR = 24 // The name of a protected accessor `protected$<name>` created by ProtectedAccessors.
final val INITIALIZER = 26 // A mixin initializer method
final val FIELD = 29 // Used by Memoize to tag the name of a class member field.
final val EXTMETH = 30 // Used by ExtensionMethods for the name of an extension method
// implementing a value class method.
final val ADAPTEDCLOSURE = 31 // Used in Erasure to adapt closures over primitive types.
final val IMPLMETH = 32 // Used to define methods in implementation classes
// (can probably be removed).
final val PARAMACC = 33 // Used for a private parameter alias
def nameTagToString(tag: Int): String = tag match {
case UTF8 => "UTF8"
case QUALIFIED => "QUALIFIED"
case FLATTENED => "FLATTENED"
case EXPANDED => "EXPANDED"
case EXPANDPREFIX => "EXPANDPREFIX"
case TRAITSETTER => "TRAITSETTER"
case UNIQUE => "UNIQUE"
case DEFAULTGETTER => "DEFAULTGETTER"
case OUTERSELECT => "OUTERSELECT"
case SUPERACCESSOR => "SUPERACCESSOR"
case INLINEACCESSOR => "INLINEACCESSOR"
case PROTECTEDACCESSOR => "PROTECTEDACCESSOR"
case INITIALIZER => "INITIALIZER"
case FIELD => "FIELD"
case EXTMETH => "EXTMETH"
case IMPLMETH => "IMPLMETH"
case PARAMACC => "PARAMACC"
case ADAPTEDCLOSURE => "ADAPTEDCLOSURE"
case OBJECTCLASS => "OBJECTCLASS"
case SIGNED => "SIGNED"
}
}
| som-snytt/dotty | compiler/src/dotty/tools/dotc/core/NameTags.scala | Scala | apache-2.0 | 2,137 |
package com.ctask.client
import java.nio.file.{Files, Paths, StandardOpenOption}
import com.ctask.client.util.ClientProperties
import play.api.libs.json.Json
import scala.util.{Failure, Success, Try}
/**
* Handles persistence for the task list in use.
*/
object TaskListInUse {
implicit val taskListInUseJsonRead = Json.reads[TaskListInUse]
implicit val taskListWrites = Json.writes[TaskListInUse]
def taskListInUsePath: String = s"${ClientProperties.rootCtaskStorageFolder}/taskListInUse"
/**
* Persists the current task list in use.
* @param taskListName the task list in use to be persisted
*/
def persist(taskListName: String): Unit = {
Try {
val jsValue = Json.toJson(TaskListInUse(taskListName))
val path = Paths.get(taskListInUsePath)
Files.write(path, jsValue.toString().getBytes(), StandardOpenOption.CREATE, StandardOpenOption.TRUNCATE_EXISTING)
taskListName
} match {
case Success(_) =>
case Failure(ex) => // TODO: cannot log here for now. Log to log file when proper logging is implemented.
}
}
/**
* Load the last task list in use.
* @return the name of the last task list in use
*/
def load: Option[String] = {
Try {
val path = Paths.get(taskListInUsePath)
val jsonString = String.join("", Files.readAllLines(path))
Json.parse(jsonString).validate[TaskListInUse].get
} match {
case Success(taskListInUse) => Some(taskListInUse.name)
case Failure(ex) =>
// TODO: cannot log here for now. Log to log file when proper logging is implemented.
None
}
}
}
case class TaskListInUse(name: String)
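// Illustrative sketch (not part of the original source): a minimal round trip
// through the API above. The task-list name "groceries" is hypothetical.
//
//   TaskListInUse.persist("groceries")               // writes taskListInUse as JSON
//   val current: Option[String] = TaskListInUse.load // Some("groceries") if the write succeeded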
| modsrm/ctask | client/src/main/scala/com/ctask/client/TaskListInUse.scala | Scala | gpl-3.0 | 1,666 |
package org.jetbrains.plugins.scala.annotator
import com.intellij.codeInsight.intention.IntentionAction
import org.jetbrains.plugins.scala.ExtensionPointDeclaration
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReferenceElement
/**
* @author Pavel Fatin
*/
abstract class UnresolvedReferenceFixProvider {
def fixesFor(reference: ScReferenceElement): Seq[IntentionAction]
}
object UnresolvedReferenceFixProvider
extends ExtensionPointDeclaration[UnresolvedReferenceFixProvider]("org.intellij.scala.unresolvedReferenceFixProvider")
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/annotator/UnresolvedReferenceFixProvider.scala | Scala | apache-2.0 | 550 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.config
import java.io.File
import java.nio.file.{ Path, Paths }
import scala.collection.breakOut
import scala.collection.JavaConverters._
import com.typesafe.config._
import org.apache.commons.vfs2.FileObject
import org.ensime.api._
import org.ensime.util.file._
import org.ensime.util.path._
import org.ensime.util.ensimefile._
import org.ensime.vfs.`package`.EnsimeVFS
package object richconfig {
// avoids a pureconfig / ficus dependency...
def parseServerConfig(c: Config): EnsimeServerConfig = {
EnsimeServerConfig(
RawFile(Paths.get(c.getString("ensime.config")).canon),
parseServerImportsConfig(c.getConfig("ensime.imports")),
c.getBoolean("ensime.explode.on.disconnect"),
c.getBoolean("ensime.exit"),
c.getString("ensime.protocol"),
c.getBoolean("ensime.exitAfterIndex"),
c.getBoolean("ensime.disableClassMonitoring"),
LegacyConfig(c.getBoolean("ensime.legacy.jarurls"))
)
}
private def parseServerImportsConfig(c: Config): ImportsConfig = {
ImportsConfig(
c.getString("strategy"),
c.getStringList("groups").asScala.toList,
c.getStringList("wildcards").asScala.toSet,
c.getInt("maxIndividualImports"),
c.getStringList("collapseExclude").asScala.toSet
)
}
implicit class RichEnsimeConfig(val c: EnsimeConfig) extends AnyVal {
// doesn't do the transitive lookups
def classpath: List[File] =
(targetFiles(c) ::: libraryJarFiles(c)).distinct
def targets: List[File] = targetFiles(c)
def referenceSourceJars: Set[File] = (javaSourceFiles(c) ++ librarySourceFiles(c))(breakOut)
def lookup(id: EnsimeProjectId) = c.projects.find(_.id == id).get
def allDocJars: Set[File] = libraryDocFiles(c).toSet
def scalaLibrary: Option[File] =
libraryJarFiles(c).find { f =>
val name = f.getName
name.startsWith("scala-library") && name.endsWith(".jar")
}
def findProject(f: FileObject)(implicit vfs: EnsimeVFS) = {
val filePath = f.getName.getPath
c.projects collectFirst {
case project if (project.sources ++ project.targets).map(rf =>
vfs.toFileObject(rf.file.toFile).getName.getPath).exists(filePath.startsWith) =>
project.id
}
}
def findProject(path: Path): Option[EnsimeProjectId] = {
c.projects collectFirst {
case project if (project.sources ++ project.targets).exists(f => path.startsWith(f.file)) => project.id
}
}
def findProject(file: EnsimeFile): Option[EnsimeProjectId] = file match {
case RawFile(file) => findProject(file)
case ArchiveFile(jar, _) => findProject(jar)
}
def findProject(file: SourceFileInfo): Option[EnsimeProjectId] = findProject(file.file)
}
implicit class RichEnsimeProject(val p: EnsimeProject) extends AnyVal {
def dependencies(implicit config: EnsimeConfig): List[EnsimeProject] =
p.depends.map(config.lookup)
def classpath(implicit config: EnsimeConfig): List[File] = {
// may not agree with the build tool (e.g. could put all targets first)
val files = (p.targets.toList ::: p.libraryJars).map(_.file.toFile)
files ::: (dependencies.flatMap(_.classpath))
}
def scalaSourceFiles: Set[RawFile] = for {
root <- p.sources
filePath <- root.file.tree
rawFile = RawFile(filePath)
if filePath.isFile && rawFile.isScala
} yield rawFile
}
private def targetFiles(c: EnsimeConfig): List[File] = c.projects.flatMap(_.targets).map(_.file.toFile)
private def libraryJarFiles(c: EnsimeConfig): List[File] = c.projects.flatMap(_.libraryJars).map(_.file.toFile)
private def librarySourceFiles(c: EnsimeConfig): List[File] = c.projects.flatMap(_.librarySources).map(_.file.toFile)
private def libraryDocFiles(c: EnsimeConfig): List[File] = c.projects.flatMap(_.libraryDocs).map(_.file.toFile)
private def javaSourceFiles(c: EnsimeConfig): List[File] = c.javaSources.map(_.file.toFile)
}
| VlachJosef/ensime-server | core/src/main/scala/org/ensime/config/richconfig.scala | Scala | gpl-3.0 | 4,121 |
package chapterx.answer12
import scratchpad.monoid._
//object Main {
// def main(args: Array[String]): Unit = {
//
//// Foldable[List]
////
// val foldable: Foldable[List] = new Foldable[List] {}
//
//
//// val listFoldable: Foldable[List] = new ListFoldable()
//
// ListFoldable.
// }
//}
trait Foldable[F[_]] {
def foldRight[A, B](as: F[A])(z: B)(f: (A, B) => B): B =
foldMap(as)(f.curried)(endoMonoid[B])(z)
def foldLeft[A, B](as: F[A])(z: B)(f: (B, A) => B): B =
foldMap(as)(a => (b: B) => f(b, a))(dual(endoMonoid[B]))(z)
def foldMap[A, B](as: F[A])(f: A => B)(mb: Monoid[B]): B =
foldRight(as)(mb.zero)((a, b) => mb.op(f(a), b))
def concatenate[A](as: F[A])(m: Monoid[A]): A =
foldLeft(as)(m.zero)(m.op)
}
object ListFoldable extends Foldable[List] {
override def foldRight[A, B](as: List[A])(z: B)(f: (A, B) => B) =
as.foldRight(z)(f)
override def foldLeft[A, B](as: List[A])(z: B)(f: (B, A) => B) =
as.foldLeft(z)(f)
override def foldMap[A, B](as: List[A])(f: A => B)(mb: Monoid[B]): B =
foldLeft(as)(mb.zero)((b, a) => mb.op(b, f(a)))
}
object IndexedSeqFoldable extends Foldable[IndexedSeq] {
import Monoid._
override def foldRight[A, B](as: IndexedSeq[A])(z: B)(f: (A, B) => B) =
as.foldRight(z)(f)
override def foldLeft[A, B](as: IndexedSeq[A])(z: B)(f: (B, A) => B) =
as.foldLeft(z)(f)
override def foldMap[A, B](as: IndexedSeq[A])(f: A => B)(mb: Monoid[B]): B =
foldMapV(as, mb)(f)
}
object StreamFoldable extends Foldable[Stream] {
override def foldRight[A, B](as: Stream[A])(z: B)(f: (A, B) => B) =
as.foldRight(z)(f)
override def foldLeft[A, B](as: Stream[A])(z: B)(f: (B, A) => B) =
as.foldLeft(z)(f)
} | waxmittmann/fpinscala | answerkey/monoids/12.answer.scala | Scala | mit | 1,716 |
package controllers
import javax.inject.Inject
import dao.ParenthoodDAO
import models.Parenthood._
import play.api.libs.json._
import play.api.mvc._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
class ParenthoodController @Inject()(parenthoodDao: ParenthoodDAO) extends Controller {
def listParenthoods = Action.async { implicit request =>
val parenthoods: Future[Seq[Parenthood]] = parenthoodDao.all()
parenthoods map {
p => Ok(Json.toJson(p))
}
}
def getParenthood(parenthoodId: Int) = Action.async { implicit request =>
val parenthood: Future[Option[Parenthood]] = parenthoodDao.getParenthood(parenthoodId)
parenthood map {
case Some(p) => Ok(Json.toJson(p))
case None => NotFound
}
}
def updateParenthood(parenthoodId: Int) = Action.async(parse.json[Parenthood]) { implicit request =>
val parenthood: Parenthood = request.body
val affectedRowsCount: Future[Int] = parenthoodDao.updateParenthood(parenthoodId, parenthood)
affectedRowsCount map {
case 1 => Ok
case 0 => NotFound
case _ => InternalServerError
}
}
def createParenthood = Action.async(parse.json[Parenthood]) { implicit request =>
val parenthood: Parenthood = request.body
val parenthoodId: Future[Int] = parenthoodDao.createParenthood(parenthood)
parenthoodId map {
case id => Created(Json.toJson(id))
}
}
def deleteParenthood(parenthoodId: Int) = Action.async { implicit request =>
val affectedRowsCount: Future[Int] = parenthoodDao.deleteParenthood(parenthoodId)
affectedRowsCount map {
case 1 => Ok
case 0 => NotFound
case _ => InternalServerError
}
}
def getChilds(parentId: Int) = Action.async { implicit request =>
val parents: Future[Seq[Parenthood]] = parenthoodDao.getChilds(parentId)
parents map {
p => {
Ok(Json.toJson(p.map("/person/" + _.childid)))
}
}
}
def getParents(childId: Int) = Action.async { implicit request =>
val childs: Future[Seq[Parenthood]] = parenthoodDao.getParents(childId)
childs map {
p => {
Ok(Json.toJson(p.map("/person/" + _.parentid)))
}
}
}
}
| magura42/KickAppServer | app/controllers/ParenthoodController.scala | Scala | mit | 2,227 |
/**
* Copyright (c) 2013, The National Archives <[email protected]>
* http://www.nationalarchives.gov.uk
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
*/
package uk.gov.nationalarchives.dri.preingest.loader.unit.network
/**
* Replacement for Path, which works only on local file systems for remote hosts
* @param userName owner of file
* @param fileName name of file minus extensions
* @param fileSize size of file in bytes
* @param lastModified timestamp of file
* @param path path to file starting from search root
*/
//class RemotePath(val name:String, val size: Long, val lastModified: Long ) {}
class RemotePath(val userName: String, val fileName: String, val fileSize: Long, val lastModified: Long, val path: String) {}
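// Illustrative sketch (not part of the original source): constructing a RemotePath
// for a file found under a search root. All values below are hypothetical.
//
//   val p = new RemotePath("alice", "archive", 1024L, System.currentTimeMillis, "unit01/archive.tar.gz")
//   println(s"${p.userName} owns ${p.fileName} (${p.fileSize} bytes) at ${p.path}")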
| digital-preservation/dali | src/main/scala/uk/gov/tna/dri/preingest/loader/unit/network/RemotePath.scala | Scala | mpl-2.0 | 936 |
package com.dbrsn.datatrain.dsl
import cats.free.Free
import cats.free.Free.inject
import cats.free.Inject
import scala.language.higherKinds
trait ImageComponent {
type Img
type FileExisted
type FileNotExisted
sealed trait ImageDSL[A]
object ImageDSL {
case class Cover(input: Img, width: Int, height: Int) extends ImageDSL[Img]
case class ScaleTo(input: Img, width: Int, height: Int) extends ImageDSL[Img]
case class ScaleToWidth(input: Img, width: Int) extends ImageDSL[Img]
case class ScaleToHeight(input: Img, height: Int) extends ImageDSL[Img]
case class ReadFromFile(input: FileExisted) extends ImageDSL[Img]
case class WriteToPngFile(input: Img, output: FileNotExisted, compressionLevel: Int = 9) extends ImageDSL[FileExisted]
case class WriteToJpegFile(input: Img, output: FileNotExisted, compression: Int = 80, progressive: Boolean = true) extends ImageDSL[FileExisted]
case class WriteToGifFile(input: Img, output: FileNotExisted, progressive: Boolean = true) extends ImageDSL[FileExisted]
}
class ImageInject[F[_]](implicit I: Inject[ImageDSL, F]) {
import ImageDSL._
final def cover(input: Img, width: Int, height: Int): Free[F, Img] = inject[ImageDSL, F](Cover(input, width, height))
final def scaleTo(input: Img, width: Int, height: Int): Free[F, Img] = inject[ImageDSL, F](ScaleTo(input, width, height))
final def scaleToWidth(input: Img, width: Int): Free[F, Img] = inject[ImageDSL, F](ScaleToWidth(input, width))
final def scaleToHeight(input: Img, height: Int): Free[F, Img] = inject[ImageDSL, F](ScaleToHeight(input, height))
final def readFromFile(input: FileExisted): Free[F, Img] = inject[ImageDSL, F](ReadFromFile(input))
final def writeToPngFile(input: Img, output: FileNotExisted, compressionLevel: Int = 9): Free[F, FileExisted] =
inject[ImageDSL, F](WriteToPngFile(input, output, compressionLevel))
final def writeToJpegFile(input: Img, output: FileNotExisted, compression: Int = 80, progressive: Boolean = true): Free[F, FileExisted] =
inject[ImageDSL, F](WriteToJpegFile(input, output, compression, progressive))
final def writeToGifFile(input: Img, output: FileNotExisted, progressive: Boolean = true): Free[F, FileExisted] =
inject[ImageDSL, F](WriteToGifFile(input, output, progressive))
}
object ImageInject {
implicit def image[F[_]](implicit I: Inject[ImageDSL, F]): ImageInject[F] = new ImageInject[F]
}
}
| dborisenko/data-train | data-train-core/src/main/scala/com/dbrsn/datatrain/dsl/ImageComponent.scala | Scala | apache-2.0 | 2,470 |
package io.youi.app
sealed trait ReconnectStrategy
object ReconnectStrategy {
case object Reload extends ReconnectStrategy
case object Stop extends ReconnectStrategy
case object Reconnect extends ReconnectStrategy
} | outr/youi | app/js/src/main/scala/io/youi/app/ReconnectStrategy.scala | Scala | mit | 223 |
package tifmo
import dcstree.SemRole
import dcstree.Relation
package inference {
abstract case class RuleArg(arg: Any) {
private[inference] val terms: Set[TermIndex]
private[inference] def dumpMe(tmmap: Map[TermIndex, TermIndex]): RuleArg
}
trait RuleDo[T <: IEPred] extends ((IEngineCore, T, Seq[RuleArg]) => Unit) with Serializable
object RAConversion {
class RArgT(x: TermIndex) extends RuleArg(x) {
private[inference] val terms = Set(x)
private[inference] def dumpMe(tmmap: Map[TermIndex, TermIndex]) = new RArgT(tmmap(x))
}
implicit def convertT(x: TermIndex) = new RArgT(x)
class RArgR(x: SemRole) extends RuleArg(x) {
private[inference] val terms = Set.empty[TermIndex]
private[inference] def dumpMe(tmmap: Map[TermIndex, TermIndex]) = new RArgR(x)
}
implicit def convertR(x: SemRole) = new RArgR(x)
class RArgTRs(x: Set[(TermIndex, SemRole)]) extends RuleArg(x) {
private[inference] val terms = x.map(_._1)
private[inference] def dumpMe(tmmap: Map[TermIndex, TermIndex]) = new RArgTRs(x.map(y => (tmmap(y._1), y._2)))
}
implicit def convertTRs(x: Set[(TermIndex, SemRole)]) = new RArgTRs(x)
class RArgTs(x: Iterable[TermIndex]) extends RuleArg(x) {
private[inference] val terms = x.toSet
private[inference] def dumpMe(tmmap: Map[TermIndex, TermIndex]) = new RArgTs(x.map(tmmap(_)))
}
implicit def convertTs(x: Iterable[TermIndex]) = new RArgTs(x)
class RArgRs(x: Iterable[SemRole]) extends RuleArg(x) {
private[inference] val terms = Set.empty[TermIndex]
private[inference] def dumpMe(tmmap: Map[TermIndex, TermIndex]) = new RArgRs(x)
}
implicit def convertRs(x: Iterable[SemRole]) = new RArgRs(x)
class RArgRL(x: Relation) extends RuleArg(x) {
private[inference] val terms = Set.empty[TermIndex]
private[inference] def dumpMe(tmmap: Map[TermIndex, TermIndex]) = new RArgRL(x)
}
implicit def convertRL(x: Relation) = new RArgRL(x)
class RArgStr(x: String) extends RuleArg(x) {
private[inference] val terms = Set.empty[TermIndex]
private[inference] def dumpMe(tmmap: Map[TermIndex, TermIndex]) = new RArgStr(x)
}
implicit def convertStr(x: String) = new RArgStr(x)
}
}
| tianran/tifmo | src/tifmo/inference/RuleArg.scala | Scala | bsd-2-clause | 2,203 |
// Copyright (c) 2011 Paul Butcher
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
import sbt._
import Keys._
object Borachio extends Build {
override lazy val settings = super.settings ++ Seq(
organization := "com.borachio",
version := "1.5",
crossScalaVersions := Seq("2.8.1", "2.9.0", "2.9.0-1", "2.9.1"),
scalacOptions ++= Seq("-deprecation", "-unchecked", "-Xfatal-warnings"),
publishTo <<= version { v =>
val nexus = "http://nexus.scala-tools.org/content/repositories/"
if (v.trim.endsWith("SNAPSHOT"))
Some("snapshots" at nexus + "snapshots/")
else
Some("releases" at nexus + "releases/")
},
credentials += Credentials(Path.userHome / ".ivy2" / ".credentials")
)
lazy val core = Project("Borachio core", file("core"),
aggregate = Seq(scalatest, specs2, junit3, core_tests)) settings(
// Workaround https://github.com/harrah/xsbt/issues/193
unmanagedClasspath in Compile += Attributed.blank(new java.io.File("doesnotexist")))
lazy val scalatest: Project = Project("Borachio ScalaTest support", file("frameworks/scalatest")) settings(
libraryDependencies <+= scalaVersion("org.scalatest" %% "scalatest" % scalatestVersion(_))
) dependsOn(core)
lazy val specs2: Project = Project("Borachio Specs2 support", file("frameworks/specs2")) settings(
libraryDependencies <+= scalaVersion("org.specs2" %% "specs2" % specs2Version(_))
) dependsOn(core)
lazy val junit3: Project = Project("Borachio Junit3 support", file("frameworks/junit3")) settings(
libraryDependencies += "junit" % "junit" % "3.8.2"
) dependsOn(core)
lazy val core_tests: Project = Project("Tests", file("core_tests"),
dependencies = Seq(scalatest % "test", specs2 % "test")) settings(publish := ())
val scalatestVersions = Map("2.8" -> "1.5.1", "2.9" -> "1.6.1")
val specs2Versions = Map("2.8" -> "1.2", "2.9" -> "1.5")
def scalatestVersion(scalaVersion: String) = getLibraryVersion(scalatestVersions, scalaVersion)
def specs2Version(scalaVersion: String) = getLibraryVersion(specs2Versions, scalaVersion)
def getLibraryVersion(versionMap: Map[String, String], scalaVersion: String) =
versionMap.getOrElse(scalaVersion take 3, sys.error("Unsupported Scala version: "+ scalaVersion))
}
| paulbutcher/borachio | project/Build.scala | Scala | mit | 3,333 |
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3
package com.google.protobuf.wrappers
object GoogleProtobufWrappersProto {
lazy val descriptor: com.google.protobuf.Descriptors.FileDescriptor = {
val proto = com.google.protobuf.DescriptorProtos.FileDescriptorProto.parseFrom(
com.trueaccord.scalapb.Encoding.fromBase64(Seq(
"""Ch5nb29nbGUvcHJvdG9idWYvd3JhcHBlcnMucHJvdG8SD2dvb2dsZS5wcm90b2J1ZiIcCgtEb3VibGVWYWx1ZRINCgV2YWx1Z
RgBIAEoASIbCgpGbG9hdFZhbHVlEg0KBXZhbHVlGAEgASgCIhsKCkludDY0VmFsdWUSDQoFdmFsdWUYASABKAMiHAoLVUludDY0V
mFsdWUSDQoFdmFsdWUYASABKAQiGwoKSW50MzJWYWx1ZRINCgV2YWx1ZRgBIAEoBSIcCgtVSW50MzJWYWx1ZRINCgV2YWx1ZRgBI
AEoDSIaCglCb29sVmFsdWUSDQoFdmFsdWUYASABKAgiHAoLU3RyaW5nVmFsdWUSDQoFdmFsdWUYASABKAkiGwoKQnl0ZXNWYWx1Z
RINCgV2YWx1ZRgBIAEoDEJTChNjb20uZ29vZ2xlLnByb3RvYnVmQg1XcmFwcGVyc1Byb3RvUAGgAQH4AQGiAgNHUEKqAh5Hb29nb
GUuUHJvdG9idWYuV2VsbEtub3duVHlwZXNiBnByb3RvMw=="""
).mkString))
com.google.protobuf.Descriptors.FileDescriptor.buildFrom(proto, Array(
))
}
} | eiennohito/ScalaPB | scalapb-runtime/js/src/main/scala/com/google/protobuf/wrappers/GoogleProtobufWrappersProto.scala | Scala | apache-2.0 | 1,078 |
/*
* Copyright 2015 Paul Horn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package akka
import akka.actor._
import akka.event.Logging.{ LogEvent, Debug, Error, Info, InitializeLogger, LoggerInitialized, Warning }
/**
* Used for tut as a simple logger that only shows the message and the loglevel.
* Not meant to be used in actual production code.
*/
class PrintLogger extends Actor {
override def receive: Receive = {
case InitializeLogger(_) ⇒ sender() ! LoggerInitialized
case event: LogEvent if event.message.toString.startsWith("shutting down:") ⇒
case event: Error ⇒ println(s"[ERROR] ${event.message }")
case event: Warning ⇒ println(s"[WARN ] ${event.message }")
case event: Info ⇒ println(s"[INFO ] ${event.message }")
case event: Debug ⇒ println(s"[DEBUG] ${event.message }")
}
}
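// Illustrative note (not part of the original source): a logger like this is
// normally enabled through the ActorSystem configuration, e.g.
//
//   akka.loggers = ["akka.PrintLogger"]
//
// after which log events are printed as "[LEVEL] message" instead of the default format.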
| lirenhao/typed-actors | docs/src/test/scala/akka/PrintLogger.scala | Scala | apache-2.0 | 1,380 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package testonly
import play.api.libs.json.{JsValue, Json}
object TestUserReturns {
object Descriptions {
//tax year ends in yyyy-04-05
val taxYearEnd = "Tax Year Indicator. Indicates the year ending 5/4/nnnn (MANDATORY)"
val issueDate = "Date of issue of the return (OPTIONAL)"
    //tax year ends in yyyy-04-05, and the due date is 31st Jan the next year
val dueDate = "Date of deferral is returned when present else the return due date is supplied (OPTIONAL)"
val receivedDate = "Date SA Return was received (OPTIONAL)"
}
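  // Worked example of the fields described above, taken from sample1 below: the return
  // for the tax year ending 2020-04-05 was issued on 2019-01-06, due on 2020-01-31 and
  // received on 2020-01-20, i.e. before its due date.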
/**
   * Assuming that today is 2019-03-15,
   * the user has submitted all relevant tax returns.
*/
val sample1: JsValue = Json.parse(
s"""
{
"returns" : [{
"taxYearEnd" : "2020-04-05",
"issuedDate" : "2019-01-06",
"dueDate" : "2020-01-31",
"receivedDate" : "2020-01-20"
}]
}""")
}
| hmrc/self-service-time-to-pay-frontend | app/testonly/TestUserReturns.scala | Scala | apache-2.0 | 1,536 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.sql.QueryTest
import org.apache.spark.sql.execution.adaptive.DisableAdaptiveExecutionSuite
import org.apache.spark.sql.execution.aggregate.HashAggregateExec
import org.apache.spark.sql.expressions.scalalang.typed
import org.apache.spark.sql.test.SharedSparkSession
// Disable AQE because the WholeStageCodegenExec is added when running QueryStageExec
@deprecated("This test suite will be removed.", "3.0.0")
class DeprecatedWholeStageCodegenSuite extends QueryTest
with SharedSparkSession
with DisableAdaptiveExecutionSuite {
test("simple typed UDAF should be included in WholeStageCodegen") {
import testImplicits._
val ds = Seq(("a", 10), ("b", 1), ("b", 2), ("c", 1)).toDS()
.groupByKey(_._1).agg(typed.sum(_._2))
val plan = ds.queryExecution.executedPlan
assert(plan.find(p =>
p.isInstanceOf[WholeStageCodegenExec] &&
p.asInstanceOf[WholeStageCodegenExec].child.isInstanceOf[HashAggregateExec]).isDefined)
assert(ds.collect() === Array(("a", 10.0), ("b", 3.0), ("c", 1.0)))
}
}
| maropu/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/DeprecatedWholeStageCodegenSuite.scala | Scala | apache-2.0 | 1,900 |
package cook.actor
import cook.config.ConfigRef
import cook.ref.FileRef
import scala.concurrent.Future
import scala.util.Try
trait ConfigRefManager {
def taskComplete(refName: String)(tryConfigRef: Try[ConfigRef])
def getConfigRef(cookFileRef: FileRef): Future[ConfigRef]
def step2LoadIncludeRefs(refName: String, configRef: ConfigRef)
def checkDag
}
| timgreen/cook | src/cook/actor/ConfigRefManager.scala | Scala | apache-2.0 | 363 |
/*
* Copyright 2016 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.regex
import implicits._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
class RegexTests extends AnyFunSuite with ScalaCheckPropertyChecks with Matchers {
test("All matches should be decoded as expected.") {
forAll { is: List[Int] =>
val regex = ("-?\\\\d+").asUnsafeRegex[Int].map(_.fold(e => throw e, identity))
regex.eval(is.mkString(" ")).toList should be(is)
}
}
test("Invalid regular expressions should not compile") {
"[".asRegex[Int].isLeft should be(true)
}
test("Regexes obtained from a pattern should have that pattern as a toString") {
val pattern = rx"-?\\d+"
Regex[Int](pattern).toString should be(pattern.toString)
}
}
| nrinaudo/kantan.regex | core/shared/src/test/scala/kantan/regex/RegexTests.scala | Scala | apache-2.0 | 1,396 |
package com.rocketfuel.sdbc.base
import com.rocketfuel.sdbc.base
trait Updatable[Key, Connection, Update <: base.Update[Connection]] {
def update(key: Key): Update
}
trait UpdatableMethods[Connection, Update <: base.Update[Connection]] {
def updateIterator[Key](
key: Key
)(implicit updatable: base.Updatable[Key, Connection, Update],
connection: Connection
): Iterator[Long] = {
updatable.update(key).iterator()
}
def update[Key](
key: Key
)(implicit updatable: base.Updatable[Key, Connection, Update],
connection: Connection
): Long = {
updatable.update(key).update()
}
}
| wdacom/sdbc | base/src/main/scala/com/rocketfuel/sdbc/base/Updatable.scala | Scala | bsd-3-clause | 622 |
import sbt._
import Keys._
object ThoughtBuild extends Build {
} | ssjskipp/thought | project/Build.scala | Scala | mit | 67 |
/*
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package play.api.db
import java.sql.Connection
import javax.sql.DataSource
import play.api.Application
/**
* Provides a high-level API for getting JDBC connections.
*
* For example:
* {{{
* val conn = DB.getConnection("customers")
* }}}
*/
object DB {
private val dbCache = Application.instanceCache[DBApi]
private def db(implicit app: Application): DBApi = dbCache(app)
/**
* Retrieves a JDBC connection.
*
* @param name data source name
* @param autocommit when `true`, sets this connection to auto-commit
* @return a JDBC connection
*/
def getConnection(name: String = "default", autocommit: Boolean = true)(implicit app: Application): Connection =
db.database(name).getConnection(autocommit)
/**
* Retrieves a JDBC connection (autocommit is set to true).
*
* @param name data source name
* @return a JDBC connection
*/
def getDataSource(name: String = "default")(implicit app: Application): DataSource =
db.database(name).dataSource
/**
* Execute a block of code, providing a JDBC connection. The connection is
* automatically released.
*
* @param name The datasource name.
* @param autocommit when `true`, sets this connection to auto-commit
* @param block Code block to execute.
*/
def withConnection[A](name: String = "default", autocommit: Boolean = true)(block: Connection => A)(implicit app: Application): A =
db.database(name).withConnection(autocommit)(block)
/**
* Execute a block of code, providing a JDBC connection. The connection and all created statements are
* automatically released.
*
* @param block Code block to execute.
*/
def withConnection[A](block: Connection => A)(implicit app: Application): A =
db.database("default").withConnection(block)
/**
* Execute a block of code, in the scope of a JDBC transaction.
* The connection and all created statements are automatically released.
* The transaction is automatically committed, unless an exception occurs.
*
* @param name The datasource name.
* @param block Code block to execute.
*/
def withTransaction[A](name: String = "default")(block: Connection => A)(implicit app: Application): A =
db.database(name).withTransaction(block)
/**
* Execute a block of code, in the scope of a JDBC transaction.
* The connection and all created statements are automatically released.
* The transaction is automatically committed, unless an exception occurs.
*
* @param block Code block to execute.
*/
def withTransaction[A](block: Connection => A)(implicit app: Application): A =
db.database("default").withTransaction(block)
}
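// Illustrative sketch (not part of the original source): typical use of the helpers
// above from application code. It assumes an implicit Application in scope, e.g. via
// play.api.Play.current; the table and column names are hypothetical.
//
//   import play.api.Play.current
//
//   val names: List[String] = DB.withConnection { conn =>
//     val rs = conn.createStatement().executeQuery("SELECT name FROM customers")
//     val buf = scala.collection.mutable.ListBuffer[String]()
//     while (rs.next()) buf += rs.getString("name")
//     buf.toList
//   }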
| jyotikamboj/container | pf-framework/src/play-jdbc/src/main/scala/play/api/db/DB.scala | Scala | mit | 2,742 |
import scala.util.Properties
object Version {
val scaliper = "0.5.0" + Properties.envOrElse("SCALIPER_VERSION_SUFFIX", "-SNAPSHOT")
val scala = "2.10.6"
val sprayJson = "1.3.2"
val spray = "1.3.3"
val akka = "2.3.9"
}
| azavea/scaliper | project/Version.scala | Scala | apache-2.0 | 229 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.aggregate
import org.apache.spark.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import scala.collection.mutable.ArrayBuffer
/**
* The base class of [[SortBasedAggregationIterator]].
* It mainly contains two parts:
* 1. It initializes aggregate functions.
* 2. It creates two functions, `processRow` and `generateOutput` based on [[AggregateMode]] of
* its aggregate functions. `processRow` is the function to handle an input. `generateOutput`
 * is used to generate the result.
*/
abstract class AggregationIterator(
groupingKeyAttributes: Seq[Attribute],
valueAttributes: Seq[Attribute],
nonCompleteAggregateExpressions: Seq[AggregateExpression2],
nonCompleteAggregateAttributes: Seq[Attribute],
completeAggregateExpressions: Seq[AggregateExpression2],
completeAggregateAttributes: Seq[Attribute],
initialInputBufferOffset: Int,
resultExpressions: Seq[NamedExpression],
newMutableProjection: (Seq[Expression], Seq[Attribute]) => (() => MutableProjection),
outputsUnsafeRows: Boolean)
extends Iterator[InternalRow] with Logging {
///////////////////////////////////////////////////////////////////////////
// Initializing functions.
///////////////////////////////////////////////////////////////////////////
// An Seq of all AggregateExpressions.
// It is important that all AggregateExpressions with the mode Partial, PartialMerge or Final
// are at the beginning of the allAggregateExpressions.
protected val allAggregateExpressions =
nonCompleteAggregateExpressions ++ completeAggregateExpressions
require(
allAggregateExpressions.map(_.mode).distinct.length <= 2,
s"$allAggregateExpressions are not supported becuase they have more than 2 distinct modes.")
/**
* The distinct modes of AggregateExpressions. Right now, we can handle the following mode:
* - Partial-only: all AggregateExpressions have the mode of Partial;
* - PartialMerge-only: all AggregateExpressions have the mode of PartialMerge);
* - Final-only: all AggregateExpressions have the mode of Final;
* - Final-Complete: some AggregateExpressions have the mode of Final and
* others have the mode of Complete;
* - Complete-only: nonCompleteAggregateExpressions is empty and we have AggregateExpressions
* with mode Complete in completeAggregateExpressions; and
* - Grouping-only: there is no AggregateExpression.
*/
protected val aggregationMode: (Option[AggregateMode], Option[AggregateMode]) =
nonCompleteAggregateExpressions.map(_.mode).distinct.headOption ->
completeAggregateExpressions.map(_.mode).distinct.headOption
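  // For example, the map-side (partial) aggregation of a simple query typically sees
  // (Some(Partial), None) here, while the corresponding reduce side sees
  // (Some(Final), None); the Final-Complete combination arises when a query mixes
  // aggregate functions evaluated in Final mode with ones evaluated in Complete mode,
  // as described in the comment above.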
// Initialize all AggregateFunctions by binding references if necessary,
// and set inputBufferOffset and mutableBufferOffset.
protected val allAggregateFunctions: Array[AggregateFunction2] = {
var mutableBufferOffset = 0
var inputBufferOffset: Int = initialInputBufferOffset
val functions = new Array[AggregateFunction2](allAggregateExpressions.length)
var i = 0
while (i < allAggregateExpressions.length) {
val func = allAggregateExpressions(i).aggregateFunction
val funcWithBoundReferences: AggregateFunction2 = allAggregateExpressions(i).mode match {
case Partial | Complete if func.isInstanceOf[ImperativeAggregate] =>
// We need to create BoundReferences if the function is not an
// expression-based aggregate function (it does not support code-gen) and the mode of
// this function is Partial or Complete because we will call eval of this
// function's children in the update method of this aggregate function.
// Those eval calls require BoundReferences to work.
BindReferences.bindReference(func, valueAttributes)
case _ =>
// We only need to set inputBufferOffset for aggregate functions with mode
// PartialMerge and Final.
val updatedFunc = func match {
case function: ImperativeAggregate =>
function.withNewInputAggBufferOffset(inputBufferOffset)
case function => function
}
inputBufferOffset += func.aggBufferSchema.length
updatedFunc
}
val funcWithUpdatedAggBufferOffset = funcWithBoundReferences match {
case function: ImperativeAggregate =>
// Set mutableBufferOffset for this function. It is important that setting
// mutableBufferOffset happens after all potential bindReference operations
// because bindReference will create a new instance of the function.
function.withNewMutableAggBufferOffset(mutableBufferOffset)
case function => function
}
mutableBufferOffset += funcWithUpdatedAggBufferOffset.aggBufferSchema.length
functions(i) = funcWithUpdatedAggBufferOffset
i += 1
}
functions
}
// Positions of those imperative aggregate functions in allAggregateFunctions.
// For example, we have func1, func2, func3, func4 in aggregateFunctions, and
// func2 and func3 are imperative aggregate functions.
// ImperativeAggregateFunctionPositions will be [1, 2].
private[this] val allImperativeAggregateFunctionPositions: Array[Int] = {
val positions = new ArrayBuffer[Int]()
var i = 0
while (i < allAggregateFunctions.length) {
allAggregateFunctions(i) match {
case agg: DeclarativeAggregate =>
case _ => positions += i
}
i += 1
}
positions.toArray
}
// All AggregateFunctions functions with mode Partial, PartialMerge, or Final.
private[this] val nonCompleteAggregateFunctions: Array[AggregateFunction2] =
allAggregateFunctions.take(nonCompleteAggregateExpressions.length)
// All imperative aggregate functions with mode Partial, PartialMerge, or Final.
private[this] val nonCompleteImperativeAggregateFunctions: Array[ImperativeAggregate] =
nonCompleteAggregateFunctions.collect { case func: ImperativeAggregate => func }
// The projection used to initialize buffer values for all expression-based aggregates.
private[this] val expressionAggInitialProjection = {
val initExpressions = allAggregateFunctions.flatMap {
case ae: DeclarativeAggregate => ae.initialValues
// For the positions corresponding to imperative aggregate functions, we'll use special
// no-op expressions which are ignored during projection code-generation.
case i: ImperativeAggregate => Seq.fill(i.aggBufferAttributes.length)(NoOp)
}
newMutableProjection(initExpressions, Nil)()
}
// All imperative AggregateFunctions.
private[this] val allImperativeAggregateFunctions: Array[ImperativeAggregate] =
allImperativeAggregateFunctionPositions
.map(allAggregateFunctions)
.map(_.asInstanceOf[ImperativeAggregate])
///////////////////////////////////////////////////////////////////////////
// Methods and fields used by sub-classes.
///////////////////////////////////////////////////////////////////////////
// Initializing functions used to process a row.
protected val processRow: (MutableRow, InternalRow) => Unit = {
val rowToBeProcessed = new JoinedRow
val aggregationBufferSchema = allAggregateFunctions.flatMap(_.aggBufferAttributes)
aggregationMode match {
// Partial-only
case (Some(Partial), None) =>
val updateExpressions = nonCompleteAggregateFunctions.flatMap {
case ae: DeclarativeAggregate => ae.updateExpressions
case agg: AggregateFunction2 => Seq.fill(agg.aggBufferAttributes.length)(NoOp)
}
val expressionAggUpdateProjection =
newMutableProjection(updateExpressions, aggregationBufferSchema ++ valueAttributes)()
(currentBuffer: MutableRow, row: InternalRow) => {
expressionAggUpdateProjection.target(currentBuffer)
// Process all expression-based aggregate functions.
expressionAggUpdateProjection(rowToBeProcessed(currentBuffer, row))
// Process all imperative aggregate functions.
var i = 0
while (i < nonCompleteImperativeAggregateFunctions.length) {
nonCompleteImperativeAggregateFunctions(i).update(currentBuffer, row)
i += 1
}
}
// PartialMerge-only or Final-only
case (Some(PartialMerge), None) | (Some(Final), None) =>
val inputAggregationBufferSchema = if (initialInputBufferOffset == 0) {
// If initialInputBufferOffset, the input value does not contain
// grouping keys.
// This part is pretty hacky.
allAggregateFunctions.flatMap(_.inputAggBufferAttributes).toSeq
} else {
groupingKeyAttributes ++ allAggregateFunctions.flatMap(_.inputAggBufferAttributes)
}
// val inputAggregationBufferSchema =
// groupingKeyAttributes ++
// allAggregateFunctions.flatMap(_.cloneBufferAttributes)
val mergeExpressions = nonCompleteAggregateFunctions.flatMap {
case ae: DeclarativeAggregate => ae.mergeExpressions
case agg: AggregateFunction2 => Seq.fill(agg.aggBufferAttributes.length)(NoOp)
}
// This projection is used to merge buffer values for all expression-based aggregates.
val expressionAggMergeProjection =
newMutableProjection(
mergeExpressions,
aggregationBufferSchema ++ inputAggregationBufferSchema)()
(currentBuffer: MutableRow, row: InternalRow) => {
// Process all expression-based aggregate functions.
expressionAggMergeProjection.target(currentBuffer)(rowToBeProcessed(currentBuffer, row))
// Process all imperative aggregate functions.
var i = 0
while (i < nonCompleteImperativeAggregateFunctions.length) {
nonCompleteImperativeAggregateFunctions(i).merge(currentBuffer, row)
i += 1
}
}
// Final-Complete
case (Some(Final), Some(Complete)) =>
val completeAggregateFunctions: Array[AggregateFunction2] =
allAggregateFunctions.takeRight(completeAggregateExpressions.length)
// All imperative aggregate functions with mode Complete.
val completeImperativeAggregateFunctions: Array[ImperativeAggregate] =
completeAggregateFunctions.collect { case func: ImperativeAggregate => func }
// The first initialInputBufferOffset values of the input aggregation buffer is
// for grouping expressions and distinct columns.
val groupingAttributesAndDistinctColumns = valueAttributes.take(initialInputBufferOffset)
val completeOffsetExpressions =
Seq.fill(completeAggregateFunctions.map(_.aggBufferAttributes.length).sum)(NoOp)
// We do not touch buffer values of aggregate functions with the Final mode.
val finalOffsetExpressions =
Seq.fill(nonCompleteAggregateFunctions.map(_.aggBufferAttributes.length).sum)(NoOp)
val mergeInputSchema =
aggregationBufferSchema ++
groupingAttributesAndDistinctColumns ++
nonCompleteAggregateFunctions.flatMap(_.inputAggBufferAttributes)
val mergeExpressions =
nonCompleteAggregateFunctions.flatMap {
case ae: DeclarativeAggregate => ae.mergeExpressions
case agg: AggregateFunction2 => Seq.fill(agg.aggBufferAttributes.length)(NoOp)
} ++ completeOffsetExpressions
val finalExpressionAggMergeProjection =
newMutableProjection(mergeExpressions, mergeInputSchema)()
val updateExpressions =
finalOffsetExpressions ++ completeAggregateFunctions.flatMap {
case ae: DeclarativeAggregate => ae.updateExpressions
case agg: AggregateFunction2 => Seq.fill(agg.aggBufferAttributes.length)(NoOp)
}
val completeExpressionAggUpdateProjection =
newMutableProjection(updateExpressions, aggregationBufferSchema ++ valueAttributes)()
(currentBuffer: MutableRow, row: InternalRow) => {
val input = rowToBeProcessed(currentBuffer, row)
// For all aggregate functions with mode Complete, update buffers.
completeExpressionAggUpdateProjection.target(currentBuffer)(input)
var i = 0
while (i < completeImperativeAggregateFunctions.length) {
completeImperativeAggregateFunctions(i).update(currentBuffer, row)
i += 1
}
// For all aggregate functions with mode Final, merge buffers.
finalExpressionAggMergeProjection.target(currentBuffer)(input)
i = 0
while (i < nonCompleteImperativeAggregateFunctions.length) {
nonCompleteImperativeAggregateFunctions(i).merge(currentBuffer, row)
i += 1
}
}
// Complete-only
case (None, Some(Complete)) =>
val completeAggregateFunctions: Array[AggregateFunction2] =
allAggregateFunctions.takeRight(completeAggregateExpressions.length)
// All imperative aggregate functions with mode Complete.
val completeImperativeAggregateFunctions: Array[ImperativeAggregate] =
completeAggregateFunctions.collect { case func: ImperativeAggregate => func }
val updateExpressions =
completeAggregateFunctions.flatMap {
case ae: DeclarativeAggregate => ae.updateExpressions
case agg: AggregateFunction2 => Seq.fill(agg.aggBufferAttributes.length)(NoOp)
}
val completeExpressionAggUpdateProjection =
newMutableProjection(updateExpressions, aggregationBufferSchema ++ valueAttributes)()
(currentBuffer: MutableRow, row: InternalRow) => {
val input = rowToBeProcessed(currentBuffer, row)
// For all aggregate functions with mode Complete, update buffers.
completeExpressionAggUpdateProjection.target(currentBuffer)(input)
var i = 0
while (i < completeImperativeAggregateFunctions.length) {
completeImperativeAggregateFunctions(i).update(currentBuffer, row)
i += 1
}
}
// Grouping only.
case (None, None) => (currentBuffer: MutableRow, row: InternalRow) => {}
case other =>
sys.error(
s"Could not evaluate ${nonCompleteAggregateExpressions} because we do not " +
s"support evaluate modes $other in this iterator.")
}
}
// Initializing the function used to generate the output row.
protected val generateOutput: (InternalRow, MutableRow) => InternalRow = {
val rowToBeEvaluated = new JoinedRow
val safeOutputRow = new SpecificMutableRow(resultExpressions.map(_.dataType))
val mutableOutput = if (outputsUnsafeRows) {
UnsafeProjection.create(resultExpressions.map(_.dataType).toArray).apply(safeOutputRow)
} else {
safeOutputRow
}
aggregationMode match {
// Partial-only or PartialMerge-only: every output row is basically the values of
// the grouping expressions and the corresponding aggregation buffer.
case (Some(Partial), None) | (Some(PartialMerge), None) =>
// Because we cannot copy a joinedRow containing a UnsafeRow (UnsafeRow does not
// support generic getter), we create a mutable projection to output the
// JoinedRow(currentGroupingKey, currentBuffer)
val bufferSchema = nonCompleteAggregateFunctions.flatMap(_.aggBufferAttributes)
val resultProjection =
newMutableProjection(
groupingKeyAttributes ++ bufferSchema,
groupingKeyAttributes ++ bufferSchema)()
resultProjection.target(mutableOutput)
(currentGroupingKey: InternalRow, currentBuffer: MutableRow) => {
resultProjection(rowToBeEvaluated(currentGroupingKey, currentBuffer))
// rowToBeEvaluated(currentGroupingKey, currentBuffer)
}
// Final-only, Complete-only and Final-Complete: every output row contains values representing
// resultExpressions.
case (Some(Final), None) | (Some(Final) | None, Some(Complete)) =>
val bufferSchemata =
allAggregateFunctions.flatMap(_.aggBufferAttributes)
val evalExpressions = allAggregateFunctions.map {
case ae: DeclarativeAggregate => ae.evaluateExpression
case agg: AggregateFunction2 => NoOp
}
val expressionAggEvalProjection = newMutableProjection(evalExpressions, bufferSchemata)()
val aggregateResultSchema = nonCompleteAggregateAttributes ++ completeAggregateAttributes
// TODO: Use unsafe row.
val aggregateResult = new SpecificMutableRow(aggregateResultSchema.map(_.dataType))
expressionAggEvalProjection.target(aggregateResult)
val resultProjection =
newMutableProjection(
resultExpressions, groupingKeyAttributes ++ aggregateResultSchema)()
resultProjection.target(mutableOutput)
(currentGroupingKey: InternalRow, currentBuffer: MutableRow) => {
// Generate results for all expression-based aggregate functions.
expressionAggEvalProjection(currentBuffer)
// Generate results for all imperative aggregate functions.
var i = 0
while (i < allImperativeAggregateFunctions.length) {
aggregateResult.update(
allImperativeAggregateFunctionPositions(i),
allImperativeAggregateFunctions(i).eval(currentBuffer))
i += 1
}
resultProjection(rowToBeEvaluated(currentGroupingKey, aggregateResult))
}
// Grouping-only: we only output values of grouping expressions.
case (None, None) =>
val resultProjection =
newMutableProjection(resultExpressions, groupingKeyAttributes)()
resultProjection.target(mutableOutput)
(currentGroupingKey: InternalRow, currentBuffer: MutableRow) => {
resultProjection(currentGroupingKey)
}
case other =>
sys.error(
s"Could not evaluate ${nonCompleteAggregateExpressions} because we do not " +
s"support evaluate modes $other in this iterator.")
}
}
/** Initializes buffer values for all aggregate functions. */
protected def initializeBuffer(buffer: MutableRow): Unit = {
expressionAggInitialProjection.target(buffer)(EmptyRow)
var i = 0
while (i < allImperativeAggregateFunctions.length) {
allImperativeAggregateFunctions(i).initialize(buffer)
i += 1
}
}
/**
* Creates a new aggregation buffer and initializes buffer values
* for all aggregate functions.
*/
protected def newBuffer: MutableRow
}
| pronix/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/aggregate/AggregationIterator.scala | Scala | apache-2.0 | 19,625 |
package com.cengallut.textualapp
import com.cengallut.textual.{TextualActivity, TouchComposite, Action}
import com.cengallut.textual.core.CharGrid
/** Demonstration of event handling for multiple sub-grids. */
class Demo3 extends TextualActivity {
override def onBufferReady(buffer: CharGrid): Unit = {
val (top, bottom) = buffer.horizontalBisect
val topTouch = Action(top).touch { (x, y) =>
top.charAt(x, y) match {
case 'o' => top.setChar(x, y, 'T')
case _ => top.setChar(x, y, 'o')
}
top.notifyChanged()
}
val botTouch = Action(bottom).touch { (x, y) =>
bottom.charAt(x, y) match {
case 'o' => bottom.setChar(x, y, 'B')
case _ => bottom.setChar(x, y, 'o')
}
bottom.notifyChanged()
}
val compositeTouch = TouchComposite.empty + topTouch + botTouch
view.setOnTouchListener(compositeTouch)
}
} | AliceCengal/android-textual | demo-android-scala/src/main/scala/com/cengallut/textualapp/Demo3.scala | Scala | mit | 907 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package base
package patterns
import com.intellij.lang.ASTNode
import com.intellij.psi._
import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns._
import org.jetbrains.plugins.scala.lang.psi.types.result.TypingContext
/**
* @author Alexander Podkhalyuzin
* Date: 28.02.2008
*/
class ScLiteralPatternImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScLiteralPattern {
override def accept(visitor: PsiElementVisitor) {
visitor match {
case visitor: ScalaElementVisitor => super.accept(visitor)
case _ => super.accept(visitor)
}
}
override def toString: String = "LiteralPattern"
override def getType(ctx: TypingContext) = {
getLiteral.getType(TypingContext.empty)
}
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/base/patterns/ScLiteralPatternImpl.scala | Scala | apache-2.0 | 863 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @author Vamsi Thummala {[email protected]}, Copyright (C) 2013-2015
*
*/
package safe.safesets
package client
// Messages between master-worker
object MasterWorkerMessageProtocol {
type WorkerId = String
type WorkId = String
sealed trait MasterWorkerMessage
// Messages from Workers
case class RegisterWorker(workerId: WorkerId) extends MasterWorkerMessage
case class WorkerRequestsWork(workerId: WorkerId) extends MasterWorkerMessage
case class WorkIsDone(
workerId: WorkerId
, workId: WorkId
, result: Any
) extends MasterWorkerMessage
case class WorkFailed(workerId: WorkerId, workId: WorkId) extends MasterWorkerMessage
// Messages to Workers
case object WorkIsReady extends MasterWorkerMessage
case class Ack(id: String) extends MasterWorkerMessage
}
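// Illustrative note (not part of the original source): a typical exchange implied by
// the message names above: a worker first sends RegisterWorker, the master announces
// WorkIsReady when work is queued, the worker replies with WorkerRequestsWork, and on
// completion it reports WorkIsDone (acknowledged by the master with Ack) or WorkFailed.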
| wowmsi/safe | safe-lang/src/main/scala/safe/safesets/client/MasterWorkerMessageProtocol.scala | Scala | apache-2.0 | 1,613 |
/*
Listok is a dialect of LISP
Copyright (C) 2011 Konstantin Boukreev
[email protected]
This file is part of Listok.
Listok is free software; you can redistribute it and/or
modify it under the terms of the GNU Lesser General Public
License as published by the Free Software Foundation; either
version 3 of the License, or (at your option) any later version.
Listok is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with this library. If not, see <http://www.gnu.org/licenses/>.
*/
package ru.listok
object Interop extends ru.listok.builtin.Helpers {
def guard[R] (env: Env, name: String)(f: => R) =
try { f } catch {
case e: ClassNotFoundException => throw SyntaxError("Interop class "+ name +" not found", env)
case e: NoSuchFieldException => throw SyntaxError("Interop field "+name+" not found", env)
case e: NoSuchMethodException => throw SyntaxError("Interop method "+name+" not found", env)
case e: IllegalAccessException => throw SyntaxError("Interop access not allowed to " + name, env)
}
def getClassAndName(env: Env, name: String, ctor: Boolean): (Class[_], String) = guard(env, name) {
if (ctor)
(Class.forName(name), "")
else {
val ss = name.split('.')
val cname = ss.init.mkString(".") // class name
val mname = ss.last // method name
(Class.forName(cname), mname)
}
}
def checkFunc (env: Env, clazz: Class[_], name: String, mod: Symbol) = guard(env, name) {
mod match {
case 'method =>
clazz.getMethods.exists { m =>
m.getName == name &&
0 == (m.getModifiers & java.lang.reflect.Modifier.STATIC) }
case 'static =>
clazz.getMethods.exists { m =>
m.getName == name &&
0 != (m.getModifiers & java.lang.reflect.Modifier.STATIC) }
case _ => true
}
}
def callCtor(clazz: Class[_], l: List[Lcommon]): Lcommon = {
if (l.isEmpty) {
val ctors = clazz.getConstructors.filter { ctor => ctor.getParameterTypes.isEmpty }
fromJava(ctors.head.newInstance())
} else {
val params = l.map(p => toJava(p.getAny))
val types = params.map(_.getClass).toArray
val ctors = clazz.getConstructors.filter { ctor => matchingTypes(ctor.getParameterTypes, types) }
fromJava(ctors.head.newInstance(params: _*))
}
}
def callFunc (obj: AnyRef, clazz: Class[_], name: String, l: List[Lcommon]): Lcommon = {
// log(" for " + obj + " call " + clazz.getName + "." + name + " " + Util.pp(l))
if (l.isEmpty) {
val mm = clazz.getMethods.filter { m =>
m.getName == name && m.getParameterTypes.isEmpty }
if (mm.isEmpty) {
val ff = clazz.getField(name)
fromJava(ff.get(obj))
}
else
fromJava(mm.head.invoke(obj))
}
else {
val params = l.map(p => toJava(p.getAny))
val types = params.map(_.getClass).toArray
val mm = clazz.getMethods.filter { m =>
m.getName == name &&
matchingTypes(m.getParameterTypes, types) }
fromJava(mm.head.invoke(obj, params:_*))
}
}
def getField(env: Env, clazz: Class[_], name: String) = guard(env, name) {
val ff = clazz.getField(name)
fromJava(ff.get(null))
}
  // this function is taken from http://www.familie-kneissl.org/Members/martin/blog/reflection-from-scala-heaven-and-hell
def matchingTypes(declared: Array[Class[_]], actual: Array[Class[_]]): Boolean = {
declared.length == actual.length && (
(declared zip actual) forall {
case (declared, actual) => declared.isAssignableFrom(actual)
})
}
def toJava(x: Any): AnyRef = x match {
case x: Byte => Byte.box(x)
case x: Char => Char.box(x)
case x: Int => Int.box(x)
case x: Long => Long.box(x)
case x: Float => Float.box(x)
case x: Double => Double.box(x)
case x: Boolean => Boolean.box(x)
// case x: Unit => ()
case x: BigInt => x.bigInteger
case _ => x.asInstanceOf[AnyRef]
}
def fromJava(x: Any) = Util.toLcommon(x match {
case x: java.lang.Byte => x.byteValue
case x: java.lang.Character => x.charValue
case x: java.lang.Integer => x.intValue
case x: java.lang.Long => x.longValue
case x: java.lang.Float => x.floatValue
case x: java.lang.Double => x.doubleValue
case x: java.lang.Boolean => x.booleanValue
case x: java.lang.Void => ()
case x: java.lang.String => x
case x: java.math.BigInteger=> new BigInt(x)
case _ => x //.asInstanceOf[AnyRef]
})
def func_interop(env: Env, args: List[Lcommon]): Lcommon = {
notLess(env, args, 2)
val jname = args(0).getString(env)
val lname = args(1).getSymbol(env)
val mod = if (args.length > 2)
args(2).castKeyword(env).sym match {
case s if s == 'static || s == 'method || s == 'constructor || s == 'constant => s
case err =>
env.host.onwarning(env, "unknown modifier "+err+" on interop")
'method
}
else
'method
val (clazz, mname) = getClassAndName(env, jname, mod == 'constructor)
if (mod == 'constant) {
val value = getField(env, clazz, mname)
env.defineconst(lname, value)
} else {
if (!checkFunc(env, clazz, mname, mod))
throw SyntaxError("Interop method "+jname+" not found", env)
val fn = mod match {
case 'static =>
(env: Env, l: List[Lcommon]) => { guard(env, jname){callFunc(null, clazz, mname, l)} }
case 'method =>
(env: Env, l: List[Lcommon]) => {
notLess(env, l, 1)
guard(env, jname){callFunc(toJava(l.head.getAny), clazz, mname, l.tail)}
}
case 'constructor =>
(env: Env, l: List[Lcommon]) => { guard(env, jname){callCtor(clazz, l)} }
}
env.define(lname, Lfunction(fn, lname))
}
Lsymbol(lname)
}
val all = List(
Lfunction(func_interop, 'interop)
)
}
| kolyvan/listok | src/main/scala/interop.scala | Scala | lgpl-3.0 | 6,249 |
/**
* Copyright 2013-2015, AlwaysResolve Project (alwaysresolve.org), MOYD.CO LTD
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package httpSync
import java.text.SimpleDateFormat
import com.fasterxml.jackson.annotation.JsonInclude
import com.fasterxml.jackson.core.JsonParseException
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import configs.ConfigService
import datastructures.DNSAuthoritativeSection
import models.ExtendedDomain
import org.apache.commons.lang3.exception.ExceptionUtils
import org.slf4j.LoggerFactory
import scalaj.http.{Http, HttpOptions}
object HttpToDns {
var zones = Array("")
val logger = LoggerFactory.getLogger("Httprequests")
val HTTP_REQUEST_LIST = ConfigService.config.getString("httpRequestList")
val HTTP_REQUEST_ZONE = ConfigService.config.getString("httpRequestZone")
val API_KEY = ConfigService.config.getString("apiKey")
val API_SECRET = ConfigService.config.getString("apiSecret")
val REGION = ConfigService.config.getString("region")
val HTTP_TIMEOUT = ConfigService.config.getInt("httpTimeoutForZoneUpdate")
private val Json = {
val m = new ObjectMapper()
m.registerModule(DefaultScalaModule)
m.setSerializationInclusion(JsonInclude.Include.NON_NULL);
m.setDateFormat(new SimpleDateFormat("yyyy-MM-dd HH:mm:ss"))
m
}
def getZonesNames = {
var temp = ""
try {
temp = Http(HTTP_REQUEST_LIST).param("api_key", API_KEY).param("api_secret", API_SECRET)
.option(HttpOptions.connTimeout(HTTP_TIMEOUT)).option(HttpOptions.readTimeout(HTTP_TIMEOUT)).asString
} catch {
case e: Exception => {
logger.error("Unable to retrieve zones list"); System.exit(1);
}
}
zones = temp.split(" ")
    logger.debug(zones.mkString(", "))
}
def getZoneFromHttp(zonename: String): Option[ExtendedDomain] = {
val temp = Http(HTTP_REQUEST_ZONE).param("zone", zonename).param("region", REGION)
.param("api_key", API_KEY).param("api_secret", API_SECRET)
.option(HttpOptions.connTimeout(HTTP_TIMEOUT)).option(HttpOptions.readTimeout(HTTP_TIMEOUT))
logger.debug("ResponseCode from " + zonename + " update: " + temp.responseCode.toString)
if (temp.responseCode != 404) {
try {
Option(Json.readValue(temp.asString, classOf[ExtendedDomain]))
}
catch {
case ex: JsonParseException => {
logger.error("Broken json: " + ExceptionUtils.getMessage(ex) + " " + ExceptionUtils.getStackTrace(ex))
None
}
}
}
else
None
}
def loadZonesInMemory = {
loadData
}
/*def loadSingleZone(zone: String) = {
loadDataOfType(Array(zone), classOf[ExtendedDomain]) { DNSAuthoritativeSection.setDomain(_) }
}*/
private def loadData = {
loadDataOfType(zones, classOf[ExtendedDomain]) {
DNSAuthoritativeSection.setDomain(_)
}
//loadDataOfType(cacheDataPath, classOf[ExtendedDomain]) { DNSCache.setDomain(_) }
DNSAuthoritativeSection.logDomains
}
private def loadDataOfType[T](zones: Array[String], typ: Class[T])(fn: T => Unit) = {
zones.foreach(loadItem(_, typ)(fn))
}
private def loadItem[T](zonename: String, typ: Class[T])(fn: T => Any) =
try {
val temp = Http(HTTP_REQUEST_ZONE).param("zone", zonename).param("region", REGION)
.param("api_key", API_KEY).param("api_secret", API_SECRET)
.option(HttpOptions.connTimeout(HTTP_TIMEOUT)).option(HttpOptions.readTimeout(HTTP_TIMEOUT)).asString
logger.debug(temp)
val item = Json.readValue(temp, typ)
fn(item)
} catch {
case ex: JsonParseException => logger.warn("Broken json: " + ExceptionUtils.getMessage(ex) + " " + ExceptionUtils.getStackTrace(ex))
}
} | Moydco/AlwaysResolveDNS | src/main/scala/httpSync/HttpToDns.scala | Scala | apache-2.0 | 4,128 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import scala.collection.mutable
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.expressions.{And, Attribute, AttributeSet, Expression, PredicateHelper}
import org.apache.spark.sql.catalyst.plans.{Inner, InnerLike, JoinType}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.internal.SQLConf
/**
* Cost-based join reorder.
* We may have several join reorder algorithms in the future. This class is the entry of these
* algorithms, and chooses which one to use.
*/
object CostBasedJoinReorder extends Rule[LogicalPlan] with PredicateHelper {
private def conf = SQLConf.get
def apply(plan: LogicalPlan): LogicalPlan = {
if (!conf.cboEnabled || !conf.joinReorderEnabled) {
plan
} else {
val result = plan transformDown {
// Start reordering with a joinable item, which is an InnerLike join with conditions.
// Avoid reordering if a join hint is present.
case j @ Join(_, _, _: InnerLike, Some(cond), JoinHint.NONE) =>
reorder(j, j.output)
case p @ Project(projectList, Join(_, _, _: InnerLike, Some(cond), JoinHint.NONE))
if projectList.forall(_.isInstanceOf[Attribute]) =>
reorder(p, p.output)
}
// After reordering is finished, convert OrderedJoin back to Join.
result transform {
case OrderedJoin(left, right, jt, cond) => Join(left, right, jt, cond, JoinHint.NONE)
}
}
}
private def reorder(plan: LogicalPlan, output: Seq[Attribute]): LogicalPlan = {
val (items, conditions) = extractInnerJoins(plan)
val result =
// Do reordering if the number of items is appropriate and join conditions exist.
// We also need to check if costs of all items can be evaluated.
if (items.size > 2 && items.size <= conf.joinReorderDPThreshold && conditions.nonEmpty &&
items.forall(_.stats.rowCount.isDefined)) {
JoinReorderDP.search(conf, items, conditions, output)
} else {
plan
}
// Set consecutive join nodes ordered.
replaceWithOrderedJoin(result)
}
/**
* Extracts items of consecutive inner joins and join conditions.
* This method works for bushy trees and left/right deep trees.
*/
private def extractInnerJoins(plan: LogicalPlan): (Seq[LogicalPlan], Set[Expression]) = {
plan match {
case Join(left, right, _: InnerLike, Some(cond), JoinHint.NONE) =>
val (leftPlans, leftConditions) = extractInnerJoins(left)
val (rightPlans, rightConditions) = extractInnerJoins(right)
(leftPlans ++ rightPlans, splitConjunctivePredicates(cond).toSet ++
leftConditions ++ rightConditions)
case Project(projectList, j @ Join(_, _, _: InnerLike, Some(cond), JoinHint.NONE))
if projectList.forall(_.isInstanceOf[Attribute]) =>
extractInnerJoins(j)
case _ =>
(Seq(plan), Set())
}
}
private def replaceWithOrderedJoin(plan: LogicalPlan): LogicalPlan = plan match {
case j @ Join(left, right, jt: InnerLike, Some(cond), JoinHint.NONE) =>
val replacedLeft = replaceWithOrderedJoin(left)
val replacedRight = replaceWithOrderedJoin(right)
OrderedJoin(replacedLeft, replacedRight, jt, Some(cond))
case p @ Project(projectList, j @ Join(_, _, _: InnerLike, Some(cond), JoinHint.NONE)) =>
p.copy(child = replaceWithOrderedJoin(j))
case _ =>
plan
}
}
/** This is a mimic class for a join node that has been ordered. */
case class OrderedJoin(
left: LogicalPlan,
right: LogicalPlan,
joinType: JoinType,
condition: Option[Expression]) extends BinaryNode {
override def output: Seq[Attribute] = left.output ++ right.output
}
/**
* Reorder the joins using a dynamic programming algorithm. This implementation is based on the
* paper: Access Path Selection in a Relational Database Management System.
* http://www.inf.ed.ac.uk/teaching/courses/adbs/AccessPath.pdf
*
* First we put all items (basic joined nodes) into level 0, then we build all two-way joins
* at level 1 from plans at level 0 (single items), then build all 3-way joins from plans
* at previous levels (two-way joins and single items), then 4-way joins ... etc, until we
* build all n-way joins and pick the best plan among them.
*
* When building m-way joins, we only keep the best plan (with the lowest cost) for the same set
* of m items. E.g., for 3-way joins, we keep only the best plan for items {A, B, C} among
* plans (A J B) J C, (A J C) J B and (B J C) J A.
* We also prune cartesian product candidates when building a new plan if there exists no join
* condition involving references from both left and right. This pruning strategy significantly
* reduces the search space.
* E.g., given A J B J C J D with join conditions A.k1 = B.k1 and B.k2 = C.k2 and C.k3 = D.k3,
* plans maintained for each level are as follows:
* level 0: p({A}), p({B}), p({C}), p({D})
* level 1: p({A, B}), p({B, C}), p({C, D})
* level 2: p({A, B, C}), p({B, C, D})
* level 3: p({A, B, C, D})
* where p({A, B, C, D}) is the final output plan.
*
* For cost evaluation, since physical costs for operators are not available currently, we use
* cardinalities and sizes to compute costs.
*/
object JoinReorderDP extends PredicateHelper with Logging {
def search(
conf: SQLConf,
items: Seq[LogicalPlan],
conditions: Set[Expression],
output: Seq[Attribute]): LogicalPlan = {
val startTime = System.nanoTime()
// Level i maintains all found plans for i + 1 items.
// Create the initial plans: each plan is a single item with zero cost.
val itemIndex = items.zipWithIndex
val foundPlans = mutable.Buffer[JoinPlanMap](itemIndex.map {
case (item, id) => Set(id) -> JoinPlan(Set(id), item, Set.empty, Cost(0, 0))
}.toMap)
// Build filters from the join graph to be used by the search algorithm.
val filters = JoinReorderDPFilters.buildJoinGraphInfo(conf, items, conditions, itemIndex)
// Build plans for next levels until the last level has only one plan. This plan contains
// all items that can be joined, so there's no need to continue.
val topOutputSet = AttributeSet(output)
while (foundPlans.size < items.length) {
// Build plans for the next level.
foundPlans += searchLevel(foundPlans, conf, conditions, topOutputSet, filters)
}
val durationInMs = (System.nanoTime() - startTime) / (1000 * 1000)
logDebug(s"Join reordering finished. Duration: $durationInMs ms, number of items: " +
s"${items.length}, number of plans in memo: ${foundPlans.map(_.size).sum}")
// The last level must have one and only one plan, because all items are joinable.
assert(foundPlans.size == items.length && foundPlans.last.size == 1)
foundPlans.last.head._2.plan match {
case p @ Project(projectList, j: Join) if projectList != output =>
assert(topOutputSet == p.outputSet)
// Keep the same order of final output attributes.
p.copy(projectList = output)
case finalPlan if !sameOutput(finalPlan, output) =>
Project(output, finalPlan)
case finalPlan =>
finalPlan
}
}
private def sameOutput(plan: LogicalPlan, expectedOutput: Seq[Attribute]): Boolean = {
val thisOutput = plan.output
thisOutput.length == expectedOutput.length && thisOutput.zip(expectedOutput).forall {
case (a1, a2) => a1.semanticEquals(a2)
}
}
/** Find all possible plans at the next level, based on existing levels. */
private def searchLevel(
existingLevels: Seq[JoinPlanMap],
conf: SQLConf,
conditions: Set[Expression],
topOutput: AttributeSet,
filters: Option[JoinGraphInfo]): JoinPlanMap = {
val nextLevel = mutable.Map.empty[Set[Int], JoinPlan]
var k = 0
val lev = existingLevels.length - 1
// Build plans for the next level from plans at level k (one side of the join) and level
// lev - k (the other side of the join).
// For the lower level k, we only need to search from 0 to lev - k, because when building
// a join from A and B, both A J B and B J A are handled.
while (k <= lev - k) {
val oneSideCandidates = existingLevels(k).values.toSeq
for (i <- oneSideCandidates.indices) {
val oneSidePlan = oneSideCandidates(i)
val otherSideCandidates = if (k == lev - k) {
// Both sides of a join are at the same level, no need to repeat for previous ones.
oneSideCandidates.drop(i)
} else {
existingLevels(lev - k).values.toSeq
}
otherSideCandidates.foreach { otherSidePlan =>
buildJoin(oneSidePlan, otherSidePlan, conf, conditions, topOutput, filters) match {
case Some(newJoinPlan) =>
// Check if it's the first plan for the item set, or it's a better plan than
// the existing one due to lower cost.
val existingPlan = nextLevel.get(newJoinPlan.itemIds)
if (existingPlan.isEmpty || newJoinPlan.betterThan(existingPlan.get, conf)) {
nextLevel.update(newJoinPlan.itemIds, newJoinPlan)
}
case None =>
}
}
}
k += 1
}
nextLevel.toMap
}
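  /* Illustrative sketch only, never called by the rule: the level-by-level growth of
   * item-id sets described in the class comment, with plain Sets standing in for
   * JoinPlans and a caller-supplied `joinable` predicate standing in for the join
   * condition and star-join checks. Everything here is an assumption of the sketch,
   * using nothing beyond the Scala standard library.
   */
  def enumerateItemSets(
      numItems: Int,
      joinable: (Set[Int], Set[Int]) => Boolean): Seq[Set[Set[Int]]] = {
    // Level 0 holds the single items {0}, {1}, ..., {numItems - 1}.
    val levels = mutable.Buffer[Set[Set[Int]]]((0 until numItems).map(Set(_)).toSet)
    while (levels.size < numItems) {
      val lev = levels.size - 1
      // Combine plans at level k with plans at level (lev - k); k <= lev / 2 avoids
      // building the same pair twice, mirroring the loop in searchLevel above.
      val next = for {
        k <- (0 to lev / 2).toSet
        left <- levels(k)
        right <- levels(lev - k)
        if left.intersect(right).isEmpty && joinable(left, right)
      } yield left.union(right)
      levels += next
    }
    levels.toSeq
  }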
/**
* Builds a new JoinPlan if the following conditions hold:
* - the sets of items contained in left and right sides do not overlap.
* - there exists at least one join condition involving references from both sides.
* - if star-join filter is enabled, allow the following combinations:
* 1) (oneJoinPlan U otherJoinPlan) is a subset of star-join
* 2) star-join is a subset of (oneJoinPlan U otherJoinPlan)
* 3) (oneJoinPlan U otherJoinPlan) is a subset of non star-join
*
* @param oneJoinPlan One side JoinPlan for building a new JoinPlan.
* @param otherJoinPlan The other side JoinPlan for building a new join node.
* @param conf SQLConf for statistics computation.
* @param conditions The overall set of join conditions.
* @param topOutput The output attributes of the final plan.
* @param filters Join graph info to be used as filters by the search algorithm.
* @return Builds and returns a new JoinPlan if both conditions hold. Otherwise, returns None.
*/
private def buildJoin(
oneJoinPlan: JoinPlan,
otherJoinPlan: JoinPlan,
conf: SQLConf,
conditions: Set[Expression],
topOutput: AttributeSet,
filters: Option[JoinGraphInfo]): Option[JoinPlan] = {
if (oneJoinPlan.itemIds.intersect(otherJoinPlan.itemIds).nonEmpty) {
// Should not join two overlapping item sets.
return None
}
if (filters.isDefined) {
// Apply star-join filter, which ensures that tables in a star schema relationship
// are planned together. The star-filter will eliminate joins among star and non-star
// tables until the star joins are built. The following combinations are allowed:
// 1. (oneJoinPlan U otherJoinPlan) is a subset of star-join
// 2. star-join is a subset of (oneJoinPlan U otherJoinPlan)
// 3. (oneJoinPlan U otherJoinPlan) is a subset of non star-join
val isValidJoinCombination =
JoinReorderDPFilters.starJoinFilter(oneJoinPlan.itemIds, otherJoinPlan.itemIds,
filters.get)
if (!isValidJoinCombination) return None
}
val onePlan = oneJoinPlan.plan
val otherPlan = otherJoinPlan.plan
val joinConds = conditions
.filterNot(l => canEvaluate(l, onePlan))
.filterNot(r => canEvaluate(r, otherPlan))
.filter(e => e.references.subsetOf(onePlan.outputSet ++ otherPlan.outputSet))
if (joinConds.isEmpty) {
// Cartesian product is very expensive, so we exclude them from candidate plans.
// This also significantly reduces the search space.
return None
}
// Put the deeper side on the left, tend to build a left-deep tree.
val (left, right) = if (oneJoinPlan.itemIds.size >= otherJoinPlan.itemIds.size) {
(onePlan, otherPlan)
} else {
(otherPlan, onePlan)
}
val newJoin = Join(left, right, Inner, joinConds.reduceOption(And), JoinHint.NONE)
val collectedJoinConds = joinConds ++ oneJoinPlan.joinConds ++ otherJoinPlan.joinConds
val remainingConds = conditions -- collectedJoinConds
val neededAttr = AttributeSet(remainingConds.flatMap(_.references)) ++ topOutput
val neededFromNewJoin = newJoin.output.filter(neededAttr.contains)
val newPlan =
if ((newJoin.outputSet -- neededFromNewJoin).nonEmpty) {
Project(neededFromNewJoin, newJoin)
} else {
newJoin
}
val itemIds = oneJoinPlan.itemIds.union(otherJoinPlan.itemIds)
// Now the root node of onePlan/otherPlan becomes an intermediate join (if it's a non-leaf
// item), so the cost of the new join should also include its own cost.
val newPlanCost = oneJoinPlan.planCost + oneJoinPlan.rootCost(conf) +
otherJoinPlan.planCost + otherJoinPlan.rootCost(conf)
Some(JoinPlan(itemIds, newPlan, collectedJoinConds, newPlanCost))
}
/** Map[set of item ids, join plan for these items] */
type JoinPlanMap = Map[Set[Int], JoinPlan]
/**
* Partial join order in a specific level.
*
* @param itemIds Set of item ids participating in this partial plan.
* @param plan The plan tree with the lowest cost for these items found so far.
* @param joinConds Join conditions included in the plan.
* @param planCost The cost of this plan tree is the sum of costs of all intermediate joins.
*/
case class JoinPlan(
itemIds: Set[Int],
plan: LogicalPlan,
joinConds: Set[Expression],
planCost: Cost) {
/** Get the cost of the root node of this plan tree. */
def rootCost(conf: SQLConf): Cost = {
if (itemIds.size > 1) {
val rootStats = plan.stats
Cost(rootStats.rowCount.get, rootStats.sizeInBytes)
} else {
// If the plan is a leaf item, it has zero cost.
Cost(0, 0)
}
}
def betterThan(other: JoinPlan, conf: SQLConf): Boolean = {
if (other.planCost.card == 0 || other.planCost.size == 0) {
false
} else {
val relativeRows = BigDecimal(this.planCost.card) / BigDecimal(other.planCost.card)
val relativeSize = BigDecimal(this.planCost.size) / BigDecimal(other.planCost.size)
relativeRows * conf.joinReorderCardWeight +
relativeSize * (1 - conf.joinReorderCardWeight) < 1
}
}
}
}
/**
* This class defines the cost model for a plan.
* @param card Cardinality (number of rows).
* @param size Size in bytes.
*/
case class Cost(card: BigInt, size: BigInt) {
def +(other: Cost): Cost = Cost(this.card + other.card, this.size + other.size)
}
/**
* Implements optional filters to reduce the search space for join enumeration.
*
* 1) Star-join filters: Plan star-joins together since they are assumed
* to have an optimal execution based on their RI relationship.
* 2) Cartesian products: Defer their planning later in the graph to avoid
* large intermediate results (expanding joins, in general).
* 3) Composite inners: Don't generate "bushy tree" plans to avoid materializing
* intermediate results.
*
* Filters (2) and (3) are not implemented.
*/
object JoinReorderDPFilters extends PredicateHelper {
/**
* Builds join graph information to be used by the filtering strategies.
* Currently, it builds the sets of star/non-star joins.
* It can be extended with the sets of connected/unconnected joins, which
* can be used to filter Cartesian products.
*/
def buildJoinGraphInfo(
conf: SQLConf,
items: Seq[LogicalPlan],
conditions: Set[Expression],
itemIndex: Seq[(LogicalPlan, Int)]): Option[JoinGraphInfo] = {
if (conf.joinReorderDPStarFilter) {
// Compute the tables in a star-schema relationship.
val starJoin = StarSchemaDetection.findStarJoins(items, conditions.toSeq)
val nonStarJoin = items.filterNot(starJoin.contains(_))
if (starJoin.nonEmpty && nonStarJoin.nonEmpty) {
val itemMap = itemIndex.toMap
Some(JoinGraphInfo(starJoin.map(itemMap).toSet, nonStarJoin.map(itemMap).toSet))
} else {
// Nothing interesting to return.
None
}
} else {
// Star schema filter is not enabled.
None
}
}
/**
* Applies the star-join filter that eliminates join combinations among star
* and non-star tables until the star join is built.
*
* Given the oneSideJoinPlan/otherSideJoinPlan, which represent all the plan
* permutations generated by the DP join enumeration, and the star/non-star plans,
* the following plan combinations are allowed:
* 1. (oneSideJoinPlan U otherSideJoinPlan) is a subset of star-join
* 2. star-join is a subset of (oneSideJoinPlan U otherSideJoinPlan)
* 3. (oneSideJoinPlan U otherSideJoinPlan) is a subset of non star-join
*
* It assumes the sets are disjoint.
*
* Example query graph:
*
* t1 d1 - t2 - t3
* \\ /
* f1
* |
* d2
*
* star: {d1, f1, d2}
* non-star: {t2, t1, t3}
*
* level 0: (f1 ), (d2 ), (t3 ), (d1 ), (t1 ), (t2 )
* level 1: {t3 t2 }, {f1 d2 }, {f1 d1 }
* level 2: {d2 f1 d1 }
* level 3: {t1 d1 f1 d2 }, {t2 d1 f1 d2 }
* level 4: {d1 t2 f1 t1 d2 }, {d1 t3 t2 f1 d2 }
* level 5: {d1 t3 t2 f1 t1 d2 }
*
* @param oneSideJoinPlan One side of the join represented as a set of plan ids.
* @param otherSideJoinPlan The other side of the join represented as a set of plan ids.
* @param filters Star and non-star plans represented as sets of plan ids
*/
def starJoinFilter(
oneSideJoinPlan: Set[Int],
otherSideJoinPlan: Set[Int],
filters: JoinGraphInfo) : Boolean = {
val starJoins = filters.starJoins
val nonStarJoins = filters.nonStarJoins
val join = oneSideJoinPlan.union(otherSideJoinPlan)
// Disjoint sets
oneSideJoinPlan.intersect(otherSideJoinPlan).isEmpty &&
// Either star or non-star is empty
(starJoins.isEmpty || nonStarJoins.isEmpty ||
// Join is a subset of the star-join
join.subsetOf(starJoins) ||
// Star-join is a subset of join
starJoins.subsetOf(join) ||
// Join is a subset of non-star
join.subsetOf(nonStarJoins))
}
}
/**
* Helper class that keeps information about the join graph as sets of item/plan ids.
* It currently stores the star/non-star plans. It can be
* extended with the set of connected/unconnected plans.
*/
case class JoinGraphInfo (starJoins: Set[Int], nonStarJoins: Set[Int])
| pgandhi999/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/CostBasedJoinReorder.scala | Scala | apache-2.0 | 19,576 |
package org.broadinstitute.clio.server.webservice
import org.broadinstitute.clio.server.service.StatusService
import org.broadinstitute.clio.status.model.{
ClioStatus,
SearchStatus,
StatusInfo,
VersionInfo
}
import org.broadinstitute.clio.transfer.model.ApiConstants._
import org.scalamock.scalatest.MockFactory
import scala.concurrent.Future
class StatusWebServiceSpec extends BaseWebserviceSpec with MockFactory {
behavior of "StatusWebService"
val statusService: StatusService = mock[StatusService]
val webService = new StatusWebService(statusService)
it should "versionRoute" in {
val expectedVersion = VersionInfo("Mock Server Version")
(statusService.getVersion _).expects().returning(Future(expectedVersion))
Get(s"/$versionString") ~> webService.versionRoute ~> check {
responseAs[VersionInfo] should be(expectedVersion)
}
}
it should "healthRoute" in {
val expectedStatus = StatusInfo(ClioStatus.Started, SearchStatus.OK)
(statusService.getStatus _).expects().returning(Future(expectedStatus))
Get(s"/$healthString") ~> webService.healthRoute ~> check {
responseAs[StatusInfo] should be(expectedStatus)
}
}
}
| broadinstitute/clio | clio-server/src/test/scala/org/broadinstitute/clio/server/webservice/StatusWebServiceSpec.scala | Scala | bsd-3-clause | 1,190 |
import org.specs2.mutable.Specification
import reactivemongo.api.MongoDriver
import scala.concurrent.duration.FiniteDuration
/** A Test Suite For MongoDriver */
class MongoDriverSpec extends Specification {
sequential
val hosts = Seq("localhost")
"MongoDriver" should {
"start and close cleanly with no connections" in {
val md = MongoDriver()
md.numConnections must_== 0 and (
md.close(FiniteDuration(200,"milliseconds")) must not(
throwA[Throwable]))
}
"start and close with one connection open" in {
val md = MongoDriver()
val connection = md.connection(hosts)
md.close(FiniteDuration(2,"seconds"))
success
}
"start and close with multiple connections open" in {
val md = MongoDriver()
val connection1 = md.connection(hosts,name=Some("Connection1"))
val connection2 = md.connection(hosts)
val connection3 = md.connection(hosts)
md.close(FiniteDuration(2,"seconds"))
success
}
}
}
| reactific/ReactiveMongo | driver/src/test/scala/MongoDriverSpec.scala | Scala | apache-2.0 | 1,015 |
package scala.meta
package contrib
trait TreeExtractors {
object Select {
def unapply(tree: Tree): Option[(Term, Name)] = tree match {
case Term.Select(qual, name) => Some(qual -> name)
case Type.Select(qual, name) => Some(qual -> name)
case _ => None
}
}
}
| MasseGuillaume/scalameta | scalameta/contrib/shared/src/main/scala/scala/meta/contrib/TreeExtractors.scala | Scala | bsd-3-clause | 289 |
package xiatian.knowledge.highlight
import better.files.File
/**
* Highlight DataSet
*
* @author Tian Xia Email: [email protected]
* School of IRM, Renmin University of China.
* Jul 06, 2017 18:31
*/
object HighlightDataSet {
lazy val highlightFiles = File("./data/highlight").list.toList.sortBy(_.name)
lazy val articles = highlightFiles map {
f =>
println(s"parsing $f ...")
Highlight.parseFile(f)
}
  /**
    * Document frequency and total character count for every section category.
    *
    * @return map from section category to (document frequency, total characters)
    */
lazy val sectionStats: Map[String, (Int, Int)] =
articles.flatMap {
article =>
article.sections.map(section =>
(section.category, article.filename, section.characters))
}
.groupBy(_._1) // category->
.map {
case (category: String, items: List[(String, String, Int)]) =>
(category, (items.length, items.map(_._3).sum))
}
  /**
    * Sorted by document frequency, print each section category's name, document
    * frequency, and total character count.
    */
def printSectionStatsMessage() = {
val totalCharacters = sectionStats.map(_._2._2).sum.toDouble
println("章节类别\\t文档频度\\t文字总数")
sectionStats.toList.sortBy(_._2._1).map {
pair =>
val chRatio = (pair._2._2 / totalCharacters * 100).formatted("%.2f")
s"${pair._1}\\t${pair._2._1} \\t ${pair._2._2}(${chRatio}%)"
    }.foreach(println)
}
  /**
    * How many distinct highlights are covered by sections of the given categories.
    */
val highlightCoverage: Seq[String] => Seq[(String, Int, Int)]
= (categories: Seq[String]) => articles.map {
article =>
val highlights = article.highlights
val matchedSectionCount = highlights.map {
highlight =>
val matchedSectionLights = highlight.labeledLights
.filter(
labeledLight => categories.contains(labeledLight.section.category)
)
if (matchedSectionLights.isEmpty) 0 else 1
}.sum
(article.filename, matchedSectionCount, highlights.length)
}
def printCoverage(category: String*) = {
val coverage: Seq[(String, Int, Int)] = highlightCoverage(category)
val average = coverage.map {
case (filename: String, matched: Int, total: Int) =>
val ratio = if (total == 0) 0 else matched.toDouble / total
println(s"$filename \\t $matched / $total \\t $ratio")
ratio
}.sum / coverage.length
println(s"average: $average")
}
}
| iamxiatian/knowledge | src/main/scala/xiatian/knowledge/highlight/HighlightDataSet.scala | Scala | gpl-3.0 | 2,490 |
package fpinscala.ch06_state
import annotation.tailrec
trait RNG {
def nextInt: (Int, RNG)
}
object RNG {
def simple(seed: Long): RNG = new RNG {
def nextInt = {
// `&` is bitwise AND
// `<<` is left binary shift
val seed2 = (seed*0x5DEECE66DL + 0xBL) & ((1L << 48) - 1)
// `>>>` is right binary shift with zero fill
((seed2 >>> 16).asInstanceOf[Int], simple(seed2))
}
}
def boolean(rng: RNG) =
rng.nextInt match { case (i, rng2) => (i % 2 == 0, rng2) }
// Exercise 6.1
def positiveInt(rng: RNG): (Int, RNG) = {
val (i, r) = rng.nextInt
// `Int.MinValue` is a corner case that needs special handling since its
// absolute value doesn't fit in an `Int`. We could just select
// `Int.MaxValue` or `0` as a replacement but that would skew the
// generator. One solution is to simply retry recursively until we get a
// different number.
if (i == Int.MinValue) positiveInt(r) else (i.abs, r)
}
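  // Illustrative alternative, not one of the numbered exercises: avoid the retry by
  // mapping each negative value to a distinct non-negative one; `-(i + 1)` sends
  // Int.MinValue to Int.MaxValue, so no value is over-represented.
  def positiveIntViaMapping(rng: RNG): (Int, RNG) = {
    val (i, r) = rng.nextInt
    (if (i < 0) -(i + 1) else i, r)
  }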
// Exercise 6.2
def double(rng: RNG): (Double, RNG) = {
val (i, r) = positiveInt(rng)
// We generate a positive integer and divide it by one higher than the
// maximum. This is just one possible solution.
    (i / (Int.MaxValue.toDouble + 1), r)
}
// Exercise 6.3.a
def intDouble(rng: RNG): ((Int,Double), RNG) = {
val (i, r1) = positiveInt(rng)
val (d, r2) = double(r1)
((i, d), r2)
}
// Exercise 6.3.b
def doubleInt(rng: RNG): ((Double,Int), RNG) = {
val ((i, d), r) = intDouble(rng)
((d, i), r)
}
// Exercise 6.3.c
def double3(rng: RNG): ((Double,Double,Double), RNG) = {
val (d1, r1) = double(rng)
val (d2, r2) = double(r1)
val (d3, r3) = double(r2)
((d1, d2, d3), r3)
}
// Exercise 6.4
object Ints {
/* We can address the repetition of passing the RNG along every time with
* recursion.
*/
object NonTailRecursion {
def ints(count: Int)(rng: RNG): (List[Int], RNG) =
if (count <= 0)
(List(), rng)
else {
val (i, r1) = rng.nextInt
val (is, r2) = ints(count - 1)(r1)
(i :: is, r2)
}
}
object TailRecursion {
def ints(count: Int)(rng: RNG): (List[Int], RNG) = {
@tailrec
def ints(c: Int, acc: List[Int], r0: RNG): (List[Int], RNG) =
if (c <= 0)
(acc, r0)
else {
val (i, r1) = r0.nextInt
ints(c - 1, i :: acc, r1)
}
ints(count, List(), rng)
}
}
}
type Rand[+A] = RNG => (A, RNG)
val int: Rand[Int] = { _.nextInt }
def unit[A](a: A): Rand[A] =
rng => (a, rng)
def map[A, B](s: Rand[A])(f: A => B): Rand[B] =
rng => {
val (a, rng2) = s(rng)
(f(a), rng2)
}
object UsingRandAndMap {
// Exercise 6.5
def positiveMax(n: Int): Rand[Int] =
map(positiveInt) { _ % (n + 1) }
// Exercise 6.6
def double: Rand[Double] =
      map(positiveInt) { _ / (Int.MaxValue.toDouble + 1) }
}
// Exercise 6.7
def map2[A, B, C](ra: Rand[A], rb: Rand[B])(f: (A, B) => C): Rand[C] =
rng => {
val (a, rngA) = ra(rng)
val (b, rngB) = rb(rngA)
(f(a, b), rngB)
}
/* This implementation of 'map2' passes the initial RNG to the first argument
* and the resulting RNG to the second argument. It's not necessarily wrong
* to do this the other way around, since the results are random anyway. We
* could even pass the initial RNG to both `f` and `g`, but that might have
* unexpected results. For example, if both arguments are `RNG.int` then we
* would always get two of the same Int in the result. When implementing
* functions like this, it's important to consider how we would test them for
* correctness.
*/
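  // Illustrative only, not one of the numbered exercises: 'map2' lets us pair any two
  // generators without threading the RNG by hand.
  def both[A, B](ra: Rand[A], rb: Rand[B]): Rand[(A, B)] =
    map2(ra, rb)((_, _))

  val randIntDouble: Rand[(Int, Double)] = both(int, double _)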
// Exercise 6.8
object Sequence {
object LowLevel {
/* This works, but doesn't take advantage of the 'map2' combinator. */
def sequence[A](fs: List[Rand[A]]): Rand[List[A]] =
rng => {
fs.foldRight((List[A](), rng)) { case (ra, (acc, r)) =>
val (a, r1) = ra(r)
(a :: acc, r1)
}
}
}
object UsingMap2 {
def sequence[A](fs: List[Rand[A]]): Rand[List[A]] =
fs.foldRight(unit(List[A]())) { map2(_, _) { _ :: _ } }
/* Using 'map2' has a clear payoff. Also, it's interesting that we never
* actually need to talk about the `RNG` value in `sequence`. This is a
* strong hint that we could make this function polymorphic in that type.
*/
}
/* We are using `foldRight` for these implementations of 'sequence', which as
* we've discussed risks overflowing the stack for sufficiently large lists.
* If we used `foldLeft` to avoid this problem, then the values in the
* resulting list would appear in reverse order. It would be arguably better
* to use `foldLeft` with `reverse`. What do you think?
*/
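    object UsingFoldLeft {
      /* Illustrative only, not one of the numbered exercises: the `foldLeft` plus
       * `reverse` variant suggested above. Reversing the input first keeps the output
       * in the original order while avoiding `foldRight`'s deep recursion; the RNG is
       * still threaded through the elements in their original order.
       */
      def sequence[A](fs: List[Rand[A]]): Rand[List[A]] =
        fs.reverse.foldLeft(unit(List[A]())) { (acc, f) => map2(f, acc) { _ :: _ } }
    }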
}
// Exercise 6.9.a
def flatMap[A, B](f: Rand[A])(g: A => Rand[B]): Rand[B] =
rng => {
val (a, r1) = f(rng)
g(a)(r1)
}
object UsingFlatMap {
// Exercise 6.9.b
def positiveInt: Rand[Int] =
flatMap(int) { i =>
if (i == Int.MinValue) positiveInt else unit(i.abs)
}
// Exercise 6.10.a
def map[A, B](a: Rand[A])(f: A => B): Rand[B] = {
flatMap(a)(f andThen unit)
}
/* You may have used "{ a => unit(f(a)) }" instead of "f andThen unit".
* 'andThen' and 'compose' are provided on Scala's Function1 to help
* compose functions without worrying about an explicit input parameter.
* This way of mixing functions is called "point free style". This style
* is similar to using pipes in POSIX shell scripting.
*
* Unfortunately, Scala's inference can often complicate attempts to use
* point free style. Sometimes it works out more elegantly and other times
* less so.
*/
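    // Illustrative only: the same definition with an explicit parameter instead of the
    // point-free composition above; the two are equivalent.
    def mapExplicit[A, B](a: Rand[A])(f: A => B): Rand[B] =
      flatMap(a) { x => unit(f(x)) }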
// Exercise 6.10.b
def map2[A, B, C](a: Rand[A])(b: Rand[B])(f: (A, B) => C): Rand[C] =
flatMap(a) { a => map(b) { b => f(a, b) } }
}
}
case class State[S,+A](run: S => (A, S)) {
// Exercise 6.11.b
def flatMap[B](f: A => State[S, B]): State[S, B] =
State { s =>
val (a, s1) = run(s)
f(a).run(s1)
}
// Exercise 6.11.c
def map[B](f: A => B): State[S, B] =
flatMap(f andThen State.unit)
/* Defining 'map' in terms of 'flatMap' calls 'unit' unnecessarily, but it's
* elegant and limits the state-specific definition to just 'flatMap' and
   * 'unit'. Note that we've seen other abstractions like List and Option that
   * have functions like 'map', 'flatMap', 'unit', 'map2', and 'sequence'.
* Furthermore, the implementations built from 'flatMap' and 'unit' can be
* reused in all of these abstractions. We've seen how to abstract away
* State from Rand. Later, we'll see how to abstract State further, so that
* we can share implementations between abstractions like State, List,
* Option, and many others.
*/
object WithoutUsingFlatMap {
def map[B](f: A => B): State[S, B] =
State { s =>
val (a, s1) = run(s)
(f(a), s1)
}
}
// Exercise 6.11.d
def map2[B,C](sb: State[S, B])(f: (A, B) => C): State[S, C] =
flatMap { a => sb.map { b => f(a, b) } }
}
object State {
// Exercise 6.11.a
def unit[S, A](a: A): State[S, A] =
State { (a, _) }
// Exercise 6.11.e
object Sequence {
object FoldRight {
/* This solution is analogous to our solution for Exercise 6.8. It uses
* 'foldRight', though, so it has the potential to overflow the stack for
* sufficiently large lists.
*/
def sequence[S, A](l: List[State[S, A]]): State[S, List[A]] =
l.foldRight(unit[S, List[A]](List())) { (a, acc) =>
a.map2(acc) { _ :: _ }
}
}
object FoldLeftReverseInput {
      /* Every application of 'foldRight' can be easily transformed to an
       * application of 'reverse' followed by 'foldLeft'. Using equational
       * reasoning (which we get from the referential transparency of pure
       * functions) we can prove the two are the same semantically. However,
* with 'foldLeft', though we don't have the potential to overflow the
* stack, we've taken on the performance penalty of reversing our input
* list.
*/
def sequence[S, A](l: List[State[S, A]]): State[S, List[A]] =
l.reverse.foldLeft(unit[S, List[A]](List())) { (acc, a) =>
a.map2(acc) { _ :: _ }
}
}
/* Though not the case for 'sequence', for folds that return lists, there
     * are times when the output list is smaller than the input list. In
* these cases, to improve performance, it's preferable to reverse the
* output list rather than the input -- to call 'reverse' after 'foldLeft',
* rather than before it.
*
* In the following implementations, we practice this technique, even
* though it's not necessary for 'sequence'.
*/
object FoldLeftReverseOutput1 {
/* It's possible to perform the list reversal after the 'foldLeft', but
* we can no longer use 'map2' for the fold. The problem is that 'map2'
       * threads the state through its first argument before its second.
* But our list is not yet in the correct order, so we need to thread our
* state through opposite to how 'map2' threads it. This is what we're
* doing below.
*/
def sequence[S, A](l: List[State[S, A]]): State[S, List[A]] =
l.foldLeft(unit[S, List[A]](List())) { (stateAcc, stateA) =>
State { s1 =>
val (acc, s2) = stateAcc.run(s1)
val (a, s3) = stateA.run(s2)
(a :: acc, s3)
}
} map { _.reverse }
}
object FoldLeftReverseOutput2 {
/* By inverting our call of 'foldLeft' with our 'State' constructor, we
* get an implementation that's perhaps easier to follow in that we can
* see the state threading through from left to right.
*
* Remember though, for 'sequence' we'd probably not go through this
* hassle since the output and input lists are the same size so there's
* no difference in performance. Reversing the input list and using
* 'map2' is much cleaner than these attempts to reverse the output list.
*/
def sequence[S, A](l: List[State[S, A]]): State[S, List[A]] =
State { (s: S) =>
l.foldLeft((List[A](), s)) { case ((acc, s1), action) =>
val (a, s2) = action.run(s1)
(a :: acc, s2)
}
} map { _.reverse }
}
object TailRecursion {
/* The following implementation uses a loop internally and is practically
* an inlining of a recursive definition of 'foldLeft' into the previous
* implementation. Alternatively, we could have used a
* collection.mutable.ListBuffer internally.
*
* But why go through that effort? Since the JVM's JIT optimizer is
* rather good at smart inlining, preemptive inlining seems only to
* complicate our implementation. It seems better to use 'foldLeft'
* directly.
*/
def sequence[S, A](l: List[State[S, A]]): State[S, List[A]] = {
@tailrec
def go(s: S, actions: List[State[S,A]], acc: List[A]): (List[A], S) =
actions match {
case Nil =>
(acc, s)
case h :: t =>
h.run(s) match { case (a, s2) => go(s2, t, a :: acc) }
}
State { (s: S) => go(s, l, List()) } map { _.reverse }
}
}
}
def sequence[S, A](l: List[State[S, A]]): State[S, List[A]] =
Sequence.FoldLeftReverseInput.sequence(l)
// Exercise 6.12
def get[S]: State[S, S] =
State { s => (s, s) }
def set[S](s: S): State[S, Unit] =
State { _ => ((), s) }
def modify[S](f: S => S): State[S, Unit] =
for {
s <- get
_ <- set(f(s))
} yield ()
}
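/* Illustrative only: a tiny stateful computation built from the combinators above.
 * It threads an Int counter through two steps; the names here are assumptions of
 * this sketch, not part of the exercises.
 */
object StateUsageSketch {
  // Produces a label and advances the counter.
  val nextLabel: State[Int, String] =
    State { n => (s"item-$n", n + 1) }

  // Runs the generator twice, threading the counter automatically:
  // twoLabels.run(0) == (("item-0", "item-1"), 2)
  val twoLabels: State[Int, (String, String)] =
    nextLabel.flatMap { a => nextLabel.map { b => (a, b) } }
}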
sealed trait Input
case object Coin extends Input
case object Turn extends Input
case class Machine(locked: Boolean, candies: Int, coins: Int)
object Machine {
import State._
// Exercise 6.13
def simulateMachine(inputs: List[Input]): State[Machine, Int] = {
    // Machine rules: a coin inserted into a locked machine (with candy left)
    // unlocks it and is kept; turning the knob on an unlocked machine dispenses
    // a candy and locks it again; all other inputs leave the machine unchanged.
    def step(i: Input, m: Machine): Machine =
      if (m.candies <= 0)
        m
      else
        i match {
          case Coin =>
            if (m.locked) m.copy(coins = m.coins + 1, locked = false) else m
          case Turn =>
            if (!m.locked)
              m.copy(locked = true, candies = m.candies - 1)
            else m
        }
for {
_ <- sequence(inputs map { i => modify { (m: Machine) => step(i, m) } })
m <- get
} yield m.coins
}
}
| shajra/fpinscala-exercises-shajra | answers/src/main/scala/fpinscala/ch06_state/State.scala | Scala | mit | 12,796 |
/**
* This file is part of agora-mixnet.
* Copyright (C) 2015-2016 Agora Voting SL <[email protected]>
* agora-mixnet is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License.
* agora-mixnet is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
* You should have received a copy of the GNU Affero General Public License
* along with agora-mixnet. If not, see <http://www.gnu.org/licenses/>.
**/
import shapeless._
import nat._
import com.typesafe.config.ConfigFactory
import scala.concurrent._
import scala.concurrent.duration.Duration
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import ch.bfh.unicrypt.math.algebra.multiplicative.classes.GStarModSafePrime
import ch.bfh.unicrypt.crypto.schemes.encryption.classes.ElGamalEncryptionScheme
import mpservice._
import app._
import accumulator.BoardReader
import director._
import scala.util.{Success, Failure}
import election._
import models._
import utils.Util
object FiwareDemo extends App {
val config = ConfigFactory.load()
val useGmp = config.getBoolean("mpservice.use-gmp")
val useExtractor = config.getBoolean("mpservice.use-extractor")
MPBridgeS.init(useGmp, useExtractor)
// actually used in Util.getE
val bypass = config.getBoolean("bypass-membership-check")
println(s"* bypass-membership-check: $bypass")
// actually used in AbstractCyclicGroup constructor
val generatorsParallel = config.getBoolean("use-generators-parallel")
println(s"* use-generators-parallel: $generatorsParallel")
// actually used in Util.getIndependentGenerators constructor
val generatorsParallelLevel = config.getInt("generators-parallelism-level")
println(s"* generators-parallelism-level: $generatorsParallelLevel")
implicit val system = ActorSystem()
implicit val executor = system.dispatchers.lookup("my-other-dispatcher")
implicit val materializer = ActorMaterializer()
Election.init()
args.toList.lift(0) match {
case None =>
throw new Error("Error, missing input arguments: the first argument should be director or authority")
case Some(arg1) =>
arg1 match {
case "director" =>
println("director")
val totalVotes = args.toList.lift(1).getOrElse("5").toInt
val totalAuthorities = args.toList.lift(2).getOrElse("0").toInt
val director = totalAuthorities match {
case 2 =>
new ElectionDirector[_2](totalVotes)
case 3 =>
new ElectionDirector[_3](totalVotes)
case 4 =>
new ElectionDirector[_4](totalVotes)
case 5 =>
new ElectionDirector[_5](totalVotes)
case 6 =>
new ElectionDirector[_6](totalVotes)
case 7 =>
new ElectionDirector[_7](totalVotes)
case 8 =>
new ElectionDirector[_8](totalVotes)
case 9 =>
new ElectionDirector[_9](totalVotes)
case _ =>
throw new Error("Error, the number of authorities must be 2 to 9")
}
waitAll()
case "authority" =>
println("authority")
val totalAuthorities = args.toList.lift(1).getOrElse("0").toInt
val authorityIndex = args.toList.lift(2).getOrElse("0").toInt
          if (totalAuthorities != 2) {
            throw new Error("Error, only 2 authorities are currently supported")
}
          if (authorityIndex < 0 || authorityIndex >= totalAuthorities) {
throw new Error("Error, wrong authority index")
}
val authority = authorityIndex match {
case 0 =>
new ElectionAuthority[_2, _0]()
case 1 =>
new ElectionAuthority[_2, _1]()
case _ =>
throw new Error("this probably will never happen, but the authority index is not 0 or 1")
}
waitAll()
}
}
def waitAll() {
println("Waiting")
val promise = Promise[Any]()
Await.ready(promise.future, Duration.Inf)
}
}
/**
* An election process DEMO
*
* Simulates the steps in the election from public key generation all the way to decryption
*
* Things that are included in this demo are:
*
* - A typed purely functional data structure modeling the election process and bulletin board (see below)
*
* - Cryptography for
*
* a) encrypting votes
* b) creating keyshares, proofs and verification
* c) shuffling votes, proofs and verification
* d) joint (partial) decryption, proofs and verification
*
* - Not included
*
* Remoting (everything simulated with method calls)
* Signatures and authentication
* Error handling
* Proofs of knowledge of plaintext and verification in vote casting
*
*
* An election is modeled as a typed, purely functional sequential state machine. We use shapeless
 * encoding of natural numbers to provide length-typed lists (aka dependent types), so that we get:
*
* 1) The election process logic is captured by types, so illegal transitions
* are caught by the compiler and inconsistent states are not possible, for example
*
* It is a compile-time error to try to construct the public key without all the shares
 * It is a compile-time error to add more shares, shuffles or decryptions than expected
 * It is a compile-time error to start an election with no public key
* It is a compile-time error to decrypt without shuffling
* etc.
*
* 2) Because the election is purely functional, the entire history of the election
* can be reconstructed or replayed. A purely functional data structure is in this
* sense a general case of an immutable log
*
*
 * This demo uses two trustees; ElectionTest3 below shows how the number of trustees generalizes.
 * A minimal sketch of the length-typed list idea is given in SizedListSketch, after ElectionTest.
*/
object ElectionTest extends App {
val config = ConfigFactory.load()
val useGmp = config.getBoolean("mpservice.use-gmp")
val useExtractor = config.getBoolean("mpservice.use-extractor")
MPBridgeS.init(useGmp, useExtractor)
// actually used in Util.getE
val bypass = config.getBoolean("bypass-membership-check")
println(s"* bypass-membership-check: $bypass")
// actually used in AbstractCyclicGroup constructor
val generatorsParallel = config.getBoolean("use-generators-parallel")
println(s"* use-generators-parallel: $generatorsParallel")
// actually used in Util.getIndependentGenerators constructor
val generatorsParallelLevel = config.getInt("generators-parallelism-level")
println(s"* generators-parallelism-level: $generatorsParallelLevel")
val totalVotes = args.toList.lift(0).getOrElse("100").toInt
implicit val system = ActorSystem()
implicit val executor = system.dispatchers.lookup("my-other-dispatcher")
implicit val materializer = ActorMaterializer()
Election.init()
// create the keymakers
// these are responsible for distributed key generation and joint decryption
val k1 = new KeyMakerTrustee("keymaker one")
val k2 = new KeyMakerTrustee("keymaker two")
// create the mixers
// these are responsible for shuffling the votes
val m1 = new MixerTrustee("mixer one")
val m2 = new MixerTrustee("mixer two")
// create the election,
// we are using privacy level 2, two trustees of each kind
  // we use 2048 bits for the size of the group modulus
val start = Election.create[_2]("my election", 2048, None)
// get subscriber when we have the uid
// then subscribe to the election creation
var uidPromise = Promise[String]()
BoardReader.addElectionCreationListener { uid =>
if(!uidPromise.isCompleted) {
uidPromise.success(uid)
}
}
val createdPromise = Promise[Election[_, Created]]()
val startSharesPromise = Promise[Election[_, Shares[_0]]]()
uidPromise.future map { uid =>
val subscriber = BoardReader.getSubscriber(uid)
subscriber.create() onComplete {
case Success(dd) =>
println("GGG subscriber gives created!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
createdPromise.success(dd)
case Failure(e) =>
println(s"GGG $e")
createdPromise.failure(e)
}
subscriber.startShares() onComplete {
case Success(dd) =>
println("GGG subscriber gives startShares!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
startSharesPromise.success(dd)
case Failure(e) =>
println(s"GGG $e")
startSharesPromise.failure(e)
}
}
// the election is now ready to receive key shares
val readyForShares = createdPromise.future flatMap { start =>
Election.startShares(start.asInstanceOf[Election[_2, Created]])
}
// each keymaker creates the shares and their proofs, these are added to the election
val twoShares = startSharesPromise.future flatMap { readyForShares2 =>
val readyForShares = readyForShares2.asInstanceOf[Election[_2, Shares[_0]]]
k1.createKeyShare(readyForShares) flatMap { keyShare =>
Election.addShare(readyForShares, keyShare, k1.id)
} flatMap { oneShare =>
k2.createKeyShare(readyForShares) flatMap { keyShare =>
Election.addShare(oneShare, keyShare, k2.id)
}
}
}
// combine the shares from the keymaker trustees, this produces the election public key
val combined = twoShares flatMap { twoShares =>
Election.combineShares(twoShares)
}
// generate dummy votes
val plaintexts = Seq.fill(totalVotes)(scala.util.Random.nextInt(1000))
val electionGettingVotes = combined flatMap { combined =>
val startVotes = Election.startVotes(combined)
// since we are storing information in election as if it were a bulletin board, all
// the data is stored in a wire-compatible format, that is strings/jsons whatever
// we reconstruct the public key as if it had been read from such a format
val publicKey = Util.getPublicKeyFromString(combined.state.publicKey, combined.state.cSettings.generator)
// encrypt the votes with the public key of the election
val votes = Util.encryptVotes(plaintexts, combined.state.cSettings, publicKey)
startVotes flatMap { startVotes =>
// doing this in one step to avoid memory explosion
Election.addVotes(startVotes, votes.map(_.convertToString).toList)
}
}
// we are only timing the mixing phase
var mixingStart = System.currentTimeMillis()
// stop the voting period
val stopVotes = electionGettingVotes flatMap { electionGettingVotes =>
// FIXME remove this
MPBridge.total = 0;
mixingStart = System.currentTimeMillis()
Election.stopVotes(electionGettingVotes)
}
val mixing = stopVotes flatMap { stopVotes =>
// we can start preshuffling for both mixers
// that way the second preshuffle will be concurrent with the first mix
val (predata1, proof1) = m1.preShuffleVotes(stopVotes)
val (predata2, proof2) = m2.preShuffleVotes(stopVotes)
// prepare for mixing
val startMix = Election.startMixing(stopVotes)
startMix flatMap { startMix =>
val shuffle1 = m1.shuffleVotes(startMix, predata1, proof1)
// we compose futures, first mix then second mix
shuffle1 flatMap { shuffle =>
// the proof is verified and the shuffle is then added to the election, advancing its state
Election.addMix(startMix, shuffle, m1.id)
} flatMap { mixOne =>
// each mixing trustee extracts the needed information from the election
// and performs the shuffle and proofs
m2.shuffleVotes(mixOne, predata2, proof2) map { shuffle =>
// the proof is verified and the shuffle is then added to the election, advancing its state
Election.addMix(mixOne, shuffle, m2.id)
}
}
}
}
// once all the mixes are finished we proceed to decryption
val all = mixing.flatMap {
mixTwo => mixTwo
} flatMap { mixTwo =>
// we are done mixing
val stopMix = Election.stopMixing(mixTwo)
var mixingEnd = System.currentTimeMillis()
val startDecryptions = stopMix flatMap { stopMix =>
mixingEnd = System.currentTimeMillis()
// start the partial decryptions
// if we tried to do this before the mixing was completed, the compiler would protest
Election.startDecryptions(stopMix)
}
startDecryptions flatMap { startDecryptions =>
// each keymaker trustee extracts the votes from the last shuffle from the election and
// uses their private keys to do the partial decryption and create proofs
val pd1Future = Future { k1.partialDecryption(startDecryptions) }
val pd2Future = Future { k2.partialDecryption(startDecryptions) }
// the two decryption futures execute in parallel
val decryptions = for {
pd1 <- pd1Future
pd2 <- pd2Future
} yield(pd1, pd2)
decryptions.map { case (pd1, pd2) =>
val partialOne = Election.addDecryption(startDecryptions, pd1, k1.id)
val partialTwo = partialOne flatMap { partialOne =>
Election.addDecryption(partialOne, pd2, k2.id)
}
// the partial decryptions are combined, yielding the plaintexts
val electionDone = partialTwo flatMap { partialTwo =>
Election.combineDecryptions(partialTwo)
}
electionDone map { electionDone =>
// lets check that everything went well
// println(s"Plaintexts $plaintexts")
// println(s"Decrypted ${electionDone.state.decrypted}")
// println("ok: " + (plaintexts.sorted == electionDone.state.decrypted.map(_.toInt).sorted))
val mixTime = (mixingEnd - mixingStart) / 1000.0
val totalTime = (System.currentTimeMillis() - mixingStart) / 1000.0
println("*************************************************************")
println(s"finished run with votes = $totalVotes")
println(s"mixTime: $mixTime")
println(s"totalTime: $totalTime")
println(s"sec / vote (mix): ${mixTime / totalVotes}")
println(s"sec / vote: ${totalTime / totalVotes}")
println(s"total modExps: ${MPBridge.total}")
println(s"found modExps: ${MPBridge.found}")
println(s"found modExps %: ${MPBridge.found/MPBridge.total.toDouble}")
println(s"extracted modExps: ${MPBridge.getExtracted}")
println(s"extracted modExps %: ${MPBridge.getExtracted/MPBridge.total.toDouble}")
println(s"modExps / vote: ${MPBridge.total.toFloat / totalVotes}")
println("*************************************************************")
MPBridgeS.shutdown
//Thread.sleep(5000)
//BoardPoster.closeSystem()
}
}
}
}
all.onFailure { case e =>
e.printStackTrace
MPBridgeS.shutdown
}
}
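/* Illustrative sketch only, not used by the demos: the "length-typed list" idea
 * described in the comment above ElectionTest, reduced to a few lines. The names
 * below are assumptions of this sketch, not part of the election API.
 */
object SizedListSketch {
  import shapeless._
  import nat._

  // A list that records its length in its type.
  case class SizedList[N <: Nat, A](underlying: List[A])

  // Adding an element produces a list whose length type is N + 1.
  def add[N <: Nat, A](s: SizedList[N, A], a: A): SizedList[Succ[N], A] =
    SizedList[Succ[N], A](a :: s.underlying)

  val two: SizedList[_2, Int] = add(add(SizedList[_0, Int](Nil), 1), 2)
  // val three: SizedList[_2, Int] = add(two, 3) // would not compile: one share too many
}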
/**
* Same as above but with three trustees
*
* Note that everything is done the same way except the type parameter _3 and
* the number of trustee operations
*
*/
object ElectionTest3 extends App {
val totalVotes = args.toList.headOption.getOrElse("100").toInt
implicit val system = ActorSystem()
implicit val executor = system.dispatchers.lookup("my-other-dispatcher")
implicit val materializer = ActorMaterializer()
val k1 = new KeyMakerTrustee("keymaker one")
val k2 = new KeyMakerTrustee("keymaker two")
val k3 = new KeyMakerTrustee("keymaker three")
val m1 = new MixerTrustee("mixer one")
val m2 = new MixerTrustee("mixer two")
val m3 = new MixerTrustee("mixer three")
// privacy level 3, three trustees of each kind, 512 bits for the size of the group modulus
val start = Election.create[_3]("my election", 512, None)
val readyForShares = start flatMap {
start => Election.startShares(start)
}
val threeShares = readyForShares flatMap { readyForShares =>
k1.createKeyShare(readyForShares) flatMap { keyShare =>
Election.addShare(readyForShares, keyShare, k1.id)
} flatMap { oneShare =>
k2.createKeyShare(readyForShares) flatMap { keyShare =>
Election.addShare(oneShare, keyShare, k2.id)
}
} flatMap { twoShares =>
k3.createKeyShare(readyForShares) flatMap { keyShare =>
Election.addShare(twoShares, keyShare, k3.id)
}
}
}
val combined = threeShares flatMap {
threeShares => Election.combineShares(threeShares)
}
val plaintexts = Seq.fill(totalVotes)(scala.util.Random.nextInt(10))
val electionGettingVotes = combined flatMap { combined =>
val startVotes = Election.startVotes(combined)
val publicKey = Util.getPublicKeyFromString(combined.state.publicKey, combined.state.cSettings.generator)
val votes = Util.encryptVotes(plaintexts, combined.state.cSettings, publicKey)
var electionGettingVotes = startVotes
for (i <- 0 until votes.length) {
electionGettingVotes = electionGettingVotes flatMap { electionGettingVotes =>
Election.addVote(electionGettingVotes, votes(i).convertToString)
}
}
electionGettingVotes
}
val stopVotes = electionGettingVotes flatMap { electionGettingVotes =>
Election.stopVotes(electionGettingVotes)
}
val startMix = stopVotes flatMap { stopVotes =>
Election.startMixing(stopVotes)
}
val mixOne = startMix flatMap { startMix =>
val shuffle1 = m1.shuffleVotes(startMix)
Election.addMix(startMix, shuffle1, m1.id)
}
val mixTwo = mixOne flatMap { mixOne =>
val shuffle2 = m2.shuffleVotes(mixOne)
Election.addMix(mixOne, shuffle2, m2.id)
}
val mixThree = mixTwo flatMap { mixTwo =>
val shuffle3 = m3.shuffleVotes(mixTwo)
Election.addMix(mixTwo, shuffle3, m3.id)
}
val stopMix = mixThree flatMap { mixThree =>
Election.stopMixing(mixThree)
}
val startDecryptions = stopMix flatMap { stopMix =>
Election.startDecryptions(stopMix)
}
val partialThree = startDecryptions flatMap { startDecryptions =>
val pd1 = k1.partialDecryption(startDecryptions)
val pd2 = k2.partialDecryption(startDecryptions)
val pd3 = k3.partialDecryption(startDecryptions)
val partialOne = Election.addDecryption(startDecryptions, pd1, k1.id)
partialOne flatMap { partialOne =>
val partialTwo = Election.addDecryption(partialOne, pd2, k2.id)
partialTwo flatMap { partialTwo =>
Election.addDecryption(partialTwo, pd3, k3.id)
}
}
}
val electionDone = partialThree flatMap { partialThree =>
Election.combineDecryptions(partialThree)
}
electionDone onSuccess { case electionDone =>
println(s"Plaintexts $plaintexts")
println(s"Decrypted ${electionDone.state.decrypted}")
println("ok: " + (plaintexts.sorted == electionDone.state.decrypted.map(_.toInt).sorted))
}
}
object ElectionTestSerial extends App {
val totalVotes = args.toList.lift(0).getOrElse("100").toInt
val config = ConfigFactory.load()
val useGmp = config.getBoolean("mpservice.use-gmp")
val useExtractor = config.getBoolean("mpservice.use-extractor")
implicit val system = ActorSystem()
implicit val executor = system.dispatchers.lookup("my-other-dispatcher")//system.dispatcher
implicit val materializer = ActorMaterializer()
MPBridgeS.init(useGmp, useExtractor)
// create the keymakers
// these are responsible for distributed key generation and joint decryption
val k1 = new KeyMakerTrustee("keymaker one")
val k2 = new KeyMakerTrustee("keymaker two")
// create the mixers
// these are responsible for shuffling the votes
val m1 = new MixerTrustee("mixer one")
val m2 = new MixerTrustee("mixer two")
// create the election
// we are using privacy level 2, two trustees of each kind
  // we are using 2048 bits for the size of the group modulus
val start = Election.create[_2]("my election", 2048, None)
// the election is now ready to receive key shares
val readyForShares = start flatMap { start =>
Election.startShares(start)
}
// each keymaker creates the shares and their proofs, these are added to the election
val twoShares = readyForShares flatMap { readyForShares =>
k1.createKeyShare(readyForShares) flatMap { keyShare =>
Election.addShare(readyForShares, keyShare, k1.id)
} flatMap { oneShare =>
k2.createKeyShare(readyForShares) flatMap { keyShare =>
Election.addShare(oneShare, keyShare, k2.id)
}
}
}
// combine the shares from the keymaker trustees, this produces the election public key
val combined = twoShares flatMap { twoShares =>
Election.combineShares(twoShares)
}
// generate dummy votes
val plaintexts = Seq.fill(totalVotes)(scala.util.Random.nextInt(10))
val electionGettingVotes = combined flatMap { combined =>
    // since we are storing information in the election as if it were a bulletin board, all
    // the data is kept in a wire-compatible format, that is, strings/JSON;
    // we reconstruct the public key as if it had been read from such a format
val publicKey = Util.getPublicKeyFromString(combined.state.publicKey, combined.state.cSettings.generator)
// encrypt the votes with the public key of the election
val votes = Util.encryptVotes(plaintexts, combined.state.cSettings, publicKey)
// open the election period
val startVotes = Election.startVotes(combined)
// add the votes to the election
var electionGettingVotes = startVotes
for (i <- 0 until votes.length) {
electionGettingVotes = electionGettingVotes flatMap { electionGettingVotes =>
Election.addVote(electionGettingVotes, votes(i).convertToString)
}
}
electionGettingVotes
}
// we are only timing the mixing phase
var mixingStart = System.currentTimeMillis()
// FIXME remove
MPBridge.total = 0;
// stop the voting period
val stopVotes = electionGettingVotes flatMap { electionGettingVotes =>
mixingStart = System.currentTimeMillis()
Election.stopVotes(electionGettingVotes)
}
// prepare for mixing
val startMix = stopVotes flatMap { stopVotes =>
Election.startMixing(stopVotes)
}
// the proof is verified and the shuffle is then added to the election, advancing its state
val mixOne = startMix flatMap { startMix =>
// each mixing trustee extracts the needed information from the election
// and performs the shuffle and proofs
val shuffle1 = m1.shuffleVotes(startMix)
Election.addMix(startMix, shuffle1, m1.id)
}
// again for the second trustee..
val mixTwo = mixOne flatMap { mixOne =>
val shuffle2 = m2.shuffleVotes(mixOne)
Election.addMix(mixOne, shuffle2, m2.id)
}
// we are done mixing
val stopMix = mixTwo flatMap { mixTwo =>
Election.stopMixing(mixTwo)
}
var mixingEnd = System.currentTimeMillis()
// start the partial decryptions
val startDecryptions = stopMix flatMap { stopMix =>
mixingEnd = System.currentTimeMillis()
Election.startDecryptions(stopMix)
}
// the proofs are verified and the partial decryptions are added to the election,
val partialTwo = startDecryptions flatMap { startDecryptions =>
// each keymaker trustee extracts the votes from the last shuffle from the election and
// uses their private keys to do the partial decryption and create proofs
val pd1 = k1.partialDecryption(startDecryptions)
val pd2 = k2.partialDecryption(startDecryptions)
val partialOne = Election.addDecryption(startDecryptions, pd1, k1.id)
partialOne flatMap { partialOne =>
Election.addDecryption(partialOne, pd2, k2.id)
}
}
// the partial decryptions are combined, yielding the plaintexts
val electionDone = partialTwo flatMap { partialTwo =>
Election.combineDecryptions(partialTwo)
}
electionDone flatMap { electionDone =>
// lets check that everything went well
println(s"Plaintexts $plaintexts")
println(s"Decrypted ${electionDone.state.decrypted}")
println("ok: " + (plaintexts.sorted == electionDone.state.decrypted.map(_.toInt).sorted))
val mixTime = (mixingEnd - mixingStart) / 1000.0
val totalTime = (System.currentTimeMillis() - mixingStart) / 1000.0
println("*************************************************************")
println(s"finished run with votes = $totalVotes")
println(s"mixTime: $mixTime")
println(s"totalTime: $totalTime")
println(s"sec / vote (mix): ${mixTime / totalVotes}")
println(s"sec / vote: ${totalTime / totalVotes}")
println(s"total modExps: ${MPBridge.total}")
println(s"found modExps: ${MPBridge.found}")
println(s"found modExps %: ${MPBridge.found/MPBridge.total.toDouble}")
println(s"extracted modExps: ${MPBridge.getExtracted}")
println(s"extracted modExps %: ${MPBridge.getExtracted/MPBridge.total.toDouble}")
println(s"modExps / vote: ${MPBridge.total.toFloat / totalVotes}")
println("*************************************************************")
mpservice.MPService.shutdown
}
}
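// Hypothetical, self-contained sketch (not part of agora-mixnet or unicrypt): it illustrates the
// joint-decryption idea from the comments above with two keymakers, on toy numbers. The private
// key is additively shared (x = x1 + x2), each trustee publishes a partial decryption c1^xi, and
// the plaintext is recovered as c2 / (c1^x1 * c1^x2). No proofs, encodings or realistic group
// sizes are used here; every constant below is invented for illustration only.
object ToyJointDecryptionSketch extends App {
  val p = BigInt(467) // toy prime modulus, far too small for real use
  val g = BigInt(2)
  val x1 = BigInt(127) // keymaker one's private share
  val x2 = BigInt(53) // keymaker two's private share
  val h = g.modPow(x1 + x2, p) // joint public key
  val m = BigInt(42) // "vote" encoded as a group element (toy)
  val r = BigInt(199) // encryption randomness
  val c1 = g.modPow(r, p)
  val c2 = (m * h.modPow(r, p)).mod(p)
  val pd1 = c1.modPow(x1, p) // partial decryption from keymaker one
  val pd2 = c1.modPow(x2, p) // partial decryption from keymaker two
  val recovered = (c2 * (pd1 * pd2).modInverse(p)).mod(p)
  println(s"recovered=$recovered ok=${recovered == m}")
}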
/**************************** other tests ****************************/
object DecryptionTest extends App {
implicit val system = ActorSystem()
implicit val executor = system.dispatchers.lookup("my-other-dispatcher")
implicit val materializer = ActorMaterializer()
val group = GStarModSafePrime.getFirstInstance(2048)
val generator = group.getDefaultGenerator()
val cSettings = CryptoSettings(group, generator)
val elGamal = ElGamalEncryptionScheme.getInstance(generator)
object d1 extends KeyMaker
object d2 extends KeyMaker
/*val (e1,pk1) = d1.createShare("d1", cSettings)
val (e2,pk2) = d2.createShare("d2", cSettings)*/
var keyShare1Opt : Option[(EncryptionKeyShareDTO, String)] = None
val e1k = d1.createShare("d1", cSettings) map { case (e1,pk1) =>
keyShare1Opt = Some((e1,pk1))
Util.getPublicKeyFromString(e1.keyShare, cSettings.generator)
}
val e2k = d2.createShare("d2", cSettings) map { case (e2,pk2) =>
Util.getPublicKeyFromString(e2.keyShare, cSettings.generator)
}
e1k map { e1k =>
e2k map { e2k =>
keyShare1Opt map { case (e1,pk1) =>
val publicKey = e1k.apply(e2k)
val pk1e = cSettings.group.getZModOrder().getElementFrom(pk1)
val plaintexts = Seq.fill(300)(scala.util.Random.nextInt(10))
// encrypt the votes with the public key of the election
val votes = Util.encryptVotes(plaintexts, cSettings, publicKey)
println("decrypting..")
MPBridge.total = 0;
MPBridge.y()
val decryption = d1.partialDecrypt(votes, pk1e, "d1", cSettings)
MPBridge.z()
MPBridge.y()
val share = elGamal.getMessageSpace.getElementFrom(e1.keyShare)
val ok = Verifier.verifyPartialDecryption(decryption, votes, cSettings, "d1", share)
MPBridge.z()
MPBridgeS.shutdown
}
}
}
}
object Issue1 extends App with ProofSettings {
import ch.bfh.unicrypt.crypto.keygenerator.interfaces.KeyPairGenerator
import ch.bfh.unicrypt.crypto.proofsystem.challengegenerator.classes.FiatShamirSigmaChallengeGenerator
import ch.bfh.unicrypt.crypto.proofsystem.challengegenerator.interfaces.ChallengeGenerator
import ch.bfh.unicrypt.crypto.proofsystem.challengegenerator.interfaces.SigmaChallengeGenerator
import ch.bfh.unicrypt.crypto.proofsystem.classes.EqualityPreimageProofSystem
import ch.bfh.unicrypt.crypto.proofsystem.classes.PermutationCommitmentProofSystem
import ch.bfh.unicrypt.crypto.proofsystem.classes.PlainPreimageProofSystem
import ch.bfh.unicrypt.crypto.proofsystem.classes.ReEncryptionShuffleProofSystem
import ch.bfh.unicrypt.crypto.schemes.commitment.classes.PermutationCommitmentScheme
import ch.bfh.unicrypt.crypto.schemes.encryption.classes.ElGamalEncryptionScheme
import ch.bfh.unicrypt.helper.converter.classes.ConvertMethod
import ch.bfh.unicrypt.helper.converter.classes.biginteger.ByteArrayToBigInteger
import ch.bfh.unicrypt.helper.converter.classes.bytearray.BigIntegerToByteArray
import ch.bfh.unicrypt.helper.converter.classes.bytearray.StringToByteArray
import ch.bfh.unicrypt.helper.hash.HashAlgorithm
import ch.bfh.unicrypt.helper.hash.HashMethod
import ch.bfh.unicrypt.helper.math.Alphabet
import ch.bfh.unicrypt.math.algebra.concatenative.classes.StringElement
import ch.bfh.unicrypt.math.algebra.concatenative.classes.StringMonoid
import ch.bfh.unicrypt.math.algebra.general.classes.Pair
import ch.bfh.unicrypt.math.algebra.general.classes.Triple
import ch.bfh.unicrypt.math.algebra.general.classes.Tuple
import ch.bfh.unicrypt.math.algebra.general.interfaces.Element
import ch.bfh.unicrypt.math.function.classes.CompositeFunction
import ch.bfh.unicrypt.math.function.classes.GeneratorFunction
import ch.bfh.unicrypt.math.function.classes.InvertFunction
import ch.bfh.unicrypt.math.function.classes.MultiIdentityFunction
import ch.bfh.unicrypt.math.function.classes.ProductFunction
import ch.bfh.unicrypt.math.function.interfaces.Function
import ch.bfh.unicrypt.math.algebra.general.abstracts.AbstractSet
import mpservice.MPBridgeS
import mpservice.MPBridge
val group = GStarModSafePrime.getFirstInstance(2048)
val generator = group.getDefaultGenerator()
val cSettings = CryptoSettings(group, generator)
val elGamal = ElGamalEncryptionScheme.getInstance(generator)
val keyPair = elGamal.getKeyPairGenerator().generateKeyPair()
val privateKey = keyPair.getFirst()
val publicKey = keyPair.getSecond()
val otherInput: StringElement = StringMonoid.getInstance(Alphabet.UNICODE_BMP).getElement("asdasd")
val challengeGenerator: SigmaChallengeGenerator = FiatShamirSigmaChallengeGenerator.getInstance(
cSettings.group.getZModOrder(), otherInput, convertMethod, hashMethod, converter)
val ecg: ChallengeGenerator = PermutationCommitmentProofSystem.createNonInteractiveEValuesGenerator(
cSettings.group.getZModOrder(), 400000)
val spg: ReEncryptionShuffleProofSystem = ReEncryptionShuffleProofSystem.getInstance(challengeGenerator, ecg, 400000, elGamal, publicKey)
val commitment = scala.io.Source.fromFile("commitment.dat").mkString
val commitment2 = spg.getCommitmentSpace().asInstanceOf[AbstractSet[_,_]].getElementFrom(commitment)
}
object Issue3 extends App {
val grp = GStarModSafePrime.getInstance(167)
// val grp = GStarModSafePrime.getInstance(new BigInteger("170141183460469231731687303715884114527"))
// val grp = GStarModSafePrime.getFirstInstance(2048)
val gen = grp.getDefaultGenerator()
val Csettings = CryptoSettings(grp, gen)
val elGamal = ElGamalEncryptionScheme.getInstance(Csettings.generator)
val keyPair = elGamal.getKeyPairGenerator().generateKeyPair()
val privateKey = keyPair.getFirst()
val publicKey = keyPair.getSecond()
// eventually 0 will be used in Z_q
val votes = Util.encryptVotes(List(0, 1, 2), Csettings, publicKey)
votes.foreach { v =>
val first = v.getFirst
println(first)
println(v.getFirst.isGenerator)
val decryption = elGamal.decrypt(privateKey, v)
println("decrypted " + decryption)
}
} | agoravoting/agora-mixnet | src/main/scala/Demo.scala | Scala | agpl-3.0 | 31,341 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.nn
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, TensorModule}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import scala.reflect.ClassTag
/**
* Apply SoftSign function to an n-dimensional input Tensor.
*
* SoftSign function: f_i(x) = x_i / (1+|x_i|)
*/
@SerialVersionUID(- 3936698382129844874L)
class SoftSign[T: ClassTag, D: ClassTag]()
(implicit ev: TensorNumeric[T], ev2: TensorNumeric[D])
extends AbstractModule[Tensor[D], Tensor[D], T] {
@transient private var temp: Tensor[D] = null
@transient private var tempGrad: Tensor[D] = null
override def updateOutput(input: Tensor[D]): Tensor[D] = {
if (null == temp) {
temp = input.clone()
} else {
temp.resizeAs(input).copy(input)
}
temp.abs().add(ev2.fromType[Int](1))
output.resizeAs(input).copy(input).cdiv(temp)
output
}
override def updateGradInput(input: Tensor[D], gradOutput: Tensor[D]): Tensor[D] = {
if (null == tempGrad) {
tempGrad = input.clone()
} else {
tempGrad.resizeAs(output).copy(input)
}
tempGrad.abs().add(ev2.fromType[Int](1)).cmul(tempGrad)
gradInput.resizeAs(input).copy(gradOutput).cdiv(tempGrad)
gradInput
}
}
object SoftSign {
def apply[@specialized(Float, Double) T: ClassTag, D: ClassTag]()
(implicit ev: TensorNumeric[T], ev2: TensorNumeric[D]) : SoftSign[T, D] = {
new SoftSign[T, D]()
}
}
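// Hypothetical standalone sketch (not part of BigDL): it spells out the SoftSign formula documented
// above, f(x) = x / (1 + |x|), and its gradient gradOut / (1 + |x|)^2, on plain doubles, mirroring
// what updateOutput and updateGradInput compute element-wise on tensors.
object SoftSignFormulaSketch extends App {
  val inputs = Seq(-2.0, -0.5, 0.0, 0.5, 2.0)
  def softSign(x: Double): Double = x / (1 + math.abs(x))
  def softSignGrad(x: Double, gradOut: Double): Double = gradOut / math.pow(1 + math.abs(x), 2)
  println(inputs.map(softSign)) // forward pass
  println(inputs.map(softSignGrad(_, 1.0))) // backward pass with an all-ones upstream gradient
}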
| jenniew/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/nn/SoftSign.scala | Scala | apache-2.0 | 2,115 |
package mesosphere.marathon.integration.setup
import akka.actor.ActorSystem
import mesosphere.marathon.integration.facades.ITEnrichedTask
import org.slf4j.LoggerFactory
import play.api.libs.json.{ JsValue, Json }
import spray.client.pipelining._
import spray.http.HttpResponse
import scala.concurrent.duration.{ Duration, _ }
class ServiceMockFacade(task: ITEnrichedTask, waitTime: Duration = 30.seconds)(implicit system: ActorSystem) {
import scala.concurrent.ExecutionContext.Implicits.global
val log = LoggerFactory.getLogger(classOf[ServiceMockFacade])
val baseUrl = s"http://${task.host}:${task.ports.map(_.head).get}"
val pipeline = sendReceive
def continue(): RestResult[HttpResponse] = {
log.info(s"Continue with the service migration: $baseUrl/v1/plan/continue")
RestResult.await(pipeline(Post(s"$baseUrl/v1/plan/continue")), waitTime)
}
def plan(): RestResult[JsValue] = {
RestResult.await(pipeline(Get(s"$baseUrl/v1/plan")), waitTime).map(_.entity.asString).map(Json.parse)
}
}
| ss75710541/marathon | src/test/scala/mesosphere/marathon/integration/setup/ServiceMockFacade.scala | Scala | apache-2.0 | 1,025 |
package com.argcv.iphigenia.biendata
/**
*
* @author Yu Jing <[email protected]> on 12/21/16
*/
package object smp2016 {
}
| yuikns/iphigenia | src/main/scala/com/argcv/iphigenia/biendata/smp2016/package.scala | Scala | mit | 123 |
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl.providers
import java.net.URLEncoder._
import com.mohiva.play.silhouette.api.exceptions._
import com.mohiva.play.silhouette.api.util.HTTPLayer
import com.mohiva.play.silhouette.impl.exceptions.{ UnexpectedResponseException, AccessDeniedException }
import com.mohiva.play.silhouette.impl.providers.OAuth2Provider._
import org.specs2.matcher.ThrownExpectations
import org.specs2.mock.Mockito
import org.specs2.specification.Scope
import play.api.libs.json.{ JsValue, Json }
import play.api.libs.ws.WSRequest
import play.api.mvc.Result
import play.api.test.{ FakeRequest, WithApplication }
import play.mvc.Http.HeaderNames
import scala.concurrent.Future
/**
* Abstract test case for the [[OAuth2Provider]] class.
*
* These tests will be additionally executed before every OAuth2 provider spec.
*/
abstract class OAuth2ProviderSpec extends SocialProviderSpec[OAuth2Info] {
isolated
"The `authenticate` method" should {
val c = context
"fail with an AccessDeniedException if `error` key with value `access_denied` exists in query string" in new WithApplication {
implicit val req = FakeRequest(GET, "?" + Error + "=" + AccessDenied)
failed[AccessDeniedException](c.provider.authenticate()) {
case e => e.getMessage must startWith(AuthorizationError.format(c.provider.id, ""))
}
}
"fail with an UnexpectedResponseException if `error` key with unspecified value exists in query string" in new WithApplication {
implicit val req = FakeRequest(GET, "?" + Error + "=unspecified")
failed[UnexpectedResponseException](c.provider.authenticate()) {
case e => e.getMessage must startWith(AuthorizationError.format(c.provider.id, "unspecified"))
}
}
"fail with an ConfigurationException if authorization URL is undefined when it's needed" in new WithApplication {
c.oAuthSettings.authorizationURL match {
case None => skipped("authorizationURL is not defined, so this step isn't needed for provider: " + c.provider.getClass)
case Some(authorizationURL) =>
implicit val req = FakeRequest(GET, "/")
c.state.serialize returns "session-value"
c.stateProvider.build(any) returns Future.successful(c.state)
c.oAuthSettings.authorizationURL returns None
failed[ConfigurationException](c.provider.authenticate()) {
case e => e.getMessage must startWith(AuthorizationURLUndefined.format(c.provider.id))
}
}
}
"redirect to authorization URL if authorization code doesn't exists in request" in new WithApplication {
c.oAuthSettings.authorizationURL match {
case None => skipped("authorizationURL is not defined, so this step isn't needed for provider: " + c.provider.getClass)
case Some(authorizationURL) =>
implicit val req = FakeRequest(GET, "/")
val sessionKey = "session-key"
val sessionValue = "session-value"
c.state.serialize returns sessionValue
c.stateProvider.build(any) returns Future.successful(c.state)
c.stateProvider.publish(any, any)(any) answers { (a, m) =>
val result = a.asInstanceOf[Array[Any]](0).asInstanceOf[Result]
val state = a.asInstanceOf[Array[Any]](1).asInstanceOf[OAuth2State]
result.withSession(sessionKey -> state.serialize)
}
result(c.provider.authenticate()) {
case result =>
status(result) must equalTo(SEE_OTHER)
session(result).get(sessionKey) must beSome(c.state.serialize)
redirectLocation(result) must beSome.which { url =>
val urlParams = c.urlParams(url)
val params = c.oAuthSettings.scope.foldLeft(List(
(ClientID, c.oAuthSettings.clientID),
(RedirectURI, c.oAuthSettings.redirectURL),
(ResponseType, Code),
(State, urlParams(State))) ++ c.oAuthSettings.authorizationParams.toList) {
case (p, s) => (Scope, s) :: p
}
url must be equalTo (authorizationURL + params.map { p =>
encode(p._1, "UTF-8") + "=" + encode(p._2, "UTF-8")
}.mkString("?", "&", ""))
}
}
}
}
"resolves relative redirectURLs before starting the flow" in new WithApplication {
verifyRelativeRedirectResolution("/redirect-url", secure = false, "http://www.example.com/redirect-url")
}
"resolves path relative redirectURLs before starting the flow" in new WithApplication {
verifyRelativeRedirectResolution("redirect-url", secure = false, "http://www.example.com/request-path/redirect-url")
}
"resolves relative redirectURLs before starting the flow over https" in new WithApplication {
verifyRelativeRedirectResolution("/redirect-url", secure = true, "https://www.example.com/redirect-url")
}
def verifyRelativeRedirectResolution(redirectURL: String, secure: Boolean, resolvedRedirectURL: String) = {
c.oAuthSettings.authorizationURL match {
case None => skipped("authorizationURL is not defined, so this step isn't needed for provider: " + c.provider.getClass)
case Some(authorizationURL) =>
implicit val req = spy(FakeRequest(GET, "/request-path/something").withHeaders(HeaderNames.HOST -> "www.example.com"))
val sessionKey = "session-key"
val sessionValue = "session-value"
req.secure returns secure
c.oAuthSettings.redirectURL returns redirectURL
c.state.serialize returns sessionValue
c.stateProvider.build(any) returns Future.successful(c.state)
c.stateProvider.publish(any, any)(any) answers { (a, m) =>
val result = a.asInstanceOf[Array[Any]](0).asInstanceOf[Result]
val state = a.asInstanceOf[Array[Any]](1).asInstanceOf[OAuth2State]
result.withSession(sessionKey -> state.serialize)
}
result(c.provider.authenticate()) {
case result =>
redirectLocation(result) must beSome.which { url =>
url must contain(s"$RedirectURI=${encode(resolvedRedirectURL, "UTF-8")}")
}
}
}
}
"not send state param if state is empty" in new WithApplication {
c.oAuthSettings.authorizationURL match {
case None => skipped("authorizationURL is not defined, so this step isn't needed for provider: " + c.provider.getClass)
case Some(_) =>
implicit val req = FakeRequest(GET, "/")
c.state.serialize returns ""
c.stateProvider.build(any) returns Future.successful(c.state)
c.stateProvider.publish(any, any)(any) answers { (a, m) =>
a.asInstanceOf[Array[Any]](0).asInstanceOf[Result]
}
result(c.provider.authenticate()) {
case result =>
redirectLocation(result) must beSome.which(_ must not contain State)
}
}
}
"submit the proper params to the access token post request" in new WithApplication {
val requestHolder = mock[WSRequest]
val params = Map(
ClientID -> Seq(c.oAuthSettings.clientID),
ClientSecret -> Seq(c.oAuthSettings.clientSecret),
GrantType -> Seq(AuthorizationCode),
Code -> Seq("my.code"),
RedirectURI -> Seq(c.oAuthSettings.redirectURL)) ++ c.oAuthSettings.accessTokenParams.mapValues(Seq(_))
implicit val req = FakeRequest(GET, "?" + Code + "=my.code")
requestHolder.withHeaders(any) returns requestHolder
c.stateProvider.validate(any) returns Future.successful(c.state)
      // We must use this neat trick here because it isn't possible to check the post call with a verification,
      // because of the implicit params needed for the post call. On the other hand we can test it in the abstract
      // spec, because we throw an exception in both cases which stops the test once the post method was called.
      // This protects us from an NPE caused by the unmocked dependencies. The other solution would be to execute
      // this test in every provider with the fully mocked dependencies.
requestHolder.post[Map[String, Seq[String]]](any)(any) answers { (a, m) =>
a.asInstanceOf[Array[Any]](0).asInstanceOf[Map[String, Seq[String]]].equals(params) match {
case true => throw new RuntimeException("success")
case false => throw new RuntimeException("failure")
}
}
c.httpLayer.url(c.oAuthSettings.accessTokenURL) returns requestHolder
failed[RuntimeException](c.provider.authenticate()) {
case e => e.getMessage must startWith("success")
}
}
}
"The `settings` method" should {
val c = context
"return the settings instance" in {
c.provider.settings must be equalTo c.oAuthSettings
}
}
/**
* Defines the context for the abstract OAuth2 provider spec.
*
* @return The Context to use for the abstract OAuth2 provider spec.
*/
protected def context: OAuth2ProviderSpecContext
}
/**
* Context for the OAuth2ProviderSpec.
*/
trait OAuth2ProviderSpecContext extends Scope with Mockito with ThrownExpectations {
abstract class TestState extends OAuth2State
abstract class TestStateProvider extends OAuth2StateProvider {
type State = TestState
}
/**
* The HTTP layer mock.
*/
lazy val httpLayer: HTTPLayer = mock[HTTPLayer]
/**
* A OAuth2 info.
*/
lazy val oAuthInfo: JsValue = Json.obj(
AccessToken -> "my.access.token",
TokenType -> "bearer",
ExpiresIn -> 3600,
RefreshToken -> "my.refresh.token")
/**
* The OAuth2 state.
*/
lazy val state: TestState = mock[TestState]
/**
* The OAuth2 state provider.
*/
lazy val stateProvider: TestStateProvider = mock[TestStateProvider]
/**
* The OAuth2 settings.
*/
def oAuthSettings: OAuth2Settings
/**
* The provider to test.
*/
def provider: OAuth2Provider
/**
* Extracts the params of a URL.
*
* @param url The url to parse.
* @return The params of a URL.
*/
def urlParams(url: String) = (url.split('&') map { str =>
val pair = str.split('=')
pair(0) -> pair(1)
}).toMap
}
| rfranco/play-silhouette | silhouette/test/com/mohiva/play/silhouette/impl/providers/OAuth2ProviderSpec.scala | Scala | apache-2.0 | 10,947 |
import org.scalatest._
import NumericContainer._
class NumericExample extends FlatSpec with Matchers {
"An Impl" should "be instantiable as an int" in {
val one = Impl(1)
}
it should "be instantiable as a double" in {
val double = Impl(2.0)
}
it should "be instantiable as a float" in {
val float = Impl(3F)
}
it should "be instantiable as a long" in {
val long = Impl(4L)
}
"An Int Impl" should "be able to use operators" in {
val one = 1
val two = 2
val res1 = 3
val res2 = -1
val res3 = 2
val res4 = 0
val o = Impl(one)
val t = Impl(two)
(o+t) shouldBe Impl(res1)
(o-t) shouldBe Impl(res2)
(o*t) shouldBe Impl(res3)
(o/t) shouldBe Impl(res4)
}
"A Double Impl" should "be able to use operators" in {
val one = 1.0
val two = 2.0
val res1 = 3.0
val res2 = -1.0
val res3 = 2.0
val res4 = 0.5
val o = Impl(one)
val t = Impl(two)
(o+t) shouldBe Impl(res1)
(o-t) shouldBe Impl(res2)
(o*t) shouldBe Impl(res3)
(o/t) shouldBe Impl(res4)
}
"A Float Impl" should "be able to use operators" in {
val one = 1F
val two = 2F
val res1 = 3F
val res2 = -1F
val res3 = 2F
val res4 = 0.5F
val o = Impl(one)
val t = Impl(two)
(o+t) shouldBe Impl(res1)
(o-t) shouldBe Impl(res2)
(o*t) shouldBe Impl(res3)
(o/t) shouldBe Impl(res4)
}
"A Long Impl" should "be able to use operators" in {
val one = 1L
val two = 2L
val res1 = 3L
val res2 = -1L
val res3 = 2L
val res4 = 0L
val o = Impl(one)
val t = Impl(two)
(o+t) shouldBe Impl(res1)
(o-t) shouldBe Impl(res2)
(o*t) shouldBe Impl(res3)
(o/t) shouldBe Impl(res4)
}
}
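// NumericContainer itself is not shown in this file; the following is a hypothetical sketch of one
// way Impl could be written so that the expectations above hold: a case class over any T with a
// Numeric instance, dispatching division to Fractional or Integral evidence as appropriate.
object NumericContainerSketch {
  case class Impl[T](value: T)(implicit num: Numeric[T]) {
    def +(other: Impl[T]): Impl[T] = Impl(num.plus(value, other.value))
    def -(other: Impl[T]): Impl[T] = Impl(num.minus(value, other.value))
    def *(other: Impl[T]): Impl[T] = Impl(num.times(value, other.value))
    def /(other: Impl[T]): Impl[T] = num match {
      case f: Fractional[T] => Impl(f.div(value, other.value)) // Double, Float: true division
      case i: Integral[T] => Impl(i.quot(value, other.value)) // Int, Long: truncating division
      case _ => throw new UnsupportedOperationException(s"no division available for $num")
    }
  }
}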
| mrmechko/spire-test | src/test/scala/NumericContainer.scala | Scala | gpl-2.0 | 1,797 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package scalaguide.tests.webservice
package client {
//#client
import javax.inject.Inject
import play.api.libs.ws.WSClient
import scala.concurrent.{ExecutionContext, Future}
class GitHubClient(ws: WSClient, baseUrl: String)(implicit ec: ExecutionContext) {
@Inject def this(ws: WSClient, ec: ExecutionContext) = this(ws, "https://api.github.com")(ec)
def repositories(): Future[Seq[String]] = {
ws.url(baseUrl + "/repositories").get().map { response =>
      (response.json \\ "full_name").map(_.as[String])
}
}
}
//#client
}
package test {
import client._
//#full-test
import play.core.server.Server
import play.api.routing.sird._
import play.api.mvc._
import play.api.libs.json._
import play.api.test._
import scala.concurrent.Await
import scala.concurrent.duration._
import org.specs2.mutable.Specification
class GitHubClientSpec extends Specification {
import scala.concurrent.ExecutionContext.Implicits.global
"GitHubClient" should {
"get all repositories" in {
Server.withRouter() {
case GET(p"/repositories") => Action {
Results.Ok(Json.arr(Json.obj("full_name" -> "octocat/Hello-World")))
}
} { implicit port =>
WsTestClient.withClient { client =>
val result = Await.result(
new GitHubClient(client, "").repositories(), 10.seconds)
result must_== Seq("octocat/Hello-World")
}
}
}
}
}
//#full-test
}
import client._
import scala.concurrent.Await
import scala.concurrent.duration._
import org.specs2.mutable.Specification
import org.specs2.time.NoTimeConversions
import play.api.routing.Router
import play.api.{BuiltInComponents, BuiltInComponentsFromContext}
import play.api.routing.sird._
import play.filters.HttpFiltersComponents
class ScalaTestingWebServiceClients extends Specification {
import scala.concurrent.ExecutionContext.Implicits.global
"webservice testing" should {
"allow mocking a service" in {
//#mock-service
import play.api.libs.json._
import play.api.mvc._
import play.api.routing.sird._
import play.core.server.Server
Server.withRouter() {
case GET(p"/repositories") => Action {
Results.Ok(Json.arr(Json.obj("full_name" -> "octocat/Hello-World")))
}
} { implicit port =>
//#mock-service
ok
}
}
"allow sending a resource" in {
//#send-resource
import play.api.mvc._
import play.api.routing.sird._
import play.api.test._
import play.core.server.Server
Server.withApplicationFromContext() { context =>
new BuiltInComponentsFromContext(context) with HttpFiltersComponents {
override def router: Router = Router.from {
case GET(p"/repositories") =>
this.defaultActionBuilder { req =>
Results.Ok.sendResource("github/repositories.json")(fileMimeTypes)
}
}
}.application
} { implicit port =>
//#send-resource
WsTestClient.withClient { client =>
Await.result(new GitHubClient(client, "").repositories(), 10.seconds) must_== Seq("octocat/Hello-World")
}
}
}
"allow being dry" in {
//#with-github-client
import play.api.mvc._
import play.api.routing.sird._
import play.core.server.Server
import play.api.test._
def withGitHubClient[T](block: GitHubClient => T): T = {
Server.withApplicationFromContext() { context =>
new BuiltInComponentsFromContext(context) with HttpFiltersComponents{
override def router: Router = Router.from {
case GET(p"/repositories") =>
this.defaultActionBuilder { req =>
Results.Ok.sendResource("github/repositories.json")(fileMimeTypes)
}
}
}.application
} { implicit port =>
WsTestClient.withClient { client =>
block(new GitHubClient(client, ""))
}
}
}
//#with-github-client
//#with-github-test
withGitHubClient { client =>
val result = Await.result(client.repositories(), 10.seconds)
result must_== Seq("octocat/Hello-World")
}
//#with-github-test
}
}
}
| wsargent/playframework | documentation/manual/working/scalaGuide/main/tests/code/webservice/ScalaTestingWebServiceClients.scala | Scala | apache-2.0 | 4,368 |
package coder.simon
import java.util.UUID
class Account(x: Int) {
var balance = x
def withdrawWithLog(delta: Int) = {
val yyy = balance
val result = withdraw(delta)
println(s"before $yyy,delta is $delta, after $balance")
result
}
def withdraw(delta: Int) = synchronized {
if (delta > 0 && balance >= delta) {
balance = balance - delta
true
} else false
}
}
object RaceCondition {
def createThreads = {
val account = new Account(100)
for (i <- 0 until 10) {
new Thread(
new Runnable {
def run() {
account.withdrawWithLog(3)
Thread.sleep(35)
account.withdrawWithLog(7)
}
}, s"t$i").start()
}
account
}
def main(args: Array[String]) = {
val a = createThreads
try {
Thread.sleep(1000 * 3)
} catch {
case e: Exception =>
println("exception when thread sleep")
}
println(s"eventually the balance is ${a.balance}")
}
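// Hypothetical alternative sketch (not part of the demo above): the same withdraw rule written with
// a lock-free compare-and-set loop instead of synchronized, to show the other common design choice.
object AtomicAccountSketch {
  import java.util.concurrent.atomic.AtomicInteger
  class AtomicAccount(initial: Int) {
    private val balance = new AtomicInteger(initial)
    @annotation.tailrec
    final def withdraw(delta: Int): Boolean = {
      val current = balance.get()
      if (delta <= 0 || current < delta) false
      else if (balance.compareAndSet(current, current - delta)) true
      else withdraw(delta) // another thread changed the balance first, retry
    }
    def current: Int = balance.get()
  }
}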
} | erlangxk/concurrent_account_demo | ConcurrentWebWithDB/app/models/RaceCondition.scala | Scala | mit | 1,007 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package statements
package params
import com.intellij.lang.ASTNode
import com.intellij.psi._
import com.intellij.psi.scope.PsiScopeProcessor
import org.jetbrains.plugins.scala.lang.parser.ScalaElementTypes
import org.jetbrains.plugins.scala.lang.psi.api.ScalaElementVisitor
import org.jetbrains.plugins.scala.lang.psi.api.statements.params._
import org.jetbrains.plugins.scala.lang.psi.stubs.ScParamClausesStub
/**
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
*/
class ScParametersImpl extends ScalaStubBasedElementImpl[ScParameters] with ScParameters {
def this(node: ASTNode) = {this(); setNode(node)}
def this(stub: ScParamClausesStub) = {this(); setStub(stub); setNullNode()}
override def toString: String = "Parameters"
def clauses: Seq[ScParameterClause] = {
getStubOrPsiChildren(ScalaElementTypes.PARAM_CLAUSE, JavaArrayFactoryUtil.ScParameterClauseFactory).toSeq
}
override def processDeclarations(processor: PsiScopeProcessor, state: ResolveState,
lastParent: PsiElement, place: PsiElement): Boolean = {
if (lastParent != null) {
val clausesIterator = clauses.iterator
var break = false
while (clausesIterator.hasNext && !break) {
val clause = clausesIterator.next()
if (clause == lastParent) break = true
else {
val paramsIterator = clause.parameters.iterator
while (paramsIterator.hasNext) {
val param = paramsIterator.next()
if (!processor.execute(param, state)) return false
}
}
}
}
true
}
override def accept(visitor: ScalaElementVisitor) {
visitor.visitParameters(this)
}
override def accept(visitor: PsiElementVisitor) {
visitor match {
case s: ScalaElementVisitor => s.visitParameters(this)
case _ => super.accept(visitor)
}
}
override def add(element: PsiElement): PsiElement = {
element match {
case param: ScParameter =>
clauses.lastOption match {
case Some(clause) =>
clause.addParameter(param).parameters.last
case _ =>
val clause = ScalaPsiElementFactory.createClauseFromText("()", getManager)
val newClause = clause.addParameter(param)
super.add(clause)
newClause.parameters.last
}
case _ => super.add(element)
}
}
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/impl/statements/params/ScParametersImpl.scala | Scala | apache-2.0 | 2,458 |
package model
trait IssueLabelComponent extends TemplateComponent { self: Profile =>
import profile.simple._
lazy val IssueLabels = TableQuery[IssueLabels]
class IssueLabels(tag: Tag) extends Table[IssueLabel](tag, "ISSUE_LABEL") with IssueTemplate with LabelTemplate {
def * = (userName, repositoryName, issueId, labelId) <> (IssueLabel.tupled, IssueLabel.unapply)
def byPrimaryKey(owner: String, repository: String, issueId: Int, labelId: Int) =
byIssue(owner, repository, issueId) && (this.labelId is labelId.bind)
}
case class IssueLabel(
userName: String,
repositoryName: String,
issueId: Int,
labelId: Int)
}
| campolake/gitbucketV2.1 | src/main/scala/model/IssueLabels.scala | Scala | apache-2.0 | 659 |
package springrtsru.pages.games
import springrtsru.pages.{BaseContentPage, Games, PageIndex}
class ZerokGamePage extends BaseContentPage {
override def getTitle: String = "Zero-K RTS"
override def getPageIndex: PageIndex = Games
}
| Eltario/springrts-ru-website | src/main/scala/springrtsru/pages/games/ZerokGamePage.scala | Scala | unlicense | 238 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import scala.collection.JavaConverters._
import org.apache.hadoop.fs.{FileStatus, Path}
import org.apache.hadoop.hive.ql.exec.Utilities
import org.apache.hadoop.hive.ql.io.{HiveFileFormatUtils, HiveOutputFormat}
import org.apache.hadoop.hive.serde2.Serializer
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspectorUtils, StructObjectInspector}
import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils.ObjectInspectorCopyOption
import org.apache.hadoop.io.Writable
import org.apache.hadoop.mapred.{JobConf, Reporter}
import org.apache.hadoop.mapreduce.{Job, TaskAttemptContext}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.SPECULATION_ENABLED
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.execution.datasources.{FileFormat, OutputWriter, OutputWriterFactory}
import org.apache.spark.sql.hive.{HiveInspectors, HiveTableUtil}
import org.apache.spark.sql.hive.HiveShim.{ShimFileSinkDesc => FileSinkDesc}
import org.apache.spark.sql.sources.DataSourceRegister
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.SerializableJobConf
/**
* `FileFormat` for writing Hive tables.
*
* TODO: implement the read logic.
*/
class HiveFileFormat(fileSinkConf: FileSinkDesc)
extends FileFormat with DataSourceRegister with Logging {
def this() = this(null)
override def shortName(): String = "hive"
override def inferSchema(
sparkSession: SparkSession,
options: Map[String, String],
files: Seq[FileStatus]): Option[StructType] = {
throw QueryExecutionErrors.inferSchemaUnsupportedForHiveError()
}
override def prepareWrite(
sparkSession: SparkSession,
job: Job,
options: Map[String, String],
dataSchema: StructType): OutputWriterFactory = {
val conf = job.getConfiguration
val tableDesc = fileSinkConf.getTableInfo
conf.set("mapred.output.format.class", tableDesc.getOutputFileFormatClassName)
    // When speculation is on and the output committer class name contains "Direct", we should warn
    // users that they may lose data if they are using a direct output committer.
val speculationEnabled = sparkSession.sparkContext.conf.get(SPECULATION_ENABLED)
val outputCommitterClass = conf.get("mapred.output.committer.class", "")
if (speculationEnabled && outputCommitterClass.contains("Direct")) {
val warningMessage =
s"$outputCommitterClass may be an output committer that writes data directly to " +
"the final location. Because speculation is enabled, this output committer may " +
"cause data loss (see the case in SPARK-10063). If possible, please use an output " +
"committer that does not have this behavior (e.g. FileOutputCommitter)."
logWarning(warningMessage)
}
// Add table properties from storage handler to hadoopConf, so any custom storage
// handler settings can be set to hadoopConf
HiveTableUtil.configureJobPropertiesForStorageHandler(tableDesc, conf, false)
Utilities.copyTableJobPropertiesToConf(tableDesc, conf)
// Avoid referencing the outer object.
val fileSinkConfSer = fileSinkConf
new OutputWriterFactory {
private val jobConf = new SerializableJobConf(new JobConf(conf))
@transient private lazy val outputFormat =
jobConf.value.getOutputFormat.asInstanceOf[HiveOutputFormat[AnyRef, Writable]]
override def getFileExtension(context: TaskAttemptContext): String = {
Utilities.getFileExtension(jobConf.value, fileSinkConfSer.getCompressed, outputFormat)
}
override def newInstance(
path: String,
dataSchema: StructType,
context: TaskAttemptContext): OutputWriter = {
new HiveOutputWriter(path, fileSinkConfSer, jobConf.value, dataSchema)
}
}
}
}
class HiveOutputWriter(
val path: String,
fileSinkConf: FileSinkDesc,
jobConf: JobConf,
dataSchema: StructType) extends OutputWriter with HiveInspectors {
private def tableDesc = fileSinkConf.getTableInfo
private val serializer = {
val serializer = tableDesc.getDeserializerClass.getConstructor().
newInstance().asInstanceOf[Serializer]
serializer.initialize(jobConf, tableDesc.getProperties)
serializer
}
private val hiveWriter = HiveFileFormatUtils.getHiveRecordWriter(
jobConf,
tableDesc,
serializer.getSerializedClass,
fileSinkConf,
new Path(path),
Reporter.NULL)
  /**
   * Since SPARK-30201, ObjectInspectorCopyOption.JAVA was changed to ObjectInspectorCopyOption.DEFAULT.
   * The reason is that the DEFAULT option can convert `UTF8String` to `Text` using the raw bytes, so
   * we stay compatible with non-UTF-8 byte sequences during write.
   */
private val standardOI = ObjectInspectorUtils
.getStandardObjectInspector(
tableDesc.getDeserializer(jobConf).getObjectInspector,
ObjectInspectorCopyOption.DEFAULT)
.asInstanceOf[StructObjectInspector]
private val fieldOIs =
standardOI.getAllStructFieldRefs.asScala.map(_.getFieldObjectInspector).toArray
private val dataTypes = dataSchema.map(_.dataType).toArray
private val wrappers = fieldOIs.zip(dataTypes).map { case (f, dt) => wrapperFor(f, dt) }
private val outputData = new Array[Any](fieldOIs.length)
override def write(row: InternalRow): Unit = {
var i = 0
while (i < fieldOIs.length) {
outputData(i) = if (row.isNullAt(i)) null else wrappers(i)(row.get(i, dataTypes(i)))
i += 1
}
hiveWriter.write(serializer.serialize(outputData, standardOI))
}
override def close(): Unit = {
// Seems the boolean value passed into close does not matter.
hiveWriter.close(false)
}
}
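// Hypothetical, Hive-free sketch: it illustrates the per-field "wrapper" pattern used by
// HiveOutputWriter.write above, where field converters are resolved once per writer rather than
// once per row and a single preallocated output array is reused for every row. The FieldType
// values and conversions below are invented for illustration only.
object WrapperPatternSketch extends App {
  sealed trait FieldType
  case object IntType extends FieldType
  case object StringType extends FieldType
  val schema: Seq[FieldType] = Seq(IntType, StringType)
  def intWrapper(v: Any): Any = v.asInstanceOf[Int].toLong // e.g. widen to the sink's type
  def stringWrapper(v: Any): Any = v.asInstanceOf[String].getBytes("UTF-8")
  // one converter per field, resolved up front (analogous to wrapperFor(fieldOI, dataType))
  val wrappers: Array[Any => Any] = schema.map {
    case IntType => intWrapper _
    case StringType => stringWrapper _
  }.toArray
  val outputData = new Array[Any](schema.length) // reused across rows
  def write(row: Array[Any]): Unit = {
    var i = 0
    while (i < wrappers.length) {
      outputData(i) = if (row(i) == null) null else wrappers(i)(row(i))
      i += 1
    }
    println(outputData.toSeq) // stand-in for hiveWriter.write(...)
  }
  write(Array(1, "a"))
  write(Array(2, null))
}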
| shaneknapp/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/execution/HiveFileFormat.scala | Scala | apache-2.0 | 6,649 |
/*
* Copyright (C) 2009-2013 Typesafe Inc. <http://www.typesafe.com>
*/
package play.libs
import org.scalacheck.Arbitrary.arbitrary
import org.scalacheck.{ Arbitrary, Gen }
import org.specs2.mutable.Specification
import org.specs2.ScalaCheck
object FTupleSpec extends Specification with ScalaCheck {
import ArbitraryTuples._
type A = String
type B = Integer
type C = String
type D = Integer
type E = String
implicit val stringParam: Arbitrary[String] = Arbitrary(Gen.oneOf("x", null))
implicit val integerParam: Arbitrary[Integer] = Arbitrary(Gen.oneOf(42, null))
checkEquality[F.Tuple[A, B]]("Tuple")
checkEquality[F.Tuple3[A, B, C]]("Tuple3")
checkEquality[F.Tuple4[A, B, C, D]]("Tuple4")
checkEquality[F.Tuple5[A, B, C, D, E]]("Tuple5")
def checkEquality[A: Arbitrary](name: String): Unit = {
s"$name equality" should {
"be commutative" in prop { (a1: A, a2: A) =>
(a1 equals a2) == (a2 equals a1)
}
"be reflexive" in prop { (a: A) =>
a equals a
}
"check for null" in prop { (a: A) =>
!(a equals null)
}
"check object type" in prop { (a: A, s: String) =>
!(a equals s)
}
"obey hashCode contract" in prop { (a1: A, a2: A) =>
// (a1 equals a2) ==> (a1.hashCode == a2.hashCode)
if (a1 equals a2) (a1.hashCode == a2.hashCode) else true
}
}
}
object ArbitraryTuples {
implicit def arbTuple[A: Arbitrary, B: Arbitrary]: Arbitrary[F.Tuple[A, B]] = Arbitrary {
for (a <- arbitrary[A]; b <- arbitrary[B]) yield F.Tuple(a, b)
}
implicit def arbTuple3[A: Arbitrary, B: Arbitrary, C: Arbitrary]: Arbitrary[F.Tuple3[A, B, C]] = Arbitrary {
for (a <- arbitrary[A]; b <- arbitrary[B]; c <- arbitrary[C]) yield F.Tuple3(a, b, c)
}
implicit def arbTuple4[A: Arbitrary, B: Arbitrary, C: Arbitrary, D: Arbitrary]: Arbitrary[F.Tuple4[A, B, C, D]] = Arbitrary {
for (a <- arbitrary[A]; b <- arbitrary[B]; c <- arbitrary[C]; d <- arbitrary[D]) yield F.Tuple4(a, b, c, d)
}
implicit def arbTuple5[A: Arbitrary, B: Arbitrary, C: Arbitrary, D: Arbitrary, E: Arbitrary]: Arbitrary[F.Tuple5[A, B, C, D, E]] = Arbitrary {
for (a <- arbitrary[A]; b <- arbitrary[B]; c <- arbitrary[C]; d <- arbitrary[D]; e <- arbitrary[E]) yield F.Tuple5(a, b, c, d, e)
}
}
}
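// Hypothetical sketch (not Play's F.Tuple implementation): a tiny hand-rolled pair class whose
// equals and hashCode satisfy exactly the properties checked above — commutativity, reflexivity,
// null and type checks, and the hashCode contract.
object EqualityContractSketch {
  final class SimplePair[A, B](val _1: A, val _2: B) {
    override def equals(other: Any): Boolean = other match {
      case that: SimplePair[_, _] => _1 == that._1 && _2 == that._2 // == is null-safe in Scala
      case _ => false // covers null and values of other types
    }
    override def hashCode: Int =
      java.util.Objects.hash(_1.asInstanceOf[AnyRef], _2.asInstanceOf[AnyRef])
  }
}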
| jyotikamboj/container | pf-framework/src/play/src/test/scala/play/libs/FTupleSpec.scala | Scala | mit | 2,355 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2014 MineFormers
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package de.mineformers.kybology.core.block
import cpw.mods.fml.relauncher.{Side, SideOnly}
import de.mineformers.core.block.{TileProvider, BaseBlock}
import de.mineformers.core.client.util.{RenderingProxy, Rendering}
import de.mineformers.kybology.Core
import de.mineformers.kybology.Core.Names
import de.mineformers.kybology.core.client.renderer.item.RiftItemRenderer
import de.mineformers.kybology.core.client.renderer.tileentity.RiftRenderer
import de.mineformers.kybology.core.tileentity.TileRift
import net.minecraft.block.material.Material
import net.minecraft.util.{DamageSource, AxisAlignedBB}
import net.minecraft.world.World
/**
* BlockRift
*
* @author PaleoCrafter
*/
class BlockRift extends BaseBlock(Names.Blocks.Rift, Core.CreativeTab, Material.rock) with TileProvider[TileRift] with Rendering {
setBlockBounds(0.3F, 0.3F, 0.3F, 0.7F, 0.7F, 0.7F)
setBlockUnbreakable()
setResistance(6000000.0F)
override def getCollisionBoundingBoxFromPool(world: World, x: Int, y: Int, z: Int): AxisAlignedBB = null
override def getSelectedBoundingBoxFromPool(world: World, x: Int, y: Int, z: Int): AxisAlignedBB = AxisAlignedBB.getBoundingBox(0, 0, 0, 0, 0, 0)
override def tileClass: Class[TileRift] = classOf[TileRift]
@SideOnly(Side.CLIENT)
override protected def createProxy: RenderingProxy = new RenderingProxy {
@SideOnly(Side.CLIENT)
override def createTileRenderer = new RiftRenderer
@SideOnly(Side.CLIENT)
override def createItemRenderer = new RiftItemRenderer
}
val damage = new DamageSource("kybology:rift")
}
| MineFormers/Kybology | src/main/scala/de/mineformers/kybology/core/block/BlockRift.scala | Scala | mit | 2,713 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import java.io._
import java.net.URI
import java.util.{Arrays, Locale, Properties, ServiceLoader, UUID}
import java.util.concurrent.{ConcurrentHashMap, ConcurrentMap}
import java.util.concurrent.atomic.{AtomicBoolean, AtomicInteger, AtomicReference}
import scala.collection.JavaConverters._
import scala.collection.Map
import scala.collection.immutable
import scala.collection.mutable.HashMap
import scala.language.implicitConversions
import scala.reflect.{classTag, ClassTag}
import scala.util.control.NonFatal
import com.google.common.collect.MapMaker
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.hadoop.io.{ArrayWritable, BooleanWritable, BytesWritable, DoubleWritable, FloatWritable, IntWritable, LongWritable, NullWritable, Text, Writable}
import org.apache.hadoop.mapred.{FileInputFormat, InputFormat, JobConf, SequenceFileInputFormat, TextInputFormat}
import org.apache.hadoop.mapreduce.{InputFormat => NewInputFormat, Job => NewHadoopJob}
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => NewFileInputFormat}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.deploy.{LocalSparkCluster, SparkHadoopUtil}
import org.apache.spark.executor.{ExecutorMetrics, ExecutorMetricsSource}
import org.apache.spark.input.{FixedLengthBinaryInputFormat, PortableDataStream, StreamInputFormat, WholeTextFileInputFormat}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Tests._
import org.apache.spark.internal.config.UI._
import org.apache.spark.internal.plugin.PluginContainer
import org.apache.spark.io.CompressionCodec
import org.apache.spark.metrics.source.JVMCPUSource
import org.apache.spark.partial.{ApproximateEvaluator, PartialResult}
import org.apache.spark.rdd._
import org.apache.spark.resource._
import org.apache.spark.resource.ResourceUtils._
import org.apache.spark.rpc.RpcEndpointRef
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend
import org.apache.spark.scheduler.local.LocalSchedulerBackend
import org.apache.spark.shuffle.ShuffleDataIOUtils
import org.apache.spark.shuffle.api.ShuffleDriverComponents
import org.apache.spark.status.{AppStatusSource, AppStatusStore}
import org.apache.spark.status.api.v1.ThreadStackTrace
import org.apache.spark.storage._
import org.apache.spark.storage.BlockManagerMessages.TriggerThreadDump
import org.apache.spark.ui.{ConsoleProgressBar, SparkUI}
import org.apache.spark.util._
import org.apache.spark.util.logging.DriverLogger
/**
* Main entry point for Spark functionality. A SparkContext represents the connection to a Spark
* cluster, and can be used to create RDDs, accumulators and broadcast variables on that cluster.
*
* @note Only one `SparkContext` should be active per JVM. You must `stop()` the
* active `SparkContext` before creating a new one.
* @param config a Spark Config object describing the application configuration. Any settings in
* this config overrides the default configs as well as system properties.
*/
class SparkContext(config: SparkConf) extends Logging {
// The call site where this SparkContext was constructed.
private val creationSite: CallSite = Utils.getCallSite()
// In order to prevent multiple SparkContexts from being active at the same time, mark this
// context as having started construction.
// NOTE: this must be placed at the beginning of the SparkContext constructor.
SparkContext.markPartiallyConstructed(this)
val startTime = System.currentTimeMillis()
private[spark] val stopped: AtomicBoolean = new AtomicBoolean(false)
private[spark] def assertNotStopped(): Unit = {
if (stopped.get()) {
val activeContext = SparkContext.activeContext.get()
val activeCreationSite =
if (activeContext == null) {
"(No active SparkContext.)"
} else {
activeContext.creationSite.longForm
}
throw new IllegalStateException(
s"""Cannot call methods on a stopped SparkContext.
|This stopped SparkContext was created at:
|
|${creationSite.longForm}
|
|The currently active SparkContext was created at:
|
|$activeCreationSite
""".stripMargin)
}
}
/**
* Create a SparkContext that loads settings from system properties (for instance, when
* launching with ./bin/spark-submit).
*/
def this() = this(new SparkConf())
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI
* @param conf a [[org.apache.spark.SparkConf]] object specifying other Spark parameters
*/
def this(master: String, appName: String, conf: SparkConf) =
this(SparkContext.updatedConf(conf, master, appName))
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* @param sparkHome Location where Spark is installed on cluster nodes.
* @param jars Collection of JARs to send to the cluster. These can be paths on the local file
* system or HDFS, HTTP, HTTPS, or FTP URLs.
* @param environment Environment variables to set on worker nodes.
*/
def this(
master: String,
appName: String,
sparkHome: String = null,
jars: Seq[String] = Nil,
environment: Map[String, String] = Map()) = {
this(SparkContext.updatedConf(new SparkConf(), master, appName, sparkHome, jars, environment))
}
// The following constructors are required when Java code accesses SparkContext directly.
// Please see SI-4278
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
*/
private[spark] def this(master: String, appName: String) =
this(master, appName, null, Nil, Map())
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* @param sparkHome Location where Spark is installed on cluster nodes.
*/
private[spark] def this(master: String, appName: String, sparkHome: String) =
this(master, appName, sparkHome, Nil, Map())
/**
* Alternative constructor that allows setting common Spark properties directly
*
* @param master Cluster URL to connect to (e.g. mesos://host:port, spark://host:port, local[4]).
* @param appName A name for your application, to display on the cluster web UI.
* @param sparkHome Location where Spark is installed on cluster nodes.
* @param jars Collection of JARs to send to the cluster. These can be paths on the local file
* system or HDFS, HTTP, HTTPS, or FTP URLs.
*/
private[spark] def this(master: String, appName: String, sparkHome: String, jars: Seq[String]) =
this(master, appName, sparkHome, jars, Map())
// log out Spark Version in Spark driver log
logInfo(s"Running Spark version $SPARK_VERSION")
/* ------------------------------------------------------------------------------------- *
| Private variables. These variables keep the internal state of the context, and are |
| not accessible by the outside world. They're mutable since we want to initialize all |
| of them to some neutral value ahead of time, so that calling "stop()" while the |
| constructor is still running is safe. |
* ------------------------------------------------------------------------------------- */
private var _conf: SparkConf = _
private var _eventLogDir: Option[URI] = None
private var _eventLogCodec: Option[String] = None
private var _listenerBus: LiveListenerBus = _
private var _env: SparkEnv = _
private var _statusTracker: SparkStatusTracker = _
private var _progressBar: Option[ConsoleProgressBar] = None
private var _ui: Option[SparkUI] = None
private var _hadoopConfiguration: Configuration = _
private var _executorMemory: Int = _
private var _schedulerBackend: SchedulerBackend = _
private var _taskScheduler: TaskScheduler = _
private var _heartbeatReceiver: RpcEndpointRef = _
@volatile private var _dagScheduler: DAGScheduler = _
private var _applicationId: String = _
private var _applicationAttemptId: Option[String] = None
private var _eventLogger: Option[EventLoggingListener] = None
private var _driverLogger: Option[DriverLogger] = None
private var _executorAllocationManager: Option[ExecutorAllocationManager] = None
private var _cleaner: Option[ContextCleaner] = None
private var _listenerBusStarted: Boolean = false
private var _jars: Seq[String] = _
private var _files: Seq[String] = _
private var _shutdownHookRef: AnyRef = _
private var _statusStore: AppStatusStore = _
private var _heartbeater: Heartbeater = _
private var _resources: immutable.Map[String, ResourceInformation] = _
private var _shuffleDriverComponents: ShuffleDriverComponents = _
private var _plugins: Option[PluginContainer] = None
private var _resourceProfileManager: ResourceProfileManager = _
/* ------------------------------------------------------------------------------------- *
| Accessors and public fields. These provide access to the internal state of the |
| context. |
* ------------------------------------------------------------------------------------- */
private[spark] def conf: SparkConf = _conf
/**
* Return a copy of this SparkContext's configuration. The configuration ''cannot'' be
* changed at runtime.
*/
def getConf: SparkConf = conf.clone()
def resources: Map[String, ResourceInformation] = _resources
def jars: Seq[String] = _jars
def files: Seq[String] = _files
def master: String = _conf.get("spark.master")
def deployMode: String = _conf.get(SUBMIT_DEPLOY_MODE)
def appName: String = _conf.get("spark.app.name")
private[spark] def isEventLogEnabled: Boolean = _conf.get(EVENT_LOG_ENABLED)
private[spark] def eventLogDir: Option[URI] = _eventLogDir
private[spark] def eventLogCodec: Option[String] = _eventLogCodec
def isLocal: Boolean = Utils.isLocalMaster(_conf)
/**
* @return true if context is stopped or in the midst of stopping.
*/
def isStopped: Boolean = stopped.get()
private[spark] def statusStore: AppStatusStore = _statusStore
// An asynchronous listener bus for Spark events
private[spark] def listenerBus: LiveListenerBus = _listenerBus
// This function allows components created by SparkEnv to be mocked in unit tests:
private[spark] def createSparkEnv(
conf: SparkConf,
isLocal: Boolean,
listenerBus: LiveListenerBus): SparkEnv = {
SparkEnv.createDriverEnv(conf, isLocal, listenerBus, SparkContext.numDriverCores(master, conf))
}
private[spark] def env: SparkEnv = _env
// Used to store a URL for each static file/jar together with the file's local timestamp
private[spark] val addedFiles = new ConcurrentHashMap[String, Long]().asScala
private[spark] val addedJars = new ConcurrentHashMap[String, Long]().asScala
// Keeps track of all persisted RDDs
private[spark] val persistentRdds = {
val map: ConcurrentMap[Int, RDD[_]] = new MapMaker().weakValues().makeMap[Int, RDD[_]]()
map.asScala
}
def statusTracker: SparkStatusTracker = _statusTracker
private[spark] def progressBar: Option[ConsoleProgressBar] = _progressBar
private[spark] def ui: Option[SparkUI] = _ui
def uiWebUrl: Option[String] = _ui.map(_.webUrl)
/**
* A default Hadoop Configuration for the Hadoop code (e.g. file systems) that we reuse.
*
* @note As it will be reused in all Hadoop RDDs, it's better not to modify it unless you
* plan to set some global configurations for all Hadoop RDDs.
*/
def hadoopConfiguration: Configuration = _hadoopConfiguration
private[spark] def executorMemory: Int = _executorMemory
// Environment variables to pass to our executors.
private[spark] val executorEnvs = HashMap[String, String]()
// Set SPARK_USER for user who is running SparkContext.
val sparkUser = Utils.getCurrentUserName()
private[spark] def schedulerBackend: SchedulerBackend = _schedulerBackend
private[spark] def taskScheduler: TaskScheduler = _taskScheduler
private[spark] def taskScheduler_=(ts: TaskScheduler): Unit = {
_taskScheduler = ts
}
private[spark] def dagScheduler: DAGScheduler = _dagScheduler
private[spark] def dagScheduler_=(ds: DAGScheduler): Unit = {
_dagScheduler = ds
}
private[spark] def shuffleDriverComponents: ShuffleDriverComponents = _shuffleDriverComponents
/**
* A unique identifier for the Spark application.
   * Its format depends on the scheduler implementation, e.g.
   * 'local-1433865536131' for a local Spark app, 'application_1433865536131_34483' on YARN,
   * or 'driver-20170926223339-0001' on Mesos.
*/
def applicationId: String = _applicationId
def applicationAttemptId: Option[String] = _applicationAttemptId
private[spark] def eventLogger: Option[EventLoggingListener] = _eventLogger
private[spark] def executorAllocationManager: Option[ExecutorAllocationManager] =
_executorAllocationManager
private[spark] def resourceProfileManager: ResourceProfileManager = _resourceProfileManager
private[spark] def cleaner: Option[ContextCleaner] = _cleaner
private[spark] var checkpointDir: Option[String] = None
// Thread Local variable that can be used by users to pass information down the stack
protected[spark] val localProperties = new InheritableThreadLocal[Properties] {
override protected def childValue(parent: Properties): Properties = {
      // Note: make a clone such that changes in the parent properties aren't reflected in
      // those of the child threads, which has confusing semantics (SPARK-10563).
Utils.cloneProperties(parent)
}
override protected def initialValue(): Properties = new Properties()
}
/* ------------------------------------------------------------------------------------- *
| Initialization. This code initializes the context in a manner that is exception-safe. |
| All internal fields holding state are initialized here, and any error prompts the |
| stop() method to be called. |
* ------------------------------------------------------------------------------------- */
private def warnSparkMem(value: String): String = {
logWarning("Using SPARK_MEM to set amount of memory to use per executor process is " +
"deprecated, please use spark.executor.memory instead.")
value
}
/** Control our logLevel. This overrides any user-defined log settings.
* @param logLevel The desired log level as a string.
* Valid log levels include: ALL, DEBUG, ERROR, FATAL, INFO, OFF, TRACE, WARN
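   * For example, to silence most log output for the remainder of the application
   * (an illustrative sketch, assuming an active context `sc`):
   * {{{
   *   sc.setLogLevel("WARN")
   * }}}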
*/
def setLogLevel(logLevel: String): Unit = {
// let's allow lowercase or mixed case too
val upperCased = logLevel.toUpperCase(Locale.ROOT)
require(SparkContext.VALID_LOG_LEVELS.contains(upperCased),
s"Supplied level $logLevel did not match one of:" +
s" ${SparkContext.VALID_LOG_LEVELS.mkString(",")}")
Utils.setLogLevel(org.apache.log4j.Level.toLevel(upperCased))
}
try {
_conf = config.clone()
_conf.validateSettings()
if (!_conf.contains("spark.master")) {
throw new SparkException("A master URL must be set in your configuration")
}
if (!_conf.contains("spark.app.name")) {
throw new SparkException("An application name must be set in your configuration")
}
_driverLogger = DriverLogger(_conf)
val resourcesFileOpt = conf.get(DRIVER_RESOURCES_FILE)
_resources = getOrDiscoverAllResources(_conf, SPARK_DRIVER_PREFIX, resourcesFileOpt)
logResourceInfo(SPARK_DRIVER_PREFIX, _resources)
// log out spark.app.name in the Spark driver logs
logInfo(s"Submitted application: $appName")
    // System property spark.yarn.app.id must be set if user code is run by the AM on a YARN cluster
if (master == "yarn" && deployMode == "cluster" && !_conf.contains("spark.yarn.app.id")) {
throw new SparkException("Detected yarn cluster mode, but isn't running on a cluster. " +
"Deployment to YARN is not supported directly by SparkContext. Please use spark-submit.")
}
if (_conf.getBoolean("spark.logConf", false)) {
logInfo("Spark configuration:\\n" + _conf.toDebugString)
}
// Set Spark driver host and port system properties. This explicitly sets the configuration
// instead of relying on the default value of the config constant.
_conf.set(DRIVER_HOST_ADDRESS, _conf.get(DRIVER_HOST_ADDRESS))
_conf.setIfMissing(DRIVER_PORT, 0)
_conf.set(EXECUTOR_ID, SparkContext.DRIVER_IDENTIFIER)
_jars = Utils.getUserJars(_conf)
_files = _conf.getOption(FILES.key).map(_.split(",")).map(_.filter(_.nonEmpty))
.toSeq.flatten
_eventLogDir =
if (isEventLogEnabled) {
val unresolvedDir = conf.get(EVENT_LOG_DIR).stripSuffix("/")
Some(Utils.resolveURI(unresolvedDir))
} else {
None
}
_eventLogCodec = {
val compress = _conf.get(EVENT_LOG_COMPRESS)
if (compress && isEventLogEnabled) {
Some(_conf.get(EVENT_LOG_COMPRESSION_CODEC)).map(CompressionCodec.getShortName)
} else {
None
}
}
_listenerBus = new LiveListenerBus(_conf)
_resourceProfileManager = new ResourceProfileManager(_conf)
// Initialize the app status store and listener before SparkEnv is created so that it gets
// all events.
val appStatusSource = AppStatusSource.createSource(conf)
_statusStore = AppStatusStore.createLiveStore(conf, appStatusSource)
listenerBus.addToStatusQueue(_statusStore.listener.get)
// Create the Spark execution environment (cache, map output tracker, etc)
_env = createSparkEnv(_conf, isLocal, listenerBus)
SparkEnv.set(_env)
// If running the REPL, register the repl's output dir with the file server.
_conf.getOption("spark.repl.class.outputDir").foreach { path =>
val replUri = _env.rpcEnv.fileServer.addDirectory("/classes", new File(path))
_conf.set("spark.repl.class.uri", replUri)
}
_statusTracker = new SparkStatusTracker(this, _statusStore)
_progressBar =
if (_conf.get(UI_SHOW_CONSOLE_PROGRESS)) {
Some(new ConsoleProgressBar(this))
} else {
None
}
_ui =
if (conf.get(UI_ENABLED)) {
Some(SparkUI.create(Some(this), _statusStore, _conf, _env.securityManager, appName, "",
startTime))
} else {
// For tests, do not enable the UI
None
}
// Bind the UI before starting the task scheduler to communicate
// the bound port to the cluster manager properly
_ui.foreach(_.bind())
_hadoopConfiguration = SparkHadoopUtil.get.newConfiguration(_conf)
// Performance optimization: this dummy call to .size() triggers eager evaluation of
// Configuration's internal `properties` field, guaranteeing that it will be computed and
// cached before SessionState.newHadoopConf() uses `sc.hadoopConfiguration` to create
// a new per-session Configuration. If `properties` has not been computed by that time
// then each newly-created Configuration will perform its own expensive IO and XML
// parsing to load configuration defaults and populate its own properties. By ensuring
// that we've pre-computed the parent's properties, the child Configuration will simply
// clone the parent's properties.
_hadoopConfiguration.size()
// Add each JAR given through the constructor
if (jars != null) {
jars.foreach(addJar)
}
if (files != null) {
files.foreach(addFile)
}
_executorMemory = _conf.getOption(EXECUTOR_MEMORY.key)
.orElse(Option(System.getenv("SPARK_EXECUTOR_MEMORY")))
.orElse(Option(System.getenv("SPARK_MEM"))
.map(warnSparkMem))
.map(Utils.memoryStringToMb)
.getOrElse(1024)
    // Convert Java options to env vars as a workaround,
    // since we can't set env vars directly in sbt.
for { (envKey, propKey) <- Seq(("SPARK_TESTING", IS_TESTING.key))
value <- Option(System.getenv(envKey)).orElse(Option(System.getProperty(propKey)))} {
executorEnvs(envKey) = value
}
Option(System.getenv("SPARK_PREPEND_CLASSES")).foreach { v =>
executorEnvs("SPARK_PREPEND_CLASSES") = v
}
// The Mesos scheduler backend relies on this environment variable to set executor memory.
// TODO: Set this only in the Mesos scheduler.
executorEnvs("SPARK_EXECUTOR_MEMORY") = executorMemory + "m"
executorEnvs ++= _conf.getExecutorEnv
executorEnvs("SPARK_USER") = sparkUser
_shuffleDriverComponents = ShuffleDataIOUtils.loadShuffleDataIO(config).driver()
_shuffleDriverComponents.initializeApplication().asScala.foreach { case (k, v) =>
_conf.set(ShuffleDataIOUtils.SHUFFLE_SPARK_CONF_PREFIX + k, v)
}
// We need to register "HeartbeatReceiver" before "createTaskScheduler" because Executor will
// retrieve "HeartbeatReceiver" in the constructor. (SPARK-6640)
_heartbeatReceiver = env.rpcEnv.setupEndpoint(
HeartbeatReceiver.ENDPOINT_NAME, new HeartbeatReceiver(this))
// Initialize any plugins before the task scheduler is initialized.
_plugins = PluginContainer(this, _resources.asJava)
// Create and start the scheduler
val (sched, ts) = SparkContext.createTaskScheduler(this, master, deployMode)
_schedulerBackend = sched
_taskScheduler = ts
_dagScheduler = new DAGScheduler(this)
_heartbeatReceiver.ask[Boolean](TaskSchedulerIsSet)
val _executorMetricsSource =
if (_conf.get(METRICS_EXECUTORMETRICS_SOURCE_ENABLED)) {
Some(new ExecutorMetricsSource)
} else {
None
}
// create and start the heartbeater for collecting memory metrics
_heartbeater = new Heartbeater(
() => SparkContext.this.reportHeartBeat(_executorMetricsSource),
"driver-heartbeater",
conf.get(EXECUTOR_HEARTBEAT_INTERVAL))
_heartbeater.start()
    // Start the TaskScheduler only after the DAGScheduler's constructor has set the DAGScheduler
    // reference in the TaskScheduler.
_taskScheduler.start()
_applicationId = _taskScheduler.applicationId()
_applicationAttemptId = _taskScheduler.applicationAttemptId()
_conf.set("spark.app.id", _applicationId)
if (_conf.get(UI_REVERSE_PROXY)) {
System.setProperty("spark.ui.proxyBase", "/proxy/" + _applicationId)
}
_ui.foreach(_.setAppId(_applicationId))
_env.blockManager.initialize(_applicationId)
    // The driver's metrics system needs spark.app.id to be set to the application ID, so it
    // should start after we get the app ID from the task scheduler and set spark.app.id.
_env.metricsSystem.start(_conf.get(METRICS_STATIC_SOURCES_ENABLED))
// Attach the driver metrics servlet handler to the web ui after the metrics system is started.
_env.metricsSystem.getServletHandlers.foreach(handler => ui.foreach(_.attachHandler(handler)))
_eventLogger =
if (isEventLogEnabled) {
val logger =
new EventLoggingListener(_applicationId, _applicationAttemptId, _eventLogDir.get,
_conf, _hadoopConfiguration)
logger.start()
listenerBus.addToEventLogQueue(logger)
Some(logger)
} else {
None
}
_cleaner =
if (_conf.get(CLEANER_REFERENCE_TRACKING)) {
Some(new ContextCleaner(this, _shuffleDriverComponents))
} else {
None
}
_cleaner.foreach(_.start())
val dynamicAllocationEnabled = Utils.isDynamicAllocationEnabled(_conf)
_executorAllocationManager =
if (dynamicAllocationEnabled) {
schedulerBackend match {
case b: ExecutorAllocationClient =>
Some(new ExecutorAllocationManager(
schedulerBackend.asInstanceOf[ExecutorAllocationClient], listenerBus, _conf,
cleaner = cleaner, resourceProfileManager = resourceProfileManager))
case _ =>
None
}
} else {
None
}
_executorAllocationManager.foreach(_.start())
setupAndStartListenerBus()
postEnvironmentUpdate()
postApplicationStart()
// Post init
_taskScheduler.postStartHook()
_env.metricsSystem.registerSource(_dagScheduler.metricsSource)
_env.metricsSystem.registerSource(new BlockManagerSource(_env.blockManager))
_env.metricsSystem.registerSource(new JVMCPUSource())
_executorMetricsSource.foreach(_.register(_env.metricsSystem))
_executorAllocationManager.foreach { e =>
_env.metricsSystem.registerSource(e.executorAllocationManagerSource)
}
appStatusSource.foreach(_env.metricsSystem.registerSource(_))
_plugins.foreach(_.registerMetrics(applicationId))
// Make sure the context is stopped if the user forgets about it. This avoids leaving
// unfinished event logs around after the JVM exits cleanly. It doesn't help if the JVM
// is killed, though.
logDebug("Adding shutdown hook") // force eager creation of logger
_shutdownHookRef = ShutdownHookManager.addShutdownHook(
ShutdownHookManager.SPARK_CONTEXT_SHUTDOWN_PRIORITY) { () =>
logInfo("Invoking stop() from shutdown hook")
try {
stop()
} catch {
case e: Throwable =>
logWarning("Ignoring Exception while stopping SparkContext from shutdown hook", e)
}
}
} catch {
case NonFatal(e) =>
logError("Error initializing SparkContext.", e)
try {
stop()
} catch {
case NonFatal(inner) =>
logError("Error stopping SparkContext after init error.", inner)
} finally {
throw e
}
}
/**
* Called by the web UI to obtain executor thread dumps. This method may be expensive.
* Logs an error and returns None if we failed to obtain a thread dump, which could occur due
* to an executor being dead or unresponsive or due to network issues while sending the thread
* dump message back to the driver.
*/
private[spark] def getExecutorThreadDump(executorId: String): Option[Array[ThreadStackTrace]] = {
try {
if (executorId == SparkContext.DRIVER_IDENTIFIER) {
Some(Utils.getThreadDump())
} else {
val endpointRef = env.blockManager.master.getExecutorEndpointRef(executorId).get
Some(endpointRef.askSync[Array[ThreadStackTrace]](TriggerThreadDump))
}
} catch {
case e: Exception =>
logError(s"Exception getting thread dump from executor $executorId", e)
None
}
}
private[spark] def getLocalProperties: Properties = localProperties.get()
private[spark] def setLocalProperties(props: Properties): Unit = {
localProperties.set(props)
}
/**
* Set a local property that affects jobs submitted from this thread, such as the Spark fair
* scheduler pool. User-defined properties may also be set here. These properties are propagated
* through to worker tasks and can be accessed there via
* [[org.apache.spark.TaskContext#getLocalProperty]].
*
* These properties are inherited by child threads spawned from this thread. This
   * may have unexpected consequences when working with thread pools. The standard Java
   * implementation of thread pools has worker threads spawn other worker threads.
* As a result, local properties may propagate unpredictably.
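   *
   * For example, to route jobs submitted from this thread to a fair scheduler pool
   * (an illustrative sketch; the pool name "production" is hypothetical and `sc` is an
   * active context):
   * {{{
   *   sc.setLocalProperty("spark.scheduler.pool", "production")
   * }}}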
*/
def setLocalProperty(key: String, value: String): Unit = {
if (value == null) {
localProperties.get.remove(key)
} else {
localProperties.get.setProperty(key, value)
}
}
/**
* Get a local property set in this thread, or null if it is missing. See
* `org.apache.spark.SparkContext.setLocalProperty`.
*/
def getLocalProperty(key: String): String =
Option(localProperties.get).map(_.getProperty(key)).orNull
/** Set a human readable description of the current job. */
def setJobDescription(value: String): Unit = {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, value)
}
/**
* Assigns a group ID to all the jobs started by this thread until the group ID is set to a
* different value or cleared.
*
* Often, a unit of execution in an application consists of multiple Spark actions or jobs.
* Application programmers can use this method to group all those jobs together and give a
* group description. Once set, the Spark web UI will associate such jobs with this group.
*
* The application can also use `org.apache.spark.SparkContext.cancelJobGroup` to cancel all
* running jobs in this group. For example,
* {{{
* // In the main thread:
* sc.setJobGroup("some_job_to_cancel", "some job description")
* sc.parallelize(1 to 10000, 2).map { i => Thread.sleep(10); i }.count()
*
* // In a separate thread:
* sc.cancelJobGroup("some_job_to_cancel")
* }}}
*
* @param interruptOnCancel If true, then job cancellation will result in `Thread.interrupt()`
* being called on the job's executor threads. This is useful to help ensure that the tasks
* are actually stopped in a timely manner, but is off by default due to HDFS-1208, where HDFS
* may respond to Thread.interrupt() by marking nodes as dead.
*/
def setJobGroup(groupId: String,
description: String, interruptOnCancel: Boolean = false): Unit = {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, description)
setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, groupId)
// Note: Specifying interruptOnCancel in setJobGroup (rather than cancelJobGroup) avoids
// changing several public APIs and allows Spark cancellations outside of the cancelJobGroup
// APIs to also take advantage of this property (e.g., internal job failures or canceling from
// JobProgressTab UI) on a per-job basis.
setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, interruptOnCancel.toString)
}
/** Clear the current thread's job group ID and its description. */
def clearJobGroup(): Unit = {
setLocalProperty(SparkContext.SPARK_JOB_DESCRIPTION, null)
setLocalProperty(SparkContext.SPARK_JOB_GROUP_ID, null)
setLocalProperty(SparkContext.SPARK_JOB_INTERRUPT_ON_CANCEL, null)
}
/**
* Execute a block of code in a scope such that all new RDDs created in this body will
* be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}.
*
* @note Return statements are NOT allowed in the given body.
*/
private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](this)(body)
// Methods for creating RDDs
/** Distribute a local Scala collection to form an RDD.
*
* @note Parallelize acts lazily. If `seq` is a mutable collection and is altered after the call
* to parallelize and before the first action on the RDD, the resultant RDD will reflect the
* modified collection. Pass a copy of the argument to avoid this.
* @note avoid using `parallelize(Seq())` to create an empty `RDD`. Consider `emptyRDD` for an
* RDD with no partitions, or `parallelize(Seq[T]())` for an RDD of `T` with empty partitions.
* @param seq Scala collection to distribute
* @param numSlices number of partitions to divide the collection into
* @return RDD representing distributed collection
*/
def parallelize[T: ClassTag](
seq: Seq[T],
numSlices: Int = defaultParallelism): RDD[T] = withScope {
assertNotStopped()
new ParallelCollectionRDD[T](this, seq, numSlices, Map[Int, Seq[String]]())
}
/**
   * Creates a new RDD[Long] containing elements from `start` to `end` (exclusive), incremented
   * by `step` for each element.
*
* @note if we need to cache this RDD, we should make sure each partition does not exceed limit.
*
* @param start the start value.
* @param end the end value.
* @param step the incremental step
* @param numSlices number of partitions to divide the collection into
* @return RDD representing distributed range
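   *
   * For example (an illustrative sketch, assuming an active context `sc`):
   * {{{
   *   sc.range(0L, 10L, step = 2).collect()   // Array(0, 2, 4, 6, 8)
   * }}}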
*/
def range(
start: Long,
end: Long,
step: Long = 1,
numSlices: Int = defaultParallelism): RDD[Long] = withScope {
assertNotStopped()
// when step is 0, range will run infinitely
require(step != 0, "step cannot be 0")
val numElements: BigInt = {
val safeStart = BigInt(start)
val safeEnd = BigInt(end)
if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) {
(safeEnd - safeStart) / step
} else {
        // the remainder has the same sign as the range, so add 1 more
(safeEnd - safeStart) / step + 1
}
}
parallelize(0 until numSlices, numSlices).mapPartitionsWithIndex { (i, _) =>
val partitionStart = (i * numElements) / numSlices * step + start
val partitionEnd = (((i + 1) * numElements) / numSlices) * step + start
def getSafeMargin(bi: BigInt): Long =
if (bi.isValidLong) {
bi.toLong
} else if (bi > 0) {
Long.MaxValue
} else {
Long.MinValue
}
val safePartitionStart = getSafeMargin(partitionStart)
val safePartitionEnd = getSafeMargin(partitionEnd)
new Iterator[Long] {
private[this] var number: Long = safePartitionStart
private[this] var overflow: Boolean = false
override def hasNext =
if (!overflow) {
if (step > 0) {
number < safePartitionEnd
} else {
number > safePartitionEnd
}
} else false
override def next() = {
val ret = number
number += step
if (number < ret ^ step < 0) {
          // We have Long.MaxValue + Long.MaxValue < Long.MaxValue
          // and Long.MinValue + Long.MinValue > Long.MinValue, so if adding the step moves the
          // value backwards, we are pretty sure that we have an overflow.
overflow = true
}
ret
}
}
}
}
/** Distribute a local Scala collection to form an RDD.
*
* This method is identical to `parallelize`.
* @param seq Scala collection to distribute
* @param numSlices number of partitions to divide the collection into
* @return RDD representing distributed collection
*/
def makeRDD[T: ClassTag](
seq: Seq[T],
numSlices: Int = defaultParallelism): RDD[T] = withScope {
parallelize(seq, numSlices)
}
/**
* Distribute a local Scala collection to form an RDD, with one or more
* location preferences (hostnames of Spark nodes) for each object.
* Create a new partition for each collection item.
* @param seq list of tuples of data and location preferences (hostnames of Spark nodes)
* @return RDD representing data partitioned according to location preferences
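   *
   * For example (an illustrative sketch; the hostnames are hypothetical):
   * {{{
   *   val rdd = sc.makeRDD(Seq(
   *     ("record-a", Seq("host1.example.com")),
   *     ("record-b", Seq("host2.example.com"))))   // one partition per element
   * }}}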
*/
def makeRDD[T: ClassTag](seq: Seq[(T, Seq[String])]): RDD[T] = withScope {
assertNotStopped()
val indexToPrefs = seq.zipWithIndex.map(t => (t._2, t._1._2)).toMap
new ParallelCollectionRDD[T](this, seq.map(_._1), math.max(seq.size, 1), indexToPrefs)
}
/**
* Read a text file from HDFS, a local file system (available on all nodes), or any
* Hadoop-supported file system URI, and return it as an RDD of Strings.
* The text files must be encoded as UTF-8.
*
* @param path path to the text file on a supported file system
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of lines of the text file
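   *
   * For example (an illustrative sketch; the path is hypothetical):
   * {{{
   *   val lines = sc.textFile("hdfs://namenode/data/input.txt")
   *   lines.map(_.length).sum()   // total number of characters, excluding line separators
   * }}}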
*/
def textFile(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[String] = withScope {
assertNotStopped()
hadoopFile(path, classOf[TextInputFormat], classOf[LongWritable], classOf[Text],
minPartitions).map(pair => pair._2.toString).setName(path)
}
/**
* Read a directory of text files from HDFS, a local file system (available on all nodes), or any
* Hadoop-supported file system URI. Each file is read as a single record and returned in a
* key-value pair, where the key is the path of each file, the value is the content of each file.
* The text files must be encoded as UTF-8.
*
* <p> For example, if you have the following files:
* {{{
* hdfs://a-hdfs-path/part-00000
* hdfs://a-hdfs-path/part-00001
* ...
* hdfs://a-hdfs-path/part-nnnnn
* }}}
*
   * Do `val rdd = sparkContext.wholeTextFiles("hdfs://a-hdfs-path")`,
*
* <p> then `rdd` contains
* {{{
* (a-hdfs-path/part-00000, its content)
* (a-hdfs-path/part-00001, its content)
* ...
* (a-hdfs-path/part-nnnnn, its content)
* }}}
*
   * @note Small files are preferred; large files are also allowed, but may cause bad performance.
* @note On some filesystems, `.../path/*` can be a more efficient way to read all files
* in a directory rather than `.../path/` or `.../path`
* @note Partitioning is determined by data locality. This may result in too few partitions
* by default.
*
   * @param path Directory to the input data files; the path can be a comma-separated list of
   * paths as the inputs.
   * @param minPartitions A suggested minimum number of partitions for the input data.
* @return RDD representing tuples of file path and the corresponding file content
*/
def wholeTextFiles(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[(String, String)] = withScope {
assertNotStopped()
val job = NewHadoopJob.getInstance(hadoopConfiguration)
// Use setInputPaths so that wholeTextFiles aligns with hadoopFile/textFile in taking
// comma separated files as input. (see SPARK-7155)
NewFileInputFormat.setInputPaths(job, path)
val updateConf = job.getConfiguration
new WholeTextFileRDD(
this,
classOf[WholeTextFileInputFormat],
classOf[Text],
classOf[Text],
updateConf,
minPartitions).map(record => (record._1.toString, record._2.toString)).setName(path)
}
/**
* Get an RDD for a Hadoop-readable dataset as PortableDataStream for each file
* (useful for binary data)
*
* For example, if you have the following files:
* {{{
* hdfs://a-hdfs-path/part-00000
* hdfs://a-hdfs-path/part-00001
* ...
* hdfs://a-hdfs-path/part-nnnnn
* }}}
*
* Do
* `val rdd = sparkContext.binaryFiles("hdfs://a-hdfs-path")`,
*
* then `rdd` contains
* {{{
* (a-hdfs-path/part-00000, its content)
* (a-hdfs-path/part-00001, its content)
* ...
* (a-hdfs-path/part-nnnnn, its content)
* }}}
*
* @note Small files are preferred; very large files may cause bad performance.
* @note On some filesystems, `.../path/*` can be a more efficient way to read all files
* in a directory rather than `.../path/` or `.../path`
* @note Partitioning is determined by data locality. This may result in too few partitions
* by default.
*
   * @param path Directory to the input data files; the path can be a comma-separated list of
   * paths as the inputs.
   * @param minPartitions A suggested minimum number of partitions for the input data.
* @return RDD representing tuples of file path and corresponding file content
*/
def binaryFiles(
path: String,
minPartitions: Int = defaultMinPartitions): RDD[(String, PortableDataStream)] = withScope {
assertNotStopped()
val job = NewHadoopJob.getInstance(hadoopConfiguration)
// Use setInputPaths so that binaryFiles aligns with hadoopFile/textFile in taking
// comma separated files as input. (see SPARK-7155)
NewFileInputFormat.setInputPaths(job, path)
val updateConf = job.getConfiguration
new BinaryFileRDD(
this,
classOf[StreamInputFormat],
classOf[String],
classOf[PortableDataStream],
updateConf,
minPartitions).setName(path)
}
/**
* Load data from a flat binary file, assuming the length of each record is constant.
*
* @note We ensure that the byte array for each record in the resulting RDD
* has the provided record length.
*
   * @param path Directory to the input data files; the path can be a comma-separated list of
   * paths as the inputs.
* @param recordLength The length at which to split the records
* @param conf Configuration for setting up the dataset.
*
* @return An RDD of data with values, represented as byte arrays
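   *
   * For example, reading fixed-width 512-byte records (an illustrative sketch; the path is
   * hypothetical):
   * {{{
   *   val records = sc.binaryRecords("hdfs://namenode/data/fixed-width.bin", 512)
   *   records.count()
   * }}}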
*/
def binaryRecords(
path: String,
recordLength: Int,
conf: Configuration = hadoopConfiguration): RDD[Array[Byte]] = withScope {
assertNotStopped()
conf.setInt(FixedLengthBinaryInputFormat.RECORD_LENGTH_PROPERTY, recordLength)
val br = newAPIHadoopFile[LongWritable, BytesWritable, FixedLengthBinaryInputFormat](path,
classOf[FixedLengthBinaryInputFormat],
classOf[LongWritable],
classOf[BytesWritable],
conf = conf)
br.map { case (k, v) =>
val bytes = v.copyBytes()
assert(bytes.length == recordLength, "Byte array does not have correct length")
bytes
}
}
/**
* Get an RDD for a Hadoop-readable dataset from a Hadoop JobConf given its InputFormat and other
* necessary info (e.g. file name for a filesystem-based dataset, table name for HyperTable),
* using the older MapReduce API (`org.apache.hadoop.mapred`).
*
* @param conf JobConf for setting up the dataset. Note: This will be put into a Broadcast.
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
* sure you won't modify the conf. A safe approach is always creating a new conf for
* a new RDD.
* @param inputFormatClass storage format of the data to be read
* @param keyClass `Class` of the key associated with the `inputFormatClass` parameter
* @param valueClass `Class` of the value associated with the `inputFormatClass` parameter
* @param minPartitions Minimum number of Hadoop Splits to generate.
* @return RDD of tuples of key and corresponding value
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
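   *
   * For example (an illustrative sketch; the input path set on the JobConf is hypothetical, and
   * the classes used are the old-API `org.apache.hadoop.mapred` / `org.apache.hadoop.io` ones):
   * {{{
   *   val jobConf = new JobConf(sc.hadoopConfiguration)
   *   FileInputFormat.setInputPaths(jobConf, "hdfs://namenode/data")
   *   val rdd = sc.hadoopRDD(jobConf, classOf[TextInputFormat], classOf[LongWritable],
   *     classOf[Text])
   * }}}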
*/
def hadoopRDD[K, V](
conf: JobConf,
inputFormatClass: Class[_ <: InputFormat[K, V]],
keyClass: Class[K],
valueClass: Class[V],
minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(conf)
// Add necessary security credentials to the JobConf before broadcasting it.
SparkHadoopUtil.get.addCredentials(conf)
new HadoopRDD(this, conf, inputFormatClass, keyClass, valueClass, minPartitions)
}
/** Get an RDD for a Hadoop file with an arbitrary InputFormat
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param inputFormatClass storage format of the data to be read
* @param keyClass `Class` of the key associated with the `inputFormatClass` parameter
* @param valueClass `Class` of the value associated with the `inputFormatClass` parameter
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def hadoopFile[K, V](
path: String,
inputFormatClass: Class[_ <: InputFormat[K, V]],
keyClass: Class[K],
valueClass: Class[V],
minPartitions: Int = defaultMinPartitions): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(hadoopConfiguration)
// A Hadoop configuration can be about 10 KiB, which is pretty big, so broadcast it.
val confBroadcast = broadcast(new SerializableConfiguration(hadoopConfiguration))
val setInputPathsFunc = (jobConf: JobConf) => FileInputFormat.setInputPaths(jobConf, path)
new HadoopRDD(
this,
confBroadcast,
Some(setInputPathsFunc),
inputFormatClass,
keyClass,
valueClass,
minPartitions).setName(path)
}
/**
* Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
* values and the InputFormat so that users don't need to pass them directly. Instead, callers
* can just write, for example,
* {{{
* val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path, minPartitions)
* }}}
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def hadoopFile[K, V, F <: InputFormat[K, V]]
(path: String, minPartitions: Int)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
hadoopFile(path,
fm.runtimeClass.asInstanceOf[Class[F]],
km.runtimeClass.asInstanceOf[Class[K]],
vm.runtimeClass.asInstanceOf[Class[V]],
minPartitions)
}
/**
* Smarter version of hadoopFile() that uses class tags to figure out the classes of keys,
* values and the InputFormat so that users don't need to pass them directly. Instead, callers
* can just write, for example,
* {{{
* val file = sparkContext.hadoopFile[LongWritable, Text, TextInputFormat](path)
* }}}
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths as
* a list of inputs
* @return RDD of tuples of key and corresponding value
*/
def hadoopFile[K, V, F <: InputFormat[K, V]](path: String)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
hadoopFile[K, V, F](path, defaultMinPartitions)
}
/**
   * Smarter version of `newAPIHadoopFile` that uses class tags to figure out the classes of keys,
   * values and the `org.apache.hadoop.mapreduce.InputFormat` (new MapReduce API) so that users
   * don't need to pass them directly. Instead, callers can just write, for example:
   * {{{
   * val file = sparkContext.newAPIHadoopFile[LongWritable, Text, TextInputFormat](path)
   * }}}
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @return RDD of tuples of key and corresponding value
*/
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]]
(path: String)
(implicit km: ClassTag[K], vm: ClassTag[V], fm: ClassTag[F]): RDD[(K, V)] = withScope {
newAPIHadoopFile(
path,
fm.runtimeClass.asInstanceOf[Class[F]],
km.runtimeClass.asInstanceOf[Class[K]],
vm.runtimeClass.asInstanceOf[Class[V]])
}
/**
* Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
* and extra configuration options to pass to the input format.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param fClass storage format of the data to be read
* @param kClass `Class` of the key associated with the `fClass` parameter
* @param vClass `Class` of the value associated with the `fClass` parameter
* @param conf Hadoop configuration
* @return RDD of tuples of key and corresponding value
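   *
   * For example, reading text with the new MapReduce API's `TextInputFormat` (an illustrative
   * sketch; the path is hypothetical):
   * {{{
   *   val rdd = sc.newAPIHadoopFile(
   *     "hdfs://namenode/data/input",
   *     classOf[org.apache.hadoop.mapreduce.lib.input.TextInputFormat],
   *     classOf[LongWritable],
   *     classOf[Text])
   * }}}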
*/
def newAPIHadoopFile[K, V, F <: NewInputFormat[K, V]](
path: String,
fClass: Class[F],
kClass: Class[K],
vClass: Class[V],
conf: Configuration = hadoopConfiguration): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(hadoopConfiguration)
// The call to NewHadoopJob automatically adds security credentials to conf,
// so we don't need to explicitly add them ourselves
val job = NewHadoopJob.getInstance(conf)
// Use setInputPaths so that newAPIHadoopFile aligns with hadoopFile/textFile in taking
// comma separated files as input. (see SPARK-7155)
NewFileInputFormat.setInputPaths(job, path)
val updatedConf = job.getConfiguration
new NewHadoopRDD(this, fClass, kClass, vClass, updatedConf).setName(path)
}
/**
* Get an RDD for a given Hadoop file with an arbitrary new API InputFormat
* and extra configuration options to pass to the input format.
*
* @param conf Configuration for setting up the dataset. Note: This will be put into a Broadcast.
* Therefore if you plan to reuse this conf to create multiple RDDs, you need to make
* sure you won't modify the conf. A safe approach is always creating a new conf for
* a new RDD.
* @param fClass storage format of the data to be read
* @param kClass `Class` of the key associated with the `fClass` parameter
* @param vClass `Class` of the value associated with the `fClass` parameter
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
*/
def newAPIHadoopRDD[K, V, F <: NewInputFormat[K, V]](
conf: Configuration = hadoopConfiguration,
fClass: Class[F],
kClass: Class[K],
vClass: Class[V]): RDD[(K, V)] = withScope {
assertNotStopped()
// This is a hack to enforce loading hdfs-site.xml.
// See SPARK-11227 for details.
FileSystem.getLocal(conf)
// Add necessary security credentials to the JobConf. Required to access secure HDFS.
val jconf = new JobConf(conf)
SparkHadoopUtil.get.addCredentials(jconf)
new NewHadoopRDD(this, fClass, kClass, vClass, jconf)
}
/**
* Get an RDD for a Hadoop SequenceFile with given key and value types.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param keyClass `Class` of the key associated with `SequenceFileInputFormat`
* @param valueClass `Class` of the value associated with `SequenceFileInputFormat`
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
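   *
   * For example (an illustrative sketch; the path is hypothetical and the key/value classes are
   * `org.apache.hadoop.io` Writables):
   * {{{
   *   val pairs = sc.sequenceFile("hdfs://namenode/data/seqfile", classOf[Text],
   *     classOf[IntWritable], 8)
   * }}}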
*/
def sequenceFile[K, V](path: String,
keyClass: Class[K],
valueClass: Class[V],
minPartitions: Int
): RDD[(K, V)] = withScope {
assertNotStopped()
val inputFormatClass = classOf[SequenceFileInputFormat[K, V]]
hadoopFile(path, inputFormatClass, keyClass, valueClass, minPartitions)
}
/**
* Get an RDD for a Hadoop SequenceFile with given key and value types.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param keyClass `Class` of the key associated with `SequenceFileInputFormat`
* @param valueClass `Class` of the value associated with `SequenceFileInputFormat`
* @return RDD of tuples of key and corresponding value
*/
def sequenceFile[K, V](
path: String,
keyClass: Class[K],
valueClass: Class[V]): RDD[(K, V)] = withScope {
assertNotStopped()
sequenceFile(path, keyClass, valueClass, defaultMinPartitions)
}
/**
* Version of sequenceFile() for types implicitly convertible to Writables through a
* WritableConverter. For example, to access a SequenceFile where the keys are Text and the
* values are IntWritable, you could simply write
* {{{
* sparkContext.sequenceFile[String, Int](path, ...)
* }}}
*
* WritableConverters are provided in a somewhat strange way (by an implicit function) to support
* both subclasses of Writable and types for which we define a converter (e.g. Int to
* IntWritable). The most natural thing would've been to have implicit objects for the
* converters, but then we couldn't have an object for every subclass of Writable (you can't
* have a parameterized singleton object). We use functions instead to create a new converter
* for the appropriate type. In addition, we pass the converter a ClassTag of its type to
* allow it to figure out the Writable class to use in the subclass case.
*
* @note Because Hadoop's RecordReader class re-uses the same Writable object for each
* record, directly caching the returned RDD or directly passing it to an aggregation or shuffle
* operation will create many references to the same object.
* If you plan to directly cache, sort, or aggregate Hadoop writable objects, you should first
* copy them using a `map` function.
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD of tuples of key and corresponding value
*/
def sequenceFile[K, V]
(path: String, minPartitions: Int = defaultMinPartitions)
(implicit km: ClassTag[K], vm: ClassTag[V],
kcf: () => WritableConverter[K], vcf: () => WritableConverter[V]): RDD[(K, V)] = {
withScope {
assertNotStopped()
val kc = clean(kcf)()
val vc = clean(vcf)()
val format = classOf[SequenceFileInputFormat[Writable, Writable]]
val writables = hadoopFile(path, format,
kc.writableClass(km).asInstanceOf[Class[Writable]],
vc.writableClass(vm).asInstanceOf[Class[Writable]], minPartitions)
writables.map { case (k, v) => (kc.convert(k), vc.convert(v)) }
}
}
/**
* Load an RDD saved as a SequenceFile containing serialized objects, with NullWritable keys and
* BytesWritable values that contain a serialized partition. This is still an experimental
* storage format and may not be supported exactly as is in future Spark releases. It will also
* be pretty slow if you use the default serializer (Java serialization),
* though the nice thing about it is that there's very little effort required to save arbitrary
* objects.
*
* @param path directory to the input data files, the path can be comma separated paths
* as a list of inputs
* @param minPartitions suggested minimum number of partitions for the resulting RDD
* @return RDD representing deserialized data from the file(s)
*/
def objectFile[T: ClassTag](
path: String,
minPartitions: Int = defaultMinPartitions): RDD[T] = withScope {
assertNotStopped()
sequenceFile(path, classOf[NullWritable], classOf[BytesWritable], minPartitions)
.flatMap(x => Utils.deserialize[Array[T]](x._2.getBytes, Utils.getContextOrSparkClassLoader))
}
protected[spark] def checkpointFile[T: ClassTag](path: String): RDD[T] = withScope {
new ReliableCheckpointRDD[T](this, path)
}
/** Build the union of a list of RDDs. */
def union[T: ClassTag](rdds: Seq[RDD[T]]): RDD[T] = withScope {
val nonEmptyRdds = rdds.filter(!_.partitions.isEmpty)
val partitioners = nonEmptyRdds.flatMap(_.partitioner).toSet
if (nonEmptyRdds.forall(_.partitioner.isDefined) && partitioners.size == 1) {
new PartitionerAwareUnionRDD(this, nonEmptyRdds)
} else {
new UnionRDD(this, nonEmptyRdds)
}
}
/** Build the union of a list of RDDs passed as variable-length arguments. */
def union[T: ClassTag](first: RDD[T], rest: RDD[T]*): RDD[T] = withScope {
union(Seq(first) ++ rest)
}
/** Get an RDD that has no partitions or elements. */
def emptyRDD[T: ClassTag]: RDD[T] = new EmptyRDD[T](this)
// Methods for creating shared variables
/**
* Register the given accumulator.
*
   * @note Accumulators must be registered before use, or it will throw an exception.
*/
def register(acc: AccumulatorV2[_, _]): Unit = {
acc.register(this)
}
/**
* Register the given accumulator with given name.
*
   * @note Accumulators must be registered before use, or it will throw an exception.
*/
def register(acc: AccumulatorV2[_, _], name: String): Unit = {
acc.register(this, name = Option(name))
}
/**
* Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`.
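   *
   * For example, counting matching records on the driver (an illustrative sketch; `rdd` is
   * assumed to be an existing RDD[String]):
   * {{{
   *   val errorCount = sc.longAccumulator
   *   rdd.foreach { line => if (line.contains("ERROR")) errorCount.add(1) }
   *   errorCount.value   // read on the driver after the action completes
   * }}}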
*/
def longAccumulator: LongAccumulator = {
val acc = new LongAccumulator
register(acc)
acc
}
/**
* Create and register a long accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def longAccumulator(name: String): LongAccumulator = {
val acc = new LongAccumulator
register(acc, name)
acc
}
/**
* Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def doubleAccumulator: DoubleAccumulator = {
val acc = new DoubleAccumulator
register(acc)
acc
}
/**
* Create and register a double accumulator, which starts with 0 and accumulates inputs by `add`.
*/
def doubleAccumulator(name: String): DoubleAccumulator = {
val acc = new DoubleAccumulator
register(acc, name)
acc
}
/**
   * Create and register a `CollectionAccumulator`, which starts with an empty list and accumulates
* inputs by adding them into the list.
*/
def collectionAccumulator[T]: CollectionAccumulator[T] = {
val acc = new CollectionAccumulator[T]
register(acc)
acc
}
/**
   * Create and register a `CollectionAccumulator`, which starts with an empty list and accumulates
* inputs by adding them into the list.
*/
def collectionAccumulator[T](name: String): CollectionAccumulator[T] = {
val acc = new CollectionAccumulator[T]
register(acc, name)
acc
}
/**
* Broadcast a read-only variable to the cluster, returning a
* [[org.apache.spark.broadcast.Broadcast]] object for reading it in distributed functions.
   * The variable will be sent to each executor only once.
*
* @param value value to broadcast to the Spark nodes
* @return `Broadcast` object, a read-only variable cached on each machine
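   *
   * For example, sharing a small lookup table with all tasks (an illustrative sketch; `rdd` is
   * assumed to be an existing RDD[String]):
   * {{{
   *   val lookup = sc.broadcast(Map("a" -> 1, "b" -> 2))
   *   rdd.map(key => lookup.value.getOrElse(key, 0))
   * }}}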
*/
def broadcast[T: ClassTag](value: T): Broadcast[T] = {
assertNotStopped()
require(!classOf[RDD[_]].isAssignableFrom(classTag[T].runtimeClass),
"Can not directly broadcast RDDs; instead, call collect() and broadcast the result.")
val bc = env.broadcastManager.newBroadcast[T](value, isLocal)
val callSite = getCallSite
logInfo("Created broadcast " + bc.id + " from " + callSite.shortForm)
cleaner.foreach(_.registerBroadcastForCleanup(bc))
bc
}
/**
* Add a file to be downloaded with this Spark job on every node.
*
* If a file is added during execution, it will not be available until the next TaskSet starts.
*
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported
* filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
* use `SparkFiles.get(fileName)` to find its download location.
*
* @note A path can be added only once. Subsequent additions of the same path are ignored.
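   *
   * For example (an illustrative sketch; the file location is hypothetical):
   * {{{
   *   sc.addFile("hdfs://namenode/config/lookup.csv")
   *   // On the driver or in tasks, resolve the downloaded copy with:
   *   val localPath = SparkFiles.get("lookup.csv")
   * }}}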
*/
def addFile(path: String): Unit = {
addFile(path, false)
}
/**
* Returns a list of file paths that are added to resources.
*/
def listFiles(): Seq[String] = addedFiles.keySet.toSeq
/**
* Add a file to be downloaded with this Spark job on every node.
*
* If a file is added during execution, it will not be available until the next TaskSet starts.
*
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported
* filesystems), or an HTTP, HTTPS or FTP URI. To access the file in Spark jobs,
* use `SparkFiles.get(fileName)` to find its download location.
* @param recursive if true, a directory can be given in `path`. Currently directories are
* only supported for Hadoop-supported filesystems.
*
* @note A path can be added only once. Subsequent additions of the same path are ignored.
*/
def addFile(path: String, recursive: Boolean): Unit = {
val uri = new Path(path).toUri
val schemeCorrectedURI = uri.getScheme match {
case null => new File(path).getCanonicalFile.toURI
case "local" =>
logWarning("File with 'local' scheme is not supported to add to file server, since " +
"it is already available on every node.")
return
case _ => uri
}
val hadoopPath = new Path(schemeCorrectedURI)
val scheme = schemeCorrectedURI.getScheme
if (!Array("http", "https", "ftp").contains(scheme)) {
val fs = hadoopPath.getFileSystem(hadoopConfiguration)
val isDir = fs.getFileStatus(hadoopPath).isDirectory
if (!isLocal && scheme == "file" && isDir) {
throw new SparkException(s"addFile does not support local directories when not running " +
"local mode.")
}
if (!recursive && isDir) {
throw new SparkException(s"Added file $hadoopPath is a directory and recursive is not " +
"turned on.")
}
} else {
// SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies
Utils.validateURL(uri)
}
val key = if (!isLocal && scheme == "file") {
env.rpcEnv.fileServer.addFile(new File(uri.getPath))
} else {
if (uri.getScheme == null) {
schemeCorrectedURI.toString
} else {
path
}
}
val timestamp = System.currentTimeMillis
if (addedFiles.putIfAbsent(key, timestamp).isEmpty) {
logInfo(s"Added file $path at $key with timestamp $timestamp")
// Fetch the file locally so that closures which are run on the driver can still use the
// SparkFiles API to access files.
Utils.fetchFile(uri.toString, new File(SparkFiles.getRootDirectory()), conf,
env.securityManager, hadoopConfiguration, timestamp, useCache = false)
postEnvironmentUpdate()
} else {
logWarning(s"The path $path has been added already. Overwriting of added paths " +
"is not supported in the current version.")
}
}
/**
* :: DeveloperApi ::
* Register a listener to receive up-calls from events that happen during execution.
*/
@DeveloperApi
def addSparkListener(listener: SparkListenerInterface): Unit = {
listenerBus.addToSharedQueue(listener)
}
/**
* :: DeveloperApi ::
* Deregister the listener from Spark's listener bus.
*/
@DeveloperApi
def removeSparkListener(listener: SparkListenerInterface): Unit = {
listenerBus.removeListener(listener)
}
private[spark] def getExecutorIds(): Seq[String] = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
b.getExecutorIds()
case _ =>
logWarning("Requesting executors is not supported by current scheduler.")
Nil
}
}
/**
   * Get the max number of tasks that can be launched concurrently at the moment.
   * Do not cache the value returned by this method, because the number can change
   * when executors are added or removed.
   *
   * @return The max number of tasks that can be launched concurrently at the moment.
*/
private[spark] def maxNumConcurrentTasks(): Int = schedulerBackend.maxNumConcurrentTasks()
/**
* Update the cluster manager on our scheduling needs. Three bits of information are included
* to help it make decisions. This applies to the default ResourceProfile.
* @param numExecutors The total number of executors we'd like to have. The cluster manager
* shouldn't kill any running executor to reach this number, but,
* if all existing executors were to die, this is the number of executors
* we'd want to be allocated.
   * @param localityAwareTasks The number of tasks in all active stages that have locality
   *                           preferences. This includes running, pending, and completed tasks.
   * @param hostToLocalTaskCount A map of hosts to the number of tasks from all active stages
   *                             that would like to run on that host.
   *                             This includes running, pending, and completed tasks.
* @return whether the request is acknowledged by the cluster manager.
*/
@DeveloperApi
def requestTotalExecutors(
numExecutors: Int,
localityAwareTasks: Int,
hostToLocalTaskCount: immutable.Map[String, Int]
): Boolean = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
        // This is applied to the default resource profile; an API would need to be added to
        // support other profiles.
val defaultProfId = resourceProfileManager.defaultResourceProfile.id
        b.requestTotalExecutors(immutable.Map(defaultProfId -> numExecutors),
          immutable.Map(defaultProfId -> localityAwareTasks),
          immutable.Map(defaultProfId -> hostToLocalTaskCount))
case _ =>
logWarning("Requesting executors is not supported by current scheduler.")
false
}
}
/**
* :: DeveloperApi ::
* Request an additional number of executors from the cluster manager.
* @return whether the request is received.
*/
@DeveloperApi
def requestExecutors(numAdditionalExecutors: Int): Boolean = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
b.requestExecutors(numAdditionalExecutors)
case _ =>
logWarning("Requesting executors is not supported by current scheduler.")
false
}
}
/**
* :: DeveloperApi ::
* Request that the cluster manager kill the specified executors.
*
* This is not supported when dynamic allocation is turned on.
*
* @note This is an indication to the cluster manager that the application wishes to adjust
* its resource usage downwards. If the application wishes to replace the executors it kills
* through this method with new ones, it should follow up explicitly with a call to
* {{SparkContext#requestExecutors}}.
*
* @return whether the request is received.
*/
@DeveloperApi
def killExecutors(executorIds: Seq[String]): Boolean = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
require(executorAllocationManager.isEmpty,
"killExecutors() unsupported with Dynamic Allocation turned on")
b.killExecutors(executorIds, adjustTargetNumExecutors = true, countFailures = false,
force = true).nonEmpty
case _ =>
logWarning("Killing executors is not supported by current scheduler.")
false
}
}
/**
* :: DeveloperApi ::
* Request that the cluster manager kill the specified executor.
*
* @note This is an indication to the cluster manager that the application wishes to adjust
* its resource usage downwards. If the application wishes to replace the executor it kills
* through this method with a new one, it should follow up explicitly with a call to
* {{SparkContext#requestExecutors}}.
*
* @return whether the request is received.
*/
@DeveloperApi
def killExecutor(executorId: String): Boolean = killExecutors(Seq(executorId))
/**
* Request that the cluster manager kill the specified executor without adjusting the
* application resource requirements.
*
* The effect is that a new executor will be launched in place of the one killed by
* this request. This assumes the cluster manager will automatically and eventually
* fulfill all missing application resource requests.
*
   * @note The replacement is by no means guaranteed; another application on the same cluster
   * can steal the window of opportunity and acquire this application's resources in the
   * meantime.
*
* @return whether the request is received.
*/
private[spark] def killAndReplaceExecutor(executorId: String): Boolean = {
schedulerBackend match {
case b: ExecutorAllocationClient =>
b.killExecutors(Seq(executorId), adjustTargetNumExecutors = false, countFailures = true,
force = true).nonEmpty
case _ =>
logWarning("Killing executors is not supported by current scheduler.")
false
}
}
/** The version of Spark on which this application is running. */
def version: String = SPARK_VERSION
/**
   * Return a map from each block manager (host:port) to the max memory available for caching
   * and the remaining memory available for caching.
*/
def getExecutorMemoryStatus: Map[String, (Long, Long)] = {
assertNotStopped()
env.blockManager.master.getMemoryStatus.map { case(blockManagerId, mem) =>
(blockManagerId.host + ":" + blockManagerId.port, mem)
}
}
/**
* :: DeveloperApi ::
   * Return information about what RDDs are cached, whether they are in memory or on disk,
   * how much space they take, etc.
*/
@DeveloperApi
def getRDDStorageInfo: Array[RDDInfo] = {
getRDDStorageInfo(_ => true)
}
private[spark] def getRDDStorageInfo(filter: RDD[_] => Boolean): Array[RDDInfo] = {
assertNotStopped()
val rddInfos = persistentRdds.values.filter(filter).map(RDDInfo.fromRdd).toArray
rddInfos.foreach { rddInfo =>
val rddId = rddInfo.id
val rddStorageInfo = statusStore.asOption(statusStore.rdd(rddId))
rddInfo.numCachedPartitions = rddStorageInfo.map(_.numCachedPartitions).getOrElse(0)
rddInfo.memSize = rddStorageInfo.map(_.memoryUsed).getOrElse(0L)
rddInfo.diskSize = rddStorageInfo.map(_.diskUsed).getOrElse(0L)
}
rddInfos.filter(_.isCached)
}
/**
   * Returns an immutable map of RDDs that have marked themselves as persistent via a cache() call.
*
* @note This does not necessarily mean the caching or computation was successful.
*/
def getPersistentRDDs: Map[Int, RDD[_]] = persistentRdds.toMap
/**
* :: DeveloperApi ::
* Return pools for fair scheduler
*/
@DeveloperApi
def getAllPools: Seq[Schedulable] = {
assertNotStopped()
// TODO(xiajunluan): We should take nested pools into account
taskScheduler.rootPool.schedulableQueue.asScala.toSeq
}
/**
* :: DeveloperApi ::
* Return the pool associated with the given name, if one exists
*/
@DeveloperApi
def getPoolForName(pool: String): Option[Schedulable] = {
assertNotStopped()
Option(taskScheduler.rootPool.schedulableNameToSchedulable.get(pool))
}
/**
* Return current scheduling mode
*/
def getSchedulingMode: SchedulingMode.SchedulingMode = {
assertNotStopped()
taskScheduler.schedulingMode
}
/**
* Gets the locality information associated with the partition in a particular rdd
* @param rdd of interest
* @param partition to be looked up for locality
* @return list of preferred locations for the partition
*/
private [spark] def getPreferredLocs(rdd: RDD[_], partition: Int): Seq[TaskLocation] = {
dagScheduler.getPreferredLocs(rdd, partition)
}
/**
* Register an RDD to be persisted in memory and/or disk storage
*/
private[spark] def persistRDD(rdd: RDD[_]): Unit = {
persistentRdds(rdd.id) = rdd
}
/**
* Unpersist an RDD from memory and/or disk storage
*/
private[spark] def unpersistRDD(rddId: Int, blocking: Boolean): Unit = {
env.blockManager.master.removeRdd(rddId, blocking)
persistentRdds.remove(rddId)
listenerBus.post(SparkListenerUnpersistRDD(rddId))
}
/**
* Adds a JAR dependency for all tasks to be executed on this `SparkContext` in the future.
*
* If a jar is added during execution, it will not be available until the next TaskSet starts.
*
* @param path can be either a local file, a file in HDFS (or other Hadoop-supported filesystems),
* an HTTP, HTTPS or FTP URI, or local:/path for a file on every worker node.
*
* @note A path can be added only once. Subsequent additions of the same path are ignored.
*/
def addJar(path: String): Unit = {
def addLocalJarFile(file: File): String = {
try {
if (!file.exists()) {
throw new FileNotFoundException(s"Jar ${file.getAbsolutePath} not found")
}
if (file.isDirectory) {
throw new IllegalArgumentException(
s"Directory ${file.getAbsoluteFile} is not allowed for addJar")
}
env.rpcEnv.fileServer.addJar(file)
} catch {
case NonFatal(e) =>
logError(s"Failed to add $path to Spark environment", e)
null
}
}
def checkRemoteJarFile(path: String): String = {
val hadoopPath = new Path(path)
val scheme = hadoopPath.toUri.getScheme
if (!Array("http", "https", "ftp").contains(scheme)) {
try {
val fs = hadoopPath.getFileSystem(hadoopConfiguration)
if (!fs.exists(hadoopPath)) {
throw new FileNotFoundException(s"Jar ${path} not found")
}
if (fs.isDirectory(hadoopPath)) {
throw new IllegalArgumentException(
s"Directory ${path} is not allowed for addJar")
}
path
} catch {
case NonFatal(e) =>
logError(s"Failed to add $path to Spark environment", e)
null
}
} else {
path
}
}
if (path == null || path.isEmpty) {
logWarning("null or empty path specified as parameter to addJar")
} else {
      val key = if (path.contains("\\")) {
// For local paths with backslashes on Windows, URI throws an exception
addLocalJarFile(new File(path))
} else {
val uri = new Path(path).toUri
// SPARK-17650: Make sure this is a valid URL before adding it to the list of dependencies
Utils.validateURL(uri)
uri.getScheme match {
// A JAR file which exists only on the driver node
case null =>
// SPARK-22585 path without schema is not url encoded
addLocalJarFile(new File(uri.getPath))
// A JAR file which exists only on the driver node
case "file" => addLocalJarFile(new File(uri.getPath))
// A JAR file which exists locally on every worker node
case "local" => "file:" + uri.getPath
case _ => checkRemoteJarFile(path)
}
}
if (key != null) {
val timestamp = System.currentTimeMillis
if (addedJars.putIfAbsent(key, timestamp).isEmpty) {
logInfo(s"Added JAR $path at $key with timestamp $timestamp")
postEnvironmentUpdate()
} else {
logWarning(s"The jar $path has been added already. Overwriting of added jars " +
"is not supported in the current version.")
}
}
}
}
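  // Usage sketch: the three ways a path is treated by addJar. All paths are hypothetical.
  //
  //   sc.addJar("/opt/jobs/my-udfs.jar")            // driver-local file, served via the RPC file server
  //   sc.addJar("hdfs:///libs/shared-udfs.jar")     // remote path, validated and fetched by executors
  //   sc.addJar("local:/opt/libs/preinstalled.jar") // assumed to already exist on every worker
  //   sc.listJars().foreach(println)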
/**
* Returns a list of jar files that are added to resources.
*/
def listJars(): Seq[String] = addedJars.keySet.toSeq
/**
* When stopping SparkContext inside Spark components, it's easy to cause dead-lock since Spark
* may wait for some internal threads to finish. It's better to use this method to stop
* SparkContext instead.
*/
private[spark] def stopInNewThread(): Unit = {
new Thread("stop-spark-context") {
setDaemon(true)
override def run(): Unit = {
try {
SparkContext.this.stop()
} catch {
case e: Throwable =>
logError(e.getMessage, e)
throw e
}
}
}.start()
}
/**
* Shut down the SparkContext.
*/
def stop(): Unit = {
if (LiveListenerBus.withinListenerThread.value) {
throw new SparkException(s"Cannot stop SparkContext within listener bus thread.")
}
// Use the stopping variable to ensure no contention for the stop scenario.
// Still track the stopped variable for use elsewhere in the code.
if (!stopped.compareAndSet(false, true)) {
logInfo("SparkContext already stopped.")
return
}
if (_shutdownHookRef != null) {
ShutdownHookManager.removeShutdownHook(_shutdownHookRef)
}
if (listenerBus != null) {
Utils.tryLogNonFatalError {
postApplicationEnd()
}
}
Utils.tryLogNonFatalError {
_driverLogger.foreach(_.stop())
}
Utils.tryLogNonFatalError {
_ui.foreach(_.stop())
}
if (env != null) {
Utils.tryLogNonFatalError {
env.metricsSystem.report()
}
}
Utils.tryLogNonFatalError {
_cleaner.foreach(_.stop())
}
Utils.tryLogNonFatalError {
_executorAllocationManager.foreach(_.stop())
}
if (_dagScheduler != null) {
Utils.tryLogNonFatalError {
_dagScheduler.stop()
}
_dagScheduler = null
}
if (_listenerBusStarted) {
Utils.tryLogNonFatalError {
listenerBus.stop()
_listenerBusStarted = false
}
}
Utils.tryLogNonFatalError {
_plugins.foreach(_.shutdown())
}
Utils.tryLogNonFatalError {
_eventLogger.foreach(_.stop())
}
if (_heartbeater != null) {
Utils.tryLogNonFatalError {
_heartbeater.stop()
}
_heartbeater = null
}
if (_shuffleDriverComponents != null) {
Utils.tryLogNonFatalError {
_shuffleDriverComponents.cleanupApplication()
}
}
if (env != null && _heartbeatReceiver != null) {
Utils.tryLogNonFatalError {
env.rpcEnv.stop(_heartbeatReceiver)
}
}
Utils.tryLogNonFatalError {
_progressBar.foreach(_.stop())
}
_taskScheduler = null
// TODO: Cache.stop()?
if (_env != null) {
Utils.tryLogNonFatalError {
_env.stop()
}
SparkEnv.set(null)
}
if (_statusStore != null) {
_statusStore.close()
}
    // Clear this `InheritableThreadLocal`, or it will still be inherited in child threads even
    // after this `SparkContext` is stopped.
localProperties.remove()
ResourceProfile.clearDefaultProfile()
// Unset YARN mode system env variable, to allow switching between cluster types.
SparkContext.clearActiveContext()
logInfo("Successfully stopped SparkContext")
}
/**
* Get Spark's home location from either a value set through the constructor,
* or the spark.home Java property, or the SPARK_HOME environment variable
* (in that order of preference). If neither of these is set, return None.
*/
private[spark] def getSparkHome(): Option[String] = {
conf.getOption("spark.home").orElse(Option(System.getenv("SPARK_HOME")))
}
/**
* Set the thread-local property for overriding the call sites
* of actions and RDDs.
*/
def setCallSite(shortCallSite: String): Unit = {
setLocalProperty(CallSite.SHORT_FORM, shortCallSite)
}
/**
* Set the thread-local property for overriding the call sites
* of actions and RDDs.
*/
private[spark] def setCallSite(callSite: CallSite): Unit = {
setLocalProperty(CallSite.SHORT_FORM, callSite.shortForm)
setLocalProperty(CallSite.LONG_FORM, callSite.longForm)
}
/**
* Clear the thread-local property for overriding the call sites
* of actions and RDDs.
*/
def clearCallSite(): Unit = {
setLocalProperty(CallSite.SHORT_FORM, null)
setLocalProperty(CallSite.LONG_FORM, null)
}
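  // Usage sketch: tagging a job with a readable call site for the UI and event logs.
  //
  //   sc.setCallSite("nightly-aggregation")
  //   try {
  //     sc.parallelize(1 to 10).count()   // reported as "nightly-aggregation"
  //   } finally {
  //     sc.clearCallSite()
  //   }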
/**
* Capture the current user callsite and return a formatted version for printing. If the user
* has overridden the call site using `setCallSite()`, this will return the user's version.
*/
private[spark] def getCallSite(): CallSite = {
lazy val callSite = Utils.getCallSite()
CallSite(
Option(getLocalProperty(CallSite.SHORT_FORM)).getOrElse(callSite.shortForm),
Option(getLocalProperty(CallSite.LONG_FORM)).getOrElse(callSite.longForm)
)
}
/**
* Run a function on a given set of partitions in an RDD and pass the results to the given
* handler function. This is the main entry point for all actions in Spark.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @param resultHandler callback to pass each result to
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int],
resultHandler: (Int, U) => Unit): Unit = {
if (stopped.get()) {
throw new IllegalStateException("SparkContext has been shutdown")
}
val callSite = getCallSite
val cleanedFunc = clean(func)
logInfo("Starting job: " + callSite.shortForm)
if (conf.getBoolean("spark.logLineage", false)) {
logInfo("RDD's recursive dependencies:\\n" + rdd.toDebugString)
}
dagScheduler.runJob(rdd, cleanedFunc, partitions, callSite, resultHandler, localProperties.get)
progressBar.foreach(_.finishAll())
rdd.doCheckpoint()
}
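  // Usage sketch: computing per-partition sums for only the first two partitions and
  // collecting them through the result handler (the handler index follows `partitions`).
  //
  //   val rdd = sc.parallelize(1 to 100, 4)
  //   val sums = new Array[Int](2)
  //   sc.runJob[Int, Int](
  //     rdd,
  //     (_: TaskContext, it: Iterator[Int]) => it.sum,
  //     Seq(0, 1),
  //     (index, partitionSum) => sums(index) = partitionSum)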
/**
* Run a function on a given set of partitions in an RDD and return the results as an array.
* The function that is run against each partition additionally takes `TaskContext` argument.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
partitions: Seq[Int]): Array[U] = {
val results = new Array[U](partitions.size)
runJob[T, U](rdd, func, partitions, (index, res) => results(index) = res)
results
}
/**
* Run a function on a given set of partitions in an RDD and return the results as an array.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
func: Iterator[T] => U,
partitions: Seq[Int]): Array[U] = {
val cleanedFunc = clean(func)
runJob(rdd, (ctx: TaskContext, it: Iterator[T]) => cleanedFunc(it), partitions)
}
/**
* Run a job on all partitions in an RDD and return the results in an array. The function
* that is run against each partition additionally takes `TaskContext` argument.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](rdd: RDD[T], func: (TaskContext, Iterator[T]) => U): Array[U] = {
runJob(rdd, func, 0 until rdd.partitions.length)
}
/**
* Run a job on all partitions in an RDD and return the results in an array.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @return in-memory collection with a result of the job (each collection element will contain
* a result from one partition)
*/
def runJob[T, U: ClassTag](rdd: RDD[T], func: Iterator[T] => U): Array[U] = {
runJob(rdd, func, 0 until rdd.partitions.length)
}
/**
* Run a job on all partitions in an RDD and pass the results to a handler function. The function
* that is run against each partition additionally takes `TaskContext` argument.
*
* @param rdd target RDD to run tasks on
* @param processPartition a function to run on each partition of the RDD
* @param resultHandler callback to pass each result to
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
processPartition: (TaskContext, Iterator[T]) => U,
resultHandler: (Int, U) => Unit): Unit = {
runJob[T, U](rdd, processPartition, 0 until rdd.partitions.length, resultHandler)
}
/**
* Run a job on all partitions in an RDD and pass the results to a handler function.
*
* @param rdd target RDD to run tasks on
* @param processPartition a function to run on each partition of the RDD
* @param resultHandler callback to pass each result to
*/
def runJob[T, U: ClassTag](
rdd: RDD[T],
processPartition: Iterator[T] => U,
resultHandler: (Int, U) => Unit): Unit = {
val processFunc = (context: TaskContext, iter: Iterator[T]) => processPartition(iter)
runJob[T, U](rdd, processFunc, 0 until rdd.partitions.length, resultHandler)
}
/**
* :: DeveloperApi ::
* Run a job that can return approximate results.
*
* @param rdd target RDD to run tasks on
* @param func a function to run on each partition of the RDD
* @param evaluator `ApproximateEvaluator` to receive the partial results
* @param timeout maximum time to wait for the job, in milliseconds
* @return partial result (how partial depends on whether the job was finished before or
* after timeout)
*/
@DeveloperApi
def runApproximateJob[T, U, R](
rdd: RDD[T],
func: (TaskContext, Iterator[T]) => U,
evaluator: ApproximateEvaluator[U, R],
timeout: Long): PartialResult[R] = {
assertNotStopped()
val callSite = getCallSite
logInfo("Starting job: " + callSite.shortForm)
val start = System.nanoTime
val cleanedFunc = clean(func)
val result = dagScheduler.runApproximateJob(rdd, cleanedFunc, evaluator, callSite, timeout,
localProperties.get)
logInfo(
"Job finished: " + callSite.shortForm + ", took " + (System.nanoTime - start) / 1e9 + " s")
result
}
/**
* Submit a job for execution and return a FutureJob holding the result.
*
* @param rdd target RDD to run tasks on
* @param processPartition a function to run on each partition of the RDD
* @param partitions set of partitions to run on; some jobs may not want to compute on all
* partitions of the target RDD, e.g. for operations like `first()`
* @param resultHandler callback to pass each result to
* @param resultFunc function to be executed when the result is ready
*/
def submitJob[T, U, R](
rdd: RDD[T],
processPartition: Iterator[T] => U,
partitions: Seq[Int],
resultHandler: (Int, U) => Unit,
resultFunc: => R): SimpleFutureAction[R] =
{
assertNotStopped()
val cleanF = clean(processPartition)
val callSite = getCallSite
val waiter = dagScheduler.submitJob(
rdd,
(context: TaskContext, iter: Iterator[T]) => cleanF(iter),
partitions,
callSite,
resultHandler,
localProperties.get)
new SimpleFutureAction(waiter, resultFunc)
}
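  // Usage sketch: the non-blocking counterpart of runJob. `resultFunc` here just returns a
  // constant; a real caller would typically aggregate state updated by `resultHandler`.
  //
  //   val rdd = sc.parallelize(1 to 100, 4)
  //   val action: SimpleFutureAction[Int] = sc.submitJob[Int, Int, Int](
  //     rdd,
  //     _.sum,
  //     0 until rdd.partitions.length,
  //     (_, _) => (),
  //     42)
  //   // `action` can be cancelled, or awaited like any scala.concurrent.Future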
/**
* Submit a map stage for execution. This is currently an internal API only, but might be
* promoted to DeveloperApi in the future.
*/
private[spark] def submitMapStage[K, V, C](dependency: ShuffleDependency[K, V, C])
: SimpleFutureAction[MapOutputStatistics] = {
assertNotStopped()
val callSite = getCallSite()
var result: MapOutputStatistics = null
val waiter = dagScheduler.submitMapStage(
dependency,
(r: MapOutputStatistics) => { result = r },
callSite,
localProperties.get)
new SimpleFutureAction[MapOutputStatistics](waiter, result)
}
/**
* Cancel active jobs for the specified group. See `org.apache.spark.SparkContext.setJobGroup`
* for more information.
*/
def cancelJobGroup(groupId: String): Unit = {
assertNotStopped()
dagScheduler.cancelJobGroup(groupId)
}
/** Cancel all jobs that have been scheduled or are running. */
def cancelAllJobs(): Unit = {
assertNotStopped()
dagScheduler.cancelAllJobs()
}
/**
* Cancel a given job if it's scheduled or running.
*
* @param jobId the job ID to cancel
* @param reason optional reason for cancellation
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelJob(jobId: Int, reason: String): Unit = {
dagScheduler.cancelJob(jobId, Option(reason))
}
/**
* Cancel a given job if it's scheduled or running.
*
* @param jobId the job ID to cancel
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelJob(jobId: Int): Unit = {
dagScheduler.cancelJob(jobId, None)
}
/**
* Cancel a given stage and all jobs associated with it.
*
* @param stageId the stage ID to cancel
* @param reason reason for cancellation
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelStage(stageId: Int, reason: String): Unit = {
dagScheduler.cancelStage(stageId, Option(reason))
}
/**
* Cancel a given stage and all jobs associated with it.
*
* @param stageId the stage ID to cancel
* @note Throws `InterruptedException` if the cancel message cannot be sent
*/
def cancelStage(stageId: Int): Unit = {
dagScheduler.cancelStage(stageId, None)
}
/**
* Kill and reschedule the given task attempt. Task ids can be obtained from the Spark UI
* or through SparkListener.onTaskStart.
*
* @param taskId the task ID to kill. This id uniquely identifies the task attempt.
* @param interruptThread whether to interrupt the thread running the task.
* @param reason the reason for killing the task, which should be a short string. If a task
* is killed multiple times with different reasons, only one reason will be reported.
*
* @return Whether the task was successfully killed.
*/
def killTaskAttempt(
taskId: Long,
interruptThread: Boolean = true,
reason: String = "killed via SparkContext.killTaskAttempt"): Boolean = {
dagScheduler.killTaskAttempt(taskId, interruptThread, reason)
}
/**
* Clean a closure to make it ready to be serialized and sent to tasks
* (removes unreferenced variables in $outer's, updates REPL variables)
* If <tt>checkSerializable</tt> is set, <tt>clean</tt> will also proactively
* check to see if <tt>f</tt> is serializable and throw a <tt>SparkException</tt>
* if not.
*
* @param f the closure to clean
* @param checkSerializable whether or not to immediately check <tt>f</tt> for serializability
* @throws SparkException if <tt>checkSerializable</tt> is set but <tt>f</tt> is not
* serializable
* @return the cleaned closure
*/
private[spark] def clean[F <: AnyRef](f: F, checkSerializable: Boolean = true): F = {
ClosureCleaner.clean(f, checkSerializable)
f
}
/**
* Set the directory under which RDDs are going to be checkpointed.
* @param directory path to the directory where checkpoint files will be stored
* (must be HDFS path if running in cluster)
*/
def setCheckpointDir(directory: String): Unit = {
// If we are running on a cluster, log a warning if the directory is local.
// Otherwise, the driver may attempt to reconstruct the checkpointed RDD from
// its own local file system, which is incorrect because the checkpoint files
// are actually on the executor machines.
if (!isLocal && Utils.nonLocalPaths(directory).isEmpty) {
logWarning("Spark is not running in local mode, therefore the checkpoint directory " +
s"must not be on the local filesystem. Directory '$directory' " +
"appears to be on the local filesystem.")
}
checkpointDir = Option(directory).map { dir =>
val path = new Path(dir, UUID.randomUUID().toString)
val fs = path.getFileSystem(hadoopConfiguration)
fs.mkdirs(path)
fs.getFileStatus(path).getPath.toString
}
}
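  // Usage sketch (hypothetical HDFS directory): checkpointing truncates the lineage once a
  // job has materialised the RDD.
  //
  //   sc.setCheckpointDir("hdfs:///tmp/spark-checkpoints")
  //   val doubled = sc.parallelize(1 to 10).map(_ * 2)
  //   doubled.checkpoint()
  //   doubled.count()   // triggers the checkpoint write under the directory created above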
def getCheckpointDir: Option[String] = checkpointDir
/** Default level of parallelism to use when not given by user (e.g. parallelize and makeRDD). */
def defaultParallelism: Int = {
assertNotStopped()
taskScheduler.defaultParallelism
}
/**
* Default min number of partitions for Hadoop RDDs when not given by user
* Notice that we use math.min so the "defaultMinPartitions" cannot be higher than 2.
* The reasons for this are discussed in https://github.com/mesos/spark/pull/718
*/
def defaultMinPartitions: Int = math.min(defaultParallelism, 2)
private val nextShuffleId = new AtomicInteger(0)
private[spark] def newShuffleId(): Int = nextShuffleId.getAndIncrement()
private val nextRddId = new AtomicInteger(0)
/** Register a new RDD, returning its RDD ID */
private[spark] def newRddId(): Int = nextRddId.getAndIncrement()
/**
* Registers listeners specified in spark.extraListeners, then starts the listener bus.
* This should be called after all internal listeners have been registered with the listener bus
* (e.g. after the web UI and event logging listeners have been registered).
*/
private def setupAndStartListenerBus(): Unit = {
try {
conf.get(EXTRA_LISTENERS).foreach { classNames =>
val listeners = Utils.loadExtensions(classOf[SparkListenerInterface], classNames, conf)
listeners.foreach { listener =>
listenerBus.addToSharedQueue(listener)
logInfo(s"Registered listener ${listener.getClass().getName()}")
}
}
} catch {
case e: Exception =>
try {
stop()
} finally {
throw new SparkException(s"Exception when registering SparkListener", e)
}
}
listenerBus.start(this, _env.metricsSystem)
_listenerBusStarted = true
}
/** Post the application start event */
private def postApplicationStart(): Unit = {
// Note: this code assumes that the task scheduler has been initialized and has contacted
// the cluster manager to get an application ID (in case the cluster manager provides one).
listenerBus.post(SparkListenerApplicationStart(appName, Some(applicationId),
startTime, sparkUser, applicationAttemptId, schedulerBackend.getDriverLogUrls,
schedulerBackend.getDriverAttributes))
_driverLogger.foreach(_.startSync(_hadoopConfiguration))
}
/** Post the application end event */
private def postApplicationEnd(): Unit = {
listenerBus.post(SparkListenerApplicationEnd(System.currentTimeMillis))
}
/** Post the environment update event once the task scheduler is ready */
private def postEnvironmentUpdate(): Unit = {
if (taskScheduler != null) {
val schedulingMode = getSchedulingMode.toString
val addedJarPaths = addedJars.keys.toSeq
val addedFilePaths = addedFiles.keys.toSeq
val environmentDetails = SparkEnv.environmentDetails(conf, hadoopConfiguration,
schedulingMode, addedJarPaths, addedFilePaths)
val environmentUpdate = SparkListenerEnvironmentUpdate(environmentDetails)
listenerBus.post(environmentUpdate)
}
}
/** Reports heartbeat metrics for the driver. */
private def reportHeartBeat(executorMetricsSource: Option[ExecutorMetricsSource]): Unit = {
val currentMetrics = ExecutorMetrics.getCurrentMetrics(env.memoryManager)
executorMetricsSource.foreach(_.updateMetricsSnapshot(currentMetrics))
val driverUpdates = new HashMap[(Int, Int), ExecutorMetrics]
// In the driver, we do not track per-stage metrics, so use a dummy stage for the key
driverUpdates.put(EventLoggingListener.DRIVER_STAGE_KEY, new ExecutorMetrics(currentMetrics))
val accumUpdates = new Array[(Long, Int, Int, Seq[AccumulableInfo])](0)
listenerBus.post(SparkListenerExecutorMetricsUpdate("driver", accumUpdates,
driverUpdates))
}
// In order to prevent multiple SparkContexts from being active at the same time, mark this
// context as having finished construction.
// NOTE: this must be placed at the end of the SparkContext constructor.
SparkContext.setActiveContext(this)
}
/**
* The SparkContext object contains a number of implicit conversions and parameters for use with
* various Spark features.
*/
object SparkContext extends Logging {
private val VALID_LOG_LEVELS =
Set("ALL", "DEBUG", "ERROR", "FATAL", "INFO", "OFF", "TRACE", "WARN")
/**
* Lock that guards access to global variables that track SparkContext construction.
*/
private val SPARK_CONTEXT_CONSTRUCTOR_LOCK = new Object()
/**
* The active, fully-constructed SparkContext. If no SparkContext is active, then this is `null`.
*
* Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`.
*/
private val activeContext: AtomicReference[SparkContext] =
new AtomicReference[SparkContext](null)
/**
* Points to a partially-constructed SparkContext if another thread is in the SparkContext
* constructor, or `None` if no SparkContext is being constructed.
*
* Access to this field is guarded by `SPARK_CONTEXT_CONSTRUCTOR_LOCK`.
*/
private var contextBeingConstructed: Option[SparkContext] = None
/**
* Called to ensure that no other SparkContext is running in this JVM.
*
* Throws an exception if a running context is detected and logs a warning if another thread is
* constructing a SparkContext. This warning is necessary because the current locking scheme
* prevents us from reliably distinguishing between cases where another context is being
* constructed and cases where another constructor threw an exception.
*/
private def assertNoOtherContextIsRunning(sc: SparkContext): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
Option(activeContext.get()).filter(_ ne sc).foreach { ctx =>
val errMsg = "Only one SparkContext should be running in this JVM (see SPARK-2243)." +
s"The currently running SparkContext was created at:\\n${ctx.creationSite.longForm}"
throw new SparkException(errMsg)
}
contextBeingConstructed.filter(_ ne sc).foreach { otherContext =>
// Since otherContext might point to a partially-constructed context, guard against
// its creationSite field being null:
val otherContextCreationSite =
Option(otherContext.creationSite).map(_.longForm).getOrElse("unknown location")
val warnMsg = "Another SparkContext is being constructed (or threw an exception in its" +
" constructor). This may indicate an error, since only one SparkContext should be" +
" running in this JVM (see SPARK-2243)." +
s" The other SparkContext was created at:\\n$otherContextCreationSite"
logWarning(warnMsg)
}
}
}
/**
* This function may be used to get or instantiate a SparkContext and register it as a
* singleton object. Because we can only have one active SparkContext per JVM,
* this is useful when applications may wish to share a SparkContext.
*
   * @param config `SparkConf` that will be used for initialisation of the `SparkContext`
* @return current `SparkContext` (or a new one if it wasn't created before the function call)
*/
def getOrCreate(config: SparkConf): SparkContext = {
// Synchronize to ensure that multiple create requests don't trigger an exception
// from assertNoOtherContextIsRunning within setActiveContext
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
if (activeContext.get() == null) {
setActiveContext(new SparkContext(config))
} else {
if (config.getAll.nonEmpty) {
logWarning("Using an existing SparkContext; some configuration may not take effect.")
}
}
activeContext.get()
}
}
/**
* This function may be used to get or instantiate a SparkContext and register it as a
* singleton object. Because we can only have one active SparkContext per JVM,
* this is useful when applications may wish to share a SparkContext.
*
* This method allows not passing a SparkConf (useful if just retrieving).
*
* @return current `SparkContext` (or a new one if wasn't created before the function call)
*/
def getOrCreate(): SparkContext = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
if (activeContext.get() == null) {
setActiveContext(new SparkContext())
}
activeContext.get()
}
}
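  // Usage sketch: both overloads return the same instance once a context is active; the
  // configuration of a later call is ignored (with a warning) if a context already exists.
  //
  //   val sc1 = SparkContext.getOrCreate(new SparkConf().setMaster("local[2]").setAppName("app"))
  //   val sc2 = SparkContext.getOrCreate()
  //   assert(sc1 eq sc2)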
/** Return the current active [[SparkContext]] if any. */
private[spark] def getActive: Option[SparkContext] = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
Option(activeContext.get())
}
}
/**
* Called at the beginning of the SparkContext constructor to ensure that no SparkContext is
* running. Throws an exception if a running context is detected and logs a warning if another
* thread is constructing a SparkContext. This warning is necessary because the current locking
* scheme prevents us from reliably distinguishing between cases where another context is being
* constructed and cases where another constructor threw an exception.
*/
private[spark] def markPartiallyConstructed(sc: SparkContext): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
assertNoOtherContextIsRunning(sc)
contextBeingConstructed = Some(sc)
}
}
/**
* Called at the end of the SparkContext constructor to ensure that no other SparkContext has
* raced with this constructor and started.
*/
private[spark] def setActiveContext(sc: SparkContext): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
assertNoOtherContextIsRunning(sc)
contextBeingConstructed = None
activeContext.set(sc)
}
}
/**
* Clears the active SparkContext metadata. This is called by `SparkContext#stop()`. It's
* also called in unit tests to prevent a flood of warnings from test suites that don't / can't
* properly clean up their SparkContexts.
*/
private[spark] def clearActiveContext(): Unit = {
SPARK_CONTEXT_CONSTRUCTOR_LOCK.synchronized {
activeContext.set(null)
}
}
private[spark] val SPARK_JOB_DESCRIPTION = "spark.job.description"
private[spark] val SPARK_JOB_GROUP_ID = "spark.jobGroup.id"
private[spark] val SPARK_JOB_INTERRUPT_ON_CANCEL = "spark.job.interruptOnCancel"
private[spark] val SPARK_SCHEDULER_POOL = "spark.scheduler.pool"
private[spark] val RDD_SCOPE_KEY = "spark.rdd.scope"
private[spark] val RDD_SCOPE_NO_OVERRIDE_KEY = "spark.rdd.scope.noOverride"
/**
* Executor id for the driver. In earlier versions of Spark, this was `<driver>`, but this was
* changed to `driver` because the angle brackets caused escaping issues in URLs and XML (see
* SPARK-6716 for more details).
*/
private[spark] val DRIVER_IDENTIFIER = "driver"
private implicit def arrayToArrayWritable[T <: Writable : ClassTag](arr: Iterable[T])
: ArrayWritable = {
def anyToWritable[U <: Writable](u: U): Writable = u
new ArrayWritable(classTag[T].runtimeClass.asInstanceOf[Class[Writable]],
arr.map(x => anyToWritable(x)).toArray)
}
/**
* Find the JAR from which a given class was loaded, to make it easy for users to pass
* their JARs to SparkContext.
*
* @param cls class that should be inside of the jar
* @return jar that contains the Class, `None` if not found
*/
def jarOfClass(cls: Class[_]): Option[String] = {
val uri = cls.getResource("/" + cls.getName.replace('.', '/') + ".class")
if (uri != null) {
val uriStr = uri.toString
if (uriStr.startsWith("jar:file:")) {
// URI will be of the form "jar:file:/path/foo.jar!/package/cls.class",
// so pull out the /path/foo.jar
Some(uriStr.substring("jar:file:".length, uriStr.indexOf('!')))
} else {
None
}
} else {
None
}
}
/**
* Find the JAR that contains the class of a particular object, to make it easy for users
* to pass their JARs to SparkContext. In most cases you can call jarOfObject(this) in
* your driver program.
*
* @param obj reference to an instance which class should be inside of the jar
* @return jar that contains the class of the instance, `None` if not found
*/
def jarOfObject(obj: AnyRef): Option[String] = jarOfClass(obj.getClass)
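  // Usage sketch: shipping the jar that contains the driver class with the application.
  //
  //   val conf = new SparkConf()
  //     .setAppName("my-app")
  //     .setJars(SparkContext.jarOfObject(this).toSeq)   // empty when run from un-jarred classes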
/**
* Creates a modified version of a SparkConf with the parameters that can be passed separately
* to SparkContext, to make it easier to write SparkContext's constructors. This ignores
* parameters that are passed as the default value of null, instead of throwing an exception
* like SparkConf would.
*/
private[spark] def updatedConf(
conf: SparkConf,
master: String,
appName: String,
sparkHome: String = null,
jars: Seq[String] = Nil,
environment: Map[String, String] = Map()): SparkConf =
{
val res = conf.clone()
res.setMaster(master)
res.setAppName(appName)
if (sparkHome != null) {
res.setSparkHome(sparkHome)
}
if (jars != null && !jars.isEmpty) {
res.setJars(jars)
}
res.setExecutorEnv(environment.toSeq)
res
}
/**
* The number of cores available to the driver to use for tasks such as I/O with Netty
*/
private[spark] def numDriverCores(master: String): Int = {
numDriverCores(master, null)
}
/**
* The number of cores available to the driver to use for tasks such as I/O with Netty
*/
private[spark] def numDriverCores(master: String, conf: SparkConf): Int = {
def convertToInt(threads: String): Int = {
if (threads == "*") Runtime.getRuntime.availableProcessors() else threads.toInt
}
master match {
case "local" => 1
case SparkMasterRegex.LOCAL_N_REGEX(threads) => convertToInt(threads)
case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(threads, _) => convertToInt(threads)
case "yarn" =>
if (conf != null && conf.get(SUBMIT_DEPLOY_MODE) == "cluster") {
conf.getInt(DRIVER_CORES.key, 0)
} else {
0
}
case _ => 0 // Either driver is not being used, or its core count will be interpolated later
}
}
/**
* Create a task scheduler based on a given master URL.
* Return a 2-tuple of the scheduler backend and the task scheduler.
*/
private def createTaskScheduler(
sc: SparkContext,
master: String,
deployMode: String): (SchedulerBackend, TaskScheduler) = {
import SparkMasterRegex._
// When running locally, don't try to re-execute tasks on failure.
val MAX_LOCAL_TASK_FAILURES = 1
    // Ensure that the default executor's resources satisfy at least one task's requirements.
    // This check is for cluster managers that don't set the executor cores config; for the
    // others it is checked in ResourceProfile.
def checkResourcesPerTask(executorCores: Int): Unit = {
val taskCores = sc.conf.get(CPUS_PER_TASK)
validateTaskCpusLargeEnough(executorCores, taskCores)
val defaultProf = sc.resourceProfileManager.defaultResourceProfile
// TODO - this is temporary until all of stage level scheduling feature is integrated,
// fail if any other resource limiting due to dynamic allocation and scheduler using
// slots based on cores
val cpuSlots = executorCores/taskCores
val limitingResource = defaultProf.limitingResource(sc.conf)
if (limitingResource.nonEmpty && !limitingResource.equals(ResourceProfile.CPUS) &&
defaultProf.maxTasksPerExecutor(sc.conf) < cpuSlots) {
throw new IllegalArgumentException("The number of slots on an executor has to be " +
"limited by the number of cores, otherwise you waste resources and " +
"some scheduling doesn't work properly. Your configuration has " +
s"core/task cpu slots = ${cpuSlots} and " +
s"${limitingResource} = " +
s"${defaultProf.maxTasksPerExecutor(sc.conf)}. Please adjust your configuration " +
"so that all resources require same number of executor slots.")
}
ResourceUtils.warnOnWastedResources(defaultProf, sc.conf, Some(executorCores))
}
master match {
case "local" =>
checkResourcesPerTask(1)
val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
val backend = new LocalSchedulerBackend(sc.getConf, scheduler, 1)
scheduler.initialize(backend)
(backend, scheduler)
case LOCAL_N_REGEX(threads) =>
def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
// local[*] estimates the number of cores on the machine; local[N] uses exactly N threads.
val threadCount = if (threads == "*") localCpuCount else threads.toInt
if (threadCount <= 0) {
throw new SparkException(s"Asked to run locally with $threadCount threads")
}
checkResourcesPerTask(threadCount)
val scheduler = new TaskSchedulerImpl(sc, MAX_LOCAL_TASK_FAILURES, isLocal = true)
val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount)
scheduler.initialize(backend)
(backend, scheduler)
case LOCAL_N_FAILURES_REGEX(threads, maxFailures) =>
def localCpuCount: Int = Runtime.getRuntime.availableProcessors()
// local[*, M] means the number of cores on the computer with M failures
// local[N, M] means exactly N threads with M failures
val threadCount = if (threads == "*") localCpuCount else threads.toInt
checkResourcesPerTask(threadCount)
val scheduler = new TaskSchedulerImpl(sc, maxFailures.toInt, isLocal = true)
val backend = new LocalSchedulerBackend(sc.getConf, scheduler, threadCount)
scheduler.initialize(backend)
(backend, scheduler)
case SPARK_REGEX(sparkUrl) =>
val scheduler = new TaskSchedulerImpl(sc)
val masterUrls = sparkUrl.split(",").map("spark://" + _)
val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls)
scheduler.initialize(backend)
(backend, scheduler)
case LOCAL_CLUSTER_REGEX(numSlaves, coresPerSlave, memoryPerSlave) =>
checkResourcesPerTask(coresPerSlave.toInt)
// Check to make sure memory requested <= memoryPerSlave. Otherwise Spark will just hang.
val memoryPerSlaveInt = memoryPerSlave.toInt
if (sc.executorMemory > memoryPerSlaveInt) {
throw new SparkException(
"Asked to launch cluster with %d MiB RAM / worker but requested %d MiB/worker".format(
memoryPerSlaveInt, sc.executorMemory))
}
// For host local mode setting the default of SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED
// to false because this mode is intended to be used for testing and in this case all the
// executors are running on the same host. So if host local reading was enabled here then
// testing of the remote fetching would be secondary as setting this config explicitly to
// false would be required in most of the unit test (despite the fact that remote fetching
// is much more frequent in production).
sc.conf.setIfMissing(SHUFFLE_HOST_LOCAL_DISK_READING_ENABLED, false)
val scheduler = new TaskSchedulerImpl(sc)
val localCluster = new LocalSparkCluster(
numSlaves.toInt, coresPerSlave.toInt, memoryPerSlaveInt, sc.conf)
val masterUrls = localCluster.start()
val backend = new StandaloneSchedulerBackend(scheduler, sc, masterUrls)
scheduler.initialize(backend)
backend.shutdownCallback = (backend: StandaloneSchedulerBackend) => {
localCluster.stop()
}
(backend, scheduler)
case masterUrl =>
val cm = getClusterManager(masterUrl) match {
case Some(clusterMgr) => clusterMgr
case None => throw new SparkException("Could not parse Master URL: '" + master + "'")
}
try {
val scheduler = cm.createTaskScheduler(sc, masterUrl)
val backend = cm.createSchedulerBackend(sc, masterUrl, scheduler)
cm.initialize(scheduler, backend)
(backend, scheduler)
} catch {
case se: SparkException => throw se
case NonFatal(e) =>
throw new SparkException("External scheduler cannot be instantiated", e)
}
}
}
private def getClusterManager(url: String): Option[ExternalClusterManager] = {
val loader = Utils.getContextOrSparkClassLoader
val serviceLoaders =
ServiceLoader.load(classOf[ExternalClusterManager], loader).asScala.filter(_.canCreate(url))
if (serviceLoaders.size > 1) {
throw new SparkException(
s"Multiple external cluster managers registered for the url $url: $serviceLoaders")
}
serviceLoaders.headOption
}
}
/**
* A collection of regexes for extracting information from the master string.
*/
private object SparkMasterRegex {
  // Regular expression used for local[N] and local[*] master formats
  val LOCAL_N_REGEX = """local\[([0-9]+|\*)\]""".r
  // Regular expression for local[N, maxRetries], used in tests with failing tasks
  val LOCAL_N_FAILURES_REGEX = """local\[([0-9]+|\*)\s*,\s*([0-9]+)\]""".r
  // Regular expression for simulating a Spark cluster of [N, cores, memory] locally
  val LOCAL_CLUSTER_REGEX = """local-cluster\[\s*([0-9]+)\s*,\s*([0-9]+)\s*,\s*([0-9]+)\s*]""".r
// Regular expression for connecting to Spark deploy clusters
val SPARK_REGEX = """spark://(.*)""".r
}
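// Examples of master strings these regexes accept (not part of the original source):
//
//   "local[4]" match { case SparkMasterRegex.LOCAL_N_REGEX(n) => n }                       // "4"
//   "local[*, 3]" match { case SparkMasterRegex.LOCAL_N_FAILURES_REGEX(n, f) => (n, f) }   // all cores, 3 failures
//   "local-cluster[2, 1, 1024]"       // LOCAL_CLUSTER_REGEX: 2 workers, 1 core, 1024 MiB each
//   "spark://host1:7077,host2:7077"   // SPARK_REGEX; the captured part is split on ',' by the caller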
/**
* A class encapsulating how to convert some type `T` from `Writable`. It stores both the `Writable`
* class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the
* conversion.
* The getter for the writable class takes a `ClassTag[T]` in case this is a generic object
* that doesn't know the type of `T` when it is created. This sounds strange but is necessary to
* support converting subclasses of `Writable` to themselves (`writableWritableConverter()`).
*/
private[spark] class WritableConverter[T](
val writableClass: ClassTag[T] => Class[_ <: Writable],
val convert: Writable => T)
extends Serializable
object WritableConverter {
// Helper objects for converting common types to Writable
private[spark] def simpleWritableConverter[T, W <: Writable: ClassTag](convert: W => T)
: WritableConverter[T] = {
val wClass = classTag[W].runtimeClass.asInstanceOf[Class[W]]
new WritableConverter[T](_ => wClass, x => convert(x.asInstanceOf[W]))
}
// The following implicit functions were in SparkContext before 1.3 and users had to
// `import SparkContext._` to enable them. Now we move them here to make the compiler find
// them automatically. However, we still keep the old functions in SparkContext for backward
// compatibility and forward to the following functions directly.
// The following implicit declarations have been added on top of the very similar ones
// below in order to enable compatibility with Scala 2.12. Scala 2.12 deprecates eta
// expansion of zero-arg methods and thus won't match a no-arg method where it expects
// an implicit that is a function of no args.
implicit val intWritableConverterFn: () => WritableConverter[Int] =
() => simpleWritableConverter[Int, IntWritable](_.get)
implicit val longWritableConverterFn: () => WritableConverter[Long] =
() => simpleWritableConverter[Long, LongWritable](_.get)
implicit val doubleWritableConverterFn: () => WritableConverter[Double] =
() => simpleWritableConverter[Double, DoubleWritable](_.get)
implicit val floatWritableConverterFn: () => WritableConverter[Float] =
() => simpleWritableConverter[Float, FloatWritable](_.get)
implicit val booleanWritableConverterFn: () => WritableConverter[Boolean] =
() => simpleWritableConverter[Boolean, BooleanWritable](_.get)
implicit val bytesWritableConverterFn: () => WritableConverter[Array[Byte]] = {
() => simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
      // getBytes returns an array that is longer than the actual data; copy only the valid range
Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
}
}
implicit val stringWritableConverterFn: () => WritableConverter[String] =
() => simpleWritableConverter[String, Text](_.toString)
implicit def writableWritableConverterFn[T <: Writable : ClassTag]: () => WritableConverter[T] =
() => new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])
// These implicits remain included for backwards-compatibility. They fulfill the
// same role as those above.
implicit def intWritableConverter(): WritableConverter[Int] =
simpleWritableConverter[Int, IntWritable](_.get)
implicit def longWritableConverter(): WritableConverter[Long] =
simpleWritableConverter[Long, LongWritable](_.get)
implicit def doubleWritableConverter(): WritableConverter[Double] =
simpleWritableConverter[Double, DoubleWritable](_.get)
implicit def floatWritableConverter(): WritableConverter[Float] =
simpleWritableConverter[Float, FloatWritable](_.get)
implicit def booleanWritableConverter(): WritableConverter[Boolean] =
simpleWritableConverter[Boolean, BooleanWritable](_.get)
implicit def bytesWritableConverter(): WritableConverter[Array[Byte]] = {
simpleWritableConverter[Array[Byte], BytesWritable] { bw =>
      // getBytes returns an array that is longer than the actual data; copy only the valid range
Arrays.copyOfRange(bw.getBytes, 0, bw.getLength)
}
}
implicit def stringWritableConverter(): WritableConverter[String] =
simpleWritableConverter[String, Text](_.toString)
implicit def writableWritableConverter[T <: Writable](): WritableConverter[T] =
new WritableConverter[T](_.runtimeClass.asInstanceOf[Class[T]], _.asInstanceOf[T])
}
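// Usage sketch: these converters are pulled in implicitly by SparkContext.sequenceFile, so a
// Hadoop SequenceFile of (Text, IntWritable) can be read directly as Scala types. The path is
// hypothetical.
//
//   val counts: RDD[(String, Int)] = sc.sequenceFile[String, Int]("hdfs:///data/word-counts.seq")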
/**
* A class encapsulating how to convert some type `T` to `Writable`. It stores both the `Writable`
* class corresponding to `T` (e.g. `IntWritable` for `Int`) and a function for doing the
* conversion.
* The `Writable` class will be used in `SequenceFileRDDFunctions`.
*/
private[spark] class WritableFactory[T](
val writableClass: ClassTag[T] => Class[_ <: Writable],
val convert: T => Writable) extends Serializable
object WritableFactory {
private[spark] def simpleWritableFactory[T: ClassTag, W <: Writable : ClassTag](convert: T => W)
: WritableFactory[T] = {
val writableClass = implicitly[ClassTag[W]].runtimeClass.asInstanceOf[Class[W]]
new WritableFactory[T](_ => writableClass, convert)
}
implicit def intWritableFactory: WritableFactory[Int] =
simpleWritableFactory(new IntWritable(_))
implicit def longWritableFactory: WritableFactory[Long] =
simpleWritableFactory(new LongWritable(_))
implicit def floatWritableFactory: WritableFactory[Float] =
simpleWritableFactory(new FloatWritable(_))
implicit def doubleWritableFactory: WritableFactory[Double] =
simpleWritableFactory(new DoubleWritable(_))
implicit def booleanWritableFactory: WritableFactory[Boolean] =
simpleWritableFactory(new BooleanWritable(_))
implicit def bytesWritableFactory: WritableFactory[Array[Byte]] =
simpleWritableFactory(new BytesWritable(_))
implicit def stringWritableFactory: WritableFactory[String] =
simpleWritableFactory(new Text(_))
implicit def writableWritableFactory[T <: Writable: ClassTag]: WritableFactory[T] =
simpleWritableFactory(w => w)
}
| goldmedal/spark | core/src/main/scala/org/apache/spark/SparkContext.scala | Scala | apache-2.0 | 124,013 |
package services.publisher
import com.google.inject.{Inject, Singleton}
import common.publisher.{BasicPubSubService, Event, Param}
import monix.execution.Scheduler
import models.Message
import scala.language.postfixOps
@Singleton
class MessagePubSubServiceImpl @Inject()(implicit scheduler: Scheduler) extends BasicPubSubService[Event[Message]] {
override def filter(event: Event[Message], params: Seq[Param]): Boolean = {
if (params isEmpty) {
true
} else {
params.exists {
case entityId: EntityId => event.element.id == entityId.id
case endCursor: EndCursor => endCursor.cursor <= event.element.id
}
}
}
}
case class EntityId(id: Int) extends Param
case class EndCursor(cursor: Int) extends Param
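// Filtering sketch (not part of the original source); `event` stands for an Event[Message]
// and the service needs an implicit monix Scheduler in scope.
//
//   val service = new MessagePubSubServiceImpl()
//   service.filter(event, Seq.empty)            // true: no params means every subscriber matches
//   service.filter(event, Seq(EntityId(7)))     // true only when event.element.id == 7
//   service.filter(event, Seq(EndCursor(42)))   // true only when event.element.id >= 42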
| sysgears/apollo-universal-starter-kit | modules/chat/server-scala/src/main/scala/services/publisher/MessagePubSubServiceImpl.scala | Scala | mit | 753 |
package evescala.cache
import com.twitter.storehaus.cache.TTLCache
import com.twitter.util.Duration
class PerKeyTTLCache[K, V](
defaultTTL: Duration
) extends TTLCache[K, V](ttl = defaultTTL, cache = Map.empty)(() => System.currentTimeMillis) {
def putWithTTL(kv: (K, V), ttl: Duration): (Set[K], TTLCache[K, V]) = {
val (k, v) = kv
val now: Long = clock()
putWithTime((k, (now + ttl.inMilliseconds, v)), now)
}
}
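// Usage sketch (not part of the original source): per-entry TTLs on top of the default.
// Note that putWithTTL returns a plain TTLCache, so the per-key TTL applies to this put only.
//
//   val cache = new PerKeyTTLCache[String, Int](Duration.fromSeconds(300))
//   val (evictedKeys, updated) = cache.putWithTTL("session-abc" -> 1, Duration.fromSeconds(30))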
| pequalsnp/evescala | src/main/scala/evescala/cache/PerKeyTTLCache.scala | Scala | apache-2.0 | 434 |
package io.skysail.core.server
import akka.actor.{ActorRef, ActorSystem, Props}
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.RouteResult.route2HandlerFlow
import akka.osgi.ActorSystemActivator
import akka.stream.ActorMaterializer
import akka.util.Timeout
import domino.DominoActivator
import domino.bundle_watching.BundleWatcherEvent.{AddingBundle, ModifiedBundle, RemovedBundle}
import domino.capsule.Capsule
import domino.service_watching.ServiceWatcherContext
import domino.service_watching.ServiceWatcherEvent.{AddingService, ModifiedService, RemovedService}
import io.skysail.api.security.AuthenticationService
import io.skysail.core.Constants
import io.skysail.core.app.SkysailApplication.CreateApplicationActor
import io.skysail.core.app.{ApplicationProvider, SkysailApplication, SkysailRootApplication}
import io.skysail.core.server.actors.{ApplicationsActor, BundlesActor}
import io.skysail.core.server.routes.RoutesTracker
import org.osgi.framework.BundleContext
import org.slf4j.LoggerFactory
import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
case class ServerConfig(port: Integer, binding: String)
class AkkaServer extends DominoActivator {
private var log = LoggerFactory.getLogger(this.getClass)
implicit var actorSystem: ActorSystem = _
var futureBinding: Future[Http.ServerBinding] = _
var applicationsActor: ActorRef = _
var bundlesActor: ActorRef = _
val defaultPort = 8080
val defaultBinding = "localhost"
val defaultAuthentication = "HTTP_BASIC"
var serverConfig = new ServerConfig(defaultPort, defaultBinding)
var routesTracker: RoutesTracker = null
private class AkkaCapsule(bundleContext: BundleContext) extends ActorSystemActivator with Capsule {
override def start(): Unit = start(bundleContext)
override def stop(): Unit = stop(bundleContext)
def configure(osgiContext: BundleContext, system: ActorSystem): Unit = {
log info "Registering Actor System as Service."
//registerService(osgiContext, system)
log info s"ActorSystem [${system.name}] initialized."
actorSystem = system
applicationsActor = system.actorOf(Props[ApplicationsActor], Constants.APPLICATIONS_ACTOR_NAME)
log info s"created ApplicationsActor with path ${applicationsActor.path}"
bundlesActor = system.actorOf(Props(new BundlesActor(bundleContext)), Constants.BUNDLES_ACTOR_NAME)
log info s"created BundlesActor with path ${bundlesActor.path}"
//routesTracker = new RoutesTracker(system, serverConfig.authentication)
}
override def getActorSystemName(context: BundleContext): String = "SkysailActorSystem"
}
whenBundleActive({
addCapsule(new AkkaCapsule(bundleContext))
watchServices[ApplicationProvider] {
case AddingService(service, context) => addApplicationProvider(service, context)
case ModifiedService(service, context) => log info s"Service '$service' modified"; addApplicationProvider(service, context)
case RemovedService(service, _) => removeApplicationProvider(service)
}
watchServices[AuthenticationService] {
case AddingService(service, context) => routesTracker.setAuthentication(service)
case ModifiedService(service, _) => //log info s"Service '$service' modified"
case RemovedService(service, _) => removeAuthenticationService(service)
}
watchBundles {
case AddingBundle(b, context) => bundlesActor ! BundlesActor.CreateBundleActor(b)
case ModifiedBundle(b, _) => //log info s"Bundle ${b.getSymbolicName} modified"
case RemovedBundle(b, _) => log info s"Bundle ${b.getSymbolicName} removed"
}
whenConfigurationActive("server") { conf =>
log info s"received configuration for 'server': ${conf}"
val port = Integer.parseInt(conf.getOrElse("port", defaultPort.toString).asInstanceOf[String])
var binding = conf.getOrElse("binding", defaultBinding).asInstanceOf[String]
//var authentication = conf.getOrElse("authentication", defaultAuthentication).asInstanceOf[String]
serverConfig = ServerConfig(port, binding)
routesTracker = new RoutesTracker(actorSystem)
val app = new SkysailRootApplication()
app.providesService[ApplicationProvider]
}
})
private def addApplicationProvider(appInfoProvider: ApplicationProvider, ctx: ServiceWatcherContext[_]): Unit = {
log info s"adding ApplicationProvider: $appInfoProvider; $ctx"
createApplicationActor(appInfoProvider)
routesTracker.addRoutesFor(appInfoProvider)
restartServer(routesTracker.routes())
}
private def removeApplicationProvider(appInfoProvider: ApplicationProvider) = {
routesTracker.removeRoutesFrom(appInfoProvider)
restartServer(routesTracker.routes())
}
private def removeAuthenticationService(authenticationService: AuthenticationService) = {
routesTracker.setAuthentication(null)
}
private def startServer(arg: List[Route]) = {
implicit val materializer = ActorMaterializer()
log info s"(re)starting server with binding ${serverConfig.binding}:${serverConfig.port} with #${routesTracker.routes.size} routes."
arg.size match {
case 0 =>
log warn "Akka HTTP Server not started as no routes are defined"; null
case 1 => Http(actorSystem).bindAndHandle(arg(0), serverConfig.binding, serverConfig.port)
case _ => Http(actorSystem).bindAndHandle(arg.reduce((a, b) => a ~ b), serverConfig.binding, serverConfig.port)
}
}
private def restartServer(routes: List[Route]) = {
implicit val materializer = ActorMaterializer()
if (futureBinding != null) {
implicit val executionContext = actorSystem.dispatcher
futureBinding.flatMap(_.unbind()).onComplete { _ => futureBinding = startServer(routes) }
} else {
futureBinding = startServer(routes)
}
}
private def createApplicationActor(appInfoProvider: ApplicationProvider) = {
if (appInfoProvider == null) {
log warn "provided ApplicationProvider was null!"
} else {
implicit val askTimeout: Timeout = 1.seconds
val appsActor = SkysailApplication.getApplicationsActor(actorSystem)
val appClass = appInfoProvider.getClass.asInstanceOf[Class[SkysailApplication]]
val appModel = appInfoProvider.appModel()
val application = appInfoProvider.application()
val optionalBundleContext = appInfoProvider.getBundleContext()
appsActor ! CreateApplicationActor(appClass, appModel, application, optionalBundleContext)
}
}
} | evandor/skysail-core | skysail.core/src/io/skysail/core/server/AkkaServer.scala | Scala | apache-2.0 | 6,604 |
package xyz.nabijaczleweli.nactors.process
import java.util.UUID
import xyz.nabijaczleweli.lonning.Logger
import xyz.nabijaczleweli.lonning.loutput.FileLOutput
import xyz.nabijaczleweli.nactors.Actor
import xyz.nabijaczleweli.scala_game_of_life.util.DataUtil
/** Contains <tt>Actor</tt>s and <tt>ActorThread</tt>s.
*
* @author Jędrzej
* @since 22.04.14
*/
class ActorsHome(private final val homeName: String) extends ThreadGroup(homeName) with Logger[FileLOutput] {
private[nactors] var actors = Vector[Actor]()
private[nactors] var actorThreads = Vector[ActorThread]()
private[nactors] val loggingFileName = s"Actors from $homeName.log"
//new File(loggingFileName).delete
def kill(actorName: String) {
try {
var tmpact = Vector[Actor]()
var idx = -1
var tmp = 0
for(a <- actors; if idx == -1) {
if(a.name == actorName)
idx = tmp
else
tmpact +:= a
tmp += 1
}
actors = tmpact
actorThreads(idx).interrupt()
actorThreads = DataUtil.removeFromVector[ActorThread](actorThreads, idx)
actorThreads foreach {_.updateidx()}
} catch {
case _: ArrayIndexOutOfBoundsException =>
}
}
def killAll() {
actorThreads foreach {_.interrupt()}
var idx = 0
for(t <- actorThreads) {
val timeStart = System.currentTimeMillis
while(t.isAlive && (System.currentTimeMillis - timeStart) < 2500) // Kill after 2.5 seconds.
Thread sleep 10
if(t.isAlive) {
println(s"Force killing $t.")
actorThreads = actorThreads.updated(idx, null)
}
idx += 1
}
actorThreads = Vector[ActorThread]()
actors = Vector[Actor]()
}
def add(actor: Actor) = {
actor.home = Some(this)
actors :+= actor
actorThreads :+= new ActorThread(actor.name, this, actorThreads.length - 1)
get(actor.name)
}
def get(actorName: String) = {
val idx = actorThreads.indexWhere(_.getName == actorName)
if(idx == -1)
throw new IllegalArgumentException(s"Couldn't find name: \\'$actorName\\'.")
if(!actorThreads(idx).isAlive)
actorThreads(idx).start()
new ActorPublic(this, actorThreads(idx), actors(idx))
}
def forUUID(uuid: UUID): String =
actors(actorThreads.indexWhere(uuid == _.uuid)).name
override val logger = new FileLOutput(homeName, loggingFileName)(true)
logger.ensureFlushed()
}
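// Usage sketch (not in the original source). `MyCellActor` is a hypothetical Actor subclass;
// only members defined above are exercised.
//
//   val home = new ActorsHome("life-simulation")
//   home.add(new MyCellActor("cell-0"))   // registers the actor and returns an ActorPublic handle
//   home.get("cell-0")                    // starts the backing thread lazily on first access
//   home.kill("cell-0")                   // interrupts just that actor's thread
//   home.killAll()                        // interrupts all threads, force-killing after ~2.5 s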
| nabijaczleweli/Scala-Game-of-Life | src/main/scala/xyz/nabijaczleweli/nactors/process/ActorsHome.scala | Scala | mit | 2,283 |
package com.sksamuel.elastic4s.searches.aggs.pipeline
import org.elasticsearch.search.aggregations.bucket.histogram.DateHistogramInterval
import org.elasticsearch.search.aggregations.pipeline.BucketHelpers.GapPolicy
import org.elasticsearch.search.aggregations.pipeline.PipelineAggregatorBuilders
import org.elasticsearch.search.aggregations.pipeline.derivative.DerivativePipelineAggregationBuilder
import scala.collection.JavaConverters._
case class DerivativeDefinition(name: String,
bucketsPath: String,
format: Option[String] = None,
gapPolicy: Option[GapPolicy] = None,
unit: Option[DateHistogramInterval] = None,
unitString: Option[String] = None,
metadata: Map[String, AnyRef] = Map.empty) extends PipelineAggregationDefinition {
type T = DerivativePipelineAggregationBuilder
def builder: T = {
val builder = PipelineAggregatorBuilders.derivative(name, bucketsPath)
if (metadata.nonEmpty) builder.setMetaData(metadata.asJava)
format.foreach(builder.format)
gapPolicy.foreach(builder.gapPolicy)
unit.foreach(builder.unit)
unitString.foreach(builder.unit)
builder
}
def unit(unit: DateHistogramInterval): DerivativeDefinition = copy(unit = Some(unit))
def unit(unit: String): DerivativeDefinition = copy(unitString = Some(unit))
def format(format: String): DerivativeDefinition = copy(format = Some(format))
def gapPolicy(gapPolicy: GapPolicy): DerivativeDefinition = copy(gapPolicy = Some(gapPolicy))
def metadata(metadata: Map[String, AnyRef]): DerivativeDefinition = copy(metadata = metadata)
}
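// Usage sketch (not part of the original source): a derivative over a hypothetical
// "monthly_sales" sum nested in a date_histogram, reported per day.
//
//   val salesGrowth = DerivativeDefinition("sales_growth", bucketsPath = "monthly_sales")
//     .unit(DateHistogramInterval.DAY)
//     .format("#,##0.00")
//   val esBuilder = salesGrowth.builder   // Elasticsearch DerivativePipelineAggregationBuilder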
| ulric260/elastic4s | elastic4s-core/src/main/scala/com/sksamuel/elastic4s/searches/aggs/pipeline/DerivativeDefinition.scala | Scala | apache-2.0 | 1,745 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.datamap
import scala.collection.JavaConverters._
import org.apache.spark.Partition
import org.apache.spark.rdd.CarbonMergeFilePartition
import org.apache.spark.sql.SparkSession
import org.apache.spark.TaskContext
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.util.path.CarbonTablePath
import org.apache.carbondata.datamap.bloom.BloomIndexFileStore
import org.apache.carbondata.spark.rdd.CarbonRDD
/**
* RDD to merge all bloomindex files of specified segment for bloom datamap
*/
class CarbonMergeBloomIndexFilesRDD(
@transient ss: SparkSession,
carbonTable: CarbonTable,
segmentIds: Seq[String],
bloomDatamapNames: Seq[String],
bloomIndexColumns: Seq[Seq[String]])
extends CarbonRDD[String](ss, Nil) {
override def internalGetPartitions: Array[Partition] = {
segmentIds.zipWithIndex.map {s =>
CarbonMergeFilePartition(id, s._2, s._1)
}.toArray
}
override def internalCompute(theSplit: Partition, context: TaskContext): Iterator[String] = {
val tablePath = carbonTable.getTablePath
val split = theSplit.asInstanceOf[CarbonMergeFilePartition]
logInfo("Merging bloom index files of " +
s"segment ${split.segmentId} for ${carbonTable.getTableName}")
bloomDatamapNames.zipWithIndex.map( dm => {
val dmSegmentPath = CarbonTablePath.getDataMapStorePath(
tablePath, split.segmentId, dm._1)
BloomIndexFileStore.mergeBloomIndexFile(dmSegmentPath, bloomIndexColumns(dm._2).asJava)
})
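    // The bloom index merge above runs eagerly; the iterator handed back to Spark never
    // yields an element (hasNext flips `finished` on its first call and returns false).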
val iter = new Iterator[String] {
var havePair = false
var finished = false
override def hasNext: Boolean = {
if (!finished && !havePair) {
finished = true
havePair = !finished
}
!finished
}
override def next(): String = {
if (!hasNext) {
throw new java.util.NoSuchElementException("End of stream")
}
havePair = false
""
}
}
iter
}
}
| sgururajshetty/carbondata | integration/spark2/src/main/scala/org/apache/carbondata/datamap/CarbonMergeBloomIndexFilesRDD.scala | Scala | apache-2.0 | 2,824 |
/*
* Original implementation (C) 2009-2011 Debasish Ghosh
* Adapted and extended in 2011 by Mathias Doenitz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spray.json
/**
* Provides all the predefined JsonFormats.
*/
trait DefaultJsonProtocol
extends BasicFormats
with StandardFormats
with CollectionFormats
with ProductFormats
with AdditionalFormats
object DefaultJsonProtocol extends DefaultJsonProtocol
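// Usage sketch (added for illustration; not part of the original spray-json sources).
// `Color` is a hypothetical case class; everything else comes from the formats and the
// jsonFormat2 helper that this protocol already provides.
private[json] object DefaultJsonProtocolUsageExample {
  import DefaultJsonProtocol._

  final case class Color(name: String, red: Int)
  implicit val colorFormat: RootJsonFormat[Color] = jsonFormat2(Color)

  // List/Int formats come from CollectionFormats/BasicFormats; toJson/convertTo round-trip values.
  val listJson: JsValue = List(1, 2, 3).toJson
  val roundTripped: Color = Color("red", 255).toJson.convertTo[Color]
}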
| rsun07/spray-json | src/main/scala/spray/json/DefaultJsonProtocol.scala | Scala | apache-2.0 | 972 |
package common
import org.mashupbots.socko.events.WebSocketFrameEvent
import akka.actor.{Actor, ActorRef}
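// Message types exchanged between the Socko web-socket front end and the chat/user handler actors.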
case class POST(socketId: String, message: String)
case class MESSAGES(messages: List[String])
case class GETTAGMESSAGES(event: WebSocketFrameEvent)
case class REQUESTFORTAG(tag: String)
case class GETUSERMESSAGES(event: WebSocketFrameEvent)
case class REQUESTFORUSER(username: String)
case class POSTMESSAGE(event: WebSocketFrameEvent)
case class POSTTAGMESSAGE(event: WebSocketFrameEvent)
case class POSTBYTAG(tag: String, message: String)
case class POSTBYUSER(username: String, message: String)
case class POSTED(response: Boolean)
case class POSTLOGIN(event: WebSocketFrameEvent)
case class LOGIN(username: String)
case class LOGGED(username: String)
case class CONNECT(clientId: String, username: String, userHandler: ActorRef)
case class CONNECTED(response: String)
case class POSTLOGOUT(event: WebSocketFrameEvent)
case class DISCONNECT(clientId: String, username: String, userHandler: ActorRef)
case class DISCONNECTED(response: Boolean)
case class STARTFOLLOWING(event: WebSocketFrameEvent)
case class FORWARDFOLLOW(clientId: String, userToFollow: String, userHandler: ActorRef)
case class FOLLOW(currentUser: String, userToFollow: String)
case class STOPFOLLOWING(event: WebSocketFrameEvent)
case class FORWARDSTOPFOLLOW(clientId: String, userToFollow: String, userHandler: ActorRef)
case class STOPFOLLOW(currentUser: String, userToFollow: String)
case class GETUSERNAME(clientId: String)
case class USERNAME(username: String)
case class GETUSERSPERUSER(event: WebSocketFrameEvent)
case object GETUSERS
case class USERS(users: List[String])
case class GETFOLLOWERSWITHCLIENTS(userHandler: ActorRef, username: String)
case class GETFOLLOWERSPERUSER(event: WebSocketFrameEvent)
case class GETFOLLOWERS(username: String)
case class GETFOLLOWING(username: String)
case class FOLLOWERSWITHCLIENTS(clients: List[String])
case class FOLLOWERS(followers: List[String])
case class GETCLIENT(username: String)
case class CLIENT(client: String)
| highlanderkev/ChatShare | ChatShare/common/src/main/scala/messages.scala | Scala | mit | 2,055 |
package com.typesafe.akka.http.benchmark.entity
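/** A fortune row: its numeric id plus the message text. */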
case class Fortune(id: Int, message: String)
| zdanek/FrameworkBenchmarks | frameworks/Scala/akka-http/src/main/scala/com/typesafe/akka/http/benchmark/entity/Fortune.scala | Scala | bsd-3-clause | 94 |
/*
* Copyright 2018 Aman Mehara
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
object SequenceMaximum {
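    /** Largest element of the (assumed non-empty) list; delegates to `List.max`. */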
def maximum(numbers: List[Int]): Int = {
numbers.max
}
} | amanmehara/programming-app-data | Scala/Sequence Maximum/SequenceMaximum.scala | Scala | apache-2.0 | 686 |
package com.mdataset.service.api
import com.ecfront.ez.framework.core.EZManager
import com.mdataset.service.api.export.query.SocketAPI
import com.mdataset.service.api.process.QueryProcessor
import com.typesafe.scalalogging.slf4j.LazyLogging
/**
  * API Service startup class
*/
object MdsStartup extends App with LazyLogging {
if (EZManager.start()) {
MdsContext.apiExchangeMaster.registerResp()
MdsContext.apiExchangeMaster.unRegisterResp()
SocketAPI.listening(MdsContext.socketPort, MdsContext.socketHost)
logger.info("==Startup== api service startup.")
}
}
| MDataSet/mds | modules/service_api/src/main/scala/com/mdataset/service/api/MdsStartup.scala | Scala | apache-2.0 | 581 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic._
import Matchers._
import TripleEquals._
import exceptions.TestFailedException
class ShouldTripleEqualsToleranceSpec extends Spec /* with NonImplicitAssertions */ with Tolerance {
val sevenDotOh = 7.0
val minusSevenDotOh = -7.0
val sevenDotOhFloat = 7.0f
val minusSevenDotOhFloat = -7.0f
val sevenLong = 7L
val minusSevenLong = -7L
val sevenInt = 7
val minusSevenInt = -7
val sevenShort: Short = 7
val minusSevenShort: Short = -7
val sevenByte: Byte = 7
val minusSevenByte: Byte = -7
/*
I decided that for X +- Y, Y can be any numeric type that's implicitly
convertible to X. So if X is Double, Y could be Double, Float, Long, Int, Short, Byte.
If X is Long, Y could be Long, Int, Short, Byte. If X is Short, Y could be Short or Byte.
And if X is Byte, Y must be Byte.
assert(minusSevenDotOhFloat === (-6.8f +- 0.2d))
*/
/* Chose not to do the symmetry, because no one needs it and implementing it would require an implicit. So these fail:
(7.1 +- 0.2) should === (sevenDotOh)
(7.5 +- 0.2) should !== (sevenDotOh)
*/
object `The should === syntax` {
def `should be true if the number is within the given interval` {
// Double +- Double
sevenDotOh should === (7.1 +- 0.2)
sevenDotOh should === (6.9 +- 0.2)
sevenDotOh should === (7.0 +- 0.2)
sevenDotOh should === (7.2 +- 0.2)
sevenDotOh should === (6.8 +- 0.2)
minusSevenDotOh should === (-7.1 +- 0.2)
minusSevenDotOh should === (-6.9 +- 0.2)
minusSevenDotOh should === (-7.0 +- 0.2)
minusSevenDotOh should === (-7.2 +- 0.2)
minusSevenDotOh should === (-6.8 +- 0.2)
// Double +- Float
sevenDotOh should === (7.1 +- 0.2f)
sevenDotOh should === (6.9 +- 0.2f)
sevenDotOh should === (7.0 +- 0.2f)
sevenDotOh should === (7.2 +- 0.2f)
sevenDotOh should === (6.8 +- 0.2f)
minusSevenDotOh should === (-7.1 +- 0.2f)
minusSevenDotOh should === (-6.9 +- 0.2f)
minusSevenDotOh should === (-7.0 +- 0.2f)
minusSevenDotOh should === (-7.2 +- 0.2f)
minusSevenDotOh should === (-6.8 +- 0.2f)
// Double +- Long
sevenDotOh should === (7.1 +- 2L)
sevenDotOh should === (6.9 +- 2L)
sevenDotOh should === (7.0 +- 2L)
sevenDotOh should === (7.2 +- 2L)
sevenDotOh should === (6.8 +- 2L)
minusSevenDotOh should === (-7.1 +- 2L)
minusSevenDotOh should === (-6.9 +- 2L)
minusSevenDotOh should === (-7.0 +- 2L)
minusSevenDotOh should === (-7.2 +- 2L)
minusSevenDotOh should === (-6.8 +- 2L)
// Double +- Int
sevenDotOh should === (7.1 +- 2)
sevenDotOh should === (6.9 +- 2)
sevenDotOh should === (7.0 +- 2)
sevenDotOh should === (7.2 +- 2)
sevenDotOh should === (6.8 +- 2)
minusSevenDotOh should === (-7.1 +- 2)
minusSevenDotOh should === (-6.9 +- 2)
minusSevenDotOh should === (-7.0 +- 2)
minusSevenDotOh should === (-7.2 +- 2)
minusSevenDotOh should === (-6.8 +- 2)
// Double +- Short
sevenDotOh should === (7.1 +- 2.toShort)
sevenDotOh should === (6.9 +- 2.toShort)
sevenDotOh should === (7.0 +- 2.toShort)
sevenDotOh should === (7.2 +- 2.toShort)
sevenDotOh should === (6.8 +- 2.toShort)
minusSevenDotOh should === (-7.1 +- 2.toShort)
minusSevenDotOh should === (-6.9 +- 2.toShort)
minusSevenDotOh should === (-7.0 +- 2.toShort)
minusSevenDotOh should === (-7.2 +- 2.toShort)
minusSevenDotOh should === (-6.8 +- 2.toShort)
// Double +- Byte
sevenDotOh should === (7.1 +- 2.toByte)
sevenDotOh should === (6.9 +- 2.toByte)
sevenDotOh should === (7.0 +- 2.toByte)
sevenDotOh should === (7.2 +- 2.toByte)
sevenDotOh should === (6.8 +- 2.toByte)
minusSevenDotOh should === (-7.1 +- 2.toByte)
minusSevenDotOh should === (-6.9 +- 2.toByte)
minusSevenDotOh should === (-7.0 +- 2.toByte)
minusSevenDotOh should === (-7.2 +- 2.toByte)
minusSevenDotOh should === (-6.8 +- 2.toByte)
// Float +- Float
sevenDotOhFloat should === (7.1f +- 0.2f)
sevenDotOhFloat should === (6.9f +- 0.2f)
sevenDotOhFloat should === (7.0f +- 0.2f)
sevenDotOhFloat should === (7.2f +- 0.2f)
sevenDotOhFloat should === (6.8f +- 0.2f)
minusSevenDotOhFloat should === (-7.1f +- 0.2f)
minusSevenDotOhFloat should === (-6.9f +- 0.2f)
minusSevenDotOhFloat should === (-7.0f +- 0.2f)
minusSevenDotOhFloat should === (-7.2f +- 0.2f)
minusSevenDotOhFloat should === (-6.8f +- 0.2f)
// Float +- Long
sevenDotOhFloat should === (7.1f +- 2L)
sevenDotOhFloat should === (6.9f +- 2L)
sevenDotOhFloat should === (7.0f +- 2L)
sevenDotOhFloat should === (7.2f +- 2L)
sevenDotOhFloat should === (6.8f +- 2L)
minusSevenDotOhFloat should === (-7.1f +- 2L)
minusSevenDotOhFloat should === (-6.9f +- 2L)
minusSevenDotOhFloat should === (-7.0f +- 2L)
minusSevenDotOhFloat should === (-7.2f +- 2L)
minusSevenDotOhFloat should === (-6.8f +- 2L)
// Float +- Int
sevenDotOhFloat should === (7.1f +- 2)
sevenDotOhFloat should === (6.9f +- 2)
sevenDotOhFloat should === (7.0f +- 2)
sevenDotOhFloat should === (7.2f +- 2)
sevenDotOhFloat should === (6.8f +- 2)
minusSevenDotOhFloat should === (-7.1f +- 2)
minusSevenDotOhFloat should === (-6.9f +- 2)
minusSevenDotOhFloat should === (-7.0f +- 2)
minusSevenDotOhFloat should === (-7.2f +- 2)
minusSevenDotOhFloat should === (-6.8f +- 2)
// Float +- Short
sevenDotOhFloat should === (7.1f +- 2.toShort)
sevenDotOhFloat should === (6.9f +- 2.toShort)
sevenDotOhFloat should === (7.0f +- 2.toShort)
sevenDotOhFloat should === (7.2f +- 2.toShort)
sevenDotOhFloat should === (6.8f +- 2.toShort)
minusSevenDotOhFloat should === (-7.1f +- 2.toShort)
minusSevenDotOhFloat should === (-6.9f +- 2.toShort)
minusSevenDotOhFloat should === (-7.0f +- 2.toShort)
minusSevenDotOhFloat should === (-7.2f +- 2.toShort)
minusSevenDotOhFloat should === (-6.8f +- 2.toShort)
// Float +- Byte
sevenDotOhFloat should === (7.1f +- 2.toByte)
sevenDotOhFloat should === (6.9f +- 2.toByte)
sevenDotOhFloat should === (7.0f +- 2.toByte)
sevenDotOhFloat should === (7.2f +- 2.toByte)
sevenDotOhFloat should === (6.8f +- 2.toByte)
minusSevenDotOhFloat should === (-7.1f +- 2.toByte)
minusSevenDotOhFloat should === (-6.9f +- 2.toByte)
minusSevenDotOhFloat should === (-7.0f +- 2.toByte)
minusSevenDotOhFloat should === (-7.2f +- 2.toByte)
minusSevenDotOhFloat should === (-6.8f +- 2.toByte)
// Long +- Long
sevenLong should === (9L +- 2L)
sevenLong should === (8L +- 2L)
sevenLong should === (7L +- 2L)
sevenLong should === (6L +- 2L)
sevenLong should === (5L +- 2L)
minusSevenLong should === (-9L +- 2L)
minusSevenLong should === (-8L +- 2L)
minusSevenLong should === (-7L +- 2L)
minusSevenLong should === (-6L +- 2L)
minusSevenLong should === (-5L +- 2L)
// Long +- Int
sevenLong should === (9L +- 2)
sevenLong should === (8L +- 2)
sevenLong should === (7L +- 2)
sevenLong should === (6L +- 2)
sevenLong should === (5L +- 2)
minusSevenLong should === (-9L +- 2)
minusSevenLong should === (-8L +- 2)
minusSevenLong should === (-7L +- 2)
minusSevenLong should === (-6L +- 2)
minusSevenLong should === (-5L +- 2)
// Long +- Short
sevenLong should === (9L +- 2.toShort)
sevenLong should === (8L +- 2.toShort)
sevenLong should === (7L +- 2.toShort)
sevenLong should === (6L +- 2.toShort)
sevenLong should === (5L +- 2.toShort)
minusSevenLong should === (-9L +- 2.toShort)
minusSevenLong should === (-8L +- 2.toShort)
minusSevenLong should === (-7L +- 2.toShort)
minusSevenLong should === (-6L +- 2.toShort)
minusSevenLong should === (-5L +- 2.toShort)
// Long +- Byte
sevenLong should === (9L +- 2.toByte)
sevenLong should === (8L +- 2.toByte)
sevenLong should === (7L +- 2.toByte)
sevenLong should === (6L +- 2.toByte)
sevenLong should === (5L +- 2.toByte)
minusSevenLong should === (-9L +- 2.toByte)
minusSevenLong should === (-8L +- 2.toByte)
minusSevenLong should === (-7L +- 2.toByte)
minusSevenLong should === (-6L +- 2.toByte)
minusSevenLong should === (-5L +- 2.toByte)
// Int +- Int
sevenInt should === (9 +- 2)
sevenInt should === (8 +- 2)
sevenInt should === (7 +- 2)
sevenInt should === (6 +- 2)
sevenInt should === (5 +- 2)
minusSevenInt should === (-9 +- 2)
minusSevenInt should === (-8 +- 2)
minusSevenInt should === (-7 +- 2)
minusSevenInt should === (-6 +- 2)
minusSevenInt should === (-5 +- 2)
// Int +- Short
sevenInt should === (9 +- 2.toShort)
sevenInt should === (8 +- 2.toShort)
sevenInt should === (7 +- 2.toShort)
sevenInt should === (6 +- 2.toShort)
sevenInt should === (5 +- 2.toShort)
minusSevenInt should === (-9 +- 2.toShort)
minusSevenInt should === (-8 +- 2.toShort)
minusSevenInt should === (-7 +- 2.toShort)
minusSevenInt should === (-6 +- 2.toShort)
minusSevenInt should === (-5 +- 2.toShort)
// Int +- Byte
sevenInt should === (9 +- 2.toByte)
sevenInt should === (8 +- 2.toByte)
sevenInt should === (7 +- 2.toByte)
sevenInt should === (6 +- 2.toByte)
sevenInt should === (5 +- 2.toByte)
minusSevenInt should === (-9 +- 2.toByte)
minusSevenInt should === (-8 +- 2.toByte)
minusSevenInt should === (-7 +- 2.toByte)
minusSevenInt should === (-6 +- 2.toByte)
minusSevenInt should === (-5 +- 2.toByte)
// Short +- Short
sevenShort should === (9.toShort +- 2.toShort)
sevenShort should === (8.toShort +- 2.toShort)
sevenShort should === (7.toShort +- 2.toShort)
sevenShort should === (6.toShort +- 2.toShort)
sevenShort should === (5.toShort +- 2.toShort)
minusSevenShort should === ((-9).toShort +- 2.toShort)
minusSevenShort should === ((-8).toShort +- 2.toShort)
minusSevenShort should === ((-7).toShort +- 2.toShort)
minusSevenShort should === ((-6).toShort +- 2.toShort)
minusSevenShort should === ((-5).toShort +- 2.toShort)
// Short +- Byte
sevenShort should === (9.toShort +- 2.toByte)
sevenShort should === (8.toShort +- 2.toByte)
sevenShort should === (7.toShort +- 2.toByte)
sevenShort should === (6.toShort +- 2.toByte)
sevenShort should === (5.toShort +- 2.toByte)
minusSevenShort should === ((-9).toShort +- 2.toByte)
minusSevenShort should === ((-8).toShort +- 2.toByte)
minusSevenShort should === ((-7).toShort +- 2.toByte)
minusSevenShort should === ((-6).toShort +- 2.toByte)
minusSevenShort should === ((-5).toShort +- 2.toByte)
// Byte +- Byte
sevenByte should === (9.toByte +- 2.toByte)
sevenByte should === (8.toByte +- 2.toByte)
sevenByte should === (7.toByte +- 2.toByte)
sevenByte should === (6.toByte +- 2.toByte)
sevenByte should === (5.toByte +- 2.toByte)
minusSevenByte should === ((-9).toByte +- 2.toByte)
minusSevenByte should === ((-8).toByte +- 2.toByte)
minusSevenByte should === ((-7).toByte +- 2.toByte)
minusSevenByte should === ((-6).toByte +- 2.toByte)
minusSevenByte should === ((-5).toByte +- 2.toByte)
}
def `should throw TFE if the number is outside the given interval` {
// Double +- Double
val caught = intercept[TestFailedException] { sevenDotOh should === (7.5 +- 0.2) }
assert(caught.getMessage === "7.0 did not equal 7.5 plus or minus 0.2")
intercept[TestFailedException] { sevenDotOh should === (6.5 +- 0.2) }
intercept[TestFailedException] { minusSevenDotOh should === (-7.5 +- 0.2) }
intercept[TestFailedException] { minusSevenDotOh should === (-6.5 +- 0.2) }
// Double +- Float
intercept[TestFailedException] { sevenDotOh should === (7.5 +- 0.2f) }
intercept[TestFailedException] { sevenDotOh should === (6.5 +- 0.2f) }
intercept[TestFailedException] { minusSevenDotOh should === (-7.5 +- 0.2f) }
intercept[TestFailedException] { minusSevenDotOh should === (-6.5 +- 0.2f) }
// Double +- Long
intercept[TestFailedException] { sevenDotOh should === (4.0 +- 2L) }
intercept[TestFailedException] { sevenDotOh should === (9.1 +- 2L) }
intercept[TestFailedException] { minusSevenDotOh should === (-4.0 +- 2L) }
intercept[TestFailedException] { minusSevenDotOh should === (-9.1 +- 2L) }
// Double +- Int
intercept[TestFailedException] { sevenDotOh should === (4.0 +- 2) }
intercept[TestFailedException] { sevenDotOh should === (9.1 +- 2) }
intercept[TestFailedException] { minusSevenDotOh should === (-4.0 +- 2) }
intercept[TestFailedException] { minusSevenDotOh should === (-9.1 +- 2) }
// Double +- Short
intercept[TestFailedException] { sevenDotOh should === (4.0 +- 2.toShort) }
intercept[TestFailedException] { sevenDotOh should === (9.1 +- 2.toShort) }
intercept[TestFailedException] { minusSevenDotOh should === (-4.0 +- 2.toShort) }
intercept[TestFailedException] { minusSevenDotOh should === (-9.1 +- 2.toShort) }
// Double +- Byte
intercept[TestFailedException] { sevenDotOh should === (4.0 +- 2.toByte) }
intercept[TestFailedException] { sevenDotOh should === (9.1 +- 2.toByte) }
intercept[TestFailedException] { minusSevenDotOh should === (-4.0 +- 2.toByte) }
intercept[TestFailedException] { minusSevenDotOh should === (-9.1 +- 2.toByte) }
// Float +- Float
intercept[TestFailedException] { sevenDotOhFloat should === (7.5f +- 0.2f) }
intercept[TestFailedException] { sevenDotOhFloat should === (6.5f +- 0.2f) }
intercept[TestFailedException] { minusSevenDotOhFloat should === (-7.5f +- 0.2f) }
intercept[TestFailedException] { minusSevenDotOhFloat should === (-6.5f +- 0.2f) }
// Float +- Long
intercept[TestFailedException] { sevenDotOhFloat should === (4.0f +- 2L) }
intercept[TestFailedException] { sevenDotOhFloat should === (9.1f +- 2L) }
intercept[TestFailedException] { minusSevenDotOhFloat should === (-4.0f +- 2L) }
intercept[TestFailedException] { minusSevenDotOhFloat should === (-9.1f +- 2L) }
// Float +- Int
intercept[TestFailedException] { sevenDotOhFloat should === (4.0f +- 2) }
intercept[TestFailedException] { sevenDotOhFloat should === (9.1f +- 2) }
intercept[TestFailedException] { minusSevenDotOhFloat should === (-4.0f +- 2) }
intercept[TestFailedException] { minusSevenDotOhFloat should === (-9.1f +- 2) }
// Float +- Short
intercept[TestFailedException] { sevenDotOhFloat should === (4.0f +- 2.toShort) }
intercept[TestFailedException] { sevenDotOhFloat should === (9.1f +- 2.toShort) }
intercept[TestFailedException] { minusSevenDotOhFloat should === (-4.0f +- 2.toShort) }
intercept[TestFailedException] { minusSevenDotOhFloat should === (-9.1f +- 2.toShort) }
// Float +- Byte
intercept[TestFailedException] { sevenDotOhFloat should === (4.0f +- 2.toByte) }
intercept[TestFailedException] { sevenDotOhFloat should === (9.1f +- 2.toByte) }
intercept[TestFailedException] { minusSevenDotOhFloat should === (-4.0f +- 2.toByte) }
intercept[TestFailedException] { minusSevenDotOhFloat should === (-9.1f +- 2.toByte) }
// Long +- Long
intercept[TestFailedException] { sevenLong should === (4L +- 2L) }
intercept[TestFailedException] { sevenLong should === (10L +- 2L) }
intercept[TestFailedException] { minusSevenLong should === (-4L +- 2L) }
intercept[TestFailedException] { minusSevenLong should === (-10L +- 2L) }
// Long +- Int
intercept[TestFailedException] { sevenLong should === (4L +- 2) }
intercept[TestFailedException] { sevenLong should === (10L +- 2) }
intercept[TestFailedException] { minusSevenLong should === (-4L +- 2) }
intercept[TestFailedException] { minusSevenLong should === (-10L +- 2) }
// Long +- Short
intercept[TestFailedException] { sevenLong should === (4L +- 2.toShort) }
intercept[TestFailedException] { sevenLong should === (10L +- 2.toShort) }
intercept[TestFailedException] { minusSevenLong should === (-4L +- 2.toShort) }
intercept[TestFailedException] { minusSevenLong should === (-10L +- 2.toShort) }
// Long +- Byte
intercept[TestFailedException] { sevenLong should === (4L +- 2.toByte) }
intercept[TestFailedException] { sevenLong should === (10L +- 2.toByte) }
intercept[TestFailedException] { minusSevenLong should === (-4L +- 2.toByte) }
intercept[TestFailedException] { minusSevenLong should === (-10L +- 2.toByte) }
// Int +- Int
intercept[TestFailedException] { sevenInt should === (4 +- 2) }
intercept[TestFailedException] { sevenInt should === (10 +- 2) }
intercept[TestFailedException] { minusSevenInt should === (-4 +- 2) }
intercept[TestFailedException] { minusSevenInt should === (-10 +- 2) }
// Int +- Short
intercept[TestFailedException] { sevenInt should === (4 +- 2.toShort) }
intercept[TestFailedException] { sevenInt should === (10 +- 2.toShort) }
intercept[TestFailedException] { minusSevenInt should === (-4 +- 2.toShort) }
intercept[TestFailedException] { minusSevenInt should === (-10 +- 2.toShort) }
// Int +- Byte
intercept[TestFailedException] { sevenInt should === (4 +- 2.toByte) }
intercept[TestFailedException] { sevenInt should === (10 +- 2.toByte) }
intercept[TestFailedException] { minusSevenInt should === (-4 +- 2.toByte) }
intercept[TestFailedException] { minusSevenInt should === (-10 +- 2.toByte) }
// Short +- Short
intercept[TestFailedException] { sevenShort should === (4.toShort +- 2.toShort) }
intercept[TestFailedException] { sevenShort should === (10.toShort +- 2.toShort) }
intercept[TestFailedException] { minusSevenShort should === ((-4).toShort +- 2.toShort) }
intercept[TestFailedException] { minusSevenShort should === ((-10).toShort +- 2.toShort) }
// Short +- Byte
intercept[TestFailedException] { sevenShort should === (4.toShort +- 2.toByte) }
intercept[TestFailedException] { sevenShort should === (10.toShort +- 2.toByte) }
intercept[TestFailedException] { minusSevenShort should === ((-4).toShort +- 2.toByte) }
intercept[TestFailedException] { minusSevenShort should === ((-10).toShort +- 2.toByte) }
// Byte +- Byte
intercept[TestFailedException] { sevenByte should === (4.toByte +- 2.toByte) }
intercept[TestFailedException] { sevenByte should === (10.toByte +- 2.toByte) }
intercept[TestFailedException] { minusSevenByte should === ((-4).toByte +- 2.toByte) }
intercept[TestFailedException] { minusSevenByte should === ((-10).toByte +- 2.toByte) }
}
}
object `The !== syntax` {
def `should succeed if the number is outside the given interval` {
// Double +- Double
sevenDotOh should !== (7.5 +- 0.2)
sevenDotOh should !== (6.5 +- 0.2)
minusSevenDotOh should !== (-7.5 +- 0.2)
minusSevenDotOh should !== (-6.5 +- 0.2)
// Double +- Float
sevenDotOh should !== (7.5 +- 0.2f)
sevenDotOh should !== (6.5 +- 0.2f)
minusSevenDotOh should !== (-7.5 +- 0.2f)
minusSevenDotOh should !== (-6.5 +- 0.2f)
// Double +- Long
sevenDotOh should !== (4.0 +- 2L)
sevenDotOh should !== (9.1 +- 2L)
minusSevenDotOh should !== (-4.0 +- 2L)
minusSevenDotOh should !== (-9.1 +- 2L)
// Double +- Int
sevenDotOh should !== (4.0 +- 2)
sevenDotOh should !== (9.1 +- 2)
minusSevenDotOh should !== (-4.0 +- 2)
minusSevenDotOh should !== (-9.1 +- 2)
// Double +- Short
sevenDotOh should !== (4.0 +- 2.toShort)
sevenDotOh should !== (9.1 +- 2.toShort)
minusSevenDotOh should !== (-4.0 +- 2.toShort)
minusSevenDotOh should !== (-9.1 +- 2.toShort)
// Double +- Byte
sevenDotOh should !== (4.0 +- 2.toByte)
sevenDotOh should !== (9.1 +- 2.toByte)
minusSevenDotOh should !== (-4.0 +- 2.toByte)
minusSevenDotOh should !== (-9.1 +- 2.toByte)
// Float +- Float
sevenDotOhFloat should !== (7.5f +- 0.2f)
sevenDotOhFloat should !== (6.5f +- 0.2f)
minusSevenDotOhFloat should !== (-7.5f +- 0.2f)
minusSevenDotOhFloat should !== (-6.5f +- 0.2f)
// Float +- Long
sevenDotOhFloat should !== (4.0f +- 2L)
sevenDotOhFloat should !== (9.1f +- 2L)
minusSevenDotOhFloat should !== (-4.0f +- 2L)
minusSevenDotOhFloat should !== (-9.1f +- 2L)
// Float +- Int
sevenDotOhFloat should !== (4.0f +- 2)
sevenDotOhFloat should !== (9.1f +- 2)
minusSevenDotOhFloat should !== (-4.0f +- 2)
minusSevenDotOhFloat should !== (-9.1f +- 2)
// Float +- Short
sevenDotOhFloat should !== (4.0f +- 2.toShort)
sevenDotOhFloat should !== (9.1f +- 2.toShort)
minusSevenDotOhFloat should !== (-4.0f +- 2.toShort)
minusSevenDotOhFloat should !== (-9.1f +- 2.toShort)
// Float +- Byte
sevenDotOhFloat should !== (4.0f +- 2.toByte)
sevenDotOhFloat should !== (9.1f +- 2.toByte)
minusSevenDotOhFloat should !== (-4.0f +- 2.toByte)
minusSevenDotOhFloat should !== (-9.1f +- 2.toByte)
// Long +- Long
sevenLong should !== (4L +- 2L)
sevenLong should !== (10L +- 2L)
minusSevenLong should !== (-4L +- 2L)
minusSevenLong should !== (-10L +- 2L)
// Long +- Int
sevenLong should !== (4L +- 2)
sevenLong should !== (10L +- 2)
minusSevenLong should !== (-4L +- 2)
minusSevenLong should !== (-10L +- 2)
// Long +- Short
sevenLong should !== (4L +- 2.toShort)
sevenLong should !== (10L +- 2.toShort)
minusSevenLong should !== (-4L +- 2.toShort)
minusSevenLong should !== (-10L +- 2.toShort)
// Long +- Byte
sevenLong should !== (4L +- 2.toByte)
sevenLong should !== (10L +- 2.toByte)
minusSevenLong should !== (-4L +- 2.toByte)
minusSevenLong should !== (-10L +- 2.toByte)
// Int +- Int
sevenInt should !== (4 +- 2)
sevenInt should !== (10 +- 2)
minusSevenInt should !== (-4 +- 2)
minusSevenInt should !== (-10 +- 2)
// Int +- Short
sevenInt should !== (4 +- 2.toShort)
sevenInt should !== (10 +- 2.toShort)
minusSevenInt should !== (-4 +- 2.toShort)
minusSevenInt should !== (-10 +- 2.toShort)
// Int +- Byte
sevenInt should !== (4 +- 2.toByte)
sevenInt should !== (10 +- 2.toByte)
minusSevenInt should !== (-4 +- 2.toByte)
minusSevenInt should !== (-10 +- 2.toByte)
// Short +- Short
sevenShort should !== (4.toShort +- 2.toShort)
sevenShort should !== (10.toShort +- 2.toShort)
minusSevenShort should !== ((-4).toShort +- 2.toShort)
minusSevenShort should !== ((-10).toShort +- 2.toShort)
// Short +- Byte
sevenShort should !== (4.toShort +- 2.toByte)
sevenShort should !== (10.toShort +- 2.toByte)
minusSevenShort should !== ((-4).toShort +- 2.toByte)
minusSevenShort should !== ((-10).toShort +- 2.toByte)
// Byte +- Byte
sevenByte should !== (4.toByte +- 2.toByte)
sevenByte should !== (10.toByte +- 2.toByte)
minusSevenByte should !== ((-4).toByte +- 2.toByte)
minusSevenByte should !== ((-10).toByte +- 2.toByte)
}
def `should throw TFE if the number is within the given interval` {
// Double +- Double
val caught = intercept[TestFailedException] { sevenDotOh should !== (7.1 +- 0.2) }
assert(caught.getMessage === "7.0 equaled 7.1 plus or minus 0.2")
intercept[TestFailedException] { sevenDotOh should !== (6.9 +- 0.2) }
intercept[TestFailedException] { sevenDotOh should !== (7.0 +- 0.2) }
intercept[TestFailedException] { sevenDotOh should !== (7.2 +- 0.2) }
intercept[TestFailedException] { sevenDotOh should !== (6.8 +- 0.2) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.1 +- 0.2) }
intercept[TestFailedException] { minusSevenDotOh should !== (-6.9 +- 0.2) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.0 +- 0.2) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.2 +- 0.2) }
intercept[TestFailedException] { minusSevenDotOh should !== (-6.8 +- 0.2) }
// Double +- Float
intercept[TestFailedException] { sevenDotOh should !== (7.1 +- 0.2f) }
intercept[TestFailedException] { sevenDotOh should !== (6.9 +- 0.2f) }
intercept[TestFailedException] { sevenDotOh should !== (7.0 +- 0.2f) }
intercept[TestFailedException] { sevenDotOh should !== (7.2 +- 0.2f) }
intercept[TestFailedException] { sevenDotOh should !== (6.8 +- 0.2f) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.1 +- 0.2f) }
intercept[TestFailedException] { minusSevenDotOh should !== (-6.9 +- 0.2f) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.0 +- 0.2f) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.2 +- 0.2f) }
intercept[TestFailedException] { minusSevenDotOh should !== (-6.8 +- 0.2f) }
// Double +- Long
intercept[TestFailedException] { sevenDotOh should !== (7.1 +- 2L) }
intercept[TestFailedException] { sevenDotOh should !== (6.9 +- 2L) }
intercept[TestFailedException] { sevenDotOh should !== (7.0 +- 2L) }
intercept[TestFailedException] { sevenDotOh should !== (7.2 +- 2L) }
intercept[TestFailedException] { sevenDotOh should !== (6.8 +- 2L) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.1 +- 2L) }
intercept[TestFailedException] { minusSevenDotOh should !== (-6.9 +- 2L) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.0 +- 2L) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.2 +- 2L) }
intercept[TestFailedException] { minusSevenDotOh should !== (-6.8 +- 2L) }
// Double +- Int
intercept[TestFailedException] { sevenDotOh should !== (7.1 +- 2) }
intercept[TestFailedException] { sevenDotOh should !== (6.9 +- 2) }
intercept[TestFailedException] { sevenDotOh should !== (7.0 +- 2) }
intercept[TestFailedException] { sevenDotOh should !== (7.2 +- 2) }
intercept[TestFailedException] { sevenDotOh should !== (6.8 +- 2) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.1 +- 2) }
intercept[TestFailedException] { minusSevenDotOh should !== (-6.9 +- 2) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.0 +- 2) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.2 +- 2) }
intercept[TestFailedException] { minusSevenDotOh should !== (-6.8 +- 2) }
// Double +- Short
intercept[TestFailedException] { sevenDotOh should !== (7.1 +- 2.toShort) }
intercept[TestFailedException] { sevenDotOh should !== (6.9 +- 2.toShort) }
intercept[TestFailedException] { sevenDotOh should !== (7.0 +- 2.toShort) }
intercept[TestFailedException] { sevenDotOh should !== (7.2 +- 2.toShort) }
intercept[TestFailedException] { sevenDotOh should !== (6.8 +- 2.toShort) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.1 +- 2.toShort) }
intercept[TestFailedException] { minusSevenDotOh should !== (-6.9 +- 2.toShort) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.0 +- 2.toShort) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.2 +- 2.toShort) }
intercept[TestFailedException] { minusSevenDotOh should !== (-6.8 +- 2.toShort) }
// Double +- Byte
intercept[TestFailedException] { sevenDotOh should !== (7.1 +- 2.toByte) }
intercept[TestFailedException] { sevenDotOh should !== (6.9 +- 2.toByte) }
intercept[TestFailedException] { sevenDotOh should !== (7.0 +- 2.toByte) }
intercept[TestFailedException] { sevenDotOh should !== (7.2 +- 2.toByte) }
intercept[TestFailedException] { sevenDotOh should !== (6.8 +- 2.toByte) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.1 +- 2.toByte) }
intercept[TestFailedException] { minusSevenDotOh should !== (-6.9 +- 2.toByte) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.0 +- 2.toByte) }
intercept[TestFailedException] { minusSevenDotOh should !== (-7.2 +- 2.toByte) }
intercept[TestFailedException] { minusSevenDotOh should !== (-6.8 +- 2.toByte) }
// Float +- Float
intercept[TestFailedException] { sevenDotOhFloat should !== (7.1f +- 0.2f) }
intercept[TestFailedException] { sevenDotOhFloat should !== (6.9f +- 0.2f) }
intercept[TestFailedException] { sevenDotOhFloat should !== (7.0f +- 0.2f) }
intercept[TestFailedException] { sevenDotOhFloat should !== (7.2f +- 0.2f) }
intercept[TestFailedException] { sevenDotOhFloat should !== (6.8f +- 0.2f) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-7.1f +- 0.2f) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-6.9f +- 0.2f) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-7.0f +- 0.2f) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-7.2f +- 0.2f) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-6.8f +- 0.2f) }
// Float +- Long
intercept[TestFailedException] { sevenDotOhFloat should !== (7.1f +- 2L) }
intercept[TestFailedException] { sevenDotOhFloat should !== (6.9f +- 2L) }
intercept[TestFailedException] { sevenDotOhFloat should !== (7.0f +- 2L) }
intercept[TestFailedException] { sevenDotOhFloat should !== (7.2f +- 2L) }
intercept[TestFailedException] { sevenDotOhFloat should !== (6.8f +- 2L) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-7.1f +- 2L) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-6.9f +- 2L) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-7.0f +- 2L) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-7.2f +- 2L) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-6.8f +- 2L) }
// Float +- Int
intercept[TestFailedException] { sevenDotOhFloat should !== (7.1f +- 2) }
intercept[TestFailedException] { sevenDotOhFloat should !== (6.9f +- 2) }
intercept[TestFailedException] { sevenDotOhFloat should !== (7.0f +- 2) }
intercept[TestFailedException] { sevenDotOhFloat should !== (7.2f +- 2) }
intercept[TestFailedException] { sevenDotOhFloat should !== (6.8f +- 2) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-7.1f +- 2) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-6.9f +- 2) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-7.0f +- 2) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-7.2f +- 2) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-6.8f +- 2) }
// Float +- Short
intercept[TestFailedException] { sevenDotOhFloat should !== (7.1f +- 2.toShort) }
intercept[TestFailedException] { sevenDotOhFloat should !== (6.9f +- 2.toShort) }
intercept[TestFailedException] { sevenDotOhFloat should !== (7.0f +- 2.toShort) }
intercept[TestFailedException] { sevenDotOhFloat should !== (7.2f +- 2.toShort) }
intercept[TestFailedException] { sevenDotOhFloat should !== (6.8f +- 2.toShort) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-7.1f +- 2.toShort) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-6.9f +- 2.toShort) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-7.0f +- 2.toShort) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-7.2f +- 2.toShort) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-6.8f +- 2.toShort) }
// Float +- Byte
intercept[TestFailedException] { sevenDotOhFloat should !== (7.1f +- 2.toByte) }
intercept[TestFailedException] { sevenDotOhFloat should !== (6.9f +- 2.toByte) }
intercept[TestFailedException] { sevenDotOhFloat should !== (7.0f +- 2.toByte) }
intercept[TestFailedException] { sevenDotOhFloat should !== (7.2f +- 2.toByte) }
intercept[TestFailedException] { sevenDotOhFloat should !== (6.8f +- 2.toByte) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-7.1f +- 2.toByte) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-6.9f +- 2.toByte) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-7.0f +- 2.toByte) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-7.2f +- 2.toByte) }
intercept[TestFailedException] { minusSevenDotOhFloat should !== (-6.8f +- 2.toByte) }
// Long +- Long
intercept[TestFailedException] { sevenLong should !== (9L +- 2L) }
intercept[TestFailedException] { sevenLong should !== (8L +- 2L) }
intercept[TestFailedException] { sevenLong should !== (7L +- 2L) }
intercept[TestFailedException] { sevenLong should !== (6L +- 2L) }
intercept[TestFailedException] { sevenLong should !== (5L +- 2L) }
intercept[TestFailedException] { minusSevenLong should !== (-9L +- 2L) }
intercept[TestFailedException] { minusSevenLong should !== (-8L +- 2L) }
intercept[TestFailedException] { minusSevenLong should !== (-7L +- 2L) }
intercept[TestFailedException] { minusSevenLong should !== (-6L +- 2L) }
intercept[TestFailedException] { minusSevenLong should !== (-5L +- 2L) }
// Long +- Int
intercept[TestFailedException] { sevenLong should !== (9L +- 2) }
intercept[TestFailedException] { sevenLong should !== (8L +- 2) }
intercept[TestFailedException] { sevenLong should !== (7L +- 2) }
intercept[TestFailedException] { sevenLong should !== (6L +- 2) }
intercept[TestFailedException] { sevenLong should !== (5L +- 2) }
intercept[TestFailedException] { minusSevenLong should !== (-9L +- 2) }
intercept[TestFailedException] { minusSevenLong should !== (-8L +- 2) }
intercept[TestFailedException] { minusSevenLong should !== (-7L +- 2) }
intercept[TestFailedException] { minusSevenLong should !== (-6L +- 2) }
intercept[TestFailedException] { minusSevenLong should !== (-5L +- 2) }
// Long +- Short
intercept[TestFailedException] { sevenLong should !== (9L +- 2.toShort) }
intercept[TestFailedException] { sevenLong should !== (8L +- 2.toShort) }
intercept[TestFailedException] { sevenLong should !== (7L +- 2.toShort) }
intercept[TestFailedException] { sevenLong should !== (6L +- 2.toShort) }
intercept[TestFailedException] { sevenLong should !== (5L +- 2.toShort) }
intercept[TestFailedException] { minusSevenLong should !== (-9L +- 2.toShort) }
intercept[TestFailedException] { minusSevenLong should !== (-8L +- 2.toShort) }
intercept[TestFailedException] { minusSevenLong should !== (-7L +- 2.toShort) }
intercept[TestFailedException] { minusSevenLong should !== (-6L +- 2.toShort) }
intercept[TestFailedException] { minusSevenLong should !== (-5L +- 2.toShort) }
// Long +- Byte
intercept[TestFailedException] { sevenLong should !== (9L +- 2.toByte) }
intercept[TestFailedException] { sevenLong should !== (8L +- 2.toByte) }
intercept[TestFailedException] { sevenLong should !== (7L +- 2.toByte) }
intercept[TestFailedException] { sevenLong should !== (6L +- 2.toByte) }
intercept[TestFailedException] { sevenLong should !== (5L +- 2.toByte) }
intercept[TestFailedException] { minusSevenLong should !== (-9L +- 2.toByte) }
intercept[TestFailedException] { minusSevenLong should !== (-8L +- 2.toByte) }
intercept[TestFailedException] { minusSevenLong should !== (-7L +- 2.toByte) }
intercept[TestFailedException] { minusSevenLong should !== (-6L +- 2.toByte) }
intercept[TestFailedException] { minusSevenLong should !== (-5L +- 2.toByte) }
// Int +- Int
intercept[TestFailedException] { sevenInt should !== (9 +- 2) }
intercept[TestFailedException] { sevenInt should !== (8 +- 2) }
intercept[TestFailedException] { sevenInt should !== (7 +- 2) }
intercept[TestFailedException] { sevenInt should !== (6 +- 2) }
intercept[TestFailedException] { sevenInt should !== (5 +- 2) }
intercept[TestFailedException] { minusSevenInt should !== (-9 +- 2) }
intercept[TestFailedException] { minusSevenInt should !== (-8 +- 2) }
intercept[TestFailedException] { minusSevenInt should !== (-7 +- 2) }
intercept[TestFailedException] { minusSevenInt should !== (-6 +- 2) }
intercept[TestFailedException] { minusSevenInt should !== (-5 +- 2) }
// Int +- Short
intercept[TestFailedException] { sevenInt should !== (9 +- 2.toShort) }
intercept[TestFailedException] { sevenInt should !== (8 +- 2.toShort) }
intercept[TestFailedException] { sevenInt should !== (7 +- 2.toShort) }
intercept[TestFailedException] { sevenInt should !== (6 +- 2.toShort) }
intercept[TestFailedException] { sevenInt should !== (5 +- 2.toShort) }
intercept[TestFailedException] { minusSevenInt should !== (-9 +- 2.toShort) }
intercept[TestFailedException] { minusSevenInt should !== (-8 +- 2.toShort) }
intercept[TestFailedException] { minusSevenInt should !== (-7 +- 2.toShort) }
intercept[TestFailedException] { minusSevenInt should !== (-6 +- 2.toShort) }
intercept[TestFailedException] { minusSevenInt should !== (-5 +- 2.toShort) }
// Int +- Byte
intercept[TestFailedException] { sevenInt should !== (9 +- 2.toByte) }
intercept[TestFailedException] { sevenInt should !== (8 +- 2.toByte) }
intercept[TestFailedException] { sevenInt should !== (7 +- 2.toByte) }
intercept[TestFailedException] { sevenInt should !== (6 +- 2.toByte) }
intercept[TestFailedException] { sevenInt should !== (5 +- 2.toByte) }
intercept[TestFailedException] { minusSevenInt should !== (-9 +- 2.toByte) }
intercept[TestFailedException] { minusSevenInt should !== (-8 +- 2.toByte) }
intercept[TestFailedException] { minusSevenInt should !== (-7 +- 2.toByte) }
intercept[TestFailedException] { minusSevenInt should !== (-6 +- 2.toByte) }
intercept[TestFailedException] { minusSevenInt should !== (-5 +- 2.toByte) }
// Short +- Short
intercept[TestFailedException] { sevenShort should !== (9.toShort +- 2.toShort) }
intercept[TestFailedException] { sevenShort should !== (8.toShort +- 2.toShort) }
intercept[TestFailedException] { sevenShort should !== (7.toShort +- 2.toShort) }
intercept[TestFailedException] { sevenShort should !== (6.toShort +- 2.toShort) }
intercept[TestFailedException] { sevenShort should !== (5.toShort +- 2.toShort) }
intercept[TestFailedException] { minusSevenShort should !== ((-9).toShort +- 2.toShort) }
intercept[TestFailedException] { minusSevenShort should !== ((-8).toShort +- 2.toShort) }
intercept[TestFailedException] { minusSevenShort should !== ((-7).toShort +- 2.toShort) }
intercept[TestFailedException] { minusSevenShort should !== ((-6).toShort +- 2.toShort) }
intercept[TestFailedException] { minusSevenShort should !== ((-5).toShort +- 2.toShort) }
// Short +- Byte
intercept[TestFailedException] { sevenShort should !== (9.toShort +- 2.toByte) }
intercept[TestFailedException] { sevenShort should !== (8.toShort +- 2.toByte) }
intercept[TestFailedException] { sevenShort should !== (7.toShort +- 2.toByte) }
intercept[TestFailedException] { sevenShort should !== (6.toShort +- 2.toByte) }
intercept[TestFailedException] { sevenShort should !== (5.toShort +- 2.toByte) }
intercept[TestFailedException] { minusSevenShort should !== ((-9).toShort +- 2.toByte) }
intercept[TestFailedException] { minusSevenShort should !== ((-8).toShort +- 2.toByte) }
intercept[TestFailedException] { minusSevenShort should !== ((-7).toShort +- 2.toByte) }
intercept[TestFailedException] { minusSevenShort should !== ((-6).toShort +- 2.toByte) }
intercept[TestFailedException] { minusSevenShort should !== ((-5).toShort +- 2.toByte) }
// Byte +- Byte
intercept[TestFailedException] { sevenByte should !== (9.toByte +- 2.toByte) }
intercept[TestFailedException] { sevenByte should !== (8.toByte +- 2.toByte) }
intercept[TestFailedException] { sevenByte should !== (7.toByte +- 2.toByte) }
intercept[TestFailedException] { sevenByte should !== (6.toByte +- 2.toByte) }
intercept[TestFailedException] { sevenByte should !== (5.toByte +- 2.toByte) }
intercept[TestFailedException] { minusSevenByte should !== ((-9).toByte +- 2.toByte) }
intercept[TestFailedException] { minusSevenByte should !== ((-8).toByte +- 2.toByte) }
intercept[TestFailedException] { minusSevenByte should !== ((-7).toByte +- 2.toByte) }
intercept[TestFailedException] { minusSevenByte should !== ((-6).toByte +- 2.toByte) }
intercept[TestFailedException] { minusSevenByte should !== ((-5).toByte +- 2.toByte) }
}
}
object `The X +- Y syntax` {
def `should throw IllegalArgumentException if the number passed to the right is 0 or negative` {
// Double +- Double
val caught1 = intercept[IllegalArgumentException] {
sevenDotOh should === (7.1 +- -0.2)
}
assert(caught1.getMessage === "-0.2 passed to +- was zero or negative. Must be a positive non-zero number.", caught1.getMessage)
// Double +- Float
val caught2 = intercept[IllegalArgumentException] {
sevenDotOh should === (7.1 +- -0.2f)
}
assert(caught2.getMessage === "-0.20000000298023224 passed to +- was zero or negative. Must be a positive non-zero number.")
// Double +- Long
val caught3 = intercept[IllegalArgumentException] {
sevenDotOh should === (7.1 +- -2L)
}
assert(caught3.getMessage === "-2.0 passed to +- was zero or negative. Must be a positive non-zero number.")
// Double +- Int
val caught4 = intercept[IllegalArgumentException] {
sevenDotOh should === (7.1 +- -2)
}
assert(caught4.getMessage === "-2.0 passed to +- was zero or negative. Must be a positive non-zero number.")
// Double +- Short
val caught5 = intercept[IllegalArgumentException] {
sevenDotOh should === (7.1 +- (-2).toShort)
}
assert(caught5.getMessage === "-2.0 passed to +- was zero or negative. Must be a positive non-zero number.")
// Double +- Byte
val caught6 = intercept[IllegalArgumentException] {
sevenDotOh should === (7.1 +- (-2).toByte)
}
assert(caught6.getMessage === "-2.0 passed to +- was zero or negative. Must be a positive non-zero number.")
// Float +- Float
val caught7 = intercept[IllegalArgumentException] {
sevenDotOhFloat should === (7.1f +- -0.2f)
}
assert(caught7.getMessage === "-0.2 passed to +- was zero or negative. Must be a positive non-zero number.")
// Float +- Long
val caught8 = intercept[IllegalArgumentException] {
sevenDotOhFloat should === (7.1f +- -2L)
}
assert(caught8.getMessage === "-2.0 passed to +- was zero or negative. Must be a positive non-zero number.")
// Float +- Int
val caught9 = intercept[IllegalArgumentException] {
sevenDotOhFloat should === (7.1f +- -2)
}
assert(caught9.getMessage === "-2.0 passed to +- was zero or negative. Must be a positive non-zero number.")
// Float +- Short
val caught10 = intercept[IllegalArgumentException] {
sevenDotOhFloat should === (7.1f +- (-2).toShort)
}
assert(caught10.getMessage === "-2.0 passed to +- was zero or negative. Must be a positive non-zero number.")
// Float +- Byte
val caught11 = intercept[IllegalArgumentException] {
sevenDotOhFloat should === (7.1f +- (-2).toByte)
}
assert(caught11.getMessage === "-2.0 passed to +- was zero or negative. Must be a positive non-zero number.")
// Long +- Long
val caught12 = intercept[IllegalArgumentException] {
sevenLong should === (9L +- -2L)
}
assert(caught12.getMessage === "-2 passed to +- was zero or negative. Must be a positive non-zero number.")
// Long +- Int
val caught13 = intercept[IllegalArgumentException] {
sevenLong should === (9L +- -2)
}
assert(caught13.getMessage === "-2 passed to +- was zero or negative. Must be a positive non-zero number.")
// Long +- Short
val caught14 = intercept[IllegalArgumentException] {
sevenLong should === (9L +- (-2).toShort)
}
assert(caught14.getMessage === "-2 passed to +- was zero or negative. Must be a positive non-zero number.")
// Long +- Byte
val caught15 = intercept[IllegalArgumentException] {
sevenLong should === (9L +- (-2).toByte)
}
assert(caught15.getMessage === "-2 passed to +- was zero or negative. Must be a positive non-zero number.")
// Int +- Int
val caught16 = intercept[IllegalArgumentException] {
sevenInt should === (9 +- -2)
}
assert(caught16.getMessage === "-2 passed to +- was zero or negative. Must be a positive non-zero number.")
// Int +- Short
val caught17 = intercept[IllegalArgumentException] {
sevenInt should === (9 +- (-2).toShort)
}
assert(caught17.getMessage === "-2 passed to +- was zero or negative. Must be a positive non-zero number.")
// Int +- Byte
val caught18 = intercept[IllegalArgumentException] {
sevenInt should === (9 +- (-2).toByte)
}
assert(caught18.getMessage === "-2 passed to +- was zero or negative. Must be a positive non-zero number.")
// Short +- Short
val caught19 = intercept[IllegalArgumentException] {
sevenShort should === (9.toShort +- (-2).toShort)
}
assert(caught19.getMessage === "-2 passed to +- was zero or negative. Must be a positive non-zero number.")
// Short +- Byte
val caught20 = intercept[IllegalArgumentException] {
sevenShort should === (9.toShort +- (-2).toByte)
}
assert(caught20.getMessage === "-2 passed to +- was zero or negative. Must be a positive non-zero number.")
// Byte +- Byte
val caught21 = intercept[IllegalArgumentException] {
sevenByte should === (9.toByte +- (-2).toByte)
}
assert(caught21.getMessage === "-2 passed to +- was zero or negative. Must be a positive non-zero number.")
}
}
}
| SRGOM/scalatest | scalatest-test/src/test/scala/org/scalatest/ShouldTripleEqualsToleranceSpec.scala | Scala | apache-2.0 | 48,262 |
package com.stefansavev.randomprojections.implementation
import java.util.Random
import com.stefansavev.randomprojections.datarepr.dense.{DataFrameView}
import com.stefansavev.randomprojections.datarepr.sparse.SparseVector
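/**
 * Single-pass accumulator of per-dimension statistics over k-dimensional points:
 * `mean` and `M2` follow Welford's online mean/variance update (finalized in `getMeanAndVar`),
 * while `sqrLeft`/`cntsLeft` and `sqrRight`/`cntsRight` collect squares and counts of the
 * positive and non-positive components separately (used by `diffSqr`).
 */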
class OnlineVariance(k: Int) {
var n = 0.0
val mean = Array.ofDim[Double](k)
val sqrLeft = Array.ofDim[Double](k)
val cntsLeft = Array.ofDim[Double](k)
val sqrRight = Array.ofDim[Double](k)
val cntsRight = Array.ofDim[Double](k)
val M2 = Array.ofDim[Double](k)
val delta = Array.ofDim[Double](k)
def processPoint(point: Array[Double]): Unit = {
val x = point
n = n + 1.0
var j = 0
while (j < k) {
delta(j) = x(j) - mean(j)
mean(j) = mean(j) + delta(j) / n
M2(j) = M2(j) + delta(j) * (x(j) - mean(j))
if (x(j) > 0){
sqrLeft(j) += x(j)*x(j)
cntsLeft(j) += 1
}
else{
sqrRight(j) += x(j)*x(j)
cntsRight(j) += 1
}
j += 1
}
}
def getMeanAndVar(): (Array[Double], Array[Double]) = {
var j = 0
while (j < k) {
M2(j) = M2(j) / (n - 1.0) //M2 is now the variance
j += 1
}
(mean, M2)
}
def diffSqr(): Array[Double] = {
var j = 0
val v = Array.ofDim[Double](k)
while (j < k) {
val a = sqrLeft(j)/(cntsLeft(j) + 0.5)
val b = sqrRight(j)/(cntsRight(j) + 0.5)
v(j) = Math.sqrt(a*b)
j += 1
}
v
}
}
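/**
 * Projection strategy that derives each split direction from the data itself: it sums a
 * small random sample of points (without replacement, with alternating signs), normalizes
 * the result and wraps it as a Hadamard projection vector.
 */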
case class DataInformedProjectionStrategy(rnd: Random, numCols: Int) extends ProjectionStrategy{
def nextRandomProjection(depth: Int, view: DataFrameView, prevProjection: AbstractProjectionVector): AbstractProjectionVector = {
val indexes = view.indexes.indexes
val proj = Array.ofDim[Double](view.numCols)
val replacements = new scala.collection.mutable.HashMap[Int, Int]()
//TODO: must guarantee that for different trees we end up with different vectors
//sample numSamples(256) vectors without replacement
//if the vectors are a b c d e f, compute a + b - c + d - e + f
val numSamples = Math.min(256, indexes.length - 1)
var j = 0
while(j < numSamples){
var b = rnd.nextInt(indexes.length - j)
if (replacements.contains(b)){
b = replacements(b)
}
replacements += ((b, indexes.length - j - 1))
val pnt = view.getPointAsDenseVector(b)
val sign = (if (j % 2 == 0) 1.0 else - 1.0)
var i = 0
while(i < pnt.length){
proj(i) += sign* pnt(i)
i += 1
}
j += 1
}
var norm = 0.0
var i = 0
while(i < proj.length){
norm += proj(i)*proj(i)
i += 1
}
norm = Math.sqrt(norm + 0.001)
i = 0
while(i < proj.length){
proj(i) /= norm
i += 1
}
val randomVector = new SparseVector(numCols, Array.range(0, numCols), proj)
new HadamardProjectionVector(randomVector)
}
def nextRandomProjection1(depth: Int, view: DataFrameView, prevProjVector: AbstractProjectionVector): AbstractProjectionVector = {
val indexes = view.indexes.indexes
val a = rnd.nextInt(indexes.length)
val b0 = rnd.nextInt(indexes.length - 1)
val b = if (b0 == a) (indexes.length - 1) else b0
/*
val onlineVariance = new OnlineVariance(numCols)
for(_id <- indexes){
val p = view.getPointAsDenseVector(_id)
onlineVariance.processPoint(p)
}
val (mean, variance) = onlineVariance.getMeanAndVar()
*/
val vA = view.getPointAsDenseVector(a)
val vB = view.getPointAsDenseVector(b)
var norm = 0.0
var i = 0
while(i < vA.length){
//vA(i) -= vB(i)
//vA(i) = Math.signum((vA(i) - vB(i)))*Math.abs(rnd.nextGaussian()) * (Math.sqrt(variance(i) + 0.1) )
vA(i) = (vA(i) - vB(i)) // + rnd.nextGaussian()*Math.sqrt(variance(i))
norm += vA(i)*vA(i)
i += 1
}
norm = Math.sqrt(norm + 0.001)
i = 0
while(i < vA.length){
vA(i) /= norm
i += 1
}
val randomVector = new SparseVector(numCols, Array.range(0, numCols), vA)
val proj = new HadamardProjectionVector(randomVector)
proj
}
}
case class DataInformedProjectionSettings()
class DataInformedProjectionBuilder(builderSettings: DataInformedProjectionSettings) extends ProjectionStrategyBuilder{
type T = DataInformedProjectionStrategy
val splitStrategy: DatasetSplitStrategy = new DataInformedSplitStrategy()
def build(settings: IndexSettings, rnd: Random, dataFrameView:DataFrameView): T = DataInformedProjectionStrategy(rnd, dataFrameView.numCols)
def datasetSplitStrategy: DatasetSplitStrategy = splitStrategy
}
| codeaudit/random-projections-at-berlinbuzzwords | src/main/scala/com/stefansavev/randomprojections/implementation/DataInformedProjectionStrategy.scala | Scala | apache-2.0 | 4,564 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.common.config
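/** Names of the configuration properties used by the benchmarks, all rooted at the `test` prefix. */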
object BenchmarkConfigNames {
val test = "test"
val zkHosts = test + ".zookeeper.hosts"
val kafkaHosts = test + ".kafka.hosts"
val benchmarkPort = test + ".benchmark.port"
val inputStreamTypes = test + ".input.stream.types"
}
| bwsw/sj-platform | core/sj-common/src/main/scala/com/bwsw/sj/common/config/BenchmarkConfigNames.scala | Scala | apache-2.0 | 1,079 |
package com.dwolla.circe
import io.circe._
import io.circe.generic.extras.Configuration
import io.circe.{ DecodingFailure, Json }
import shapeless._
import shapeless.labelled.{ field, FieldType }
object EnumerationSnakeCodec extends EnumerationSnakeCodec
/** This is mainly copied from Circe's [[io.circe.generic.extras.encoding.EnumerationEncoder]] and
* [[io.circe.generic.extras.decoding.EnumerationDecoder]], but since they're not configurable
* to use snake_case for the named values, this version applies the
* [[io.circe.generic.extras.Configuration.snakeCaseTransformation]] function to the names
* during encoding and decoding.
*/
trait EnumerationSnakeCodec {
abstract class EnumerationSnakeEncoder[A] extends Encoder[A]
abstract class EnumerationSnakeDecoder[A] extends Decoder[A]
implicit val encodeEnumerationCNil: EnumerationSnakeEncoder[CNil] = _ => sys.error("Cannot encode CNil")
implicit def encodeEnumerationCCons[K <: Symbol, V, R <: Coproduct](implicit
wit: Witness.Aux[K],
gv: LabelledGeneric.Aux[V, HNil],
dr: EnumerationSnakeEncoder[R]
): EnumerationSnakeEncoder[FieldType[K, V] :+: R] = x => {
val _ = gv
x match {
case Inl(_) => Json.fromString(Configuration.snakeCaseTransformation(wit.value.name))
case Inr(r) => dr(r)
}
}
implicit def encodeEnumeration[A, Repr <: Coproduct](implicit
gen: LabelledGeneric.Aux[A, Repr],
rr: EnumerationSnakeEncoder[Repr]
): EnumerationSnakeEncoder[A] =
a => rr(gen.to(a))
def deriveEnumerationSnakeEncoder[A](implicit encode: Lazy[EnumerationSnakeEncoder[A]]): Encoder[A] = encode.value
implicit val decodeEnumerationCNil: EnumerationSnakeDecoder[CNil] =
c => Left(DecodingFailure("Enumeration", c.history))
implicit def decodeEnumerationCCons[K <: Symbol, V, R <: Coproduct](implicit
wit: Witness.Aux[K],
gv: LabelledGeneric.Aux[V, HNil],
dr: EnumerationSnakeDecoder[R]
): EnumerationSnakeDecoder[FieldType[K, V] :+: R] =
c => c.as[String] match {
case Right(s) if s == Configuration.snakeCaseTransformation(wit.value.name) => Right(Inl(field[K](gv.from(HNil))))
case Right(_) => dr.apply(c).map(Inr(_))
case Left(_) => Left(DecodingFailure("Enumeration", c.history))
}
implicit def decodeEnumeration[A, Repr <: Coproduct](implicit
gen: LabelledGeneric.Aux[A, Repr],
rr: EnumerationSnakeDecoder[Repr]
): EnumerationSnakeDecoder[A] =
rr(_).map(gen.from)
def deriveEnumerationSnakeDecoder[A](implicit decode: Lazy[EnumerationSnakeDecoder[A]]): Decoder[A] = decode.value
}
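// A minimal usage sketch, shown as a comment only. The ADT below is hypothetical and not part of
// this file; it illustrates how a sealed trait of case objects would be serialised as the
// snake_cased name of each case object.
//
//   import io.circe.syntax._
//
//   sealed trait JobState
//   case object InProgress extends JobState
//   case object NotStarted extends JobState
//
//   implicit val encoder: Encoder[JobState] = EnumerationSnakeCodec.deriveEnumerationSnakeEncoder[JobState]
//   implicit val decoder: Decoder[JobState] = EnumerationSnakeCodec.deriveEnumerationSnakeDecoder[JobState]
//
//   (InProgress: JobState).asJson               // Json.fromString("in_progress")
//   Json.fromString("not_started").as[JobState] // Right(NotStarted)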
| Dwolla/scala-cloudflare | client/src/main/scala/com/dwolla/circe/EnumerationSnakeCodec.scala | Scala | mit | 3,424 |
package com.wix.mysql.config
import java.io.File
import java.nio.file.Files
import com.wix.mysql.EmbeddedMysql.anEmbeddedMysql
import com.wix.mysql.config.DownloadConfig.aDownloadConfig
import com.wix.mysql.support.IntegrationTest.testConfigBuilder
import com.wix.mysql.support.{IntegrationTest, MysqlCacheServingHttpServer}
import de.flapdoodle.embed.process.exceptions.DistributionException
import org.apache.commons.io.FileUtils.deleteDirectory
import org.specs2.matcher.FileMatchers
import org.specs2.mutable.BeforeAfter
class DownloadConfigTest extends IntegrationTest with FileMatchers {
"EmbeddedMysql download config" should {
"store download cache in custom location" in {
withTempDir { tempDir =>
val defaultCachePath = aDownloadConfig().build().getCacheDir
val downloadConfig = aDownloadConfig().withCacheDir(tempDir).build()
val mysqld = start(anEmbeddedMysql(testConfigBuilder().build, downloadConfig))
tempDir must not(beEqualToIgnoringSep(defaultCachePath))
aPath(tempDir, "extracted") must beADirectory and exist
}
}
"uses custom download base url" in {
withTempDir { tempDir =>
val downloadConfig = aDownloadConfig()
.withCacheDir(tempDir)
.withBaseUrl(s"http://localhost:2222")
.build()
start(anEmbeddedMysql(testConfigBuilder().build, downloadConfig)) must throwA[DistributionException].like {
case e => e.getMessage must contain("Could not open inputStream for http://localhost:2222/MySQL-5.7")
}
}
}
"downloads via custom download base url" in new context {
val mysqldConfig = testConfigBuilder().build
ensureVersionPresentInCache(mysqldConfig)
withTempDir { tempDir =>
val downloadConfig = aDownloadConfig()
.withDownloadCacheDir(tempDir)
.withBaseUrl(s"http://localhost:${httpServer.port}")
.build()
start(anEmbeddedMysql(mysqldConfig, downloadConfig))
aPath(tempDir, "extracted") must beADirectory and exist
}
}
}
class context extends BeforeAfter {
val httpServer = new MysqlCacheServingHttpServer
override def before: Any = {
if (httpServer != null) {
httpServer.start()
}
}
override def after: Any = {
if (httpServer != null) {
httpServer.stop()
}
}
}
def aPath(basedir: String, subdir: String): File = {
new File(basedir, subdir)
}
def withTempDir[T](f: String => T): T = {
val tempDir = Files.createTempDirectory("embed-mysql-test").toFile
try {
f(tempDir.getAbsolutePath)
} finally {
deleteDirectory(tempDir)
}
}
def ensureVersionPresentInCache(config: MysqldConfig): Unit = {
anEmbeddedMysql(config).start().stop()
}
}
| wix/wix-embedded-mysql | wix-embedded-mysql/src/test/scala/com/wix/mysql/config/DownloadConfigTest.scala | Scala | bsd-3-clause | 2,818 |
/*
* Copyright 2015 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.communication.actors
import akka.actor.{Actor, ActorRef}
import akka.util.ByteString
import com.ibm.spark.communication.{ZMQMessage, SocketManager}
import com.ibm.spark.utils.LogLike
import org.zeromq.ZMQ
/**
* Represents an actor containing a request socket.
*
* @param connection The address to connect to
* @param listener The actor to send incoming messages back to
*/
class ReqSocketActor(connection: String, listener: ActorRef)
extends Actor with LogLike
{
logger.debug(s"Initializing request socket actor for $connection")
private val manager: SocketManager = new SocketManager
private val socket = manager.newReqSocket(connection, (message: Seq[String]) => {
listener ! ZMQMessage(message.map(ByteString.apply): _*)
})
override def postStop(): Unit = {
manager.closeSocket(socket)
}
override def receive: Actor.Receive = {
case zmqMessage: ZMQMessage =>
val frames = zmqMessage.frames.map(byteString =>
new String(byteString.toArray, ZMQ.CHARSET))
socket.send(frames: _*)
}
}
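// Usage sketch (illustrative; the connection string and listener reference are assumptions).
// The actor is normally created through Akka Props so the constructor arguments are supplied
// at creation time:
//
//   val reqActor = system.actorOf(Props(classOf[ReqSocketActor], "tcp://127.0.0.1:5555", listenerRef))
//   reqActor ! ZMQMessage(ByteString("ping"))   // frames are written to the underlying REQ socket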
| codeaudit/spark-kernel | communication/src/main/scala/com/ibm/spark/communication/actors/ReqSocketActor.scala | Scala | apache-2.0 | 1,667 |
/*
* Copyright (c) 2014 Oculus Info Inc.
* http://www.oculusinfo.com/
*
* Released under the MIT License.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is furnished to do
* so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.oculusinfo.tilegen.tiling
import com.oculusinfo.binning.impl.AOITilePyramid
trait IndexScheme[T] {
def toCartesian (t: T): (Double, Double)
//TODO -- toCartesianEndpoints is only used for RDDLineBinner
//so ideally this should be moved to LineSegmentIndexScheme in RDDLineBinner?
def toCartesianEndpoints (t: T): (Double, Double, Double, Double)
}
class CartesianIndexScheme extends IndexScheme[(Double, Double)] with Serializable {
def toCartesian (coords: (Double, Double)): (Double, Double) = coords
def toCartesianEndpoints (coords: (Double, Double)): (Double, Double, Double, Double) = (coords._1, coords._1, coords._2, coords._2) //TODO -- redundant, see note above
}
object IPv4ZCurveIndexScheme {
def getDefaultIPPyramid =
new AOITilePyramid(0, 0, 0x10000L.toDouble, 0x10000L.toDouble)
def ipArrayToLong (ip: Array[Byte]): Long =
ip.map(_.toLong & 0xffL).foldLeft(0L)(256L*_+_)
def longToIPArray (ip: Long): Array[Byte] =
Array[Byte]((0xff & (ip >> 24)).toByte,
(0xff & (ip >> 16)).toByte,
(0xff & (ip >> 8)).toByte,
(0xff & ip ).toByte)
def ipArrayToString (ip: Array[Byte]): String =
ip.map(_.toInt & 0xff).mkString(".")
def stringToIPArray (ip: String): Array[Byte] =
    ip.split("\\.").map(_.toInt.toByte)
def reverse (x: Double, y: Double): Array[Byte] = {
val xL = x.toLong
val yL = y.toLong
val yExpand = Range(0, 16).map(i => ((yL >> i) & 0x1L) << (2*i+1)).reduce(_ + _)
val xExpand = Range(0, 16).map(i => ((xL >> i) & 0x1L) << (2*i )).reduce(_ + _)
val ipAddress = xExpand + yExpand
longToIPArray(ipAddress)
}
}
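// Usage sketch for the conversion helpers above (illustrative):
//   val bytes  = IPv4ZCurveIndexScheme.stringToIPArray("192.168.0.1")   // Array(-64, -88, 0, 1)
//   val asLong = IPv4ZCurveIndexScheme.ipArrayToLong(bytes)             // 3232235521L (0xC0A80001)
//   IPv4ZCurveIndexScheme.ipArrayToString(IPv4ZCurveIndexScheme.longToIPArray(asLong)) // "192.168.0.1"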
class IPv4ZCurveIndexScheme extends IndexScheme[Array[Byte]] with Serializable {
def toCartesian (ipAddress: Array[Byte]): (Double, Double) = {
def getXDigit (byte: Byte): Long =
(((byte & 0x40) >> 3) |
((byte & 0x10) >> 2) |
((byte & 0x04) >> 1) |
((byte & 0x01))).toLong
def getYDigit (byte: Byte): Long =
(((byte & 0x80) >> 4) |
((byte & 0x20) >> 3) |
((byte & 0x08) >> 2) |
((byte & 0x02) >> 1)).toLong
ipAddress.map(byte => (getXDigit(byte), getYDigit(byte)))
.foldLeft((0.0, 0.0))((a, b) =>
(16.0*a._1+b._1, 16.0*a._2+b._2)
)
}
def toCartesianEndpoints (ipAddress: Array[Byte]): (Double, Double, Double, Double) = (0, 0, 0, 0) //TODO -- redundant, see note above
}
trait TimeIndexScheme[T] extends IndexScheme[T] {
def extractTime (t: T): Double
}
/**
* Assumes the coords coming in are (Date, X, Y), so this just throws away the date field.
*/
class TimeRangeCartesianIndexScheme extends TimeIndexScheme[(Double, Double, Double)] with Serializable {
def toCartesian (coords: (Double, Double, Double)): (Double, Double) = (coords._2, coords._3)
def extractTime (coords: (Double, Double, Double)): Double = coords._1
def toCartesianEndpoints (coords: (Double, Double, Double)): (Double, Double, Double, Double) = (coords._1, coords._2, coords._3, coords._3) //TODO -- redundant, see note above
}
| aashish24/aperture-tiles | tile-generation/src/main/scala/com/oculusinfo/tilegen/tiling/IndexingScheme.scala | Scala | mit | 4,196 |
/**
* Copyright (C) 2010 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.control
import org.dom4j.QName
import org.junit.Test
import org.mockito.{Matchers, Mockito}
import org.orbeon.oxf.test.ResourceManagerTestBase
import org.orbeon.oxf.xforms.{PartAnalysis, XFormsContainingDocument}
import org.orbeon.oxf.xforms.analysis.ElementAnalysis
import org.orbeon.oxf.xforms.control.controls.XFormsInputControl
import org.xml.sax.helpers.AttributesImpl
import org.scalatest.junit.AssertionsForJUnit
class XFormsControlsTest extends ResourceManagerTestBase with AssertionsForJUnit {
// Mock just what's needed to make XFormsInputControl as used below happy
private def getContainingDocument(id: String): XFormsContainingDocument = {
val doc = Mockito.mock(classOf[XFormsContainingDocument])
Mockito.when(doc.getContainingDocument).thenReturn(doc)
val elementAnalysis = Mockito.mock(classOf[ElementAnalysis])
Mockito.when(elementAnalysis.staticId).thenReturn(id)
Mockito.when(elementAnalysis.prefixedId).thenReturn(id)
val partAnalysis = Mockito.mock(classOf[PartAnalysis])
Mockito.when(partAnalysis.getControlAnalysis(Matchers.anyString)).thenReturn(elementAnalysis)
Mockito.when(doc.getPartAnalysis).thenReturn(partAnalysis)
doc
}
@Test def testDiffCustomMIPsChanges(): Unit = {
val attributes = new AttributesImpl
val control1 = new XFormsInputControl(getContainingDocument("input-1"), null, null, "input-1") {
override val customMIPs = Map(
"name1" → "value1",
"name2" → "value2",
"name3" → "value3",
"name4" → "value4"
)
}
val control2 = new XFormsInputControl(getContainingDocument("input-2"), null, null, "input-2") {
// setBindingContext(new BindingContext())
override val customMIPs = Map(
// leave as is
"name1" → "value1",
// remove name2
// change value
"name3" → "newvalue3",
// leave as is
"name4" → "value4"
)
}
XFormsSingleNodeControl.addAjaxCustomMIPs(attributes, false, control1, control2)
assert("-name2-value2 -name3-value3 +name3-newvalue3" === attributes.getValue("class"))
}
@Test def testDiffCustomMIPsNew(): Unit = {
val attributes = new AttributesImpl
val control2 = new XFormsInputControl(getContainingDocument("input-1"), null, null, "input-1") {
override val customMIPs = Map(
"name1" → "value1",
"name2" → "value2",
"name3" → "value3",
"name4" → "value4"
)
}
XFormsSingleNodeControl.addAjaxCustomMIPs(attributes, true, null, control2)
assert("+name1-value1 +name2-value2 +name3-value3 +name4-value4" === attributes.getValue("class"))
}
@Test def testDiffClassAVT(): Unit = {
val attributes = new AttributesImpl
val control1 = new XFormsInputControl(getContainingDocument("input-1"), null, null, "input-1") {
override def extensionAttributeValue(attributeName: QName) = Some("foo bar gaga")
}
val control2 = new XFormsInputControl(getContainingDocument("input-2"), null, null, "input-2") {
override def extensionAttributeValue(attributeName: QName) = Some("bar toto")
}
AjaxSupport.addAjaxClasses(attributes, false, control1, control2)
assert("-foo -gaga +toto" === attributes.getValue("class"))
}
@Test def testDiffClassAVTNew(): Unit = {
val attributes = new AttributesImpl
val control2 = new XFormsInputControl(getContainingDocument("input-1"), null, null, "input-1") {
override def extensionAttributeValue(attributeName: QName) = Some("foo bar")
}
AjaxSupport.addAjaxClasses(attributes, false, null, control2)
assert("foo bar" === attributes.getValue("class"))
}
// NOTE: started writing this test, but just using an XFormsOutputControl without the context of an XFormsContainingDocument seems a dead-end!
// @Test
// public void testOutputControlRewrite() {
//
// final Document document = Dom4jUtils.readFromURL("oxf:/org/orbeon/oxf/xforms/processor/test-form.xml", false, false);
// final DocumentWrapper documentWrapper = new DocumentWrapper(document, null, new Configuration());
// final Element outputElement = (Element) ((NodeWrapper) XPathCache.evaluateSingle(new PipelineContext(), documentWrapper, "(//xh:body//xf:output)[1]", XFormsDocumentAnnotatorContentHandlerTest.BASIC_NAMESPACE_MAPPINGS, null, null, null, null, null)).getUnderlyingNode();
//
// final PipelineContext pipelineContext = new PipelineContext();
//
// final XBLContainer container = new XBLContainer("", null) {};
// final XFormsOutputControl control1 = new XFormsOutputControl(container, null, outputElement, "output", "output-1");
// control1.setBindingContext(pipelineContext, new XFormsContextStack.BindingContext(null, null, Collections.singletonList(documentWrapper.wrap(outputElement)), 1, "output-1", true, outputElement, null, false, null));
//
// control1.evaluateIfNeeded(pipelineContext);
//
// assertEquals("", control1.getExternalValue(pipelineContext));
// }
} | ajw625/orbeon-forms | src/test/scala/org/orbeon/oxf/xforms/control/XFormsControlsTest.scala | Scala | lgpl-2.1 | 6,091 |
package model
import skinny.orm._, feature._
import scalikejdbc._
import org.joda.time._
case class MailBatch(
id: Long,
mailId: Option[Long] = None,
status: Option[Int] = None,
textbody: Option[String] = None,
htmlbody: Option[String] = None,
mailCount: Option[String] = None,
startAt: Option[DateTime] = None,
endAt: Option[DateTime] = None,
deleteFlg: Boolean,
createdBy: Option[String] = None,
createdAt: Option[DateTime] = None,
updatedBy: Option[String] = None,
updatedAt: Option[DateTime] = None
)
object MailBatch extends SkinnyCRUDMapper[MailBatch] {
override lazy val tableName = "mail_batch"
override lazy val defaultAlias = createAlias("mb")
override def extract(rs: WrappedResultSet, rn: ResultName[MailBatch]): MailBatch = {
autoConstruct(rs, rn)
}
}
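// Usage sketch (illustrative; relies on the standard Skinny ORM finder API and an implicit DB session):
//   val mb = MailBatch.defaultAlias
//   MailBatch.findById(42L)                            // Option[MailBatch]
//   MailBatch.findAllBy(sqls.eq(mb.deleteFlg, false))  // all non-deleted batches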
| yoshitakes/skinny-task-example | src/main/scala/model/MailBatch.scala | Scala | mit | 807 |
package com.github.tminglei.slickpg
import java.sql.{Date, Time, Timestamp}
import javax.xml.bind.DatatypeConverter
import java.util.{Calendar, TimeZone}
import org.postgresql.core.Provider
import slick.jdbc.{JdbcType, PostgresProfile}
trait PgDateSupport extends date.PgDateExtensions with utils.PgCommonJdbcTypes { driver: PostgresProfile =>
import driver.api._
/// alias
trait DateTimeImplicits extends SimpleDateTimeImplicits
trait SimpleDateTimeImplicits {
implicit val simpleIntervalTypeMapper: JdbcType[Interval] = new GenericJdbcType[Interval]("interval", Interval.apply, hasLiteralForm=false)
implicit val simpleTimestampTZTypeMapper: JdbcType[Calendar] = new GenericJdbcType[Calendar]("timestamptz",
PgDateSupportUtils.parseCalendar, DatatypeConverter.printDateTime, hasLiteralForm=false)
///
implicit def simpleDateColumnExtensionMethods(c: Rep[Date]) =
new DateColumnExtensionMethods[Date, Time, Timestamp, Interval, Date](c)
implicit def simpleDateOptColumnExtensionMethods(c: Rep[Option[Date]]) =
new DateColumnExtensionMethods[Date, Time, Timestamp, Interval, Option[Date]](c)
implicit def simpleTimeColumnExtensionMethods(c: Rep[Time]) =
new TimeColumnExtensionMethods[Date, Time, Timestamp, Calendar, Interval, Time](c)
implicit def simpleTimeOptColumnExtensionMethods(c: Rep[Option[Time]]) =
new TimeColumnExtensionMethods[Date, Time, Timestamp, Calendar, Interval, Option[Time]](c)
implicit def simpleTimestampColumnExtensionMethods(c: Rep[Timestamp]) =
new TimestampColumnExtensionMethods[Date, Time, Timestamp, Calendar, Interval, Timestamp](c)
implicit def simpleTimestampOptColumnExtensionMethods(c: Rep[Option[Timestamp]]) =
new TimestampColumnExtensionMethods[Date, Time, Timestamp, Calendar, Interval, Option[Timestamp]](c)
implicit def simpleIntervalColumnExtensionMethods(c: Rep[Interval]) =
new IntervalColumnExtensionMethods[Date, Time, Timestamp, Interval, Interval](c)
implicit def simpleIntervalOptColumnExtensionMethods(c: Rep[Option[Interval]]) =
new IntervalColumnExtensionMethods[Date, Time, Timestamp, Interval, Option[Interval]](c)
implicit def simpleTimestampTZColumnExtensionMethods(c: Rep[Calendar]) =
new TimestampColumnExtensionMethods[Date, Time, Calendar, Timestamp, Interval, Calendar](c)
implicit def simpleTimestampTZOptColumnExtensionMethods(c: Rep[Option[Calendar]]) =
new TimestampColumnExtensionMethods[Date, Time, Calendar, Timestamp, Interval, Option[Calendar]](c)
}
}
object PgDateSupportUtils {
import org.postgresql.jdbc.TimestampUtils
import java.lang.reflect.{Field, Method}
/** related codes hacked from [[org.postgresql.jdbc.TimestampUtils]] */
def parseCalendar(tsStr: String): Calendar = {
val parsedts = tsUtilLoadCalendar.invoke(tsUtilInstance, tsStr)
val (tz, era, year, month, day, hour, minute, second, nanos) = tsUtilGetters(parsedts)
val useCal: Calendar = if (tz.get(parsedts) == null) Calendar.getInstance() else tz.get(parsedts).asInstanceOf[Calendar]
useCal.set(Calendar.ERA, era.get(parsedts).asInstanceOf[Int])
useCal.set(Calendar.YEAR, year.get(parsedts).asInstanceOf[Int])
useCal.set(Calendar.MONTH, month.get(parsedts).asInstanceOf[Int] - 1)
useCal.set(Calendar.DAY_OF_MONTH, day.get(parsedts).asInstanceOf[Int])
useCal.set(Calendar.HOUR_OF_DAY, hour.get(parsedts).asInstanceOf[Int])
useCal.set(Calendar.MINUTE, minute.get(parsedts).asInstanceOf[Int])
useCal.set(Calendar.SECOND, second.get(parsedts).asInstanceOf[Int])
useCal.set(Calendar.MILLISECOND, nanos.get(parsedts).asInstanceOf[Int] / 1000)
useCal
}
//////////////////////////////////////////////////////////////////////
private val tsUtilInstanceHolder = new ThreadLocal[TimestampUtils]
private val tsUtilLoadCalendarHolder = new ThreadLocal[Method]
private val tsUtilParsedGettersHolder = new ThreadLocal[(Field, Field, Field, Field, Field, Field, Field, Field, Field)]
private def tsUtilInstance = {
import java.lang.Boolean.TRUE
if (tsUtilInstanceHolder.get() == null) {
val tsUtilConstructor = classOf[TimestampUtils].getDeclaredConstructor(classOf[Boolean], classOf[Provider[TimeZone]])
tsUtilConstructor.setAccessible(true)
tsUtilInstanceHolder.set(tsUtilConstructor.newInstance(TRUE, null))
}
tsUtilInstanceHolder.get()
}
private def tsUtilLoadCalendar = {
if (tsUtilLoadCalendarHolder.get() == null) {
val loadCalendar = classOf[TimestampUtils].getDeclaredMethods.find(_.getName == "parseBackendTimestamp").get
loadCalendar.setAccessible(true)
tsUtilLoadCalendarHolder.set(loadCalendar)
}
tsUtilLoadCalendarHolder.get()
}
private def tsUtilGetters(parsed: AnyRef) = {
def getField(clazz: Class[_], name: String) = {
val field = clazz.getDeclaredField(name)
field.setAccessible(true)
field
}
if (tsUtilParsedGettersHolder.get() == null) {
val clazz = parsed.getClass
val tz = getField(clazz, "tz")
val era = getField(clazz, "era")
val year = getField(clazz, "year")
val month = getField(clazz, "month")
val day = getField(clazz, "day")
val hour = getField(clazz, "hour")
val minute = getField(clazz, "minute")
val second = getField(clazz, "second")
val nanos = getField(clazz, "nanos")
tsUtilParsedGettersHolder.set((tz, era, year, month, day, hour, minute, second, nanos))
}
tsUtilParsedGettersHolder.get()
}
}
/**
* copy from [[org.postgresql.util.PGInterval]],
* should be more convenient to be used in scala environment
*/
import java.text.DecimalFormat
import org.postgresql.util.PGInterval
case class Interval(
years: Int,
months: Int,
days: Int,
hours: Int,
minutes: Int,
seconds: Double) {
def milliseconds: Int = (microseconds + (if (microseconds < 0) -500 else 500)) / 1000
def microseconds: Int = (seconds * 1000000.0).asInstanceOf[Int]
def +:(cal: Calendar): Calendar = {
cal.add(Calendar.MILLISECOND, milliseconds)
cal.add(Calendar.MINUTE, minutes)
cal.add(Calendar.HOUR, hours)
cal.add(Calendar.DAY_OF_MONTH, days)
cal.add(Calendar.MONTH, months)
cal.add(Calendar.YEAR, years)
cal
}
def +:(date: java.util.Date): java.util.Date = {
val cal = Calendar.getInstance
cal.setTime(date)
date.setTime((cal +: this).getTime.getTime)
date
}
def +(other: Interval): Interval = {
new Interval(
years + other.years,
months + other.months,
days + other.days,
hours + other.hours,
minutes + other.minutes,
seconds + other.seconds
)
}
def *(factor: Int): Interval = {
new Interval(
years * factor,
months * factor,
days * factor,
hours * factor,
minutes * factor,
seconds * factor
)
}
override def toString = {
val secs = Interval.secondsFormat.format(seconds)
""+years+" years "+months+" mons "+days+" days "+hours+" hours "+minutes+" mins "+secs+" secs"
}
}
object Interval {
private val secondsFormat = {
val format = new DecimalFormat("0.00####")
val dfs = format.getDecimalFormatSymbols()
dfs.setDecimalSeparator('.')
format.setDecimalFormatSymbols(dfs)
format
}
def apply(interval: String): Interval = fromPgInterval(new PGInterval(interval))
def fromPgInterval(interval: PGInterval): Interval = {
new Interval(
interval.getYears,
interval.getMonths,
interval.getDays,
interval.getHours,
interval.getMinutes,
interval.getSeconds
)
}
def toPgInterval(interval: Interval): PGInterval = {
new PGInterval(
interval.years,
interval.months,
interval.days,
interval.hours,
interval.minutes,
interval.seconds
)
}
}
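// Usage sketch for the Interval helpers above (illustrative only):
//   val twoDaysOneHour = Interval(0, 0, 2, 1, 0, 0.0)
//   twoDaysOneHour * 2                        // Interval(0, 0, 4, 2, 0, 0.0)
//   Calendar.getInstance() +: twoDaysOneHour  // the calendar shifted forward by 2 days and 1 hour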
| TimothyKlim/slick-pg | src/main/scala/com/github/tminglei/slickpg/PgDateSupport.scala | Scala | bsd-2-clause | 7,870 |
/**
* Copyright (C) 2012-2013 Vadim Bartko ([email protected]).
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* See file LICENSE.txt for License information.
*/
package com.nevilon.nomad.crawler
import java.io.InputStream
import com.nevilon.nomad.storage.graph.FileStorage
class ContentSaver(fileStorage: FileStorage) {
def saveContent(is: InputStream, url: String, contentType: String, urlId:String): String = {
fileStorage.saveStream(is, url, contentType, urlId) match {
case Some(fileId) => fileId
case None => {
throw new RuntimeException("Unable to save file")
}
}
}
}
| hudvin/nomad | src/main/scala/com/nevilon/nomad/crawler/ContentSaver.scala | Scala | gpl-2.0 | 840 |
/*
*
* Copyright (c) 2016 Sylvain Julmy
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; see the file COPYING. If not, write to the
* Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
package klughdl.core.model
import spinal.core._
case class Model(topLevel: Component) {
var diagrams: Set[Diagram] = Set()
generateDiagrams(topLevel)
diagrams.foreach(generateComponent)
diagrams.foreach(generatePort)
diagrams.foreach(generateConnection)
  override def toString: String = s"Model : " + diagrams.mkString("\n")
private def generateDiagrams(component: Component): Unit = {
diagrams += new Diagram(component.parent)
component.children.foreach(generateDiagrams)
}
private def generateComponent(diagram: Diagram): Model = {
// add all the children of the parent to the diagrams
diagram.foreachChildren(diagram.addComponents, topLevel)
    // add the input and output parent connections, unless this is the top level
if (diagram.parent != null)
diagram.addIoComponents(diagram.parent)
this
}
private def generatePort(diagram: Diagram): Unit = {
def generatePort(entry: (Component, KlugHDLComponent)): Unit = {
if (entry._1 != null) {
entry._1.getAllIo.foreach { bt =>
entry._2.addPort(Port(bt))
}
}
}
diagram.components.foreach(generatePort)
}
private def generateConnection(diagram: Diagram): Unit = {
    // Generate the input connections between sibling components
def parseInputConnection(component: Component): Unit = {
def parseInputs(node: Node): List[BaseType] = {
def inner(node: Node): List[BaseType] = node match {
case bt: BaseType =>
if (bt.isOutput) List(bt)
else List(bt) ::: node.getInputs.map(inner).foldLeft(List(): List[BaseType])(_ ::: _)
case null => List()
case _ => node.getInputs.map(inner).foldLeft(List(): List[BaseType])(_ ::: _)
}
inner(node).filter {
_ match {
case bt: BaseType => bt.isOutput
case _ => false
}
}
}
for {
io <- component.getAllIo
if io.isInput
input <- parseInputs(io)
} {
diagram.addConnection(input.component, Port(input), io.component, Port(io))
}
}
    // Generate the output connections between sibling components
def parseOutputConnection(component: Component): Unit = {
def parseConsumers(node: Node): List[BaseType] = {
def inner(node: Node): List[BaseType] = node match {
case bt: BaseType => if (bt.isInput) List(bt) else List(bt) ::: node.consumers.map(inner).foldLeft(List(): List[BaseType])(
_ ::: _)
case null => List()
case _ => node.consumers.map(inner).foldLeft(List(): List[BaseType])(_ ::: _)
}
inner(node).filter {
_ match {
case bt: BaseType => bt.isInput
case _ => false
}
}
}
for {
io <- component.getAllIo
if io.isInput
consumer <- parseConsumers(io)
} {
diagram.addConnection(io.component, Port(io), consumer.component, Port(consumer))
}
}
// Generate the connection with the parent
def parseParentConnection(component: Component): Unit = {
def parseInputParentConnection(component: Component): Unit = {
if (component.parent != null) {
val con = for {
io_p <- component.parent.getAllIo
io <- component.getAllIo
if io.getInputs.contains(io_p)
} yield (io_p.component, Port(io_p), io.component, Port(io))
con.foreach(diagram.addConnection)
}
}
def parseOutputParentConnection(component: Component): Unit = {
if (component.parent != null) {
val con = for {
io_p <- component.parent.getAllIo
io <- getInputs(io_p)
if io != null
} yield (io.component, Port(io), io_p.component, Port(io_p))
con.foreach(diagram.addConnection)
}
}
parseInputParentConnection(component)
parseOutputParentConnection(component)
}
diagram.components.keys.foreach { c =>
if (c != null) {
parseInputConnection(c)
parseOutputConnection(c)
parseParentConnection(c)
}
}
}
private def getInputs(bt: BaseType): List[BaseType] = {
val comp = bt.component
def inner(n: Node, acc: List[Node]): List[Node] = {
if (n == null) acc
else if (n.component != comp) {
acc
}
else if (n.getInputsCount > 1) {
n.getInputs.flatMap(n => inner(n, Nil)).toList
}
else {
inner(n.getInputs.next(), n :: acc)
}
}
inner(bt, Nil).map(_.getInputs.next().asInstanceOf[BaseType])
}
}
| SnipyJulmy/MSE_1617_PA | KlugHDL/src/main/scala/klughdl/core/model/Model.scala | Scala | gpl-2.0 | 5,413 |
package org.jetbrains.plugins.scala
package lang
package completion
import com.intellij.codeInsight.completion.{CompletionParameters, PrefixMatcher}
import com.intellij.openapi.util.Key
import com.intellij.psi._
import org.jetbrains.plugins.scala.lang.lexer._
import org.jetbrains.plugins.scala.lang.parser._
import org.jetbrains.plugins.scala.lang.psi._
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReferenceElement
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScCaseClause
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.packaging._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.templates._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
import org.jetbrains.plugins.scala.lang.psi.types.ScType
import org.jetbrains.plugins.scala.lang.refactoring.namesSuggester.NameSuggester
import scala.collection.mutable.ArrayBuffer
/**
* User: Alexander Podkhalyuzin
* Date: 21.05.2008.
*/
object ScalaCompletionUtil {
val PREFIX_COMPLETION_KEY: Key[Boolean] = Key.create("prefix.completion.key")
def completeThis(ref: ScReferenceExpression): Boolean = {
ref.qualifier match {
case Some(_) => false
case None =>
ref.getParent match {
case inf: ScInfixExpr if inf.operation == ref => false
case postf: ScPostfixExpr if postf.operation == ref => false
case pref: ScPrefixExpr if pref.operation == ref => false
case _ => true
}
}
}
def shouldRunClassNameCompletion(dummyPosition: PsiElement, parameters: CompletionParameters, prefixMatcher: PrefixMatcher,
checkInvocationCount: Boolean = true, lookingForAnnotations: Boolean = false): Boolean = {
if (checkInvocationCount && parameters.getInvocationCount < 2) return false
if (dummyPosition.getNode.getElementType == ScalaTokenTypes.tIDENTIFIER) {
dummyPosition.getParent match {
case ref: ScReferenceElement if ref.qualifier.isDefined => return false
case _ =>
}
}
if (checkInvocationCount && parameters.getInvocationCount >= 2) return true
val prefix = prefixMatcher.getPrefix
val capitalized = prefix.length() > 0 && prefix.substring(0, 1).capitalize == prefix.substring(0, 1)
capitalized || lookingForAnnotations
}
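  // Illustrative sketch of the text produced by generateAnonymousFunctionText below
  // (the actual parameter names come from NameSuggester, so they may differ):
  //   braceArgs = true,  params = Seq(Int, String)  ->  "case (i: Int, s: String) =>"
  //   braceArgs = true,  params = Seq(Int)          ->  "case i: Int =>"
  //   braceArgs = false, params = Seq(Int)          ->  "(i: Int) =>"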
def generateAnonymousFunctionText(braceArgs: Boolean, params: scala.Seq[ScType], canonical: Boolean,
withoutEnd: Boolean = false, arrowText: String = "=>"): String = {
val text = new StringBuilder()
if (braceArgs) text.append("case ")
val paramNamesWithTypes = new ArrayBuffer[(String, ScType)]
def contains(name: String): Boolean = {
paramNamesWithTypes.exists{
case (s, _) => s == name
}
}
for (param <- params) {
val names = NameSuggester.suggestNamesByType(param)
var name = if (names.length == 0) "x" else names(0)
if (contains(name)) {
var count = 0
var newName = name + count
while (contains(newName)) {
count += 1
newName = name + count
}
name = newName
}
paramNamesWithTypes.+=(name -> param)
}
val iter = paramNamesWithTypes.map {
case (s, tp) => s + ": " + (if (canonical) {
ScType.canonicalText(tp)
} else ScType.presentableText(tp))
}
val paramsString =
if (paramNamesWithTypes.size != 1 || !braceArgs) iter.mkString("(", ", ", ")")
else iter.head
text.append(paramsString)
if (!withoutEnd) text.append(" ").append(arrowText)
text.toString()
}
def getLeafByOffset(offset: Int, element: PsiElement): PsiElement = {
if (offset < 0) {
return null
}
var candidate: PsiElement = element.getContainingFile
if (candidate == null || candidate.getNode == null) return null
while (candidate.getNode.getChildren(null).length > 0) {
candidate = candidate.findElementAt(offset)
if (candidate == null || candidate.getNode == null) return null
}
candidate
}
/**
   * The first return value indicates whether to stop here.
   * The second return value is the result to use when the first one is true.
*/
def getForAll(parent: PsiElement, leaf: PsiElement): (Boolean, Boolean) = {
parent match {
case _: ScalaFile =>
if (leaf.getNextSibling != null && leaf.getNextSibling.getNextSibling.isInstanceOf[ScPackaging] &&
leaf.getNextSibling.getNextSibling.getText.indexOf('{') == -1)
return (true, false)
case _ =>
}
parent match {
case _: ScalaFile | _: ScPackaging =>
var node = leaf.getPrevSibling
if (node.isInstanceOf[PsiWhiteSpace]) node = node.getPrevSibling
node match {
case x: PsiErrorElement =>
val s = ErrMsg("wrong.top.statment.declaration")
x.getErrorDescription match {
case `s` => return (true, true)
case _ => return (true, false)
}
case _ => return (true, true)
}
case expr: ScReferenceExpression =>
parent.getParent match {
case _: ScBlockExpr | _: ScTemplateBody | _: ScBlock | _: ScCaseClause =>
if (awful(parent, leaf))
return (true, true)
case _ =>
}
case _ =>
}
(false, true)
}
def awful(parent: PsiElement, leaf: PsiElement): Boolean = {
(leaf.getPrevSibling == null || leaf.getPrevSibling.getPrevSibling == null ||
leaf.getPrevSibling.getPrevSibling.getNode.getElementType != ScalaTokenTypes.kDEF) &&
(parent.getPrevSibling == null || parent.getPrevSibling.getPrevSibling == null ||
(parent.getPrevSibling.getPrevSibling.getNode.getElementType != ScalaElementTypes.MATCH_STMT ||
!parent.getPrevSibling.getPrevSibling.getLastChild.isInstanceOf[PsiErrorElement]))
}
val DUMMY_IDENTIFIER = "IntellijIdeaRulezzz"
def checkClassWith(clazz: ScTypeDefinition, additionText: String, manager: PsiManager): Boolean = {
val classText: String = clazz.getText
val text = removeDummy(classText + " " + additionText)
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, text).asInstanceOf[ScalaFile]
!checkErrors(dummyFile)
}
def checkElseWith(text: String, manager: PsiManager): Boolean = {
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, "class a {\\n" + text + "\\n}").asInstanceOf[ScalaFile]
!checkErrors(dummyFile)
}
def checkDoWith(text: String, manager: PsiManager): Boolean = {
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, "class a {\\n" + text + "\\n}").asInstanceOf[ScalaFile]
!checkErrors(dummyFile)
}
def checkTypeWith(typez: ScTypeElement, additionText: String, manager: PsiManager): Boolean = {
val typeText = typez.getText
val text = removeDummy("class a { x:" + typeText + " " + additionText + "}")
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, text).asInstanceOf[ScalaFile]
val value = !checkErrors(dummyFile)
value
}
def checkAnyTypeWith(typez: ScTypeElement, additionText: String, manager: PsiManager): Boolean = {
val typeText = typez.getText
val text = removeDummy("class a { val x:" + typeText + " " + additionText + "}")
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, text).asInstanceOf[ScalaFile]
val value = !checkErrors(dummyFile)
value
}
def checkAnyWith(typez: PsiElement, additionText: String, manager: PsiManager): Boolean = {
val typeText = typez.getText
val text = removeDummy("class a { " + typeText + " " + additionText + "}")
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, text).asInstanceOf[ScalaFile]
!checkErrors(dummyFile)
}
def removeDummy(text: String): String = {
replaceDummy(text, "")
}
def replaceDummy(text: String, to: String): String = {
if (text.indexOf(DUMMY_IDENTIFIER) != -1) {
      text.replaceAll("\\w*" + DUMMY_IDENTIFIER, to)
} else text
}
def checkNewWith(news: ScNewTemplateDefinition, additionText: String, manager: PsiManager): Boolean = {
val newsText = news.getText
val text = removeDummy("class a { " + newsText + " " + additionText + "}")
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, text).asInstanceOf[ScalaFile]
!checkErrors(dummyFile)
}
def checkReplace(elem: PsiElement, additionText: String, manager: PsiManager): Boolean = {
val typeText = elem.getText
var text = "class a { " + typeText + "}"
if (text.indexOf(DUMMY_IDENTIFIER) == -1) return false
text = replaceDummy(text, " "+ additionText+ " ")
val DUMMY = "dummy."
val dummyFile = PsiFileFactory.getInstance(manager.getProject).
createFileFromText(DUMMY + ScalaFileType.SCALA_FILE_TYPE.getDefaultExtension,
ScalaFileType.SCALA_FILE_TYPE, text).asInstanceOf[ScalaFile]
!checkErrors(dummyFile)
}
private def checkErrors(elem: PsiElement): Boolean = {
elem match {
case _: PsiErrorElement => return true
case _ =>
}
val iterator = elem.getChildren.iterator
while (iterator.hasNext) {
val child = iterator.next()
if (checkErrors(child)) return true
}
false
}
/**
* @param leaf Start PsiElement
* @return (End PsiElement, ContainingFile.isScriptFile)
*/
def processPsiLeafForFilter(leaf: PsiElement): (PsiElement, Boolean) = Option(leaf) map {
l => l.getContainingFile match {
case scriptFile: ScalaFile if scriptFile.isScriptFile() => (leaf.getParent, true)
case scalaFile: ScalaFile => (leaf, false)
case _ => (null, false)
}
} getOrElse (null, false)
} | JetBrains/intellij-scala-historical | src/org/jetbrains/plugins/scala/lang/completion/ScalaCompletionUtil.scala | Scala | apache-2.0 | 10,897 |
package uk.gov.dvla.vehicles.presentation.common
import play.api.Play
object ConfigProperties {
implicit val stringProp = (property: String) => Play.current.configuration.getString(property)
implicit val intProp = (property: String) => Play.current.configuration.getInt(property)
implicit val booleanProp = (property: String) => Play.current.configuration.getBoolean(property)
implicit val longProp = (property: String) => Play.current.configuration.getLong(property)
implicit val listStringProp = (property: String) => Play.current.configuration.getStringList(property)
implicit val listIntProp = (property: String) => Play.current.configuration.getIntList(property)
/**
   * Returns a property, or throws a RuntimeException if the property doesn't exist.
   * This accessor is intended for mandatory properties, for which a missing value is fatal.
   * As an improvement, the result could be wrapped in a Try.
*/
def getProperty[T](property: String)(implicit propertyGetter: String => Option[T]): T =
getOptionalProperty[T](property).getOrElse(error(property))
def getDurationProperty(property: String): Long =
Play.current.configuration.getMilliseconds(property).getOrElse(error(property))
def getOptionalDurationProperty(property: String): Option[Long] =
Play.current.configuration.getMilliseconds(property)
private def error(property: String) = {
throw new RuntimeException(s"Property with name $property was not found. Try adding this property to application.conf file")
}
/**
* Returns an optional property.
*/
def getOptionalProperty[T](property: String)(implicit propertyGetter: String => Option[T]): Option[T] =
propertyGetter(property)
/**
   * Helper method that maps the java.util.List returned by the underlying configuration to a Scala List.
   * By default all lists in the properties are exposed as Java lists.
*/
def getStringListProperty(property: String): Option[List[String]] = {
    import collection.JavaConversions._ // configuration.getStringList returns a Java list but we need a Scala list,
                                         // so import the conversions here and convert the list below to a Scala list
getOptionalProperty[java.util.List[String]](property).map(_.toList)
}
def getIntListProperty(property: String): Option[List[Int]] = {
import collection.JavaConverters._
getOptionalProperty[java.util.List[Integer]](property).map(_.asScala.toList.map(_.intValue))
}
}
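// Usage sketch (the property names below are assumptions, not part of this object):
//   import uk.gov.dvla.vehicles.presentation.common.ConfigProperties._
//   val baseUrl: String      = getProperty[String]("ms.baseUrl")           // throws if missing
//   val timeout: Option[Int] = getOptionalProperty[Int]("ms.requestTimeout")
//   val codes: List[String]  = getStringListProperty("prototype.codes").getOrElse(Nil)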
| dvla/vehicles-presentation-common | app/uk/gov/dvla/vehicles/presentation/common/ConfigProperties.scala | Scala | mit | 2,373 |
import org.apache.spark.sql._
import Main.{spark, logger}
object Converter {
def parquetFileName(tableName:String) = s"$tableName.parquet"
def write(df: DataFrame, tableName: String) = {
df.write
.mode(SaveMode.Overwrite)
.parquet(parquetFileName(tableName))
}
}
class Converter(conf: AppConfig) {
def folder : String = conf.sourcePath
def tblFileName(tableName:String) = s"$folder/$tableName*.tbl*"
def readFile(table: Table): DataFrame = {
spark.read
.schema(table.structure)
.option("delimiter", "|")
.csv(tblFileName(table.name))
}
def read(tableName: String) : DataFrame = {
TPCHTables.byName.get(tableName) match {
case None => throw new Exception("No such table: " ++ tableName)
case Some(table) => readFile(table)
}
}
def convert(tableName: String) = {
logger.info(s"Converting $tableName in $folder...")
Converter.write(read(tableName), tableName)
}
}
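// Usage sketch (illustrative; assumes an AppConfig whose sourcePath contains TPC-H *.tbl files
// and that "lineitem" is one of the tables registered in TPCHTables.byName):
//   val converter = new Converter(conf)
//   converter.convert("lineitem")   // reads lineitem*.tbl* and writes lineitem.parquet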
| lovasoa/spark-joinapp | src/main/scala/Convert.scala | Scala | mit | 949 |
package au.com.agiledigital.toolform.model
import org.scalacheck.{Arbitrary, Gen}
import org.scalatest.{FlatSpec, Matchers}
import org.scalacheck.Prop.forAll
class PortMappingTest extends FlatSpec with Matchers {
private implicit final val ArbitraryProtocol: Arbitrary[PortProtocolType] = Arbitrary(Gen.oneOf(PortProtocolType.Tcp, PortProtocolType.Udp))
"parsePortMappingFromConfigString" should "parse a valid port string" in {
val validStrings = List(
"3000",
"8000:8000",
"49100:22",
"9000/udp",
"9000:80/tcp",
"6060:6060/udp"
)
noException should be thrownBy {
validStrings.foreach(PortMapping.parsePortMappingFromConfigString)
}
}
"parsePortMappingFromConfigString" should "throw an exception for an invalid port string" in {
      // Ranges and IP addresses are accepted by some tools, such as Docker Compose, but not by others
      // such as Kubernetes. To keep things simple for now they are specified as invalid.
val invalidStrings = List(
"3000-3005",
"9090-9091:8080-8081",
"127.0.0.1:8001:8001",
"127.0.0.1:5000-5010:5000-5010",
"10/xdp",
"80/https",
"80:8080/https",
"80:/udp"
)
invalidStrings.foreach {
PortMapping.parsePortMappingFromConfigString(_).isLeft should equal(true)
}
}
"parsePortMappingFromConfigString" should "handle partial matching" in {
PortMapping.parsePortMappingFromConfigString("80") should matchPattern { case Right(PortMapping(80, 80, PortProtocolType.Tcp)) => }
PortMapping.parsePortMappingFromConfigString("80:8080") should matchPattern { case Right(PortMapping(80, 8080, PortProtocolType.Tcp)) => }
PortMapping.parsePortMappingFromConfigString("80:8080/udp") should matchPattern { case Right(PortMapping(80, 8080, PortProtocolType.Udp)) => }
PortMapping.parsePortMappingFromConfigString("80/udp") should matchPattern { case Right(PortMapping(80, 80, PortProtocolType.Udp)) => }
PortMapping.parsePortMappingFromConfigString("80/tcp") should matchPattern { case Right(PortMapping(80, 80, PortProtocolType.Tcp)) => }
}
"parsePortMappingFromConfigString" should "always be able to parse its own output" in {
forAll { (port: Int, containerPort: Int, protocol: PortProtocolType) =>
val originalPortMapping = PortMapping(port, containerPort, protocol)
val parsedPortMappingResult = PortMapping.parsePortMappingFromConfigString(originalPortMapping.toPortString)
parsedPortMappingResult match {
case Right(parsedPortMapping) => originalPortMapping == parsedPortMapping
case Left(_) => false
}
}
}
"parsePortMappingFromConfigString" should "always output the same string that is parsed in" in {
forAll { (port: Int, containerPort: Int, protocol: PortProtocolType) =>
val protocolLower = protocol.toString.toLowerCase
val originalPortString = s"$port:$containerPort/$protocolLower"
val portMappingResult = PortMapping.parsePortMappingFromConfigString(originalPortString)
portMappingResult match {
case Right(parsedPortMapping) => originalPortString == parsedPortMapping.toPortString
case Left(_) => false
}
}
}
}
| agiledigital/toolform | src/test/scala/au/com/agiledigital/toolform/model/PortMappingTest.scala | Scala | apache-2.0 | 3,287 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution
/** A small toolkit of classes that support compare-and-swap semantics for safe mutation of variables.
*
* On top of the JVM, this means dealing with lock-free thread-safe programming. Also works on top of Javascript,
* with Scala.js, for API compatibility purposes and because it's a useful way to box a value.
*
* The backbone of Atomic references is this method:
* {{{
* def compareAndSet(expect: T, update: T): Boolean
* }}}
*
* This method atomically sets a variable to the `update` value if it currently holds
* the `expect` value, reporting `true` on success or `false` on failure. The classes in this package
* also contain methods to get and unconditionally set values.
*
* Building a reference is easy with the provided constructor, which will automatically
* return the most specific type needed (in the following sample, that's an `AtomicDouble`,
* inheriting from `AtomicNumber[A]`):
* {{{
* val atomicNumber = Atomic(12.2)
*
* atomicNumber.incrementAndGet()
* // => 13.2
* }}}
*
* These also provide useful helpers for atomically mutating of values
* (i.e. `transform`, `transformAndGet`, `getAndTransform`, etc...) or of numbers of any kind
* (`incrementAndGet`, `getAndAdd`, etc...).
*/
package object atomic
| alexandru/monifu | monix-execution/js/src/main/scala/monix/execution/atomic/package.scala | Scala | apache-2.0 | 1,987 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher
import com.sun.net.httpserver.{HttpExchange, HttpHandler, HttpServer}
import java.net.{InetAddress, InetSocketAddress}
import java.io.IOException
import java.util.concurrent.Executors
import scala.collection.mutable
import scala.collection.mutable.HashMap
trait HttpServerTestSupport {
def boundInfo: InetSocketAddress
def start()
def stop()
}
class HttpServerTestSupportBuilder {
val ASK_OS_TO_PROVIDE_A_PORT = 0
private var port = ASK_OS_TO_PROVIDE_A_PORT
private var allowedMethods: Set[String] = Set()
private val mapping = new mutable.HashMap[String, (HttpExchange => Unit)]()
private val filters = new mutable.HashMap[String, (HttpExchange => Boolean)]()
private val transformations = new mutable.HashMap[String, (HttpExchange => HttpExchange)]()
def withPort(newPort: Int) {
assert(newPort >= 0 && newPort < 65536)
port = newPort
}
def onPathReplyWithData(path: String, data: Array[Byte]) {
assert(path != null && !path.isEmpty)
assert(data != null)
allowedMethods = allowedMethods + HttpReplyer.GET
mapping(path) = HttpReplyer.sendResponse(data)
}
def onPathRedirectTo(path: String, redirectTo: String) {
assert(path != null && !path.isEmpty)
assert(redirectTo != null && !redirectTo.isEmpty)
allowedMethods = allowedMethods + HttpReplyer.GET
mapping(path) = HttpReplyer.sendRedirect(redirectTo)
}
def onPathReplyOnlyWhen(path: String, predicate: HttpExchange => Boolean) {
assert(path != null && !path.isEmpty)
assert(mapping.contains(path))
filters(path) = predicate
}
def onPathTransformResponse(path: String, transformation: HttpExchange => HttpExchange) {
assert(path != null && !path.isEmpty)
assert(mapping.contains(path))
transformations(path) = transformation
}
def build(): HttpServerTestSupport = {
new HttpServerTestSupportImpl(port, allowedMethods, mapping.toMap, filters.toMap, transformations.toMap)
}
private class HttpServerTestSupportImpl(port: Int, allowedMethods: Set[String],
mapping: Map[String, (HttpExchange => Unit)],
filters: Map[String, (HttpExchange => Boolean)],
transformations: Map[String, (HttpExchange => HttpExchange)])
extends HttpServerTestSupport {
private var optServer: Option[HttpServer] = None
private def provideServer = {
try {
val address = new InetSocketAddress(InetAddress.getLoopbackAddress, port)
HttpServer.create(address, 0)
} catch {
case (ex: IOException) =>
throw new IllegalStateException("Error in creating and/or binding the server.", ex)
}
}
def boundInfo = optServer.get.getAddress
def start {
optServer = Some(provideServer)
val server = optServer.get
server.createContext("/", new HttpHandler {
def handle(exchange: HttpExchange) {
if (!allowedMethods.contains(exchange.getRequestMethod)) {
HttpReplyer.sendMethodNotAllowed(exchange)
return
}
val path = exchange.getRequestURI.getPath
if (mapping.contains(path)) {
if (filters.getOrElse(path, {_: HttpExchange => true})(exchange)) {
val reply = transformations.getOrElse(path, identity[HttpExchange](_))(exchange)
              mapping(path)(reply)
              return
            }
          }
HttpReplyer.sendNotFound(exchange)
}
})
server.setExecutor(Executors.newFixedThreadPool(1))
server.start()
}
def stop() {
optServer.foreach(server => server.stop(0))
}
}
private object HttpReplyer {
private val NO_DATA = Array[Byte]()
val GET = "GET"
val POST = "POST"
def sendResponse(data: Array[Byte])(exchange: HttpExchange) {
sendResponse(200, data)(exchange)
}
def sendRedirect(location: String)(exchange: HttpExchange) {
exchange.getResponseHeaders.set("Location", location)
sendResponse(307, NO_DATA)(exchange)
}
def sendNotFound(exchange: HttpExchange) {
sendResponse(404, NO_DATA)(exchange)
}
def sendMethodNotAllowed(exchange: HttpExchange) {
sendResponse(405, NO_DATA)(exchange)
}
def sendBadRequest(exchange: HttpExchange) {
sendResponse(400, NO_DATA)(exchange)
}
def sendPermissionDenied(exchange: HttpExchange) {
sendResponse(403, NO_DATA)(exchange)
}
private def sendResponse(statusCode: Int, data: Array[Byte])(exchange: HttpExchange) {
exchange.sendResponseHeaders(statusCode, data.length)
val os = exchange.getResponseBody
try {
os.write(data)
} finally {
os.close()
}
}
}
}
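// Usage sketch (illustrative): configure the builder, then drive the resulting server in a test.
//   val builder = new HttpServerTestSupportBuilder
//   builder.onPathReplyWithData("/resource", "hello".getBytes)
//   val server = builder.build()
//   server.start()
//   // ... issue GET requests against server.boundInfo, then ...
//   server.stop()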
object HttpServerTestSupport {
def hasCookie(cookie: String)(exchange: HttpExchange) = {
val cookieString = Option(exchange.getRequestHeaders.getFirst("Cookie"))
    cookieString.exists(header => header.split(";").map(_.trim).contains(cookie))
}
def setCookie(cookie: String)(exchange: HttpExchange) = {
exchange.getResponseHeaders.set("Set-Cookie", cookie)
exchange
}
}
| HuangLS/neo4j | community/cypher/cypher/src/test/scala/org/neo4j/cypher/HttpServerTestSupport.scala | Scala | apache-2.0 | 5,939 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.producer.async
import kafka.common._
import kafka.message.{NoCompressionCodec, Message, ByteBufferMessageSet}
import kafka.producer._
import kafka.serializer.Encoder
import kafka.utils.{Utils, Logging, SystemTime}
import scala.util.Random
import scala.collection.{Seq, Map}
import scala.collection.mutable.{ArrayBuffer, HashMap, Set}
import java.util.concurrent.atomic._
import kafka.api.{TopicMetadata, ProducerRequest}
class DefaultEventHandler[K,V](config: ProducerConfig,
private val partitioner: Partitioner,
private val encoder: Encoder[V],
private val keyEncoder: Encoder[K],
private val producerPool: ProducerPool,
private val topicPartitionInfos: HashMap[String, TopicMetadata] = new HashMap[String, TopicMetadata])
extends EventHandler[K,V] with Logging {
val isSync = ("sync" == config.producerType)
val correlationId = new AtomicInteger(0)
val brokerPartitionInfo = new BrokerPartitionInfo(config, producerPool, topicPartitionInfos)
private val topicMetadataRefreshInterval = config.topicMetadataRefreshIntervalMs
private var lastTopicMetadataRefreshTime = 0L
private val topicMetadataToRefresh = Set.empty[String]
private val sendPartitionPerTopicCache = HashMap.empty[String, Int]
private val producerStats = ProducerStatsRegistry.getProducerStats(config.clientId)
private val producerTopicStats = ProducerTopicStatsRegistry.getProducerTopicStats(config.clientId)
def handle(events: Seq[KeyedMessage[K,V]]) {
val serializedData = serialize(events)
serializedData.foreach {
keyed =>
val dataSize = keyed.message.payloadSize
producerTopicStats.getProducerTopicStats(keyed.topic).byteRate.mark(dataSize)
producerTopicStats.getProducerAllTopicsStats.byteRate.mark(dataSize)
}
var outstandingProduceRequests = serializedData
var remainingRetries = config.messageSendMaxRetries + 1
val correlationIdStart = correlationId.get()
debug("Handling %d events".format(events.size))
while (remainingRetries > 0 && outstandingProduceRequests.size > 0) {
topicMetadataToRefresh ++= outstandingProduceRequests.map(_.topic)
if (topicMetadataRefreshInterval >= 0 &&
SystemTime.milliseconds - lastTopicMetadataRefreshTime > topicMetadataRefreshInterval) {
Utils.swallowError(brokerPartitionInfo.updateInfo(topicMetadataToRefresh.toSet, correlationId.getAndIncrement))
sendPartitionPerTopicCache.clear()
topicMetadataToRefresh.clear
lastTopicMetadataRefreshTime = SystemTime.milliseconds
}
outstandingProduceRequests = dispatchSerializedData(outstandingProduceRequests)
if (outstandingProduceRequests.size > 0) {
info("Back off for %d ms before retrying send. Remaining retries = %d".format(config.retryBackoffMs, remainingRetries-1))
// back off and update the topic metadata cache before attempting another send operation
Thread.sleep(config.retryBackoffMs)
// get topics of the outstanding produce requests and refresh metadata for those
Utils.swallowError(brokerPartitionInfo.updateInfo(outstandingProduceRequests.map(_.topic).toSet, correlationId.getAndIncrement))
sendPartitionPerTopicCache.clear()
remainingRetries -= 1
producerStats.resendRate.mark()
}
}
if(outstandingProduceRequests.size > 0) {
producerStats.failedSendRate.mark()
val correlationIdEnd = correlationId.get()
error("Failed to send requests for topics %s with correlation ids in [%d,%d]"
.format(outstandingProduceRequests.map(_.topic).toSet.mkString(","),
correlationIdStart, correlationIdEnd-1))
throw new FailedToSendMessageException("Failed to send messages after " + config.messageSendMaxRetries + " tries.", null)
}
}
private def dispatchSerializedData(messages: Seq[KeyedMessage[K,Message]]): Seq[KeyedMessage[K, Message]] = {
val partitionedDataOpt = partitionAndCollate(messages)
partitionedDataOpt match {
case Some(partitionedData) =>
val failedProduceRequests = new ArrayBuffer[KeyedMessage[K,Message]]
try {
for ((brokerid, messagesPerBrokerMap) <- partitionedData) {
if (logger.isTraceEnabled)
messagesPerBrokerMap.foreach(partitionAndEvent =>
trace("Handling event for Topic: %s, Broker: %d, Partitions: %s".format(partitionAndEvent._1, brokerid, partitionAndEvent._2)))
val messageSetPerBroker = groupMessagesToSet(messagesPerBrokerMap)
val failedTopicPartitions = send(brokerid, messageSetPerBroker)
failedTopicPartitions.foreach(topicPartition => {
messagesPerBrokerMap.get(topicPartition) match {
case Some(data) => failedProduceRequests.appendAll(data)
case None => // nothing
}
})
}
} catch {
case t: Throwable => error("Failed to send messages", t)
}
failedProduceRequests
case None => // all produce requests failed
messages
}
}
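  // Converts each message's key and value to bytes using the configured encoders.
  // In sync mode serialization errors are rethrown; in async mode they are only logged.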
def serialize(events: Seq[KeyedMessage[K,V]]): Seq[KeyedMessage[K,Message]] = {
val serializedMessages = new ArrayBuffer[KeyedMessage[K,Message]](events.size)
    events.foreach { e =>
try {
if(e.hasKey)
serializedMessages += new KeyedMessage[K,Message](topic = e.topic, key = e.key, partKey = e.partKey, message = new Message(key = keyEncoder.toBytes(e.key), bytes = encoder.toBytes(e.message)))
else
serializedMessages += new KeyedMessage[K,Message](topic = e.topic, key = e.key, partKey = e.partKey, message = new Message(bytes = encoder.toBytes(e.message)))
} catch {
case t: Throwable =>
producerStats.serializationErrorRate.mark()
if (isSync) {
throw t
} else {
// currently, if in async mode, we just log the serialization error. We need to revisit
// this when doing kafka-496
error("Error serializing message for topic %s".format(e.topic), t)
}
}
}
serializedMessages
}
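  // Groups messages first by leader broker id and then by topic-partition.
  // Returns None on recoverable metadata errors so that the whole batch can be retried.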
def partitionAndCollate(messages: Seq[KeyedMessage[K,Message]]): Option[Map[Int, collection.mutable.Map[TopicAndPartition, Seq[KeyedMessage[K,Message]]]]] = {
val ret = new HashMap[Int, collection.mutable.Map[TopicAndPartition, Seq[KeyedMessage[K,Message]]]]
try {
for (message <- messages) {
val topicPartitionsList = getPartitionListForTopic(message)
val partitionIndex = getPartition(message.topic, message.partitionKey, topicPartitionsList)
val brokerPartition = topicPartitionsList(partitionIndex)
// postpone the failure until the send operation, so that requests for other brokers are handled correctly
val leaderBrokerId = brokerPartition.leaderBrokerIdOpt.getOrElse(-1)
var dataPerBroker: HashMap[TopicAndPartition, Seq[KeyedMessage[K,Message]]] = null
ret.get(leaderBrokerId) match {
case Some(element) =>
dataPerBroker = element.asInstanceOf[HashMap[TopicAndPartition, Seq[KeyedMessage[K,Message]]]]
case None =>
dataPerBroker = new HashMap[TopicAndPartition, Seq[KeyedMessage[K,Message]]]
ret.put(leaderBrokerId, dataPerBroker)
}
val topicAndPartition = TopicAndPartition(message.topic, brokerPartition.partitionId)
var dataPerTopicPartition: ArrayBuffer[KeyedMessage[K,Message]] = null
dataPerBroker.get(topicAndPartition) match {
case Some(element) =>
dataPerTopicPartition = element.asInstanceOf[ArrayBuffer[KeyedMessage[K,Message]]]
case None =>
dataPerTopicPartition = new ArrayBuffer[KeyedMessage[K,Message]]
dataPerBroker.put(topicAndPartition, dataPerTopicPartition)
}
dataPerTopicPartition.append(message)
}
Some(ret)
    } catch { // Swallow recoverable exceptions and return None so that they can be retried.
case ute: UnknownTopicOrPartitionException => warn("Failed to collate messages by topic,partition due to: " + ute.getMessage); None
case lnae: LeaderNotAvailableException => warn("Failed to collate messages by topic,partition due to: " + lnae.getMessage); None
case oe: Throwable => error("Failed to collate messages by topic, partition due to: " + oe.getMessage); None
}
}
private def getPartitionListForTopic(m: KeyedMessage[K,Message]): Seq[PartitionAndLeader] = {
val topicPartitionsList = brokerPartitionInfo.getBrokerPartitionInfo(m.topic, correlationId.getAndIncrement)
debug("Broker partitions registered for topic: %s are %s"
.format(m.topic, topicPartitionsList.map(p => p.partitionId).mkString(",")))
val totalNumPartitions = topicPartitionsList.length
if(totalNumPartitions == 0)
throw new NoBrokersForPartitionException("Partition key = " + m.key)
topicPartitionsList
}
/**
* Retrieves the partition id and throws an UnknownTopicOrPartitionException if
* the value of partition is not between 0 and numPartitions-1
* @param topic The topic
* @param key the partition key
* @param topicPartitionList the list of available partitions
* @return the partition id
*/
private def getPartition(topic: String, key: Any, topicPartitionList: Seq[PartitionAndLeader]): Int = {
val numPartitions = topicPartitionList.size
if(numPartitions <= 0)
throw new UnknownTopicOrPartitionException("Topic " + topic + " doesn't exist")
val partition =
if(key == null) {
// If the key is null, we don't really need a partitioner
// So we look up in the send partition cache for the topic to decide the target partition
val id = sendPartitionPerTopicCache.get(topic)
id match {
case Some(partitionId) =>
// directly return the partitionId without checking availability of the leader,
// since we want to postpone the failure until the send operation anyways
partitionId
case None =>
val availablePartitions = topicPartitionList.filter(_.leaderBrokerIdOpt.isDefined)
if (availablePartitions.isEmpty)
throw new LeaderNotAvailableException("No leader for any partition in topic " + topic)
val index = Utils.abs(Random.nextInt) % availablePartitions.size
val partitionId = availablePartitions(index).partitionId
sendPartitionPerTopicCache.put(topic, partitionId)
partitionId
}
} else
partitioner.partition(key, numPartitions)
if(partition < 0 || partition >= numPartitions)
throw new UnknownTopicOrPartitionException("Invalid partition id: " + partition + " for topic " + topic +
"; Valid values are in the inclusive range of [0, " + (numPartitions-1) + "]")
trace("Assigning message of topic %s and key %s to a selected partition %d".format(topic, if (key == null) "[none]" else key.toString, partition))
partition
}
/**
* Constructs and sends the produce request based on a map from (topic, partition) -> messages
*
* @param brokerId the broker that will receive the request
* @param messagesPerTopic the messages as a map from (topic, partition) -> messages
* @return the set (topic, partitions) messages which incurred an error sending or processing
*/
private def send(brokerId: Int, messagesPerTopic: collection.mutable.Map[TopicAndPartition, ByteBufferMessageSet]) = {
if(brokerId < 0) {
warn("Failed to send data since partitions %s don't have a leader".format(messagesPerTopic.map(_._1).mkString(",")))
messagesPerTopic.keys.toSeq
} else if(messagesPerTopic.size > 0) {
val currentCorrelationId = correlationId.getAndIncrement
val producerRequest = new ProducerRequest(currentCorrelationId, config.clientId, config.requestRequiredAcks,
config.requestTimeoutMs, messagesPerTopic)
var failedTopicPartitions = Seq.empty[TopicAndPartition]
try {
val syncProducer = producerPool.getProducer(brokerId)
debug("Producer sending messages with correlation id %d for topics %s to broker %d on %s:%d"
.format(currentCorrelationId, messagesPerTopic.keySet.mkString(","), brokerId, syncProducer.config.host, syncProducer.config.port))
val response = syncProducer.send(producerRequest)
debug("Producer sent messages with correlation id %d for topics %s to broker %d on %s:%d"
.format(currentCorrelationId, messagesPerTopic.keySet.mkString(","), brokerId, syncProducer.config.host, syncProducer.config.port))
if(response != null) {
if (response.status.size != producerRequest.data.size)
throw new KafkaException("Incomplete response (%s) for producer request (%s)".format(response, producerRequest))
if (logger.isTraceEnabled) {
val successfullySentData = response.status.filter(_._2.error == ErrorMapping.NoError)
successfullySentData.foreach(m => messagesPerTopic(m._1).foreach(message =>
trace("Successfully sent message: %s".format(if(message.message.isNull) null else Utils.readString(message.message.payload)))))
}
val failedPartitionsAndStatus = response.status.filter(_._2.error != ErrorMapping.NoError).toSeq
failedTopicPartitions = failedPartitionsAndStatus.map(partitionStatus => partitionStatus._1)
if(failedTopicPartitions.size > 0) {
val errorString = failedPartitionsAndStatus
.sortWith((p1, p2) => p1._1.topic.compareTo(p2._1.topic) < 0 ||
(p1._1.topic.compareTo(p2._1.topic) == 0 && p1._1.partition < p2._1.partition))
.map{
case(topicAndPartition, status) =>
topicAndPartition.toString + ": " + ErrorMapping.exceptionFor(status.error).getClass.getName
}.mkString(",")
warn("Produce request with correlation id %d failed due to %s".format(currentCorrelationId, errorString))
}
failedTopicPartitions
} else {
Seq.empty[TopicAndPartition]
}
} catch {
case t: Throwable =>
warn("Failed to send producer request with correlation id %d to broker %d with data for partitions %s"
.format(currentCorrelationId, brokerId, messagesPerTopic.map(_._1).mkString(",")), t)
messagesPerTopic.keys.toSeq
}
} else {
List.empty
}
}
private def groupMessagesToSet(messagesPerTopicAndPartition: collection.mutable.Map[TopicAndPartition, Seq[KeyedMessage[K,Message]]]) = {
/** enforce the compressed.topics config here.
* If the compression codec is anything other than NoCompressionCodec,
* Enable compression only for specified topics if any
* If the list of compressed topics is empty, then enable the specified compression codec for all topics
* If the compression codec is NoCompressionCodec, compression is disabled for all topics
*/
val messagesPerTopicPartition = messagesPerTopicAndPartition.map { case (topicAndPartition, messages) =>
val rawMessages = messages.map(_.message)
( topicAndPartition,
config.compressionCodec match {
case NoCompressionCodec =>
debug("Sending %d messages with no compression to %s".format(messages.size, topicAndPartition))
new ByteBufferMessageSet(NoCompressionCodec, rawMessages: _*)
case _ =>
config.compressedTopics.size match {
case 0 =>
debug("Sending %d messages with compression codec %d to %s"
.format(messages.size, config.compressionCodec.codec, topicAndPartition))
new ByteBufferMessageSet(config.compressionCodec, rawMessages: _*)
case _ =>
if(config.compressedTopics.contains(topicAndPartition.topic)) {
debug("Sending %d messages with compression codec %d to %s"
.format(messages.size, config.compressionCodec.codec, topicAndPartition))
new ByteBufferMessageSet(config.compressionCodec, rawMessages: _*)
}
else {
debug("Sending %d messages to %s with no compression as it is not in compressed.topics - %s"
.format(messages.size, topicAndPartition, config.compressedTopics.toString))
new ByteBufferMessageSet(NoCompressionCodec, rawMessages: _*)
}
}
}
)
}
messagesPerTopicPartition
}
def close() {
if (producerPool != null)
producerPool.close
}
}
| unix1986/universe | tool/kafka-0.8.1.1-src/core/src/main/scala/kafka/producer/async/DefaultEventHandler.scala | Scala | bsd-2-clause | 17,692 |
package org.jetbrains.plugins.scala
package lang.psi.light
import lang.psi.api.statements.ScAnnotationsHolder
import lang.psi.types.result.{Success, TypingContext}
import lang.psi.types.{ScType, ScParameterizedType}
import com.intellij.psi.{PsiClass, PsiClassType}
import extensions.toPsiClassExt
/**
* @author Alefas
* @since 07.12.12
*/
object LightUtil {
  /**
   * Builds the throws section used when presenting this member to Java code (Java interop only).
   * @param holder annotation holder to inspect for `scala.throws` annotations
   * @return the Java throws section string, or an empty string if no exception classes are declared
   */
def getThrowsSection(holder: ScAnnotationsHolder): String = {
holder.hasAnnotation("scala.throws") match {
case Some(annotation) =>
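        // First look at the annotation's value arguments, e.g. @throws(classOf[IOException])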
val classes = annotation.constructor.args.map(_.exprs).getOrElse(Seq.empty).flatMap { expr =>
expr.getType(TypingContext.empty) match {
case Success(ScParameterizedType(des, Seq(arg)), _) => ScType.extractClass(des) match {
case Some(clazz) if clazz.qualifiedName == "java.lang.Class" =>
ScType.toPsi(arg, holder.getProject, holder.getResolveScope) match {
case c: PsiClassType =>
c.resolve() match {
case clazz: PsiClass => Seq(clazz.getQualifiedName)
case _ => Seq.empty
}
case _ => Seq.empty
}
case _ => Seq.empty
}
case _ => Seq.empty
}
}
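        // Fall back to the annotation's type arguments, e.g. @throws[IOException]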
        if (classes.isEmpty) {
annotation.constructor.typeArgList match {
case Some(args) =>
val classes = args.typeArgs.map(_.getType(TypingContext.empty)).filter(_.isDefined).map(_.get).flatMap { arg =>
ScType.toPsi(arg, holder.getProject, holder.getResolveScope) match {
case c: PsiClassType =>
c.resolve() match {
case clazz: PsiClass => Seq(clazz.getQualifiedName)
case _ => Seq.empty
}
case _ => Seq.empty
}
}
              if (classes.nonEmpty) classes.mkString(" throws ", ", ", " ")
else ""
case None => ""
}
} else classes.mkString(" throws ", ", ", " ")
case _ => ""
}
}
}
| consulo/consulo-scala | src/org/jetbrains/plugins/scala/lang/psi/light/LightUtil.scala | Scala | apache-2.0 | 2,272 |
package org.crockeo.genericplatformer.assets
import com.badlogic.gdx.graphics.g2d.{ TextureRegion, Animation }
import com.badlogic.gdx.utils.Array
// Actor for updating the animations
class AnimationActor(val animation: Animation) {
private var time: Float = 0.0f
// Updating the AnimationActor
def update(dt: Float): Unit =
time += dt
// Getting the current frame
def getFrame: TextureRegion =
animation.getKeyFrame(time)
}
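// Example usage (illustrative sketch only; the sprite sheet path, batch and coordinates are
// assumptions, not part of this project):
//   val walk = AnimationManager.load("img/walk.png", 32, 32, 0.1f)
//   AnimationManager.update(Gdx.graphics.getDeltaTime)
//   batch.draw(walk.getFrame, x, y)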
object AnimationManager {
// The cache
private var cache: Map[String, AnimationActor] = Map[String, AnimationActor]()
// The backend for loading an asset
private def loadBackend(path: String,
width: Int, height: Int,
timeStep: Float): AnimationActor = {
    val frames: Array[TextureRegion] = new Array[TextureRegion]()
    TextureRegion.split(TextureManager.load(path),
                        width, height).foreach { _.foreach { frames.add(_) } }
    new AnimationActor(new Animation(timeStep, frames))
}
// Updating all AnimationActors
def update(dt: Float) =
cache.values.foreach { _.update(dt) }
// Caching an asset
def cache(path: String,
width: Int, height: Int,
timeStep: Float): Unit =
if (!cache.contains(path)) cache = cache + (path -> loadBackend(path, width, height, timeStep))
// Decaching an asset
def decache(path: String): Unit =
if (cache.contains(path)) cache = cache - path
// Loading an asset
def load(path: String,
width: Int, height: Int,
timeStep: Float): AnimationActor =
if (cache.contains(path)) cache(path)
else {
val temp: AnimationActor = loadBackend(path, width, height, timeStep)
cache = cache + (path -> temp)
temp
}
// Caching all assets
def init = {}
} | crockeo/generic-platformer | src/org/crockeo/genericplatformer/assets/AnimationManager.scala | Scala | gpl-3.0 | 1,946 |
package collins.softlayer
import models.Status
import util.config.Configurable
object SoftLayerConfig extends Configurable {
override val namespace = "softlayer"
override val referenceConfigFilename = "softlayer_reference.conf"
def enabled = getBoolean("enabled", false)
def username = getString("username", "")
def password = getString("password", "")
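  // Status IDs for which cancellation is allowed; defaults to every defined status name.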
def allowedCancelStatus = getStringSet("allowedCancelStatus", Status.statusNames).map { s =>
Status.findByName(s).get.id
}
override def validateConfig() {
if (enabled) {
require(username.nonEmpty, "softlayer.username must not be empty if enabled")
require(password.nonEmpty, "softlayer.password must not be empty if enabled")
allowedCancelStatus
}
}
}
| Shopify/collins | app/collins/softlayer/SoftLayerConfig.scala | Scala | apache-2.0 | 759 |
package io.github.quark.action
import io.github.quark.action.OperationAction.OperationOutput
import io.github.quark.action.OperationResult.{Abort, Success}
import io.github.quark.stage.PipelineStage.{Input, Output}
import scala.concurrent.{ExecutionContext, Future}
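// The outcome of running an operation: either a successful value or an abort with a reason.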
sealed trait OperationResult[+T]
object OperationResult {
final case class Success[T](result: T) extends OperationResult[T]
final case class Abort[T](cause: String) extends OperationResult[T]
}
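// An asynchronous step from input type L to result type R. `apply` runs the wrapped
// function and converts Right/Left results into Success/Abort.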
trait OperationAction {
type L
type R
def apply(v1: L)(implicit ec: ExecutionContext): Future[OperationResult[R]] =
f(v1).map {
case Right(v2) => Success(v2)
case Left(cause) => Abort(cause)
}
protected val f: L => OperationOutput[R]
}
object OperationAction {
type OperationOutput[A] = Future[Either[String, A]]
final case class Incoming(f: Input => OperationOutput[Input])
extends OperationAction {
type L = Input
type R = Input
}
final case class Endpoint(f: Input => OperationOutput[Output])
extends OperationAction {
type L = Input
type R = Output
}
final case class Outgoing(f: Output => OperationOutput[Output])
extends OperationAction {
type L = Output
type R = Output
}
}
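// Illustrative sketch (not part of the original source): shows how an Incoming step could be
// composed with an Endpoint step. `parse` and `call` stand in for real gateway functions.
object OperationActionExample {
  import OperationAction.{Incoming, Endpoint, OperationOutput}

  def run(parse: Input => OperationOutput[Input],
          call: Input => OperationOutput[Output],
          request: Input)(implicit ec: ExecutionContext): Future[OperationResult[Output]] = {
    val incoming = Incoming(parse)
    val endpoint = Endpoint(call)
    incoming(request).flatMap {
      case Success(in)  => endpoint(in)
      case Abort(cause) => Future.successful(Abort(cause))
    }
  }
}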
| burakkose/Quark | src/main/scala/io/github/quark/action/OperationAction.scala | Scala | apache-2.0 | 1,240 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import org.apache.spark.internal.Logging
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.catalog.BucketSpec
import org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.planning.ScanOperation
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.{FileSourceScanExec, SparkPlan}
import org.apache.spark.util.collection.BitSet
/**
* A strategy for planning scans over collections of files that might be partitioned or bucketed
* by user specified columns.
*
* At a high level planning occurs in several phases:
* - Split filters by when they need to be evaluated.
* - Prune the schema of the data requested based on any projections present. Today this pruning
* is only done on top level columns, but formats should support pruning of nested columns as
* well.
* - Construct a reader function by passing filters and the schema into the FileFormat.
* - Using a partition pruning predicates, enumerate the list of files that should be read.
* - Split the files into tasks and construct a FileScanRDD.
* - Add any projection or filters that must be evaluated after the scan.
*
* Files are assigned into tasks using the following algorithm:
* - If the table is bucketed, group files by bucket id into the correct number of partitions.
* - If the table is not bucketed or bucketing is turned off:
* - If any file is larger than the threshold, split it into pieces based on that threshold
* - Sort the files by decreasing file size.
* - Assign the ordered files to buckets using the following algorithm. If the current partition
* is under the threshold with the addition of the next file, add it. If not, open a new bucket
* and add it. Proceed to the next file.
*/
object FileSourceStrategy extends Strategy with Logging {
// should prune buckets iff num buckets is greater than 1 and there is only one bucket column
private def shouldPruneBuckets(bucketSpec: Option[BucketSpec]): Boolean = {
bucketSpec match {
case Some(spec) => spec.bucketColumnNames.length == 1 && spec.numBuckets > 1
case None => false
}
}
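  // Returns the set of buckets that may contain rows matching `expr` on the bucket column.
  // Predicates that cannot be used for pruning select every bucket.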
private def getExpressionBuckets(
expr: Expression,
bucketColumnName: String,
numBuckets: Int): BitSet = {
def getBucketNumber(attr: Attribute, v: Any): Int = {
BucketingUtils.getBucketIdFromValue(attr, numBuckets, v)
}
def getBucketSetFromIterable(attr: Attribute, iter: Iterable[Any]): BitSet = {
val matchedBuckets = new BitSet(numBuckets)
iter
.map(v => getBucketNumber(attr, v))
.foreach(bucketNum => matchedBuckets.set(bucketNum))
matchedBuckets
}
def getBucketSetFromValue(attr: Attribute, v: Any): BitSet = {
val matchedBuckets = new BitSet(numBuckets)
matchedBuckets.set(getBucketNumber(attr, v))
matchedBuckets
}
expr match {
case expressions.Equality(a: Attribute, Literal(v, _)) if a.name == bucketColumnName =>
getBucketSetFromValue(a, v)
case expressions.In(a: Attribute, list)
if list.forall(_.isInstanceOf[Literal]) && a.name == bucketColumnName =>
getBucketSetFromIterable(a, list.map(e => e.eval(EmptyRow)))
case expressions.InSet(a: Attribute, hset)
if hset.forall(_.isInstanceOf[Literal]) && a.name == bucketColumnName =>
getBucketSetFromIterable(a, hset.map(e => expressions.Literal(e).eval(EmptyRow)))
case expressions.IsNull(a: Attribute) if a.name == bucketColumnName =>
getBucketSetFromValue(a, null)
case expressions.And(left, right) =>
getExpressionBuckets(left, bucketColumnName, numBuckets) &
getExpressionBuckets(right, bucketColumnName, numBuckets)
case expressions.Or(left, right) =>
getExpressionBuckets(left, bucketColumnName, numBuckets) |
getExpressionBuckets(right, bucketColumnName, numBuckets)
case _ =>
val matchedBuckets = new BitSet(numBuckets)
matchedBuckets.setUntil(numBuckets)
matchedBuckets
}
}
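  // Builds the bucket pruning bit set from the normalized filters; None means that
  // every bucket has to be scanned.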
private def genBucketSet(
normalizedFilters: Seq[Expression],
bucketSpec: BucketSpec): Option[BitSet] = {
if (normalizedFilters.isEmpty) {
return None
}
val bucketColumnName = bucketSpec.bucketColumnNames.head
val numBuckets = bucketSpec.numBuckets
val normalizedFiltersAndExpr = normalizedFilters
.reduce(expressions.And)
val matchedBuckets = getExpressionBuckets(normalizedFiltersAndExpr, bucketColumnName,
numBuckets)
val numBucketsSelected = matchedBuckets.cardinality()
logInfo {
s"Pruned ${numBuckets - numBucketsSelected} out of $numBuckets buckets."
}
// None means all the buckets need to be scanned
if (numBucketsSelected == numBuckets) {
None
} else {
Some(matchedBuckets)
}
}
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case ScanOperation(projects, filters,
l @ LogicalRelation(fsRelation: HadoopFsRelation, _, table, _)) =>
// Filters on this relation fall into four categories based on where we can use them to avoid
// reading unneeded data:
// - partition keys only - used to prune directories to read
// - bucket keys only - optionally used to prune files to read
// - keys stored in the data only - optionally used to skip groups of data in files
// - filters that need to be evaluated again after the scan
val filterSet = ExpressionSet(filters)
val normalizedFilters = DataSourceStrategy.normalizeExprs(
filters.filter(_.deterministic), l.output)
val partitionColumns =
l.resolve(
fsRelation.partitionSchema, fsRelation.sparkSession.sessionState.analyzer.resolver)
val partitionSet = AttributeSet(partitionColumns)
val partitionKeyFilters =
ExpressionSet(normalizedFilters
.filter(_.references.subsetOf(partitionSet)))
logInfo(s"Pruning directories with: ${partitionKeyFilters.mkString(",")}")
// subquery expressions are filtered out because they can't be used to prune buckets or pushed
// down as data filters, yet they would be executed
val normalizedFiltersWithoutSubqueries =
normalizedFilters.filterNot(SubqueryExpression.hasSubquery)
val bucketSpec: Option[BucketSpec] = fsRelation.bucketSpec
val bucketSet = if (shouldPruneBuckets(bucketSpec)) {
genBucketSet(normalizedFiltersWithoutSubqueries, bucketSpec.get)
} else {
None
}
val dataColumns =
l.resolve(fsRelation.dataSchema, fsRelation.sparkSession.sessionState.analyzer.resolver)
// Partition keys are not available in the statistics of the files.
val dataFilters =
normalizedFiltersWithoutSubqueries.filter(_.references.intersect(partitionSet).isEmpty)
val supportNestedPredicatePushdown =
DataSourceUtils.supportNestedPredicatePushdown(fsRelation)
val pushedFilters = dataFilters
.flatMap(DataSourceStrategy.translateFilter(_, supportNestedPredicatePushdown))
logInfo(s"Pushed Filters: ${pushedFilters.mkString(",")}")
// Predicates with both partition keys and attributes need to be evaluated after the scan.
val afterScanFilters = filterSet -- partitionKeyFilters.filter(_.references.nonEmpty)
logInfo(s"Post-Scan Filters: ${afterScanFilters.mkString(",")}")
val filterAttributes = AttributeSet(afterScanFilters)
val requiredExpressions: Seq[NamedExpression] = filterAttributes.toSeq ++ projects
val requiredAttributes = AttributeSet(requiredExpressions)
val readDataColumns =
dataColumns
.filter(requiredAttributes.contains)
.filterNot(partitionColumns.contains)
val outputSchema = readDataColumns.toStructType
logInfo(s"Output Data Schema: ${outputSchema.simpleString(5)}")
val outputAttributes = readDataColumns ++ partitionColumns
val scan =
FileSourceScanExec(
fsRelation,
outputAttributes,
outputSchema,
partitionKeyFilters.toSeq,
bucketSet,
None,
dataFilters,
table.map(_.identifier))
val afterScanFilter = afterScanFilters.toSeq.reduceOption(expressions.And)
val withFilter = afterScanFilter.map(execution.FilterExec(_, scan)).getOrElse(scan)
val withProjections = if (projects == withFilter.output) {
withFilter
} else {
execution.ProjectExec(projects, withFilter)
}
withProjections :: Nil
case _ => Nil
}
}
| dbtsai/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/FileSourceStrategy.scala | Scala | apache-2.0 | 9,541 |
/*
* Artificial Intelligence for Humans
* Volume 1: Fundamental Algorithms
* Scala Version
* http://www.aifh.org
* http://www.jeffheaton.com
*
* Code repository:
* https://github.com/jeffheaton/aifh
* Copyright 2013 by Jeff Heaton
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* For more information on Heaton Research copyrights, licenses
* and trademarks visit:
* http://www.heatonresearch.com/copyright
*/
package com.heatonresearch.aifh.randomize
/**
* The Mersenne twister is a pseudo random number generator developed in 1997 by Makoto Matsumoto and
* Takuji Nishimura that is based on a matrix linear recurrence over a finite binary field F2.
* <p/>
* References:
* <p/>
* http://www.cs.gmu.edu/~sean/research/
* <p/>
* http://en.wikipedia.org/wiki/Mersenne_twister
* <p/>
 * Makoto Matsumoto and Takuji Nishimura, "Mersenne Twister: A 623-Dimensionally Equidistributed Uniform
 * Pseudo-Random Number Generator", ACM Transactions on Modeling and Computer Simulation,
* Vol. 8, No. 1, January 1998, pp 3--30.
*/
object MersenneTwisterGenerateRandom {
private val N: Int = 624
private val M: Int = 397
private val MATRIX_A: Int = 0x9908b0df
private val UPPER_MASK: Int = 0x80000000
private val LOWER_MASK: Int = 0x7fffffff
private val TEMPERING_MASK_B: Int = 0x9d2c5680
private val TEMPERING_MASK_C: Int = 0xefc60000
}
class MersenneTwisterGenerateRandom private (seedInit : Either[Long,Vector[Int]]) extends AbstractBoxMuller {
private var stateVector: Array[Int] = null
private var mti: Int = 0
private var mag01: Array[Int] = null
seedInit match {
case Left(long) => setSeed(long)
case Right(arr) => setSeed(arr)
}
import MersenneTwisterGenerateRandom._
def this(seed: Long) {
this(Left(seed))
}
def this(array: Vector[Int]) {
this(Right(array))
}
def this() {
this(System.currentTimeMillis())
}
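  // Initializes the 624-entry state vector from a single seed value.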
private def setSeed(seed: Long) {
stateVector = Array.ofDim[Int](N)
mag01 = Array[Int](0x0,MATRIX_A)
stateVector(0) = seed.toInt
mti = 1
while (mti < N) {
stateVector(mti) = 1812433253 * (stateVector(mti - 1) ^ (stateVector(mti - 1) >>> 30)) + mti
mti+=1
}
}
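  // Initializes the state vector from an array of seeds (the init_by_array scheme).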
private def setSeed(array: Vector[Int]) {
var i: Int = 0
var j: Int = 0
setSeed(19650218)
i = 1
j = 0
val kStart = Math.max(N,array.length)
(kStart to 1 by -1) foreach { k=>
stateVector(i) = (stateVector(i) ^ ((stateVector(i - 1) ^ (stateVector(i - 1) >>> 30)) * 1664525)) + array(j) + j
i += 1
j += 1
if (i >= N) {
stateVector(0) = stateVector(N - 1)
i = 1
}
if (j >= array.length)
j = 0
}
(N-1 to 1 by -1) foreach { k =>
stateVector(i) = (stateVector(i) ^ ((stateVector(i - 1) ^ (stateVector(i - 1) >>> 30)) * 1566083941)) - i
i += 1
if (i >= N) {
stateVector(0) = stateVector(N - 1)
i = 1
}
}
stateVector(0) = 0x80000000
}
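  // Produces the next pseudo-random value with the requested number of bits: regenerates the
  // state vector in blocks of N when it is exhausted, then applies the tempering transform.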
protected def next(bits: Int): Int = {
var y: Int = 0
if (mti >= N) {
var kk: Int = 0
while (kk < N - M) {
y = (stateVector(kk) & UPPER_MASK) | (stateVector(kk + 1) & LOWER_MASK)
stateVector(kk) = stateVector(kk + M) ^ (y >>> 1) ^ mag01(y & 0x1)
kk +=1
}
while (kk < N - 1) {
y = (stateVector(kk) & UPPER_MASK) | (stateVector(kk + 1) & LOWER_MASK)
stateVector(kk) = stateVector(kk + (M - N)) ^ (y >>> 1) ^ mag01(y & 0x1)
kk += 1
}
y = (stateVector(N - 1) & UPPER_MASK) | (stateVector(0) & LOWER_MASK)
stateVector(N - 1) = stateVector(M - 1) ^ (y >>> 1) ^ mag01(y & 0x1)
mti = 0
}
y = stateVector(mti)
mti += 1
y ^= y >>> 11
y ^= (y << 7) & TEMPERING_MASK_B
y ^= (y << 15) & TEMPERING_MASK_C
y ^= (y >>> 18)
y >>> (32 - bits)
}
override def nextDouble(): Double = ((next(26).toLong << 27) + next(27)) / (1L << 53).toDouble
override def nextLong: Long = (next(32).toLong << 32) + next(32)
override def nextBoolean: Boolean = nextDouble > 0.5
override def nextFloat: Float = nextDouble().toFloat
override def nextInt: Int = nextLong.toInt
} | HairyFotr/aifh | vol1/scala-examples/src/main/scala/com/heatonresearch/aifh/randomize/MersenneTwisterGenerateRandom.scala | Scala | apache-2.0 | 4,682 |