code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (1 class) | license (15 classes) | size (int64, 5-1M) |
---|---|---|---|---|---|
package breeze.linalg
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import support._
import breeze.collection.mutable.Beam
import breeze.generic.UFunc
import breeze.math.Semiring
import scala.{specialized=>spec}
import scala.reflect.ClassTag
/**
* We occasionally need a Tensor that doesn't extend NumericOps directly. This is that tensor.
* @tparam K
* @tparam V
*/
trait QuasiTensor[@spec(Int) K, @spec(Double, Int, Float, Long) V] {
def apply(i: K): V
def update(i: K, v: V): Unit
def keySet: scala.collection.Set[K]
// Aggregators
@deprecated("Use max(t) instead of t.max", "0.6")
def max(implicit ord: Ordering[V]) = valuesIterator.max
@deprecated("Use min(t) instead of t.min", "0.6")
def min(implicit ord: Ordering[V]) = valuesIterator.min
@deprecated("Use argmax(t) instead of t.argmax", "0.6")
  def argmax(implicit ord: Ordering[V]) = keysIterator.maxBy(apply _)
  @deprecated("Use argmin(t) instead of t.argmin", "0.6")
  def argmin(implicit ord: Ordering[V]) = keysIterator.minBy(apply _)
@deprecated("Use sum(t) instead of t.sum", "0.6")
def sum(implicit num: Numeric[V]) = activeValuesIterator.sum
@deprecated("Use argsort(t) instead of t.argsort", "0.6")
def argsort(implicit ord : Ordering[V]) : IndexedSeq[K] =
keysIterator.toIndexedSeq.sorted(ord.on[K](apply _))
/**
* Returns the k indices with maximum value. (NOT absolute value.)
* @param k how many to return
* @param ordering
* @return
*/
@deprecated("Use argtopk(t, k) instead of t.argtopk(k)", "0.6")
def argtopk(k: Int)(implicit ordering: Ordering[V]) = {
implicit val ordK = ordering.on(apply _)
val queue = new Beam[K](k)
queue ++= keysIterator
queue.toIndexedSeq.reverse
}
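  // Illustrative sketch (not part of the original source): for a tensor t with values
  // Map(0 -> 1.0, 1 -> 5.0, 2 -> 3.0), t.argtopk(2) returns the keys of the two largest
  // values, i.e. keys 1 and 2 (largest value first, given the reverse above).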
def iterator: Iterator[(K, V)]
def activeIterator: Iterator[(K, V)]
def valuesIterator: Iterator[V]
def activeValuesIterator: Iterator[V]
def keysIterator: Iterator[K]
def activeKeysIterator: Iterator[K]
/** Returns all indices k whose value satisfies a predicate. */
def findAll(f: V=>Boolean) = activeIterator.filter(p => f(p._2)).map(_._1).toIndexedSeq
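  // e.g. (illustrative, not from the original source): t.findAll(_ > 0.0) collects the keys
  // of all active entries with a positive value.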
/** Returns true if all elements are non-zero */
@deprecated("Use breeze.linalg.all instead", "0.6")
def all(implicit semi: Semiring[V]) = valuesIterator.forall(_ != semi.zero)
/** Returns true if some element is non-zero */
@deprecated("Use breeze.linalg.any instead", "0.6")
def any(implicit semi: Semiring[V]) = valuesIterator.exists(_ != semi.zero)
}
trait TensorLike[@spec(Int) K, @spec(Double, Int, Float, Long) V, +This<:Tensor[K, V]]
extends QuasiTensor[K,V]
with NumericOps[This] {
def apply(i: K): V
def update(i: K, v: V)
def size: Int
def activeSize: Int
// iteration and such
def keys: TensorKeys[K, V, This] = new TensorKeys[K, V, This](repr, false)
def values: TensorValues[K, V, This] = new TensorValues[K, V, This](repr, false)
def pairs: TensorPairs[K, V, This] = new TensorPairs[K, V, This](repr, false)
def active: TensorActive[K, V, This] = new TensorActive[K, V, This](repr)
// slicing
/**
* method for slicing a tensor. For instance, DenseVectors support efficient slicing by a Range object.
* @return
*/
def apply[Slice, Result](slice: Slice)(implicit canSlice: CanSlice[This, Slice, Result]) = {
canSlice(repr, slice)
}
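  // Illustrative usage (sketch, assuming a DenseVector as mentioned in the note above):
  //   val v = DenseVector(1.0, 2.0, 3.0, 4.0)
  //   v(1 to 2)   // selects elements 1 and 2 through an implicit CanSlice instance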
/**
   * Slices a sequence of elements. At least two keys must be supplied.
* @param a
* @param slice
* @param canSlice
* @tparam Result
* @return
*/
def apply[Result](a: K, slice: K*)(implicit canSlice: CanSlice[This, Seq[K], Result]) = {
canSlice(repr, a +: slice)
}
/**
* Method for slicing that is tuned for Matrices.
* @return
*/
def apply[Slice1, Slice2, Result](slice1: Slice1, slice2: Slice2)(implicit canSlice: CanSlice2[This, Slice1, Slice2, Result]) = {
canSlice(repr, slice1, slice2)
}
/** Creates a new map containing a transformed copy of this map. */
def mapPairs[TT>:This,O,That](f : (K,V) => O)(implicit bf : CanMapKeyValuePairs[TT, K, V, O, That]) : That = {
bf.map(repr, f)
}
  /** Maps all active key-value pairs. */
def mapActivePairs[TT>:This,O,That](f : (K,V) => O)(implicit bf : CanMapKeyValuePairs[TT, K, V, O, That]) : That = {
bf.mapActive(repr.asInstanceOf[TT], f)
}
/** Creates a new map containing a transformed copy of this map. */
def mapValues[TT>:This,O,That](f : V => O)(implicit bf : CanMapValues[TT, V, O, That]) : That = {
bf.map(repr.asInstanceOf[TT], f)
}
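  // e.g. (illustrative sketch): DenseVector(1.0, 2.0, 3.0).mapValues(_ * 2.0) yields
  // DenseVector(2.0, 4.0, 6.0), using the CanMapValues instance for DenseVector.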
/** Maps all non-zero values. */
def mapActiveValues[TT>:This,O,That](f : V => O)(implicit bf : CanMapValues[TT, V, O, That]) : That = {
bf.mapActive(repr.asInstanceOf[TT], f)
}
/** Applies the given function to each key in the tensor. */
def foreachKey[U](fn: K => U) : Unit =
keysIterator.foreach[U](fn)
/**
* Applies the given function to each key and its corresponding value.
*/
def foreachPair[U](fn: (K,V) => U) : Unit =
foreachKey[U](k => fn(k,apply(k)))
/**
* Applies the given function to each value in the map (one for
* each element of the domain, including zeros).
*/
def foreachValue[U](fn : (V=>U)) =
foreachKey[U](k => fn(apply(k)))
/** Returns true if and only if the given predicate is true for all elements. */
def forall(fn : (K,V) => Boolean) : Boolean = {
foreachPair((k,v) => if (!fn(k,v)) return false)
true
}
/** Returns true if and only if the given predicate is true for all elements. */
@deprecated("Please use 'forall' with the same arguments, which is more in accordance with scala.collections syntax", "0.8")
def forallValues(fn : V => Boolean) : Boolean = forall(fn)
/** Returns true if and only if the given predicate is true for all elements. */
def forall(fn : V => Boolean) : Boolean = {
foreachValue(v => if (!fn(v)) return false)
true
}
}
/**
* A Tensor defines a map from an index set to a set of values.
*
* @author dlwh
*/
trait Tensor[@spec(Int) K, @spec(Double, Int, Float, Long) V] extends TensorLike[K, V, Tensor[K, V]]
object Tensor {
implicit def liftTransposeOps[Op, K, V, T, R, RT](implicit ev: T<:<Tensor[K, V], op: UFunc.UImpl2[Op, T, V, R], canTranspose: CanTranspose[R, RT]):UFunc.UImpl2[Op, Transpose[T], V, RT] = {
new UFunc.UImpl2[Op, Transpose[T], V, RT] {
def apply(a: Transpose[T], b: V) = {
canTranspose(op(a.inner, b))
}
}
}
implicit def liftTransposeInPlaceOps[Op, K, V, T](implicit ev: T<:<Tensor[K, V], op: UFunc.InPlaceImpl2[Op, T, V]):UFunc.InPlaceImpl2[Op, Transpose[T], V] = {
new UFunc.InPlaceImpl2[Op, Transpose[T], V] {
def apply(a: Transpose[T], b: V) {
        op(a.inner, b)
}
}
}
implicit def transposeTensor[K, V, T](implicit ev: T<:<Tensor[K, V]): CanTranspose[T, Transpose[T]] = {
new CanTranspose[T, Transpose[T]] {
def apply(from: T): Transpose[T] = new Transpose(from)
}
}
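  // Hedged sketch of what the lifted implicits above enable: given an element-wise UFunc
  // implementation for a tensor type T and a scalar V, the same operation also applies to
  // Transpose[T], with the result transposed back. Illustrative only:
  //   val row = DenseVector(1.0, 2.0).t   // Transpose[DenseVector[Double]]
  //   row + 1.0                           // resolved through liftTransposeOps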
implicit def canSliceTensor[K, V:ClassTag]:CanSlice[Tensor[K,V], Seq[K], SliceVector[K, V]] = new CanSlice[Tensor[K,V], Seq[K], SliceVector[K, V]] {
def apply(from: Tensor[K, V], slice: Seq[K]): SliceVector[K, V] = new SliceVector(from, slice.toIndexedSeq)
}
implicit def canSliceTensorBoolean[K, V:ClassTag]:CanSlice[Tensor[K,V], Tensor[K, Boolean], SliceVector[K, V]] = new CanSlice[Tensor[K,V], Tensor[K, Boolean], SliceVector[K, V]] {
override def apply(from: Tensor[K, V], slice: Tensor[K, Boolean]): SliceVector[K, V] = {
new SliceVector(from, slice.findAll(_ == true))
}
}
implicit def canSliceTensor2[K1, K2, V:Semiring:ClassTag]:CanSlice2[Tensor[(K1,K2),V], Seq[K1], Seq[K2], SliceMatrix[K1, K2, V]] = {
new CanSlice2[Tensor[(K1,K2),V], Seq[K1], Seq[K2], SliceMatrix[K1, K2, V]] {
def apply(from: Tensor[(K1, K2), V], slice: Seq[K1], slice2: Seq[K2]): SliceMatrix[K1, K2, V] = {
new SliceMatrix(from, slice.toIndexedSeq, slice2.toIndexedSeq)
}
}
}
}
| jaredk-porch/breeze | math/src/main/scala/breeze/linalg/Tensor.scala | Scala | apache-2.0 | 8,480 |
package com.tribbloids.spookystuff.uav
import scala.concurrent.duration.Duration
/**
* Created by peng on 16/07/17.
*/
object UAVConst {
import scala.concurrent.duration._
object UAVNavigation {
val delayMin: Duration = 0.seconds
// val blocking: Boolean = true
}
}
| tribbloid/spookystuff | uav/src/main/scala/com/tribbloids/spookystuff/uav/UAVConst.scala | Scala | apache-2.0 | 288 |
package fr.ramiro.sfuzzy.dsl
import fr.ramiro.sfuzzy._
import org.scalatest.FunSuite
import MembershipFunctions.{ piecewiseLinear => l }
class TipperTest extends FunSuite {
//noinspection TypeAnnotation
case object food extends FuzzyVar {
val rancid: FuzzyTerm = l((0, 1), (1, 1), (3, 0))
val delicious: FuzzyTerm = l((7, 0), (9, 1))
}
//noinspection TypeAnnotation
case object service extends FuzzyVar {
val poor: FuzzyTerm = l((0, 1), (4, 0))
val good: FuzzyTerm = l((1, 0), (4, 1), (6, 1), (9, 0))
val excellent: FuzzyTerm = l((6, 0), (9, 1))
}
//noinspection TypeAnnotation
case object tip extends DefuzzyVar {
val cheap: DefuzzyTerm = l((0, 0), (5, 1), (10, 0))
val average: DefuzzyTerm = l((10, 0), (15, 1), (20, 0))
val generous: DefuzzyTerm = l((20, 0), (25, 1), (30, 0))
val method = FunctionsUtils.cog(0, 30, 1e-2d)
val default = 0
}
import food._
import service._
import tip._
import FunctionsUtils._
private implicit val fuzzyEvaluationEquality = Utils.tolerantFuzzyEvaluationTypeEquality(1e-2d)
test("tipper") {
assertDoesNotCompile("val rule0: FuzzyRule = Fuzzy IF service IS rancid")
implicit val config = FuzzyConfiguration(
andMethod = min,
orMethod = max,
activation = min,
accumulation = max
)
import Rule._
val fuzzyRules1 = FuzzyFunction(
IF((service IS poor) OR (food IS rancid)) THEN (tip IS cheap),
IF(service IS good) THEN (tip IS average),
IF((service IS excellent) AND (food IS delicious)) THEN (tip IS generous)
)
val fuzzyRules2 = FuzzyFunction(
IF((service IS poor) OR (food IS rancid)) THEN (tip IS cheap WITH 0.8),
IF(service IS good) THEN (tip IS average WITH 0.5),
IF((service IS excellent) AND (food IS delicious)) THEN (tip IS generous WITH 0.9)
)
for ((serviceValue, foodValue, expected) <- data.map { transformData }) {
//val resultMap = fuzzyRules eval(service -> serviceValue, food -> foodValue)
//assert(actual === expected)
}
}
val data = Seq(
(100, 100, 500),
(100, 200, 500),
(100, 300, 500),
(100, 400, 500),
(100, 500, 500),
(100, 600, 500),
(100, 700, 500),
(100, 800, 500),
(100, 900, 500),
(100, 1000, 500),
(200, 100, 857),
(200, 200, 926),
(200, 300, 926),
(200, 400, 926),
(200, 500, 926),
(200, 600, 926),
(200, 700, 926),
(200, 800, 926),
(200, 900, 926),
(200, 1000, 926),
(300, 100, 971),
(300, 200, 1042),
(300, 300, 1170),
(300, 400, 1170),
(300, 500, 1170),
(300, 600, 1170),
(300, 700, 1170),
(300, 800, 1170),
(300, 900, 1170),
(300, 1000, 1170),
(400, 100, 1000),
(400, 200, 1071),
(400, 300, 1500),
(400, 400, 1500),
(400, 500, 1500),
(400, 600, 1500),
(400, 700, 1500),
(400, 800, 1500),
(400, 900, 1500),
(400, 1000, 1500),
(500, 100, 1000),
(500, 200, 1071),
(500, 300, 1500),
(500, 400, 1500),
(500, 500, 1500),
(500, 600, 1500),
(500, 700, 1500),
(500, 800, 1500),
(500, 900, 1500),
(500, 1000, 1500),
(600, 100, 1000),
(600, 200, 1071),
(600, 300, 1500),
(600, 400, 1500),
(600, 500, 1500),
(600, 600, 1500),
(600, 700, 1500),
(600, 800, 1500),
(600, 900, 1500),
(600, 1000, 1500),
(700, 100, 990),
(700, 200, 1061),
(700, 300, 1500),
(700, 400, 1500),
(700, 500, 1500),
(700, 600, 1500),
(700, 700, 1500),
(700, 800, 1867),
(700, 900, 1867),
(700, 1000, 1867),
(800, 100, 957),
(800, 200, 1028),
(800, 300, 1500),
(800, 400, 1500),
(800, 500, 1500),
(800, 600, 1500),
(800, 700, 1500),
(800, 800, 1972),
(800, 900, 2014),
(800, 1000, 2014),
(900, 100, 890),
(900, 200, 960),
(900, 300, 1500),
(900, 400, 1500),
(900, 500, 1500),
(900, 600, 1500),
(900, 700, 1500),
(900, 800, 2040),
(900, 900, 2110),
(900, 1000, 2110),
(1000, 100, 765),
(1000, 200, 824),
(1000, 300, 1500),
(1000, 400, 1500),
(1000, 500, 1500),
(1000, 600, 1500),
(1000, 700, 1500),
(1000, 800, 2176),
(1000, 900, 2235),
(1000, 1000, 2235)
)
private def transformData(line: (Int, Int, Int)) = line match {
case (serviceValue, foodValue, expectedTip) =>
val hundred: FuzzyEvaluationType = 100.0
((serviceValue: FuzzyEvaluationType) / hundred, (foodValue: FuzzyEvaluationType) / hundred, (expectedTip: FuzzyEvaluationType) / hundred)
}
}
| rrramiro/sFuzzyLogic | src/test/scala/fr/ramiro/sfuzzy/dsl/TipperTest.scala | Scala | apache-2.0 | 4,595 |
class UnapplySeq {
class B {
def _1 = "text"
def _2 = Seq(1, 2, 3)
}
class A {
val isEmpty: Boolean = false
def get: B = new B
}
object Z {
def unapplySeq(s: String): Option[B] = None
}
"text" match {
case Z(s, l, z) =>
/*start*/l/*end*/
}
}
//Int | katejim/intellij-scala | testdata/typeInference/newExtractors/UnapplySeq.scala | Scala | apache-2.0 | 294 |
package com.twitter.algebird.macros
import com.twitter.algebird._
import com.twitter.algebird.macros.caseclass._
import com.twitter.algebird.macros.ArbitraryCaseClassMacro.arbitrary
import org.scalacheck.{ Properties, Arbitrary }
import org.scalacheck.Prop.forAll
object CaseClassMacrosTest extends Properties("Case class macros") {
import BaseProperties._
implicit val arbitraryFoo: Arbitrary[Foo] = arbitrary[Foo]
implicit val arbitraryBar: Arbitrary[Bar] = arbitrary[Bar]
case class Foo(a: Int, b: Short, c: Long)
case class Bar(a: Boolean, foo: Foo)
property("Foo is a Semigroup") = semigroupLaws[Foo]
property("Foo is a Monoid") = monoidLaws[Foo]
property("Foo is a Group") = groupLaws[Foo]
property("Foo is a Ring") = ringLaws[Foo]
property("Bar is a Semigroup") = semigroupLaws[Bar]
property("Bar is a Monoid") = monoidLaws[Bar]
property("Bar is a Group") = groupLaws[Bar]
property("Bar is a Ring") = ringLaws[Bar]
}
| erikerlandson/algebird | algebird-test/src/test/scala/com/twitter/algebird/macros/CaseClassMacrosTest.scala | Scala | apache-2.0 | 957 |
package lila
package object learn extends PackageObject {
private[learn] val logger = lila.log("learn")
}
| luanlv/lila | modules/learn/src/main/package.scala | Scala | mit | 110 |
package org.eigengo.akkapatterns.domain
import com.typesafe.config.ConfigFactory
import com.mongodb.{ServerAddress, WriteConcern}
import scala.collection.JavaConversions._
import akka.contrib.jul.JavaLogging
object Settings extends JavaLogging {
// https://groups.google.com/d/topic/scala-user/wzguzEJtLaI/discussion
private val overrides = ConfigFactory.load("local")
private val config = overrides.withFallback(ConfigFactory.load())
private def unmerged(path: String) =
if (overrides.hasPath(path)) overrides.getConfig(path)
else config.getConfig(path)
case class Cassandra(cluster: String, connections: Int, hosts: String)
object Cassandra {
def apply(base: String) = {
val c = config.getConfig(base)
val cluster = c.getString("cluster")
val connections = c.getInt("connections")
val hosts = unmerged(base + ".hosts").entrySet().map{e =>
e.getKey.replaceAll("\"", "") + ":" + e.getValue.unwrapped()
}.mkString(",")
new Cassandra(cluster, connections, hosts)
}
}
case class Mongo(name: String, connections: Int, hosts: List[ServerAddress], concern: WriteConcern)
object Mongo {
def apply(base: String) = {
val c = config.getConfig(base)
val name = c.getString("name")
val connections = c.getInt("connections")
val concern = WriteConcern.valueOf(c.getString("concern"))
val hosts = unmerged(base + ".hosts").entrySet().map{e =>
new ServerAddress(e.getKey.replaceAll("\"", ""), e.getValue.unwrapped().asInstanceOf[Integer])
}.toList
new Mongo(name, connections, hosts, concern)
}
}
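  // Hedged sketch of the configuration layout these readers expect (key names come from the
  // code above; hosts, ports and other values are purely illustrative):
  //
  //   main.db.cassandra { cluster = "main", connections = 4
  //                       hosts { "127.0.0.1" = 9160 } }
  //   main.db.mongo     { name = "akkapatterns", connections = 4, concern = "SAFE"
  //                       hosts { "127.0.0.1" = 27017 } }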
case class Db(cassandra: Cassandra, mongo: Mongo)
case class Main(db: Db)
val main = try Main(
Db(
Cassandra("main.db.cassandra"),
Mongo("main.db.mongo")
)
) catch {
case t: Throwable => log.error(t, "Settings.main") ; throw t
}
val test = try Main(
Db(
Cassandra("test.db.cassandra"),
Mongo("test.db.mongo")
)
) catch {
case t: Throwable => log.error(t, "Settings.test") ; throw t
}
}
| eigengo/akka-patterns | server/domain/src/main/scala/org/eigengo/akkapatterns/domain/settings.scala | Scala | apache-2.0 | 2,079 |
package eu.pepot.eu.spark.inputsplitter.common.file.matcher
import eu.pepot.eu.spark.inputsplitter.common.file.{FileDetails, FileDetailsSet}
import org.apache.log4j.Logger
object FilesMatcher {
val logger = Logger.getLogger(this.getClass)
/**
* Retrieve only the files that conform to the condition.
* @param files
* @param condition
* @return
*/
def matches(files: FileDetailsSet, condition: Condition): FileDetailsSet = {
FileDetailsSet(
files = files.files.filter(currentFile => matchesCondition(currentFile, condition))
)
}
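  // Illustrative usage (sketch; assumes Condition's other fields default to None):
  //   FilesMatcher.matches(files, Condition(biggerThan = Some(64L * 1024 * 1024)))
  //   // keeps only the files whose size is strictly greater than 64 MB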
private def matchesCondition(file: FileDetails, condition: Condition): Boolean = {
condition.biggerThan.map(minimalSize => file.size > minimalSize).getOrElse(false) ||
condition.namePattern.map(namePattern => file.path.toString.matches(namePattern)).getOrElse(false) ||
condition.pathCondition.map(f => f(file.path)).getOrElse(false)
}
}
| mauriciojost/spark-input-splitter | src/main/scala/eu/pepot/eu/spark/inputsplitter/common/file/matcher/FilesMatcher.scala | Scala | apache-2.0 | 938 |
package examples
import mhtml._
import scala.scalajs.js
object Timer extends Example {
var interval: js.UndefOr[js.timers.SetIntervalHandle] = js.undefined
def app: xml.Node = {
val counter = Var(0)
interval = js.timers.setInterval(1000)(counter.update(_ + 1))
<p>Seconds elapsed: {counter}</p>
}
override def cancel = {
interval foreach js.timers.clearInterval
interval = js.undefined
}
}
| OlivierBlanvillain/monadic-html | examples/src/main/scala/mhtml/examples/Timer.scala | Scala | mit | 422 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.client
import java.lang.{Boolean => JBoolean, Integer => JInteger, Long => JLong}
import java.lang.reflect.{InvocationTargetException, Method, Modifier}
import java.net.URI
import java.util.{ArrayList => JArrayList, List => JList, Locale, Map => JMap, Set => JSet}
import java.util.concurrent.TimeUnit
import scala.collection.JavaConverters._
import scala.util.control.NonFatal
import org.apache.hadoop.fs.Path
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.metastore.IMetaStoreClient
import org.apache.hadoop.hive.metastore.TableType
import org.apache.hadoop.hive.metastore.api.{Database, EnvironmentContext, Function => HiveFunction, FunctionType, MetaException, PrincipalType, ResourceType, ResourceUri}
import org.apache.hadoop.hive.ql.Driver
import org.apache.hadoop.hive.ql.io.AcidUtils
import org.apache.hadoop.hive.ql.metadata.{Hive, Partition, Table}
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc
import org.apache.hadoop.hive.ql.processors.{CommandProcessor, CommandProcessorFactory}
import org.apache.hadoop.hive.ql.session.SessionState
import org.apache.hadoop.hive.serde.serdeConstants
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.analysis.NoSuchPermanentFunctionException
import org.apache.spark.sql.catalyst.catalog.{CatalogFunction, CatalogTablePartition, CatalogUtils, FunctionResource, FunctionResourceType}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util.{DateFormatter, TypeUtils}
import org.apache.spark.sql.errors.{QueryCompilationErrors, QueryExecutionErrors}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{AtomicType, DateType, IntegralType, StringType}
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.Utils
/**
* A shim that defines the interface between [[HiveClientImpl]] and the underlying Hive library used
* to talk to the metastore. Each Hive version has its own implementation of this class, defining
 * version-specific variants of the needed functions.
*
* The guideline for writing shims is:
* - always extend from the previous version unless really not possible
* - initialize methods in lazy vals, both for quicker access for multiple invocations, and to
* avoid runtime errors due to the above guideline.
*/
private[client] sealed abstract class Shim {
/**
* Set the current SessionState to the given SessionState. Also, set the context classloader of
* the current thread to the one set in the HiveConf of this given `state`.
*/
def setCurrentSessionState(state: SessionState): Unit
/**
* This shim is necessary because the return type is different on different versions of Hive.
* All parameters are the same, though.
*/
def getDataLocation(table: Table): Option[String]
def setDataLocation(table: Table, loc: String): Unit
def getAllPartitions(hive: Hive, table: Table): Seq[Partition]
def getPartitionsByFilter(
hive: Hive,
table: Table,
predicates: Seq[Expression]): Seq[Partition]
def getCommandProcessor(token: String, conf: HiveConf): CommandProcessor
def getDriverResults(driver: Driver): Seq[String]
def getMetastoreClientConnectRetryDelayMillis(conf: HiveConf): Long
def alterTable(hive: Hive, tableName: String, table: Table): Unit
def alterPartitions(hive: Hive, tableName: String, newParts: JList[Partition]): Unit
def getTablesByType(
hive: Hive,
dbName: String,
pattern: String,
tableType: TableType): Seq[String]
def createPartitions(
hive: Hive,
db: String,
table: String,
parts: Seq[CatalogTablePartition],
ignoreIfExists: Boolean): Unit
def loadPartition(
hive: Hive,
loadPath: Path,
tableName: String,
partSpec: JMap[String, String],
replace: Boolean,
inheritTableSpecs: Boolean,
isSkewedStoreAsSubdir: Boolean,
isSrcLocal: Boolean): Unit
def loadTable(
hive: Hive,
loadPath: Path,
tableName: String,
replace: Boolean,
isSrcLocal: Boolean): Unit
def loadDynamicPartitions(
hive: Hive,
loadPath: Path,
tableName: String,
partSpec: JMap[String, String],
replace: Boolean,
numDP: Int,
listBucketingEnabled: Boolean): Unit
def createFunction(hive: Hive, db: String, func: CatalogFunction): Unit
def dropFunction(hive: Hive, db: String, name: String): Unit
def renameFunction(hive: Hive, db: String, oldName: String, newName: String): Unit
def alterFunction(hive: Hive, db: String, func: CatalogFunction): Unit
def getFunctionOption(hive: Hive, db: String, name: String): Option[CatalogFunction]
def listFunctions(hive: Hive, db: String, pattern: String): Seq[String]
def dropIndex(hive: Hive, dbName: String, tableName: String, indexName: String): Unit
def dropTable(
hive: Hive,
dbName: String,
tableName: String,
deleteData: Boolean,
ignoreIfNotExists: Boolean,
purge: Boolean): Unit
def dropPartition(
hive: Hive,
dbName: String,
tableName: String,
part: JList[String],
deleteData: Boolean,
purge: Boolean): Unit
def getDatabaseOwnerName(db: Database): String
def setDatabaseOwnerName(db: Database, owner: String): Unit
protected def findStaticMethod(klass: Class[_], name: String, args: Class[_]*): Method = {
val method = findMethod(klass, name, args: _*)
require(Modifier.isStatic(method.getModifiers()),
s"Method $name of class $klass is not static.")
method
}
def getMSC(hive: Hive): IMetaStoreClient
protected def findMethod(klass: Class[_], name: String, args: Class[_]*): Method = {
klass.getMethod(name, args: _*)
}
}
private[client] class Shim_v0_12 extends Shim with Logging {
// See HIVE-12224, HOLD_DDLTIME was broken as soon as it landed
protected lazy val holdDDLTime = JBoolean.FALSE
// deletes the underlying data along with metadata
protected lazy val deleteDataInDropIndex = JBoolean.TRUE
protected lazy val getMSCMethod = {
// Since getMSC() in Hive 0.12 is private, findMethod() could not work here
val msc = classOf[Hive].getDeclaredMethod("getMSC")
msc.setAccessible(true)
msc
}
override def getMSC(hive: Hive): IMetaStoreClient = {
getMSCMethod.invoke(hive).asInstanceOf[IMetaStoreClient]
}
private lazy val startMethod =
findStaticMethod(
classOf[SessionState],
"start",
classOf[SessionState])
private lazy val getDataLocationMethod = findMethod(classOf[Table], "getDataLocation")
private lazy val setDataLocationMethod =
findMethod(
classOf[Table],
"setDataLocation",
classOf[URI])
private lazy val getAllPartitionsMethod =
findMethod(
classOf[Hive],
"getAllPartitionsForPruner",
classOf[Table])
private lazy val getCommandProcessorMethod =
findStaticMethod(
classOf[CommandProcessorFactory],
"get",
classOf[String],
classOf[HiveConf])
private lazy val getDriverResultsMethod =
findMethod(
classOf[Driver],
"getResults",
classOf[JArrayList[String]])
private lazy val createPartitionMethod =
findMethod(
classOf[Hive],
"createPartition",
classOf[Table],
classOf[JMap[String, String]],
classOf[Path],
classOf[JMap[String, String]],
classOf[String],
classOf[String],
JInteger.TYPE,
classOf[JList[Object]],
classOf[String],
classOf[JMap[String, String]],
classOf[JList[Object]],
classOf[JList[Object]])
private lazy val loadPartitionMethod =
findMethod(
classOf[Hive],
"loadPartition",
classOf[Path],
classOf[String],
classOf[JMap[String, String]],
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE)
private lazy val loadTableMethod =
findMethod(
classOf[Hive],
"loadTable",
classOf[Path],
classOf[String],
JBoolean.TYPE,
JBoolean.TYPE)
private lazy val loadDynamicPartitionsMethod =
findMethod(
classOf[Hive],
"loadDynamicPartitions",
classOf[Path],
classOf[String],
classOf[JMap[String, String]],
JBoolean.TYPE,
JInteger.TYPE,
JBoolean.TYPE,
JBoolean.TYPE)
private lazy val dropIndexMethod =
findMethod(
classOf[Hive],
"dropIndex",
classOf[String],
classOf[String],
classOf[String],
JBoolean.TYPE)
private lazy val alterTableMethod =
findMethod(
classOf[Hive],
"alterTable",
classOf[String],
classOf[Table])
private lazy val alterPartitionsMethod =
findMethod(
classOf[Hive],
"alterPartitions",
classOf[String],
classOf[JList[Partition]])
override def setCurrentSessionState(state: SessionState): Unit = {
// Starting from Hive 0.13, setCurrentSessionState will internally override
// the context class loader of the current thread by the class loader set in
// the conf of the SessionState. So, for this Hive 0.12 shim, we add the same
// behavior and make shim.setCurrentSessionState of all Hive versions have the
// consistent behavior.
Thread.currentThread().setContextClassLoader(state.getConf.getClassLoader)
startMethod.invoke(null, state)
}
override def getDataLocation(table: Table): Option[String] =
Option(getDataLocationMethod.invoke(table)).map(_.toString())
override def setDataLocation(table: Table, loc: String): Unit =
setDataLocationMethod.invoke(table, new URI(loc))
// Follows exactly the same logic of DDLTask.createPartitions in Hive 0.12
override def createPartitions(
hive: Hive,
database: String,
tableName: String,
parts: Seq[CatalogTablePartition],
ignoreIfExists: Boolean): Unit = {
val table = hive.getTable(database, tableName)
parts.foreach { s =>
val location = s.storage.locationUri.map(
uri => new Path(table.getPath, new Path(uri))).orNull
val params = if (s.parameters.nonEmpty) s.parameters.asJava else null
val spec = s.spec.asJava
if (hive.getPartition(table, spec, false) != null && ignoreIfExists) {
// Ignore this partition since it already exists and ignoreIfExists == true
} else {
if (location == null && table.isView()) {
throw QueryExecutionErrors.illegalLocationClauseForViewPartitionError()
}
createPartitionMethod.invoke(
hive,
table,
spec,
location,
params, // partParams
null, // inputFormat
null, // outputFormat
-1: JInteger, // numBuckets
null, // cols
null, // serializationLib
null, // serdeParams
null, // bucketCols
null) // sortCols
}
}
}
override def getAllPartitions(hive: Hive, table: Table): Seq[Partition] =
getAllPartitionsMethod.invoke(hive, table).asInstanceOf[JSet[Partition]].asScala.toSeq
override def getPartitionsByFilter(
hive: Hive,
table: Table,
predicates: Seq[Expression]): Seq[Partition] = {
// getPartitionsByFilter() doesn't support binary comparison ops in Hive 0.12.
// See HIVE-4888.
logDebug("Hive 0.12 doesn't support predicate pushdown to metastore. " +
"Please use Hive 0.13 or higher.")
getAllPartitions(hive, table)
}
override def getCommandProcessor(token: String, conf: HiveConf): CommandProcessor =
getCommandProcessorMethod.invoke(null, token, conf).asInstanceOf[CommandProcessor]
override def getDriverResults(driver: Driver): Seq[String] = {
val res = new JArrayList[String]()
getDriverResultsMethod.invoke(driver, res)
res.asScala.toSeq
}
override def getMetastoreClientConnectRetryDelayMillis(conf: HiveConf): Long = {
conf.getIntVar(HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY) * 1000L
}
override def getTablesByType(
hive: Hive,
dbName: String,
pattern: String,
tableType: TableType): Seq[String] = {
throw QueryExecutionErrors.getTablesByTypeUnsupportedByHiveVersionError()
}
override def loadPartition(
hive: Hive,
loadPath: Path,
tableName: String,
partSpec: JMap[String, String],
replace: Boolean,
inheritTableSpecs: Boolean,
isSkewedStoreAsSubdir: Boolean,
isSrcLocal: Boolean): Unit = {
loadPartitionMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
JBoolean.FALSE, inheritTableSpecs: JBoolean, isSkewedStoreAsSubdir: JBoolean)
}
override def loadTable(
hive: Hive,
loadPath: Path,
tableName: String,
replace: Boolean,
isSrcLocal: Boolean): Unit = {
loadTableMethod.invoke(hive, loadPath, tableName, replace: JBoolean, holdDDLTime)
}
override def loadDynamicPartitions(
hive: Hive,
loadPath: Path,
tableName: String,
partSpec: JMap[String, String],
replace: Boolean,
numDP: Int,
listBucketingEnabled: Boolean): Unit = {
loadDynamicPartitionsMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
numDP: JInteger, holdDDLTime, listBucketingEnabled: JBoolean)
}
override def dropIndex(hive: Hive, dbName: String, tableName: String, indexName: String): Unit = {
dropIndexMethod.invoke(hive, dbName, tableName, indexName, deleteDataInDropIndex)
}
override def dropTable(
hive: Hive,
dbName: String,
tableName: String,
deleteData: Boolean,
ignoreIfNotExists: Boolean,
purge: Boolean): Unit = {
if (purge) {
throw QueryExecutionErrors.dropTableWithPurgeUnsupportedError()
}
hive.dropTable(dbName, tableName, deleteData, ignoreIfNotExists)
}
override def alterTable(hive: Hive, tableName: String, table: Table): Unit = {
alterTableMethod.invoke(hive, tableName, table)
}
override def alterPartitions(hive: Hive, tableName: String, newParts: JList[Partition]): Unit = {
alterPartitionsMethod.invoke(hive, tableName, newParts)
}
override def dropPartition(
hive: Hive,
dbName: String,
tableName: String,
part: JList[String],
deleteData: Boolean,
purge: Boolean): Unit = {
if (purge) {
throw QueryExecutionErrors.alterTableWithDropPartitionAndPurgeUnsupportedError()
}
hive.dropPartition(dbName, tableName, part, deleteData)
}
override def createFunction(hive: Hive, db: String, func: CatalogFunction): Unit = {
throw QueryCompilationErrors.hiveCreatePermanentFunctionsUnsupportedError()
}
def dropFunction(hive: Hive, db: String, name: String): Unit = {
throw new NoSuchPermanentFunctionException(db, name)
}
def renameFunction(hive: Hive, db: String, oldName: String, newName: String): Unit = {
throw new NoSuchPermanentFunctionException(db, oldName)
}
def alterFunction(hive: Hive, db: String, func: CatalogFunction): Unit = {
throw new NoSuchPermanentFunctionException(db, func.identifier.funcName)
}
def getFunctionOption(hive: Hive, db: String, name: String): Option[CatalogFunction] = {
None
}
def listFunctions(hive: Hive, db: String, pattern: String): Seq[String] = {
Seq.empty[String]
}
override def getDatabaseOwnerName(db: Database): String = ""
override def setDatabaseOwnerName(db: Database, owner: String): Unit = {}
}
private[client] class Shim_v0_13 extends Shim_v0_12 {
private lazy val setCurrentSessionStateMethod =
findStaticMethod(
classOf[SessionState],
"setCurrentSessionState",
classOf[SessionState])
private lazy val setDataLocationMethod =
findMethod(
classOf[Table],
"setDataLocation",
classOf[Path])
private lazy val getAllPartitionsMethod =
findMethod(
classOf[Hive],
"getAllPartitionsOf",
classOf[Table])
private lazy val getPartitionsByFilterMethod =
findMethod(
classOf[Hive],
"getPartitionsByFilter",
classOf[Table],
classOf[String])
private lazy val getCommandProcessorMethod =
findStaticMethod(
classOf[CommandProcessorFactory],
"get",
classOf[Array[String]],
classOf[HiveConf])
private lazy val getDriverResultsMethod =
findMethod(
classOf[Driver],
"getResults",
classOf[JList[Object]])
private lazy val getDatabaseOwnerNameMethod =
findMethod(
classOf[Database],
"getOwnerName")
private lazy val setDatabaseOwnerNameMethod =
findMethod(
classOf[Database],
"setOwnerName",
classOf[String])
override def setCurrentSessionState(state: SessionState): Unit =
setCurrentSessionStateMethod.invoke(null, state)
override def setDataLocation(table: Table, loc: String): Unit =
setDataLocationMethod.invoke(table, new Path(loc))
override def createPartitions(
hive: Hive,
db: String,
table: String,
parts: Seq[CatalogTablePartition],
ignoreIfExists: Boolean): Unit = {
val addPartitionDesc = new AddPartitionDesc(db, table, ignoreIfExists)
parts.zipWithIndex.foreach { case (s, i) =>
addPartitionDesc.addPartition(
s.spec.asJava, s.storage.locationUri.map(CatalogUtils.URIToString(_)).orNull)
if (s.parameters.nonEmpty) {
addPartitionDesc.getPartition(i).setPartParams(s.parameters.asJava)
}
}
hive.createPartitions(addPartitionDesc)
}
override def getAllPartitions(hive: Hive, table: Table): Seq[Partition] =
getAllPartitionsMethod.invoke(hive, table).asInstanceOf[JSet[Partition]].asScala.toSeq
private def toHiveFunction(f: CatalogFunction, db: String): HiveFunction = {
val resourceUris = f.resources.map { resource =>
new ResourceUri(ResourceType.valueOf(
resource.resourceType.resourceType.toUpperCase(Locale.ROOT)), resource.uri)
}
new HiveFunction(
f.identifier.funcName,
db,
f.className,
null,
PrincipalType.USER,
TimeUnit.MILLISECONDS.toSeconds(System.currentTimeMillis).toInt,
FunctionType.JAVA,
resourceUris.asJava)
}
override def createFunction(hive: Hive, db: String, func: CatalogFunction): Unit = {
hive.createFunction(toHiveFunction(func, db))
}
override def dropFunction(hive: Hive, db: String, name: String): Unit = {
hive.dropFunction(db, name)
}
override def renameFunction(hive: Hive, db: String, oldName: String, newName: String): Unit = {
val catalogFunc = getFunctionOption(hive, db, oldName)
.getOrElse(throw new NoSuchPermanentFunctionException(db, oldName))
.copy(identifier = FunctionIdentifier(newName, Some(db)))
val hiveFunc = toHiveFunction(catalogFunc, db)
hive.alterFunction(db, oldName, hiveFunc)
}
override def alterFunction(hive: Hive, db: String, func: CatalogFunction): Unit = {
hive.alterFunction(db, func.identifier.funcName, toHiveFunction(func, db))
}
private def fromHiveFunction(hf: HiveFunction): CatalogFunction = {
val name = FunctionIdentifier(hf.getFunctionName, Option(hf.getDbName))
val resources = hf.getResourceUris.asScala.map { uri =>
val resourceType = uri.getResourceType() match {
case ResourceType.ARCHIVE => "archive"
case ResourceType.FILE => "file"
case ResourceType.JAR => "jar"
case r => throw QueryCompilationErrors.unknownHiveResourceTypeError(r.toString)
}
FunctionResource(FunctionResourceType.fromString(resourceType), uri.getUri())
}
CatalogFunction(name, hf.getClassName, resources.toSeq)
}
override def getFunctionOption(hive: Hive, db: String, name: String): Option[CatalogFunction] = {
try {
Option(hive.getFunction(db, name)).map(fromHiveFunction)
} catch {
case NonFatal(e) if isCausedBy(e, s"$name does not exist") =>
None
}
}
  private def isCausedBy(e: Throwable, matchMessage: String): Boolean = {
    if (e.getMessage.contains(matchMessage)) {
      true
    } else if (e.getCause != null) {
      isCausedBy(e.getCause, matchMessage)
    } else {
      false
    }
  }
override def listFunctions(hive: Hive, db: String, pattern: String): Seq[String] = {
hive.getFunctions(db, pattern).asScala.toSeq
}
/**
* Converts catalyst expression to the format that Hive's getPartitionsByFilter() expects, i.e.
   * a string that represents partition predicates like "str_key=\"value\" and int_key=1 ...".
*
* Unsupported predicates are skipped.
*/
def convertFilters(table: Table, filters: Seq[Expression]): String = {
lazy val dateFormatter = DateFormatter()
/**
* An extractor that matches all binary comparison operators except null-safe equality.
*
* Null-safe equality is not supported by Hive metastore partition predicate pushdown
*/
object SpecialBinaryComparison {
def unapply(e: BinaryComparison): Option[(Expression, Expression)] = e match {
case _: EqualNullSafe => None
case _ => Some((e.left, e.right))
}
}
object ExtractableLiteral {
def unapply(expr: Expression): Option[String] = expr match {
case Literal(null, _) => None // `null`s can be cast as other types; we want to avoid NPEs.
case Literal(value, _: IntegralType) => Some(value.toString)
case Literal(value, _: StringType) => Some(quoteStringLiteral(value.toString))
case Literal(value, _: DateType) =>
Some(dateFormatter.format(value.asInstanceOf[Int]))
case _ => None
}
}
object ExtractableLiterals {
def unapply(exprs: Seq[Expression]): Option[Seq[String]] = {
// SPARK-24879: The Hive metastore filter parser does not support "null", but we still want
// to push down as many predicates as we can while still maintaining correctness.
// In SQL, the `IN` expression evaluates as follows:
// > `1 in (2, NULL)` -> NULL
// > `1 in (1, NULL)` -> true
// > `1 in (2)` -> false
// Since Hive metastore filters are NULL-intolerant binary operations joined only by
// `AND` and `OR`, we can treat `NULL` as `false` and thus rewrite `1 in (2, NULL)` as
// `1 in (2)`.
// If the Hive metastore begins supporting NULL-tolerant predicates and Spark starts
// pushing down these predicates, then this optimization will become incorrect and need
// to be changed.
val extractables = exprs
.filter {
case Literal(null, _) => false
case _ => true
}.map(ExtractableLiteral.unapply)
if (extractables.nonEmpty && extractables.forall(_.isDefined)) {
Some(extractables.map(_.get))
} else {
None
}
}
}
object ExtractableValues {
private lazy val valueToLiteralString: PartialFunction[Any, String] = {
case value: Byte => value.toString
case value: Short => value.toString
case value: Int => value.toString
case value: Long => value.toString
case value: UTF8String => quoteStringLiteral(value.toString)
}
def unapply(values: Set[Any]): Option[Seq[String]] = {
val extractables = values.filter(_ != null).toSeq.map(valueToLiteralString.lift)
if (extractables.nonEmpty && extractables.forall(_.isDefined)) {
Some(extractables.map(_.get))
} else {
None
}
}
}
object ExtractableDateValues {
private lazy val valueToLiteralString: PartialFunction[Any, String] = {
case value: Int => dateFormatter.format(value)
}
def unapply(values: Set[Any]): Option[Seq[String]] = {
val extractables = values.filter(_ != null).toSeq.map(valueToLiteralString.lift)
if (extractables.nonEmpty && extractables.forall(_.isDefined)) {
Some(extractables.map(_.get))
} else {
None
}
}
}
object SupportedAttribute {
// hive varchar is treated as catalyst string, but hive varchar can't be pushed down.
private val varcharKeys = table.getPartitionKeys.asScala
.filter(col => col.getType.startsWith(serdeConstants.VARCHAR_TYPE_NAME) ||
col.getType.startsWith(serdeConstants.CHAR_TYPE_NAME))
.map(col => col.getName).toSet
def unapply(attr: Attribute): Option[String] = {
val resolver = SQLConf.get.resolver
if (varcharKeys.exists(c => resolver(c, attr.name))) {
None
} else if (attr.dataType.isInstanceOf[IntegralType] || attr.dataType == StringType ||
attr.dataType == DateType) {
Some(attr.name)
} else {
None
}
}
}
def convertInToOr(name: String, values: Seq[String]): String = {
values.map(value => s"$name = $value").mkString("(", " or ", ")")
}
def convertNotInToAnd(name: String, values: Seq[String]): String = {
values.map(value => s"$name != $value").mkString("(", " and ", ")")
}
def hasNullLiteral(list: Seq[Expression]): Boolean = list.exists {
case Literal(null, _) => true
case _ => false
}
val useAdvanced = SQLConf.get.advancedPartitionPredicatePushdownEnabled
val inSetThreshold = SQLConf.get.metastorePartitionPruningInSetThreshold
object ExtractAttribute {
def unapply(expr: Expression): Option[Attribute] = {
expr match {
case attr: Attribute => Some(attr)
case Cast(child @ IntegralType(), dt: IntegralType, _, _)
if Cast.canUpCast(child.dataType.asInstanceOf[AtomicType], dt) => unapply(child)
case _ => None
}
}
}
def convert(expr: Expression): Option[String] = expr match {
case Not(InSet(_, values)) if values.size > inSetThreshold =>
None
case Not(In(_, list)) if hasNullLiteral(list) => None
case Not(InSet(_, list)) if list.contains(null) => None
case In(ExtractAttribute(SupportedAttribute(name)), ExtractableLiterals(values))
if useAdvanced =>
Some(convertInToOr(name, values))
case Not(In(ExtractAttribute(SupportedAttribute(name)), ExtractableLiterals(values)))
if useAdvanced =>
Some(convertNotInToAnd(name, values))
case InSet(child, values) if useAdvanced && values.size > inSetThreshold =>
val dataType = child.dataType
// Skip null here is safe, more details could see at ExtractableLiterals.
val sortedValues = values.filter(_ != null).toSeq
.sorted(TypeUtils.getInterpretedOrdering(dataType))
convert(And(GreaterThanOrEqual(child, Literal(sortedValues.head, dataType)),
LessThanOrEqual(child, Literal(sortedValues.last, dataType))))
case InSet(child @ ExtractAttribute(SupportedAttribute(name)), ExtractableDateValues(values))
if useAdvanced && child.dataType == DateType =>
Some(convertInToOr(name, values))
case Not(InSet(child @ ExtractAttribute(SupportedAttribute(name)),
ExtractableDateValues(values))) if useAdvanced && child.dataType == DateType =>
Some(convertNotInToAnd(name, values))
case InSet(ExtractAttribute(SupportedAttribute(name)), ExtractableValues(values))
if useAdvanced =>
Some(convertInToOr(name, values))
case Not(InSet(ExtractAttribute(SupportedAttribute(name)), ExtractableValues(values)))
if useAdvanced =>
Some(convertNotInToAnd(name, values))
case op @ SpecialBinaryComparison(
ExtractAttribute(SupportedAttribute(name)), ExtractableLiteral(value)) =>
Some(s"$name ${op.symbol} $value")
case op @ SpecialBinaryComparison(
ExtractableLiteral(value), ExtractAttribute(SupportedAttribute(name))) =>
Some(s"$value ${op.symbol} $name")
case Contains(ExtractAttribute(SupportedAttribute(name)), ExtractableLiteral(value)) =>
Some(s"$name like " + (("\\".*" + value.drop(1)).dropRight(1) + ".*\\""))
case StartsWith(ExtractAttribute(SupportedAttribute(name)), ExtractableLiteral(value)) =>
Some(s"$name like " + (value.dropRight(1) + ".*\\""))
case EndsWith(ExtractAttribute(SupportedAttribute(name)), ExtractableLiteral(value)) =>
Some(s"$name like " + ("\\".*" + value.drop(1)))
case And(expr1, expr2) if useAdvanced =>
val converted = convert(expr1) ++ convert(expr2)
if (converted.isEmpty) {
None
} else {
Some(converted.mkString("(", " and ", ")"))
}
case Or(expr1, expr2) if useAdvanced =>
for {
left <- convert(expr1)
right <- convert(expr2)
} yield s"($left or $right)"
case Not(EqualTo(
ExtractAttribute(SupportedAttribute(name)), ExtractableLiteral(value))) if useAdvanced =>
Some(s"$name != $value")
case Not(EqualTo(
ExtractableLiteral(value), ExtractAttribute(SupportedAttribute(name)))) if useAdvanced =>
Some(s"$value != $name")
case _ => None
}
filters.flatMap(convert).mkString(" and ")
}
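  // Illustrative (not exhaustive) sketch of the conversion above, assuming advanced pushdown
  // is enabled and the partition columns have supported types:
  //   p = 'a' AND id IN (1, 2)   ==>   (p = "a" and (id = 1 or id = 2))
  // Predicates that cannot be expressed (e.g. null-safe equality) are silently dropped.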
private def quoteStringLiteral(str: String): String = {
    if (!str.contains("\"")) {
s""""$str""""
} else if (!str.contains("'")) {
s"""'$str'"""
} else {
throw QueryExecutionErrors.invalidPartitionFilterError()
}
}
override def getPartitionsByFilter(
hive: Hive,
table: Table,
predicates: Seq[Expression]): Seq[Partition] = {
// Hive getPartitionsByFilter() takes a string that represents partition
    // predicates like "str_key=\"value\" and int_key=1 ..."
val filter = convertFilters(table, predicates)
val partitions =
if (filter.isEmpty) {
getAllPartitionsMethod.invoke(hive, table).asInstanceOf[JSet[Partition]]
} else {
logDebug(s"Hive metastore filter is '$filter'.")
val tryDirectSqlConfVar = HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL
val shouldFallback = SQLConf.get.metastorePartitionPruningFallbackOnException
try {
// Hive may throw an exception when calling this method in some circumstances, such as
// when filtering on a non-string partition column when the hive config key
// hive.metastore.try.direct.sql is false. In some cases the remote metastore will throw
// exceptions even if the config is true, due to various reasons including the
// underlying RDBMS, Hive bugs when generating the filter, etc.
//
// Because of the above we'll fallback to use `Hive.getAllPartitionsOf` when the exception
// occurs and the config`spark.sql.hive.metastorePartitionPruningFallbackOnException` is
// enabled.
getPartitionsByFilterMethod.invoke(hive, table, filter)
.asInstanceOf[JArrayList[Partition]]
} catch {
case ex: InvocationTargetException if ex.getCause.isInstanceOf[MetaException] &&
shouldFallback =>
logWarning("Caught Hive MetaException attempting to get partition metadata by " +
"filter from Hive. Falling back to fetching all partition metadata, which will " +
"degrade performance. Modifying your Hive metastore configuration to set " +
s"${tryDirectSqlConfVar.varname} to true (if it is not true already) may resolve " +
"this problem. Otherwise, to avoid degraded performance you can set " +
s"${SQLConf.HIVE_METASTORE_PARTITION_PRUNING_FALLBACK_ON_EXCEPTION.key} " +
" to false and let the query fail instead.", ex)
// HiveShim clients are expected to handle a superset of the requested partitions
getAllPartitionsMethod.invoke(hive, table).asInstanceOf[JSet[Partition]]
case ex: InvocationTargetException if ex.getCause.isInstanceOf[MetaException] =>
throw QueryExecutionErrors.getPartitionMetadataByFilterError(ex)
}
}
partitions.asScala.toSeq
}
override def getCommandProcessor(token: String, conf: HiveConf): CommandProcessor =
getCommandProcessorMethod.invoke(null, Array(token), conf).asInstanceOf[CommandProcessor]
override def getDriverResults(driver: Driver): Seq[String] = {
val res = new JArrayList[Object]()
getDriverResultsMethod.invoke(driver, res)
res.asScala.map { r =>
r match {
case s: String => s
case a: Array[Object] => a(0).asInstanceOf[String]
}
}.toSeq
}
override def getDatabaseOwnerName(db: Database): String = {
Option(getDatabaseOwnerNameMethod.invoke(db)).map(_.asInstanceOf[String]).getOrElse("")
}
override def setDatabaseOwnerName(db: Database, owner: String): Unit = {
setDatabaseOwnerNameMethod.invoke(db, owner)
}
}
private[client] class Shim_v0_14 extends Shim_v0_13 {
// true if this is an ACID operation
protected lazy val isAcid = JBoolean.FALSE
// true if list bucketing enabled
protected lazy val isSkewedStoreAsSubdir = JBoolean.FALSE
private lazy val loadPartitionMethod =
findMethod(
classOf[Hive],
"loadPartition",
classOf[Path],
classOf[String],
classOf[JMap[String, String]],
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE)
private lazy val loadTableMethod =
findMethod(
classOf[Hive],
"loadTable",
classOf[Path],
classOf[String],
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE)
private lazy val loadDynamicPartitionsMethod =
findMethod(
classOf[Hive],
"loadDynamicPartitions",
classOf[Path],
classOf[String],
classOf[JMap[String, String]],
JBoolean.TYPE,
JInteger.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE)
private lazy val dropTableMethod =
findMethod(
classOf[Hive],
"dropTable",
classOf[String],
classOf[String],
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE)
private lazy val getTimeVarMethod =
findMethod(
classOf[HiveConf],
"getTimeVar",
classOf[HiveConf.ConfVars],
classOf[TimeUnit])
override def loadPartition(
hive: Hive,
loadPath: Path,
tableName: String,
partSpec: JMap[String, String],
replace: Boolean,
inheritTableSpecs: Boolean,
isSkewedStoreAsSubdir: Boolean,
isSrcLocal: Boolean): Unit = {
loadPartitionMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
holdDDLTime, inheritTableSpecs: JBoolean, isSkewedStoreAsSubdir: JBoolean,
isSrcLocal: JBoolean, isAcid)
}
override def loadTable(
hive: Hive,
loadPath: Path,
tableName: String,
replace: Boolean,
isSrcLocal: Boolean): Unit = {
loadTableMethod.invoke(hive, loadPath, tableName, replace: JBoolean, holdDDLTime,
isSrcLocal: JBoolean, isSkewedStoreAsSubdir, isAcid)
}
override def loadDynamicPartitions(
hive: Hive,
loadPath: Path,
tableName: String,
partSpec: JMap[String, String],
replace: Boolean,
numDP: Int,
listBucketingEnabled: Boolean): Unit = {
loadDynamicPartitionsMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
numDP: JInteger, holdDDLTime, listBucketingEnabled: JBoolean, isAcid)
}
override def dropTable(
hive: Hive,
dbName: String,
tableName: String,
deleteData: Boolean,
ignoreIfNotExists: Boolean,
purge: Boolean): Unit = {
dropTableMethod.invoke(hive, dbName, tableName, deleteData: JBoolean,
ignoreIfNotExists: JBoolean, purge: JBoolean)
}
override def getMetastoreClientConnectRetryDelayMillis(conf: HiveConf): Long = {
getTimeVarMethod.invoke(
conf,
HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY,
TimeUnit.MILLISECONDS).asInstanceOf[Long]
}
}
private[client] class Shim_v1_0 extends Shim_v0_14
private[client] class Shim_v1_1 extends Shim_v1_0 {
// throws an exception if the index does not exist
protected lazy val throwExceptionInDropIndex = JBoolean.TRUE
private lazy val dropIndexMethod =
findMethod(
classOf[Hive],
"dropIndex",
classOf[String],
classOf[String],
classOf[String],
JBoolean.TYPE,
JBoolean.TYPE)
override def dropIndex(hive: Hive, dbName: String, tableName: String, indexName: String): Unit = {
dropIndexMethod.invoke(hive, dbName, tableName, indexName, throwExceptionInDropIndex,
deleteDataInDropIndex)
}
}
private[client] class Shim_v1_2 extends Shim_v1_1 {
// txnId can be 0 unless isAcid == true
protected lazy val txnIdInLoadDynamicPartitions: JLong = 0L
private lazy val loadDynamicPartitionsMethod =
findMethod(
classOf[Hive],
"loadDynamicPartitions",
classOf[Path],
classOf[String],
classOf[JMap[String, String]],
JBoolean.TYPE,
JInteger.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JLong.TYPE)
private lazy val dropOptionsClass =
Utils.classForName("org.apache.hadoop.hive.metastore.PartitionDropOptions")
private lazy val dropOptionsDeleteData = dropOptionsClass.getField("deleteData")
private lazy val dropOptionsPurge = dropOptionsClass.getField("purgeData")
private lazy val dropPartitionMethod =
findMethod(
classOf[Hive],
"dropPartition",
classOf[String],
classOf[String],
classOf[JList[String]],
dropOptionsClass)
override def loadDynamicPartitions(
hive: Hive,
loadPath: Path,
tableName: String,
partSpec: JMap[String, String],
replace: Boolean,
numDP: Int,
listBucketingEnabled: Boolean): Unit = {
loadDynamicPartitionsMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
numDP: JInteger, holdDDLTime, listBucketingEnabled: JBoolean, isAcid,
txnIdInLoadDynamicPartitions)
}
override def dropPartition(
hive: Hive,
dbName: String,
tableName: String,
part: JList[String],
deleteData: Boolean,
purge: Boolean): Unit = {
val dropOptions = dropOptionsClass.getConstructor().newInstance().asInstanceOf[Object]
dropOptionsDeleteData.setBoolean(dropOptions, deleteData)
dropOptionsPurge.setBoolean(dropOptions, purge)
dropPartitionMethod.invoke(hive, dbName, tableName, part, dropOptions)
}
}
private[client] class Shim_v2_0 extends Shim_v1_2 {
private lazy val loadPartitionMethod =
findMethod(
classOf[Hive],
"loadPartition",
classOf[Path],
classOf[String],
classOf[JMap[String, String]],
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE)
private lazy val loadTableMethod =
findMethod(
classOf[Hive],
"loadTable",
classOf[Path],
classOf[String],
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE)
private lazy val loadDynamicPartitionsMethod =
findMethod(
classOf[Hive],
"loadDynamicPartitions",
classOf[Path],
classOf[String],
classOf[JMap[String, String]],
JBoolean.TYPE,
JInteger.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JLong.TYPE)
override def loadPartition(
hive: Hive,
loadPath: Path,
tableName: String,
partSpec: JMap[String, String],
replace: Boolean,
inheritTableSpecs: Boolean,
isSkewedStoreAsSubdir: Boolean,
isSrcLocal: Boolean): Unit = {
loadPartitionMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
inheritTableSpecs: JBoolean, isSkewedStoreAsSubdir: JBoolean,
isSrcLocal: JBoolean, isAcid)
}
override def loadTable(
hive: Hive,
loadPath: Path,
tableName: String,
replace: Boolean,
isSrcLocal: Boolean): Unit = {
loadTableMethod.invoke(hive, loadPath, tableName, replace: JBoolean, isSrcLocal: JBoolean,
isSkewedStoreAsSubdir, isAcid)
}
override def loadDynamicPartitions(
hive: Hive,
loadPath: Path,
tableName: String,
partSpec: JMap[String, String],
replace: Boolean,
numDP: Int,
listBucketingEnabled: Boolean): Unit = {
loadDynamicPartitionsMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
numDP: JInteger, listBucketingEnabled: JBoolean, isAcid, txnIdInLoadDynamicPartitions)
}
}
private[client] class Shim_v2_1 extends Shim_v2_0 {
// true if there is any following stats task
protected lazy val hasFollowingStatsTask = JBoolean.FALSE
// TODO: Now, always set environmentContext to null. In the future, we should avoid setting
// hive-generated stats to -1 when altering tables by using environmentContext. See Hive-12730
protected lazy val environmentContextInAlterTable = null
private lazy val loadPartitionMethod =
findMethod(
classOf[Hive],
"loadPartition",
classOf[Path],
classOf[String],
classOf[JMap[String, String]],
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE)
private lazy val loadTableMethod =
findMethod(
classOf[Hive],
"loadTable",
classOf[Path],
classOf[String],
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE)
private lazy val loadDynamicPartitionsMethod =
findMethod(
classOf[Hive],
"loadDynamicPartitions",
classOf[Path],
classOf[String],
classOf[JMap[String, String]],
JBoolean.TYPE,
JInteger.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JLong.TYPE,
JBoolean.TYPE,
classOf[AcidUtils.Operation])
private lazy val alterTableMethod =
findMethod(
classOf[Hive],
"alterTable",
classOf[String],
classOf[Table],
classOf[EnvironmentContext])
private lazy val alterPartitionsMethod =
findMethod(
classOf[Hive],
"alterPartitions",
classOf[String],
classOf[JList[Partition]],
classOf[EnvironmentContext])
override def loadPartition(
hive: Hive,
loadPath: Path,
tableName: String,
partSpec: JMap[String, String],
replace: Boolean,
inheritTableSpecs: Boolean,
isSkewedStoreAsSubdir: Boolean,
isSrcLocal: Boolean): Unit = {
loadPartitionMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
inheritTableSpecs: JBoolean, isSkewedStoreAsSubdir: JBoolean,
isSrcLocal: JBoolean, isAcid, hasFollowingStatsTask)
}
override def loadTable(
hive: Hive,
loadPath: Path,
tableName: String,
replace: Boolean,
isSrcLocal: Boolean): Unit = {
loadTableMethod.invoke(hive, loadPath, tableName, replace: JBoolean, isSrcLocal: JBoolean,
isSkewedStoreAsSubdir, isAcid, hasFollowingStatsTask)
}
override def loadDynamicPartitions(
hive: Hive,
loadPath: Path,
tableName: String,
partSpec: JMap[String, String],
replace: Boolean,
numDP: Int,
listBucketingEnabled: Boolean): Unit = {
loadDynamicPartitionsMethod.invoke(hive, loadPath, tableName, partSpec, replace: JBoolean,
numDP: JInteger, listBucketingEnabled: JBoolean, isAcid, txnIdInLoadDynamicPartitions,
hasFollowingStatsTask, AcidUtils.Operation.NOT_ACID)
}
override def alterTable(hive: Hive, tableName: String, table: Table): Unit = {
alterTableMethod.invoke(hive, tableName, table, environmentContextInAlterTable)
}
override def alterPartitions(hive: Hive, tableName: String, newParts: JList[Partition]): Unit = {
alterPartitionsMethod.invoke(hive, tableName, newParts, environmentContextInAlterTable)
}
}
private[client] class Shim_v2_2 extends Shim_v2_1
private[client] class Shim_v2_3 extends Shim_v2_1 {
private lazy val getTablesByTypeMethod =
findMethod(
classOf[Hive],
"getTablesByType",
classOf[String],
classOf[String],
classOf[TableType])
override def getTablesByType(
hive: Hive,
dbName: String,
pattern: String,
tableType: TableType): Seq[String] = {
getTablesByTypeMethod.invoke(hive, dbName, pattern, tableType)
.asInstanceOf[JList[String]].asScala.toSeq
}
}
private[client] class Shim_v3_0 extends Shim_v2_3 {
// Spark supports only non-ACID operations
protected lazy val isAcidIUDoperation = JBoolean.FALSE
// Writer ID can be 0 for non-ACID operations
protected lazy val writeIdInLoadTableOrPartition: JLong = 0L
// Statement ID
protected lazy val stmtIdInLoadTableOrPartition: JInteger = 0
protected lazy val listBucketingLevel: JInteger = 0
private lazy val clazzLoadFileType = getClass.getClassLoader.loadClass(
"org.apache.hadoop.hive.ql.plan.LoadTableDesc$LoadFileType")
private lazy val loadPartitionMethod =
findMethod(
classOf[Hive],
"loadPartition",
classOf[Path],
classOf[Table],
classOf[JMap[String, String]],
clazzLoadFileType,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
classOf[JLong],
JInteger.TYPE,
JBoolean.TYPE)
private lazy val loadTableMethod =
findMethod(
classOf[Hive],
"loadTable",
classOf[Path],
classOf[String],
clazzLoadFileType,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
JBoolean.TYPE,
classOf[JLong],
JInteger.TYPE,
JBoolean.TYPE)
private lazy val loadDynamicPartitionsMethod =
findMethod(
classOf[Hive],
"loadDynamicPartitions",
classOf[Path],
classOf[String],
classOf[JMap[String, String]],
clazzLoadFileType,
JInteger.TYPE,
JInteger.TYPE,
JBoolean.TYPE,
JLong.TYPE,
JInteger.TYPE,
JBoolean.TYPE,
classOf[AcidUtils.Operation],
JBoolean.TYPE)
override def loadPartition(
hive: Hive,
loadPath: Path,
tableName: String,
partSpec: JMap[String, String],
replace: Boolean,
inheritTableSpecs: Boolean,
isSkewedStoreAsSubdir: Boolean,
isSrcLocal: Boolean): Unit = {
val table = hive.getTable(tableName)
val loadFileType = if (replace) {
clazzLoadFileType.getEnumConstants.find(_.toString.equalsIgnoreCase("REPLACE_ALL"))
} else {
clazzLoadFileType.getEnumConstants.find(_.toString.equalsIgnoreCase("KEEP_EXISTING"))
}
assert(loadFileType.isDefined)
loadPartitionMethod.invoke(hive, loadPath, table, partSpec, loadFileType.get,
inheritTableSpecs: JBoolean, isSkewedStoreAsSubdir: JBoolean,
isSrcLocal: JBoolean, isAcid, hasFollowingStatsTask,
writeIdInLoadTableOrPartition, stmtIdInLoadTableOrPartition, replace: JBoolean)
}
override def loadTable(
hive: Hive,
loadPath: Path,
tableName: String,
replace: Boolean,
isSrcLocal: Boolean): Unit = {
val loadFileType = if (replace) {
clazzLoadFileType.getEnumConstants.find(_.toString.equalsIgnoreCase("REPLACE_ALL"))
} else {
clazzLoadFileType.getEnumConstants.find(_.toString.equalsIgnoreCase("KEEP_EXISTING"))
}
assert(loadFileType.isDefined)
loadTableMethod.invoke(hive, loadPath, tableName, loadFileType.get, isSrcLocal: JBoolean,
isSkewedStoreAsSubdir, isAcidIUDoperation, hasFollowingStatsTask,
writeIdInLoadTableOrPartition, stmtIdInLoadTableOrPartition: JInteger, replace: JBoolean)
}
override def loadDynamicPartitions(
hive: Hive,
loadPath: Path,
tableName: String,
partSpec: JMap[String, String],
replace: Boolean,
numDP: Int,
listBucketingEnabled: Boolean): Unit = {
val loadFileType = if (replace) {
clazzLoadFileType.getEnumConstants.find(_.toString.equalsIgnoreCase("REPLACE_ALL"))
} else {
clazzLoadFileType.getEnumConstants.find(_.toString.equalsIgnoreCase("KEEP_EXISTING"))
}
assert(loadFileType.isDefined)
loadDynamicPartitionsMethod.invoke(hive, loadPath, tableName, partSpec, loadFileType.get,
numDP: JInteger, listBucketingLevel, isAcid, writeIdInLoadTableOrPartition,
stmtIdInLoadTableOrPartition, hasFollowingStatsTask, AcidUtils.Operation.NOT_ACID,
replace: JBoolean)
}
}
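// Sketch (editorial addition, not part of Spark's source): the three Shim_v3_0 overrides above
// repeat the same reflective LoadFileType lookup. A shared private helper inside Shim_v3_0
// could factor it out, roughly as below; it is kept as a comment and uses only members already
// defined in that class.
//   private def loadFileType(replace: Boolean): AnyRef = {
//     val name = if (replace) "REPLACE_ALL" else "KEEP_EXISTING"
//     clazzLoadFileType.getEnumConstants.find(_.toString.equalsIgnoreCase(name))
//       .getOrElse(throw new IllegalStateException(s"Unknown LoadFileType: $name"))
//   }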
private[client] class Shim_v3_1 extends Shim_v3_0
| chuckchen/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/client/HiveShim.scala | Scala | apache-2.0 | 49,441 |
/*
* Copyright 2016 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.regex
import laws.discipline._, arbitrary._
class CharDecoderTests extends DisciplineSuite {
checkAll("GroupDecoder[Char]", GroupDecoderTests[Char].decoder[Int, Int])
checkAll("MatchDecoder[Char]", MatchDecoderTests[Char].decoder[Int, Int])
}
| nrinaudo/kantan.regex | core/shared/src/test/scala/kantan/regex/CharDecoderTests.scala | Scala | apache-2.0 | 866 |
package cc.factorie.app.nlp.pos
import java.io._
import java.util.{HashSet, HashMap}
import cc.factorie.app.chain.Observations._
import cc.factorie.app.nlp.{Document, Sentence, Token}
import cc.factorie.util.{ClasspathURL, BinarySerializer}
/**
* Created by Oskar Singer on 10/6/14.
*/
class CtbChainPosTagger extends ChainPosTagger((t:Token) => new CtbPosTag(t, 0)) {
private var prefixMap = new HashMap[Char, HashSet[String]]
private var suffixMap = new HashMap[Char, HashSet[String]]
def this(url: java.net.URL) = {
this()
deserialize(url.openConnection().getInputStream)
}
override def train(trainSentences:Seq[Sentence],
testSentences:Seq[Sentence],
lrate:Double = 0.1,
decay:Double = 0.01,
cutoff:Int = 2,
doBootstrap:Boolean = true,
useHingeLoss:Boolean = false,
numIterations: Int = 5,
l1Factor:Double = 0.000001,
l2Factor:Double = 0.000001)(implicit random: scala.util.Random): Unit = {
initPrefixAndSuffixMaps(trainSentences.flatMap(_.tokens))
super.train(trainSentences, testSentences, lrate, decay, cutoff, doBootstrap, useHingeLoss, numIterations, l1Factor, l2Factor)
}
def initPOSFeatures(sentence: Sentence): Unit = {
import cc.factorie.app.chineseStrings._
for (token <- sentence.tokens) {
if(token.attr[PosFeatures] ne null)
token.attr.remove[PosFeatures]
val features = token.attr += new PosFeatures(token)
val rawWord = token.string
val prefix = rawWord(0)
val suffix = rawWord(rawWord.size - 1)
features += "W="+rawWord
(0 to 4).foreach {
i =>
features += "SUFFIX" + i + "=" + rawWord.takeRight(i)
features += "PREFIX" + i + "=" + rawWord.take(i)
}
if(prefixMap.containsKey(prefix)) {
val prefixLabelSet = prefixMap.get(prefix)
val prefixCTBMorph = posDomain.categories.map{
category =>
val hasCategory = {
if(prefixLabelSet.contains(category))
"TRUE"
else
"FALSE"
}
"PRE_" + category + "_" + hasCategory
}
features ++= prefixCTBMorph
}
if(suffixMap.containsKey(suffix)) {
val suffixLabelSet = suffixMap.get(suffix)
val suffixCTBMorph = posDomain.categories.map{
category =>
val hasCategory = {
if(suffixLabelSet.contains(category))
"TRUE"
else
"FALSE"
}
"SUF_" + category + "_" + hasCategory
}
features ++= suffixCTBMorph
}
if (hasPunctuation(rawWord)) features += "PUNCTUATION"
/*
if (hasNumeric(rawWord)) features += "NUMERIC"
if (hasChineseNumeric(rawWord)) features += "CHINESE_NUMERIC"
if (hasAlpha(rawWord)) features += "ALPHA"
*/
}
addNeighboringFeatureConjunctions(sentence.tokens,
(t: Token) => t.attr[PosFeatures],
"W=[^@]*$",
List(-2),
List(-1),
List(1),
List(-2,-1),
List(-1,0))
}
def initPrefixAndSuffixMaps(tokens: Seq[Token]): Unit = {
prefixMap.clear()
suffixMap.clear()
tokens.map(
token => (token.string, token.attr[LabeledCtbPosTag].categoryValue)
).foreach{
case (word, label) =>
val prefix = word(0)
val suffix = word(word.size - 1)
val prefixLabelSet = prefixMap.get(prefix)
if(prefixLabelSet != null) {
if(!prefixLabelSet.contains(label)) {
prefixLabelSet.add(label)
}
} else {
val labelSet = new HashSet[String]
labelSet.add(label)
prefixMap.put(prefix, labelSet)
}
val suffixLabelSet = suffixMap.get(suffix)
if(suffixLabelSet != null) {
if(!suffixLabelSet.contains(label)) {
suffixLabelSet.add(label)
}
} else {
val labelSet = new HashSet[String]
labelSet.add(label)
suffixMap.put(suffix, labelSet)
}
}
println("PREFIX MAP SIZE: " + prefixMap.size())
println("SUFFIX MAP SIZE: " + suffixMap.size())
}
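  // Illustration (added for this document, not in the original source): if the training data
  // contained the tokens "北京_NR" and "北方_NN", then prefixMap('北') would hold {"NR", "NN"},
  // and initPOSFeatures would emit "PRE_NR_TRUE" and "PRE_NN_TRUE" (plus "PRE_<cat>_FALSE" for
  // the remaining categories) for any word starting with '北'.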
override def serialize(stream: OutputStream) {
import cc.factorie.util.CubbieConversions._
val dstream = new DataOutputStream(new BufferedOutputStream(stream))
val out = new ObjectOutputStream(dstream)
out.writeObject(prefixMap)
out.writeObject(suffixMap)
BinarySerializer.serialize(PosFeaturesDomain.dimensionDomain, dstream)
BinarySerializer.serialize(model, dstream)
dstream.close()
out.close()
}
override def deserialize(stream: InputStream) {
import cc.factorie.util.CubbieConversions._
val dstream = new DataInputStream(new BufferedInputStream(stream))
val in = new ObjectInputStream(dstream)
prefixMap = in.readObject().asInstanceOf[HashMap[Char, HashSet[String]]]
suffixMap = in.readObject().asInstanceOf[HashMap[Char, HashSet[String]]]
BinarySerializer.deserialize(PosFeaturesDomain.dimensionDomain, dstream)
BinarySerializer.deserialize(model, dstream)
dstream.close()
in.close()
}
}
object CtbChainPosTagger extends CtbChainPosTagger(ClasspathURL[CtbChainPosTagger](".factorie"))
object CtbChainPosTrainer extends ChainPosTrainer[CtbPosTag, CtbChainPosTagger](
() => new CtbChainPosTagger(),
(dirName: String) => {
val directory = new File(dirName)
val documents =
(for{
file <- directory.listFiles
if file.isFile
document = new Document
line <- scala.io.Source.fromFile(file, "utf-8").getLines
if line.size > 0 && line(0) != '<'
sentence = new Sentence(document)
(word, label) <- line.split(' ').map( pair => {val (word, label) = pair.splitAt(pair.lastIndexOf('_')); (word, label.slice(1,label.size))} )
token = new Token(sentence, word)
labeledTag = token.attr += new LabeledCtbPosTag(token, label)
} yield document
).toIndexedSeq.distinct
documents
}
) | hlin117/factorie | src/main/scala/cc/factorie/app/nlp/pos/CtbChainPosTagger.scala | Scala | apache-2.0 | 6,206 |
import scala.reflect.macros.blackbox.Context
object Impls {
def foo(c: Context)(xs: c.Expr[Int]*) = {
import c.universe._
val stripped_xs = xs map (_.tree) toList match {
case List(Typed(stripped, Ident(wildstar))) if wildstar == typeNames.WILDCARD_STAR => List(stripped)
case _ => ???
}
val body = Apply(Select(Ident(definitions.PredefModule), TermName("println")), stripped_xs)
c.Expr[Unit](body)
}
} | yusuke2255/dotty | tests/disabled/macro/run/macro-expand-varargs-explicit-over-nonvarargs-good/Impls_1.scala | Scala | bsd-3-clause | 439 |
package gameover.fwk.ai
import com.badlogic.gdx.math.{Rectangle, Vector2}
import com.badlogic.gdx.utils.{Array => GdxArray}
trait CollisionDetector {
/**
* Check an area status
*/
def checkPosition(area: Rectangle): CollisionState.Value
/**
* Check a position status
*/
def checkPosition(x: Float, y: Float) : CollisionState.Value
/**
   * This method checks whether the area intersects something it shouldn't and returns an array of
   * Rectangle objects where the collisions happened. The velocity of the moving sprite is updated accordingly.
*/
def checkCollision(area: Rectangle, movingSpriteVelocity: Vector2, onlyBlocking: Boolean): GdxArray[Rectangle]
/**
   * Check if a point has a direct view to a target point. Only cells in the Blocking state can obstruct this view.
*/
def hasDirectView(x: Float, y: Float, targetX: Float, targetY: Float): Boolean
/**
   * Check if a sprite defined by its area has a direct view to a target point. Only cells in the Blocking state can obstruct this view.
*/
def hasDirectView(visionArea: Rectangle, targetX: Float, targetY: Float): Boolean
/**
   * Check if a sprite defined by its area has a direct view to a target area. Only cells in the Blocking state can obstruct this view.
*/
def hasDirectView(visionArea: Rectangle, targetArea: Rectangle): Boolean
/**
* Check if a sprite can move straight to a point.
*/
def canMoveStraightToPoint(area: Rectangle, targetX: Float, targetY: Float): Boolean
/**
* Check area vs the collision map to see if something collides.
*/
def checkCollision(area: Rectangle, onlyBlocking: Boolean) : Boolean = {
val state: CollisionState.Value = checkPosition(area)
(onlyBlocking && state == CollisionState.Blocking) || (!onlyBlocking && state != CollisionState.Empty)
}
/**
* Check a point vs the collision map to see if something collides.
*/
def checkCollision(x: Float, y: Float, onlyBlocking: Boolean) : Boolean = {
val state: CollisionState.Value = checkPosition(x, y)
(onlyBlocking && state == CollisionState.Blocking) || (!onlyBlocking && state != CollisionState.Empty)
}
}
object CollisionState extends Enumeration {
val Blocking, Void, Empty = Value
}
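/**
 * Illustrative stub added for this document (not part of the original framework): a detector
 * for an empty world. It reports every position as Empty, so the concrete checkCollision
 * helpers defined in the trait always return false and nothing ever obstructs a view. The
 * object name is hypothetical; it only relies on the imports already present in this file.
 */
object NoCollisionDetector extends CollisionDetector {
  override def checkPosition(area: Rectangle): CollisionState.Value = CollisionState.Empty
  override def checkPosition(x: Float, y: Float): CollisionState.Value = CollisionState.Empty
  override def checkCollision(area: Rectangle, movingSpriteVelocity: Vector2, onlyBlocking: Boolean): GdxArray[Rectangle] =
    new GdxArray[Rectangle]()
  override def hasDirectView(x: Float, y: Float, targetX: Float, targetY: Float): Boolean = true
  override def hasDirectView(visionArea: Rectangle, targetX: Float, targetY: Float): Boolean = true
  override def hasDirectView(visionArea: Rectangle, targetArea: Rectangle): Boolean = true
  override def canMoveStraightToPoint(area: Rectangle, targetX: Float, targetY: Float): Boolean = true
}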
| PixelDuck/gameover-game-framework | src/main/scala/gameover/fwk/ai/CollisionDetector.scala | Scala | mit | 2,181 |
package org.mandrake.simulation
import scala.reflect.ClassTag
case class SimpleAggregator[T <: Event : ClassTag]() extends StateInputAggregator {
override def aggregate(events: Vector[Event]): StateInputAggregator.Result =
events.iterator.collect {
case event: T => event
}.find(_ => true).fold[StateInputAggregator.Result](Left(this))(Right(_))
}
abstract class SimpleInputState[T <: Event : ClassTag] extends State {
final override def aggregator: StateInputAggregator = SimpleAggregator[T]()
final override def apply(event: StateInput): StateOutput = apply(event.asInstanceOf[T])
def apply(event: T): StateOutput
}
| louis-mon/mandrake | src/main/scala/org/mandrake/simulation/SimpleAggregator.scala | Scala | mit | 645 |
import helper.WithServer
import java.io.File
import net.azalea.curl.HTTP
import org.apache.http.entity.mime.content.ContentBody
import org.apache.http.entity.StringEntity
import org.scalatra.ScalatraServlet
import org.scalatra.servlet.{MultipartConfig, FileUploadSupport}
import org.specs2.mutable.Specification
import scala.io.Source
class PostServerServlet extends ScalatraServlet with FileUploadSupport {
post("/") {
"Hello POST!"
}
post("/reflect") {
request.body
}
post("/form") {
    params.map(v => s"${v._1} : ${v._2}").mkString("\n")
}
post("/multipart") {
    val string = params.toSeq.sortBy(_._1).map(v => s"${v._1} : ${v._2}").mkString("\n")
val file = fileParams("file")
    string + "\n" + s"${file.getFieldName} : ${file.getName}"
}
}
class HTTPPostSpec extends Specification {
sequential
class SimpleMockServer extends WithServer {
def servlet = classOf[PostServerServlet]
holder.getRegistration.setMultipartConfig(
MultipartConfig(
maxFileSize = Some(3*1024*1024),
fileSizeThreshold = Some(1*1024*1024)
).toMultipartConfigElement
)
}
"post" should {
"can access remote with POST" in new SimpleMockServer {
val respond = HTTP.post("http://localhost:9100/", new StringEntity("SampleMessage"))
respond.status mustEqual 200
respond.bodyAsString() mustEqual "Hello POST!"
}
"can send entity body" in new SimpleMockServer {
val respond = HTTP.post("http://localhost:9100/reflect", new StringEntity("SampleMessage"))
respond.status mustEqual 200
respond.bodyAsString() mustEqual "SampleMessage"
}
}
val file = new File(getClass.getResource("sample.txt").getPath())
val fileText = {
val source = Source.fromFile(file)
    val all = source.getLines().mkString("\n")
source.close()
all
}
"Using HTTPHelper DSL" should {
import net.azalea.curl.HTTPHelper._
"""String content (using toEntity)""" in new SimpleMockServer {
val respond = HTTP.post("http://localhost:9100/reflect", "SampleMessage".toBody())
respond.status mustEqual 200
respond.bodyAsString() mustEqual "SampleMessage"
}
"""String content (implicit conversion)""" in new SimpleMockServer {
val respond = HTTP.post("http://localhost:9100/reflect", "SampleMessage")
respond.status mustEqual 200
respond.bodyAsString() mustEqual "SampleMessage"
}
"""File content""" in new SimpleMockServer {
val respond = HTTP.post("http://localhost:9100/reflect", file.toEntity())
respond.status mustEqual 200
respond.bodyAsString() mustEqual fileText
}
"form content" in new SimpleMockServer {
val respond = HTTP.post("http://localhost:9100/form", Map(
"param1" -> "value1",
"param2" -> "value2"
))
respond.status mustEqual 200
respond.bodyAsString() mustEqual "param1 : value1\\nparam2 : value2"
}
"multipart" in new SimpleMockServer {
val respond = HTTP.post("http://localhost:9100/multipart", Map[String, ContentBody](
"param1" -> "value1".toField(),
"param2" -> "value2".toField(),
"file" -> file.toField()
))
respond.status mustEqual 200
respond.bodyAsString() mustEqual "param1 : value1\\nparam2 : value2\\nfile : sample.txt"
}
}
}
| Sunao-Yoshii/scala_curl | src/test/scala/HTTPPostSpec.scala | Scala | apache-2.0 | 3,347 |
//
// Codex - a multi-language code indexer and grokker
// http://github.com/samskivert/codex
package codex.extract
import org.junit.Assert._
import org.junit._
class ClikeTest {
import ExtractorTest._
@Test def testSomeJava {
val out = test(new ClikeExtractor("java"), """
package com.test
public class Foo {
public class Bar {
public void baz () {}
public int BAZ = 1;
}
public interface Bippy {
void bangle ();
}
public void fiddle () {
}
}
""")
assertEquals("""
CU test
ENTER package com.test 1
EXIT com.test
ENTER class Foo 3
ENTER class Bar 4
EXIT Bar
ENTER interface Bippy 8
EXIT Bippy
EXIT Foo
""".substring(1), out)
}
@Test def testSomeScala {
val out = test(new ClikeExtractor("scala"), """
package com.test
object Foo {
class Bar {
def baz () {}
val BAZ = 1
}
trait Bippy {
def bangle ()
}
def fiddle (foo :Int, bar :Int) = monkey
def faddle (one :Int, two :String) = {
def nested1 (thing :Bippy) = ...
def nested2 (thing :Bippy) = {}
}
}
def outer (thing :Bippy) = ...
""")
assertEquals("""
CU test
ENTER package com.test 1
ENTER object Foo 3
ENTER class Bar 4
ENTER def baz 5
EXIT baz
EXIT Bar
ENTER trait Bippy 8
ENTER def bangle 9
EXIT bangle
EXIT Bippy
ENTER def fiddle 11
EXIT fiddle
ENTER def faddle 12
ENTER def nested1 13
EXIT nested1
ENTER def nested2 14
EXIT nested2
EXIT faddle
EXIT Foo
ENTER def outer 18
EXIT outer
""".substring(1), out)
}
}
| samskivert/codex | src/test/scala/codex/extract/ClikeTest.scala | Scala | bsd-3-clause | 1,507 |
/*
* Licensed to Cloudera, Inc. under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Cloudera, Inc. licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.cloudera.hue.livy.server.interactive
import java.net.{ConnectException, URL}
import java.util.concurrent.TimeUnit
import com.cloudera.hue.livy._
import com.cloudera.hue.livy.msgs.ExecuteRequest
import com.cloudera.hue.livy.sessions._
import dispatch._
import org.json4s.JsonAST.JNull
import org.json4s.jackson.Serialization.write
import org.json4s.{DefaultFormats, Formats, JValue}
import scala.annotation.tailrec
import scala.concurrent.duration.Duration
import scala.concurrent.{Future, _}
import scala.util
abstract class InteractiveWebSession(val id: Int, createInteractiveRequest: CreateInteractiveRequest) extends InteractiveSession with Logging {
protected implicit def executor: ExecutionContextExecutor = ExecutionContext.global
protected implicit def jsonFormats: Formats = DefaultFormats
protected[this] var _state: State = Starting()
private[this] var _lastActivity = Long.MaxValue
private[this] var _url: Option[URL] = None
private[this] var _executedStatements = 0
private[this] var _statements = IndexedSeq[Statement]()
override def kind = createInteractiveRequest.kind
override def proxyUser = createInteractiveRequest.proxyUser
override def url: Option[URL] = _url
override def url_=(url: URL) = {
ensureState(Starting(), {
_state = Idle()
_url = Some(url)
})
}
private def svc = {
val url = _url.head
dispatch.url(url.toString)
}
override def lastActivity: Option[Long] = Some(_lastActivity)
override def state: State = _state
override def executeStatement(content: ExecuteRequest): Statement = {
ensureIdle {
_state = Busy()
touchLastActivity()
val req = (svc / "execute").setContentType("application/json", "UTF-8") << write(content)
val future = Http(req OK as.json4s.Json).map { case resp: JValue =>
resp \ "result" match {
case JNull =>
// The result isn't ready yet. Loop until it is.
val id = (resp \ "id").extract[Int]
waitForStatement(id)
case result =>
transition(Idle())
result
}
}
val statement = new Statement(_executedStatements, content, future)
_executedStatements += 1
_statements = _statements :+ statement
statement
}
}
@tailrec
private def waitForStatement(id: Int): JValue = {
val req = (svc / "history" / id).setContentType("application/json", "UTF-8")
val resp = Await.result(Http(req OK as.json4s.Json), Duration.Inf)
resp \ "result" match {
case JNull =>
Thread.sleep(1000)
waitForStatement(id)
case result =>
transition(Idle())
result
}
}
override def statements: IndexedSeq[Statement] = _statements
override def interrupt(): Future[Unit] = {
stop()
}
override def stop(): Future[Unit] = {
synchronized {
_state match {
case Idle() =>
_state = Busy()
Http(svc.DELETE OK as.String).either() match {
case (Right(_) | Left(_: ConnectException)) =>
// Make sure to eat any connection errors because the repl shut down before it sent
// out an OK.
synchronized {
_state = Dead()
}
Future.successful(())
case Left(t: Throwable) =>
Future.failed(t)
}
case NotStarted() =>
Future {
waitForStateChange(NotStarted(), Duration(10, TimeUnit.SECONDS))
stop()
}
case Starting() =>
Future {
waitForStateChange(Starting(), Duration(10, TimeUnit.SECONDS))
stop()
}
case Busy() | Running() =>
Future {
waitForStateChange(Busy(), Duration(10, TimeUnit.SECONDS))
stop()
}
case ShuttingDown() =>
Future {
waitForStateChange(ShuttingDown(), Duration(10, TimeUnit.SECONDS))
stop()
}
case Error() | Dead() | Success() =>
Future.successful(Unit)
}
}
}
private def transition(state: State) = synchronized {
_state = state
}
private def touchLastActivity() = {
_lastActivity = System.currentTimeMillis()
}
private def ensureState[A](state: State, f: => A) = {
synchronized {
if (_state == state) {
f
} else {
throw new IllegalStateException("Session is in state %s" format _state)
}
}
}
private def ensureIdle[A](f: => A) = {
ensureState(Idle(), f)
}
private def ensureRunning[A](f: => A) = {
synchronized {
_state match {
case Idle() | Busy() =>
f
case _ =>
throw new IllegalStateException("Session is in state %s" format _state)
}
}
}
}
| azureplus/hue | apps/spark/java/livy-server/src/main/scala/com/cloudera/hue/livy/server/interactive/InteractiveWebSession.scala | Scala | apache-2.0 | 5,634 |
import collection._
import collection.concurrent.TrieMap
object IteratorSpec extends Spec {
def test() {
"work for an empty trie" in {
val ct = new TrieMap
val it = ct.iterator
it.hasNext shouldEqual (false)
evaluating { it.next() }.shouldProduce [NoSuchElementException]
}
def nonEmptyIteratorCheck(sz: Int) {
val ct = new TrieMap[Wrap, Int]
for (i <- 0 until sz) ct.put(new Wrap(i), i)
val it = ct.iterator
val tracker = mutable.Map[Wrap, Int]()
for (i <- 0 until sz) {
assert(it.hasNext == true)
tracker += it.next
}
it.hasNext shouldEqual (false)
evaluating { it.next() }.shouldProduce [NoSuchElementException]
tracker.size shouldEqual (sz)
tracker shouldEqual (ct)
}
"work for a 1 element trie" in {
nonEmptyIteratorCheck(1)
}
"work for a 2 element trie" in {
nonEmptyIteratorCheck(2)
}
"work for a 3 element trie" in {
nonEmptyIteratorCheck(3)
}
"work for a 5 element trie" in {
nonEmptyIteratorCheck(5)
}
"work for a 10 element trie" in {
nonEmptyIteratorCheck(10)
}
"work for a 20 element trie" in {
nonEmptyIteratorCheck(20)
}
"work for a 50 element trie" in {
nonEmptyIteratorCheck(50)
}
"work for a 100 element trie" in {
nonEmptyIteratorCheck(100)
}
"work for a 1k element trie" in {
nonEmptyIteratorCheck(1000)
}
"work for a 5k element trie" in {
nonEmptyIteratorCheck(5000)
}
"work for a 75k element trie" in {
nonEmptyIteratorCheck(75000)
}
"work for a 250k element trie" in {
nonEmptyIteratorCheck(500000)
}
def nonEmptyCollideCheck(sz: Int) {
val ct = new TrieMap[DumbHash, Int]
for (i <- 0 until sz) ct.put(new DumbHash(i), i)
val it = ct.iterator
val tracker = mutable.Map[DumbHash, Int]()
for (i <- 0 until sz) {
assert(it.hasNext == true)
tracker += it.next
}
it.hasNext shouldEqual (false)
evaluating { it.next() }.shouldProduce [NoSuchElementException]
tracker.size shouldEqual (sz)
tracker shouldEqual (ct)
}
"work for colliding hashcodes, 2 element trie" in {
nonEmptyCollideCheck(2)
}
"work for colliding hashcodes, 3 element trie" in {
nonEmptyCollideCheck(3)
}
"work for colliding hashcodes, 5 element trie" in {
nonEmptyCollideCheck(5)
}
"work for colliding hashcodes, 10 element trie" in {
nonEmptyCollideCheck(10)
}
"work for colliding hashcodes, 100 element trie" in {
nonEmptyCollideCheck(100)
}
"work for colliding hashcodes, 500 element trie" in {
nonEmptyCollideCheck(500)
}
"work for colliding hashcodes, 5k element trie" in {
nonEmptyCollideCheck(5000)
}
def assertEqual(a: Map[Wrap, Int], b: Map[Wrap, Int]) {
if (a != b) {
println(a.size + " vs " + b.size)
}
assert(a == b)
}
"be consistent when taken with concurrent modifications" in {
val sz = 25000
val W = 15
val S = 5
val checks = 5
val ct = new TrieMap[Wrap, Int]
for (i <- 0 until sz) ct.put(new Wrap(i), i)
class Modifier extends Thread {
override def run() {
for (i <- 0 until sz) ct.putIfAbsent(new Wrap(i), i) match {
case Some(_) => ct.remove(new Wrap(i))
case None =>
}
}
}
def consistentIteration(ct: TrieMap[Wrap, Int], checks: Int) {
class Iter extends Thread {
override def run() {
val snap = ct.readOnlySnapshot()
val initial = mutable.Map[Wrap, Int]()
for (kv <- snap) initial += kv
for (i <- 0 until checks) {
assertEqual(snap.iterator.toMap, initial)
}
}
}
val iter = new Iter
iter.start()
iter.join()
}
val threads = for (_ <- 0 until W) yield new Modifier
threads.foreach(_.start())
for (_ <- 0 until S) consistentIteration(ct, checks)
threads.foreach(_.join())
}
"be consistent with a concurrent removal with a well defined order" in {
val sz = 150000
val sgroupsize = 10
val sgroupnum = 5
val removerslowdown = 50
val ct = new TrieMap[Wrap, Int]
for (i <- 0 until sz) ct.put(new Wrap(i), i)
class Remover extends Thread {
override def run() {
for (i <- 0 until sz) {
assert(ct.remove(new Wrap(i)) == Some(i))
for (i <- 0 until removerslowdown) ct.get(new Wrap(i)) // slow down, mate
}
}
}
def consistentIteration(it: Iterator[(Wrap, Int)]) = {
class Iter extends Thread {
override def run() {
val elems = it.toBuffer
if (elems.nonEmpty) {
val minelem = elems.minBy((x: (Wrap, Int)) => x._1.i)._1.i
assert(elems.forall(_._1.i >= minelem))
}
}
}
new Iter
}
val remover = new Remover
remover.start()
for (_ <- 0 until sgroupnum) {
val iters = for (_ <- 0 until sgroupsize) yield consistentIteration(ct.iterator)
iters.foreach(_.start())
iters.foreach(_.join())
}
remover.join()
}
"be consistent with a concurrent insertion with a well defined order" in {
val sz = 150000
val sgroupsize = 10
val sgroupnum = 10
val inserterslowdown = 50
val ct = new TrieMap[Wrap, Int]
class Inserter extends Thread {
override def run() {
for (i <- 0 until sz) {
assert(ct.put(new Wrap(i), i) == None)
for (i <- 0 until inserterslowdown) ct.get(new Wrap(i)) // slow down, mate
}
}
}
def consistentIteration(it: Iterator[(Wrap, Int)]) = {
class Iter extends Thread {
override def run() {
val elems = it.toSeq
if (elems.nonEmpty) {
val maxelem = elems.maxBy((x: (Wrap, Int)) => x._1.i)._1.i
assert(elems.forall(_._1.i <= maxelem))
}
}
}
new Iter
}
val inserter = new Inserter
inserter.start()
for (_ <- 0 until sgroupnum) {
val iters = for (_ <- 0 until sgroupsize) yield consistentIteration(ct.iterator)
iters.foreach(_.start())
iters.foreach(_.join())
}
inserter.join()
}
"work on a yet unevaluated snapshot" in {
val sz = 50000
val ct = new TrieMap[Wrap, Int]
for (i <- 0 until sz) ct.update(new Wrap(i), i)
val snap = ct.snapshot()
val it = snap.iterator
while (it.hasNext) it.next()
}
"be duplicated" in {
val sz = 50
val ct = collection.parallel.mutable.ParTrieMap((0 until sz) zip (0 until sz): _*)
val it = ct.splitter
for (_ <- 0 until (sz / 2)) it.next()
val dupit = it.dup
it.toList shouldEqual dupit.toList
}
}
}
| felixmulder/scala | test/files/run/ctries-new/iterator.scala | Scala | bsd-3-clause | 7,120 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.yourtaxcalculator.services
import org.scalatest.concurrent.ScalaFutures
import uk.gov.hmrc.play.test.{UnitSpec, WithFakeApplication}
import scala.concurrent.ExecutionContext.Implicits.global
class VersionCheckServiceSpec extends UnitSpec with WithFakeApplication with ScalaFutures {
"VersionCheckService preFlightCheck" should {
"return a new journeyId given a version that does not require an upgrade and no journeyId is provided" in new TestVersionCheckServiceNoUpgradeRequired {
val result = service.preFlightCheck(request).futureValue
result.upgradeRequired shouldBe false
result.journeyId.isDefined shouldBe true
result.journeyId shouldNot be (existingJourneyId)
}
"return the journeyId provided given a version that does not require an upgrade" in new TestVersionCheckServiceNoUpgradeRequired {
val result = service.preFlightCheck(request, existingJourneyId).futureValue
result.upgradeRequired shouldBe false
result.journeyId shouldBe existingJourneyId
}
"not return a new journeyId given a version that does require an upgrade" in new TestVersionCheckServiceUpgradeRequired {
val result = service.preFlightCheck(request).futureValue
result.upgradeRequired shouldBe true
result.journeyId shouldBe None
}
}
}
| HerbiePorter/your-tax-calculator-frontend | test/services/VersionCheckServiceSpec.scala | Scala | apache-2.0 | 1,937 |
package playground.typelevel
import shapeless.test.illTyped
object BoolTypeSpecs {
implicitly[TrueType =:= TrueType]
implicitly[FalseType =:= FalseType]
illTyped("implicitly[TrueType =:= FalseType]")
illTyped("implicitly[FalseType =:= TrueType]")
// NOT
implicitly[TrueType#Not =:= FalseType]
implicitly[FalseType#Not =:= TrueType]
illTyped("implicitly[TrueType#Not =:= TrueType]")
illTyped("implicitly[FalseType#Not =:= FalseType]")
// OR
implicitly[TrueType#Or[TrueType] =:= TrueType]
implicitly[TrueType#Or[FalseType] =:= TrueType]
implicitly[FalseType#Or[TrueType] =:= TrueType]
implicitly[FalseType#Or[FalseType] =:= FalseType]
// AND
implicitly[TrueType#And[TrueType] =:= TrueType]
implicitly[TrueType#And[FalseType] =:= FalseType]
implicitly[FalseType#And[TrueType] =:= FalseType]
implicitly[FalseType#And[FalseType] =:= FalseType]
// IMPLICATION
implicitly[TrueType#Imp[TrueType] =:= TrueType]
implicitly[TrueType#Imp[FalseType] =:= FalseType]
implicitly[FalseType#Imp[TrueType] =:= TrueType]
implicitly[FalseType#Imp[FalseType] =:= TrueType]
}
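/*
 * Hypothetical sketch added for this document: the real TrueType / FalseType definitions live
 * elsewhere in playground.typelevel and are not shown here. An encoding consistent with the
 * checks above could look like the following; it is kept inside a comment so it does not clash
 * with the actual definitions in this package.
 *   sealed trait BoolType {
 *     type Not <: BoolType
 *     type Or[B <: BoolType] <: BoolType
 *     type And[B <: BoolType] <: BoolType
 *     type Imp[B <: BoolType] <: BoolType
 *   }
 *   sealed trait TrueType extends BoolType {
 *     type Not = FalseType
 *     type Or[B <: BoolType] = TrueType
 *     type And[B <: BoolType] = B
 *     type Imp[B <: BoolType] = B
 *   }
 *   sealed trait FalseType extends BoolType {
 *     type Not = TrueType
 *     type Or[B <: BoolType] = B
 *     type And[B <: BoolType] = FalseType
 *     type Imp[B <: BoolType] = TrueType
 *   }
 */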
| falconepl/scala-playground | src/test/scala/playground/typelevel/BoolTypeSpecs.scala | Scala | mit | 1,107 |
package com.github.j5ik2o.forseti.adaptor.repository
trait IdConfig {
val timestampBits: Int
val dataCenterIdBits: Int
val workerIdBits: Int
val sequenceBits: Int
val epoch: Long
lazy val maxTimestamp: Long = -1L ^ (-1L << timestampBits)
lazy val maxDataCenterId: Long = -1L ^ (-1L << dataCenterIdBits)
lazy val maxIdWorkerId: Long = -1L ^ (-1L << workerIdBits)
lazy val maxSequence: Long = -1L ^ (-1L << sequenceBits)
lazy val timestampShift: Int = sequenceBits + workerIdBits + dataCenterIdBits
lazy val dataCenterIdShift = sequenceBits + workerIdBits
lazy val workerIdShift: Int = sequenceBits
lazy val timestampMask: Long = -1L ^ (-1L << (timestampBits + timestampShift))
lazy val dataCenterIdMask: Long = -1L ^ (-1L << (dataCenterIdBits + dataCenterIdShift))
lazy val workerIdMask: Long = -1L ^ (-1L << (workerIdBits + workerIdShift))
lazy val sequenceMask: Long = -1L ^ (-1L << sequenceBits)
}
object IdConfig {
def apply(
timestampBits: Int,
dataCenterIdBits: Int,
workerIdBits: Int,
sequenceBits: Int,
epoch: Long
): IdConfig =
Default(timestampBits, dataCenterIdBits, workerIdBits, sequenceBits, epoch)
private case class Default(
timestampBits: Int,
dataCenterIdBits: Int,
workerIdBits: Int,
sequenceBits: Int,
epoch: Long
) extends IdConfig
}
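/**
 * Illustrative sketch added for this document (not part of the original source): one way a
 * Snowflake-style id could be assembled from an IdConfig's epoch, shifts and masks. The object
 * and method names are hypothetical; the real project may compose ids elsewhere.
 */
object IdConfigExample {
  def compose(config: IdConfig, timestamp: Long, dataCenterId: Long, workerId: Long, sequence: Long): Long =
    ((timestamp - config.epoch) << config.timestampShift) |
      (dataCenterId << config.dataCenterIdShift) |
      (workerId << config.workerIdShift) |
      (sequence & config.sequenceMask)
}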
| j5ik2o/forseti | server/server-adaptor-driver/src/main/scala/com/github/j5ik2o/forseti/adaptor/repository/IdConfig.scala | Scala | mit | 1,380 |
package org.skycastle.core.design
import org.skycastle.util.grid.{GridSize, GridBounds, GridPos}
import org.skycastle.util.Vec3i
/**
* A component of a design.
* Has some shape and outside dimensions.
*/
trait Part {
def anchorPos: GridPos
def gridSize: GridSize
def outerBounds: GridBounds
def occupiedCells: Iterator[GridPos]
} | zzorn/skycastle | src/main/scala/org/skycastle/core/design/Part.scala | Scala | gpl-2.0 | 344 |
package mesosphere.marathon
package state
import mesosphere.UnitTest
import mesosphere.marathon.core.instance.TestTaskBuilder
import mesosphere.marathon.core.pod.{BridgeNetwork, ContainerNetwork}
import mesosphere.marathon.core.task.state.NetworkInfo
import mesosphere.marathon.state.Container.PortMapping
import mesosphere.marathon.test.MarathonTestHelper
import scala.collection.immutable.Seq
class AppDefinitionPortAssignmentsTest extends UnitTest {
import MarathonTestHelper.Implicits._
val hostName = "host.some"
"AppDefinitionPortAssignment" should {
"portAssignments with IP-per-task defining ports" in {
Given("An app requesting IP-per-Task and specifying ports in the discovery info")
val app = MarathonTestHelper.makeBasicApp()
.withNoPortDefinitions()
.withDockerNetworks(ContainerNetwork("whatever"))
.withPortMappings(Seq(Container.PortMapping(80, hostPort = Some(0), name = Some("http"), protocol = "tcp")))
Given("A task with an IP address and a port")
val task = {
val t = TestTaskBuilder.Helper.minimalTask(app.id)
t.copy(status = t.status.copy(networkInfo = NetworkInfo(hostName, hostPorts = Seq(1), ipAddresses = Seq(MarathonTestHelper.mesosIpAddress("192.168.0.1")))))
}
When("Getting the ports assignments")
val portAssignments = task.status.networkInfo.portAssignments(app, includeUnresolved = true)
Then("The right port assignment is returned")
portAssignments should equal(Seq(
PortAssignment(
portName = Some("http"),
effectiveIpAddress = Some(hostName),
effectivePort = 1,
hostPort = Some(1),
containerPort = Some(80))
))
}
"portAssignments with IP-per-task defining ports, but a task which doesn't have an IP address yet" in {
Given("An app requesting IP-per-Task and specifying ports in the discovery info")
val app = MarathonTestHelper.makeBasicApp()
.withNoPortDefinitions()
.withDockerNetworks(ContainerNetwork("whatever"))
.withPortMappings(Seq(Container.PortMapping(80, name = Some("http"), protocol = "tcp")))
Given("A task with no IP address nor host ports")
val task = {
val t = TestTaskBuilder.Helper.minimalTask(app.id)
t.copy(status = t.status.copy(networkInfo = NetworkInfo(hostName, hostPorts = Nil, ipAddresses = Nil)))
}
Then("The port assignments are empty")
task.status.networkInfo.portAssignments(app, includeUnresolved = true) should equal(Seq(
PortAssignment(
portName = Some("http"),
effectiveIpAddress = None,
effectivePort = PortAssignment.NoPort,
hostPort = None,
containerPort = Some(80))
))
}
"portAssignments with IP-per-task without ports" in {
Given("An app requesting IP-per-Task and not specifying ports in the discovery info")
val app = MarathonTestHelper.makeBasicApp()
.withNoPortDefinitions()
.withDockerNetworks(ContainerNetwork("whatever")
)
Given("A task with an IP address and no host ports")
val task = {
val t = TestTaskBuilder.Helper.minimalTask(app.id)
t.copy(status = t.status.copy(networkInfo = NetworkInfo(hostName, hostPorts = Nil, ipAddresses = Seq(MarathonTestHelper.mesosIpAddress("192.168.0.1")))))
}
Then("The port assignments are empty")
task.status.networkInfo.portAssignments(app, includeUnresolved = true) should be(empty)
}
"portAssignments, without IP-allocation and BRIDGE mode with a port mapping" in {
Given("An app without IP-per-task, using BRIDGE networking with one port mapping requesting a dynamic port")
val app = MarathonTestHelper.makeBasicApp()
.withNoPortDefinitions()
.withDockerNetworks(BridgeNetwork())
.withPortMappings(Seq(
PortMapping(containerPort = 80, hostPort = Some(0), servicePort = 0, protocol = "tcp",
name = Some("http"))
))
Given("A task without an IP and with a host port")
val task = {
val t = TestTaskBuilder.Helper.minimalTask(app.id)
t.copy(status = t.status.copy(networkInfo = NetworkInfo(hostName, hostPorts = Seq(1), ipAddresses = Nil)))
}
Then("The right port assignment is returned")
val portAssignments = task.status.networkInfo.portAssignments(app, includeUnresolved = true)
portAssignments should be(Seq(
PortAssignment(
portName = Some("http"),
effectiveIpAddress = Option(hostName),
effectivePort = 1,
hostPort = Some(1),
containerPort = Some(80))
))
}
"portAssignments without IP-allocation Docker BRIDGE network and no port mappings" in {
Given("An app using bridge network with no port mappings nor ports")
val app = MarathonTestHelper.makeBasicApp().copy(
container = Some(Container.Docker(
image = "mesosphere/marathon"
)),
portDefinitions = Seq.empty,
networks = Seq(BridgeNetwork()))
Given("A task with a port")
val task = TestTaskBuilder.Helper.minimalTask(app.id)
Then("The port assignments are empty")
task.status.networkInfo.portAssignments(app, includeUnresolved = true) should be(empty)
}
"portAssignments with IP-per-task using Docker USER networking and a port mapping NOT requesting a host port" in {
Given("An app using IP-per-task, USER networking and with a port mapping requesting no ports")
val app = MarathonTestHelper.makeBasicApp()
.withNoPortDefinitions()
.withDockerNetworks(ContainerNetwork("whatever"))
.withPortMappings(Seq(
PortMapping(containerPort = 80, hostPort = None, servicePort = 0, protocol = "tcp", name = Some("http"))
))
Given("A task with an IP and without a host port")
val task = {
val t = TestTaskBuilder.Helper.minimalTask(app.id)
t.copy(status = t.status.copy(networkInfo = NetworkInfo(hostName, hostPorts = Nil, ipAddresses = Seq(MarathonTestHelper.mesosIpAddress("192.168.0.1")))))
}
Then("The right port assignment is returned")
val portAssignments = task.status.networkInfo.portAssignments(app, includeUnresolved = true)
portAssignments should be(Seq(
PortAssignment(
portName = Some("http"),
effectiveIpAddress = Some("192.168.0.1"),
effectivePort = 80,
containerPort = Some(80),
hostPort = None)
))
}
"portAssignments with IP-per-task Docker USER networking and a port mapping requesting a host port" in {
Given("An app using IP-per-task, USER networking and with a port mapping requesting one host port")
val app = MarathonTestHelper.makeBasicApp()
.withNoPortDefinitions()
.withDockerNetworks(ContainerNetwork("whatever"))
.withPortMappings(Seq(
PortMapping(containerPort = 80, hostPort = Some(0), servicePort = 0, protocol = "tcp",
name = Some("http"))
))
Given("A task with IP-per-task and a host port")
val task = {
val t = TestTaskBuilder.Helper.minimalTask(app.id)
t.copy(status = t.status.copy(networkInfo = NetworkInfo(hostName, hostPorts = Seq(30000), ipAddresses = Seq(MarathonTestHelper.mesosIpAddress("192.168.0.1")))))
}
Then("The right port assignment is returned")
val portAssignments = task.status.networkInfo.portAssignments(app, includeUnresolved = true)
portAssignments should be(Seq(
PortAssignment(
portName = Some("http"),
effectiveIpAddress = Some(hostName),
effectivePort = 30000,
containerPort = Some(80),
hostPort = Some(30000))
))
}
"portAssignments with IP-per-task Docker, USER networking, and a mix of port mappings" in {
Given("An app using IP-per-task, USER networking and a mix of port mappings")
val app = MarathonTestHelper.makeBasicApp()
.withNoPortDefinitions()
.withDockerNetworks(ContainerNetwork("whatever"))
.withPortMappings(Seq(
PortMapping(containerPort = 80, hostPort = None, servicePort = 0, protocol = "tcp", name = Some("http")),
PortMapping(containerPort = 443, hostPort = Some(0), servicePort = 0, protocol = "tcp",
name = Some("https"))
))
Given("A task with IP-per-task and a host port")
val task = {
val t = TestTaskBuilder.Helper.minimalTask(app.id)
t.copy(status = t.status.copy(networkInfo = NetworkInfo(hostName, hostPorts = Seq(30000), ipAddresses = Seq(MarathonTestHelper.mesosIpAddress("192.168.0.1")))))
}
Then("The right port assignment is returned")
val portAssignments = task.status.networkInfo.portAssignments(app, includeUnresolved = true)
portAssignments should be(Seq(
PortAssignment(
portName = Some("http"),
effectiveIpAddress = Some("192.168.0.1"),
effectivePort = 80,
containerPort = Some(80),
hostPort = None),
PortAssignment(
portName = Some("https"),
effectiveIpAddress = Some(hostName),
effectivePort = 30000,
containerPort = Some(443),
hostPort = Some(30000))
))
}
"portAssignments with port definitions" in {
Given("An app with port definitions")
val app = MarathonTestHelper.makeBasicApp()
.withPortDefinitions(Seq(PortDefinition(port = 0, protocol = "tcp", name = Some("http"), labels = Map.empty)))
Given("A task with one port")
val task = {
val t = TestTaskBuilder.Helper.minimalTask(app.id)
t.copy(status = t.status.copy(networkInfo = NetworkInfo(hostName, hostPorts = Seq(1), ipAddresses = Nil)))
}
Then("The right port assignment is returned")
val portAssignments = task.status.networkInfo.portAssignments(app, includeUnresolved = true)
portAssignments should be(Seq(
PortAssignment(
portName = Some("http"),
effectiveIpAddress = task.status.networkInfo.effectiveIpAddress(app),
effectivePort = 1,
containerPort = None,
hostPort = Some(1))
))
}
"portAssignments with absolutely no ports" in {
import MarathonTestHelper.Implicits._
Given("An app with absolutely no ports defined")
val app = MarathonTestHelper.makeBasicApp().withNoPortDefinitions()
Given("A task with no ports")
val task = {
val t = TestTaskBuilder.Helper.minimalTask(app.id)
t.copy(status = t.status.copy(networkInfo = NetworkInfo(hostName, hostPorts = Nil, ipAddresses = Nil)))
}
Then("The port assignments are empty")
task.status.networkInfo.portAssignments(app, includeUnresolved = true) should be(empty)
}
}
}
| gsantovena/marathon | src/test/scala/mesosphere/marathon/state/AppDefinitionPortAssignmentsTest.scala | Scala | apache-2.0 | 10,924 |
package com.datastax.examples.meetup
import org.joda.time.{DateTimeZone, DateTime, Duration}
import org.scalatra.scalate.ScalateSupport
import org.scalatra.{CorsSupport, ScalatraServlet}
import scala.concurrent.Await
import scala.concurrent.duration._
import org.json4s.{DefaultFormats, Formats}
import org.scalatra.json._
class EventStatsServlet() extends ScalatraServlet with CorsSupport with JacksonJsonSupport with ScalateSupport
{
protected implicit val jsonFormats: Formats = DefaultFormats
before() {
contentType = formats("json")
}
options("/*"){
response.setHeader("Access-Control-Allow-Headers", request.getHeader("Access-Control-Request-Headers"));
}
get("/trending") {
val time = new DateTime(DateTimeZone.UTC)
    // Scan 5-second intervals within the past minute.
    // Stop as soon as the first non-empty result is found.
val result = (for (i <- Stream range (0,12); v = getTrendingTopics(i, time); if v.nonEmpty) yield v).headOption
// Order topics by count in desc order and take top 20
result.map(r => r.toIndexedSeq.sortBy(_._2).reverse.take(20))
}
get("/countries") {
val attendeesByCountry = Event.dimensions("attending", "ALL")
Await.result(attendeesByCountry, 5 seconds)
.map{ case (a,b) => Map("code" -> a.toUpperCase, "value" -> b)}
}
get("/") {
contentType="text/html"
layoutTemplate("dashboard.ssp")
}
def roundDateTime(t: DateTime, d: Duration) = {
t minus (t.getMillis - (t.getMillis.toDouble / d.getMillis).round * d.getMillis)
}
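  // Worked example (added for this document): with d = 5 seconds, 12:00:07.300 rounds to
  // 12:00:05 and 12:00:08.900 rounds to 12:00:10, so getTrendingTopics below can look up
  // the pre-aggregated bucket keyed "S" + yyyyMMddHHmmss for that 5-second interval.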
def getTrendingTopics(i:Int, time:DateTime) = {
val t = roundDateTime(time minusSeconds 5*i, Duration.standardSeconds(5))
val trendingTopics = Event.dimensions("trending", "S" + t.toString("yyyyMMddHHmmss"))
Await.result(trendingTopics, 5 seconds)
}
} | MiguelPeralvo/spark-streaming-demo | web/src/main/scala/com/datastax/examples/meetup/EventStatsServlet.scala | Scala | apache-2.0 | 1,806 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js API **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
/* Definitions for js.Function3 to js.Function22 that do not show in doc */
package scala.scalajs.js
// scalastyle:off line.size.limit
@native
trait Function3[-T1, -T2, -T3, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3): R
}
@native
trait Function4[-T1, -T2, -T3, -T4, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4): R
}
@native
trait Function5[-T1, -T2, -T3, -T4, -T5, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5): R
}
@native
trait Function6[-T1, -T2, -T3, -T4, -T5, -T6, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6): R
}
@native
trait Function7[-T1, -T2, -T3, -T4, -T5, -T6, -T7, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7): R
}
@native
trait Function8[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7, arg8: T8): R
}
@native
trait Function9[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7, arg8: T8, arg9: T9): R
}
@native
trait Function10[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7, arg8: T8, arg9: T9, arg10: T10): R
}
@native
trait Function11[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7, arg8: T8, arg9: T9, arg10: T10, arg11: T11): R
}
@native
trait Function12[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7, arg8: T8, arg9: T9, arg10: T10, arg11: T11, arg12: T12): R
}
@native
trait Function13[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12, -T13, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7, arg8: T8, arg9: T9, arg10: T10, arg11: T11, arg12: T12, arg13: T13): R
}
@native
trait Function14[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12, -T13, -T14, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7, arg8: T8, arg9: T9, arg10: T10, arg11: T11, arg12: T12, arg13: T13, arg14: T14): R
}
@native
trait Function15[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12, -T13, -T14, -T15, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7, arg8: T8, arg9: T9, arg10: T10, arg11: T11, arg12: T12, arg13: T13, arg14: T14, arg15: T15): R
}
@native
trait Function16[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12, -T13, -T14, -T15, -T16, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7, arg8: T8, arg9: T9, arg10: T10, arg11: T11, arg12: T12, arg13: T13, arg14: T14, arg15: T15, arg16: T16): R
}
@native
trait Function17[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12, -T13, -T14, -T15, -T16, -T17, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7, arg8: T8, arg9: T9, arg10: T10, arg11: T11, arg12: T12, arg13: T13, arg14: T14, arg15: T15, arg16: T16, arg17: T17): R
}
@native
trait Function18[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12, -T13, -T14, -T15, -T16, -T17, -T18, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7, arg8: T8, arg9: T9, arg10: T10, arg11: T11, arg12: T12, arg13: T13, arg14: T14, arg15: T15, arg16: T16, arg17: T17, arg18: T18): R
}
@native
trait Function19[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12, -T13, -T14, -T15, -T16, -T17, -T18, -T19, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7, arg8: T8, arg9: T9, arg10: T10, arg11: T11, arg12: T12, arg13: T13, arg14: T14, arg15: T15, arg16: T16, arg17: T17, arg18: T18, arg19: T19): R
}
@native
trait Function20[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12, -T13, -T14, -T15, -T16, -T17, -T18, -T19, -T20, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7, arg8: T8, arg9: T9, arg10: T10, arg11: T11, arg12: T12, arg13: T13, arg14: T14, arg15: T15, arg16: T16, arg17: T17, arg18: T18, arg19: T19, arg20: T20): R
}
@native
trait Function21[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12, -T13, -T14, -T15, -T16, -T17, -T18, -T19, -T20, -T21, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7, arg8: T8, arg9: T9, arg10: T10, arg11: T11, arg12: T12, arg13: T13, arg14: T14, arg15: T15, arg16: T16, arg17: T17, arg18: T18, arg19: T19, arg20: T20, arg21: T21): R
}
@native
trait Function22[-T1, -T2, -T3, -T4, -T5, -T6, -T7, -T8, -T9, -T10, -T11, -T12, -T13, -T14, -T15, -T16, -T17, -T18, -T19, -T20, -T21, -T22, +R] extends Function {
def apply(arg1: T1, arg2: T2, arg3: T3, arg4: T4, arg5: T5, arg6: T6, arg7: T7, arg8: T8, arg9: T9, arg10: T10, arg11: T11, arg12: T12, arg13: T13, arg14: T14, arg15: T15, arg16: T16, arg17: T17, arg18: T18, arg19: T19, arg20: T20, arg21: T21, arg22: T22): R
}
// scalastyle:on line.size.limit
| xuwei-k/scala-js | library/src/main/scala/scala/scalajs/js/Function.nodoc.scala | Scala | bsd-3-clause | 6,032 |
package mockws
import java.io._
import java.util.zip.GZIPInputStream
import java.util.zip.GZIPOutputStream
import play.shaded.ahc.org.asynchttpclient.Response
import mockws.MockWSHelpers._
import play.api.mvc.Results._
import play.api.test.Helpers._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class GzippedResponsesTest extends AnyFunSuite with Matchers {
test("mock WS handle gzipped responses") {
val ws = MockWS { case (_, _) =>
Action {
val os = new ByteArrayOutputStream()
val gzip = new GZIPOutputStream(os)
gzip.write("my response".getBytes())
gzip.close()
Ok(os.toByteArray)
}
}
val result = await(ws.url("").get())
val body =
scala.io.Source.fromInputStream(new GZIPInputStream(result.underlying[Response].getResponseBodyAsStream)).mkString
body shouldEqual "my response"
ws.close()
}
}
| leanovate/play-mockws | src/test/scala/mockws/GzippedResponsesTest.scala | Scala | mit | 937 |
/**
* Exercise 3:
*
* Repeat the preceding assignment, but produce a new array with the swapped
* values. Use for/yield.
*
**/
// 1
val a = Array(1, 2, 3, 4, 5)
val result = for(b <- a.grouped(2); c <- b.reverse) yield c
result.toArray // Array(2, 1, 4, 3, 5): Array[Int]
// 2
val a = Array(1, 2, 3, 4, 5)
val result = for(i <- 0 until a.length) yield {
if(i % 2 != 0) a(i-1)
else if (i == a.length -1) a(i)
else a(i + 1)
}
result.toArray // Array(2, 1, 4, 3, 5): Array[Int]
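// 3 (extra sketch added for this document, not part of the original solutions):
// the same swap expressed by pairing each element with its index; odd-length arrays
// keep their last element in place, as above.
val a = Array(1, 2, 3, 4, 5)
val result = for ((x, i) <- a.zipWithIndex) yield {
  if (i % 2 == 0 && i + 1 < a.length) a(i + 1)
  else if (i % 2 == 1) a(i - 1)
  else x
}
result.toArray // Array(2, 1, 4, 3, 5): Array[Int]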
| ragmha/scala-impatient | solutions/working-with-arrays/ex3.scala | Scala | mit | 486 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.coordinator
import java.util.concurrent.locks.ReentrantReadWriteLock
import kafka.utils.CoreUtils._
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.protocol.types.{ArrayOf, Struct, Schema, Field}
import org.apache.kafka.common.protocol.types.Type.STRING
import org.apache.kafka.common.protocol.types.Type.INT32
import org.apache.kafka.common.protocol.types.Type.INT64
import org.apache.kafka.common.protocol.types.Type.BYTES
import org.apache.kafka.common.utils.Utils
import kafka.utils._
import kafka.common._
import kafka.message._
import kafka.log.FileMessageSet
import kafka.metrics.KafkaMetricsGroup
import kafka.common.TopicAndPartition
import kafka.tools.MessageFormatter
import kafka.api.ProducerResponseStatus
import kafka.server.ReplicaManager
import scala.collection._
import java.io.PrintStream
import java.nio.ByteBuffer
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.TimeUnit
import com.yammer.metrics.core.Gauge
case class DelayedStore(messageSet: Map[TopicAndPartition, MessageSet],
callback: Map[TopicAndPartition, ProducerResponseStatus] => Unit)
class GroupMetadataManager(val brokerId: Int,
val config: OffsetConfig,
replicaManager: ReplicaManager,
zkUtils: ZkUtils) extends Logging with KafkaMetricsGroup {
/* offsets cache */
private val offsetsCache = new Pool[GroupTopicPartition, OffsetAndMetadata]
/* group metadata cache */
private val groupsCache = new Pool[String, GroupMetadata]
  /* partitions of consumer groups that are being loaded; its lock should always be acquired BEFORE offsetExpireLock and the group lock if needed */
private val loadingPartitions: mutable.Set[Int] = mutable.Set()
/* partitions of consumer groups that are assigned, using the same loading partition lock */
private val ownedPartitions: mutable.Set[Int] = mutable.Set()
  /* lock for expiring stale offsets; it should always be acquired BEFORE the group lock if needed */
private val offsetExpireLock = new ReentrantReadWriteLock()
/* shutting down flag */
private val shuttingDown = new AtomicBoolean(false)
/* number of partitions for the consumer metadata topic */
private val groupMetadataTopicPartitionCount = getOffsetsTopicPartitionCount
  /* Single-thread scheduler for handling offset/group metadata cache loading and unloading */
private val scheduler = new KafkaScheduler(threads = 1, threadNamePrefix = "group-metadata-manager-")
this.logIdent = "[Group Metadata Manager on Broker " + brokerId + "]: "
scheduler.startup()
scheduler.schedule(name = "delete-expired-consumer-offsets",
fun = deleteExpiredOffsets,
period = config.offsetsRetentionCheckIntervalMs,
unit = TimeUnit.MILLISECONDS)
newGauge("NumOffsets",
new Gauge[Int] {
def value = offsetsCache.size
}
)
newGauge("NumGroups",
new Gauge[Int] {
def value = groupsCache.size
}
)
def currentGroups(): Iterable[GroupMetadata] = groupsCache.values
def partitionFor(groupId: String): Int = Utils.abs(groupId.hashCode) % groupMetadataTopicPartitionCount
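  // For example (illustrative note added for this document): with the Kafka default of 50
  // __consumer_offsets partitions, a group id whose hashCode is 1234567 maps to partition
  // 1234567 % 50 = 17; the broker leading that partition acts as the group's coordinator.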
def isGroupLocal(groupId: String): Boolean = loadingPartitions synchronized ownedPartitions.contains(partitionFor(groupId))
def isGroupLoading(groupId: String): Boolean = loadingPartitions synchronized loadingPartitions.contains(partitionFor(groupId))
def isLoading(): Boolean = loadingPartitions synchronized !loadingPartitions.isEmpty
/**
* Get the group associated with the given groupId, or null if not found
*/
def getGroup(groupId: String): GroupMetadata = {
groupsCache.get(groupId)
}
/**
* Add a group or get the group associated with the given groupId if it already exists
*/
def addGroup(group: GroupMetadata): GroupMetadata = {
val currentGroup = groupsCache.putIfNotExists(group.groupId, group)
if (currentGroup != null) {
currentGroup
} else {
group
}
}
/**
* Remove all metadata associated with the group
* @param group
*/
def removeGroup(group: GroupMetadata) {
// guard this removal in case of concurrent access (e.g. if a delayed join completes with no members
// while the group is being removed due to coordinator emigration)
if (groupsCache.remove(group.groupId, group)) {
// Append the tombstone messages to the partition. It is okay if the replicas don't receive these (say,
// if we crash or leaders move) since the new leaders will still expire the consumers with heartbeat and
// retry removing this group.
val groupPartition = partitionFor(group.groupId)
val tombstone = new Message(bytes = null, key = GroupMetadataManager.groupMetadataKey(group.groupId))
val partitionOpt = replicaManager.getPartition(GroupCoordinator.GroupMetadataTopicName, groupPartition)
partitionOpt.foreach { partition =>
val appendPartition = TopicAndPartition(GroupCoordinator.GroupMetadataTopicName, groupPartition)
trace("Marking group %s as deleted.".format(group.groupId))
try {
// do not need to require acks since even if the tombstone is lost,
// it will be appended again by the new leader
// TODO KAFKA-2720: periodic purging instead of immediate removal of groups
partition.appendMessagesToLeader(new ByteBufferMessageSet(config.offsetsTopicCompressionCodec, tombstone))
} catch {
case t: Throwable =>
error("Failed to mark group %s as deleted in %s.".format(group.groupId, appendPartition), t)
// ignore and continue
}
}
}
}
def prepareStoreGroup(group: GroupMetadata,
groupAssignment: Map[String, Array[Byte]],
responseCallback: Short => Unit): DelayedStore = {
// construct the message to append
val message = new Message(
key = GroupMetadataManager.groupMetadataKey(group.groupId),
bytes = GroupMetadataManager.groupMetadataValue(group, groupAssignment)
)
val groupMetadataPartition = TopicAndPartition(GroupCoordinator.GroupMetadataTopicName, partitionFor(group.groupId))
val groupMetadataMessageSet = Map(groupMetadataPartition ->
new ByteBufferMessageSet(config.offsetsTopicCompressionCodec, message))
val generationId = group.generationId
// set the callback function to insert the created group into cache after log append completed
def putCacheCallback(responseStatus: Map[TopicAndPartition, ProducerResponseStatus]) {
// the append response should only contain the single partition we appended to
if (responseStatus.size != 1 || ! responseStatus.contains(groupMetadataPartition))
throw new IllegalStateException("Append status %s should only have one partition %s"
.format(responseStatus, groupMetadataPartition))
// construct the error status in the propagated assignment response
// in the cache
val status = responseStatus(groupMetadataPartition)
var responseCode = Errors.NONE.code
if (status.error != Errors.NONE.code) {
debug("Metadata from group %s with generation %d failed when appending to log due to %s"
.format(group.groupId, generationId, Errors.forCode(status.error).exceptionName))
// transform the log append error code to the corresponding commit status error code
responseCode = if (status.error == Errors.UNKNOWN_TOPIC_OR_PARTITION.code) {
Errors.GROUP_COORDINATOR_NOT_AVAILABLE.code
} else if (status.error == Errors.NOT_LEADER_FOR_PARTITION.code) {
Errors.NOT_COORDINATOR_FOR_GROUP.code
} else if (status.error == Errors.REQUEST_TIMED_OUT.code) {
Errors.REBALANCE_IN_PROGRESS.code
} else if (status.error == Errors.MESSAGE_TOO_LARGE.code
|| status.error == Errors.RECORD_LIST_TOO_LARGE.code
|| status.error == Errors.INVALID_FETCH_SIZE.code) {
error("Appending metadata message for group %s generation %d failed due to %s, returning UNKNOWN error code to the client"
.format(group.groupId, generationId, Errors.forCode(status.error).exceptionName))
Errors.UNKNOWN.code
} else {
error("Appending metadata message for group %s generation %d failed due to unexpected error: %s"
.format(group.groupId, generationId, status.error))
status.error
}
}
responseCallback(responseCode)
}
DelayedStore(groupMetadataMessageSet, putCacheCallback)
}
def store(delayedAppend: DelayedStore) {
// call replica manager to append the group message
replicaManager.appendMessages(
config.offsetCommitTimeoutMs.toLong,
config.offsetCommitRequiredAcks,
true, // allow appending to internal offset topic
delayedAppend.messageSet,
delayedAppend.callback)
}
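// Illustrative call sequence (a sketch of how the coordinator is expected to use the two methods above):
//   val delayed = prepareStoreGroup(group, assignment, responseCallback)
//   store(delayed) // appends to the log; putCacheCallback then maps the append result to a response code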
/**
* Store offsets by appending it to the replicated log and then inserting to cache
*/
def prepareStoreOffsets(groupId: String,
consumerId: String,
generationId: Int,
offsetMetadata: immutable.Map[TopicAndPartition, OffsetAndMetadata],
responseCallback: immutable.Map[TopicAndPartition, Short] => Unit): DelayedStore = {
// first filter out partitions with offset metadata size exceeding limit
val filteredOffsetMetadata = offsetMetadata.filter { case (topicAndPartition, offsetAndMetadata) =>
validateOffsetMetadataLength(offsetAndMetadata.metadata)
}
// construct the message set to append
val messages = filteredOffsetMetadata.map { case (topicAndPartition, offsetAndMetadata) =>
new Message(
key = GroupMetadataManager.offsetCommitKey(groupId, topicAndPartition.topic, topicAndPartition.partition),
bytes = GroupMetadataManager.offsetCommitValue(offsetAndMetadata)
)
}.toSeq
val offsetTopicPartition = TopicAndPartition(GroupCoordinator.GroupMetadataTopicName, partitionFor(groupId))
val offsetsAndMetadataMessageSet = Map(offsetTopicPartition ->
new ByteBufferMessageSet(config.offsetsTopicCompressionCodec, messages:_*))
// set the callback function to insert offsets into cache after log append completed
def putCacheCallback(responseStatus: Map[TopicAndPartition, ProducerResponseStatus]) {
// the append response should only contain the single partition we appended to
if (responseStatus.size != 1 || ! responseStatus.contains(offsetTopicPartition))
throw new IllegalStateException("Append status %s should only have one partition %s"
.format(responseStatus, offsetTopicPartition))
// construct the commit response status and insert
// the offset and metadata to cache if the append status has no error
val status = responseStatus(offsetTopicPartition)
val responseCode =
if (status.error == Errors.NONE.code) {
filteredOffsetMetadata.foreach { case (topicAndPartition, offsetAndMetadata) =>
putOffset(GroupTopicPartition(groupId, topicAndPartition), offsetAndMetadata)
}
Errors.NONE.code
} else {
debug("Offset commit %s from group %s consumer %s with generation %d failed when appending to log due to %s"
.format(filteredOffsetMetadata, groupId, consumerId, generationId, Errors.forCode(status.error).exceptionName))
// transform the log append error code to the corresponding commit status error code
if (status.error == Errors.UNKNOWN_TOPIC_OR_PARTITION.code)
Errors.GROUP_COORDINATOR_NOT_AVAILABLE.code
else if (status.error == Errors.NOT_LEADER_FOR_PARTITION.code)
Errors.NOT_COORDINATOR_FOR_GROUP.code
else if (status.error == Errors.MESSAGE_TOO_LARGE.code
|| status.error == Errors.RECORD_LIST_TOO_LARGE.code
|| status.error == Errors.INVALID_FETCH_SIZE.code)
Errors.INVALID_COMMIT_OFFSET_SIZE.code
else
status.error
}
// compute the final error codes for the commit response
val commitStatus = offsetMetadata.map { case (topicAndPartition, offsetAndMetadata) =>
if (validateOffsetMetadataLength(offsetAndMetadata.metadata))
(topicAndPartition, responseCode)
else
(topicAndPartition, Errors.OFFSET_METADATA_TOO_LARGE.code)
}
// finally trigger the callback logic passed from the API layer
responseCallback(commitStatus)
}
DelayedStore(offsetsAndMetadataMessageSet, putCacheCallback)
}
/**
* The most important guarantee that this API provides is that it should never return a stale offset, i.e. it either
* returns the current offset or it begins to sync the cache from the log (and returns an error code).
*/
def getOffsets(group: String, topicPartitions: Seq[TopicAndPartition]): Map[TopicAndPartition, OffsetMetadataAndError] = {
trace("Getting offsets %s for group %s.".format(topicPartitions, group))
if (isGroupLocal(group)) {
if (topicPartitions.isEmpty) {
// Return offsets for all partitions owned by this consumer group. (this only applies to consumers that commit offsets to Kafka.)
offsetsCache.filter(_._1.group == group).map { case(groupTopicPartition, offsetAndMetadata) =>
(groupTopicPartition.topicPartition, OffsetMetadataAndError(offsetAndMetadata.offset, offsetAndMetadata.metadata, Errors.NONE.code))
}.toMap
} else {
topicPartitions.map { topicAndPartition =>
val groupTopicPartition = GroupTopicPartition(group, topicAndPartition)
(groupTopicPartition.topicPartition, getOffset(groupTopicPartition))
}.toMap
}
} else {
debug("Could not fetch offsets for group %s (not offset coordinator).".format(group))
topicPartitions.map { topicAndPartition =>
val groupTopicPartition = GroupTopicPartition(group, topicAndPartition)
(groupTopicPartition.topicPartition, OffsetMetadataAndError.NotCoordinatorForGroup)
}.toMap
}
}
/**
* Asynchronously read the partition from the offsets topic and populate the cache
*/
def loadGroupsForPartition(offsetsPartition: Int,
onGroupLoaded: GroupMetadata => Unit) {
val topicPartition = TopicAndPartition(GroupCoordinator.GroupMetadataTopicName, offsetsPartition)
scheduler.schedule(topicPartition.toString, loadGroupsAndOffsets)
def loadGroupsAndOffsets() {
info("Loading offsets and group metadata from " + topicPartition)
loadingPartitions synchronized {
if (loadingPartitions.contains(offsetsPartition)) {
info("Offset load from %s already in progress.".format(topicPartition))
return
} else {
loadingPartitions.add(offsetsPartition)
}
}
val startMs = SystemTime.milliseconds
try {
replicaManager.logManager.getLog(topicPartition) match {
case Some(log) =>
var currOffset = log.logSegments.head.baseOffset
val buffer = ByteBuffer.allocate(config.loadBufferSize)
// loop breaks if leader changes at any time during the load, since getHighWatermark is -1
inWriteLock(offsetExpireLock) {
val loadedGroups = mutable.Map[String, GroupMetadata]()
val removedGroups = mutable.Set[String]()
while (currOffset < getHighWatermark(offsetsPartition) && !shuttingDown.get()) {
buffer.clear()
val messages = log.read(currOffset, config.loadBufferSize).messageSet.asInstanceOf[FileMessageSet]
messages.readInto(buffer, 0)
val messageSet = new ByteBufferMessageSet(buffer)
messageSet.foreach { msgAndOffset =>
require(msgAndOffset.message.key != null, "Offset entry key should not be null")
val baseKey = GroupMetadataManager.readMessageKey(msgAndOffset.message.key)
if (baseKey.isInstanceOf[OffsetKey]) {
// load offset
val key = baseKey.key.asInstanceOf[GroupTopicPartition]
if (msgAndOffset.message.payload == null) {
if (offsetsCache.remove(key) != null)
trace("Removed offset for %s due to tombstone entry.".format(key))
else
trace("Ignoring redundant tombstone for %s.".format(key))
} else {
// special handling for version 0:
// set the expiration time stamp as commit time stamp + server default retention time
val value = GroupMetadataManager.readOffsetMessageValue(msgAndOffset.message.payload)
putOffset(key, value.copy (
expireTimestamp = {
if (value.expireTimestamp == org.apache.kafka.common.requests.OffsetCommitRequest.DEFAULT_TIMESTAMP)
value.commitTimestamp + config.offsetsRetentionMs
else
value.expireTimestamp
}
))
trace("Loaded offset %s for %s.".format(value, key))
}
} else {
// load group metadata
val groupId = baseKey.key.asInstanceOf[String]
val groupMetadata = GroupMetadataManager.readGroupMessageValue(groupId, msgAndOffset.message.payload)
if (groupMetadata != null) {
trace(s"Loaded group metadata for group ${groupMetadata.groupId} with generation ${groupMetadata.generationId}")
removedGroups.remove(groupId)
loadedGroups.put(groupId, groupMetadata)
} else {
loadedGroups.remove(groupId)
removedGroups.add(groupId)
}
}
currOffset = msgAndOffset.nextOffset
}
}
loadedGroups.values.foreach { group =>
val currentGroup = addGroup(group)
if (group != currentGroup)
debug(s"Attempt to load group ${group.groupId} from log with generation ${group.generationId} failed " +
s"because there is already a cached group with generation ${currentGroup.generationId}")
else
onGroupLoaded(group)
}
removedGroups.foreach { groupId =>
val group = groupsCache.get(groupId)
if (group != null)
throw new IllegalStateException(s"Unexpected unload of acitve group ${group.groupId} while " +
s"loading partition ${topicPartition}")
}
}
if (!shuttingDown.get())
info("Finished loading offsets from %s in %d milliseconds."
.format(topicPartition, SystemTime.milliseconds - startMs))
case None =>
warn("No log found for " + topicPartition)
}
}
catch {
case t: Throwable =>
error("Error in loading offsets from " + topicPartition, t)
}
finally {
loadingPartitions synchronized {
ownedPartitions.add(offsetsPartition)
loadingPartitions.remove(offsetsPartition)
}
}
}
}
/**
* When this broker becomes a follower for an offsets topic partition, clear out the cache for groups that belong to
* that partition.
* @param offsetsPartition Groups belonging to this partition of the offsets topic will be deleted from the cache.
*/
def removeGroupsForPartition(offsetsPartition: Int,
onGroupUnloaded: GroupMetadata => Unit) {
val topicPartition = TopicAndPartition(GroupCoordinator.GroupMetadataTopicName, offsetsPartition)
scheduler.schedule(topicPartition.toString, removeGroupsAndOffsets)
def removeGroupsAndOffsets() {
var numOffsetsRemoved = 0
var numGroupsRemoved = 0
loadingPartitions synchronized {
// we need to guard the group removal in cache in the loading partition lock
// to prevent coordinator's check-and-get-group race condition
ownedPartitions.remove(offsetsPartition)
// clear the offsets for this partition in the cache
/**
* NOTE: we also need to do this under the loading partition lock to avoid a race with the leader-is-local check
* in getOffsets; this protects against fetching from an empty/cleared offset cache (i.e., one cleared by a
* leader->follower transition right after the check), which would cause offset fetches to return empty offsets with a NONE error code
*/
offsetsCache.keys.foreach { key =>
if (partitionFor(key.group) == offsetsPartition) {
offsetsCache.remove(key)
numOffsetsRemoved += 1
}
}
// clear the groups for this partition in the cache
for (group <- groupsCache.values) {
if (partitionFor(group.groupId) == offsetsPartition) {
onGroupUnloaded(group)
groupsCache.remove(group.groupId, group)
numGroupsRemoved += 1
}
}
}
if (numOffsetsRemoved > 0) info("Removed %d cached offsets for %s on follower transition."
.format(numOffsetsRemoved, TopicAndPartition(GroupCoordinator.GroupMetadataTopicName, offsetsPartition)))
if (numGroupsRemoved > 0) info("Removed %d cached groups for %s on follower transition."
.format(numGroupsRemoved, TopicAndPartition(GroupCoordinator.GroupMetadataTopicName, offsetsPartition)))
}
}
/**
* Fetch the current offset for the given group/topic/partition from the underlying offsets storage.
*
* @param key The requested group-topic-partition
* @return If the key is present, the offset and metadata; otherwise OffsetMetadataAndError.NoOffset
*/
private def getOffset(key: GroupTopicPartition) = {
val offsetAndMetadata = offsetsCache.get(key)
if (offsetAndMetadata == null)
OffsetMetadataAndError.NoOffset
else
OffsetMetadataAndError(offsetAndMetadata.offset, offsetAndMetadata.metadata, Errors.NONE.code)
}
/**
* Put the (already committed) offset for the given group/topic/partition into the cache.
*
* @param key The group-topic-partition
* @param offsetAndMetadata The offset/metadata to be stored
*/
private def putOffset(key: GroupTopicPartition, offsetAndMetadata: OffsetAndMetadata) {
offsetsCache.put(key, offsetAndMetadata)
}
private def deleteExpiredOffsets() {
debug("Collecting expired offsets.")
val startMs = SystemTime.milliseconds
val numExpiredOffsetsRemoved = inWriteLock(offsetExpireLock) {
val expiredOffsets = offsetsCache.filter { case (groupTopicPartition, offsetAndMetadata) =>
offsetAndMetadata.expireTimestamp < startMs
}
debug("Found %d expired offsets.".format(expiredOffsets.size))
// delete the expired offsets from the table and generate tombstone messages to remove them from the log
val tombstonesForPartition = expiredOffsets.map { case (groupTopicAndPartition, offsetAndMetadata) =>
val offsetsPartition = partitionFor(groupTopicAndPartition.group)
trace("Removing expired offset and metadata for %s: %s".format(groupTopicAndPartition, offsetAndMetadata))
offsetsCache.remove(groupTopicAndPartition)
val commitKey = GroupMetadataManager.offsetCommitKey(groupTopicAndPartition.group,
groupTopicAndPartition.topicPartition.topic, groupTopicAndPartition.topicPartition.partition)
(offsetsPartition, new Message(bytes = null, key = commitKey))
}.groupBy { case (partition, tombstone) => partition }
// Append the tombstone messages to the offset partitions. It is okay if the replicas don't receive these (say,
// if we crash or leaders move) since the new leaders will get rid of expired offsets during their own purge cycles.
tombstonesForPartition.flatMap { case (offsetsPartition, tombstones) =>
val partitionOpt = replicaManager.getPartition(GroupCoordinator.GroupMetadataTopicName, offsetsPartition)
partitionOpt.map { partition =>
val appendPartition = TopicAndPartition(GroupCoordinator.GroupMetadataTopicName, offsetsPartition)
val messages = tombstones.map(_._2).toSeq
trace("Marked %d offsets in %s for deletion.".format(messages.size, appendPartition))
try {
// do not need to require acks since even if the tombstone is lost,
// it will be appended again in the next purge cycle
partition.appendMessagesToLeader(new ByteBufferMessageSet(config.offsetsTopicCompressionCodec, messages: _*))
tombstones.size
}
catch {
case t: Throwable =>
error("Failed to mark %d expired offsets for deletion in %s.".format(messages.size, appendPartition), t)
// ignore and continue
0
}
}
}.sum
}
info("Removed %d expired offsets in %d milliseconds.".format(numExpiredOffsetsRemoved, SystemTime.milliseconds - startMs))
}
private def getHighWatermark(partitionId: Int): Long = {
val partitionOpt = replicaManager.getPartition(GroupCoordinator.GroupMetadataTopicName, partitionId)
val hw = partitionOpt.map { partition =>
partition.leaderReplicaIfLocal().map(_.highWatermark.messageOffset).getOrElse(-1L)
}.getOrElse(-1L)
hw
}
/*
* Check if the offset metadata length is valid
*/
private def validateOffsetMetadataLength(metadata: String) : Boolean = {
metadata == null || metadata.length() <= config.maxMetadataSize
}
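// Illustrative examples (assuming the default maxMetadataSize of 4096): a null or empty metadata
// string is always valid, while a 5000-character string is rejected; note the check counts
// characters rather than encoded bytes.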
def shutdown() {
shuttingDown.set(true)
scheduler.shutdown()
// TODO: clear the caches
}
/**
* Gets the partition count of the offsets topic from ZooKeeper.
* If the topic does not exist, the configured partition count is returned.
*/
private def getOffsetsTopicPartitionCount = {
val topic = GroupCoordinator.GroupMetadataTopicName
val topicData = zkUtils.getPartitionAssignmentForTopics(Seq(topic))
if (topicData(topic).nonEmpty)
topicData(topic).size
else
config.offsetsTopicNumPartitions
}
/**
* Add the partition into the owned list
*
* NOTE: this is for test only
*/
def addPartitionOwnership(partition: Int) {
loadingPartitions synchronized {
ownedPartitions.add(partition)
}
}
}
/**
* Messages stored for the group topic have versions for both the key and value fields. The key
* version is used to indicate the type of the message (and to prevent different types of messages
* from being compacted together if they happen to have the same field values); the value
* version is used to evolve the messages within their data types:
*
* key version 0: group consumption offset
* -> value version 0: [offset, metadata, timestamp]
*
* key version 1: group consumption offset
* -> value version 1: [offset, metadata, commit_timestamp, expire_timestamp]
*
* key version 2: group metadata
* -> value version 0: [protocol_type, generation, protocol, leader, members]
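*
* (See the illustrative decoding sketch following this object.)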
*/
object GroupMetadataManager {
private val CURRENT_OFFSET_KEY_SCHEMA_VERSION = 1.toShort
private val CURRENT_GROUP_KEY_SCHEMA_VERSION = 2.toShort
private val OFFSET_COMMIT_KEY_SCHEMA = new Schema(new Field("group", STRING),
new Field("topic", STRING),
new Field("partition", INT32))
private val OFFSET_KEY_GROUP_FIELD = OFFSET_COMMIT_KEY_SCHEMA.get("group")
private val OFFSET_KEY_TOPIC_FIELD = OFFSET_COMMIT_KEY_SCHEMA.get("topic")
private val OFFSET_KEY_PARTITION_FIELD = OFFSET_COMMIT_KEY_SCHEMA.get("partition")
private val OFFSET_COMMIT_VALUE_SCHEMA_V0 = new Schema(new Field("offset", INT64),
new Field("metadata", STRING, "Associated metadata.", ""),
new Field("timestamp", INT64))
private val OFFSET_VALUE_OFFSET_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("offset")
private val OFFSET_VALUE_METADATA_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("metadata")
private val OFFSET_VALUE_TIMESTAMP_FIELD_V0 = OFFSET_COMMIT_VALUE_SCHEMA_V0.get("timestamp")
private val OFFSET_COMMIT_VALUE_SCHEMA_V1 = new Schema(new Field("offset", INT64),
new Field("metadata", STRING, "Associated metadata.", ""),
new Field("commit_timestamp", INT64),
new Field("expire_timestamp", INT64))
private val OFFSET_VALUE_OFFSET_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("offset")
private val OFFSET_VALUE_METADATA_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("metadata")
private val OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("commit_timestamp")
private val OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1 = OFFSET_COMMIT_VALUE_SCHEMA_V1.get("expire_timestamp")
private val GROUP_METADATA_KEY_SCHEMA = new Schema(new Field("group", STRING))
private val GROUP_KEY_GROUP_FIELD = GROUP_METADATA_KEY_SCHEMA.get("group")
private val MEMBER_METADATA_V0 = new Schema(new Field("member_id", STRING),
new Field("client_id", STRING),
new Field("client_host", STRING),
new Field("session_timeout", INT32),
new Field("subscription", BYTES),
new Field("assignment", BYTES))
private val MEMBER_METADATA_MEMBER_ID_V0 = MEMBER_METADATA_V0.get("member_id")
private val MEMBER_METADATA_CLIENT_ID_V0 = MEMBER_METADATA_V0.get("client_id")
private val MEMBER_METADATA_CLIENT_HOST_V0 = MEMBER_METADATA_V0.get("client_host")
private val MEMBER_METADATA_SESSION_TIMEOUT_V0 = MEMBER_METADATA_V0.get("session_timeout")
private val MEMBER_METADATA_SUBSCRIPTION_V0 = MEMBER_METADATA_V0.get("subscription")
private val MEMBER_METADATA_ASSIGNMENT_V0 = MEMBER_METADATA_V0.get("assignment")
private val GROUP_METADATA_VALUE_SCHEMA_V0 = new Schema(new Field("protocol_type", STRING),
new Field("generation", INT32),
new Field("protocol", STRING),
new Field("leader", STRING),
new Field("members", new ArrayOf(MEMBER_METADATA_V0)))
private val GROUP_METADATA_PROTOCOL_TYPE_V0 = GROUP_METADATA_VALUE_SCHEMA_V0.get("protocol_type")
private val GROUP_METADATA_GENERATION_V0 = GROUP_METADATA_VALUE_SCHEMA_V0.get("generation")
private val GROUP_METADATA_PROTOCOL_V0 = GROUP_METADATA_VALUE_SCHEMA_V0.get("protocol")
private val GROUP_METADATA_LEADER_V0 = GROUP_METADATA_VALUE_SCHEMA_V0.get("leader")
private val GROUP_METADATA_MEMBERS_V0 = GROUP_METADATA_VALUE_SCHEMA_V0.get("members")
// map of versions to key schemas as data types
private val MESSAGE_TYPE_SCHEMAS = Map(
0 -> OFFSET_COMMIT_KEY_SCHEMA,
1 -> OFFSET_COMMIT_KEY_SCHEMA,
2 -> GROUP_METADATA_KEY_SCHEMA)
// map of versions to offset value schemas
private val OFFSET_VALUE_SCHEMAS = Map(
0 -> OFFSET_COMMIT_VALUE_SCHEMA_V0,
1 -> OFFSET_COMMIT_VALUE_SCHEMA_V1)
private val CURRENT_OFFSET_VALUE_SCHEMA_VERSION = 1.toShort
// map of versions to group metadata value schemas
private val GROUP_VALUE_SCHEMAS = Map(0 -> GROUP_METADATA_VALUE_SCHEMA_V0)
private val CURRENT_GROUP_VALUE_SCHEMA_VERSION = 0.toShort
private val CURRENT_OFFSET_KEY_SCHEMA = schemaForKey(CURRENT_OFFSET_KEY_SCHEMA_VERSION)
private val CURRENT_GROUP_KEY_SCHEMA = schemaForKey(CURRENT_GROUP_KEY_SCHEMA_VERSION)
private val CURRENT_OFFSET_VALUE_SCHEMA = schemaForOffset(CURRENT_OFFSET_VALUE_SCHEMA_VERSION)
private val CURRENT_GROUP_VALUE_SCHEMA = schemaForGroup(CURRENT_GROUP_VALUE_SCHEMA_VERSION)
private def schemaForKey(version: Int) = {
val schemaOpt = MESSAGE_TYPE_SCHEMAS.get(version)
schemaOpt match {
case Some(schema) => schema
case _ => throw new KafkaException("Unknown offset schema version " + version)
}
}
private def schemaForOffset(version: Int) = {
val schemaOpt = OFFSET_VALUE_SCHEMAS.get(version)
schemaOpt match {
case Some(schema) => schema
case _ => throw new KafkaException("Unknown offset schema version " + version)
}
}
private def schemaForGroup(version: Int) = {
val schemaOpt = GROUP_VALUE_SCHEMAS.get(version)
schemaOpt match {
case Some(schema) => schema
case _ => throw new KafkaException("Unknown group metadata version " + version)
}
}
/**
* Generates the key for offset commit message for given (group, topic, partition)
*
* @return key for offset commit message
*/
private def offsetCommitKey(group: String, topic: String, partition: Int, versionId: Short = 0): Array[Byte] = {
val key = new Struct(CURRENT_OFFSET_KEY_SCHEMA)
key.set(OFFSET_KEY_GROUP_FIELD, group)
key.set(OFFSET_KEY_TOPIC_FIELD, topic)
key.set(OFFSET_KEY_PARTITION_FIELD, partition)
val byteBuffer = ByteBuffer.allocate(2 /* version */ + key.sizeOf)
byteBuffer.putShort(CURRENT_OFFSET_KEY_SCHEMA_VERSION)
key.writeTo(byteBuffer)
byteBuffer.array()
}
/**
* Generates the key for group metadata message for given group
*
* @return key bytes for group metadata message
*/
private def groupMetadataKey(group: String): Array[Byte] = {
val key = new Struct(CURRENT_GROUP_KEY_SCHEMA)
key.set(GROUP_KEY_GROUP_FIELD, group)
val byteBuffer = ByteBuffer.allocate(2 /* version */ + key.sizeOf)
byteBuffer.putShort(CURRENT_GROUP_KEY_SCHEMA_VERSION)
key.writeTo(byteBuffer)
byteBuffer.array()
}
/**
* Generates the payload for offset commit message from given offset and metadata
*
* @param offsetAndMetadata consumer's current offset and metadata
* @return payload for offset commit message
*/
private def offsetCommitValue(offsetAndMetadata: OffsetAndMetadata): Array[Byte] = {
// generate commit value with schema version 1
val value = new Struct(CURRENT_OFFSET_VALUE_SCHEMA)
value.set(OFFSET_VALUE_OFFSET_FIELD_V1, offsetAndMetadata.offset)
value.set(OFFSET_VALUE_METADATA_FIELD_V1, offsetAndMetadata.metadata)
value.set(OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1, offsetAndMetadata.commitTimestamp)
value.set(OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1, offsetAndMetadata.expireTimestamp)
val byteBuffer = ByteBuffer.allocate(2 /* version */ + value.sizeOf)
byteBuffer.putShort(CURRENT_OFFSET_VALUE_SCHEMA_VERSION)
value.writeTo(byteBuffer)
byteBuffer.array()
}
/**
* Generates the payload for a group metadata message from the given group metadata and member assignment,
* assuming the generation id, selected protocol, leader and member assignment are all available
*
* @param groupMetadata current group metadata
* @return payload for group metadata message
*/
private def groupMetadataValue(groupMetadata: GroupMetadata, assignment: Map[String, Array[Byte]]): Array[Byte] = {
// generate group metadata value with the current schema version (0)
val value = new Struct(CURRENT_GROUP_VALUE_SCHEMA)
value.set(GROUP_METADATA_PROTOCOL_TYPE_V0, groupMetadata.protocolType)
value.set(GROUP_METADATA_GENERATION_V0, groupMetadata.generationId)
value.set(GROUP_METADATA_PROTOCOL_V0, groupMetadata.protocol)
value.set(GROUP_METADATA_LEADER_V0, groupMetadata.leaderId)
val memberArray = groupMetadata.allMemberMetadata.map {
case memberMetadata =>
val memberStruct = value.instance(GROUP_METADATA_MEMBERS_V0)
memberStruct.set(MEMBER_METADATA_MEMBER_ID_V0, memberMetadata.memberId)
memberStruct.set(MEMBER_METADATA_CLIENT_ID_V0, memberMetadata.clientId)
memberStruct.set(MEMBER_METADATA_CLIENT_HOST_V0, memberMetadata.clientHost)
memberStruct.set(MEMBER_METADATA_SESSION_TIMEOUT_V0, memberMetadata.sessionTimeoutMs)
val metadata = memberMetadata.metadata(groupMetadata.protocol)
memberStruct.set(MEMBER_METADATA_SUBSCRIPTION_V0, ByteBuffer.wrap(metadata))
val memberAssignment = assignment(memberMetadata.memberId)
assert(memberAssignment != null)
memberStruct.set(MEMBER_METADATA_ASSIGNMENT_V0, ByteBuffer.wrap(memberAssignment))
memberStruct
}
value.set(GROUP_METADATA_MEMBERS_V0, memberArray.toArray)
val byteBuffer = ByteBuffer.allocate(2 /* version */ + value.sizeOf)
byteBuffer.putShort(CURRENT_GROUP_VALUE_SCHEMA_VERSION)
value.writeTo(byteBuffer)
byteBuffer.array()
}
/**
* Decodes the key of a message written to the offsets/group metadata topic
*
* @param buffer input byte-buffer
* @return a BaseKey: an OffsetKey for offset commit messages, or a GroupMetadataKey for group metadata messages
*/
def readMessageKey(buffer: ByteBuffer): BaseKey = {
val version = buffer.getShort
val keySchema = schemaForKey(version)
val key = keySchema.read(buffer)
if (version <= CURRENT_OFFSET_KEY_SCHEMA_VERSION) {
// version 0 and 1 refer to offset
val group = key.get(OFFSET_KEY_GROUP_FIELD).asInstanceOf[String]
val topic = key.get(OFFSET_KEY_TOPIC_FIELD).asInstanceOf[String]
val partition = key.get(OFFSET_KEY_PARTITION_FIELD).asInstanceOf[Int]
OffsetKey(version, GroupTopicPartition(group, TopicAndPartition(topic, partition)))
} else if (version == CURRENT_GROUP_KEY_SCHEMA_VERSION) {
// version 2 refers to group metadata
val group = key.get(GROUP_KEY_GROUP_FIELD).asInstanceOf[String]
GroupMetadataKey(version, group)
} else {
throw new IllegalStateException("Unknown version " + version + " for group metadata message")
}
}
/**
* Decodes the offset messages' payload and retrieves offset and metadata from it
*
* @param buffer input byte-buffer
* @return an offset-metadata object from the message
*/
def readOffsetMessageValue(buffer: ByteBuffer): OffsetAndMetadata = {
if(buffer == null) { // tombstone
null
} else {
val version = buffer.getShort
val valueSchema = schemaForOffset(version)
val value = valueSchema.read(buffer)
if (version == 0) {
val offset = value.get(OFFSET_VALUE_OFFSET_FIELD_V0).asInstanceOf[Long]
val metadata = value.get(OFFSET_VALUE_METADATA_FIELD_V0).asInstanceOf[String]
val timestamp = value.get(OFFSET_VALUE_TIMESTAMP_FIELD_V0).asInstanceOf[Long]
OffsetAndMetadata(offset, metadata, timestamp)
} else if (version == 1) {
val offset = value.get(OFFSET_VALUE_OFFSET_FIELD_V1).asInstanceOf[Long]
val metadata = value.get(OFFSET_VALUE_METADATA_FIELD_V1).asInstanceOf[String]
val commitTimestamp = value.get(OFFSET_VALUE_COMMIT_TIMESTAMP_FIELD_V1).asInstanceOf[Long]
val expireTimestamp = value.get(OFFSET_VALUE_EXPIRE_TIMESTAMP_FIELD_V1).asInstanceOf[Long]
OffsetAndMetadata(offset, metadata, commitTimestamp, expireTimestamp)
} else {
throw new IllegalStateException("Unknown offset message version")
}
}
}
/**
* Decodes the group metadata messages' payload and retrieves its member metadata from it
*
* @param buffer input byte-buffer
* @return a group metadata object from the message
*/
def readGroupMessageValue(groupId: String, buffer: ByteBuffer): GroupMetadata = {
if(buffer == null) { // tombstone
null
} else {
val version = buffer.getShort
val valueSchema = schemaForGroup(version)
val value = valueSchema.read(buffer)
if (version == 0) {
val protocolType = value.get(GROUP_METADATA_PROTOCOL_TYPE_V0).asInstanceOf[String]
val group = new GroupMetadata(groupId, protocolType)
group.generationId = value.get(GROUP_METADATA_GENERATION_V0).asInstanceOf[Int]
group.leaderId = value.get(GROUP_METADATA_LEADER_V0).asInstanceOf[String]
group.protocol = value.get(GROUP_METADATA_PROTOCOL_V0).asInstanceOf[String]
value.getArray(GROUP_METADATA_MEMBERS_V0).foreach {
case memberMetadataObj =>
val memberMetadata = memberMetadataObj.asInstanceOf[Struct]
val memberId = memberMetadata.get(MEMBER_METADATA_MEMBER_ID_V0).asInstanceOf[String]
val clientId = memberMetadata.get(MEMBER_METADATA_CLIENT_ID_V0).asInstanceOf[String]
val clientHost = memberMetadata.get(MEMBER_METADATA_CLIENT_HOST_V0).asInstanceOf[String]
val sessionTimeout = memberMetadata.get(MEMBER_METADATA_SESSION_TIMEOUT_V0).asInstanceOf[Int]
val subscription = Utils.toArray(memberMetadata.get(MEMBER_METADATA_SUBSCRIPTION_V0).asInstanceOf[ByteBuffer])
val member = new MemberMetadata(memberId, groupId, clientId, clientHost, sessionTimeout,
List((group.protocol, subscription)))
member.assignment = Utils.toArray(memberMetadata.get(MEMBER_METADATA_ASSIGNMENT_V0).asInstanceOf[ByteBuffer])
group.add(memberId, member)
}
group
} else {
throw new IllegalStateException("Unknown group metadata message version")
}
}
}
// Formatter for use with tools such as console consumer: Consumer should also set exclude.internal.topics to false.
// (specify --formatter "kafka.coordinator.GroupMetadataManager\$OffsetsMessageFormatter" when consuming __consumer_offsets)
class OffsetsMessageFormatter extends MessageFormatter {
def writeTo(key: Array[Byte], value: Array[Byte], output: PrintStream) {
val formattedKey = if (key == null) "NULL" else GroupMetadataManager.readMessageKey(ByteBuffer.wrap(key))
// only print if the message is an offset record
if (formattedKey.isInstanceOf[OffsetKey]) {
val groupTopicPartition = formattedKey.asInstanceOf[OffsetKey].toString
val formattedValue = if (value == null) "NULL" else GroupMetadataManager.readOffsetMessageValue(ByteBuffer.wrap(value)).toString
output.write(groupTopicPartition.getBytes)
output.write("::".getBytes)
output.write(formattedValue.getBytes)
output.write("\\n".getBytes)
}
}
}
// Formatter for use with tools to read group metadata history
class GroupMetadataMessageFormatter extends MessageFormatter {
def writeTo(key: Array[Byte], value: Array[Byte], output: PrintStream) {
val formattedKey = if (key == null) "NULL" else GroupMetadataManager.readMessageKey(ByteBuffer.wrap(key))
// only print if the message is a group metadata record
if (formattedKey.isInstanceOf[GroupMetadataKey]) {
val groupId = formattedKey.asInstanceOf[GroupMetadataKey].key
val formattedValue = if (value == null) "NULL" else GroupMetadataManager.readGroupMessageValue(groupId, ByteBuffer.wrap(value)).toString
output.write(groupId.getBytes)
output.write("::".getBytes)
output.write(formattedValue.getBytes)
output.write("\\n".getBytes)
}
}
}
}
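// Illustrative sketch of decoding a raw key/value pair fetched from the __consumer_offsets topic with
// the readers above. This is an example only; the object, method and parameter names here
// (OffsetsTopicDecodingExample, decodeRecord, keyBytes, valueBytes) are hypothetical.
object OffsetsTopicDecodingExample {
  def decodeRecord(keyBytes: Array[Byte], valueBytes: Array[Byte]): Unit = {
    GroupMetadataManager.readMessageKey(ByteBuffer.wrap(keyBytes)) match {
      case OffsetKey(_, groupTopicPartition) =>
        // key versions 0 and 1 carry an offset commit; a null value is a tombstone
        val offset = Option(valueBytes).map(bytes => GroupMetadataManager.readOffsetMessageValue(ByteBuffer.wrap(bytes)))
        println(s"$groupTopicPartition -> ${offset.getOrElse("tombstone")}")
      case GroupMetadataKey(_, groupId) =>
        // key version 2 carries group metadata
        val group = Option(valueBytes).map(bytes => GroupMetadataManager.readGroupMessageValue(groupId, ByteBuffer.wrap(bytes)))
        println(s"$groupId -> ${group.getOrElse("tombstone")}")
    }
  }
}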
case class GroupTopicPartition(group: String, topicPartition: TopicAndPartition) {
def this(group: String, topic: String, partition: Int) =
this(group, new TopicAndPartition(topic, partition))
override def toString =
"[%s,%s,%d]".format(group, topicPartition.topic, topicPartition.partition)
}
trait BaseKey{
def version: Short
def key: Object
}
case class OffsetKey(version: Short, key: GroupTopicPartition) extends BaseKey {
override def toString = key.toString
}
case class GroupMetadataKey(version: Short, key: String) extends BaseKey {
override def toString = key
}
| Mszak/kafka | core/src/main/scala/kafka/coordinator/GroupMetadataManager.scala | Scala | apache-2.0 | 44,338 |
package kogu.practice.fpinscala.state
object Main {
def main(args: Array[String]): Unit = {
val inputs = List(Coin, Turn, Coin, Turn, Coin, Turn, Coin, Turn)
val steps = Machine.simulateMachine(inputs)
println(steps.run(Machine(locked = true, 5, 10)))
println(steps.run(Machine(locked = true, 0, 10)))
}
}
| kogupta/scala-playground | src/main/scala/kogu/practice/fpinscala/state/Main.scala | Scala | apache-2.0 | 328 |
package org.simpleservices
import org.kakashi.simpleservices.ServiceRoutes
import spray.http.StatusCodes._
import spray.testkit.ScalatestRouteTest
class ServiceRoutesSpec
extends AbstractServiceSpec
with ScalatestRouteTest
with ServiceRoutes {
def actorRefFactory = system
"ServiceRoutes" should {
"return a greeting for GET requests to the root path" in {
Get() ~> routes ~> check {
responseAs[String] should include regex """Simple Services"""
}
}
"leave GET requests to other paths unhandled" in {
Get("/unimplemented") ~> routes ~> check {
handled should be(false)
}
}
"handle GET requests of resources that exist under the resources directory" in {
Get("/ping") ~> routes ~> check {
responseAs[String] should include regex """pong"""
}
}
"leave GET requests of resources that DON'T exist under the resources directory" in {
Get("/foo") ~> routes ~> check {
handled should be(false)
}
}
"return a MethodNotAllowed error for PUT requests to the root path" in {
Put() ~> sealRoute(routes) ~> check {
status shouldBe(MethodNotAllowed)
responseAs[String] should be("HTTP method not allowed, supported methods: GET")
}
}
}
}
| freeservices/simpleservices | src/test/scala/com/kakashi/simpleservices/ServiceSpec.scala | Scala | mit | 1,295 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import org.mockito.Mockito.when
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.accounts.{AccountsFreeTextValidationFixture, MockFrs102AccountsRetriever}
import uk.gov.hmrc.ct.accounts.frs102.retriever.Frs102AccountsBoxRetriever
import uk.gov.hmrc.ct.box.CtValidation
import uk.gov.hmrc.ct.box.ValidatableBox._
class AC5032Spec extends WordSpec with MockitoSugar with Matchers with MockFrs102AccountsRetriever with AccountsFreeTextValidationFixture[Frs102AccountsBoxRetriever] {
override def setUpMocks(): Unit = {
when(boxRetriever.ac32()).thenReturn(AC32(Some(4)))
when(boxRetriever.ac33()).thenReturn(AC33(Some(4)))
}
testTextFieldValidation("AC5032", AC5032, testUpperLimit = Some(StandardCohoTextFieldLimit))
testTextFieldIllegalCharacterValidationReturnsIllegalCharacters("AC5032", AC5032)
"AC5032" should {
"pass validation when populated and AC32 is empty" in {
when(boxRetriever.ac32()).thenReturn(AC32(None))
when(boxRetriever.ac33()).thenReturn(AC33(Some(4)))
AC5032(Some("testing")).validate(boxRetriever) shouldBe Set.empty
}
"pass validation when populated and AC33 is empty" in {
when(boxRetriever.ac32()).thenReturn(AC32(Some(4)))
when(boxRetriever.ac33()).thenReturn(AC33(None))
AC5032(Some("testing")).validate(boxRetriever) shouldBe Set.empty
}
"fail validation when populated and AC32 and AC33 are empty" in {
when(boxRetriever.ac32()).thenReturn(AC32(None))
when(boxRetriever.ac33()).thenReturn(AC33(None))
AC5032(Some("testing")).validate(boxRetriever) shouldBe Set(CtValidation(Some("AC5032"), "error.AC5032.cannot.exist"))
}
}
}
| liquidarmour/ct-calculations | src/test/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC5032Spec.scala | Scala | apache-2.0 | 2,365 |
package example
import org.scalatest._
import csvquery._
import scalikejdbc._
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
class UsageSpec extends AnyFunSpec with Matchers {
// http://support.spatialkey.com/spatialkey-sample-csv-data/
val filepath = "src/test/resources/SacramentocrimeJanuary2006.csv"
val headers = Seq("cdatetime", "address", "district", "beat", "grid", "crimedescr", "ucr_ncic_code", "latitude", "longitude")
describe("CSVQuery") {
it("counts csv records") {
val count = withSession { implicit session =>
withCSV(CSV(filepath, headers)) { table =>
sql"select count(1) from $table".map(_.long(1)).single.apply()
}
}
count should equal(Some(7584))
}
it("filters csv records") {
implicit val session = autoCSVSession
val records: Seq[Map[String, Any]] = withCSV(CSV(filepath, headers)) { table =>
val (from, to) = (38.45, 38.50)
sql"select cdatetime, address, latitude from $table where latitude >= $from and latitude <= $to"
.toMap.list.apply()
}
records.size should equal(1258)
}
case class Account(name: String, companyName: String, company: Option[Company])
case class Company(name: String, url: String)
it("runs join queries") {
implicit val session = autoCSVSession
val (accountsCsv, companiesCsv) = (
CSV("src/test/resources/accounts.csv", Seq("name", "company_name")),
CSV("src/test/resources/companies.csv", Seq("name", "url")))
val accounts = withCSV(accountsCsv, companiesCsv) { (a, c) =>
sql"select a.name, a.company_name, c.url from $a a left join $c c on a.company_name = c.name".map { rs =>
new Account(
name = rs.get("name"),
companyName = rs.get("company_name"),
company = rs.stringOpt("url").map(url => Company(rs.get("company_name"), url)))
}.list.apply()
}
accounts.size should equal(10)
}
it("fails to update csv records") {
implicit val session = autoCSVSession
intercept[org.h2.jdbc.JdbcSQLSyntaxErrorException] {
withCSV(CSV(filepath, headers)) { table => sql"delete from $table".update.apply() }
}
}
}
}
| scalikejdbc/csvquery | src/test/scala/example/UsageSpec.scala | Scala | mit | 2,275 |
package org.ninjatasks.spi
import org.ninjatasks.UnitSpec
/**
* Tests for the ResultUpdater class.
* Created by Gilad Ber on 6/14/2014.
*/
class ResultUpdaterTest extends UnitSpec
{
trait Updaters {
val plus = ResultUpdater((a: Int, b: Int) => a + b, 0)
val mult = ResultUpdater((a: Int, b: Int) => a * b, 1)
}
"A result updater" should "combine addition successfully" in {
new Updaters {
val id = plus map (x => x)
id update 1
id.result should be(1)
}
}
it should "combine consecutive values successfully" in {
new Updaters {
plus update 1
plus update 2
plus.result should be (3)
mult update 2
mult update 3
mult.result should be (6)
}
}
it should "map identity successfully" in {
new Updaters {
plus map (x => x) update 1
plus.result should be(1)
}
}
it should "map constant addition successfully" in {
new Updaters {
val pplus = plus map(_ + 5)
pplus update 3
pplus.result should be (8)
val pmult = mult map (_ + 1)
pmult update 2
pmult.result should be (3)
pmult update 3
pmult.result should be (7)
}
}
it should "compose 2 maps successfully" in {
new Updaters {
val u = mult map(_ * 2) map (_ + 3)
u update 2
u.result should be (7)
u update 3
u.result should be (15)
}
}
it should "compose 3 maps successfully" in {
new Updaters {
val u = mult map(_ * 2) map (_ + 3) map (_ % 17)
u update 2
u.result should be (7)
u update 3
u.result should be (15)
u update 2
u.result should be (10)
}
}
it should "filter jobs successfully" in {
new Updaters {
val f = plus filter(_ > 5)
f update 1
f.result should be (0)
f update 5
f.result should be (0)
f update 6
f.result should be (6)
f update 1
f.result should be (6)
f update 7
f.result should be (13)
}
}
it should "apply 2 filters successfully" in {
new Updaters {
val f = mult filter (_ > 3) filter (_ % 7 != 0)
f update 2
f.result should be (1)
f update 4
f.result should be (4)
f update 7
f.result should be (4)
f update 8
f.result should be (32)
f update 14
f.result should be (32)
}
}
it should "map jobs successfully" in {
new Updaters {
val map : Int => Int = x => 5 * x
val mj = plus.mapJobs(map)((x: Int, y: Int) => (x + 1) * y)
mj update 1
mj.result should be (5)
mj update 2
mj.result should be (60)
}
}
it should "apply two job maps successfully" in {
new Updaters {
val map: Int => Double = x => x.toDouble + 1d
val map2: Double => List[Double] = x => List(x)
val mj = plus.mapJobs(map)((x: Int, y: Double) => (x + y).toInt).
mapJobs(map2)((x: Int, list: List[Double]) => (x + 2 * list(0)).toInt)
mj update 2
mj.result should be (6)
mj update 3
mj.result should be (14)
}
}
it should "apply foreach successfully" in {
new Updaters {
var sum: Int = 0
val aggregator = plus foreach(x => sum = sum + x)
aggregator update 1
sum should be (1)
aggregator update 2
sum should be (3)
aggregator update 3
sum should be (6)
}
}
it should "compose foreach after filter" in {
new Updaters {
var x: Int = 0
val u = plus filter (_ > 0) foreach (y => x = x + y)
u update -1
x should be (0)
u update 1
x should be (1)
u update 0
x should be (1)
u update -2
x should be (1)
u update 5
x should be (6)
}
}
it should "fold job results successfully" in {
new Updaters {
val map: (List[Int], Int) => List[Int] = (list, e) => list ::: (e :: Nil)
val folded = plus.fold(map)(Nil)
folded update 1
folded.result should be (List(1))
folded update 2
folded.result should be (List(1, 2))
for (i <- 1 to 5) folded update 1
folded.result should be (List(1, 2, 1, 1, 1, 1, 1))
}
}
it should "compose fold after filter successfully" in {
new Updaters {
val map: (List[Int], Int) => List[Int] = (list, e) => list ::: (e :: Nil)
val foldThenFilter = plus.fold(map)(Nil) filter (_ > 0)
val filterThenFold = plus.filter(_ > 0).fold(map)(Nil)
filterThenFold update 1
filterThenFold.result should be (List(1))
filterThenFold update -1
filterThenFold.result should be (List(1))
foldThenFilter update 1
foldThenFilter.result should be (List(1))
foldThenFilter update -1
foldThenFilter.result should be (List(1))
}
}
it should "compose result map with job map" in {
new Updaters {
val combine: (List[Int], Int) => List[Int] = (list, x) => list ::: (x :: Nil)
val rmap: Int => List[Int] = x => List(x)
val u = plus.map(rmap).mapJobs(_ * 2)(combine)
u.result should be (List(0))
u update 1
u.result should be (List(0, 2))
u update 2
u.result should be (List(0, 2, 4))
}
}
it should "compose result map with job map with filter" in {
val combine: (List[Int], Int) => List[Int] = (list, x) => list ::: (x :: Nil)
val rmap: Int => List[Int] = x => List(x)
new Updaters
{
val u = plus.filter(_ > 0).map(rmap).mapJobs(_ * 2)(combine)
u.result should be(List(0))
u update -1
u.result should be(List(0))
u update 1
u.result should be(List(0, 2))
u update -2
u.result should be(List(0, 2))
u update 1
u.result should be(List(0, 2, 2))
}
new Updaters {
val u2 = plus.map(rmap).mapJobs(_ * 2)(combine).filter(_ > 4)
u2.result should be (List(0))
u2 update 2
u2.result should be (List(0))
u2 update 3
u2.result should be (List(0, 6))
u2 update 1
u2.result should be (List(0, 6))
}
}
it should "compose filter with fold with map jobs" in {
new Updaters {
val map = (list: List[Int], list2: List[Int]) => list ::: (list2 map(_ * 2))
val combine: (Int, List[Int]) => Int = (x, list) => x + list(0)
val u = plus.filter(_ > 0).mapJobs(List(_))(combine).fold(map)(Nil)
u.result should be (Nil)
u update 1
u.result should be (List(2))
u update 0
u.result should be (List(2))
u update -1
u.result should be (List(2))
u update 5
u.result should be (List(2, 10))
}
}
}
| giladber/ninja-tasks | src/test/scala/org/ninjatasks/spi/ResultUpdaterTest.scala | Scala | apache-2.0 | 6,050 |
package sample
import scala.annotation.tailrec
/**
* An example service for Spring to inject. Calculates Fibonacci number using tail recursion
* Created by Richard Thorne (ilan toren) on 6/18/14.
*/
class FibonacciService {
def fibonacci(n: Int) : BigInt ={
@tailrec def fib( n: Int, a:BigInt, b: BigInt) : BigInt = n match {
case 0 => a
case _ => {
fib(n-1, b, a+b)
}
}
fib(n, 0, 1)
}
}
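/**
 * Illustrative usage only (not part of the original service): the accumulator pair (a, b) holds
 * F(i) and F(i+1), so after n steps `a` is F(n). The demo object name is hypothetical.
 */
object FibonacciServiceDemo extends App {
  private val service = new FibonacciService
  (0 to 10).foreach(n => println(s"fib($n) = ${service.fibonacci(n)}")) // prints 0, 1, 1, 2, 3, 5, 8, 13, 21, 34, 55
}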
| ilantoren/akka-router-scala-spring | app/sample/FibonacciService.scala | Scala | apache-2.0 | 437 |
/*
*
* Copyright (c) 2017 Radicalbit
*
* This file is part of flink-JPMML
*
* flink-JPMML is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* flink-JPMML is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with flink-JPMML. If not, see <http://www.gnu.org/licenses/>.
*
*/
import com.typesafe.sbt.SbtPgp.autoImportImpl.PgpKeys
import sbt.Keys.{publishMavenStyle, _}
import sbt.{Def, url, _}
import sbtrelease.ReleasePlugin.autoImport.{releaseCrossBuild, releasePublishArtifactsAction}
import xerial.sbt.Sonatype._
object PublishSettings {
lazy val settings: Seq[Def.Setting[_]] = sonatypeSettings ++ Seq(
publishTo := Some(
if (isSnapshot.value)
Opts.resolver.sonatypeSnapshots
else
Opts.resolver.sonatypeStaging
),
publishMavenStyle := true,
licenses := Seq("AGPL-3.0" -> url("https://opensource.org/licenses/AGPL-3.0")),
homepage := Some(url("https://github.com/FlinkML/flink-jpmml")),
scmInfo := Some(
ScmInfo(
url("https://github.com/FlinkML/flink-jpmml"),
"scm:git:[email protected]:FlinkML/flink-jpmml.git"
)
),
developers := List(
Developer(id = "spi-x-i",
name = "Andrea Spina",
email = "[email protected]",
url = url("https://github.com/spi-x-i")),
Developer(id = "francescofrontera",
name = "Francesco Frontera",
email = "[email protected]",
url = url("https://github.com/francescofrontera")),
Developer(id = "riccardo14",
name = "Riccardo Diomedi",
email = "[email protected]",
url = url("https://github.com/riccardo14")),
Developer(id = "maocorte",
name = "Mauro Cortellazzi",
email = "[email protected]",
url = url("https://github.com/maocorte"))
),
autoAPIMappings := true,
releaseCrossBuild := true,
releasePublishArtifactsAction := PgpKeys.publishSigned.value
)
}
| maocorte/flink-jpmml | project/PublishSettings.scala | Scala | agpl-3.0 | 2,534 |
/*******************************************************************************
* Copyright (c) 2014 eBay Software Foundation.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package org.ebaysf.ostara.upgrade
import java.io.File
import java.io.FileReader
import java.net.HttpURLConnection
import java.net.URL
import java.util.HashMap
import java.util.Properties
import org.apache.commons.io.FileUtils
import org.apache.commons.lang.StringUtils
import org.apache.commons.lang.SystemUtils.JAVA_IO_TMPDIR
import org.apache.commons.lang.text.StrSubstitutor
import org.apache.maven.model.Dependency
import org.apache.maven.model.Model
import org.apache.maven.model.Plugin
import org.apache.maven.model.io.xpp3.MavenXpp3Reader
import org.codehaus.plexus.util.xml.Xpp3Dom
import org.ebaysf.ostara.upgrade.util.POMModifierUtil.getLatestVersion
import grizzled.slf4j.Logging
import PomReport._
import java.io.InputStreamReader
import java.io.BufferedWriter
import java.io.FileWriter
import org.apache.maven.model.io.xpp3.MavenXpp3Writer
import scala.collection.JavaConversions._
import org.apache.maven.model.PluginManagement
object MigratorUtils extends Logging {
val POM_XML = "pom.xml"
def getPomFromDirectory(m:String, parent:File):File= {
val moduleDir = new File(if(parent.isFile()) parent.getParentFile() else parent, m)
if(moduleDir.isFile()) moduleDir else (new File(moduleDir, s"$POM_XML"))
}
def getNormalizedAbsoluteModuleName(parent:File, m:String):String = {
parent.getCanonicalPath() + " # " + normalizeModuleName(m)
}
def normalizeModuleName(m:String):String= {
if(m.endsWith(POM_XML)) {
m.dropRight(POM_XML.length).stripSuffix("/")
} else m.stripSuffix("/").stripSuffix("\\")
}
def extractTextContentFromXml(xpath:String, xmlFile:File):String = {
val xPath = javax.xml.xpath.XPathFactory.newInstance().newXPath();
return xPath.evaluate(xpath, new org.xml.sax.InputSource(new FileReader(xmlFile)))
}
def readPom(file:File):Model={
info(s"Reading $file")
val reader = new FileReader(if(file.isDirectory()) new File(file, POM_XML) else file)
val model = new MavenXpp3Reader().read(reader);
reader.close
return model
}
def readModelFromClasspath(sourcePomFile:String):Model ={
var model:Model = null;
try {
val templateReader = new InputStreamReader(getClass().getClassLoader().getResourceAsStream(sourcePomFile));
val reader = new MavenXpp3Reader();
model = reader.read(templateReader);
templateReader.close();
} catch {
case th:Throwable => error(th, th)
}
return model;
}
// TODO Can this be merged into findDependency?
def getDependency(dependencies:java.util.List[Dependency], artifactId:String):Dependency ={
for( d <- dependencies ){
if( d.getArtifactId() == artifactId ){
return d;
}
}
return null;
}
def findDependency(lst:java.util.List[Dependency], dep:Dependency, remove:Boolean = false, defaultGroupId:String=null):Boolean = {
for(i <- 0 to lst.size-1) {
val crtDep = lst.get(i)
if(crtDep.getArtifactId() == dep.getArtifactId() && (if(crtDep.getGroupId==null) defaultGroupId == dep.getGroupId else if(dep.getGroupId==null) crtDep.getGroupId == defaultGroupId else crtDep.getGroupId() == dep.getGroupId())) {
if(remove) {
lst.remove(i)
}
return true;
}
}
return false;
}
def removeChild(node:Xpp3Dom, childName:String) {
val childIdx = getChildIndex(node, childName)
if(childIdx != -1) {
node.removeChild(childIdx)
}
}
def getChildIndex(node:Xpp3Dom, childName:String):Int = {
for(i <- 0 to node.getChildCount() - 1) {
if(node.getChildren()(i).getName() == childName) {
return i
}
}
return -1
}
def findMBPInstruction(mbp:Plugin, instruction:String, caseSensitive:Boolean=true): Xpp3Dom = {
if(mbp!=null) {
val conf = mbp.getConfiguration()
if(conf != null) {
for(c <- conf.asInstanceOf[Xpp3Dom].getChildren();
if "instructions" == c.getName();
i <- c.getChildren();
if(caseSensitive && instruction == i.getName() || !caseSensitive && instruction.equalsIgnoreCase(i.getName())) )
return i
}
}
null
}
def createMavenPlugin(groupId:String, artifactId:String, version:String=null):Plugin = {
val p = new Plugin()
p setGroupId groupId
p setArtifactId artifactId
p setVersion version
p
}
def findPlugin(plugins:Seq[Plugin], groupId:String, artifactId:String):Plugin = {
for(p <- plugins) {
if (p.getGroupId == groupId && p.getArtifactId == artifactId) return p
}
return null
}
def adjustPluginVersions(plugins:java.util.List[Plugin], managedPlugins:java.util.List[Plugin], report:PomReport, pluginManagement:Boolean = false) {
var pluginsToRemove = Set[Plugin]()
for(plugin <- asScalaBuffer(plugins)) {
debug(s"Checking plugin $plugin")
import scala.collection.JavaConversions._
// Check if it's managed
val managedPlugin = findPlugin(managedPlugins, plugin.getGroupId(), plugin.getArtifactId())
if(managedPlugin != null) {
val dep = createNiceDependency(plugin.getGroupId(), plugin.getArtifactId())
report.addMissingArtifact(dep, NOT_MISSING, s"Removed version override for plugin managed by RaptorPlatform", null)
plugin.setVersion(null)
}
}
if(pluginManagement) for(plugin <- pluginsToRemove) plugins.remove(plugin)
}
def savePom(pomFile: File, model: Model, backup:Boolean=true): Unit = {
if(backup) {
val backupPomFile = new File(pomFile.getAbsolutePath() + ".bak." + System.currentTimeMillis())
debug(s"Backing up POM to ${backupPomFile}")
FileUtils.copyFile(pomFile, backupPomFile)
}
debug(s"Writing upgraded POM to ${pomFile}")
val out = new BufferedWriter(new FileWriter(pomFile));
val writer = new MavenXpp3Writer();
writer.write(out, model);
out.close();
}
def getRelativePathToParent(pomFile:File, parentPomFile:File):String= {
if(parentPomFile == null) null
else getRelativePath(pomFile, parentPomFile.getParentFile())
}
def getRelativePath(pomFile:File, parentPomFile:File):String = parentPomFile.toURI().relativize(pomFile.toURI()).toString();
/*
def checkDependenciesAvailability(deps:java.util.List[Dependency], updateToLatest:Boolean = false, report:PomReport, projectArtifacts:java.util.List[(File, Dependency)], dependencyManagement:Boolean, properties:List[java.util.Properties], projectGroupId:String=null) {
var depsToRemove = Set[Dependency]()
import scala.collection.JavaConversions._
for(dep <- asScalaBuffer(deps)) {
debug(s"Looking for artifact $dep")
// Check if it belongs to the project
if(findDependency(projectArtifacts.map(_._2), dep, false, projectGroupId)) {
debug(s"Skipping dependency as it belongs to the project: $dep")
} else
if(findDependency(UpgradeMain.platformModel.getDependencyManagement().getDependencies(), dep, false)) { // Check if it's managed
if(!StringUtils.isEmpty(dep.getVersion())) {
if(dependencyManagement) {
if(!UpgradeMain.provider) {
depsToRemove += dep
report.addMissingArtifact(dep, NOT_MISSING, s"Removed artifact from dependency management section because its version is managed by RaptorPlatform", null)
}
} else {
if(!UpgradeMain.provider) {
report.addMissingArtifact(dep, NOT_MISSING, s"Removed version override for artifact managed by RaptorPlatform", null)
dep.setVersion(null)
}
}
}
} else {
UpgradeMain.checkArtifactAvailability(dep, updateToLatest, report, properties)
}
}
if(dependencyManagement) for(dep <- depsToRemove) deps.remove(dep)
}*/
def getLatestArtifactVersion(repo:String, gid:String, aid:String):String= {
import org.ebaysf.ostara.upgrade.util.POMModifierUtil._
import org.apache.commons.lang.SystemUtils._
try {
return getLatestVersion(JAVA_IO_TMPDIR, repo, gid, aid, null)
} catch {
case th:Throwable => warn(th.getMessage, th); return null
}
}
def getResponseCode(urlString:String):Integer = {
val u = new URL(urlString);
val huc = u.openConnection().asInstanceOf[HttpURLConnection];
huc.setRequestMethod("GET");
huc.setRequestProperty("User-Agent", "Mozilla/5.0 (Windows; U; Windows NT 6.0; en-US; rv:1.9.1.2) Gecko/20090729 Firefox/3.5.2 (.NET CLR 3.5.30729)");
huc.connect();
return huc.getResponseCode();
}
// TODO this needs to be refined and errors different from 404 should at least be included in the report
def artifactExists(aUrl:String):Boolean = {
try {
getResponseCode(aUrl) == 200
} catch {
case th:Throwable => warn(th, th); return false;
}
}
def buildArtifactUrl(dep:Dependency, includeVersion:Boolean=false, props:List[java.util.Properties] = List()):String = {
var versionString:String = ""
if(includeVersion && !StringUtils.isEmpty(dep.getVersion)) {
versionString = "/" + evaluateVersion(dep, props)
}
dep.getGroupId().replace('.', '/') + '/' + dep.getArtifactId() + versionString
}
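  // e.g. (illustrative values) groupId "org.example.app" and artifactId "core" yield
  // "org/example/app/core", or "org/example/app/core/1.2" when includeVersion is true
  // and the version evaluates to "1.2".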
/** Enrich the properties by including the builtin Maven ones when possible: http://docs.codehaus.org/display/MAVENUSER/MavenPropertiesGuide */
def extractModelProperties(model:Model):Properties ={
val outProps = new Properties()
def v = model.getVersion()
if(!StringUtils.isEmpty(v)) {
outProps.put("version", v)
outProps.put("project.version", v)
outProps.put("pom.version", v)
for(entry <- scala.collection.JavaConversions.propertiesAsScalaMap(model.getProperties)) {
outProps.put(entry._1, evaluateVersion(entry._2, List(outProps, model.getProperties)))
}
} else {
outProps.putAll(model.getProperties())
}
return outProps
}
def evaluateVersion(dep:Dependency, props:List[Properties] = List()):String={
dep.setVersion(evaluateVersion(dep.getVersion, props))
return dep.getVersion
}
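  // Placeholders in version strings are resolved with StrSubstitutor, e.g. (illustrative values)
  // "${project.version}" becomes "1.2.3" when the first non-null Properties object in the list
  // maps project.version to 1.2.3; only that first non-null Properties object is consulted.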
def evaluateVersion(version:String, props:List[Properties]):String={
if(version != null && version.contains('$') && props != null) {
for(prop <- props) {
if(prop != null) {
val map = new HashMap[String, String]()
for (name <- prop.stringPropertyNames.toArray(Array[String]())) {
map.put(name, prop.getProperty(name));
}
return StrSubstitutor.replace(version, map)
}
}
}
return version
}
def cloneDependency(dep:Dependency, props:List[java.util.Properties] = List()):Dependency={
// TODO Kind of error prone, maybe the Maven code has this logic already?
val clonedDep = createDependency(dep.getGroupId(), dep.getArtifactId(), evaluateVersion(dep, props), dep.getScope(), dep.getType())
clonedDep.setExclusions(dep.getExclusions())
clonedDep.setClassifier(dep.getClassifier())
clonedDep.setOptional(dep.getOptional) // Work with the string value as the boolean signatures are just a wrapper
clonedDep.setSystemPath(dep.getSystemPath)
return clonedDep
}
def createXpp3Dom(name:String, value:String):Xpp3Dom = {
val node = new Xpp3Dom(name)
node.setValue(value)
return node
}
def createNiceDependency(groupId:String, artifactId:String, version:String = null, scope:String = null, atype:String = null):NiceDependency = new NiceDependency(createDependency(groupId, artifactId, version, scope, atype))
def createNiceDependency(d:Dependency):NiceDependency = new NiceDependency(d)
def createDependency(groupId:String, artifactId:String, version:String = null, scope:String = null, atype:String = null): Dependency = {
var dep:Dependency = new Dependency();
setGAV(dep, groupId, artifactId, version)
dep.setScope(scope)
dep.setType(atype)
dep
}
def setGAV(artifact: {def setGroupId(value: String); def setArtifactId(value: String); def setVersion(v: String)},
groupId:String, artifactId:String, version:String = null): Unit = {
artifact.setGroupId(if(groupId != null) groupId.trim else null)
artifact.setArtifactId(artifactId.trim)
artifact.setVersion(if(version != null) version.trim else null)
}
}
| eBay/ostara | ostara-upgrade/src/main/scala/org/ebaysf/ostara/upgrade/MigrationUtils.scala | Scala | apache-2.0 | 13,247 |
package lolchat.model
case class Friend(
name: String,
id: String,
chatMode: ChatMode,
isOnline: Boolean,
groupName: Vector[String],
selectedChamp: Option[String],
gameStatus: Option[String],
level: Int,
wins: Int,
statusMsg: String,
rankedTier: Option[String],
rankedDivision: Option[String],
leagueName: Option[String],
gameStartTime: Option[Long],
profileIconId: Option[Int]
)
| Thangiee/League-of-Legend-Chat-Lib-Scala | lib/src/main/scala/lolchat/model/Friend.scala | Scala | mit | 412 |
package org.wselwood.teatimer.gui
import javafx.scene.control.{Label, Slider, Button}
import javafx.fxml.{Initializable, FXML}
import java.net.URL
import java.util.ResourceBundle
import javafx.beans.property.{SimpleIntegerProperty, SimpleBooleanProperty}
import javafx.scene.media.AudioClip
import org.wselwood.teatimer.TimeState
import org.wselwood.common.gui.ChangeListener
import org.wselwood.common.tasks.RecurringEvent
/**
* Controller for our tea timer.
*
* User: wselwood
* Date: 04/06/12
* Time: 13:04
*
*/
class TeaTimerController extends Initializable {
// the current state of what should be shown on the screen.
var timerState = new TimeState()
@FXML var secondsSlider : Slider = null
@FXML var minutesSlider : Slider = null
@FXML var hoursSlider : Slider = null
@FXML var secondsLabel : Label = null
@FXML var minutesLabel : Label = null
@FXML var hoursLabel : Label = null
@FXML var startStopButton : Button = null
@FXML var resetButton : Button = null
// Keeps track of the state. Are we currently counting down or setting the values.
var timerRunning : SimpleBooleanProperty = new SimpleBooleanProperty(false)
// The current value of the count down.
var timerCount : SimpleIntegerProperty = new SimpleIntegerProperty(0)
// The thread that will be used to wait for a second and then fire another event.
// Held at the controller level so we can interrupt it when we need to stop.
private var ticker : RecurringEvent = null
  // How long the timer was set for on the last run.
  // Keep this so we can reset back when the timer expires or the user hits the reset button after stopping the timer
  // halfway through.
private var lastStartTime : Int = -1
/**
   * Set up the various listeners.
* @param here ignored
* @param res ignored as well.
*/
def initialize(here: URL, res: ResourceBundle) {
secondsSlider.valueProperty().bindBidirectional(timerState.seconds)
minutesSlider.valueProperty().bindBidirectional(timerState.minutes)
hoursSlider.valueProperty().bindBidirectional(timerState.hours)
timerState.seconds.addListener(ChangeListener.pluralLabelUpdater(secondsLabel, "second"))
timerState.minutes.addListener(ChangeListener.pluralLabelUpdater(minutesLabel, "minute"))
timerState.hours.addListener(ChangeListener.pluralLabelUpdater(hoursLabel, "hour"))
timerRunning.addListener(ChangeListener( { (oldValue: java.lang.Boolean, newValue: java.lang.Boolean) =>
if (newValue) {
// we are about to start the timer.
this.startRun()
}
else {
// we are stopping the timer.
this.stopRun()
}
}))
timerCount.addListener(ChangeListener( { (oldValue: Number, newValue: Number) =>
this.setSlidersToCurrentTime(newValue.intValue())
if (newValue.intValue() <= 0) {
this.timerRunning.set(false)
}
}))
}
/**
* Takes care of the start/stop button
*
* If there is a currently running thread we need to interrupt it so we don't get another tick a short time
* after pressing the stop button.
*/
def startStopButtonHandler() {
if (ticker != null ) {
ticker.stop()
}
    timerRunning.setValue(! timerRunning.get())   // flip the state; the listener on the timerRunning property will
                                                  // take care of everything else for us.
}
/**
* Takes care of the reset button.
*
   * If we stopped halfway through a run, the reset should go back to the last started time rather than zero.
   * The second time we press it, though, we should reset back to zero.
*/
def resetButtonHandler() {
if (lastStartTime > 0) {
setSlidersToCurrentTime(lastStartTime)
lastStartTime = -1
}
else {
setSlidersToCurrentTime(0)
}
}
/**
* Start the timer counting.
*/
def startRun() {
startStopButton.setText("Stop")
resetButton.setDisable(true)
if(lastStartTime == -1) {
lastStartTime = calculateNumberOfSeconds()
timerCount.set(lastStartTime)
}
else {
timerCount.set(calculateNumberOfSeconds())
}
if (timerCount.get() == 0) {
stopRun()
}
else {
ticker = new RecurringEvent(1000, { this.timerCount.set(this.timerCount.get() - 1) })
ticker.runAgain = {() => this.timerCount.get() > 0 && this.timerRunning.get()}
ticker.start()
}
}
/**
* Stop the timer counting, either by expiry or the user pressing the stop button.
*/
def stopRun() {
startStopButton.setText("Start")
resetButton.setDisable(false)
if (timerCount.get() <= 0 && lastStartTime > 0) {
timerExpired()
}
}
/**
* Effect at the end when the timer expires.
*/
def timerExpired() {
val boomSound : AudioClip = new AudioClip(this.getClass.getResource("/res/Explode.wav").toString)
boomSound.play()
setSlidersToCurrentTime(lastStartTime)
lastStartTime = -1
}
/**
* Work out the number of seconds the sliders are currently displaying.
* @return the number of seconds that the three sliders are currently displaying.
*/
def calculateNumberOfSeconds() : Int = {
timerState.seconds.get() + (timerState.minutes.get() * 60) + (timerState.hours.get() * 60 * 60)
}
/**
* Given a time, set the sliders to show it. This will ripple through to the labels thanks to the listeners.
* @param time number of seconds to set the sliders to.
*/
def setSlidersToCurrentTime(time : Int) {
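    // Worked example (illustrative): time = 3725 seconds -> 1 hour, 2 minutes, 5 seconds.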
var t = time
    if (t == 0) { // short-circuit the zero case so we don't have to worry about div by zero
timerState.seconds.set(0)
timerState.minutes.set(0)
timerState.hours.set(0)
}
else {
val hours = t / (60 * 60)
timerState.hours.set(hours)
t = t % (60 * 60 )
val minutes = t / 60
timerState.minutes.set(minutes)
t = t % 60
timerState.seconds.set(t)
}
}
def shutDown() {
if (ticker != null ) {
ticker.stop()
}
}
}
| wselwood/TeaTimer | src/org/wselwood/teatimer/gui/TeaTimerController.scala | Scala | bsd-3-clause | 6,861 |
package io.reactors
package protocol
import io.reactors.container.RHashSet
import io.reactors.container.RRing
import scala.annotation.unchecked
/** Collection that stores `Valve` objects, and acts as a `Valve` itself.
*/
class MultiValve[@specialized(Int, Long, Double) T: Arrayable](val window: Int) {
private[reactors] var ring: RRing[T] = _
private[reactors] var valves: RHashSet[(Valve[T], RCell[Long])] = _
private[reactors] var slowest: Signal[Long] = _
private[reactors] var oldest: Long = _
private[reactors] var next: RCell[Long] = _
private[reactors] var flush: Connector[Unit] = _
private[reactors] var rawOut: Valve[T] = _
def init(self: MultiValve[T]) {
ring = new RRing[T](window)
valves = new RHashSet[(Valve[T], RCell[Long])]
slowest = valves.map(_._2).toSignalAggregate(Long.MaxValue)(math.min)
oldest = 0L
next = RCell(0L)
flush = Reactor.self.system.channels.daemon.open[Unit]
flush.events on {
val total = slowest() - oldest
oldest += total
ring.dequeueMany(total.toInt)
}
rawOut = {
val c = Reactor.self.system.channels.daemon.shortcut.open[T]
val forwarding = c.events onEvent { x =>
if (ring.available()) {
ring.enqueue(x)
next := next() + 1
if (slowest() > next()) ring.dequeue()
} else throw new IllegalStateException("Valve is not available.")
}
Valve(
c.channel,
ring.available,
forwarding.andThen(c.seal()).andThen(valves.clear()).andThen(flush.seal())
)
}
}
init(this)
def size: Int = valves.size
def bufferSize: Int = ring.size
def out: Valve[T] = rawOut
def +=(v: Valve[T]): Subscription = {
val pos = RCell(math.min(slowest(), next()))
valves += (v, pos)
val morePending = (pos zip next)(_ < _).changes.toSignal(pos() < next())
val available = (v.available zip morePending)(_ && _)
val moving = available.is(true) on {
while (available()) {
val idx = (pos() - oldest).toInt
val x = ring(idx)
v.channel ! x
pos := pos() + 1
}
val total = slowest() - oldest
if (total > 0) flush.channel ! ()
}
moving.chain(available).chain(morePending).andThen(valves -= (v, pos))
}
}
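// A rough usage sketch for the MultiValve class above (names are illustrative; a MultiValve
// opens daemon channels on Reactor.self, so it must be created inside a running reactor):
//
//   val broadcast = new MultiValve[Int](window = 128)
//   val sub = broadcast += someValve      // start forwarding buffered events to someValve
//   broadcast.out.channel ! 42            // enqueue an event that is forwarded to every registered valve
//   sub.unsubscribe()                     // detach the valve again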
object MultiValve {
/** Variant of multi-valve optimized for the case when it contains a single valve.
*/
class Biased[@spec(Int, Long, Double) T: Arrayable](val window: Int) {
private[reactors] var implementation: RCell[AnyRef] = _
private[reactors] var rawOut: Valve[T] = _
def init(self: Biased[T]) {
implementation = RCell(null)
rawOut = {
val c = Reactor.self.system.channels.daemon.shortcut.open[T]
c.events onEvent { x =>
implementation() match {
case null =>
case v if v.isInstanceOf[Valve[_]] => v.asInstanceOf[Valve[T]].channel ! x
case m: MultiValve[T] @unchecked => m.out.channel ! x
}
}
val available = implementation.toEager.map({
case null => RCell(true)
case v if v.isInstanceOf[Valve[_]] => v.asInstanceOf[Valve[T]].available
case m: MultiValve[_] => m.out.available
}).mux.changed(false).toEmpty
val sub = Subscription(c.seal()).andThen {
implementation() match {
case null =>
case v if v.isInstanceOf[Valve[_]] =>
case m: MultiValve[_] => m.out.subscription.unsubscribe()
}
}
Valve(c.channel, available, sub)
}
}
init(this)
def out = rawOut
def +=(v: Valve[T]): Subscription = {
implementation() match {
case null =>
implementation := v
Subscription(implementation := null)
case w: Valve[T] @unchecked =>
val multi = new MultiValve[T](window)
multi += v
val sub = multi += w
implementation := multi
sub
case m: MultiValve[T] @unchecked =>
m += v
}
}
}
}
| storm-enroute/reactive-collections | reactors-protocol/shared/src/main/scala/io/reactors/protocol/MultiValve.scala | Scala | bsd-3-clause | 4,046 |
package controllers
import javax.inject.Inject
import play.api.i18n.MessagesApi
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import com.overviewdocs.messages.DocumentSetCommands
import com.overviewdocs.models.{CloneJob,DocumentSet}
import controllers.auth.AuthorizedAction
import controllers.auth.Authorities.{userOwningDocumentSet,userViewingDocumentSet}
import controllers.backend.{CloneJobBackend,DocumentSetBackend}
import controllers.util.JobQueueSender
class CloneImportJobController @Inject() (
cloneJobBackend: CloneJobBackend,
documentSetBackend: DocumentSetBackend,
jobQueueSender: JobQueueSender,
val controllerComponents: ControllerComponents
) extends BaseController {
def create(sourceDocumentSetId: Long) = authorizedAction(userViewingDocumentSet(sourceDocumentSetId)).async { implicit request =>
documentSetBackend.show(sourceDocumentSetId).flatMap(_ match {
case None => Future.successful(NotFound) // Extremely unlikely race
case Some(originalDocumentSet) => {
for {
documentSet <- documentSetBackend.create(cloneAttributes(originalDocumentSet), request.user.email)
cloneJob <- cloneJobBackend.create(CloneJob.CreateAttributes(originalDocumentSet.id, documentSet.id))
} yield {
jobQueueSender.send(DocumentSetCommands.CloneDocumentSet(cloneJob))
Redirect(routes.DocumentSetController.show(documentSet.id))
.flashing("event" -> "document-set-create-clone")
}
}
})
}
def delete(documentSetId: Long, cloneJobId: Int) = authorizedAction(userOwningDocumentSet(documentSetId)).async { implicit request =>
for {
_ <- cloneJobBackend.cancel(documentSetId, cloneJobId)
} yield Redirect(routes.DocumentSetController.show(documentSetId))
}
private def cloneAttributes(documentSet: DocumentSet) = DocumentSet.CreateAttributes(
title=documentSet.title,
query=documentSet.query,
documentCount=documentSet.documentCount,
documentProcessingErrorCount=documentSet.documentProcessingErrorCount,
importOverflowCount=documentSet.importOverflowCount,
metadataSchema=documentSet.metadataSchema
)
}
| overview/overview-server | web/app/controllers/CloneImportJobController.scala | Scala | agpl-3.0 | 2,203 |
//
// Copyright 2012-2020 Paytronix Systems, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package com.paytronix.utils.validation
import scala.reflect.{ClassTag, classTag}
import scalaz.{Failure, Success}
import scalaz.NonEmptyList.nels
import base.{Validated, ValidationError}
object reflection {
import string.nonBlank
val invalidClassName = ValidationError("invalid_class_name", "invalid class name")
def lookupError(e: Exception): ValidationError = ValidationError("unknown_error", "error while looking up class: " + e.toString)
/** Assert that a String is nonblank and refers to a loadable class */
def className[A: ClassTag](classLoader: ClassLoader): String => Validated[Class[_ <: A]] =
classNameE[A](invalidClassName, lookupError)(classLoader)
/** Assert that a String is nonblank and refers to a loadable class */
def classNameE[A: ClassTag](unknownClassError: ValidationError, lookupError: Exception => ValidationError)(classLoader: ClassLoader): String => Validated[Class[_ <: A]] =
nonBlank and { s =>
try Success(Class.forName(s, true, classLoader).asSubclass(classTag[A].runtimeClass.asInstanceOf[Class[A]]))
catch {
case e: ClassNotFoundException => Failure(nels(unknownClassError))
case e: Exception => Failure(nels(lookupError(e)))
}
}
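    // Illustrative use: className[Runnable](getClass.getClassLoader).apply("java.lang.Thread")
    // yields Success(classOf[Thread]), while a blank or unknown name yields a Failure carrying
    // the corresponding ValidationError.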
}
| paytronix/utils-open | validation/src/main/scala/com/paytronix/utils/validation/reflection.scala | Scala | apache-2.0 | 1,918 |
/*
* RunningSum.scala
* (FScape)
*
* Copyright (c) 2001-2022 Hanns Holger Rutz. All rights reserved.
*
* This software is published under the GNU Affero General Public License v3+
*
*
* For further information, please contact Hanns Holger Rutz at
* [email protected]
*/
package de.sciss.fscape
package stream
import akka.stream.{Attributes, FanInShape2, Inlet, Outlet}
import de.sciss.fscape.stream.impl.{NodeImpl, RunningValueImpl, StageImpl}
object RunningSum {
def apply[A, E <: BufElem[A]](in: Outlet[E], gate: OutI)(implicit b: Builder, tpe: StreamType[A, E]): Outlet[E] = {
val stage0 = new Stage[A, E](b.layer)
val stage = b.add(stage0)
b.connect(in , stage.in0)
b.connect(gate, stage.in1)
stage.out
}
private final val name = "RunningSum"
private type Shp[E] = FanInShape2[E, BufI, E]
private final class Stage[A, E <: BufElem[A]](layer: Layer)(implicit a: Allocator, tpe: StreamType[A, E])
extends StageImpl[Shp[E]](name) { stage =>
val shape: Shape = new FanInShape2(
in0 = Inlet [E](s"${stage.name}.in" ),
in1 = InI (s"${stage.name}.trig"),
out = Outlet[E](s"${stage.name}.out" )
)
def createLogic(attr: Attributes): NodeImpl[Shape] = {
val res: RunningValueImpl[_, _] = if (tpe.isDouble) {
new RunningValueImpl[Double, BufD](stage.name, layer, shape.asInstanceOf[Shp[BufD]], 0.0)(_ + _)
} else if (tpe.isInt) {
new RunningValueImpl[Int , BufI](stage.name, layer, shape.asInstanceOf[Shp[BufI]], 0 )(_ + _)
} else {
assert (tpe.isLong)
new RunningValueImpl[Long , BufL](stage.name, layer, shape.asInstanceOf[Shp[BufL]], 0L )(_ + _)
}
res.asInstanceOf[RunningValueImpl[A, E]]
}
}
} | Sciss/FScape-next | core/shared/src/main/scala/de/sciss/fscape/stream/RunningSum.scala | Scala | agpl-3.0 | 1,760 |
package com.dataintuitive.luciuscore
package api
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Dataset
import org.apache.spark.sql.SparkSession
import model.v4._
import genes._
import filters._
import signatures._
import correlations._
import lenses.CombinedPerturbationLenses.lengthLens
object TopTable extends ApiFunctionTrait {
case class SpecificData(
head: Int,
tail: Int,
signatureQuery: List[String],
featuresQuery: List[String],
filters: Seq[(String, Seq[String])]
)
type JobOutput = Array[Map[String, Any]]
def header(data: JobData) = s"Selected features: ${data.specificData.featuresQuery.toString}"
val infoMsg = s"Top Table wrt Zhang Score"
val helpMsg =
s"""
|$infoMsg
|
|- head: number of entries to return for a toptable
|- tail: if head=0, number of entries to return for bottom table
|- query: signature (or gene list) for calculating Zhang scores
|- features: list of features to return with (optional, all features are returned if not provided)
|- filters: The filters to apply
|""".stripMargin
def result(data: JobData)(implicit sparkSession: SparkSession) = {
val CachedData(db, _, genesDb, _) = data.cachedData
val SpecificData(head, tail, signatureQuery, featuresQuery, filters) = data.specificData
implicit val genes = genesDb
val signatureSpecified = !(signatureQuery.headOption.getOrElse(".*") == ".*")
val featuresSpecified = !(featuresQuery.headOption.getOrElse(".*") == ".*")
val qfilters = filters.map{ case(key,values) => QFilter(key, values) }
// TODO: Make sure we continue with all symbols, or just make the job invalid when it isn't!
val signature = new SymbolSignature(signatureQuery.toArray)
val iSignature = signature.toIndexSignature
    // Just taking the first element's t vector is incorrect; what do we use Options for, after all?
    // So... a version of takeWhile to the rescue
val vLength = lengthLens.get(db.first)
val query = iSignature.toOrderedRankVector(vLength)
// Calculate Zhang score for all entries that contain a rank vector
// This should be used in a flatMap
def updateZhang(x: Perturbation, query: Array[Double]): Option[ScoredPerturbation] = {
x.profiles.profile.flatMap(_.r) match {
case Some(r) => Some(ScoredPerturbation(ZhangScoreFunctions.connectionScore(r, query), x))
case _ => None
}
}
// Add Zhang score if signature is present
// Filter as soon as possible
val zhangAdded: RDD[ScoredPerturbation] =
db.rdd
.filter(row => FilterFunctions.isMatch(qfilters, row.filters))
.flatMap { updateZhang(_, query) }
val topN =
if (head > 0) {
implicit val descendingOrdering = Ordering.by((sp:ScoredPerturbation) => -sp.score)
zhangAdded
.takeOrdered(head)
} else {
implicit val descendingOrdering = Ordering.by((sp:ScoredPerturbation) => sp.score)
zhangAdded
.takeOrdered(tail)
}
// Create a feature selection, depending on the input
// TODO: Add target information and make sure it gets parsed correctly!
val features = {
if (featuresSpecified) featuresQuery
else
List(
"id",
"zhang",
"batch",
"plate",
"well",
"cell",
"dose",
"dose_unit",
"year",
"time",
"time_unit",
"trt",
"trt_id",
"trt_name",
"smiles",
"inchikey",
"targets",
"filters"
)
}
val result =
if (head > 0)
topN
.sortBy((sp:ScoredPerturbation) => -sp.score)
.map(entry => Extractors.ScoredPerturbationExtractor(entry, features))
else
topN
.sortBy((sp:ScoredPerturbation) => sp.score)
.map(entry => Extractors.ScoredPerturbationExtractor(entry, features))
result.map(_.zip(features).map(_.swap).toMap)
}
}
| data-intuitive/LuciusCore | src/main/scala/com/dataintuitive/luciuscore/api/TopTable.scala | Scala | apache-2.0 | 4,058 |
package metabrowse
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.scalajs.js
import monaco.Range
import monaco.Promise
import monaco.Uri
import monaco.editor.IActionDescriptor
import monaco.editor.IEditor
import monaco.editor.IEditorConstructionOptions
import monaco.editor.IEditorOverrideServices
import monaco.editor.IStandaloneCodeEditor
import monaco.services.IResourceInput
import monaco.services.IEditorService
import org.scalajs.dom
class MetabrowseEditorService(index: MetabrowseSemanticdbIndex)
extends IEditorService {
private lazy val editor: IStandaloneCodeEditor = {
val app = dom.document.getElementById("editor")
app.innerHTML = ""
val options = jsObject[IEditorConstructionOptions]
options.readOnly = true
options.scrollBeyondLastLine = false
val overrides = jsObject[IEditorOverrideServices]
overrides("textModelService") = MetabrowseTextModelService
overrides("editorService") = this
val editor = monaco.editor.Editor.create(app, options, overrides)
editor.asInstanceOf[js.Dynamic].getControl = { () =>
// NOTE: getControl() is defined on SimpleEditor and is called when changing files.
editor
}
editor
}
def addAction(action: IActionDescriptor): Unit =
editor.addAction(action)
def resize(): Unit =
editor.layout()
def open(input: IResourceInput): Future[IStandaloneCodeEditor] = {
val selection = input.options.selection
for {
MetabrowseMonacoDocument(document, model) <- MetabrowseTextModelService
.modelDocument(
input.resource
)
} yield {
editor.setModel(model.`object`.textEditorModel)
index.dispatch(MetabrowseEvent.SetDocument(document))
selection.foreach { irange =>
val range = Range.lift(irange)
editor.setSelection(range)
editor.revealPositionInCenter(range.getStartPosition())
editor.focus()
}
editor
}
}
override def openEditor(
input: IResourceInput,
sideBySide: js.UndefOr[Boolean] = js.undefined
): Promise[IEditor] =
open(input).toMonacoPromise
}
| scalameta/metadoc | metabrowse-js/src/main/scala/metabrowse/MetabrowseEditorService.scala | Scala | apache-2.0 | 2,161 |
package se.culvertsoft.mgen.idlparser
import java.io.File
import scala.collection.JavaConversions.mapAsJavaMap
import scala.collection.JavaConversions.seqAsJavaList
import scala.xml.XML.loadFile
import se.culvertsoft.mgen.api.model.Project
import se.culvertsoft.mgen.idlparser.util.XmlUtils.RichXmlNode
object ParseModule {
def apply(
file: File,
settings0: Map[String, String],
project: Project) {
val absoluteFilePath = file.getCanonicalPath()
println(s" parsing module: ${absoluteFilePath}")
val path = absoluteFilePath
val base = new File(project.absoluteFilePath).getParent()
val filePath = new File(base).toURI().relativize(new File(path).toURI()).getPath();
// Calculate module path
val modulePath = file.getName.stripSuffix(".xml")
// Read in module xml source code
val moduleXml = scala.xml.Utility.trim(loadFile(file))
if (moduleXml.label.toLowerCase() != "module") {
throw new RuntimeException(s"Tried to load ${file.getPath} as module, but it was not a module file!")
}
// Parse settings
val settings = settings0 ++ moduleXml.getSettings()
// Create the module
val module = project.getOrCreateModule(modulePath, if (filePath != null) filePath else file.getPath, absoluteFilePath, settings);
// Parse enumerations
val enumsXml = moduleXml.getAllNodeContents("Enums")
val enums = enumsXml.map { ParseEnum(_, module) }
module.addEnums(enums)
// Parse types
val typesXml = moduleXml.getAllNodeContents("Types")
val types = typesXml.map { ParseType(_, module) }
module.addClasses(types)
}
} | culvertsoft/mgen | mgen-idlparser/src/main/scala/se/culvertsoft/mgen/idlparser/ParseModule.scala | Scala | mit | 1,628 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.linker
sealed abstract class CheckedBehavior {
import CheckedBehavior._
def optimized: CheckedBehavior = this match {
case Fatal => Unchecked
case _ => this
}
}
object CheckedBehavior {
case object Compliant extends CheckedBehavior
case object Fatal extends CheckedBehavior
case object Unchecked extends CheckedBehavior
}
| nicolasstucki/scala-js | linker/shared/src/main/scala/org/scalajs/linker/CheckedBehavior.scala | Scala | apache-2.0 | 642 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.http.codegen
import wvlet.airframe.surface.reflect.ReflectSurfaceFactory
import wvlet.airspec.AirSpec
/**
*/
class ClassScannerTest extends AirSpec {
test("decoded URL encoded file paths") {
ClassScanner.decodePath(
"/lib/0.0.1%2Btest/xxxx-0.0.1%2Btest.jar"
) shouldBe "/lib/0.0.1+test/xxxx-0.0.1+test.jar"
}
test("Skip abstract class") {
// https://github.com/wvlet/airframe/issues/1607
val cl = classOf[io.grpc.stub.AbstractStub[_]]
val s = ReflectSurfaceFactory.ofClass(cl)
val m = ReflectSurfaceFactory.methodsOfClass(cl)
}
}
| wvlet/airframe | airframe-http-codegen/src/test/scala/wvlet/airframe/http/codegen/ClassScannerTest.scala | Scala | apache-2.0 | 1,159 |
package org.sameersingh.mf
import org.junit._
import Assert._
class MatrixTest {
@Test
def testPruning = {
val numRows = 5
val numCols = 5
val m = new Matrix("m")
for (i <- 0 until numRows)
for (j <- 0 until numCols) {
if (i < j) {
m += new Cell {
val row: ID = SimpleID(i, "r")
val col: ID = SimpleID(j, "c")
val value: Val = DoubleValue(1.0)
def isTrain: Boolean = true
val inMatrix: ObservedMatrix = m
}
}
}
val pruned = Matrix.prune(m, 1, 1)
assertEquals(m.cells.size, pruned.cells.size)
val actuallyPruned = Matrix.prune(m, 2, 2)
assertEquals(8, actuallyPruned.cells.size)
}
}
| sameersingh/mf | src/test/scala/org/sameersingh/mf/MatrixTest.scala | Scala | apache-2.0 | 734 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.testkit
import akka.http.scaladsl.testkit.ScalatestRouteTest
import org.scalatest.{FlatSpecLike, Matchers}
import org.squbs.unicomplex.{RouteDefinition, WebContext}
class MyRoute extends RouteDefinition {
val route =
path("ping") {
get {
complete {
"pong"
}
}
}
}
class MyRouteWithContext extends RouteDefinition with WebContext {
val route =
path("ping") {
get {
complete {
s"pong from $webContext"
}
}
}
}
class MyTestRoute2 extends RouteDefinition {
val route =
path("foo" / "bar" / Segment.?) { segment =>
complete {
segment match {
case Some(s) => s"Hello, got $s"
case None => "Hello, got nothing"
}
}
} ~
path("foo" / "bar") {
complete {
"Not even a trailing slash!"
}
} ~
path("foo" / "baz" / IntNumber.?) { num =>
complete {
num match {
case Some(n) => s"Hello, got half of ${n * 2}"
case None => "Hello, got no int"
}
}
} ~
path ("foo" / "baz") {
complete {
"No trailing slash either"
}
}
}
class TestRouteSpec extends FlatSpecLike with Matchers with ScalatestRouteTest {
it should "respond to string and int segments" in {
val route = TestRoute[MyTestRoute2]
Get("/foo/bar/xyz") ~> route ~> check {
responseAs[String] should be ("Hello, got xyz")
}
Get("/foo/bar/") ~> route ~> check {
responseAs[String] should be ("Hello, got nothing")
}
Get("/foo/bar") ~> route ~> check {
responseAs[String] should be ("Not even a trailing slash!")
}
Get("/foo/baz/5") ~> route ~> check {
responseAs[String] should be ("Hello, got half of 10")
}
Get("/foo/baz/") ~> route ~> check {
responseAs[String] should be ("Hello, got no int")
}
Get("/foo/baz") ~> route ~> check {
responseAs[String] should be ("No trailing slash either")
}
}
it should "return pong on a ping" in {
val route = TestRoute[MyRoute]
Get("/ping") ~> route ~> check {
responseAs[String] should be ("pong")
}
}
it should "return pong from nothing on a ping" in {
val route = TestRoute[MyRouteWithContext]
Get("/ping") ~> route ~> check {
responseAs[String] should be ("pong from ")
}
}
it should "return pong from context on a ping" in {
val route = TestRoute[MyRouteWithContext]("test")
Get("/test/ping") ~> route ~> check {
responseAs[String] should be ("pong from test")
}
}
}
| Harikiranvuyyuru/squbs | squbs-testkit/src/test/scala/org/squbs/testkit/TestRouteSpec.scala | Scala | apache-2.0 | 3,188 |
package org.ucf.scala
/**
* In computer science, a programming language is said to have first-class functions
* if it treats functions as first-class citizens. This means the language supports
* passing functions as arguments to other functions, returning them as the values
* from other functions, and assigning them to variables or storing them in data
* structures.[1] Some programming language theorists require support for anonymous
* functions ([[function literals]]) as well
*/
object FirstClassFunction{
/**
* The syntax of a function literal
*
*
* sum: a functional value (Int, Int) => Int
   * (x:Int, y:Int) : the function parameters, here two Int parameters: x and y
   * =>  :  the right arrow designates that this function converts the thing on the left (the input parameters)
   *        to the thing on the right (the function body)
   * { x + y } : the function body; you can include multiple statements in this body block.
*
* A function literal is compiled into a class that when instantiated at runtime is a
* functional value. The distinction between function literals and function values is
   * that function literals exist in the source code, whereas function values exist at
   * runtime as objects, instances of a [[FunctionN]] class.
*/
val sum = (x:Int, y:Int) => {
x + y
}
val someNumbers = List(-11, -10, -5, 0, 5, 10)
  someNumbers.filter((x:Int) => x > 0)  // standard form of function literal
  someNumbers.filter((x) => x > 0)      // Short form of function literal that leaves out
                                        // redundant information (the parameter type)
  someNumbers.filter(x => x > 0)        // redundant information (parameter type and parentheses)
  someNumbers.filter(_ > 0)             // Underscores as placeholders for one or more parameters,
                                        // so long as each parameter appears only once within the function literal
/**
* Sometimes when you use underscores as placeholders for parameters, the compiler
* might not have enough information to infer missing parameter types. For example:
*
* val f = _ + _
*
   * This cannot compile because the parameter types of the expanded function are missing.
*/
// f:(Int, Int) => Int = <function2>
  val sum_ = (_:Int) + (_:Int)  // same semantics as the sum function above
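  // A compact sketch of the other two aspects of first-class functions described at the top of
  // this file: passing a function as an argument and returning a function as a value. The names
  // applyTwice, makeAdder and addFive are illustrative only.
  def applyTwice(f: Int => Int, x: Int): Int = f(f(x))   // a function passed as an argument
  def makeAdder(n: Int): Int => Int = (x: Int) => x + n  // a function returned as a value
  val addFive: Int => Int = makeAdder(5)                 // a functional value stored in a val
  // applyTwice(addFive, 1) == 11

  // Roughly what the compiler generates for the `sum` literal above: an instance of Function2
  // whose apply method holds the body (a hand-written sketch for illustration).
  val sumAsFunction2: (Int, Int) => Int = new Function2[Int, Int, Int] {
    def apply(x: Int, y: Int): Int = x + y
  }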
} | bingrao/Scala-Learning | Function/src/main/scala/org/ucf/scala/FirstClassFunction.scala | Scala | mit | 2,327 |
/**
* Copyright (C) 2012 - 101loops.com <[email protected]>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
| crashnote/crashnote-java | modules/core/src/test/scala/com/crashnote/test/core/unit/report/ThrowableLogEvtSpec.scala | Scala | apache-2.0 | 626 |
package io.aos.ebnf.spl.ast
sealed abstract class Operator
case class ComparisonOperator(operator: String, value: String) extends Operator
case class InOperator(valueList: Seq[String], negate: Boolean = false) extends Operator
case class ExistsOperator(negate: Boolean = false) extends Operator
| echalkpad/t4f-data | parser/ebnf/src/main/scala/io/aos/ebnf/spl/ast/Operator.scala | Scala | apache-2.0 | 300 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.apache.spark.sql.functions._
import org.apache.spark.sql.functions.{log => logarithm}
import org.apache.spark.sql.test.SharedSQLContext
private object MathExpressionsTestData {
case class DoubleData(a: java.lang.Double, b: java.lang.Double)
case class NullDoubles(a: java.lang.Double)
}
class MathExpressionsSuite extends QueryTest with SharedSQLContext {
import MathExpressionsTestData._
import testImplicits._
private lazy val doubleData = (1 to 10).map(i => DoubleData(i * 0.2 - 1, i * -0.2 + 1)).toDF()
private lazy val nnDoubleData = (1 to 10).map(i => DoubleData(i * 0.1, i * -0.1)).toDF()
private lazy val nullDoubles =
Seq(NullDoubles(1.0), NullDoubles(2.0), NullDoubles(3.0), NullDoubles(null)).toDF()
private def testOneToOneMathFunction[
@specialized(Int, Long, Float, Double) T,
@specialized(Int, Long, Float, Double) U](
c: Column => Column,
f: T => U): Unit = {
checkAnswer(
doubleData.select(c('a)),
(1 to 10).map(n => Row(f((n * 0.2 - 1).asInstanceOf[T])))
)
checkAnswer(
doubleData.select(c('b)),
(1 to 10).map(n => Row(f((-n * 0.2 + 1).asInstanceOf[T])))
)
checkAnswer(
doubleData.select(c(lit(null))),
(1 to 10).map(_ => Row(null))
)
}
private def testOneToOneNonNegativeMathFunction(c: Column => Column, f: Double => Double): Unit =
{
checkAnswer(
nnDoubleData.select(c('a)),
(1 to 10).map(n => Row(f(n * 0.1)))
)
if (f(-1) === math.log1p(-1)) {
checkAnswer(
nnDoubleData.select(c('b)),
(1 to 9).map(n => Row(f(n * -0.1))) :+ Row(null)
)
}
checkAnswer(
nnDoubleData.select(c(lit(null))),
(1 to 10).map(_ => Row(null))
)
}
private def testTwoToOneMathFunction(
c: (Column, Column) => Column,
d: (Column, Double) => Column,
f: (Double, Double) => Double): Unit = {
checkAnswer(
nnDoubleData.select(c('a, 'a)),
nnDoubleData.collect().toSeq.map(r => Row(f(r.getDouble(0), r.getDouble(0))))
)
checkAnswer(
nnDoubleData.select(c('a, 'b)),
nnDoubleData.collect().toSeq.map(r => Row(f(r.getDouble(0), r.getDouble(1))))
)
checkAnswer(
nnDoubleData.select(d('a, 2.0)),
nnDoubleData.collect().toSeq.map(r => Row(f(r.getDouble(0), 2.0)))
)
checkAnswer(
nnDoubleData.select(d('a, -0.5)),
nnDoubleData.collect().toSeq.map(r => Row(f(r.getDouble(0), -0.5)))
)
val nonNull = nullDoubles.collect().toSeq.filter(r => r.get(0) != null)
checkAnswer(
nullDoubles.select(c('a, 'a)).orderBy('a.asc),
Row(null) +: nonNull.map(r => Row(f(r.getDouble(0), r.getDouble(0))))
)
}
test("sin") {
testOneToOneMathFunction(sin, math.sin)
}
test("asin") {
testOneToOneMathFunction(asin, math.asin)
}
test("sinh") {
testOneToOneMathFunction(sinh, math.sinh)
}
test("cos") {
testOneToOneMathFunction(cos, math.cos)
}
test("acos") {
testOneToOneMathFunction(acos, math.acos)
}
test("cosh") {
testOneToOneMathFunction(cosh, math.cosh)
}
test("tan") {
testOneToOneMathFunction(tan, math.tan)
}
test("atan") {
testOneToOneMathFunction(atan, math.atan)
}
test("tanh") {
testOneToOneMathFunction(tanh, math.tanh)
}
test("toDegrees") {
testOneToOneMathFunction(toDegrees, math.toDegrees)
checkAnswer(
sql("SELECT degrees(0), degrees(1), degrees(1.5)"),
Seq((1, 2)).toDF().select(toDegrees(lit(0)), toDegrees(lit(1)), toDegrees(lit(1.5)))
)
}
test("toRadians") {
testOneToOneMathFunction(toRadians, math.toRadians)
checkAnswer(
sql("SELECT radians(0), radians(1), radians(1.5)"),
Seq((1, 2)).toDF().select(toRadians(lit(0)), toRadians(lit(1)), toRadians(lit(1.5)))
)
}
test("cbrt") {
testOneToOneMathFunction(cbrt, math.cbrt)
}
test("ceil and ceiling") {
testOneToOneMathFunction(ceil, (d: Double) => math.ceil(d).toLong)
checkAnswer(
sql("SELECT ceiling(0), ceiling(1), ceiling(1.5)"),
Row(0L, 1L, 2L))
}
test("conv") {
val df = Seq(("333", 10, 2)).toDF("num", "fromBase", "toBase")
checkAnswer(df.select(conv('num, 10, 16)), Row("14D"))
checkAnswer(df.select(conv(lit(100), 2, 16)), Row("4"))
checkAnswer(df.select(conv(lit(3122234455L), 10, 16)), Row("BA198457"))
checkAnswer(df.selectExpr("conv(num, fromBase, toBase)"), Row("101001101"))
checkAnswer(df.selectExpr("""conv("100", 2, 10)"""), Row("4"))
checkAnswer(df.selectExpr("""conv("-10", 16, -10)"""), Row("-16"))
checkAnswer(
df.selectExpr("""conv("9223372036854775807", 36, -16)"""), Row("-1")) // for overflow
}
test("floor") {
testOneToOneMathFunction(floor, (d: Double) => math.floor(d).toLong)
}
test("factorial") {
val df = (0 to 5).map(i => (i, i)).toDF("a", "b")
checkAnswer(
df.select(factorial('a)),
Seq(Row(1), Row(1), Row(2), Row(6), Row(24), Row(120))
)
checkAnswer(
df.selectExpr("factorial(a)"),
Seq(Row(1), Row(1), Row(2), Row(6), Row(24), Row(120))
)
}
test("rint") {
testOneToOneMathFunction(rint, math.rint)
}
test("round") {
val df = Seq(5, 55, 555).map(Tuple1(_)).toDF("a")
checkAnswer(
df.select(round('a), round('a, -1), round('a, -2)),
Seq(Row(5, 10, 0), Row(55, 60, 100), Row(555, 560, 600))
)
val pi = 3.1415
checkAnswer(
sql(s"SELECT round($pi, -3), round($pi, -2), round($pi, -1), " +
s"round($pi, 0), round($pi, 1), round($pi, 2), round($pi, 3)"),
Seq(Row(BigDecimal("0E3"), BigDecimal("0E2"), BigDecimal("0E1"), BigDecimal(3),
BigDecimal("3.1"), BigDecimal("3.14"), BigDecimal("3.142")))
)
}
test("exp") {
testOneToOneMathFunction(exp, math.exp)
}
test("expm1") {
testOneToOneMathFunction(expm1, math.expm1)
}
test("signum / sign") {
testOneToOneMathFunction[Double, Double](signum, math.signum)
checkAnswer(
sql("SELECT sign(10), signum(-11)"),
Row(1, -1))
}
test("pow / power") {
testTwoToOneMathFunction(pow, pow, math.pow)
checkAnswer(
sql("SELECT pow(1, 2), power(2, 1)"),
Seq((1, 2)).toDF().select(pow(lit(1), lit(2)), pow(lit(2), lit(1)))
)
}
test("hex") {
val data = Seq((28, -28, 100800200404L, "hello")).toDF("a", "b", "c", "d")
checkAnswer(data.select(hex('a)), Seq(Row("1C")))
checkAnswer(data.select(hex('b)), Seq(Row("FFFFFFFFFFFFFFE4")))
checkAnswer(data.select(hex('c)), Seq(Row("177828FED4")))
checkAnswer(data.select(hex('d)), Seq(Row("68656C6C6F")))
checkAnswer(data.selectExpr("hex(a)"), Seq(Row("1C")))
checkAnswer(data.selectExpr("hex(b)"), Seq(Row("FFFFFFFFFFFFFFE4")))
checkAnswer(data.selectExpr("hex(c)"), Seq(Row("177828FED4")))
checkAnswer(data.selectExpr("hex(d)"), Seq(Row("68656C6C6F")))
checkAnswer(data.selectExpr("hex(cast(d as binary))"), Seq(Row("68656C6C6F")))
}
test("unhex") {
val data = Seq(("1C", "737472696E67")).toDF("a", "b")
checkAnswer(data.select(unhex('a)), Row(Array[Byte](28.toByte)))
checkAnswer(data.select(unhex('b)), Row("string".getBytes))
checkAnswer(data.selectExpr("unhex(a)"), Row(Array[Byte](28.toByte)))
checkAnswer(data.selectExpr("unhex(b)"), Row("string".getBytes))
checkAnswer(data.selectExpr("""unhex("##")"""), Row(null))
checkAnswer(data.selectExpr("""unhex("G123")"""), Row(null))
}
test("hypot") {
testTwoToOneMathFunction(hypot, hypot, math.hypot)
}
test("atan2") {
testTwoToOneMathFunction(atan2, atan2, math.atan2)
}
test("log / ln") {
testOneToOneNonNegativeMathFunction(org.apache.spark.sql.functions.log, math.log)
checkAnswer(
sql("SELECT ln(0), ln(1), ln(1.5)"),
Seq((1, 2)).toDF().select(logarithm(lit(0)), logarithm(lit(1)), logarithm(lit(1.5)))
)
}
test("log10") {
testOneToOneNonNegativeMathFunction(log10, math.log10)
}
test("log1p") {
testOneToOneNonNegativeMathFunction(log1p, math.log1p)
}
test("shift left") {
val df = Seq[(Long, Integer, Short, Byte, Integer, Integer)]((21, 21, 21, 21, 21, null))
.toDF("a", "b", "c", "d", "e", "f")
checkAnswer(
df.select(
shiftLeft('a, 1), shiftLeft('b, 1), shiftLeft('c, 1), shiftLeft('d, 1),
shiftLeft('f, 1)),
Row(42.toLong, 42, 42.toShort, 42.toByte, null))
checkAnswer(
df.selectExpr(
"shiftLeft(a, 1)", "shiftLeft(b, 1)", "shiftLeft(b, 1)", "shiftLeft(d, 1)",
"shiftLeft(f, 1)"),
Row(42.toLong, 42, 42.toShort, 42.toByte, null))
}
test("shift right") {
val df = Seq[(Long, Integer, Short, Byte, Integer, Integer)]((42, 42, 42, 42, 42, null))
.toDF("a", "b", "c", "d", "e", "f")
checkAnswer(
df.select(
shiftRight('a, 1), shiftRight('b, 1), shiftRight('c, 1), shiftRight('d, 1),
shiftRight('f, 1)),
Row(21.toLong, 21, 21.toShort, 21.toByte, null))
checkAnswer(
df.selectExpr(
"shiftRight(a, 1)", "shiftRight(b, 1)", "shiftRight(c, 1)", "shiftRight(d, 1)",
"shiftRight(f, 1)"),
Row(21.toLong, 21, 21.toShort, 21.toByte, null))
}
test("shift right unsigned") {
val df = Seq[(Long, Integer, Short, Byte, Integer, Integer)]((-42, 42, 42, 42, 42, null))
.toDF("a", "b", "c", "d", "e", "f")
checkAnswer(
df.select(
shiftRightUnsigned('a, 1), shiftRightUnsigned('b, 1), shiftRightUnsigned('c, 1),
shiftRightUnsigned('d, 1), shiftRightUnsigned('f, 1)),
Row(9223372036854775787L, 21, 21.toShort, 21.toByte, null))
checkAnswer(
df.selectExpr(
"shiftRightUnsigned(a, 1)", "shiftRightUnsigned(b, 1)", "shiftRightUnsigned(c, 1)",
"shiftRightUnsigned(d, 1)", "shiftRightUnsigned(f, 1)"),
Row(9223372036854775787L, 21, 21.toShort, 21.toByte, null))
}
test("binary log") {
val df = Seq[(Integer, Integer)]((123, null)).toDF("a", "b")
checkAnswer(
df.select(org.apache.spark.sql.functions.log("a"),
org.apache.spark.sql.functions.log(2.0, "a"),
org.apache.spark.sql.functions.log("b")),
Row(math.log(123), math.log(123) / math.log(2), null))
checkAnswer(
df.selectExpr("log(a)", "log(2.0, a)", "log(b)"),
Row(math.log(123), math.log(123) / math.log(2), null))
}
test("abs") {
val input =
Seq[(java.lang.Double, java.lang.Double)]((null, null), (0.0, 0.0), (1.5, 1.5), (-2.5, 2.5))
checkAnswer(
input.toDF("key", "value").select(abs($"key").alias("a")).sort("a"),
input.map(pair => Row(pair._2)))
checkAnswer(
input.toDF("key", "value").selectExpr("abs(key) a").sort("a"),
input.map(pair => Row(pair._2)))
}
test("log2") {
val df = Seq((1, 2)).toDF("a", "b")
checkAnswer(
df.select(log2("b") + log2("a")),
Row(1))
checkAnswer(sql("SELECT LOG2(8), LOG2(null)"), Row(3, null))
}
test("sqrt") {
val df = Seq((1, 4)).toDF("a", "b")
checkAnswer(
df.select(sqrt("a"), sqrt("b")),
Row(1.0, 2.0))
checkAnswer(sql("SELECT SQRT(4.0), SQRT(null)"), Row(2.0, null))
checkAnswer(df.selectExpr("sqrt(a)", "sqrt(b)", "sqrt(null)"), Row(1.0, 2.0, null))
}
test("negative") {
checkAnswer(
sql("SELECT negative(1), negative(0), negative(-1)"),
Row(-1, 0, 1))
}
test("positive") {
val df = Seq((1, -1, "abc")).toDF("a", "b", "c")
checkAnswer(df.selectExpr("positive(a)"), Row(1))
checkAnswer(df.selectExpr("positive(b)"), Row(-1))
}
}
| chenc10/Spark-PAF | sql/core/src/test/scala/org/apache/spark/sql/MathExpressionsSuite.scala | Scala | apache-2.0 | 12,382 |
package io.youi.communication
import fabric.Value
import java.util.concurrent.atomic.AtomicLong
import fabric.rw._
case class Message(id: Long,
`type`: MessageType,
name: Option[String] = None,
method: Option[String] = None,
params: Option[Value] = None,
returnValue: Option[Value] = None,
bytes: Option[Long] = None,
errorMessage: Option[String] = None)
object Message {
private val idGenerator = new AtomicLong(0L)
implicit val rw: ReaderWriter[Message] = ccRW
def invoke(name: String, method: String, params: Value): Message = Message(
id = idGenerator.incrementAndGet(),
`type` = MessageType.Invoke,
name = Some(name),
method = Some(method),
params = Some(params)
)
def response(id: Long, name: String, method: String, returnValue: Value): Message = Message(
id = id,
`type` = MessageType.Response,
name = Some(name),
method = Some(method),
returnValue = Some(returnValue)
)
def uploadStart(fileName: String, bytes: Long): Message = Message(
id = idGenerator.incrementAndGet(),
`type` = MessageType.UploadStart,
name = Some(fileName),
bytes = Some(bytes)
)
def uploadComplete(id: Long, fileName: String): Message = Message(
id = id,
`type` = MessageType.UploadComplete,
name = Some(fileName)
)
def error(id: Long, message: String): Message = Message(
id = id,
`type` = MessageType.Error,
errorMessage = Some(message)
)
} | outr/youi | communication/src/main/scala/io/youi/communication/Message.scala | Scala | mit | 1,567 |
package org.jetbrains.plugins.scala
package codeInspection
package collections
/**
* Nikolay.Tropin
* 5/30/13
*/
class FilterSizeTest extends OperationsOnCollectionInspectionTest {
override val hint = ScalaInspectionBundle.message("filter.size.hint")
def test_1(): Unit = {
val selected = s"Array().${START}filter(x => true).size$END"
checkTextHasError(selected)
val text = "Array().filter(x => true).size"
val result = "Array().count(x => true)"
testQuickFix(text, result, hint)
}
def test_2(): Unit = {
val selected = s"List().${START}filter(x => true).length$END"
checkTextHasError(selected)
val text = "List().filter(x => true).length"
val result = "List().count(x => true)"
testQuickFix(text, result, hint)
}
def test_3(): Unit = {
val selected = s"Map(1 -> 2) ${START}filter (x => true) size$END"
checkTextHasError(selected)
val text = "Map(1 -> 2) filter (x => true) size"
val result = "Map(1 -> 2) count (x => true)"
testQuickFix(text, result, hint)
}
def test_4(): Unit = {
val selected = s"List().${START}filter {x => true}.size$END"
checkTextHasError(selected)
val text =
"""List().filter {
| x => true
|}.size
|""".stripMargin
val result =
"""List().count {
| x => true
|}
|""".stripMargin
testQuickFix(text, result, hint)
}
def test_SCL15437(): Unit = {
val selected =
s"""
|trait LengthTest {
| def foo(): Unit = {
| Seq().${START}filter(_ => true).size$END
|
|
| }
|}
""".stripMargin
checkTextHasError(selected)
val text =
"""
|trait LengthTest {
| def foo(): Unit = {
| Seq().filter(_ => true).size
|
|
| }
|}
""".stripMargin
val result =
"""
|trait LengthTest {
| def foo(): Unit = {
| Seq().count(_ => true)
|
|
| }
|}
""".stripMargin
testQuickFix(text, result, hint)
}
override val classOfInspection = classOf[FilterSizeInspection]
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/codeInspection/collections/FilterSizeTest.scala | Scala | apache-2.0 | 2,169 |
package edu.cmu.lti.nlp.amr.ConceptInvoke
import edu.cmu.lti.nlp.amr._
import edu.cmu.lti.nlp.amr.Train._
import edu.cmu.lti.nlp.amr.BasicFeatureVector._
import edu.cmu.lti.nlp.amr.ConceptInvoke.PhraseConceptPair._
import scala.collection.mutable.ArrayBuffer
import scala.collection.{mutable => m, immutable => i}
object Concepts {
val implementedFeatures = m.Set("fromNERTagger", "dateExpression") // TODO: check
}
class Concepts(options: m.Map[Symbol, String],
phraseConceptPairs: Array[PhraseConceptPair]) {
// This class contains the code used to invoke concepts.
// Concepts are invoked by calling the invoke() method, which returns a list of all
// the concepts that match a span starting at index i of the tokenized sentence.
/******* Concept sources to add *********
- Nominalizations
- List of -er => person ARG0-of things
- Entities from large list
*****************************************/
val conceptTable: m.Map[String, List[PhraseConceptPair]] = m.Map() // maps the first word in the phrase to a list of phraseConceptPairs
for (pair <- phraseConceptPairs) {
val word = pair.words(0)
conceptTable(word) = pair :: conceptTable.getOrElse(word, List())
//logger(2, "conceptTable("+word+") = "+conceptTable(word))
}
val conceptSources = options.getOrElse('stage1SyntheticConcepts, "NER,DateExpr").split(",").toSet
val implementedConceptSources = m.Set("NER","DateExpr","OntoNotes","NEPassThrough","PassThrough","WordNetPassThrough","WordNetPassThrough","verbs","nominalizations")
assert(conceptSources.filterNot(x => implementedConceptSources.contains(x)).size == 0, "Unknown conceptSources: " + conceptSources.filterNot(x => implementedConceptSources.contains(x)).toList.mkString(", "))
private var tokens : Array[String] = Array() // stores sentence.drop(i) (used in the dateEntity code to make it more concise)
var ontoNotes : m.Set[String] = m.Set() // could be multi-map instead
var lemmas : m.Set[String] = m.Set() // TODO: check for lemma in a large morph-analyzed corpus
if (options.contains('stage1Predicates)) {
val Pred = """(.+)-([0-9]+)""".r
for (predicate <- Source.fromFile(options('stage1Predicates)).getLines) {
val Pred(verb, sense) = predicate
ontoNotes += verb
}
}
def invoke(input: Input, i: Int, trainingIndex: Option[Int]) : List[PhraseConceptPair] = {
    // returns a list of all concepts that can be invoked starting at
// position i in input.sentence (i.e. position i in the tokenized input)
// Note: none of the concepts returned have spans that go past the end of the sentence
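    // Illustrative example (made-up tokens): for sentence = Array("Barack", "Obama", "visited", ...)
    // and i = 0, the concept table lookup plus the NER source below can yield pairs whose word
    // spans are List("Barack") and List("Barack", "Obama"), each paired with its own graph fragment.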
val sentence = input.sentence
if (sentence.size <= i || i < 0) {
return List()
}
var conceptList = if (options.contains('stage1TrainingLeaveOneOut) && trainingIndex != None) {
conceptTable.getOrElse(sentence(i), List()).filter(
x => x.words == sentence.slice(i, i+x.words.size).toList && // TODO: is this case insensitive??
x.trainingIndices.filter(j => abs(j - trainingIndex.get) > 20).size > 0 // filter to concepts seen far away
).toList
} else {
conceptTable.getOrElse(sentence(i), List()).filter(
x => x.words == sentence.slice(i, i+x.words.size).toList // TODO: is this case insensitive??
).toList
}
if (conceptSources.contains("NER") && options.contains('ner)) {
conceptList = input.ner.annotation.filter(_.start == i).map(x => namedEntity(input, x)).toList ::: conceptList
//conceptList = input.ner.annotation.filter(_.start == i).map(x => PhraseConceptPair.entity(input, x)).toList ::: conceptList
}
if (conceptSources.contains("DateExpr")) {
conceptList = dateEntities(input, i) ::: conceptList
}
    var onlyPassThrough = conceptList.size == 0     // onlyPassThrough indicates that only the pass-through rules apply for this span
if (conceptSources.contains("OntoNotes")) {
conceptList = ontoNotesLookup(input, i, onlyPassThrough) ::: conceptList
}
if (conceptSources.contains("NEPassThrough")) {
conceptList = NEPassThrough(input, i, onlyPassThrough) ::: conceptList
}
if (conceptSources.contains("PassThrough")) {
conceptList = passThrough(input, i, onlyPassThrough) ::: conceptList
}
if (conceptSources.contains("WordNetPassThrough")) {
conceptList = wordnetPassThrough(input, i, onlyPassThrough) ::: conceptList
}
if (conceptSources.contains("verbs")) {
conceptList = verbs(input, i, onlyPassThrough) ::: conceptList
}
if (conceptSources.contains("nominalizations")) {
conceptList = nominalizations(input, i, onlyPassThrough) ::: conceptList
}
// Normalize the concept list so there are no duplicates by adding all their features
val conceptSet : m.Map[(List[String], String), PhraseConceptPair] = m.Map()
for (concept <- conceptList.filter(x => x.words == sentence.slice(i, i+x.words.size).toList)) { // TODO: make this case insensitive?
val key = (concept.words, concept.graphFrag)
if (conceptSet.contains(key)) {
val old = conceptSet(key)
val feats = new FeatureVector()
feats += old.features
feats += concept.features
val trainingIndices = concept.trainingIndices ::: old.trainingIndices
conceptSet(key) = PhraseConceptPair(old.words, old.graphFrag, feats, trainingIndices)
} else {
conceptSet(key) = concept
}
}
return conceptSet.values.toList
}
def ontoNotesLookup(input: Input, i: Int, onlyPassThrough: Boolean) : List[PhraseConceptPair] = {
val stems = Wordnet.stemmer(input.sentence(i))
val concepts = stems.filter(stem => ontoNotes.contains(stem)).map(stem => PhraseConceptPair(
List(input.sentence(i)),
stem+"-01", // first sense is most common
FeatureVector(m.Map("OntoNotes" -> 1.0)),
List()))
if (onlyPassThrough) { concepts.map(x => x.features.fmap("OntoNotesOnly") = 1.0 ) }
return concepts
}
def NEPassThrough(input: Input, i: Int, onlyPassThrough: Boolean) : List[PhraseConceptPair] = {
    // TODO: improve this to check if the words were observed in other places
var concepts = List[PhraseConceptPair]()
for { j <- Range(1,7)
if i + j < input.sentence.size
words = input.sentence.slice(i,i+j).toList
if words.filterNot(x => x.matches("[A-Za-z0-9.-]*")).size == 0 } { // TODO: improve this regex
concepts = PhraseConceptPair(
words,
if (options.contains('stage1Wiki)) {
"(thing :wiki - :name (name "+words.map(x => ":op "+x).mkString(" ")+"))"
} else {
"(thing :name (name "+words.map(x => ":op "+x).mkString(" ")+"))"
},
FeatureVector(m.Map("NEPassThrough" -> 1.0, "NEPassThrough_len" -> j)),
List()) :: concepts
}
if (onlyPassThrough) { concepts.map(x => x.features.fmap("NEPassThroughOnly") = 1.0 ) }
return concepts
}
def passThrough(input: Input, i: Int, onlyPassThrough: Boolean) : List[PhraseConceptPair] = {
if(input.sentence(i).matches("[A-Za-z0-9]*")) { // TODO: improve this regex
List(PhraseConceptPair(
List(input.sentence(i)),
input.sentence(i),
FeatureVector(m.Map("PassThrough" -> 1.0,
"PassThroughOnly" -> (if(onlyPassThrough) { 1 } else { 0 }) )),
List()))
} else { List() }
}
def wordnetPassThrough(input: Input, i: Int, onlyPassThrough: Boolean) : List[PhraseConceptPair] = {
val word = input.sentence(i)
val stems = Wordnet.stemmer(word)
// TODO: add stems from large annotated corpus
if (stems.size > 0) {
List(PhraseConceptPair(
List(word),
stems.minBy(_.size),
FeatureVector(m.Map("WordnetPassThrough" -> 1.0,
"WordnetPassThroughOnly" -> (if(onlyPassThrough) { 1 } else { 0 }) )),
List()))
} else { List() }
}
def verbs(input: Input, i: Int, onlyPassThrough: Boolean) : List[PhraseConceptPair] = {
var concepts = List[PhraseConceptPair]()
val pos : Array[String] = input.pos.slice(i,i+1) // NOTE: pos.slice is defined in Annotation.slice
if (pos.size > 0 && pos(pos.size-1).startsWith("V")) { // it's a verb
val word = input.sentence(i)
val stems = Wordnet.stemmer(word)
val stem = if (stems.size > 0) { stems.minBy(_.size) } else { word } // TODO: check in large corpus
concepts = List(PhraseConceptPair(
List(word),
stem+"-00", // 00 sense for missing predicates
FeatureVector(m.Map("AddedVerb" -> 1.0,
"AddedVerbOnly" -> (if(onlyPassThrough) { 1 } else { 0 }) )),
List()))
}
return concepts
}
def nominalizations(input: Input, i: Int, onlyPassThrough: Boolean) : List[PhraseConceptPair] = {
// (no change) budget -> budget-01
// (drop -e in predicate that ends in -ate) proliferate-01 -> proliferation, state-01 -> statement
// (drop -ify in predicate) intensify-01 -> intensity, ratify-01 -> ratification
// (drop -ance or -ances) assistance -> assist-01
// (drop -ment or -ments) development -> develop-02
// (drop -ing) discriminating -> discriminate-02 (also drop -e)
// (drop -ion) discrimination -> discriminate-02, discussion -> discuss-01
// (drop -s) addicts -> addict-01, arrests -> arrest-01
// (drop -ant or -ants) combatants -> combat-01
    // (drop -ure) seizure -> seize-01, departure -> depart-01, failure -> fail-01 (not always, though: manufacture -> manufacture-01)
// not as common: (drop -ation) determination -> determine-01 (lots of counter-examples: exaggeration -> exaggerate-01)
// not common (-ees) employees -> employ-01, attendees -> attend-01
// -er: parser -> thing :ARG0-of parse-00 (not very common)
return List()
}
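  // Illustrative sketch only (an assumption added for clarity; not part of the original
  // pipeline and not called from concepts()): candidate predicate stems produced by a few
  // of the suffix rules listed above. Sense suffixes (e.g. "-01") and feature scoring are
  // left to the caller.
  private def nominalizationStemCandidates(word: String): List[String] = {
    val w = word.toLowerCase
    def drop(suffix: String): Option[String] =
      if (w.endsWith(suffix) && w.length > suffix.length) Some(w.dropRight(suffix.length)) else None
    List(
      Some(w),                               // (no change) budget -> budget
      drop("s"),                             // arrests -> arrest
      drop("ments"), drop("ment"),           // development -> develop
      drop("ances"), drop("ance"),           // assistance -> assist
      drop("ants"), drop("ant"),             // combatants -> combat
      drop("ion"),                           // discussion -> discuss
      drop("ing"), drop("ing").map(_ + "e"), // discriminating -> discriminate
      drop("ure"), drop("ure").map(_ + "e")  // departure -> depart, seizure -> seize
    ).flatten.distinct
  }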
  // verbalization (modern -> modernize (to make modern), etc.; not used in AMR: modernize -> modernize-01)
def namedEntity(input: Input, entity: Entity) : PhraseConceptPair = {
val Input(_, sentence, notTokenized, _, _, ner, _) = input
val entityType : String = entity.label match {
case "PER" => "person" // also president
case "ORG" => "organization" // also company, government-organization, criminal-organization
case "LOC" => "country" // also city, world-region, continent, county
case "MISC" => "thing" // also treaty, publication, newspaper, product, war
}
val (start, end) = ner.getSpan((entity.start, entity.end)) // start and end in ner.snt, which is the tokenized text
val (notTokStart, notTokEnd) = notTokenized.getSpan((start, end)) // start and end in notTokenized.snt, which is the original untokenized text
val graphFrag = if (options.contains('stage1Wiki)) {
"(" + entityType + ":wiki - :name (name " + notTokenized.snt.slice(notTokStart, notTokEnd).map(x => ":op \\"" + x.replaceAllLiterally("\\"","") + "\\"").mkString(" ") + "))" // there should be no " in named entities (TODO: does the AMR style guide say if you can escape them?)
} else {
"(" + entityType + " :name (name " + notTokenized.snt.slice(notTokStart, notTokEnd).map(x => ":op \\"" + x.replaceAllLiterally("\\"","") + "\\"").mkString(" ") + "))" // there should be no " in named entities (TODO: does the AMR style guide say if you can escape them?)
}
logger(0, "NER Entity: "+graphFrag)
//logger(1, "(start, end) = "+(start,end))
//logger(1, "ner.snt = "+ner.snt.toList)
//logger(1, "ner.tok = "+ner.tok.toList)
//logger(1, "notTokenized.snt = "+notTokenized.snt.toList)
//logger(1, "notTokenized.tok = "+notTokenized.tok.toList)
return PhraseConceptPair(sentence.slice(start, end).toList,
graphFrag,
FeatureVector(m.Map("ner" -> 1.0, "ner_len" -> (end - start))))
}
def dateEntities(input: Input, start: Int) : List[PhraseConceptPair] = {
logger(2, "Finding date entities")
var list : ArrayBuffer[PhraseConceptPair] = ArrayBuffer()
tokens = input.sentence.drop(start)
    val string = tokens.mkString("\t")
var monthRegex = "January|February|March|April|May|June|July|August|September|October|November|December|(?:Jan|Feb|Mar|Apr|Jun|Jul|Aug|Sept?|Oct|Nov|Dec)[.]?"
monthRegex = monthRegex + "|" + monthRegex.toLowerCase
// 021114 => (date-entity :day 14 :month 11 :year 2002)
    val SixDigitDate = """(([0-9][0-9])([0-9][0-9])([0-9][0-9]))(?:\t.*)?""".r // (?: ) non-capturing group
if (SixDigitDate.pattern.matcher(string).matches) {
list += {
var SixDigitDate(matching, year, month, day) = string
if (year.toInt < 40) { year = "20"+year } else { year = "19"+year }
if (day == "00" && month == "00") { mkYear(matching, year) // 170000 => (date-entity :year 2017)
} else if (day == "00") { mkMonthYear(matching, month, year) // 021100 => (date-entity :month 11 :year 2002)
} else { mkDayMonthYear(matching, day, month, year) } // 021114 => (date-entity :day 14 :month 11 :year 2002)
}
}
// 17 July 2003 => (date-entity :day 17 :month 7 :year 2003)
    val DayMonthYear = ("""(([0-9]?[0-9])\t("""+monthRegex+""")\t([0-9][0-9][0-9][0-9]))(?:\t.*)?""").r // (?: ) non-capturing group
if (DayMonthYear.pattern.matcher(string).matches) {
list += {
var DayMonthYear(matching, day, month, year) = string
mkDayMonthYear(matching, day, month, year)
}
}
// July 2003 => (date-entity :month 7 :year 2003)
    val MonthYear = ("(("+monthRegex+""")\t([0-9][0-9][0-9][0-9]))(?:\t.*)?""").r
if (MonthYear.pattern.matcher(string).matches) {
list += {
var MonthYear(matching, month, year) = string
mkMonthYear(matching, month, year)
}
}
// July 18 , 2008 => (date-entity :day 18 :month 7 :year 2008)
    val MonthDayYear = ("(("+monthRegex+""")\t?([0-9][0-9]?)\t?,?\t([0-9][0-9][0-9][0-9]))(?:\t.*)?""").r
if (MonthDayYear.pattern.matcher(string).matches) {
list += {
var MonthDayYear(matching, month, day, year) = string
mkDayMonthYear(matching, day, month, year)
}
}
// 2007-02-27 => (date-entity :day 27 :month 2 :year 2007)
// 20030106 => (date-entity :day 6 :month 1 :year 2003)
    val EightDigitDate = ("""(([0-9]?[0-9][0-9]?[0-9])\t?[.-]?\t?([0-9][0-9])\t?[.-]?\t?([0-9][0-9]))(?:\t.*)?""").r // (?: ) non-capturing group
if (EightDigitDate.pattern.matcher(string).matches) {
list += {
var EightDigitDate(matching, year, month, day) = string
mkDayMonthYear(matching, day, month, year)
}
}
// 1713 => (date-entity :year 1713)
    val Year = """(([0-9][0-9][0-9][0-9]))(?:\t.*)?""".r
if (Year.pattern.matcher(string).matches) {
list += {
var Year(matching, year) = string
mkYear(matching, year)
}
}
// March => (date-entity :month 3)
    val Month = ("(("+monthRegex+"""))(?:\t.*)?""").r
if (Month.pattern.matcher(string).matches) {
list += {
var Month(matching, month) = string
mkMonth(matching, month)
}
}
return list.toList
}
def mkDayMonthYear(matching: String, day: String, month: String, year: String) : PhraseConceptPair = {
//logger(0, "mkDayMonthYear("+matching+","+day+","+month+","+year+")")
    PhraseConceptPair(tokens.take(matching.count(_ == '\t')+1).toList,
      "(date-entity :day "+day.toInt.toString+" :month "+monthStr(month)+" :year "+year+")",
      FeatureVector(m.Map("datex1" -> 1.0, "datex_len" -> (matching.count(_ == '\t') + 1))))
}
def mkMonthYear(matching: String, month: String, year: String) : PhraseConceptPair = {
    PhraseConceptPair(tokens.take(matching.count(_ == '\t')+1).toList,
      "(date-entity :month "+monthStr(month)+" :year "+year+")",
      FeatureVector(m.Map("datex2" -> 1.0, "datex_len" -> (matching.count(_ == '\t') + 1))))
}
def mkMonth(matching: String, month: String) : PhraseConceptPair = {
    PhraseConceptPair(tokens.take(matching.count(_ == '\t')+1).toList,
      "(date-entity :month "+monthStr(month)+")",
      FeatureVector(m.Map("datex3" -> 1.0, "datex_len" -> (matching.count(_ == '\t') + 1))))
}
def mkYear(matching: String, year: String) : PhraseConceptPair = {
    PhraseConceptPair(tokens.take(matching.count(_ == '\t')+1).toList,
      "(date-entity :year "+year+")",
      FeatureVector(m.Map("datex4" -> 1.0, "datex_len" -> (matching.count(_ == '\t') + 1))))
}
def monthStr(month: String) : String = {
if (month.matches("[0-9]*")) {
month.toInt.toString
} else {
month.take(3).toLowerCase match {
case "jan" => "1"
case "feb" => "2"
case "mar" => "3"
case "apr" => "4"
case "may" => "5"
case "jun" => "6"
case "jul" => "7"
case "aug" => "8"
case "sep" => "9"
case "oct" => "10"
case "nov" => "11"
case "dec" => "12"
case _ => month
}
}
}
}
| jflanigan/jamr | src/ConceptInvoke/Concepts.scala | Scala | bsd-2-clause | 18,748 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.serializer.JavaSerializer
import org.roaringbitmap.RoaringBitmap
import scala.util.Random
class MapStatusSuite extends SparkFunSuite {
test("compressSize") {
assert(MapStatus.compressSize(0L) === 0)
assert(MapStatus.compressSize(1L) === 1)
assert(MapStatus.compressSize(2L) === 8)
assert(MapStatus.compressSize(10L) === 25)
assert((MapStatus.compressSize(1000000L) & 0xFF) === 145)
assert((MapStatus.compressSize(1000000000L) & 0xFF) === 218)
// This last size is bigger than we can encode in a byte, so check that we just return 255
assert((MapStatus.compressSize(1000000000000000000L) & 0xFF) === 255)
}
test("decompressSize") {
assert(MapStatus.decompressSize(0) === 0)
for (size <- Seq(2L, 10L, 100L, 50000L, 1000000L, 1000000000L)) {
val size2 = MapStatus.decompressSize(MapStatus.compressSize(size))
assert(size2 >= 0.99 * size && size2 <= 1.11 * size,
"size " + size + " decompressed to " + size2 + ", which is out of range")
}
}
test("MapStatus should never report non-empty blocks' sizes as 0") {
import Math._
for (
numSizes <- Seq(1, 10, 100, 1000, 10000);
mean <- Seq(0L, 100L, 10000L, Int.MaxValue.toLong);
stddev <- Seq(0.0, 0.01, 0.5, 1.0)
) {
val sizes = Array.fill[Long](numSizes)(abs(round(Random.nextGaussian() * stddev)) + mean)
val status = MapStatus(BlockManagerId("a", "b", 10), sizes)
val status1 = compressAndDecompressMapStatus(status)
for (i <- 0 until numSizes) {
if (sizes(i) != 0) {
val failureMessage = s"Failed with $numSizes sizes with mean=$mean, stddev=$stddev"
assert(status.getSizeForBlock(i) !== 0, failureMessage)
assert(status1.getSizeForBlock(i) !== 0, failureMessage)
}
}
}
}
test("large tasks should use " + classOf[HighlyCompressedMapStatus].getName) {
val sizes = Array.fill[Long](2001)(150L)
val status = MapStatus(null, sizes)
assert(status.isInstanceOf[HighlyCompressedMapStatus])
assert(status.getSizeForBlock(10) === 150L)
assert(status.getSizeForBlock(50) === 150L)
assert(status.getSizeForBlock(99) === 150L)
assert(status.getSizeForBlock(2000) === 150L)
}
test("HighlyCompressedMapStatus: estimated size should be the average non-empty block size") {
val sizes = Array.tabulate[Long](3000) { i => i.toLong }
val avg = sizes.sum / sizes.filter(_ != 0).length
val loc = BlockManagerId("a", "b", 10)
val status = MapStatus(loc, sizes)
val status1 = compressAndDecompressMapStatus(status)
assert(status1.isInstanceOf[HighlyCompressedMapStatus])
assert(status1.location == loc)
for (i <- 0 until 3000) {
val estimate = status1.getSizeForBlock(i)
if (sizes(i) > 0) {
assert(estimate === avg)
}
}
}
def compressAndDecompressMapStatus(status: MapStatus): MapStatus = {
val ser = new JavaSerializer(new SparkConf)
val buf = ser.newInstance().serialize(status)
ser.newInstance().deserialize[MapStatus](buf)
}
test("RoaringBitmap: runOptimize succeeded") {
val r = new RoaringBitmap
(1 to 200000).foreach(i =>
if (i % 200 != 0) {
r.add(i)
}
)
val size1 = r.getSizeInBytes
val success = r.runOptimize()
r.trim()
val size2 = r.getSizeInBytes
assert(size1 > size2)
assert(success)
}
test("RoaringBitmap: runOptimize failed") {
val r = new RoaringBitmap
(1 to 200000).foreach(i =>
if (i % 200 == 0) {
r.add(i)
}
)
val size1 = r.getSizeInBytes
val success = r.runOptimize()
r.trim()
val size2 = r.getSizeInBytes
assert(size1 === size2)
assert(!success)
}
}
| chenc10/Spark-PAF | core/src/test/scala/org/apache/spark/scheduler/MapStatusSuite.scala | Scala | apache-2.0 | 4,685 |
package sbt
import java.io.PrintWriter
object MainLogging
{
def multiLogger(config: MultiLoggerConfig): Logger =
{
import config._
val multi = new MultiLogger(console :: backed :: extra)
// sets multi to the most verbose for clients that inspect the current level
multi setLevel Level.unionAll(backingLevel :: screenLevel :: extra.map(_.getLevel))
// set the specific levels
console setLevel screenLevel
backed setLevel backingLevel
console setTrace screenTrace
backed setTrace backingTrace
multi: Logger
}
def globalDefault(writer: PrintWriter, backing: GlobalLogBacking): GlobalLogging =
{
val backed = defaultBacked()(writer)
val full = multiLogger(defaultMultiConfig( backed ) )
GlobalLogging(full, backed, backing)
}
def defaultMultiConfig(backing: AbstractLogger): MultiLoggerConfig =
new MultiLoggerConfig(defaultScreen(ConsoleLogger.noSuppressedMessage), backing, Nil, Level.Info, Level.Debug, -1, Int.MaxValue)
def defaultScreen(): AbstractLogger = ConsoleLogger()
def defaultScreen(suppressedMessage: SuppressedTraceContext => Option[String]): AbstractLogger = ConsoleLogger(suppressedMessage = suppressedMessage)
def defaultBacked(useColor: Boolean = ConsoleLogger.formatEnabled): PrintWriter => ConsoleLogger =
to => ConsoleLogger(ConsoleLogger.printWriterOut(to), useColor = useColor)
}
final case class MultiLoggerConfig(console: AbstractLogger, backed: AbstractLogger, extra: List[AbstractLogger],
screenLevel: Level.Value, backingLevel: Level.Value, screenTrace: Int, backingTrace: Int) | jroper/sbt | util/log/MainLogging.scala | Scala | bsd-3-clause | 1,554 |
package com.gilt.opm
import java.util.{ConcurrentModificationException, Date, UUID}
import com.gilt.opm.lock.LockManager
import com.gilt.opm.query._
import com.gilt.opm.storage.MongoMapper
import com.mongodb.casbah.Imports._
import com.mongodb.casbah.commons.Implicits.wrapDBObj
import com.mongodb.casbah.commons.{MongoDBList, MongoDBObject}
import com.mongodb.casbah.{WriteConcern => CWriteConcern}
import org.bson.types.BasicBSONList
/**
 * Mixin to provide mongo storage for OpmObjects.
*
* Implementers of the trait must supply a Mongo Collection instance, and may optionally
* override the default wavelength.
*
* Should be able to store any OpmObject instance, and whatever that instance aggregates,
* as long as Casbah can persist it as-is (so, maybe needs to implement DBObject?). I
* suspect we'll need to revisit that as we use real objects. The casbah documentation
* is pretty weak as to how it relates to the DBObject support in the java driver.
*
* @author Eric Bowman
* @since 8/22/12 12:18 PM
*/
trait OpmMongoStorage[V <: OpmObject] extends OpmStorage[V] with MongoMapper with LockManager {
import com.gilt.opm.OpmFactory._
import com.gilt.opm.OpmIntrospection.{ClassField, MetaFields, TimestampField}
import com.gilt.opm.OpmMongoStorage._
def collection: MongoCollection
def writeConcern = CWriteConcern.valueOf("SAFE")
private implicit lazy val _writeConcern = writeConcern
def wavelength: Int = 5
// value frame + (wavelength - 1) diff frames
def toMongoMapper: OpmToMongoMapper = noOpMongoMapper
// Note: If a field is an Option, this class will wrap it correctly; in fromMongoMapper, you only need to map to the
// base class. If you include the Option, you'll end up with something like this: Some(Some(...)) instead of Some(...).
def fromMongoMapper: OpmFromMongoMapper = noOpMongoMapper
private[this] lazy val defaultToMongoMapper: PartialFunction[(String, Option[Class[_]], AnyRef), AnyRef] = {
case (f, _, s) if s.isInstanceOf[String] => s
case (f, _, d) if d.isInstanceOf[Date] => d
case (f, _, u) if u.isInstanceOf[UUID] => u
case (f, _, n) if n == None => None
case (f, optFieldClass, some) if some.isInstanceOf[Some[_]] => Some(mapToMongo(f, optFieldClass, OpmField(some.asInstanceOf[Option[AnyRef]].get)))
case (f, optFieldClass, iter) if iter.isInstanceOf[Iterable[_]] =>
val b = MongoDBList.newBuilder
// So that we can work around type-erasure for nested collections, as we write a collection,
// we write the first element of the collection a special object that tells us what the type of
// the collection was. So then when we are loading it if we find this special object, we use it
// to set the type of the collection. ComplexCollectionTest exercises this logic. Note that we
// preserve backwards compatibility here -- if this object isn't present, we just run the old logic,
// which will generate a Vector instead of the specific collection type. If you comment out where
// we write the MongoDBObject below, you can see that test fail and better understand the kind of
// situation where this helps.
val anIter = iter.asInstanceOf[Iterable[_]]
if (anIter.nonEmpty && optFieldClass.isEmpty) {
b += MongoDBObject("_t_" -> collectionCname(anIter))
}
iter.asInstanceOf[Iterable[_]].foreach(item => b += mapToMongo(f, None, OpmField(item)))
b.result()
case (f, optFieldClass, tuple) if tuple.isInstanceOf[Tuple2[_, _]] =>
// tuples get encoded as lists-of-2. This is needed for properly encoding a Map.
val t = tuple.asInstanceOf[Tuple2[_, _]]
(mapToMongo(f, None, OpmField(t._1)), mapToMongo(f, None, OpmField(t._2)))
case (f, _, o) if o.isInstanceOf[OpmObject] =>
val proxy = OpmFactory.recoverModel(o.asInstanceOf[OpmObject])
val builder = MongoDBObject.newBuilder
builder += "_nested_opm_" -> true
builder += Classname -> proxy.clazz.getName
builder += Timestamp -> proxy.timestamp
builder += Key -> proxy.key
nestedToStorage(Option(o.asInstanceOf[OpmObject]))(proxy.manifest).foreach {
storage =>
storage.maybePut(o.asInstanceOf[OpmObject])(proxy.manifest)
}
builder.result()
}
  /// translates an iterable into the string we use to name the container class.
private def collectionCname(instance: Iterable[_]): String = {
instance match {
case list if list.isInstanceOf[List[_]] => "l"
case set if set.isInstanceOf[Set[_]] => "s"
case map if map.isInstanceOf[Map[_, _]] => "m"
case vec if vec.isInstanceOf[Vector[_]] => "v"
case _ => sys.error("Dont know how to encode collection %s of type %s".format(instance, instance.getClass))
}
}
// given an iterable and the name used to encode it (generated by collectionCname), produces
// a collection of the correct type.
private def collectionCnameDecoder[T](list: Iterable[T], cname: String): Iterable[Any] = {
cname match {
case "l" => list.toList
case "s" => list.toSet
case "m" => mongoListToMap(list)
case "v" => list.toIndexedSeq
case unknown => sys.error("Ooops, unknown cname %s".format(unknown))
}
}
// Turns an iterable that we loaded from mongo, that we know needs to be a map, into a map.
private def mongoListToMap[T](list: Iterable[T]): Map[Any, Any] = {
list.map {
case v if v.isInstanceOf[Iterable[_]] =>
val vit = v.asInstanceOf[Iterable[_]]
(vit.head, vit.last)
}.toMap
}
private[this] lazy val defaultFromMongoMapper: PartialFunction[(String, Option[Class[_]], AnyRef), AnyRef] = {
// BSON can't represent every primitive type, so these have to be handled specially
case (_, Some(cls), n: Number) if cls == classOf[Byte] => n.byteValue.asInstanceOf[AnyRef]
case (_, Some(cls), s: String) if cls == classOf[Char] && !s.isEmpty => s.charAt(0).asInstanceOf[AnyRef]
case (_, Some(cls), n: Number) if cls == classOf[Short] => n.shortValue.asInstanceOf[AnyRef]
case (_, Some(cls), n: Number) if cls == classOf[Float] => n.floatValue.asInstanceOf[AnyRef]
case (_, _, s) if s.isInstanceOf[String] => s
case (_, _, d) if d.isInstanceOf[Date] => d
case (_, _, u) if u.isInstanceOf[UUID] => u
case (_, _, n) if n == None => None
case (field, fieldClassOpt, some) if some.isInstanceOf[Some[_]] =>
mapFromMongo(field, fieldClassOpt, some.asInstanceOf[Some[_]].get).asInstanceOf[AnyRef]
case (field, fieldClassOpt, iter) if iter.isInstanceOf[BasicBSONList] =>
// here we look for the functionality we added later, where we store type information
// about the container type. We stay backwards compatible here by looking for that
// special object, and behaving specially if it's found, and falling back to previous
// behaviour if it is not. This is tested by ComplexCollectionTest.
import scala.collection.JavaConverters._
val loadedSeq = iter.asInstanceOf[BasicBSONList].asScala.toIndexedSeq
val cts: (Option[String], IndexedSeq[AnyRef]) = loadedSeq.headOption.map {
case obj: BasicDBObject if wrapDBObj(obj).get("_t_").isDefined =>
(Some(obj.get("_t_").toString), loadedSeq.tail)
case _ =>
(None, loadedSeq)
} getOrElse {
(None, loadedSeq)
}
cts._2.map(mapFromMongo(field, None, _).value) match {
        // here we deal with container types where we can infer the collection type from the OpmObject method signature
case aSet if fieldClassOpt == Some(classOf[Set[_]]) => aSet.toSet
case aMap if fieldClassOpt == Some(classOf[Map[_, _]]) => mongoListToMap(aMap)
case aList if fieldClassOpt == Some(classOf[List[_]]) => aList.toList
case aVector if fieldClassOpt == Some(classOf[Vector[_]]) => aVector.toIndexedSeq
case anIndexedSeq if fieldClassOpt == Some(classOf[IndexedSeq[_]]) => anIndexedSeq.toIndexedSeq
case aSeq if fieldClassOpt == Some(classOf[Seq[_]]) => aSeq.toIndexedSeq
case other if cts._1.isDefined => collectionCnameDecoder(other, cts._1.get)
case other => other
}
case (field, fieldClassOpt, o) if o.isInstanceOf[DBObject] && o.asInstanceOf[DBObject].get("_nested_opm_").asInstanceOf[Boolean] =>
val mongoDbObject = wrapDBObj(o.asInstanceOf[DBObject])
val className = mongoDbObject.as[String](Classname)
val timestamp = mongoDbObject.as[Long](Timestamp)
val key = mongoDbObject.as[String](Key)
val clazz = Class.forName(className)
assert(classOf[OpmObject].isAssignableFrom(clazz))
val loadedOpt = nestedToStorage(None)(Manifest.classType(clazz)).map {
storage: OpmStorage[_] =>
storage.get(key)(Manifest.classType(clazz))
}.getOrElse {
sys.error("Could not find an OpmStorage instance for class %s (did you override nestedToStorage?)".format(clazz))
}
loadedOpt.map {
opm =>
val richOpm = OpmObject.toSetter(opm.asInstanceOf[OpmObject])(Manifest.classType(clazz))
richOpm.timeline.find(_.opmTimestamp == timestamp).getOrElse {
sys.error("Could not load an object(%s, %s) with opmTimestamp %s".format(className, key, timestamp))
}
}.getOrElse {
sys.error("Could not figure out how to load (%s, %s, %s)".format(field, fieldClassOpt, o))
}
}
// if the supplied toMongo mapper(s) can't handle a particular value, it gets passed through here. You might
// want to set a breakpoint here and/or add some printlns to understand what's going on, and add new
// handles in the defaultToMongoMapper or your own custom toMongoMapper
private[this] lazy val identity: PartialFunction[(String, Option[Class[_]], AnyRef), AnyRef] = {
case x => x._3
}
// maps a field to its mongo representation. Note that the field value is largely ignored except for
// log messages and error reports. If the fieldType exists, that means that we were able to derive enough
// metadata about it from the signature of the OpmObject trait so that we don't need to encode any more data
// about the types in the database itself -- we can derive it from the class. If it's None, then we can't,
// so the runtime needs to write some metadata about things like container types so that, for example, nested
// collections can be recovered from mongo.
private[this] def mapToMongo(field: String, fieldType: Option[Class[_]], value: OpmField): Any = {
value match {
case OpmField(_, Some(pending)) => MongoDBObject(Pending -> pending.time)
case OpmField(ref: AnyRef, None) =>
(toMongoMapper orElse defaultToMongoMapper orElse identity)((field, fieldType, ref))
case OpmField(anyVal, None) =>
anyVal
}
}
// Can be used by other parts of the system to map values to Mongo-compatible - used specifically in search
// queries for non-standard values. This will intentionally blow an exception if the field does not exist.
private[opm] def mapToMongo(field: String, value: Any)(implicit mf: Manifest[V]): Any = {
val method = mf.runtimeClass.getMethod(field)
mapToMongo(field, Option(method.getReturnType), OpmField(value))
}
private[this] def mapFromMongo(field: String, fieldType: Option[Class[_]], value: Any): OpmField = {
val isOption = fieldType.isDefined && fieldType.get.isAssignableFrom(classOf[Option[_]])
value match {
case dBObject: DBObject if !value.isInstanceOf[BasicBSONList] && wrapDBObj(dBObject).contains(Pending) =>
OpmField(null, Some(NanoTimestamp(wrapDBObj(dBObject).get(Pending).get.asInstanceOf[Long])))
case _ => OpmField(Option(value).map { _ =>
val result = (fromMongoMapper orElse defaultFromMongoMapper orElse identity)(field, fieldType, value.asInstanceOf[AnyRef])
Option(result).map { _ =>
if (isOption) Some(result) else result
}.getOrElse {
if (isOption) None else null
}
}.getOrElse {
if (isOption) None else null
})
}
}
private[this] val sortFields = MongoDBObject(Timestamp -> -1, Type -> 1)
private lazy val installedKeyIndex: Unit = collection.ensureIndex(MongoDBObject(Key -> 1))
override def put(obj: V)(implicit mf: Manifest[V]) {
installedKeyIndex
val model: OpmProxy = recoverModel(obj)
require(model.key != OpmIntrospection.UndefinedKey, "You can't put an object created without a key")
if (collection.findOne(MongoDBObject(Key -> model.key)).isEmpty) {
create(model)
} else {
update(model)
}
}
// writes the model to the database.
private[this] def create(model: OpmProxy)(implicit mf: Manifest[OpmObject]) {
val history = model #:: model.history.toStream
if (history.size > 1) {
history.zip(history.tail).foreach(r => require(r._1.timestamp != r._2.timestamp, "Equal timestamps: %s".format(r)))
}
writeWavelets(model.key, history)
}
// updates the database with the latest changes to the object. Assumes an object with this key has already
// been passed to the create method.
private[this] def update(model: OpmProxy)(implicit mf: Manifest[V]) {
// Two cases to consider: the client user may have loaded an object and then added to it,
// in which case we need to stitch. Or, he may have created a new object with this key,
// and we need to completely replace the old timeline with this timeline. The only way to
// be sure is to load what's in the database and try to find where the histories converge.
// create lock object for model.key
// block for some amount of time if it is not available. Fail on timeout.
// Once we have the lock, confirm that model is a "fast forward" update of
// oldModel. If there is anything in oldModel that's not in model, blow an exception.
// then do the write and clear the lock.
import scala.util.control.Exception._
allCatch either this.lock(model.key) match {
case Right(lock) =>
try {
val oldModel: Option[V] = get(model.key)
require(oldModel.isDefined, "Tried to update %s; not already in the database".format(model))
val firstTimestamp = oldModel.get.opmTimestamp
val curStream = model #:: model.history.toStream
val alreadyWritten = curStream.dropWhile(_.timestamp > firstTimestamp)
if (alreadyWritten.nonEmpty && alreadyWritten.head.timestamp == firstTimestamp) {
// we need to stitch (but this is basically fast-forward)
// This is hard. I have a picture that might help explain this, but expect to invest some time
// forming the mental model if you really want to understand this.
val mongoStream = collection.find(MongoDBObject(Key -> model.key)).sort(sortFields).toStream.map(wrapDBObj)
val lastFrame = mongoStream.take(wavelength)
require(lastFrame.nonEmpty, "No mongo records found for key %s; did you create first?".format(model.key))
val oldPhase = (wavelength + lastFrame.takeWhile(_.as[String](Type) == DiffType).size) % wavelength
val updateSize = curStream.takeWhile(_.timestamp > lastFrame.head.as[Long](Timestamp)).size
val startPhase = (wavelength - (updateSize % wavelength) + oldPhase) % wavelength
val initialDiffCount = (wavelength - startPhase) % wavelength
writeDiffs(model.key, curStream.zip(curStream.tail).take(initialDiffCount))
writeWavelets(model.key, curStream.drop(initialDiffCount).take(updateSize - initialDiffCount))
} else {
// todo: this could be quite slow if there were many, many changes made since these were loaded!
// note this is an optimized version of:
// if (!model.history.map(_.timestamp).toSet.intersect(oldModel.get.timeline.map(_.opmTimestamp).toSet).isEmpty) {
import OpmObject._
val timestamps: Stream[Long] = model.history.toStream.map(_.timestamp)
val oldTimeline = oldModel.get.timeline
val intersect = timestamps.flatMap(ts => oldTimeline.find(_.opmTimestamp == ts)).nonEmpty
if (intersect) {
// model & oldModel share history, but the most recent oldModel timestamp is not in
// model's history (so our histories have diverged, and we don't know what to do)
throw new ConcurrentModificationException("Object %s was modified concurrently".format(model))
} else {
// we're rewriting (replacing!) history ... hope that's what you wanted
remove(model.key)
create(model)
}
}
} finally {
lock.unlock()
}
case Left(e) if e.getMessage.contains("Timed out") =>
sys.error("Could not acquire write lock for %s within %s ms".format(model.key, waitMs))
case Left(e) => throw e
}
}
// when we retrieve by a key, we always load back to the first value frame. So that may mean
// we load `wavelength` records, or it could mean that we load 1 record ... it all depends on what the
// most recent record is. Let's say that we have a wavelength of 5 and the tip is 2 diff records
// then a value record. So we load the last 5 records so we are guaranteed to get a value record.
// then we take the two diff records and the value record and assemble 3 fully-formed
// OpmObjects. We construct a stream so that if the user tries to look at past history
// it can find and load the next wavelet. This strikes me as "complicated", and is an advanced
// use of scala streams (?), and also has some memory risk. We could obviate that possibly by
// keeping softkeys and a mechanism to load on demand, but I guess we'll see.
override def get(key: String)(implicit mf: Manifest[V]): Option[V] = {
val mongoStream = collection.find(MongoDBObject(Key -> key)).sort(sortFields).map(wrapDBObj).toStream
val initialDiffs = mongoStream.takeWhile(_.as[String](Type) == DiffType)
mongoStream.dropWhile(_.as[String](Type) == DiffType).headOption.flatMap {
lastValueObj =>
val lastValue = toOpmProxy(key, lastValueObj.asDBObject)
// We have to look forwards in time if there is a set of diffs right at the tip of the
// mongo record stream; so special processing to assemble those from the first value object,
// and the diffs that come after it in time.
val initialObjs = if (initialDiffs.isEmpty) {
Seq.empty
} else {
initialDiffs.reverse.foldLeft(Seq(lastValue)) {
(objs: Seq[OpmProxy], dbObj: MongoDBObject) =>
val changes: Set[Diff] = objToDiffSet(dbObj, mf.runtimeClass, Forward)
OpmProxy(key, OpmFactory.evolve(objs.head.fields, changes)) +: objs
}
}
// this is fairly magic, leveraging streams & recursion.
// This defines a new stream which maps the stream of OpmProxy
// instances (which are in turn being lazy-loaded from mongo)
// into a stream of final objects. A final object is tricky,
// since each instance needs a reference to the stream itself,
// as its timeline. A lot of staring at the screen and scratching
// my head were required to get this amazingly short, deep piece
// of code in place.
def assembleFinalObjects(stream: Stream[OpmProxy]): Stream[V] = {
if (stream.isEmpty) {
Stream.empty
} else {
lazy val tail = assembleFinalObjects(stream.tail)
val head = stream.head.copy(history = new OpmProxy.History(tail.map(OpmFactory.recoverModel(_))))
OpmFactory.newProxy(head) #:: tail
}
}
assembleFinalObjects(initialObjs.toStream #::: loadStream(key, lastValue, mongoStream.drop(initialObjs.size), mf.runtimeClass)).headOption
}
}
def allRecords(implicit mf: Manifest[V]): OpmQueryResult[V] = {
val mongoStream = collection.distinct(Key).
toStream.
flatMap((key: Any) => get(key.toString))
OpmQueryResult(mongoStream, Some((field, value) => mapToMongo(field, value))).search(OpmQueryNoFilter, matchInverse = false)
}
/**
* Use this to pull a list of object keys that have been updated within the given time period.
*
* Returns a Stream of key Strings that have been updated within the requested date range.
*
* @param start: The timestamp to start at. If None, it will start at the beginning of time.
* @param end: The timestamp to end at. If None, it will not cut off at any date.
*/
def getUpdatedKeys(start: Option[NanoTimestamp], end: Option[NanoTimestamp]): Stream[String] = {
val builder = MongoDBList.newBuilder
start.foreach { startTimestamp =>
builder += MongoDBObject("$or" -> MongoDBList(
MongoDBObject("$and" -> MongoDBList(
MongoDBObject(Type -> DiffType),
OpmPropertyGreaterThanOrEqual(ForwardTimestamp, startTimestamp.time).toMongoDBObject()
)),
MongoDBObject("$and" -> MongoDBList(
MongoDBObject(Type -> ValueType),
OpmPropertyGreaterThanOrEqual(Timestamp, startTimestamp.time).toMongoDBObject()
))
))
}
end.foreach { endTimestamp =>
builder += MongoDBObject("$or" -> MongoDBList(
MongoDBObject("$and" -> MongoDBList(
MongoDBObject(Type -> DiffType),
OpmPropertyLessThanOrEqual(ForwardTimestamp, endTimestamp.time).toMongoDBObject()
)),
MongoDBObject("$and" -> MongoDBList(
MongoDBObject(Type -> ValueType),
OpmPropertyLessThanOrEqual(Timestamp, endTimestamp.time).toMongoDBObject()
))
))
}
val query = builder.result()
collection.distinct(Key, query.size match {
case 0 => null
case 1 => query.head.asInstanceOf[DBObject]
case _ => MongoDBObject("$and" -> query)
}).
toStream.
map(_.toString)
}
/**
* Kicks off a search process. The fully-chained search looks like this: search(_.propertyName).equals("value")
*
* This uses the defined to-mongo mapping to help translate searched-for values. For example, Gilt Guid's can't be
* deserialized to JSON without defining toMongoMapper; search makes use of this since it's here already.
*
* Returns the list of objects that match the query.
*
* @see com.gilt.opm.query.OpmSearcher
* @param v: A 'method' that indicates which property should be searched against.
* @tparam T: The class of the property being searched. In practice this will be inferred from the property given.
*/
def search[T](v: V => T)(implicit mf: Manifest[V]): OpmSearcherHelper[V, T] = {
OpmSearcher[V](
finishSearch = (query, matchInverse) => finishSearch(query, matchInverse),
valueTranslator = Some((field, value) => mapToMongo(field, value))
).search(v)
}
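  // Example usage (a sketch; assumes a hypothetical model V that exposes a `name: String`
  // property and a `storage` instance of this trait). The chained shape follows the
  // scaladoc above, where `equals` is the finishing step supplied by OpmSearcherHelper,
  // not AnyRef.equals:
  //
  //   val matches = storage.search(_.name).equals("widget")   // an OpmQueryResult[V]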
/**
* Completes the search process with the query collected by OpmSearcher, returning a result object that
* can be further chained for more-detailed searches.
*
* @param query: The requested query, as determined by the chained search call.
* @param matchInverse: Match the inverse of the query, i.e. 'not'.
*/
private def finishSearch(query: OpmPropertyQuery, matchInverse: Boolean)(implicit mf: Manifest[V]): OpmQueryResult[V] = {
// The mongoStream simply pulls the keys of records for which the property matches the given value in either a
// value or diff record. This may include records that no longer match the query, so the stream is again filtered
// by the same query once the OPM objects are constructed [the .find(query) below].
val mongoStream = collection.distinct(Key, MongoDBObject("$or" -> MongoDBList(
query.toMongoDBObject("%s.".format(Instance), matchInverse),
query.toMongoDBObject("%s.".format(Forward), matchInverse)
))).
toStream.
flatMap((key: Any) => get(key.toString))
OpmQueryResult[V](mongoStream, Some((field, value) => mapToMongo(field, value))).search(query, matchInverse)
}
// deletes all records with the given key, doing nothing if the key doesn't exist.
override def remove(key: String) {
collection.remove(MongoDBObject(Key -> key))
}
private[this] def loadStream(key: String, head: OpmProxy, cursorStream: Stream[MongoDBObject], clazz: Class[_]): Stream[OpmProxy] = {
cursorStream.headOption.map {
prevObj =>
if (prevObj.as[String](Type) == ValueType) {
val prev = toOpmProxy(key, prevObj.asDBObject)
prev #:: loadStream(key, prev, cursorStream.tail, clazz)
} else {
assert(prevObj.as[String](Type) == DiffType, "Unknown type: %s".format(prevObj))
val changes: Set[Diff] = objToDiffSet(prevObj, clazz, Reverse)
val prev = OpmProxy(key, OpmFactory.evolve(head.fields, changes))
prev #:: loadStream(key, prev, cursorStream.tail, clazz)
}
}.getOrElse(Stream.empty)
}
private[this] def objToDiffSet(obj: MongoDBObject, clazz: Class[_], direction: String): Set[Diff] = {
require(direction == Forward || direction == Reverse,
"direction must be either %s or %s; was %s".format(Forward, Reverse, direction))
import scala.language.existentials
val diffIterable = wrapDBObj(obj.as[DBObject](direction)).map { case (key, value) =>
val valueType = if (OpmIntrospection.MetaFields.contains(key)) None
else Some(OpmProxy.fieldType(key, clazz))
value match {
/* Unfortunately, a mapping from key to null can mean one of several things:
* - a. (in a forwards diff) a non-Option value was removed from the model entirely
* - b. (in a forwards diff) an Option value was removed from the model entirely
* - c. (in a forwards diff) an Option was set from Some to None
* - d. (in a reverse diff) a non-Option value was added to the model
* - e. (in a reverse diff) an Option value was added to the model
* - f. (in a reverse diff) an Option was set from None to Some
       * I can't handle (e) without breaking (f), so if a reverse diff is used to construct a
       * previous version from before the addition of an Option value, accessing that value
       * will return None rather than throw NoSuchMethodException. This actually seems like the
* correct behavior.
*/
case null if valueType.isEmpty || !valueType.get.isAssignableFrom(classOf[Option[_]]) =>
Diff(key, None) // (a), (b), (d)
case null =>
Diff(key, Some(OpmField(None))) // (c), (e), (f)
case value: Any =>
Diff(key, Some(mapFromMongo(key, valueType, value)))
}
}
diffIterable.toSet
}
private[this] def toOpmProxy(key: String, valueRecord: DBObject): OpmProxy = {
require(valueRecord.get(Type) == ValueType, "Record was not value record: %s".format(valueRecord))
val instance = wrapDBObj(valueRecord.get(Instance).asInstanceOf[DBObject])
// casbah blows an exception if you do instance(key) and expect a null value back.
val fields = instance.keys.map(key => instance.get(key).map(key -> _).getOrElse(key -> null)).toMap
val record = wrapDBObj(valueRecord)
val clazz = Class.forName(record.as[String](Classname))
val timeStamp = record.as[Long](Timestamp)
opmProxy(key, clazz, timeStamp, fields.map(kv => kv._1 -> mapFromMongo(kv._1, Some(OpmProxy.fieldType(kv._1, clazz)), kv._2)))
}
private[this] def opmProxy(key: String, clazz: Class[_], timeStamp: Long, fields: Map[String, OpmField]) = {
OpmProxy(key, fields ++ Map(ClassField -> OpmField(clazz), TimestampField -> OpmField(timeStamp)))
}
// given a sequence of phase=0 waves, writes them to the database
private[this] def writeWavelets(key: String, stream: Seq[OpmProxy]) {
stream.grouped(wavelength).foreach(writeWavelet(key, _))
}
// given a "wavelet" of models, write it to the database. this means
// writing a single value record, followed by wavelength - 1 diff records
private[this] def writeWavelet(key: String, models: Seq[OpmProxy]) {
writeValue(key, models.head)
if (models.size > 1) {
writeDiffs(key, models.zip(models.tail))
}
}
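  // Worked illustration (assuming wavelength = 3): for versions v7 (newest) .. v1 (oldest),
  // the two methods above emit
  //   value(v7), diff(v7 <-> v6), diff(v6 <-> v5),
  //   value(v4), diff(v4 <-> v3), diff(v3 <-> v2),
  //   value(v1)
  // so a backwards read always reaches a full value record within at most (wavelength - 1)
  // diff records.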
// writes a sequence of diff records. For each pair, the _1 member is expected to have
// have been created after the _2 member
private[this] def writeDiffs(key: String, pairs: Seq[(OpmProxy, OpmProxy)]) {
pairs.foreach {
(pair: (OpmProxy, OpmProxy)) =>
require(pair._1.timestamp > pair._2.timestamp, "time ordering not maintained: %s".format(pair))
writeDiff(key = key, later = pair._1, earlier = pair._2)
}
}
private[this] def createId(key: Any, obj: OpmProxy, recordType: String): String = {
require(recordType == ValueType || recordType == DiffType, "Unknown record type %s".format(recordType))
"%s:%s:%s".format(key, obj.timestamp, recordType)
}
// writes a single bi-directional diff record
private[this] def writeDiff(key: Any, later: OpmProxy, earlier: OpmProxy) {
val forwardTimestamp = later.timestamp
val reverseTimestamp = earlier.timestamp
val forwardDiffs = diffModels(later, earlier)
val reverseDiffs = diffModels(earlier, later)
val builder = MongoDBObject.newBuilder
builder += "_id" -> createId(key, later, DiffType)
builder += Key -> key
builder += Type -> DiffType
builder += Timestamp -> reverseTimestamp
builder += ForwardTimestamp -> forwardTimestamp
// todo: encode the timestamp as a delta instead of an absolute value, to save some bytes
val forwardDiffBuilder = MongoDBObject.newBuilder
forwardDiffBuilder += (TimestampField -> later.timestamp)
builder += Forward -> forwardDiffs.foldLeft(forwardDiffBuilder) {
case (b, Diff(field, newValueType)) =>
b += field -> newValueType.map(mapToMongo(field, Some(later.fieldType(field)), _))
b
}.result()
val reverseDiffBuilder = MongoDBObject.newBuilder
reverseDiffBuilder += (TimestampField -> earlier.timestamp)
builder += Reverse -> reverseDiffs.foldLeft(reverseDiffBuilder) {
case (b, Diff(field, newValueType)) =>
b += field -> newValueType.map(mapToMongo(field, Some(later.fieldType(field)), _))
b
}.result()
collection.save(builder.result())
}
// writes a single value record
private def writeValue(key: Any, obj: OpmProxy) {
val fields = obj.fields
val builder = MongoDBObject.newBuilder
builder += "_id" -> createId(key, obj, ValueType)
obj.history.toStream.headOption.foreach {
prev =>
builder += PrevKey -> createId(key, prev, if (wavelength == 1) ValueType else DiffType)
}
builder += Key -> key
builder += Type -> ValueType
builder += Timestamp -> fields(TimestampField).value
builder += Classname -> fields(ClassField).value.asInstanceOf[Class[_]].getName
builder += Instance -> fields.filterNot(f => MetaFields(f._1)).foldLeft(MongoDBObject.newBuilder) {
case (b, f) =>
b += f._1 -> mapToMongo(f._1, Some(obj.fieldType(f._1)), f._2)
b
}.result
collection.save(builder.result())
}
}
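// A minimal wiring sketch (the names below are illustrative assumptions, not part of this
// library): a concrete storage for some OpmObject trait `User` supplies the required
// collection and may tune the wavelength; any abstract members inherited from LockManager
// (e.g. its lock collection) would also need to be provided.
//
//   object UserStorage extends OpmMongoStorage[User] {
//     val collection = MongoClient()("opm")("users")
//     override def wavelength = 10
//     // ... LockManager configuration here ...
//   }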
object OpmMongoStorage {
val Key = "k"
val PrevKey = "p"
val Timestamp = "ts"
val ForwardTimestamp = "fts"
val Classname = "c"
val Instance = "i"
val Forward = "f"
val Pending = "pnd"
val Reverse = "r"
val Type = "t"
// There are 2 types of record, a "ValueType" record and a "DiffType" record. One tricky thing is that we end up
// with two records with the same timestamp when we store a value record: one to hold the class, and one to hold
  // the diff to the next node (assuming that the ValueType record has saves after it). So we actually depend on the fact
  // that "d" sorts before "v" when we fetch, ordering first by time stamp (descending) and then type (ascending)
  // in order to get a view that makes sense going backwards through time. This way we see the forward diff from
  // the value record to the first diff after it BEFORE we see the value record. Got it? Sorry, this is tricky stuff.
// Turn back!
val ValueType = "v"
val DiffType = "d"
}
trait OpmAuditedMongoStorage[T <: OpmAuditedObject[U], U] extends OpmMongoStorage[T] with OpmAuditedStorage[T, U]
| gilt/opm | src/main/scala/com/gilt/opm/OpmMongoStorage.scala | Scala | mit | 32,410 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.utils
import org.apache.flink.api.common.operators.Order
import org.apache.flink.table.api.TableException
import org.apache.flink.table.planner.calcite.FlinkPlannerImpl
import org.apache.calcite.rel.RelFieldCollation.Direction
import org.apache.calcite.rel.`type`._
import org.apache.calcite.rel.{RelCollation, RelFieldCollation}
import org.apache.calcite.rex.{RexLiteral, RexNode}
import scala.collection.mutable
/**
* Common methods for Flink sort operators.
*/
object SortUtil {
/**
* Returns limit start value (never null).
*/
def getLimitStart(offset: RexNode): Long = if (offset != null) RexLiteral.intValue(offset) else 0L
/**
* Returns limit end value (never null).
*/
def getLimitEnd(offset: RexNode, fetch: RexNode): Long = {
if (fetch != null) {
getLimitStart(offset) + RexLiteral.intValue(fetch)
} else {
Long.MaxValue
}
}
/**
* Returns the direction of the first sort field.
*
* @param collationSort The list of sort collations.
* @return The direction of the first sort field.
*/
def getFirstSortDirection(collationSort: RelCollation): Direction = {
collationSort.getFieldCollations.get(0).direction
}
/**
* Returns the first sort field.
*
* @param collationSort The list of sort collations.
* @param rowType The row type of the input.
* @return The first sort field.
*/
def getFirstSortField(collationSort: RelCollation, rowType: RelDataType): RelDataTypeField = {
val idx = collationSort.getFieldCollations.get(0).getFieldIndex
rowType.getFieldList.get(idx)
}
/** Returns the default null direction if not specified. */
def getNullDefaultOrders(ascendings: Array[Boolean]): Array[Boolean] = {
ascendings.map { asc =>
FlinkPlannerImpl.defaultNullCollation.last(!asc)
}
}
/** Returns the default null direction if not specified. */
def getNullDefaultOrder(ascending: Boolean): Boolean = {
FlinkPlannerImpl.defaultNullCollation.last(!ascending)
}
def getKeysAndOrders(
fieldCollations: Seq[RelFieldCollation]): (Array[Int], Array[Boolean], Array[Boolean]) = {
val fieldMappingDirections = fieldCollations.map(c =>
(c.getFieldIndex, directionToOrder(c.getDirection)))
val keys = fieldMappingDirections.map(_._1)
val orders = fieldMappingDirections.map(_._2 == Order.ASCENDING)
val nullsIsLast = fieldCollations.map(_.nullDirection).map {
case RelFieldCollation.NullDirection.LAST => true
case RelFieldCollation.NullDirection.FIRST => false
case RelFieldCollation.NullDirection.UNSPECIFIED =>
throw new TableException(s"Do not support UNSPECIFIED for null order.")
}.toArray
deduplicateSortKeys(keys.toArray, orders.toArray, nullsIsLast)
}
def deduplicateSortKeys(
keys: Array[Int],
orders: Array[Boolean],
nullsIsLast: Array[Boolean]): (Array[Int], Array[Boolean], Array[Boolean]) = {
val keySet = new mutable.HashSet[Int]
val keyBuffer = new mutable.ArrayBuffer[Int]
val orderBuffer = new mutable.ArrayBuffer[Boolean]
val nullsIsLastBuffer = new mutable.ArrayBuffer[Boolean]
for (i <- keys.indices) {
if (keySet.add(keys(i))) {
keyBuffer += keys(i)
orderBuffer += orders(i)
nullsIsLastBuffer += nullsIsLast(i)
}
}
(keyBuffer.toArray, orderBuffer.toArray, nullsIsLastBuffer.toArray)
}
def directionToOrder(direction: Direction): Order = {
direction match {
case Direction.ASCENDING | Direction.STRICTLY_ASCENDING => Order.ASCENDING
case Direction.DESCENDING | Direction.STRICTLY_DESCENDING => Order.DESCENDING
case _ => throw new IllegalArgumentException("Unsupported direction.")
}
}
}
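// Example (illustrative values; `offsetLiteral` and `fetchLiteral` are assumed RexLiterals
// for OFFSET 10 and FETCH 5):
//   SortUtil.getLimitStart(offsetLiteral)             // 10
//   SortUtil.getLimitEnd(offsetLiteral, fetchLiteral) // 15
//   SortUtil.getLimitEnd(offsetLiteral, null)         // Long.MaxValue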
| hequn8128/flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/utils/SortUtil.scala | Scala | apache-2.0 | 4,604 |
package com.yoohaemin.hufsclassroom.repository
import org.scalacheck._
import org.scalatest.{FlatSpecLike, Matchers}
import org.scalatest.prop.PropertyChecks
import com.yoohaemin.hufsclassroom.model.{Email, User, UserName}
class ConversionSpec extends UserArbitraries with FlatSpecLike with Matchers {
forAll { (dto: UserDTO) =>
it should s"convert a dto: $dto into an User" in {
dto.toUser should be(User(UserName(dto._2), Email(dto._3)))
}
}
}
trait UserArbitraries extends PropertyChecks {
implicit val userDtoArbitraries: Arbitrary[UserDTO] = Arbitrary[UserDTO] {
for {
i <- Gen.posNum[Int]
u <- Gen.alphaStr
e <- Gen.alphaStr
} yield (i, u, e)
}
}
| yoo-haemin/hufs-classroom | service/test/src/com/yoohaemin/hufsclassroom/repository/ConversionSpec.scala | Scala | agpl-3.0 | 708 |
package scorex.transaction.state.database.blockchain
import org.h2.mvstore.{MVMap, MVStore}
import scorex.account.Account
import scorex.block.Block
import scorex.block.Block.BlockId
import scorex.consensus.ConsensusModule
import scorex.transaction.BlockStorage._
import scorex.transaction.History.BlockchainScore
import scorex.transaction.{BlockChain, TransactionModule}
import scorex.utils.ScorexLogging
import scala.collection.JavaConversions._
import scala.collection.concurrent.TrieMap
import scala.util.{Failure, Success, Try}
/**
  * If no data folder is provided, the blockchain lives in RAM (useful for tests)
*/
class StoredBlockchain(db: MVStore)
(implicit consensusModule: ConsensusModule[_],
transactionModule: TransactionModule[_])
extends BlockChain with ScorexLogging {
case class BlockchainPersistence(database: MVStore) {
val blocks: MVMap[Int, Array[Byte]] = database.openMap("blocks")
val signatures: MVMap[Int, BlockId] = database.openMap("signatures")
val signaturesReverse: MVMap[BlockId, Int] = database.openMap("signaturesReverse")
private val BlocksCacheSizeLimit: Int = 1000
private var blocksCacheSize: Int = 0
private val blocksCache: TrieMap[Int, Option[Block]] = TrieMap.empty
    // TODO: remove when no blockchains without signaturesReverse remain
if (signaturesReverse.size() != signatures.size()) {
signaturesReverse.clear()
signatures.keySet().foreach(k => signaturesReverse.put(signatures.get(k), k))
database.commit()
}
val scoreMap: MVMap[Int, BigInt] = database.openMap("score")
    // if there are some uncommitted changes from the last run, discard them
if (signatures.size() > 0) database.rollback()
def writeBlock(height: Int, block: Block): Try[Unit] = Try {
blocks.put(height, block.bytes)
val blockScore = consensusModule.blockScore(block)
scoreMap.put(height, ConsensusModule.cumulativeBlockScore(score(), blockScore))
signatures.put(height, block.uniqueId)
signaturesReverse.put(block.uniqueId, height)
}
def readBlock(height: Int): Option[Block] = {
if(blocksCacheSize > BlocksCacheSizeLimit) {
blocksCacheSize = 0
blocksCache.clear()
} else {
blocksCacheSize = blocksCacheSize + 1
}
blocksCache.getOrElseUpdate(height,
Try(Option(blocks.get(height))).toOption.flatten.flatMap(b => Block.parseBytes(b).toOption))
}
def deleteBlock(height: Int): Unit = {
blocksCache.remove(height)
blocks.remove(height)
val vOpt = Option(signatures.remove(height))
vOpt.map(v => signaturesReverse.remove(v))
}
def contains(id: BlockId): Boolean = Option(signaturesReverse.get(id)).isDefined
def height(): Int = signatures.size()
def heightOf(id: BlockId): Option[Int] = Option(signaturesReverse.get(id))
def score(): BlockchainScore = if (height() > 0) scoreMap.get(height()) else 0
def score(id: BlockId): BlockchainScore = heightOf(id).map(scoreMap.get(_)).getOrElse(0)
}
private val blockStorage: BlockchainPersistence = BlockchainPersistence(db)
override def appendBlock(block: Block): Try[BlocksToProcess] = synchronized {
Try {
val parent = block.referenceField
if ((height() == 0) || (lastBlock.uniqueId sameElements parent.value)) {
val h = height() + 1
blockStorage.writeBlock(h, block) match {
case Success(_) => Seq(block)
          case Failure(t) => throw new Error("Error while storing a blockchain change: " + t)
}
} else {
throw new Error(s"Appending block ${block.json} which parent is not last block in blockchain")
}
}
}
override private[transaction] def discardBlock(): BlockChain = synchronized {
require(height() > 1, "Chain is empty or contains genesis block only, can't make rollback")
val h = height()
blockStorage.deleteBlock(h)
this
}
override def blockAt(height: Int): Option[Block] = synchronized {
blockStorage.readBlock(height)
}
override def lastBlockIds(howMany: Int): Seq[BlockId] =
(Math.max(1, height() - howMany + 1) to height()).flatMap(i => Option(blockStorage.signatures.get(i))).reverse
override def contains(signature: Array[Byte]): Boolean = blockStorage.contains(signature)
override def height(): Int = blockStorage.height()
override def score(): BlockchainScore = blockStorage.score()
override def scoreOf(id: BlockId): BlockchainScore = blockStorage.score(id)
override def heightOf(blockSignature: Array[Byte]): Option[Int] = blockStorage.heightOf(blockSignature)
override def blockById(blockId: BlockId): Option[Block] = heightOf(blockId).flatMap(blockAt)
override def children(block: Block): Seq[Block] = heightOf(block).flatMap(h => blockAt(h + 1)).toSeq
override def generatedBy(account: Account, from: Int, to: Int): Seq[Block] = {
(from to to).toStream.flatMap { h =>
blockAt(h).flatMap { block =>
if (consensusModule.generators(block).contains(account)) Some(block) else None
}
}
}
override def toString: String = ((1 to height()) map { h =>
val bl = blockAt(h).get
s"$h -- ${bl.uniqueId.mkString} -- ${bl.referenceField.value.mkString}"
}).mkString("\\n")
}
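// Usage sketch (assumes implicit ConsensusModule and TransactionModule instances in scope):
// backing the chain with a file-less MVStore keeps everything in memory, which is handy for tests.
//
//   val inMemoryChain = new StoredBlockchain(new MVStore.Builder().open())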
| alexeykiselev/WavesScorex | scorex-transaction/src/main/scala/scorex/transaction/state/database/blockchain/StoredBlockchain.scala | Scala | cc0-1.0 | 5,294 |
package com.tresata.ganitha.mahout
import org.apache.mahout.math.{ Vector, DenseVector }
import org.scalatest.FunSpec
import Implicits._
class RichVectorSpec extends FunSpec {
describe("A RichVector") {
it("should support apply constructors") {
assert(RichVector(6, List((1, 1.0), (3, 2.0), (5, 3.0))) == RichVector(Array(0.0, 1.0, 0.0, 2.0, 0.0, 3.0)))
}
it("should support getters and setters") {
val vector0 = RichVector(Array(1.0, 2.0, 3.0))
vector0(1) = 5.0
assert(vector0(1) == 5.0)
vector0.updateMany(List((1, 1.0), (2, 2.0)))
assert(vector0 == RichVector(Array(1.0, 1.0, 2.0)))
}
it("should support aggregation") {
val vector0 = new DenseVector(Array(1.0, 2.0, 3.0))
val vector1 = new DenseVector(Array(4.0, 5.0, 6.0))
assert(vector0.aggregate((_ : Double) + (_ : Double), math.pow(_ : Double, 2)) == 14)
assert(vector0.aggregate(vector1, (_ : Double) + (_ : Double), (_ : Double) * (_ : Double)) == 32)
assert(vector0.nonZero.map((x : (Int, Double)) => x._2 * x._2).sum == 14)
assert(vector0.vectorMap(vector1){ _ * _}.fold(0.0){_ + _} == 32)
}
it("should support scalar operations") {
val vector0 = new DenseVector(Array(1.0, 2.0, 3.0))
assert(((vector0 * 2) / 2) == vector0)
assert(((vector0 + 2) - 2) == vector0)
}
it("should support vector operations") {
val vector0 = new DenseVector(Array(1.0, 2.0, 3.0))
val vector1 = new DenseVector(Array(4.0, 5.0, 6.0))
assert(vector0 + vector1 - vector0 == vector1)
assert((vector0 * vector1) / vector1 == vector0)
}
it("should support iterable operations") {
val vector0 = new DenseVector(Array(1.0, 2.0, 3.0))
assert(vector0.count(_ > 1.0) == 2)
assert(vector0.foldLeft(0D)(_ + _) == 6.0)
assert(vector0.nonZero.foldLeft(0D)(_ + _._2) == 6.0)
}
it("should support map-like operations") {
val vector0 = new DenseVector(Array(1.0, 2.0, 3.0))
assert(vector0.map((_ : Double) * 2) sameElements new DenseVector(Array(2.0, 4.0, 6.0)))
assert(vector0.vectorMap(_ * 2) == new DenseVector(Array(2.0, 4.0, 6.0)))
}
}
}
| tresata/ganitha | ganitha-mahout/src/test/scala/com/tresata/ganitha/mahout/RichVectorSpec.scala | Scala | apache-2.0 | 2,196 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.iterators
import org.apache.accumulo.core.client.IteratorSetting
import org.locationtech.geomesa.index.filters.S3Filter
import org.locationtech.geomesa.index.index.s3.S3IndexValues
class S3Iterator extends RowFilterIterator[S3Filter](S3Filter)
object S3Iterator {
def configure(values: S3IndexValues, prefix: Int, priority: Int): IteratorSetting = {
val is = new IteratorSetting(priority, "s3", classOf[S3Iterator])
// index space values for comparing in the iterator
S3Filter.serializeToStrings(S3Filter(values)).foreach { case (k, v) => is.addOption(k, v) }
// account for shard and table sharing bytes
is.addOption(RowFilterIterator.RowOffsetKey, prefix.toString)
is
}
}
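// Usage sketch (illustrative assumptions: `indexValues` comes from the query planner and
// `scanner` is any Accumulo ScannerBase; the prefix and priority values are made up):
//
//   val setting = S3Iterator.configure(indexValues, prefix = 2, priority = 23)
//   scanner.addScanIterator(setting)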
| locationtech/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/main/scala/org/locationtech/geomesa/accumulo/iterators/S3Iterator.scala | Scala | apache-2.0 | 1,212 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.test.SharedSQLContext
class SortSuite extends SparkPlanTest with SharedSQLContext {
  // This test was originally added as an example of how to use [[SparkPlanTest]];
  // it's not designed to be a comprehensive test of ExternalSort.
  test("basic sorting using ExternalSort") {
val input = Seq(
("Hello", 4, 2.0),
("Hello", 1, 1.0),
("World", 8, 3.0)
)
checkAnswer(
input.toDF("a", "b", "c"),
ExternalSort('a.asc :: 'b.asc :: Nil, global = true, _: SparkPlan),
input.sortBy(t => (t._1, t._2)).map(Row.fromTuple),
sortAnswers = false)
checkAnswer(
input.toDF("a", "b", "c"),
ExternalSort('b.asc :: 'a.asc :: Nil, global = true, _: SparkPlan),
input.sortBy(t => (t._2, t._1)).map(Row.fromTuple),
sortAnswers = false)
}
}
| tophua/spark1.52 | sql/core/src/test/scala/org/apache/spark/sql/execution/SortSuite.scala | Scala | apache-2.0 | 1,875 |
package com.themillhousegroup.l7.xml
import scala.xml._
import scala.xml.Group
import scala.xml.Comment
import scala.xml.NamespaceBinding
/**
* Knows how to write an `Elem` *just-so* to be minimally different from
* a "natively-created" L7 XML file.
*/
object LayerSevenXMLWriter {
def write(w: java.io.Writer,
node: Node,
minimizeTags: MinimizeMode.Value = MinimizeMode.Default) {
w.write("""<?xml version="1.0" encoding="UTF-8" standalone="no"?>""")
w.write(LayerSevenXMLWriter.serialize(node, minimizeTags = minimizeTags).toString)
}
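  // A minimal usage sketch (illustrative only; the file names are assumptions, not part of this
  // file): load an existing Layer7 policy document and write it back with the L7-compatible
  // serialization below.
  //   val policy = scala.xml.XML.loadFile("policy.xml")
  //   val out = new java.io.FileWriter("policy-out.xml")
  //   try LayerSevenXMLWriter.write(out, policy) finally out.close()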
/**
* Ripped wholesale from scala.xml.Utility and bashed into shape.
* Namely, put out elements in the form:
*
* <Elem xmlns:foo att1="bar" att2="baz"> ...
*
* for "compatibility" with Layer7 XML output.
*
* If we don't do this, Elems get serialized as:
*
* <Elem att2="baz" att1="bar" xmlns:foo> ...
*
* i.e. namespace at the end, and attribute-order flipped.
*/
def serialize(
x: Node,
pscope: NamespaceBinding = TopScope,
sb: StringBuilder = new StringBuilder,
stripComments: Boolean = false,
decodeEntities: Boolean = true,
preserveWhitespace: Boolean = false,
minimizeTags: MinimizeMode.Value = MinimizeMode.Default): StringBuilder =
{
x match {
case c: Comment if !stripComments => c buildString sb
case s: SpecialNode => s buildString sb
case g: Group =>
for (c <- g.nodes) serialize(c, g.scope, sb, minimizeTags = minimizeTags); sb
case el: Elem =>
// print tag with namespace declarations
sb.append('<')
el.nameToString(sb)
el.scope.buildString(sb, pscope)
if (el.attributes ne null) flipAttribs(el).attributes.buildString(sb)
if (el.child.isEmpty &&
(minimizeTags == MinimizeMode.Always ||
(minimizeTags == MinimizeMode.Default && el.minimizeEmpty))) {
// no children, so use short form: <xyz .../>
sb.append("/>")
} else {
// children, so use long form: <xyz ...>...</xyz>
sb.append('>')
sequenceToXML(el.child, el.scope, sb, stripComments)
sb.append("</")
el.nameToString(sb)
sb.append('>')
}
case _ => throw new IllegalArgumentException("Don't know how to serialize a " + x.getClass.getName)
}
}
/** Ripped from scala.xml.Utility and used by serialize for child nodes */
def sequenceToXML(
children: Seq[Node],
pscope: NamespaceBinding = TopScope,
sb: StringBuilder = new StringBuilder,
stripComments: Boolean = false,
decodeEntities: Boolean = true,
preserveWhitespace: Boolean = false,
minimizeTags: MinimizeMode.Value = MinimizeMode.Default): Unit =
{
if (children.isEmpty) return
else if (children forall isAtomAndNotText) { // add space
val it = children.iterator
val f = it.next()
serialize(f, pscope, sb, stripComments, decodeEntities, preserveWhitespace, minimizeTags)
while (it.hasNext) {
val x = it.next()
sb.append(' ')
serialize(x, pscope, sb, stripComments, decodeEntities, preserveWhitespace, minimizeTags)
}
} else children foreach { serialize(_, pscope, sb, stripComments, decodeEntities, preserveWhitespace, minimizeTags) }
}
private[xml] def isAtomAndNotText(x: Node) = x.isAtom && !x.isInstanceOf[Text]
/**
* By default, Scala's XML support reads attributes in 'reverse order' -
* while it's not normally a problem, it is when we are trying to
* minimise diffs when we write it back. So we reverse them here.
*/
private def flipAttribs(e: Elem): Elem = {
var nm = MetaData.normalize(Null, e.scope)
e.attributes.toSeq.reverse.foreach { att =>
att match {
case md: MetaData => nm = nm.append(md, TopScope)
}
}
e.copy(attributes = nm)
}
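  // Illustrative note (assumed behaviour, following the doc comment above): for an element
  // parsed from
  //   <a x="1" y="2"/>
  // the default serializer would emit <a y="2" x="1"/>; flipAttribs reverses the attribute
  // metadata so the output preserves the original order, <a x="1" y="2"/>.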
}
| themillhousegroup/l7-merge | src/main/scala/com/themillhousegroup/l7/xml/LayerSevenXMLWriter.scala | Scala | mit | 3,966 |
package com.twitter.inject.server
import com.google.common.net.{HttpHeaders, MediaType}
import com.google.inject.Stage
import com.twitter.conversions.time._
import com.twitter.finagle.builder.ClientBuilder
import com.twitter.finagle.http._
import com.twitter.finagle.service.Backoff._
import com.twitter.finagle.service.RetryPolicy
import com.twitter.finagle.service.RetryPolicy._
import com.twitter.finagle.stats.{InMemoryStatsReceiver, NullStatsReceiver, StatsReceiver}
import com.twitter.finagle.{ChannelClosedException, Service}
import com.twitter.inject.PoolUtils
import com.twitter.inject.app.{InjectionServiceModule, StartupTimeoutException}
import com.twitter.inject.conversions.map._
import com.twitter.inject.modules.InMemoryStatsReceiverModule
import com.twitter.inject.server.EmbeddedTwitterServer._
import com.twitter.inject.server.PortUtils._
import com.twitter.server.AdminHttpServer
import com.twitter.util._
import java.net.{InetSocketAddress, URI}
import java.util.concurrent.TimeUnit._
import org.apache.commons.lang.reflect.FieldUtils
import org.scalatest.Matchers
object EmbeddedTwitterServer {
private def resolveFlags(useSocksProxy: Boolean, flags: Map[String, String]) = {
if (useSocksProxy) {
flags ++ Map(
"com.twitter.server.resolverZkHosts" -> PortUtils.loopbackAddressForPort(2181),
"com.twitter.finagle.socks.socksProxyHost" -> PortUtils.loopbackAddress,
"com.twitter.finagle.socks.socksProxyPort" -> "50001")
}
else {
flags
}
}
}
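// Illustrative sketch only (MyTestableServer below is a made-up name, not part of this file):
// a test typically wraps a concrete server instance, checks health through the admin interface,
// and closes the embedded server when done, e.g.
//
//   val server = new EmbeddedTwitterServer(new MyTestableServer, flags = Map("foo" -> "bar"))
//   try {
//     server.assertHealthy()
//     server.httpGetAdmin("/admin/registry.json")
//   } finally {
//     server.close()
//   }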
/**
* EmbeddedTwitterServer allows a [[com.twitter.server.TwitterServer]] serving http or thrift endpoints to be started
 * locally (on ephemeral ports) and tested through its http/thrift interfaces.
*
* Note: All initialization fields are lazy to aid running multiple tests inside an IDE at the same time
* since IDEs typically "pre-construct" ALL the tests before running each one.
*
* @param twitterServer The [[com.twitter.server.TwitterServer]] to be started for testing.
* @param flags Command line flags (e.g. "foo"->"bar" is translated into -foo=bar). See: [[com.twitter.app.Flag]].
* @param args Extra command line arguments.
* @param waitForWarmup Once the server is started, wait for server warmup to be completed
* @param stage [[com.google.inject.Stage]] used to create the server's injector. Since EmbeddedTwitterServer is used for testing,
* we default to Stage.DEVELOPMENT. This makes it possible to only mock objects that are used in a given test,
* at the expense of not checking that the entire object graph is valid. As such, you should always have at
* least one Stage.PRODUCTION test for your service (which eagerly creates all classes at startup)
* @param useSocksProxy Use a tunneled socks proxy for external service discovery/calls (useful for manually run external
* integration tests that connect to external services).
* @param defaultRequestHeaders Headers to always send to the embedded server.
* @param streamResponse Toggle to not unwrap response content body to allow caller to stream response.
* @param verbose Enable verbose logging during test runs.
* @param disableTestLogging Disable all logging emitted from the test infrastructure.
* @param maxStartupTimeSeconds Maximum seconds to wait for embedded server to start. If exceeded a
* [[com.twitter.inject.app.StartupTimeoutException]] is thrown.
*/
class EmbeddedTwitterServer(
twitterServer: com.twitter.server.TwitterServer,
flags: Map[String, String] = Map(),
args: Seq[String] = Seq(),
waitForWarmup: Boolean = true,
stage: Stage = Stage.DEVELOPMENT,
useSocksProxy: Boolean = false,
defaultRequestHeaders: Map[String, String] = Map(),
streamResponse: Boolean = false,
verbose: Boolean = false,
disableTestLogging: Boolean = false,
maxStartupTimeSeconds: Int = 60)
extends Matchers {
/* Additional Constructors */
def this(twitterServer: Ports) = {
this(twitterServer, stage = Stage.PRODUCTION)
}
/* Main Constructor */
require(!isSingletonObject(twitterServer),
"server must be a new instance rather than a singleton (e.g. \\"new " +
"FooServer\\" instead of \\"FooServerMain\\" where FooServerMain is " +
"defined as \\"object FooServerMain extends FooServer\\"")
if (isInjectable) {
// overwrite com.google.inject.Stage if the underlying
// embedded server is a com.twitter.inject.server.TwitterServer.
injectableServer.stage = stage
// Add framework override modules
injectableServer.addFrameworkOverrideModules(InMemoryStatsReceiverModule)
}
/* Fields */
val name = twitterServer.name
private val mainRunnerFuturePool = PoolUtils.newFixedPool("Embedded " + name)
//Mutable state
private var starting = false
private var started = false
protected[inject] var closed = false
private var _mainResult: Future[Unit] = _
// This needs to be volatile because it is set in mainRunnerFuturePool onFailure
// which is a different thread than waitForServerStarted, where it's read.
@volatile private var startupFailedThrowable: Option[Throwable] = None
/* Lazy Fields */
lazy val httpAdminClient = {
start()
createHttpClient(
"httpAdminClient",
httpAdminPort)
}
lazy val isInjectable = twitterServer.isInstanceOf[TwitterServer]
lazy val injectableServer = twitterServer.asInstanceOf[TwitterServer]
lazy val injector = {
start()
injectableServer.injector
}
lazy val statsReceiver = if (isInjectable) injector.instance[StatsReceiver] else new InMemoryStatsReceiver
lazy val inMemoryStatsReceiver = statsReceiver.asInstanceOf[InMemoryStatsReceiver]
lazy val adminHostAndPort = PortUtils.loopbackAddressForPort(httpAdminPort)
/* Public */
def bind[T : Manifest](instance: T): EmbeddedTwitterServer = {
bindInstance[T](instance)
this
}
def mainResult: Future[Unit] = {
start()
if (_mainResult == null) {
throw new Exception("Server needs to be started by calling EmbeddedTwitterServer#start()")
}
else {
_mainResult
}
}
def isStarted = started
// NOTE: Start is called in various places to "lazily start the server" as needed
def start(): Unit = {
if (!starting && !started) {
starting = true //mutation
runNonExitingMain()
if (waitForWarmup) {
waitForServerStarted()
}
started = true //mutation
starting = false //mutation
}
}
def close(): Unit = {
if (!closed) {
twitterServer.log.clearHandlers()
infoBanner(s"Closing ${this.getClass.getSimpleName}: " + name)
try {
Await.result(twitterServer.close())
mainRunnerFuturePool.executor.shutdown()
} catch {
case e: Throwable =>
info(s"Error while closing ${this.getClass.getSimpleName}: $e")
}
closed = true
}
}
/**
* NOTE: We avoid using slf4j-api info logging so that we can differentiate the
* underlying server logs from the testing framework logging without requiring a
* test logging configuration to be loaded.
* @param str - the string message to log
*/
def info(str: String): Unit = {
if (!disableTestLogging) {
println(str)
}
}
def infoBanner(str: String): Unit = {
info("\\n")
info("=" * 75)
info(str)
info("=" * 75)
}
def assertStarted(started: Boolean = true): Unit = {
assert(isInjectable)
start()
injectableServer.started should be(started)
}
def assertHealthy(healthy: Boolean = true): Unit = {
healthResponse(healthy).get()
}
def isHealthy: Boolean = {
httpAdminPort != 0 &&
healthResponse(shouldBeHealthy = true).isReturn
}
def httpAdminPort: Int = {
getPort(twitterServer.adminBoundAddress)
}
def adminHttpServerRoutes: Seq[AdminHttpServer.Route] = {
val allRoutesField = FieldUtils.getField(twitterServer.getClass, "com$twitter$server$AdminHttpServer$$allRoutes", true)
allRoutesField.get(twitterServer).asInstanceOf[Seq[AdminHttpServer.Route]]
}
def clearStats(): Unit = {
inMemoryStatsReceiver.counters.clear()
inMemoryStatsReceiver.stats.clear()
inMemoryStatsReceiver.gauges.clear()
}
def statsMap = inMemoryStatsReceiver.stats.iterator.toMap.mapKeys(keyStr).toSortedMap
def countersMap = inMemoryStatsReceiver.counters.iterator.toMap.mapKeys(keyStr).toSortedMap
def gaugeMap = inMemoryStatsReceiver.gauges.iterator.toMap.mapKeys(keyStr).toSortedMap
def printStats(includeGauges: Boolean = true): Unit = {
infoBanner(name + " Stats")
for ((key, values) <- statsMap) {
val avg = values.sum / values.size
val valuesStr = values.mkString("[", ", ", "]")
info(f"$key%-70s = $avg = $valuesStr")
}
info("\\nCounters:")
for ((key, value) <- countersMap) {
info(f"$key%-70s = $value")
}
if (includeGauges) {
info("\\nGauges:")
for ((key, value) <- inMemoryStatsReceiver.gauges.iterator.toMap.mapKeys(keyStr).toSortedMap) {
info(f"$key%-70s = ${value()}")
}
}
}
def getCounter(name: String): Int = {
countersMap.getOrElse(name, 0)
}
def assertCounter(name: String, expected: Int): Unit = {
getCounter(name) should equal(expected)
}
def assertCounter(name: String)(callback: Int => Boolean): Unit = {
callback(getCounter(name)) should be(true)
}
def getStat(name: String): Seq[Float] = {
statsMap.getOrElse(name, Seq())
}
def assertStat(name: String, expected: Seq[Float]): Unit = {
getStat(name) should equal(expected)
}
def getGauge(name: String): Float = {
gaugeMap.get(name) map { _.apply() } getOrElse 0f
}
def assertGauge(name: String, expected: Float): Unit = {
val value = getGauge(name)
value should equal(expected)
}
def httpGetAdmin(
path: String,
accept: MediaType = null,
headers: Map[String, String] = Map(),
suppress: Boolean = false,
andExpect: Status = Status.Ok,
withLocation: String = null,
withBody: String = null): Response = {
val request = createApiRequest(path, Method.Get)
httpExecute(httpAdminClient, request, addAcceptHeader(accept, headers), suppress, andExpect, withLocation, withBody)
}
/* Protected */
protected def httpExecute(
client: Service[Request, Response],
request: Request,
headers: Map[String, String] = Map(),
suppress: Boolean = false,
andExpect: Status = Status.Ok,
withLocation: String = null,
withBody: String = null): Response = {
start()
/* Pre - Execute */
/* Don't overwrite request.headers potentially in given request */
val defaultHeaders = defaultRequestHeaders filterKeys { !request.headerMap.contains(_) }
addOrRemoveHeaders(request, defaultHeaders)
// headers added last so they can overwrite "defaults"
addOrRemoveHeaders(request, headers)
printRequest(request, suppress)
/* Execute */
val response = handleRequest(request, client = client)
/* Post - Execute */
printResponseMetadata(response, suppress)
printResponseBody(response, suppress)
if (andExpect != null && response.status != andExpect) {
assert(response.status == andExpect, receivedResponseStr(response))
}
if (withBody != null) {
assert(response.contentString == withBody, receivedResponseStr(response))
}
if (withLocation != null) {
assert(response.location.get.endsWith(withLocation), "\\nDiffering Location\\n\\nExpected Location is: "
+ withLocation
+ " \\nActual Location is: "
+ response.location.get
+ receivedResponseStr(response))
}
response
}
protected def createHttpClient(
name: String,
port: Int,
tcpConnectTimeout: Duration = 60.seconds,
connectTimeout: Duration = 60.seconds,
requestTimeout: Duration = 300.seconds,
retryPolicy: RetryPolicy[Try[Any]] = httpRetryPolicy,
secure: Boolean = false): Service[Request, Response] = {
val host = new InetSocketAddress(PortUtils.loopbackAddress, port)
val builder = ClientBuilder()
.name(name)
.codec(Http(_streaming = streamResponse))
.tcpConnectTimeout(tcpConnectTimeout)
.connectTimeout(connectTimeout)
.requestTimeout(requestTimeout)
.hosts(host)
.hostConnectionLimit(75)
.retryPolicy(retryPolicy)
.reportTo(NullStatsReceiver)
.failFast(false)
if (secure)
builder.tlsWithoutValidation().build()
else
builder.build()
}
protected def httpRetryPolicy: RetryPolicy[Try[Any]] = {
backoff(
constant(1.second) take 15) {
case Throw(e: ChannelClosedException) =>
println("Retrying ChannelClosedException")
true
}
}
protected def prettyRequestBody(request: Request): String = {
request.contentString
}
protected def printNonEmptyResponseBody(response: Response): Unit = {
info(response.contentString + "\\n")
}
protected def createApiRequest(path: String, method: Method = Method.Get): Request = {
val pathToUse = if (path.startsWith("http"))
URI.create(path).getPath
else
path
Request(method, pathToUse)
}
protected def nonInjectableServerStarted(): Boolean = {
isHealthy
}
protected def logStartup(): Unit = {
infoBanner("Server Started: " + name)
info(s"AdminHttp -> http://$adminHostAndPort/admin")
}
protected def updateFlags(map: Map[String, String]) = {
if (!verbose)
map + ("log.level" -> "WARNING")
else
map
}
protected def combineArgs(): Array[String] = {
val flagsStr =
flagsAsArgs(
updateFlags(
resolveFlags(useSocksProxy, flags)))
("-admin.port=" + PortUtils.ephemeralLoopback) +: (args ++ flagsStr).toArray
}
protected def bindInstance[T: Manifest](instance: T): Unit = {
if (!isInjectable) {
throw new IllegalStateException("Cannot call bind() with a non-injectable underlying server." )
}
injectableServer.addFrameworkOverrideModules(new InjectionServiceModule(instance))
}
/* Private */
private def keyStr(keys: Seq[String]): String = {
keys.mkString("/")
}
private def receivedResponseStr(response: Response): String = {
"\\n\\nReceived Response:\\n" + response.encodeString()
}
private def handleRequest(
request: Request,
client: Service[Request, Response]): Response = {
val futureResponse = client(request)
val elapsed = Stopwatch.start()
try {
Await.result(futureResponse)
} catch {
case e: Throwable =>
println("ERROR in request: " + request + " " + e + " in " + elapsed().inUnit(MILLISECONDS) + " ms")
throw e
}
}
private def printRequest(request: Request, suppress: Boolean): Unit = {
if (!suppress) {
val headers = request.headerMap.mkString(
"[Header]\\t",
"\\n[Header]\\t",
"")
val msg = "HTTP " + request.method + " " + request.uri + "\\n" + headers
if (request.contentString.isEmpty)
infoBanner(msg)
else
infoBanner(msg + "\\n" + prettyRequestBody(request))
}
}
private def printResponseMetadata(response: Response, suppress: Boolean): Unit = {
if (!suppress) {
info("-" * 75)
info("[Status]\\t" + response.status)
info(response.headerMap.mkString(
"[Header]\\t",
"\\n[Header]\\t",
""))
}
}
private def printResponseBody(response: Response, suppress: Boolean): Unit = {
if (!suppress) {
if (response.isChunked) {
//no-op
}
else if (response.contentString.isEmpty) {
info("*EmptyBody*")
}
else {
printNonEmptyResponseBody(response)
}
}
}
// Deletes request headers with null-values in map.
private def addOrRemoveHeaders(request: Request, headers: Map[String, String]): Unit = {
for ((key, value) <- headers) {
if (value == null) {
request.headerMap.remove(key)
} else {
request.headerMap.set(key, value)
}
}
}
private def addAcceptHeader(
accept: MediaType,
headers: Map[String, String]): Map[String, String] = {
if (accept != null)
headers + (HttpHeaders.ACCEPT -> accept.toString)
else
headers
}
private def healthResponse(shouldBeHealthy: Boolean = true): Try[Response] = {
val expectedBody = if (shouldBeHealthy) "OK\\n" else ""
Try {
httpGetAdmin(
"/health",
andExpect = Status.Ok,
withBody = expectedBody,
suppress = !verbose)
}
}
private def flagsAsArgs(flags: Map[String, String]): Iterable[String] = {
flags.map { case (k, v) => "-" + k + "=" + v }
}
private def isSingletonObject(server: com.twitter.server.TwitterServer) = {
import scala.reflect.runtime.currentMirror
currentMirror.reflect(server).symbol.isModuleClass
}
private def runNonExitingMain(): Unit = {
// we call distinct here b/c port flag args can potentially be added multiple times
val allArgs = combineArgs().distinct
info("Starting " + name + " with args: " + allArgs.mkString(" "))
_mainResult = mainRunnerFuturePool {
try {
twitterServer.nonExitingMain(allArgs)
} catch {
case e: OutOfMemoryError if e.getMessage == "PermGen space" =>
println("OutOfMemoryError(PermGen) in server startup. " +
"This is most likely due to the incorrect setting of a client " +
"flag (not defined or invalid). Increase your PermGen to see the exact error message (e.g. -XX:MaxPermSize=256m)")
e.printStackTrace()
System.exit(-1)
case e if !NonFatal.isNonFatal(e) =>
println("Fatal exception in server startup.")
throw new Exception(e) // Need to rethrow as a NonFatal for FuturePool to "see" the exception :/
}
} onFailure { e =>
//If we rethrow, the exception will be suppressed by the Future Pool's monitor. Instead we save off the exception and rethrow outside the pool
startupFailedThrowable = Some(e)
}
}
private def waitForServerStarted(): Unit = {
for (i <- 1 to maxStartupTimeSeconds) {
info("Waiting for warmup phases to complete...")
if (startupFailedThrowable.isDefined) {
println(s"\\nEmbedded server $name failed to startup")
throw startupFailedThrowable.get
}
if ((isInjectable && injectableServer.started) || (!isInjectable && nonInjectableServerStarted)) {
started = true
logStartup()
return
}
Thread.sleep(1000)
}
throw new StartupTimeoutException(s"App: $name failed to startup within $maxStartupTimeSeconds seconds.")
}
}
| syamantm/finatra | inject/inject-server/src/test/scala/com/twitter/inject/server/EmbeddedTwitterServer.scala | Scala | apache-2.0 | 18,758 |
package org.sisioh.aws4s.core.auth
import java.io.{ File, InputStream }
import com.amazonaws.auth._
import org.sisioh.aws4s.PimpedType
object AWSCredentialsFactory {
def createAnonymous(): AWSCredentials = new AnonymousAWSCredentials()
def createProperties(file: File): AWSCredentials =
new PropertiesCredentials(file)
def createProperties(inputStream: InputStream): AWSCredentials =
new PropertiesCredentials(inputStream)
def createBasic(accessKey: String, secretKey: String): AWSCredentials =
new BasicAWSCredentials(accessKey, secretKey)
def createBasicSession(awsAccessKey: String, awsSecretKey: String, sessionToken: String): AWSSessionCredentials =
new BasicSessionCredentials(awsAccessKey, awsSecretKey, sessionToken)
}
class RichAWSCredentials(val underlying: AWSCredentials) extends AnyVal with PimpedType[AWSCredentials] {
def awsAccessKeyId: String = underlying.getAWSAccessKeyId
def awsSecretKey: String = underlying.getAWSSecretKey
}
| sisioh/aws4s | aws4s-core/src/main/scala/org/sisioh/aws4s/core/auth/RichAWSCredentials.scala | Scala | mit | 990 |
package com.github.mdr.mash.compiler
import com.github.mdr.mash.compiler.BareStringify.bareStringify
import com.github.mdr.mash.parser.AbstractSyntax.{ AstNode, Program }
import com.github.mdr.mash.parser.{ Abstractifier, MashParser, Provenance }
import org.scalatest.{ FlatSpec, Matchers }
class BareStringifyTest extends FlatSpec with Matchers {
"unbound" ==> """ "unbound" """
{
implicit val bindings = Set("bound")
"bound" ==> "bound"
}
"class A { def foo = fields }" ==> """class A { def foo = "fields" }"""
"class A { def foo = toString }" ==> """class A { def foo = "toString" }"""
"class A { def foo = bar; def bar = 42 }" ==> "class A { def foo = bar; def bar = 42 }"
"def doSomething (@flag @(shortFlag d) dryRun = false) = 42" ==>
"""def doSomething (@flag @(shortFlag "d") dryRun = false) = 42"""
// Can make this pass, but requires running simple evaluation earlier, not sure it's worth it
// "class A { @(alias 'a') def aardvark = 42; def b = a }" ==>
// "class A { @(alias 'a') def aardvark = 42; def b = a }"
private implicit class RichString(input: String)(implicit val bindings: Set[String] = Set()) {
def ==>(expected: String): Unit = {
s"Identifying rich strings in '$input'" should s"result in '$expected'" in {
val inputProgram = compile(input)
val actualProgram = removeSourceInfo(bareStringify(inputProgram, bindings))
val expectedProgram = removeSourceInfo(compile(expected))
actualProgram should equal(expectedProgram)
}
}
}
private def removeSourceInfo(expr: AstNode): AstNode = expr.transform { case e ⇒ e.withSourceInfoOpt(None) }
private def compile(input: String): Program = {
val concreteProgram = MashParser.parseForgiving(input)
val abstractifier = new Abstractifier(Provenance.internal(input))
val abstractProgram = abstractifier.abstractify(concreteProgram)
abstractProgram
}
}
| mdr/mash | src/test/scala/com/github/mdr/mash/compiler/BareStringifyTest.scala | Scala | mit | 1,942 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.database.s3
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.apache.openwhisk.core.database.s3.S3AttachmentStoreProvider.S3Config
import org.apache.openwhisk.core.entity.WhiskEntity
@RunWith(classOf[JUnitRunner])
class S3WithPrefixTests extends S3AttachmentStoreMinioTests {
override protected val bucketPrefix: String = "master"
behavior of "S3Config"
it should "work with none prefix" in {
val config = S3Config("foo", None)
config.prefixFor[WhiskEntity] shouldBe "whiskentity"
}
it should "work with optional prefix" in {
val config = S3Config("foo", Some("bar"))
config.prefixFor[WhiskEntity] shouldBe "bar/whiskentity"
}
}
| csantanapr/incubator-openwhisk | tests/src/test/scala/org/apache/openwhisk/core/database/s3/S3WithPrefixTests.scala | Scala | apache-2.0 | 1,530 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package es.alvsanand.sgc.ftp.normal
import es.alvsanand.sgc.ftp.{FTPCredentials, ProxyConfiguration}
import org.scalatest._
class FTPSgcConnectorFactoryTest extends FlatSpec with Matchers with OptionValues
with Inside with Inspectors with BeforeAndAfterAll {
it should "fail with obligatory parameters" in {
a[IllegalArgumentException] shouldBe thrownBy(FTPSgcConnectorFactory
.get(FTPParameters(null, 21, null, null)))
a[IllegalArgumentException] shouldBe thrownBy(FTPSgcConnectorFactory
.get(FTPParameters("host", 21, null, null)))
a[IllegalArgumentException] shouldBe thrownBy(FTPSgcConnectorFactory
.get(FTPParameters("host", 21, "directory", null)))
a[IllegalArgumentException] shouldBe thrownBy(FTPSgcConnectorFactory
.get(FTPParameters("host", 21, "directory", FTPCredentials(null))))
a[IllegalArgumentException] shouldBe thrownBy(FTPSgcConnectorFactory
.get(FTPParameters("host", 21, "directory", FTPCredentials("user"), proxy = Option(ProxyConfiguration("")))))
}
it should "work with obligatory parameters" in {
noException should be thrownBy(
FTPSgcConnectorFactory.get(FTPParameters("host", 21, "directory", FTPCredentials("user")))
)
noException should be thrownBy(
FTPSgcConnectorFactory.get(FTPParameters("host", 21, "directory", FTPCredentials("user"),
proxy = Option(ProxyConfiguration("proxyHost"))))
)
}
it should "work with proxy parameters" in {
var p = Option(ProxyConfiguration("proxyHost", user = Option("user")))
var parameters = FTPParameters("host", 21, "directory", FTPCredentials("user"),
proxy = p)
noException should be thrownBy(FTPSgcConnectorFactory.get(parameters))
FTPSgcConnectorFactory.get(parameters).asInstanceOf[FTPSgcConnector].usesProxy() should
be(true)
p = Option(ProxyConfiguration("proxyHost", user = Option("user"), password = Option("")))
parameters = FTPParameters("host", 21, "directory", FTPCredentials("user"),
proxy = p)
noException should be thrownBy(FTPSgcConnectorFactory.get(parameters))
FTPSgcConnectorFactory.get(parameters).asInstanceOf[FTPSgcConnector].usesProxy() should
be(true)
}
}
| alvsanand/spark-generic-connector | sgc-ftp/src/test/scala/es/alvsanand/sgc/ftp/normal/FTPSgcConnectorFactoryTest.scala | Scala | apache-2.0 | 3,016 |
package utils
import common._
/**
 * Library of utility functions for lists of integers
 */
object ListIntUtils {
  /**
   * Search
   * Given a list and a comparison function, returns the value that satisfies the condition.
   */
def buscar(lista: List[Int], com:(Int, Int) => Boolean): Int =
if (lista.tail.isEmpty) lista.head
else if (com(lista.head, buscar(lista.tail,com))) lista.head
else buscar(lista.tail,com)
  /*
   * Finds the maximum
   */
def max(lista: List[Int]) : Int = buscar(lista, (a:Int, b:Int) => if (a > b) true else false)
  /*
   * Finds the minimum
   */
def min(lista: List[Int]) : Int = buscar(lista, (a:Int, b:Int) => if (a < b) true else false)
  /*
   * Finds the median
   * In statistics, the median is the value that occupies the central position
   * in an ordered data set.
   */
def mediana(lista: List[Int]) : Int =
if(lista.tail.isEmpty) lista.head
else if(contar(lista)%2!=0) ObtenerElemento(QuickSort(lista),(contar(lista)/2)+1)
else (ObtenerElemento(QuickSort(lista),contar(lista)/2)+ObtenerElemento(QuickSort(lista),(contar(lista)/2)+1))/2
def maximos(lista: List[Int],e: Int) : List[Int]=
filtrar(lista,(x)=>(x>e))
def minimos(lista: List[Int],e: Int) : List[Int]=
filtrar(lista,(x)=>(x<=e))
def QuickSort(xs: List[Int]) : List[Int]=
if(xs.isEmpty || xs.tail.isEmpty) xs
else QuickSort(minimos(xs.tail,xs.head))++(xs.head::QuickSort(maximos(xs.tail,xs.head)))
def ObtenerElemento(lista: List[Int], posicion: Int) :Int=
if(posicion==1) lista.head
else ObtenerElemento(lista.tail,posicion-1)
  /**
   * Counts the elements
   */
def contar(lista: List[Int]) : Int =
lista.foldLeft(0)((sum,x)=> sum+1)
def acc(lista: List[Int]) : Int =
lista.foldLeft(0)((sum,x)=> sum+1);
  /**
   * Filters the elements of the list xs according to the predicate p
   */
def filtrar(xs: List[Int], p: Int => Boolean): List[Int] =
if (xs.isEmpty) xs
else if(p(xs.head)) xs.head::filtrar(xs.tail,p)
else filtrar(xs.tail,p)
  /**
   * Filters the even elements
   */
def filtrarPares(xs: List[Int]): List[Int] =
filtrar(xs,(x)=>(x%2==0))
  /**
   * Filters the elements that are multiples of 3
   */
def filtrarMultiplosDeTres(xs: List[Int]): List[Int] =
filtrar(xs,(x)=>(x%3==0))
  /**
   * Accumulates the elements after applying fx to each of them
   */
def acumular(lista: List[Int])(fx: (Int) => Int): Int =
if(lista.isEmpty) 0
else lista.foldLeft(0)((acc,x)=>acc+fx(x))
  /**
   * Accumulates the units digit of every element of a list
   */
def acumularUnidad(lista: List[Int]): Int =
if(lista.isEmpty) 0
else acumular(lista)((x)=>(x%10))
  /**
   * Accumulates the double of each element of a list
   */
def acumularDoble(lista: List[Int]): Int =
if(lista.isEmpty) 0
else acumular(lista)((x)=>(x*2))
  /**
   * Accumulates the square of each element of a list
   */
def acumularCuadrado(lista: List[Int]): Int =
if(lista.isEmpty) 0
else acumular(lista)((x)=>(x*x))
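  // Illustrative examples only (added for clarity, assuming the definitions above):
  //   max(List(3, 1, 2))              == 3
  //   mediana(List(5, 1, 3))          == 3
  //   filtrarPares(List(1, 2, 3, 4))  == List(2, 4)
  //   acumularCuadrado(List(1, 2, 3)) == 14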
}
| julian-lanfranco/funcional-fcyt | funsets/src/main/scala/utils/ListIntUtils.scala | Scala | gpl-2.0 | 3,183 |
/*******************************************************************************
* Copyright (c) 2014 Łukasz Szpakowski.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
******************************************************************************/
package pl.luckboy.purfuncor.frontend.resolver
import scala.util.parsing.input.Position
import scalaz._
import scalaz.Scalaz._
import pl.luckboy.purfuncor.util._
import pl.luckboy.purfuncor.common._
import pl.luckboy.purfuncor.frontend._
import pl.luckboy.purfuncor.common.Tree
import pl.luckboy.purfuncor.frontend.Bind
import pl.luckboy.purfuncor.common.Result._
object Resolver
{
def treeForFile[T, U, V, W](tree: Tree[GlobalSymbol, AbstractCombinator[Symbol, T, U], TreeInfo[V, W]], file: Option[java.io.File])(implicit showing: Showing[Tree[GlobalSymbol, AbstractCombinator[Symbol, T, U], TreeInfo[V, W]]]) =
tree.copy(
combs = tree.combs.mapValues { _.withFile(file) },
treeInfo = tree.treeInfo.copy(
typeTree = tree.treeInfo.typeTree.copy(
combs = tree.treeInfo.typeTree.combs.mapValues { _.withFile(file) }
)
)
)
private def transformTermNel1[T, U](terms: NonEmptyList[T])(transform: T => ValidationNel[AbstractError, U]) =
terms.tail.foldLeft(transform(terms.head).map { NonEmptyList(_) }) {
(res, t) => (transform(t) |@| res)(_ <:: _)
}.map { _.reverse }
private def transformArgNel4[T, U](args: NonEmptyList[T])(prefix: String, getName: T => Option[String], getPos: T => Position, transform: T => ValidationNel[AbstractError, U]) =
args.tail.foldLeft((transform(args.head).map { NonEmptyList(_) }, Set() ++ getName(args.head))) {
case (p @ (res, usedNames), a) =>
getName(a).map {
name =>
val res2 = if(usedNames.contains(name))
(res |@| Error("already defined " + prefix + " " + name, none, getPos(a)).failureNel[Unit]) { (as, _) => as }
else
(transform(a) |@| res) { _ <:: _ }
(res2, usedNames + name)
}.getOrElse(((transform(a) |@| res) { _ <:: _ }, usedNames))
}._1.map { _.reverse }
def transformTermNel[T, U](terms: NonEmptyList[Term[SimpleTerm[parser.Symbol, T, TypeSimpleTerm[parser.Symbol, U]]]])(scope: Scope) =
transformTermNel1(terms)(transformTerm(_)(scope))
def transformBind[T, U](bind: Bind[parser.Symbol, T, TypeSimpleTerm[parser.Symbol, U]])(scope: Scope) =
bind match {
case Bind(name, body, pos) => transformTerm(body)(scope).map { Bind(name, _, pos) }
}
def transformBindNel[T, U](binds: NonEmptyList[Bind[parser.Symbol, T, TypeSimpleTerm[parser.Symbol, U]]])(scope: Scope) =
binds.tail.foldLeft((transformBind(binds.head)(scope).map { NonEmptyList(_) }, Set(binds.head.name))) {
case ((res, usedNames), b) =>
val res2 = if(usedNames.contains(b.name))
(res |@| Error("already defined local variable " + b.name, none, b.pos).failureNel[Unit]) { (bs, _) => bs }
else
res
((transformBind(b)(scope) |@| res2)(_ <:: _), usedNames + b.name)
}._1.map { _.reverse }
def transformArgNel[T](args: NonEmptyList[Arg[TypeSimpleTerm[parser.Symbol, T]]])(scope: Scope) =
transformArgNel4(args)("argument", _.name, _.pos, transformArg(_)(scope))
def transformArgs[T](args: List[Arg[TypeSimpleTerm[parser.Symbol, T]]])(scope: Scope) =
args.toNel.map { transformArgNel(_)(scope).map { _.list } }.getOrElse(Nil.successNel)
def transformArg[T](arg: Arg[TypeSimpleTerm[parser.Symbol, T]])(scope: Scope): ValidationNel[AbstractError, Arg[TypeSimpleTerm[Symbol, T]]] =
arg.typ.map {
transformTypeTerm(_)(scope.copy(localVarNames = Set())).map { tt => Arg(arg.name, some(tt), arg.pos) }
}.getOrElse(Arg(arg.name, none, arg.pos).successNel)
def transformCase[T, U](cas: Case[parser.Symbol, T, TypeSimpleTerm[parser.Symbol, U]])(scope: Scope) =
(transformTypeTerm(cas.typ)(scope.copy(localVarNames = Set())) |@| transformTerm(cas.body)(scope.withLocalVars(cas.name.toSet))) {
Case(cas.name, _, _, cas.lambdaInfo)
}
def transformCaseNel[T, U](cases: NonEmptyList[Case[parser.Symbol, T, TypeSimpleTerm[parser.Symbol, U]]])(scope: Scope) =
transformTermNel1(cases)(transformCase(_)(scope))
def transformTerm[T, U](term: Term[SimpleTerm[parser.Symbol, T, TypeSimpleTerm[parser.Symbol, U]]])(scope: Scope): ValidationNel[AbstractError, Term[SimpleTerm[Symbol, T, TypeSimpleTerm[Symbol, U]]]] =
term match {
case App(fun, args, pos) =>
(transformTerm(fun)(scope) |@| transformTermNel(args)(scope)) { App(_, _, pos) }
case Simple(Let(binds, body, lambdaInfo), pos) =>
val newScope = scope.withLocalVars(binds.map { _.name }.toSet)
(transformBindNel(binds)(scope) |@| transformTerm(body)(newScope)) { (bs, t) => Simple(Let(bs, t, lambdaInfo), pos) }
case Simple(Lambda(args, body, lambdaInfo), pos) =>
val newScope = scope.withLocalVars(args.list.flatMap { _.name }.toSet)
(transformArgNel(args)(scope) |@| transformTerm(body)(newScope)) { (as, t) => Simple(Lambda(as, t, lambdaInfo), pos) }
case Simple(Var(sym, lambdaInfo), pos) =>
transformSymbol(sym)(scope).map { s => Simple(Var(s, lambdaInfo), pos) }
case Simple(Literal(value), pos) =>
Simple(Literal(value), pos).successNel
case Simple(TypedTerm(term, typ), pos) =>
(transformTerm(term)(scope) |@| transformTypeTerm(typ)(scope.copy(localVarNames = Set()))) { (t, tt) => Simple(TypedTerm(t, tt), pos) }
case Simple(Construct(n, lambdaInfo), pos) =>
Simple(Construct(n, lambdaInfo), pos).successNel
case Simple(Select(term, cases, lambdaInfo), pos) =>
(transformTerm(term)(scope) |@| transformCaseNel(cases)(scope)) { (t, cs) => Simple(Select(t, cs, lambdaInfo), pos) }
case Simple(Extract(term, args, body, lambdaInfo), pos) =>
val newScope = scope.withLocalVars(args.list.flatMap { _.name }.toSet)
(transformTerm(term)(scope) |@| transformArgNel(args)(scope) |@| transformTerm(body)(newScope)) {
(t1, as, t2) => Simple(Extract(t1, as, t2, lambdaInfo), pos)
}
}
def transformTypeTermNel[T](terms: NonEmptyList[Term[TypeSimpleTerm[parser.Symbol, T]]])(scope: Scope) =
transformTermNel1(terms)(transformTypeTerm(_)(scope))
def transformTypeArgNel(args: NonEmptyList[TypeArg]) =
transformArgNel4(args)("type argument", _.name, _.pos, _.successNel)
def transformTypeArgs(args: List[TypeArg]) =
args.toNel.map { transformTypeArgNel(_).map { _.list } }.getOrElse(Nil.successNel)
def transformTypeTermOption[T](term: Option[Term[TypeSimpleTerm[parser.Symbol, T]]])(scope: Scope) =
term.map { transformTypeTerm(_)(scope).map(some) }.getOrElse(none.successNel)
def transformTypeTerm[T](term: Term[TypeSimpleTerm[parser.Symbol, T]])(scope: Scope): ValidationNel[AbstractError, Term[TypeSimpleTerm[Symbol, T]]] =
term match {
case App(fun, args, pos) =>
(transformTypeTerm(fun)(scope) |@| transformTypeTermNel(args)(scope)) { App(_, _, pos)}
case Simple(TypeLambda(args, body, lambdaInfo), pos) =>
val newScope = scope.withLocalVars(args.list.flatMap { _.name }.toSet)
(transformTypeArgNel(args) |@| transformTypeTerm(body)(newScope)) { case (as, t) => Simple(TypeLambda(as, t, lambdaInfo), pos) }
case Simple(TypeVar(sym), pos) =>
transformTypeSymbol(sym)(scope).map { s => Simple(TypeVar(s), pos) }
case Simple(TypeLiteral(value), pos) =>
Simple(TypeLiteral(value), pos).successNel
case Simple(KindedTypeTerm(term, kind), pos) =>
transformTypeTerm(term)(scope).map { t => Simple(KindedTypeTerm(t, kind), pos) }
}
private def getSymbol4[T](name: String, pos: Position)(scope: Scope)(prefix: String, contains: (NameTable, String) => Boolean, make: (ModuleSymbol, String) => T, importedSyms: Scope => Map[String, Set[T]]) = {
val undefinedSymErrRes = (Error("undefined " + prefix + " " + name, none, pos): AbstractError).failureNel[T]
scope.currentModuleSyms.foldLeft(undefinedSymErrRes) {
(res, moduleSym) =>
res.orElse {
scope.nameTree.getNameTable(moduleSym).map {
nameTable => if(contains(nameTable, name)) make(moduleSym, name).successNel[AbstractError] else res
}.getOrElse(res)
}
}.orElse {
importedSyms(scope).get(name).map {
syms =>
if(syms.size <= 1)
syms.headOption.map { _.successNel[AbstractError] }.getOrElse(undefinedSymErrRes)
else
Error("reference to " + name + " is ambiguous", none, pos).failureNel[T]
}.getOrElse(undefinedSymErrRes)
}
}
def getGlobalSymbol(name: String, pos: Position)(scope: Scope) =
getSymbol4(name, pos)(scope)("variable", _.combNames.contains(_), _.globalSymbolFromName(_), _.importedCombSyms)
def getTypeGlobalSymbol(name: String, pos: Position)(scope: Scope) =
getSymbol4(name, pos)(scope)("type variable", _.typeCombNames.contains(_), _.globalSymbolFromName(_), _.importedTypeCombSyms)
def getModuleSymbol(name: String, pos: Position)(scope: Scope) =
getSymbol4(name, pos)(scope)("module", _.moduleNames.contains(_), _ + _, _.importedModuleSyms)
def transformGlobalSymbolForInstance(sym: parser.Symbol)(scope: Scope) =
sym match {
case parser.GlobalSymbol(names, pos) =>
val combSym = GlobalSymbol(names)
if(scope.nameTree.containsComb(combSym))
combSym.successNel
else
Error("undefined global variable " + combSym, none, pos).failureNel
case parser.NormalSymbol(NonEmptyList(name), pos) =>
getGlobalSymbol(name, pos)(scope)
case parser.NormalSymbol(names, pos) =>
getModuleSymbol(names.head, pos)(scope).flatMap {
moduleSym =>
names.tail.toNel.map {
tail =>
val combSym = moduleSym.globalSymbolFromNames(tail)
if(scope.nameTree.containsComb(combSym))
combSym.successNel
else
Error("undefined global variable " + combSym, none, pos).failureNel
}.getOrElse(FatalError("tail of list is empty", none, pos).failureNel)
}
}
def transformSymbol(sym: parser.Symbol)(scope: Scope) =
sym match {
case parser.NormalSymbol(NonEmptyList(name), _) =>
if(scope.localVarNames.contains(name))
LocalSymbol(name).successNel
else
transformGlobalSymbolForInstance(sym)(scope)
case _ =>
transformGlobalSymbolForInstance(sym)(scope)
}
def transformTypeSymbol(sym: parser.Symbol)(scope: Scope) =
sym match {
case parser.GlobalSymbol(names, pos) =>
val typeCombSym = GlobalSymbol(names)
if(scope.nameTree.containsTypeComb(typeCombSym))
typeCombSym.successNel
else
Error("undefined global type variable " + typeCombSym, none, pos).failureNel
case parser.NormalSymbol(NonEmptyList(name), pos) =>
if(scope.localVarNames.contains(name))
LocalSymbol(name).successNel
else
getTypeGlobalSymbol(name, pos)(scope)
case parser.NormalSymbol(names, pos) =>
getModuleSymbol(names.head, pos)(scope).flatMap {
moduleSym =>
names.tail.toNel.map {
tail =>
val typeCombSym = moduleSym.globalSymbolFromNames(tail)
if(scope.nameTree.containsTypeComb(typeCombSym))
typeCombSym.successNel
else
Error("undefined global type variable " + typeCombSym, none, pos).failureNel
}.getOrElse(FatalError("tail of list is empty", none, pos).failureNel)
}
}
def transformGlobalSymbol(sym: parser.Symbol)(currentModuleSym: ModuleSymbol) =
sym match {
case parser.GlobalSymbol(names, _) => GlobalSymbol(names)
case parser.NormalSymbol(names, _) => currentModuleSym.globalSymbolFromNames(names)
}
def transformModuleSymbol(sym: parser.ModuleSymbol)(currentModuleSym: ModuleSymbol) =
sym match {
case parser.GlobalModuleSymbol(names, _) => ModuleSymbol(names)
case parser.NormalModuleSymbol(names, _) => currentModuleSym ++ names.list
}
def addDefToNameTreeS(definition: parser.Def)(currentModuleSym: ModuleSymbol)(nameTree: NameTree): (NameTree, ValidationNel[AbstractError, Unit]) =
definition match {
case parser.ImportDef(sym) =>
(nameTree, ().successNel[AbstractError])
case parser.CombinatorDef(sym, _, _, _) =>
val sym2 = transformGlobalSymbol(sym)(currentModuleSym)
if(nameTree.containsComb(sym2))
(nameTree, Error("already defined global variable " + sym2, none, sym.pos).failureNel)
else
(nameTree |+| NameTree.fromGlobalSymbol(sym2), ().successNel[AbstractError])
case parser.PolyCombinatorDef(sym, _) =>
val sym2 = transformGlobalSymbol(sym)(currentModuleSym)
if(nameTree.containsComb(sym2))
(nameTree, Error("already defined global variable " + sym2, none, sym.pos).failureNel)
else
(nameTree |+| NameTree.fromGlobalSymbol(sym2), ().successNel[AbstractError])
case parser.TypeCombinatorDef(sym, _, _, _) =>
val sym2 = transformGlobalSymbol(sym)(currentModuleSym)
if(nameTree.containsTypeComb(sym2))
(nameTree, Error("already defined global type variable " + sym2, none, sym.pos).failureNel)
else
(nameTree |+| NameTree.fromTypeGlobalSymbol(sym2), ().successNel[AbstractError])
case parser.UnittypeCombinatorDef(_, sym, _) =>
val sym2 = transformGlobalSymbol(sym)(currentModuleSym)
if(nameTree.containsTypeComb(sym2))
(nameTree, Error("already defined global type variable " + sym2, none, sym.pos).failureNel)
else
(nameTree |+| NameTree.fromTypeGlobalSymbol(sym2), ().successNel[AbstractError])
case parser.ModuleDef(sym, defs) =>
defs.foldLeft((nameTree, ().successNel[AbstractError])) {
case ((nt, res), d) =>
val (nt2, res2) = addDefToNameTreeS(d)(transformModuleSymbol(sym)(currentModuleSym))(nt)
(nt2, res |+| res2)
}
case parser.InstanceDef(_, _) =>
(nameTree, ().successNel[AbstractError])
case parser.SelectConstructInstanceDef(_, _) =>
(nameTree, ().successNel[AbstractError])
}
def addDefToNameTree(definition: parser.Def)(currentModuleSym: ModuleSymbol) =
State(addDefToNameTreeS(definition)(currentModuleSym))
def nameTreeFromParseTreeS(parseTree: parser.ParseTree)(nameTree: NameTree) =
parseTree.defs.foldLeft((nameTree, ().successNel[AbstractError])) {
case ((nt, res), d) =>
val (nt2, res2) = addDefToNameTreeS(d)(ModuleSymbol.root)(nt)
(nt2, res |+| res2)
}
def nameTreeFromParseTree(parseTree: parser.ParseTree) =
State(nameTreeFromParseTreeS(parseTree))
def transformImportModuleSymbol(sym: parser.ModuleSymbol)(scope: Scope) =
sym match {
case parser.GlobalModuleSymbol(names, pos) =>
val moduleSym = ModuleSymbol(names)
if(scope.nameTree.containsModule(moduleSym))
moduleSym.successNel
else
Error("undefined module " + moduleSym, none, pos).failureNel
case parser.NormalModuleSymbol(names, pos) =>
getModuleSymbol(names.head, pos)(scope).flatMap {
moduleSym =>
val moduleSym2 = moduleSym ++ names.tail
if(scope.nameTree.containsModule(moduleSym2))
moduleSym2.successNel
else
Error("undefined module " + moduleSym2, none, pos).failureNel
}
}
def transformDefsS[T](defs: List[parser.Def])(scope: Scope)(tree: Tree[GlobalSymbol, AbstractCombinator[Symbol, parser.LambdaInfo, TypeSimpleTerm[Symbol, parser.TypeLambdaInfo]], TreeInfo[parser.TypeLambdaInfo, T]]): (Tree[GlobalSymbol, AbstractCombinator[Symbol, parser.LambdaInfo, TypeSimpleTerm[Symbol, parser.TypeLambdaInfo]], TreeInfo[parser.TypeLambdaInfo, T]], ValidationNel[AbstractError, Unit]) =
defs.foldLeft(((tree, ().successNel[AbstractError]), scope)) {
case ((p @ (tree2, res), scope), d) =>
d match {
case parser.ImportDef(sym) =>
transformImportModuleSymbol(sym)(scope).map {
sym2 =>
scope.nameTree.getNameTable(sym2).map {
nt =>
val combSyms = nt.combNames.map { name => (name, sym2.globalSymbolFromName(name)) }.toMap
val typeCombSyms = nt.typeCombNames.map { name => (name, sym2.globalSymbolFromName(name)) }.toMap
val moduleSyms = nt.moduleNames.map { name => (name, sym2 + name) }.toMap
(p, scope.withImportedCombs(combSyms).withImportedCombs(typeCombSyms).withImportedModules(moduleSyms))
}.getOrElse {
((tree2, res |+| Error("undefined module " + sym2, none, sym.pos).failureNel), scope)
}
} match {
case Success(pp) => pp
case res2 => ((tree2, res |+| res2.map { _ => () }), scope)
}
case parser.CombinatorDef(sym, typ, args, body) =>
val sym2 = transformGlobalSymbol(sym)(scope.currentModuleSyms.head)
if(scope.nameTree.containsComb(sym2)) {
val newScope = scope.withLocalVars(args.flatMap { _.name }.toSet)
val res2 = (transformTypeTermOption(typ)(scope) |@| transformArgs(args)(scope) |@| transformTerm(body)(newScope)) { (tt, as, t) => (tt, as, t) }
res2 match {
case Success((tt, as, t)) =>
((tree2.copy(combs = tree2.combs + (sym2 -> Combinator(tt, as, t, parser.LambdaInfo, none))), (res |@| res2) { (u, _) => u }), scope)
case Failure(_) =>
((tree2, (res |@| res2) { (u, _) => u }), scope)
}
} else
((tree, res |+| FatalError("name tree doesn't contain combinator", none, sym.pos).failureNel[Unit]), scope)
case parser.PolyCombinatorDef(sym, typ) =>
val sym2 = transformGlobalSymbol(sym)(scope.currentModuleSyms.head)
if(scope.nameTree.containsComb(sym2)) {
val res2 = transformTypeTermOption(typ)(scope)
res2 match {
case Success(tt) =>
((tree2.copy(combs = tree2.combs + (sym2 -> PolyCombinator(tt, none))), (res |@| res2) { (u, _) => u }), scope)
case Failure(_) =>
((tree2, (res |@| res2) { (u, _) => u }), scope)
}
} else
((tree, res |+| FatalError("name tree doesn't contain combinator", none, sym.pos).failureNel[Unit]), scope)
case parser.TypeCombinatorDef(sym, kind, args, body) =>
val sym2 = transformGlobalSymbol(sym)(scope.currentModuleSyms.head)
if(scope.nameTree.containsTypeComb(sym2)) {
val newScope = scope.withLocalVars(args.flatMap { _.name }.toSet)
val res2 = (transformTypeArgs(args) |@| transformTypeTerm(body)(newScope)) { (as, t) => (as, t) }
res2 match {
case Success((as, t)) =>
val treeInfo2 = tree2.treeInfo
val typeTree2 = treeInfo2.typeTree
((tree2.copy(treeInfo = treeInfo2.copy(typeTree = typeTree2.copy(combs = typeTree2.combs + (sym2 -> TypeCombinator(kind, as, t, parser.TypeLambdaInfo, none))))), (res |@| res2) { (u, _) => u }), scope)
case Failure(_) =>
((tree2, (res |@| res2) { (u, _) => u }), scope)
}
} else
((tree, res |+| FatalError("name tree doesn't contain type combinator", none, sym.pos).failureNel[Unit]), scope)
case parser.UnittypeCombinatorDef(n, sym, kind) =>
val sym2 = transformGlobalSymbol(sym)(scope.currentModuleSyms.head)
if(scope.nameTree.containsTypeComb(sym2)) {
val treeInfo2 = tree2.treeInfo
val typeTree2 = treeInfo2.typeTree
((tree2.copy(treeInfo = treeInfo2.copy(typeTree = typeTree2.copy(combs = typeTree2.combs + (sym2 -> UnittypeCombinator(n, kind, none))))), res), scope)
} else
((tree, res |+| FatalError("name tree doesn't contain type combinator", none, sym.pos).failureNel[Unit]), scope)
case parser.ModuleDef(sym, defs2) =>
val sym2 = transformModuleSymbol(sym)(scope.currentModuleSyms.head)
val (newTree2, res2) = transformDefsS(defs2)(scope.withCurrentModule(sym2))(tree2)
((newTree2, res |+| res2), scope)
case parser.InstanceDef(polyCombSym, instCombSym) =>
val res2 = (transformGlobalSymbolForInstance(polyCombSym)(scope) |@| transformGlobalSymbolForInstance(instCombSym)(scope)) { (s1, s2) => (s1, s2) }
res2 match {
case Success((s1, s2)) =>
((tree2.copy(treeInfo = tree2.treeInfo.copy(insts = tree2.treeInfo.insts |+| Map(s1 -> List(Instance(s2, polyCombSym.pos, none))))), (res |@| res2) { (u, _) => u }), scope)
case Failure(_) =>
((tree2, (res |@| res2) { (u, _) => u }), scope)
}
case parser.SelectConstructInstanceDef(supertype, types) =>
val res2 = (transformTypeTerm(supertype)(scope) |@| transformTypeTermNel(types)(scope)) { (tt, tts) => (tt, tts) }
res2 match {
case Success((tt, tts)) =>
((tree2.copy(treeInfo = tree2.treeInfo.copy(selectConstructInsts = tree2.treeInfo.selectConstructInsts :+ SelectConstructInstance(tt, tts, none))), (res |@| res2) { (u, _) => u }), scope)
case Failure(_) =>
((tree2, (res |@| res2) { (u, _) => u }), scope)
}
}
}._1
def transformDefs[T](defs: List[parser.Def])(scope: Scope) =
State(transformDefsS[T](defs)(scope))
def transformParseTreeS[T](parseTree: parser.ParseTree)(nameTree: NameTree)(tree: Tree[GlobalSymbol, AbstractCombinator[Symbol, parser.LambdaInfo, TypeSimpleTerm[Symbol, parser.TypeLambdaInfo]], TreeInfo[parser.TypeLambdaInfo, T]]) =
transformDefsS[T](parseTree.defs)(Scope.fromNameTree(nameTree))(tree)
def transformParseTree[T](parseTree: parser.ParseTree)(nameTree: NameTree) =
State(transformParseTreeS[T](parseTree)(nameTree))
def transform(parseTrees: List[(Option[java.io.File], parser.ParseTree)])(nameTree: NameTree) = {
val (newNameTree, res1) = parseTrees.foldLeft((nameTree, ().successNel[AbstractError])) {
case (p @ (nt, res), (file, pt)) =>
nameTreeFromParseTree(pt).map {
res2 => res |+| resultForFile(res2, file)
}.run(nt)
}
val (newTree, res2) = parseTrees.foldLeft((Tree[GlobalSymbol, AbstractCombinator[Symbol, parser.LambdaInfo, TypeSimpleTerm[Symbol, parser.TypeLambdaInfo]], TreeInfo[parser.TypeLambdaInfo, TypeTreeInfo]](Map(), TreeInfo(Tree[GlobalSymbol, AbstractTypeCombinator[Symbol, parser.TypeLambdaInfo], TypeTreeInfo](Map(), TypeTreeInfo), Map(), Nil)), res1)) {
case (p @ (t, res), (file, pt)) =>
val (newTree, newRes) = transformParseTree[TypeTreeInfo](pt)(newNameTree).map {
res2 => res |+| resultForFile(res2, file)
}.run(t)
(treeForFile(newTree, file), newRes)
}
res2.map { _ => newTree }
}
def transformString(s: String)(nameTree: NameTree) =
for {
parseTree <- parser.Parser.parseString(s)
tree <- transform(List(none -> parseTree))(nameTree)
} yield tree
def transformFile(file: java.io.File)(nameTree: NameTree) =
for {
parseTree <- parser.Parser.parseFile(file)
tree <- transform(List(some(file) -> parseTree))(nameTree)
} yield tree
def transformFiles(files: List[java.io.File])(nameTree: NameTree) = {
val res1 = files.foldLeft(List[(Option[java.io.File], parser.ParseTree)]().successNel[AbstractError]) {
case (res, file) => (res |@| parser.Parser.parseFile(file)) { (pts, pt) => pts :+ (some(file) -> pt) }
}
res1.flatMap { transform(_)(nameTree) }
}
def transformTermString(s: String)(scope: Scope) =
for {
term <- parser.Parser.parseTermString(s)
term2 <- transformTerm(term)(scope)
} yield term2
def transformTypeTermString(s: String)(scope: Scope) =
for {
typeTerm <- parser.Parser.parseTypeTermString(s)
typeTerm2 <- transformTypeTerm(typeTerm)(scope)
} yield typeTerm2
}
| luckboy/Purfuncor | src/main/scala/pl/luckboy/purfuncor/frontend/resolver/Resolver.scala | Scala | mpl-2.0 | 24,904 |
package models
import java.util.UUID
import config.ConfigBanana
import org.w3.banana.{FOAFPrefix, PointedGraph, XSDPrefix}
import org.w3.banana.binder.PGBinder
import play.api.libs.json.Json
import scala.language.implicitConversions
import scala.util.Try
/** Describes a verification of a task by a user.
*
* @param time unix time in milliseconds
*/
case class Verification(_id: UUID,
verifier_id: UUID,
task_id: UUID,
time: Long,
value: Option[Boolean]
) extends MongoEntity
object Verification extends ConfigBanana {
implicit val verificationFormat = Json.format[Verification]
}
/** Binds the verification case class to RDF. Unfinished.
 *
 */
case class VerificationDump(
_id: UUID,
verifier: String,
link: Link,
value: Option[Boolean]
)
object VerificationDump extends ConfigBanana {
import ops._
import recordBinder._
import org.w3.banana.syntax._
val veritaskPrefix = "http://www.veritask.de/"
val _id = property[UUID](URI(veritaskPrefix + "verification_id"))
val verifier = property[String](URI(veritaskPrefix + "verifier"))
val link= property[Link](URI(veritaskPrefix + "link"))
val value = optional[Boolean](URI(veritaskPrefix + "value"))
implicit val binder: PGBinder[Rdf, VerificationDump] =
pgbWithId[VerificationDump](t => URI(veritaskPrefix + t._id))
.apply(_id, verifier, link, value)(VerificationDump.apply, VerificationDump.unapply)
}
| BonarBeavis/veritask | app/models/Verification.scala | Scala | apache-2.0 | 1,668 |
package com.raquo.domtypes.fixtures.tags
/** Tag represents an Element builder */
class Tag[+Element](
val name: String,
val void: Boolean
)
| raquo/scala-dom-types | shared/src/test/scala/com/raquo/domtypes/fixtures/tags/Tag.scala | Scala | mit | 146 |
package example.herding.cats
import org.scalacheck.Properties
import org.scalacheck.Prop.forAll
import FreeMonoids._
object FreeMonoidCheckLawsSpec extends Properties("FreeMonoids") {
property("free monoid laws") = forAll { (c: Char) ⇒
f(c) == g(c)
}
}
| stevenchen3/feed-cats | src/test/scala/cats/FreeMonoidCheckLawsSpec.scala | Scala | mit | 266 |
package com.walmart.labs.pcs.normalize
import kafka.message._
import kafka.serializer._
import kafka.utils._
import java.util.Properties
import kafka.utils.Logging
import scala.collection.JavaConversions._
import kafka.consumer._
class KafkaConsumer(
topic: String,
/** topic
* The high-level API hides the details of brokers from the consumer and allows consuming off the cluster of machines
* without concern for the underlying topology. It also maintains the state of what has been consumed. The high-level API
* also provides the ability to subscribe to topics that match a filter expression (i.e., either a whitelist or a blacklist
    * regular expression). Here the topic is used as a whitelist only, but that could change by re-factoring the filterSpec below.
*/
groupId: String,
/** groupId
* A string that uniquely identifies the group of consumer processes to which this consumer belongs. By setting the same
* group id multiple processes indicate that they are all part of the same consumer group.
*/
zookeeperConnect: String,
/**
* Specifies the zookeeper connection string in the form hostname:port where host and port are the host and port of
* a zookeeper server. To allow connecting through other zookeeper nodes when that zookeeper machine is down you can also
* specify multiple hosts in the form hostname1:port1,hostname2:port2,hostname3:port3. The server may also have a zookeeper
* chroot path as part of it's zookeeper connection string which puts its data under some path in the global zookeeper namespace.
* If so the consumer should use the same chroot path in its connection string. For example to give a chroot path of /chroot/path
* you would give the connection string as hostname1:port1,hostname2:port2,hostname3:port3/chroot/path.
*/
readFromStartOfStream: Boolean = true
/**
* What to do when there is no initial offset in Zookeeper or if an offset is out of range:
* 1) smallest : automatically reset the offset to the smallest offset
* 2) largest : automatically reset the offset to the largest offset
* 3) anything else: throw exception to the consumer. If this is set to largest, the consumer may lose some
messages when the number of partitions, for the topics it subscribes to, changes on the broker.
****************************************************************************************
To prevent data loss during partition addition, set auto.offset.reset to smallest
      This makes sense to change to true if you know you are listening for new data only, i.e.
      only for things coming out of the stream after you connect. You can audit/reconcile in
      another consumer; this flag lets you toggle between catch-up plus new data, or just new
      data coming out of the stream. Reading will also block waiting for new messages, so it
      makes a good listener.
//readFromStartOfStream: Boolean = true
readFromStartOfStream: Boolean = false
****************************************************************************************
*/
) extends Logging {
val props = new Properties()
props.put("group.id", groupId)
props.put("zookeeper.connect", zookeeperConnect)
props.put("auto.offset.reset", if(readFromStartOfStream) "smallest" else "largest")
val config = new ConsumerConfig(props)
val connector = Consumer.create(config)
val filterSpec = new Whitelist(topic)
info("setup:start topic=%s for zk=%s and groupId=%s".format(topic,zookeeperConnect,groupId))
val stream = connector.createMessageStreamsByFilter(filterSpec, 1, new DefaultDecoder(), new DefaultDecoder()).get(0)
info("setup:complete topic=%s for zk=%s and groupId=%s".format(topic,zookeeperConnect,groupId))
def read(write: (Array[Byte])=>Unit) = {
info("reading on stream now")
for(messageAndTopic <- stream) {
try {
info("writing from stream")
write(messageAndTopic.message)
info("written to stream")
} catch {
case e: Throwable =>
          if (true) { // whether to skip or rethrow here is a judgment call, as is how to make it configurable
error("Error processing message, skipping this message: ", e)
} else {
throw e
}
}
}
}
def close() {
connector.shutdown()
}
} | ArthurZhong/SparkStormKafkaTest | src/main/scala/com/walmart/labs/pcs/kafka/KafkaConsumer.scala | Scala | apache-2.0 | 4,267 |
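// A hedged usage sketch for the KafkaConsumer above (not from the original sources).
// The topic, group id and ZooKeeper address are placeholders, and it assumes the
// Kafka 0.8-era high-level consumer API targeted by the class is on the classpath.
import com.walmart.labs.pcs.normalize.KafkaConsumer
object KafkaConsumerExample extends App {
  val consumer = new KafkaConsumer(
    topic = "normalize-events",          // whitelist topic (placeholder)
    groupId = "pcs-normalizer",          // consumer group (placeholder)
    zookeeperConnect = "localhost:2181", // zk host:port, optionally with a chroot path
    readFromStartOfStream = true         // smallest offset, i.e. catch up from the start
  )
  sys.addShutdownHook(consumer.close())  // register before read(), which blocks
  consumer.read { bytes =>
    println(s"received ${bytes.length} bytes")
  }
}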
package com.recursivity.jpa
import javax.persistence.{Entity, Table, Id, Column}
import reflect.BeanProperty
/**
* Created by IntelliJ IDEA.
* User: wfaler
* Date: 10/02/2011
* Time: 00:06
* To change this template use File | Settings | File Templates.
*/
@Entity
@Table(name="test_beans")
class MyBean{
@Id
@Column
@BeanProperty
var id: String = null
@Column
@BeanProperty
var value: String = null
} | bowler-framework/recursivity-jpa | src/test/scala/com/recursivity/jpa/MyBean.scala | Scala | bsd-3-clause | 422 |
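// A small sketch (not from the original sources) of persisting MyBean through a plain JPA
// EntityManager; the persistence-unit name "test" is an assumption, since the project's
// persistence.xml is not shown here.
import javax.persistence.Persistence
import com.recursivity.jpa.MyBean
object MyBeanExample extends App {
  val emf = Persistence.createEntityManagerFactory("test")
  val em = emf.createEntityManager()
  val tx = em.getTransaction
  tx.begin()
  val bean = new MyBean
  bean.setId("bean-1")     // setters generated by @BeanProperty
  bean.setValue("hello")
  em.persist(bean)
  tx.commit()
  println(em.find(classOf[MyBean], "bean-1").getValue)
  em.close()
  emf.close()
}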
package cz.kamenitxan.jakon.validation.validators
import java.lang.annotation.Annotation
import java.lang.reflect.Field
import cz.kamenitxan.jakon.validation.{ValidationResult, Validator}
class PositiveOrZeroValidator extends Validator {
private val error = "NOT_POSITIVE"
private val nan = "NOT_A_NUMBER"
override def isValid(value: String, a: Annotation, field: Field, data: Map[Field, String]): Option[ValidationResult] = {
if (value == null) return Option.empty
val numberValue = try {
value.toDouble
} catch {
case _: NumberFormatException => return ValidationResult(nan)
}
if (numberValue >= 0) {
Option.empty
} else {
ValidationResult(error)
}
}
}
| kamenitxan/Jakon | modules/backend/src/main/scala/cz/kamenitxan/jakon/validation/validators/PositiveOrZeroValidator.scala | Scala | bsd-3-clause | 691 |
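// A hypothetical direct invocation of the validator above (not from the original sources).
// This validator ignores its Annotation and Field arguments, so nulls are passed purely
// for illustration; real callers go through the surrounding validation framework.
import cz.kamenitxan.jakon.validation.validators.PositiveOrZeroValidator
object PositiveOrZeroExample extends App {
  val validator = new PositiveOrZeroValidator
  println(validator.isValid("3.14", null, null, Map.empty)) // None: value is valid
  println(validator.isValid("-1", null, null, Map.empty))   // Some(NOT_POSITIVE result)
  println(validator.isValid("abc", null, null, Map.empty))  // Some(NOT_A_NUMBER result)
  println(validator.isValid(null, null, null, Map.empty))   // None: null values are skipped
}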
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.net.URI
import java.util.concurrent.ThreadPoolExecutor
import java.util.concurrent.TimeUnit._
import scala.util.control.NonFatal
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, FileSystem, GlobFilter, Path}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{DataFrame, Dataset, SparkSession}
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.connector.read.streaming
import org.apache.spark.sql.connector.read.streaming.{ReadAllAvailable, ReadLimit, ReadMaxFiles, SupportsAdmissionControl}
import org.apache.spark.sql.execution.datasources.{DataSource, InMemoryFileIndex, LogicalRelation}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.ThreadUtils
/**
* A very simple source that reads files from the given directory as they appear.
*/
class FileStreamSource(
sparkSession: SparkSession,
path: String,
fileFormatClassName: String,
override val schema: StructType,
partitionColumns: Seq[String],
metadataPath: String,
options: Map[String, String]) extends SupportsAdmissionControl with Source with Logging {
import FileStreamSource._
private val sourceOptions = new FileStreamOptions(options)
private val hadoopConf = sparkSession.sessionState.newHadoopConf()
@transient private val fs = new Path(path).getFileSystem(hadoopConf)
private val qualifiedBasePath: Path = {
fs.makeQualified(new Path(path)) // can contain glob patterns
}
private val sourceCleaner: Option[FileStreamSourceCleaner] = FileStreamSourceCleaner(
fs, qualifiedBasePath, sourceOptions, hadoopConf)
private val optionsWithPartitionBasePath = sourceOptions.optionMapWithoutPath ++ {
if (!SparkHadoopUtil.get.isGlobPath(new Path(path)) && options.contains("path")) {
Map("basePath" -> path)
} else {
Map()
}}
private val metadataLog =
new FileStreamSourceLog(FileStreamSourceLog.VERSION, sparkSession, metadataPath)
private var metadataLogCurrentOffset = metadataLog.getLatest().map(_._1).getOrElse(-1L)
/** Maximum number of new files to be considered in each batch */
private val maxFilesPerBatch = sourceOptions.maxFilesPerTrigger
private val fileSortOrder = if (sourceOptions.latestFirst) {
logWarning(
"""'latestFirst' is true. New files will be processed first, which may affect the watermark
|value. In addition, 'maxFileAge' will be ignored.""".stripMargin)
implicitly[Ordering[Long]].reverse
} else {
implicitly[Ordering[Long]]
}
private val maxFileAgeMs: Long = if (sourceOptions.latestFirst && maxFilesPerBatch.isDefined) {
Long.MaxValue
} else {
sourceOptions.maxFileAgeMs
}
private val fileNameOnly = sourceOptions.fileNameOnly
if (fileNameOnly) {
logWarning("'fileNameOnly' is enabled. Make sure your file names are unique (e.g. using " +
"UUID), otherwise, files with the same name but under different paths will be considered " +
"the same and causes data lost.")
}
/** A mapping from a file that we have processed to some timestamp it was last modified. */
// Visible for testing and debugging in production.
val seenFiles = new SeenFilesMap(maxFileAgeMs, fileNameOnly)
metadataLog.allFiles().foreach { entry =>
seenFiles.add(entry.path, entry.timestamp)
}
seenFiles.purge()
logInfo(s"maxFilesPerBatch = $maxFilesPerBatch, maxFileAgeMs = $maxFileAgeMs")
private var unreadFiles: Seq[(String, Long)] = _
/**
* Returns the maximum offset that can be retrieved from the source.
*
* `synchronized` on this method is for solving race conditions in tests. In the normal usage,
* there is no race here, so the cost of `synchronized` should be rare.
*/
private def fetchMaxOffset(limit: ReadLimit): FileStreamSourceOffset = synchronized {
val newFiles = if (unreadFiles != null) {
logDebug(s"Reading from unread files - ${unreadFiles.size} files are available.")
unreadFiles
} else {
// All the new files found - ignore aged files and files that we have seen.
fetchAllFiles().filter {
case (path, timestamp) => seenFiles.isNewFile(path, timestamp)
}
}
// Obey user's setting to limit the number of files in this batch trigger.
val (batchFiles, unselectedFiles) = limit match {
case files: ReadMaxFiles if !sourceOptions.latestFirst =>
// we can cache and reuse remaining fetched list of files in further batches
val (bFiles, usFiles) = newFiles.splitAt(files.maxFiles())
if (usFiles.size < files.maxFiles() * DISCARD_UNSEEN_FILES_RATIO) {
          // Discard unselected files if the number of files is smaller than the threshold.
          // This is to avoid the case where the next batch would have too few files to read
          // even though new files are available.
logTrace(s"Discarding ${usFiles.length} unread files as it's smaller than threshold.")
(bFiles, null)
} else {
(bFiles, usFiles)
}
case files: ReadMaxFiles =>
// implies "sourceOptions.latestFirst = true" which we want to refresh the list per batch
(newFiles.take(files.maxFiles()), null)
case _: ReadAllAvailable => (newFiles, null)
}
if (unselectedFiles != null && unselectedFiles.nonEmpty) {
logTrace(s"Taking first $MAX_CACHED_UNSEEN_FILES unread files.")
unreadFiles = unselectedFiles.take(MAX_CACHED_UNSEEN_FILES)
logTrace(s"${unreadFiles.size} unread files are available for further batches.")
} else {
unreadFiles = null
logTrace(s"No unread file is available for further batches.")
}
batchFiles.foreach { file =>
seenFiles.add(file._1, file._2)
logDebug(s"New file: $file")
}
val numPurged = seenFiles.purge()
logTrace(
s"""
|Number of new files = ${newFiles.size}
|Number of files selected for batch = ${batchFiles.size}
|Number of unread files = ${Option(unreadFiles).map(_.size).getOrElse(0)}
|Number of seen files = ${seenFiles.size}
|Number of files purged from tracking map = $numPurged
""".stripMargin)
if (batchFiles.nonEmpty) {
metadataLogCurrentOffset += 1
val fileEntries = batchFiles.map { case (p, timestamp) =>
FileEntry(path = p, timestamp = timestamp, batchId = metadataLogCurrentOffset)
}.toArray
if (metadataLog.add(metadataLogCurrentOffset, fileEntries)) {
logInfo(s"Log offset set to $metadataLogCurrentOffset with ${batchFiles.size} new files")
} else {
throw new IllegalStateException("Concurrent update to the log. Multiple streaming jobs " +
s"detected for $metadataLogCurrentOffset")
}
}
FileStreamSourceOffset(metadataLogCurrentOffset)
}
override def getDefaultReadLimit: ReadLimit = {
maxFilesPerBatch.map(ReadLimit.maxFiles).getOrElse(super.getDefaultReadLimit)
}
/**
* For test only. Run `func` with the internal lock to make sure when `func` is running,
* the current offset won't be changed and no new batch will be emitted.
*/
def withBatchingLocked[T](func: => T): T = synchronized {
func
}
/** Return the latest offset in the [[FileStreamSourceLog]] */
def currentLogOffset: Long = synchronized { metadataLogCurrentOffset }
/**
* Returns the data that is between the offsets (`start`, `end`].
*/
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
val startOffset = start.map(FileStreamSourceOffset(_).logOffset).getOrElse(-1L)
val endOffset = FileStreamSourceOffset(end).logOffset
assert(startOffset <= endOffset)
val files = metadataLog.get(Some(startOffset + 1), Some(endOffset)).flatMap(_._2)
logInfo(s"Processing ${files.length} files from ${startOffset + 1}:$endOffset")
logTrace(s"Files are:\n\t" + files.mkString("\n\t"))
val newDataSource =
DataSource(
sparkSession,
paths = files.map(f => new Path(new URI(f.path)).toString),
userSpecifiedSchema = Some(schema),
partitionColumns = partitionColumns,
className = fileFormatClassName,
options = optionsWithPartitionBasePath)
Dataset.ofRows(sparkSession, LogicalRelation(newDataSource.resolveRelation(
checkFilesExist = false), isStreaming = true))
}
/**
* If the source has a metadata log indicating which files should be read, then we should use it.
* Only when user gives a non-glob path that will we figure out whether the source has some
* metadata log
*
* None means we don't know at the moment
* Some(true) means we know for sure the source DOES have metadata
   * Some(false) means we know for sure the source DOES NOT have metadata
*/
@volatile private[sql] var sourceHasMetadata: Option[Boolean] =
if (SparkHadoopUtil.get.isGlobPath(new Path(path))) Some(false) else None
private def allFilesUsingInMemoryFileIndex() = {
val globbedPaths = SparkHadoopUtil.get.globPathIfNecessary(fs, qualifiedBasePath)
val fileIndex = new InMemoryFileIndex(sparkSession, globbedPaths, options, Some(new StructType))
fileIndex.allFiles()
}
private def allFilesUsingMetadataLogFileIndex() = {
// Note if `sourceHasMetadata` holds, then `qualifiedBasePath` is guaranteed to be a
// non-glob path
new MetadataLogFileIndex(sparkSession, qualifiedBasePath,
CaseInsensitiveMap(options), None).allFiles()
}
private def setSourceHasMetadata(newValue: Option[Boolean]): Unit = newValue match {
case Some(true) =>
if (sourceCleaner.isDefined) {
throw new UnsupportedOperationException("Clean up source files is not supported when" +
" reading from the output directory of FileStreamSink.")
}
sourceHasMetadata = Some(true)
case _ =>
sourceHasMetadata = newValue
}
/**
* Returns a list of files found, sorted by their timestamp.
*/
private def fetchAllFiles(): Seq[(String, Long)] = {
val startTime = System.nanoTime
var allFiles: Seq[FileStatus] = null
sourceHasMetadata match {
case None =>
if (FileStreamSink.hasMetadata(Seq(path), hadoopConf, sparkSession.sessionState.conf)) {
setSourceHasMetadata(Some(true))
allFiles = allFilesUsingMetadataLogFileIndex()
} else {
allFiles = allFilesUsingInMemoryFileIndex()
if (allFiles.isEmpty) {
// we still cannot decide
} else {
// decide what to use for future rounds
// double check whether source has metadata, preventing the extreme corner case that
// metadata log and data files are only generated after the previous
// `FileStreamSink.hasMetadata` check
if (FileStreamSink.hasMetadata(Seq(path), hadoopConf, sparkSession.sessionState.conf)) {
setSourceHasMetadata(Some(true))
allFiles = allFilesUsingMetadataLogFileIndex()
} else {
setSourceHasMetadata(Some(false))
// `allFiles` have already been fetched using InMemoryFileIndex in this round
}
}
}
case Some(true) => allFiles = allFilesUsingMetadataLogFileIndex()
case Some(false) => allFiles = allFilesUsingInMemoryFileIndex()
}
val files = allFiles.sortBy(_.getModificationTime)(fileSortOrder).map { status =>
(status.getPath.toUri.toString, status.getModificationTime)
}
val endTime = System.nanoTime
val listingTimeMs = NANOSECONDS.toMillis(endTime - startTime)
if (listingTimeMs > 2000) {
// Output a warning when listing files uses more than 2 seconds.
logWarning(s"Listed ${files.size} file(s) in $listingTimeMs ms")
} else {
logTrace(s"Listed ${files.size} file(s) in $listingTimeMs ms")
}
logTrace(s"Files are:\n\t" + files.mkString("\n\t"))
files
}
override def getOffset: Option[Offset] = {
throw new UnsupportedOperationException(
"latestOffset(Offset, ReadLimit) should be called instead of this method")
}
override def latestOffset(startOffset: streaming.Offset, limit: ReadLimit): streaming.Offset = {
Some(fetchMaxOffset(limit)).filterNot(_.logOffset == -1).orNull
}
override def toString: String = s"FileStreamSource[$qualifiedBasePath]"
/**
* Informs the source that Spark has completed processing all data for offsets less than or
* equal to `end` and will only request offsets greater than `end` in the future.
*/
override def commit(end: Offset): Unit = {
val logOffset = FileStreamSourceOffset(end).logOffset
sourceCleaner.foreach { cleaner =>
val files = metadataLog.get(Some(logOffset), Some(logOffset)).flatMap(_._2)
val validFileEntities = files.filter(_.batchId == logOffset)
logDebug(s"completed file entries: ${validFileEntities.mkString(",")}")
validFileEntities.foreach(cleaner.clean)
}
}
override def stop(): Unit = sourceCleaner.foreach(_.stop())
}
object FileStreamSource {
/** Timestamp for file modification time, in ms since January 1, 1970 UTC. */
type Timestamp = Long
val DISCARD_UNSEEN_FILES_RATIO = 0.2
val MAX_CACHED_UNSEEN_FILES = 10000
case class FileEntry(path: String, timestamp: Timestamp, batchId: Long) extends Serializable
/**
* A custom hash map used to track the list of files seen. This map is not thread-safe.
*
* To prevent the hash map from growing indefinitely, a purge function is available to
* remove files "maxAgeMs" older than the latest file.
*/
class SeenFilesMap(maxAgeMs: Long, fileNameOnly: Boolean) {
require(maxAgeMs >= 0)
/** Mapping from file to its timestamp. */
private val map = new java.util.HashMap[String, Timestamp]
/** Timestamp of the latest file. */
private var latestTimestamp: Timestamp = 0L
/** Timestamp for the last purge operation. */
private var lastPurgeTimestamp: Timestamp = 0L
@inline private def stripPathIfNecessary(path: String) = {
if (fileNameOnly) new Path(new URI(path)).getName else path
}
/** Add a new file to the map. */
def add(path: String, timestamp: Timestamp): Unit = {
map.put(stripPathIfNecessary(path), timestamp)
if (timestamp > latestTimestamp) {
latestTimestamp = timestamp
}
}
/**
* Returns true if we should consider this file a new file. The file is only considered "new"
* if it is new enough that we are still tracking, and we have not seen it before.
*/
def isNewFile(path: String, timestamp: Timestamp): Boolean = {
// Note that we are testing against lastPurgeTimestamp here so we'd never miss a file that
// is older than (latestTimestamp - maxAgeMs) but has not been purged yet.
timestamp >= lastPurgeTimestamp && !map.containsKey(stripPathIfNecessary(path))
}
/** Removes aged entries and returns the number of files removed. */
def purge(): Int = {
lastPurgeTimestamp = latestTimestamp - maxAgeMs
val iter = map.entrySet().iterator()
var count = 0
while (iter.hasNext) {
val entry = iter.next()
if (entry.getValue < lastPurgeTimestamp) {
count += 1
iter.remove()
}
}
count
}
def size: Int = map.size()
}
private[sql] abstract class FileStreamSourceCleaner extends Logging {
private val cleanThreadPool: Option[ThreadPoolExecutor] = {
val numThreads = SQLConf.get.getConf(SQLConf.FILE_SOURCE_CLEANER_NUM_THREADS)
if (numThreads > 0) {
logDebug(s"Cleaning file source on $numThreads separate thread(s)")
Some(ThreadUtils.newDaemonCachedThreadPool("file-source-cleaner-threadpool", numThreads))
} else {
logDebug("Cleaning file source on main thread")
None
}
}
def stop(): Unit = cleanThreadPool.foreach(ThreadUtils.shutdown(_))
def clean(entry: FileEntry): Unit = {
cleanThreadPool match {
case Some(p) =>
p.submit(new Runnable {
override def run(): Unit = {
cleanTask(entry)
}
})
case None =>
cleanTask(entry)
}
}
protected def cleanTask(entry: FileEntry): Unit
}
private[sql] object FileStreamSourceCleaner {
def apply(
fileSystem: FileSystem,
sourcePath: Path,
option: FileStreamOptions,
hadoopConf: Configuration): Option[FileStreamSourceCleaner] = option.cleanSource match {
case CleanSourceMode.ARCHIVE =>
require(option.sourceArchiveDir.isDefined)
val path = new Path(option.sourceArchiveDir.get)
val archiveFs = path.getFileSystem(hadoopConf)
val qualifiedArchivePath = archiveFs.makeQualified(path)
Some(new SourceFileArchiver(fileSystem, sourcePath, archiveFs, qualifiedArchivePath))
case CleanSourceMode.DELETE =>
Some(new SourceFileRemover(fileSystem))
case _ => None
}
}
private[sql] class SourceFileArchiver(
fileSystem: FileSystem,
sourcePath: Path,
baseArchiveFileSystem: FileSystem,
baseArchivePath: Path) extends FileStreamSourceCleaner with Logging {
assertParameters()
private def assertParameters(): Unit = {
require(fileSystem.getUri == baseArchiveFileSystem.getUri, "Base archive path is located " +
s"on a different file system than the source files. source path: $sourcePath" +
s" / base archive path: $baseArchivePath")
require(!isBaseArchivePathMatchedAgainstSourcePattern, "Base archive path cannot be set to" +
" the path where archived path can possibly match with source pattern. Ensure the base " +
"archive path doesn't match with source pattern in depth, where the depth is minimum of" +
" depth on both paths.")
}
private def getAncestorEnsuringDepth(path: Path, depth: Int): Path = {
var newPath = path
while (newPath.depth() > depth) {
newPath = newPath.getParent
}
newPath
}
private def isBaseArchivePathMatchedAgainstSourcePattern: Boolean = {
// We should disallow end users to set base archive path which path matches against source
      // pattern to avoid checking each source file. There are a couple of cases which allow
// FileStreamSource to read any depth of subdirectory under the source pattern, so we should
// consider all three cases 1) both has same depth 2) base archive path is longer than source
// pattern 3) source pattern is longer than base archive path. To handle all cases, we take
// min of depth for both paths, and check the match.
val minDepth = math.min(sourcePath.depth(), baseArchivePath.depth())
val sourcePathMinDepth = getAncestorEnsuringDepth(sourcePath, minDepth)
val baseArchivePathMinDepth = getAncestorEnsuringDepth(baseArchivePath, minDepth)
val sourceGlobFilters: Seq[GlobFilter] = buildSourceGlobFilters(sourcePathMinDepth)
var matched = true
// pathToCompare should have same depth as sourceGlobFilters.length
var pathToCompare = baseArchivePathMinDepth
var index = 0
do {
// GlobFilter only matches against its name, not full path so it's safe to compare
if (!sourceGlobFilters(index).accept(pathToCompare)) {
matched = false
} else {
pathToCompare = pathToCompare.getParent
index += 1
}
} while (matched && !pathToCompare.isRoot)
matched
}
private def buildSourceGlobFilters(sourcePath: Path): Seq[GlobFilter] = {
val filters = new scala.collection.mutable.ArrayBuffer[GlobFilter]()
var currentPath = sourcePath
while (!currentPath.isRoot) {
filters += new GlobFilter(currentPath.getName)
currentPath = currentPath.getParent
}
filters.toSeq
}
override protected def cleanTask(entry: FileEntry): Unit = {
val curPath = new Path(new URI(entry.path))
val newPath = new Path(baseArchivePath.toString.stripSuffix("/") + curPath.toUri.getPath)
try {
logDebug(s"Creating directory if it doesn't exist ${newPath.getParent}")
if (!fileSystem.exists(newPath.getParent)) {
fileSystem.mkdirs(newPath.getParent)
}
logDebug(s"Archiving completed file $curPath to $newPath")
if (!fileSystem.rename(curPath, newPath)) {
logWarning(s"Fail to move $curPath to $newPath / skip moving file.")
}
} catch {
case NonFatal(e) =>
logWarning(s"Fail to move $curPath to $newPath / skip moving file.", e)
}
}
}
private[sql] class SourceFileRemover(fileSystem: FileSystem)
extends FileStreamSourceCleaner with Logging {
override protected def cleanTask(entry: FileEntry): Unit = {
val curPath = new Path(new URI(entry.path))
try {
logDebug(s"Removing completed file $curPath")
if (!fileSystem.delete(curPath, false)) {
logWarning(s"Failed to remove $curPath / skip removing file.")
}
} catch {
case NonFatal(e) =>
// Log to error but swallow exception to avoid process being stopped
logWarning(s"Fail to remove $curPath / skip removing file.", e)
}
}
}
}
| shuangshuangwang/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/FileStreamSource.scala | Scala | apache-2.0 | 22,346 |
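// For context, a sketch (not from the original sources) of how FileStreamSource is driven
// from the public DataStreamReader API. The paths are placeholders; the option keys
// correspond to the settings parsed by FileStreamOptions (maxFilesPerTrigger, latestFirst,
// cleanSource, sourceArchiveDir).
import org.apache.spark.sql.SparkSession
object FileStreamSourceExample {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder().appName("file-stream-example").getOrCreate()
    val lines = spark.readStream
      .format("text")
      .option("maxFilesPerTrigger", "10")  // cap files per micro-batch (ReadMaxFiles limit)
      .option("latestFirst", "false")      // process oldest files first
      .option("cleanSource", "archive")    // hand completed files to SourceFileArchiver
      .option("sourceArchiveDir", "/data/archive")
      .load("/data/incoming")
    val query = lines.writeStream
      .format("console")
      .option("checkpointLocation", "/data/checkpoints/file-stream-example")
      .start()
    query.awaitTermination()
  }
}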
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO2
package com.google.protobuf.descriptor
import _root_.scalapb.internal.compat.JavaConverters._
/** @param deprecated
* Is this enum value deprecated?
* Depending on the target platform, this can emit Deprecated annotations
* for the enum value, or it will be completely ignored; in the very least,
* this is a formalization for deprecating enum values.
* @param uninterpretedOption
* The parser stores options it doesn't recognize here. See above.
*/
@SerialVersionUID(0L)
final case class EnumValueOptions(
deprecated: _root_.scala.Option[_root_.scala.Boolean] = _root_.scala.None,
uninterpretedOption: _root_.scala.Seq[com.google.protobuf.descriptor.UninterpretedOption] = _root_.scala.Seq.empty,
unknownFields: _root_.scalapb.UnknownFieldSet = _root_.scalapb.UnknownFieldSet.empty
) extends scalapb.GeneratedMessage with scalapb.lenses.Updatable[EnumValueOptions] with _root_.scalapb.ExtendableMessage[EnumValueOptions] {
@transient
private[this] var __serializedSizeMemoized: _root_.scala.Int = 0
private[this] def __computeSerializedSize(): _root_.scala.Int = {
var __size = 0
if (deprecated.isDefined) {
val __value = deprecated.get
__size += _root_.com.google.protobuf.CodedOutputStream.computeBoolSize(1, __value)
};
uninterpretedOption.foreach { __item =>
val __value = __item
__size += 2 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
}
__size += unknownFields.serializedSize
__size
}
override def serializedSize: _root_.scala.Int = {
var __size = __serializedSizeMemoized
if (__size == 0) {
__size = __computeSerializedSize() + 1
__serializedSizeMemoized = __size
}
__size - 1
}
def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
deprecated.foreach { __v =>
val __m = __v
_output__.writeBool(1, __m)
};
uninterpretedOption.foreach { __v =>
val __m = __v
_output__.writeTag(999, 2)
_output__.writeUInt32NoTag(__m.serializedSize)
__m.writeTo(_output__)
};
unknownFields.writeTo(_output__)
}
def getDeprecated: _root_.scala.Boolean = deprecated.getOrElse(false)
def clearDeprecated: EnumValueOptions = copy(deprecated = _root_.scala.None)
def withDeprecated(__v: _root_.scala.Boolean): EnumValueOptions = copy(deprecated = Option(__v))
def clearUninterpretedOption = copy(uninterpretedOption = _root_.scala.Seq.empty)
def addUninterpretedOption(__vs: com.google.protobuf.descriptor.UninterpretedOption *): EnumValueOptions = addAllUninterpretedOption(__vs)
def addAllUninterpretedOption(__vs: Iterable[com.google.protobuf.descriptor.UninterpretedOption]): EnumValueOptions = copy(uninterpretedOption = uninterpretedOption ++ __vs)
def withUninterpretedOption(__v: _root_.scala.Seq[com.google.protobuf.descriptor.UninterpretedOption]): EnumValueOptions = copy(uninterpretedOption = __v)
def withUnknownFields(__v: _root_.scalapb.UnknownFieldSet) = copy(unknownFields = __v)
def discardUnknownFields = copy(unknownFields = _root_.scalapb.UnknownFieldSet.empty)
def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
(__fieldNumber: @_root_.scala.unchecked) match {
case 1 => deprecated.orNull
case 999 => uninterpretedOption
}
}
def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = {
_root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
(__field.number: @_root_.scala.unchecked) match {
case 1 => deprecated.map(_root_.scalapb.descriptors.PBoolean(_)).getOrElse(_root_.scalapb.descriptors.PEmpty)
case 999 => _root_.scalapb.descriptors.PRepeated(uninterpretedOption.iterator.map(_.toPMessage).toVector)
}
}
def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this)
def companion: com.google.protobuf.descriptor.EnumValueOptions.type = com.google.protobuf.descriptor.EnumValueOptions
// @@protoc_insertion_point(GeneratedMessage[google.protobuf.EnumValueOptions])
}
object EnumValueOptions extends scalapb.GeneratedMessageCompanion[com.google.protobuf.descriptor.EnumValueOptions] with scalapb.JavaProtoSupport[com.google.protobuf.descriptor.EnumValueOptions, com.google.protobuf.DescriptorProtos.EnumValueOptions] {
implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.google.protobuf.descriptor.EnumValueOptions] with scalapb.JavaProtoSupport[com.google.protobuf.descriptor.EnumValueOptions, com.google.protobuf.DescriptorProtos.EnumValueOptions] = this
def toJavaProto(scalaPbSource: com.google.protobuf.descriptor.EnumValueOptions): com.google.protobuf.DescriptorProtos.EnumValueOptions = {
val javaPbOut = com.google.protobuf.DescriptorProtos.EnumValueOptions.newBuilder
scalaPbSource.deprecated.foreach(javaPbOut.setDeprecated)
javaPbOut.addAllUninterpretedOption(_root_.scalapb.internal.compat.toIterable(scalaPbSource.uninterpretedOption.iterator.map(com.google.protobuf.descriptor.UninterpretedOption.toJavaProto(_))).asJava)
javaPbOut.build
}
def fromJavaProto(javaPbSource: com.google.protobuf.DescriptorProtos.EnumValueOptions): com.google.protobuf.descriptor.EnumValueOptions = com.google.protobuf.descriptor.EnumValueOptions(
deprecated = if (javaPbSource.hasDeprecated) Some(javaPbSource.getDeprecated.booleanValue) else _root_.scala.None,
uninterpretedOption = javaPbSource.getUninterpretedOptionList.asScala.iterator.map(com.google.protobuf.descriptor.UninterpretedOption.fromJavaProto(_)).toSeq
)
def parseFrom(`_input__`: _root_.com.google.protobuf.CodedInputStream): com.google.protobuf.descriptor.EnumValueOptions = {
var __deprecated: _root_.scala.Option[_root_.scala.Boolean] = _root_.scala.None
val __uninterpretedOption: _root_.scala.collection.immutable.VectorBuilder[com.google.protobuf.descriptor.UninterpretedOption] = new _root_.scala.collection.immutable.VectorBuilder[com.google.protobuf.descriptor.UninterpretedOption]
var `_unknownFields__`: _root_.scalapb.UnknownFieldSet.Builder = null
var _done__ = false
while (!_done__) {
val _tag__ = _input__.readTag()
_tag__ match {
case 0 => _done__ = true
case 8 =>
__deprecated = Option(_input__.readBool())
case 7994 =>
__uninterpretedOption += _root_.scalapb.LiteParser.readMessage[com.google.protobuf.descriptor.UninterpretedOption](_input__)
case tag =>
if (_unknownFields__ == null) {
_unknownFields__ = new _root_.scalapb.UnknownFieldSet.Builder()
}
_unknownFields__.parseField(tag, _input__)
}
}
com.google.protobuf.descriptor.EnumValueOptions(
deprecated = __deprecated,
uninterpretedOption = __uninterpretedOption.result(),
unknownFields = if (_unknownFields__ == null) _root_.scalapb.UnknownFieldSet.empty else _unknownFields__.result()
)
}
implicit def messageReads: _root_.scalapb.descriptors.Reads[com.google.protobuf.descriptor.EnumValueOptions] = _root_.scalapb.descriptors.Reads{
case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
_root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage eq scalaDescriptor), "FieldDescriptor does not match message type.")
com.google.protobuf.descriptor.EnumValueOptions(
deprecated = __fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).flatMap(_.as[_root_.scala.Option[_root_.scala.Boolean]]),
uninterpretedOption = __fieldsMap.get(scalaDescriptor.findFieldByNumber(999).get).map(_.as[_root_.scala.Seq[com.google.protobuf.descriptor.UninterpretedOption]]).getOrElse(_root_.scala.Seq.empty)
)
case _ => throw new RuntimeException("Expected PMessage")
}
def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = DescriptorProtoCompanion.javaDescriptor.getMessageTypes().get(15)
def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = DescriptorProtoCompanion.scalaDescriptor.messages(15)
def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = {
var __out: _root_.scalapb.GeneratedMessageCompanion[_] = null
(__number: @_root_.scala.unchecked) match {
case 999 => __out = com.google.protobuf.descriptor.UninterpretedOption
}
__out
}
lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = Seq.empty
def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber)
lazy val defaultInstance = com.google.protobuf.descriptor.EnumValueOptions(
deprecated = _root_.scala.None,
uninterpretedOption = _root_.scala.Seq.empty
)
implicit class EnumValueOptionsLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.descriptor.EnumValueOptions]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.google.protobuf.descriptor.EnumValueOptions](_l) {
def deprecated: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Boolean] = field(_.getDeprecated)((c_, f_) => c_.copy(deprecated = Option(f_)))
def optionalDeprecated: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[_root_.scala.Boolean]] = field(_.deprecated)((c_, f_) => c_.copy(deprecated = f_))
def uninterpretedOption: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[com.google.protobuf.descriptor.UninterpretedOption]] = field(_.uninterpretedOption)((c_, f_) => c_.copy(uninterpretedOption = f_))
}
final val DEPRECATED_FIELD_NUMBER = 1
final val UNINTERPRETED_OPTION_FIELD_NUMBER = 999
def of(
deprecated: _root_.scala.Option[_root_.scala.Boolean],
uninterpretedOption: _root_.scala.Seq[com.google.protobuf.descriptor.UninterpretedOption]
): _root_.com.google.protobuf.descriptor.EnumValueOptions = _root_.com.google.protobuf.descriptor.EnumValueOptions(
deprecated,
uninterpretedOption
)
// @@protoc_insertion_point(GeneratedMessageCompanion[google.protobuf.EnumValueOptions])
}
| scalapb/ScalaPB | scalapb-runtime/src/main/scalajvm/com/google/protobuf/descriptor/EnumValueOptions.scala | Scala | apache-2.0 | 10,468 |
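// A brief sketch (not from the original sources) of round-tripping the generated message
// above through its binary form; it relies only on members visible in the generated code
// plus toByteArray/parseFrom from ScalaPB's GeneratedMessage machinery.
import com.google.protobuf.descriptor.EnumValueOptions
object EnumValueOptionsExample extends App {
  val opts = EnumValueOptions(deprecated = Some(true)) // uninterpretedOption defaults to empty
  val bytes = opts.toByteArray                         // from scalapb.GeneratedMessage
  val parsed = EnumValueOptions.parseFrom(bytes)
  assert(parsed.getDeprecated)                         // getDeprecated falls back to false when unset
  println(parsed.toProtoString)
}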
package com.wixpress.petri.experiments.domain
import org.joda.time.DateTime
import scala.beans.BooleanBeanProperty
import com.fasterxml.jackson.annotation.JsonIgnore
import com.fasterxml.jackson.databind.annotation.JsonDeserialize
/**
* @author: talyag
* @since: 7/3/14
*/
@JsonDeserialize(builder = classOf[ExperimentSnapshotBuilder])
case class ExperimentSnapshot(
key: String,
@BooleanBeanProperty fromSpec: Boolean, //not needed for conductableExperiment
creationDate: DateTime, //not needed for conductableExperiment
description: String, //not needed for conductableExperiment
startDate: DateTime,
endDate: DateTime,
groups: java.util.List[TestGroup],
scope: String,
@BooleanBeanProperty paused: Boolean,
name: String, //not needed for conductableExperiment
creator: String, //not needed for conductableExperiment
@BooleanBeanProperty featureToggle: Boolean,
originalId: Int,
linkedId: Int,
@BooleanBeanProperty persistent: Boolean,
filters: java.util.List[Filter],
@BooleanBeanProperty onlyForLoggedInUsers: Boolean,
comment: String,
updater: String, //not needed for conductableExperiment
conductLimit: Int
) {
//not needed for conductableExperiment
@JsonIgnore
def isValid: Boolean = new FiltersValidator().checkValidity(filters)
}
| drorweiss/petri | wix-petri-core/src/main/java/com/wixpress/petri/experiments/domain/ExperimentSnapshot.scala | Scala | bsd-3-clause | 1,907 |
import sbt._
class BowlerParentProject(info: ProjectInfo) extends DefaultProject(info){
val jsch = "com.jcraft" % "jsch" % "0.1.44"
val specs2 = "org.specs2" % "specs2_2.9.0" % "1.3" % "test"
def specs2Framework = new TestFramework("org.specs2.runner.SpecsFramework")
override def testFrameworks = super.testFrameworks ++ Seq(specs2Framework)
val jschRepo ="Jsch Repo" at "http://jsch.sourceforge.net/maven2/"
val scalaToolsRepo = "Scala-Tools repo" at "http://scala-tools.org/repo-releases/"
} | wfaler/ScalaSSH | project/build/ScalaSshProject.scala | Scala | bsd-3-clause | 514 |
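// For readers on newer sbt versions: a rough build.sbt sketch (not from the original
// repository) expressing the same dependencies and resolvers as the sbt 0.7 project
// definition above, with versions copied verbatim.
name := "ScalaSSH"
libraryDependencies ++= Seq(
  "com.jcraft" % "jsch"         % "0.1.44",
  "org.specs2" % "specs2_2.9.0" % "1.3" % Test
)
resolvers ++= Seq(
  "Jsch Repo"        at "http://jsch.sourceforge.net/maven2/",
  "Scala-Tools repo" at "http://scala-tools.org/repo-releases/"
)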
/*
* Copyright (C) 2011 romain
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.domain.modifier
import org.openmole.core.tools.obj.ClassUtils
import org.openmole.core.workflow.data._
import org.openmole.core.workflow.domain._
import org.openmole.core.workflow.tools.FromContext
import collection.JavaConversions._
import ClassUtils._
import scala.util.Random
object GroupDomain {
def apply[T](domain: Domain[T] with Discrete[T], size: FromContext[Int])(implicit m: Manifest[T]) =
new GroupDomain(domain, size)
}
sealed class GroupDomain[T](val domain: Domain[T] with Discrete[T], val size: FromContext[Int])(implicit m: Manifest[T]) extends Domain[Array[T]] with Discrete[Array[T]] {
override def inputs = domain.inputs
override def iterator(context: Context)(implicit rng: Random): Iterator[Array[T]] =
domain.iterator(context).grouped(size.from(context)).map {
i ⇒ i.toArray
}
}
| ISCPIF/PSEExperiments | openmole-src/openmole/plugins/org.openmole.plugin.domain.modifier/src/main/scala/org/openmole/plugin/domain/modifier/GroupDomain.scala | Scala | agpl-3.0 | 1,571 |
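// A plain-Scala illustration (not from the original sources) of what GroupDomain.iterator
// does once OpenMOLE's Domain/FromContext machinery is stripped away: chunk the wrapped
// discrete iterator with grouped(size) and materialise each chunk as an array.
object GroupDomainSketch extends App {
  val discrete = Iterator.range(0, 10) // stand-in for domain.iterator(context)
  val size = 3                         // stand-in for size.from(context)
  val groups: Iterator[Array[Int]] = discrete.grouped(size).map(_.toArray)
  groups.foreach(g => println(g.mkString("[", ", ", "]")))
  // prints [0, 1, 2], [3, 4, 5], [6, 7, 8], [9]
}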
/*******************************************************************************
Copyright (c) 2013, KAIST, S-Core.
All rights reserved.
Use is subject to license terms.
This distribution may include materials developed by third parties.
******************************************************************************/
package kr.ac.kaist.jsaf.analysis.typing.models.DOMHtml5
import scala.collection.mutable.{Map=>MMap, HashMap=>MHashMap}
import kr.ac.kaist.jsaf.analysis.typing.Semantics
import kr.ac.kaist.jsaf.analysis.typing.domain._
import kr.ac.kaist.jsaf.analysis.typing.domain.{BoolFalse => F, BoolTrue => T}
import kr.ac.kaist.jsaf.analysis.typing.models._
import kr.ac.kaist.jsaf.analysis.cfg.{CFG, CFGExpr}
import kr.ac.kaist.jsaf.analysis.typing.{ControlPoint, Helper, PreHelper}
import kr.ac.kaist.jsaf.analysis.typing.domain.Heap
import kr.ac.kaist.jsaf.analysis.typing.domain.Context
import kr.ac.kaist.jsaf.analysis.typing.models.AbsBuiltinFunc
import kr.ac.kaist.jsaf.analysis.typing.models.AbsConstValue
import scala.Some
import kr.ac.kaist.jsaf.analysis.typing.AddressManager._
// Modeled on Microsoft MSDN
// http://msdn.microsoft.com/en-us/library/windows/apps/hh696634.aspx
// JavaScript Console Commands : non-standard
object Console extends DOM {
private val name = "Console"
  /* predefined locations */
val loc_ins = newSystemRecentLoc(name + "Ins")
val loc_proto = newSystemRecentLoc(name + "Proto")
/* constructor */
private val prop_cons: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(Value(loc_proto), F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
("@hasinstance", AbsConstValue(PropValueNullTop)),
("length", AbsConstValue(PropValue(ObjectValue(Value(AbsNumber.alpha(0)), F, F, F)))),
("prototype", AbsConstValue(PropValue(ObjectValue(Value(loc_proto), F, F, F))))
)
  /* prototype */
private val prop_proto: List[(String, AbsProperty)] = List(
("@class", AbsConstValue(PropValue(AbsString.alpha("Object")))),
("@proto", AbsConstValue(PropValue(ObjectValue(ObjProtoLoc, F, F, F)))),
("@extensible", AbsConstValue(PropValue(BoolTrue))),
// API
("assert", AbsBuiltinFunc("Console.assert", 2)),
("clear", AbsBuiltinFunc("Console.clear", 0)),
("dir", AbsBuiltinFunc("Console.dir", 1)),
("error", AbsBuiltinFunc("Console.error", 1)),
("info", AbsBuiltinFunc("Console.info", 1)),
("log", AbsBuiltinFunc("Console.log", 1)),
("warn", AbsBuiltinFunc("Console.warn", 1))
)
/* global */
private val prop_global: List[(String, AbsProperty)] = List(
("console", AbsConstValue(PropValue(ObjectValue(loc_ins, T, F, T))))
)
/* no constructor */
/* initial property list */
def getInitList(): List[(Loc, List[(String, AbsProperty)])] = List(
(loc_ins, prop_cons), (loc_proto, prop_proto), (GlobalLoc, prop_global)
)
def getSemanticMap(): Map[String, SemanticFun] = {
Map(
("Console.assert" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
((Helper.ReturnStore(h, Value(UndefTop)), ctx), (he, ctxe))
}
)),
("Console.clear" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
((Helper.ReturnStore(h, Value(UndefTop)), ctx), (he, ctxe))
}
)),
("Console.dir" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
((Helper.ReturnStore(h, Value(UndefTop)), ctx), (he, ctxe))
}
)),
("Console.error" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
((Helper.ReturnStore(h, Value(UndefTop)), ctx), (he, ctxe))
}
)),
("Console.info" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
((Helper.ReturnStore(h, Value(UndefTop)), ctx), (he, ctxe))
}
)),
("Console.log" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
((Helper.ReturnStore(h, Value(UndefTop)), ctx), (he, ctxe))
}
)),
("Console.warn" -> (
(sem: Semantics, h: Heap, ctx: Context, he: Heap, ctxe: Context, cp: ControlPoint, cfg: CFG, fun: String, args: CFGExpr) => {
((Helper.ReturnStore(h, Value(UndefTop)), ctx), (he, ctxe))
}
))
)
}
def getPreSemanticMap(): Map[String, SemanticFun] = {
Map(
)
}
def getDefMap(): Map[String, AccessFun] = {
Map(
)
}
def getUseMap(): Map[String, AccessFun] = {
Map(
)
}
/* instance */
def getInstance(): Option[Loc] = Some (loc_ins)
}
| darkrsw/safe | src/main/scala/kr/ac/kaist/jsaf/analysis/typing/models/DOMObject/Console.scala | Scala | bsd-3-clause | 5,246 |
// Copyright: 2010 - 2017 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.util
import Predef.{ any2stringadd => _, _ => _ }
import org.scalatest._
class MapSpec extends FlatSpec with Matchers {
import map._
val as = Map(
1 -> Set('a, 'b, 'c),
2 -> Set('b)
)
val bs = Map(
1 -> Set('c, 'd),
2 -> Set('a),
3 -> Set('e)
)
val merged = Map(
1 -> Set('a, 'b, 'c, 'd),
2 -> Set('a, 'b),
3 -> Set('e)
)
"map._" should "map values eagerly" in {
var count = 0
val mapped = merged.mapValuesEagerly { syms =>
count += 1
syms.head
}
count shouldBe 3 // not lazy
mapped shouldBe Map(1 -> 'a, 2 -> 'a, 3 -> 'e)
}
it should "merge multimap sets" in {
(as merge Map.empty) shouldBe as
(Map.empty[Int, Set[Symbol]] merge as) shouldBe as
(as merge bs) shouldBe merged
}
}
| ensime/ensime-server | util/src/test/scala/org/ensime/util/MapSpec.scala | Scala | gpl-3.0 | 938 |
class Cont[A0](x0: A0) { type A = A0; val x: A = x0 }
object Test {
val c: { type A; val x: A } & { type A = Int } = new Cont(1)
println(c.x : Int) // error: not an instance of Selectable
} | som-snytt/dotty | tests/neg/i2871.scala | Scala | apache-2.0 | 193 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.{Properties, Random}
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import org.mockito.ArgumentMatchers.{any, anyBoolean, anyInt, anyString}
import org.mockito.Mockito._
import org.mockito.invocation.InvocationOnMock
import org.apache.spark._
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config
import org.apache.spark.resource.ResourceUtils._
import org.apache.spark.resource.TestResourceIDs._
import org.apache.spark.serializer.SerializerInstance
import org.apache.spark.storage.BlockManagerId
import org.apache.spark.util.{AccumulatorV2, ManualClock}
class FakeDAGScheduler(sc: SparkContext, taskScheduler: FakeTaskScheduler)
extends DAGScheduler(sc) {
override def taskStarted(task: Task[_], taskInfo: TaskInfo) {
taskScheduler.startedTasks += taskInfo.index
}
override def taskEnded(
task: Task[_],
reason: TaskEndReason,
result: Any,
accumUpdates: Seq[AccumulatorV2[_, _]],
metricPeaks: Array[Long],
taskInfo: TaskInfo) {
taskScheduler.endedTasks(taskInfo.index) = reason
}
override def executorAdded(execId: String, host: String) {}
override def executorLost(execId: String, reason: ExecutorLossReason) {}
override def taskSetFailed(
taskSet: TaskSet,
reason: String,
exception: Option[Throwable]): Unit = {
taskScheduler.taskSetsFailed += taskSet.id
}
override def speculativeTaskSubmitted(task: Task[_]): Unit = {
taskScheduler.speculativeTasks += task.partitionId
}
}
// Get the rack for a given host
object FakeRackUtil {
private val hostToRack = new mutable.HashMap[String, String]()
var numBatchInvocation = 0
var numSingleHostInvocation = 0
def cleanUp() {
hostToRack.clear()
numBatchInvocation = 0
numSingleHostInvocation = 0
}
def assignHostToRack(host: String, rack: String) {
hostToRack(host) = rack
}
def getRacksForHosts(hosts: Seq[String]): Seq[Option[String]] = {
assert(hosts.toSet.size == hosts.size) // no dups in hosts
if (hosts.nonEmpty && hosts.length != 1) {
numBatchInvocation += 1
} else if (hosts.length == 1) {
numSingleHostInvocation += 1
}
hosts.map(hostToRack.get(_))
}
}
/**
* A mock TaskSchedulerImpl implementation that just remembers information about tasks started and
* feedback received from the TaskSetManagers. Note that it's important to initialize this with
* a list of "live" executors and their hostnames for isExecutorAlive and hasExecutorsAliveOnHost
* to work, and these are required for locality in TaskSetManager.
*/
class FakeTaskScheduler(sc: SparkContext, liveExecutors: (String, String)* /* execId, host */)
extends TaskSchedulerImpl(sc)
{
val startedTasks = new ArrayBuffer[Long]
val endedTasks = new mutable.HashMap[Long, TaskEndReason]
val finishedManagers = new ArrayBuffer[TaskSetManager]
val taskSetsFailed = new ArrayBuffer[String]
val speculativeTasks = new ArrayBuffer[Int]
val executors = new mutable.HashMap[String, String]
// this must be initialized before addExecutor
override val defaultRackValue: Option[String] = Some("default")
for ((execId, host) <- liveExecutors) {
addExecutor(execId, host)
}
for ((execId, host) <- liveExecutors; rack <- getRackForHost(host)) {
hostsByRack.getOrElseUpdate(rack, new mutable.HashSet[String]()) += host
}
dagScheduler = new FakeDAGScheduler(sc, this)
def removeExecutor(execId: String) {
executors -= execId
val host = executorIdToHost.get(execId)
assert(host != None)
val hostId = host.get
val executorsOnHost = hostToExecutors(hostId)
executorsOnHost -= execId
for (rack <- getRackForHost(hostId); hosts <- hostsByRack.get(rack)) {
hosts -= hostId
if (hosts.isEmpty) {
hostsByRack -= rack
}
}
}
override def taskSetFinished(manager: TaskSetManager): Unit = finishedManagers += manager
override def isExecutorAlive(execId: String): Boolean = executors.contains(execId)
override def hasExecutorsAliveOnHost(host: String): Boolean = executors.values.exists(_ == host)
override def hasHostAliveOnRack(rack: String): Boolean = {
hostsByRack.get(rack) != None
}
def addExecutor(execId: String, host: String) {
executors.put(execId, host)
val executorsOnHost = hostToExecutors.getOrElseUpdate(host, new mutable.HashSet[String])
executorsOnHost += execId
executorIdToHost += execId -> host
for (rack <- getRackForHost(host)) {
hostsByRack.getOrElseUpdate(rack, new mutable.HashSet[String]()) += host
}
}
override def getRacksForHosts(hosts: Seq[String]): Seq[Option[String]] = {
FakeRackUtil.getRacksForHosts(hosts)
}
}
/**
* A Task implementation that results in a large serialized task.
*/
class LargeTask(stageId: Int) extends Task[Array[Byte]](stageId, 0, 0) {
val randomBuffer = new Array[Byte](TaskSetManager.TASK_SIZE_TO_WARN_KIB * 1024)
val random = new Random(0)
random.nextBytes(randomBuffer)
override def runTask(context: TaskContext): Array[Byte] = randomBuffer
override def preferredLocations: Seq[TaskLocation] = Seq[TaskLocation]()
}
class TaskSetManagerSuite extends SparkFunSuite with LocalSparkContext with Logging {
import TaskLocality.{ANY, PROCESS_LOCAL, NO_PREF, NODE_LOCAL, RACK_LOCAL}
private val conf = new SparkConf
val LOCALITY_WAIT_MS = conf.get(config.LOCALITY_WAIT)
val MAX_TASK_FAILURES = 4
var sched: FakeTaskScheduler = null
override def beforeEach(): Unit = {
super.beforeEach()
FakeRackUtil.cleanUp()
sched = null
}
override def afterEach(): Unit = {
if (sched != null) {
sched.dagScheduler.stop()
sched.stop()
sched = null
}
super.afterEach()
}
test("TaskSet with no preferences") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = FakeTask.createTaskSet(1)
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
val accumUpdates = taskSet.tasks.head.metrics.internalAccums
// Offer a host with NO_PREF as the constraint,
// we should get a nopref task immediately since that's what we only have
val taskOption = manager.resourceOffer("exec1", "host1", NO_PREF)
assert(taskOption.isDefined)
clock.advance(1)
// Tell it the task has finished
manager.handleSuccessfulTask(0, createTaskResult(0, accumUpdates))
assert(sched.endedTasks(0) === Success)
assert(sched.finishedManagers.contains(manager))
}
test("multiple offers with no preferences") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = FakeTask.createTaskSet(3)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES)
val accumUpdatesByTask: Array[Seq[AccumulatorV2[_, _]]] = taskSet.tasks.map { task =>
task.metrics.internalAccums
}
// First three offers should all find tasks
for (i <- 0 until 3) {
val taskOption = manager.resourceOffer("exec1", "host1", NO_PREF)
assert(taskOption.isDefined)
val task = taskOption.get
assert(task.executorId === "exec1")
}
assert(sched.startedTasks.toSet === Set(0, 1, 2))
// Re-offer the host -- now we should get no more tasks
assert(manager.resourceOffer("exec1", "host1", NO_PREF) === None)
// Finish the first two tasks
manager.handleSuccessfulTask(0, createTaskResult(0, accumUpdatesByTask(0)))
manager.handleSuccessfulTask(1, createTaskResult(1, accumUpdatesByTask(1)))
assert(sched.endedTasks(0) === Success)
assert(sched.endedTasks(1) === Success)
assert(!sched.finishedManagers.contains(manager))
// Finish the last task
manager.handleSuccessfulTask(2, createTaskResult(2, accumUpdatesByTask(2)))
assert(sched.endedTasks(2) === Success)
assert(sched.finishedManagers.contains(manager))
}
test("skip unsatisfiable locality levels") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("execA", "host1"), ("execC", "host2"))
val taskSet = FakeTask.createTaskSet(1, Seq(TaskLocation("host1", "execB")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
// An executor that is not NODE_LOCAL should be rejected.
assert(manager.resourceOffer("execC", "host2", ANY) === None)
// Because there are no alive PROCESS_LOCAL executors, the base locality level should be
// NODE_LOCAL. So, we should schedule the task on this offered NODE_LOCAL executor before
// any of the locality wait timers expire.
assert(manager.resourceOffer("execA", "host1", ANY).get.index === 0)
}
test("basic delay scheduling") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"))
val taskSet = FakeTask.createTaskSet(4,
Seq(TaskLocation("host1", "exec1")),
Seq(TaskLocation("host2", "exec2")),
Seq(TaskLocation("host1"), TaskLocation("host2", "exec2")),
Seq() // Last task has no locality prefs
)
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
// First offer host1, exec1: first task should be chosen
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
assert(manager.resourceOffer("exec1", "host1", PROCESS_LOCAL) == None)
clock.advance(LOCALITY_WAIT_MS)
// Offer host1, exec1 again, at NODE_LOCAL level: the node local (task 3) should
// get chosen before the noPref task
assert(manager.resourceOffer("exec1", "host1", NODE_LOCAL).get.index == 2)
// Offer host2, exec2, at NODE_LOCAL level: we should choose task 2
assert(manager.resourceOffer("exec2", "host2", NODE_LOCAL).get.index == 1)
// Offer host2, exec2 again, at NODE_LOCAL level: we should get noPref task
// after failing to find a node_Local task
assert(manager.resourceOffer("exec2", "host2", NODE_LOCAL) == None)
clock.advance(LOCALITY_WAIT_MS)
assert(manager.resourceOffer("exec2", "host2", NO_PREF).get.index == 3)
}
test("we do not need to delay scheduling when we only have noPref tasks in the queue") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec3", "host2"))
val taskSet = FakeTask.createTaskSet(3,
Seq(TaskLocation("host1", "exec1")),
Seq(TaskLocation("host2", "exec3")),
Seq() // Last task has no locality prefs
)
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
// First offer host1, exec1: first task should be chosen
assert(manager.resourceOffer("exec1", "host1", PROCESS_LOCAL).get.index === 0)
assert(manager.resourceOffer("exec3", "host2", PROCESS_LOCAL).get.index === 1)
assert(manager.resourceOffer("exec3", "host2", NODE_LOCAL) == None)
assert(manager.resourceOffer("exec3", "host2", NO_PREF).get.index === 2)
}
test("delay scheduling with fallback") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc,
("exec1", "host1"), ("exec2", "host2"), ("exec3", "host3"))
val taskSet = FakeTask.createTaskSet(5,
Seq(TaskLocation("host1")),
Seq(TaskLocation("host2")),
Seq(TaskLocation("host2")),
Seq(TaskLocation("host3")),
Seq(TaskLocation("host2"))
)
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
// First offer host1: first task should be chosen
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
// Offer host1 again: nothing should get chosen
assert(manager.resourceOffer("exec1", "host1", ANY) === None)
clock.advance(LOCALITY_WAIT_MS)
// Offer host1 again: second task (on host2) should get chosen
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 1)
// Offer host1 again: third task (on host2) should get chosen
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 2)
// Offer host2: fifth task (also on host2) should get chosen
assert(manager.resourceOffer("exec2", "host2", ANY).get.index === 4)
// Now that we've launched a local task, we should no longer launch the task for host3
assert(manager.resourceOffer("exec2", "host2", ANY) === None)
clock.advance(LOCALITY_WAIT_MS)
// After another delay, we can go ahead and launch that task non-locally
assert(manager.resourceOffer("exec2", "host2", ANY).get.index === 3)
}
test("delay scheduling with failed hosts") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"),
("exec3", "host3"))
val taskSet = FakeTask.createTaskSet(3,
Seq(TaskLocation("host1")),
Seq(TaskLocation("host2")),
Seq(TaskLocation("host3"))
)
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
// First offer host1: first task should be chosen
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
    // After this, nothing should get chosen, because tasks whose preferred locations are
    // currently unavailable are kept separate from the noPref pending tasks
assert(manager.resourceOffer("exec1", "host1", ANY) === None)
// Now mark host2 as dead
sched.removeExecutor("exec2")
manager.executorLost("exec2", "host2", SlaveLost())
// nothing should be chosen
assert(manager.resourceOffer("exec1", "host1", ANY) === None)
clock.advance(LOCALITY_WAIT_MS * 2)
    // Tasks 1 and 2 should now be scheduled as non-local tasks
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 1)
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 2)
// all finished
assert(manager.resourceOffer("exec1", "host1", ANY) === None)
assert(manager.resourceOffer("exec2", "host2", ANY) === None)
}
test("task result lost") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = FakeTask.createTaskSet(1)
val clock = new ManualClock
clock.advance(1)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
// Tell it the task has finished but the result was lost.
manager.handleFailedTask(0, TaskState.FINISHED, TaskResultLost)
assert(sched.endedTasks(0) === TaskResultLost)
// Re-offer the host -- now we should get task 0 again.
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
}
test("repeated failures lead to task set abortion") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = FakeTask.createTaskSet(1)
val clock = new ManualClock
clock.advance(1)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
// Fail the task MAX_TASK_FAILURES times, and check that the task set is aborted
// after the last failure.
(1 to manager.maxTaskFailures).foreach { index =>
val offerResult = manager.resourceOffer("exec1", "host1", ANY)
assert(offerResult.isDefined,
"Expect resource offer on iteration %s to return a task".format(index))
assert(offerResult.get.index === 0)
manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost)
if (index < MAX_TASK_FAILURES) {
assert(!sched.taskSetsFailed.contains(taskSet.id))
} else {
assert(sched.taskSetsFailed.contains(taskSet.id))
}
}
}
test("executors should be blacklisted after task failure, in spite of locality preferences") {
val rescheduleDelay = 300L
val conf = new SparkConf().
set(config.BLACKLIST_ENABLED, true).
set(config.BLACKLIST_TIMEOUT_CONF, rescheduleDelay).
// don't wait to jump locality levels in this test
set(config.LOCALITY_WAIT.key, "0")
sc = new SparkContext("local", "test", conf)
    // Two executors on the same host, and one on a different host.
sched = new FakeTaskScheduler(sc, ("exec1", "host1"),
("exec1.1", "host1"), ("exec2", "host2"))
// affinity to exec1 on host1 - which we will fail.
val taskSet = FakeTask.createTaskSet(1, Seq(TaskLocation("host1", "exec1")))
val clock = new ManualClock
clock.advance(1)
// We don't directly use the application blacklist, but its presence triggers blacklisting
// within the taskset.
val mockListenerBus = mock(classOf[LiveListenerBus])
val blacklistTrackerOpt = Some(new BlacklistTracker(mockListenerBus, conf, None, clock))
val manager = new TaskSetManager(sched, taskSet, 4, blacklistTrackerOpt, clock)
{
val offerResult = manager.resourceOffer("exec1", "host1", PROCESS_LOCAL)
assert(offerResult.isDefined, "Expect resource offer to return a task")
assert(offerResult.get.index === 0)
assert(offerResult.get.executorId === "exec1")
// Cause exec1 to fail : failure 1
manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost)
assert(!sched.taskSetsFailed.contains(taskSet.id))
// Ensure scheduling on exec1 fails after failure 1 due to blacklist
assert(manager.resourceOffer("exec1", "host1", PROCESS_LOCAL).isEmpty)
assert(manager.resourceOffer("exec1", "host1", NODE_LOCAL).isEmpty)
assert(manager.resourceOffer("exec1", "host1", RACK_LOCAL).isEmpty)
assert(manager.resourceOffer("exec1", "host1", ANY).isEmpty)
}
// Run the task on exec1.1 - should work, and then fail it on exec1.1
{
val offerResult = manager.resourceOffer("exec1.1", "host1", NODE_LOCAL)
assert(offerResult.isDefined,
"Expect resource offer to return a task for exec1.1, offerResult = " + offerResult)
assert(offerResult.get.index === 0)
assert(offerResult.get.executorId === "exec1.1")
// Cause exec1.1 to fail : failure 2
manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost)
assert(!sched.taskSetsFailed.contains(taskSet.id))
// Ensure scheduling on exec1.1 fails after failure 2 due to blacklist
assert(manager.resourceOffer("exec1.1", "host1", NODE_LOCAL).isEmpty)
}
// Run the task on exec2 - should work, and then fail it on exec2
{
val offerResult = manager.resourceOffer("exec2", "host2", ANY)
assert(offerResult.isDefined, "Expect resource offer to return a task")
assert(offerResult.get.index === 0)
assert(offerResult.get.executorId === "exec2")
// Cause exec2 to fail : failure 3
manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost)
assert(!sched.taskSetsFailed.contains(taskSet.id))
// Ensure scheduling on exec2 fails after failure 3 due to blacklist
assert(manager.resourceOffer("exec2", "host2", ANY).isEmpty)
}
    // Despite advancing beyond the timeout for expiring executors from the application-level
    // blacklist, we *never* expire executors from the stage-level (taskset) blacklist
clock.advance(rescheduleDelay)
{
val offerResult = manager.resourceOffer("exec1", "host1", PROCESS_LOCAL)
assert(offerResult.isEmpty)
}
{
val offerResult = manager.resourceOffer("exec3", "host3", ANY)
assert(offerResult.isDefined)
assert(offerResult.get.index === 0)
assert(offerResult.get.executorId === "exec3")
assert(manager.resourceOffer("exec3", "host3", ANY).isEmpty)
// Cause exec3 to fail : failure 4
manager.handleFailedTask(offerResult.get.taskId, TaskState.FINISHED, TaskResultLost)
}
    // We have now failed the same task 4 times: the task set id should be in taskSetsFailed
assert(sched.taskSetsFailed.contains(taskSet.id))
}
test("new executors get added and lost") {
// Assign host2 to rack2
FakeRackUtil.assignHostToRack("host2", "rack2")
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc)
val taskSet = FakeTask.createTaskSet(4,
Seq(TaskLocation("host1", "execA")),
Seq(TaskLocation("host1", "execB")),
Seq(TaskLocation("host2", "execC")),
Seq())
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
    // With no executors registered yet, only NO_PREF and ANY are valid locality levels
assert(manager.myLocalityLevels.sameElements(Array(NO_PREF, ANY)))
// Add a new executor
sched.addExecutor("execD", "host1")
manager.executorAdded()
    // Valid locality should contain NODE_LOCAL, NO_PREF and ANY
assert(manager.myLocalityLevels.sameElements(Array(NODE_LOCAL, NO_PREF, ANY)))
// Add another executor
sched.addExecutor("execC", "host2")
manager.executorAdded()
    // Valid locality should contain PROCESS_LOCAL, NODE_LOCAL, NO_PREF, RACK_LOCAL and ANY
assert(manager.myLocalityLevels.sameElements(
Array(PROCESS_LOCAL, NODE_LOCAL, NO_PREF, RACK_LOCAL, ANY)))
    // Check that the valid locality levels are recomputed when an executor is lost
sched.removeExecutor("execC")
manager.executorLost("execC", "host2", SlaveLost())
assert(manager.myLocalityLevels.sameElements(Array(NODE_LOCAL, NO_PREF, ANY)))
sched.removeExecutor("execD")
manager.executorLost("execD", "host1", SlaveLost())
assert(manager.myLocalityLevels.sameElements(Array(NO_PREF, ANY)))
}
test("Executors exit for reason unrelated to currently running tasks") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc)
val taskSet = FakeTask.createTaskSet(4,
Seq(TaskLocation("host1", "execA")),
Seq(TaskLocation("host1", "execB")),
Seq(TaskLocation("host2", "execC")),
Seq())
val clock = new ManualClock()
clock.advance(1)
val manager = new TaskSetManager(sched, taskSet, 1, clock = clock)
sched.addExecutor("execA", "host1")
manager.executorAdded()
sched.addExecutor("execC", "host2")
manager.executorAdded()
assert(manager.resourceOffer("exec1", "host1", ANY).isDefined)
sched.removeExecutor("execA")
manager.executorLost(
"execA",
"host1",
ExecutorExited(143, false, "Terminated for reason unrelated to running tasks"))
assert(!sched.taskSetsFailed.contains(taskSet.id))
assert(manager.resourceOffer("execC", "host2", ANY).isDefined)
sched.removeExecutor("execC")
manager.executorLost(
"execC", "host2", ExecutorExited(1, true, "Terminated due to issue with running tasks"))
assert(sched.taskSetsFailed.contains(taskSet.id))
}
test("test RACK_LOCAL tasks") {
// Assign host1 to rack1
FakeRackUtil.assignHostToRack("host1", "rack1")
// Assign host2 to rack1
FakeRackUtil.assignHostToRack("host2", "rack1")
// Assign host3 to rack2
FakeRackUtil.assignHostToRack("host3", "rack2")
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc,
("execA", "host1"), ("execB", "host2"), ("execC", "host3"))
val taskSet = FakeTask.createTaskSet(2,
Seq(TaskLocation("host1", "execA")),
Seq(TaskLocation("host1", "execA")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, RACK_LOCAL, ANY)))
    // Advance the clock so that the allowed locality level reaches ANY
clock.advance(LOCALITY_WAIT_MS * 3)
// Offer host3
// No task is scheduled if we restrict locality to RACK_LOCAL
assert(manager.resourceOffer("execC", "host3", RACK_LOCAL) === None)
// Task 0 can be scheduled with ANY
assert(manager.resourceOffer("execC", "host3", ANY).get.index === 0)
// Offer host2
// Task 1 can be scheduled with RACK_LOCAL
assert(manager.resourceOffer("execB", "host2", RACK_LOCAL).get.index === 1)
}
test("do not emit warning when serialized task is small") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = FakeTask.createTaskSet(1)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES)
assert(!manager.emittedTaskSizeWarning)
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
assert(!manager.emittedTaskSizeWarning)
}
test("emit warning when serialized task is large") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = new TaskSet(Array(new LargeTask(0)), 0, 0, 0, null)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES)
assert(!manager.emittedTaskSizeWarning)
assert(manager.resourceOffer("exec1", "host1", ANY).get.index === 0)
assert(manager.emittedTaskSizeWarning)
}
test("Not serializable exception thrown if the task cannot be serialized") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = new TaskSet(
Array(new NotSerializableFakeTask(1, 0), new NotSerializableFakeTask(0, 1)), 0, 0, 0, null)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES)
intercept[TaskNotSerializableException] {
manager.resourceOffer("exec1", "host1", ANY)
}
assert(manager.isZombie)
}
test("abort the job if total size of results is too large") {
val conf = new SparkConf().set(config.MAX_RESULT_SIZE.key, "2m")
sc = new SparkContext("local", "test", conf)
def genBytes(size: Int): (Int) => Array[Byte] = { (x: Int) =>
val bytes = Array.ofDim[Byte](size)
scala.util.Random.nextBytes(bytes)
bytes
}
    // multiple 1 KB results
val r = sc.makeRDD(0 until 10, 10).map(genBytes(1024)).collect()
assert(10 === r.size)
    // single 10 MB result
val thrown = intercept[SparkException] {sc.makeRDD(genBytes(10 << 20)(0), 1).collect()}
assert(thrown.getMessage().contains("bigger than spark.driver.maxResultSize"))
    // multiple 1 MB results
val thrown2 = intercept[SparkException] {
sc.makeRDD(0 until 10, 10).map(genBytes(1 << 20)).collect()
}
assert(thrown2.getMessage().contains("bigger than spark.driver.maxResultSize"))
}
test("[SPARK-13931] taskSetManager should not send Resubmitted tasks after being a zombie") {
val conf = new SparkConf().set(config.SPECULATION_ENABLED, true)
sc = new SparkContext("local", "test", conf)
sched = new FakeTaskScheduler(sc, ("execA", "host1"), ("execB", "host2"))
sched.initialize(new FakeSchedulerBackend() {
override def killTask(
taskId: Long,
executorId: String,
interruptThread: Boolean,
reason: String): Unit = {}
})
// Keep track of the number of tasks that are resubmitted,
// so that the test can check that no tasks were resubmitted.
var resubmittedTasks = 0
val dagScheduler = new FakeDAGScheduler(sc, sched) {
override def taskEnded(
task: Task[_],
reason: TaskEndReason,
result: Any,
accumUpdates: Seq[AccumulatorV2[_, _]],
metricPeaks: Array[Long],
taskInfo: TaskInfo): Unit = {
super.taskEnded(task, reason, result, accumUpdates, metricPeaks, taskInfo)
reason match {
case Resubmitted => resubmittedTasks += 1
case _ =>
}
}
}
sched.dagScheduler.stop()
sched.setDAGScheduler(dagScheduler)
val singleTask = new ShuffleMapTask(0, 0, null, new Partition {
override def index: Int = 0
}, Seq(TaskLocation("host1", "execA")), new Properties, null)
val taskSet = new TaskSet(Array(singleTask), 0, 0, 0, null)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES)
// Offer host1, which should be accepted as a PROCESS_LOCAL location
// by the one task in the task set
val task1 = manager.resourceOffer("execA", "host1", TaskLocality.PROCESS_LOCAL).get
// Mark the task as available for speculation, and then offer another resource,
// which should be used to launch a speculative copy of the task.
manager.speculatableTasks += singleTask.partitionId
manager.addPendingTask(singleTask.partitionId, speculatable = true)
val task2 = manager.resourceOffer("execB", "host2", TaskLocality.ANY).get
assert(manager.runningTasks === 2)
assert(manager.isZombie === false)
val directTaskResult = new DirectTaskResult[String](null, Seq(), Array()) {
override def value(resultSer: SerializerInstance): String = ""
}
// Complete one copy of the task, which should result in the task set manager
// being marked as a zombie, because at least one copy of its only task has completed.
manager.handleSuccessfulTask(task1.taskId, directTaskResult)
assert(manager.isZombie)
assert(resubmittedTasks === 0)
assert(manager.runningTasks === 1)
manager.executorLost("execB", "host2", new SlaveLost())
assert(manager.runningTasks === 0)
assert(resubmittedTasks === 0)
}
test("[SPARK-22074] Task killed by other attempt task should not be resubmitted") {
val conf = new SparkConf().set(config.SPECULATION_ENABLED, true)
sc = new SparkContext("local", "test", conf)
// Set the speculation multiplier to be 0 so speculative tasks are launched immediately
sc.conf.set(config.SPECULATION_MULTIPLIER, 0.0)
sc.conf.set(config.SPECULATION_QUANTILE, 0.5)
sc.conf.set(config.SPECULATION_ENABLED, true)
var killTaskCalled = false
sched = new FakeTaskScheduler(sc, ("exec1", "host1"),
("exec2", "host2"), ("exec3", "host3"))
sched.initialize(new FakeSchedulerBackend() {
override def killTask(
taskId: Long,
executorId: String,
interruptThread: Boolean,
reason: String): Unit = {
        // Check that this is the only killTask event in this case, triggered by
        // task 2.1 completing.
assert(taskId === 2)
assert(executorId === "exec3")
assert(interruptThread)
assert(reason === "another attempt succeeded")
killTaskCalled = true
}
})
// Keep track of the number of tasks that are resubmitted,
// so that the test can check that no tasks were resubmitted.
var resubmittedTasks = 0
val dagScheduler = new FakeDAGScheduler(sc, sched) {
override def taskEnded(
task: Task[_],
reason: TaskEndReason,
result: Any,
accumUpdates: Seq[AccumulatorV2[_, _]],
metricPeaks: Array[Long],
taskInfo: TaskInfo): Unit = {
super.taskEnded(task, reason, result, accumUpdates, metricPeaks, taskInfo)
reason match {
case Resubmitted => resubmittedTasks += 1
case _ =>
}
}
}
sched.dagScheduler.stop()
sched.setDAGScheduler(dagScheduler)
val taskSet = FakeTask.createShuffleMapTaskSet(4, 0, 0,
Seq(TaskLocation("host1", "exec1")),
Seq(TaskLocation("host1", "exec1")),
Seq(TaskLocation("host3", "exec3")),
Seq(TaskLocation("host2", "exec2")))
val clock = new ManualClock()
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
val accumUpdatesByTask: Array[Seq[AccumulatorV2[_, _]]] = taskSet.tasks.map { task =>
task.metrics.internalAccums
}
// Offer resources for 4 tasks to start
for ((exec, host) <- Seq(
"exec1" -> "host1",
"exec1" -> "host1",
"exec3" -> "host3",
"exec2" -> "host2")) {
val taskOption = manager.resourceOffer(exec, host, NO_PREF)
assert(taskOption.isDefined)
val task = taskOption.get
assert(task.executorId === exec)
// Add an extra assert to make sure task 2.0 is running on exec3
if (task.index == 2) {
assert(task.attemptNumber === 0)
assert(task.executorId === "exec3")
}
}
assert(sched.startedTasks.toSet === Set(0, 1, 2, 3))
clock.advance(1)
    // Complete 2 tasks and leave 2 tasks running
for (id <- Set(0, 1)) {
manager.handleSuccessfulTask(id, createTaskResult(id, accumUpdatesByTask(id)))
assert(sched.endedTasks(id) === Success)
}
// checkSpeculatableTasks checks that the task runtime is greater than the threshold for
// speculating. Since we use a threshold of 0 for speculation, tasks need to be running for
// > 0ms, so advance the clock by 1ms here.
clock.advance(1)
assert(manager.checkSpeculatableTasks(0))
assert(sched.speculativeTasks.toSet === Set(2, 3))
// Offer resource to start the speculative attempt for the running task 2.0
val taskOption = manager.resourceOffer("exec2", "host2", ANY)
assert(taskOption.isDefined)
val task4 = taskOption.get
assert(task4.index === 2)
assert(task4.taskId === 4)
assert(task4.executorId === "exec2")
assert(task4.attemptNumber === 1)
// Complete the speculative attempt for the running task
manager.handleSuccessfulTask(4, createTaskResult(2, accumUpdatesByTask(2)))
// Make sure schedBackend.killTask(2, "exec3", true, "another attempt succeeded") gets called
assert(killTaskCalled)
    // Host 3 is lost; only task 2.0 was on it, and it was already killed by task 2.1
manager.executorLost("exec3", "host3", SlaveLost())
// Check the resubmittedTasks
assert(resubmittedTasks === 0)
}
test("speculative and noPref task should be scheduled after node-local") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(
sc, ("execA", "host1"), ("execB", "host2"), ("execC", "host3"))
val taskSet = FakeTask.createTaskSet(4,
Seq(TaskLocation("host1", "execA")),
Seq(TaskLocation("host2"), TaskLocation("host1")),
Seq(),
Seq(TaskLocation("host3", "execC")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
assert(manager.resourceOffer("execA", "host1", PROCESS_LOCAL).get.index === 0)
assert(manager.resourceOffer("execA", "host1", NODE_LOCAL) == None)
assert(manager.resourceOffer("execA", "host1", NO_PREF).get.index == 1)
manager.speculatableTasks += 1
manager.addPendingTask(1, speculatable = true)
clock.advance(LOCALITY_WAIT_MS)
    // schedule the noPref task
assert(manager.resourceOffer("execA", "host1", NO_PREF).get.index === 2)
// schedule the speculative task
assert(manager.resourceOffer("execB", "host2", NO_PREF).get.index === 1)
clock.advance(LOCALITY_WAIT_MS * 3)
// schedule non-local tasks
assert(manager.resourceOffer("execB", "host2", ANY).get.index === 3)
}
test("node-local tasks should be scheduled right away " +
"when there are only node-local and no-preference tasks") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(
sc, ("execA", "host1"), ("execB", "host2"), ("execC", "host3"))
val taskSet = FakeTask.createTaskSet(4,
Seq(TaskLocation("host1")),
Seq(TaskLocation("host2")),
Seq(),
Seq(TaskLocation("host3")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
// node-local tasks are scheduled without delay
assert(manager.resourceOffer("execA", "host1", NODE_LOCAL).get.index === 0)
assert(manager.resourceOffer("execA", "host2", NODE_LOCAL).get.index === 1)
assert(manager.resourceOffer("execA", "host3", NODE_LOCAL).get.index === 3)
assert(manager.resourceOffer("execA", "host3", NODE_LOCAL) === None)
    // schedule the no-preference task after the node-local ones
assert(manager.resourceOffer("execA", "host3", NO_PREF).get.index === 2)
}
test("SPARK-4939: node-local tasks should be scheduled right after process-local tasks finished")
{
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("execA", "host1"), ("execB", "host2"))
val taskSet = FakeTask.createTaskSet(4,
Seq(TaskLocation("host1")),
Seq(TaskLocation("host2")),
Seq(ExecutorCacheTaskLocation("host1", "execA")),
Seq(ExecutorCacheTaskLocation("host2", "execB")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
// process-local tasks are scheduled first
assert(manager.resourceOffer("execA", "host1", NODE_LOCAL).get.index === 2)
assert(manager.resourceOffer("execB", "host2", NODE_LOCAL).get.index === 3)
// node-local tasks are scheduled without delay
assert(manager.resourceOffer("execA", "host1", NODE_LOCAL).get.index === 0)
assert(manager.resourceOffer("execB", "host2", NODE_LOCAL).get.index === 1)
assert(manager.resourceOffer("execA", "host1", NODE_LOCAL) == None)
assert(manager.resourceOffer("execB", "host2", NODE_LOCAL) == None)
}
test("SPARK-4939: no-pref tasks should be scheduled after process-local tasks finished") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("execA", "host1"), ("execB", "host2"))
val taskSet = FakeTask.createTaskSet(3,
Seq(),
Seq(ExecutorCacheTaskLocation("host1", "execA")),
Seq(ExecutorCacheTaskLocation("host2", "execB")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
// process-local tasks are scheduled first
assert(manager.resourceOffer("execA", "host1", PROCESS_LOCAL).get.index === 1)
assert(manager.resourceOffer("execB", "host2", PROCESS_LOCAL).get.index === 2)
// no-pref tasks are scheduled without delay
assert(manager.resourceOffer("execA", "host1", PROCESS_LOCAL) == None)
assert(manager.resourceOffer("execA", "host1", NODE_LOCAL) == None)
assert(manager.resourceOffer("execA", "host1", NO_PREF).get.index === 0)
assert(manager.resourceOffer("execA", "host1", ANY) == None)
}
test("Ensure TaskSetManager is usable after addition of levels") {
// Regression test for SPARK-2931
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc)
val taskSet = FakeTask.createTaskSet(2,
Seq(TaskLocation("host1", "execA")),
Seq(TaskLocation("host2", "execB.1")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
// Only ANY is valid
assert(manager.myLocalityLevels.sameElements(Array(ANY)))
// Add a new executor
sched.addExecutor("execA", "host1")
sched.addExecutor("execB.2", "host2")
manager.executorAdded()
assert(manager.pendingTasks.noPrefs.size === 0)
// Valid locality should contain PROCESS_LOCAL, NODE_LOCAL and ANY
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, ANY)))
assert(manager.resourceOffer("execA", "host1", ANY) !== None)
clock.advance(LOCALITY_WAIT_MS * 4)
assert(manager.resourceOffer("execB.2", "host2", ANY) !== None)
sched.removeExecutor("execA")
sched.removeExecutor("execB.2")
manager.executorLost("execA", "host1", SlaveLost())
manager.executorLost("execB.2", "host2", SlaveLost())
clock.advance(LOCALITY_WAIT_MS * 4)
sched.addExecutor("execC", "host3")
manager.executorAdded()
// Prior to the fix, this line resulted in an ArrayIndexOutOfBoundsException:
assert(manager.resourceOffer("execC", "host3", ANY) !== None)
}
test("Test that locations with HDFSCacheTaskLocation are treated as PROCESS_LOCAL.") {
// Regression test for SPARK-2931
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc,
("execA", "host1"), ("execB", "host2"), ("execC", "host3"))
val taskSet = FakeTask.createTaskSet(3,
Seq(TaskLocation("host1")),
Seq(TaskLocation("host2")),
Seq(TaskLocation("hdfs_cache_host3")))
val clock = new ManualClock
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, ANY)))
sched.removeExecutor("execA")
manager.executorAdded()
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, ANY)))
sched.removeExecutor("execB")
manager.executorAdded()
assert(manager.myLocalityLevels.sameElements(Array(PROCESS_LOCAL, NODE_LOCAL, ANY)))
sched.removeExecutor("execC")
manager.executorAdded()
assert(manager.myLocalityLevels.sameElements(Array(ANY)))
}
test("Test TaskLocation for different host type.") {
assert(TaskLocation("host1") === HostTaskLocation("host1"))
assert(TaskLocation("hdfs_cache_host1") === HDFSCacheTaskLocation("host1"))
assert(TaskLocation("executor_host1_3") === ExecutorCacheTaskLocation("host1", "3"))
assert(TaskLocation("executor_some.host1_executor_task_3") ===
ExecutorCacheTaskLocation("some.host1", "executor_task_3"))
}
test("Kill other task attempts when one attempt belonging to the same task succeeds") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"))
val taskSet = FakeTask.createTaskSet(4)
// Set the speculation multiplier to be 0 so speculative tasks are launched immediately
sc.conf.set(config.SPECULATION_MULTIPLIER, 0.0)
sc.conf.set(config.SPECULATION_ENABLED, true)
val clock = new ManualClock()
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
val accumUpdatesByTask: Array[Seq[AccumulatorV2[_, _]]] = taskSet.tasks.map { task =>
task.metrics.internalAccums
}
// Offer resources for 4 tasks to start
for ((k, v) <- List(
"exec1" -> "host1",
"exec1" -> "host1",
"exec2" -> "host2",
"exec2" -> "host2")) {
val taskOption = manager.resourceOffer(k, v, NO_PREF)
assert(taskOption.isDefined)
val task = taskOption.get
assert(task.executorId === k)
}
assert(sched.startedTasks.toSet === Set(0, 1, 2, 3))
clock.advance(1)
    // Complete 3 tasks and leave 1 task running
for (id <- Set(0, 1, 2)) {
manager.handleSuccessfulTask(id, createTaskResult(id, accumUpdatesByTask(id)))
assert(sched.endedTasks(id) === Success)
}
// checkSpeculatableTasks checks that the task runtime is greater than the threshold for
// speculating. Since we use a threshold of 0 for speculation, tasks need to be running for
// > 0ms, so advance the clock by 1ms here.
clock.advance(1)
assert(manager.checkSpeculatableTasks(0))
assert(sched.speculativeTasks.toSet === Set(3))
// Offer resource to start the speculative attempt for the running task
val taskOption5 = manager.resourceOffer("exec1", "host1", NO_PREF)
assert(taskOption5.isDefined)
val task5 = taskOption5.get
assert(task5.index === 3)
assert(task5.taskId === 4)
assert(task5.executorId === "exec1")
assert(task5.attemptNumber === 1)
sched.backend = mock(classOf[SchedulerBackend])
// Complete the speculative attempt for the running task
manager.handleSuccessfulTask(4, createTaskResult(3, accumUpdatesByTask(3)))
// Verify that it kills other running attempt
verify(sched.backend).killTask(3, "exec2", true, "another attempt succeeded")
// Because the SchedulerBackend was a mock, the 2nd copy of the task won't actually be
// killed, so the FakeTaskScheduler is only told about the successful completion
// of the speculated task.
assert(sched.endedTasks(3) === Success)
}
test("Killing speculative tasks does not count towards aborting the taskset") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"))
val taskSet = FakeTask.createTaskSet(5)
// Set the speculation multiplier to be 0 so speculative tasks are launched immediately
sc.conf.set(config.SPECULATION_MULTIPLIER, 0.0)
sc.conf.set(config.SPECULATION_QUANTILE, 0.6)
sc.conf.set(config.SPECULATION_ENABLED, true)
val clock = new ManualClock()
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
val accumUpdatesByTask: Array[Seq[AccumulatorV2[_, _]]] = taskSet.tasks.map { task =>
task.metrics.internalAccums
}
// Offer resources for 5 tasks to start
val tasks = new ArrayBuffer[TaskDescription]()
for ((k, v) <- List(
"exec1" -> "host1",
"exec1" -> "host1",
"exec1" -> "host1",
"exec2" -> "host2",
"exec2" -> "host2")) {
val taskOption = manager.resourceOffer(k, v, NO_PREF)
assert(taskOption.isDefined)
val task = taskOption.get
assert(task.executorId === k)
tasks += task
}
assert(sched.startedTasks.toSet === (0 until 5).toSet)
clock.advance(1)
// Complete 3 tasks and leave 2 tasks in running
for (id <- Set(0, 1, 2)) {
manager.handleSuccessfulTask(id, createTaskResult(id, accumUpdatesByTask(id)))
assert(sched.endedTasks(id) === Success)
}
def runningTaskForIndex(index: Int): TaskDescription = {
tasks.find { task =>
task.index == index && !sched.endedTasks.contains(task.taskId)
}.getOrElse {
throw new RuntimeException(s"couldn't find index $index in " +
s"tasks: ${tasks.map { t => t.index -> t.taskId }} with endedTasks:" +
s" ${sched.endedTasks.keys}")
}
}
// have each of the running tasks fail 3 times (not enough to abort the stage)
(0 until 3).foreach { attempt =>
Seq(3, 4).foreach { index =>
val task = runningTaskForIndex(index)
logInfo(s"failing task $task")
val endReason = ExceptionFailure("a", "b", Array(), "c", None)
manager.handleFailedTask(task.taskId, TaskState.FAILED, endReason)
sched.endedTasks(task.taskId) = endReason
assert(!manager.isZombie)
val nextTask = manager.resourceOffer(s"exec2", s"host2", NO_PREF)
assert(nextTask.isDefined, s"no offer for attempt $attempt of $index")
tasks += nextTask.get
}
}
// we can't be sure which one of our running tasks will get another speculative copy
val originalTasks = Seq(3, 4).map { index => index -> runningTaskForIndex(index) }.toMap
// checkSpeculatableTasks checks that the task runtime is greater than the threshold for
// speculating. Since we use a threshold of 0 for speculation, tasks need to be running for
// > 0ms, so advance the clock by 1ms here.
clock.advance(1)
assert(manager.checkSpeculatableTasks(0))
assert(sched.speculativeTasks.toSet === Set(3, 4))
// Offer resource to start the speculative attempt for the running task
val taskOption5 = manager.resourceOffer("exec1", "host1", NO_PREF)
assert(taskOption5.isDefined)
val speculativeTask = taskOption5.get
assert(speculativeTask.index === 3 || speculativeTask.index === 4)
assert(speculativeTask.taskId === 11)
assert(speculativeTask.executorId === "exec1")
assert(speculativeTask.attemptNumber === 4)
sched.backend = mock(classOf[SchedulerBackend])
// Complete the speculative attempt for the running task
manager.handleSuccessfulTask(speculativeTask.taskId, createTaskResult(3, accumUpdatesByTask(3)))
// Verify that it kills other running attempt
val origTask = originalTasks(speculativeTask.index)
verify(sched.backend).killTask(origTask.taskId, "exec2", true, "another attempt succeeded")
// Because the SchedulerBackend was a mock, the 2nd copy of the task won't actually be
// killed, so the FakeTaskScheduler is only told about the successful completion
// of the speculated task.
assert(sched.endedTasks(4) === Success)
    // Also, because the scheduler backend is a mock, our manager isn't notified about the
    // task-killed event, so we do that manually
manager.handleFailedTask(origTask.taskId, TaskState.KILLED, TaskKilled("test"))
// this task has "failed" 4 times, but one of them doesn't count, so keep running the stage
assert(manager.tasksSuccessful === 4)
assert(!manager.isZombie)
// now run another speculative task
val taskOpt6 = manager.resourceOffer("exec1", "host1", NO_PREF)
assert(taskOpt6.isDefined)
val speculativeTask2 = taskOpt6.get
assert(speculativeTask2.index === 3 || speculativeTask2.index === 4)
assert(speculativeTask2.index !== speculativeTask.index)
assert(speculativeTask2.attemptNumber === 4)
// Complete the speculative attempt for the running task
manager.handleSuccessfulTask(speculativeTask2.taskId,
createTaskResult(3, accumUpdatesByTask(3)))
// Verify that it kills other running attempt
val origTask2 = originalTasks(speculativeTask2.index)
verify(sched.backend).killTask(origTask2.taskId, "exec2", true, "another attempt succeeded")
assert(manager.tasksSuccessful === 5)
assert(manager.isZombie)
}
test("SPARK-19868: DagScheduler only notified of taskEnd when state is ready") {
    // dagScheduler.taskEnded() is async, so it may *seem* OK to call it before we've set all
    // appropriate state, e.g. isZombie. However, this sets up a race that could go the wrong way.
// This is a super-focused regression test which checks the zombie state as soon as
// dagScheduler.taskEnded() is called, to ensure we haven't introduced a race.
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val mockDAGScheduler = mock(classOf[DAGScheduler])
sched.dagScheduler.stop()
sched.dagScheduler = mockDAGScheduler
val taskSet = FakeTask.createTaskSet(numTasks = 1, stageId = 0, stageAttemptId = 0)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = new ManualClock(1))
when(mockDAGScheduler.taskEnded(any(), any(), any(), any(), any(), any())).thenAnswer(
(invocationOnMock: InvocationOnMock) => assert(manager.isZombie))
val taskOption = manager.resourceOffer("exec1", "host1", NO_PREF)
assert(taskOption.isDefined)
// this would fail, inside our mock dag scheduler, if it calls dagScheduler.taskEnded() too soon
manager.handleSuccessfulTask(0, createTaskResult(0))
}
test("SPARK-17894: Verify TaskSetManagers for different stage attempts have unique names") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = FakeTask.createTaskSet(numTasks = 1, stageId = 0, stageAttemptId = 0)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = new ManualClock)
assert(manager.name === "TaskSet_0.0")
// Make sure a task set with the same stage ID but different attempt ID has a unique name
val taskSet2 = FakeTask.createTaskSet(numTasks = 1, stageId = 0, stageAttemptId = 1)
val manager2 = new TaskSetManager(sched, taskSet2, MAX_TASK_FAILURES, clock = new ManualClock)
assert(manager2.name === "TaskSet_0.1")
// Make sure a task set with the same attempt ID but different stage ID also has a unique name
val taskSet3 = FakeTask.createTaskSet(numTasks = 1, stageId = 1, stageAttemptId = 1)
val manager3 = new TaskSetManager(sched, taskSet3, MAX_TASK_FAILURES, clock = new ManualClock)
assert(manager3.name === "TaskSet_1.1")
}
test("don't update blacklist for shuffle-fetch failures, preemption, denied commits, " +
"or killed tasks") {
    // Set up a taskset, and fail tasks with a fetch failure, preemption, a denied commit,
    // and a killed task.
val conf = new SparkConf().
set(config.BLACKLIST_ENABLED, true)
sc = new SparkContext("local", "test", conf)
sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"))
val taskSet = FakeTask.createTaskSet(4)
val tsm = new TaskSetManager(sched, taskSet, 4)
// we need a spy so we can attach our mock blacklist
val tsmSpy = spy(tsm)
val blacklist = mock(classOf[TaskSetBlacklist])
when(tsmSpy.taskSetBlacklistHelperOpt).thenReturn(Some(blacklist))
// make some offers to our taskset, to get tasks we will fail
val taskDescs = Seq(
"exec1" -> "host1",
"exec2" -> "host1"
).flatMap { case (exec, host) =>
// offer each executor twice (simulating 2 cores per executor)
(0 until 2).flatMap{ _ => tsmSpy.resourceOffer(exec, host, TaskLocality.ANY)}
}
assert(taskDescs.size === 4)
// now fail those tasks
tsmSpy.handleFailedTask(taskDescs(0).taskId, TaskState.FAILED,
FetchFailed(BlockManagerId(taskDescs(0).executorId, "host1", 12345), 0, 0L, 0, 0, "ignored"))
tsmSpy.handleFailedTask(taskDescs(1).taskId, TaskState.FAILED,
ExecutorLostFailure(taskDescs(1).executorId, exitCausedByApp = false, reason = None))
tsmSpy.handleFailedTask(taskDescs(2).taskId, TaskState.FAILED,
TaskCommitDenied(0, 2, 0))
tsmSpy.handleFailedTask(taskDescs(3).taskId, TaskState.KILLED, TaskKilled("test"))
// Make sure that the blacklist ignored all of the task failures above, since they aren't
// the fault of the executor where the task was running.
verify(blacklist, never())
.updateBlacklistForFailedTask(anyString(), anyString(), anyInt(), anyString())
}
test("update application blacklist for shuffle-fetch") {
    // Set up a taskset, and fail one task with a fetch failure.
val conf = new SparkConf()
.set(config.BLACKLIST_ENABLED, true)
.set(config.SHUFFLE_SERVICE_ENABLED, true)
.set(config.BLACKLIST_FETCH_FAILURE_ENABLED, true)
sc = new SparkContext("local", "test", conf)
sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"))
val taskSet = FakeTask.createTaskSet(4)
val blacklistTracker = new BlacklistTracker(sc, None)
val tsm = new TaskSetManager(sched, taskSet, 4, Some(blacklistTracker))
// make some offers to our taskset, to get tasks we will fail
val taskDescs = Seq(
"exec1" -> "host1",
"exec2" -> "host2"
).flatMap { case (exec, host) =>
// offer each executor twice (simulating 2 cores per executor)
(0 until 2).flatMap{ _ => tsm.resourceOffer(exec, host, TaskLocality.ANY)}
}
assert(taskDescs.size === 4)
assert(!blacklistTracker.isExecutorBlacklisted(taskDescs(0).executorId))
assert(!blacklistTracker.isNodeBlacklisted("host1"))
// Fail the task with fetch failure
tsm.handleFailedTask(taskDescs(0).taskId, TaskState.FAILED,
FetchFailed(BlockManagerId(taskDescs(0).executorId, "host1", 12345), 0, 0L, 0, 0, "ignored"))
assert(blacklistTracker.isNodeBlacklisted("host1"))
}
test("update blacklist before adding pending task to avoid race condition") {
    // When a task fails, the blacklist policy should be applied prior to retrying the task;
    // otherwise there's a race condition where the retry can run on the same executor
    // that it was meant to be blacklisted from.
val conf = new SparkConf().
set(config.BLACKLIST_ENABLED, true)
// Create a task with two executors.
sc = new SparkContext("local", "test", conf)
val exec = "executor1"
val host = "host1"
val exec2 = "executor2"
val host2 = "host2"
sched = new FakeTaskScheduler(sc, (exec, host), (exec2, host2))
val taskSet = FakeTask.createTaskSet(1)
val clock = new ManualClock
val mockListenerBus = mock(classOf[LiveListenerBus])
val blacklistTracker = new BlacklistTracker(mockListenerBus, conf, None, clock)
val taskSetManager = new TaskSetManager(sched, taskSet, 1, Some(blacklistTracker))
val taskSetManagerSpy = spy(taskSetManager)
val taskDesc = taskSetManagerSpy.resourceOffer(exec, host, TaskLocality.ANY)
    // Assert that the task has been blacklisted on the executor it last ran on.
when(taskSetManagerSpy.addPendingTask(anyInt(), anyBoolean(), anyBoolean())).thenAnswer(
(invocationOnMock: InvocationOnMock) => {
val task: Int = invocationOnMock.getArgument(0)
assert(taskSetManager.taskSetBlacklistHelperOpt.get.
isExecutorBlacklistedForTask(exec, task))
}
)
// Simulate a fake exception
val e = new ExceptionFailure("a", "b", Array(), "c", None)
taskSetManagerSpy.handleFailedTask(taskDesc.get.taskId, TaskState.FAILED, e)
verify(taskSetManagerSpy, times(1)).addPendingTask(0, false, false)
}
test("SPARK-21563 context's added jars shouldn't change mid-TaskSet") {
sc = new SparkContext("local", "test")
val addedJarsPreTaskSet = Map[String, Long](sc.addedJars.toSeq: _*)
assert(addedJarsPreTaskSet.size === 0)
sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet1 = FakeTask.createTaskSet(3)
val manager1 = new TaskSetManager(sched, taskSet1, MAX_TASK_FAILURES, clock = new ManualClock)
// all tasks from the first taskset have the same jars
val taskOption1 = manager1.resourceOffer("exec1", "host1", NO_PREF)
assert(taskOption1.get.addedJars === addedJarsPreTaskSet)
val taskOption2 = manager1.resourceOffer("exec1", "host1", NO_PREF)
assert(taskOption2.get.addedJars === addedJarsPreTaskSet)
// even with a jar added mid-TaskSet
val jarPath = Thread.currentThread().getContextClassLoader.getResource("TestUDTF.jar")
sc.addJar(jarPath.toString)
val addedJarsMidTaskSet = Map[String, Long](sc.addedJars.toSeq: _*)
assert(addedJarsPreTaskSet !== addedJarsMidTaskSet)
val taskOption3 = manager1.resourceOffer("exec1", "host1", NO_PREF)
// which should have the old version of the jars list
assert(taskOption3.get.addedJars === addedJarsPreTaskSet)
// and then the jar does appear in the next TaskSet
val taskSet2 = FakeTask.createTaskSet(1)
val manager2 = new TaskSetManager(sched, taskSet2, MAX_TASK_FAILURES, clock = new ManualClock)
val taskOption4 = manager2.resourceOffer("exec1", "host1", NO_PREF)
assert(taskOption4.get.addedJars === addedJarsMidTaskSet)
}
test("SPARK-24677: Avoid NoSuchElementException from MedianHeap") {
val conf = new SparkConf().set(config.SPECULATION_ENABLED, true)
sc = new SparkContext("local", "test", conf)
// Set the speculation multiplier to be 0 so speculative tasks are launched immediately
sc.conf.set(config.SPECULATION_MULTIPLIER, 0.0)
sc.conf.set(config.SPECULATION_QUANTILE, 0.1)
sc.conf.set(config.SPECULATION_ENABLED, true)
sched = new FakeTaskScheduler(sc)
sched.initialize(new FakeSchedulerBackend())
val dagScheduler = new FakeDAGScheduler(sc, sched)
sched.setDAGScheduler(dagScheduler)
val taskSet = FakeTask.createTaskSet(10)
sched.submitTasks(taskSet)
sched.resourceOffers(
(0 until 8).map { idx => WorkerOffer(s"exec-$idx", s"host-$idx", 1) })
val taskSetManager = sched.taskSetManagerForAttempt(0, 0).get
assert(taskSetManager.runningTasks === 8)
taskSetManager.markPartitionCompleted(8)
assert(taskSetManager.successfulTaskDurations.isEmpty())
taskSetManager.checkSpeculatableTasks(0)
}
test("SPARK-24755 Executor loss can cause task to not be resubmitted") {
val conf = new SparkConf().set(config.SPECULATION_ENABLED, true)
sc = new SparkContext("local", "test", conf)
// Set the speculation multiplier to be 0 so speculative tasks are launched immediately
sc.conf.set(config.SPECULATION_MULTIPLIER, 0.0)
sc.conf.set(config.SPECULATION_QUANTILE, 0.5)
sc.conf.set(config.SPECULATION_ENABLED, true)
var killTaskCalled = false
sched = new FakeTaskScheduler(sc, ("exec1", "host1"),
("exec2", "host2"), ("exec3", "host3"))
sched.initialize(new FakeSchedulerBackend() {
override def killTask(
taskId: Long,
executorId: String,
interruptThread: Boolean,
reason: String): Unit = {
        // Check that this is the only killTask event in this case, triggered by
        // task 2.1 completing.
assert(taskId === 2)
assert(executorId === "exec3")
assert(interruptThread)
assert(reason === "another attempt succeeded")
killTaskCalled = true
}
})
    // Keep track of the indices of tasks that are resubmitted,
    // so that the test can check that the task is resubmitted correctly
var resubmittedTasks = new mutable.HashSet[Int]
val dagScheduler = new FakeDAGScheduler(sc, sched) {
override def taskEnded(
task: Task[_],
reason: TaskEndReason,
result: Any,
accumUpdates: Seq[AccumulatorV2[_, _]],
metricPeaks: Array[Long],
taskInfo: TaskInfo): Unit = {
super.taskEnded(task, reason, result, accumUpdates, metricPeaks, taskInfo)
reason match {
case Resubmitted => resubmittedTasks += taskInfo.index
case _ =>
}
}
}
sched.dagScheduler.stop()
sched.setDAGScheduler(dagScheduler)
val taskSet = FakeTask.createShuffleMapTaskSet(4, 0, 0,
Seq(TaskLocation("host1", "exec1")),
Seq(TaskLocation("host1", "exec1")),
Seq(TaskLocation("host3", "exec3")),
Seq(TaskLocation("host2", "exec2")))
val clock = new ManualClock()
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
val accumUpdatesByTask: Array[Seq[AccumulatorV2[_, _]]] = taskSet.tasks.map { task =>
task.metrics.internalAccums
}
// Offer resources for 4 tasks to start
for ((exec, host) <- Seq(
"exec1" -> "host1",
"exec1" -> "host1",
"exec3" -> "host3",
"exec2" -> "host2")) {
val taskOption = manager.resourceOffer(exec, host, NO_PREF)
assert(taskOption.isDefined)
val task = taskOption.get
assert(task.executorId === exec)
// Add an extra assert to make sure task 2.0 is running on exec3
if (task.index == 2) {
assert(task.attemptNumber === 0)
assert(task.executorId === "exec3")
}
}
assert(sched.startedTasks.toSet === Set(0, 1, 2, 3))
clock.advance(1)
    // Complete 2 tasks and leave 2 tasks running
for (id <- Set(0, 1)) {
manager.handleSuccessfulTask(id, createTaskResult(id, accumUpdatesByTask(id)))
assert(sched.endedTasks(id) === Success)
}
// checkSpeculatableTasks checks that the task runtime is greater than the threshold for
// speculating. Since we use a threshold of 0 for speculation, tasks need to be running for
// > 0ms, so advance the clock by 1ms here.
clock.advance(1)
assert(manager.checkSpeculatableTasks(0))
assert(sched.speculativeTasks.toSet === Set(2, 3))
// Offer resource to start the speculative attempt for the running task 2.0
val taskOption = manager.resourceOffer("exec2", "host2", ANY)
assert(taskOption.isDefined)
val task4 = taskOption.get
assert(task4.index === 2)
assert(task4.taskId === 4)
assert(task4.executorId === "exec2")
assert(task4.attemptNumber === 1)
// Complete the speculative attempt for the running task
manager.handleSuccessfulTask(4, createTaskResult(2, accumUpdatesByTask(2)))
// Make sure schedBackend.killTask(2, "exec3", true, "another attempt succeeded") gets called
assert(killTaskCalled)
assert(resubmittedTasks.isEmpty)
    // Host 2 is lost, meaning we lose the map output written there by task 4 (the successful
    // speculative copy of partition 2)
manager.executorLost("exec2", "host2", SlaveLost())
// Make sure that task with index 2 is re-submitted
assert(resubmittedTasks.contains(2))
}
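  // Helper used by several tests in this suite: wraps `id` in a DirectTaskResult, serializing
  // the value with the current SparkEnv serializer so it can be fed to handleSuccessfulTask.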
private def createTaskResult(
id: Int,
accumUpdates: Seq[AccumulatorV2[_, _]] = Seq.empty,
metricPeaks: Array[Long] = Array.empty): DirectTaskResult[Int] = {
val valueSer = SparkEnv.get.serializer.newInstance()
new DirectTaskResult[Int](valueSer.serialize(id), accumUpdates, metricPeaks)
}
test("SPARK-13343 speculative tasks that didn't commit shouldn't be marked as success") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"))
val taskSet = FakeTask.createTaskSet(4)
// Set the speculation multiplier to be 0 so speculative tasks are launched immediately
sc.conf.set(config.SPECULATION_MULTIPLIER, 0.0)
sc.conf.set(config.SPECULATION_ENABLED, true)
val clock = new ManualClock()
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
val accumUpdatesByTask: Array[Seq[AccumulatorV2[_, _]]] = taskSet.tasks.map { task =>
task.metrics.internalAccums
}
// Offer resources for 4 tasks to start
for ((k, v) <- List(
"exec1" -> "host1",
"exec1" -> "host1",
"exec2" -> "host2",
"exec2" -> "host2")) {
val taskOption = manager.resourceOffer(k, v, NO_PREF)
assert(taskOption.isDefined)
val task = taskOption.get
assert(task.executorId === k)
}
assert(sched.startedTasks.toSet === Set(0, 1, 2, 3))
clock.advance(1)
    // Complete 3 tasks and leave 1 task running
for (id <- Set(0, 1, 2)) {
manager.handleSuccessfulTask(id, createTaskResult(id, accumUpdatesByTask(id)))
assert(sched.endedTasks(id) === Success)
}
// checkSpeculatableTasks checks that the task runtime is greater than the threshold for
// speculating. Since we use a threshold of 0 for speculation, tasks need to be running for
// > 0ms, so advance the clock by 1ms here.
clock.advance(1)
assert(manager.checkSpeculatableTasks(0))
assert(sched.speculativeTasks.toSet === Set(3))
// Offer resource to start the speculative attempt for the running task
val taskOption5 = manager.resourceOffer("exec1", "host1", NO_PREF)
assert(taskOption5.isDefined)
val task5 = taskOption5.get
assert(task5.index === 3)
assert(task5.taskId === 4)
assert(task5.executorId === "exec1")
assert(task5.attemptNumber === 1)
sched.backend = mock(classOf[SchedulerBackend])
sched.dagScheduler.stop()
sched.dagScheduler = mock(classOf[DAGScheduler])
// Complete one attempt for the running task
val result = createTaskResult(3, accumUpdatesByTask(3))
manager.handleSuccessfulTask(3, result)
// There is a race between the scheduler asking to kill the other task, and that task
// actually finishing. We simulate what happens if the other task finishes before we kill it.
verify(sched.backend).killTask(4, "exec1", true, "another attempt succeeded")
manager.handleSuccessfulTask(4, result)
val info3 = manager.taskInfos(3)
val info4 = manager.taskInfos(4)
assert(info3.successful)
assert(info4.killed)
verify(sched.dagScheduler).taskEnded(
manager.tasks(3),
TaskKilled("Finish but did not commit due to another attempt succeeded"),
null,
Seq.empty,
Array.empty,
info4)
verify(sched.dagScheduler).taskEnded(manager.tasks(3), Success, result.value(),
result.accumUpdates, Array.empty, info3)
}
test("SPARK-13704 Rack Resolution is done with a batch of de-duped hosts") {
val conf = new SparkConf()
.set(config.LOCALITY_WAIT, 0L)
.set(config.LOCALITY_WAIT_RACK, 1L)
sc = new SparkContext("local", "test", conf)
// Create a cluster with 20 racks, with hosts spread out among them
val execAndHost = (0 to 199).map { i =>
FakeRackUtil.assignHostToRack("host" + i, "rack" + (i % 20))
("exec" + i, "host" + i)
}
sched = new FakeTaskScheduler(sc, execAndHost: _*)
// make a taskset with preferred locations on the first 100 hosts in our cluster
val locations = new ArrayBuffer[Seq[TaskLocation]]()
for (i <- 0 to 99) {
locations += Seq(TaskLocation("host" + i))
}
val taskSet = FakeTask.createTaskSet(100, locations: _*)
val clock = new ManualClock
    // Make sure we only do one rack resolution call, for the entire batch of hosts, as this
    // can be expensive. The FakeTaskScheduler calls rack resolution more often than the real
    // one would -- that is outside the scope of this test; we just want to check the
    // TaskSetManager.
FakeRackUtil.numBatchInvocation = 0
FakeRackUtil.numSingleHostInvocation = 0
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
assert(FakeRackUtil.numBatchInvocation === 1)
assert(FakeRackUtil.numSingleHostInvocation === 0)
// with rack locality, reject an offer on a host with an unknown rack
assert(manager.resourceOffer("otherExec", "otherHost", TaskLocality.RACK_LOCAL).isEmpty)
(0 until 20).foreach { rackIdx =>
(0 until 5).foreach { offerIdx =>
// if we offer hosts which are not in preferred locations,
// we'll reject them at NODE_LOCAL level,
// but accept them at RACK_LOCAL level if they're on OK racks
val hostIdx = 100 + rackIdx
assert(manager.resourceOffer("exec" + hostIdx, "host" + hostIdx, TaskLocality.NODE_LOCAL)
.isEmpty)
assert(manager.resourceOffer("exec" + hostIdx, "host" + hostIdx, TaskLocality.RACK_LOCAL)
.isDefined)
}
}
    // Check that there are no more expensive (batch) calls to rack resolution.
    // manager.resourceOffer() will call the single-host resolution, but the real rack
    // resolution would have cached all hosts by that point.
assert(FakeRackUtil.numBatchInvocation === 1)
}
test("TaskSetManager allocate resource addresses from available resources") {
import TestUtils._
sc = new SparkContext("local", "test")
sc.conf.set(TASK_GPU_ID.amountConf, "2")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"))
val taskSet = FakeTask.createTaskSet(1)
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES)
val availableResources = Map(GPU -> ArrayBuffer("0", "1", "2", "3"))
val taskOption = manager.resourceOffer("exec1", "host1", NO_PREF, availableResources)
assert(taskOption.isDefined)
val allocatedResources = taskOption.get.resources
assert(allocatedResources.size == 1)
assert(allocatedResources(GPU).addresses sameElements Array("0", "1"))
    // Allocated resource addresses should still be present in `availableResources`; they will
    // only get removed inside TaskSchedulerImpl later.
assert(availableResources(GPU) sameElements Array("0", "1", "2", "3"))
}
test("SPARK-26755 Ensure that a speculative task is submitted only once for execution") {
sc = new SparkContext("local", "test")
sched = new FakeTaskScheduler(sc, ("exec1", "host1"), ("exec2", "host2"))
val taskSet = FakeTask.createTaskSet(4)
// Set the speculation multiplier to be 0 so speculative tasks are launched immediately
sc.conf.set(config.SPECULATION_MULTIPLIER, 0.0)
sc.conf.set(config.SPECULATION_ENABLED, true)
sc.conf.set(config.SPECULATION_QUANTILE, 0.5)
val clock = new ManualClock()
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
val accumUpdatesByTask: Array[Seq[AccumulatorV2[_, _]]] = taskSet.tasks.map { task =>
task.metrics.internalAccums
}
// Offer resources for 4 tasks to start, 2 on each exec
Seq("exec1" -> "host1", "exec2" -> "host2").foreach { case (exec, host) =>
(0 until 2).foreach { _ =>
val taskOption = manager.resourceOffer(exec, host, NO_PREF)
assert(taskOption.isDefined)
assert(taskOption.get.executorId === exec)
}
}
assert(sched.startedTasks.toSet === Set(0, 1, 2, 3))
clock.advance(1)
// Complete the first 2 tasks and leave the other 2 tasks in running
for (id <- Set(0, 2)) {
manager.handleSuccessfulTask(id, createTaskResult(id, accumUpdatesByTask(id)))
assert(sched.endedTasks(id) === Success)
}
// checkSpeculatableTasks checks that the task runtime is greater than the threshold for
// speculating. Since we use a threshold of 0 for speculation, tasks need to be running for
// > 0ms, so advance the clock by 1ms here.
clock.advance(1)
assert(manager.checkSpeculatableTasks(0))
assert(sched.speculativeTasks.toSet === Set(1, 3))
assert(manager.copiesRunning(1) === 1)
assert(manager.copiesRunning(3) === 1)
// Offer resource to start the speculative attempt for the running task. We offer more
// resources, and ensure that speculative tasks get scheduled appropriately -- only one extra
// copy per speculatable task
val taskOption2 = manager.resourceOffer("exec1", "host1", NO_PREF)
val taskOption3 = manager.resourceOffer("exec2", "host2", NO_PREF)
assert(taskOption2.isDefined)
val task2 = taskOption2.get
    // Ensure that task index 3 is launched on host1 and task index 1 on host2
assert(task2.index === 3)
assert(task2.taskId === 4)
assert(task2.executorId === "exec1")
assert(task2.attemptNumber === 1)
assert(taskOption3.isDefined)
val task3 = taskOption3.get
assert(task3.index === 1)
assert(task3.taskId === 5)
assert(task3.executorId === "exec2")
assert(task3.attemptNumber === 1)
clock.advance(1)
// Running checkSpeculatableTasks again should return false
assert(!manager.checkSpeculatableTasks(0))
assert(manager.copiesRunning(1) === 2)
assert(manager.copiesRunning(3) === 2)
// Offering additional resources should not lead to any speculative tasks being respawned
assert(manager.resourceOffer("exec1", "host1", ANY).isEmpty)
assert(manager.resourceOffer("exec2", "host2", ANY).isEmpty)
assert(manager.resourceOffer("exec3", "host3", ANY).isEmpty)
}
test("SPARK-26755 Ensure that a speculative task obeys original locality preferences") {
sc = new SparkContext("local", "test")
// Set the speculation multiplier to be 0 so speculative tasks are launched immediately
sc.conf.set(config.SPECULATION_MULTIPLIER, 0.0)
sc.conf.set(config.SPECULATION_ENABLED, true)
sc.conf.set(config.SPECULATION_QUANTILE, 0.5)
// Launch a new set of tasks with locality preferences
sched = new FakeTaskScheduler(sc, ("exec1", "host1"),
("exec2", "host2"), ("exec3", "host3"), ("exec4", "host4"))
val taskSet = FakeTask.createTaskSet(3,
Seq(TaskLocation("host1"), TaskLocation("host3")),
Seq(TaskLocation("host2")),
Seq(TaskLocation("host3")))
val clock = new ManualClock()
val manager = new TaskSetManager(sched, taskSet, MAX_TASK_FAILURES, clock = clock)
val accumUpdatesByTask2: Array[Seq[AccumulatorV2[_, _]]] = taskSet.tasks.map { task =>
task.metrics.internalAccums
}
// Offer resources for 3 tasks to start
Seq("exec1" -> "host1", "exec2" -> "host2", "exec3" -> "host3").foreach { case (exec, host) =>
val taskOption = manager.resourceOffer(exec, host, NO_PREF)
assert(taskOption.isDefined)
assert(taskOption.get.executorId === exec)
}
assert(sched.startedTasks.toSet === Set(0, 1, 2))
clock.advance(1)
// Finish one task and mark the others as speculatable
manager.handleSuccessfulTask(2, createTaskResult(2, accumUpdatesByTask2(2)))
assert(sched.endedTasks(2) === Success)
clock.advance(1)
assert(manager.checkSpeculatableTasks(0))
assert(sched.speculativeTasks.toSet === Set(0, 1))
// Ensure that the speculatable tasks obey the original locality preferences
assert(manager.resourceOffer("exec4", "host4", NODE_LOCAL).isEmpty)
    // Task 1 does have a node-local preference for host2 -- but we've already got a regular
    // attempt running there, so we should not schedule a speculative copy there as well.
assert(manager.resourceOffer("exec2", "host2", NODE_LOCAL).isEmpty)
assert(manager.resourceOffer("exec3", "host3", NODE_LOCAL).isDefined)
assert(manager.resourceOffer("exec4", "host4", ANY).isDefined)
    // Since all speculatable tasks have been launched, making another offer
// should not schedule any more tasks
assert(manager.resourceOffer("exec1", "host1", ANY).isEmpty)
assert(!manager.checkSpeculatableTasks(0))
assert(manager.resourceOffer("exec1", "host1", ANY).isEmpty)
}
}
| bdrillard/spark | core/src/test/scala/org/apache/spark/scheduler/TaskSetManagerSuite.scala | Scala | apache-2.0 | 77,025 |
package breeze.util
/*
Copyright 2010 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import scala.util.hashing.MurmurHash3
import java.util
/**
* A BloomFilter is an approximate set that sometimes gives false positives. That is,
* if bf(x) returns true, then it might have been added to the set. If it returns false, then
* it definitely has not. This is useful for caching and approximation.
*
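 * A minimal usage sketch (illustrative only, not part of the original source; the sizing values are arbitrary):
 * {{{
 * val bf = BloomFilter.optimallySized[String](expectedNumItems = 1000, falsePositiveRate = 0.01)
 * bf += "breeze"
 * bf("breeze")    // true
 * bf("missing")   // false with high probability
 * }}}
 *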
* @author dlwh
*/
@SerialVersionUID(1L)
class BloomFilter[@specialized(Int, Long) T](val numBuckets: Int, val numHashFunctions: Int, val bits: util.BitSet) extends (T=>Boolean) with Serializable {
def this(numBuckets: Int, numHashFunctions: Int) = this(numBuckets, numHashFunctions, new util.BitSet(numBuckets))
def this(numBuckets: Int) = this(numBuckets, 3)
def activeBuckets(key: T) = {
val hash1 = key.##
val hash2 = math.abs(MurmurHash3.mixLast(0, hash1))
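    // Derive each bucket index via double hashing from the two base hashes (hashCode plus a MurmurHash3 remix).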
for {
      i <- 0 until numHashFunctions // probe exactly numHashFunctions buckets ("0 to" was an off-by-one against the parameter name)
} yield {
val h = hash1 + i * hash2
val nextHash = if (h < 0) ~h else h
nextHash % numBuckets
}
}
def apply(o: T): Boolean = {
activeBuckets(o).forall(i => bits.get(i))
}
def contains(o: T) = apply(o)
/**
*
* Calculates the load of the bloom filter. If this is near 1, there will be lots of false positives.
*
* @return the fraction of bits that are set
*/
def load: Double = bits.cardinality().toDouble / numBuckets
override def equals(other: Any) = other match {
case that: BloomFilter[_] =>
this.numBuckets == that.numBuckets && this.numHashFunctions == that.numHashFunctions && this.bits == that.bits
case _ => false
}
def +=(o: T): this.type = {
activeBuckets(o).foreach(i => bits.set(i))
this
}
def &(that: BloomFilter[T]) = {
checkCompatibility(that)
new BloomFilter[T](this.numBuckets, this.numHashFunctions, this.bits & that.bits)
}
private def checkCompatibility(that: BloomFilter[T]) {
require(that.numBuckets == this.numBuckets, "Must have the same number of buckets to intersect")
require(that.numHashFunctions == this.numHashFunctions, "Must have the same number of hash functions to intersect")
}
def |(that: BloomFilter[T]) = {
checkCompatibility(that)
new BloomFilter[T](this.numBuckets, this.numHashFunctions, this.bits | that.bits)
}
def |=(that: BloomFilter[T]):this.type = {
checkCompatibility(that)
this.bits |= that.bits
this
}
def &=(that: BloomFilter[T]):this.type = {
checkCompatibility(that)
this.bits &= that.bits
this
}
def &~=(that: BloomFilter[T]):this.type = {
checkCompatibility(that)
this.bits &~= that.bits
this
}
def &~(that: BloomFilter[T]) = {
checkCompatibility(that)
new BloomFilter[T](this.numBuckets, this.numHashFunctions, this.bits &~ that.bits)
}
}
object BloomFilter {
/**
* Returns the optimal number of buckets (m) and hash functions (k)
*
* The formula is:
* {{{
* val m = ceil(-(n * log(p)) / log(pow(2.0, log(2.0))))
* val k = round(log(2.0) * m / n)
* }}}
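   *
   * For example (approximate figures, not from the original source): expectedNumItems = 1e6 and
   * falsePositiveRate = 0.01 give m ≈ 9,585,059 buckets and k = 7 hash functions.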
*
* @param expectedNumItems
* @param falsePositiveRate
* @return
*/
def optimalSize(expectedNumItems: Double, falsePositiveRate: Double): (Int, Int) = {
val n = expectedNumItems
val p = falsePositiveRate
import scala.math._
val m = ceil(-(n * log(p)) / log(pow(2.0, log(2.0))))
val k = round(log(2.0) * m / n)
(m.toInt, k.toInt)
}
/**
* Returns a BloomFilter that is optimally sized for the expected number of inputs and false positive rate
* @param expectedNumItems
* @param falsePositiveRate
* @tparam T
* @return
*/
def optimallySized[T](expectedNumItems: Double, falsePositiveRate: Double): BloomFilter[T] = {
val (buckets, funs) = optimalSize(expectedNumItems, falsePositiveRate)
new BloomFilter(buckets, funs)
}
}
| wstcpyt/breeze | math/src/main/scala/breeze/util/BloomFilter.scala | Scala | apache-2.0 | 4,364 |
// https://leetcode.com/problems/roman-to-integer
object Solution {
def romanToInt(s: String): Int = {
val t = Map(
'I' -> 1,
'V' -> 5,
'X' -> 10,
'L' -> 50,
'C' -> 100,
'D' -> 500,
'M' -> 1000
)
val xs = s map t
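    // Pair each value with its successor; a smaller value preceding a larger one (e.g. IV, IX) contributes negatively.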
xs zipAll (xs.tail, 0, 0) map { case (x, nx) => if (x < nx) -x else x } sum
}
}
| airt/codegames | leetcode/013-roman-to-integer.scala | Scala | mit | 358 |
/*
* Copyright 2013 Akiyoshi Sugiki, University of Tsukuba
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kumoi.shell.ps
import kumoi.shell.event._
/**
* An exit message for process termination.
*
* @author Akiyoshi SUGIKI
*/
/*
case object SigAbort
case object SigAlarm
case object SigBus
case object SigChild
case object SigCont
case object SigFpe
case object SigHup
case object SigIllegal
case object SigInterrupt
case object SigKill
case object SigPipe
case object SigQuit
case object SigSegmentViolation
case object SigStop
case object SigTerm
case object SigTSTP
case object SigTTOU
case object SigUser1
case object SigUser2
case object SigUrgent
*/
case class SigStop(reason: AnyRef) extends Event
case class SigCont(reason: AnyRef) extends Event
case class SigTerm(reason: AnyRef) extends Event
| axi-sugiki/kumoi | src/kumoi/shell/ps/Signals.scala | Scala | apache-2.0 | 1,338 |
package com.arcusys.learn.liferay.services
import javax.portlet.PortletPreferences
import com.liferay.portal.service.PortletPreferencesLocalServiceUtil
object PortletPreferencesLocalServiceHelper {
def getStrictPreferences(companyId: Long,
ownerId: Long,
ownerType: Int,
plId: Long,
portletId: String): PortletPreferences =
PortletPreferencesLocalServiceUtil.getStrictPreferences(companyId, ownerId, ownerType, plId, portletId)
def fetchPreferences(companyId: Long,
ownerId: Long,
ownerType: Int,
plId: Long,
portletId: String): Option[PortletPreferences] = Option {
PortletPreferencesLocalServiceUtil
.fetchPreferences(companyId, ownerId, ownerType, plId, portletId)
}
}
| arcusys/JSCORM | learn-liferay620-services/src/main/scala/com/arcusys/learn/liferay/services/PortletPreferencesLocalServiceHelper.scala | Scala | gpl-3.0 | 896 |
/**
* Copyright 2013-2015, AlwaysResolve Project (alwaysresolve.org), MOYD.CO LTD
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package records
import java.util
import io.netty.buffer.ByteBuf
import org.apache.commons.codec.binary.Base64
import org.slf4j.LoggerFactory
import payload.RRData
case class DNSKEY(
flags: Short,
protocol: Byte,
algorithm: Byte,
publicKey: Array[Byte],
  timetolive: Long // No default value because of scala constraints
) extends AbstractRecord {
val description = "DNSKEY"
val logger = LoggerFactory.getLogger("app")
def isEqualTo(any: Any) = any match {
    case r: DNSKEY => r.flags == flags && r.protocol == protocol && r.algorithm == algorithm && util.Arrays.equals(r.publicKey, publicKey) // compare key bytes by content, not by array reference
case _ => false
}
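  // DNSKEY RDATA wire layout (RFC 4034): flags (2 bytes), protocol (1 byte), algorithm (1 byte), public key.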
def toByteArray = RRData.shortToBytes(flags.toShort) ++ Array[Byte](protocol) ++ Array[Byte](algorithm) ++ publicKey
def toCompressedByteArray(input: (Array[Byte], Map[String, Int])) = {
// This is very inefficient, using it for debug purposes only. Encoding statically?
//val enc = Base64.decodeBase64(publicKey)
logger.debug(util.Arrays.toString(RRData.shortToBytes(publicKey.length.toShort)))
    (input._1 ++ RRData.shortToBytes(flags) ++ Array[Byte](protocol) ++ Array[Byte](algorithm) ++ Base64.decodeBase64(publicKey), input._2) // protocol precedes algorithm, matching toByteArray and the RFC 4034 layout
}
}
object DNSKEY {
val logger = LoggerFactory.getLogger("app")
def apply(buf: ByteBuf, recordclass: Int, size: Int, offset: Int = 0) = {
logger.error("Should not be called")
val flags = buf.readUnsignedShort.toShort
val protocol = buf.readByte()
val algorithm = buf.readByte()
val publicKey = Array[Byte]()
new DNSKEY(flags, protocol, algorithm, publicKey, 60)
}
} | Moydco/AlwaysResolveDNS | src/main/scala/records/DNSKEY.scala | Scala | apache-2.0 | 2,220 |
/* NSC -- new Scala compiler
* Copyright 2005-2011 LAMP/EPFL
* @author Alexander Spoon
*/
package spark.repl
import scala.tools.nsc._
import scala.tools.nsc.interpreter._
import Predef.{ println => _, _ }
import java.io.{ BufferedReader, FileReader, PrintWriter }
import scala.sys.process.Process
import session._
import scala.tools.nsc.interpreter.{ Results => IR }
import scala.tools.util.{ SignalManager, Signallable, Javap }
import scala.annotation.tailrec
import scala.util.control.Exception.{ ignoring }
import scala.collection.mutable.ListBuffer
import scala.concurrent.ops
import util.{ ClassPath, Exceptional, stringFromWriter, stringFromStream }
import interpreter._
import io.{ File, Sources }
import spark.Logging
import spark.SparkContext
/** The Scala interactive shell. It provides a read-eval-print loop
* around the Interpreter class.
* After instantiation, clients should call the main() method.
*
* If no in0 is specified, then input will come from the console, and
* the class will attempt to provide input editing feature such as
* input history.
*
* @author Moez A. Abdel-Gawad
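 * An illustrative embedding sketch (not from the original source; `reader`, `writer` and the
 * master value are placeholders):
 * {{{
 * val repl = new SparkILoop(reader, writer, "local")
 * repl.process(new Settings)
 * }}}
 *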
* @author Lex Spoon
* @version 1.2
*/
class SparkILoop(in0: Option[BufferedReader], val out: PrintWriter, val master: Option[String])
extends AnyRef
with LoopCommands
with Logging
{
def this(in0: BufferedReader, out: PrintWriter, master: String) = this(Some(in0), out, Some(master))
def this(in0: BufferedReader, out: PrintWriter) = this(Some(in0), out, None)
def this() = this(None, new PrintWriter(Console.out, true), None)
var in: InteractiveReader = _ // the input stream from which commands come
var settings: Settings = _
var intp: SparkIMain = _
/*
lazy val power = {
val g = intp.global
Power[g.type](this, g)
}
*/
// TODO
// object opt extends AestheticSettings
//
@deprecated("Use `intp` instead.", "2.9.0")
def interpreter = intp
@deprecated("Use `intp` instead.", "2.9.0")
def interpreter_= (i: SparkIMain): Unit = intp = i
def history = in.history
/** The context class loader at the time this object was created */
protected val originalClassLoader = Thread.currentThread.getContextClassLoader
// Install a signal handler so we can be prodded.
private val signallable =
/*if (isReplDebug) Signallable("Dump repl state.")(dumpCommand())
else*/ null
// classpath entries added via :cp
var addedClasspath: String = ""
/** A reverse list of commands to replay if the user requests a :replay */
var replayCommandStack: List[String] = Nil
/** A list of commands to replay if the user requests a :replay */
def replayCommands = replayCommandStack.reverse
/** Record a command for replay should the user request a :replay */
def addReplay(cmd: String) = replayCommandStack ::= cmd
/** Try to install sigint handler: ignore failure. Signal handler
* will interrupt current line execution if any is in progress.
*
* Attempting to protect the repl from accidental exit, we only honor
* a single ctrl-C if the current buffer is empty: otherwise we look
* for a second one within a short time.
*/
private def installSigIntHandler() {
def onExit() {
Console.println("") // avoiding "shell prompt in middle of line" syndrome
sys.exit(1)
}
ignoring(classOf[Exception]) {
SignalManager("INT") = {
if (intp == null)
onExit()
else if (intp.lineManager.running)
intp.lineManager.cancel()
else if (in.currentLine != "") {
// non-empty buffer, so make them hit ctrl-C a second time
SignalManager("INT") = onExit()
io.timer(5)(installSigIntHandler()) // and restore original handler if they don't
}
else onExit()
}
}
}
/** Close the interpreter and set the var to null. */
def closeInterpreter() {
if (intp ne null) {
intp.close
intp = null
Thread.currentThread.setContextClassLoader(originalClassLoader)
}
}
class SparkILoopInterpreter extends SparkIMain(settings, out) {
override lazy val formatting = new Formatting {
def prompt = SparkILoop.this.prompt
}
override protected def createLineManager() = new Line.Manager {
override def onRunaway(line: Line[_]): Unit = {
val template = """
|// She's gone rogue, captain! Have to take her out!
|// Calling Thread.stop on runaway %s with offending code:
|// scala> %s""".stripMargin
echo(template.format(line.thread, line.code))
// XXX no way to suppress the deprecation warning
line.thread.stop()
in.redrawLine()
}
}
override protected def parentClassLoader = {
SparkHelper.explicitParentLoader(settings).getOrElse( classOf[SparkILoop].getClassLoader )
}
}
/** Create a new interpreter. */
def createInterpreter() {
if (addedClasspath != "")
settings.classpath append addedClasspath
intp = new SparkILoopInterpreter
intp.setContextClassLoader()
installSigIntHandler()
}
/** print a friendly help message */
def helpCommand(line: String): Result = {
if (line == "") helpSummary()
else uniqueCommand(line) match {
      case Some(lc) => echo("\n" + lc.longHelp)
case _ => ambiguousError(line)
}
}
private def helpSummary() = {
val usageWidth = commands map (_.usageMsg.length) max
val formatStr = "%-" + usageWidth + "s %s %s"
echo("All commands can be abbreviated, e.g. :he instead of :help.")
    echo("Those marked with a * have more detailed help, e.g. :help imports.\n")
commands foreach { cmd =>
val star = if (cmd.hasLongHelp) "*" else " "
echo(formatStr.format(cmd.usageMsg, star, cmd.help))
}
}
private def ambiguousError(cmd: String): Result = {
matchingCommands(cmd) match {
case Nil => echo(cmd + ": no such command. Type :help for help.")
case xs => echo(cmd + " is ambiguous: did you mean " + xs.map(":" + _.name).mkString(" or ") + "?")
}
Result(true, None)
}
private def matchingCommands(cmd: String) = commands filter (_.name startsWith cmd)
private def uniqueCommand(cmd: String): Option[LoopCommand] = {
// this lets us add commands willy-nilly and only requires enough command to disambiguate
matchingCommands(cmd) match {
case List(x) => Some(x)
// exact match OK even if otherwise appears ambiguous
case xs => xs find (_.name == cmd)
}
}
/** Print a welcome message */
def printWelcome() {
echo("""Welcome to
      ____              __
     / __/__  ___ _____/ /__
    _\ \/ _ \/ _ `/ __/ '_/
   /___/ .__/\_,_/_/ /_/\_\   version 0.7.3
      /_/
""")
import Properties._
val welcomeMsg = "Using Scala %s (%s, Java %s)".format(
versionString, javaVmName, javaVersion)
echo(welcomeMsg)
}
/** Show the history */
lazy val historyCommand = new LoopCommand("history", "show the history (optional num is commands to show)") {
override def usage = "[num]"
def defaultLines = 20
def apply(line: String): Result = {
if (history eq NoHistory)
return "No history available."
val xs = words(line)
val current = history.index
val count = try xs.head.toInt catch { case _: Exception => defaultLines }
val lines = history.asStrings takeRight count
val offset = current - lines.size + 1
for ((line, index) <- lines.zipWithIndex)
echo("%3d %s".format(index + offset, line))
}
}
private def echo(msg: String) = {
out println msg
out.flush()
}
private def echoNoNL(msg: String) = {
out print msg
out.flush()
}
/** Search the history */
def searchHistory(_cmdline: String) {
val cmdline = _cmdline.toLowerCase
val offset = history.index - history.size + 1
for ((line, index) <- history.asStrings.zipWithIndex ; if line.toLowerCase contains cmdline)
echo("%d %s".format(index + offset, line))
}
private var currentPrompt = Properties.shellPromptString
def setPrompt(prompt: String) = currentPrompt = prompt
/** Prompt to print when awaiting input */
def prompt = currentPrompt
import LoopCommand.{ cmd, nullary }
/** Standard commands **/
lazy val standardCommands = List(
cmd("cp", "<path>", "add a jar or directory to the classpath", addClasspath),
cmd("help", "[command]", "print this summary or command-specific help", helpCommand),
historyCommand,
cmd("h?", "<string>", "search the history", searchHistory),
cmd("imports", "[name name ...]", "show import history, identifying sources of names", importsCommand),
cmd("implicits", "[-v]", "show the implicits in scope", implicitsCommand),
cmd("javap", "<path|class>", "disassemble a file or class name", javapCommand),
nullary("keybindings", "show how ctrl-[A-Z] and other keys are bound", keybindingsCommand),
cmd("load", "<path>", "load and interpret a Scala file", loadCommand),
nullary("paste", "enter paste mode: all input up to ctrl-D compiled together", pasteCommand),
//nullary("power", "enable power user mode", powerCmd),
nullary("quit", "exit the interpreter", () => Result(false, None)),
nullary("replay", "reset execution and replay all previous commands", replay),
shCommand,
nullary("silent", "disable/enable automatic printing of results", verbosity),
cmd("type", "<expr>", "display the type of an expression without evaluating it", typeCommand)
)
/** Power user commands */
lazy val powerCommands: List[LoopCommand] = List(
//nullary("dump", "displays a view of the interpreter's internal state", dumpCommand),
//cmd("phase", "<phase>", "set the implicit phase for power commands", phaseCommand),
cmd("wrap", "<method>", "name of method to wrap around each repl line", wrapCommand) withLongHelp ("""
|:wrap
|:wrap clear
|:wrap <method>
|
|Installs a wrapper around each line entered into the repl.
|Currently it must be the simple name of an existing method
|with the specific signature shown in the following example.
|
|def timed[T](body: => T): T = {
| val start = System.nanoTime
| try body
| finally println((System.nanoTime - start) + " nanos elapsed.")
|}
|:wrap timed
|
|If given no argument, :wrap names the wrapper installed.
|An argument of clear will remove the wrapper if any is active.
|Note that wrappers do not compose (a new one replaces the old
|one) and also that the :phase command uses the same machinery,
|so setting :wrap will clear any :phase setting.
""".stripMargin.trim)
)
/*
private def dumpCommand(): Result = {
echo("" + power)
history.asStrings takeRight 30 foreach echo
in.redrawLine()
}
*/
private val typeTransforms = List(
"scala.collection.immutable." -> "immutable.",
"scala.collection.mutable." -> "mutable.",
"scala.collection.generic." -> "generic.",
"java.lang." -> "jl.",
"scala.runtime." -> "runtime."
)
private def importsCommand(line: String): Result = {
val tokens = words(line)
val handlers = intp.languageWildcardHandlers ++ intp.importHandlers
val isVerbose = tokens contains "-v"
handlers.filterNot(_.importedSymbols.isEmpty).zipWithIndex foreach {
case (handler, idx) =>
val (types, terms) = handler.importedSymbols partition (_.name.isTypeName)
val imps = handler.implicitSymbols
val found = tokens filter (handler importsSymbolNamed _)
val typeMsg = if (types.isEmpty) "" else types.size + " types"
val termMsg = if (terms.isEmpty) "" else terms.size + " terms"
val implicitMsg = if (imps.isEmpty) "" else imps.size + " are implicit"
val foundMsg = if (found.isEmpty) "" else found.mkString(" // imports: ", ", ", "")
val statsMsg = List(typeMsg, termMsg, implicitMsg) filterNot (_ == "") mkString ("(", ", ", ")")
intp.reporter.printMessage("%2d) %-30s %s%s".format(
idx + 1,
handler.importString,
statsMsg,
foundMsg
))
}
}
private def implicitsCommand(line: String): Result = {
val intp = SparkILoop.this.intp
import intp._
import global.Symbol
def p(x: Any) = intp.reporter.printMessage("" + x)
// If an argument is given, only show a source with that
// in its name somewhere.
    val args = line split "\\s+"
val filtered = intp.implicitSymbolsBySource filter {
case (source, syms) =>
(args contains "-v") || {
if (line == "") (source.fullName.toString != "scala.Predef")
else (args exists (source.name.toString contains _))
}
}
if (filtered.isEmpty)
return "No implicits have been imported other than those in Predef."
filtered foreach {
case (source, syms) =>
p("/* " + syms.size + " implicit members imported from " + source.fullName + " */")
// This groups the members by where the symbol is defined
val byOwner = syms groupBy (_.owner)
val sortedOwners = byOwner.toList sortBy { case (owner, _) => intp.afterTyper(source.info.baseClasses indexOf owner) }
sortedOwners foreach {
case (owner, members) =>
// Within each owner, we cluster results based on the final result type
// if there are more than a couple, and sort each cluster based on name.
// This is really just trying to make the 100 or so implicits imported
// by default into something readable.
val memberGroups: List[List[Symbol]] = {
val groups = members groupBy (_.tpe.finalResultType) toList
val (big, small) = groups partition (_._2.size > 3)
val xss = (
(big sortBy (_._1.toString) map (_._2)) :+
(small flatMap (_._2))
)
xss map (xs => xs sortBy (_.name.toString))
}
val ownerMessage = if (owner == source) " defined in " else " inherited from "
p(" /* " + members.size + ownerMessage + owner.fullName + " */")
memberGroups foreach { group =>
group foreach (s => p(" " + intp.symbolDefString(s)))
p("")
}
}
p("")
}
}
protected def newJavap() = new Javap(intp.classLoader, new SparkIMain.ReplStrippingWriter(intp)) {
override def tryClass(path: String): Array[Byte] = {
// Look for Foo first, then Foo$, but if Foo$ is given explicitly,
// we have to drop the $ to find object Foo, then tack it back onto
// the end of the flattened name.
def className = intp flatName path
def moduleName = (intp flatName path.stripSuffix("$")) + "$"
val bytes = super.tryClass(className)
if (bytes.nonEmpty) bytes
else super.tryClass(moduleName)
}
}
private lazy val javap =
try newJavap()
catch { case _: Exception => null }
private def typeCommand(line: String): Result = {
intp.typeOfExpression(line) match {
case Some(tp) => tp.toString
case _ => "Failed to determine type."
}
}
private def javapCommand(line: String): Result = {
if (javap == null)
return ":javap unavailable on this platform."
if (line == "")
return ":javap [-lcsvp] [path1 path2 ...]"
javap(words(line)) foreach { res =>
if (res.isError) return "Failed: " + res.value
else res.show()
}
}
private def keybindingsCommand(): Result = {
if (in.keyBindings.isEmpty) "Key bindings unavailable."
else {
echo("Reading jline properties for default key bindings.")
      echo("Accuracy not guaranteed: treat this as a guideline only.\n")
in.keyBindings foreach (x => echo ("" + x))
}
}
private def wrapCommand(line: String): Result = {
def failMsg = "Argument to :wrap must be the name of a method with signature [T](=> T): T"
val intp = SparkILoop.this.intp
val g: intp.global.type = intp.global
import g._
words(line) match {
case Nil =>
intp.executionWrapper match {
case "" => "No execution wrapper is set."
case s => "Current execution wrapper: " + s
}
case "clear" :: Nil =>
intp.executionWrapper match {
case "" => "No execution wrapper is set."
case s => intp.clearExecutionWrapper() ; "Cleared execution wrapper."
}
case wrapper :: Nil =>
intp.typeOfExpression(wrapper) match {
case Some(PolyType(List(targ), MethodType(List(arg), restpe))) =>
intp setExecutionWrapper intp.pathToTerm(wrapper)
"Set wrapper to '" + wrapper + "'"
case Some(x) =>
            failMsg + "\nFound: " + x
          case _ =>
            failMsg + "\nFound: <unknown>"
}
case _ => failMsg
}
}
private def pathToPhaseWrapper = intp.pathToTerm("$r") + ".phased.atCurrent"
/*
private def phaseCommand(name: String): Result = {
// This line crashes us in TreeGen:
//
// if (intp.power.phased set name) "..."
//
// Exception in thread "main" java.lang.AssertionError: assertion failed: ._7.type
// at scala.Predef$.assert(Predef.scala:99)
// at scala.tools.nsc.ast.TreeGen.mkAttributedQualifier(TreeGen.scala:69)
// at scala.tools.nsc.ast.TreeGen.mkAttributedQualifier(TreeGen.scala:44)
// at scala.tools.nsc.ast.TreeGen.mkAttributedRef(TreeGen.scala:101)
// at scala.tools.nsc.ast.TreeGen.mkAttributedStableRef(TreeGen.scala:143)
//
// But it works like so, type annotated.
val phased: Phased = power.phased
import phased.NoPhaseName
if (name == "clear") {
phased.set(NoPhaseName)
intp.clearExecutionWrapper()
"Cleared active phase."
}
else if (name == "") phased.get match {
case NoPhaseName => "Usage: :phase <expr> (e.g. typer, erasure.next, erasure+3)"
case ph => "Active phase is '%s'. (To clear, :phase clear)".format(phased.get)
}
else {
val what = phased.parse(name)
if (what.isEmpty || !phased.set(what))
"'" + name + "' does not appear to represent a valid phase."
else {
intp.setExecutionWrapper(pathToPhaseWrapper)
val activeMessage =
if (what.toString.length == name.length) "" + what
else "%s (%s)".format(what, name)
"Active phase is now: " + activeMessage
}
}
}
*/
/** Available commands */
def commands: List[LoopCommand] = standardCommands /* ++ (
if (isReplPower) powerCommands else Nil
)*/
val replayQuestionMessage =
"""|The repl compiler has crashed spectacularly. Shall I replay your
|session? I can re-run all lines except the last one.
|[y/n]
""".trim.stripMargin
private val crashRecovery: PartialFunction[Throwable, Unit] = {
case ex: Throwable =>
if (settings.YrichExes.value) {
val sources = implicitly[Sources]
        echo("\n" + ex.getMessage)
echo(
if (isReplDebug) "[searching " + sources.path + " for exception contexts...]"
else "[searching for exception contexts...]"
)
echo(Exceptional(ex).force().context())
}
else {
echo(util.stackTraceString(ex))
}
ex match {
case _: NoSuchMethodError | _: NoClassDefFoundError =>
echo("Unrecoverable error.")
throw ex
case _ =>
          def fn(): Boolean = in.readYesOrNo(replayQuestionMessage, { echo("\nYou must enter y or n.") ; fn() })
          if (fn()) replay()
          else echo("\nAbandoning crashed session.")
}
}
/** The main read-eval-print loop for the repl. It calls
* command() for each line of input, and stops when
* command() returns false.
*/
def loop() {
def readOneLine() = {
out.flush()
in readLine prompt
}
// return false if repl should exit
def processLine(line: String): Boolean =
if (line eq null) false // assume null means EOF
else command(line) match {
case Result(false, _) => false
case Result(_, Some(finalLine)) => addReplay(finalLine) ; true
case _ => true
}
while (true) {
try if (!processLine(readOneLine)) return
catch crashRecovery
}
}
/** interpret all lines from a specified file */
def interpretAllFrom(file: File) {
val oldIn = in
val oldReplay = replayCommandStack
try file applyReader { reader =>
in = SimpleReader(reader, out, false)
echo("Loading " + file + "...")
loop()
}
finally {
in = oldIn
replayCommandStack = oldReplay
}
}
/** create a new interpreter and replay all commands so far */
def replay() {
closeInterpreter()
createInterpreter()
for (cmd <- replayCommands) {
echo("Replaying: " + cmd) // flush because maybe cmd will have its own output
command(cmd)
echo("")
}
}
/** fork a shell and run a command */
lazy val shCommand = new LoopCommand("sh", "run a shell command (result is implicitly => List[String])") {
override def usage = "<command line>"
def apply(line: String): Result = line match {
case "" => showUsage()
case _ =>
val toRun = classOf[ProcessResult].getName + "(" + string2codeQuoted(line) + ")"
intp interpret toRun
()
}
}
def withFile(filename: String)(action: File => Unit) {
val f = File(filename)
if (f.exists) action(f)
else echo("That file does not exist")
}
def loadCommand(arg: String) = {
var shouldReplay: Option[String] = None
withFile(arg)(f => {
interpretAllFrom(f)
shouldReplay = Some(":load " + arg)
})
Result(true, shouldReplay)
}
def addClasspath(arg: String): Unit = {
val f = File(arg).normalize
if (f.exists) {
addedClasspath = ClassPath.join(addedClasspath, f.path)
val totalClasspath = ClassPath.join(settings.classpath.value, addedClasspath)
      echo("Added '%s'. Your new classpath is:\n\"%s\"".format(f.path, totalClasspath))
replay()
}
else echo("The path '" + f + "' doesn't seem to exist.")
}
def powerCmd(): Result = {
if (isReplPower) "Already in power mode."
else enablePowerMode()
}
def enablePowerMode() = {
//replProps.power setValue true
//power.unleash()
//echo(power.banner)
}
def verbosity() = {
val old = intp.printResults
intp.printResults = !old
echo("Switched " + (if (old) "off" else "on") + " result printing.")
}
/** Run one command submitted by the user. Two values are returned:
* (1) whether to keep running, (2) the line to record for replay,
* if any. */
def command(line: String): Result = {
if (line startsWith ":") {
val cmd = line.tail takeWhile (x => !x.isWhitespace)
uniqueCommand(cmd) match {
case Some(lc) => lc(line.tail stripPrefix cmd dropWhile (_.isWhitespace))
case _ => ambiguousError(cmd)
}
}
else if (intp.global == null) Result(false, None) // Notice failure to create compiler
else Result(true, interpretStartingWith(line))
}
private def readWhile(cond: String => Boolean) = {
Iterator continually in.readLine("") takeWhile (x => x != null && cond(x))
}
def pasteCommand(): Result = {
    echo("// Entering paste mode (ctrl-D to finish)\n")
    val code = readWhile(_ => true) mkString "\n"
    echo("\n// Exiting paste mode, now interpreting.\n")
intp interpret code
()
}
private object paste extends Pasted {
val ContinueString = " | "
val PromptString = "scala> "
def interpret(line: String): Unit = {
echo(line.trim)
intp interpret line
echo("")
}
def transcript(start: String) = {
// Printing this message doesn't work very well because it's buried in the
// transcript they just pasted. Todo: a short timer goes off when
// lines stop coming which tells them to hit ctrl-D.
//
// echo("// Detected repl transcript paste: ctrl-D to finish.")
apply(Iterator(start) ++ readWhile(_.trim != PromptString.trim))
}
}
import paste.{ ContinueString, PromptString }
/** Interpret expressions starting with the first line.
* Read lines until a complete compilation unit is available
* or until a syntax error has been seen. If a full unit is
* read, go ahead and interpret it. Return the full string
* to be recorded for replay, if any.
*/
def interpretStartingWith(code: String): Option[String] = {
    // signal to the completion object that non-completion input has been received
in.completion.resetVerbosity()
def reallyInterpret = {
val reallyResult = intp.interpret(code)
(reallyResult, reallyResult match {
case IR.Error => None
case IR.Success => Some(code)
case IR.Incomplete =>
          if (in.interactive && code.endsWith("\n\n")) {
echo("You typed two blank lines. Starting a new command.")
None
}
else in.readLine(ContinueString) match {
case null =>
// we know compilation is going to fail since we're at EOF and the
// parser thinks the input is still incomplete, but since this is
// a file being read non-interactively we want to fail. So we send
// it straight to the compiler for the nice error message.
intp.compileString(code)
None
            case line => interpretStartingWith(code + "\n" + line)
}
})
}
/** Here we place ourselves between the user and the interpreter and examine
* the input they are ostensibly submitting. We intervene in several cases:
*
* 1) If the line starts with "scala> " it is assumed to be an interpreter paste.
* 2) If the line starts with "." (but not ".." or "./") it is treated as an invocation
* on the previous result.
* 3) If the Completion object's execute returns Some(_), we inject that value
* and avoid the interpreter, as it's likely not valid scala code.
*/
if (code == "") None
else if (!paste.running && code.trim.startsWith(PromptString)) {
paste.transcript(code)
None
}
else if (Completion.looksLikeInvocation(code) && intp.mostRecentVar != "") {
interpretStartingWith(intp.mostRecentVar + code)
}
else {
def runCompletion = in.completion execute code map (intp bindValue _)
/** Due to my accidentally letting file completion execution sneak ahead
* of actual parsing this now operates in such a way that the scala
* interpretation always wins. However to avoid losing useful file
* completion I let it fail and then check the others. So if you
* type /tmp it will echo a failure and then give you a Directory object.
* It's not pretty: maybe I'll implement the silence bits I need to avoid
* echoing the failure.
*/
if (intp isParseable code) {
val (code, result) = reallyInterpret
//if (power != null && code == IR.Error)
// runCompletion
result
}
else runCompletion match {
case Some(_) => None // completion hit: avoid the latent error
case _ => reallyInterpret._2 // trigger the latent error
}
}
}
// runs :load `file` on any files passed via -i
def loadFiles(settings: Settings) = settings match {
case settings: GenericRunnerSettings =>
for (filename <- settings.loadfiles.value) {
val cmd = ":load " + filename
command(cmd)
addReplay(cmd)
echo("")
}
case _ =>
}
/** Tries to create a JLineReader, falling back to SimpleReader:
* unless settings or properties are such that it should start
* with SimpleReader.
*/
def chooseReader(settings: Settings): InteractiveReader = {
if (settings.Xnojline.value || Properties.isEmacsShell)
SimpleReader()
else try SparkJLineReader(
if (settings.noCompletion.value) NoCompletion
else new SparkJLineCompletion(intp)
)
catch {
case ex @ (_: Exception | _: NoClassDefFoundError) =>
        echo("Failed to create SparkJLineReader: " + ex + "\nFalling back to SimpleReader.")
SimpleReader()
}
}
def initializeSpark() {
intp.beQuietDuring {
command("""
spark.repl.Main.interp.out.println("Creating SparkContext...");
spark.repl.Main.interp.out.flush();
@transient val sc = spark.repl.Main.interp.createSparkContext();
spark.repl.Main.interp.out.println("Spark context available as sc.");
spark.repl.Main.interp.out.flush();
""")
command("import spark.SparkContext._")
}
echo("Type in expressions to have them evaluated.")
echo("Type :help for more information.")
}
var sparkContext: SparkContext = null
def createSparkContext(): SparkContext = {
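    // Resolve the master: explicit constructor argument first, then the MASTER env var, then "local";
    // extra jars to ship to the cluster come from the ADD_JARS env var.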
val master = this.master match {
case Some(m) => m
case None => {
val prop = System.getenv("MASTER")
if (prop != null) prop else "local"
}
}
val jars = Option(System.getenv("ADD_JARS")).map(_.split(','))
.getOrElse(new Array[String](0))
.map(new java.io.File(_).getAbsolutePath)
sparkContext = new SparkContext(master, "Spark shell", System.getenv("SPARK_HOME"), jars)
sparkContext
}
def process(settings: Settings): Boolean = {
// Ensure logging is initialized before any Spark threads try to use logs
// (because SLF4J initialization is not thread safe)
initLogging()
printWelcome()
echo("Initializing interpreter...")
// Add JARS specified in Spark's ADD_JARS variable to classpath
val jars = Option(System.getenv("ADD_JARS")).map(_.split(',')).getOrElse(new Array[String](0))
jars.foreach(settings.classpath.append(_))
this.settings = settings
createInterpreter()
// sets in to some kind of reader depending on environmental cues
in = in0 match {
case Some(reader) => SimpleReader(reader, out, true)
case None => chooseReader(settings)
}
loadFiles(settings)
// it is broken on startup; go ahead and exit
if (intp.reporter.hasErrors)
return false
try {
// this is about the illusion of snappiness. We call initialize()
// which spins off a separate thread, then print the prompt and try
// our best to look ready. Ideally the user will spend a
// couple seconds saying "wow, it starts so fast!" and by the time
// they type a command the compiler is ready to roll.
intp.initialize()
initializeSpark()
if (isReplPower) {
        echo("Starting in power mode, one moment...\n")
enablePowerMode()
}
loop()
}
finally closeInterpreter()
true
}
/** process command-line arguments and do as they request */
def process(args: Array[String]): Boolean = {
val command = new CommandLine(args.toList, msg => echo("scala: " + msg))
def neededHelp(): String =
      (if (command.settings.help.value) command.usageMsg + "\n" else "") +
      (if (command.settings.Xhelp.value) command.xusageMsg + "\n" else "")
// if they asked for no help and command is valid, we call the real main
neededHelp() match {
case "" => command.ok && process(command.settings)
case help => echoNoNL(help) ; true
}
}
@deprecated("Use `process` instead", "2.9.0")
def main(args: Array[String]): Unit = {
if (isReplDebug)
System.out.println(new java.util.Date)
process(args)
}
@deprecated("Use `process` instead", "2.9.0")
def main(settings: Settings): Unit = process(settings)
}
object SparkILoop {
implicit def loopToInterpreter(repl: SparkILoop): SparkIMain = repl.intp
private def echo(msg: String) = Console println msg
// Designed primarily for use by test code: take a String with a
// bunch of code, and prints out a transcript of what it would look
// like if you'd just typed it into the repl.
def runForTranscript(code: String, settings: Settings): String = {
import java.io.{ BufferedReader, StringReader, OutputStreamWriter }
stringFromStream { ostream =>
Console.withOut(ostream) {
val output = new PrintWriter(new OutputStreamWriter(ostream), true) {
override def write(str: String) = {
// completely skip continuation lines
if (str forall (ch => ch.isWhitespace || ch == '|')) ()
// print a newline on empty scala prompts
            else if ((str contains '\n') && (str.trim == "scala> ")) super.write("\n")
else super.write(str)
}
}
val input = new BufferedReader(new StringReader(code)) {
override def readLine(): String = {
val s = super.readLine()
// helping out by printing the line being interpreted.
if (s != null)
output.println(s)
s
}
}
val repl = new SparkILoop(input, output)
if (settings.classpath.isDefault)
settings.classpath.value = sys.props("java.class.path")
repl process settings
}
}
}
/** Creates an interpreter loop with default settings and feeds
* the given code to it as input.
*/
def run(code: String, sets: Settings = new Settings): String = {
import java.io.{ BufferedReader, StringReader, OutputStreamWriter }
stringFromStream { ostream =>
Console.withOut(ostream) {
val input = new BufferedReader(new StringReader(code))
val output = new PrintWriter(new OutputStreamWriter(ostream), true)
val repl = new SparkILoop(input, output)
if (sets.classpath.isDefault)
sets.classpath.value = sys.props("java.class.path")
repl process sets
}
}
}
  def run(lines: List[String]): String = run(lines map (_ + "\n") mkString)
// provide the enclosing type T
// in order to set up the interpreter's classpath and parent class loader properly
def breakIf[T: Manifest](assertion: => Boolean, args: NamedParam*): Unit =
if (assertion) break[T](args.toList)
// start a repl, binding supplied args
def break[T: Manifest](args: List[NamedParam]): Unit = {
val msg = if (args.isEmpty) "" else " Binding " + args.size + " value%s.".format(
if (args.size == 1) "" else "s"
)
echo("Debug repl starting." + msg)
val repl = new SparkILoop {
      override def prompt = "\ndebug> "
}
repl.settings = new Settings(echo)
repl.settings.embeddedDefaults[T]
repl.createInterpreter()
repl.in = SparkJLineReader(repl)
// rebind exit so people don't accidentally call sys.exit by way of predef
repl.quietRun("""def exit = println("Type :quit to resume program execution.")""")
args foreach (p => repl.bind(p.name, p.tpe, p.value))
repl.loop()
    echo("\nDebug repl exiting.")
repl.closeInterpreter()
}
}
| prabeesh/Spark-Kestrel | repl/src/main/scala/spark/repl/SparkILoop.scala | Scala | bsd-3-clause | 35,807 |
package breeze.linalg
import operators._
import support._
import breeze.math.MutableInnerProductSpace
import breeze.math.Semiring
import DenseMatrix._
/** Import this to provide access to a DenseMatrix[Double] as a MutableInnerProductSpace, so it can be used in optimization. */
object MutableInnerProductSpaceDenseMatrixDouble {
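  // Illustrative usage (not from the original source): import the members so the implicit space is in scope, e.g.
  //   import MutableInnerProductSpaceDenseMatrixDouble._
  //   implicitly[MutableInnerProductSpace[DenseMatrix[Double], Int, Double]]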
// //implicit val canDotD_f = new CanDotDDenseMatrix[Float]
// //implicit val canDotD_i = new CanDotDDenseMatrix[Int]
implicit val space_d = {
class CanDotDDenseMatrix extends BinaryOp[DenseMatrix[Double], DenseMatrix[Double], OpMulInner, Double] {
override def apply(a: DenseMatrix[Double], b: DenseMatrix[Double]):Double = {
require(a.rows == b.rows, "Vector row dimensions must match!")
require(a.cols == b.cols, "Vector col dimensions must match!")
val aVec = a.toDenseVector
val bVec = b.toDenseVector
aVec.dot(bVec)
}
}
implicit val canDotD_d = new CanDotDDenseMatrix()
MutableInnerProductSpace.make[DenseMatrix[Double], Int, Double]
}
} | ktakagaki/breeze | src/main/scala/breeze/linalg/MutableInnerProductSpaceDenseMatrix.scala | Scala | apache-2.0 | 1,065 |
package models.hbase090
import models.hbase.{HBase, HBaseContext}
/*
* Copyright 2014 YMC. See LICENSE for details.
*/
class HBaseContext090 extends HBaseContext {
override val hBase:HBase = new HBase090
override val logFileParser = new LogFileParser090
}
| Connexity/hannibal | hbase/0.90/scala/models/hbase090/HBaseContext090.scala | Scala | apache-2.0 | 264 |
package com.mesosphere.universe.v2.model
import com.netaporter.uri.Uri
import io.circe.JsonObject
case class PackageFiles(
revision: String,
sourceUri: Uri,
packageJson: PackageDetails,
marathonJsonMustache: String,
commandJson: Option[Command] = None,
configJson: Option[JsonObject] = None,
resourceJson: Option[Resource] = None
)
| movicha/cosmos | cosmos-model/src/main/scala/com/mesosphere/universe/v2/model/PackageFiles.scala | Scala | apache-2.0 | 348 |
package com.gjos.scala.swoc.protocol
import org.scalatest.{WordSpec, Matchers}
import scala.io.Source
import com.gjos.scala.swoc.util.{JsonConverters, Resource}
import com.gjos.scala.swoc.Bot
class BoardSpec extends WordSpec with Matchers {
val us = Player.Black
def loadBoard(resource: String) = {
val lines = Source.fromFile(Resource.testResource(resource)).getLines()
val jsonBoard = lines.next()
val board = JsonConverters.createMoveRequest(jsonBoard).board
board
}
"Board" should {
"rate a situation correctly" in {
val board = loadBoard("reinforce-weaker.txt")
val x0 = board.score(us)
val goodMove = Move(MoveType.Attack, Location fromLabel "E4", Location fromLabel "D3")
val badMove = Move(MoveType.Attack, Location fromLabel "A4", Location fromLabel "E4")
val x1 = Board.applyMove(board, badMove).score(us)
val x2 = Board.applyMove(board, goodMove).score(us)
x1 should be < x0
x2 should be > x0
}
}
}
| Oduig/swoc2014 | Greedy/src/test/scala/com/gjos/scala/swoc/protocol/BoardSpec.scala | Scala | apache-2.0 | 996 |
package feeds.acl
import com.typesafe.config.ConfigFactory
import controllers.ArrivalGenerator
import drt.server.feeds.acl.AclFeed
import drt.server.feeds.acl.AclFeed.{arrivalsFromCsvContent, contentFromFileName, latestFileForPort, sftpClient}
import drt.shared
import drt.shared.FlightsApi.Flights
import drt.shared.PaxTypesAndQueues._
import drt.shared.Terminals._
import drt.shared.api.Arrival
import drt.shared.{VoyageNumber, _}
import server.feeds.{ArrivalsFeedFailure, ArrivalsFeedSuccess}
import services.SDate
import services.crunch.{CrunchTestLike, TestConfig}
import scala.collection.immutable.{List, SortedMap}
import scala.concurrent.duration._
class AclFeedSpec extends CrunchTestLike {
val regularTerminalMapping: Terminal => Terminal = (t: Terminal) => t
"ACL feed failures" >> {
val aclFeed = AclFeed("nowhere.nowhere", "badusername", "badpath", PortCode("BAD"), (_: Terminal) => T1, 1000L)
val result = aclFeed.requestArrivals.getClass
val expected = classOf[ArrivalsFeedFailure]
result === expected
}
"ACL feed parsing" >> {
"Given ACL csv content containing a header line and one arrival line " +
"When I ask for the arrivals " +
"Then I should see a list containing the appropriate Arrival" >> {
val csvContent =
"""A/C,ACReg,Airport,ArrDep,CreDate,Date,DOOP,EditDate,Icao Aircraft Type,Icao Last/Next Station,Icao Orig/Dest Station,LastNext,LastNextCountry,Ope,OpeGroup,OpeName,OrigDest,OrigDestCountry,Res,Season,Seats,ServNo,ST,ove.ind,Term,Time,TurnOpe,TurnServNo,OpeFlightNo,LoadFactor
|32A,,LHR,A,09SEP2016 0606,2017-10-13,0000500,29SEP2017 0959,A320,EDDK,EDDK,CGN,DE,4U,STAR ALLIANCE,GERMANWINGS GMBH,CGN,DE,T2-Intl & CTA,S17,180,0460,J,,2I,0710,4U,0461,4U0460,0.827777802944183
""".stripMargin
val arrivals = arrivalsFromCsvContent(csvContent, regularTerminalMapping)
val expected = List(Arrival(Operator = Option(Operator("4U")), Status = ArrivalStatus("ACL Forecast"), Estimated = None, Actual = None,
EstimatedChox = None, ActualChox = None, Gate = None, Stand = None, MaxPax = Option(180), ActPax = Option(149),
TranPax = None, RunwayID = None, BaggageReclaimId = None, AirportID = PortCode("LHR"), Terminal = T2,
rawICAO = "4U0460", rawIATA = "4U0460", Origin = PortCode("CGN"), FeedSources = Set(shared.AclFeedSource),
Scheduled = 1507878600000L, PcpTime = None))
arrivals === expected
}
"Given ACL csv content containing a header line and one departure line " +
"When I ask for the arrivals " +
"Then I should see an empty list" >> {
val csvContent =
"""A/C,ACReg,Airport,ArrDep,CreDate,Date,DOOP,EditDate,Icao Aircraft Type,Icao Last/Next Station,Icao Orig/Dest Station,LastNext,LastNextCountry,Ope,OpeGroup,OpeName,OrigDest,OrigDestCountry,Res,Season,Seats,ServNo,ST,ove.ind,Term,Time,TurnOpe,TurnServNo,OpeFlightNo,LoadFactor
|32A,,LHR,D,09SEP2016 0606,2017-10-13,0000500,29SEP2017 0959,A320,EDDK,EDDK,CGN,DE,4U,STAR ALLIANCE,GERMANWINGS GMBH,CGN,DE,T2-Intl & CTA,S17,180,0460,J,,2I,0710,4U,0461,4U0460,0.827777802944183
""".stripMargin
val arrivals = arrivalsFromCsvContent(csvContent, regularTerminalMapping)
val expected = List()
arrivals === expected
}
"Given ACL csv content containing a header line and one positioning flight " +
"When I ask for the arrivals " +
"Then I should see an empty list" >> {
val csvContent =
"""A/C,ACReg,Airport,ArrDep,CreDate,Date,DOOP,EditDate,Icao Aircraft Type,Icao Last/Next Station,Icao Orig/Dest Station,LastNext,LastNextCountry,Ope,OpeGroup,OpeName,OrigDest,OrigDestCountry,Res,Season,Seats,ServNo,ST,ove.ind,Term,Time,TurnOpe,TurnServNo,OpeFlightNo,LoadFactor
|32A,,LHR,D,09SEP2016 0606,2017-10-13,0000500,29SEP2017 0959,A320,EDDK,EDDK,CGN,DE,4U,STAR ALLIANCE,GERMANWINGS GMBH,CGN,DE,T2-Intl & CTA,S17,180,0460,J,,2I,0710,4U,0461,4U0460P,0.827777802944183
""".stripMargin
val arrivals = arrivalsFromCsvContent(csvContent, regularTerminalMapping)
val expected = List()
arrivals === expected
}
"Given ACL csv content containing 0 for MaxPax and 0 for load factor " +
"When I ask for the arrivals " +
"Then I should see 0 being used for both Max and Act pax" >> {
val csvContent =
"""A/C,ACReg,Airport,ArrDep,CreDate,Date,DOOP,EditDate,Icao Aircraft Type,Icao Last/Next Station,Icao Orig/Dest Station,LastNext,LastNextCountry,Ope,OpeGroup,OpeName,OrigDest,OrigDestCountry,Res,Season,Seats,ServNo,ST,ove.ind,Term,Time,TurnOpe,TurnServNo,OpeFlightNo,LoadFactor
|32A,,LHR,A,09SEP2016 0606,2017-10-13,0000500,29SEP2017 0959,A320,EDDK,EDDK,CGN,DE,4U,STAR ALLIANCE,GERMANWINGS GMBH,CGN,DE,T2-Intl & CTA,S17,0,0460,J,,2I,0710,4U,0461,4U0460,0
""".stripMargin
val arrivals = arrivalsFromCsvContent(csvContent, regularTerminalMapping)
val expected = List(
Arrival(
Operator = Option(Operator("4U")),
Status = ArrivalStatus("ACL Forecast"),
Estimated = None,
Actual = None,
EstimatedChox = None,
ActualChox = None,
Gate = None,
Stand = None,
MaxPax = Option(0),
ActPax = Option(0),
TranPax = None,
RunwayID = None,
BaggageReclaimId = None,
AirportID = PortCode("LHR"),
Terminal = T2,
rawICAO = "4U0460",
rawIATA = "4U0460",
Origin = PortCode("CGN"),
FeedSources = Set(shared.AclFeedSource),
Scheduled = 1507878600000L,
PcpTime = None))
arrivals === expected
}
"Given ACL csv content containing 200 for MaxPax but 0 for load factor " +
"When I ask for the arrivals " +
"Then I should see 0 being used for act pax and 200 for Max Pax" >> {
val csvContent =
"""A/C,ACReg,Airport,ArrDep,CreDate,Date,DOOP,EditDate,Icao Aircraft Type,Icao Last/Next Station,Icao Orig/Dest Station,LastNext,LastNextCountry,Ope,OpeGroup,OpeName,OrigDest,OrigDestCountry,Res,Season,Seats,ServNo,ST,ove.ind,Term,Time,TurnOpe,TurnServNo,OpeFlightNo,LoadFactor
|32A,,LHR,A,09SEP2016 0606,2017-10-13,0000500,29SEP2017 0959,A320,EDDK,EDDK,CGN,DE,4U,STAR ALLIANCE,GERMANWINGS GMBH,CGN,DE,T2-Intl & CTA,S17,200,0460,J,,2I,0710,4U,0461,4U0460,0
""".stripMargin
val arrivals = arrivalsFromCsvContent(csvContent, regularTerminalMapping)
val expected = List(
Arrival(
Operator = Option(Operator("4U")),
Status = ArrivalStatus("ACL Forecast"),
Estimated = None,
Actual = None,
EstimatedChox = None,
ActualChox = None,
Gate = None,
Stand = None,
MaxPax = Option(200),
ActPax = Option(0),
TranPax = None,
RunwayID = None,
BaggageReclaimId = None,
AirportID = PortCode("LHR"),
Terminal = T2,
rawICAO = "4U0460",
rawIATA = "4U0460",
Origin = PortCode("CGN"),
FeedSources = Set(shared.AclFeedSource),
Scheduled = 1507878600000L,
PcpTime = None))
arrivals === expected
}
"ACL Flights " >> {
"Given an ACL feed with one flight and no live flights" +
"When I ask for a crunch " +
"Then I should see that flight in the PortState" >> {
val scheduled = "2017-01-01T00:00Z"
val arrival = ArrivalGenerator.arrival(iata = "BA0001", schDt = scheduled, actPax = Option(10))
val aclFlight = Flights(List(arrival))
val fiveMinutes = 600d / 60
val crunch = runCrunchGraph(TestConfig(
now = () => SDate(scheduled),
airportConfig = defaultAirportConfig.copy(terminalProcessingTimes = Map(T1 -> Map(eeaMachineReadableToDesk -> fiveMinutes)))))
offerAndWait(crunch.aclArrivalsInput, ArrivalsFeedSuccess(aclFlight))
val expected = Set(arrival.copy(FeedSources = Set(AclFeedSource)))
crunch.portStateTestProbe.fishForMessage(3 seconds) {
case ps: PortState =>
val flightsResult = ps.flights.values.map(_.apiFlight).toSet
flightsResult == expected
}
success
}
"Given an ACL feed with one flight and the same flight in the live feed" +
"When I ask for a crunch " +
"Then I should see the one flight in the PortState with the ACL flightcode and live chox" >> {
val scheduled = "2017-01-01T00:00Z"
val aclFlight = ArrivalGenerator.arrival(iata = "BA0001", schDt = scheduled, actPax = Option(10))
val aclFlights = Flights(List(aclFlight))
val liveFlight = ArrivalGenerator.arrival(iata = "BAW001", schDt = scheduled, actPax = Option(20), actChoxDt = "2017-01-01T00:30Z")
val liveFlights = Flights(List(liveFlight))
val fiveMinutes = 600d / 60
val crunch = runCrunchGraph(TestConfig(
now = () => SDate(scheduled),
airportConfig = defaultAirportConfig.copy(terminalProcessingTimes = Map(T1 -> Map(eeaMachineReadableToDesk -> fiveMinutes)))))
offerAndWait(crunch.aclArrivalsInput, ArrivalsFeedSuccess(aclFlights))
offerAndWait(crunch.liveArrivalsInput, ArrivalsFeedSuccess(liveFlights))
val expected = Set(liveFlight.copy(CarrierCode = aclFlight.CarrierCode, VoyageNumber = aclFlight.VoyageNumber, FeedSources = Set(AclFeedSource, LiveFeedSource)))
crunch.portStateTestProbe.fishForMessage(3 seconds) {
case ps: PortState =>
val flightsResult = ps.flights.values.map(_.apiFlight).toSet
flightsResult == expected
}
success
}
"Given some initial ACL & live arrivals, one ACL arrival and no live arrivals " +
"When I ask for a crunch " +
"Then I should only see the new ACL arrival with the initial live arrivals" >> {
val scheduledLive = "2017-01-01T00:00Z"
val initialAcl = Set(
ArrivalGenerator.arrival(iata = "BA0001", schDt = "2017-01-01T00:05Z", actPax = Option(150), status = ArrivalStatus("forecast")),
ArrivalGenerator.arrival(iata = "BA0002", schDt = "2017-01-01T00:15Z", actPax = Option(151), status = ArrivalStatus("forecast")))
val initialLive = Set(
ArrivalGenerator.arrival(iata = "BA0003", schDt = "2017-01-01T00:25Z", actPax = Option(99), status = ArrivalStatus("scheduled")))
val newAcl = Set(
ArrivalGenerator.arrival(iata = "BA0011", schDt = "2017-01-01T00:10Z", actPax = Option(105), status = ArrivalStatus("forecast")))
val crunch = runCrunchGraph(TestConfig(now = () => SDate(scheduledLive)))
offerAndWait(crunch.liveArrivalsInput, ArrivalsFeedSuccess(Flights(initialLive.toList)))
offerAndWait(crunch.aclArrivalsInput, ArrivalsFeedSuccess(Flights(initialAcl.toList)))
offerAndWait(crunch.aclArrivalsInput, ArrivalsFeedSuccess(Flights(newAcl.toList)))
val expected = initialLive.map(_.copy(FeedSources = Set(LiveFeedSource))) ++ newAcl.map(_.copy(FeedSources = Set(AclFeedSource)))
crunch.portStateTestProbe.fishForMessage(3 seconds) {
case ps: PortState =>
val flightsResult = ps.flights.values.map(_.apiFlight).toSet
flightsResult == expected
}
success
}
"Given some initial arrivals, no ACL arrivals and one live arrival " +
"When I ask for a crunch " +
"Then I should only see the initial arrivals updated with the live arrival" >> {
val scheduledLive = "2017-01-01T00:00Z"
val initialAcl1 = ArrivalGenerator.arrival(iata = "BA0001", schDt = "2017-01-01T00:05Z", actPax = Option(150), status = ArrivalStatus("forecast"))
val initialAcl2 = ArrivalGenerator.arrival(iata = "BA0002", schDt = "2017-01-01T00:15Z", actPax = Option(151), status = ArrivalStatus("forecast"))
val initialAcl = Set(initialAcl1, initialAcl2)
val initialLive = SortedMap[UniqueArrival, Arrival]() ++ List(ArrivalGenerator.arrival(iata = "BA0001", schDt = "2017-01-01T00:05Z", actPax = Option(99), status = ArrivalStatus("scheduled"))).map(a => (a.unique, a))
val newLive = Set(
ArrivalGenerator.arrival(iata = "BAW0001", schDt = "2017-01-01T00:05Z", actPax = Option(105), status = ArrivalStatus("estimated"), estDt = "2017-01-01T00:06Z"))
val crunch = runCrunchGraph(TestConfig(
now = () => SDate(scheduledLive),
initialLiveArrivals = initialLive
))
offerAndWait(crunch.aclArrivalsInput, ArrivalsFeedSuccess(Flights(initialAcl.toList)))
offerAndWait(crunch.liveArrivalsInput, ArrivalsFeedSuccess(Flights(newLive.toList)))
val expected = newLive.map(_.copy(CarrierCode = CarrierCode("BA"), VoyageNumber = VoyageNumber(1), FeedSources = Set(LiveFeedSource, AclFeedSource))) + initialAcl2.copy(FeedSources = Set(AclFeedSource))
crunch.portStateTestProbe.fishForMessage(3 seconds) {
case ps: PortState =>
val flightsResult = ps.flights.values.map(_.apiFlight).toSet
flightsResult == expected
}
success
}
"Given one ACL arrival followed by one live arrival and initial arrivals which don't match them " +
"When I ask for a crunch " +
"Then I should only see the new ACL & live arrivals plus the initial live arrival" >> {
val scheduledLive = "2017-01-01T00:00Z"
val initialAcl1 = ArrivalGenerator.arrival(iata = "BA0001", schDt = "2017-01-01T00:01Z", actPax = Option(150), status = ArrivalStatus("forecast"))
val initialAcl2 = ArrivalGenerator.arrival(iata = "BA0002", schDt = "2017-01-01T00:02Z", actPax = Option(151), status = ArrivalStatus("forecast"))
val initialAcl = SortedMap[UniqueArrival, Arrival]() ++ List(initialAcl1, initialAcl2).map(a => (a.unique, a))
val initialLive = SortedMap[UniqueArrival, Arrival]() ++ List(ArrivalGenerator.arrival(iata = "BA0003", schDt = "2017-01-01T00:03Z", actPax = Option(99), status = ArrivalStatus("scheduled"))).map(a => (a.unique, a))
val newAcl = Flights(Seq(ArrivalGenerator.arrival(iata = "BA0004", schDt = "2017-01-01T00:04Z", actPax = Option(100))))
val newLive = Flights(Seq(ArrivalGenerator.arrival(iata = "BAW0005", schDt = "2017-01-01T00:05Z", actPax = Option(101))))
val crunch = runCrunchGraph(TestConfig(
now = () => SDate(scheduledLive),
initialForecastBaseArrivals = initialAcl,
initialLiveArrivals = initialLive
))
offerAndWait(crunch.aclArrivalsInput, ArrivalsFeedSuccess(newAcl))
offerAndWait(crunch.liveArrivalsInput, ArrivalsFeedSuccess(newLive))
val expected = Set(VoyageNumber(3), VoyageNumber(4), VoyageNumber(5))
crunch.portStateTestProbe.fishForMessage(5 seconds) {
case ps: PortState =>
val voyageNos = ps.flights.values.map(_.apiFlight.VoyageNumber).toSet
voyageNos == expected
}
success
}
"Given one ACL arrival followed by the same single ACL arrival " +
"When I ask for a crunch " +
"Then I should still see the arrival, ie it should not have been removed" >> {
val scheduledLive = "2017-01-01T00:00Z"
val aclArrival = ArrivalGenerator.arrival(iata = "BA0001", schDt = "2017-01-01T00:05Z", actPax = Option(150), status = ArrivalStatus("forecast"), feedSources = Set(AclFeedSource))
val aclInput1 = Flights(Seq(aclArrival))
val aclInput2 = Flights(Seq(aclArrival))
val crunch = runCrunchGraph(TestConfig(now = () => SDate(scheduledLive)))
offerAndWait(crunch.aclArrivalsInput, ArrivalsFeedSuccess(aclInput1))
offerAndWait(crunch.aclArrivalsInput, ArrivalsFeedSuccess(aclInput2))
val portStateFlightLists = crunch.portStateTestProbe.receiveWhile(3 seconds) {
case PortState(f, _, _) => f.values.map(_.apiFlight)
}
val nonEmptyFlightsList = List(aclArrival.copy(FeedSources = Set(AclFeedSource)))
val expected = List(nonEmptyFlightsList)
portStateFlightLists.distinct === expected
}
}
"Looking at flights" >> {
skipped("Integration test for ACL - requires SSL certificate to run")
val ftpServer = ConfigFactory.load.getString("acl.host")
val username = ConfigFactory.load.getString("acl.username")
val path = ConfigFactory.load.getString("acl.keypath")
val sftp = sftpClient(AclFeed.sshClient(ftpServer, username, path))
val latestFile = latestFileForPort(sftp, PortCode("MAN"), 100000L)
println(s"latestFile: $latestFile")
val aclArrivals: List[Arrival] = arrivalsFromCsvContent(contentFromFileName(sftp, latestFile), regularTerminalMapping)
val todayArrivals = aclArrivals
.filter(_.Scheduled < SDate("2017-10-05T23:00").millisSinceEpoch)
.groupBy(_.Terminal)
todayArrivals.foreach {
case (tn, _) =>
val tByUniqueId = todayArrivals(tn).groupBy(_.uniqueId)
println(s"uniques for $tn: ${tByUniqueId.keys.size} flights")
tByUniqueId.filter {
case (_, a) => a.length > 1
}.foreach {
case (uid, a) => println(s"non-unique: $uid -> $a")
}
}
todayArrivals.keys.foreach(t => println(s"terminal $t has ${todayArrivals(t).size} flights"))
success
}
}
}
| UKHomeOffice/drt-scalajs-spa-exploration | server/src/test/scala/feeds/acl/AclFeedSpec.scala | Scala | apache-2.0 | 17,517 |