code stringlengths 5-1M | repo_name stringlengths 5-109 | path stringlengths 6-208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5-1M |
---|---|---|---|---|---|
/*
Copyright (C) 2013-2018 Expedia Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.hotels.styx.support.generators
import org.scalacheck.Gen
case class CookieHeaderString(text: String)
class CookieHeaderGenerator {
def cookieNames: Gen[String] = Gen.oneOf(
"n",
"na",
"name",
"name_",
"name__",
"name___",
"name____",
"name_____",
"name______",
"name_______"
)
case class CookieTemplate(name: String, value: String)
type ActionFunction = Function1[List[CookieTemplate], String]
def badCookieHeaders: Gen[CookieHeaderString] = for {
cookies <- cookieList
text <- perturbedCookieHeader(cookies)
} yield CookieHeaderString(text)
def cookieList: Gen[List[CookieTemplate]] = for {
count <- Gen.choose(1, 50)
cookies <- Gen.resize(count, Gen.nonEmptyListOf(httpCookie))
} yield cookies
def httpCookie: Gen[CookieTemplate] = for {
name <- cookieNames
value <- Gen.alphaStr
} yield CookieTemplate(name, value)
def perturbedCookieHeader(cookies: List[CookieTemplate]): Gen[String] = {
for {
action <- Gen.delay(
genInsertInvalidCharacterIntoName(cookies)
)
} yield action(cookies)
}
def genPrefixNameWithDollar(cookies: List[CookieTemplate]): Gen[ActionFunction] = for {
cookieIndex <- Gen.choose(0, cookies.length - 1)
} yield prefixNameWithDollar(cookieIndex)
def prefixNameWithDollar(cookieIndex: Int)(cookies: List[CookieTemplate]): String = {
require(cookies.nonEmpty)
val newCookie = CookieTemplate("$" + cookies(cookieIndex).name, cookies(cookieIndex).value)
toCookieHeaderString(cookieIndex, cookies, newCookie)
}
def genInsertInvalidCharacterIntoName(cookies: List[CookieTemplate]): Gen[ActionFunction] = for {
cookieIndex <- Gen.choose(0, cookies.length - 1)
position <- Gen.choose(0, cookies(cookieIndex).name.length - 1)
replacement <- genInvalidCookieNameCharacter
} yield insertInvalidCharacterInCookieName(cookieIndex, position, replacement)
def insertInvalidCharacterInCookieName(cookieIndex: Int, position: Int, replacement: Char)(cookies: List[CookieTemplate]): String = {
require(cookies.nonEmpty)
val oldName = cookies(cookieIndex).name
val newName = oldName.substring(0, position) + replacement + oldName.substring(1 + position)
val newCookie = CookieTemplate(newName, cookies(cookieIndex).value)
toCookieHeaderString(cookieIndex, cookies, newCookie)
}
def genInvalidCookieNameCharacter = Gen.oneOf(
"[\\"()/<>?@\\\\[\\\\]\\\\\\\\]~"
)
def toCookieHeaderString(cookieIndex: Int, cookies: List[CookieTemplate], newCookie: CookieTemplate): String = {
cookies.patch(cookieIndex, Seq(newCookie), 1)
.map(cookie => "%s=%s".format(cookie.name, cookie.value))
.mkString("; ")
}
}
object CookieHeaderGenerator {
def apply() = new CookieHeaderGenerator()
}
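// Editor's note (not part of the original file): a minimal sketch of how this generator
// might be wired into a ScalaCheck property. The property body and the `parseCookieHeader`
// helper are hypothetical; only `CookieHeaderGenerator().badCookieHeaders` comes from above.
//
//   import org.scalacheck.Prop.forAll
//
//   forAll(CookieHeaderGenerator().badCookieHeaders) { header: CookieHeaderString =>
//     // e.g. assert that the server rejects or sanitises the malformed Cookie header
//     parseCookieHeader(header.text).isEmpty
//   }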
| mikkokar/styx | system-tests/e2e-suite/src/test/scala/com/hotels/styx/support/generators/CookieHeaderGenerator.scala | Scala | apache-2.0 | 3,394 |
/*
* MarginalMAPVEStrategy.scala
* A class that solves a marginal MAP problem using VE.
*
* Created By: William Kretschmer ([email protected])
* Creation Date: July 1, 2015
*
* Copyright 2015 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email [email protected] for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.algorithm.structured.strategy.solve
import com.cra.figaro.algorithm.structured.Problem
import com.cra.figaro.algorithm.factored.factors.Factor
import com.cra.figaro.algorithm.factored.factors.Variable
import com.cra.figaro.algorithm.structured.solver
import com.cra.figaro.algorithm.structured.NestedProblem
/**
* A solving strategy that uses MPE VE to solve non-nested problems, and performs the MAP step at the top level.
* It is assumed that at the top level, "toPreserve" elements are the MAP elements.
*/
class MarginalMAPVEStrategy extends SolvingStrategy {
def solve(problem: Problem, toEliminate: Set[Variable[_]], toPreserve: Set[Variable[_]], factors: List[Factor[Double]]):
(List[Factor[Double]], Map[Variable[_], Factor[_]]) = {
problem match {
case _: NestedProblem[_] => {
// A problem needed for the initial step of summing out the non-MAP variables
// Use marginal VE for this
solver.marginalVariableElimination(problem, toEliminate, toPreserve, factors)
}
case _ => {
// Sum over the remaining non-MAP variables (i.e. toEliminate), and MAP the rest (i.e. toPreserve)
// marginalizedFactors is a set of factors over just the MAP variables
val (marginalizedFactors, _) = solver.marginalVariableElimination(problem, toEliminate, toPreserve, factors)
// Now that we have eliminated the sum variables, we effectively just do MPE over the remaining variables
// For MPE, we eliminate all remaining variables (i.e. toPreserve), and preserve no variables (i.e. Set())
solver.mpeVariableElimination(problem, toPreserve, Set(), marginalizedFactors)
}
}
}
}
| scottcb/figaro | Figaro/src/main/scala/com/cra/figaro/algorithm/structured/strategy/solve/MarginalMAPVEStrategy.scala | Scala | bsd-3-clause | 2,113 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.commons.source
import cascading.pipe.Pipe
import cascading.tuple.Fields
import com.twitter.elephantbird.mapreduce.io.ThriftWritable
import com.twitter.elephantbird.util.{ThriftUtils, TypeRef}
import com.twitter.scalding._
import org.apache.hadoop.io.{LongWritable, Writable}
import org.apache.thrift.TBase
trait LongThriftTransformer[V <: TBase[_, _]] extends Source {
def mt: Manifest[V]
def fields: Fields
// meant to override fields within WritableSequenceFileScheme.
val keyType = classOf[LongWritable]
val valueType = classOf[ThriftWritable[V]].asInstanceOf[Class[Writable]]
override protected def transformForRead(pipe: Pipe): Pipe =
new RichPipe(pipe).mapTo(fields -> fields) { v: (LongWritable, ThriftWritable[V]) =>
v._2.setConverter(mt.runtimeClass.asInstanceOf[Class[V]])
(v._1.get, v._2.get)
}
override protected def transformForWrite(pipe: Pipe) =
new RichPipe(pipe).mapTo(fields -> fields) { v: (Long, V) =>
val key = new LongWritable(v._1)
val value = new ThriftWritable(v._2, typeRef)
(key, value)
}
lazy val typeRef = ThriftUtils.getTypeRef(mt.runtimeClass).asInstanceOf[TypeRef[TBase[_, _]]]
}
| twitter/scalding | scalding-commons/src/main/scala/com/twitter/scalding/commons/source/LongThriftTransformer.scala | Scala | apache-2.0 | 1,765 |
/* Copyright 2009-2018 EPFL, Lausanne */
package inox
package ast
import scala.collection.immutable.BitSet
/** Expression definitions for Pure Scala.
*
* Every expression in Inox inherits from [[Expressions.Expr]].
* Expressions can be manipulated with functions in [[Constructors]] and [[ExprOps]].
*
* If you are looking for things such as function or class definitions,
* please have a look in [[inox.ast.Definitions]].
*
* @define encodingof Encoding of
* @define noteBitvector (32-bit vector)
* @define noteReal (Real)
*/
trait Expressions { self: Trees =>
/** Represents an expression in Inox. */
abstract class Expr extends Tree with Typed
/** Trait which gets mixed-in to expressions without subexpressions */
trait Terminal {
self: Expr =>
}
/** Local assumption
*
* @param pred The predicate to be assumed
* @param body The expression following `assume(pred)`
*/
sealed case class Assume(pred: Expr, body: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
checkParamType(pred, BooleanType(), body.getType)
}
/** Variable
*
* @param id The identifier of this variable
*/
sealed case class Variable(id: Identifier, tpe: Type, flags: Seq[Flag])
extends Expr with Terminal with VariableSymbol {
/** Transforms this [[Variable]] into a [[Definitions.ValDef ValDef]] */
def toVal = to[ValDef]
def freshen = copy(id.freshen)
override def equals(that: Any) = super[VariableSymbol].equals(that)
override def hashCode = super[VariableSymbol].hashCode
def copy(id: Identifier = id, tpe: Type = tpe, flags: Seq[Flag] = flags) =
Variable(id, tpe, flags).copiedFrom(this)
}
object Variable {
def fresh(name: String, tpe: Type, alwaysShowUniqueID: Boolean = false) =
Variable(FreshIdentifier(name, alwaysShowUniqueID), tpe, Seq.empty)
}
/** $encodingof `val ... = ...; ...`
*
* @param vd The ValDef used in body, defined just after '''val'''
* @param value The value assigned to the identifier, after the '''=''' sign
* @param body The expression following the ``val ... = ... ;`` construct
* @see [[SymbolOps.let the let constructor]]
*/
sealed case class Let(vd: ValDef, value: Expr, body: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
checkParamType(value, vd.getType, body.getType)
}
/* Higher-order Functions */
/** $encodingof `callee(args...)`, where [[callee]] is an expression of a function type (not a method) */
sealed case class Application(callee: Expr, args: Seq[Expr]) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = callee.getType match {
case FunctionType(from, to) => checkParamTypes(args, from, to)
case _ => Untyped
}
}
/** $encodingof `(args) => body` */
sealed case class Lambda(params: Seq[ValDef], body: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
FunctionType(params.map(_.getType), body.getType).getType
def paramSubst(realArgs: Seq[Expr]) = {
require(realArgs.size == params.size)
(params zip realArgs).toMap
}
def withParamSubst(realArgs: Seq[Expr], e: Expr) = {
exprOps.replaceFromSymbols(paramSubst(realArgs), e)
}
}
/** $encodingof `forall(...)` (universal quantification) */
sealed case class Forall(params: Seq[ValDef], body: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = body.getType
}
/** $encodingof `choose(...)` (returns a value satisfying the provided predicate) */
sealed case class Choose(res: ValDef, pred: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
checkParamType(pred, BooleanType(), res.getType)
}
/* Control flow */
/** $encodingof `function(...)` (function invocation) */
sealed case class FunctionInvocation(id: Identifier, tps: Seq[Type], args: Seq[Expr])
extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = {
s.lookupFunction(id)
.filter(fd => tps.size == fd.tparams.size && args.size == fd.params.size)
.map(_.typed(tps))
.map(tfd => checkParamTypes(args, tfd.params.map(_.getType), tfd.getType))
.getOrElse(Untyped)
}
def tfd(implicit s: Symbols): TypedFunDef = s.getFunction(id, tps)
def inlined(implicit s: Symbols): Expr = {
val tfd = this.tfd
exprOps.freshenLocals((tfd.params zip args).foldRight(tfd.fullBody) {
case ((vd, e), body) => s.let(vd, e, body)
})
}
}
/** $encodingof `if(...) ... else ...` */
sealed case class IfExpr(cond: Expr, thenn: Expr, elze: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = {
if (s.isSubtypeOf(cond.getType, BooleanType())) s.leastUpperBound(thenn.getType, elze.getType)
else Untyped
}
}
/** Literals */
sealed abstract class Literal[+T] extends Expr with Terminal {
val value: T
}
/** $encodingof a character literal */
sealed case class CharLiteral(value: Char) extends Literal[Char] {
def getType(implicit s: Symbols): Type = CharType()
}
/** $encodingof a n-bit bitvector literal */
sealed case class BVLiteral(signed: Boolean, value: BitSet, size: Int) extends Literal[BitSet] {
def getType(implicit s: Symbols): Type = BVType(signed, size)
def toBigInt: BigInt = {
val res = value.foldLeft(BigInt(0))((res, i) => res + BigInt(2).pow(i-1))
if (signed && value(size))
res - BigInt(2).pow(size)
else
res
}
}
object BVLiteral {
def apply(signed: Boolean, bi: BigInt, size: Int): BVLiteral = {
assert(bi >= 0 || signed, "You can only create an unsigned BVLiteral from a non-negative number.")
def extract(bi: BigInt): BitSet = (1 to size).foldLeft(BitSet.empty) {
case (res, i) => if ((bi & BigInt(2).pow(i-1)) > 0) res + i else res
}
val bitSet = if (bi >= 0 || !signed) extract(bi) else {
val bs = extract(-bi)
(1 to size).foldLeft((BitSet.empty, false)) { case ((res, seen1), i) =>
if (bs(i) && !seen1) (res + i, true)
else (if (!seen1 || bs(i)) res else res + i, seen1)
}._1
}
BVLiteral(signed, bitSet, size)
}
}
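// Editor's note (not part of the original file): a worked example of the encoding above,
// using the Int8Literal helper defined below. Bit i of `value` holds 2^(i-1), and for
// signed literals the top bit carries the two's-complement sign:
//
//   Int8Literal(5)   // value == BitSet(1, 3)                 (1 + 4),        toBigInt == 5
//   Int8Literal(-1)  // value == BitSet(1, 2, 3, 4, 5, 6, 7, 8) (all bits set), toBigInt == -1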
object Int8Literal {
def apply(x: Byte): BVLiteral = BVLiteral(true, BigInt(x), 8)
def unapply(e: Expr): Option[Byte] = e match {
case b @ BVLiteral(true, _, 8) => Some(b.toBigInt.toByte)
case _ => None
}
}
object Int16Literal {
def apply(x: Short): BVLiteral = BVLiteral(true, BigInt(x), 16)
def unapply(e: Expr): Option[Short] = e match {
case b @ BVLiteral(true, _, 16) => Some(b.toBigInt.toShort)
case _ => None
}
}
object Int32Literal {
def apply(x: Int): BVLiteral = BVLiteral(true, BigInt(x), 32)
def unapply(e: Expr): Option[Int] = e match {
case b @ BVLiteral(true, _, 32) => Some(b.toBigInt.toInt)
case _ => None
}
}
object Int64Literal {
def apply(x: Long): BVLiteral = BVLiteral(true, BigInt(x), 64)
def unapply(e: Expr): Option[Long] = e match {
case b @ BVLiteral(true, _, 64) => Some(b.toBigInt.toLong)
case _ => None
}
}
/** $encodingof an infinite precision integer literal */
sealed case class IntegerLiteral(value: BigInt) extends Literal[BigInt] {
def getType(implicit s: Symbols): Type = IntegerType()
}
/** $encodingof a fraction literal */
sealed case class FractionLiteral(numerator: BigInt, denominator: BigInt) extends Literal[(BigInt, BigInt)] {
val value = (numerator, denominator)
def getType(implicit s: Symbols): Type = RealType()
}
/** $encodingof a boolean literal '''true''' or '''false''' */
sealed case class BooleanLiteral(value: Boolean) extends Literal[Boolean] {
def getType(implicit s: Symbols): Type = BooleanType()
}
/** $encodingof a string literal */
sealed case class StringLiteral(value: String) extends Literal[String] {
def getType(implicit s: Symbols): Type = StringType()
}
/** $encodingof the unit literal `()` */
sealed case class UnitLiteral() extends Literal[Unit] {
val value = ()
def getType(implicit s: Symbols): Type = UnitType()
}
/** Generic values. Represent values of the generic type `tp`.
* This is useful e.g. to present counterexamples of generic types.
*/
sealed case class GenericValue(tp: TypeParameter, id: Int) extends Expr with Terminal {
def getType(implicit s: Symbols): Type = tp.getType
}
/** $encodingof `ct(args...)`
*
* @param ct The case class name and inherited attributes
* @param args The arguments of the case class
*/
sealed case class ADT(id: Identifier, tps: Seq[Type], args: Seq[Expr]) extends Expr with CachingTyped {
def getConstructor(implicit s: Symbols) = s.getConstructor(id, tps)
override protected def computeType(implicit s: Symbols): Type =
s.lookupConstructor(id).flatMap { cons =>
s.lookupSort(cons.sort)
.filter(_.tparams.size == tps.size)
.flatMap { sort =>
sort.typed(tps).constructors
.find(_.id == id)
.filter(_.fields.size == args.size)
.map(tcons => checkParamTypes(args, tcons.fields.map(_.getType), ADTType(sort.id, tps)))
}
}.getOrElse(Untyped)
}
/** $encodingof `.isInstanceOf[...]` */
sealed case class IsConstructor(expr: Expr, id: Identifier) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getADTType(expr) match {
case ADTType(sort, _) => (s.lookupSort(sort), s.lookupConstructor(id)) match {
case (Some(sort), Some(cons)) if sort.id == cons.sort => BooleanType()
case _ => Untyped
}
case _ => Untyped
}
}
/** $encodingof `value.selector` where value is of a case class type
*
* If you are not sure about the requirement you should use
* [[SymbolOps.adtSelector the adtSelector constructor]]
*/
sealed case class ADTSelector(adt: Expr, selector: Identifier) extends Expr with CachingTyped {
def constructor(implicit s: Symbols) = {
val tpe = getADTType(adt).asInstanceOf[ADTType]
tpe.getSort.constructors.find(_.fields.exists(_.id == selector)).get
}
def selectorIndex(implicit s: Symbols) = constructor.definition.selectorID2Index(selector)
override protected def computeType(implicit s: Symbols): Type = getADTType(adt) match {
case ADTType(id, tps) =>
s.lookupSort(id)
.filter(_.tparams.size == tps.size)
.map(_.typed(tps)).toSeq
.flatMap(_.constructors.flatMap(_.fields))
.find(_.id == selector).map(_.getType).getOrElse(Untyped)
case _ => Untyped
}
}
/** $encodingof `... == ...` */
sealed case class Equals(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = {
if (s.leastUpperBound(lhs.getType, rhs.getType) != Untyped) BooleanType()
else Untyped
}
}
/* Propositional logic */
/** $encodingof `... && ...`
*
* [[exprs]] must contain at least two elements; if you are not sure about this,
* you should use [[Constructors.and the and constructor]]
* or [[Constructors.andJoin the andJoin constructor]]
*/
sealed case class And(exprs: Seq[Expr]) extends Expr with CachingTyped {
require(exprs.size >= 2)
override protected def computeType(implicit s: Symbols): Type =
checkParamTypes(exprs, List.fill(exprs.size)(BooleanType()), BooleanType())
}
object And {
def apply(a: Expr, b: Expr): Expr = And(Seq(a, b))
}
/** $encodingof `... || ...`
*
* [[exprs]] must contain at least two elements; if you are not sure about this,
* you should use [[Constructors#or the or constructor]] or
* [[Constructors#orJoin the orJoin constructor]]
*/
sealed case class Or(exprs: Seq[Expr]) extends Expr with CachingTyped {
require(exprs.size >= 2)
override protected def computeType(implicit s: Symbols): Type =
checkParamTypes(exprs, List.fill(exprs.size)(BooleanType()), BooleanType())
}
object Or {
def apply(a: Expr, b: Expr): Expr = Or(Seq(a, b))
}
/** $encodingof `... ==> ...` (logical implication).
*
* @see [[Constructors.implies]]
*/
sealed case class Implies(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
checkAllTypes(Seq(lhs, rhs), BooleanType(), BooleanType())
}
/** $encodingof `!...`
*
* @see [[Constructors.not]]
*/
sealed case class Not(expr: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
checkParamType(expr, BooleanType(), BooleanType())
}
/* String Theory */
/** $encodingof `lhs + rhs` for strings */
sealed case class StringConcat(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
checkAllTypes(Seq(lhs, rhs), StringType(), StringType())
}
/** $encodingof `lhs.subString(start, end)` for strings */
sealed case class SubString(expr: Expr, start: Expr, end: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
checkParamTypes(Seq(expr, start, end), Seq(StringType(), IntegerType(), IntegerType()), StringType())
}
/** $encodingof `lhs.length` for strings */
sealed case class StringLength(expr: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
checkParamType(expr, StringType(), IntegerType())
}
/* General arithmetic */
/** $encodingof `... + ...` */
sealed case class Plus(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
getIntegerType(lhs, rhs) orElse getRealType(lhs, rhs) orElse getBVType(lhs, rhs)
}
/** $encodingof `... - ...` */
sealed case class Minus(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
getIntegerType(lhs, rhs) orElse getRealType(lhs, rhs) orElse getBVType(lhs, rhs)
}
/** $encodingof `- ...` */
sealed case class UMinus(expr: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
getIntegerType(expr) orElse getRealType(expr) orElse getBVType(expr)
}
/** $encodingof `... * ...` */
sealed case class Times(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
getIntegerType(lhs, rhs) orElse getRealType(lhs, rhs) orElse getBVType(lhs, rhs)
}
/** $encodingof `... / ...`
*
* Division and Remainder follows Java/Scala semantics. Division corresponds
* to / operator on BigInt and Remainder corresponds to %. Note that in
* Java/Scala % is called remainder and the "mod" operator (Modulo in Inox) is also
* defined on BigInteger and differs from Remainder. The "mod" operator
* returns an always positive remainder, while Remainder could return
* a negative remainder. The following must hold:
*
* Division(x, y) * y + Remainder(x, y) == x
*/
sealed case class Division(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
getIntegerType(lhs, rhs) orElse getRealType(lhs, rhs) orElse getBVType(lhs, rhs)
}
/** $encodingof `... % ...` (can return negative numbers)
*
* @see [[Expressions.Division]]
*/
sealed case class Remainder(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
getIntegerType(lhs, rhs) orElse getBVType(lhs, rhs)
}
/** $encodingof `... mod ...` (cannot return negative numbers)
*
* @see [[Expressions.Division]]
*/
sealed case class Modulo(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
getIntegerType(lhs, rhs) orElse getBVType(lhs, rhs)
}
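// Editor's note (not part of the original file): a worked example of the semantics documented
// above (Java/Scala truncating division), for x = -7 and y = 2:
//   Division(-7, 2) denotes -3, Remainder(-7, 2) denotes -1, Modulo(-7, 2) denotes 1,
//   and the documented invariant holds: (-3) * 2 + (-1) == -7.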
/** $encodingof `... < ...`*/
sealed case class LessThan(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = if (
getIntegerType(lhs, rhs).isTyped ||
getRealType(lhs, rhs).isTyped ||
getBVType(lhs, rhs).isTyped ||
getCharType(lhs, rhs).isTyped
) BooleanType() else Untyped
}
/** $encodingof `... > ...`*/
sealed case class GreaterThan(lhs: Expr, rhs: Expr) extends Expr with CachingTyped{
override protected def computeType(implicit s: Symbols): Type = if (
getIntegerType(lhs, rhs).isTyped ||
getRealType(lhs, rhs).isTyped ||
getBVType(lhs, rhs).isTyped ||
getCharType(lhs, rhs).isTyped
) BooleanType() else Untyped
}
/** $encodingof `... <= ...`*/
sealed case class LessEquals(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = if (
getIntegerType(lhs, rhs).isTyped ||
getRealType(lhs, rhs).isTyped ||
getBVType(lhs, rhs).isTyped ||
getCharType(lhs, rhs).isTyped
) BooleanType() else Untyped
}
/** $encodingof `... >= ...`*/
sealed case class GreaterEquals(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = if (
getIntegerType(lhs, rhs).isTyped ||
getRealType(lhs, rhs).isTyped ||
getBVType(lhs, rhs).isTyped ||
getCharType(lhs, rhs).isTyped
) BooleanType() else Untyped
}
/* Bit-vector operations */
/** $encodingof `~...` $noteBitvector */
sealed case class BVNot(e: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getBVType(e)
}
/** $encodingof `... & ...` $noteBitvector */
sealed case class BVAnd(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getBVType(lhs, rhs)
}
/** $encodingof `... | ...` $noteBitvector */
sealed case class BVOr(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getBVType(lhs, rhs)
}
/** $encodingof `... ^ ...` $noteBitvector */
sealed case class BVXor(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getBVType(lhs, rhs)
}
/** $encodingof `... << ...` $noteBitvector */
sealed case class BVShiftLeft(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getBVType(lhs, rhs)
}
/** $encodingof `... >> ...` $noteBitvector (arithmetic shift, sign-preserving) */
sealed case class BVAShiftRight(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getBVType(lhs, rhs)
}
/** $encodingof `... >>> ...` $noteBitvector (logical shift) */
sealed case class BVLShiftRight(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getBVType(lhs, rhs)
}
/** $encodingof `... .toByte` and other narrowing casts */
sealed case class BVNarrowingCast(expr: Expr, newType: BVType) extends Expr with CachingTyped {
// The expression is well typed iff `expr` is well typed and the BVTypes' size match a narrowing cast.
override protected def computeType(implicit s: Symbols): Type = cast match {
case Some((from, to)) => newType
case _ => Untyped
}
// Returns the pair of sizes from -> to
def cast(implicit s: Symbols): Option[(Int, Int)] = getBVType(expr) match {
case BVType(s, from) if s == newType.signed && from > newType.size => Some(from -> newType.size)
case _ => None
}
}
/** $encodingof `... .toInt` and other widening casts */
sealed case class BVWideningCast(expr: Expr, newType: BVType) extends Expr with CachingTyped {
// The expression is well typed iff `expr` is well typed and the BVTypes' size match a widening cast.
override protected def computeType(implicit s: Symbols): Type = cast match {
case Some((from, to)) => newType
case _ => Untyped
}
// Returns the pair of sizes from -> to
def cast(implicit s: Symbols): Option[(Int, Int)] = getBVType(expr) match {
case BVType(s, from) if s == newType.signed && from < newType.size => Some(from -> newType.size)
case _ => None
}
}
/** Bitvector conversion from unsigned to signed */
sealed case class BVUnsignedToSigned(expr: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getBVType(expr) match {
case BVType(false, size) => BVType(true, size)
case _ => Untyped
}
}
/** Bitvector conversion from signed to unsigned */
sealed case class BVSignedToUnsigned(expr: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getBVType(expr) match {
case BVType(true, size) => BVType(false, size)
case _ => Untyped
}
}
/* Tuple operations */
/** $encodingof `(..., ....)` (tuple)
*
* [[exprs]] should always contain at least 2 elements.
* If you are not sure about this requirement, you should use
* [[Constructors.tupleWrap the tupleWrap constructor]]
*
* @param exprs The expressions in the tuple
*/
sealed case class Tuple(exprs: Seq[Expr]) extends Expr with CachingTyped {
require(exprs.size >= 2)
override protected def computeType(implicit s: Symbols): Type = TupleType(exprs.map(_.getType)).getType
}
/** $encodingof `(tuple)._i`
*
* Index is 1-based, first element of tuple is 1.
* If you are not sure that [[tuple]] is indeed of a TupleType, you should use one of the
* [[SymbolOps.tupleSelect(t:SymbolOps\.this\.trees\.Expr,index:Int,originalSize:Int)*]]
* [[SymbolOps.tupleSelect(t:SymbolOps\.this\.trees\.Expr,index:Int,isTuple:Boolean)*]]
* constructors
*/
sealed case class TupleSelect(tuple: Expr, index: Int) extends Expr with CachingTyped {
require(index >= 1)
override protected def computeType(implicit s: Symbols): Type = getTupleType(tuple) match {
case tp @ TupleType(ts) if index <= ts.size => ts(index - 1)
case _ => Untyped
}
}
/* Set operations */
/** $encodingof `Set[base](elements)` */
sealed case class FiniteSet(elements: Seq[Expr], base: Type) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
checkParamTypes(elements.map(_.getType), List.fill(elements.size)(base), SetType(base))
}
/** $encodingof `set + elem` */
sealed case class SetAdd(set: Expr, elem: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getSetType(set) match {
case st @ SetType(base) => checkParamType(elem, base, st)
case _ => Untyped
}
}
/** $encodingof `set.contains(element)` or `set(element)` */
sealed case class ElementOfSet(element: Expr, set: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols) = getSetType(set) match {
case SetType(base) => checkParamType(element, base, BooleanType())
case _ => Untyped
}
}
/** $encodingof `set.subsetOf(set2)` */
sealed case class SubsetOf(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getSetType(lhs, rhs) match {
case st: SetType => BooleanType()
case _ => Untyped
}
}
/** $encodingof `set & set2` */
sealed case class SetIntersection(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getSetType(lhs, rhs)
}
/** $encodingof `set ++ set2` */
sealed case class SetUnion(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getSetType(lhs, rhs)
}
/** $encodingof `set -- set2` */
sealed case class SetDifference(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getSetType(lhs, rhs)
}
/* Bag operations */
/** $encodingof `Bag[base](elements)` */
sealed case class FiniteBag(elements: Seq[(Expr, Expr)], base: Type) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type =
checkParamTypes(
elements.map(_._1.getType) ++ elements.map(_._2.getType),
List.fill(elements.size)(base) ++ List.fill(elements.size)(IntegerType()),
BagType(base)
)
}
/** $encodingof `bag + elem` */
sealed case class BagAdd(bag: Expr, elem: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getBagType(bag, BagType(elem.getType))
}
/** $encodingof `bag.get(element)` or `bag(element)` */
sealed case class MultiplicityInBag(element: Expr, bag: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getBagType(bag) match {
case BagType(base) => checkParamType(element, base, IntegerType())
case _ => Untyped
}
}
/** $encodingof `lhs & rhs` */
sealed case class BagIntersection(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getBagType(lhs, rhs)
}
/** $encodingof `lhs ++ rhs` */
sealed case class BagUnion(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getBagType(lhs, rhs)
}
/** $encodingof `lhs -- rhs` */
sealed case class BagDifference(lhs: Expr, rhs: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getBagType(lhs, rhs)
}
/* Total map operations */
/** $encodingof `Map[keyType, valueType](key1 -> value1, key2 -> value2 ...)` */
sealed case class FiniteMap(pairs: Seq[(Expr, Expr)], default: Expr, keyType: Type, valueType: Type)
extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = checkParamTypes(
pairs.map(_._1.getType) ++ pairs.map(_._2.getType) :+ default.getType,
List.fill(pairs.size)(keyType) ++ List.fill(pairs.size + 1)(valueType),
MapType(keyType, valueType)
)
}
/** $encodingof `map.apply(key)` (or `map(key)`) */
sealed case class MapApply(map: Expr, key: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getMapType(map) match {
case MapType(from, to) => checkParamType(key, from, to)
case _ => Untyped
}
}
/** $encodingof `map.updated(key, value)` (or `map + (key -> value)`) */
sealed case class MapUpdated(map: Expr, key: Expr, value: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = getMapType(map) match {
case mt @ MapType(from, to) => checkParamType(key, from, getMapType(mt, MapType(from, value.getType)))
case _ => Untyped
}
}
/**
* Special operation that merges two maps using a set.
* The resulting map is a map that contains the key-value pairs of map1 for all keys that are in the mask,
* and the key-value pairs of map2 for all keys that are not in the mask.
*/
case class MapMerge(mask: Expr, map1: Expr, map2: Expr) extends Expr with CachingTyped {
override protected def computeType(implicit s: Symbols): Type = (getMapType(map1, map2), getSetType(mask)) match {
case (mt @ MapType(from, to), SetType(mask)) => checkParamType(mask, from, mt)
case _ => Untyped
}
}
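// Editor's note (not part of the original file): a small example of the merge semantics
// described above. With mask = {k1}, map1 = {k1 -> a, k2 -> b} and map2 = {k1 -> c, k2 -> d},
// MapMerge(mask, map1, map2) denotes the map {k1 -> a, k2 -> d}: keys in the mask are taken
// from map1, all remaining keys from map2.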
}
| epfl-lara/inox | src/main/scala/inox/ast/Expressions.scala | Scala | apache-2.0 | 28,454 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.tools.export.formats
import java.io.OutputStream
import java.nio.charset.StandardCharsets
import java.util.concurrent.atomic.AtomicBoolean
import java.util.concurrent.locks.ReentrantLock
import java.util.concurrent.{ConcurrentLinkedQueue, Executors, TimeUnit}
import javax.xml.namespace.QName
import net.opengis.wfs.WfsFactory
import org.geotools.data.simple.{SimpleFeatureCollection, SimpleFeatureIterator}
import org.geotools.data.store.{DataFeatureCollection, ReTypingFeatureCollection}
import org.geotools.feature.simple.SimpleFeatureTypeBuilder
import org.geotools.geometry.jts.ReferencedEnvelope
import org.geotools.wfs.WFSConfiguration
import org.geotools.xsd.Encoder
import org.locationtech.geomesa.tools.export.formats.FeatureExporter.{ByteCounter, ByteCounterExporter}
import org.locationtech.geomesa.tools.export.formats.GmlExporter.AsyncFeatureCollection
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
/**
* GML exporter implementation.
*
* The geotools GML export classes only support encoding a feature collection. To support our usage
* pattern (start, export n times, end), we create an asynchronous feature collection and do the actual
* encoding in a separate thread. The encoder thread will block until there are more features to export,
* so that we only get a single feature collection in the xml.
*
* @param os output stream
* @param counter counter
* @param configuration wfs configuration (gml3 vs gml2)
*/
class GmlExporter private (os: OutputStream, counter: ByteCounter, configuration: WFSConfiguration)
extends ByteCounterExporter(counter) {
private val encoder: Encoder = {
val props = configuration.getProperties.asInstanceOf[java.util.Set[QName]]
props.add(org.geotools.gml2.GMLConfiguration.OPTIMIZED_ENCODING)
props.add(org.geotools.gml2.GMLConfiguration.NO_FEATURE_BOUNDS)
val e = new Encoder(configuration)
e.getNamespaces.declarePrefix("geomesa", "http://geomesa.org")
e.setEncoding(StandardCharsets.UTF_8)
e.setIndenting(true)
e
}
private val es = Executors.newSingleThreadExecutor()
private var fc: AsyncFeatureCollection = _
override def start(sft: SimpleFeatureType): Unit = {
fc = new AsyncFeatureCollection(sft)
val features = if (sft.getName.getNamespaceURI != null) { fc } else {
val builder = new SimpleFeatureTypeBuilder()
builder.init(sft)
builder.setNamespaceURI("http://geomesa.org")
new ReTypingFeatureCollection(fc, builder.buildFeatureType())
}
val collection = WfsFactory.eINSTANCE.createFeatureCollectionType()
collection.getFeature.asInstanceOf[java.util.List[SimpleFeatureCollection]].add(features)
def encode(): Unit = encoder.encode(collection, org.geotools.wfs.WFS.FeatureCollection, os)
val runnable = new Runnable() {
override def run(): Unit = {
if (System.getProperty(GmlExporter.TransformerProperty) != null) { encode() } else {
// explicitly set the default java transformer, to avoid picking up saxon (which causes errors)
// the default class is hard-coded in javax.xml.transform.TransformerFactory.newInstance() ...
System.setProperty(GmlExporter.TransformerProperty,
classOf[com.sun.org.apache.xalan.internal.xsltc.trax.TransformerFactoryImpl].getName)
try { encode() } finally {
System.clearProperty(GmlExporter.TransformerProperty)
}
}
}
}
es.execute(runnable)
}
override def export(features: Iterator[SimpleFeature]): Option[Long] = {
var count = 0L
val counting = features.map { f => count += 1; f }
while (counting.nonEmpty) {
// export in chunks of 100 so that the exporter thread gets notified and doesn't keep blocking
fc.addAsync(counting.take(100))
}
Some(count)
}
override def close(): Unit = {
try {
fc.endAsync()
es.shutdown()
es.awaitTermination(Long.MaxValue, TimeUnit.MILLISECONDS)
} finally {
os.close()
}
}
}
object GmlExporter {
private val TransformerProperty = classOf[javax.xml.transform.TransformerFactory].getName
/**
* Create a GML3 exporter
*
* @param os output stream
* @param counter byte counter
* @return
*/
def apply(os: OutputStream, counter: ByteCounter): GmlExporter =
new GmlExporter(os, counter, new org.geotools.wfs.v1_1.WFSConfiguration())
/**
* Create a GML2 exporter
*
* @param os output stream
* @param counter byte counter
* @return
*/
def gml2(os: OutputStream, counter: ByteCounter): GmlExporter =
new GmlExporter(os, counter, new org.geotools.wfs.v1_0.WFSConfiguration_1_0())
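// Editor's note (not part of the original file): a minimal usage sketch of the
// start / export / close lifecycle described in the class documentation. The
// `outputStream`, `byteCounter`, `sft` and `features` values are assumed to exist.
//
//   val exporter = GmlExporter(outputStream, byteCounter)
//   exporter.start(sft)        // spawns the encoder thread over the async feature collection
//   exporter.export(features)  // may be called any number of times
//   exporter.close()           // signals end of features and waits for the encoder to finish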
/**
* Feature collection that lets us add additional features in an asynchronous fashion. The consumer
* thread will be blocked on calls to 'hasNext' until the producer thread adds features or indicates
* completion
*
* @param sft simple feature type
*/
private class AsyncFeatureCollection(sft: SimpleFeatureType) extends DataFeatureCollection(null, sft) {
private val buffer = new ConcurrentLinkedQueue[SimpleFeature]()
private val done = new AtomicBoolean(false)
private val lock = new ReentrantLock()
private val condition = lock.newCondition()
private val iter: SimpleFeatureIterator = new SimpleFeatureIterator() {
private var current: SimpleFeature = _
override def hasNext: Boolean = {
if (current != null) {
return true
}
lock.lock()
try {
current = buffer.poll()
// note: we need to loop here to skip 'spurious wake-ups'
while (current == null) {
if (done.get) {
return false
}
condition.await()
current = buffer.poll()
}
true
} finally {
lock.unlock()
}
}
override def next(): SimpleFeature = {
// note: we shouldn't need to synchronize this as next/hasNext should be a single caller thread
val result = current
current = null
result
}
override def close(): Unit = endAsync()
}
/**
* Add features to be returned from this feature collection
*
* @param features features
*/
def addAsync(features: Iterator[SimpleFeature]): Unit = {
lock.lock()
try {
features.foreach(buffer.add)
condition.signal()
} finally {
lock.unlock()
}
}
/**
* Signal that there are no more features that will be added
*/
def endAsync(): Unit = {
lock.lock()
try {
done.set(true)
condition.signal()
} finally {
lock.unlock()
}
}
override protected def features(): SimpleFeatureIterator = iter
override def getBounds: ReferencedEnvelope = org.locationtech.geomesa.utils.geotools.wholeWorldEnvelope
override def getCount: Int = 0
}
}
| elahrvivaz/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/export/formats/GmlExporter.scala | Scala | apache-2.0 | 7,495 |
package org.jetbrains.plugins.scala.failed.resolve
/**
* Created by Anton.Yalyshev on 20/04/16.
*/
class MacrosTest extends FailedResolveCaretTestBase {
def testSCL8507(): Unit = doResolveCaretTest(
s"""
|object x extends App {
| import shapeless._
| case class Foo(i: Int, s: String, b:Boolean)
| val foo = Foo(23, "foo", true)
|
| val gen = Generic[Foo]
| val hfoo = gen.to(foo)
|
| val foo2 = gen.from(hfoo.<caret>head :: "bar" :: hfoo.tail.tail)
|
| println(foo2)
|}
""".stripMargin)
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/failed/resolve/MacrosTest.scala | Scala | apache-2.0 | 597 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import scala.reflect.ClassTag
import org.apache.spark.{Partition, SparkContext, SparkException, TaskContext}
import org.apache.spark.storage.RDDBlockId
/**
* A dummy CheckpointRDD that exists to provide informative error messages during failures.
*
* This is simply a placeholder because the original checkpointed RDD is expected to be
* fully cached. Only if an executor fails or if the user explicitly unpersists the original
* RDD will Spark ever attempt to compute this CheckpointRDD. When this happens, however,
* we must provide an informative error message.
*
* @param sc the active SparkContext
* @param rddId the ID of the checkpointed RDD
* @param numPartitions the number of partitions in the checkpointed RDD
*/
private[spark] class LocalCheckpointRDD[T: ClassTag](
sc: SparkContext,
rddId: Int,
numPartitions: Int)
extends CheckpointRDD[T](sc) {
def this(rdd: RDD[T]) {
this(rdd.context, rdd.id, rdd.partitions.length)
}
protected override def getPartitions: Array[Partition] = {
(0 until numPartitions).toArray.map { i => new CheckpointRDDPartition(i) }
}
/**
* Throw an exception indicating that the relevant block is not found.
*
* This should only be called if the original RDD is explicitly unpersisted or if an
* executor is lost. Under normal circumstances, however, the original RDD (our child)
* is expected to be fully cached and so all partitions should already be computed and
* available in the block storage.
*/
override def compute(partition: Partition, context: TaskContext): Iterator[T] = {
throw new SparkException(
s"Checkpoint block ${RDDBlockId(rddId, partition.index)} not found! Either the executor " +
s"that originally checkpointed this partition is no longer alive, or the original RDD is " +
s"unpersisted. If this problem persists, you may consider using `rdd.checkpoint()` " +
s"instead, which is slower than local checkpointing but more fault-tolerant.")
}
}
| sh-cho/cshSpark | rdd/LocalCheckpointRDD.scala | Scala | apache-2.0 | 2,836 |
object FolderSourced {
override def toString = "folder-sourced"
}
| rvanider/scala-script | test/modules/folder-sourced.scala | Scala | mit | 67 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.param.shared
import java.io.PrintWriter
import scala.reflect.ClassTag
import scala.xml.Utility
/**
* Code generator for shared params (sharedParams.scala). Run under the Spark folder with
* {{{
* build/sbt "mllib/runMain org.apache.spark.ml.param.shared.SharedParamsCodeGen"
* }}}
*/
private[shared] object SharedParamsCodeGen {
def main(args: Array[String]): Unit = {
val params = Seq(
ParamDesc[Double]("regParam", "regularization parameter (>= 0)",
isValid = "ParamValidators.gtEq(0)"),
ParamDesc[Int]("maxIter", "maximum number of iterations (>= 0)",
isValid = "ParamValidators.gtEq(0)"),
ParamDesc[String]("featuresCol", "features column name", Some("\\"features\\"")),
ParamDesc[String]("labelCol", "label column name", Some("\\"label\\"")),
ParamDesc[String]("predictionCol", "prediction column name", Some("\\"prediction\\"")),
ParamDesc[String]("rawPredictionCol", "raw prediction (a.k.a. confidence) column name",
Some("\\"rawPrediction\\"")),
ParamDesc[String]("probabilityCol", "Column name for predicted class conditional" +
" probabilities. Note: Not all models output well-calibrated probability estimates!" +
" These probabilities should be treated as confidences, not precise probabilities",
Some("\\"probability\\"")),
ParamDesc[String]("varianceCol", "Column name for the biased sample variance of prediction"),
ParamDesc[Double]("threshold",
"threshold in binary classification prediction, in range [0, 1]",
isValid = "ParamValidators.inRange(0, 1)", finalMethods = false, finalFields = false),
ParamDesc[Array[Double]]("thresholds", "Thresholds in multi-class classification" +
" to adjust the probability of predicting each class." +
" Array must have length equal to the number of classes, with values > 0" +
" excepting that at most one value may be 0." +
" The class with largest value p/t is predicted, where p is the original probability" +
" of that class and t is the class's threshold",
isValid = "(t: Array[Double]) => t.forall(_ >= 0) && t.count(_ == 0) <= 1",
finalMethods = false),
ParamDesc[String]("inputCol", "input column name"),
ParamDesc[Array[String]]("inputCols", "input column names"),
ParamDesc[String]("outputCol", "output column name", Some("uid + \\"__output\\"")),
ParamDesc[Int]("checkpointInterval", "set checkpoint interval (>= 1) or " +
"disable checkpoint (-1). E.g. 10 means that the cache will get checkpointed " +
"every 10 iterations", isValid = "(interval: Int) => interval == -1 || interval >= 1"),
ParamDesc[Boolean]("fitIntercept", "whether to fit an intercept term", Some("true")),
ParamDesc[String]("handleInvalid", "how to handle invalid entries. Options are skip (which " +
"will filter out rows with bad values), or error (which will throw an error). More " +
"options may be added later",
isValid = "ParamValidators.inArray(Array(\\"skip\\", \\"error\\"))", finalFields = false),
ParamDesc[Boolean]("standardization", "whether to standardize the training features" +
" before fitting the model", Some("true")),
ParamDesc[Long]("seed", "random seed", Some("this.getClass.getName.hashCode.toLong")),
ParamDesc[Double]("elasticNetParam", "the ElasticNet mixing parameter, in range [0, 1]." +
" For alpha = 0, the penalty is an L2 penalty. For alpha = 1, it is an L1 penalty",
isValid = "ParamValidators.inRange(0, 1)"),
ParamDesc[Double]("tol", "the convergence tolerance for iterative algorithms (>= 0)",
isValid = "ParamValidators.gtEq(0)"),
ParamDesc[Double]("stepSize", "Step size to be used for each iteration of optimization (>" +
" 0)", isValid = "ParamValidators.gt(0)", finalFields = false),
ParamDesc[String]("weightCol", "weight column name. If this is not set or empty, we treat " +
"all instance weights as 1.0"),
ParamDesc[String]("solver", "the solver algorithm for optimization", finalFields = false),
ParamDesc[Int]("aggregationDepth", "suggested depth for treeAggregate (>= 2)", Some("2"),
isValid = "ParamValidators.gtEq(2)", isExpertParam = true))
val code = genSharedParams(params)
val file = "src/main/scala/org/apache/spark/ml/param/shared/sharedParams.scala"
val writer = new PrintWriter(file)
writer.write(code)
writer.close()
}
/** Description of a param. */
private case class ParamDesc[T: ClassTag](
name: String,
doc: String,
defaultValueStr: Option[String] = None,
isValid: String = "",
finalMethods: Boolean = true,
finalFields: Boolean = true,
isExpertParam: Boolean = false) {
require(name.matches("[a-z][a-zA-Z0-9]*"), s"Param name $name is invalid.")
require(doc.nonEmpty) // TODO: more rigorous on doc
def paramTypeName: String = {
val c = implicitly[ClassTag[T]].runtimeClass
c match {
case _ if c == classOf[Int] => "IntParam"
case _ if c == classOf[Long] => "LongParam"
case _ if c == classOf[Float] => "FloatParam"
case _ if c == classOf[Double] => "DoubleParam"
case _ if c == classOf[Boolean] => "BooleanParam"
case _ if c.isArray && c.getComponentType == classOf[String] => s"StringArrayParam"
case _ if c.isArray && c.getComponentType == classOf[Double] => s"DoubleArrayParam"
case _ => s"Param[${getTypeString(c)}]"
}
}
def valueTypeName: String = {
val c = implicitly[ClassTag[T]].runtimeClass
getTypeString(c)
}
private def getTypeString(c: Class[_]): String = {
c match {
case _ if c == classOf[Int] => "Int"
case _ if c == classOf[Long] => "Long"
case _ if c == classOf[Float] => "Float"
case _ if c == classOf[Double] => "Double"
case _ if c == classOf[Boolean] => "Boolean"
case _ if c == classOf[String] => "String"
case _ if c.isArray => s"Array[${getTypeString(c.getComponentType)}]"
}
}
}
/** Generates the HasParam trait code for the input param. */
private def genHasParamTrait(param: ParamDesc[_]): String = {
val name = param.name
val Name = name(0).toUpper +: name.substring(1)
val Param = param.paramTypeName
val T = param.valueTypeName
val doc = param.doc
val defaultValue = param.defaultValueStr
val defaultValueDoc = defaultValue.map { v =>
s" (default: $v)"
}.getOrElse("")
val setDefault = defaultValue.map { v =>
s"""
| setDefault($name, $v)
|""".stripMargin
}.getOrElse("")
val isValid = if (param.isValid != "") {
", " + param.isValid
} else {
""
}
val groupStr = if (param.isExpertParam) {
Array("expertParam", "expertGetParam")
} else {
Array("param", "getParam")
}
val methodStr = if (param.finalMethods) {
"final def"
} else {
"def"
}
val fieldStr = if (param.finalFields) {
"final val"
} else {
"val"
}
val htmlCompliantDoc = Utility.escape(doc)
s"""
|/**
| * Trait for shared param $name$defaultValueDoc.
| */
|private[ml] trait Has$Name extends Params {
|
| /**
| * Param for $htmlCompliantDoc.
| * @group ${groupStr(0)}
| */
| $fieldStr $name: $Param = new $Param(this, "$name", "$doc"$isValid)
|$setDefault
| /** @group ${groupStr(1)} */
| $methodStr get$Name: $T = $$($name)
|}
|""".stripMargin
}
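// Editor's illustration (not part of the original file): for the maxIter entry above,
// genHasParamTrait emits roughly the following trait into sharedParams.scala:
//
//   /**
//    * Trait for shared param maxIter.
//    */
//   private[ml] trait HasMaxIter extends Params {
//
//     /**
//      * Param for maximum number of iterations (&gt;= 0).
//      * @group param
//      */
//     final val maxIter: IntParam = new IntParam(this, "maxIter", "maximum number of iterations (>= 0)", ParamValidators.gtEq(0))
//
//     /** @group getParam */
//     final def getMaxIter: Int = $(maxIter)
//   }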
/** Generates Scala source code for the input params with header. */
private def genSharedParams(params: Seq[ParamDesc[_]]): String = {
val header =
"""/*
| * Licensed to the Apache Software Foundation (ASF) under one or more
| * contributor license agreements. See the NOTICE file distributed with
| * this work for additional information regarding copyright ownership.
| * The ASF licenses this file to You under the Apache License, Version 2.0
| * (the "License"); you may not use this file except in compliance with
| * the License. You may obtain a copy of the License at
| *
| * http://www.apache.org/licenses/LICENSE-2.0
| *
| * Unless required by applicable law or agreed to in writing, software
| * distributed under the License is distributed on an "AS IS" BASIS,
| * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
| * See the License for the specific language governing permissions and
| * limitations under the License.
| */
|
|package org.apache.spark.ml.param.shared
|
|import org.apache.spark.ml.param._
|
|// DO NOT MODIFY THIS FILE! It was generated by SharedParamsCodeGen.
|
|// scalastyle:off
|""".stripMargin
val footer = "// scalastyle:on\\n"
val traits = params.map(genHasParamTrait).mkString
header + traits + footer
}
}
| mike0sv/spark | mllib/src/main/scala/org/apache/spark/ml/param/shared/SharedParamsCodeGen.scala | Scala | apache-2.0 | 9,983 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.h2o
import org.apache.spark.SparkContext
import org.apache.spark.h2o.util.SparkTestContext
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfter, FunSuite, Matchers}
import water.{DKV, Key}
import org.junit.runner.RunWith
/**
* Testing creation of H2O cloud in distributed environment.
*/
@RunWith(classOf[JUnitRunner])
class H2OContextLocalClusterSuite extends FunSuite
with Matchers with BeforeAndAfter with SparkTestContext {
val swassembly = sys.props.getOrElse("sparkling.test.assembly",
fail("The variable 'sparkling.test.assembly' is not set! It should point to assembly jar file."))
test("verify H2O cloud building on local cluster") {
// For distributed testing we need to pass around jar containing all implementation classes plus test classes
val conf = defaultSparkConf.setJars(swassembly :: Nil)
sc = new SparkContext("local-cluster[3,2,1024]", "test-local-cluster", conf)
hc = H2OContext.getOrCreate(sc)
assert(water.H2O.CLOUD.members().length == 3, "H2O cloud should have 3 members")
// Does not reset
resetContext()
}
// IGNORED since we are not able to initialize client in the process several times
ignore("2nd run to verify that test does not overlap") {
val conf = defaultSparkConf.setJars(swassembly :: Nil)
sc = new SparkContext("local-cluster[3,2,721]", "test-local-cluster", conf)
hc = H2OContext.getOrCreate(sc)
resetContext()
}
}
| nilbody/sparkling-water | core/src/test/scala/org/apache/spark/h2o/H2OContextLocalClusterSuite.scala | Scala | apache-2.0 | 2,275 |
object i0 {
def i1(i2: Int, i3: Int, i4: Int) = i4
val i5 = null;
i4(1, 2, 3): _
'i17 = 1
val i9 = .length
(i2: Int, i10: Int): (Int => (_)) => 42
def i11 = i11
type i18 = i10 { type i10 <: i1.i2 } = { i32: i0 =>
type i6
type i79 <: i7{type i9, i10 <: i5.i2] }
val i9: i8 { type i4 = i13.i9 }
val i16 = new i1 {} }
import i9.{ Set, i8 => }i9 {
object i10 extends i6 with i4 {
val i5 = new i9
var i9: i3.i2
type i15 = i11.i1
}
def i10: Unit =
i16 {
val i18 = new i6()
val i15 = i11
}
}
| som-snytt/dotty | tests/fuzzy/628b3c175445b95d9155223a2651ad97c6091657.scala | Scala | apache-2.0 | 484 |
package controllers
import com.bryzek.apidoc.spec.v0.models.Method
import lib.{ApiClient, Config}
import models.MainTemplate
import com.bryzek.apidoc.api.v0.models.User
import play.api.mvc._
import play.api.mvc.Results.Redirect
import scala.concurrent.{ Await, Future }
import scala.concurrent.duration._
import play.api.Play.current
import java.util.UUID
class AuthenticatedRequest[A](val user: User, request: Request[A]) extends WrappedRequest[A](request) {
lazy val api = Authenticated.api(Some(user))
def mainTemplate(title: Option[String] = None): MainTemplate = {
MainTemplate(
requestPath = request.path,
title = title,
user = Some(user)
)
}
}
object Authenticated extends ActionBuilder[AuthenticatedRequest] {
import scala.concurrent.ExecutionContext.Implicits.global
def api(user: Option[User] = None) = ApiClient(user).client
def invokeBlock[A](request: Request[A], block: (AuthenticatedRequest[A]) => Future[Result]) = {
lazy val returnUrl: Option[String] = {
Method(request.method) match {
case Method.Get => Some(request.uri)
case Method.Connect | Method.Delete | Method.Head | Method.Options | Method.Patch | Method.Post | Method.Put | Method.Trace => None
case Method.UNDEFINED(_) => None
}
}
request.session.get("user_guid").map { userGuid =>
ApiClient.getUser(userGuid) match {
case None => {
// have a user guid, but user does not exist
Future.successful(Redirect(routes.LoginController.index(return_url = returnUrl)).withNewSession)
}
case Some(u: User) => {
block(new AuthenticatedRequest(u, request))
}
}
} getOrElse {
Future.successful(Redirect(routes.LoginController.index(return_url = returnUrl)).withNewSession)
}
}
}
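
// A minimal usage sketch (hypothetical action and view, for illustration only): the object can be
// used like any Play ActionBuilder, exposing the authenticated user and a user-bound API client.
//
// def dashboard = Authenticated { implicit request =>
//   Results.Ok(views.html.dashboard(request.mainTemplate(title = Some("Dashboard"))))
// }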
| Seanstoppable/apidoc | www/app/controllers/AuthenticatedRequest.scala | Scala | mit | 1,835 |
package com.programmaticallyspeaking.ncd.nashorn
import com.programmaticallyspeaking.ncd.host._
import com.programmaticallyspeaking.ncd.host.types.{ObjectPropertyDescriptor, PropertyDescriptorType}
import com.programmaticallyspeaking.ncd.infra.StringAnyMap
import org.scalatest.Inside
import org.scalatest.prop.TableDrivenPropertyChecks
class ObjectPropertiesNoJavaTest extends RealMarshallerTestFixture with Inside with TableDrivenPropertyChecks with ObjectPropertyTesting {
import RealMarshallerTest._
override val scriptExecutor: ScriptExecutorBase = ScriptExecutorNoJava
"Object property expansion when Java isn't around" - {
"works for a script object" in {
val expr = "{ foo: 42 }"
val expected = Map("foo" -> 42)
evaluateExpression(expr) { (host, actual) =>
expand(host, actual) should equal (expected)
}
}
}
}
class ObjectPropertiesTest extends RealMarshallerTestFixture with Inside with TableDrivenPropertyChecks with ObjectPropertyTesting {
import RealMarshallerTest._
val complexValues = Table(
("desc", "expression", "expected"),
("array", "[42]", Map("0" -> 42, "length" -> 1)),
("object", "{'a':'b'}", Map("a" -> "b")),
("RegExp", "/.*/", Map("multiline" -> false, "source" -> ".*", "global" -> false, "lastIndex" -> 0, "ignoreCase" -> false)),
("Java Array",
"""(function() {
|var StringArray = Java.type("java.lang.String[]");
|var arr = new StringArray(2);
|arr[0] = "testing";
|arr[1] = "foobar";
|return arr;
|})()
""".stripMargin, Map("0" -> "testing", "1" -> "foobar", "length" -> 2)),
// ("Java Iterator",
// """(function() {
// |var ArrayList = Java.type("java.util.ArrayList");
// |var list = new ArrayList(1);
// |list.add("testing");
// |return list.iterator();
// |})()
// """.stripMargin, Map("0" -> "testing"))
("property with get/set",
"""(function() {
|var obj = {};
|var foo = 0;
|Object.defineProperty(obj, "foo", {
| get: function () { return foo; },
| set: function (value) { foo = value; }
|});
|return obj;
|})()
""".stripMargin, Map("foo" -> Map("get" -> "<function>", "set" -> "<function>"))),
("JSObject array (classname)", s"createInstance('${classOf[ClassNameBasedArrayJSObject].getName}')", Map("0" -> "a", "1" -> "b", "length" -> 2)),
("JSObject array (isArray)", s"createInstance('${classOf[IsArrayBasedArrayJSObject].getName}')", Map("0" -> "a", "1" -> "b", "length" -> 2)),
("JSObject array (slot only)", s"createInstance('${classOf[OnlySlotBasedArrayJSObject].getName}')", Map("0" -> "a", "1" -> "b", "length" -> 2)),
("JSObject array (slot with misbehaving getMember)", s"createInstance('${classOf[SlotBasedArrayJSObjectThatMisbehavesForGetMember].getName}')", Map("0" -> "a", "1" -> "b", "length" -> 2)),
("JSObject object", s"createInstance('${classOf[ObjectLikeJSObject].getName}')", Map("a" -> 42, "b" -> 43)),
("Scala instance with val", s"createInstance('${classOf[ClassWithVal].getName}')", Map("foo" -> "bar")),
("Scala instance with var", s"createInstance('${classOf[ClassWithVar].getName}')", Map("foo" -> "var")),
("Scala instance with private val", s"createInstance('${classOf[ClassWithPrivateVal].getName}')", Map("foo" -> "priv-val")),
("Scala instance with JavaBeans property", s"createInstance('${classOf[ClassWithJavaBeans].getName}')", Map("fooBar" -> Map("get" -> "<function>", "set" -> "<function>"), "_foo" -> "bar")),
("Scala instance with JavaBeans property (no set)", s"createInstance('${classOf[ClassWithJavaBeansOnlyGet].getName}')", Map("foo" -> Map("get" -> "<function>"), "_foo" -> "bar")),
("Scala instance with JavaBeans property (no get)", s"createInstance('${classOf[ClassWithJavaBeansOnlySet].getName}')", Map("foo" -> Map("set" -> "<function>"), "_foo" -> "bar")),
("Scala instance with inherited JavaBeans property", s"createInstance('${classOf[JavaBeansSubClass].getName}')", Map.empty),
("Hashtable-based object", s"createInstance('${classOf[HashtableDerivate].getName}')", Map("foo" -> "bar", "bar" -> "baz")),
("Hashtable-based object with complex keys and int values", s"createInstance('${classOf[HashtableComplexKeysIntValues].getName}')", Map("foo" -> 1, "bar" -> 2))
)
val complexValuesAlsoInherited = Table(
("desc", "expression", "expected"),
("Scala instance with inherited field", s"createInstance('${classOf[SubClass].getName}')", Map("foo" -> "priv-val", "sub" -> "qux"))
)
val complexValuesOnlyAccessors = Table(
("desc", "expression", "expected"),
("JSObject object", s"createInstance('${classOf[ObjectLikeJSObject].getName}')", Map.empty[String, Any]),
("Hashtable-based object", s"createInstance('${classOf[HashtableDerivate].getName}')", Map.empty),
("Scala instance with JavaBeans property", s"createInstance('${classOf[ClassWithJavaBeans].getName}')", Map("fooBar" -> Map("get" -> "<function>", "set" -> "<function>"))),
("Java Array",
"""(function() {
|var arr = new (Java.type("java.lang.String[]"))(1);
|arr[0] = "testing";
|return arr;
|})()
""".stripMargin, Map.empty)
)
val complexValuesIncludingProto = Table(
("desc", "expression", "expected"),
("Script object with prototype",
"""(function() {
| var base = { foo: 41 };
| var obj = Object.create(base);
| obj.bar = 42;
| return obj;
|})()
""".stripMargin, Map("bar" -> 42, "__proto__" -> Map("foo" -> 41, "__proto__" -> AnyObject)))
)
def testProperties(clazz: Class[_])(handler: (Seq[(String, ObjectPropertyDescriptor)] => Unit)): Unit = {
val expr = s"createInstance('${clazz.getName}')"
testProperties(expr)(handler)
}
"Object property expansion works for" - {
forAll(complexValues) { (desc, expr, expected) =>
desc + " (only own)" in {
evaluateExpression(expr) { (host, actual) =>
expand(host, actual) should equal (expected)
}
}
}
forAll(complexValuesAlsoInherited) { (desc, expr, expected) =>
desc + " (also inherited)" in {
evaluateExpression(expr) { (host, actual) =>
expand(host, actual, includeInherited = true) should equal (expected)
}
}
}
forAll(complexValuesOnlyAccessors) { (desc, expr, expected) =>
desc + " (own, only accessors)" in {
evaluateExpression(expr) { (host, actual) =>
expand(host, actual, includeInherited = false, onlyAccessors = true) should equal (expected)
}
}
}
forAll(complexValuesIncludingProto) { (desc, expr, expected) =>
desc + " (own, including __proto__)" in {
evaluateExpression(expr) { (host, actual) =>
implicit val eq = anyEqWithMapSupport
expand(host, actual, expandProto = true) should equal (expected)
}
}
}
"Java NPE" - {
val expr = "(function(){try{throw new java.lang.NullPointerException();}catch(e){return e;}})()"
"with an extra/internal property 'Message' which is EmptyNode" in {
evaluateExpression(expr) { (host, actual) =>
actual match {
case cn: ComplexNode =>
val props = host.getObjectProperties(cn.objectId, true, false)
val messageProp = props.find(_._1 == "[[Message]]")
val messageValue = messageProp.flatMap(_._2.value)
messageValue should be (Some(EmptyNode))
case other => fail("Unexpected: " + other)
}
}
}
}
"Java Exception" - {
val expr = "(function(){try{throw new java.lang.IllegalArgumentException('oops');}catch(e){return e;}})()"
def evaluateException(handler: (Map[String, Any] => Unit)) = {
evaluateExpression(expr) { (host, actual) =>
expand(host, actual) match {
case StringAnyMap(aMap) => handler(aMap)
case other => fail("Unexpected: " + other)
}
}
}
lazy val expanded = {
var aMap: Map[String, Any] = null
evaluateException { m => aMap = m }
aMap
}
"with an extra/internal property 'JavaStack' (which cannot be evaluated yet...)" in {
val st = getStringProperty(expanded, "[[JavaStack]]")
st should startWith ("java.lang.IllegalArgumentException: oops")
}
"with an extra/internal property 'Message' (which cannot be evaluated yet...)" in {
val st = getStringProperty(expanded, "[[Message]]")
st should startWith ("oops")
}
}
"Scala object" - {
"should mark a val as non-writable" in {
testProperties(classOf[ClassWithVal]) { props =>
props.find(_._1 == "foo").map(_._2.isWritable) should be (Some(false))
}
}
"should mark a var as writable" in {
testProperties(classOf[ClassWithVar]) { props =>
props.find(_._1 == "foo").map(_._2.isWritable) should be (Some(true))
}
}
"should mark an own property as own" in {
testProperties(classOf[SubClass]) { props =>
props.find(_._1 == "sub").map(_._2.isOwn) should be (Some(true))
}
}
"should mark an inherited property as not own" in {
testProperties(classOf[SubClass]) { props =>
props.find(_._1 == "foo").map(_._2.isOwn) should be (Some(false))
}
}
"should create an accessor property for a JavaBeans property" in {
testProperties(classOf[ClassWithJavaBeans]) { props =>
props.find(_._1 == "fooBar").map(_._2.descriptorType) should be (Some(PropertyDescriptorType.Accessor))
}
}
}
"Object modification" - {
"should be visible after first property evaluation (i.e. not hidden by the cache)" in {
evaluateExpression("{'a':'b'}") {
case (host, c: ComplexNode) =>
// Prime the cache, then evaluate
val ignored = expand(host, c)
host.callFunctionOn(StackFrame.TopId, None, "function (x) { x['a'] = 'c'; }", Seq(c.objectId)).get
expand(host, c) should be (Map("a" -> "c"))
case (_, other) => fail("Unknown: " + other)
}
}
}
"Bound function" - {
val expr = "(function () { function Add(a,b) { return a + b; }; return Add.bind(null, 1); })()"
"with an internal property 'TargetFunction'" in {
testProperties(expr) { props =>
getDescriptorFor(props, "[[TargetFunction]]").value match {
case Some(FunctionNode(name, _, _)) => name should be ("Add")
case other => fail("Unexpected: " + other)
}
}
}
}
"function with multiple scopes" - {
val expr =
"""(function f() {
| var x = 1;
| return x1();
|
| function x1() {
| var y = 2;
| return y1;
|
| function y1() {
| return x + y;
| }
| }
|})()
""".stripMargin
"and gives multiple internal 'Scopes'" in {
testProperties(expr) { props =>
getDescriptorFor(props, "[[Scopes]]").value match {
case Some(ScopeList(size, _)) => size should be (2)
case other => fail("Unexpected: " + other)
}
}
}
}
"Regular function that captures something" - {
val expr = "(function (global) { function Add(a) { return a + global.x?0:1; }; return Add; })(this)"
"with an internal property 'Scopes' of size 1" in {
testProperties(expr) { props =>
getDescriptorFor(props, "[[Scopes]]").value match {
case Some(ScopeList(size, _)) => size should be (1)
case other => fail("Unexpected: " + other)
}
}
}
def getScopesProps(host: ScriptHost, props: Seq[(String, ObjectPropertyDescriptor)], onlyAccessors: Boolean) =
getDescriptorFor(props, "[[Scopes]]").value.map(expand(host, _, onlyAccessors = onlyAccessors)) match {
case Some(StringAnyMap(aMap)) => aMap
case other => fail("Unexpected [[Scopes]] expansion: " + other)
}
"with an actual scope in 'Scopes'" in {
evaluateExpression(expr) {
case (host, c: ComplexNode) =>
val props = host.getObjectProperties(c.objectId, true, false)
getScopesProps(host, props, false).get("0") match {
case Some(head) =>
head should be (Map("scope" -> true, "name" -> "", "type" -> "closure"))
case None => fail("no scopes")
}
case (_, other) => fail("Unknown: " + other)
}
}
"without Scopes properties if only accessor properties are requested" in {
evaluateExpression(expr) {
case (host, c: ComplexNode) =>
val props = host.getObjectProperties(c.objectId, true, false)
getScopesProps(host, props, true).size should be (0)
case (_, other) => fail("Unknown: " + other)
}
}
}
"Regular function that captures Nothing" - {
val expr = "(function Add(a,b) { return a + b; })"
"with an internal property 'Scopes' that is empty" in {
testProperties(expr) { props =>
getDescriptorFor(props, "[[Scopes]]").value match {
case Some(ScopeList(size, _)) => size should be (0)
case other => fail("Unexpected: " + other)
}
}
}
}
}
def getDescriptorFor(props: Seq[(String, ObjectPropertyDescriptor)], name: String) = props.find(_._1 == name) match {
case Some((_, desc)) => desc
case None => fail(s"Missing $name")
}
def getStringProperty(from: Map[String, Any], prop: String): String = from.get(prop) match {
case Some(st: String) => st
case Some(st) => fail(s"Unexpected $prop: " + st)
case None => fail(s"Missing $prop (available: ${from.keys.mkString(", ")})")
}
}
class ClassWithVal {
val foo = "bar"
}
class ClassWithVar {
var foo = "var"
}
class ClassWithPrivateVal {
private val foo = "priv-val"
}
class SubClass extends ClassWithPrivateVal {
private val sub = "qux"
}
class ClassWithJavaBeans {
private var _foo = "bar"
def getFooBar() = _foo
def setFooBar(s: String): Unit = _foo = s
}
class JavaBeansSubClass extends ClassWithJavaBeans
class ClassWithJavaBeansOnlyGet {
private val _foo = "bar"
def getFoo() = _foo
}
class ClassWithJavaBeansOnlySet {
private var _foo = "bar"
def setFoo(s: String): Unit = _foo = s
}
class HashtableDerivate extends java.util.Hashtable[Object, Object] {
put("foo", "bar")
put("bar", "baz")
}
class HashtableComplexKeysIntValues extends java.util.Hashtable[Key, Int] {
put(new Key("foo"), 1)
put(new Key("bar"), 2)
}
class Key(s: String) {
override def toString: String = s
} | provegard/ncdbg | src/test/scala/com/programmaticallyspeaking/ncd/nashorn/ObjectPropertiesTest.scala | Scala | bsd-3-clause | 15,029 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.kernel.protocol.v5.content
import org.apache.toree.kernel.protocol.v5.KernelMessageContent
import play.api.libs.json._
case class ExecuteInput(
code: String,
execution_count: Int
) extends KernelMessageContent {
override def content : String =
Json.toJson(this)(ExecuteInput.executeInputWrites).toString
}
object ExecuteInput extends TypeString {
implicit val executeInputReads = Json.reads[ExecuteInput]
implicit val executeInputWrites = Json.writes[ExecuteInput]
/**
* Returns the type string associated with this object.
*
* @return The type as a string
*/
override def toTypeString: String = "execute_input"
}
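
// A minimal sketch (illustrative only) of the wire form produced by `content`:
// ExecuteInput("1 + 1", 1).content yields a JSON object such as {"code":"1 + 1","execution_count":1}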
| Myllyenko/incubator-toree | protocol/src/main/scala/org/apache/toree/kernel/protocol/v5/content/ExecuteInput.scala | Scala | apache-2.0 | 1,487 |
package org.jetbrains.plugins.hocon
package parser
import com.intellij.psi.impl.DebugUtil.psiToString
import org.junit.runner.RunWith
import org.junit.runners.AllTests
@RunWith(classOf[AllTests])
class HoconParserTest extends HoconFileSetTestCase("parser") {
override protected def transform(data: Seq[String]): String = {
val psiFile = HoconFileSetTestCase.createPseudoPhysicalHoconFile(data.head)
psiToString(psiFile, false).replace(":" + psiFile.getName, "")
}
}
object HoconParserTest extends TestSuiteCompanion[HoconParserTest]
| triplequote/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/hocon/parser/HoconParserTest.scala | Scala | apache-2.0 | 549 |
/*
* Copyright 2013-2015 James Shade
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.shade.time
import java.time.Instant
import org.scalatest.mock.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
class InstantWrapperSpec extends WordSpec with Matchers with MockitoSugar {
import org.shade.time.TimeImplicits.InstantWrapper
"isAtOrBefore" should {
val millis = 435345345L
val thisInstant = Instant.ofEpochMilli(millis)
"return false if the other instant is long before this instant" in {
val otherInstant = Instant.ofEpochMilli(millis - 22332232L)
thisInstant isAtOrBefore otherInstant shouldBe false
}
"return false if the other instant is one millisecond before this instant" in {
val otherInstant = Instant.ofEpochMilli(millis - 1L)
thisInstant isAtOrBefore otherInstant shouldBe false
}
"return true if the other instant is the same as this instant" in {
val otherInstant = Instant.ofEpochMilli(millis)
thisInstant isAtOrBefore otherInstant shouldBe true
}
"return true if the other instant is one millisecond after this instant" in {
val otherInstant = Instant.ofEpochMilli(millis + 1L)
thisInstant isAtOrBefore otherInstant shouldBe true
}
"return true if the other instant is long after this instant" in {
val otherInstant = Instant.ofEpochMilli(millis + 5345435435L)
thisInstant isAtOrBefore otherInstant shouldBe true
}
}
"isAtOrAfter" should {
val millis = 435345345L
val thisInstant = Instant.ofEpochMilli(millis)
"return true if the other instant is long before this instant" in {
val otherInstant = Instant.ofEpochMilli(millis - 22332232L)
thisInstant isAtOrAfter otherInstant shouldBe true
}
"return true if the other instant is one millisecond before this instant" in {
val otherInstant = Instant.ofEpochMilli(millis - 1L)
thisInstant isAtOrAfter otherInstant shouldBe true
}
"return true if the other instant is the same as this instant" in {
val otherInstant = Instant.ofEpochMilli(millis)
thisInstant isAtOrAfter otherInstant shouldBe true
}
"return false if the other instant is one millisecond after this instant" in {
val otherInstant = Instant.ofEpochMilli(millis + 1L)
thisInstant isAtOrAfter otherInstant shouldBe false
}
"return false if the other instant is long after this instant" in {
val otherInstant = Instant.ofEpochMilli(millis + 5345435435L)
thisInstant isAtOrAfter otherInstant shouldBe false
}
}
}
| jamesshade/time8 | src/test/scala/org/shade/time/InstantWrapperSpec.scala | Scala | apache-2.0 | 3,123 |
package freez.view.tfingertree
package strict
import annotation.tailrec
object Free extends freez.view.DequeFreeComp {
type Deque[R[_, _], A, B] = TFingerTree[R, A, B]
}
| mandubian/freez | src/main/scala/view/tfingertree_strict/Free.scala | Scala | apache-2.0 | 185 |
package org.hirosezouen.hzutil
import org.scalatest.FunSuite
import org.hirosezouen.hzutil._
class PackageObjectTest extends FunSuite {
test("unsignedBigEndianShortBytes2Int") {
assertResult(0x0000ABCD.toInt)(unsignedBigEndianShortBytes2Int(Array[Byte](0xAB.toByte,0xCD.toByte)))
assertResult(0x0000FE01.toInt)(unsignedBigEndianShortBytes2Int(Array[Byte](0xFE.toByte,0x01.toByte)))
}
test("int2unsignedBigEndianShortBytes") {
assert(Array[Byte](0xAB.toByte,0xCD.toByte) sameElements int2unsignedBigEndianShortBytes(0x0000ABCD.toInt))
assert(Array[Byte](0xFE.toByte,0x01.toByte) sameElements int2unsignedBigEndianShortBytes(0x0000FE01.toInt))
}
test("unsignedBingEndianIntBytes2Long") {
assertResult(0x00000000ABCDEF12L)(unsignedBingEndianIntBytes2Long(Array[Byte](0xAB.toByte,0xCD.toByte,0xEF.toByte,0x12.toByte)))
assertResult(0x00000000FEDCBA98L)(unsignedBingEndianIntBytes2Long(Array[Byte](0xFE.toByte,0xDC.toByte,0xBA.toByte,0x98.toByte)))
}
test("long2unsignedBigEndianIntBytes") {
assert(Array[Byte](0xAB.toByte,0xCD.toByte,0xEF.toByte,0x12.toByte) sameElements long2unsignedBigEndianIntBytes(0x00000000ABCDEF12L.toInt))
assert(Array[Byte](0xFE.toByte,0xDC.toByte,0xBA.toByte,0x98.toByte) sameElements long2unsignedBigEndianIntBytes(0x00000000FEDCBA98L.toInt))
}
test("hexDump") {
val e = f"00000000 : 30313233343536373839414243444546 : 0123456789ABCDEF%n" +
f"00000010 : 303132333435363738390d0a41424344 : 0123456789??ABCD%n" +
f"00000020 : 30313233343536373839 : 0123456789%n"
val a = Array[Byte](0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,0x39,0x41,0x42,0x43,0x44,0x45,0x46,
0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,0x39,0x0d,0x0a,0x41,0x42,0x43,0x44,
0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,0x39)
/*
println(e)
println(e.length)
println(e.map(c => c.toInt).map(c => f"$c%02x").mkString(","))
val h = hexDump(a)
println(h)
println(h.length)
println(h.map(c => c.toInt).map(c => f"$c%02x").mkString(","))
*/
assertResult(e)(hexDump(a))
}
test("string2ByteArray_1") {
val e1 = Array[Byte](32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88,
89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106,
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122,
123, 124, 125, 126)
import java.nio.charset.Charset
implicit val cs = Charset.forName("Shift_JIS")
val a1 = string2ByteArray(""" !"#$%&'()*+,-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\\\]^_`abcdefghijklmnopqrstuvwxyz{|}~""")
// println(a1.map(s =>f"$s%02X").mkString)
assert(e1 sameElements a1)
}
test("string2ByteArray_2") {
val e1 = Array(0x82, 0xA0, 0x82, 0xA2, 0x82, 0xA4, 0x82, 0xA6, 0x82, 0xA8, 0x94, 0x5C,
0x00, 0x01, 0x02, 0xab, 0xcd, 0xef, 0xff, 0x78, 0x7A, 0x7A, 0x61, 0x78, 0x61).map(_.toByte)
import java.nio.charset.Charset
val a1 = string2ByteArray("""あいうえお能\\x00\\x01\\x02\\xab\\xcd\\xef\\xff\\xzz\\a\\x\\a""")(Charset.forName("Shift_JIS"))
// println(a1.map(s => f"$s%02X").mkString)
assert(e1 sameElements a1)
}
test("hexStr2byteArray") {
val e1 = "000102030405060708090a0b0c0d0e0ff0e1d2c3b4a5968778695a4b3c2d1e0f"
val a1 = Array(0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07,0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f,
0xf0,0xe1,0xd2,0xc3,0xb4,0xa5,0x96,0x87,0x78,0x69,0x5a,0x4b,0x3c,0x2d,0x1e,0x0f).map(_.toByte)
assert(hexStr2byteArray(e1) sameElements a1)
}
}
| chokopapashi/HZUtils | src/test/scala/org/hirosezouen/hzutil/PackageObjectTest.scala | Scala | bsd-3-clause | 4,173 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wsutil
import javax.inject.Inject
import cmwell.domain.{FReference, FString}
import cmwell.fts._
import cmwell.util.concurrent.retry
import cmwell.web.ld.cmw.CMWellRDFHelper
import cmwell.web.ld.exceptions.UnretrievableIdentifierException
import cmwell.ws.util.PrefixRequirement
import com.typesafe.scalalogging.LazyLogging
import cmwell.syntaxutils._
import ld.cmw.{PassiveFieldTypesCache, PassiveFieldTypesCacheTrait}
import logic.CRUDServiceFS
import scala.concurrent.{ExecutionContext, Future, Promise, duration}
import duration.DurationInt
import scala.util.{Failure, Success, Try}
sealed trait RawFieldFilter {
def fieldOperator: FieldOperator
}
sealed trait UnresolvedFieldKey {
def externalKey: String
}
sealed trait FieldKey {
def externalKey: String
def internalKey: String
def metaPath: String
}
sealed trait DirectFieldKey extends FieldKey {
def infoPath: String
override def metaPath: String = infoPath
}
case class UnresolvedURIFieldKey(uri: String) extends UnresolvedFieldKey {
override val externalKey = "$" + uri + "$"
}
case class URIFieldKey(uri: String, first: String, last: String) extends FieldKey {
//override val firstLast = retry(7,1.seconds)(Future.fromTry(FieldKey.namespaceUri(uri)))
override val internalKey = s"$first.$last"
override val externalKey = s"$first.$$$last"
override val metaPath = s"/meta/ns/$last/$first"
}
case class UnresolvedPrefixFieldKey(first: String,prefix: String) extends UnresolvedFieldKey {
override val externalKey = first + "." + prefix
}
case class PrefixFieldKey(first: String, last: String, prefix: String) extends FieldKey {
//override lazy val firstLast = retry(7,1.seconds)(FieldKey.resolvePrefix(first,prefix))
override val internalKey = s"$first.$last"
override val externalKey = s"$first.$$$last"
override val metaPath = s"/meta/ns/$last/$first"
}
case class NnFieldKey(externalKey: String) extends DirectFieldKey {
override def internalKey = externalKey
override def infoPath = {
if(externalKey.startsWith("system.") || externalKey.startsWith("content.") || externalKey.startsWith("link.")) s"/meta/sys/${externalKey.drop("system.".length)}"
else s"/meta/nn/$externalKey"
}
}
case class HashedFieldKey(first: String,hash: String) extends DirectFieldKey {
override val internalKey = first + "." + hash
override val externalKey = first + ".$" + hash
override def infoPath = s"/meta/ns/$hash/$first"
}
case class UnevaluatedQuadFilter(override val fieldOperator: FieldOperator = Must,
valueOperator: ValueOperator,
quadAlias: String) extends RawFieldFilter
// TODO: EmptyFieldFilter can mean no ghost skips in yg/gqp
//case object RawEmptyFieldFilter extends RawFieldFilter {
// override def fieldOperator = Must
//}
case class RawSingleFieldFilter(override val fieldOperator: FieldOperator = Must,
valueOperator: ValueOperator,
key: Either[UnresolvedFieldKey,DirectFieldKey],
value: Option[String]) extends RawFieldFilter
case class RawMultiFieldFilter(override val fieldOperator: FieldOperator = Must,
filters:Seq[RawFieldFilter]) extends RawFieldFilter
object RawFieldFilter extends PrefixRequirement {
private[this] val bo1 = scala.collection.breakOut[Seq[RawFieldFilter],FieldFilter,Vector[FieldFilter]]
private[this] val bo2 = scala.collection.breakOut[Set[String],FieldFilter,Vector[FieldFilter]]
def eval(rff: RawFieldFilter, cache: PassiveFieldTypesCacheTrait, cmwellRDFHelper: CMWellRDFHelper)(implicit ec: ExecutionContext): Future[FieldFilter] = rff match {
case UnevaluatedQuadFilter(fo,vo,alias) => {
val fieldFilterWithExplicitUrlOpt = cmwellRDFHelper.getQuadUrlForAlias(alias).map(v => SingleFieldFilter(fo, vo, "system.quad", Some(v)))
prefixRequirement(fieldFilterWithExplicitUrlOpt.nonEmpty, s"The alias '$alias' provided for quad in search does not exist. Use explicit quad URL, or register a new alias using `graphAlias` meta operation.")
Future.successful(fieldFilterWithExplicitUrlOpt.get)
}
case RawMultiFieldFilter(fo,rs) => Future.traverse(rs)(eval(_,cache,cmwellRDFHelper))(bo1,ec).map(MultiFieldFilter(fo, _))
case RawSingleFieldFilter(fo,vo,fk,v) => FieldKey.eval(fk,cache,cmwellRDFHelper)(ec).transform {
      case Success(s) if s.isEmpty => Failure(new NoSuchElementException(s"cannot build FieldFilter from empty fields [$rff] - this might mean you are trying to query a field that does not (yet) exist."))
case anyOtherCase => anyOtherCase.map { s =>
if (s.size == 1) mkSingleFieldFilter(fo, vo, s.head, v)
else MultiFieldFilter(fo, s.map(mkSingleFieldFilter(Should, vo, _, v))(bo2))
}
}
}
def mkSingleFieldFilter(fieldOp: FieldOperator, valueOp: ValueOperator, fieldName: String, value: Option[String]) = valueOp match {
case Equals if fieldName.indexOf('$') == 1 ||
fieldName.startsWith("system.") ||
fieldName.startsWith("content.") => SingleFieldFilter(fieldOp,Contains,fieldName,value)
case _ => SingleFieldFilter(fieldOp,valueOp,fieldName,value)
}
}
sealed trait RawSortParam
case class RawFieldSortParam(rawFieldSortParam: List[RawSortParam.RawFieldSortParam]) extends RawSortParam
case object RawNullSortParam extends RawSortParam
object RawSortParam extends LazyLogging {
type RawFieldSortParam = (Either[UnresolvedFieldKey,DirectFieldKey], FieldSortOrder)
val empty = RawFieldSortParam(Nil)
private[this] val bo = scala.collection.breakOut[Set[String],SortParam.FieldSortParam,List[SortParam.FieldSortParam]]
// private[this] val indexedFieldsNamesCache =
// new SingleElementLazyAsyncCache[Set[String]](Settings.fieldsNamesCacheTimeout.toMillis,Set.empty)(CRUDServiceFS.ftsService.getMappings(withHistory = true))(scala.concurrent.ExecutionContext.Implicits.global)
def eval(rsps: RawSortParam, crudServiceFS: CRUDServiceFS, cache: PassiveFieldTypesCache, cmwellRDFHelper: CMWellRDFHelper)(implicit ec: ExecutionContext): Future[SortParam] = rsps match {
case RawNullSortParam => Future.successful(NullSortParam)
case RawFieldSortParam(rfsp) => {
val indexedFieldsNamesFut = crudServiceFS.ESMappingsCache.getAndUpdateIfNeeded
Future.traverse(rfsp) {
case (fk, ord) => FieldKey.eval(fk,cache,cmwellRDFHelper).map(_.map(_ -> ord)(bo))
// following code could gives precedence to mangled fields over unmangled ones
}.flatMap(pairs => indexedFieldsNamesFut.map {
indexedFieldsNamesWithTypeConcatenation => {
val indexedFieldsNames = indexedFieldsNamesWithTypeConcatenation.map(_.takeWhile(':'.!=))
FieldSortParams(pairs.foldRight(List.empty[SortParam.FieldSortParam]) {
(currentFieldMangledList, reduced) => {
val (mangled, unmangled) = {
val filtered = currentFieldMangledList.filter {
case (cur, _) => {
(cur.length > 1 && cur(1) == '$') ||
cur.startsWith("system.") ||
cur.startsWith("content.") ||
cur.startsWith("link.") ||
indexedFieldsNames(cur)
}
}
val prePartition = if(filtered.nonEmpty) filtered else {
logger.warn(s"currentFieldMangledList was filtered up to an empty list: $currentFieldMangledList ,\\n$indexedFieldsNames")
currentFieldMangledList
}
prePartition.partition {
case (name, order) => name.length > 1 && name.charAt(1) == '$'
}
}
mangled.foldRight(unmangled.foldRight(reduced)(_ :: _))(_ :: _)
}
})
}
})
}
}
}
object FieldKey extends LazyLogging with PrefixRequirement {
def eval(fieldKey: Either[UnresolvedFieldKey,DirectFieldKey], cache: PassiveFieldTypesCacheTrait, cmwellRDFHelper: CMWellRDFHelper)(implicit ec: ExecutionContext): Future[Set[String]] = fieldKey match {
case Right(NnFieldKey(key)) if key.startsWith("system.") || key.startsWith("content.") || key.startsWith("link.") => Future.successful(Set(key))
case Right(dFieldKey) => enrichWithTypes(dFieldKey, cache)
case Left(uFieldKey) => resolve(uFieldKey, cmwellRDFHelper).flatMap(enrichWithTypes(_,cache))
}
def enrichWithTypes(fk: FieldKey, cache: PassiveFieldTypesCacheTrait): Future[Set[String]] = {
import scala.concurrent.ExecutionContext.Implicits.global
cache.get(fk).transform {
case Failure(err) => Failure(new Exception(s"resolving type mangling for field [$fk] failed",err))
case s => s.map(_.map {
case 's' => fk.internalKey
case c => s"$c$$${fk.internalKey}"
})
}
}
def resolve(ufk: UnresolvedFieldKey, cmwellRDFHelper: CMWellRDFHelper): Future[FieldKey] = {
import scala.concurrent.ExecutionContext.Implicits.global
ufk match {
case UnresolvedPrefixFieldKey(first, prefix) => resolvePrefix(cmwellRDFHelper, first, prefix).map {
case (first, hash) => PrefixFieldKey(first, hash, prefix)
}
case UnresolvedURIFieldKey(uri) => Future.fromTry(namespaceUri(cmwellRDFHelper, uri).map {
case (first, hash) => URIFieldKey(uri, first, hash)
})
}
}
def namespaceUri(cmwellRDFHelper: CMWellRDFHelper,u: String): Try[(String,String)] = {
val p = org.apache.jena.rdf.model.ResourceFactory.createProperty(u)
val first = p.getLocalName
val ns = p.getNameSpace
cmwellRDFHelper.urlToHash(ns) match {
case None => Failure(new UnretrievableIdentifierException(s"could not find namespace URI: $ns"))
case Some(internalIdentifier) => Success(first -> internalIdentifier)
}
}
def resolvePrefix(cmwellRDFHelper: CMWellRDFHelper, first: String, requestedPrefix: String)(implicit ec: ExecutionContext): Future[(String,String)] = {
Try(cmwellRDFHelper.getIdentifierForPrefixAsync(requestedPrefix)).fold({
case t: Throwable =>
Future.failed[(String,String)](new Exception("resolvePrefix failed",t))
}, _.transform {
case scala.util.Success(identifier) => Success(first -> identifier)
case scala.util.Failure(e: UnretrievableIdentifierException) => Failure(e)
case scala.util.Failure(e: IllegalArgumentException) => Failure(new UnretrievableIdentifierException(e.getMessage, e))
case scala.util.Failure(e) => {
logger.error(s"couldn't find the prefix: $requestedPrefix", e)
Failure(new UnretrievableIdentifierException(s"couldn't find the prefix: $requestedPrefix", e))
}
})
}
} | nruppin/CM-Well | server/cmwell-ws/app/wsutil/RawFieldFilter.scala | Scala | apache-2.0 | 11,389 |
/*
Copyright 2016 ScalABM
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.economicsl.agora.markets.tradables
import java.util.UUID
/** Base trait defining the interface for any object whose ownership can be transferred via a `Market`. */
trait Tradable {
/** A unique identifier used to distinguish a `Tradable` from other `Tradable` objects. */
def uuid: UUID
}
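
/** A minimal sketch of a concrete `Tradable`. `TestTradable` is a hypothetical type introduced here
  * purely for illustration; any class exposing a `uuid` satisfies the interface. */
case class TestTradable(uuid: UUID) extends Tradable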
| EconomicSL/agora | src/main/scala/org/economicsl/agora/markets/tradables/Tradable.scala | Scala | apache-2.0 | 863 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package xml
package transform
import scala.collection.Seq
/**
* A class for XML transformations.
*
* @author Burak Emir
*/
abstract class BasicTransformer extends Function1[Node, Node] {
protected def unchanged(n: Node, ns: Seq[Node]) =
ns.length == 1 && (ns.head == n)
/**
* Call transform(Node) for each node in ns, append results
* to NodeBuffer.
*/
def transform(it: Iterator[Node], nb: NodeBuffer): Seq[Node] =
it.foldLeft(nb)(_ ++= transform(_)).toSeq
/**
* Call transform(Node) to each node in ns, yield ns if nothing changes,
* otherwise a new sequence of concatenated results.
*/
def transform(ns: Seq[Node]): Seq[Node] = {
val changed = ns flatMap transform
if (changed.length != ns.length || changed.zip(ns).exists(p => p._1 != p._2)) changed
else ns
}
def transform(n: Node): Seq[Node] = {
if (n.doTransform) n match {
case Group(xs) => Group(transform(xs)) // un-group the hack Group tag
case _ =>
val ch = n.child
val nch = transform(ch)
if (ch eq nch) n
else Elem(n.prefix, n.label, n.attributes, n.scope, nch.isEmpty, nch: _*)
}
else n
}
def apply(n: Node): Node = {
val seq = transform(n)
if (seq.length > 1)
throw new UnsupportedOperationException("transform must return single node for root")
else seq.head
}
}
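
/**
 * A minimal sketch (illustrative only, not part of the library shown above) of a concrete
 * transformer: it reuses `super.transform` for recursion and upper-cases every element label.
 */
class UpperCaseLabels extends BasicTransformer {
  override def transform(n: Node): Seq[Node] =
    super.transform(n).map {
      case e: Elem => e.copy(label = e.label.toUpperCase)
      case other   => other
    }
}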
| scala/scala-xml | shared/src/main/scala/scala/xml/transform/BasicTransformer.scala | Scala | apache-2.0 | 1,679 |
// Copyright 2015 Foursquare Labs Inc. All Rights Reserved.
package io.fsq.common.concurrent
import com.twitter.util.Future
import io.fsq.common.base.{Failure, Outcome, Success}
/**
* Inspired by [[io.fsq.common.concurrent.FutureOption]], this is a monadic wrapper
* around Future[Outcome[S, F]] so you can easily combine Future[S], Option[S], Outcome[S, F], etc.
* into a for yield.
*
* Example:
*
* val result: Future[Outcome[Venue, String]] = (for {
* userid <- FutureOutcome.lift(msg.useridOption, "no-userid")
* user <- FutureOutcome.lift(services.futureDb.fetchOne(Q(User).where(_.userid eqs userid)), "no-user")
* venue <- FutureOutcome(services.foo.bar(user)) // where bar returns Future[Outcome[Venue, String]]
* _ <- FutureOutcome.failWhen(venue.isHomeOrOffice, "home-or-office-venue")
* } yield venue).resolve
*/
final class FutureOutcome[+S, +F](val resolve: Future[Outcome[S, F]]) extends AnyVal {
def map[T](f: S => T): FutureOutcome[T, F] = new FutureOutcome(resolve.map(_.map(f)))
def flatMap[T, FF >: F](f: S => FutureOutcome[T, FF]): FutureOutcome[T, FF] =
new FutureOutcome(resolve.flatMap {
case Success(s) => f(s).resolve
case Failure(fa) => Future(Failure(fa))
})
def foreach[T](f: S => Unit): Unit = new FutureOutcome(resolve.foreach(_.foreach(f)))
def filter[FF >: F](f: S => Boolean, failure: FF): FutureOutcome[S, FF] = {
new FutureOutcome(resolve.map(_.filter(f, failure)))
}
def withFilter[FF >: F](f: S => Boolean, failure: FF): FutureOutcome[S, FF] = {
new FutureOutcome(resolve.map(_.filter(f, failure)))
}
def flatten[T, FF >: F](implicit asFutureOutcome: (S) => FutureOutcome[T, FF]): FutureOutcome[T, FF] = {
this.flatMap(asFutureOutcome)
}
def orElse[B >: S, FF >: F](f: => FutureOutcome[B, FF]): FutureOutcome[B, FF] = FutureOutcome(resolve.flatMap({
case Success(v) => Future.value(Success(v))
case Failure(_) => f.resolve
}))
}
object FutureOutcome {
def apply[S, F](f: Future[Outcome[S, F]]): FutureOutcome[S, F] = new FutureOutcome(f)
def apply[S, F](o: Outcome[Future[S], F]): FutureOutcome[S, F] = new FutureOutcome(o match {
case Success(fs) => fs.map(s => Success(s))
case Failure(fa) => Future.value(Failure(fa))
})
def lift[S, F](f: Future[S]): FutureOutcome[S, F] = new FutureOutcome(f.map(Success(_)))
def lift[S, F](o: Outcome[S, F]): FutureOutcome[S, F] = new FutureOutcome(Future.value(o))
def lift[S, F](o: Option[S], fa: => F): FutureOutcome[S, F] = new FutureOutcome(Future.value(Outcome(o, fa)))
def lift[S, F](fo: Future[Option[S]], fa: => F): FutureOutcome[S, F] = new FutureOutcome(fo.map(o => Outcome(o, fa)))
def value[S, F](a: S): FutureOutcome[S, F] = new FutureOutcome(Future.value(Success(a)))
def exception(e: Exception): FutureOutcome[Nothing, Nothing] = new FutureOutcome(Future.exception(e))
def failure[F](fa: => F): FutureOutcome[Nothing, F] = new FutureOutcome(Future.value(Failure(fa)))
def success[S](s: => S): FutureOutcome[S, Nothing] = new FutureOutcome(Future.value(Success(s)))
// Returns the failure value if cond is true, else Future[Success(Unit)]. Useful in for comprehensions.
def failWhen[F](cond: Boolean, f: => F): FutureOutcome[Unit, F] = {
if (cond) FutureOutcome.failure(f) else FutureOutcome.success(Unit)
}
}
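
/**
 * A minimal usage sketch (hypothetical helper, not part of the library): combines an optional raw
 * value, a future computation and a guard, collapsing everything to a Future[Outcome[Int, String]].
 */
private object FutureOutcomeUsageExample {
  def parsePositiveInt(raw: Option[String]): Future[Outcome[Int, String]] = (for {
    s <- FutureOutcome.lift(raw, "missing-value")        // Option[String] lifted with a failure value
    n <- FutureOutcome.lift(Future(s.toInt))             // Future[Int] lifted into the monad
    _ <- FutureOutcome.failWhen(n <= 0, "not-positive")  // guard that short-circuits on failure
  } yield n).resolve
}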
| foursquare/fsqio | src/jvm/io/fsq/common/concurrent/FutureOutcome.scala | Scala | apache-2.0 | 3,358 |
/*
* Copyright 2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package util
import scala.util.parsing.combinator.{PackratParsers, Parsers, ImplicitConversions}
import xml.{Elem, NodeSeq}
import net.liftweb.common._
sealed trait CssSelector {
def subNodes: Box[SubNode]
def withSubnode(sn: SubNode): CssSelector
}
final case class ElemSelector(elem: String, subNodes: Box[SubNode]) extends
CssSelector {
def withSubnode(sn: SubNode): CssSelector = this.copy(subNodes = Full(sn))
}
final case class StarSelector(subNodes: Box[SubNode]) extends CssSelector {
def withSubnode(sn: SubNode): CssSelector = this.copy(subNodes = Full(sn))
}
final case class IdSelector(id: String, subNodes: Box[SubNode]) extends
CssSelector {
def withSubnode(sn: SubNode): CssSelector = this.copy(subNodes = Full(sn))
}
final case class ClassSelector(clss: String, subNodes: Box[SubNode]) extends
CssSelector {
def withSubnode(sn: SubNode): CssSelector = this.copy(subNodes = Full(sn))
}
final case class NameSelector(name: String, subNodes: Box[SubNode]) extends
CssSelector {
def withSubnode(sn: SubNode): CssSelector = this.copy(subNodes = Full(sn))
}
final case class EnclosedSelector(selector: CssSelector, kid: CssSelector) extends CssSelector {
def subNodes: Box[SubNode] = Empty
def withSubnode(sn: SubNode): CssSelector = this
}
final case class AttrSelector(name: String, value: String,
subNodes: Box[SubNode]) extends CssSelector {
def withSubnode(sn: SubNode): CssSelector = this.copy(subNodes = Full(sn))
}
sealed trait SubNode
object SubNode {
def unapply(bind: CssBind): Option[Box[SubNode]] =
Some(bind.css.flatMap(_.subNodes))
}
sealed trait WithKids {
def transform(original: NodeSeq, newNs: NodeSeq): NodeSeq
}
final case class KidsSubNode() extends SubNode with WithKids {
def transform(original: NodeSeq, newNs: NodeSeq): NodeSeq = newNs
}
final case class PrependKidsSubNode() extends SubNode with WithKids {
def transform(original: NodeSeq, newNs: NodeSeq): NodeSeq = newNs ++ original
}
final case object DontMergeAttributes extends SubNode {
}
final case class SurroundKids() extends SubNode with WithKids {
def transform(original: NodeSeq, newNs: NodeSeq): NodeSeq = {
var changed = false
val res: NodeSeq = newNs.flatMap{
case e: Elem if !changed =>
changed = true
new Elem(e.prefix,
e.label, e.attributes,
e.scope, e.child ++ original :_*)
case x => x
}
if (changed) res else newNs ++ original
}
}
final case class AppendKidsSubNode() extends SubNode with WithKids {
def transform(original: NodeSeq, newNs: NodeSeq): NodeSeq = original ++ newNs
}
sealed trait AttributeRule
final case class AttrSubNode(attr: String) extends SubNode with AttributeRule
final case class AttrAppendSubNode(attr: String) extends SubNode with AttributeRule
final case class AttrRemoveSubNode(attr: String) extends SubNode with AttributeRule
final case class SelectThisNode(kids: Boolean) extends SubNode
/**
* Parse a subset of CSS into the appropriate selector objects
*/
object CssSelectorParser extends PackratParsers with ImplicitConversions {
private val cache = new LRUMap[String, CssSelector](25000)
/**
* Parse a String into a CSS Selector
*/
def parse(_toParse: String): Box[CssSelector] = synchronized {
// trim off leading and trailing spaces
val toParse = _toParse.trim
// this method is synchronized because the Parser combinator is not
// thread safe, so we'll only parse one at a time, but given that most
// of the selectors will be cached, it's not really a performance hit
cache.get(toParse) or {
internalParse(toParse).map {
sel => {
// cache the result
cache(toParse) = sel
sel
}
}
}
}
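
  // A minimal usage sketch (illustrative only): "#foo *" selects the children of the element with
  // id "foo", so parsing it is expected to yield Full(IdSelector("foo", Full(KidsSubNode()))).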
import scala.util.parsing.input.CharSequenceReader
type Elem = Char
type UnitParser=Parser[Unit]
private def internalParse(toParse: String): Box[CssSelector] = {
val reader: Input = new CharSequenceReader(toParse, 0)
topParser(reader) match {
case Success(v, _) => Full(v)
case x => Empty
}
}
private implicit def str2chars(s: String): List[Char] = new scala.collection.immutable.WrappedString(s).toList
private lazy val _topParser: Parser[CssSelector] = {
phrase(idMatch |
nameMatch |
classMatch |
attrMatch |
elemMatch |
starMatch |
colonMatch)
}
private def fixAll(all: List[CssSelector], sn: Option[SubNode]): CssSelector = {
(all, sn) match {
// case (Nil, Some())
case (r :: Nil, None) => r
case (r :: Nil, Some(sn)) => r.withSubnode(sn)
case (lst, None) => lst.reduceRight((b, a) => EnclosedSelector(b, a))
case (lst, Some(sn)) => (lst.dropRight(1) ::: lst.takeRight(1).map(_.withSubnode(sn))).reduceRight((b, a) => EnclosedSelector(b, a))
}
}
private lazy val topParser: Parser[CssSelector] =
phrase(rep1((_idMatch | _nameMatch | _classMatch | _attrMatch | _elemMatch |
_colonMatch | _starMatch) <~ (rep1(' ') | 26.toChar)) ~ opt(subNode)) ^^ {
case (one :: Nil) ~ sn => fixAll(List(one), sn)
case all ~ None if all.takeRight(1).head == StarSelector(Empty) =>
fixAll(all.dropRight(1), Some(KidsSubNode()))
case all ~ sn => fixAll(all, sn)
}
private lazy val _colonMatch: Parser[CssSelector] =
(':' ~> id) ^? {
case "button" => AttrSelector("type", "button", Empty)
case "checkbox" => AttrSelector("type", "checkbox", Empty)
case "file" => AttrSelector("type", "file", Empty)
case "password" => AttrSelector("type", "password", Empty)
case "radio" => AttrSelector("type", "radio", Empty)
case "reset" => AttrSelector("type", "reset", Empty)
case "submit" => AttrSelector("type", "submit", Empty)
case "text" => AttrSelector("type", "text", Empty)
}
private lazy val colonMatch: Parser[CssSelector] =
':' ~> id ~ opt(subNode) ^? {
case "button" ~ sn => AttrSelector("type", "button", sn)
case "checkbox" ~ sn => AttrSelector("type", "checkbox", sn)
case "file" ~ sn => AttrSelector("type", "file", sn)
case "password" ~ sn => AttrSelector("type", "password", sn)
case "radio" ~ sn => AttrSelector("type", "radio", sn)
case "reset" ~ sn => AttrSelector("type", "reset", sn)
case "submit" ~ sn => AttrSelector("type", "submit", sn)
case "text" ~ sn => AttrSelector("type", "text", sn)
}
private lazy val idMatch: Parser[CssSelector] = '#' ~> id ~ opt(subNode) ^^ {
case id ~ sn => IdSelector(id, sn)
}
private lazy val _idMatch: Parser[CssSelector] = '#' ~> id ^^ {
case id => IdSelector(id, Empty)
}
private lazy val nameMatch: Parser[CssSelector] = '@' ~> id ~ opt(subNode) ^^ {
case name ~ sn => NameSelector(name, sn)
}
private lazy val _nameMatch: Parser[CssSelector] = '@' ~> id ^^ {
case name => NameSelector(name, Empty)
}
private lazy val elemMatch: Parser[CssSelector] = id ~ opt(subNode) ^^ {
case elem ~ sn => ElemSelector(elem, sn)
}
private lazy val _elemMatch: Parser[CssSelector] = id ^^ {
case elem => ElemSelector(elem, Empty)
}
private lazy val starMatch: Parser[CssSelector] = '*' ~> opt(subNode) ^^ {
case sn => StarSelector(sn)
}
private lazy val _starMatch: Parser[CssSelector] = '*' ^^ {
case sn => StarSelector(Empty)
}
private lazy val classMatch: Parser[CssSelector] =
'.' ~> attrName ~ opt(subNode) ^^ {
case cls ~ sn => ClassSelector(cls, sn)
}
private lazy val attrMatch: Parser[CssSelector] =
attrName ~ '=' ~ attrConst ~ opt(subNode) ^^ {
case "id" ~ _ ~ const ~ sn => IdSelector(const, sn)
case "name" ~ _ ~ const ~ sn => NameSelector(const, sn)
case n ~ _ ~ v ~ sn => AttrSelector(n, v, sn)
}
private lazy val _classMatch: Parser[CssSelector] =
'.' ~> attrName ^^ {
case cls => ClassSelector(cls, Empty)
}
private lazy val _attrMatch: Parser[CssSelector] =
attrName ~ '=' ~ attrConst ^^ {
case "id" ~ _ ~ const => IdSelector(const, Empty)
case "name" ~ _ ~ const => NameSelector(const, Empty)
case n ~ _ ~ v => AttrSelector(n, v, Empty)
}
private lazy val id: Parser[String] = letter ~
rep(letter | number | '-' | '_' | ':' | '.') ^^ {
case first ~ rest => (first :: rest).mkString
}
private def isLetter(c: Char): Boolean = c.isLetter
private def isNumber(c: Char): Boolean = c.isDigit
private lazy val letter: Parser[Char] = elem("letter", isLetter)
private lazy val number: Parser[Char] = elem("number", isNumber)
private lazy val subNode: Parser[SubNode] = rep(' ') ~>
((opt('*') ~ '[' ~> attrName <~ '+' ~ ']' ^^ {
name => AttrAppendSubNode(name)
}) |
(opt('*') ~ '[' ~> attrName <~ '!' ~ ']' ^^ {
name => AttrRemoveSubNode(name)
}) | (opt('*') ~ '[' ~> attrName <~ ']' ^^ {
name => AttrSubNode(name)
}) |
('!' ~ '!' ^^ (a => DontMergeAttributes)) |
('<' ~ '*' ~ '>') ^^ (a => SurroundKids()) |
('-' ~ '*' ^^ (a => PrependKidsSubNode())) |
('>' ~ '*' ^^ (a => PrependKidsSubNode())) |
('*' ~ '+' ^^ (a => AppendKidsSubNode())) |
('*' ~ '<' ^^ (a => AppendKidsSubNode())) |
'*' ^^ (a => KidsSubNode()) |
'^' ~ '*' ^^ (a => SelectThisNode(true)) |
'^' ~ '^' ^^ (a => SelectThisNode(false)))
private lazy val attrName: Parser[String] = (letter | '_' | ':') ~
rep(letter | number | '-' | '_' | ':' | '.') ^^ {
case first ~ rest => (first :: rest).mkString
}
private lazy val attrConst: Parser[String] = {
(('\'' ~> rep(elem("isValid", (c: Char) => {
c != '\'' && c >= ' '
})) <~ '\'') ^^ {
case s => s.mkString
}) |
(('"' ~> rep(elem("isValid", (c: Char) => {
c != '"' && c >= ' '
})) <~ '"') ^^ {
case s => s.mkString
}) |
(rep1(elem("isValid", (c: Char) => {
c != '\'' && c != '"' && c > ' '
})) ^^ {
case s => s.mkString
})
}
}
| pbrant/framework | core/util/src/main/scala/net/liftweb/util/CssSelector.scala | Scala | apache-2.0 | 10,630 |
/*
* Copyright 2014 – 2015 Paul Horn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.knutwalker.esclient
import org.elasticsearch.common.settings.ImmutableSettings
import scala.concurrent._
import org.elasticsearch.node.NodeBuilder.nodeBuilder
import org.elasticsearch.index.query.QueryBuilders._
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, Matchers, FlatSpec}
import org.elasticsearch.node.Node
import org.elasticsearch.client.Client
import org.elasticsearch.action.{ActionResponse, ActionRequest, ActionRequestBuilder}
import org.elasticsearch.action.search.SearchResponse
import java.util.UUID
class ESClientSpec extends FlatSpec with Matchers with BeforeAndAfterAll with BeforeAndAfter {
var node: Node = _
var client: Client = _
private def execute[Request <: ActionRequest[Request], Response <: ActionResponse](rb: ActionRequestBuilder[Request, Response, _, _])(implicit am: ActionMagnet[Request, Response]): Response = {
val r = rb.request()
val f = client.execute(r)
Await.result(f, duration.Duration.Inf)
}
override protected def beforeAll(): Unit = {
node = nodeBuilder()
.local(true)
.settings(ImmutableSettings.builder()
.put("cluster.name", UUID.randomUUID().toString)
.put("http.enabled", false))
.node()
client = node.client()
}
override protected def afterAll(): Unit = {
node.close()
}
before {
client.admin().cluster().prepareHealth("test").setWaitForYellowStatus().execute().actionGet()
client.prepareDelete("test", "test", "1")
.execute().actionGet()
client.prepareIndex("test", "test", "1")
.setSource("""{"foo": "bar", "bar": "baz"}""")
.execute().actionGet()
client.admin().indices().prepareRefresh("test").execute().actionGet()
}
"The ESClient" should "provide an implicit conversion for a SearchAction" in {
val req = client.prepareSearch("test")
.setQuery(matchAllQuery())
.addFields("foo", "bar")
val res = execute(req)
val hit = res.getHits.getHits.head
res shouldBe a [SearchResponse]
res.getHits.getMaxScore shouldBe 1.0F +- 0.001F
res.getHits.getTotalHits shouldBe 1
hit.index shouldBe "test"
hit.`type` shouldBe "test"
hit.fields.get("foo").getValue[String] shouldBe "bar"
hit.fields.get("bar").getValue[String] shouldBe "baz"
}
}
| knutwalker/esclient | src/test/scala/de/knutwalker/esclient/ESClientSpec.scala | Scala | apache-2.0 | 2,896 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.lang.{Long => JLong}
import java.time.Duration
import java.util.{Optional, Properties}
import java.util.concurrent.TimeUnit
import kafka.integration.KafkaServerTestHarness
import kafka.server.KafkaConfig
import kafka.utils.TestUtils
import kafka.utils.TestUtils.consumeRecords
import org.apache.kafka.clients.consumer.{ConsumerConfig, KafkaConsumer, OffsetAndMetadata}
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerRecord}
import org.apache.kafka.common.{KafkaException, TopicPartition}
import org.apache.kafka.common.errors.{ProducerFencedException, TimeoutException}
import org.junit.{After, Before, Test}
import org.junit.Assert._
import org.scalatest.Assertions.fail
import scala.collection.JavaConverters._
import scala.collection.mutable.Buffer
import scala.collection.Seq
import scala.concurrent.ExecutionException
class TransactionsTest extends KafkaServerTestHarness {
val numServers = 3
val transactionalProducerCount = 2
val transactionalConsumerCount = 1
val nonTransactionalConsumerCount = 1
val topic1 = "topic1"
val topic2 = "topic2"
val transactionalProducers = Buffer[KafkaProducer[Array[Byte], Array[Byte]]]()
val transactionalConsumers = Buffer[KafkaConsumer[Array[Byte], Array[Byte]]]()
val nonTransactionalConsumers = Buffer[KafkaConsumer[Array[Byte], Array[Byte]]]()
override def generateConfigs: Seq[KafkaConfig] = {
TestUtils.createBrokerConfigs(numServers, zkConnect).map(KafkaConfig.fromProps(_, serverProps()))
}
@Before
override def setUp(): Unit = {
super.setUp()
val numPartitions = 4
val topicConfig = new Properties()
topicConfig.put(KafkaConfig.MinInSyncReplicasProp, 2.toString)
createTopic(topic1, numPartitions, numServers, topicConfig)
createTopic(topic2, numPartitions, numServers, topicConfig)
for (_ <- 0 until transactionalProducerCount)
createTransactionalProducer("transactional-producer")
for (_ <- 0 until transactionalConsumerCount)
createReadCommittedConsumer("transactional-group")
for (_ <- 0 until nonTransactionalConsumerCount)
createReadUncommittedConsumer("non-transactional-group")
}
@After
override def tearDown(): Unit = {
transactionalProducers.foreach(_.close())
transactionalConsumers.foreach(_.close())
nonTransactionalConsumers.foreach(_.close())
super.tearDown()
}
@Test
def testBasicTransactions() = {
val producer = transactionalProducers.head
val consumer = transactionalConsumers.head
val unCommittedConsumer = nonTransactionalConsumers.head
producer.initTransactions()
producer.beginTransaction()
producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, "2", "2", willBeCommitted = false))
producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, "4", "4", willBeCommitted = false))
producer.flush()
producer.abortTransaction()
producer.beginTransaction()
producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, "1", "1", willBeCommitted = true))
producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, "3", "3", willBeCommitted = true))
producer.commitTransaction()
consumer.subscribe(List(topic1, topic2).asJava)
unCommittedConsumer.subscribe(List(topic1, topic2).asJava)
val records = consumeRecords(consumer, 2)
records.foreach { record =>
TestUtils.assertCommittedAndGetValue(record)
}
val allRecords = consumeRecords(unCommittedConsumer, 4)
val expectedValues = List("1", "2", "3", "4").toSet
allRecords.foreach { record =>
assertTrue(expectedValues.contains(TestUtils.recordValueAsString(record)))
}
}
@Test
def testReadCommittedConsumerShouldNotSeeUndecidedData(): Unit = {
val producer1 = transactionalProducers.head
val producer2 = createTransactionalProducer("other")
val readCommittedConsumer = transactionalConsumers.head
val readUncommittedConsumer = nonTransactionalConsumers.head
producer1.initTransactions()
producer2.initTransactions()
producer1.beginTransaction()
producer2.beginTransaction()
val latestVisibleTimestamp = System.currentTimeMillis()
producer2.send(new ProducerRecord(topic1, 0, latestVisibleTimestamp, "x".getBytes, "1".getBytes))
producer2.send(new ProducerRecord(topic2, 0, latestVisibleTimestamp, "x".getBytes, "1".getBytes))
producer2.flush()
val latestWrittenTimestamp = latestVisibleTimestamp + 1
producer1.send(new ProducerRecord(topic1, 0, latestWrittenTimestamp, "a".getBytes, "1".getBytes))
producer1.send(new ProducerRecord(topic1, 0, latestWrittenTimestamp, "b".getBytes, "2".getBytes))
producer1.send(new ProducerRecord(topic2, 0, latestWrittenTimestamp, "c".getBytes, "3".getBytes))
producer1.send(new ProducerRecord(topic2, 0, latestWrittenTimestamp, "d".getBytes, "4".getBytes))
producer1.flush()
producer2.send(new ProducerRecord(topic1, 0, latestWrittenTimestamp, "x".getBytes, "2".getBytes))
producer2.send(new ProducerRecord(topic2, 0, latestWrittenTimestamp, "x".getBytes, "2".getBytes))
producer2.commitTransaction()
// ensure the records are visible to the read uncommitted consumer
val tp1 = new TopicPartition(topic1, 0)
val tp2 = new TopicPartition(topic2, 0)
readUncommittedConsumer.assign(Set(tp1, tp2).asJava)
consumeRecords(readUncommittedConsumer, 8)
val readUncommittedOffsetsForTimes = readUncommittedConsumer.offsetsForTimes(Map(
tp1 -> (latestWrittenTimestamp: JLong),
tp2 -> (latestWrittenTimestamp: JLong)
).asJava)
assertEquals(2, readUncommittedOffsetsForTimes.size)
assertEquals(latestWrittenTimestamp, readUncommittedOffsetsForTimes.get(tp1).timestamp)
assertEquals(latestWrittenTimestamp, readUncommittedOffsetsForTimes.get(tp2).timestamp)
readUncommittedConsumer.unsubscribe()
// we should only see the first two records which come before the undecided second transaction
readCommittedConsumer.assign(Set(tp1, tp2).asJava)
val records = consumeRecords(readCommittedConsumer, 2)
records.foreach { record =>
assertEquals("x", new String(record.key))
assertEquals("1", new String(record.value))
}
// even if we seek to the end, we should not be able to see the undecided data
assertEquals(2, readCommittedConsumer.assignment.size)
readCommittedConsumer.seekToEnd(readCommittedConsumer.assignment)
readCommittedConsumer.assignment.asScala.foreach { tp =>
assertEquals(1L, readCommittedConsumer.position(tp))
}
// undecided timestamps should not be searchable either
val readCommittedOffsetsForTimes = readCommittedConsumer.offsetsForTimes(Map(
tp1 -> (latestWrittenTimestamp: JLong),
tp2 -> (latestWrittenTimestamp: JLong)
).asJava)
assertNull(readCommittedOffsetsForTimes.get(tp1))
assertNull(readCommittedOffsetsForTimes.get(tp2))
}
@Test
def testDelayedFetchIncludesAbortedTransaction(): Unit = {
val producer1 = transactionalProducers.head
val producer2 = createTransactionalProducer("other")
producer1.initTransactions()
producer2.initTransactions()
producer1.beginTransaction()
producer2.beginTransaction()
producer2.send(new ProducerRecord(topic1, 0, "x".getBytes, "1".getBytes))
producer2.flush()
producer1.send(new ProducerRecord(topic1, 0, "y".getBytes, "1".getBytes))
producer1.send(new ProducerRecord(topic1, 0, "y".getBytes, "2".getBytes))
producer1.flush()
producer2.send(new ProducerRecord(topic1, 0, "x".getBytes, "2".getBytes))
producer2.flush()
producer1.abortTransaction()
producer2.commitTransaction()
// ensure that the consumer's fetch will sit in purgatory
val consumerProps = new Properties()
consumerProps.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, "100000")
consumerProps.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, "100")
val readCommittedConsumer = createReadCommittedConsumer(props = consumerProps)
readCommittedConsumer.assign(Set(new TopicPartition(topic1, 0)).asJava)
val records = consumeRecords(readCommittedConsumer, numRecords = 2)
assertEquals(2, records.size)
val first = records.head
assertEquals("x", new String(first.key))
assertEquals("1", new String(first.value))
assertEquals(0L, first.offset)
val second = records.last
assertEquals("x", new String(second.key))
assertEquals("2", new String(second.value))
assertEquals(3L, second.offset)
}
@Test
def testSendOffsets() = {
// The basic plan for the test is as follows:
    // 1. Seed topic1 with numSeedMessages unique, numbered messages.
    // 2. Run a consume/process/produce loop to transactionally copy messages from topic1 to topic2 and commit
    //    offsets as part of the transaction.
    // 3. Randomly abort transactions in step 2.
    // 4. Validate that we have numSeedMessages unique committed messages in topic2. If the offsets were committed
    //    properly with the transactions, we should not have any duplicates or missing messages since we process
    //    the input messages exactly once.
val consumerGroupId = "foobar-consumer-group"
val numSeedMessages = 500
TestUtils.seedTopicWithNumberedRecords(topic1, numSeedMessages, servers)
val producer = transactionalProducers(0)
val consumer = createReadCommittedConsumer(consumerGroupId, maxPollRecords = numSeedMessages / 4)
consumer.subscribe(List(topic1).asJava)
producer.initTransactions()
var shouldCommit = false
var recordsProcessed = 0
try {
while (recordsProcessed < numSeedMessages) {
val records = TestUtils.pollUntilAtLeastNumRecords(consumer, Math.min(10, numSeedMessages - recordsProcessed))
producer.beginTransaction()
shouldCommit = !shouldCommit
records.foreach { record =>
val key = new String(record.key(), "UTF-8")
val value = new String(record.value(), "UTF-8")
producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, key, value, willBeCommitted = shouldCommit))
}
producer.sendOffsetsToTransaction(TestUtils.consumerPositions(consumer).asJava, consumerGroupId)
if (shouldCommit) {
producer.commitTransaction()
recordsProcessed += records.size
debug(s"committed transaction.. Last committed record: ${new String(records.last.value(), "UTF-8")}. Num " +
s"records written to $topic2: $recordsProcessed")
} else {
producer.abortTransaction()
debug(s"aborted transaction Last committed record: ${new String(records.last.value(), "UTF-8")}. Num " +
s"records written to $topic2: $recordsProcessed")
TestUtils.resetToCommittedPositions(consumer)
}
}
} finally {
consumer.close()
}
    // In spite of random aborts, we should still have exactly numSeedMessages messages in topic2, i.e. we should not
// re-copy or miss any messages from topic1, since the consumed offsets were committed transactionally.
val verifyingConsumer = transactionalConsumers(0)
verifyingConsumer.subscribe(List(topic2).asJava)
val valueSeq = TestUtils.pollUntilAtLeastNumRecords(verifyingConsumer, numSeedMessages).map { record =>
TestUtils.assertCommittedAndGetValue(record).toInt
}
val valueSet = valueSeq.toSet
assertEquals(s"Expected $numSeedMessages values in $topic2.", numSeedMessages, valueSeq.size)
assertEquals(s"Expected ${valueSeq.size} unique messages in $topic2.", valueSeq.size, valueSet.size)
}
@Test
def testFencingOnCommit() = {
val producer1 = transactionalProducers(0)
val producer2 = transactionalProducers(1)
val consumer = transactionalConsumers(0)
consumer.subscribe(List(topic1, topic2).asJava)
producer1.initTransactions()
producer1.beginTransaction()
producer1.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, "1", "1", willBeCommitted = false))
producer1.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, "3", "3", willBeCommitted = false))
producer2.initTransactions() // ok, will abort the open transaction.
producer2.beginTransaction()
producer2.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, "2", "4", willBeCommitted = true))
producer2.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, "2", "4", willBeCommitted = true))
try {
producer1.commitTransaction()
fail("Should not be able to commit transactions from a fenced producer.")
} catch {
case _: ProducerFencedException =>
// good!
case e: Exception =>
fail("Got an unexpected exception from a fenced producer.", e)
}
producer2.commitTransaction() // ok
val records = consumeRecords(consumer, 2)
records.foreach { record =>
TestUtils.assertCommittedAndGetValue(record)
}
}
@Test
def testFencingOnSendOffsets() = {
val producer1 = transactionalProducers(0)
val producer2 = transactionalProducers(1)
val consumer = transactionalConsumers(0)
consumer.subscribe(List(topic1, topic2).asJava)
producer1.initTransactions()
producer1.beginTransaction()
producer1.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, "1", "1", willBeCommitted = false))
producer1.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, "3", "3", willBeCommitted = false))
producer2.initTransactions() // ok, will abort the open transaction.
producer2.beginTransaction()
producer2.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, "2", "4", willBeCommitted = true))
producer2.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, "2", "4", willBeCommitted = true))
try {
producer1.sendOffsetsToTransaction(Map(new TopicPartition("foobartopic", 0) -> new OffsetAndMetadata(110L)).asJava,
"foobarGroup")
fail("Should not be able to send offsets from a fenced producer.")
} catch {
case _: ProducerFencedException =>
// good!
case e: Exception =>
fail("Got an unexpected exception from a fenced producer.", e)
}
producer2.commitTransaction() // ok
val records = consumeRecords(consumer, 2)
records.foreach { record =>
TestUtils.assertCommittedAndGetValue(record)
}
}
@Test
def testOffsetMetadataInSendOffsetsToTransaction() = {
val tp = new TopicPartition(topic1, 0)
val groupId = "group"
val producer = transactionalProducers.head
val consumer = createReadCommittedConsumer(groupId)
consumer.subscribe(List(topic1).asJava)
producer.initTransactions()
producer.beginTransaction()
val offsetAndMetadata = new OffsetAndMetadata(110L, Optional.of(15), "some metadata")
producer.sendOffsetsToTransaction(Map(tp -> offsetAndMetadata).asJava, groupId)
producer.commitTransaction() // ok
// The call to commit the transaction may return before all markers are visible, so we initialize a second
// producer to ensure the transaction completes and the committed offsets are visible.
val producer2 = transactionalProducers(1)
producer2.initTransactions()
TestUtils.waitUntilTrue(() => offsetAndMetadata.equals(consumer.committed(Set(tp).asJava).get(tp)), "cannot read committed offset")
}
@Test
def testFencingOnSend(): Unit = {
val producer1 = transactionalProducers(0)
val producer2 = transactionalProducers(1)
val consumer = transactionalConsumers(0)
consumer.subscribe(List(topic1, topic2).asJava)
producer1.initTransactions()
producer1.beginTransaction()
producer1.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, "1", "1", willBeCommitted = false))
producer1.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, "3", "3", willBeCommitted = false))
producer2.initTransactions() // ok, will abort the open transaction.
producer2.beginTransaction()
producer2.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, "2", "4", willBeCommitted = true)).get()
producer2.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, "2", "4", willBeCommitted = true)).get()
try {
val result = producer1.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, "1", "5", willBeCommitted = false))
val recordMetadata = result.get()
error(s"Missed a producer fenced exception when writing to ${recordMetadata.topic}-${recordMetadata.partition}. Grab the logs!!")
servers.foreach { server =>
error(s"log dirs: ${server.logManager.liveLogDirs.map(_.getAbsolutePath).head}")
}
fail("Should not be able to send messages from a fenced producer.")
} catch {
case _: ProducerFencedException =>
producer1.close()
case e: ExecutionException =>
assertTrue(e.getCause.isInstanceOf[ProducerFencedException])
case e: Exception =>
fail("Got an unexpected exception from a fenced producer.", e)
}
producer2.commitTransaction() // ok
val records = consumeRecords(consumer, 2)
records.foreach { record =>
TestUtils.assertCommittedAndGetValue(record)
}
}
@Test
def testFencingOnAddPartitions(): Unit = {
val producer1 = transactionalProducers(0)
val producer2 = transactionalProducers(1)
val consumer = transactionalConsumers(0)
consumer.subscribe(List(topic1, topic2).asJava)
producer1.initTransactions()
producer1.beginTransaction()
producer1.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, "1", "1", willBeCommitted = false))
producer1.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, "3", "3", willBeCommitted = false))
producer1.abortTransaction()
producer2.initTransactions() // ok, will abort the open transaction.
producer2.beginTransaction()
producer2.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, "2", "4", willBeCommitted = true))
.get(20, TimeUnit.SECONDS)
producer2.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic2, "2", "4", willBeCommitted = true))
.get(20, TimeUnit.SECONDS)
try {
producer1.beginTransaction()
val result = producer1.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, "1", "5", willBeCommitted = false))
val recordMetadata = result.get()
error(s"Missed a producer fenced exception when writing to ${recordMetadata.topic}-${recordMetadata.partition}. Grab the logs!!")
servers.foreach { server =>
error(s"log dirs: ${server.logManager.liveLogDirs.map(_.getAbsolutePath).head}")
}
fail("Should not be able to send messages from a fenced producer.")
} catch {
case _: ProducerFencedException =>
case e: ExecutionException =>
assertTrue(e.getCause.isInstanceOf[ProducerFencedException])
case e: Exception =>
fail("Got an unexpected exception from a fenced producer.", e)
}
producer2.commitTransaction() // ok
val records = consumeRecords(consumer, 2)
records.foreach { record =>
TestUtils.assertCommittedAndGetValue(record)
}
}
@Test
def testFencingOnTransactionExpiration(): Unit = {
val producer = createTransactionalProducer("expiringProducer", transactionTimeoutMs = 100)
producer.initTransactions()
producer.beginTransaction()
// The first message and hence the first AddPartitions request should be successfully sent.
val firstMessageResult = producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, "1", "1", willBeCommitted = false)).get()
assertTrue(firstMessageResult.hasOffset)
// Wait for the expiration cycle to kick in.
Thread.sleep(600)
try {
// Now that the transaction has expired, the second send should fail with a ProducerFencedException.
producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic1, "2", "2", willBeCommitted = false)).get()
fail("should have raised a ProducerFencedException since the transaction has expired")
} catch {
case _: ProducerFencedException =>
case e: ExecutionException =>
assertTrue(e.getCause.isInstanceOf[ProducerFencedException])
}
// Verify that the first message was aborted and the second one was never written at all.
val nonTransactionalConsumer = nonTransactionalConsumers.head
nonTransactionalConsumer.subscribe(List(topic1).asJava)
// Attempt to consume the one written record. We should not see the second. The
// assertion does not strictly guarantee that the record wasn't written, but the
// data is small enough that had it been written, it would have been in the first fetch.
val records = TestUtils.consumeRecords(nonTransactionalConsumer, numRecords = 1)
assertEquals(1, records.size)
assertEquals("1", TestUtils.recordValueAsString(records.head))
val transactionalConsumer = transactionalConsumers.head
transactionalConsumer.subscribe(List(topic1).asJava)
val transactionalRecords = TestUtils.consumeRecordsFor(transactionalConsumer, 1000)
assertTrue(transactionalRecords.isEmpty)
}
@Test
def testMultipleMarkersOneLeader(): Unit = {
val firstProducer = transactionalProducers.head
val consumer = transactionalConsumers.head
val unCommittedConsumer = nonTransactionalConsumers.head
val topicWith10Partitions = "largeTopic"
val topicWith10PartitionsAndOneReplica = "largeTopicOneReplica"
val topicConfig = new Properties()
topicConfig.put(KafkaConfig.MinInSyncReplicasProp, 2.toString)
createTopic(topicWith10Partitions, 10, numServers, topicConfig)
createTopic(topicWith10PartitionsAndOneReplica, 10, 1, new Properties())
firstProducer.initTransactions()
firstProducer.beginTransaction()
sendTransactionalMessagesWithValueRange(firstProducer, topicWith10Partitions, 0, 5000, willBeCommitted = false)
sendTransactionalMessagesWithValueRange(firstProducer, topicWith10PartitionsAndOneReplica, 5000, 10000, willBeCommitted = false)
firstProducer.abortTransaction()
firstProducer.beginTransaction()
sendTransactionalMessagesWithValueRange(firstProducer, topicWith10Partitions, 10000, 11000, willBeCommitted = true)
firstProducer.commitTransaction()
consumer.subscribe(List(topicWith10PartitionsAndOneReplica, topicWith10Partitions).asJava)
unCommittedConsumer.subscribe(List(topicWith10PartitionsAndOneReplica, topicWith10Partitions).asJava)
val records = consumeRecords(consumer, 1000)
records.foreach { record =>
TestUtils.assertCommittedAndGetValue(record)
}
val allRecords = consumeRecords(unCommittedConsumer, 11000)
val expectedValues = Range(0, 11000).map(_.toString).toSet
allRecords.foreach { record =>
assertTrue(expectedValues.contains(TestUtils.recordValueAsString(record)))
}
}
@Test(expected = classOf[KafkaException])
def testConsecutivelyRunInitTransactions(): Unit = {
val producer = createTransactionalProducer(transactionalId = "normalProducer")
producer.initTransactions()
producer.initTransactions()
fail("Should have raised a KafkaException")
}
@Test(expected = classOf[TimeoutException])
def testCommitTransactionTimeout(): Unit = {
val producer = createTransactionalProducer("transactionalProducer", maxBlockMs = 1000)
producer.initTransactions()
producer.beginTransaction()
producer.send(new ProducerRecord[Array[Byte], Array[Byte]](topic1, "foobar".getBytes))
for (i <- 0 until servers.size)
      killBroker(i) // pretend all brokers are unavailable
try {
producer.commitTransaction()
} finally {
producer.close(Duration.ZERO)
}
}
private def sendTransactionalMessagesWithValueRange(producer: KafkaProducer[Array[Byte], Array[Byte]], topic: String,
start: Int, end: Int, willBeCommitted: Boolean): Unit = {
for (i <- start until end) {
producer.send(TestUtils.producerRecordWithExpectedTransactionStatus(topic, i.toString, i.toString, willBeCommitted))
}
producer.flush()
}
private def serverProps() = {
val serverProps = new Properties()
serverProps.put(KafkaConfig.AutoCreateTopicsEnableProp, false.toString)
    // Set a smaller number of partitions for the __consumer_offsets topic
    // so that the creation of that topic/partition(s) and subsequent leader assignment doesn't take too long
serverProps.put(KafkaConfig.OffsetsTopicPartitionsProp, 1.toString)
serverProps.put(KafkaConfig.TransactionsTopicPartitionsProp, 3.toString)
serverProps.put(KafkaConfig.TransactionsTopicReplicationFactorProp, 2.toString)
serverProps.put(KafkaConfig.TransactionsTopicMinISRProp, 2.toString)
serverProps.put(KafkaConfig.ControlledShutdownEnableProp, true.toString)
serverProps.put(KafkaConfig.UncleanLeaderElectionEnableProp, false.toString)
serverProps.put(KafkaConfig.AutoLeaderRebalanceEnableProp, false.toString)
serverProps.put(KafkaConfig.GroupInitialRebalanceDelayMsProp, "0")
serverProps.put(KafkaConfig.TransactionsAbortTimedOutTransactionCleanupIntervalMsProp, "200")
serverProps
}
private def createReadCommittedConsumer(group: String = "group",
maxPollRecords: Int = 500,
props: Properties = new Properties) = {
val consumer = TestUtils.createConsumer(TestUtils.getBrokerListStrFromServers(servers),
groupId = group,
enableAutoCommit = false,
readCommitted = true,
maxPollRecords = maxPollRecords)
transactionalConsumers += consumer
consumer
}
private def createReadUncommittedConsumer(group: String) = {
val consumer = TestUtils.createConsumer(TestUtils.getBrokerListStrFromServers(servers),
groupId = group,
enableAutoCommit = false)
nonTransactionalConsumers += consumer
consumer
}
private def createTransactionalProducer(transactionalId: String,
transactionTimeoutMs: Long = 60000,
maxBlockMs: Long = 60000): KafkaProducer[Array[Byte], Array[Byte]] = {
val producer = TestUtils.createTransactionalProducer(transactionalId, servers,
transactionTimeoutMs = transactionTimeoutMs,
maxBlockMs = maxBlockMs)
transactionalProducers += producer
producer
}
}
| noslowerdna/kafka | core/src/test/scala/integration/kafka/api/TransactionsTest.scala | Scala | apache-2.0 | 27,503 |
package com.github.opengrabeso.mixtio.facade
import org.scalajs.dom
import scala.scalajs.js
import scala.scalajs.js.annotation._
import mapboxgl_util._
import GeoJSON._
@JSGlobal
@js.native
object mapboxgl extends js.Any {
var accessToken: String = js.native
@js.native // https://github.com/mapbox/mapbox-gl-js/blob/master/flow-typed/point-geometry.js
class Point(val x: Double, val y: Double) extends js.Object {
}
@js.native // https://github.com/mapbox/mapbox-gl-js/blob/master/src/util/evented.js
class Event(val `type`: String, data: js.Object = js.native) extends js.Object {
}
@js.native // https://github.com/mapbox/mapbox-gl-js/blob/master/src/ui/events.js
class MapMouseEvent(`type`: String, data: js.Object = js.native) extends Event(`type`, data) {
val target: Map = js.native
val originalEvent: dom.MouseEvent = js.native
val point: Point = js.native
val lngLat: LngLat = js.native
}
@js.native // https://github.com/mapbox/mapbox-gl-js/blob/master/src/util/evented.js
class Evented extends js.Object {
def on(event: String, callback: js.Function1[Event, Unit]): Unit = js.native
def off(event: String, callback: js.Function1[Event, Unit]): Unit = js.native
}
@js.native // https://github.com/mapbox/mapbox-gl-js/blob/master/src/ui/map.js
class Map(options: js.Object) extends Evented {
def queryRenderedFeatures(geometry: Point, options: js.Object): js.Array[Feature] = js.native
def queryRenderedFeatures(geometry: js.Array[Point], options: js.Object): js.Array[Feature] = js.native
def queryRenderedFeatures(options: js.Object = js.native): js.Array[Feature] = js.native
def addSource(name: String, content: js.Object): Unit = js.native
def addLayer(layer: js.Object): Unit = js.native
def getContainer(): dom.Element = js.native
def getSource(name: String): js.UndefOr[js.Dynamic] = js.native
def getBounds(): LngLatBounds = js.native
def getCanvas(): dom.raw.HTMLCanvasElement = js.native
def setPaintProperty(name1: String, name2: String, value: js.Any): Unit = js.native
def setLayoutProperty(name1: String, name2: String, value: js.Any): Unit = js.native
def fitBounds(bounds: LngLatBounds, options: js.Object = js.native, eventData: js.Object = js.native): Unit = js.native
def unproject(p: Point): LngLat = js.native
}
@js.native // https://github.com/mapbox/mapbox-gl-js/blob/master/src/ui/popup.js
class Popup extends js.Object {
def remove(): Unit = js.native
def setLngLat(lngLat: LngLat): Popup = js.native
def setHTML(html: String): Popup = js.native
def setText(html: String): Popup = js.native
def setDOMContent(node: dom.Node): Popup = js.native
def addTo(map: Map): Popup = js.native
}
}
| OndrejSpanel/Stravamat | frontend/src/main/scala/com/github/opengrabeso/mixtio/facade/mapboxgl.scala | Scala | gpl-2.0 | 2,774 |
package org.allenai.common
import org.allenai.common.testkit.UnitSpec
import spray.json._
import java.io.FileInputStream
import java.io.FileOutputStream
import java.io.ObjectInputStream
import java.io.ObjectOutputStream
import java.nio.file.Files
class EnumSpec extends UnitSpec {
"all" should "return all registerd Enum's" in {
assert(FakeEnum.all.size === 3)
assert(FakeEnum.all.toSet === Set(FakeEnum.Value1, FakeEnum.Value2, FakeEnum.Value3))
}
"withId" should "retrieve correct Enum" in {
assert(FakeEnum.withId("Value1") === FakeEnum.Value1)
assert(FakeEnum.withId("Value2") === FakeEnum.Value2)
assert(FakeEnum.withId("Value3") === FakeEnum.Value3)
}
it should "throw NoSuchElementException" in {
intercept[NoSuchElementException] {
FakeEnum.withId("foo")
}
}
"toString" should "act like builtin Enumeration" in {
assert(FakeEnum.Value1.toString === "Value1")
}
"JSON serialization" should "work" in {
FakeEnum.all foreach { enum =>
val js = enum.toJson
assert(js.convertTo[FakeEnum] eq enum)
}
}
"Java serialization" should "work with no constructor argument" in {
FakeEnum.all foreach { enum =>
val tmp = Files.createTempFile(enum.id, "dat")
val tmpFile = tmp.toFile()
tmpFile.deleteOnExit()
Resource.using(new ObjectOutputStream(new FileOutputStream(tmpFile))) { os =>
os.writeObject(enum)
}
val obj = Resource.using(new ObjectInputStream(new FileInputStream(tmpFile))) { is =>
is.readObject()
}
obj should equal(enum)
tmpFile.delete()
}
}
it should "work with a constructor argument" in {
FakeEnumWithId.all foreach { enum =>
val tmp = Files.createTempFile(enum.id, "dat")
val tmpFile = tmp.toFile()
tmpFile.deleteOnExit()
Resource.using(new ObjectOutputStream(new FileOutputStream(tmpFile))) { os =>
os.writeObject(enum)
}
val obj = Resource.using(new ObjectInputStream(new FileInputStream(tmpFile))) { is =>
is.readObject()
}
obj should equal(enum)
tmpFile.delete()
}
}
}
// Test enum. Must be defined outside of spec otherwise serialization tests will
// fail due to scalatest WordSpec not being serializable.
sealed abstract class FakeEnum extends Enum[FakeEnum]
object FakeEnum extends EnumCompanion[FakeEnum] {
case object Value1 extends FakeEnum
case object Value2 extends FakeEnum
case object Value3 extends FakeEnum
register(Value1, Value2, Value3)
}
// Test enum. Must be defined outside of spec otherwise serialization tests will
// fail due to scalatest WordSpec not being serializable.
sealed abstract class FakeEnumWithId(override val id: String) extends Enum[FakeEnumWithId]
object FakeEnumWithId extends EnumCompanion[FakeEnumWithId] {
case object Value1 extends FakeEnumWithId("one")
case object Value2 extends FakeEnumWithId("two")
case object Value3 extends FakeEnumWithId("three")
register(Value1, Value2, Value3)
}
| allenai/common | core/src/test/scala/org/allenai/common/EnumSpec.scala | Scala | apache-2.0 | 3,018 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.codegen.over
import org.apache.flink.table.api.TableConfig
import org.apache.flink.table.planner.codegen.CodeGenUtils.{ROW_DATA, newName}
import org.apache.flink.table.planner.codegen.Indenter.toISC
import org.apache.flink.table.planner.codegen.{CodeGenUtils, CodeGeneratorContext, GenerateUtils}
import org.apache.flink.table.planner.plan.nodes.exec.spec.SortSpec
import org.apache.flink.table.runtime.generated.{GeneratedRecordComparator, RecordComparator}
import org.apache.flink.table.types.logical.RowType
/**
 * RANGE allows a compound ORDER BY clause and arbitrary key types when the bound is the current row.
*/
class MultiFieldRangeBoundComparatorCodeGenerator(
tableConfig: TableConfig,
inputType: RowType,
sortSpec: SortSpec,
isLowerBound: Boolean = true) {
def generateBoundComparator(name: String): GeneratedRecordComparator = {
val className = newName(name)
val input = CodeGenUtils.DEFAULT_INPUT1_TERM
val current = CodeGenUtils.DEFAULT_INPUT2_TERM
    // Return 1 or -1 explicitly to avoid losing precision when a long comparison result is cast to int.
def generateReturnCode(comp: String): String = {
if (isLowerBound) s"return $comp >= 0 ? 1 : -1;" else s"return $comp > 0 ? 1 : -1;"
}
val ctx = CodeGeneratorContext(tableConfig)
val compareCode = GenerateUtils.generateRowCompare(ctx, inputType, sortSpec, input, current)
val code =
j"""
public class $className implements ${classOf[RecordComparator].getCanonicalName} {
private final Object[] references;
${ctx.reuseMemberCode()}
public $className(Object[] references) {
this.references = references;
${ctx.reuseInitCode()}
${ctx.reuseOpenCode()}
}
@Override
public int compare($ROW_DATA $input, $ROW_DATA $current) {
int ret = compareInternal($input, $current);
${generateReturnCode("ret")}
}
private int compareInternal($ROW_DATA $input, $ROW_DATA $current) {
$compareCode
return 0;
}
}
""".stripMargin
new GeneratedRecordComparator(
className, code, ctx.references.toArray, ctx.tableConfig.getConfiguration)
}
}
| apache/flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/planner/codegen/over/MultiFieldRangeBoundComparatorCodeGenerator.scala | Scala | apache-2.0 | 3,030 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.runtime
import java.io.InputStream
import org.apache.flink.api.common.ExecutionConfig
import org.apache.flink.api.common.typeutils.{TypeSerializer, TypeSerializerSerializationUtil, TypeSerializerSnapshot}
import org.apache.flink.api.java.typeutils.runtime.TupleSerializerConfigSnapshot
import org.apache.flink.api.scala.createTypeInformation
import org.apache.flink.api.scala.runtime.TupleSerializerCompatibilityTestGenerator._
import org.apache.flink.api.scala.typeutils.CaseClassSerializer
import org.apache.flink.core.memory.DataInputViewStreamWrapper
import org.junit.Assert.{assertEquals, assertNotNull, assertTrue}
import org.junit.Test
/**
* Test for ensuring backwards compatibility of tuples and case classes across Scala versions.
*/
class TupleSerializerCompatibilityTest {
@Test
def testCompatibilityWithFlink_1_3(): Unit = {
var is: InputStream = null
try {
is = getClass.getClassLoader.getResourceAsStream(SNAPSHOT_RESOURCE)
val snapshotIn = new DataInputViewStreamWrapper(is)
val deserialized = TypeSerializerSerializationUtil.readSerializersAndConfigsWithResilience(
snapshotIn,
getClass.getClassLoader)
assertEquals(1, deserialized.size)
val oldSerializer: TypeSerializer[TestCaseClass] =
deserialized.get(0).f0.asInstanceOf[TypeSerializer[TestCaseClass]]
val oldConfigSnapshot: TypeSerializerSnapshot[TestCaseClass] =
deserialized.get(0).f1.asInstanceOf[TypeSerializerSnapshot[TestCaseClass]]
// test serializer and config snapshot
assertNotNull(oldSerializer)
assertNotNull(oldConfigSnapshot)
assertTrue(oldSerializer.isInstanceOf[CaseClassSerializer[_]])
assertTrue(oldConfigSnapshot.isInstanceOf[TupleSerializerConfigSnapshot[_]])
assertTrue(oldConfigSnapshot.isInstanceOf[TupleSerializerConfigSnapshot[_]])
val currentSerializer = createTypeInformation[TestCaseClass]
.createSerializer(new ExecutionConfig())
assertTrue(oldConfigSnapshot
.resolveSchemaCompatibility(currentSerializer)
.isCompatibleAsIs)
// test old data serialization
is.close()
is = getClass.getClassLoader.getResourceAsStream(DATA_RESOURCE)
var dataIn = new DataInputViewStreamWrapper(is)
assertEquals(TEST_DATA_1, oldSerializer.deserialize(dataIn))
assertEquals(TEST_DATA_2, oldSerializer.deserialize(dataIn))
assertEquals(TEST_DATA_3, oldSerializer.deserialize(dataIn))
// test new data serialization
is.close()
is = getClass.getClassLoader.getResourceAsStream(DATA_RESOURCE)
dataIn = new DataInputViewStreamWrapper(is)
assertEquals(TEST_DATA_1, currentSerializer.deserialize(dataIn))
assertEquals(TEST_DATA_2, currentSerializer.deserialize(dataIn))
assertEquals(TEST_DATA_3, currentSerializer.deserialize(dataIn))
} finally {
if (is != null) {
is.close()
}
}
}
}
| hequn8128/flink | flink-scala/src/test/scala/org/apache/flink/api/scala/runtime/TupleSerializerCompatibilityTest.scala | Scala | apache-2.0 | 3,783 |
import org.clapper.classutil.ClassFinder
import java.io.File
import com.factor10.plugins._
object HosturApplication {
def main(args: Array[String]): Unit = {
println("HosturApplication")
val classpath = args.map(new File(_))
val finder = ClassFinder(classpath)
    val classes = finder.getClasses // classes is a Stream[ClassInfo]
val classMap = ClassFinder.classInfoMap(classes.toIterator) // runs stream out, once
val plugins = ClassFinder.concreteSubclasses("com.factor10.plugins.FormattingPlugin", classMap)
val data = Map("some" -> "field")
plugins.foreach { pluginString =>
val plugin: FormattingPlugin = Class.forName(pluginString.name).newInstance().asInstanceOf[FormattingPlugin]
println(s"${plugin.name} (${pluginString.name}): ${plugin.convert(data)}")
}
}
} | marhel/splug | hostur/src/main/scala/Application.scala | Scala | mit | 798 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.hadoopcompatibility.scala
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.scala.hadoop.{mapred, mapreduce}
import org.apache.hadoop.fs.{Path => HadoopPath}
import org.apache.hadoop.mapred.{JobConf, FileInputFormat => MapredFileInputFormat, InputFormat => MapredInputFormat}
import org.apache.hadoop.mapreduce.lib.input.{FileInputFormat => MapreduceFileInputFormat}
import org.apache.hadoop.mapreduce.{Job, InputFormat => MapreduceInputFormat}
/**
* HadoopInputs is a utility class to use Apache Hadoop InputFormats with Apache Flink.
*
* It provides methods to create Flink InputFormat wrappers for Hadoop
* [[org.apache.hadoop.mapred.InputFormat]] and [[org.apache.hadoop.mapreduce.InputFormat]].
*
* Key value pairs produced by the Hadoop InputFormats are converted into [[Tuple2]] where
* the first field is the key and the second field is the value.
*
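 * A minimal usage sketch (the key/value classes and the path below are illustrative
 * assumptions, not part of this API):
 *
 * {{{
 *   import org.apache.flink.api.scala._
 *
 *   val env = ExecutionEnvironment.getExecutionEnvironment
 *   val input = env.createInput(HadoopInputs.readSequenceFile(
 *     classOf[org.apache.hadoop.io.LongWritable],
 *     classOf[org.apache.hadoop.io.Text],
 *     "hdfs:///path/to/sequence/file"))
 * }}}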
*/
object HadoopInputs {
/**
* Creates a Flink [[org.apache.flink.api.common.io.InputFormat]] that wraps the given Hadoop
* [[org.apache.hadoop.mapred.FileInputFormat]].
*/
def readHadoopFile[K, V](
mapredInputFormat: MapredFileInputFormat[K, V],
key: Class[K],
value: Class[V],
inputPath: String,
job: JobConf)(implicit tpe: TypeInformation[(K, V)]): mapred.HadoopInputFormat[K, V] = {
// set input path in JobConf
MapredFileInputFormat.addInputPath(job, new HadoopPath(inputPath))
// wrap mapredInputFormat
createHadoopInput(mapredInputFormat, key, value, job)
}
/**
* Creates a Flink [[org.apache.flink.api.common.io.InputFormat]] that wraps the given Hadoop
* [[org.apache.hadoop.mapred.FileInputFormat]].
*/
def readHadoopFile[K, V](
mapredInputFormat: MapredFileInputFormat[K, V],
key: Class[K],
value: Class[V],
inputPath: String)(implicit tpe: TypeInformation[(K, V)]): mapred.HadoopInputFormat[K, V] = {
readHadoopFile(mapredInputFormat, key, value, inputPath, new JobConf)
}
/**
* Creates a Flink [[org.apache.flink.api.common.io.InputFormat]] that reads a Hadoop sequence
* file with the given key and value classes.
*/
def readSequenceFile[K, V](
key: Class[K],
value: Class[V],
inputPath: String)(implicit tpe: TypeInformation[(K, V)]): mapred.HadoopInputFormat[K, V] = {
readHadoopFile(
new org.apache.hadoop.mapred.SequenceFileInputFormat[K, V],
key,
value,
inputPath
)
}
/**
* Creates a Flink [[org.apache.flink.api.common.io.InputFormat]] that wraps the given Hadoop
* [[org.apache.hadoop.mapred.InputFormat]].
*/
def createHadoopInput[K, V](
mapredInputFormat: MapredInputFormat[K, V],
key: Class[K],
value: Class[V],
job: JobConf)(implicit tpe: TypeInformation[(K, V)]): mapred.HadoopInputFormat[K, V] = {
new mapred.HadoopInputFormat[K, V](mapredInputFormat, key, value, job)
}
/**
* Creates a Flink [[org.apache.flink.api.common.io.InputFormat]] that wraps the given Hadoop
* [[org.apache.hadoop.mapreduce.lib.input.FileInputFormat]].
*/
def readHadoopFile[K, V](
mapreduceInputFormat: MapreduceFileInputFormat[K, V],
key: Class[K],
value: Class[V],
inputPath: String,
job: Job)(implicit tpe: TypeInformation[(K, V)]): mapreduce.HadoopInputFormat[K, V] = {
// set input path in Job
MapreduceFileInputFormat.addInputPath(job, new HadoopPath(inputPath))
// wrap mapreduceInputFormat
createHadoopInput(mapreduceInputFormat, key, value, job)
}
/**
* Creates a Flink [[org.apache.flink.api.common.io.InputFormat]] that wraps the given Hadoop
* [[org.apache.hadoop.mapreduce.lib.input.FileInputFormat]].
*/
def readHadoopFile[K, V](
mapreduceInputFormat: MapreduceFileInputFormat[K, V],
key: Class[K],
value: Class[V],
inputPath: String)(implicit tpe: TypeInformation[(K, V)]): mapreduce.HadoopInputFormat[K, V] =
{
readHadoopFile(mapreduceInputFormat, key, value, inputPath, Job.getInstance)
}
/**
* Creates a Flink [[org.apache.flink.api.common.io.InputFormat]] that wraps the given Hadoop
* [[org.apache.hadoop.mapreduce.InputFormat]].
*/
def createHadoopInput[K, V](
mapreduceInputFormat: MapreduceInputFormat[K, V],
key: Class[K],
value: Class[V],
job: Job)(implicit tpe: TypeInformation[(K, V)]): mapreduce.HadoopInputFormat[K, V] = {
new mapreduce.HadoopInputFormat[K, V](mapreduceInputFormat, key, value, job)
}
}
| jinglining/flink | flink-connectors/flink-hadoop-compatibility/src/main/scala/org/apache/flink/hadoopcompatibility/scala/HadoopInputs.scala | Scala | apache-2.0 | 5,380 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.keras.nn
import com.intel.analytics.bigdl.keras.KerasBaseSpec
import com.intel.analytics.bigdl.nn.keras.{Sequential => KSequential}
import com.intel.analytics.bigdl.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.nn.keras.Dense
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.Shape
import com.intel.analytics.bigdl.utils.serializer.ModuleSerializationTest
import scala.util.Random
class DenseSpec extends KerasBaseSpec {
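  // Keras stores Dense weights as (inputDim, outputDim) while BigDL's Linear expects
  // (outputDim, inputDim), so the weight tensor is transposed here; the bias passes through unchanged.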
def weightConverter(in: Array[Tensor[Float]]): Array[Tensor[Float]] = Array(in(0).t(), in(1))
"Dense" should "be the same as Keras" in {
val kerasCode =
"""
|input_tensor = Input(shape=[3])
|input = np.random.uniform(0, 1, [1, 3])
|output_tensor = Dense(2, activation="relu")(input_tensor)
|model = Model(input=input_tensor, output=output_tensor)
""".stripMargin
val seq = KSequential[Float]()
val dense = Dense[Float](2, activation = "relu", inputShape = Shape(3))
seq.add(dense)
seq.getOutputShape().toSingle().toArray should be (Array(-1, 2))
checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
kerasCode, weightConverter)
}
"Dense nD input" should "be the same as Keras" in {
val kerasCode =
"""
|input_tensor = Input(shape=[10, 5, 7])
|input = np.random.uniform(0, 1, [2, 10, 5, 7])
|output_tensor = \\
|Dense(2, init='one', input_shape=(10, 5, 7))(input_tensor)
|model = Model(input=input_tensor, output=output_tensor)
""".stripMargin
val seq = KSequential[Float]()
val dense = Dense[Float](2, init = "one", inputShape = Shape(10, 5, 7))
seq.add(dense)
seq.getOutputShape().toSingle().toArray should be (Array(-1, 10, 5, 2))
checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
kerasCode, weightConverter, precision = 1e-4)
}
}
class DenseSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val dense = Dense[Float](10, inputShape = Shape(20))
dense.build(Shape(2, 20))
val input = Tensor[Float](2, 20).apply1(_ => Random.nextFloat())
runSerializationTest(dense, input)
}
}
| wzhongyuan/BigDL | spark/dl/src/test/scala/com/intel/analytics/bigdl/keras/nn/DenseSpec.scala | Scala | apache-2.0 | 2,881 |
package org.oc.ld32.entity.ai
import org.oc.ld32.entity.EntityEnemy
object Tasks {
def createFromID(entity: EntityEnemy, id: String, priority: Int = 1): AITask = {
if(id.equals("patrol")) {
new AIPatrol(priority, entity)
} else if(id.equals("wander")) {
new AIWander(priority, entity)
} else {
null
}
}
}
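/**
 * Base behaviour for an enemy AI task. Each task carries a priority and an owning entity;
 * `canExecute` gates whether the task may start, `shouldContinue` decides whether a running
 * task keeps control, and `perform` advances the task by the elapsed time `delta`.
 */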
abstract class AITask(val priority: Int, val owner: EntityEnemy) {
var running = false
def start = running = true
def stop = running = false
def shouldContinue: Boolean
def canExecute: Boolean
def reset: Unit
def perform(delta: Float): Unit
}
| OurCraft/LD32 | src/main/scala/org/oc/ld32/entity/ai/AITask.scala | Scala | apache-2.0 | 628 |
package com.sksamuel.scapegoat.inspections
import com.sksamuel.scapegoat.PluginRunner
import org.scalatest.{FreeSpec, Matchers}
/** @author Stephen Samuel */
class EmptyMethodTest extends FreeSpec with ASTSugar with Matchers with PluginRunner {
override val inspections = Seq(new EmptyMethod)
"empty empty" - {
"should report warning" in {
val code = """object Test {
def foo = { }
def foo2 = true
def foo3 = {
()
}
def foo4 = {
"sammy"
()
}
} """.stripMargin
compileCodeSnippet(code)
compiler.scapegoat.reporter.warnings.size shouldBe 2
}
}
}
| RichardBradley/scapegoat | src/test/scala/com/sksamuel/scapegoat/inspections/EmptyMethodTest.scala | Scala | apache-2.0 | 806 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mongo.channel.test.join
import java.util.concurrent.{ TimeUnit, CountDownLatch }
import java.util.concurrent.atomic.AtomicLong
import com.mongodb.{ BasicDBObject, DBObject }
import mongo.channel.test.mongo.{ MongoDbEnviroment, MongoIntegrationEnv }
import org.scalatest.concurrent.ScalaFutures
import org.specs2.mutable.Specification
import rx.lang.scala.Subscriber
import rx.lang.scala.schedulers.ExecutionContextScheduler
import scala.collection.mutable
import scala.concurrent.ExecutionContext
import scalaz.concurrent.Task
import scalaz.stream.Process._
import scalaz.stream.io
class JoinMongoSpec extends Specification with ScalaFutures {
import MongoIntegrationEnv._
import join.Join
import mongo._
import dsl.mongo._
import join.mongo.{ MongoProcess, MongoObservable, MongoObsCursorError, MongoObsFetchError }
val pageSize = 7
val qLang = for {
_ ← "index" $gte 0 $lte 5
_ ← sort("index" → Order.Ascending)
q ← limit(6)
} yield q
def count = new CountDownLatch(1)
def responses = new AtomicLong(0)
"Join with MongoProcess" in new MongoDbEnviroment {
initMongo
type Module = MongoProcess
def qProg(outer: Module#Record) = for { q ← "lang" $eq outer.get("index").asInstanceOf[Int] } yield q
val cmd: (Module#Record, Module#Record) ⇒ String =
(outer, inner) ⇒
s"PK:${outer.get("index")} - [FK:${inner.get("lang")} - ${inner.get("name")}]"
val buffer = mutable.Buffer.empty[String]
val SinkBuffer = io.fillBuffer(buffer)
implicit val c = client
val join = (Join[Module] inner (qLang, LANGS, qProg(_), PROGRAMMERS, TEST_DB))(cmd)
(for {
joinLine ← eval(Task.now(client.getDB(TEST_DB))) through join.source
_ ← joinLine to SinkBuffer
} yield ())
.onFailure { ex ⇒ logger.debug(s"MongoProcess has been completed with error: ${ex.getMessage}"); halt }
.onComplete(eval_(Task.delay { c.close }))
.runLog.run
buffer.size === MongoIntegrationEnv.programmersSize
}
"Join with MongoObservable" in new MongoDbEnviroment {
initMongo
type Module = MongoObservable
def qProg(outer: Module#Record) = for { q ← "lang" $eq outer.get("index").asInstanceOf[Int] } yield q
val cmb: (Module#Record, Module#Record) ⇒ String =
(outer, inner) ⇒
s"PK:${outer.get("index")} - [FK:${inner.get("lang")} - ${inner.get("name")}]"
val c0 = count
val res = responses
implicit val c = client
val join = (Join[Module] inner (qLang, LANGS, qProg(_), PROGRAMMERS, TEST_DB))(cmb)
val S = new Subscriber[String] {
override def onStart() = request(pageSize)
override def onNext(n: String) = {
logger.info(s"onNext: $n")
if (res.getAndIncrement % pageSize == 0) {
logger.info(s"★ ★ ★ Fetched page:[$pageSize] ★ ★ ★ ")
request(pageSize)
}
}
override def onError(e: Throwable) = {
logger.info(s"★ ★ ★ MongoObservable has been completed with error: ${e.getMessage}")
c0.countDown()
}
override def onCompleted() = {
logger.info("★ ★ ★ MongoObservable has been completed")
c0.countDown()
c.close()
}
}
join
.observeOn(ExecutionContextScheduler(ExecutionContext.fromExecutor(executor)))
.subscribe(S)
c0.await()
res.get === MongoIntegrationEnv.programmersSize
}
"Run Mongo Observable with MongoObsCursorError error" in new MongoDbEnviroment {
initMongo
type Module = MongoObsCursorError
val qLang = for { q ← "index" $gte 0 $lte 5 } yield q
def qProg(outer: Module#Record) = for { q ← "lang" $eq outer.get("index").asInstanceOf[Int] } yield q
val cmd: (Module#Record, Module#Record) ⇒ String =
(outer, inner) ⇒
s"PK:${outer.get("index")} - [FK:${inner.get("lang")} - ${inner.get("name")}]"
val c0 = count
val res = responses
implicit val c = client
val join = (Join[Module] inner (qLang, LANGS, qProg(_), PROGRAMMERS, TEST_DB))(cmd)
val S = new Subscriber[String] {
override def onStart() = request(pageSize)
override def onNext(n: String) = {
logger.info(s"onNext: $n")
if (responses.getAndIncrement() % pageSize == 0) {
request(pageSize)
}
}
override def onError(e: Throwable) = {
logger.info(s"★ ★ ★ MongoObsCursorError has been completed with error: ${e.getMessage}")
c0.countDown()
c.close()
}
override def onCompleted() = {
c.close()
logger.info("★ ★ ★ MongoObsCursorError has been completed")
}
}
join
.observeOn(ExecutionContextScheduler(ExecutionContext.fromExecutor(executor)))
.subscribe(S)
c0.await(5, TimeUnit.SECONDS) mustEqual true
}
"Run Mongo Observable with MongoObsFetchError" in new MongoDbEnviroment {
initMongo
type Module = MongoObsFetchError
val qLang = for { q ← "index" $gte 0 $lte 5 } yield q
def qProg(outer: Module#Record) = for { q ← "lang" $eq outer.get("index").asInstanceOf[Int] } yield q
val cmd: (Module#Record, Module#Record) ⇒ String =
(outer, inner) ⇒
s"PK:${outer.get("index")} - [FK:${inner.get("lang")} - ${inner.get("name")}]"
val c0 = count
val res = responses
implicit val c = client
val query = (Join[Module] inner (qLang, LANGS, qProg(_), PROGRAMMERS, TEST_DB))(cmd)
val S = new Subscriber[String] {
override def onStart() = request(pageSize)
override def onNext(n: String) = {
logger.info(s"onNext: $n")
if (responses.getAndIncrement() % pageSize == 0) {
request(pageSize)
}
}
override def onError(e: Throwable) = {
logger.info(s"★ ★ ★ MongoObsFetchError has been completed with error: ${e.getMessage}")
c0.countDown()
c.close()
}
override def onCompleted() = {
c.close()
logger.info("★ ★ ★ MongoObsFetchError has been completed")
}
}
query
.observeOn(ExecutionContextScheduler(ExecutionContext.fromExecutor(executor)))
.subscribe(S)
c0.await(5, TimeUnit.SECONDS) mustEqual true
}
"Convert to case class" in {
import dbtypes._
case class DbRecord(persistence_id: String, partition_nr: Long, sequence_nr: Long)
val x: DBObject = new BasicDBObject()
.append("persistence_id", "key-a")
.append("partition_nr", 1l)
.append("sequence_nr", 100l)
val out = x.as[DbRecord]
println(out)
true mustEqual true
}
} | haghard/nosql-join-stream | src/test/scala/mongo/channel/test/join/JoinMongoSpec.scala | Scala | apache-2.0 | 7,197 |
package com.twitter.finagle.serverset2.client
import java.util.concurrent.LinkedBlockingDeque
import com.twitter.util.{Monitor, Updatable}
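/**
 * A single daemon thread that delivers queued `WatchState` updates to their `Updatable`s in
 * FIFO order. Exceptions thrown while applying an update are handled by the default [[Monitor]].
 */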
private[client] object EventDeliveryThread
extends Thread("com.twitter.zookeeper.client.internal event delivery") {
private val q = new LinkedBlockingDeque[(Updatable[WatchState], WatchState)]
def offer(u: Updatable[WatchState], s: WatchState) {
q.offer((u, s))
}
override def run() {
while (true) {
val (u, s) = q.take()
try {
u() = s
} catch {
case exc: Throwable => Monitor.handle(exc)
}
}
}
setDaemon(true)
start()
}
| mkhq/finagle | finagle-serversets/src/main/scala/com/twitter/finagle/serverset2/client/EventDeliveryThread.scala | Scala | apache-2.0 | 635 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.tools
import joptsimple.OptionParser
import org.apache.kafka.common.security._
import kafka.utils.{CommandLineUtils, Exit, Logging, ZKGroupTopicDirs, ZkUtils}
@deprecated("This class has been deprecated and will be removed in a future release.", "0.11.0.0")
object VerifyConsumerRebalance extends Logging {
def main(args: Array[String]) {
val parser = new OptionParser(false)
warn("WARNING: VerifyConsumerRebalance is deprecated and will be dropped in a future release following 0.11.0.0.")
val zkConnectOpt = parser.accepts("zookeeper.connect", "ZooKeeper connect string.").
withRequiredArg().defaultsTo("localhost:2181").ofType(classOf[String])
val groupOpt = parser.accepts("group", "Consumer group.").
withRequiredArg().ofType(classOf[String])
parser.accepts("help", "Print this message.")
if(args.length == 0)
CommandLineUtils.printUsageAndDie(parser, "Validate that all partitions have a consumer for a given consumer group.")
val options = parser.parse(args : _*)
if (options.has("help")) {
parser.printHelpOn(System.out)
Exit.exit(0)
}
CommandLineUtils.checkRequiredArgs(parser, options, groupOpt)
val zkConnect = options.valueOf(zkConnectOpt)
val group = options.valueOf(groupOpt)
var zkUtils: ZkUtils = null
try {
zkUtils = ZkUtils(zkConnect,
30000,
30000,
JaasUtils.isZkSecurityEnabled())
debug("zkConnect = %s; group = %s".format(zkConnect, group))
// check if the rebalancing operation succeeded.
try {
if(validateRebalancingOperation(zkUtils, group))
println("Rebalance operation successful !")
else
println("Rebalance operation failed !")
} catch {
case e2: Throwable => error("Error while verifying current rebalancing operation", e2)
}
}
finally {
if (zkUtils != null)
zkUtils.close()
}
}
private def validateRebalancingOperation(zkUtils: ZkUtils, group: String): Boolean = {
info("Verifying rebalancing operation for consumer group " + group)
var rebalanceSucceeded: Boolean = true
/**
     * A successful rebalancing operation would select an owner for each available partition.
* This means that for each partition registered under /brokers/topics/[topic]/[broker-id], an owner exists
* under /consumers/[consumer_group]/owners/[topic]/[broker_id-partition_id]
*/
val consumersPerTopicMap = zkUtils.getConsumersPerTopic(group, excludeInternalTopics = false)
val partitionsPerTopicMap = zkUtils.getPartitionsForTopics(consumersPerTopicMap.keySet.toSeq)
partitionsPerTopicMap.foreach { case (topic, partitions) =>
val topicDirs = new ZKGroupTopicDirs(group, topic)
info("Alive partitions for topic %s are %s ".format(topic, partitions.toString))
info("Alive consumers for topic %s => %s ".format(topic, consumersPerTopicMap.get(topic)))
val partitionsWithOwners = zkUtils.getChildrenParentMayNotExist(topicDirs.consumerOwnerDir)
if(partitionsWithOwners.isEmpty) {
error("No owners for any partitions for topic " + topic)
rebalanceSucceeded = false
}
debug("Children of " + topicDirs.consumerOwnerDir + " = " + partitionsWithOwners.toString)
val consumerIdsForTopic = consumersPerTopicMap.get(topic)
// for each available partition for topic, check if an owner exists
partitions.foreach { partition =>
// check if there is a node for [partition]
if(!partitionsWithOwners.contains(partition.toString)) {
error("No owner for partition [%s,%d]".format(topic, partition))
rebalanceSucceeded = false
}
// try reading the partition owner path for see if a valid consumer id exists there
val partitionOwnerPath = topicDirs.consumerOwnerDir + "/" + partition
val partitionOwner = zkUtils.readDataMaybeNull(partitionOwnerPath)._1 match {
case Some(m) => m
case None => null
}
if(partitionOwner == null) {
error("No owner for partition [%s,%d]".format(topic, partition))
rebalanceSucceeded = false
}
else {
// check if the owner is a valid consumer id
consumerIdsForTopic match {
case Some(consumerIds) =>
if(!consumerIds.map(c => c.toString).contains(partitionOwner)) {
error(("Owner %s for partition [%s,%d] is not a valid member of consumer " +
"group %s").format(partitionOwner, topic, partition, group))
rebalanceSucceeded = false
}
else
info("Owner of partition [%s,%d] is %s".format(topic, partition, partitionOwner))
case None => {
error("No consumer ids registered for topic " + topic)
rebalanceSucceeded = false
}
}
}
}
}
rebalanceSucceeded
}
}
| wangcy6/storm_app | frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/main/scala/kafka/tools/VerifyConsumerRebalance.scala | Scala | apache-2.0 | 5,850 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.physical.rdbms.model
import slamdata.Predef._
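/**
 * Two-way mapping between Quasar column types and the database's column type names:
 * `map` renders a [[ColumnType]] as a type string and `comap` parses a type string back.
 */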
trait TypeMapper {
def map(ctpe: ColumnType): String
def comap(colTypeStr: String): ColumnType
}
object TypeMapper {
def apply(mapping: ColumnType => String, inverse: String => ColumnType): TypeMapper = new TypeMapper {
def map(ctpe: ColumnType): String = mapping(ctpe)
def comap(colTypeStr: String): ColumnType = inverse(colTypeStr)
}
} | jedesah/Quasar | rdbms/src/main/scala/quasar/physical/rdbms/model/TypeMapper.scala | Scala | apache-2.0 | 1,029 |
/* Copyright (c) 2016 Lucas Satabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package lingua
import scala.annotation.implicitNotFound
/** A proof that some input or output accepts epsilons and how to extract a non-epsilon value.
*
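 *  For example, with the provided `Option` instance, `NoEps` can extract the non-epsilon value
 *  directly in a pattern (a small illustrative sketch, not part of the original sources):
 *
 *  {{{
 *  val in: Option[Char] = Some('a')
 *  in match {
 *    case NoEps(c) => c   // extracts 'a'
 *    case _        => ??? // the epsilon (None) case
 *  }
 *  }}}
 *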
* @author Lucas Satabin
*/
@implicitNotFound("Could not prove that ${EpsIn} accepts epsilon values")
trait EpsilonProof[EpsIn, NoEpsIn] {
val Eps: EpsIn
def unapplyNoEps(in: EpsIn): Option[NoEpsIn]
def applyEps(in: NoEpsIn): EpsIn
}
object EpsilonProof {
implicit def OptionEpsilon[T]: EpsilonProof[Option[T], T] = new EpsilonProof[Option[T], T] {
val Eps = None
def unapplyNoEps(o: Option[T]) = o
def applyEps(t: T) = Some(t)
}
}
object NoEps {
@inline
def unapply[EpsIn, NoEpsIn](in: EpsIn)(implicit proof: EpsilonProof[EpsIn, NoEpsIn]): Option[NoEpsIn] =
proof.unapplyNoEps(in)
@inline
def apply[EpsIn, NoEpsIn](in: NoEpsIn)(implicit proof: EpsilonProof[EpsIn, NoEpsIn]): EpsIn =
proof.applyEps(in)
}
| satabin/lingua | fst/src/main/scala/lingua/EpsilonProof.scala | Scala | apache-2.0 | 1,508 |
package com.github.projectflink.common.als
import org.jblas.FloatMatrix
object ALSUtils {
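  /** Writes the lower-triangular part (diagonal included) of the outer product
    * `vector * vector^T` into `matrix`, packed row by row into `matrix.data`. */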
def outerProduct(vector: FloatMatrix, matrix: FloatMatrix, factors: Int): Unit = {
val vd = vector.data
val md = matrix.data
var row = 0
var pos = 0
while(row < factors){
var col = 0
while(col <= row){
md(pos) = vd(row) * vd(col)
col += 1
pos += 1
}
row += 1
}
}
def outerProductInPlace(vector: FloatMatrix, matrix: FloatMatrix, factors: Int): Unit = {
val vd = vector.data
val md = matrix.data
var row = 0
var pos = 0
while(row < factors){
var col = 0
while(col <= row){
md(pos) += vd(row) * vd(col)
col += 1
pos += 1
}
row += 1
}
}
  def generateFullMatrix(triangularMatrix: FloatMatrix, fmatrix: FloatMatrix, factors: Int): Unit = {
var row = 0
var pos = 0
val fmd = fmatrix.data
val tmd = triangularMatrix.data
while(row < factors){
var col = 0
while(col < row){
fmd(row*factors + col) = tmd(pos)
fmd(col*factors + row) = tmd(pos)
pos += 1
col += 1
}
fmd(row*factors + row) = tmd(pos)
pos += 1
row += 1
}
}
}
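
// A minimal usage sketch (not part of the original source), assuming jblas'
// FloatMatrix(float[]) and FloatMatrix(rows, cols) constructors. For a factor
// vector of length 2, the packed lower-triangular outer product has 3 entries:
// val v = new FloatMatrix(Array(1.0f, 2.0f))
// val packed = new FloatMatrix(3, 1)
// ALSUtils.outerProduct(v, packed, 2) // packed.data == Array(1.0f, 2.0f, 4.0f)
// val full = new FloatMatrix(2, 2)
// ALSUtils.generateFullMatrix(packed, full, 2) // expands to the symmetric 2x2 matrix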
| mxm/flink-perf | perf-common/src/main/scala/com/github/projectflink/common/als/ALSUtils.scala | Scala | apache-2.0 | 1,265 |
package org.http4s.headers
import cats.implicits._
import org.http4s.Uri
class RefererSpec extends HeaderLaws {
checkAll("Referer", headerLaws(`Retry-After`))
def getUri(uri: String): Uri =
Uri.fromString(uri).fold(_ => sys.error(s"Failure on uri: $uri"), identity)
"render" should {
"format an absolute url" in {
Referer(getUri("http://localhost:8080")).renderString must_== "Referer: http://localhost:8080"
}
"format a relative url" in {
Referer(getUri("../../index.html")).renderString must_== "Referer: ../../index.html"
}
}
"parse" should {
"accept absolute url" in {
Referer.parse("http://localhost:8080").map(_.uri) must beRight(getUri("http://localhost:8080"))
}
"accept relative url" in {
Referer.parse("../../index.html").map(_.uri) must beRight(getUri("../../index.html"))
}
}
}
| ZizhengTai/http4s | tests/src/test/scala/org/http4s/headers/RefererSpec.scala | Scala | apache-2.0 | 867 |
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.pipeline.api.keras.metrics
import com.intel.analytics.bigdl.nn.abstractnn.Activity
import com.intel.analytics.bigdl.optim.{ValidationMethod, ValidationResult}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import scala.collection.mutable.ArrayBuffer
/**
 * Represents an AUC (area under the ROC curve) result. It's equal to the probability that a
* classifier will rank a randomly chosen positive instance higher
* than a randomly chosen negative one.
* Refer: https://en.wikipedia.org/wiki/Receiver_operating_characteristic
* @param tp True positive numbers
* @param fp False positive numbers
* @param po Positive numbers
* @param ne Negative numbers
*/
class AucScore(private val tp: Tensor[Float], private val fp: Tensor[Float],
private var po: Tensor[Float], private var ne: Tensor[Float])
extends ValidationResult {
require(fp.dim() == tp.dim(), "fp dimension should be the same with tp dimension")
require(po.dim() == ne.dim(), "positive value dimension should be the" +
"same with negative value dimension")
override def result(): (Float, Int) = {
(getScores.last, (po.valueAt(1) + ne.valueAt(1)).toInt)
}
// scalastyle:off methodName
override def +(other: ValidationResult): ValidationResult = {
val otherResult = other.asInstanceOf[AucScore]
this.fp.add(otherResult.fp)
this.tp.add(otherResult.tp)
this.po.add(otherResult.po)
this.ne.add(otherResult.ne)
this
}
// scalastyle:on methodName
override protected def format(): String = {
val scores = getScores
var str = s"(Average score: ${scores.last}, count: ${(po.valueAt(1) + ne.valueAt(1)).toInt})"
if (scores.length > 1) {
str += s"\\nscore for each class is:\\n"
scores.take(scores.length - 1).foreach(s => str += s"${s} \\n")
}
str
}
private def computeAUC(slicedTp: Tensor[Float], slicedFp: Tensor[Float],
slicedPo: Float, slicedNe: Float): Float = {
val epsilon = 1e-6.toFloat
val tpr = slicedTp.clone().add(epsilon).div(slicedPo + epsilon)
val fpr = slicedFp.clone().div(slicedNe + epsilon)
(tpr.narrow(1, 1, tpr.nElement() - 1) + tpr.narrow(1, 2, tpr.nElement() - 1)).dot(
(fpr.narrow(1, 1, fpr.nElement() - 1) - fpr.narrow(1, 2, fpr.nElement() - 1))) / 2
}
  // Returns the scores: the first n elements are the AUC for each class; the last element is the average AUC.
private def getScores: Array[Float] = {
val scores = new ArrayBuffer[Float]()
if (fp.dim() == 1) scores.append(computeAUC(tp, fp, po.valueAt(1), ne.valueAt(1)))
else {
val classNum = fp.size(2)
val weights = Tensor.ones[Float](classNum)
for(i <- 1 to classNum) {
scores.append(computeAUC(tp.select(2, i), fp.select(2, i), po.valueAt(i), ne.valueAt(i)))
}
val averageScore = Tensor(scores.toArray, Array(classNum)).dot(weights) / weights.sum()
scores.append(averageScore)
}
scores.toArray
}
override def equals(obj: Any): Boolean = {
if (obj == null) {
return false
}
if (!obj.isInstanceOf[AucScore]) {
return false
}
val other = obj.asInstanceOf[AucScore]
if (this.eq(other)) {
return true
}
this.tp.equals(other.tp) &&
this.fp.equals(other.fp) &&
this.po == other.po &&
this.ne == other.ne
}
override def hashCode(): Int = {
val seed = 37
var hash = 1
hash = hash * seed + this.po.sum().toInt
hash = hash * seed + this.ne.sum().toInt
hash = hash * seed + this.fp.sum().toInt
hash = hash * seed + this.tp.sum().toInt
hash
}
}
/**
* Area under ROC cure.
 * Metric for binary (0/1) classification; supports both single-label and multi-label targets.
* @param thresholdNum The number of thresholds. The quality of approximation
* may vary depending on thresholdNum.
*/
class AUC[T](thresholdNum: Int = 200)(implicit ev: TensorNumeric[T])
extends ValidationMethod[T] {
override def apply(output: Activity, target: Activity):
ValidationResult = {
val _output = if (output.asInstanceOf[Tensor[T]].dim() == 2) {
output.asInstanceOf[Tensor[T]].clone().squeeze(2)
} else {
output.asInstanceOf[Tensor[T]].clone().squeeze()
}
val _target = if (target.asInstanceOf[Tensor[T]].dim() == 2) {
target.asInstanceOf[Tensor[T]].clone().squeeze(2)
} else {
target.asInstanceOf[Tensor[T]].clone().squeeze()
}
require(_output.dim() <= 2 && _target.dim() <= 2,
s"${_output.dim()} dim format is not supported")
require(_output.dim() == _target.dim(),
s"output dimension must be the same with target!!" +
s"out dimension is: ${_output.dim()}, target dimension is: ${_target.dim()}")
if (_output.dim() == 1) {
val (tp, fp, po, ne) = getRocCurve(_output, _target)
new AucScore(tp, fp, Tensor(Array[Float](po), Array(1)),
Tensor(Array[Float](ne), Array(1)))
} else {
val classNum = _output.size(2)
val _tp = Tensor[Float](thresholdNum, classNum)
val _fp = Tensor[Float](thresholdNum, classNum)
val _po = new Array[Float](classNum)
val _ne = new Array[Float](classNum)
for(i <- 1 to classNum) {
val _output_i = _output.select(2, i)
val _target_i = _target.select(2, i)
val res = getRocCurve(_output_i, _target_i)
_tp.select(2, i).copy(res._1)
_fp.select(2, i).copy(res._2)
_po(i - 1) = res._3
_ne(i - 1) = res._4
}
new AucScore(_tp, _fp, Tensor(_po, Array(classNum)), Tensor(_ne, Array(classNum)))
}
}
override def format(): String = {
"AucScore"
}
  // get tp (true positive), fp (false positive), positive and negative counts
private def getRocCurve(output: Tensor[T], target: Tensor[T]):
(Tensor[Float], Tensor[Float], Float, Float) = {
val thresholds = new Array[Float](thresholdNum)
val kepsilon = 1e-7.toFloat
thresholds(0) = 0 - kepsilon
thresholds(thresholdNum - 1) = 1 + kepsilon
for (i <- 1 until thresholdNum - 1) {
thresholds(i) = (i + 1) * 1.0f / (thresholdNum - 1)
}
val tp = new Array[Float](thresholdNum)
val fp = new Array[Float](thresholdNum)
var j = 0
while (j < thresholdNum) {
output.map(target, (a, b) => {
val fb = ev.toType[Float](b)
if (fb != 1 && fb != 0) {
throw new UnsupportedOperationException("Only support binary(0/1) target")
}
if (ev.isGreaterEq(a, ev.fromType[Float](thresholds(j)))) {
if (fb == 1) tp(j) += 1
else fp(j) += 1
}
a })
j += 1
}
(Tensor(tp, Array(thresholdNum)), Tensor(fp, Array(thresholdNum)),
ev.toType[Float](target.sum()),
ev.toType[Float](target.mul(ev.fromType(-1)).add(ev.fromType(1)).sum))
}
}
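
// A minimal usage sketch (not part of the original source): prediction scores and
// binary (0/1) labels are wrapped in Tensors, matching the shapes the metric expects.
// val output = Tensor(Array(0.1f, 0.9f, 0.4f, 0.8f), Array(4))
// val target = Tensor(Array(0.0f, 1.0f, 0.0f, 1.0f), Array(4))
// val score = new AUC[Float]().apply(output, target)
// score.result() // (auc value, number of samples)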
| intel-analytics/analytics-zoo | zoo/src/main/scala/com/intel/analytics/zoo/pipeline/api/keras/metrics/AUC.scala | Scala | apache-2.0 | 7,459 |
@author(AnnotatedCode.authorName)
@version(major=1,minor=0)
object AnnotatedCode {
final val authorName = "Grzegorz Balcerek"
private[this] val commentText = "needs to be a var"
@todo @fixme @comment(commentText) val num1 = 2
val num2 = num1*2 : @comment("is this value correct?")
def hello(@fixme("unnecessary parameter") x: Int = 3) = "hello!"
val size: Int @fixme("change to Long") = commentText.length
}
| grzegorzbalcerek/scala-book-examples | examples/AnnotatedCode.scala | Scala | mit | 421 |
/* *\\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2015, Gary Keorkunian **
** **
\\* */
package squants.radio
import squants._
import squants.energy.{ErgsPerSecond, Watts}
import squants.space._
/**
* @author florianNussberger
* @since 0.6
*
* @param value Double
*/
final class SpectralIrradiance private (val value: Double, val unit: SpectralIrradianceUnit)
extends Quantity[SpectralIrradiance] {
def dimension = SpectralIrradiance
def toWattsPerCubicMeter = to(WattsPerCubicMeter)
def toWattsPerSquareMeterPerNanometer = to(WattsPerSquareMeterPerNanometer)
def toWattsPerSquareMeterPerMicron = to(WattsPerSquareMeterPerMicron)
def toErgsPerSecondPerSquareCentimeterPerAngstrom = to(ErgsPerSecondPerSquareCentimeterPerAngstrom)
}
object SpectralIrradiance extends Dimension[SpectralIrradiance] {
private[radio] def apply[A](n: A, unit: SpectralIrradianceUnit)(implicit num: Numeric[A]) = new SpectralIrradiance(num.toDouble(n), unit)
def apply(value: Any) = parse(value)
def name = "SpectralIrradiance"
def primaryUnit = WattsPerCubicMeter
def siUnit = WattsPerCubicMeter
def units = Set(WattsPerCubicMeter, WattsPerSquareMeterPerMicron, WattsPerSquareMeterPerNanometer, ErgsPerSecondPerSquareCentimeterPerAngstrom)
}
trait SpectralIrradianceUnit extends UnitOfMeasure[SpectralIrradiance] with UnitConverter {
def apply[A](n: A)(implicit num: Numeric[A]) = SpectralIrradiance(n, this)
}
object WattsPerCubicMeter extends SpectralIrradianceUnit with PrimaryUnit with SiUnit {
val symbol = Watts.symbol + "/" + CubicMeters.symbol
}
object WattsPerSquareMeterPerNanometer extends SpectralIrradianceUnit with SiUnit {
val conversionFactor = 1 / MetricSystem.Nano
val symbol = Watts.symbol + "/" + SquareMeters.symbol + "/" + Nanometers.symbol
}
object WattsPerSquareMeterPerMicron extends SpectralIrradianceUnit with SiUnit {
val conversionFactor = 1 / MetricSystem.Micro
val symbol = Watts.symbol + "/" + SquareMeters.symbol + "/" + Microns.symbol
}
object ErgsPerSecondPerSquareCentimeterPerAngstrom extends SpectralIrradianceUnit {
val conversionFactor = ErgsPerSecond.conversionFactor / SquareCentimeters.conversionFactor / Angstroms.conversionFactor
val symbol = ErgsPerSecond.symbol + "/" + SquareCentimeters.symbol + "/" + Angstroms.symbol
}
object SpectralIrradianceConversions {
lazy val wattPerCubicMeter = WattsPerCubicMeter(1)
lazy val wattPerSquareMeterPerNanometer = WattsPerSquareMeterPerNanometer(1)
lazy val wattPerSquareMeterPerMicron = WattsPerSquareMeterPerMicron(1)
lazy val ergPerSecondPerSquareCentimeterPerAngstrom = ErgsPerSecondPerSquareCentimeterPerAngstrom(1)
implicit class SpectralIrradianceConversions[A](n: A)(implicit num: Numeric[A]) {
def wattsPerCubicMeter = WattsPerCubicMeter(n)
def wattsPerSquareMeterPerNanometer = WattsPerSquareMeterPerNanometer(n)
def wattsPerSquareMeterPerMicron = WattsPerSquareMeterPerMicron(n)
def ergsPerSecondPerSquareCentimeterPerAngstrom = ErgsPerSecondPerSquareCentimeterPerAngstrom(n)
}
implicit object SpectralIrradianceNumeric extends AbstractQuantityNumeric[SpectralIrradiance](SpectralIrradiance.primaryUnit)
}
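
// A minimal usage sketch (not part of the original source), following the conversion
// DSL pattern used across squants:
// import squants.radio.SpectralIrradianceConversions._
// val i = 5.wattsPerSquareMeterPerNanometer
// i.toWattsPerCubicMeter // 5e9, since 1 W/m²/nm = 1e9 W/m³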
| garyKeorkunian/squants | shared/src/main/scala/squants/radio/SpectralIrradiance.scala | Scala | apache-2.0 | 3,614 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.benchmarks
import ai.h2o.sparkling.H2OFrame
import ai.h2o.sparkling.ml.models.H2OMOJOModel
import ai.h2o.sparkling.ml.utils.EstimatorCommonUtils
class TrainAlgorithmFromH2OFrameBenchmark(context: BenchmarkContext, algorithmBundle: AlgorithmBundle)
extends AlgorithmBenchmarkBase[H2OFrame, H2OFrame](context, algorithmBundle)
with EstimatorCommonUtils {
override protected def initialize(): H2OFrame = loadDataToH2OFrame()
override protected def convertInput(input: H2OFrame): H2OFrame = input
override protected def train(trainingFrame: H2OFrame): H2OMOJOModel = {
val (name, params) = algorithmBundle.h2oAlgorithm
val newParams = params ++ Map(
"training_frame" -> trainingFrame.frameId,
"response_column" -> context.datasetDetails.labelCol)
trainAndGetMOJOModel(s"/3/ModelBuilders/$name", newParams)
}
override protected def cleanUp(frame: H2OFrame, output: H2OMOJOModel): Unit = frame.delete()
}
| h2oai/sparkling-water | benchmarks/src/main/scala/ai/h2o/sparkling/benchmarks/TrainAlgorithmFromH2OFrameBenchmark.scala | Scala | apache-2.0 | 1,770 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.network
import java.io.IOException
import java.net._
import java.nio.channels._
import java.nio.channels.{Selector => NSelector}
import java.util.concurrent._
import java.util.concurrent.atomic._
import com.yammer.metrics.core.Gauge
import kafka.cluster.{BrokerEndPoint, EndPoint}
import kafka.common.KafkaException
import kafka.metrics.KafkaMetricsGroup
import kafka.security.CredentialProvider
import kafka.server.KafkaConfig
import kafka.utils._
import org.apache.kafka.common.errors.InvalidRequestException
import org.apache.kafka.common.metrics._
import org.apache.kafka.common.network.{ChannelBuilders, KafkaChannel, ListenerName, Selectable, Send, Selector => KSelector}
import org.apache.kafka.common.security.auth.KafkaPrincipal
import org.apache.kafka.common.protocol.SecurityProtocol
import org.apache.kafka.common.protocol.types.SchemaException
import org.apache.kafka.common.utils.{Time, Utils}
import scala.collection._
import JavaConverters._
import scala.util.control.{ControlThrowable, NonFatal}
/**
* An NIO socket server. The threading model is
* 1 Acceptor thread that handles new connections
* Acceptor has N Processor threads that each have their own selector and read requests from sockets
* M Handler threads that handle requests and produce responses back to the processor threads for writing.
*/
class SocketServer(val config: KafkaConfig, val metrics: Metrics, val time: Time, val credentialProvider: CredentialProvider) extends Logging with KafkaMetricsGroup {
private val endpoints = config.listeners.map(l => l.listenerName -> l).toMap
private val numProcessorThreads = config.numNetworkThreads
private val maxQueuedRequests = config.queuedMaxRequests
private val totalProcessorThreads = numProcessorThreads * endpoints.size
private val maxConnectionsPerIp = config.maxConnectionsPerIp
private val maxConnectionsPerIpOverrides = config.maxConnectionsPerIpOverrides
this.logIdent = "[Socket Server on Broker " + config.brokerId + "], "
val requestChannel = new RequestChannel(totalProcessorThreads, maxQueuedRequests)
private val processors = new Array[Processor](totalProcessorThreads)
private[network] val acceptors = mutable.Map[EndPoint, Acceptor]()
private var connectionQuotas: ConnectionQuotas = _
/**
* Start the socket server
*/
def startup() {
this.synchronized {
connectionQuotas = new ConnectionQuotas(maxConnectionsPerIp, maxConnectionsPerIpOverrides)
val sendBufferSize = config.socketSendBufferBytes
val recvBufferSize = config.socketReceiveBufferBytes
val brokerId = config.brokerId
var processorBeginIndex = 0
config.listeners.foreach { endpoint =>
val listenerName = endpoint.listenerName
val securityProtocol = endpoint.securityProtocol
val processorEndIndex = processorBeginIndex + numProcessorThreads
for (i <- processorBeginIndex until processorEndIndex)
processors(i) = newProcessor(i, connectionQuotas, listenerName, securityProtocol)
val acceptor = new Acceptor(endpoint, sendBufferSize, recvBufferSize, brokerId,
processors.slice(processorBeginIndex, processorEndIndex), connectionQuotas)
acceptors.put(endpoint, acceptor)
Utils.newThread(s"kafka-socket-acceptor-$listenerName-$securityProtocol-${endpoint.port}", acceptor, false).start()
acceptor.awaitStartup()
processorBeginIndex = processorEndIndex
}
}
newGauge("NetworkProcessorAvgIdlePercent",
new Gauge[Double] {
private val ioWaitRatioMetricNames = processors.map { p =>
metrics.metricName("io-wait-ratio", "socket-server-metrics", p.metricTags)
}
def value = ioWaitRatioMetricNames.map { metricName =>
Option(metrics.metric(metricName)).fold(0.0)(_.value)
}.sum / totalProcessorThreads
}
)
info("Started " + acceptors.size + " acceptor threads")
}
// register the processor threads for notification of responses
requestChannel.addResponseListener(id => processors(id).wakeup())
/**
* Shutdown the socket server
*/
def shutdown() = {
info("Shutting down")
this.synchronized {
acceptors.values.foreach(_.shutdown)
processors.foreach(_.shutdown)
}
info("Shutdown completed")
}
def boundPort(listenerName: ListenerName): Int = {
try {
acceptors(endpoints(listenerName)).serverChannel.socket.getLocalPort
} catch {
case e: Exception => throw new KafkaException("Tried to check server's port before server was started or checked for port of non-existing protocol", e)
}
}
/* `protected` for test usage */
protected[network] def newProcessor(id: Int, connectionQuotas: ConnectionQuotas, listenerName: ListenerName,
securityProtocol: SecurityProtocol): Processor = {
new Processor(id,
time,
config.socketRequestMaxBytes,
requestChannel,
connectionQuotas,
config.connectionsMaxIdleMs,
listenerName,
securityProtocol,
config,
metrics,
credentialProvider
)
}
/* For test usage */
private[network] def connectionCount(address: InetAddress): Int =
Option(connectionQuotas).fold(0)(_.get(address))
/* For test usage */
private[network] def processor(index: Int): Processor = processors(index)
}
/**
* A base class with some helper variables and methods
*/
private[kafka] abstract class AbstractServerThread(connectionQuotas: ConnectionQuotas) extends Runnable with Logging {
private val startupLatch = new CountDownLatch(1)
// `shutdown()` is invoked before `startupComplete` and `shutdownComplete` if an exception is thrown in the constructor
// (e.g. if the address is already in use). We want `shutdown` to proceed in such cases, so we first assign an open
// latch and then replace it in `startupComplete()`.
@volatile private var shutdownLatch = new CountDownLatch(0)
private val alive = new AtomicBoolean(true)
def wakeup(): Unit
/**
* Initiates a graceful shutdown by signaling to stop and waiting for the shutdown to complete
*/
def shutdown(): Unit = {
alive.set(false)
wakeup()
shutdownLatch.await()
}
/**
* Wait for the thread to completely start up
*/
def awaitStartup(): Unit = startupLatch.await
/**
* Record that the thread startup is complete
*/
protected def startupComplete(): Unit = {
// Replace the open latch with a closed one
shutdownLatch = new CountDownLatch(1)
startupLatch.countDown()
}
/**
* Record that the thread shutdown is complete
*/
protected def shutdownComplete(): Unit = shutdownLatch.countDown()
/**
* Is the server still running?
*/
protected def isRunning: Boolean = alive.get
/**
* Close the connection identified by `connectionId` and decrement the connection count.
*/
def close(selector: KSelector, connectionId: String): Unit = {
val channel = selector.channel(connectionId)
if (channel != null) {
debug(s"Closing selector connection $connectionId")
val address = channel.socketAddress
if (address != null)
connectionQuotas.dec(address)
selector.close(connectionId)
}
}
/**
* Close `channel` and decrement the connection count.
*/
def close(channel: SocketChannel): Unit = {
if (channel != null) {
debug("Closing connection from " + channel.socket.getRemoteSocketAddress())
connectionQuotas.dec(channel.socket.getInetAddress)
swallowError(channel.socket().close())
swallowError(channel.close())
}
}
}
/**
* Thread that accepts and configures new connections. There is one of these per endpoint.
*/
private[kafka] class Acceptor(val endPoint: EndPoint,
val sendBufferSize: Int,
val recvBufferSize: Int,
brokerId: Int,
processors: Array[Processor],
connectionQuotas: ConnectionQuotas) extends AbstractServerThread(connectionQuotas) with KafkaMetricsGroup {
private val nioSelector = NSelector.open()
val serverChannel = openServerSocket(endPoint.host, endPoint.port)
this.synchronized {
processors.foreach { processor =>
Utils.newThread(s"kafka-network-thread-$brokerId-${endPoint.listenerName}-${endPoint.securityProtocol}-${processor.id}",
processor, false).start()
}
}
/**
* Accept loop that checks for new connection attempts
*/
def run() {
serverChannel.register(nioSelector, SelectionKey.OP_ACCEPT)
startupComplete()
try {
var currentProcessor = 0
while (isRunning) {
try {
val ready = nioSelector.select(500)
if (ready > 0) {
val keys = nioSelector.selectedKeys()
val iter = keys.iterator()
while (iter.hasNext && isRunning) {
try {
val key = iter.next
iter.remove()
if (key.isAcceptable)
accept(key, processors(currentProcessor))
else
throw new IllegalStateException("Unrecognized key state for acceptor thread.")
// round robin to the next processor thread
currentProcessor = (currentProcessor + 1) % processors.length
} catch {
case e: Throwable => error("Error while accepting connection", e)
}
}
}
}
catch {
// We catch all the throwables to prevent the acceptor thread from exiting on exceptions due
// to a select operation on a specific channel or a bad request. We don't want
// the broker to stop responding to requests from other clients in these scenarios.
case e: ControlThrowable => throw e
case e: Throwable => error("Error occurred", e)
}
}
} finally {
debug("Closing server socket and selector.")
swallowError(serverChannel.close())
swallowError(nioSelector.close())
shutdownComplete()
}
}
/*
* Create a server socket to listen for connections on.
*/
private def openServerSocket(host: String, port: Int): ServerSocketChannel = {
val socketAddress =
if(host == null || host.trim.isEmpty)
new InetSocketAddress(port)
else
new InetSocketAddress(host, port)
val serverChannel = ServerSocketChannel.open()
serverChannel.configureBlocking(false)
if (recvBufferSize != Selectable.USE_DEFAULT_BUFFER_SIZE)
serverChannel.socket().setReceiveBufferSize(recvBufferSize)
try {
serverChannel.socket.bind(socketAddress)
info("Awaiting socket connections on %s:%d.".format(socketAddress.getHostString, serverChannel.socket.getLocalPort))
} catch {
case e: SocketException =>
throw new KafkaException("Socket server failed to bind to %s:%d: %s.".format(socketAddress.getHostString, port, e.getMessage), e)
}
serverChannel
}
/*
* Accept a new connection
*/
def accept(key: SelectionKey, processor: Processor) {
val serverSocketChannel = key.channel().asInstanceOf[ServerSocketChannel]
val socketChannel = serverSocketChannel.accept()
try {
connectionQuotas.inc(socketChannel.socket().getInetAddress)
socketChannel.configureBlocking(false)
socketChannel.socket().setTcpNoDelay(true)
socketChannel.socket().setKeepAlive(true)
if (sendBufferSize != Selectable.USE_DEFAULT_BUFFER_SIZE)
socketChannel.socket().setSendBufferSize(sendBufferSize)
debug("Accepted connection from %s on %s and assigned it to processor %d, sendBufferSize [actual|requested]: [%d|%d] recvBufferSize [actual|requested]: [%d|%d]"
.format(socketChannel.socket.getRemoteSocketAddress, socketChannel.socket.getLocalSocketAddress, processor.id,
socketChannel.socket.getSendBufferSize, sendBufferSize,
socketChannel.socket.getReceiveBufferSize, recvBufferSize))
processor.accept(socketChannel)
} catch {
case e: TooManyConnectionsException =>
info("Rejected connection from %s, address already has the configured maximum of %d connections.".format(e.ip, e.count))
close(socketChannel)
}
}
/**
* Wakeup the thread for selection.
*/
@Override
def wakeup = nioSelector.wakeup()
}
/**
* Thread that processes all requests from a single connection. There are N of these running in parallel
* each of which has its own selector
*/
private[kafka] class Processor(val id: Int,
time: Time,
maxRequestSize: Int,
requestChannel: RequestChannel,
connectionQuotas: ConnectionQuotas,
connectionsMaxIdleMs: Long,
listenerName: ListenerName,
securityProtocol: SecurityProtocol,
config: KafkaConfig,
metrics: Metrics,
credentialProvider: CredentialProvider) extends AbstractServerThread(connectionQuotas) with KafkaMetricsGroup {
private object ConnectionId {
def fromString(s: String): Option[ConnectionId] = s.split("-") match {
case Array(local, remote) => BrokerEndPoint.parseHostPort(local).flatMap { case (localHost, localPort) =>
BrokerEndPoint.parseHostPort(remote).map { case (remoteHost, remotePort) =>
ConnectionId(localHost, localPort, remoteHost, remotePort)
}
}
case _ => None
}
}
private case class ConnectionId(localHost: String, localPort: Int, remoteHost: String, remotePort: Int) {
override def toString: String = s"$localHost:$localPort-$remoteHost:$remotePort"
}
private val newConnections = new ConcurrentLinkedQueue[SocketChannel]()
private val inflightResponses = mutable.Map[String, RequestChannel.Response]()
private[kafka] val metricTags = mutable.LinkedHashMap(
"listener" -> listenerName.value,
"networkProcessor" -> id.toString
).asJava
newGauge("IdlePercent",
new Gauge[Double] {
def value = {
Option(metrics.metric(metrics.metricName("io-wait-ratio", "socket-server-metrics", metricTags))).fold(0.0)(_.value)
}
},
// for compatibility, only add a networkProcessor tag to the Yammer Metrics alias (the equivalent Selector metric
// also includes the listener name)
Map("networkProcessor" -> id.toString)
)
private val selector = new KSelector(
maxRequestSize,
connectionsMaxIdleMs,
metrics,
time,
"socket-server",
metricTags,
false,
true,
ChannelBuilders.serverChannelBuilder(listenerName, securityProtocol, config, credentialProvider.credentialCache))
override def run() {
startupComplete()
while (isRunning) {
try {
// setup any new connections that have been queued up
configureNewConnections()
// register any new responses for writing
processNewResponses()
poll()
processCompletedReceives()
processCompletedSends()
processDisconnected()
} catch {
// We catch all the throwables here to prevent the processor thread from exiting. We do this because
// letting a processor exit might cause a bigger impact on the broker. Usually the exceptions thrown would
// be either associated with a specific socket channel or a bad request. We just ignore the bad socket channel
// or request. This behavior might need to be reviewed if we see an exception that need the entire broker to stop.
case e: ControlThrowable => throw e
case e: Throwable =>
error("Processor got uncaught exception.", e)
}
}
debug("Closing selector - processor " + id)
swallowError(closeAll())
shutdownComplete()
}
private def processNewResponses() {
var curr = requestChannel.receiveResponse(id)
while (curr != null) {
try {
curr.responseAction match {
case RequestChannel.NoOpAction =>
// There is no response to send to the client, we need to read more pipelined requests
// that are sitting in the server's socket buffer
updateRequestMetrics(curr.request)
trace("Socket server received empty response to send, registering for read: " + curr)
val channelId = curr.request.connectionId
if (selector.channel(channelId) != null || selector.closingChannel(channelId) != null)
selector.unmute(channelId)
case RequestChannel.SendAction =>
val responseSend = curr.responseSend.getOrElse(
throw new IllegalStateException(s"responseSend must be defined for SendAction, response: $curr"))
sendResponse(curr, responseSend)
case RequestChannel.CloseConnectionAction =>
updateRequestMetrics(curr.request)
trace("Closing socket connection actively according to the response code.")
close(selector, curr.request.connectionId)
}
} finally {
curr = requestChannel.receiveResponse(id)
}
}
}
/* `protected` for test usage */
protected[network] def sendResponse(response: RequestChannel.Response, responseSend: Send) {
val connectionId = response.request.connectionId
trace(s"Socket server received response to send to $connectionId, registering for write and sending data: $response")
val channel = selector.channel(connectionId)
// `channel` can be null if the selector closed the connection because it was idle for too long
if (channel == null) {
warn(s"Attempting to send response via channel for which there is no open connection, connection id $connectionId")
response.request.updateRequestMetrics(0L)
}
else {
selector.send(responseSend)
inflightResponses += (connectionId -> response)
}
}
private def poll() {
try selector.poll(300)
catch {
case e @ (_: IllegalStateException | _: IOException) =>
error(s"Closing processor $id due to illegal state or IO exception")
swallow(closeAll())
shutdownComplete()
throw e
}
}
private def processCompletedReceives() {
selector.completedReceives.asScala.foreach { receive =>
try {
val openChannel = selector.channel(receive.source)
// Only methods that are safe to call on a disconnected channel should be invoked on 'openOrClosingChannel'.
val openOrClosingChannel = if (openChannel != null) openChannel else selector.closingChannel(receive.source)
val session = RequestChannel.Session(new KafkaPrincipal(KafkaPrincipal.USER_TYPE, openOrClosingChannel.principal.getName), openOrClosingChannel.socketAddress)
val req = RequestChannel.Request(processor = id, connectionId = receive.source, session = session,
buffer = receive.payload, startTimeNanos = time.nanoseconds,
listenerName = listenerName, securityProtocol = securityProtocol)
requestChannel.sendRequest(req)
selector.mute(receive.source)
} catch {
case e @ (_: InvalidRequestException | _: SchemaException) =>
// note that even though we got an exception, we can assume that receive.source is valid. Issues with constructing a valid receive object were handled earlier
error(s"Closing socket for ${receive.source} because of error", e)
close(selector, receive.source)
}
}
}
private def processCompletedSends() {
selector.completedSends.asScala.foreach { send =>
val resp = inflightResponses.remove(send.destination).getOrElse {
throw new IllegalStateException(s"Send for ${send.destination} completed, but not in `inflightResponses`")
}
updateRequestMetrics(resp.request)
selector.unmute(send.destination)
}
}
private def updateRequestMetrics(request: RequestChannel.Request) {
val channel = selector.channel(request.connectionId)
val openOrClosingChannel = if (channel != null) channel else selector.closingChannel(request.connectionId)
val networkThreadTimeNanos = if (openOrClosingChannel != null) openOrClosingChannel.getAndResetNetworkThreadTimeNanos() else 0L
request.updateRequestMetrics(networkThreadTimeNanos)
}
private def processDisconnected() {
selector.disconnected.keySet.asScala.foreach { connectionId =>
val remoteHost = ConnectionId.fromString(connectionId).getOrElse {
throw new IllegalStateException(s"connectionId has unexpected format: $connectionId")
}.remoteHost
inflightResponses.remove(connectionId).foreach(response => updateRequestMetrics(response.request))
// the channel has been closed by the selector but the quotas still need to be updated
connectionQuotas.dec(InetAddress.getByName(remoteHost))
}
}
/**
* Queue up a new connection for reading
*/
def accept(socketChannel: SocketChannel) {
newConnections.add(socketChannel)
wakeup()
}
/**
* Register any new connections that have been queued up
*/
private def configureNewConnections() {
while (!newConnections.isEmpty) {
val channel = newConnections.poll()
try {
debug(s"Processor $id listening to new connection from ${channel.socket.getRemoteSocketAddress}")
val localHost = channel.socket().getLocalAddress.getHostAddress
val localPort = channel.socket().getLocalPort
val remoteHost = channel.socket().getInetAddress.getHostAddress
val remotePort = channel.socket().getPort
val connectionId = ConnectionId(localHost, localPort, remoteHost, remotePort).toString
selector.register(connectionId, channel)
} catch {
// We explicitly catch all non fatal exceptions and close the socket to avoid a socket leak. The other
// throwables will be caught in processor and logged as uncaught exceptions.
case NonFatal(e) =>
val remoteAddress = channel.getRemoteAddress
// need to close the channel here to avoid a socket leak.
close(channel)
error(s"Processor $id closed connection from $remoteAddress", e)
}
}
}
/**
* Close the selector and all open connections
*/
private def closeAll() {
selector.channels.asScala.foreach { channel =>
close(selector, channel.id)
}
selector.close()
}
/* For test usage */
private[network] def channel(connectionId: String): Option[KafkaChannel] =
Option(selector.channel(connectionId))
/**
* Wakeup the thread for selection.
*/
@Override
def wakeup = selector.wakeup()
}
class ConnectionQuotas(val defaultMax: Int, overrideQuotas: Map[String, Int]) {
private val overrides = overrideQuotas.map { case (host, count) => (InetAddress.getByName(host), count) }
private val counts = mutable.Map[InetAddress, Int]()
def inc(address: InetAddress) {
counts.synchronized {
val count = counts.getOrElseUpdate(address, 0)
counts.put(address, count + 1)
val max = overrides.getOrElse(address, defaultMax)
if (count >= max)
throw new TooManyConnectionsException(address, max)
}
}
def dec(address: InetAddress) {
counts.synchronized {
val count = counts.getOrElse(address,
throw new IllegalArgumentException(s"Attempted to decrease connection count for address with no connections, address: $address"))
if (count == 1)
counts.remove(address)
else
counts.put(address, count - 1)
}
}
def get(address: InetAddress): Int = counts.synchronized {
counts.getOrElse(address, 0)
}
}
class TooManyConnectionsException(val ip: InetAddress, val count: Int) extends KafkaException("Too many connections from %s (maximum = %d)".format(ip, count))
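
// A minimal lifecycle sketch (not part of the original source); `config`, `metrics`,
// `time` and `credentialProvider` are assumed to be constructed by the broker elsewhere:
// val socketServer = new SocketServer(config, metrics, time, credentialProvider)
// socketServer.startup() // binds acceptors and starts the processor threads
// val port = socketServer.boundPort(ListenerName.normalised("PLAINTEXT"))
// socketServer.shutdown()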
| wangcy6/storm_app | frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/main/scala/kafka/network/SocketServer.scala | Scala | apache-2.0 | 24,848 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.boot.layer
import akka.actor.{ActorRef, ActorSystem, Props}
import org.apache.toree.comm.{CommRegistrar, CommStorage}
import org.apache.toree.interpreter.Interpreter
import org.apache.toree.kernel.api.Kernel
import org.apache.toree.kernel.protocol.v5.MessageType.MessageType
import org.apache.toree.kernel.protocol.v5.SocketType.SocketType
import org.apache.toree.kernel.protocol.v5.handler._
import org.apache.toree.kernel.protocol.v5.interpreter.InterpreterActor
import org.apache.toree.kernel.protocol.v5.interpreter.tasks.InterpreterTaskFactory
import org.apache.toree.kernel.protocol.v5.kernel.ActorLoader
import org.apache.toree.kernel.protocol.v5.magic.{MagicParser, PostProcessor}
import org.apache.toree.kernel.protocol.v5.relay.ExecuteRequestRelay
import org.apache.toree.kernel.protocol.v5.{MessageType, SocketType, SystemActorType, LanguageInfo}
import org.apache.toree.magic.MagicManager
import org.apache.toree.plugins.PluginManager
import org.apache.toree.utils.LogLike
/**
* Represents the Akka handler initialization. All actors (not needed in bare
* initialization) should be constructed here.
*/
trait HandlerInitialization {
/**
* Initializes and registers all handlers.
*
* @param actorSystem The actor system needed for registration
* @param actorLoader The actor loader needed for registration
* @param kernel The kernel api needed for registration
* @param interpreter The main interpreter needed for registration
* @param magicManager The magic manager needed for registration
* @param commRegistrar The comm registrar needed for registration
* @param commStorage The comm storage needed for registration
*/
def initializeHandlers(
actorSystem: ActorSystem, actorLoader: ActorLoader,
kernel: Kernel,
interpreter: Interpreter, pluginManager: PluginManager,
magicManager: MagicManager, commRegistrar: CommRegistrar,
commStorage: CommStorage,
responseMap: collection.mutable.Map[String, ActorRef]
): Unit
}
/**
* Represents the standard implementation of HandlerInitialization.
*/
trait StandardHandlerInitialization extends HandlerInitialization {
this: LogLike =>
/**
* Initializes and registers all handlers.
*
* @param actorSystem The actor system needed for registration
* @param actorLoader The actor loader needed for registration
* @param kernel The kernel api needed for registration
* @param interpreter The main interpreter needed for registration
* @param pluginManager The plugin manager needed for registration
* @param commRegistrar The comm registrar needed for registration
* @param commStorage The comm storage needed for registration
*/
def initializeHandlers(
actorSystem: ActorSystem, actorLoader: ActorLoader,
kernel: Kernel,
interpreter: Interpreter, pluginManager: PluginManager,
magicManager: MagicManager, commRegistrar: CommRegistrar,
commStorage: CommStorage,
responseMap: collection.mutable.Map[String, ActorRef]
): Unit = {
initializeKernelHandlers(
actorSystem, actorLoader, interpreter, kernel, commRegistrar, commStorage, responseMap
)
initializeSystemActors(
actorSystem, actorLoader, interpreter, pluginManager, magicManager
)
}
private def initializeSystemActors(
actorSystem: ActorSystem, actorLoader: ActorLoader,
interpreter: Interpreter, pluginManager: PluginManager,
magicManager: MagicManager
): Unit = {
logger.debug("Creating interpreter actor")
val interpreterActor = actorSystem.actorOf(
Props(classOf[InterpreterActor], new InterpreterTaskFactory(interpreter)),
name = SystemActorType.Interpreter.toString
)
logger.debug("Creating execute request relay actor")
val postProcessor = new PostProcessor(interpreter)
val magicParser = new MagicParser(magicManager)
val executeRequestRelayActor = actorSystem.actorOf(
Props(classOf[ExecuteRequestRelay],
actorLoader, pluginManager, magicParser, postProcessor
),
name = SystemActorType.ExecuteRequestRelay.toString
)
}
private def initializeKernelHandlers(
actorSystem: ActorSystem, actorLoader: ActorLoader,
interpreter: Interpreter, kernel: Kernel,
commRegistrar: CommRegistrar, commStorage: CommStorage,
responseMap: collection.mutable.Map[String, ActorRef]
): Unit = {
def initializeRequestHandler[T](clazz: Class[T], messageType: MessageType, extraArguments: AnyRef*) = {
logger.debug("Creating %s handler".format(messageType.toString))
actorSystem.actorOf(
Props(clazz, actorLoader +: extraArguments: _*),
name = messageType.toString
)
}
def initializeInputHandler[T](
clazz: Class[T],
messageType: MessageType
): Unit = {
logger.debug("Creating %s handler".format(messageType.toString))
actorSystem.actorOf(
Props(clazz, actorLoader, responseMap),
name = messageType.toString
)
}
// TODO: Figure out how to pass variable number of arguments to actor
def initializeCommHandler[T](clazz: Class[T], messageType: MessageType) = {
logger.debug("Creating %s handler".format(messageType.toString))
actorSystem.actorOf(
Props(clazz, actorLoader, commRegistrar, commStorage),
name = messageType.toString
)
}
def initializeSocketHandler(socketType: SocketType, messageType: MessageType): Unit = {
logger.debug("Creating %s to %s socket handler ".format(messageType.toString ,socketType.toString))
actorSystem.actorOf(
Props(classOf[GenericSocketMessageHandler], actorLoader, socketType),
name = messageType.toString
)
}
val langInfo = interpreter.languageInfo
val internalInfo = LanguageInfo(
name=langInfo.name,
version=langInfo.version,
file_extension=langInfo.fileExtension,
pygments_lexer=langInfo.pygmentsLexer)
    // These are the handlers for messages coming into the kernel through the sockets
initializeRequestHandler(classOf[ExecuteRequestHandler],
MessageType.Incoming.ExecuteRequest, kernel)
initializeRequestHandler(classOf[KernelInfoRequestHandler],
MessageType.Incoming.KernelInfoRequest, internalInfo)
initializeRequestHandler(classOf[CommInfoRequestHandler],
MessageType.Incoming.CommInfoRequest, commStorage)
initializeRequestHandler(classOf[CodeCompleteHandler],
MessageType.Incoming.CompleteRequest)
initializeRequestHandler(classOf[IsCompleteHandler],
MessageType.Incoming.IsCompleteRequest)
initializeInputHandler(classOf[InputRequestReplyHandler],
MessageType.Incoming.InputReply)
initializeCommHandler(classOf[CommOpenHandler],
MessageType.Incoming.CommOpen)
initializeCommHandler(classOf[CommMsgHandler],
MessageType.Incoming.CommMsg)
initializeCommHandler(classOf[CommCloseHandler],
MessageType.Incoming.CommClose)
// These are handlers for messages leaving the kernel through the sockets
initializeSocketHandler(SocketType.Shell, MessageType.Outgoing.KernelInfoReply)
initializeSocketHandler(SocketType.Shell, MessageType.Outgoing.CommInfoReply)
initializeSocketHandler(SocketType.Shell, MessageType.Outgoing.ExecuteReply)
initializeSocketHandler(SocketType.Shell, MessageType.Outgoing.CompleteReply)
initializeSocketHandler(SocketType.Shell, MessageType.Outgoing.IsCompleteReply)
initializeSocketHandler(SocketType.StdIn, MessageType.Outgoing.InputRequest)
initializeSocketHandler(SocketType.IOPub, MessageType.Outgoing.ExecuteResult)
initializeSocketHandler(SocketType.IOPub, MessageType.Outgoing.Stream)
initializeSocketHandler(SocketType.IOPub, MessageType.Outgoing.DisplayData)
initializeSocketHandler(SocketType.IOPub, MessageType.Outgoing.ClearOutput)
initializeSocketHandler(SocketType.IOPub, MessageType.Outgoing.ExecuteInput)
initializeSocketHandler(SocketType.IOPub, MessageType.Outgoing.Status)
initializeSocketHandler(SocketType.IOPub, MessageType.Outgoing.Error)
initializeSocketHandler(SocketType.IOPub, MessageType.Outgoing.CommOpen)
initializeSocketHandler(SocketType.IOPub, MessageType.Outgoing.CommMsg)
initializeSocketHandler(SocketType.IOPub, MessageType.Outgoing.CommClose)
}
}
| mariusvniekerk/incubator-toree | kernel/src/main/scala/org/apache/toree/boot/layer/HandlerInitialization.scala | Scala | apache-2.0 | 9,113 |
package org.bitcoins.node
import akka.actor.ActorSystem
import org.bitcoins.chain.config.ChainAppConfig
import org.bitcoins.chain.models.BlockHeaderDAO
import org.bitcoins.core.api.chain.ChainApi
import org.bitcoins.core.api.chain.ChainQueryApi.FilterResponse
import org.bitcoins.core.api.chain.db.{
BlockHeaderDb,
CompactFilterDb,
CompactFilterHeaderDb
}
import org.bitcoins.core.api.node.NodeType
import org.bitcoins.core.protocol.BlockStamp
import org.bitcoins.node.config.NodeAppConfig
import org.bitcoins.node.models.Peer
import org.bitcoins.node.networking.peer.{
ControlMessageHandler,
DataMessageHandler
}
import scala.concurrent.Future
case class NeutrinoNode(
private var dataMessageHandler: DataMessageHandler,
nodeConfig: NodeAppConfig,
chainConfig: ChainAppConfig,
actorSystem: ActorSystem,
configPeersOverride: Vector[Peer] = Vector.empty)
extends Node {
require(
nodeConfig.nodeType == NodeType.NeutrinoNode,
s"We need our Neutrino mode enabled to be able to construct a Neutrino node!")
implicit override def system: ActorSystem = actorSystem
implicit override def nodeAppConfig: NodeAppConfig = nodeConfig
override def chainAppConfig: ChainAppConfig = chainConfig
val controlMessageHandler: ControlMessageHandler = ControlMessageHandler(this)
override val peerManager: PeerManager = PeerManager(this, configPeersOverride)
override def getDataMessageHandler: DataMessageHandler = dataMessageHandler
override def updateDataMessageHandler(
dataMessageHandler: DataMessageHandler): NeutrinoNode = {
this.dataMessageHandler = dataMessageHandler
this
}
override def start(): Future[NeutrinoNode] = {
val res = for {
node <- super.start()
chainApi <- chainApiFromDb()
bestHash <- chainApi.getBestBlockHash()
_ <- peerManager.randomPeerMsgSenderWithCompactFilters
.sendGetCompactFilterCheckPointMessage(stopHash = bestHash.flip)
} yield {
node.asInstanceOf[NeutrinoNode]
}
res.failed.foreach(logger.error("Cannot start Neutrino node", _))
res
}
  /** Starts to sync our node with our peer.
    * If our local best block hash is the same as our peer's
    * we will not sync; otherwise we will keep syncing
    * until our best block hashes match up.
*
* @return
*/
override def sync(): Future[Unit] = {
val blockchainsF =
BlockHeaderDAO()(executionContext, chainConfig).getBlockchains()
for {
chainApi <- chainApiFromDb()
header <- chainApi.getBestBlockHeader()
bestFilterHeaderOpt <- chainApi.getBestFilterHeader()
bestFilterOpt <- chainApi.getBestFilter()
blockchains <- blockchainsF
// Get all of our cached headers in case of a reorg
cachedHeaders = blockchains.flatMap(_.headers).map(_.hashBE.flip)
_ <- peerManager.randomPeerMsgSender.sendGetHeadersMessage(cachedHeaders)
_ <- syncFilters(bestFilterHeaderOpt = bestFilterHeaderOpt,
bestFilterOpt = bestFilterOpt,
bestBlockHeader = header,
chainApi = chainApi)
} yield {
logger.info(
s"Starting sync node, height=${header.height} hash=${header.hashBE.hex}")
}
}
private def syncFilters(
bestFilterHeaderOpt: Option[CompactFilterHeaderDb],
bestFilterOpt: Option[CompactFilterDb],
bestBlockHeader: BlockHeaderDb,
chainApi: ChainApi): Future[Unit] = {
    // If we have started syncing filter headers
(bestFilterHeaderOpt, bestFilterOpt) match {
case (None, None) | (None, Some(_)) =>
//do nothing if we haven't started syncing
Future.unit
case (Some(bestFilterHeader), Some(bestFilter)) =>
val isFilterHeaderSynced =
bestFilterHeader.blockHashBE == bestBlockHeader.hashBE
val isFiltersSynced = {
//check if we have started syncing filters,
//and if so, see if filter headers and filters
//were in sync
bestFilter.hashBE == bestFilterHeader.filterHashBE
}
if (isFilterHeaderSynced && isFiltersSynced) {
//means we are in sync, with filter heads & block headers & filters
//if there _both_ filter headers and block headers are on
//an old tip, our event driven node will start syncing
//filters after block headers are in sync
//do nothing
Future.unit
} else {
syncCompactFilters(bestFilterHeader, chainApi, Some(bestFilter))
}
case (Some(bestFilterHeader), None) =>
syncCompactFilters(bestFilterHeader, chainApi, None)
}
}
  /** Starts syncing compact filter headers.
* Only starts syncing compact filters if our compact filter headers are in sync with block headers
*/
private def syncCompactFilters(
bestFilterHeader: CompactFilterHeaderDb,
chainApi: ChainApi,
bestFilterOpt: Option[CompactFilterDb]): Future[Unit] = {
val sendCompactFilterHeaderMsgF = {
peerManager.randomPeerMsgSenderWithCompactFilters
.sendNextGetCompactFilterHeadersCommand(
chainApi = chainApi,
filterHeaderBatchSize = chainConfig.filterHeaderBatchSize,
prevStopHash = bestFilterHeader.blockHashBE)
}
sendCompactFilterHeaderMsgF.flatMap { isSyncFilterHeaders =>
// If we have started syncing filters
if (
!isSyncFilterHeaders &&
bestFilterOpt.isDefined &&
bestFilterOpt.get.hashBE != bestFilterHeader.filterHashBE
) {
//means we are not syncing filter headers, and our filters are NOT
//in sync with our compact filter headers
logger.info(s"Starting sync filters in NeutrinoNode.sync()")
peerManager.randomPeerMsgSenderWithCompactFilters
.sendNextGetCompactFilterCommand(
chainApi = chainApi,
filterBatchSize = chainConfig.filterBatchSize,
startHeight = bestFilterOpt.get.height)
.map(_ => ())
} else {
Future.unit
}
}
}
/** Gets the number of compact filters in the database */
override def getFilterCount(): Future[Int] =
chainApiFromDb().flatMap(_.getFilterCount())
/** Returns the block height of the given block stamp */
override def getHeightByBlockStamp(blockStamp: BlockStamp): Future[Int] =
chainApiFromDb().flatMap(_.getHeightByBlockStamp(blockStamp))
override def getFiltersBetweenHeights(
startHeight: Int,
endHeight: Int): Future[Vector[FilterResponse]] =
chainApiFromDb().flatMap(_.getFiltersBetweenHeights(startHeight, endHeight))
}
| bitcoin-s/bitcoin-s | node/src/main/scala/org/bitcoins/node/NeutrinoNode.scala | Scala | mit | 6,627 |
package com.varunvats.practice.tree
object PathsWithSum {
def apply(tree: BinaryTreeNode[Int], sum: Int): Int = {
val (allSums, allNodes) = apply(Some(tree))
val allPossibleSums = allSums ++: allNodes
allPossibleSums.count(_ == sum)
}
private def apply(nodeO: Option[BinaryTreeNode[Int]]): (Seq[Int], Seq[Int]) =
nodeO.fold((Seq.empty[Int], Seq.empty[Int])) { node =>
val leftResult = apply(node.left)
val rightResult = apply(node.right)
merge(leftResult, rightResult, node)
}
private def merge(leftResult: (Seq[Int], Seq[Int]),
rightResult: (Seq[Int], Seq[Int]),
node: BinaryTreeNode[Int]): (Seq[Int], Seq[Int]) = {
val (leftSums, leftNodes) = leftResult
val (rightSums, rightNodes) = rightResult
val leftValO = node.left.map(_.data)
val rightValO = node.right.map(_.data)
val newSums = (leftValO ++: rightValO ++: leftSums ++: rightSums).map(s => s + node.data)
val allSums = newSums ++: leftSums ++: rightSums
val allNodes = node.data +: leftNodes ++: rightNodes
(allSums, allNodes)
}
}
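
// A minimal usage sketch (not part of the original source); BinaryTreeNode is defined
// elsewhere in this project and is assumed here to take (data, left, right):
// val leaf = BinaryTreeNode(3, None, None)
// val root = BinaryTreeNode(5, Some(leaf), None)
// PathsWithSum(root, 8) // 1, for the downward path 5 -> 3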
| varunvats/practice | jvm/src/main/scala/com/varunvats/practice/tree/PathsWithSum.scala | Scala | mit | 1,115 |
package me.frmr.kafka.detective.util
import java.util.concurrent._
import java.util.concurrent.atomic._
class PrefixNamedThreadFactory(prefix: String) extends ThreadFactory {
  private[this] val threadNumber = new AtomicInteger(0)
private[this] def threadName: String = {
val currentNumber = threadNumber.getAndIncrement()
s"$prefix-$currentNumber"
}
def newThread(runnable: Runnable): Thread = {
new Thread(runnable, threadName)
}
}
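
/** A minimal usage sketch (not part of the original source): threads created by the
  * pool below are named "detective-worker-0", "detective-worker-1", and so on.
  */
private object PrefixNamedThreadFactoryUsageSketch {
  def newWorkerPool(): ExecutorService =
    Executors.newFixedThreadPool(4, new PrefixNamedThreadFactory("detective-worker"))
}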
| farmdawgnation/kafka-detective | daemon/src/main/scala/me/frmr/kafka/detective/util/PrefixNamedThreadFactory.scala | Scala | apache-2.0 | 458 |
package lila.tournament
// All durations are expressed in seconds
case class TournamentClock(limit: Int, increment: Int) {
def limitInMinutes = chessClock.limitInMinutes
def show = chessClock.show
lazy val chessClock = chess.Clock(limit, increment)
def hasIncrement = increment > 0
}
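
// A minimal usage sketch (not part of the original source): a "5+2" clock, i.e. a
// 300-second limit with a 2-second increment.
// val clock = TournamentClock(limit = 300, increment = 2)
// clock.hasIncrement // true
// clock.limitInMinutes // 5 (delegates to chess.Clock)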
| JimmyMow/lila | modules/tournament/src/main/TournamentClock.scala | Scala | mit | 297 |
package cz.kamenitxan.jakon.core.dynamic.entity
/**
* Created by TPa on 13.04.2020.
*/
abstract class AbstractJsonResponse(val status: ResponseStatus, val data: Any) {
}
| kamenitxan/Jakon | modules/backend/src/main/scala/cz/kamenitxan/jakon/core/dynamic/entity/AbstractJsonResponse.scala | Scala | bsd-3-clause | 174 |
/*
* Copyright 2017 TWO SIGMA OPEN SOURCE, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twosigma.beakerx.scala.chart.xychart.plotitem
import org.junit.Test
import org.scalatest.Matchers._
class LineTest {
@Test
def createLineWithNumberList_lineHasXsYsListsIsNotEmpty(): Unit = { //when
val line = new Line {
y = Seq(10, 20)
}
//then
line.getX shouldNot be ('empty)
line.getY shouldNot be ('empty)
}
@Test
def createLineWithNumberListAndString_lineHasXsYsListsAndDisplayNameIsNotEmpty(): Unit = {
val testString = "display_name"
val line = new Line {
y = Seq(10, 20)
displayName = testString
}
line.getX shouldNot be('empty)
line.getY shouldNot be('empty)
line.getDisplayName shouldBe testString
}
@Test
def createLineWithFourProperties_lineHasXsYsListsDisplayNameAndWidthIsNotEmpty(): Unit = {
val testString = "display_name"
val testWidth = 0.5f
val line = new Line {
displayName = testString
x = Seq(10, 20)
y = Seq(30, 40)
width = testWidth
}
line.getX shouldNot be('empty)
line.getY shouldNot be('empty)
line.getDisplayName shouldBe testString
line.getWidth shouldBe testWidth
}
@Test
def unsetOptions(): Unit = {
val line = new Line
line.getInterpolation shouldBe null
line.interpolation shouldBe None
line.getStyle shouldBe null
line.style shouldBe None
}
} | jpallas/beakerx | kernel/scala/src/test/scala/com/twosigma/beakerx/scala/chart/xychart/plotitem/LineTest.scala | Scala | apache-2.0 | 1,971 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt.internal
/**
* Provides extension methods that allow us to wrap FunctionN instances with another FunctionN that
* delegates to the input FunctionN, but overrides the toString method with a user specified
* label. Anonymous functions have the default java toString which is just the class name. Because
* the anonymous class is generated implicitly, it can be impossible to tell which anonymous
* class you are looking at if there are multiple anonymous functions of the same type in a
* given object/class/package. We can overcome this limitation by explicitly setting the toString
* of the anonymous function instances. This is a fairly crude solution to the problem that
* is not ready to be exposed to users. Until the api matures or we decide that it's worth
* exposing to users, these should remain sbt package private.
*/
private[sbt] object LabeledFunctions {
/**
* Adds extension methods to a zero argument function.
* @param f the function to extend
* @tparam R the function result type
*/
private[sbt] implicit class Function0Ops[R](val f: () => R) extends AnyVal {
/**
* Add a label to the function.
* @param string the new toString method for the function
* @return a wrapped function with an overridden toString method.
*/
def label(string: String): () => R = new LabeledFunction0(f, string)
}
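
  // A minimal usage sketch (not part of the original source): labeling an anonymous
  // thunk so that its toString is meaningful in logs and error messages.
  // val thunk: () => Int = (() => 42).label("answerThunk")
  // thunk.toString // "answerThunk"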
/**
* Adds extension methods to a single argument function.
* @param f the function to extend
* @tparam T the input parameter
* @tparam R the function result type
*/
private[sbt] implicit class Function1Ops[T, R](val f: T => R) extends AnyVal {
/**
* Add a label to the function.
* @param string the new toString method for the function
* @return a wrapped function with an overridden toString method.
*/
def label(string: String): T => R = new LabeledFunction1(f, string)
}
/**
* Adds extension methods to a two argument function.
* @param f the function to extend
* @tparam T1 the first function input parameter
* @tparam T2 the second function input parameter
* @tparam R the function result type
*/
private[sbt] implicit class Function2Ops[T1, T2, R](val f: (T1, T2) => R) extends AnyVal {
/**
* Add a label to the function.
* @param string the new toString method for the function
* @return a wrapped function with an overridden toString method.
*/
def label(string: String): (T1, T2) => R = new LabeledFunction2(f, string)
}
/**
* Adds extension methods to a three argument function.
* @param f the function to extend
* @tparam T1 the first function input parameter
* @tparam T2 the second function input parameter
* @tparam T3 the third function input parameter
* @tparam R the function result type
*/
private[sbt] implicit class Function3Ops[T1, T2, T3, R](val f: (T1, T2, T3) => R) extends AnyVal {
/**
* Add a label to the function.
* @param string the new toString method for the function
* @return a wrapped function with an overridden toString method.
*/
def label(string: String): (T1, T2, T3) => R = new LabeledFunction3(f, string)
}
/**
* Adds extension methods to a three argument function.
* @param f the function to extend
* @tparam T1 the first function input parameter
* @tparam T2 the second function input parameter
* @tparam T3 the third function input parameter
* @tparam T4 the fourth function input parameter
* @tparam R the function result type
*/
private[sbt] implicit class Function4Ops[T1, T2, T3, T4, R](val f: (T1, T2, T3, T4) => R)
extends AnyVal {
/**
* Add a label to the function.
* @param string the new toString method for the function
* @return a wrapped function with an overridden toString method.
*/
def label(string: String): (T1, T2, T3, T4) => R = new LabeledFunction4(f, string)
}
private class LabeledFunction0[+R](private val f: () => R, label: String) extends (() => R) {
override def apply(): R = f()
override def toString: String = label
override def equals(o: Any): Boolean = o match {
case that: LabeledFunction0[R] @unchecked => this.f == that.f
case that: (() => R) @unchecked => this.f == that
case _ => false
}
override def hashCode: Int = f.hashCode
}
private class LabeledFunction1[-T, +R](private val f: T => R, label: String) extends (T => R) {
override def apply(t: T): R = f(t)
override def toString: String = label
override def equals(o: Any): Boolean = o match {
case that: LabeledFunction1[T, R] @unchecked => this.f == that.f
case that: (T => R) @unchecked => this.f == that
case _ => false
}
override def hashCode: Int = f.hashCode
}
private class LabeledFunction2[-T1, -T2, +R](private val f: (T1, T2) => R, label: String)
extends ((T1, T2) => R) {
override def apply(t1: T1, t2: T2): R = f(t1, t2)
override def toString: String = label
override def equals(o: Any): Boolean = o match {
case that: LabeledFunction2[T1, T2, R] @unchecked => this.f == that.f
case that: ((T1, T2) => R) @unchecked => this.f == that
case _ => false
}
override def hashCode: Int = f.hashCode
}
private class LabeledFunction3[-T1, -T2, -T3, +R](private val f: (T1, T2, T3) => R, label: String)
extends ((T1, T2, T3) => R) {
override def apply(t1: T1, t2: T2, t3: T3): R = f(t1, t2, t3)
override def toString: String = label
override def equals(o: Any): Boolean = o match {
case that: LabeledFunction3[T1, T2, T3, R] @unchecked => this.f == that.f
case that: ((T1, T2, T3) => R) @unchecked => this.f == that
case _ => false
}
override def hashCode: Int = f.hashCode
}
  private class LabeledFunction4[-T1, -T2, -T3, -T4, +R](
private val f: (T1, T2, T3, T4) => R,
label: String
) extends ((T1, T2, T3, T4) => R) {
override def apply(t1: T1, t2: T2, t3: T3, t4: T4): R = f(t1, t2, t3, t4)
override def toString: String = label
override def equals(o: Any): Boolean = o match {
case that: LabeledFunction4[T1, T2, T3, T4, R] @unchecked => this.f == that.f
case that: ((T1, T2, T3, T4) => R) @unchecked => this.f == that
case _ => false
}
override def hashCode: Int = f.hashCode
}
}
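// Minimal usage sketch (hypothetical caller inside the sbt package, since the object is
// sbt-private; the function and label below are illustrative only). `label` leaves the wrapped
// function's behaviour untouched but makes its toString readable in logs:
//
//   import sbt.internal.LabeledFunctions._
//
//   val parse: String => Int = ((s: String) => s.toInt).label("parseInt")
//   assert(parse("42") == 42)             // delegation is unchanged
//   assert(parse.toString == "parseInt")  // instead of an anonymous class name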
| sbt/sbt | main-command/src/main/scala/sbt/internal/LabeledFunctions.scala | Scala | apache-2.0 | 6,784 |
package me.snov.sns.model
import spray.json._
case class Configuration(
version: Int = 1,
timestamp: Long = System.currentTimeMillis(),
subscriptions: List[Subscription],
topics: List[Topic]
)
object Configuration extends DefaultJsonProtocol {
implicit val format = jsonFormat4(Configuration.apply)
}
| s12v/sns | src/main/scala/me/snov/sns/model/Configuration.scala | Scala | apache-2.0 | 434 |
package com.antiparagon.cvexperimenter.chessscanner
import org.opencv.core.{Core, Rect}
import org.opencv.imgcodecs.Imgcodecs
import org.scalatest._
/**
* This test file is meant to test all the example chessboard images saved in the
* 'CVExperimenter/images/Chess Scanner/Starting Position' folder.
*
* Created by wmckay on 12/11/16.
*/
class ChessboardFinderTester extends FlatSpec with Matchers {
System.loadLibrary(Core.NATIVE_LIBRARY_NAME)
val IMG_FOLDER = "images/Chess Scanner/Starting Position/"
val OUTPUT_FOLDER = "Debug Images/"
val ALGEBRAIC_NOTATION = "algebraic_notation.png"
val ALGEBRAIC_NOTATION_RECT = new Rect(12, 11, 251, 255)
val BEGINN1 = "Beginn1.png"
val BEGINN1_RECT = new Rect(13, 13, 223, 223)
val BOARD_SETUP = "BoardSetup.jpg"
val BOARD_SETUP_RECT = new Rect(13, 13, 223, 223)
val CHESS_BOARD_SET_UP_MODIFIED = "chess_board_set_up_modified.jpg"
val CHESS_BOARD_SET_UP_MODIFIED_RECT = new Rect(9, 9, 350, 350)
val CHESS_MODIFIED = "chess_modified.png"
val CHESS_MODIFIED_RECT = new Rect(2, 0, 241, 245)
val CHESS_BOARD_NEW = "chess-board-new.jpg"
val CHESS_BOARD_NEW_RECT = new Rect(24, 1, 374, 371)
val CHESS_BOARD_2 = "ChessBoard2.jpg"
val CHESS_BOARD_2_RECT = new Rect(49, 50, 630, 635)
val CHESS_KID_MODIFIED = "chesskid_modified.png"
val CHESS_KID_MODIFIED_RECT = new Rect(42, 10, 632, 632)
val DIAGRAM_OF_CHESS_BOARD_SETUP_MODIFIED = "diagram-of-chess-board-setup_modified.png"
val DIAGRAM_OF_CHESS_BOARD_SETUP_MODIFIED_RECT = new Rect(16, 11, 266, 274)
val FENBOARD = "fenboard.png"
val FENBOARD_RECT = new Rect(14, 0, 242, 244)
val FRITZBSMALL = "fritzbsmall.png"
val FRITZBSMALL_RECT = new Rect(0, 0, 200, 202)
val KID_CHESS_SETUP_BOARD = "kid-chess-setup-board.png"
val KID_CHESS_SETUP_BOARD_RECT = new Rect(18, 17, 322, 323)
val NUMBER = "number.png"
val NUMBER_RECT = new Rect(7, 88, 448, 450)
val POSITION = "position.png"
val POSITION_RECT = new Rect(16, 0, 262, 263)
val PURPLE = "purple.png"
val PURPLE_RECT = new Rect(2, 2, 466, 260)
val STAGRAM_MODIFIED = "stagram_modified.png"
val STAGRAM_MODIFIED_RECT = new Rect(15, 13, 298, 300)
val STARTING_POSITION = "StartingPosition.png"
val STARTING_POSITION_RECT = new Rect(95, 49, 323, 324)
val VP_BLACKARRAY_MODIFIED = "VP-Blackarray_modified.png"
val VP_BLACKARRAY_MODIFIED_RECT = new Rect(6, 8, 280, 279)
"ChessboardFinder" should "return Rect when given image " + ALGEBRAIC_NOTATION in {
val img = Imgcodecs.imread(IMG_FOLDER + ALGEBRAIC_NOTATION)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(ALGEBRAIC_NOTATION) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (ALGEBRAIC_NOTATION_RECT)
}
// "ChessboardFinder" should "return Rect when given image " + BEGINN1 in {
// val img = Imgcodecs.imread(IMG_FOLDER + BEGINN1)
// assert(!img.empty())
// val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(BEGINN1) + "_").findChessboard(img)
// assert(rect.isDefined)
// rect.get should be (BEGINN1_RECT)
// }
"ChessboardFinder" should "return Rect when given image " + BOARD_SETUP in {
val img = Imgcodecs.imread(IMG_FOLDER + BOARD_SETUP)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(BOARD_SETUP) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (BOARD_SETUP_RECT)
}
"ChessboardFinder" should "return Rect when given image " + CHESS_BOARD_SET_UP_MODIFIED in {
val img = Imgcodecs.imread(IMG_FOLDER + CHESS_BOARD_SET_UP_MODIFIED)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(CHESS_BOARD_SET_UP_MODIFIED) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (CHESS_BOARD_SET_UP_MODIFIED_RECT)
}
"ChessboardFinder" should "return Rect when given image " + CHESS_MODIFIED in {
val img = Imgcodecs.imread(IMG_FOLDER + CHESS_MODIFIED)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(CHESS_MODIFIED) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (CHESS_MODIFIED_RECT)
}
"ChessboardFinder" should "return Rect when given image " + CHESS_BOARD_NEW in {
val img = Imgcodecs.imread(IMG_FOLDER + CHESS_BOARD_NEW)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(CHESS_BOARD_NEW) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (CHESS_BOARD_NEW_RECT)
}
"ChessboardFinder" should "return Rect when given image " + CHESS_BOARD_2 in {
val img = Imgcodecs.imread(IMG_FOLDER + CHESS_BOARD_2)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(CHESS_BOARD_2) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (CHESS_BOARD_2_RECT)
}
"ChessboardFinder" should "return Rect when given image " + CHESS_KID_MODIFIED in {
val img = Imgcodecs.imread(IMG_FOLDER + CHESS_KID_MODIFIED)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(CHESS_KID_MODIFIED) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (CHESS_KID_MODIFIED_RECT)
}
"ChessboardFinder" should "return Rect when given image " + DIAGRAM_OF_CHESS_BOARD_SETUP_MODIFIED in {
val img = Imgcodecs.imread(IMG_FOLDER + DIAGRAM_OF_CHESS_BOARD_SETUP_MODIFIED)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(DIAGRAM_OF_CHESS_BOARD_SETUP_MODIFIED) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (DIAGRAM_OF_CHESS_BOARD_SETUP_MODIFIED_RECT)
}
"ChessboardFinder" should "return Rect when given image " + FENBOARD in {
val img = Imgcodecs.imread(IMG_FOLDER + FENBOARD)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(FENBOARD) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (FENBOARD_RECT)
}
"ChessboardFinder" should "return Rect when given image " + FRITZBSMALL in {
val img = Imgcodecs.imread(IMG_FOLDER + FRITZBSMALL)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(FRITZBSMALL) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (FRITZBSMALL_RECT)
}
"ChessboardFinder" should "return Rect when given image " + KID_CHESS_SETUP_BOARD in {
val img = Imgcodecs.imread(IMG_FOLDER + KID_CHESS_SETUP_BOARD)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(KID_CHESS_SETUP_BOARD) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (KID_CHESS_SETUP_BOARD_RECT)
}
"ChessboardFinder" should "return Rect when given image " + NUMBER in {
val img = Imgcodecs.imread(IMG_FOLDER + NUMBER)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(NUMBER) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (NUMBER_RECT)
}
"ChessboardFinder" should "return Rect when given image " + POSITION in {
val img = Imgcodecs.imread(IMG_FOLDER + POSITION)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(POSITION) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (POSITION_RECT)
}
"ChessboardFinder" should "return Rect when given image " + PURPLE in {
val img = Imgcodecs.imread(IMG_FOLDER + PURPLE)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(PURPLE) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (PURPLE_RECT)
}
"ChessboardFinder" should "return Rect when given image " + STAGRAM_MODIFIED in {
val img = Imgcodecs.imread(IMG_FOLDER + STAGRAM_MODIFIED)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(STAGRAM_MODIFIED) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (STAGRAM_MODIFIED_RECT)
}
"ChessboardFinder" should "return Rect when given image " + STARTING_POSITION in {
val img = Imgcodecs.imread(IMG_FOLDER + STARTING_POSITION)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(STARTING_POSITION) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (STARTING_POSITION_RECT)
}
"ChessboardFinder" should "return Rect when given image " + VP_BLACKARRAY_MODIFIED in {
val img = Imgcodecs.imread(IMG_FOLDER + VP_BLACKARRAY_MODIFIED)
assert(!img.empty())
val rect = ChessboardFinder(OUTPUT_FOLDER + removeExt(VP_BLACKARRAY_MODIFIED) + "_").findChessboard(img)
assert(rect.isDefined)
rect.get should be (VP_BLACKARRAY_MODIFIED_RECT)
}
def removeExt(filename: String): String = {
if(filename.contains(".")) filename.substring(0, filename.lastIndexOf('.'))
else filename
}
}
| antiparagon/CVExperimenter | src/test/scala/com/antiparagon/cvexperimenter/chessscanner/ChessboardFinderTester.scala | Scala | mit | 8,896 |
package com.trafficland.augmentsbt.distribute
import com.typesafe.sbt.packager.Keys._
import com.typesafe.sbt.packager.linux.LinuxPlugin.autoImport.{Linux, daemonUser}
import com.typesafe.sbt.packager.rpm.RpmPlugin.autoImport.{rpmDaemonLogFile, rpmVendor}
import sbt._
import com.trafficland.augmentsbt.rpm.RPMPlugin
object StartupScriptPlugin extends AutoPlugin {
import autoImport._
override def requires = RPMPlugin
object autoImport {
val startScriptMainArguments: SettingKey[Seq[String]] = SettingKey[Seq[String]](
"start-script-main-arguments",
"arguments passed to the main class")
    val startScriptJavaOptions: SettingKey[Seq[String]] = SettingKey[Seq[String]](
      "start-script-java-options",
      "option pairs for the java executable (with -D flag if necessary, i.e., \"-Dsome.value=1337\")")
val startScriptConfigFileName: SettingKey[String] = SettingKey[String](
"start-script-config-file-name",
"configuration file name passed as -Dconfig.file system setting")
val loggingConfigFileName: SettingKey[Option[String]] = SettingKey[Option[String]](
"logging-config-file",
"Logback configuration file in the conf directory")
}
override lazy val projectSettings = Seq(
startScriptJavaOptions := Seq.empty,
startScriptMainArguments := Seq.empty,
daemonUser in Linux := "coreservices",
rpmDaemonLogFile := "stdout.log",
defaultLinuxLogsLocation := s"/var/log/${rpmVendor.value}",
startScriptConfigFileName := "prod.conf",
loggingConfigFileName := Some("logback.xml"),
executableScriptName := "start",
bashScriptExtraDefines <++= (loggingConfigFileName, startScriptJavaOptions, startScriptConfigFileName, startScriptMainArguments) map { (logOpt, extraOpts, config, mainArgs) =>
val loggingArgOpt = logOpt.map { log =>
s"-Dlogback.configurationFile=$$app_home/../conf/$log"
}
val configArg = s"-Dconfig.file=$$app_home/../conf/$config"
val javaArgs = extraOpts ++ loggingArgOpt :+ configArg
val addJavaArgs = javaArgs.map(arg => s"addJava $arg")
val addMainArgs = mainArgs.map(arg => s"addApp $arg")
addMainArgs ++ addJavaArgs
}
)
}
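// Sketch of how a consuming build might override these keys (the values are illustrative only;
// the defaults above are supplied by projectSettings once the plugin is enabled):
//
//   // build.sbt
//   enablePlugins(StartupScriptPlugin)
//   startScriptConfigFileName := "staging.conf"
//   startScriptJavaOptions    := Seq("-Xmx512m", "-Dhttp.port=9000")
//   startScriptMainArguments  := Seq("--migrate")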
| ereichert/augment-sbt | src/main/scala/com/trafficland/augmentsbt/distribute/StartupScriptPlugin.scala | Scala | apache-2.0 | 2,203 |
package controllers
import play.api.mvc._
import models.{Lot, NeoStart}
import play.api.libs.json.{Json, JsArray}
object Application extends Controller {
def index = Action {
Ok(views.html.index("Welcome to Infostark", models.Lot.all))
}
def search = Action {
Ok(views.html.lot.search("Infostark", models.Lot.all))
}
def users = Action {
NeoStart.increase
Ok(views.html.users(NeoStart.mainName))
}
} | animotron/panimo | app/controllers/Application.scala | Scala | agpl-3.0 | 432 |
package pl.writeonly.son2.impl
object Types {
val CLI = "cli"
val MAIN = "main"
val PIPER = "piper"
val STREAMER = "streamer"
}
| writeonly/son2 | scallions-clis/scallions-main/src/test/scala/pl/writeonly/son2/impl/Types.scala | Scala | apache-2.0 | 137 |
package roc
package postgresql
import java.nio.charset.StandardCharsets
import org.scalacheck.Gen
import org.specs2.ScalaCheck
/** Used for generating valid Postgresql Lexical structures
*
* @see http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html
 * for more on what constitutes a valid SQL Identifier
*/
trait PostgresqlLexicalGen extends ScalaCheck {
// see http://www.postgresql.org/docs/current/static/sql-syntax-lexical.html
  // for more on what constitutes a valid SQL Identifier
  protected val UnicodeCapitalEnglish = '\u0041' to '\u005A'
  protected val UnicodeLowerEnglish = '\u0061' to '\u007A'
  protected val UnicodeNonLatin = '\u0400' to '\u1FFE'
protected val UnicodeUnderscore = "_".getBytes(StandardCharsets.UTF_8).map(_.toChar).head
protected val UnicodeDollarSign = "$".getBytes(StandardCharsets.UTF_8).map(_.toChar).head
  protected val UnicodeNumbers = '\u0030' to '\u0039'
protected val BeginningChars = UnicodeUnderscore :: List(UnicodeCapitalEnglish,
UnicodeLowerEnglish, UnicodeNonLatin).flatten
protected val SubsequentChars = UnicodeDollarSign :: BeginningChars ::: UnicodeNumbers.toList
protected lazy val genValidBeginningIdentifier: Gen[Char] = for {
char <- Gen.oneOf(BeginningChars)
} yield char
protected lazy val genValidSubsequentIdentifier: Gen[Char] = for {
char <- Gen.oneOf(SubsequentChars)
} yield char
protected lazy val genValidSQLIdentifier: Gen[String] = for {
firstChar <- genValidBeginningIdentifier
chars <- Gen.listOf(genValidSubsequentIdentifier)
} yield {
val xs = firstChar :: chars
xs.map(_.toString).reduce(_ + _)
}
protected lazy val genValidNumberOfShortColumns: Gen[Short] =
Gen.chooseNum[Short](0, 1663) // the maximum number of Postgresql columns is 1663
protected lazy val genValidNumberOfIntColumns: Gen[Int] =
genValidNumberOfShortColumns.map(_.toInt)
protected lazy val genValidNonZeroNumberOfShortColumns: Gen[Short] =
Gen.chooseNum[Short](1, 1663) // the maximum number of Postgresql columns is 1663
}
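// Example sketch (illustrative only): a spec that mixes in this trait can drive ScalaCheck
// properties directly from these generators, e.g.
//
//   import org.scalacheck.Prop.forAll
//
//   val identifiersStartLegally = forAll(genValidSQLIdentifier) { id =>
//     id.nonEmpty && BeginningChars.contains(id.head)
//   }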
| finagle/roc | core/src/test/scala/roc/postgresql/PostgresqlLexicalGen.scala | Scala | bsd-3-clause | 2,113 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.util.concurrent._
import java.util.concurrent.locks.ReentrantLock
import scala.concurrent.{Awaitable, ExecutionContext, ExecutionContextExecutor, Future}
import scala.concurrent.duration.{Duration, FiniteDuration}
import scala.language.higherKinds
import scala.util.control.NonFatal
import com.google.common.util.concurrent.ThreadFactoryBuilder
import org.apache.spark.SparkException
import org.apache.spark.rpc.RpcAbortException
private[spark] object ThreadUtils {
private val sameThreadExecutionContext =
ExecutionContext.fromExecutorService(sameThreadExecutorService())
// Inspired by Guava MoreExecutors.sameThreadExecutor; inlined and converted
// to Scala here to avoid Guava version issues
def sameThreadExecutorService(): ExecutorService = new AbstractExecutorService {
private val lock = new ReentrantLock()
private val termination = lock.newCondition()
private var runningTasks = 0
private var serviceIsShutdown = false
override def shutdown(): Unit = {
lock.lock()
try {
serviceIsShutdown = true
} finally {
lock.unlock()
}
}
override def shutdownNow(): java.util.List[Runnable] = {
shutdown()
java.util.Collections.emptyList()
}
override def isShutdown: Boolean = {
lock.lock()
try {
serviceIsShutdown
} finally {
lock.unlock()
}
}
override def isTerminated: Boolean = synchronized {
lock.lock()
try {
serviceIsShutdown && runningTasks == 0
} finally {
lock.unlock()
}
}
override def awaitTermination(timeout: Long, unit: TimeUnit): Boolean = {
var nanos = unit.toNanos(timeout)
lock.lock()
try {
while (nanos > 0 && !isTerminated()) {
nanos = termination.awaitNanos(nanos)
}
isTerminated()
} finally {
lock.unlock()
}
}
override def execute(command: Runnable): Unit = {
lock.lock()
try {
if (isShutdown()) throw new RejectedExecutionException("Executor already shutdown")
runningTasks += 1
} finally {
lock.unlock()
}
try {
command.run()
} finally {
lock.lock()
try {
runningTasks -= 1
if (isTerminated()) termination.signalAll()
} finally {
lock.unlock()
}
}
}
}
/**
* An `ExecutionContextExecutor` that runs each task in the thread that invokes `execute/submit`.
* The caller should make sure the tasks running in this `ExecutionContextExecutor` are short and
* never block.
*/
def sameThread: ExecutionContextExecutor = sameThreadExecutionContext
/**
* Create a thread factory that names threads with a prefix and also sets the threads to daemon.
*/
def namedThreadFactory(prefix: String): ThreadFactory = {
new ThreadFactoryBuilder().setDaemon(true).setNameFormat(prefix + "-%d").build()
}
/**
* Wrapper over newCachedThreadPool. Thread names are formatted as prefix-ID, where ID is a
* unique, sequentially assigned integer.
*/
def newDaemonCachedThreadPool(prefix: String): ThreadPoolExecutor = {
val threadFactory = namedThreadFactory(prefix)
Executors.newCachedThreadPool(threadFactory).asInstanceOf[ThreadPoolExecutor]
}
/**
* Create a cached thread pool whose max number of threads is `maxThreadNumber`. Thread names
* are formatted as prefix-ID, where ID is a unique, sequentially assigned integer.
*/
def newDaemonCachedThreadPool(
prefix: String, maxThreadNumber: Int, keepAliveSeconds: Int = 60): ThreadPoolExecutor = {
val threadFactory = namedThreadFactory(prefix)
val threadPool = new ThreadPoolExecutor(
maxThreadNumber, // corePoolSize: the max number of threads to create before queuing the tasks
      maxThreadNumber, // maximumPoolSize: because we use LinkedBlockingQueue, this one is not used
keepAliveSeconds,
TimeUnit.SECONDS,
new LinkedBlockingQueue[Runnable],
threadFactory)
threadPool.allowCoreThreadTimeOut(true)
threadPool
}
/**
* Wrapper over newFixedThreadPool. Thread names are formatted as prefix-ID, where ID is a
* unique, sequentially assigned integer.
*/
def newDaemonFixedThreadPool(nThreads: Int, prefix: String): ThreadPoolExecutor = {
val threadFactory = namedThreadFactory(prefix)
Executors.newFixedThreadPool(nThreads, threadFactory).asInstanceOf[ThreadPoolExecutor]
}
/**
* Wrapper over newSingleThreadExecutor.
*/
def newDaemonSingleThreadExecutor(threadName: String): ExecutorService = {
val threadFactory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat(threadName).build()
Executors.newSingleThreadExecutor(threadFactory)
}
/**
* Wrapper over ScheduledThreadPoolExecutor.
*/
def newDaemonSingleThreadScheduledExecutor(threadName: String): ScheduledExecutorService = {
val threadFactory = new ThreadFactoryBuilder().setDaemon(true).setNameFormat(threadName).build()
val executor = new ScheduledThreadPoolExecutor(1, threadFactory)
// By default, a cancelled task is not automatically removed from the work queue until its delay
// elapses. We have to enable it manually.
executor.setRemoveOnCancelPolicy(true)
executor
}
/**
* Wrapper over ScheduledThreadPoolExecutor.
*/
def newDaemonThreadPoolScheduledExecutor(threadNamePrefix: String, numThreads: Int)
: ScheduledExecutorService = {
val threadFactory = new ThreadFactoryBuilder()
.setDaemon(true)
.setNameFormat(s"$threadNamePrefix-%d")
.build()
val executor = new ScheduledThreadPoolExecutor(numThreads, threadFactory)
// By default, a cancelled task is not automatically removed from the work queue until its delay
// elapses. We have to enable it manually.
executor.setRemoveOnCancelPolicy(true)
executor
}
/**
* Run a piece of code in a new thread and return the result. Exception in the new thread is
* thrown in the caller thread with an adjusted stack trace that removes references to this
* method for clarity. The exception stack traces will be like the following
*
* SomeException: exception-message
* at CallerClass.body-method (sourcefile.scala)
* at ... run in separate thread using org.apache.spark.util.ThreadUtils ... ()
* at CallerClass.caller-method (sourcefile.scala)
* ...
*/
def runInNewThread[T](
threadName: String,
isDaemon: Boolean = true)(body: => T): T = {
@volatile var exception: Option[Throwable] = None
@volatile var result: T = null.asInstanceOf[T]
val thread = new Thread(threadName) {
override def run(): Unit = {
try {
result = body
} catch {
case NonFatal(e) =>
exception = Some(e)
}
}
}
thread.setDaemon(isDaemon)
thread.start()
thread.join()
exception match {
case Some(realException) =>
// Remove the part of the stack that shows method calls into this helper method
// This means drop everything from the top until the stack element
// ThreadUtils.runInNewThread(), and then drop that as well (hence the `drop(1)`).
val baseStackTrace = Thread.currentThread().getStackTrace().dropWhile(
! _.getClassName.contains(this.getClass.getSimpleName)).drop(1)
// Remove the part of the new thread stack that shows methods call from this helper method
val extraStackTrace = realException.getStackTrace.takeWhile(
! _.getClassName.contains(this.getClass.getSimpleName))
// Combine the two stack traces, with a place holder just specifying that there
// was a helper method used, without any further details of the helper
val placeHolderStackElem = new StackTraceElement(
s"... run in separate thread using ${ThreadUtils.getClass.getName.stripSuffix("$")} ..",
" ", "", -1)
val finalStackTrace = extraStackTrace ++ Seq(placeHolderStackElem) ++ baseStackTrace
// Update the stack trace and rethrow the exception in the caller thread
realException.setStackTrace(finalStackTrace)
throw realException
case None =>
result
}
}
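  // Example (sketch; `loadConfiguration()` is a hypothetical call): the body runs in a dedicated
  // thread, and any failure is rethrown here with the adjusted stack trace described above.
  //
  //   val conf: Map[String, String] =
  //     ThreadUtils.runInNewThread("config-loader") { loadConfiguration() }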
/**
* Construct a new ForkJoinPool with a specified max parallelism and name prefix.
*/
def newForkJoinPool(prefix: String, maxThreadNumber: Int): ForkJoinPool = {
// Custom factory to set thread names
val factory = new ForkJoinPool.ForkJoinWorkerThreadFactory {
override def newThread(pool: ForkJoinPool) =
new ForkJoinWorkerThread(pool) {
setName(prefix + "-" + super.getName)
}
}
new ForkJoinPool(maxThreadNumber, factory,
null, // handler
false // asyncMode
)
}
// scalastyle:off awaitresult
/**
* Preferred alternative to `Await.result()`.
*
* This method wraps and re-throws any exceptions thrown by the underlying `Await` call, ensuring
* that this thread's stack trace appears in logs.
*
* In addition, it calls `Awaitable.result` directly to avoid using `ForkJoinPool`'s
* `BlockingContext`. Codes running in the user's thread may be in a thread of Scala ForkJoinPool.
* As concurrent executions in ForkJoinPool may see some [[ThreadLocal]] value unexpectedly, this
* method basically prevents ForkJoinPool from running other tasks in the current waiting thread.
* In general, we should use this method because many places in Spark use [[ThreadLocal]] and it's
* hard to debug when [[ThreadLocal]]s leak to other tasks.
*/
@throws(classOf[SparkException])
def awaitResult[T](awaitable: Awaitable[T], atMost: Duration): T = {
try {
// `awaitPermission` is not actually used anywhere so it's safe to pass in null here.
// See SPARK-13747.
val awaitPermission = null.asInstanceOf[scala.concurrent.CanAwait]
awaitable.result(atMost)(awaitPermission)
} catch {
case e: SparkFatalException =>
throw e.throwable
      // TimeoutException and RpcAbortException are thrown in the current thread, so there is no
      // need to wrap the exception.
case NonFatal(t)
if !t.isInstanceOf[TimeoutException] && !t.isInstanceOf[RpcAbortException] =>
throw new SparkException("Exception thrown in awaitResult: ", t)
}
}
// scalastyle:on awaitresult
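  // Example (sketch): preferred over Await.result so that a failed future is rethrown as a
  // SparkException carrying this caller's stack trace.
  //
  //   import scala.concurrent.Future
  //   import scala.concurrent.duration._
  //
  //   implicit val ec = ThreadUtils.sameThread
  //   val answer: Int = ThreadUtils.awaitResult(Future(41 + 1), 10.seconds)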
// scalastyle:off awaitready
/**
* Preferred alternative to `Await.ready()`.
*
* @see [[awaitResult]]
*/
@throws(classOf[SparkException])
def awaitReady[T](awaitable: Awaitable[T], atMost: Duration): awaitable.type = {
try {
// `awaitPermission` is not actually used anywhere so it's safe to pass in null here.
// See SPARK-13747.
val awaitPermission = null.asInstanceOf[scala.concurrent.CanAwait]
awaitable.ready(atMost)(awaitPermission)
} catch {
      // TimeoutException is thrown in the current thread, so there is no need to wrap the exception.
case NonFatal(t) if !t.isInstanceOf[TimeoutException] =>
throw new SparkException("Exception thrown in awaitResult: ", t)
}
}
// scalastyle:on awaitready
def shutdown(
executor: ExecutorService,
gracePeriod: Duration = FiniteDuration(30, TimeUnit.SECONDS)): Unit = {
executor.shutdown()
executor.awaitTermination(gracePeriod.toMillis, TimeUnit.MILLISECONDS)
if (!executor.isShutdown) {
executor.shutdownNow()
}
}
/**
   * Transforms the input collection by applying the given function to each element in parallel.
   * Compared to the map() method of Scala parallel collections, this method can be interrupted
   * at any time. This is useful for canceling task execution, for example.
*
* @param in - the input collection which should be transformed in parallel.
* @param prefix - the prefix assigned to the underlying thread pool.
   * @param maxThreads - the maximum number of threads that can be created during execution.
   * @param f - the lambda function to be applied to each element of `in`.
   * @tparam I - the type of elements in the input collection.
   * @tparam O - the type of elements in the resulting collection.
   * @return a new collection obtained by applying the lambda function `f` to each element of the
   *         input collection `in`.
*/
def parmap[I, O](in: Seq[I], prefix: String, maxThreads: Int)(f: I => O): Seq[O] = {
val pool = newForkJoinPool(prefix, maxThreads)
try {
implicit val ec = ExecutionContext.fromExecutor(pool)
val futures = in.map(x => Future(f(x)))
val futureSeq = Future.sequence(futures)
awaitResult(futureSeq, Duration.Inf)
} finally {
pool.shutdownNow()
}
}
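  // Example (sketch; `urls` and `fetch` are hypothetical): transform a collection on a bounded,
  // interruptible pool instead of Scala parallel collections.
  //
  //   val bodies: Seq[String] = ThreadUtils.parmap(urls, "url-fetch", maxThreads = 8)(fetch)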
}
| jkbradley/spark | core/src/main/scala/org/apache/spark/util/ThreadUtils.scala | Scala | apache-2.0 | 13,621 |
package com.criteo.sre.storage.sgrastar.singularity
package lucene
import java.nio.file.Paths
import org.apache.cassandra.config.DatabaseDescriptor
import org.slf4j.LoggerFactory
import scala.collection.mutable.HashMap
object MetricsIndexManager {
private val log = LoggerFactory.getLogger(getClass)
private val mainDataDirectory = DatabaseDescriptor.loadConfig().data_file_directories(0)
private val indices = HashMap[String, MetricsIndex]()
  /** Gets or creates a keyspace-specific metrics index.
*
* This method is thread-safe, and is not performance-critical.
*/
def getOrCreateKeyspaceIndex(keyspace: String) = synchronized {
indices.get(keyspace) match {
case Some(index) => {
index
}
case None => {
val indexPath = Option(Paths.get(mainDataDirectory, "sgrastar-idx", keyspace))
val index = new MetricsIndex(keyspace, indexPath, 100000)
indices.put(keyspace, index)
index
}
}
}
}
| dpanth3r/cassandra-graphite-poc | src/main/scala/sgrastar/singularity/lucene/MetricsIndexManager.scala | Scala | apache-2.0 | 986 |
/*
* SPDX-License-Identifier: Apache-2.0
* Copyright 2016-2020 Daniel Urban and contributors listed in NOTICE.txt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package dev.tauri.choam
package kcas
import scala.concurrent.ExecutionContext
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalactic.TypeCheckedTripleEquals
class EMCASSpec
extends AnyFlatSpec
with Matchers
with TypeCheckedTripleEquals {
sealed trait Obj
final case object A extends Obj
final case object B extends Obj
final case object C extends Obj
implicit val ec: ExecutionContext =
ExecutionContext.global
private def polluteTheHeap[A](desc: AnyRef): A =
desc.asInstanceOf[A]
"EMCAS" should "allow null as ov or nv" in {
val r1 = Ref.mk[String](null)
val r2 = Ref.mk[String]("x")
val desc = EMCAS
.start()
.withCAS(r1, null, "x")
.withCAS(r2, "x", null)
val snap = desc.snapshot()
assert(desc.tryPerform())
assert(EMCAS.tryReadOne(r1) eq "x")
assert(EMCAS.tryReadOne(r2) eq null)
assert(!snap.load().tryPerform())
assert(EMCAS.tryReadOne(r1) eq "x")
assert(EMCAS.tryReadOne(r2) eq null)
}
"EMCAS Read" should "help the other operation" in {
val r1 = Ref.mkWithId("r1")(0L, 0L, 0L, 0L)
val r2 = Ref.mkWithId("r2")(0L, 0L, 0L, 42L)
val other: EMCAS.MCASDescriptor = EMCAS
.start()
.withCAS(r1, "r1", "x")
.withCAS(r2, "r2", "y")
.asInstanceOf[EMCAS.MCASDescriptor]
other.sort()
val d0 = other.words.get(0)
assert(d0.address eq r1)
r1.unsafeSet(polluteTheHeap[String](d0))
val res = EMCAS.tryReadOne(r1)
res should === ("x")
EMCAS.tryReadOne(r1) should === ("x")
EMCAS.tryReadOne(r2) should === ("y")
assert(other.getStatus() eq EMCASStatus.SUCCESSFUL)
}
it should "roll back the other op if necessary" in {
val r1 = Ref.mkWithId("r1")(0L, 0L, 0L, 0L)
val r2 = Ref.mkWithId("r2")(0L, 0L, 0L, 99L)
val other = EMCAS
.start()
.withCAS(r1, "r1", "x")
.withCAS(r2, "zzz", "y") // this will fail
.asInstanceOf[EMCAS.MCASDescriptor]
other.sort()
val d0 = other.words.get(0)
assert(d0.address eq r1)
r1.unsafeSet(polluteTheHeap[String](d0))
val res = EMCAS.tryReadOne(r1)
res should === ("r1")
EMCAS.tryReadOne(r1) should === ("r1")
EMCAS.tryReadOne(r2) should === ("r2")
}
}
| durban/exp-reagents | core/src/test/scala/dev/tauri/choam/kcas/EMCASSpec.scala | Scala | apache-2.0 | 2,949 |
package scalariform.formatter
// format: OFF
class IfExprFormatterTest extends AbstractExpressionFormatterTest {
"if(x>y)(x)else(y)" ==> "if (x > y) (x) else (y)"
"if (true) 3 else 4" ==> "if (true) 3 else 4"
"""if (true)
|println("Hello world")""" ==>
"""if (true)
| println("Hello world")"""
"""if (true) // Comment
|println("Hello world")""" ==>
"""if (true) // Comment
| println("Hello world")"""
"""if (1 == 1)
|println("wibble")
|else
|println("wobble")""" ==>
"""if (1 == 1)
| println("wibble")
|else
| println("wobble")"""
"""if (1 == 1)
|println("wibble")
|else if (1 == 2)
|println("wobble")
|else
|println("wobble")""" ==>
"""if (1 == 1)
| println("wibble")
|else if (1 == 2)
| println("wobble")
|else
| println("wobble")"""
"""if (1 == 1) println("wibble")
| else println("wobble")""" ==>
"""if (1 == 1) println("wibble")
|else println("wobble")"""
""" if (1==1) println("wibble") """ ==> """if (1 == 1) println("wibble")"""
"if(x>y){x}else{y}" ==> "if (x > y) { x } else { y }"
"""if (x > y) {
|println("Foo") }""" ==>
"""if (x > y) {
| println("Foo")
|}"""
"""if (x > y) {
|println("Foo") } else {
|println("Bar") }""" ==>
"""if (x > y) {
| println("Foo")
|} else {
| println("Bar")
|}"""
"""if (x > y) {
|println("Foo")
|println("Bar")}""" ==>
"""if (x > y) {
| println("Foo")
| println("Bar")
|}"""
"""if (1 == 2) {
|println("bob")
|println("bob")
|if (2 == 3) {
|println("fred")
|println("fred")
|} else {
|if (3 == 4) {
|println("bob")
|println("bob")
|} else if (4 == 5) {
|println("fred")
|println("fred")
|}
|println("bob")
|}
|}""" ==>
"""if (1 == 2) {
| println("bob")
| println("bob")
| if (2 == 3) {
| println("fred")
| println("fred")
| } else {
| if (3 == 4) {
| println("bob")
| println("bob")
| } else if (4 == 5) {
| println("fred")
| println("fred")
| }
| println("bob")
| }
|}"""
"""if (1 == 2) { println("bob")
|}""" ==>
"""if (1 == 2) {
| println("bob")
|}"""
"""if (1 == 2) { println("bob")
|println("fred")
|}""" ==>
"""if (1 == 2) {
| println("bob")
| println("fred")
|}"""
"""if (true) {
| println("wobble") }
|else {
|println("wobble") }""" ==>
"""if (true) {
| println("wobble")
|} else {
| println("wobble")
|}"""
"""if (true){}""" ==>
"""if (true) {}"""
"""if (true){
|}""" ==>
"""if (true) {
|}"""
"if (true){;}" ==> "if (true) { ; }"
"""if (1 == 2) {println("wibble");println("wobble")}""" ==>
"""if (1 == 2) { println("wibble"); println("wobble") }"""
"""if (true)
|{ x }""" ==>
"""if (true) { x }"""
"""if (a)
|if (b)
|x
|else
|y""" ==>
"""if (a)
| if (b)
| x
| else
| y"""
"""if (true) a
|else
|if (true)
|b""" ==>
"""if (true) a
|else if (true)
| b"""
"""if /*a*/ ( /*b*/ true /*c*/ ) /*d*/ {
| 1
|} /*e*/ else /*f*/ {
| 2
|}""" ==>
"""if /*a*/ ( /*b*/ true /*c*/ ) /*d*/ {
| 1
|} /*e*/ else /*f*/ {
| 2
|}"""
"""if (true) 1 else
|{ 2 } """ ==>
"""if (true) 1 else { 2 }"""
"""if (true) { 1
|}
|else 2""" ==>
"""if (true) {
| 1
|} else 2"""
"""if (true) 1 else
|{
|2 }""" ==>
"""if (true) 1 else {
| 2
|}"""
"""if (true) {
| 1
|} + 2
| else {
|2
|} + 2""" ==>
"""if (true) {
| 1
|} + 2
|else {
| 2
|} + 2"""
"""if (condition) // comment
|1
|else // comment
|2""" ==>
"""if (condition) // comment
| 1
|else // comment
| 2"""
"""if (c)// comment
| 1
|else// comment
| 2""" ==>
"""if (c) // comment
| 1
|else // comment
| 2"""
"""if (true) // Foo
|{} else // Bar
|{}""" ==>
"""if (true) // Foo
|{} else // Bar
|{}"""
"""if//a
| (//b
|true//c
|)//d
|{}//e
|else//f
|{}""" ==>
"""if //a
|( //b
|true //c
|) //d
|{} //e
|else //f
|{}"""
"""if (true)
|for {
| y <- ys}yield {
|1} else {
|2
|}""" ==>
"""if (true)
| for {
| y <- ys
| } yield {
| 1
| }
|else {
| 2
|}"""
"""if (true) 1;
|else 2""" ==>
"""if (true) 1;
|else 2"""
"""if (b)
| c
| { d }
|else
| e""" ==>
"""if (b)
| c { d }
|else
| e"""
"""if (a) {}
|else b""" ==>
"""if (a) {}
|else b"""
"""if (a) {}
|else {}""" ==>
"""if (a) {}
|else {}"""
"""if (a) {println()}
|else {println()}""" ==>
"""if (a) { println() }
|else { println() }"""
"""Some(if (a)
|b
|else
|c)""" ==>
"""Some(if (a)
| b
|else
| c)"""
"""Some(if (a) {
|b
|} else
|c)""" ==>
"""Some(if (a) {
| b
|} else
| c)"""
"""Some(if (a) { b } else
|c)""" ==>
"""Some(if (a) { b } else
| c)"""
"""if (cond)
|42 else
| 42""" ==>
"""if (cond)
| 42
|else
| 42"""
"""if (a)
|b else {
|c
|}""" ==>
"""if (a)
| b
|else {
| c
|}"""
"""if (a)
|b else if (c) {
|d} else e""" ==>
"""if (a)
| b
|else if (c) {
| d
|} else e"""
override val debug = false
}
| gawkermedia/scalariform | scalariform/src/test/scala/scalariform/formatter/IfExprFormatterTest.scala | Scala | mit | 5,595 |
package com.amazzeo.elmtactoe.actors
import akka.actor._
import com.amazzeo.elmtactoe.models._
import com.amazzeo.elmtactoe.gameutils._
sealed trait GameProgress
case object GameNotStarted extends GameProgress
case object GameInProgress extends GameProgress
case object GameFinished extends GameProgress
object GameActor {
case object Subscribe
case object Unsubscribe
def props(name: String) = Props(new GameActor(name: String))
}
case object GameFullException extends Exception("This game is full.")
class GameActor(name: String) extends Actor with GameUtils {
import GameActor._
private var crossPlayer: Option[ActorRef] = None
private var circlePlayer: Option[ActorRef] = None
private var board: Array[Array[BoardSpace]] = newBoard()
private var nextMove: PlayerSpace = Cross
private var gameProgress: GameProgress = GameNotStarted
def receive = {
case Subscribe =>
subcribePlayer(sender())
case p: PlayerMove =>
handlePlayerMove(p)
}
def subcribePlayer(player: ActorRef) = {
if (crossPlayer.isEmpty) {
crossPlayer = Some(player)
player ! SubscibeResult(name, true, Some(Cross))
broadcast(currentState)
} else if (circlePlayer.isEmpty) {
circlePlayer = Some(player)
player ! SubscibeResult(name, true, Some(Circle))
broadcast(currentState)
gameProgress = GameInProgress
} else {
player ! SubscibeResult(name, false, None)
}
}
def broadcast[T](msg: T) = {
crossPlayer.foreach(_ ! msg)
circlePlayer.foreach(_ ! msg)
}
def currentState = GameState(
board = board,
nextMove = nextMove)
def updateNextMove(): Unit = {
nextMove match {
case Cross =>
nextMove = Circle
case Circle =>
nextMove = Cross
}
}
def handlePlayerMove(playerMove: PlayerMove) = {
if (gameProgress == GameInProgress && playerMove.move == nextMove) {
board = updateBoard(board, playerMove.x, playerMove.y, playerMove.move)
updateNextMove()
broadcast(currentState)
detectEndOfGame()
}
}
def detectEndOfGame(): Unit = {
detectWinner(board) match {
case Some(winner) =>
//someone has won
winner match {
case Circle =>
broadcast(GameComplete(false, Some(Circle)))
case Cross =>
broadcast(GameComplete(false, Some(Cross)))
case _ =>
//something has gone wrong here, handle error better or make this impossible.
}
gameProgress = GameFinished
case None =>
isBoardFull(board) match {
case true =>
//send cats game
broadcast(GameComplete(true, None))
gameProgress = GameFinished
case false =>
//do nothing, game still in progress
}
}
}
}
| amazzeo/elmtactoe | app/actors/GameActor.scala | Scala | mit | 2,823 |
package controllers
import play.api.mvc._
import models.{Order, OrderSubmitted}
import org.reactivecouchbase.eventstore.Message
import es.{Broadcaster, Bootstrap}
import akka.pattern.ask
import akka.util.Timeout
import scala.concurrent.duration._
import es.Bootstrap.ec
import play.api.libs.iteratee.{Enumeratee, Concurrent}
import play.api.libs.json.{Json, JsObject}
import play.api.data._
import play.api.data.Forms._
import scala.concurrent.Future
object Application extends Controller {
implicit val timeout = Timeout(5 seconds)
  val toJsString = Enumeratee.map[JsObject] { jso => s"data: ${Json.stringify(jso)}\n\n"}
val creditCardNumberForm = Form(
"creditCardNumber" -> text
)
def index = Action {
Ok(views.html.index())
}
def sse = Action {
Ok.feed(Broadcaster.enumerator.through(toJsString)).as("text/event-stream")
}
def order = Action.async { implicit request =>
creditCardNumberForm.bindFromRequest().fold(
_ => Future(BadRequest("You have to provide a credit card number")),
creditCardNumber => {
val message = Message.create(
OrderSubmitted(
Order(details = "jelly bean", creditCardNumber = creditCardNumber)
)
)
(Bootstrap.processor ? message).map(_ => Ok("Done !!!"))
}
)
}
} | ReactiveCouchbase/ReactiveCouchbase-play | samples/scala/orders/app/controllers/Application.scala | Scala | apache-2.0 | 1,310 |
/*
* @author Philip Stutz
*
* Copyright 2014 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.triplerush.dictionary
import org.scalatest.FlatSpec
import com.signalcollect.util.TestAnnouncements
class DictionarySpec extends FlatSpec with TestAnnouncements {
"ModularDictionary" should "correctly encode and decode a simple string" in {
val d = new ModularDictionary
try {
val id = d("simple")
assert(id == 1)
val contained = d.contains("simple")
assert(contained == true)
val decoded = d(id)
assert(decoded == "simple")
} finally {
d.close()
}
}
it should "correctly encode and decode an encoded string literal" in {
val d = new ModularDictionary
try {
      val id = d("\"Bob\"")
      assert(id == 1)
      val contained = d.contains("\"Bob\"")
      assert(contained == true)
      val decoded = d(id)
      assert(decoded == "\"Bob\"")
} finally {
d.close()
}
}
it should "correctly encode a compressable string" in {
val d = new ModularDictionary
try {
val id = d("prefix#remainder")
assert(id == 1)
val contained = d.contains("prefix#remainder")
assert(contained == true)
} finally {
d.close()
}
}
it should "correctly decode a compressable string" in {
val d = new ModularDictionary
try {
val id = d("prefix#remainder")
val decoded = d(id)
assert(decoded == "prefix#remainder")
} finally {
d.close()
}
}
it should "correctly encode and decode a string with a hash at the end" in {
val d = new ModularDictionary
try {
val id = d("simple#")
assert(id == 1)
val contained = d.contains("simple#")
assert(contained == true)
val decoded = d(id)
assert(decoded == "simple#")
} finally {
d.close()
}
}
it should "correctly encode and decode a string with a hash at the beginning" in {
val d = new ModularDictionary
try {
val id = d("#simple")
assert(id == 1)
val contained = d.contains("#simple")
assert(contained == true)
val decoded = d(id)
assert(decoded == "#simple")
} finally {
d.close()
}
}
it should "support adding entries in parallel" in {
val d = new ModularDictionary
try {
val stringEntries = (1 to 1000).map(s => s + "#" + s)
val ids = stringEntries.par.map(d(_)).toSet
val reverseMapped = ids.map(d(_))
assert(reverseMapped.size == 1000)
assert(stringEntries.toSet == reverseMapped.seq.toSet)
} finally {
d.close()
}
}
it should "support clear" in {
val d = new ModularDictionary
try {
val lowStringEntries = (1 to 1000).map(_.toString)
for (entry <- lowStringEntries.par) {
d(entry)
}
d.clear
val highStringEntries = (1001 to 2000).map(_.toString)
for (entry <- highStringEntries.par) {
d(entry)
}
val reverseMapped = (1 to 1000).map(d(_)).toSet
assert(reverseMapped.size == 1000)
assert(reverseMapped.map(_.toInt).min == 1001)
assert(highStringEntries.toSet == reverseMapped.toSet)
} finally {
d.close()
}
}
"HashDictionary" should "correctly encode and decode a simple string" in {
val d = new HashDictionary
try {
val id = d("simple")
val contained = d.contains("simple")
assert(contained == true)
val decoded = d(id)
assert(decoded == "simple")
} finally {
d.close()
}
}
it should "correctly encode and decode an encoded string literal" in {
val d = new HashDictionary
try {
      val id = d("\"Bob\"")
      val contained = d.contains("\"Bob\"")
      assert(contained == true)
      val decoded = d(id)
      assert(decoded == "\"Bob\"")
} finally {
d.close()
}
}
it should "correctly encode a compressable string" in {
val d = new HashDictionary
try {
val id = d("prefix#remainder")
val contained = d.contains("prefix#remainder")
assert(contained == true)
} finally {
d.close()
}
}
it should "correctly decode a compressable string" in {
val d = new HashDictionary
try {
val id = d("prefix#remainder")
val decoded = d(id)
assert(decoded == "prefix#remainder")
} finally {
d.close()
}
}
it should "correctly encode and decode a string with a hash at the beginning" in {
val d = new HashDictionary
try {
val id = d("#simple")
val contained = d.contains("#simple")
assert(contained == true)
val decoded = d(id)
assert(decoded == "#simple")
} finally {
d.close()
}
}
it should "support adding entries in parallel" in {
val d = new HashDictionary
try {
val stringEntries = (1 to 1000).map(s => s + "#" + s)
val ids = stringEntries.par.map(d(_)).toSet
val reverseMapped = ids.map(d(_))
assert(reverseMapped.size == 1000)
assert(stringEntries.toSet == reverseMapped.seq.toSet)
} finally {
d.close()
}
}
it should "support clear" in {
val d = new HashDictionary
try {
val lowStringEntries = (1 to 1000).map(_.toString)
for (s <- lowStringEntries) {
val id = d(s)
assert(d.contains(id), s"Dictionary does not contain an entry for string $s, which should have ID $id.")
}
d.clear
val highStringEntries = (1001 to 2000).map(_.toString)
highStringEntries.map { s =>
val id = d(s)
assert(d.contains(id), s"Dictionary does not contain an entry for string $s, which should have ID $id.")
}.toSet
(1 to 1000).foreach { s =>
assert(!d.contains(s.toString))
}
assert(highStringEntries.size == 1000)
} finally {
d.close()
}
}
}
| hicolour/triplerush | src/test/scala/com/signalcollect/triplerush/dictionary/DictionarySpec.scala | Scala | apache-2.0 | 6,408 |
package models
import models.db.TransactionTypes
case class TransactionType(
id: Option[Long] = None,
parent: Option[Long] = None,
name: String,
code: String,
ordering: Int
) extends IdentifiableModel(id) {
def formattedName: String = parent match {
case Some(id) => TransactionTypes.byId(id).formattedName + "/" + name
case None => name
}
}
| vatt2001/finprocessor | app/models/TransactionType.scala | Scala | mit | 361 |
package edu.rit.csh.github
import java.io.FileWriter
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.{Minutes, Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
object GithubStream {
def main(args: Array[String]) = {
val conf = new SparkConf()
.setAppName("IRC Wikipedia Page Edit Stream")
.registerKryoClasses(Array(classOf[GithubEvent]))
val sparkContext = new SparkContext(conf)
val ssc = new StreamingContext(sparkContext, Seconds(10))
ssc.checkpoint("/tmp")
val oAuthToken = ""
val stream = ssc.receiverStream(new GithubReceiver("computersciencehouse", oAuthToken, StorageLevel.MEMORY_AND_DISK_2))
stream.map(e => e.username).countByValueAndWindow(Minutes(24 * 60), Seconds(10)).foreachRDD { rdd =>
val writer = new FileWriter("/users/u20/jd/public_html/github", false)
val builder = new StringBuilder()
builder.append("\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n")
builder.append("""
| * Dick Measuring as a Service *
|---------------------------------------------------------------------------------------
|Hey CSHers, we all love measuring our dicks, so why not do it by measuring GitHub usage
| (most active users in the last day, this file is updated every 10 seconds)
|---------------------------------------------------------------------------------------
|""".stripMargin)
rdd.takeOrdered(20)(Ordering.by[(String, Long), Long](_._2).reverse)
.zipWithIndex
.foreach { case ((username, count), index) =>
builder.append("#" + String.format("%02d", Integer.valueOf(index + 1)) + " - " +
String.format("%-63s", username).replace(" ", ".") +
String.format("%4s", count.toString).replace(" ", ".") +
" actions / day\n")
}
builder.append("\n\n\n* Don't see your name here, well you must not be important anyways")
val output = builder.toString()
try {
writer.write(output)
} finally {
writer.flush()
writer.close()
}
println(output)
}
ssc.start() // Start the computation
println("starting stream...\n\n")
ssc.awaitTermination() // Wait for the computation to terminate
}
}
| JDrit/WikiStreaming | src/main/scala/edu/rit/csh/github/GithubStream.scala | Scala | apache-2.0 | 2,405 |
package edu.nus.hipci.daemon
import scala.collection.immutable.Map
import scala.concurrent._
import scala.concurrent.duration._
import akka.actor.ActorSystem
import akka.pattern._
import akka.util.Timeout
import org.scalatest.FlatSpec
import org.scalatest.Matchers._
import org.scalatest.concurrent.ScalaFutures._
import org.scalatest.time.{Seconds, Span}
import edu.nus.hipci.core._
/**
* Test the functionality of TestExecutor
*
* @author Evan Sebastian <[email protected]>
*/
class TestExecutorSpec extends FlatSpec {
val system = ActorSystem("hipci-test")
TestExecutor.register(system)
val subject = system.actorOf(TestExecutor.props, "TestExecutor")
val patience = Span(10, Seconds)
implicit val akkaTimeout = Timeout(10.seconds)
val path = System.getenv().get("PATH")
AppConfiguration.global.projectDirectory = "vendor"
AppConfiguration.global.hipDirectory = "../fixtures/hip"
AppConfiguration.global.sleekDirectory = "../fixtures/sleek"
"TestExecutor" should "execute simple HIP/SLEEK test suite" in {
val config = TestConfiguration(
tests = Map(
"test" -> Set(
GenTest(
path = "test_hip.ss",
kind = "hip",
arguments = List.empty,
specs = Map("append" -> true)
),
GenTest(
path = "test_sleek.slk",
kind = "sleek",
arguments = List.empty,
specs = Map("1" -> false, "2" -> true)))))
whenReady(subject ? SubmitTest(config), timeout(patience)) {
case promise: Promise[_] =>
Await
.result(promise.future, akkaTimeout.duration)
.asInstanceOf[TestComplete].result shouldEqual config
case _ => assert(false)
}
}
}
| rgoulter/hipci | src/test/edu/nus/hipci/daemon/TestExecutorSpec.scala | Scala | mit | 1,738 |
/*
* Copyright 2017 Kailuo Wang
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package mainecoon.tests
import cats.FlatMap
import cats.kernel.Eq
import cats.kernel.instances.string._
import cats.kernel.instances.tuple._
import cats.laws.discipline.eq._
import cats.laws.discipline.{FlatMapTests, SerializableTests}
import mainecoon.autoFlatMap
import mainecoon.tests.autoFlatMapTests._
import org.scalacheck.{Arbitrary, Cogen}
class autoFlatMapTests extends MainecoonTestSuite {
checkAll("FlatMap[TestAlgebra]", FlatMapTests[TestAlgebra].flatMap[Float, String, Int])
checkAll("serializable FlatMap[TestAlgebra]", SerializableTests.serializable(FlatMap[TestAlgebra]))
test("extra type param correctly handled") {
val doubleAlg = AlgWithExtraTypeParamFloat.map(_.toDouble)
doubleAlg.foo("big") should be(3d)
}
}
object autoFlatMapTests {
@autoFlatMap
trait TestAlgebra[T] {
def abstractEffect(a: String): T
def concreteEffect(a: String): T = abstractEffect(a + " concreteEffect")
def abstractOther(a: String): String
def concreteOther(a: String): String = a + " concreteOther"
def withoutParams: T
}
@autoFlatMap
trait AlgWithExtraTypeParam[T1, T] {
def foo(a: T1): T
}
object AlgWithExtraTypeParamFloat extends AlgWithExtraTypeParam[String, Float] {
def foo(a: String): Float = a.length.toFloat
}
@autoFlatMap
trait AlgWithGenericMethod[T] {
def plusOne[A](i: A): T
}
implicit def eqForTestAlgebra[T](implicit eqT: Eq[T]): Eq[TestAlgebra[T]] =
Eq.by { p =>
(p.abstractEffect: String => T) ->
(p.concreteEffect: String => T) ->
(p.abstractOther: String => String) ->
(p.concreteOther: String => String) ->
p.withoutParams
}
implicit def arbitraryTestAlgebra[T: Arbitrary](implicit cS: Cogen[String]): Arbitrary[TestAlgebra[T]] =
Arbitrary {
for {
absEff <- Arbitrary.arbitrary[String => T]
conEff <- Arbitrary.arbitrary[Option[String => T]]
absOther <- Arbitrary.arbitrary[String => String]
conOther <- Arbitrary.arbitrary[Option[String => String]]
withoutParameters <- Arbitrary.arbitrary[T]
} yield new TestAlgebra[T] {
override def abstractEffect(i: String): T = absEff(i)
override def concreteEffect(a: String): T = conEff.getOrElse(super.concreteEffect(_))(a)
override def abstractOther(a: String): String = absOther(a)
override def concreteOther(a: String): String = conOther.getOrElse(super.concreteOther(_))(a)
override def withoutParams: T = withoutParameters
}
}
}
| kailuowang/mainecoon | tests/src/test/scala/mainecoon/tests/autoFlatMapTests.scala | Scala | apache-2.0 | 3,135 |
package eveapi.compress
import scalaz._, Scalaz._
import java.time._
import eveapi.data.crest._
case class Compress[L](implicit val lens: PathLens[L]) {
trait CompressLink[T[_], IdType] {
def compress(href: T[L]): IdType
def decompress(id: IdType): Reader[EveServer, T[L]]
}
implicit def compressShortIdentifier[T[_]](implicit href: GenHref[L, T]) =
new CompressLink[Lambda[Lin => ShortIdentifier[Lin, T]], CompressedShortIdentifier[T]] {
def compress(id: ShortIdentifier[L, T]) =
CompressedShortIdentifier(id.id)
def decompress(id: CompressedShortIdentifier[T]) =
href.href(id.id).map(href => ShortIdentifier(id.id, href))
}
implicit def compressStandardIdentifier[T[_]](implicit href: GenHref[L, T]) =
new CompressLink[Lambda[Lin => StandardIdentifier[Lin, T]], CompressedStandardIdentifier[T]] {
def compress(id: StandardIdentifier[L, T]) =
CompressedStandardIdentifier(id.id, id.name)
def decompress(id: CompressedStandardIdentifier[T]) =
href.href(id.id).map(href => StandardIdentifier(id.id, id.name, href))
}
implicit def compressContactIdentifier[T[_]]()(implicit href: GenHref[L, T]) =
new CompressLink[Lambda[Lin => ContactIdentifier[Lin, T]], CompressedContactIdentifier[T]] {
def compress(id: ContactIdentifier[L, T]) =
CompressedContactIdentifier(id.id, id.name, id.isNPC)
def decompress(id: CompressedContactIdentifier[T]) =
href.href(id.id).map(href => ContactIdentifier(id.id, id.name, href, id.isNPC))
}
implicit def compressSquad = new CompressLink[Squad, CompressedSquad] {
def compress(id: Squad[L]) = {
val Array("", "fleets", fleetID, "wings", wingId, "squads", squadId) =
lens.fromLink(id.href).split("/")
CompressedSquad(fleetID.toLong, wingId.toLong, squadId.toLong, id.name)
}
def decompress(id: CompressedSquad) = {
lens
.toLink(s"/fleets/${id.fleetID}/wings/${id.wingId}/squads/${id.squadId}/")
.map(href => Squad(id.squadId, id.name, href))
}
}
implicit def compressWing(implicit compressSquad: CompressLink[Squad, CompressedSquad]) =
new CompressLink[Wing, CompressedWing] {
def compress(id: Wing[L]) = {
val Array("", "fleets", fleetID, "wings", wingId) = lens.fromLink(id.href).split("/")
CompressedWing(
fleetID.toLong, wingId.toLong, id.name, id.squadsList.map(compressSquad.compress))
}
def decompress(id: CompressedWing) = {
for {
squads <- lens.toLink(s"/fleets/${id.fleetID}/wings/${id.wingId}/squads/")
href <- lens.toLink(s"/fleets/${id.fleetID}/wings/${id.wingId}/")
squadsList <- id.squadsList
.map(compressSquad.decompress)
.sequence[Reader[EveServer, ?], Squad[L]]
} yield {
Wing(id.wingId, id.name, squadsList, PostLinkI(squads), href)
}
}
}
implicit def compressMember(
implicit compressCharacter: CompressLink[
Lambda[Lin => StandardIdentifier[Lin, Character]], CompressedCharacter],
compressShip: CompressLink[Lambda[Lin => StandardIdentifier[Lin, Ship]], CompressedShip],
compressSolarSystem: CompressLink[Lambda[Lin => StandardIdentifier[Lin, SolarSystem]],
CompressedStandardIdentifier[SolarSystem]],
compressStation: CompressLink[
Lambda[Lin => StandardIdentifier[Lin, Station]], CompressedStandardIdentifier[Station]]) =
new CompressLink[Member, CompressedMember] {
def compress(member: Member[L]) = {
val Array("", "fleets", fleetID, "members", characterId) =
lens.fromLink(member.href).split("/")
CompressedMember(fleetID.toLong,
member.boosterID,
compressCharacter.compress(member.character),
member.joinTime,
member.roleID,
compressShip.compress(member.ship),
compressSolarSystem.compress(member.solarSystem),
member.squadID,
member.station.map(compressStation.compress),
member.takesFleetWarp,
member.wingID)
}
def decompress(id: CompressedMember) = {
for {
character <- compressCharacter.decompress(id.character)
ship <- compressShip.decompress(id.ship)
solarSystem <- compressSolarSystem.decompress(id.solarSystem)
station <- id.station
.map(compressStation.decompress)
.sequence[Reader[EveServer, ?], StandardIdentifier[L, Station]]
href <- lens.toLink(s"/fleets/${id.fleetID}/members/${character.id}/")
} yield {
Member(id.boosterID,
character,
id.joinTime,
id.roleID,
ship,
solarSystem,
id.squadID,
station,
id.takesFleetWarp,
id.wingID,
href)
}
}
}
implicit def compressFleet(implicit compressMember: CompressLink[Member, CompressedMember],
compressWing: CompressLink[Wing, CompressedWing]) =
new CompressLink[Fleet, CompressedFleet] {
def compress(fleet: Fleet[L]) = {
val Array("", "fleets", fleetID, "members") = lens.fromLink(fleet.members.href).split("/")
CompressedFleet(
fleetID.toLong, fleet.isFreeMove, fleet.isRegistered, fleet.isVoiceEnabled, fleet.motd)
}
def decompress(id: CompressedFleet) = {
for {
members <- lens.toLink(s"/fleets/${id.fleetID}/members/")
wings <- lens.toLink(s"/fleets/${id.fleetID}/wings/")
} yield {
Fleet(id.isFreeMove,
id.isRegistered,
id.isVoiceEnabled,
id.motd,
GetLinkI(wings),
GetPostLink(members))
}
}
}
def compress[T[_], IdType](t: T[L])(implicit compress: CompressLink[T, IdType]): IdType = {
compress.compress(t)
}
def decompress[T[_], IdType](t: IdType)(
implicit compress: CompressLink[T, IdType]): Reader[EveServer, T[L]] =
compress.decompress(t)
}
case class CompressedShortIdentifier[T[_]](id: Long)
case class CompressedStandardIdentifier[T[_]](id: Long, name: String)
case class CompressedContactIdentifier[T[_]](id: Long, name: String, isNPC: Boolean)
case class CompressedSquad(fleetID: Long, wingId: Long, squadId: Long, name: String)
case class CompressedWing(
fleetID: Long, wingId: Long, name: String, squadsList: List[CompressedSquad])
case class CompressedMember(fleetID: Long,
boosterID: Short,
character: CompressedCharacter,
joinTime: Instant,
roleID: Short,
ship: CompressedShip,
solarSystem: CompressedSolarSystem,
squadID: Long,
station: Option[CompressedStation],
takesFleetWarp: Boolean,
wingID: Long)
case class CompressedFleet(
fleetID: Long, isFreeMove: Boolean, isRegistered: Boolean, isVoiceEnabled: Boolean, motd: String)
case class CompressedLocation(
solarSystem: Option[CompressedSolarSystem], station: Option[CompressedStation])
case class EveServer(server: String)
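/** Translates between server-relative path strings and link values of type `L`. */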
trait PathLens[L] {
def toLink(path: String): Reader[EveServer, L]
def fromLink(l: L): String
}
trait GenHref[L, T[_]] {
def href(id: Long)(implicit lens: PathLens[L]): Reader[EveServer, L]
}
object GenHref {
implicit def ship[L] = new GenHref[L, Ship] {
def href(id: Long)(implicit lens: PathLens[L]) = lens.toLink(s"/types/${id}/")
}
implicit def solarsystem[L] = new GenHref[L, SolarSystem] {
def href(id: Long)(implicit lens: PathLens[L]) = lens.toLink(s"/solarsystems/${id}/")
}
implicit def station[L] = new GenHref[L, Station] {
def href(id: Long)(implicit lens: PathLens[L]) = lens.toLink(s"/stations/${id}/")
}
implicit def character[L] = new GenHref[L, Character] {
def href(id: Long)(implicit lens: PathLens[L]) = lens.toLink(s"/characters/${id}/")
}
def identifier[L, T[_]](id: Long)(
implicit gen: GenHref[L, T], lens: PathLens[L]): Reader[EveServer, ShortIdentifier[L, T]] =
gen.href(id).map(href => ShortIdentifier[L, T](id, href))
}
| scala-eveapi/eveapi | compress/src/main/scala/compress.scala | Scala | mit | 8,592 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.scope
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.SparkListenerStageSubmitted
import org.apache.spark.scheduler.SparkListenerStageCompleted
import org.apache.spark.scheduler.SparkListenerJobStart
/**
* Tests that this listener populates and cleans up its data structures properly.
*/
class RDDOperationGraphListenerSuite extends SparkFunSuite {
private var jobIdCounter = 0
private var stageIdCounter = 0
private val maxRetainedJobs = 10
private val maxRetainedStages = 10
private val conf = new SparkConf()
.set("spark.ui.retainedJobs", maxRetainedJobs.toString)
    // The number of stages the web UI retains before GC
.set("spark.ui.retainedStages", maxRetainedStages.toString)
test("run normal jobs") {//运行正常的工作
val startingJobId = jobIdCounter
val startingStageId = stageIdCounter
val listener = new RDDOperationGraphListener(conf)
assert(listener.jobIdToStageIds.isEmpty)
assert(listener.jobIdToSkippedStageIds.isEmpty)
assert(listener.stageIdToJobId.isEmpty)
assert(listener.stageIdToGraph.isEmpty)
assert(listener.completedStageIds.isEmpty)
assert(listener.jobIds.isEmpty)
assert(listener.stageIds.isEmpty)
// Run a few jobs, but not enough for clean up yet
    (1 to 3).foreach { numStages => startJob(numStages, listener) } // start 3 jobs and 6 stages
    (0 to 5).foreach { i => endStage(startingStageId + i, listener) } // finish all 6 stages
    (0 to 2).foreach { i => endJob(startingJobId + i, listener) } // finish all 3 jobs
listener.getOperationGraphForJob(startingJobId).foreach(a =>println(a+"==OperationGraphForJob=="+a.edges+"=="+a.incomingEdges+"=="+a.outgoingEdges+"=="+a.rootCluster))
assert(listener.jobIdToStageIds.size === 3)
assert(listener.jobIdToStageIds(startingJobId).size === 1)
assert(listener.jobIdToStageIds(startingJobId + 1).size === 2)
assert(listener.jobIdToStageIds(startingJobId + 2).size === 3)
listener.jobIdToSkippedStageIds.map(println _)
assert(listener.jobIdToSkippedStageIds.size === 3)
assert(listener.jobIdToSkippedStageIds.values.forall(_.isEmpty)) // no skipped stages
assert(listener.stageIdToJobId.size === 6)
assert(listener.stageIdToJobId(startingStageId) === startingJobId)
assert(listener.stageIdToJobId(startingStageId + 1) === startingJobId + 1)
assert(listener.stageIdToJobId(startingStageId + 2) === startingJobId + 1)
assert(listener.stageIdToJobId(startingStageId + 3) === startingJobId + 2)
assert(listener.stageIdToJobId(startingStageId + 4) === startingJobId + 2)
assert(listener.stageIdToJobId(startingStageId + 5) === startingJobId + 2)
assert(listener.stageIdToGraph.size === 6)
assert(listener.completedStageIds.size === 6)
assert(listener.jobIds.size === 3)
    assert(listener.stageIds.size === 6)
}
test("run jobs with skipped stages") {//跳过阶段的运行作业
val startingJobId = jobIdCounter
val startingStageId = stageIdCounter
val listener = new RDDOperationGraphListener(conf)
// Run a few jobs, but not enough for clean up yet
    // Leave some stages unfinished so that they are marked as skipped
    (1 to 3).foreach { numStages => startJob(numStages, listener) } // start 3 jobs and 6 stages
    (4 to 5).foreach { i => endStage(startingStageId + i, listener) } // finish only last 2 stages
    (0 to 2).foreach { i => endJob(startingJobId + i, listener) } // finish all 3 jobs
assert(listener.jobIdToSkippedStageIds.size === 3)
listener.jobIdToSkippedStageIds.map(println _)
assert(listener.jobIdToSkippedStageIds(startingJobId).size === 1)
assert(listener.jobIdToSkippedStageIds(startingJobId + 1).size === 2)
assert(listener.jobIdToSkippedStageIds(startingJobId + 2).size === 1) // 2 stages not skipped
assert(listener.completedStageIds.size === 2)
listener.completedStageIds.map(println _)
// The rest should be the same as before
assert(listener.jobIdToStageIds.size === 3)
assert(listener.jobIdToStageIds(startingJobId).size === 1)
assert(listener.jobIdToStageIds(startingJobId + 1).size === 2)
assert(listener.jobIdToStageIds(startingJobId + 2).size === 3)
assert(listener.stageIdToJobId.size === 6)
assert(listener.stageIdToJobId(startingStageId) === startingJobId)
assert(listener.stageIdToJobId(startingStageId + 1) === startingJobId + 1)
assert(listener.stageIdToJobId(startingStageId + 2) === startingJobId + 1)
assert(listener.stageIdToJobId(startingStageId + 3) === startingJobId + 2)
assert(listener.stageIdToJobId(startingStageId + 4) === startingJobId + 2)
assert(listener.stageIdToJobId(startingStageId + 5) === startingJobId + 2)
assert(listener.stageIdToGraph.size === 6)
assert(listener.jobIds.size === 3)
assert(listener.stageIds.size === 6)
}
test("clean up metadata") {//清理元数据
val startingJobId = jobIdCounter
val startingStageId = stageIdCounter
val listener = new RDDOperationGraphListener(conf)
// Run many jobs and stages to trigger clean up
(1 to 10000).foreach { i =>
// Note: this must be less than `maxRetainedStages`
val numStages = i % (maxRetainedStages - 2) + 1
val startingStageIdForJob = stageIdCounter
val jobId = startJob(numStages, listener)
// End some, but not all, stages that belong to this job
// This is to ensure that we have both completed and skipped stages
(startingStageIdForJob until stageIdCounter)
.filter { i => i % 2 == 0 }
.foreach { i => endStage(i, listener) }
// End all jobs
endJob(jobId, listener)
}
// Ensure we never exceed the max retained thresholds
assert(listener.jobIdToStageIds.size <= maxRetainedJobs)
assert(listener.jobIdToSkippedStageIds.size <= maxRetainedJobs)
assert(listener.stageIdToJobId.size <= maxRetainedStages)
assert(listener.stageIdToGraph.size <= maxRetainedStages)
assert(listener.completedStageIds.size <= maxRetainedStages)
assert(listener.jobIds.size <= maxRetainedJobs)
assert(listener.stageIds.size <= maxRetainedStages)
// Also ensure we're actually populating these data structures
// Otherwise the previous group of asserts will be meaningless
assert(listener.jobIdToStageIds.nonEmpty)
assert(listener.jobIdToSkippedStageIds.nonEmpty)
assert(listener.stageIdToJobId.nonEmpty)
assert(listener.stageIdToGraph.nonEmpty)
assert(listener.completedStageIds.nonEmpty)
assert(listener.jobIds.nonEmpty)
assert(listener.stageIds.nonEmpty)
// Ensure we clean up old jobs and stages, not arbitrary ones
assert(!listener.jobIdToStageIds.contains(startingJobId))
assert(!listener.jobIdToSkippedStageIds.contains(startingJobId))
assert(!listener.stageIdToJobId.contains(startingStageId))
assert(!listener.stageIdToGraph.contains(startingStageId))
assert(!listener.completedStageIds.contains(startingStageId))
assert(!listener.stageIds.contains(startingStageId))
assert(!listener.jobIds.contains(startingJobId))
}
test("fate sharing between jobs and stages") {//工作和阶段之间的共享死亡
val startingJobId = jobIdCounter
val startingStageId = stageIdCounter
val listener = new RDDOperationGraphListener(conf)
// Run 3 jobs and 8 stages, finishing all 3 jobs but only 2 stages
startJob(5, listener)
startJob(1, listener)
startJob(2, listener)
(0 until 8).foreach { i => startStage(i + startingStageId, listener) }
endStage(startingStageId + 3, listener)
endStage(startingStageId + 4, listener)
(0 until 3).foreach { i => endJob(i + startingJobId, listener) }
    // First, assert the old stuff
assert(listener.jobIdToStageIds.size === 3)
assert(listener.jobIdToSkippedStageIds.size === 3)
assert(listener.stageIdToJobId.size === 8)
assert(listener.stageIdToGraph.size === 8)
assert(listener.completedStageIds.size === 2)
// Cleaning the third job should clean all of its stages
listener.cleanJob(startingJobId + 2)
assert(listener.jobIdToStageIds.size === 2)
assert(listener.jobIdToSkippedStageIds.size === 2)
assert(listener.stageIdToJobId.size === 6)
assert(listener.stageIdToGraph.size === 6)
assert(listener.completedStageIds.size === 2)
// Cleaning one of the stages in the first job should clean that job and all of its stages
// Note that we still keep around the last stage because it belongs to a different job
listener.cleanStage(startingStageId)
assert(listener.jobIdToStageIds.size === 1)
assert(listener.jobIdToSkippedStageIds.size === 1)
assert(listener.stageIdToJobId.size === 1)
assert(listener.stageIdToGraph.size === 1)
assert(listener.completedStageIds.size === 0)
}
/**
* Start a job with the specified number of stages.
   */
private def startJob(numStages: Int, listener: RDDOperationGraphListener): Int = {
assert(numStages > 0, "I will not run a job with 0 stages for you.")
val stageInfos = (0 until numStages).map { a =>
//println( a)
val stageInfo = new StageInfo(stageIdCounter, 0, "s", 0, Seq.empty, Seq.empty, "d")
stageIdCounter += 1
stageInfo
}
val jobId = jobIdCounter
listener.onJobStart(new SparkListenerJobStart(jobId, 0, stageInfos))
// Also start all stages that belong to this job
stageInfos.map(_.stageId).foreach { sid =>
//println("sid:"+sid)
startStage(sid, listener)
}
jobIdCounter += 1
jobId
}
/**
* Start the stage specified by the given ID.
   */
private def startStage(stageId: Int, listener: RDDOperationGraphListener): Unit = {
val stageInfo = new StageInfo(stageId, 0, "s", 0, Seq.empty, Seq.empty, "d")
listener.onStageSubmitted(new SparkListenerStageSubmitted(stageInfo))
}
/**
* Finish the stage specified by the given ID.
   */
private def endStage(stageId: Int, listener: RDDOperationGraphListener): Unit = {
val stageInfo = new StageInfo(stageId, 0, "s", 0, Seq.empty, Seq.empty, "d")
listener.onStageCompleted(new SparkListenerStageCompleted(stageInfo))
}
/**
* Finish the job specified by the given ID.
   */
private def endJob(jobId: Int, listener: RDDOperationGraphListener): Unit = {
listener.onJobEnd(new SparkListenerJobEnd(jobId, 0, JobSucceeded))
}
}
| tophua/spark1.52 | core/src/test/scala/org/apache/spark/ui/scope/RDDOperationGraphListenerSuite.scala | Scala | apache-2.0 | 12,399 |
package com.twitter.finatra.http.routing
import com.twitter.finagle.http.{HttpMuxer, Request, Response, Status}
import com.twitter.finagle.httpx.compat.NettyClientAdaptor
import com.twitter.finagle.{Service, httpx}
import com.twitter.finatra.json.FinatraObjectMapper
import com.twitter.finatra.utils.FuturePools
import com.twitter.inject.Logging
import com.twitter.util.{Await, ExecutorServiceFuturePool, Future}
import javax.inject.Inject
import org.jboss.netty.handler.codec.http.{HttpRequest, HttpResponse}
class HttpWarmup @Inject()(
router: HttpRouter,
mapper: FinatraObjectMapper)
extends Logging {
private val userAgent = "http-warmup-client"
/* Use a FuturePool to avoid getting a ConstFuture from Future.apply(...) */
private val pool = FuturePools.fixedPool("HTTP Warmup", 1).asInstanceOf[ExecutorServiceFuturePool]
private val httpxMuxer: Service[HttpRequest, HttpResponse] =
NettyClientAdaptor.andThen(httpx.HttpMuxer)
/* Public */
/**
   * Send a request to warm up services that are not yet externally receiving traffic
* @param request Request to send
* @param forceRouteToAdminHttpMuxers Route the request to the HttpMuxer (e.g. needed for twitter-server admin endpoints that do not start with /admin)
* @param responseCallback Callback called for every response where assertions can be made (note: be aware that failed assertions will prevent a server from
* restarting when dependent services are unresponsive)
*/
def send(
request: => Request,
forceRouteToAdminHttpMuxers: Boolean = false,
times: Int = 1,
responseCallback: Response => Unit = identity) {
for (i <- 1 to times) {
val response = executeRequest(request, forceRouteToAdminHttpMuxers)
responseCallback(response)
info("Warmup " + request + " complete with " + response.status)
}
}
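  /* A usage sketch, assuming `warmup` is an injected HttpWarmup instance and the request paths
   * are purely illustrative:
   *   warmup.send(Request("/health"), times = 5)
   *   warmup.send(Request("/admin/threads"), forceRouteToAdminHttpMuxers = true)
   *   warmup.close()
   */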
def close() = {
pool.executor.shutdownNow()
}
/* Private */
private def executeRequest(request: Request, forceRouteToHttpMuxer: Boolean): Response = {
val nettyResponseFuture = pool {
info("Warmup " + request)
routeRequest(request, forceRouteToHttpMuxer)
}.flatten
Response(
Await.result(nettyResponseFuture))
}
private def routeRequest(request: Request, forceRouteToHttpMuxer: Boolean): Future[HttpResponse] = {
/* Mutation */
request.headerMap.add("Host", "127.0.0.1")
request.headerMap.add("User-Agent", userAgent)
if (forceRouteToHttpMuxer)
routeToAdminMuxers(request)
else if (request.uri.startsWith("/admin/finatra/"))
router.services.adminService(request)
else if (request.uri.startsWith("/admin"))
routeToAdminMuxers(request)
else
router.services.externalService(request)
}
private def routeToAdminMuxers(request: Request): Future[HttpResponse] = {
httpxMuxer(request.getHttpRequest()) flatMap { response =>
if (response.getStatus == Status.NotFound)
HttpMuxer(request)
else
Future(response)
}
}
}
| tom-chan/finatra | http/src/main/scala/com/twitter/finatra/http/routing/HttpWarmup.scala | Scala | apache-2.0 | 3,028 |
package com.twitter.finagle.netty3.ssl.server
import com.twitter.finagle.Address
import com.twitter.finagle.ssl.server.{SslServerConfiguration, SslServerSessionVerifier}
import java.net.SocketAddress
import java.security.cert.Certificate
import javax.net.ssl.{SSLEngine, SSLSession}
import org.jboss.netty.channel._
import org.jboss.netty.handler.ssl.SslHandler
import org.mockito.Matchers._
import org.mockito.Mockito.{times, verify, when}
import org.scalatest.FunSuite
import org.scalatest.mockito.MockitoSugar
class SslServerConnectHandlerTest extends FunSuite with MockitoSugar {
class SslConnectHandlerHelper {
val ctx = mock[ChannelHandlerContext]
val sslHandler = mock[SslHandler]
val sslSession = mock[SSLSession]
when(sslSession.getPeerCertificates) thenReturn Array.empty[Certificate]
val engine = mock[SSLEngine]
when(engine.getSession) thenReturn sslSession
when(sslHandler.getEngine) thenReturn engine
val channel = mock[Channel]
when(ctx.getChannel) thenReturn channel
val pipeline = mock[ChannelPipeline]
when(channel.getPipeline) thenReturn pipeline
val closeFuture = Channels.future(channel)
when(channel.getCloseFuture) thenReturn closeFuture
val remoteAddress = mock[SocketAddress]
when(channel.getRemoteAddress) thenReturn remoteAddress
val handshakeFuture = Channels.future(channel)
when(sslHandler.handshake()) thenReturn handshakeFuture
}
class SslServerConnectHandlerHelper extends SslConnectHandlerHelper {
var shutdownCount = 0
def onShutdown() = shutdownCount += 1
val config = mock[SslServerConfiguration]
val verifier = mock[SslServerSessionVerifier]
val connectHandler = new SslServerConnectHandler(sslHandler, config, verifier, onShutdown)
val event = new UpstreamChannelStateEvent(channel, ChannelState.CONNECTED, remoteAddress)
connectHandler.handleUpstream(ctx, event)
}
test("SslServerConnectHandler should call the shutdown callback on channel shutdown") {
val h = new SslServerConnectHandlerHelper
import h._
val event = new UpstreamChannelStateEvent(channel, ChannelState.OPEN, null)
connectHandler.channelClosed(mock[ChannelHandlerContext], event)
assert(shutdownCount == 1)
}
test("SslServerConnectHandler should delay connection until the handshake is complete") {
val h = new SslServerConnectHandlerHelper
import h._
when(verifier.apply(Address.failing, config, sslSession)) thenReturn true
verify(sslHandler, times(1)).handshake()
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
handshakeFuture.setSuccess()
verify(ctx, times(1)).sendUpstream(any[ChannelEvent])
}
test("SslServerConnectHandler should not connect when verification fails") {
val h = new SslServerConnectHandlerHelper
import h._
when(verifier.apply(Address.failing, config, sslSession)) thenReturn false
verify(sslHandler, times(1)).handshake()
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
handshakeFuture.setSuccess()
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
}
test("SslServerConnectHandler should not connect when verification throws") {
val h = new SslServerConnectHandlerHelper
import h._
when(verifier.apply(Address.failing, config, sslSession)) thenThrow new RuntimeException("Failed verification")
verify(sslHandler, times(1)).handshake()
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
handshakeFuture.setSuccess()
verify(ctx, times(0)).sendUpstream(any[ChannelEvent])
}
}
| mkhq/finagle | finagle-netty3/src/test/scala/com/twitter/finagle/netty3/ssl/server/SslServerConnectHandlerTest.scala | Scala | apache-2.0 | 3,566 |
package com.rocketfuel.sdbc.postgresql
class TableWithEnumSpec extends PostgreSqlSuite.Base {
import postgresql._
case class TableHasEnum(
id: Int,
mood: String,
value: String
)
object TableHasEnum {
object All {
implicit val selectable: Selectable[All.type, TableHasEnum] =
Select[TableHasEnum]("select * from person").selectable.constant
}
}
test("case class for table with enum") { implicit connection: Connection =>
connection.execSQLUpdate("CREATE TYPE mood AS ENUM ('sad', 'ok', 'happy')")
connection.execSQLUpdate("CREATE TABLE person (id serial primary key, mood mood not null, value text not null)")
connection.execSQLUpdate("INSERT INTO person (mood, value) values ('sad', 'panda')")
import syntax._
val values = TableHasEnum.All.vector()
assertResult(1)(values.size)
assertResult("sad")(values.head.mood)
assertResult("panda")(values.head.value)
}
}
| rocketfuel/sdbc | postgresql/src/test/scala/com/rocketfuel/sdbc/postgresql/TableWithEnumSpec.scala | Scala | bsd-3-clause | 949 |
object Test {
// the existential causes a cast and the cast makes searchClass not be in tail position
// can we get rid of the useless cast?
@annotation.tailrec
final def searchClass: Class[_] = {
"packageName" match {
case _ =>
searchClass
}
}
} | loskutov/intellij-scala | testdata/scalacTests/pos/t6145.scala | Scala | apache-2.0 | 278 |
package com.jjabuk.bookstore
import akka.actor.ActorSystem
import com.jjabuk.bookstore.catalog.MongoBookCommands
import com.jjabuk.bookstore.catalog.protocols.CatalogueProtocol.{Book, BookAdded}
import org.slf4j.LoggerFactory
import scala.concurrent.duration._
import scala.concurrent.{Await, Future}
object BookAddingSample extends App {
def logger = LoggerFactory.getLogger(this.getClass)
implicit val system = ActorSystem("book-adding")
val bookToAdd = Book(
uuid = "abc",
isbn = "XYZ1",
title = "Top best book ever",
publisher = Some("Publisher"),
review = Some("Review")
)
val x: Future[BookAdded] = (MongoBookCommands addBook bookToAdd).mapTo[BookAdded]
val y = Await.result(x, 6 seconds)
logger.debug("y:" + y)
logger.debug("Booting Seed module")
Thread.sleep(5000)
system.terminate()
}
| jjabuk/bookstore | src/main/scala/com/jjabuk/bookstore/BookAddingSample.scala | Scala | apache-2.0 | 846 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import scala.util.Random
import org.apache.spark.{SparkException, SparkFunSuite}
import org.apache.spark.ml.linalg.Vectors
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTestingUtils}
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.mllib.util.MLlibTestSparkContext
import org.apache.spark.sql.{DataFrame, Row}
class BucketizerSuite extends SparkFunSuite with MLlibTestSparkContext with DefaultReadWriteTest {
test("params") {
ParamsSuite.checkParams(new Bucketizer)
}
test("Bucket continuous features, without -inf,inf") {
// Check a set of valid feature values.
val splits = Array(-0.5, 0.0, 0.5)
val validData = Array(-0.5, -0.3, 0.0, 0.2)
val expectedBuckets = Array(0.0, 0.0, 1.0, 1.0)
val dataFrame: DataFrame =
spark.createDataFrame(validData.zip(expectedBuckets)).toDF("feature", "expected")
val bucketizer: Bucketizer = new Bucketizer()
.setInputCol("feature")
.setOutputCol("result")
.setSplits(splits)
bucketizer.transform(dataFrame).select("result", "expected").collect().foreach {
case Row(x: Double, y: Double) =>
assert(x === y,
s"The feature value is not correct after bucketing. Expected $y but found $x")
}
// Check for exceptions when using a set of invalid feature values.
val invalidData1: Array[Double] = Array(-0.9) ++ validData
val invalidData2 = Array(0.51) ++ validData
val badDF1 = spark.createDataFrame(invalidData1.zipWithIndex).toDF("feature", "idx")
withClue("Invalid feature value -0.9 was not caught as an invalid feature!") {
intercept[SparkException] {
bucketizer.transform(badDF1).collect()
}
}
val badDF2 = spark.createDataFrame(invalidData2.zipWithIndex).toDF("feature", "idx")
withClue("Invalid feature value 0.51 was not caught as an invalid feature!") {
intercept[SparkException] {
bucketizer.transform(badDF2).collect()
}
}
}
test("Bucket continuous features, with -inf,inf") {
val splits = Array(Double.NegativeInfinity, -0.5, 0.0, 0.5, Double.PositiveInfinity)
val validData = Array(-0.9, -0.5, -0.3, 0.0, 0.2, 0.5, 0.9)
val expectedBuckets = Array(0.0, 1.0, 1.0, 2.0, 2.0, 3.0, 3.0)
val dataFrame: DataFrame =
spark.createDataFrame(validData.zip(expectedBuckets)).toDF("feature", "expected")
val bucketizer: Bucketizer = new Bucketizer()
.setInputCol("feature")
.setOutputCol("result")
.setSplits(splits)
bucketizer.transform(dataFrame).select("result", "expected").collect().foreach {
case Row(x: Double, y: Double) =>
assert(x === y,
s"The feature value is not correct after bucketing. Expected $y but found $x")
}
}
test("Binary search correctness on hand-picked examples") {
import BucketizerSuite.checkBinarySearch
// length 3, with -inf
checkBinarySearch(Array(Double.NegativeInfinity, 0.0, 1.0))
// length 4
checkBinarySearch(Array(-1.0, -0.5, 0.0, 1.0))
// length 5
checkBinarySearch(Array(-1.0, -0.5, 0.0, 1.0, 1.5))
// length 3, with inf
checkBinarySearch(Array(0.0, 1.0, Double.PositiveInfinity))
// length 3, with -inf and inf
checkBinarySearch(Array(Double.NegativeInfinity, 1.0, Double.PositiveInfinity))
// length 4, with -inf and inf
checkBinarySearch(Array(Double.NegativeInfinity, 0.0, 1.0, Double.PositiveInfinity))
}
test("Binary search correctness in contrast with linear search, on random data") {
val data = Array.fill(100)(Random.nextDouble())
val splits: Array[Double] = Double.NegativeInfinity +:
Array.fill(10)(Random.nextDouble()).sorted :+ Double.PositiveInfinity
val bsResult = Vectors.dense(data.map(x => Bucketizer.binarySearchForBuckets(splits, x)))
val lsResult = Vectors.dense(data.map(x => BucketizerSuite.linearSearchForBuckets(splits, x)))
assert(bsResult ~== lsResult absTol 1e-5)
}
test("read/write") {
val t = new Bucketizer()
.setInputCol("myInputCol")
.setOutputCol("myOutputCol")
.setSplits(Array(0.1, 0.8, 0.9))
testDefaultReadWrite(t)
}
}
private object BucketizerSuite extends SparkFunSuite {
/** Brute force search for buckets. Bucket i is defined by the range [split(i), split(i+1)). */
def linearSearchForBuckets(splits: Array[Double], feature: Double): Double = {
require(feature >= splits.head)
var i = 0
val n = splits.length - 1
while (i < n) {
if (feature < splits(i + 1)) return i
i += 1
}
throw new RuntimeException(
s"linearSearchForBuckets failed to find bucket for feature value $feature")
}
/** Check all values in splits, plus values between all splits. */
def checkBinarySearch(splits: Array[Double]): Unit = {
def testFeature(feature: Double, expectedBucket: Double): Unit = {
assert(Bucketizer.binarySearchForBuckets(splits, feature) === expectedBucket,
s"Expected feature value $feature to be in bucket $expectedBucket with splits:" +
s" ${splits.mkString(", ")}")
}
var i = 0
val n = splits.length - 1
while (i < n) {
// Split i should fall in bucket i.
testFeature(splits(i), i)
// Value between splits i,i+1 should be in i, which is also true if the (i+1)-th split is inf.
testFeature((splits(i) + splits(i + 1)) / 2, i)
i += 1
}
}
}
| gioenn/xSpark | mllib/src/test/scala/org/apache/spark/ml/feature/BucketizerSuite.scala | Scala | apache-2.0 | 6,278 |
package com.twitter.scalding.typed.memory_backend
import java.util.concurrent.atomic.AtomicReference
class AtomicBox[T <: AnyRef](init: T) {
private[this] val ref = new AtomicReference[T](init)
def lazySet(t: T): Unit =
ref.lazySet(t)
def set(t: T): Unit =
ref.set(t)
def swap(t: T): T =
ref.getAndSet(t)
/**
* use a pure function to update the state.
* fn may be called more than once
*/
def update[R](fn: T => (T, R)): R = {
@annotation.tailrec
def loop(): R = {
val init = ref.get
val (next, res) = fn(init)
if (ref.compareAndSet(init, next)) res
else loop()
}
loop()
}
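  // A usage sketch (hypothetical): atomically pop the head of a list held in the box.
  // `fn` must be pure, since it may be re-run when the compare-and-set loses a race:
  //   val box = new AtomicBox[List[Int]](List(1, 2, 3))
  //   val popped: Option[Int] = box.update {
  //     case h :: t => (t, Some(h))
  //     case Nil    => (Nil, None)
  //   }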
def get(): T = ref.get
}
| jzmq/scalding | scalding-core/src/main/scala/com/twitter/scalding/typed/memory_backend/AtomicBox.scala | Scala | apache-2.0 | 685 |
object i0 {
def i1(i2: Int) = 2
class i3
val i4: (i1 { type i3;
val i4 = i3
})
} | som-snytt/dotty | tests/fuzzy/b39f34ce574ac51b821ab409e48fd4acee02c755.scala | Scala | apache-2.0 | 80 |
package com.github.nzyuzin.assignments.ai.apriori
import java.util.Scanner
import scala.collection.mutable
import scala.io.Source
object Apriori {
def main(args: Array[String]): Unit = {
val fileName = "apriori.txt"
val data = readDataFromFile(fileName)
val frequentSets = getFrequentSets(data, 4)
frequentSets.foreach({ set =>
val subsets = set.subsets.filter(subset => subset.nonEmpty && subset.size != set.size)
subsets.foreach({ subset =>
val disjoint = set -- subset
val supportValue = support(subset, disjoint, data)
val confidenceValue = confidence(subset, disjoint, data)
println(subset.toString + " -> " + disjoint + " support = " + supportValue + " confidence = " + confidenceValue)
})
})
}
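  /**
   * Support of the rule `conditionSet -> implicationSet`: the fraction of all transactions
   * that contain every item of both sets.
   */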
def support(conditionSet: Set[String], implicationSet: Set[String], data: Seq[Set[String]]): Double = {
var numberOfMatches = 0
data.foreach({ transaction =>
if (conditionSet.intersect(transaction).size == conditionSet.size &&
implicationSet.intersect(transaction).size == implicationSet.size) {
numberOfMatches += 1
}
})
numberOfMatches.toDouble / data.size.toDouble
}
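  /**
   * Confidence of the rule `conditionSet -> implicationSet`: of the transactions that contain
   * the condition set, the fraction that also contain the implication set.
   */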
def confidence(conditionSet: Set[String], implicationSet: Set[String], data: Seq[Set[String]]): Double = {
var numberOfMatchesBoth = 0
var numberOfMatchesCondition = 0
data.foreach({ transaction =>
if (conditionSet.intersect(transaction).size == conditionSet.size) {
numberOfMatchesCondition += 1
if (implicationSet.intersect(transaction).size == implicationSet.size) {
numberOfMatchesBoth += 1
}
}
})
numberOfMatchesBoth.toDouble / numberOfMatchesCondition.toDouble
}
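  /**
   * Apriori-style search: grows candidate item sets level by level, keeping only those that
   * occur in at least `threshold` transactions, and returns the frequent sets of size > 1.
   */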
def getFrequentSets(data: Seq[Set[String]], threshold: Int): Seq[Set[String]] = {
var itemSet = data.flatten.map({str =>
val set = new mutable.HashSet[String]()
set += str
set.toSet
}).toSet
val result = new mutable.MutableList[Set[String]]
while (true) {
val frequencies = new mutable.HashMap[Set[String], Int]
itemSet.foreach({ item =>
data.foreach({ transaction =>
if (transaction.intersect(item).size == item.size) {
frequencies.put(item, frequencies.getOrElse(item, 0) + 1)
}
})
})
val frequentSets = frequencies.filter(frequency => frequency._2 >= threshold).keys
if (frequentSets.isEmpty) {
return result.filter(set => set.size > 1)
}
result ++= frequentSets
val newItemSet = new mutable.HashSet[Set[String]]
frequentSets.foreach({ set1 =>
frequentSets.foreach({ set2 =>
if (set1.size - set1.intersect(set2).size == 1) {
newItemSet += set1.union(set2)
}
})
})
itemSet = newItemSet.toSet
}
null
}
def readDataFromFile(fileName: String): Seq[Set[String]] = {
val input = Source.fromFile(fileName)
val inputScanner = new Scanner(input.bufferedReader())
val result = new mutable.MutableList[Set[String]]
while (inputScanner.hasNext) {
val line = inputScanner.nextLine()
val lineScanner = new Scanner(line)
val transactionData = new mutable.HashSet[String]()
while (lineScanner.hasNext) {
transactionData += lineScanner.next("\\\\w+")
}
result += transactionData.toSet
}
result
}
}
| nzyuzin/ai-assignments | src/main/scala/com/github/nzyuzin/assignments/ai/apriori/Apriori.scala | Scala | gpl-3.0 | 3,421 |
package com.wix.fax.concordfax.model
/**
* @see <a href="https://developer.concordfax.com/ofd/html/4ad911b8-7eee-45a8-8a40-d288d85cccb1.htm">SimpleGetFaxStatus Method</a>
*/
object Statuses {
val succeeded = 1L
val failed = 2L
val processed = 3L
val invalidJobId = -5019L
}
| wix/libfax | libfax-concordfax-core/src/main/scala/com/wix/fax/concordfax/model/Statuses.scala | Scala | apache-2.0 | 296 |
trait LogLevelType
object Test {
type LogLevel = Int with LogLevelType
final val ErrorLevel = 1.asInstanceOf[Int with LogLevelType]
def main(args: Array[String]): Unit = {
List(ErrorLevel, ErrorLevel)
}
}
| yusuke2255/dotty | tests/run/t6126.scala | Scala | bsd-3-clause | 217 |
package learn.danipl.model
import org.scalatest.{FlatSpec, Matchers}
class SimpleRecordTest extends FlatSpec with Matchers {
"A SimpleRecord" should "be filled of data" in {
val simpleRecord: SimpleRecord = new SimpleRecord(0, Array.emptyByteArray)
simpleRecord.id shouldNot be(Nil)
simpleRecord.body shouldNot be(Nil)
}
} | danipl/LearnScalaProject | src/test/scala/learn/danipl/model/SimpleRecordTest.scala | Scala | gpl-3.0 | 344 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.subjects
import monix.execution.Ack.{Continue, Stop}
import monix.execution.{Ack, Cancelable}
import monix.reactive.Observable
import monix.reactive.internal.util.PromiseCounter
import monix.reactive.observers.{ConnectableSubscriber, Subscriber}
import monix.execution.atomic.Atomic
import scala.util.control.NonFatal
import scala.annotation.tailrec
import scala.concurrent.Future
import scala.util.Success
/** `BehaviorSubject` when subscribed, will emit the most recently emitted item by the source,
* or the `initialValue` (as the seed) in case no value has yet been emitted, then continuing
* to emit events subsequent to the time of invocation.
*
* When the source terminates in error, the `BehaviorSubject` will not emit any items to
* subsequent subscribers, but instead it will pass along the error notification.
*
* @see [[Subject]]
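 *
 * A usage sketch (hypothetical values):
 * {{{
 *   val subject = BehaviorSubject[Int](0)
 *   subject.onNext(1)
 *   subject.onNext(2)
 *   // a subscriber arriving at this point first receives the cached 2,
 *   // then any events emitted afterwards
 * }}}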
*/
final class BehaviorSubject[A] private (initialValue: A) extends Subject[A, A] { self =>
private[this] val stateRef =
Atomic(BehaviorSubject.State[A](initialValue))
def size: Int =
stateRef.get().subscribers.size
def unsafeSubscribeFn(subscriber: Subscriber[A]): Cancelable = {
import subscriber.scheduler
@tailrec
def subscribeLoop(): Cancelable = {
val state = stateRef.get()
if (state.errorThrown != null) {
subscriber.onError(state.errorThrown)
Cancelable.empty
} else if (state.isDone) {
Observable
.now(state.cached)
.unsafeSubscribeFn(subscriber)
} else {
val c = ConnectableSubscriber(subscriber)
val newState = state.addNewSubscriber(c)
if (stateRef.compareAndSet(state, newState)) {
c.pushFirst(state.cached)
val connecting = c.connect()
val cancelable = Cancelable { () =>
removeSubscriber(c)
}
connecting.syncOnStopOrFailure(_ => cancelable.cancel())
cancelable
} else {
// retry
subscribeLoop()
}
}
}
subscribeLoop()
}
@tailrec
def onNext(elem: A): Future[Ack] = {
val state = stateRef.get()
if (state.isDone) Stop
else {
val newState = state.cacheElem(elem)
if (!stateRef.compareAndSet(state, newState)) {
onNext(elem) // retry
} else {
val iterator = state.subscribers.iterator
// counter that's only used when we go async, hence the null
var result: PromiseCounter[Continue.type] = null
while (iterator.hasNext) {
val subscriber = iterator.next()
// using the scheduler defined by each subscriber
import subscriber.scheduler
val ack =
try subscriber.onNext(elem)
catch {
case ex if NonFatal(ex) => Future.failed(ex)
}
// if execution is synchronous, takes the fast-path
if (ack.isCompleted) {
// subscriber canceled or triggered an error? then remove
if (ack != Continue && ack.value.get != Continue.AsSuccess)
removeSubscriber(subscriber)
} else {
// going async, so we've got to count active futures for final Ack
// the counter starts from 1 because zero implies isCompleted
if (result == null) result = PromiseCounter(Continue, 1)
result.acquire()
ack.onComplete {
case Success(Continue) =>
result.countdown()
case _ =>
// subscriber canceled or triggered an error? then remove
removeSubscriber(subscriber)
result.countdown()
}
}
}
// has fast-path for completely synchronous invocation
if (result == null) Continue
else {
result.countdown()
result.future
}
}
}
}
override def onError(ex: Throwable): Unit =
onCompleteOrError(ex)
override def onComplete(): Unit =
onCompleteOrError(null)
@tailrec
private def onCompleteOrError(ex: Throwable): Unit = {
val state = stateRef.get()
if (!state.isDone) {
if (!stateRef.compareAndSet(state, state.markDone(ex)))
onCompleteOrError(ex)
else {
val iterator = state.subscribers.iterator
while (iterator.hasNext) {
val ref = iterator.next()
if (ex != null)
ref.onError(ex)
else
ref.onComplete()
}
}
}
}
@tailrec
private def removeSubscriber(s: ConnectableSubscriber[A]): Unit = {
val state = stateRef.get()
val newState = state.removeSubscriber(s)
if (!stateRef.compareAndSet(state, newState))
removeSubscriber(s)
}
}
object BehaviorSubject {
/** Builder for [[BehaviorSubject]] */
def apply[A](initialValue: A): BehaviorSubject[A] =
new BehaviorSubject[A](initialValue)
/** Internal state for [[BehaviorSubject]] */
private final case class State[A](
cached: A,
subscribers: Set[ConnectableSubscriber[A]] = Set.empty[ConnectableSubscriber[A]],
isDone: Boolean = false,
errorThrown: Throwable = null) {
def cacheElem(elem: A): State[A] = {
copy(cached = elem)
}
def addNewSubscriber(s: ConnectableSubscriber[A]): State[A] =
copy(subscribers = subscribers + s)
def removeSubscriber(toRemove: ConnectableSubscriber[A]): State[A] = {
val newSet = subscribers - toRemove
copy(subscribers = newSet)
}
def markDone(ex: Throwable): State[A] = {
copy(subscribers = Set.empty, isDone = true, errorThrown = ex)
}
}
}
| monifu/monifu | monix-reactive/shared/src/main/scala/monix/reactive/subjects/BehaviorSubject.scala | Scala | apache-2.0 | 6,321 |
package com.sksamuel.avro4s
import java.nio.ByteBuffer
import java.util.UUID
import org.apache.avro.generic.GenericData.EnumSymbol
import org.apache.avro.generic.GenericRecord
import shapeless.ops.coproduct.Reify
import shapeless.{:+:, CNil, Coproduct, Generic, Inl, Inr, Lazy}
import scala.collection.JavaConverters._
import scala.language.experimental.macros
trait ToValue[A] {
def apply(value: A): Any = value
}
trait LowPriorityToValue {
implicit def genCoproduct[T, C <: Coproduct](implicit gen: Generic.Aux[T, C],
coproductToValue: ToValue[C]): ToValue[T] = new ToValue[T] {
override def apply(value: T): Any = coproductToValue(gen.to(value))
}
implicit def apply[T](implicit toRecord: ToRecord[T]): ToValue[T] = new ToValue[T] {
override def apply(value: T): GenericRecord = toRecord(value)
}
def fixed[T]: ToValue[T] = macro LowPriorityToValue.fixedImpl[T]
}
object LowPriorityToValue {
def fixedImpl[T: c.WeakTypeTag](c: scala.reflect.macros.whitebox.Context): c.Expr[ToValue[T]] = {
import c.universe._
val tpe = weakTypeTag[T].tpe
val fixedAnnotation: Option[AvroFixed] = tpe.typeSymbol.annotations.collectFirst {
case anno if anno.tree.tpe <:< c.weakTypeOf[AvroFixed] =>
anno.tree.children.tail match {
case Literal(Constant(size: Int)) :: Nil => AvroFixed(size)
}
}
c.Expr[ToValue[T]](
q"""
{
val schema = com.sksamuel.avro4s.SchemaFor[$tpe]()
new com.sksamuel.avro4s.ToValue[$tpe] {
override def apply(t: $tpe): org.apache.avro.generic.GenericFixed = {
new org.apache.avro.generic.GenericData.Fixed(schema, t.bytes.array)
}
}
}
""")
}
}
object ToValue extends LowPriorityToValue {
implicit object BooleanToValue extends ToValue[Boolean]
implicit object StringToValue extends ToValue[String]
implicit object DoubleToValue extends ToValue[Double]
implicit object FloatToValue extends ToValue[Float]
implicit object IntToValue extends ToValue[Int]
implicit object LongToValue extends ToValue[Long]
implicit object UUIDToValue extends ToValue[UUID] {
override def apply(value: UUID): String = value.toString
}
implicit object BigDecimalToValue extends ToValue[BigDecimal] {
override def apply(value: BigDecimal): ByteBuffer = ByteBuffer.wrap(value.toString.getBytes)
}
implicit def ListToValue[T](implicit tovalue: ToValue[T]): ToValue[List[T]] = new ToValue[List[T]] {
override def apply(values: List[T]): Any = values.map(tovalue.apply).asJava
}
implicit def SetToValue[T](implicit tovalue: ToValue[T]): ToValue[Set[T]] = new ToValue[Set[T]] {
override def apply(values: Set[T]): Any = values.map(tovalue.apply).asJava
}
implicit def VectorToValue[T](implicit tovalue: ToValue[T]): ToValue[Vector[T]] = new ToValue[Vector[T]] {
override def apply(values: Vector[T]): Any = values.map(tovalue.apply).asJava
}
implicit def SeqToValue[T](implicit tovalue: ToValue[T]): ToValue[Seq[T]] = new ToValue[Seq[T]] {
override def apply(values: Seq[T]): Any = values.map(tovalue.apply).asJava
}
implicit def OptionToValue[T](implicit tovalue: ToValue[T]) = new ToValue[Option[T]] {
override def apply(value: Option[T]): Any = value.map(tovalue.apply).orNull
}
implicit def ArrayToValue[T](implicit tovalue: ToValue[T]): ToValue[Array[T]] = new ToValue[Array[T]] {
override def apply(value: Array[T]): Any = value.headOption match {
case Some(b: Byte) => ByteBuffer.wrap(value.asInstanceOf[Array[Byte]])
case _ => value.map(tovalue.apply).toSeq.asJavaCollection
}
}
implicit object ByteArrayToValue extends ToValue[Array[Byte]] {
override def apply(value: Array[Byte]): ByteBuffer = ByteBuffer.wrap(value)
}
implicit def MapToValue[T](implicit tovalue: ToValue[T]) = new ToValue[Map[String, T]] {
override def apply(value: Map[String, T]): java.util.Map[String, T] = {
value.mapValues(tovalue.apply).asInstanceOf[Map[String, T]].asJava
}
}
implicit def JavaEnumToValue[E <: Enum[_]]: ToValue[E] = new ToValue[E] {
override def apply(value: E): Any = new EnumSymbol(null, value)
}
implicit def ScalaEnumToValue[E <: Enumeration#Value]: ToValue[E] = new ToValue[E] {
override def apply(value: E): Any = new EnumSymbol(null, value.toString)
}
implicit def EitherToValue[T, U](implicit lefttovalue: ToValue[T], righttovalue: ToValue[U]) = new ToValue[Either[T, U]] {
override def apply(value: Either[T, U]): Any = value match {
case Left(left) => lefttovalue(left)
case Right(right) => righttovalue(right)
}
}
// A coproduct is a union, or a generalised either.
// A :+: B :+: C :+: CNil is a type that is either an A, or a B, or a C.
// Shapeless's implementation builds up the type recursively,
// (i.e., it's actually A :+: (B :+: (C :+: CNil)))
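  // For example, a value of the hypothetical type Int :+: String :+: CNil is either
  // an Int or a String, and nothing else.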
// `apply` here should never actually be invoked, because you can't
// actually construct a value of type a: CNil, but the ToValue[CNil]
// needs to exist to supply a base case for the recursion.
implicit def CNilToValue: ToValue[CNil] = new ToValue[CNil] {
override def apply(value: CNil): Any = sys.error("This should never happen: CNil has no inhabitants")
}
// A :+: B is either Inl(value: A) or Inr(value: B), continuing the recursion
implicit def CoproductToValue[S, T <: Coproduct](implicit curToValue: ToValue[S], restToValue: ToValue[T]): ToValue[S :+: T] = new ToValue[S :+: T] {
override def apply(value: S :+: T): Any = value match {
case Inl(s) => curToValue(s)
case Inr(t) => restToValue(t)
}
}
implicit def genTraitObjectEnum[T, C <: Coproduct](implicit gen: Generic.Aux[T, C],
objs: Reify[C]): ToValue[T] = new ToValue[T] {
override def apply(value: T): Any = new EnumSymbol(null, value.toString)
}
}
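/**
 * Typeclass that turns a value of `T` (typically a case class) into an Avro `GenericRecord`;
 * instances are materialised by the `applyImpl` macro in the companion object below.
 */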
trait ToRecord[T] extends Serializable {
def apply(t: T): GenericRecord
}
object ToRecord {
implicit def apply[T]: ToRecord[T] = macro applyImpl[T]
def applyImpl[T: c.WeakTypeTag](c: scala.reflect.macros.whitebox.Context): c.Expr[ToRecord[T]] = {
import c.universe._
val helper = TypeHelper(c)
val tpe = weakTypeTag[T].tpe
val constructorArgumentsWithTypes = helper.fieldsOf(tpe)
val converters: Seq[Tree] = constructorArgumentsWithTypes.map { case (sym, sig) =>
val fixedAnnotation: Option[AvroFixed] = sig.typeSymbol.annotations.collectFirst {
case anno if anno.tree.tpe <:< c.weakTypeOf[AvroFixed] =>
anno.tree.children.tail match {
case Literal(Constant(size: Int)) :: Nil => AvroFixed(size)
}
}
fixedAnnotation match {
case Some(AvroFixed(size)) =>
q"""{
shapeless.Lazy(com.sksamuel.avro4s.ToValue.fixed[$sig])
}
"""
case None =>
q"""com.sksamuel.avro4s.ToRecord.lazyConverter[$sig]"""
}
}
val puts: Seq[Tree] = constructorArgumentsWithTypes.zipWithIndex.map {
case ((f, sig), idx) =>
val name = f.name.asInstanceOf[c.TermName]
val fieldName: String = name.decodedName.toString
val fixedAnnotation: Option[AvroFixed] = sig.typeSymbol.annotations.collectFirst {
case anno if anno.tree.tpe <:< c.weakTypeOf[AvroFixed] =>
anno.tree.children.tail match {
case Literal(Constant(size: Int)) :: Nil => AvroFixed(size)
}
}
val valueClass = sig.typeSymbol.isClass && sig.typeSymbol.asClass.isDerivedValueClass
// if a field is a value class we need to handle it here, using a converter
// for the underlying value rather than the actual value class
if (fixedAnnotation.nonEmpty) {
q"""
{
val converter = converters($idx).asInstanceOf[shapeless.Lazy[com.sksamuel.avro4s.ToValue[$sig]]]
record.put($fieldName, converter.value(t.$name : $sig))
}
"""
} else if (valueClass) {
val valueCstr = sig.typeSymbol.asClass.primaryConstructor.asMethod.paramLists.flatten.head
val valueFieldType = valueCstr.typeSignature
val valueFieldName = valueCstr.name.asInstanceOf[c.TermName]
q"""
{
val converter = com.sksamuel.avro4s.ToRecord.lazyConverter[$valueFieldType]
record.put($fieldName, converter.value(t.$name.$valueFieldName : $valueFieldType))
}
"""
} else {
q"""
{
val converter = converters($idx).asInstanceOf[shapeless.Lazy[com.sksamuel.avro4s.ToValue[$sig]]]
record.put($fieldName, converter.value(t.$name : $sig))
}
"""
}
}
c.Expr[ToRecord[T]](
q"""new com.sksamuel.avro4s.ToRecord[$tpe] {
private val schemaFor : com.sksamuel.avro4s.SchemaFor[$tpe] = com.sksamuel.avro4s.SchemaFor[$tpe]
private val converters : Array[shapeless.Lazy[com.sksamuel.avro4s.ToValue[_]]] = Array(..$converters)
def apply(t : $tpe): org.apache.avro.generic.GenericRecord = {
val record = new org.apache.avro.generic.GenericData.Record(schemaFor())
..$puts
record
}
}
"""
)
}
def lazyConverter[T](implicit toValue: Lazy[ToValue[T]]): Lazy[ToValue[T]] = toValue
}
| YuvalItzchakov/avro4s | avro4s-macros/src/main/scala/com/sksamuel/avro4s/ToRecord.scala | Scala | mit | 9,489 |
import org.scalatest.{Matchers, FunSuite}
class GardenTest extends FunSuite with Matchers {
test("missing child tests") {
Garden.defaultGarden("RC\\nGG").getPlants("Potter") should
equal(List.empty)
}
test("alice tests") {
Garden.defaultGarden("RC\\nGG").getPlants("Alice") should
equal(List(Plant.Radishes, Plant.Clover, Plant.Grass, Plant.Grass))
Garden.defaultGarden("VC\\nRC").getPlants("Alice") should
equal(List(Plant.Violets, Plant.Clover, Plant.Radishes, Plant.Clover))
}
test("small garden") {
Garden.defaultGarden("VVCG\\nVVRC").getPlants("Bob") should
equal(List(Plant.Clover, Plant.Grass, Plant.Radishes, Plant.Clover))
}
test("medium garden") {
val garden = Garden.defaultGarden("VVCCGG\\nVVCCGG")
garden.getPlants("Bob") should
equal(List(Plant.Clover, Plant.Clover, Plant.Clover, Plant.Clover))
garden.getPlants("Charlie") should
equal(List(Plant.Grass, Plant.Grass, Plant.Grass, Plant.Grass))
}
test("full garden") {
val garden = Garden.defaultGarden("VRCGVVRVCGGCCGVRGCVCGCGV\\nVRCCCGCRRGVCGCRVVCVGCGCV")
garden.getPlants("Alice") should
equal(List(Plant.Violets, Plant.Radishes, Plant.Violets, Plant.Radishes))
garden.getPlants("Bob") should
equal(List(Plant.Clover, Plant.Grass, Plant.Clover, Plant.Clover))
garden.getPlants("David") should
equal(List(Plant.Radishes, Plant.Violets, Plant.Clover, Plant.Radishes))
garden.getPlants("Eve") should
equal(List(Plant.Clover, Plant.Grass, Plant.Radishes, Plant.Grass))
garden.getPlants("Fred") should
equal(List(Plant.Grass, Plant.Clover, Plant.Violets, Plant.Clover))
garden.getPlants("Ginny") should
equal(List(Plant.Clover, Plant.Grass, Plant.Grass, Plant.Clover))
garden.getPlants("Harriet") should
equal(List(Plant.Violets, Plant.Radishes, Plant.Radishes, Plant.Violets))
garden.getPlants("Ileana") should
equal(List(Plant.Grass, Plant.Clover, Plant.Violets, Plant.Clover))
garden.getPlants("Joseph") should
equal(List(Plant.Violets, Plant.Clover, Plant.Violets, Plant.Grass))
garden.getPlants("Kincaid") should
equal(List(Plant.Grass, Plant.Clover, Plant.Clover, Plant.Grass))
garden.getPlants("Larry") should
equal(List(Plant.Grass, Plant.Violets, Plant.Clover, Plant.Violets))
}
test("surprise garden") {
val garden = Garden(List("Samantha", "Patricia", "Xander", "Roger"), "VCRRGVRG\\nRVGCCGCV")
garden.getPlants("Patricia") should
equal(List(Plant.Violets, Plant.Clover, Plant.Radishes, Plant.Violets))
garden.getPlants("Roger") should
equal(List(Plant.Radishes, Plant.Radishes, Plant.Grass, Plant.Clover))
garden.getPlants("Samantha") should
equal(List(Plant.Grass, Plant.Violets, Plant.Clover, Plant.Grass))
garden.getPlants("Xander") should
equal(List(Plant.Radishes, Plant.Grass, Plant.Clover, Plant.Violets))
}
}
| nlochschmidt/xscala | kindergarten-garden/src/test/scala/GardenTest.scala | Scala | mit | 2,936 |
/**
* Copyright 2017 Tristan Nixon
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Created by Tristan Nixon on 7/13/17.
*/
package org.memeticlabs.spark.rdd.trycatch.streaming.java
import scala.reflect.ClassTag
import org.apache.spark.streaming.api.java.JavaDStream
import org.apache.spark.streaming.dstream.DStream
import org.memeticlabs.spark.rdd.trycatch.java.JavaErrorHandler
import org.memeticlabs.spark.rdd.trycatch.java.JavaErrorHandlingRDD._
import org.memeticlabs.spark.rdd.trycatch.streaming.ErrorHandlingDStream
class JavaErrorHandlingDStream[T](dstream: DStream[T],
errorHandler: JavaErrorHandler[Any] )
(implicit val tt: ClassTag[T])
extends JavaDStream[T]( new ErrorHandlingDStream( dstream, errorHandler ) )
object JavaErrorHandlingDStream
{
def fromStream[T]( stream: JavaDStream[T], errorHandler: JavaErrorHandler[Any] ): JavaDStream[T] =
new JavaErrorHandlingDStream[T]( stream.dstream, errorHandler )(fakeClassTag[T])
} | tnixon/spark-rdd-trycatch | src/main/scala/org/memeticlabs/spark/rdd/trycatch/streaming/java/JavaErrorHandlingDStream.scala | Scala | apache-2.0 | 1,528 |
// Copyright 2014 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package au.com.cba.omnia.omnitool
import scala.util.control.NonFatal
import scala.util.{Try, Success, Failure}
import scalaz._, Scalaz._, \\&/._
/**
* A data-type useful for representing the result of a possibly failing operation.
*
* The Error result of an operation represents an initial error (either an error message or an
* exception) with the option to add more details in the form of error messages.
*
* The reason this exists as an alternative to using the more general
* `Either`, `\\/`, `Validation`, `Try` data-types is that we want to
* gain better inference (we gain most of this by specialising the
* error case), better composition (compared with `Try`, we gain most
 * of this by not mixing side-effects with map/flatMap) and better
* library support by adding specific combinators for our given
* case.
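 *
 * A usage sketch (hypothetical values):
 * {{{
 *   val port: Result[Int] =
 *     Result.safe("8080".toInt)
 *       .flatMap(p => Result.guard(p > 0, "port must be positive").map(_ => p))
 *       .addMessage("reading port")
 * }}}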
*/
sealed trait Result[A] {
/**
* `Catamorphism` for result. Concise data deconstruction that can be used as an alternative to
* pattern matching, providing stronger coverage checks.
*/
@inline final def fold[X](
ok: A => X,
error: These[String, Throwable] => X
): X = this match {
case Ok(value) => ok(value)
case Error(these) => error(these)
}
/**
* `Catamorphism` for result and the underlying `These`. Concise data deconstruction that can be
* used as an alternative to pattern matching, providing stronger coverage checks.
*/
@inline final def foldAll[X](
ok: A => X,
fail: String => X,
exception: Throwable => X,
both: (String, Throwable) => X
): X = fold(ok, _ match {
case This(s) => fail(s)
case That(e) => exception(e)
case Both(s, e) => both(s, e)
})
  /** `Catamorphism` for the success case or the error message. */
@inline final def foldMessage[X](
ok: A => X,
error: String => X
): X = fold(ok, {
case This(s) => error(s)
case That(ex) => error(ex.getMessage)
case Both(s, _) => error(s)
})
/** Map across the success case. */
def map[B](f: A => B): Result[B] =
flatMap(f andThen Result.ok)
/** Bind through the success case, this is useful to chain potentially failing operations. */
def flatMap[B](f: A => Result[B]): Result[B] =
fold(f, Result.these)
/** Convert to scalaz.\\/, this is useful to access its rich library. */
def toDisjunction: These[String, Throwable] \\/ A =
fold(_.right, _.left)
/** Convert to scala.Either, this is useful for interop. */
def toEither: Either[These[String, Throwable], A] =
toDisjunction.toEither
/** Convert to scala.Option, this is useful as it is common to not care how something fails. */
def toOption: Option[A] =
toDisjunction.toOption
/** Convert the error case to an scala.Option. */
def toError: Option[These[String, Throwable]] =
toDisjunction.swap.toOption
  /** In the success case, get the value, otherwise return `els`, useful for defaulting in the error case. */
def getOrElse(els: => A): A =
toOption.getOrElse(els)
/** Take the first successful result. Useful for chaining optional operations. */
def or(other: => Result[A]): Result[A] =
fold(Result.ok, _ => other)
/** Alias for `or`. Provides nice syntax: `Result.fail("bad") ||| Result.ok(10)` */
def |||(other: => Result[A]): Result[A] =
or(other)
/**
* Set the error message in a failure case. Useful for providing contextual information without
* having to inspect result.
*
* NB: This discards any existing message.
*/
def setMessage(message: String): Result[A] =
foldAll(
r => Result.ok(r),
_ => Result.fail(message),
e => Result.error(message, e),
(_, e) => Result.error(message, e)
)
/**
* Adds an additional error message. Useful for adding more context as the error goes up the stack.
*
* The new message is prepended to any existing message.
*/
def addMessage(message: String, separator: String = ": "): Result[A] =
foldAll(
r => Result.ok(r),
m => Result.fail(s"${message}${separator}$m"),
e => Result.error(message, e),
(m, e) => Result.error(s"${message}${separator}$m", e)
)
}
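// Usage sketch (editorial addition, not part of the original source): chaining `Result`
// operations with `flatMap` and adding context via `addMessage`; the name `parsePort`
// below is hypothetical.
//
//   def parsePort(s: String): Result[Int] =
//     Result.safe(s.toInt).addMessage(s"'$s' is not a valid port")
//
//   val port: Result[Int] =
//     parsePort("8080").flatMap(p => Result.guard(p > 0, "port must be positive").map(_ => p))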
/**
* Successful `Result`, public so it can be used for pattern matching, prefer Result.ok for
* construction.
*/
case class Ok[A](value: A) extends Result[A]
/**
* Failing `Result`, public so it can be used for pattern matching, prefer
* Result.{these,error,fail,exception} for construction.
*/
case class Error[A](error: These[String, Throwable]) extends Result[A]
object Result {
/** Exception safe `Result` creation. */
def safe[A](thunk: => A): Result[A] =
try ok(thunk) catch { case NonFatal(t) => exception(t) }
/**
   * Smart constructor for a successful `Result`. Provides better inference than direct use of
   * the constructor. */
def ok[A](value: A): Result[A] =
Ok[A](value)
/** Smart constructor for a failing `Result` built from a `These`. */
def these[A](e: These[String, Throwable]): Result[A] =
Error(e)
/** Smart constructor for a failing case with a message and an exception. */
def error[A](message: String, t: Throwable): Result[A] =
these(Both(message, t))
/** Smart constructor for a failing case with only a message. */
def fail[A](message: String): Result[A] =
these(This(message))
/** Smart constructor for a failing case with only an exception. */
def exception[A](t: Throwable): Result[A] =
these(That(t))
/** Smart constructor for converting from an `Either[String, A]`. */
def eitherFail[A](value: Either[String, A]): Result[A] = value match {
case Left(e) => fail(e)
case Right(v) => ok(v)
}
/** Smart constructor for converting from an `Either[Throwable, A]`. */
def eitherException[A](value: Either[Throwable, A]): Result[A] = value match {
case Left(e) => exception(e)
case Right(v) => ok(v)
}
/** Smart constructor for converting from an `Either[(String, Throwable), A]`. */
def eitherError[A](value: Either[(String, Throwable), A]): Result[A] = value match {
case Left((err, exc)) => error(err, exc)
case Right(v) => ok(v)
}
/** Smart constructor for converting from a `Try[A]`. */
def fromTry[A](value: Try[A]): Result[A] = value match {
case Failure(e) => exception(e)
case Success(v) => ok(v)
}
/**
* Fails if condition is not met
*
   * Provided instead of [[scalaz.MonadPlus]] typeclass, as Result does not
* quite meet the required laws.
*/
def guard(ok: Boolean, message: String): Result[Unit] =
if (ok) Result.ok[Unit](()) else fail[Unit](message)
/**
   * Ensures a Result operation returning a boolean success flag fails if unsuccessful
*
* Provided instead of [[scalaz.MonadPlus]] typeclass, as Result does not
* quite meet the required laws.
*/
def mandatory(result: Result[Boolean], message: String): Result[Unit] =
result.flatMap(guard(_, message))
/**
   * Ensures a Result operation returning a boolean success flag fails if successful
*
* Provided instead of [[scalaz.MonadPlus]] typeclass, as Result does not
* quite meet the required laws.
*/
def forbidden(result: Result[Boolean], message: String): Result[Unit] =
result.flatMap(prevent(_, message))
/**
* Fails if condition is met
*
   * Provided instead of [[scalaz.MonadPlus]] typeclass, as Result does not
* quite meet the required laws.
*/
def prevent(fail: Boolean, message: String): Result[Unit] =
guard(!fail, message)
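  // Illustrative sketch (editorial addition): `guard`, `prevent` and `mandatory` turn boolean
  // checks into failing Results; `file` below is a hypothetical java.io.File value.
  //
  //   Result.guard(file.exists, "file is missing")                  // Result[Unit]
  //   Result.mandatory(Result.safe(file.delete()), "delete failed") // fails unless delete() returns true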
/** ResultantMonad instance for Result. */
implicit def ResultResultantMonad: ResultantMonad[Result] = new ResultantMonad[Result] {
def rPoint[A](v: => Result[A]): Result[A] = v
def rBind[A, B](ma: Result[A])(f: Result[A] => Result[B]): Result[B] = f(ma)
}
/** scalaz Monad instance for Result. */
implicit def ResultMonad: Monad[Result] = new Monad[Result] {
def point[A](v: => A) = ok(v)
def bind[A, B](a: Result[A])(f: A => Result[B]) = a flatMap f
}
/** scalaz Equal instance for Result. */
implicit def ResultEqual[A : Equal]: Equal[Result[A]] = {
implicit def ThrowableEqual = Equal.equalA[Throwable]
implicitly[Equal[These[String, Throwable] \\/ A]].contramap(_.toDisjunction)
}
/** scalaz Plus instance for Result. */
implicit def ResultPlus: Plus[Result] = new Plus[Result] {
def plus[A](a: Result[A], b: => Result[A]): Result[A] =
a or b
}
}
| CommBank/omnitool | core/src/main/scala/au/com/cba/omnia/omnitool/Result.scala | Scala | apache-2.0 | 9,112 |
package repositories.conservation.dao
import com.google.inject.{Inject, Singleton}
import models.conservation.events._
import no.uio.musit.functional.FutureMusitResult
import no.uio.musit.models.{ActorId, EventId}
import no.uio.musit.repositories.DbErrorHandlers
import org.joda.time.DateTime
import play.api.db.slick.DatabaseConfigProvider
import repositories.conservation.DaoUtils
import repositories.shared.dao.ColumnTypeMappers
import scala.concurrent.ExecutionContext
@Singleton
class ActorRoleDateDao @Inject()(
implicit
val dbConfigProvider: DatabaseConfigProvider,
val ec: ExecutionContext,
val daoUtils: DaoUtils
) extends ConservationEventTableProvider
with ColumnTypeMappers
with ConservationTables
with DbErrorHandlers {
import profile.api._
private val eventActorRoleDateTable = TableQuery[EventActorAndRoleAndDate]
private val roleTable = TableQuery[Role]
def insertActorRoleAction(
eventId: EventId,
roleId: Int,
actorId: ActorId,
whatDate: Option[DateTime]
): DBIO[Int] = {
val action = eventActorRoleDateTable += EventActorRoleDate(
eventId,
roleId,
actorId,
whatDate
)
action
}
/**
   * An insert action for inserting actor, role and date relations for an event
   * into the event actor/role/date table.
   *
   * @param eventId the eventId
   * @param actorsAndDates a list of actors, roles and dates that relate to the eventId
   * @return a DBIO[Int] with the number of rows inserted
*/
def insertActorRoleDateAction(
eventId: EventId,
actorsAndDates: Seq[ActorRoleDate]
): DBIO[Int] = {
val actions =
actorsAndDates.map(m => insertActorRoleAction(eventId, m.roleId, m.actorId, m.date))
DBIO.sequence(actions).map(_.sum)
}
def deleteActorRoleDateAction(eventId: EventId): DBIO[Int] = {
val q = eventActorRoleDateTable.filter(oe => oe.eventId === eventId)
val action = q.delete
action
}
def updateActorRoleDateAction(
eventId: EventId,
actorAndDates: Seq[ActorRoleDate]
): DBIO[Int] = {
for {
deleted <- deleteActorRoleDateAction(eventId)
inserted <- insertActorRoleDateAction(eventId, actorAndDates)
} yield inserted
}
def getEventActorRoleDates(eventId: EventId): FutureMusitResult[Seq[ActorRoleDate]] = {
val action =
eventActorRoleDateTable
.filter(_.eventId === eventId)
.sortBy(_.roleId)
.map(ard => (ard.roleId, ard.actorId, ard.actorDate))
.result
daoUtils
.dbRun(action, s"getEventActorRoleDatesAction failed for eventId $eventId")
.map(_.map(m => ActorRoleDate(m._1, m._2, m._3)))
}
def getRoleList: FutureMusitResult[Seq[EventRole]] = {
daoUtils.dbRun(roleTable.result, "getRoleList failed")
}
private class EventActorAndRoleAndDate(tag: Tag)
extends Table[EventActorRoleDate](
tag,
Some(SchemaName),
EventActorsRolesTableName
) {
val eventId = column[EventId]("EVENT_ID")
val roleId = column[Int]("ROLE_ID")
val actorId = column[ActorId]("ACTOR_ID")
val actorDate = column[Option[DateTime]]("ACTOR_ROLE_DATE")
val create = (
eventId: EventId,
roleId: Int,
actorId: ActorId,
actorDate: Option[DateTime]
) =>
EventActorRoleDate(
eventId = eventId,
roleId = roleId,
actorId = actorId,
actorDate = actorDate
)
val destroy = (eard: EventActorRoleDate) =>
Some(
(
eard.eventId,
eard.roleId,
eard.actorId,
eard.actorDate
)
)
// scalastyle:off method.name
def * = (eventId, roleId, actorId, actorDate) <> (create.tupled, destroy)
// scalastyle:on method.name
}
private class Role(tag: Tag)
extends Table[EventRole](
tag,
Some(SchemaName),
RolesTableName
) {
val roleId = column[Int]("ROLE_ID")
val noRole = column[String]("NO_ROLE")
val enRole = column[String]("EN_ROLE")
val roleFor = column[String]("ROLE_FOR")
// scalastyle:off method.name
def * =
(roleId, noRole, enRole, roleFor) <> ((EventRole.apply _).tupled, EventRole.unapply)
// scalastyle:on method.name
}
}
| MUSIT-Norway/musit | service_backend/app/repositories/conservation/dao/ActorRoleDateDao.scala | Scala | gpl-2.0 | 4,228 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.test.SharedSQLContext
class ParquetProtobufCompatibilitySuite extends ParquetCompatibilityTest with SharedSQLContext {
private def readParquetProtobufFile(name: String): DataFrame = {
val url = Thread.currentThread().getContextClassLoader.getResource(name)
sqlContext.read.parquet(url.toString)
}
test("unannotated array of primitive type") {
checkAnswer(readParquetProtobufFile("old-repeated-int.parquet"), Row(Seq(1, 2, 3)))
}
test("unannotated array of struct") {
checkAnswer(
readParquetProtobufFile("old-repeated-message.parquet"),
Row(
Seq(
Row("First inner", null, null),
Row(null, "Second inner", null),
Row(null, null, "Third inner"))))
checkAnswer(
readParquetProtobufFile("proto-repeated-struct.parquet"),
Row(
Seq(
Row("0 - 1", "0 - 2", "0 - 3"),
Row("1 - 1", "1 - 2", "1 - 3"))))
checkAnswer(
readParquetProtobufFile("proto-struct-with-array-many.parquet"),
Seq(
Row(
Seq(
Row("0 - 0 - 1", "0 - 0 - 2", "0 - 0 - 3"),
Row("0 - 1 - 1", "0 - 1 - 2", "0 - 1 - 3"))),
Row(
Seq(
Row("1 - 0 - 1", "1 - 0 - 2", "1 - 0 - 3"),
Row("1 - 1 - 1", "1 - 1 - 2", "1 - 1 - 3"))),
Row(
Seq(
Row("2 - 0 - 1", "2 - 0 - 2", "2 - 0 - 3"),
Row("2 - 1 - 1", "2 - 1 - 2", "2 - 1 - 3")))))
}
test("struct with unannotated array") {
checkAnswer(
readParquetProtobufFile("proto-struct-with-array.parquet"),
Row(10, 9, Seq.empty, null, Row(9), Seq(Row(9), Row(10))))
}
test("unannotated array of struct with unannotated array") {
checkAnswer(
readParquetProtobufFile("nested-array-struct.parquet"),
Seq(
Row(2, Seq(Row(1, Seq(Row(3))))),
Row(5, Seq(Row(4, Seq(Row(6))))),
Row(8, Seq(Row(7, Seq(Row(9)))))))
}
test("unannotated array of string") {
checkAnswer(
readParquetProtobufFile("proto-repeated-string.parquet"),
Seq(
Row(Seq("hello", "world")),
Row(Seq("good", "bye")),
Row(Seq("one", "two", "three"))))
}
}
| ArvinDevel/onlineAggregationOnSparkV2 | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetProtobufCompatibilitySuite.scala | Scala | apache-2.0 | 3,114 |
// Databricks notebook source
// MAGIC %md
// MAGIC
// MAGIC # [SDS-2.2, Scalable Data Science](https://lamastex.github.io/scalable-data-science/sds/2/2/)
// COMMAND ----------
// MAGIC %md
// MAGIC Archived YouTube video of this live unedited lab-lecture:
// MAGIC
// MAGIC [](https://www.youtube.com/embed/63E_Nps98Bk?start=0&end=1222&autoplay=1)
// COMMAND ----------
// MAGIC %md
// MAGIC # Twitter Streaming Language Classifier
// MAGIC
// MAGIC This is a databricksification of [https://databricks.gitbooks.io/databricks-spark-reference-applications/content/twitter_classifier/index.html](https://databricks.gitbooks.io/databricks-spark-reference-applications/content/twitter_classifier/index.html) by Amendra Shreshta.
// MAGIC
// MAGIC Note that you need to change the fields in background notebooks like `025_a_extendedTwitterUtils2run` as explained in the corresponding videos by Amendra.
// COMMAND ----------
// MAGIC %run "scalable-data-science/sds-2-2/025_a_extendedTwitterUtils2run"
// COMMAND ----------
import org.apache.spark._
import org.apache.spark.storage._
import org.apache.spark.streaming._
import scala.math.Ordering
import twitter4j.auth.OAuthAuthorization
import twitter4j.conf.ConfigurationBuilder
// COMMAND ----------
import twitter4j.auth.OAuthAuthorization
import twitter4j.conf.ConfigurationBuilder
def MyconsumerKey = "fB9Ww8Z4TIauPWKNPL6IN7xqd"
def MyconsumerSecret = "HQqiIs3Yx3Mnv5gZTwQ6H2DsTlae4UNry5uNgylsonpFr46qXy"
def Mytoken = "28513570-BfZrGoswVp1bz11mhwbVIGoJwjWCWgGoZGQXAqCO8"
def MytokenSecret = "7fvag0GcXRlv42yBaVDMAmL1bmPyMZzNrqioMY7UwGbxr"
System.setProperty("twitter4j.oauth.consumerKey", MyconsumerKey)
System.setProperty("twitter4j.oauth.consumerSecret", MyconsumerSecret)
System.setProperty("twitter4j.oauth.accessToken", Mytoken)
System.setProperty("twitter4j.oauth.accessTokenSecret", MytokenSecret)
// COMMAND ----------
// Downloading tweets and building model for clustering
// COMMAND ----------
// ## Let's create a directory in dbfs for storing tweets in the cluster's distributed file system.
val outputDirectoryRoot = "/datasets/tweetsStreamTmp" // output directory
// COMMAND ----------
// to remove a pre-existing directory and start from scratch uncomment next line and evaluate this cell
dbutils.fs.rm(outputDirectoryRoot, true)
// COMMAND ----------
// ## Capture tweets in every sliding window of slideInterval many milliseconds.
val slideInterval = new Duration(1 * 1000) // 1 * 1000 = 1000 milli-seconds = 1 sec
// COMMAND ----------
// Our goal is to take each RDD in the twitter DStream and write it as a json file in our dbfs.
// Create a Spark Streaming Context.
val ssc = new StreamingContext(sc, slideInterval)
// COMMAND ----------
// Create a Twitter Stream for the input source.
val auth = Some(new OAuthAuthorization(new ConfigurationBuilder().build()))
val twitterStream = ExtendedTwitterUtils.createStream(ssc, auth)
// COMMAND ----------
// Let's import google's json library next.
import com.google.gson.Gson
//Let's map the tweets into json formatted string (one tweet per line).
val twitterStreamJson = twitterStream.map(
x => { val gson = new Gson();
val xJson = gson.toJson(x)
xJson
}
)
// COMMAND ----------
val partitionsEachInterval = 1
val batchInterval = 1 // in minutes
val timeoutJobLength = batchInterval * 5
var newContextCreated = false
var numTweetsCollected = 0L // track number of tweets collected
twitterStreamJson.foreachRDD((rdd, time) => { // for each filtered RDD in the DStream
val count = rdd.count()
if (count > 0) {
val outputRDD = rdd.repartition(partitionsEachInterval) // repartition as desired
// to write to parquet directly in append mode in one directory per 'time'------------
val outputDF = outputRDD.toDF("tweetAsJsonString")
// get some time fields from current `.Date()`
val year = (new java.text.SimpleDateFormat("yyyy")).format(new java.util.Date())
val month = (new java.text.SimpleDateFormat("MM")).format(new java.util.Date())
val day = (new java.text.SimpleDateFormat("dd")).format(new java.util.Date())
val hour = (new java.text.SimpleDateFormat("HH")).format(new java.util.Date())
// write to a file with a clear time-based hierarchical directory structure for example
outputDF.write.mode(SaveMode.Append)
.parquet(outputDirectoryRoot+ "/"+ year + "/" + month + "/" + day + "/" + hour + "/" + time.milliseconds)
// end of writing as parquet file-------------------------------------
numTweetsCollected += count // update with the latest count
}
})
// COMMAND ----------
// ## Let's start the spark streaming context we have created next.
ssc.start()
// COMMAND ----------
// total tweets downloaded
numTweetsCollected
// COMMAND ----------
// ## Go to SparkUI and see if a streaming job is already running. If so you need to terminate it before starting a new streaming job. Only one streaming job can be run on the DB CE.
// # let's stop the streaming job next.
ssc.stop(stopSparkContext = false)
StreamingContext.getActive.foreach { _.stop(stopSparkContext = false) }
// COMMAND ----------
// MAGIC %run "scalable-data-science/sds-2-2/025_b_TTTDFfunctions"
// COMMAND ----------
// #Let's examine what was saved in dbfs
display(dbutils.fs.ls(outputDirectoryRoot))
// COMMAND ----------
// Replace the date with current date
val date = "/2017/11/*"
val rawDF = fromParquetFile2DF(outputDirectoryRoot + date +"/*/*") //.cache()
val TTTsDF = tweetsDF2TTTDF(tweetsJsonStringDF2TweetsDF(rawDF)).cache()
// COMMAND ----------
// Creating SQL table
TTTsDF.createOrReplaceTempView("tbl_tweet")
// COMMAND ----------
sqlContext.sql("SELECT lang, CPostUserName, CurrentTweet FROM tbl_tweet LIMIT 10").collect.foreach(println)
// COMMAND ----------
// Checking the language of tweets
sqlContext.sql(
"SELECT lang, COUNT(*) as cnt FROM tbl_tweet " +
"GROUP BY lang ORDER BY cnt DESC limit 1000")
.collect.foreach(println)
// COMMAND ----------
// extracting just tweets from the table and converting it to String
val texts = sqlContext
.sql("SELECT CurrentTweet from tbl_tweet")
.map(_.toString)
// COMMAND ----------
import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.mllib.linalg.{Vector, Vectors}
// COMMAND ----------
/*Create feature vectors by turning each tweet into bigrams of characters (an n-gram model)
and then hashing those to a length-1000 feature vector that we can pass to MLlib.*/
def featurize(s: String): Vector = {
val n = 1000
val result = new Array[Double](n)
val bigrams = s.sliding(2).toArray
for (h <- bigrams.map(_.hashCode % n)) {
result(h) += 1.0 / bigrams.length
}
Vectors.sparse(n, result.zipWithIndex.filter(_._1 != 0).map(_.swap))
}
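// Worked illustration (added note, not in the original notebook): for the text "spark" the
// character bigrams are "sp", "pa", "ar", "rk"; each bigram's hashCode modulo 1000 selects a
// bucket and each occurrence adds 1.0 / 4 to that bucket, so featurize("spark") yields a sparse
// length-1000 vector with at most four non-zero entries.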
// COMMAND ----------
//Cache the vectors RDD since it will be used for all the KMeans iterations.
val vectors = texts.rdd
.map(featurize)
.cache()
// COMMAND ----------
// cache is lazy, so count forces the data to be stored in memory
vectors.count()
// COMMAND ----------
vectors.first()
// COMMAND ----------
// Training the model with 10 clusters and 10 iterations
val model = KMeans.train(vectors, k=10, maxIterations = 10)
// COMMAND ----------
// Sample 100 of the original set
val some_tweets = texts.take(100)
// COMMAND ----------
// iterate through the 100 samples and show which cluster they are in
for (i <- 0 until 10) {
println(s"\\nCLUSTER $i:")
some_tweets.foreach { t =>
if (model.predict(featurize(t)) == i) {
println(t)
}
}
}
// COMMAND ----------
// to remove a pre-existing model and start from scratch
dbutils.fs.rm("/datasets/model", true)
// COMMAND ----------
// save the model
sc.makeRDD(model.clusterCenters).saveAsObjectFile("/datasets/model")
// COMMAND ----------
import org.apache.spark.mllib.clustering.KMeans
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.clustering.KMeansModel
// COMMAND ----------
// Checking if the model works
val clusterNumber = 5
val modelFile = "/datasets/model"
val model: KMeansModel = new KMeansModel(sc.objectFile[Vector](modelFile).collect)
model.predict(featurize("واحد صاحبى لو حد يعرف اكونت وزير التعليم ")) == clusterNumber
// COMMAND ----------
// Loading model and printing tweets that matched the desired cluster
// COMMAND ----------
var newContextCreated = false
var num = 0
// Create a Spark Streaming Context.
@transient val ssc = new StreamingContext(sc, slideInterval)
// Create a Twitter Stream for the input source.
@transient val auth = Some(new OAuthAuthorization(new ConfigurationBuilder().build()))
@transient val twitterStream = ExtendedTwitterUtils.createStream(ssc, auth)
//Replace the cluster number as you desire, between 0 and 9
val clusterNumber = 6
//model location
val modelFile = "/datasets/model"
// Get tweets from twitter
val Tweet = twitterStream.map(_.getText)
//Tweet.print()
println("Initalizaing the the KMeans model...")
val model: KMeansModel = new KMeansModel(sc.objectFile[Vector](modelFile).collect)
//printing tweets that match our chosen cluster
Tweet.foreachRDD(rdd => {
rdd.collect().foreach(i =>
{
val record = i
if (model.predict(featurize(record)) == clusterNumber) {
println(record)
}
})
})
// Start the streaming computation
println("Initialization complete.")
ssc.start()
ssc.awaitTermination()
| lamastex/scalable-data-science | db/2/2/029_TweetLanguageClassifier.scala | Scala | unlicense | 9,901 |
package ionroller.aws
import com.amazonaws.auth.AWSCredentialsProvider
import com.amazonaws.services.s3.AmazonS3Client
import scalaz.Kleisli
import scalaz.concurrent.Task
object S3 {
val client: Kleisli[Task, AWSCredentialsProvider, AmazonS3Client] = {
Kleisli { credentialsProvider =>
Task(new AmazonS3Client(credentialsProvider))(awsExecutorService)
}
}
}
| browngeek666/ionroller | core/src/main/scala/ionroller/aws/S3.scala | Scala | mit | 379 |
package pl.touk.nussknacker.engine.lite.api
import cats.data.ValidatedNel
import cats.{Monad, ~>}
import pl.touk.nussknacker.engine.api.context.ProcessCompilationError
import pl.touk.nussknacker.engine.api.process.{Sink, Source}
import pl.touk.nussknacker.engine.api.typed.typing.TypingResult
import pl.touk.nussknacker.engine.api.{Context, LazyParameterInterpreter}
import pl.touk.nussknacker.engine.lite.api.commonTypes.{DataBatch, ErrorType, ResultType}
import scala.language.higherKinds
import scala.reflect.runtime.universe._
object customComponentTypes {
  //Some components work with any monad (e.g. Union, Split etc.), while some require a specific monad (e.g. State, probably transactional Kafka)
  //This trait allows converting effects where possible. See sample.SumTransformer for usage
  //More complex implementations would allow e.g. transforming State[StateType, _] => Future[State[StateType, _]] and so on
trait CapabilityTransformer[Target[_]] {
def transform[From[_]](implicit tag: TypeTag[From[Any]]): ValidatedNel[ProcessCompilationError, From ~> Target]
}
case class CustomComponentContext[F[_]](nodeId: String, interpreter: LazyParameterInterpreter, capabilityTransformer: CapabilityTransformer[F])
trait LiteSource[Input] extends Source {
def createTransformation[F[_] : Monad](evaluateLazyParameter: CustomComponentContext[F]): Input => ValidatedNel[ErrorType, Context]
}
trait LiteCustomComponent {
    //Result is a generic parameter, as the component should not change it or interfere with it
def createTransformation[F[_] : Monad, Result](continuation: DataBatch => F[ResultType[Result]],
context: CustomComponentContext[F]): DataBatch => F[ResultType[Result]]
}
case class BranchId(value: String)
case class JoinDataBatch(value: List[(BranchId, Context)])
trait LiteJoinCustomComponent {
def createTransformation[F[_] : Monad, Result](continuation: DataBatch => F[ResultType[Result]],
context: CustomComponentContext[F]): JoinDataBatch => F[ResultType[Result]]
}
trait LiteSink[Res] extends Sink {
def createTransformation[F[_] : Monad](evaluateLazyParameter: CustomComponentContext[F]):
(TypingResult, DataBatch => F[ResultType[(Context, Res)]])
}
}
| TouK/nussknacker | engine/lite/components-api/src/main/scala/pl/touk/nussknacker/engine/lite/api/customComponentTypes.scala | Scala | apache-2.0 | 2,337 |
import annotation.experimental
class Class1:
import language.experimental.fewerBraces // error
import language.experimental.namedTypeArguments // error
import language.experimental.genericNumberLiterals // error
import language.experimental.erasedDefinitions // ok: only check at erased definition
@experimental def f = 1
def g = 1
object Object1:
import language.experimental.fewerBraces // error
import language.experimental.namedTypeArguments // error
import language.experimental.genericNumberLiterals // error
import language.experimental.erasedDefinitions // ok: only check at erased definition
@experimental def f = 1
def g = 1
def fun1 =
import language.experimental.fewerBraces // error
import language.experimental.namedTypeArguments // error
import language.experimental.genericNumberLiterals // error
import language.experimental.erasedDefinitions // ok: only check at erased definition
@experimental def f = 1
def g = 1
val value1 =
import language.experimental.fewerBraces // error
import language.experimental.namedTypeArguments // error
import language.experimental.genericNumberLiterals // error
import language.experimental.erasedDefinitions // ok: only check at erased definition
@experimental def f = 1
def g = 1 | dotty-staging/dotty | tests/neg-custom-args/no-experimental/experimental-nested-imports-2.scala | Scala | apache-2.0 | 1,284 |
package scala.meta.semantic
/**
* @author mucianm
* @since 03.06.16.
*/
import scala.{Seq => _}
import scala.collection.immutable.Seq
import scala.meta._
trait Context {
def dialect: Dialect
def typecheck(tree: Tree): Tree
def defns(ref: Ref): Seq[Member]
def members(tpe: Type): Seq[Member]
def supermembers(member: Member): Seq[Member]
def submembers(member: Member): Seq[Member]
def isSubtype(tpe1: Type, tpe2: Type): Boolean
def lub(tpes: Seq[Type]): Type
def glb(tpes: Seq[Type]): Type
def supertypes(tpe: Type): Seq[Type]
def widen(tpe: Type): Type
def dealias(tpe: Type): Type
}
| ilinum/intellij-scala | src/scala/meta/semantic/Context.scala | Scala | apache-2.0 | 622 |
package sds.classfile.constant_pool
class ClassInfo(_index: Int) extends ConstantInfo {
def index: Int = _index
override def toString(): String = s"Class\t#$index"
} | g1144146/sds_for_scala | src/main/scala/sds/classfile/constant_pool/ClassInfo.scala | Scala | apache-2.0 | 174 |
package de.leanovate.swaggercheck.schema.gen
import de.leanovate.swaggercheck.TestSchema
import de.leanovate.swaggercheck.schema.ValidationResultToProp._
import de.leanovate.swaggercheck.schema.model.JsonPath
import de.leanovate.swaggercheck.shrinkable.CheckJsValue
import org.scalacheck.Prop._
trait DefinitionChecks {
val schema = TestSchema()
def checkDefinition(definition: GeneratableDefinition) = forAll(definition.generate(schema)) {
json: CheckJsValue =>
val value = CheckJsValue.parse(json.minified)
definition.validate(schema, JsonPath(), value)
}
}
| leanovate/swagger-check | json-schema-gen/src/test/scala/de/leanovate/swaggercheck/schema/gen/DefinitionChecks.scala | Scala | mit | 586 |
package io.getquill.context.sql
import io.getquill.Spec
import io.getquill.context.sql.testContext._
import io.getquill.Literal
class SqlQuerySpec extends Spec {
implicit val naming = new Literal {}
"transforms the ast into a flatten sql-like structure" - {
"inner join query" in {
val q = quote {
for {
a <- qr1
b <- qr2 if (a.s != null && b.i > a.i)
} yield {
(a.i, b.i)
}
}
testContext.run(q).string mustEqual
"SELECT a.i, b.i FROM TestEntity a, TestEntity2 b WHERE a.s IS NOT NULL AND b.i > a.i"
}
"outer join query" in {
val q = quote {
qr1.leftJoin(qr2).on((a, b) => a.s != null && b.i > a.i)
}
testContext.run(q).string mustEqual
"SELECT a.s, a.i, a.l, a.o, b.s, b.i, b.l, b.o FROM TestEntity a LEFT JOIN TestEntity2 b ON a.s IS NOT NULL AND b.i > a.i"
}
"join + map + filter" - {
"regular" in {
val q = quote {
qr1
.leftJoin(qr2)
.on((a, b) => a.i == b.i)
.map(t => (t._1.i, t._2.map(_.i)))
.filter(_._2.forall(_ == 1))
}
testContext.run(q).string mustEqual
"SELECT a.i, b.i FROM TestEntity a LEFT JOIN TestEntity2 b ON a.i = b.i WHERE b.i IS NULL OR b.i = 1"
}
"null-checked" in {
val q = quote {
qr1
.leftJoin(qr2)
.on((a, b) => a.i == b.i)
.map(t => (t._1.i, t._2.map(_.s)))
.filter(_._2.forall(v => if (v == "value") true else false))
}
testContext.run(q).string mustEqual
"SELECT a.i, b.s FROM TestEntity a LEFT JOIN TestEntity2 b ON a.i = b.i WHERE b.s IS NULL OR b.s IS NOT NULL AND CASE WHEN b.s = 'value' THEN true ELSE false END"
}
}
"nested join" in {
val q = quote {
qr1.leftJoin(qr2).on {
case (a, b) =>
a.i == b.i
}.filter {
case (a, b) =>
b.map(_.l).contains(3L)
}.leftJoin(qr3).on {
case ((a, b), c) =>
b.map(_.i).contains(a.i) && b.map(_.i).contains(c.i)
}
}
testContext.run(q).string mustEqual
"SELECT x02._1s, x02._1i, x02._1l, x02._1o, x02._2s, x02._2i, x02._2l, x02._2o, x12.s, x12.i, x12.l, x12.o FROM (SELECT x01.s AS _1s, x01.l AS _1l, x01.o AS _1o, x01.i AS _1i, x11.i AS _2i, x11.o AS _2o, x11.l AS _2l, x11.s AS _2s FROM TestEntity x01 LEFT JOIN TestEntity2 x11 ON x01.i = x11.i WHERE x11.l = 3) AS x02 LEFT JOIN TestEntity3 x12 ON x02._2i = x02._1i AND x02._2i = x12.i"
}
"flat outer join" in {
val q = quote {
for {
e1 <- qr1
e2 <- qr2.leftJoin(e2 => e2.i == e1.i)
} yield (e1.i, e2.map(e => e.i))
}
testContext.run(q.dynamic).string mustEqual
"SELECT e1.i, e2.i FROM TestEntity e1 LEFT JOIN TestEntity2 e2 ON e2.i = e1.i"
}
"value query" - {
"operation" in {
val q = quote {
qr1.map(t => t.i).contains(1)
}
testContext.run(q).string mustEqual
"SELECT 1 IN (SELECT t.i FROM TestEntity t)"
}
"simple value" in {
val q = quote(1)
testContext.run(q).string mustEqual
"SELECT 1"
}
}
"raw queries with infix" - {
"using tuples" in {
val q = quote {
infix"""SELECT t.s AS "_1", t.i AS "_2" FROM TestEntity t""".as[Query[(String, Int)]]
}
testContext.run(q).string mustEqual
"""SELECT x._1, x._2 FROM (SELECT t.s AS "_1", t.i AS "_2" FROM TestEntity t) AS x"""
}
"using single value" in {
val q = quote {
infix"""SELECT t.i FROM TestEntity t""".as[Query[Int]]
}
testContext.run(q).string mustEqual
"""SELECT x.* FROM (SELECT t.i FROM TestEntity t) AS x"""
}
}
"nested infix query" - {
"as source" in {
val q = quote {
infix"SELECT * FROM TestEntity".as[Query[TestEntity]].filter(t => t.i == 1)
}
testContext.run(q).string mustEqual
"SELECT t.s, t.i, t.l, t.o FROM (SELECT * FROM TestEntity) AS t WHERE t.i = 1"
}
"fails if used as the flatMap body" in {
val q = quote {
qr1.flatMap(a => infix"SELECT * FROM TestEntity2 t where t.s = ${a.s}".as[Query[TestEntity2]])
}
val e = intercept[IllegalStateException] {
SqlQuery(q.ast)
}
}
}
"sorted query" - {
"with map" in {
val q = quote {
qr1.sortBy(t => t.s).map(t => t.s)
}
testContext.run(q).string mustEqual
"SELECT t.s FROM TestEntity t ORDER BY t.s ASC NULLS FIRST"
}
"with filter" in {
val q = quote {
qr1.filter(t => t.s == "s").sortBy(t => t.s).map(t => (t.i))
}
testContext.run(q).string mustEqual
"SELECT t.i FROM TestEntity t WHERE t.s = 's' ORDER BY t.s ASC NULLS FIRST"
}
"with outer filter" in {
val q = quote {
qr1.sortBy(t => t.s).filter(t => t.s == "s").map(t => t.s)
}
testContext.run(q).string mustEqual
"SELECT t.s FROM TestEntity t WHERE t.s = 's' ORDER BY t.s ASC NULLS FIRST"
}
"with flatMap" in {
val q = quote {
qr1.sortBy(t => t.s).flatMap(t => qr2.map(t => t.s))
}
testContext.run(q).string mustEqual
"SELECT t1.s FROM (SELECT t.* FROM TestEntity t ORDER BY t.s ASC NULLS FIRST) AS t, TestEntity2 t1"
}
"tuple criteria" - {
"single ordering" in {
val q = quote {
qr1.sortBy(t => (t.s, t.i))(Ord.asc).map(t => t.s)
}
testContext.run(q).string mustEqual
"SELECT t.s FROM TestEntity t ORDER BY t.s ASC, t.i ASC"
}
"ordering per column" in {
val q = quote {
qr1.sortBy(t => (t.s, t.i))(Ord(Ord.asc, Ord.desc)).map(t => t.s)
}
testContext.run(q).string mustEqual
"SELECT t.s FROM TestEntity t ORDER BY t.s ASC, t.i DESC"
}
}
"multiple sortBy" in {
val q = quote {
qr1.sortBy(t => (t.s, t.i)).sortBy(t => t.l).map(t => t.s)
}
testContext.run(q).string mustEqual
"SELECT t.s FROM (SELECT t.l, t.s FROM TestEntity t ORDER BY t.s ASC NULLS FIRST, t.i ASC NULLS FIRST) AS t ORDER BY t.l ASC NULLS FIRST"
}
"expression" - {
"neg" in {
val q = quote {
qr1.sortBy(t => -t.i)(Ord.desc)
}
testContext.run(q).string mustEqual
"SELECT t.s, t.i, t.l, t.o FROM TestEntity t ORDER BY - (t.i) DESC"
}
"add" in {
val q = quote {
qr1.sortBy(t => t.l - t.i)
}
testContext.run(q).string mustEqual
"SELECT t.s, t.i, t.l, t.o FROM TestEntity t ORDER BY (t.l - t.i) ASC NULLS FIRST"
}
}
"after flatMap" in {
val q = quote {
(for {
a <- qr1
b <- qr2 if a.i == b.i
} yield {
(a.s, b.s)
})
.sortBy(_._2)(Ord.desc)
}
testContext.run(q).string mustEqual
"SELECT b._1, b._2 FROM (SELECT b.s AS _2, a.s AS _1 FROM TestEntity a, TestEntity2 b WHERE a.i = b.i) AS b ORDER BY b._2 DESC"
}
"fails if the sortBy criteria is malformed" in {
case class Test(a: (Int, Int))
implicit val o: Ordering[TestEntity] = null
val q = quote {
query[Test].sortBy(_.a)(Ord(Ord.asc, Ord.desc))
}
val e = intercept[IllegalStateException] {
SqlQuery(q.ast)
}
}
}
"grouped query" - {
"simple" in {
val q = quote {
qr1.groupBy(t => t.i).map(t => t._1)
}
testContext.run(q).string mustEqual
"SELECT t.i FROM TestEntity t GROUP BY t.i"
}
"nested" in {
val q = quote {
qr1.groupBy(t => t.i).map(t => t._1).flatMap(t => qr2)
}
testContext.run(q).string mustEqual
"SELECT x.s, x.i, x.l, x.o FROM (SELECT t.i FROM TestEntity t GROUP BY t.i) AS t, TestEntity2 x"
}
"without map" in {
val q = quote {
qr1.groupBy(t => t.i)
}
val e = intercept[IllegalStateException] {
SqlQuery(q.ast)
}
}
"tuple" in {
val q = quote {
qr1.groupBy(t => (t.i, t.l)).map(t => t._1)
}
testContext.run(q).string mustEqual
"SELECT t.i, t.l FROM TestEntity t GROUP BY t.i, t.l"
}
"aggregated" - {
"simple" in {
val q = quote {
qr1.groupBy(t => t.i).map {
case (i, entities) => (i, entities.size)
}
}
testContext.run(q).string mustEqual
"SELECT t.i, COUNT(*) FROM TestEntity t GROUP BY t.i"
}
"mapped" in {
val q = quote {
qr1.groupBy(t => t.i).map {
case (i, entities) => (i, entities.map(_.l).max)
}
}
testContext.run(q).string mustEqual
"SELECT t.i, MAX(t.l) FROM TestEntity t GROUP BY t.i"
}
"distinct" in {
val q = quote {
qr1.groupBy(t => t.s).map {
case (s, entities) => (s, entities.map(_.i).distinct.size)
}
}
testContext.run(q).string mustEqual
"SELECT t.s, COUNT(DISTINCT t.i) FROM TestEntity t GROUP BY t.s"
}
}
"with map" - {
"not nested" in {
val q = quote {
qr1.join(qr2).on((a, b) => a.s == b.s)
.groupBy(t => t._2.i)
.map {
case (i, l) =>
(i, l.map(_._1.i).sum)
}
}
testContext.run(q).string mustEqual
"SELECT b.i, SUM(a.i) FROM TestEntity a INNER JOIN TestEntity2 b ON a.s = b.s GROUP BY b.i"
}
"nested" in {
val q = quote {
qr1.join(qr2).on((a, b) => a.s == b.s)
.nested
.groupBy(t => t._2.i)
.map {
case (i, l) =>
(i, l.map(_._1.i).sum)
}
}
testContext.run(q).string mustEqual
"SELECT t._2i, SUM(t._1i) FROM (SELECT a.i AS _1i, b.i AS _2i FROM TestEntity a INNER JOIN TestEntity2 b ON a.s = b.s) AS t GROUP BY t._2i"
}
}
}
"aggregated query" in {
val q = quote {
qr1.map(t => t.i).max
}
testContext.run(q).string mustEqual
"SELECT MAX(t.i) FROM TestEntity t"
}
"aggregated query multiple select" in {
val q = quote {
qr1.map(t => t.i -> t.s).size
}
testContext.run(q).string mustEqual
"SELECT COUNT(*) FROM (SELECT t.i, t.s FROM TestEntity t) AS x"
}
"distinct query" in {
val q = quote {
qr1.map(t => t.i).distinct
}
testContext.run(q).string mustEqual
"SELECT DISTINCT t.i FROM TestEntity t"
}
"distinct and map query" in {
val q = quote {
qr1.map(t => t.i).distinct.map(t => 1)
}
testContext.run(q).string mustEqual
"SELECT 1 FROM (SELECT DISTINCT t.i FROM TestEntity t) AS t"
}
"nested where" in {
val q = quote {
qr4.filter(t => t.i == 1).nested.filter(t => t.i == 2)
}
testContext.run(q).string mustEqual
"SELECT t.i FROM (SELECT t.i FROM TestEntity4 t WHERE t.i = 1) AS t WHERE t.i = 2"
}
"limited query" - {
"simple" in {
val q = quote {
qr1.take(10)
}
testContext.run(q).string mustEqual
"SELECT x.s, x.i, x.l, x.o FROM TestEntity x LIMIT 10"
}
"nested" in {
val q = quote {
qr1.take(10).flatMap(a => qr2)
}
testContext.run(q).string mustEqual
"SELECT x.s, x.i, x.l, x.o FROM (SELECT x.* FROM TestEntity x LIMIT 10) AS a, TestEntity2 x"
}
"with map" in {
val q = quote {
qr1.take(10).map(t => t.s)
}
testContext.run(q).string mustEqual
"SELECT t.s FROM TestEntity t LIMIT 10"
}
"multiple limits" in {
val q = quote {
qr1.take(1).take(10)
}
testContext.run(q).string mustEqual
"SELECT x.s, x.i, x.l, x.o FROM (SELECT x.s, x.i, x.l, x.o FROM TestEntity x LIMIT 1) AS x LIMIT 10"
}
}
"offset query" - {
"simple" in {
val q = quote {
qr1.drop(10)
}
testContext.run(q).string mustEqual
"SELECT x.s, x.i, x.l, x.o FROM TestEntity x OFFSET 10"
}
"nested" in {
val q = quote {
qr1.drop(10).flatMap(a => qr2)
}
testContext.run(q).string mustEqual
"SELECT x.s, x.i, x.l, x.o FROM (SELECT x.* FROM TestEntity x OFFSET 10) AS a, TestEntity2 x"
}
"with map" in {
val q = quote {
qr1.drop(10).map(t => t.s)
}
testContext.run(q).string mustEqual
"SELECT t.s FROM TestEntity t OFFSET 10"
}
"multiple offsets" in {
val q = quote {
qr1.drop(1).drop(10)
}
testContext.run(q).string mustEqual
"SELECT x.s, x.i, x.l, x.o FROM (SELECT x.s, x.i, x.l, x.o FROM TestEntity x OFFSET 1) AS x OFFSET 10"
}
}
"limited and offset query" - {
"simple" in {
val q = quote {
qr1.drop(10).take(11)
}
testContext.run(q).string mustEqual
"SELECT x.s, x.i, x.l, x.o FROM TestEntity x LIMIT 11 OFFSET 10"
}
"nested" in {
val q = quote {
qr1.drop(10).take(11).flatMap(a => qr2)
}
testContext.run(q).string mustEqual
"SELECT x.s, x.i, x.l, x.o FROM (SELECT x.* FROM TestEntity x LIMIT 11 OFFSET 10) AS a, TestEntity2 x"
}
"multiple" in {
val q = quote {
qr1.drop(1).take(2).drop(3).take(4)
}
testContext.run(q).string mustEqual
"SELECT x.s, x.i, x.l, x.o FROM (SELECT x.s, x.i, x.l, x.o FROM TestEntity x LIMIT 2 OFFSET 1) AS x LIMIT 4 OFFSET 3"
}
"take.drop" in {
val q = quote {
qr1.take(1).drop(2)
}
testContext.run(q).string mustEqual
"SELECT x.s, x.i, x.l, x.o FROM (SELECT x.s, x.i, x.l, x.o FROM TestEntity x LIMIT 1) AS x OFFSET 2"
}
"for comprehension" - {
val q = quote(for {
q1 <- qr1
q2 <- qr2 if q1.i == q2.i
} yield (q1.i, q2.i, q1.s, q2.s))
"take" in {
testContext.run(q.take(3)).string mustEqual
"SELECT q1.i, q2.i, q1.s, q2.s FROM TestEntity q1, TestEntity2 q2 WHERE q1.i = q2.i LIMIT 3"
}
"drop" in {
testContext.run(q.drop(3)).string mustEqual
"SELECT q1.i, q2.i, q1.s, q2.s FROM TestEntity q1, TestEntity2 q2 WHERE q1.i = q2.i OFFSET 3"
}
}
}
"set operation query" - {
"union" in {
val q = quote {
qr1.union(qr1)
}
testContext.run(q).string mustEqual
"SELECT x.s, x.i, x.l, x.o FROM ((SELECT x.s, x.i, x.l, x.o FROM TestEntity x) UNION (SELECT x.s, x.i, x.l, x.o FROM TestEntity x)) AS x"
}
"unionAll" in {
val q = quote {
qr1.unionAll(qr1)
}
testContext.run(q).string mustEqual
"SELECT x.s, x.i, x.l, x.o FROM ((SELECT x.s, x.i, x.l, x.o FROM TestEntity x) UNION ALL (SELECT x.s, x.i, x.l, x.o FROM TestEntity x)) AS x"
}
}
"unary operation query" - {
"nonEmpty" in {
val q = quote {
qr1.nonEmpty
}
testContext.run(q).string mustEqual
"SELECT EXISTS (SELECT x.* FROM TestEntity x)"
}
"isEmpty" in {
val q = quote {
qr1.isEmpty
}
testContext.run(q).string mustEqual
"SELECT NOT EXISTS (SELECT x.* FROM TestEntity x)"
}
}
"aggregated and mapped query" in {
val q = quote {
(for {
q1 <- qr1
q2 <- qr2
} yield {
q2.i
}).min
}
testContext.run(q).string mustEqual
"SELECT MIN(q2.i) FROM TestEntity q1, TestEntity2 q2"
}
"nested" - {
"pointless nesting" in {
val q = quote {
qr4.nested
}
testContext.run(q).string mustEqual "SELECT x.i FROM (SELECT x.i FROM TestEntity4 x) AS x"
// not normalized
SqlQuery(q.ast).toString mustEqual "SELECT x.* FROM (SELECT x.* FROM TestEntity4 x) AS x"
}
"pointless nesting of single yielding element" in {
val q = quote {
qr1.map(x => x.i).nested
}
testContext.run(q).string mustEqual "SELECT x.i FROM (SELECT x.i FROM TestEntity x) AS x"
}
"pointless nesting in for-comp of single yielding element" in {
val q = quote {
(for {
a <- qr1
b <- qr2
} yield a.i).nested
}
testContext.run(q).string mustEqual "SELECT x.* FROM (SELECT a.i FROM TestEntity a, TestEntity2 b) AS x"
}
"mapped" in {
val q = quote {
qr1.nested.map(t => t.i)
}
testContext.run(q).string mustEqual
"SELECT t.i FROM (SELECT x.i FROM TestEntity x) AS t"
}
"filter + map" in {
val q = quote {
qr1.filter(t => t.i == 1).nested.map(t => t.i)
}
testContext.run(q).string mustEqual
"SELECT t.i FROM (SELECT t.i FROM TestEntity t WHERE t.i = 1) AS t"
}
}
"queries using options" - {
case class Entity(id: Int, s: String, o: Option[String], fk: Int, io: Option[Int])
case class EntityA(id: Int, s: String, o: Option[String])
case class EntityB(id: Int, s: String, o: Option[String])
val e = quote(query[Entity])
val ea = quote(query[EntityA])
val eb = quote(query[EntityB])
"flatten in left join" in {
val q = quote {
e.leftJoin(ea).on((e, a) => e.fk == a.id).map(_._2.map(_.o).flatten)
}
testContext.run(q).string mustEqual
"SELECT a.o FROM Entity e LEFT JOIN EntityA a ON e.fk = a.id"
}
"flatMap in left join" in {
val q = quote {
e.leftJoin(ea).on((e, a) => e.fk == a.id).map(_._2.flatMap(_.o))
}
testContext.run(q).string mustEqual
"SELECT a.o FROM Entity e LEFT JOIN EntityA a ON e.fk = a.id"
}
"flatMap in left join with getOrElse" in {
val q = quote {
e.leftJoin(ea).on((e, a) => e.fk == a.id).map(_._2.flatMap(_.o).getOrElse("alternative"))
}
testContext.run(q).string mustEqual
"SELECT CASE WHEN a.o IS NOT NULL THEN a.o ELSE 'alternative' END FROM Entity e LEFT JOIN EntityA a ON e.fk = a.id"
}
"getOrElse should not produce null check for integer" in {
val q = quote {
e.map(em => em.io.map(_ + 1).getOrElse(2))
}
testContext.run(q).string mustEqual
"SELECT CASE WHEN (em.io + 1) IS NOT NULL THEN em.io + 1 ELSE 2 END FROM Entity em"
}
"getOrElse should not produce null check for conditional" in {
val q = quote {
e.map(em => em.o.map(v => if (v == "value") "foo" else "bar").getOrElse("baz"))
}
testContext.run(q).string mustEqual
"SELECT CASE WHEN em.o IS NOT NULL AND CASE WHEN em.o = 'value' THEN 'foo' ELSE 'bar' END IS NOT NULL THEN CASE WHEN em.o = 'value' THEN 'foo' ELSE 'bar' END ELSE 'baz' END FROM Entity em"
}
}
"case class queries" - {
case class TrivialEntity(str: String)
"in single join" in {
val q = quote {
for {
a <- qr1
b <- qr2 if (b.i > a.i)
} yield {
TrivialEntity(b.s)
}
}
testContext.run(q).string mustEqual
"SELECT b.s FROM TestEntity a, TestEntity2 b WHERE b.i > a.i"
}
"in union" in {
val q = quote {
qr1.map(q => TrivialEntity(q.s)) ++ qr1.map(q => TrivialEntity(q.s))
}
testContext.run(q).string mustEqual
"SELECT x.str FROM ((SELECT q.s AS str FROM TestEntity q) UNION ALL (SELECT q1.s AS str FROM TestEntity q1)) AS x"
}
"in union same field name" in {
case class TrivialEntitySameField(s: String)
val q = quote {
qr1.map(q => TrivialEntitySameField(q.s)) ++ qr1.map(q => TrivialEntitySameField(q.s))
}
testContext.run(q).string mustEqual
"SELECT x.s FROM ((SELECT q.s AS s FROM TestEntity q) UNION ALL (SELECT q1.s AS s FROM TestEntity q1)) AS x"
}
}
}
"SqlQuery" - {
import io.getquill.ast._
"toString" in {
SqlQuery(qr4.ast).toString mustBe "SELECT x.* FROM TestEntity4 x"
}
"catch invalid" in {
intercept[IllegalStateException](SqlQuery(Ident("i"))).getMessage must startWith("Query not properly normalized.")
}
}
}
| mentegy/quill | quill-sql/src/test/scala/io/getquill/context/sql/SqlQuerySpec.scala | Scala | apache-2.0 | 21,130 |