code | repo_name | path | language | license | size
---|---|---|---|---|---|
/*
Copyright (c) 2010-2012, The University of Manchester
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
* Neither the name of The University of Manchester nor the
names of its contributors may be used to endorse or promote products
derived from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE UNIVERSITY OF MANCHESTER BE LIABLE FOR ANY
DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package eu.teraflux.uniman.dataflow.benchmark
import eu.teraflux.uniman.dataflow._
import eu.teraflux.uniman.dataflow.Dataflow._
object Fibonacci extends DFApp{
def DFMain(args:Array[String]) = {
thread(fib _, args(0).toInt, new Token((x: Int) => { println("result = " + x) }))
}
def fib(n: Int, out: Token[Int]) {
if(n <= 2)
out(1)
else {
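      // dataflow "adder" thread: it fires once both of its input tokens have arrived
      // and passes the sum on through `out`; the two recursive calls below feed its tokens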
var adder = thread((x: Int, y: Int, out: Token[Int]) => out(x + y))
thread(fib _, n - 1, adder.token1)
thread(fib _, n - 2, adder.token2)
adder(Blank, Blank, out)
}
}
}
| chrisseaton/dfscala-benchmarks | src/main/scala/eu/teraflux/uniman/dataflow/benchmark/Fibonacci.scala | Scala | bsd-3-clause | 2,117 |
package redis
package algebra
package interpreter
package nonblocking
import com.redis.RedisClient
import java.util.UUID
import akka.actor.ActorSystem
import akka.io.Tcp
import akka.util.Timeout
import org.specs2._, specification._
import scala.concurrent.duration.Duration
import scalaz.{CharSet, Free, NonEmptyList}, NonEmptyList.nel
import scalaz.syntax.Ops
import scalaz.syntax.{comonad, monad}, comonad._, monad._
import all._, future._
trait InterpreterSpecification extends Specification {
override def map(fs: => Fragments) = Step(client) ^ fs ^ Step(clean) ^ Step(close)
def run[A](a: Free[R, A]): A = NonBlocking.run(a, client).copoint
def generate = s"${prefix}:${UUID.randomUUID.toString}".utf8
def genkey(a: ByteString) = s"${prefix}:${UUID.randomUUID.toString}:".utf8 ++ a
def genkeys(a: NonEmptyList[ByteString]) = {
val p = generate
a.map(p ++ _)
}
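  // removes every key created by this spec run: list the keys matching "<prefix>*",
  // then DEL them in a single batch (or do nothing if no keys were created)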
  def clean = run(all.keys[R](s"${prefix}*".utf8).map(_.toList) >>= {
    case a :: as => all.del[R](nel(a, as))
    case _ => 0L.point[F]
  })
def close() = client.clientRef ! Tcp.Close
val prefix = s"redis-algebra-interpreter:${UUID.randomUUID.toString}"
implicit val duration = Duration(2, "seconds")
implicit val system = ActorSystem(prefix.replace(":", "-"))
implicit val executionContext = system.dispatcher
implicit val timeout = Timeout(duration)
lazy val client = RedisClient("localhost", 6379)
implicit def StringToStringOps(a: String): StringOps = new StringOps { val self = a }
implicit def ByteArrayToByteArrayOps(a: Array[Byte]): ByteArrayOps = new ByteArrayOps { val self = a }
implicit def ByteStringToByteStringOps(a: ByteString): ByteStringOps = new ByteStringOps { val self = a }
implicit def LongToStringOps(a: Long): StringOps = new StringOps { val self = a.toString }
implicit def IntToStringOps(a: Int): StringOps = new StringOps { val self = a.toString }
}
sealed abstract class StringOps extends Ops[String] {
final def utf8 = self.getBytes(CharSet.UTF8).toIndexedSeq
}
sealed abstract class ByteArrayOps extends Ops[Array[Byte]] {
final def bytestring = self.toIndexedSeq
}
sealed abstract class ByteStringOps extends Ops[ByteString] {
final def utf8 = new String(self.toArray, CharSet.UTF8)
}
| ethul/redis-algebra-interpreter | src/test/scala/redis/algebra/interpreter/nonblocking/interpreter.scala | Scala | mit | 2,253 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.planner.logical.steps
import org.mockito.Mockito._
import org.neo4j.cypher.internal.frontend.v2_3.ast._
import org.neo4j.cypher.internal.compiler.v2_3.planner._
import org.neo4j.cypher.internal.compiler.v2_3.planner.logical.plans._
import org.neo4j.cypher.internal.frontend.v2_3.test_helpers.CypherFunSuite
class SelectCoveredTest extends CypherFunSuite with LogicalPlanningTestSupport {
private implicit val planContext = newMockedPlanContext
private implicit val subQueryLookupTable = Map.empty[PatternExpression, QueryGraph]
private implicit val context = newMockedLogicalPlanningContext(planContext)
test("when a predicate that isn't already solved is solvable it should be applied") {
// Given
val predicate = mock[Expression]
when(predicate.dependencies).thenReturn(Set.empty[Identifier])
val LogicalPlan = newMockedLogicalPlan("x")
val selections = Selections(Set(Predicate(LogicalPlan.availableSymbols, predicate)))
val qg = QueryGraph(selections = selections)
// When
val result = selectCovered(LogicalPlan, qg)
// Then
result should equal(Seq(Selection(Seq(predicate), LogicalPlan)(solved)))
}
test("should not try to solve predicates with unmet dependencies") {
// Given
val predicate = mock[Expression]
when(predicate.dependencies).thenReturn(Set.empty[Identifier])
val selections = Selections(Set(Predicate(Set(IdName("x")), predicate)))
val LogicalPlan = newMockedLogicalPlanWithProjections("x")
val qg = QueryGraph(selections = selections)
// When
val result = selectCovered(LogicalPlan, qg)
// Then
result should equal(Seq(Selection(Seq(predicate), LogicalPlan)(solved)))
}
test("when two predicates not already solved are solvable, they should be applied") {
// Given
val predicate1 = mock[Expression]
when(predicate1.dependencies).thenReturn(Set.empty[Identifier])
val predicate2 = mock[Expression]
when(predicate2.dependencies).thenReturn(Set.empty[Identifier])
val selections = Selections(Set(
Predicate(Set(IdName("x")), predicate1),
Predicate(Set(IdName("x")), predicate2)))
val LogicalPlan: LogicalPlan = newMockedLogicalPlanWithProjections("x")
val qg = QueryGraph(selections = selections)
// When
val result = selectCovered(LogicalPlan, qg)
// Then
result should equal(Seq(Selection(Seq(predicate1, predicate2), LogicalPlan)(solved)))
}
test("when a predicate is already solved, it should not be applied again") {
// Given
val coveredIds = Set(IdName("x"))
val qg = QueryGraph(selections = Selections(Set(Predicate(coveredIds, SignedDecimalIntegerLiteral("1") _))))
val solved = CardinalityEstimation.lift(PlannerQuery(qg), 0.0)
val LogicalPlan = newMockedLogicalPlanWithProjections("x").updateSolved(solved)
// When
val result = selectCovered(LogicalPlan, qg)
// Then
result should equal(Seq())
}
test("a predicate without all dependencies covered should not be applied ") {
// Given
val predicate = mock[Expression]
val selections = Selections(Set(Predicate(Set(IdName("x"), IdName("y")), predicate)))
val LogicalPlan = newMockedLogicalPlanWithProjections("x")
val qg = QueryGraph(selections = selections)
// When
val result = selectCovered(LogicalPlan, qg)
// Then
result should equal(Seq())
}
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/test/scala/org/neo4j/cypher/internal/compiler/v2_3/planner/logical/steps/SelectCoveredTest.scala | Scala | apache-2.0 | 4,229 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package runtime
final class RichByte(val self: Byte) extends AnyVal with ScalaWholeNumberProxy[Byte] {
protected def num = scala.math.Numeric.ByteIsIntegral
protected def ord = scala.math.Ordering.Byte
override def doubleValue = self.toDouble
override def floatValue = self.toFloat
override def longValue = self.toLong
override def intValue = self.toInt
override def byteValue = self
override def shortValue = self.toShort
override def isValidByte = true
override def abs: Byte = math.abs(self).toByte
override def max(that: Byte): Byte = math.max(self, that).toByte
override def min(that: Byte): Byte = math.min(self, that).toByte
}
| martijnhoekstra/scala | src/library/scala/runtime/RichByte.scala | Scala | apache-2.0 | 990 |
package reactivemongo.api.indexes
import reactivemongo.api.{ Collation, Serialization, SerializationPack }
/**
* A MongoDB index (excluding the namespace).
*
* Consider reading [[http://www.mongodb.org/display/DOCS/Indexes the documentation about indexes in MongoDB]].
*
* {{{
* import reactivemongo.api.bson.BSONDocument
* import reactivemongo.api.indexes.{ Index, IndexType }
*
* val bsonIndex = Index(
* key = Seq("name" -> IndexType.Ascending),
* name = Some("name_idx"),
* unique = false,
* background = false,
* sparse = false,
* expireAfterSeconds = None,
* storageEngine = None,
* weights = None,
* defaultLanguage = None,
* languageOverride = None,
* textIndexVersion = None,
* sphereIndexVersion = None,
* bits = None,
* min = None,
* max = None,
* bucketSize = None,
* collation = None,
* wildcardProjection = None,
* version = None,
* partialFilter = None,
* options = BSONDocument.empty)
* }}}
*/
sealed abstract class Index {
type Pack <: SerializationPack
val pack: Pack
/**
* The index key (it can be composed of multiple fields).
* This list should not be empty!
*/
def key: Seq[(String, IndexType)]
/**
* The name of this index (default: `None`).
* If you provide none, a name will be computed for you.
*/
def name: Option[String] = None
/**
   * Whether this index should be built in the background.
* You should read [[http://www.mongodb.org/display/DOCS/Indexes#Indexes-background%3Atrue the documentation about background indexing]] before using it.
*/
def background: Boolean = false
  /** The flag that enforces uniqueness (default: `false`). */
def unique: Boolean = false
/**
* Optional [[https://docs.mongodb.com/manual/core/index-partial/#partial-index-with-unique-constraints partial filter]]
*
* @since MongoDB 3.2
*/
def partialFilter: Option[pack.Document]
/**
   * The flag that indicates whether the index being built
   * should only consider documents that have the indexed fields
* (default: `false`).
*
* See [[https://docs.mongodb.com/manual/indexes/#sparse-indexes the documentation]] on the consequences of such an index.
*/
def sparse: Boolean = false
/**
* Optionally specifies a value, in seconds, as a [[https://docs.mongodb.com/manual/reference/glossary/#term-ttl TTL]] to control how long MongoDB retains documents in this collection.
*/
def expireAfterSeconds: Option[Int] = None
/**
* Optionally specifies a configuration for the storage engine on a per-index basis when creating an index.
*
* @since MongoDB 3.0
*/
def storageEngine: Option[pack.Document] = None
/**
* An optional document that contains field and weight pairs for [[https://docs.mongodb.com/manual/core/index-text/ text indexes]].
*/
def weights: Option[pack.Document] = None
/**
* An optional default language for [[https://docs.mongodb.com/manual/core/index-text/ text indexes]].
*/
def defaultLanguage: Option[String] = None
/**
* An optional language override for [[https://docs.mongodb.com/manual/core/index-text/ text indexes]].
*/
def languageOverride: Option[String] = None
/**
* An optional `text` index [[https://docs.mongodb.com/manual/core/index-text/#text-versions version number]].
*/
def textIndexVersion: Option[Int] = None
/**
* An optional `2dsphere` index [[https://docs.mongodb.com/manual/core/index-text/#text-versions version number]].
*/
@SuppressWarnings(Array("MethodNames"))
def _2dsphereIndexVersion: Option[Int] = None
/**
* Optionally indicates the precision of [[https://docs.mongodb.com/manual/reference/glossary/#term-geohash geohash]] for [[https://docs.mongodb.com/manual/core/2d/ 2d indexes]].
*/
def bits: Option[Int] = None
/**
* Optionally indicates the lower inclusive boundary for longitude and latitude for [[https://docs.mongodb.com/manual/core/2d/ 2d indexes]].
*/
def min: Option[Double] = None
/**
* Optionally indicates the upper inclusive boundary for longitude and latitude for [[https://docs.mongodb.com/manual/core/2d/ 2d indexes]].
*/
def max: Option[Double] = None
/**
* Optionally specifies the number of units within which
* to group the location values
* for [[https://docs.mongodb.com/manual/core/geohaystack/ geoHaystack]]
* indexes.
*/
def bucketSize: Option[Double] = None
/** An optional [[Collation]] (default: `None`) */
def collation: Option[Collation] = None
def wildcardProjection: Option[pack.Document] = None
/**
* Indicates the [[http://www.mongodb.org/display/DOCS/Index+Versions version]] of the index (1 for >= 2.0, else 0). You should let MongoDB decide.
*/
def version: Option[Int] = None
/**
* Optional parameters for this index (typically specific to an IndexType like Geo2D).
*/
def options: pack.Document
/** The name of the index (a default one is computed if none). */
lazy val eventualName: String = name.getOrElse(key.foldLeft("") {
(name, kv) =>
name + (if (name.length > 0) "_" else "") + kv._1 + "_" + kv._2.valueStr
})
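  // illustration (not from the original source): with key = Seq("age" -> IndexType.Ascending,
  // "name" -> IndexType.Descending) and no explicit name, eventualName is "age_1_name_-1"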
private[api] lazy val tupled = Tuple21(key, name, background, unique, partialFilter, sparse, expireAfterSeconds, storageEngine, weights, defaultLanguage, languageOverride, textIndexVersion, _2dsphereIndexVersion, bits, min, max, bucketSize, collation, wildcardProjection, version, options)
@SuppressWarnings(Array("ComparingUnrelatedTypes"))
override def equals(that: Any): Boolean = that match {
case other: Index => tupled == other.tupled
case _ => false
}
override def hashCode: Int = tupled.hashCode
override def toString = s"Index${tupled.toString}"
}
object Index {
type Aux[P] = Index { type Pack = P }
type Default = Aux[Serialization.Pack]
@inline private def defaultOpts = Serialization.internalSerializationPack.newBuilder.document(Seq.empty)
/**
* {{{
* import reactivemongo.api.bson.BSONDocument
* import reactivemongo.api.bson.collection.BSONSerializationPack
* import reactivemongo.api.indexes.{ Index, IndexType }
*
* val bsonIndex = Index(BSONSerializationPack)(
* key = Seq("name" -> IndexType.Ascending),
* name = Some("name_idx"),
* unique = false,
* background = false,
* sparse = false,
* expireAfterSeconds = None,
* storageEngine = None,
* weights = None,
* defaultLanguage = None,
* languageOverride = None,
* textIndexVersion = None,
* sphereIndexVersion = None,
* bits = None,
* min = None,
* max = None,
* bucketSize = None,
* collation = None,
* wildcardProjection = None,
* version = None,
* partialFilter = None,
* options = BSONDocument.empty)
* }}}
*/
@SuppressWarnings(Array("MaxParameters", "VariableShadowing"))
def apply[P <: SerializationPack](_pack: P)(
key: Seq[(String, IndexType)],
name: Option[String],
unique: Boolean,
background: Boolean,
sparse: Boolean,
expireAfterSeconds: Option[Int],
storageEngine: Option[_pack.Document],
weights: Option[_pack.Document],
defaultLanguage: Option[String],
languageOverride: Option[String],
textIndexVersion: Option[Int],
sphereIndexVersion: Option[Int],
bits: Option[Int],
min: Option[Double],
max: Option[Double],
bucketSize: Option[Double],
collation: Option[Collation],
wildcardProjection: Option[_pack.Document],
version: Option[Int],
partialFilter: Option[_pack.Document],
options: _pack.Document): Index.Aux[P] = {
def k = key
def n = name
def u = unique
def b = background
def s = sparse
def e = expireAfterSeconds
def se = storageEngine
def w = weights
def dl = defaultLanguage
def lo = languageOverride
def tiv = textIndexVersion
def bs = bits
def mi = min
def mx = max
def bu = bucketSize
def cl = collation
def wp = wildcardProjection
def v = version
def pf = partialFilter
def o = options
new Index {
type Pack = P
val pack: _pack.type = _pack
override val key = k
override val name = n
override val unique = u
override val background = b
override val sparse = s
override val expireAfterSeconds = e
override val storageEngine = se
override val weights = w
override val defaultLanguage = dl
override val languageOverride = lo
override val textIndexVersion = tiv
override val _2dsphereIndexVersion = sphereIndexVersion
override val bits = bs
override val min = mi
override val max = mx
override val bucketSize = bu
override val collation = cl
override val wildcardProjection = wp
override val version = v
val partialFilter = pf
val options = o
}
}
/**
* {{{
* import reactivemongo.api.bson.BSONDocument
* import reactivemongo.api.indexes.{ Index, IndexType }
*
* val bsonIndex = Index(
* key = Seq("name" -> IndexType.Ascending),
* name = Some("name_idx"),
* unique = false,
* background = false,
* sparse = false,
* expireAfterSeconds = None,
* storageEngine = None,
* weights = None,
* defaultLanguage = None,
* languageOverride = None,
* textIndexVersion = None,
* sphereIndexVersion = None,
* bits = None,
* min = None,
* max = None,
* bucketSize = None,
* collation = None,
* wildcardProjection = None,
* version = None,
* partialFilter = None,
* options = BSONDocument.empty)
* }}}
*/
@SuppressWarnings(Array("MaxParameters"))
def apply(
key: Seq[(String, IndexType)],
name: Option[String] = None,
unique: Boolean = false,
background: Boolean = false,
sparse: Boolean = false,
expireAfterSeconds: Option[Int] = None,
storageEngine: Option[Serialization.Pack#Document] = None,
weights: Option[Serialization.Pack#Document] = None,
defaultLanguage: Option[String] = None,
languageOverride: Option[String] = None,
textIndexVersion: Option[Int] = None,
sphereIndexVersion: Option[Int] = None,
bits: Option[Int] = None,
min: Option[Double] = None,
max: Option[Double] = None,
bucketSize: Option[Double] = None,
collation: Option[Collation] = None,
wildcardProjection: Option[Serialization.Pack#Document] = None,
version: Option[Int] = None,
partialFilter: Option[Serialization.Pack#Document] = None,
options: Serialization.Pack#Document = defaultOpts): Index.Aux[Serialization.Pack] =
apply[Serialization.Pack](Serialization.internalSerializationPack)(
key,
name,
unique,
background,
sparse,
expireAfterSeconds,
storageEngine,
weights,
defaultLanguage,
languageOverride,
textIndexVersion,
sphereIndexVersion,
bits,
min,
max,
bucketSize,
collation,
wildcardProjection,
version,
partialFilter,
options)
/** '''EXPERIMENTAL:''' API may change */
object Key {
def unapplySeq(index: Index): Option[Seq[(String, IndexType)]] =
Option(index).map(_.key)
}
}
/**
* A MongoDB namespaced index.
 * A MongoDB index is composed of the namespace (the fully qualified collection name) and the other fields of [[reactivemongo.api.indexes.Index]].
*
* Consider reading [[http://www.mongodb.org/display/DOCS/Indexes the documentation about indexes in MongoDB]].
*
* @param namespace The fully qualified name of the indexed collection.
* @param index The other fields of the index.
*/
sealed trait NSIndex {
type Pack <: SerializationPack
def namespace: String
def index: Index.Aux[Pack]
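  // split the namespace at the first '.', e.g. "mydb.mycoll" => dbName "mydb", collectionName "mycoll"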
lazy val (dbName: String, collectionName: String) = {
val spanned = namespace.span(_ != '.')
spanned._1 -> spanned._2.drop(1)
}
private[api] lazy val tupled = namespace -> index
@SuppressWarnings(Array("ComparingUnrelatedTypes"))
override def equals(that: Any): Boolean = that match {
case other: NSIndex =>
this.tupled == other.tupled
case _ =>
false
}
override def hashCode: Int = tupled.hashCode
override def toString = s"NSIndex${tupled.toString}"
}
object NSIndex {
type Aux[P <: SerializationPack] = NSIndex { type Pack = P }
type Default = NSIndex.Aux[Serialization.Pack]
@SuppressWarnings(Array("VariableShadowing"))
def apply[P <: SerializationPack](
namespace: String, index: Index.Aux[P]): NSIndex.Aux[P] = {
@inline def nsp = namespace
@inline def i = index
new NSIndex {
type Pack = P
override val namespace = nsp
override val index = i
}
}
}
| ReactiveMongo/ReactiveMongo | driver/src/main/scala/api/indexes/Index.scala | Scala | apache-2.0 | 12,740 |
package tests.rescala.events
import tests.rescala.RETests
class AND_EventTest extends RETests {
allEngines("handler Of AND Is NOT Executed If Events Fire Singularly"){ engine => import engine._
var test = 0
val e1 = Evt[Int]
val e2 = Evt[Int]
val e1_AND_e2 = e1 zip e2
e1_AND_e2 += ((x: (Int, Int)) => { test += 1 })
e1(10)
e2(10)
assert(test == 0)
}
allEngines("handler Of AND Does Not Remember Old Rounds"){ engine => import engine._
var test = 0
val e1 = Evt[Int]
val e2 = Evt[Int]
val e1_AND_e2 = e1 zip e2
e1_AND_e2 += ((x: (Int, Int)) => { test += 1 })
e1(10)
e2(10)
e1(10)
e2(10)
assert(test == 0)
}
allEngines("handler Of AND IsExecuted If Both Events Fire"){ engine => import engine._
var test = 0
val e1 = Evt[Int]
val e2 = e1 map ((x: Int) => x * 2)
val e3 = e1 map ((x: Int) => x * 2)
val e2_AND_e3 = e2 zip e3
e1 += ((x: Int) => { test += 1 })
e2 += ((x: Int) => { test += 1 })
e3 += ((x: Int) => { test += 1 })
e2_AND_e3 += ((x: (Int, Int)) => { test += 1 })
e1(10)
assert(test == 4)
}
}
| volkc/REScala | Tests/shared/src/test/scala/tests/rescala/events/AND_EventTest.scala | Scala | apache-2.0 | 1,148 |
package com.pjanof.io
import akka.actor.{ Actor, ActorLogging, ActorRef, Props, SupervisorStrategy }
import akka.io.{ IO, Tcp }
import akka.util.ByteString
import com.typesafe.config.{ Config, ConfigFactory }
import java.net.InetSocketAddress
/**
* background
* TCP connection actor does not support internal buffering
* handles single write while it is being passed on to the O/S kernel
* congestion must be handled at the user level for reads and writes
*
* modes of back-pressuring writes
* ACK-based
* Write command carries an arbitrary object
* if object is not Tcp.NoAck it will be returned to the sender of the Write
* happens after successfully writing all contained data to the socket
* write initiated before receiving acknowledgement results in buffer overrun
* NACK-based
* writes arriving while previous write has not been completed are replied to with
* CommandFailed message containing the failed write
* this mechanism requires the implemented protocol to tolerate skipping writes
* enabled by setting the useResumeWriting flag to false
* within the Register message during connection activation
* NACK-based with write suspending
 * similar to NACK-based writing
* after a write fails no further writes will succeed until
* ResumeWriting message is received
* ResumeWriting is answered with WritingResumed message after
* last accepted write has completed
* if actor handling the connection implements buffering
* it must resend NACK'ed messages after receiving the WritingResumed signal
* ensures every message is delivered exactly once to network socket
*
* modes of back-pressuring reads
* Push-reading
* connection actor sends the registered reader actor incoming data
* as it is available via Received events
* if the reader actor wants to signal back-pressure to remote TCP endpoint
 * it may send a SuspendReading message to the connection actor
* indicates reader actor wants to suspend receiving new data
* Received events will not arrive until a corresponding ResumeReading is sent
* indicates the reader actor is ready again
* Pull-reading
* after sending a Received event the connection actor automatically
* suspends accepting data from the socket until reader actor signals via
* ResumeReading message it is ready to process more input data
* new data is "pulled" from the connection by sending ResumeReading
*
* these schemes only work between
* one writer/reader and one connection actor
* consistent results can not be achieved with
* multiple actors sending write commands to a single connection
*/
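/**
 * Illustrative sketch, not part of the original example: the "pull-reading" mode described
 * above, where the handler explicitly asks the connection actor for the next chunk.
 * It assumes the connection was set up with pullMode = true, so nothing is delivered
 * until ResumeReading is sent; the names PullReadingSketch and PullEcho are made up here.
 */
object PullReadingSketch {
  class PullEcho(connection: ActorRef) extends Actor {
    import Tcp._
    // request the first chunk; in pull mode the connection stays silent until we do
    override def preStart(): Unit = connection ! ResumeReading
    def receive = {
      case Received(data) =>
        // handle the chunk (here: echo it back), then explicitly pull the next one
        connection ! Write(data)
        connection ! ResumeReading
      case _: ConnectionClosed =>
        context stop self
    }
  }
}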
object WriteBackPressure {
object Client {
def props(remote: InetSocketAddress, replies: ActorRef) =
Props(classOf[Client], remote, replies)
}
class Client(remote: InetSocketAddress, listener: ActorRef) extends Actor with ActorLogging {
import Tcp._
import context.system
IO(Tcp) ! Connect(remote)
def receive = {
case CommandFailed(_: Connect) =>
log.info("Client Command Failed")
listener ! "connect failed"
context stop self
case c @ Connected(remote, local) =>
log.info(s"Client Connected: [ $remote ] with [ $local ]")
listener ! c
val connection = sender()
connection ! Register(self)
context become {
case data: ByteString =>
log.info("Client ByteString Received / Written")
connection ! Write(data)
case CommandFailed(w: Write) =>
// O/S buffer was full
log.error("Client OS Buffer Full")
listener ! "write failed"
case Received(data) =>
log.info(s"Client Received: [ $data ]")
listener ! data
case "close" =>
log.info("Client Close")
connection ! Close
case _: ConnectionClosed =>
log.info("Client Connected Closed")
listener ! "connection closed"
context stop self
case unhandled =>
log.info(s"Client Become Received: [ Case Not Handled - $unhandled ]")
}
case unhandled =>
log.info(s"Client Received: [ Case Not Handled - $unhandled ]")
}
}
class HandlerManager(handlerClass: Class[_]) extends Actor with ActorLogging {
import Tcp._
import context.system
// do not recover when connection is broken
override val supervisorStrategy = SupervisorStrategy.stoppingStrategy
/**
* bind to the listen port
* port will automatically be closed once this actor dies
*/
override def preStart(): Unit = {
IO(Tcp) ! Bind(self, new InetSocketAddress("localhost", 0))
}
// do not restart
override def postRestart(thr: Throwable): Unit = context stop self
def receive = {
case b @ Bound(localAddress) =>
log.info(s"Server Bound: [ $b ] at [ $localAddress ]")
context.parent ! b
case CommandFailed(Bind(_, local, _, _, _)) =>
log.warning(s"Server Command Failed, Cannot Bind to [$local]")
context stop self
case c @ Connected(remote, local) =>
log.info(s"Server Connected: [ $remote ] with [ $local ]")
context.parent ! c
val handler = context.actorOf(Props(handlerClass, sender(), remote))
sender() ! Register(handler, keepOpenOnPeerClosed = true)
log.info(s"Handler: [ $handler ]")
log.info(s"Sender: [ $sender ]")
}
}
/**
* connection must remain half-open when the remote side has closed its writing end
* allows handler to write outstanding data to the client before closing the connection
* enabled using a flag during connection activation
* after chunk is written wait for the Ack before sending the next chunk
* while waiting
* switch behavior to buffer incoming data
*/
class AckHandler(connection: ActorRef, remote: InetSocketAddress) extends Actor with ActorLogging {
import Tcp._
// sign death pact, actor terminates when connection breaks
context watch connection
case object Ack extends Event
def receive = {
case Received(data) =>
log.info(s"Handler Received: [ $data ]")
buffer(data)
connection ! Write(data, Ack)
context.become({
case Received(data) =>
log.info(s"Handler within Become Received: [ $data ]")
buffer(data)
case Ack =>
log.info("Handler within Become ACK")
acknowledge
case PeerClosed =>
log.info("Handler within Become Peer Closed")
closing = true
case unhandled =>
log.info(s"Handler within Become Received: [ Case Not Handled - $unhandled ]")
}, discardOld = false)
case PeerClosed =>
log.info("Handler Peer Closed")
context stop self
case unhandled =>
log.info(s"Handler Received: [ Case Not Handled - $unhandled ]")
}
var storage = Vector.empty[ByteString]
var stored = 0L
var transferred = 0L
var closing = false
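    // reading is suspended once the buffered bytes exceed the high watermark
    // and resumed again once they drop back below the low watermark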
val maxStored = 100000000L
val highWatermark = maxStored * 5 / 10
val lowWatermark = maxStored * 3 / 10
var suspended = false
private def buffer(data: ByteString): Unit = {
storage :+= data
stored += data.size
if (stored > maxStored) {
log.warning(s"drop connection to [$remote] (buffer overrun)")
context stop self
} else if (stored > highWatermark) {
log.debug(s"suspending reading")
connection ! SuspendReading
suspended = true
}
}
private def acknowledge(): Unit = {
require(storage.nonEmpty, "storage was empty")
val size = storage(0).size
stored -= size
transferred += size
storage = storage drop 1
if (suspended && stored < lowWatermark) {
log.debug("resuming reading")
connection ! ResumeReading
suspended = false
}
if (storage.isEmpty) {
if (closing) context stop self
else context.unbecome()
} else connection ! Write(storage(0), Ack)
}
}
}
| peterjanovsky/akka-examples | src/main/scala/com/pjanof/io/WriteBackPressure.scala | Scala | mit | 8,350 |
package ai.x.example
import ai.x.lens._
object Example3{
def _main(args: Array[String]): Unit = {
println("Starting")
println("-"*80)
println()
abstract class Indirect[T]{
def map[R]( f: T => R ): Indirect[R]
def flatMap[R]( f: T => Indirect[R] ): Indirect[R] = ???
def compose[R]( other: Indirect[R] ): Indirect[(T,R)]
}
case class IndirectValue[T]( i: T ) extends Indirect[T]{
def map[R]( f: T => R ): IndirectValue[R] = IndirectValue(f(i))
def compose[R]( other: Indirect[R] ): Indirect[(T,R)] = ???
}
case class FakeIndirect[T]() extends Indirect[T]{
def map[R]( f: T => R ): FakeIndirect[R] = FakeIndirect[R]
def compose[R]( other: Indirect[R] ): FakeIndirect[(T,R)] = FakeIndirect[(T,R)]
}
val i = IndirectValue(5)
val i2 = FakeIndirect[Int]()
def doIndirect(d: Indirect[Int]) = println( d.map(_ + 10) )
IndirectValue(IndirectValue(5))
//doIndirect(i)
//doIndirect(i2)
// Scalaz after here
import scalaz._
import Scalaz._
import scala.concurrent._
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
Option(Option(5)).flatten === Option(5)
def futurePrint[T](f: Future[T]) = {
println(
Await.result(
f,
Duration.Inf
)
)
}
def delayedFuture[T](v: T) = Future{ Thread.sleep(2000); v }
//println(new java.util.Date)
val delayedInt = delayedFuture(5)
val delayedString = delayedFuture("Test")
val f1 = for{
i <- delayedInt
s <- delayedString
} yield (i,s)
//futurePrint(f1)
//println(new java.util.Date)
//val f2 = ( delayedFuture(5) |@| delayedFuture("Test") )( (l,r) => (l,r) )
val f2 = delayedFuture(5).flatMap( i => delayedFuture("Test").map( s => (i,s) ) )
//futurePrint(f2)
//println(new java.util.Date)
val ff1 = (i: Int) => i*i
val ff2 = (i: Int) => s"The square of $i is: "
val square: (Int => String) = for{
v1 <- ff1
v2 <- ff2
} yield v2 + v1.toString
//println( square(1) )
//println( square(2) )
//println( square(3) )
trait MySerializable[T]{
def serialize(t: T): String
}
implicit object IntSerializable extends MySerializable[Int]{
def serialize(t: Int): String = "(Int:"+t.toString+")"
}
implicit object DoubleSerializable extends MySerializable[Double]{
def serialize(t: Double): String = "(Double:"+t.toString+")"
}
implicit def OptionSerializable[T](implicit s: MySerializable[T]) = new MySerializable[Option[T]]{
def serialize(t: Option[T]): String = s"(Option:"+t.map(
value => serializeAll(value)
).getOrElse("None")+")"
}
def serializeAll[T](v: T)( implicit s: MySerializable[T] ) = s.serialize(v)
//println( serializeAll(Option(6)) )
/*
println(
Option(5) |+| (None:Option[Int])
)
*/
val res = for{
v1 <- Option(5)
v2 <- (None:Option[Int])
} yield v1 + v2
//println(res)
case class Address(street: String)
case class Person(name: String, address: Address)
val p = Person("Chris", Address("Wall Street"))
println(p)
println(p.lens(_.address.street).set("Broadway"))
println()
println("-"*80)
println("Done!")
}
}
| cvogt/talk-2015-11-10 | src/main/scala/Example3.scala | Scala | bsd-2-clause | 3,507 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.security.minikdc
import java.util.Properties
import kafka.utils.TestUtils
import org.junit.jupiter.api.Test
import org.junit.jupiter.api.Assertions._
class MiniKdcTest {
@Test
def shouldNotStopImmediatelyWhenStarted(): Unit = {
val config = new Properties()
config.setProperty("kdc.bind.address", "0.0.0.0")
config.setProperty("transport", "TCP");
config.setProperty("max.ticket.lifetime", "86400000")
config.setProperty("org.name", "Example")
config.setProperty("kdc.port", "0")
config.setProperty("org.domain", "COM")
config.setProperty("max.renewable.lifetime", "604800000")
config.setProperty("instance", "DefaultKrbServer")
val minikdc = MiniKdc.start(TestUtils.tempDir(), config, TestUtils.tempFile(), List("foo"))
val running = System.getProperty(MiniKdc.JavaSecurityKrb5Conf) != null
try {
assertTrue(running, "MiniKdc stopped immediately; it should not have")
} finally {
if (running) minikdc.stop()
}
}
} | guozhangwang/kafka | core/src/test/scala/kafka/security/minikdc/MiniKdcTest.scala | Scala | apache-2.0 | 1,805 |
package io.vamp.model.event
import io.vamp.model.event.Aggregator.AggregatorType
object Aggregator extends Enumeration {
type AggregatorType = Aggregator.Value
val min, max, average, sum, count = Value
}
case class Aggregator(`type`: AggregatorType, field: Option[String] = None)
trait AggregationResult
trait SingleValueAggregationResult[T <: Any] extends AggregationResult {
def value: T
}
case class LongValueAggregationResult(value: Long) extends SingleValueAggregationResult[Long]
case class DoubleValueAggregationResult(value: Double) extends SingleValueAggregationResult[Double]
| magneticio/vamp | model/src/main/scala/io/vamp/model/event/Aggregator.scala | Scala | apache-2.0 | 600 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.qscript.analysis
import slamdata.Predef.{Map => _, _}
import quasar.contrib.pathy.APath
import quasar.fp._, ski._
import quasar.qscript._
import matryoshka.{Hole => _, _}
import matryoshka.patterns._
import matryoshka.implicits._
import matryoshka.data._
import scalaz._, Scalaz._
import simulacrum.typeclass
@typeclass
trait Cost[F[_]] {
def evaluate[M[_] : Monad](pathCard : APath => M[Int]): GAlgebraM[(Int, ?), M, F, Int]
}
/**
* This is a "generic" implementation for `Cost` that can be used by any connector.
* Can be used for newly created connectors. More mature connectors should provide
* their own instance that will take into account connector-specific information.
*/
object Cost {
implicit def deadEnd: Cost[Const[DeadEnd, ?]] = new Cost[Const[DeadEnd, ?]] {
def evaluate[M[_] : Monad](pathCard: APath => M[Int]): GAlgebraM[(Int, ?), M, Const[DeadEnd, ?], Int] =
(qs: Const[DeadEnd, (Int, Int)]) => 1.point[M]
}
implicit def read[A]: Cost[Const[Read[A], ?]] =
new Cost[Const[Read[A], ?]] {
def evaluate[M[_] : Monad](pathCard: APath => M[Int]): GAlgebraM[(Int, ?), M, Const[Read[A], ?], Int] =
(qs: Const[Read[A], (Int, Int)]) => 1.point[M]
}
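  // Hypothetical connector-specific variant (not part of this file): a connector whose
  // reads should grow more expensive with the size of the underlying file could weight
  // them by `pathCard` rather than using the constant cost above. It assumes `Read`
  // exposes the path being read via a `path` field.
  def weightedRead: Cost[Const[Read[APath], ?]] =
    new Cost[Const[Read[APath], ?]] {
      def evaluate[M[_] : Monad](pathCard: APath => M[Int]): GAlgebraM[(Int, ?), M, Const[Read[APath], ?], Int] =
        (qs: Const[Read[APath], (Int, Int)]) => pathCard(qs.getConst.path)
    }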
implicit def shiftedRead[A]: Cost[Const[ShiftedRead[A], ?]] =
new Cost[Const[ShiftedRead[A], ?]] {
def evaluate[M[_] : Monad](pathCard: APath => M[Int]): GAlgebraM[(Int, ?), M, Const[ShiftedRead[A], ?], Int] =
(qs: Const[ShiftedRead[A], (Int, Int)]) => 1.point[M]
}
implicit def qscriptCore[T[_[_]]: RecursiveT: ShowT]: Cost[QScriptCore[T, ?]] =
new Cost[QScriptCore[T, ?]] {
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
def evaluate[M[_] : Monad](pathCard: APath => M[Int]): GAlgebraM[(Int, ?), M, QScriptCore[T, ?], Int] = {
case Map((card, cost), f) => (card + cost).point[M]
case Reduce((card, cost), bucket, reducers, repair) => (card + cost).point[M]
case Sort((card, cost), bucket, orders) => (card + cost).point[M]
case Filter((card, cost), f) => (card + cost).point[M]
case Subset((card, cost), from, sel, count) => (card + cost).point[M]
case LeftShift((card, cost), _, _, _, _, _) => (card + cost).point[M]
case Union((card, cost), lBranch, rBranch) => {
val compileCardinality = Cardinality[QScriptTotal[T, ?]].calculate(pathCard)
val compileCost = Cost[QScriptTotal[T, ?]].evaluate(pathCard)
val left = lBranch.zygoM(interpretM(κ(card.point[M]), compileCardinality), ginterpretM(κ(cost.point[M]), compileCost))
          val right = rBranch.zygoM(interpretM(κ(card.point[M]), compileCardinality), ginterpretM(κ(cost.point[M]), compileCost))
(left |@| right)( (l, r) => (l + r) / 2)
}
case Unreferenced() => 0.point[M]
}
}
implicit def projectBucket[T[_[_]]]: Cost[ProjectBucket[T, ?]] =
new Cost[ProjectBucket[T, ?]] {
def evaluate[M[_] : Monad](pathCard: APath => M[Int]): GAlgebraM[(Int, ?), M, ProjectBucket[T, ?], Int] = {
case BucketKey((card, cost), _, _) => (card + cost).point[M]
case BucketIndex((card, cost), _, _) => (card + cost).point[M]
}
}
implicit def equiJoin[T[_[_]]: RecursiveT: ShowT]: Cost[EquiJoin[T, ?]] =
new Cost[EquiJoin[T, ?]] {
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
def evaluate[M[_] : Monad](pathCard: APath => M[Int]): GAlgebraM[(Int, ?), M, EquiJoin[T, ?], Int] = {
case EquiJoin((card, cost), lBranch, rBranch, key, jt, combine) =>
val compileCardinality = Cardinality[QScriptTotal[T, ?]].calculate(pathCard)
val compileCost = Cost[QScriptTotal[T, ?]].evaluate(pathCard)
(lBranch.zygoM(interpretM(κ(card.point[M]), compileCardinality), ginterpretM(κ(cost.point[M]), compileCost)) |@|
rBranch.zygoM(interpretM(κ(card.point[M]), compileCardinality), ginterpretM(κ(cost.point[M]), compileCost))) { _ + _ }
}
}
implicit def thetaJoin[T[_[_]] : RecursiveT : ShowT]: Cost[ThetaJoin[T, ?]] =
new Cost[ThetaJoin[T, ?]] {
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
def evaluate[M[_] : Monad](pathCard: APath => M[Int]): GAlgebraM[(Int, ?), M, ThetaJoin[T, ?], Int] = {
case ThetaJoin((card, cost), lBranch, rBranch, _, _, _) => {
val compileCardinality = Cardinality[QScriptTotal[T, ?]].calculate(pathCard)
val compileCost = Cost[QScriptTotal[T, ?]].evaluate(pathCard)
(lBranch.zygoM(interpretM(κ(card.point[M]), compileCardinality), ginterpretM(κ(cost.point[M]), compileCost)) |@|
rBranch.zygoM(interpretM(κ(card.point[M]), compileCardinality), ginterpretM(κ(cost.point[M]), compileCost))) { _ + _ }
}
}
}
implicit def coproduct[F[_], G[_]](implicit F: Cost[F], G: Cost[G]):
Cost[Coproduct[F, G, ?]] =
new Cost[Coproduct[F, G, ?]] {
def evaluate[M[_] : Monad](pathCard: APath => M[Int]): GAlgebraM[(Int, ?), M, Coproduct[F, G, ?], Int] =
_.run.fold(F.evaluate(pathCard), G.evaluate(pathCard))
}
}
| jedesah/Quasar | connector/src/main/scala/quasar/qscript/analysis/Cost.scala | Scala | apache-2.0 | 5,734 |
/**
* Created by basti on 3/7/14.
*/
trait TestUtils {
def assertStringEquals(s1 : String, s2 : String) = assert( s1 == s2 )
}
| dr03lf/gpx-parser | src/test/scala/TestUtils.scala | Scala | apache-2.0 | 133 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.moneyservicebusiness
import jto.validation.forms.UrlFormEncoded
import jto.validation.{Invalid, Path, Rule, VA, Valid, ValidationError, Write}
import models.Country
import org.scalatestplus.play.PlaySpec
import play.api.libs.json.{JsSuccess, Json}
class SendTheLargestAmountsOfMoneySpec extends PlaySpec {
"SendTheLargestAmountsOfMoney" must {
val rule: Rule[UrlFormEncoded, SendTheLargestAmountsOfMoney] = implicitly
val write: Write[SendTheLargestAmountsOfMoney, UrlFormEncoded] = implicitly
"roundtrip through json" in {
val model: SendTheLargestAmountsOfMoney =
SendTheLargestAmountsOfMoney(Seq(Country("United Kingdom", "GB")))
Json.fromJson[SendTheLargestAmountsOfMoney](Json.toJson(model)) mustEqual JsSuccess(model)
}
"correctly parse the json if country_1 and country_3 fields provided" in {
val json = Json.obj("country_1" -> "GB", "country_3" -> "IN")
val expected = SendTheLargestAmountsOfMoney(Seq(Country("United Kingdom", "GB"), Country("India", "IN")))
Json.fromJson[SendTheLargestAmountsOfMoney](json) mustEqual JsSuccess(expected)
}
"roundtrip through forms" in {
val model: SendTheLargestAmountsOfMoney =
SendTheLargestAmountsOfMoney(Seq(Country("United Kingdom", "GB")))
rule.validate(write.writes(model)) mustEqual Valid(model)
}
"fail to validate when there are no countries" in {
val form: UrlFormEncoded = Map(
"largestAmountsOfMoney" -> Seq.empty
)
rule.validate(form) mustEqual Invalid(
Seq((Path \\ "largestAmountsOfMoney") -> Seq(ValidationError("error.invalid.countries.msb.sendlargestamount.country")))
)
}
"fail to validate when there are more than 3 countries" in {
// scalastyle:off magic.number
val form: UrlFormEncoded = Map(
"largestAmountsOfMoney[]" -> Seq.fill(4)("GB")
)
rule.validate(form) mustEqual Invalid(
Seq((Path \\ "largestAmountsOfMoney") -> Seq(ValidationError("error.maxLength", 3)))
)
}
}
"SendTheLargestAmountsOfMoney Form Writes" when {
"an item is repeated" must {
"serialise all items correctly" in {
SendTheLargestAmountsOfMoney.formW.writes(SendTheLargestAmountsOfMoney(List(
Country("Country2", "BB"),
Country("Country1", "AA"),
Country("Country1", "AA")
))) must be (
Map(
"largestAmountsOfMoney[0]" -> List("BB"),
"largestAmountsOfMoney[1]" -> List("AA"),
"largestAmountsOfMoney[2]" -> List("AA")
)
)
}
}
}
"SendTheLargestAmountsOfMoney Form Reads" when {
"all countries are valid" must {
"Successfully read from the form" in {
SendTheLargestAmountsOfMoney.formR.validate(
Map(
"largestAmountsOfMoney[0]" -> Seq("GB"),
"largestAmountsOfMoney[1]" -> Seq("MK"),
"largestAmountsOfMoney[2]" -> Seq("JO")
)
) must be(Valid(SendTheLargestAmountsOfMoney(Seq(
Country("United Kingdom", "GB"),
Country("Macedonia, the Former Yugoslav Republic of", "MK"),
Country("Jordan", "JO")
))))
}
}
"the second country is invalid" must {
"fail validation" in {
val x: VA[SendTheLargestAmountsOfMoney] = SendTheLargestAmountsOfMoney.formR.validate(
Map(
"largestAmountsOfMoney[0]" -> Seq("GB"),
"largestAmountsOfMoney[1]" -> Seq("hjjkhjkjh"),
"largestAmountsOfMoney[2]" -> Seq("MK")
)
)
        x must be (Invalid(Seq((Path \ "largestAmountsOfMoney" \ 1) -> Seq(ValidationError("error.invalid.country")))))
}
}
}
}
| hmrc/amls-frontend | test/models/moneyservicebusiness/SendTheLargestAmountsOfMoneySpec.scala | Scala | apache-2.0 | 4,352 |
// MultipleClassArgs.scala
import com.atomicscala.AtomicTest._
class Sum3(a1:Int, a2:Int, a3:Int) {
def result():Int = { a1 + a2 + a3 }
}
new Sum3(13, 27, 44).result() is 84
| P7h/ScalaPlayground | Atomic Scala/atomic-scala-examples/examples/19_Class Arguments/MultipleClassArgs.scala | Scala | apache-2.0 | 178 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.storage.cassandra
import java.nio.ByteBuffer
import com.twitter.conversions.time._
import com.twitter.finagle.stats.{DefaultStatsReceiver, StatsReceiver}
import com.twitter.util.{Duration, Future}
import com.twitter.zipkin.adjuster.{ApplyTimestampAndDuration, CorrectForClockSkew, MergeById}
import com.twitter.zipkin.common.Span
import com.twitter.zipkin.conversions.thrift._
import com.twitter.zipkin.storage.{CollectAnnotationQueries, IndexedTraceId, SpanStore}
import com.twitter.zipkin.thriftscala.{Span => ThriftSpan}
import com.twitter.zipkin.util.{FutureUtil, Util}
import org.twitter.zipkin.storage.cassandra.Repository
import scala.collection.JavaConverters._
object CassandraSpanStoreDefaults {
val KeyspaceName = Repository.KEYSPACE
val SpanTtl = 7.days
val IndexTtl = 3.days
val MaxTraceCols = 100000
val SpanCodec = new ScroogeThriftCodec[ThriftSpan](ThriftSpan)
}
abstract class CassandraSpanStore(
stats: StatsReceiver = DefaultStatsReceiver.scope("CassandraSpanStore"),
spanTtl: Duration = CassandraSpanStoreDefaults.SpanTtl,
indexTtl: Duration = CassandraSpanStoreDefaults.IndexTtl,
maxTraceCols: Int = CassandraSpanStoreDefaults.MaxTraceCols
) extends SpanStore with CollectAnnotationQueries {
/** Deferred as repository creates network connections */
protected def repository: Repository
private[this] val IndexDelimiter = ":"
private[this] val IndexDelimiterBytes = IndexDelimiter.getBytes
private[this] val spanCodec = CassandraSpanStoreDefaults.SpanCodec
/**
* Internal helper methods
*/
private[this] def createSpanColumnName(span: Span): String =
"%d_%d_%d".format(span.id, span.annotations.hashCode, span.binaryAnnotations.hashCode)
private[this] def annotationKey(serviceName: String, annotation: String, value: Option[ByteBuffer]): ByteBuffer = {
ByteBuffer.wrap(
serviceName.getBytes ++ IndexDelimiterBytes ++ annotation.getBytes ++
value.map { v => IndexDelimiterBytes ++ Util.getArrayFromBuffer(v) }.getOrElse(Array()))
}
/**
* Stats
*/
private[this] val SpansStats = stats.scope("spans")
private[this] val SpansStoredCounter = SpansStats.counter("stored")
private[this] val SpansIndexedCounter = SpansStats.counter("indexed")
private[this] val IndexStats = stats.scope("index")
private[this] val IndexServiceNameCounter = IndexStats.counter("serviceName")
private[this] val IndexServiceNameNoNameCounter = IndexStats.scope("serviceName").counter("noName")
private[this] val IndexSpanNameCounter = IndexStats.scope("serviceName").counter("spanName")
private[this] val IndexSpanNameNoNameCounter = IndexStats.scope("serviceName").scope("spanName").counter("noName")
private[this] val IndexTraceStats = IndexStats.scope("trace")
private[this] val IndexTraceNoTimestampCounter = IndexTraceStats.counter("noTimestamp")
private[this] val IndexTraceByServiceNameCounter = IndexTraceStats.counter("serviceName")
private[this] val IndexTraceBySpanNameCounter = IndexTraceStats.counter("spanName")
private[this] val IndexTraceByDurationCounter = IndexTraceStats.counter("duration")
private[this] val IndexAnnotationCounter = IndexStats.scope("annotation").counter("standard")
private[this] val IndexBinaryAnnotationCounter = IndexStats.scope("annotation").counter("binary")
private[this] val IndexSpanNoTimestampCounter = IndexStats.scope("span").counter("noTimestamp")
private[this] val IndexSpanNoDurationCounter = IndexStats.scope("span").counter("noDuration")
private[this] val QueryStats = stats.scope("query")
private[this] val QueryGetSpansByTraceIdsStat = QueryStats.stat("getSpansByTraceIds")
private[this] val QueryGetServiceNamesCounter = QueryStats.counter("getServiceNames")
private[this] val QueryGetSpanNamesCounter = QueryStats.counter("getSpanNames")
private[this] val QueryGetTraceIdsByNameCounter = QueryStats.counter("getTraceIdsByName")
private[this] val QueryGetTraceIdsByAnnotationCounter = QueryStats.counter("getTraceIdsByAnnotation")
private[this] val QueryGetTraceIdsByDurationCounter = QueryStats.counter("getTraceIdsByDuration")
/**
* Internal indexing helpers
*/
private[this] def indexServiceName(span: Span): Future[Unit] = {
IndexServiceNameCounter.incr()
Future.join(span.serviceNames.toList map {
case "" =>
IndexServiceNameNoNameCounter.incr()
Future.value(())
case s =>
FutureUtil.toFuture(repository.storeServiceName(s, indexTtl.inSeconds))
})
}
private[this] def indexSpanNameByService(span: Span): Future[Unit] = {
if (span.name == "") {
IndexSpanNameNoNameCounter.incr()
Future.value(())
} else {
IndexSpanNameCounter.incr()
Future.join(
span.serviceNames.toSeq map { serviceName =>
FutureUtil.toFuture(repository.storeSpanName(serviceName, span.name, indexTtl.inSeconds))
})
}
}
private[this] def indexTraceIdByName(span: Span): Future[Unit] = {
if (span.timestamp.isEmpty)
IndexTraceNoTimestampCounter.incr()
span.timestamp map { timestamp =>
val serviceNames = span.serviceNames
Future.join(
serviceNames.toList map { serviceName =>
IndexTraceByServiceNameCounter.incr()
val storeFuture =
FutureUtil.toFuture(repository.storeTraceIdByServiceName(serviceName, timestamp, span.traceId, indexTtl.inSeconds))
if (span.name != "") {
IndexTraceBySpanNameCounter.incr()
Future.join(
storeFuture,
FutureUtil.toFuture(repository.storeTraceIdBySpanName(serviceName, span.name, timestamp, span.traceId, indexTtl.inSeconds)))
} else storeFuture
})
} getOrElse Future.value(())
}
private[this] def indexByAnnotations(span: Span): Future[Unit] = {
if (span.timestamp.isEmpty)
IndexSpanNoTimestampCounter.incr()
span.timestamp map { timestamp =>
val annotationsFuture = Future.join(
span.annotations
.groupBy(_.value)
.flatMap { case (_, as) =>
val a = as.min
a.host map { endpoint =>
IndexAnnotationCounter.incr()
FutureUtil.toFuture(
repository.storeTraceIdByAnnotation(
annotationKey(endpoint.serviceName, a.value, None), timestamp, span.traceId, indexTtl.inSeconds))
}
}.toList)
val binaryFuture = Future.join(span.binaryAnnotations flatMap { ba =>
ba.host map { endpoint =>
IndexBinaryAnnotationCounter.incr()
Future.join(
FutureUtil.toFuture(
repository.storeTraceIdByAnnotation(
annotationKey(endpoint.serviceName, ba.key, Some(ba.value)), timestamp, span.traceId, indexTtl.inSeconds)),
FutureUtil.toFuture(
repository.storeTraceIdByAnnotation(
annotationKey(endpoint.serviceName, ba.key, None), timestamp, span.traceId, indexTtl.inSeconds)))
}
})
Future.join(annotationsFuture, binaryFuture).map(_ => ())
} getOrElse Future.value(())
}
private[this] def indexByDuration(span: Span): Future[Unit] = {
(span.timestamp, span.duration) match {
case (Some(timestamp), Some(duration)) =>
Future.join(
span.serviceNames.toSeq.flatMap { serviceName =>
IndexTraceByDurationCounter.incr()
Seq(
repository.storeTraceIdByDuration(
serviceName, span.name, timestamp, duration, span.traceId, indexTtl.inSeconds),
repository.storeTraceIdByDuration(
serviceName, "", timestamp, duration, span.traceId, indexTtl.inSeconds)
)
}.map(FutureUtil.toFuture)
)
case (_, None) =>
IndexSpanNoDurationCounter.incr()
Future.value((): Unit)
case _ => Future.value((): Unit)
}
}
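  // fetch the raw spans for the given trace ids, merge spans sharing an id, correct for
  // clock skew and derive timestamps/durations, then sort traces newest-first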
private[this] def getSpansByTraceIds(traceIds: Seq[Long], count: Int): Future[Seq[List[Span]]] = {
FutureUtil.toFuture(repository.getSpansByTraceIds(traceIds.toArray.map(Long.box), count))
.map { spansByTraceId =>
val spans =
spansByTraceId.asScala.mapValues { spans => spans.asScala.map(spanCodec.decode(_).toSpan) }
traceIds.flatMap(traceId => spans.get(traceId))
.map(MergeById)
.map(CorrectForClockSkew)
.map(ApplyTimestampAndDuration)
.sortBy(_.head)(Ordering[Span].reverse) // sort descending by the first span
}
}
/**
* API Implementation
*/
override def close() = repository.close()
override def apply(spans: Seq[Span]): Future[Unit] = {
SpansStoredCounter.incr(spans.size)
Future.join(
spans.map(s => s.copy(annotations = s.annotations.sorted))
.map(ApplyTimestampAndDuration.apply).map { span =>
SpansIndexedCounter.incr()
Future.join(
FutureUtil.toFuture(
repository.storeSpan(
span.traceId,
span.timestamp.getOrElse(0L),
createSpanColumnName(span),
spanCodec.encode(span.toThrift),
spanTtl.inSeconds)),
indexServiceName(span),
indexSpanNameByService(span),
indexTraceIdByName(span),
indexByAnnotations(span),
indexByDuration(span))
})
}
override def getTracesByIds(traceIds: Seq[Long]): Future[Seq[List[Span]]] = {
QueryGetSpansByTraceIdsStat.add(traceIds.size)
getSpansByTraceIds(traceIds, maxTraceCols)
}
override def getAllServiceNames(): Future[Seq[String]] = {
QueryGetServiceNamesCounter.incr()
FutureUtil.toFuture(repository.getServiceNames).map(_.asScala.toList.sorted)
}
override def getSpanNames(service: String): Future[Seq[String]] = {
QueryGetSpanNamesCounter.incr()
FutureUtil.toFuture(repository.getSpanNames(service)).map(_.asScala.toList.sorted)
}
override def getTraceIdsByName(
serviceName: String,
spanName: Option[String],
endTs: Long,
lookback: Long,
limit: Int
): Future[Seq[IndexedTraceId]] = {
QueryGetTraceIdsByNameCounter.incr()
val traceIdsFuture = FutureUtil.toFuture(spanName match {
// if we have a span name, look up in the service + span name index
// if not, look up by service name only
case Some(x :String) => repository.getTraceIdsBySpanName(serviceName, x, endTs * 1000, lookback * 1000, limit)
case None => repository.getTraceIdsByServiceName(serviceName, endTs * 1000, lookback * 1000, limit)
})
traceIdsFuture.map { traceIds =>
traceIds.asScala
.map { case (traceId, ts) => IndexedTraceId(traceId, timestamp = ts) }
.toSeq
}
}
override def getTraceIdsByAnnotation(
serviceName: String,
annotation: String,
value: Option[ByteBuffer],
endTs: Long,
lookback: Long,
limit: Int
): Future[Seq[IndexedTraceId]] = {
QueryGetTraceIdsByAnnotationCounter.incr()
FutureUtil.toFuture(
repository
.getTraceIdsByAnnotation(annotationKey(serviceName, annotation, value), endTs * 1000, lookback * 1000, limit))
.map { traceIds =>
traceIds.asScala
.map { case (traceId, ts) => IndexedTraceId(traceId, timestamp = ts) }
.toSeq
}
}
override protected def getTraceIdsByDuration(
serviceName: String,
spanName: Option[String],
minDuration: Long,
maxDuration: Option[Long],
endTs: Long,
lookback: Long,
limit: Int
): Future[Seq[IndexedTraceId]] = {
QueryGetTraceIdsByDurationCounter.incr()
FutureUtil.toFuture(
repository
.getTraceIdsByDuration(serviceName, spanName getOrElse "", minDuration, maxDuration getOrElse Long.MaxValue,
endTs * 1000, (endTs - lookback) * 1000, limit, indexTtl.inSeconds))
.map { traceIds =>
traceIds.asScala
.map { case (traceId, ts) => IndexedTraceId(traceId, timestamp = ts) }
.toSeq
}
}
}
| jkdcdlly/zipkin | zipkin-cassandra/src/main/scala/com/twitter/zipkin/storage/cassandra/CassandraSpanStore.scala | Scala | apache-2.0 | 12,671 |
import scala.reflect.Selectable.reflectiveSelectable
object Test {
class C { type S = String; type I }
class D extends C { type I = Int }
type Foo = {
def sel0: Int
def sel1: Int => Int
def fun0(x: Int): Int
def fun1(x: Int)(y: Int): Int
def fun2(x: Int): Int => Int
def fun3(a1: Int, a2: Int, a3: Int)
(a4: Int, a5: Int, a6: Int)
(a7: Int, a8: Int, a9: Int): Int
def fun4(implicit x: Int): Int
def fun5(x: Int)(implicit y: Int): Int
def fun6(x: C, y: x.S): Int
def fun7(x: C, y: x.I): Int
def fun8(y: C): y.S
def fun9(y: C): y.I
}
class FooI {
def sel0: Int = 1
def sel1: Int => Int = x => x
def fun0(x: Int): Int = x
def fun1(x: Int)(y: Int): Int = x + y
def fun2(x: Int): Int => Int = y => x * y
def fun3(a1: Int, a2: Int, a3: Int)
(a4: Int, a5: Int, a6: Int)
(a7: Int, a8: Int, a9: Int): Int = -1
def fun4(implicit x: Int): Int = x
def fun5(x: Int)(implicit y: Int): Int = x + y
def fun6(x: C, y: x.S): Int = 1
def fun7(x: C, y: x.I): Int = 2
def fun8(y: C): y.S = "Hello"
def fun9(y: C): y.I = 1.asInstanceOf[y.I]
}
def basic(x: Foo): Unit ={
assert(x.sel0 == 1)
assert(x.sel1(2) == 2)
assert(x.fun0(3) == 3)
val f = x.sel1
assert(f(3) == 3)
}
def currying(x: Foo): Unit = {
assert(x.fun1(1)(2) == 3)
assert(x.fun2(1)(2) == 2)
assert(x.fun3(1, 2, 3)(4, 5, 6)(7, 8, 9) == -1)
}
def etaExpansion(x: Foo): Unit = {
val f0 = x.fun0(_)
assert(f0(2) == 2)
val f1 = x.fun0 _
assert(f1(2) == 2)
val f2 = x.fun1(1)(_)
assert(f2(2) == 3)
val f3 = x.fun1(1) _
assert(f3(2) == 3)
val f4 = x.fun1(1)
assert(f4(2) == 3)
}
def implicits(x: Foo) = {
implicit val y = 2
assert(x.fun4 == 2)
assert(x.fun5(1) == 3)
}
  // Limited support for dependent methods
def dependant(x: Foo) = {
val y = new D
assert(x.fun6(y, "Hello") == 1)
// assert(x.fun7(y, 1) == 2) // error: No ClassTag available for x.I
val s = x.fun8(y)
assert((s: String) == "Hello")
// val i = x.fun9(y) // error: rejected (blows up in pickler if not rejected)
// assert((i: String) == "Hello") // error: Type mismatch: found: y.S(i); required: String
}
def main(args: Array[String]): Unit = {
basic(new FooI)
currying(new FooI)
etaExpansion(new FooI)
implicits(new FooI)
dependant(new FooI)
}
}
| som-snytt/dotty | tests/run/structural.scala | Scala | apache-2.0 | 2,484 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.exchange
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
import org.apache.spark.{broadcast, SparkException}
import org.apache.spark.launcher.SparkLauncher
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.catalyst.plans.physical.{BroadcastMode, BroadcastPartitioning, Partitioning}
import org.apache.spark.sql.execution.{SparkPlan, SQLExecution}
import org.apache.spark.sql.execution.joins.HashedRelation
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.util.ThreadUtils
/**
* A [[BroadcastExchangeExec]] collects, transforms and finally broadcasts the result of
* a transformed SparkPlan.
*/
case class BroadcastExchangeExec(
mode: BroadcastMode,
child: SparkPlan) extends Exchange {
override lazy val metrics = Map(
"dataSize" -> SQLMetrics.createMetric(sparkContext, "data size (bytes)"),
"collectTime" -> SQLMetrics.createMetric(sparkContext, "time to collect (ms)"),
"buildTime" -> SQLMetrics.createMetric(sparkContext, "time to build (ms)"),
"broadcastTime" -> SQLMetrics.createMetric(sparkContext, "time to broadcast (ms)"))
override def outputPartitioning: Partitioning = BroadcastPartitioning(mode)
override def doCanonicalize(): SparkPlan = {
BroadcastExchangeExec(mode.canonicalized, child.canonicalized)
}
@transient
private val timeout: Duration = {
val timeoutValue = sqlContext.conf.broadcastTimeout
if (timeoutValue < 0) {
Duration.Inf
} else {
timeoutValue.seconds
}
}
@transient
private lazy val relationFuture: Future[broadcast.Broadcast[Any]] = {
    // relationFuture is used in "doPrepare"/"doExecuteBroadcast". Therefore we can get the execution id correctly here.
val executionId = sparkContext.getLocalProperty(SQLExecution.EXECUTION_ID_KEY)
Future {
// This will run in another thread. Set the execution id so that we can connect these jobs
// with the correct execution.
SQLExecution.withExecutionId(sparkContext, executionId) {
try {
val beforeCollect = System.nanoTime()
// Use executeCollect/executeCollectIterator to avoid conversion to Scala types
val (numRows, input) = child.executeCollectIterator()
if (numRows >= 512000000) {
throw new SparkException(
s"Cannot broadcast the table with more than 512 millions rows: $numRows rows")
}
val beforeBuild = System.nanoTime()
longMetric("collectTime") += (beforeBuild - beforeCollect) / 1000000
// Construct the relation.
val relation = mode.transform(input, Some(numRows))
val dataSize = relation match {
case map: HashedRelation =>
map.estimatedSize
case arr: Array[InternalRow] =>
arr.map(_.asInstanceOf[UnsafeRow].getSizeInBytes.toLong).sum
case _ =>
throw new SparkException("[BUG] BroadcastMode.transform returned unexpected type: " +
relation.getClass.getName)
}
longMetric("dataSize") += dataSize
if (dataSize >= (8L << 30)) {
throw new SparkException(
s"Cannot broadcast the table that is larger than 8GB: ${dataSize >> 30} GB")
}
val beforeBroadcast = System.nanoTime()
longMetric("buildTime") += (beforeBroadcast - beforeBuild) / 1000000
// Broadcast the relation
val broadcasted = sparkContext.broadcast(relation)
longMetric("broadcastTime") += (System.nanoTime() - beforeBroadcast) / 1000000
SQLMetrics.postDriverMetricUpdates(sparkContext, executionId, metrics.values.toSeq)
broadcasted
} catch {
case oe: OutOfMemoryError =>
throw new OutOfMemoryError(s"Not enough memory to build and broadcast the table to " +
s"all worker nodes. As a workaround, you can either disable broadcast by setting " +
s"${SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key} to -1 or increase the spark driver " +
s"memory by setting ${SparkLauncher.DRIVER_MEMORY} to a higher value")
.initCause(oe.getCause)
}
}
}(BroadcastExchangeExec.executionContext)
}
override protected def doPrepare(): Unit = {
// Materialize the future.
relationFuture
}
override protected def doExecute(): RDD[InternalRow] = {
throw new UnsupportedOperationException(
"BroadcastExchange does not support the execute() code path.")
}
override protected[sql] def doExecuteBroadcast[T](): broadcast.Broadcast[T] = {
ThreadUtils.awaitResult(relationFuture, timeout).asInstanceOf[broadcast.Broadcast[T]]
}
}
object BroadcastExchangeExec {
private[execution] val executionContext = ExecutionContext.fromExecutorService(
ThreadUtils.newDaemonCachedThreadPool("broadcast-exchange", 128))
}
| brad-kaiser/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/BroadcastExchangeExec.scala | Scala | apache-2.0 | 5,908 |
package com.topper.plugin
import com.topper.util.FileIO._
object Output {
  def quote(int: Int) = "\"" + int.toString + "\""
def write(graph: Graph) = {
Log("writing extracted type relations and call graph...")
val projectName = PluginArgs.projectName
val nodes = graph.nodes
val edges = graph.edges.distinct
val distinctEdges = edges.distinct
val nodesByID = nodes.map(node => node.id -> node).toMap
Log(s"Collected ${nodes.size} nodes (${nodesByID.size} distinct).")
Log(s"Collected ${edges.size} edges (${distinctEdges.size} distinct).")
writeOutputFile(projectName, "nodes",
"definition,notSynthetic,id,name,kind\\n" +
nodesByID.map {node =>
List(
node._2.synthetic,
node._2.id,
node._2.name,
node._2.owner,
node._2.kind)
.mkString(",")
        }.mkString("\n")
)
val nodesStr = nodesByID
.map {case (id, node) => s" $id // ${node.owner}.${node.name}"}
      .mkString("\n")
val edgesStr = distinctEdges.map {edge =>
val node1 = nodesByID(edge.id1)
val node2 = nodesByID(edge.id2)
s" ${edge.id1} -> ${edge.id2} // ${node1.owner}.${node1.name} -> ${node2.owner}.${node2.name}"
    }.mkString("\n")
val usedNodes = distinctEdges.map(_.id2).toSet
val unusedNodes = nodesByID -- usedNodes
    val unusedNodesStr = unusedNodes
.collect {case (id, node) if !node.synthetic => s" $id // ${node.owner}.${node.name}"}
      .mkString("\n")
writeOutputFile(
projectName,
"unused-nodes",
      unusedNodesStr
)
writeOutputFile(
projectName,
s"$projectName.dot",
s"digraph $projectName {\\n$nodesStr\\n$edgesStr\\n}"
)
writeOutputFile(projectName, "edges",
"id1,edgeKind,id2\\n" +
distinctEdges.map {edge =>
val node1 = nodesByID(edge.id1)
val node2 = nodesByID(edge.id2)
List(edge.id1, edge.edgeKind, edge.id2).mkString(",") +
s" // ${node1.owner}.${node1.name} -> ${node2.owner}.${node2.name}"
      }.mkString("\n"))
}
}
| gtopper/extractor | src/main/scala/com/topper/plugin/Output.scala | Scala | mit | 2,122 |
//class Parser[S]
//class ParseError
package scratchpad.parsers
import fpinscala.testing.{Prop, Gen}
/*
Laws = p(x) | p(y)
*/
object Main {
def main(args: Array[String]) = {
println("Hi")
}
}
trait Parsers[ParseError, Parser[+_]] { self =>
implicit def operators[A](p: Parser[A]) = ParserOps[A](p)
implicit def asStringParser[A](a: A)(implicit f: A => Parser[String]): ParserOps[String] = ParserOps(f(a))
def char(c: Char): Parser[Char] = {
string(c.toString).map(_.charAt(0))
}
def succeed[A](a: A): Parser[A] = {
string("") map (_ => a)
}
def string(s: String): Parser[String] = ???
def run[A](p: Parser[A])(input: String): Either[ParseError, A] = ???
def or[A](s1: Parser[A], s2: Parser[A]): Parser[A] = ???
def listOfN[A](n: Int, p: Parser[A]): Parser[List[A]] = ???
def map[A, B](pa: Parser[A])(f: A => B): Parser[B] = ???
// def many[A](p: Parser[A]): Parser[List[A]] = {
// map()
// }
def many[A](p: Parser[A]): Parser[List[A]] = ???
def manyAChars(): Parser[Int] = {
// map(many(char('a')))(_.length)
char('a').many().map(_.length)
}
def manyAChars2(): Parser[Int] = {
char('a').many().slice().map(_.length)
}
def manyNr[A](p: Parser[A]) = {
map(many(p))(_.length)
}
def slice[A](p: Parser[A]): Parser[String] = ???
def product[A,B](p: Parser[A], p2: Parser[B]): Parser[(A,B)] = ???
def map2[A, B, C](p1: Parser[A], p2: Parser[B])(f: (A, B) => C): Parser[C] = {
map(product(p1, p2))(f.tupled)
}
def many1[A](p: Parser[A]): Parser[List[A]] = {
// map(product(p, p.many()))((r: (A, List[A])) => r._1 :: r._2)
map2(p, p.many())(_ :: _)
map2(p, p.many())((a: A, la: List[A]) => a :: la)
}
// def manyA
// def many[A](p: Parser[List[A]]): Parser[Int] = {
//
// }
// def zeroMore[A](p: Parser[A]): Parser[Int] = ???
// def oneMore[A](p: Parser[A]): Either[ParseError, Parser[Int]] = ???
// def zeroMoreOneMore[A](p1: Parser[A], p2: Parser[A]): Either[ParseError, Parser[Int, Int]] = ???
object Laws {
def equal[A](p1: Parser[A], p2: Parser[A])(in: Gen[String]): Prop =
Prop.forAll(in)(s =>
run(p1)(s) == run(p2)(s))
def mapLaw[A](p: Parser[A])(in: Gen[String]): Prop =
equal(p, p.map((a: A) => a))(in)
//def associativeLaw[A](p1: Parser[A], p2: Parser[A]): Prop = ???
def unitLaw[A](p: Parser[A])(in: Gen[String]): Prop =
Prop.forAll(in)(s =>
run(succeed(s))(s) == Right(s))
}
case class ParserOps[A](p: Parser[A]) {
def |[B >: A](p2: Parser[B]): Parser[B] = self.or(p, p2)
def or[B >: A](p2: => Parser[B]): Parser[B] = self.or(p, p2)
def times[B >: A](n: Int): Parser[List[A]] = self.listOfN(n, p)
def +[B](pB: Parser[B]): Parser[(A, B)] = self.product(p, pB)
def map[B](f: A => B): Parser[B] = self.map(p)(f)
def slice(): Parser[String] = self.slice(p)
def many(): Parser[List[A]] = self.many(p)
// def succeed[A](a: A): Parser[A] =
}
}
//trait Parsers2[ParseError, Parser[+_]] {
// def or[A](s1: Parser[A], s2: Parser[A]): Parser[A]
// implicit def string(s: String): Parser[String]
// implicit def operators[A](p: Parser[A]) = ParserOps[A](p)
// implicit def asStringParser[A](a: A)(implicit f: A => Parser[String]):
// ParserOps[String] = ParserOps(f(a))
//
// case class ParserOps[A](p: Parser[A]) {
// def |[B>:A](p2: Parser[B]): Parser[B] = self.or(p,p2)
// def or[B>:A](p2: => Parser[B]): Parser[B] = self.or(p,p2)
// }
//} | waxmittmann/fpinscala | answers/src/main/scala/scratchpad/done/Parser.scala | Scala | mit | 3,488 |
/**
* @author Victor Caballero (vicaba)
* @author Xavier Domingo (xadobu)
*/
package actors.node.protocol.when
import actors.ComposableActor
import akka.actor.{Props, ActorRef}
import scala.collection.mutable
import scala.concurrent.duration.FiniteDuration
import scala.concurrent.duration._
/**
* Message that tells the ConditionHandler and the Node to create a ConditionActor
* @param name name of the ConditionActor
* @param factoryMethod factory method that returns a ConditionActor
*/
case class NewCondition(name: String, factoryMethod: () => Condition)
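// Usage sketch (the receiving actor reference below is hypothetical): a node or its
// ConditionHandler can be asked to create condition actors via the Condition.of factory
// defined further down in this file, e.g.
//   conditionHandler ! NewCondition("every5seconds", Condition.of(TimeCondition(5.seconds)))
//   conditionHandler ! NewCondition("batchOf100", Condition.of(TransactionBoundary(100)))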
/**
* Condition results may extend from this trait
*/
trait ConditionResult
/**
* A true condition result
*/
case class True() extends ConditionResult
/**
* A false condition result
*/
case class False() extends ConditionResult
/**
* Tells the node to check conditions to propagate transactions
*/
case class CheckCondition()
/**
* Tells the node to propagate the buffered transactions
*/
case class SendBufferedTransactions()
/**
* Actor representing a condition
*/
trait Condition extends ComposableActor {
receiveBuilder += {
case CheckCondition() =>
context.parent ! checkCondition()
}
/**
* Method to check for the veracity of the condition
* @return
*/
def checkCondition(): ConditionResult
}
/**
* Condition companion object. Factory.
*/
object Condition {
def of(condition: => Condition): () => Condition = () => condition
}
/**
* A time condition
* @param time the time to schedule
*/
case class TimeCondition(time: FiniteDuration) extends Condition {
case class Tick()
var ticks: Double = 0
implicit val ec = context.dispatcher
context.system.scheduler.schedule(
0.milliseconds,
time,
context.parent,
True()
)
override def checkCondition(): ConditionResult = {
False()
}
}
/**
* A number of transactions boundary condition.
* @param number the number of transactions that have to wait for sending a True() condition result
*/
case class TransactionBoundary(number: Integer) extends Condition {
var numTransactions = 0
override def checkCondition(): ConditionResult = {
numTransactions += 1
//println(self.path + ": From TransactionBoundary: {numTransactions: " + numTransactions + "}")
if (numTransactions >= number) {
//println(self.path + "True() sent")
True()
} else {
False()
}
}
}
/**
* Class to handle conditions. It talks between the conditions and the node.
*/
sealed case class ConditionHandler() extends ComposableActor {
protected lazy val conditions = mutable.LinkedHashMap[String, ActorRef]()
receiveBuilder += {
case NewCondition(name, factoryMethod) =>
conditions += name -> context.actorOf(Props(factoryMethod()), name)
case CheckCondition() =>
if (conditions.isEmpty) {
context.parent ! SendBufferedTransactions()
} else {
conditions.foreach {
case (_, condition) =>
condition ! CheckCondition()
}
}
case True() =>
//println(self.path + "SendBufferedTransactions() sent")
context.parent ! SendBufferedTransactions()
case _ =>
}
} | vicobu/DistributedSystem | src/main/scala/actors/node/protocol/when/Conditions.scala | Scala | mit | 3,187 |
package com.cyrusinnovation.computation
/*
* Copyright 2014 Cyrus Innovation, LLC. Licensed under Apache license 2.0.
*/
/** Contains the facts to be operated on in a computation as well as metadata indicating
* whether the caller should continue or not (e.g., if the caller is a sequence of
* computations).
*/
case class Domain(facts: Map[Symbol, Any], continue: Boolean)
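// Worked example of Domain.combine below (hypothetical fact values): new facts override
// existing facts with the same Symbol key, and the continue flag of the original domain is kept:
//   Domain.combine(Map('a -> 2), Domain(Map('a -> 1, 'b -> 3), continue = true))
//     == Domain(Map('a -> 2, 'b -> 3), continue = true)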
object Domain {
def combine(newFacts: Map[Symbol, Any], originalDomain: Domain) : Domain = {
val previousFacts = originalDomain.facts
val resultingFacts: Map[Symbol, Any] = newFacts.foldLeft(previousFacts) {
(factsSoFar: Map[Symbol, Any], factKeyToFactDataStructure: (Symbol, Any)) => {
val factKey = factKeyToFactDataStructure._1
val values = factKeyToFactDataStructure._2
factsSoFar + (factKey -> values)
}
}
new Domain(resultingFacts, originalDomain.continue)
}
}
| psfblair/computation-engine | core/src/main/scala/com/cyrusinnovation/computation/Domain.scala | Scala | apache-2.0 | 906 |
package core.pagelet.entity
import cz.kamenitxan.jakon.validation.validators.NotEmpty
/**
* Created by TPa on 15.04.2020.
*/
class TestJsonPageletData {
@NotEmpty
var msg: String = _
}
| kamenitxan/Jakon | modules/backend/src/test/scala/core/pagelet/entity/TestJsonPageletData.scala | Scala | bsd-3-clause | 193 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze.regularizers
import edu.latrobe._
import edu.latrobe.blaze._
import edu.latrobe.blaze.regularizers.generic._
/**
* Very simple static regularizer, if that is what you want:
*
* 1 2
* J(w_a) = c - w_a
* 2
*
* ---
* \\
* J(w) = / J(w_i)
* ---
* i
*
* d J(w_a)
* -------- = c w_a
* d w_a
*
* d J(w_a)
* ----------- = 0
* d w_b, a!=b
*
* ---
* D J(w_a) \\ d J(w_a)
* -------- = / -------- di = c w_a da
* D w_a --- d w_i
* i
*
*/
abstract class L2WeightDecay
extends WeightDecay[L2WeightDecayBuilder] {
}
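// Numeric illustration of the penalty defined above (made-up values): with scale
// coefficient c = 0.01 and a single weight w_a = 2.0,
//   J(w_a)           = 0.5 * 0.01 * 2.0 * 2.0 = 0.02
//   d J(w_a) / d w_a = 0.01 * 2.0             = 0.02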
final class L2WeightDecayBuilder
extends WeightDecayBuilder[L2WeightDecayBuilder] {
override def repr
: L2WeightDecayBuilder = this
override def canEqual(that: Any)
: Boolean = that.isInstanceOf[L2WeightDecayBuilder]
override protected def doCopy()
: L2WeightDecayBuilder = L2WeightDecayBuilder()
// ---------------------------------------------------------------------------
// Weights and binding related.
// ---------------------------------------------------------------------------
override def build(platformHint: Option[Platform],
seed: InstanceSeed)
: Regularizer = L2WeightDecayBuilder.lookupAndBuild(this, platformHint, seed)
}
object L2WeightDecayBuilder
extends RegularizerVariantTable[L2WeightDecayBuilder] {
register(64, L2WeightDecay_Generic_Baseline_Description)
final def apply()
: L2WeightDecayBuilder = new L2WeightDecayBuilder
final def apply(scaleCoefficient: ParameterBuilder)
: L2WeightDecayBuilder = apply().setScaleCoefficient(scaleCoefficient)
final def apply(scaleCoefficient: ParameterBuilder,
baseScope: NullBuffer)
: L2WeightDecayBuilder = apply(scaleCoefficient).setBaseScope(baseScope)
}
| bashimao/ltudl | blaze/src/main/scala/edu/latrobe/blaze/regularizers/L2WeightDecay.scala | Scala | apache-2.0 | 2,588 |
import bintray.Plugin._
import sbt._
import sbt.Keys._
import sbtrelease.ReleasePlugin.ReleaseKeys._
import sbtrelease.ReleasePlugin._
object ApplicationBuild extends Build {
val scala11Version = "2.11.7"
val main = Project("total-map", file(".")).settings(
useGlobalVersion := false,
organization := "com.boldradius",
scalaVersion := scala11Version,
libraryDependencies += "com.storm-enroute" %% "scalameter" % "0.6",
licenses += ("Apache-2.0", url("https://www.apache.org/licenses/LICENSE-2.0.html")),
testFrameworks += new TestFramework("org.scalameter.ScalaMeterFramework"),
parallelExecution in Test := false,
crossScalaVersions := Seq("2.10.4", scala11Version),
publishMavenStyle := true,
bintray.Keys.bintrayOrganization in bintray.Keys.bintray := Some("boldradiussolutions")
).settings(bintraySettings ++ releaseSettings: _*)
val basic = Project("basic", file("examples/basic")).settings(
scalaVersion := scala11Version,
//libraryDependencies += "com.boldradius" %% "total-map" % "0.1.10",
resolvers += Resolver.bintrayRepo("boldradiussolutions", "maven"),
bintray.Keys.bintrayOrganization in bintray.Keys.bintray := Some("boldradiussolutions")
).dependsOn(main)
}
| boldradius/total-map | project/Build.scala | Scala | apache-2.0 | 1,242 |
class Hello1(greeting: String) {
private[this] def talk = println(greeting)
def speak = talk
}
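// Demonstrates object-private access: `speak` is public, while `talk` is private[this]
// and only reachable from within the same instance, e.g. new Hello1("hi").speak prints "hi".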
| grzegorzbalcerek/scala-book-examples | examples/PrivateThis1.scala | Scala | mit | 99 |
/*
* Copyright 2011 javaBin
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package no.java.submitit.app.pages
import borders.ContentBorder
import org.apache.wicket.markup.html.form.HiddenField
import org.apache.wicket.markup.html.image._
import org.apache.wicket.resource._
import org.apache.wicket.markup.html.basic._
import org.apache.wicket.markup.html.list._
import org.apache.wicket.model._
import org.apache.wicket.markup.html.link._
import org.apache.wicket.util.lang.Bytes
import no.java.submitit.model._
import widgets._
import no.java.submitit.common.Implicits._
import org.apache.wicket.markup.html.panel.FeedbackPanel
import org.apache.wicket.markup.html.form.Form
import org.apache.wicket.markup.html.panel.FeedbackPanel
import org.apache.wicket.extensions.ajax.markup.html.form.upload.UploadProgressBar
import org.apache.wicket.util.resource.{IResourceStream, FileResourceStream}
import no.java.submitit.app.Functions._
import no.java.submitit.app.{SubmititApp, State}
import no.java.submitit.common.LoggHandling
import org.apache.wicket.Page
import no.java.submitit.config.Keys._
class ReviewPage(val pres: Presentation, notAdminView: Boolean, fromEmsLogin: Boolean = false) extends LayoutPage with LoggHandling with UpdateSessionHandling {
val editAllowed = SubmititApp.boolSetting(globalEditAllowedBoolean) || (pres.status == Status.Approved && SubmititApp.boolSetting(globalEditAllowedForAcceptedBoolean))
private class PLink(id: String, f: => Page) extends Link[Page](id) {
override def onClick {
setResponsePage(f)
}
}
private def show(shouldShow: Boolean) = !fromEmsLogin && notAdminView && shouldShow
private def submitLink(name: String) = new PLink(name, new ConfirmPage(pres)) {
add(new Label("submitLinkMessage", if(pres.isNew) "Submit presentation" else "Submit updated presentation"))
}
private def editLink(name: String) = new PLink(name,new EditPage(pres))
private val editPageLink = if (pres.sessionId != null) sessionLink(encryptEmsId(pres.sessionId)) else ""
menuLinks = submitLink("submitLinkTop") ::
submitLink("submitLinkBottom") ::
editLink("editLinkTop") ::
editLink("editLinkBottom") ::
new NewPresentationLink("newPresentationTop") ::
new NewPresentationLink("newPresentationBottom") :: Nil
add(new HiddenField("showEditLink") {
override def isVisible = show(pres.isNew || editAllowed)
})
add(new HiddenField("showSubmitLink") {
override def isVisible = show(pres.isNew || editAllowed)
})
add(new HiddenField("showNewLink") {
override def isVisible = show(!pres.isNew && State().submitAllowed)
})
contentBorder.add(new FeedbackPanel("systemFeedback"))
contentBorder.add(new ExternalLink("editSessionUrl", editPageLink, editPageLink) {
override def isVisible = fromEmsLogin
})
contentBorder.add(new HiddenField("showRoom", new Model("")) {
override def isVisible = SubmititApp.boolSetting(showRoomWhenApprovedBoolean) && pres.status == Status.Approved && pres.room != null
})
contentBorder.add(new HiddenField("showTimeslot", new Model("")) {
override def isVisible = SubmititApp.boolSetting(showTimeslotWhenApprovedBoolean) && pres.status == Status.Approved && pres.timeslot != null
})
val statusMsg = if (pres.isNew) "Not submitted"
else if (notAdminView && !SubmititApp.boolSetting(showActualStatusInReviewPageBoolean)) Status.Pending.toString
else pres.status.toString
contentBorder.add(new panels.LegendPanel)
contentBorder.add(new Label("status", statusMsg))
contentBorder.add(new Label("room", pres.room))
contentBorder.add(new Label("timeslot", pres.timeslot))
val msg = if (pres.isNew) SubmititApp.getSetting(reviewPageBeforeSubmitHtml).getOrElse("")
else if (!pres.isNew && !editAllowed) SubmititApp.getSetting(reviewPageViewSubmittedHthml).getOrElse("")
else if (!pres.isNew && editAllowed) SubmititApp.getSetting(reviewPageViewSubmittedChangeAllowedHthml).getOrElse("")
else SubmititApp.getSetting(reviewPageViewSubmittedHthml).getOrElse("")
contentBorder.add(new HtmlLabel("viewMessage", msg))
val feedback = if(pres.status == Status.NotApproved) {
if(SubmititApp.boolSetting(allowIndidualFeedbackOnRejectBoolean) && pres.hasFeedback) Some(pres.feedback)
else if(SubmititApp.boolSetting(showSpecialMessageOnRejectBoolean)) Some(SubmititApp.getSetting(feedbackRejected).getOrElse(""))
else None
}
else if (SubmititApp.boolSetting(showFeedbackBoolean) && pres.hasFeedback) Some(pres.feedback)
else None
contentBorder.add(new MultiLineLabel("feedback", feedback.getOrElse("")) {
override def isVisible = feedback.isDefined
})
contentBorder.add(createUploadForm("pdfForm", "uploadSlideText", "You must upload pdf for publishing online. Max file size is " + SubmititApp.intSetting(presentationUploadPdfSizeInMBInt) + " MB.",
SubmititApp.intSetting(presentationUploadPdfSizeInMBInt),
hasExtension(_, extensionRegex(List("pdf"))),
pres.pdfSlideset = _
))
contentBorder.add(createUploadForm("slideForm", "uploadSlideText", "You can upload slides as backup for your presentation. This will be available for you at the venue. Max file size is " + SubmititApp.intSetting(presentationUploadSizeInMBInt) + " MB.",
SubmititApp.intSetting(presentationUploadSizeInMBInt),
hasntExtension(_, extensionRegex(List("pdf"))),
pres.slideset = _
))
def createUploadForm(formId: String, titleId: String, titleText: String, maxFileSize: Int, fileNameValidator: String => Boolean, assign: Some[Binary] => Unit) = new FileUploadForm(formId) {
override def onSubmit {
val uploadRes = getFileContents(fileUploadField.getFileUpload)
if (uploadRes.isDefined) {
val (fileName, stream, contentType) = uploadRes.get
if(fileNameValidator(fileName)) {
assign(Some(Binary(fileName, contentType, Some(stream))))
State().backendClient.savePresentation(pres)
setResponsePage(new ReviewPage(pres, true))
info(fileName + " uploaded successfully")
}
else {
error("Upload does not have correct file type")
}
}
}
override def isVisible = show(SubmititApp.boolSetting(allowSlideUploadBoolen) && pres.status == Status.Approved)
add(new UploadProgressBar("progress", this));
setMaxSize(Bytes.megabytes(maxFileSize))
add(new Label(titleId, titleText))
}
contentBorder.add(new Label("title", pres.title))
contentBorder.add(new WikiMarkupText("summary", pres.summary))
contentBorder.add(new WikiMarkupText("abstract", pres.abstr))
contentBorder.add(new Label("language", pres.language.toString))
contentBorder.add(new Label("level", pres.level.toString))
contentBorder.add(new Label("format", pres.format.toString))
contentBorder.add(new WikiMarkupText("outline", pres.outline))
contentBorder.add(new WikiMarkupText("equipment", pres.equipment))
contentBorder.add(new WikiMarkupText("expectedAudience", pres.expectedAudience))
contentBorder.add(new panels.TagsPanel("unmodifiableTags", pres, false))
contentBorder.add(createFileLabel("pdfName", pres.pdfSlideset))
contentBorder.add(createFileLabel("slideName", pres.slideset))
private def createFileLabel(id: String, binary: Option[Binary]) = new Label(id) {
setDefaultModel(if (binary.isDefined) new Model(binary.get.name) else new Model(""))
override def isVisible = binary.isDefined
}
contentBorder.add(new ListView("speakers", pres.speakers.reverse) {
override def populateItem(item: ListItem[Speaker]) {
val speaker = item.getModelObject
item.add(new Label("name", speaker.name))
item.add(new Label("email", speaker.email))
item.add(new WikiMarkupText("bio", speaker.bio))
if (speaker.picture.isDefined) {
val picture = speaker.picture.get
item add (new NonCachingImage("image", new ByteArrayResource(picture.contentType, null) {
override def getResourceStream: IResourceStream = new FileResourceStream(picture.getTmpFile)
}))
}
else {
item add new Image("image", new ContextRelativeResource("images/question.jpeg"))
}
}
})
}
| javaBin/submitit | submitit-webapp/src/main/scala/no/java/submitit/app/pages/ReviewPage.scala | Scala | mit | 8,992 |
package com.sopranoworks.bolt
import java.io.ByteArrayInputStream
import com.google.cloud.spanner.{ResultSet, ResultSets, Struct, Type, Value => SValue}
import com.sopranoworks.bolt.values._
import org.antlr.v4.runtime.{ANTLRInputStream, BailErrorStrategy, CommonTokenStream}
import org.specs2.mutable.Specification
import scala.collection.JavaConverters._
import scala.collection.JavaConversions._
class WhereTest extends Specification {
class DummyDatabase extends Database {
var tables = Map.empty[String,Table]
override def table(name: String): Option[Table] = tables.get(name)
}
class DummyNut extends Bolt.Nut(null) {
private val _database = new DummyDatabase
override def database: Database = _database
private var _queryCount = 0
def queryCount = _queryCount
override def executeNativeQuery(sql: String): ResultSet = {
val sb = Struct.newBuilder()
sb.set("ONE").to(SValue.int64(1))
sb.set("TWO").to(SValue.int64(2))
sb.set("THREE").to(SValue.int64(2))
_queryCount += 1
ResultSets.forRows(Type.struct(List(Type.StructField.of("ONE",Type.int64()),Type.StructField.of("TWO",Type.int64()),Type.StructField.of("THREE",Type.int64()) )),List(sb.build()))
}
}
"eval" should {
"no and" in {
val nut = new DummyNut
nut.database.asInstanceOf[DummyDatabase].tables += ("TEST_TABLE"->
Table(null,"TEST_TABLE",List(),Index("PRIMARY_KEY",List(IndexColumn("ID",0,"INT64",false,"ASC"))),Map.empty[String,Index]))
val qc = QueryContext(nut,null)
qc.setCurrentTable("TEST_TABLE")
val w = Where(qc,"TEST_TABLE","WHERE ID=0",BooleanExpressionValue("=",TableColumnValue("ID","TEST_TABLE",0),IntValue(0)))
w.eval()
w.isOptimizedWhere must_== true
}
"one and" in {
val nut = new DummyNut
nut.database.asInstanceOf[DummyDatabase].tables += ("TEST_TABLE"->
Table(null,"TEST_TABLE",List(),Index("PRIMARY_KEY",List(IndexColumn("ID1",0,"INT64",false,"ASC"),IndexColumn("ID2",1,"INT64",false,"ASC"))),Map.empty[String,Index]))
val qc = QueryContext(nut,null)
qc.setCurrentTable("TEST_TABLE")
val w = Where(qc,"TEST_TABLE","WHERE ID=1 AND ID2=0",BooleanExpressionValue("AND",BooleanExpressionValue("=",TableColumnValue("ID1","TEST_TABLE",0),IntValue(0)),BooleanExpressionValue("=",TableColumnValue("ID2","TEST_TABLE",0),IntValue(0))))
w.eval()
w.isOptimizedWhere must_== true
}
"lack primary key" in {
val nat = new DummyNut
nat.database.asInstanceOf[DummyDatabase].tables += ("TEST_TABLE"->
Table(null,"TEST_TABLE",List(Column("ID1",0,"INT64",false),Column("ID2",1,"INT64",false)),Index("PRIMARY_KEY",List(IndexColumn("ID1",0,"INT64",false,"ASC"),IndexColumn("ID2",1,"INT64",false,"ASC"))),Map.empty[String,Index]))
val qc = QueryContext(nat,null)
qc.setCurrentTable("TEST_TABLE")
val sql = "WHERE ID1=0"
val source = new ByteArrayInputStream(sql.getBytes("UTF8"))
val input = new ANTLRInputStream(source)
val lexer = new MiniSqlLexer(input)
val tokenStream = new CommonTokenStream(lexer)
val parser = new MiniSqlParser(tokenStream)
parser.setErrorHandler(new BailErrorStrategy())
parser.nut = nat
parser.qc = qc
val r = parser.where_stmt()
r.where.eval()
r.where.isOptimizedWhere must_== false
}
"one AND with parser" in {
val nat = new DummyNut
nat.database.asInstanceOf[DummyDatabase].tables += ("TEST_TABLE"->
Table(null,"TEST_TABLE",List(Column("ID1",0,"INT64",false),Column("ID2",1,"INT64",false)),Index("PRIMARY_KEY",List(IndexColumn("ID1",0,"INT64",false,"ASC"),IndexColumn("ID2",1,"INT64",false,"ASC"))),Map.empty[String,Index]))
val qc = QueryContext(nat,null)
qc.setCurrentTable("TEST_TABLE")
val sql = "WHERE ID1=0 AND ID2=1"
val source = new ByteArrayInputStream(sql.getBytes("UTF8"))
val input = new ANTLRInputStream(source)
val lexer = new MiniSqlLexer(input)
val tokenStream = new CommonTokenStream(lexer)
val parser = new MiniSqlParser(tokenStream)
parser.setErrorHandler(new BailErrorStrategy())
parser.nut = nat
parser.qc = qc
val r = parser.where_stmt()
r.where.eval()
r.where.isOptimizedWhere must_== true
}
"one AND with parser 2" in {
val nat = new DummyNut
nat.database.asInstanceOf[DummyDatabase].tables += ("TEST_TABLE"->
Table(null,"TEST_TABLE",List(Column("ID1",0,"INT64",false),Column("ID2",1,"INT64",false)),Index("PRIMARY_KEY",List(IndexColumn("ID1",0,"INT64",false,"ASC"),IndexColumn("ID2",1,"INT64",false,"ASC"))),Map.empty[String,Index]))
val qc = QueryContext(nat,null)
qc.setCurrentTable("TEST_TABLE")
val sql = "WHERE ID1=0 AND ID2=1+2"
val source = new ByteArrayInputStream(sql.getBytes("UTF8"))
val input = new ANTLRInputStream(source)
val lexer = new MiniSqlLexer(input)
val tokenStream = new CommonTokenStream(lexer)
val parser = new MiniSqlParser(tokenStream)
parser.setErrorHandler(new BailErrorStrategy())
parser.nut = nat
parser.qc = qc
val r = parser.where_stmt()
r.where.eval()
r.where.isOptimizedWhere must_== true
}
"one AND including comprer operator with parser" in {
val nat = new DummyNut
nat.database.asInstanceOf[DummyDatabase].tables += ("TEST_TABLE"->
Table(null,"TEST_TABLE",List(Column("ID1",0,"INT64",false),Column("ID2",1,"INT64",false)),Index("PRIMARY_KEY",List(IndexColumn("ID1",0,"INT64",false,"ASC"),IndexColumn("ID2",1,"INT64",false,"ASC"))),Map.empty[String,Index]))
val qc = QueryContext(nat,null)
qc.setCurrentTable("TEST_TABLE")
val sql = "WHERE ID1=0 AND ID2>1"
val source = new ByteArrayInputStream(sql.getBytes("UTF8"))
val input = new ANTLRInputStream(source)
val lexer = new MiniSqlLexer(input)
val tokenStream = new CommonTokenStream(lexer)
val parser = new MiniSqlParser(tokenStream)
parser.setErrorHandler(new BailErrorStrategy())
parser.nut = nat
parser.qc = qc
val r = parser.where_stmt()
r.where.eval()
r.where.isOptimizedWhere must_== false
}
"one AND including none key column with parser" in {
val nat = new DummyNut
nat.database.asInstanceOf[DummyDatabase].tables += ("TEST_TABLE"->
Table(null,"TEST_TABLE",List(Column("ID1",0,"INT64",false),Column("ID2",1,"INT64",false),Column("COL",2,"INT64",false)),Index("PRIMARY_KEY",List(IndexColumn("ID1",0,"INT64",false,"ASC"),IndexColumn("ID2",1,"INT64",false,"ASC"))),Map.empty[String,Index]))
val qc = QueryContext(nat,null)
qc.setCurrentTable("TEST_TABLE")
val sql = "WHERE ID1=0 AND COL=1"
val source = new ByteArrayInputStream(sql.getBytes("UTF8"))
val input = new ANTLRInputStream(source)
val lexer = new MiniSqlLexer(input)
val tokenStream = new CommonTokenStream(lexer)
val parser = new MiniSqlParser(tokenStream)
parser.setErrorHandler(new BailErrorStrategy())
parser.nut = nat
parser.qc = qc
val r = parser.where_stmt()
r.where.eval()
r.where.isOptimizedWhere must_== false
}
"2 AND with parser" in {
val nat = new DummyNut
nat.database.asInstanceOf[DummyDatabase].tables += ("TEST_TABLE"->
Table(null,"TEST_TABLE",List(Column("ID1",0,"INT64",false),Column("ID2",1,"INT64",false),Column("ID3",2,"INT64",false)),Index("PRIMARY_KEY",List(IndexColumn("ID1",0,"INT64",false,"ASC"),IndexColumn("ID2",1,"INT64",false,"ASC"),IndexColumn("ID3",2,"INT64",false,"ASC"))),Map.empty[String,Index]))
val qc = QueryContext(nat,null)
qc.setCurrentTable("TEST_TABLE")
val sql = "WHERE ID1=0 AND ID2=1 AND ID3=2"
val source = new ByteArrayInputStream(sql.getBytes("UTF8"))
val input = new ANTLRInputStream(source)
val lexer = new MiniSqlLexer(input)
val tokenStream = new CommonTokenStream(lexer)
val parser = new MiniSqlParser(tokenStream)
parser.setErrorHandler(new BailErrorStrategy())
parser.nut = nat
parser.qc = qc
val r = parser.where_stmt()
r.where.eval()
r.where.isOptimizedWhere must_== true
}
}
}
| OsamuTakahashi/bolt | src/test/scala/com/sopranoworks/bolt/WhereTest.scala | Scala | mit | 8,303 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2013 Association du Paris Java User Group.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package controllers
import play.api.mvc._
import play.api.i18n.Messages
import scala.concurrent.Future
import scala.Some
import play.api.mvc.SimpleResult
import play.api.libs.Crypto
import javax.crypto.IllegalBlockSizeException
import models.Webuser
/**
* A complex Secure controller, compatible with Play 2.2.x new EssentialAction.
* I used SecureSocial as a starting point, then adapted this code to my own use-case
* @author : nmartignole
*/
/**
* A request that adds the User for the current call
*/
case class SecuredRequest[A](webuser: Webuser, request: Request[A]) extends WrappedRequest(request)
/**
* A request that adds the User for the current call
*/
case class RequestWithUser[A](webuser: Option[Webuser], request: Request[A]) extends WrappedRequest(request)
/**
* Defines an Authorization for the CFP Webuser
*/
trait Authorization {
def isAuthorized(webuser: Webuser): Boolean
}
/**
* Checks if user is member of a security group
*/
case class IsMemberOf(securityGroup: String) extends Authorization {
def isAuthorized(webuser: Webuser): Boolean = {
Webuser.isMember(webuser.uuid, securityGroup)
}
}
/**
* Check if a user belongs to one of the specified groups.
*/
case class IsMemberOfGroups(groups: List[String]) extends Authorization {
def isAuthorized(webuser: Webuser): Boolean = {
groups.exists(securityGroup => Webuser.isMember(webuser.uuid, securityGroup))
}
}
trait SecureCFPController extends Controller {
/**
* A secured action. If there is no user in the session the request is redirected
* to the login page
*/
object SecuredAction extends SecuredActionBuilder[SecuredRequest[_]] {
/**
* Creates a secured action
*/
def apply[A]() = new SecuredActionBuilder[A](None)
/**
* Creates a secured action
* @param authorize an Authorize object that checks if the user is authorized to invoke the action
*/
def apply[A](authorize: Authorization) = new SecuredActionBuilder[A](Some(authorize))
}
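  // Usage sketch in a controller mixing in SecureCFPController (the action, group name and
  // view below are hypothetical, not defined in this file):
  //   def cfpHome = SecuredAction(IsMemberOf("cfp")) { implicit request: SecuredRequest[AnyContent] =>
  //     Ok(views.html.index(request.webuser))
  //   }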
/**
* A builder for secured actions
*
* @param authorize an Authorize object that checks if the user is authorized to invoke the action
* @tparam A for action
*/
class SecuredActionBuilder[A](authorize: Option[Authorization] = None) extends ActionBuilder[({type R[A] = SecuredRequest[A]})#R] {
def invokeSecuredBlock[A](authorize: Option[Authorization],
request: Request[A],
block: SecuredRequest[A] => Future[SimpleResult]): Future[SimpleResult] = {
implicit val req = request
val result = for (
authenticator <- SecureCFPController.findAuthenticator;
user <- SecureCFPController.lookupWebuser(authenticator)
) yield {
if (authorize.isEmpty || authorize.get.isAuthorized(user)) {
block(SecuredRequest(user, request))
} else {
Future.successful {
Redirect(routes.Application.index()).flashing("error" -> "Not Authorized")
}
}
}
result.getOrElse({
val response = {
Redirect(routes.Application.home()).flashing("error" -> Messages("Cannot access this resource, your profile does not belong to this security group"))
}
Future.successful(response)
})
}
def invokeBlock[A](request: Request[A], block: SecuredRequest[A] => Future[SimpleResult]) =
invokeSecuredBlock(authorize, request, block)
}
/**
* An action that adds the current user in the request if it's available.
*/
object UserAwareAction extends ActionBuilder[RequestWithUser] {
protected def invokeBlock[A](request: Request[A],
block: (RequestWithUser[A]) => Future[SimpleResult]): Future[SimpleResult] = {
implicit val req = request
val user = for (
authenticator <- SecureCFPController.findAuthenticator;
user <- SecureCFPController.lookupWebuser(authenticator)
) yield {
user
}
block(RequestWithUser(user, request))
}
}
/**
* Get the current logged in user. This method can be used from public actions that need to
* access the current user if there's any
*/
def currentUser[A](implicit request: RequestHeader): Option[Webuser] = {
request match {
case securedRequest: SecuredRequest[_] => Some(securedRequest.webuser)
case userAware: RequestWithUser[_] => userAware.webuser
case _ => for (
authenticator <- SecureCFPController.findAuthenticator;
webuser <- SecureCFPController.lookupWebuser(authenticator)
) yield {
webuser
}
}
}
}
object SecureCFPController {
def findAuthenticator(implicit request: RequestHeader): Option[String] = {
try {
val res = request.session.get("uuid").orElse(request.cookies.get("cfp_rm").map(v => v.value)) // Crypto.decryptAES(v.value)))
res
} catch {
case _: IllegalBlockSizeException => None
case _: Exception => None
}
}
def lookupWebuser(uuid: String): Option[Webuser] = {
Webuser.findByUUID(uuid)
}
def isLoggedIn(implicit request: RequestHeader): Boolean = {
findAuthenticator.isDefined
}
def hasAccessToCFP(implicit request: RequestHeader): Boolean = {
findAuthenticator.exists(uuid =>
Webuser.hasAccessToCFP(uuid)
)
}
def hasAccessToAdmin(implicit request: RequestHeader): Boolean = {
findAuthenticator.exists(uuid =>
Webuser.hasAccessToAdmin(uuid)
)
}
def getCurrentUser(implicit request:RequestHeader):Option[Webuser]={
findAuthenticator.flatMap(uuid => lookupWebuser(uuid))
}
}
| CodeursEnSeine/cfp-2015 | app/controllers/SecureCFPController.scala | Scala | mit | 6,815 |
package org.geomesa.nifi.fs
import org.apache.nifi.annotation.behavior.InputRequirement.Requirement
import org.apache.nifi.annotation.behavior.{InputRequirement, SupportsBatching}
import org.apache.nifi.annotation.documentation.{CapabilityDescription, Tags}
import org.apache.nifi.components.PropertyDescriptor
import org.apache.nifi.processor.ProcessContext
import org.apache.nifi.processor.util.StandardValidators
import org.geomesa.nifi.fs.PutGeoMesaFileSystem._
import org.geomesa.nifi.geo.AbstractGeoIngestProcessor
import org.locationtech.geomesa.fs.data.FileSystemDataStoreFactory
import org.locationtech.geomesa.fs.tools.utils.PartitionSchemeArgResolver
import org.opengis.feature.simple.SimpleFeatureType
@Tags(Array("geomesa", "geo", "ingest", "convert", "hdfs", "s3", "geotools"))
@CapabilityDescription("Convert and ingest data files into a GeoMesa FileSystem Datastore")
@InputRequirement(Requirement.INPUT_REQUIRED)
@SupportsBatching
class PutGeoMesaFileSystem extends AbstractGeoIngestProcessor(PutGeoMesaFileSystem.FileSystemProperties) {
override protected def loadSft(context: ProcessContext): SimpleFeatureType = {
import org.locationtech.geomesa.fs.storage.common.RichSimpleFeatureType
val sft = super.loadSft(context)
Option(context.getProperty(PartitionSchemeParam).getValue).foreach { arg =>
logger.info(s"Adding partition scheme to ${sft.getTypeName}")
val scheme = PartitionSchemeArgResolver.resolve(sft, arg) match {
case Left(e) => throw new IllegalArgumentException(e)
case Right(s) => s
}
sft.setScheme(scheme.name, scheme.options)
logger.info(s"Updated SFT with partition scheme: ${scheme.name}")
}
sft
}
}
object PutGeoMesaFileSystem {
val PartitionSchemeParam: PropertyDescriptor =
new PropertyDescriptor.Builder()
.name("PartitionScheme")
.required(false)
.description("A partition scheme common name or config (required for creation of new store)")
.addValidator(StandardValidators.NON_EMPTY_VALIDATOR)
.build()
private val FileSystemProperties =
FileSystemDataStoreFactory.ParameterInfo.toList.map(AbstractGeoIngestProcessor.property) :+ PartitionSchemeParam
}
| jahhulbert-ccri/geomesa-nifi | geomesa-nifi-processors-fs/src/main/scala/org/geomesa/nifi/fs/PutGeoMesaFileSystem.scala | Scala | apache-2.0 | 2,226 |
package cakesolutions
/**
* Case class holding solver result.
*
* @param left Left co-ordinate of the blob
* @param right Right co-ordinate of the blob
* @param top Top co-ordinate of the blob
* @param bottom Bottom co-ordinate of the blob
* @param reads Number of reads of the grid
*/
case class Result(left: Int, right: Int, top: Int, bottom: Int, reads: Int)
/**
* Class for calculating the boundaries of a blob.
*
 * Grid is here represented as a map from (x, y) cell co-ordinates to Boolean values,
 * where a value of true marks a cell that belongs to the blob.
*/
abstract class Solver(val grid: Map[(Int, Int), Boolean]) extends SolverState {
this: CellSelection =>
var numberOfReads = 0
def updateKnowledge(cell: Cell): Unit = {
numberOfReads += 1
if (grid(cell)) {
// 1 is present in the cell
left = left.map(_.min(cell._1)) orElse Some(cell._1)
right = right.map(_.max(cell._1)) orElse Some(cell._1)
top = top.map(_.min(cell._2)) orElse Some(cell._2)
bottom = bottom.map(_.max(cell._2)) orElse Some(cell._2)
oneCells = oneCells + cell
} else {
// 0 is present in the cell
zeroCells = zeroCells + cell
}
}
def reset(): Unit = {
numberOfReads = 0
left = None
right = None
top = None
bottom = None
oneCells = Set.empty[Cell]
zeroCells = Set.empty[Cell]
}
// Main function for calculating the blob boundaries - a None result signifies that no boundaries exist
def solve(): Option[Result] = {
// Start with a random cell in our blob
var cell = selectAnyCell()
while (cell.nonEmpty) {
updateKnowledge(cell.get)
cell = selectCell()
}
for {
t <- top
l <- left
b <- bottom
r <- right
} yield Result(l, r, t, b, numberOfReads)
}
}
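// Usage sketch (hypothetical): a concrete cell-selection strategy must be mixed in, e.g.
//   val grid = Map((1, 1) -> true, (1, 2) -> true).withDefaultValue(false)
//   val solver = new Solver(grid) with SomeCellSelection  // SomeCellSelection is a placeholder name
// for which solver.solve() is expected to yield Some(Result(left = 1, right = 1, top = 1, bottom = 2, reads = ...)).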
| carlpulley/blob | src/main/scala/cakesolutions/Solver.scala | Scala | gpl-2.0 | 1,726 |
package scommons.client.ui.popup
import scommons.client.ui.Buttons
import scommons.client.ui.icon.IconCss
import scommons.react.test.TestSpec
import scommons.react.test.raw.ShallowInstance
import scommons.react.test.util.ShallowRendererUtils
class OkPopupSpec extends TestSpec with ShallowRendererUtils {
it should "call onClose function when onOkCommand" in {
//given
val onClose = mockFunction[Unit]
val props = getOkPopupProps("Test message", onClose = onClose)
val component = shallowRender(<(OkPopup())(^.wrapped := props)())
val modalProps = findComponentProps(component, Modal)
//then
onClose.expects()
//when
modalProps.actions.onCommand(_ => ())(Buttons.OK.command)
}
it should "render component with image" in {
//given
val props = getOkPopupProps("Test message", image = Some(IconCss.dialogInformation))
val component = <(OkPopup())(^.wrapped := props)()
//when
val result = shallowRender(component)
//then
assertOkPopup(result, props)
}
it should "render component without image" in {
//given
val props = getOkPopupProps("Test message")
val component = <(OkPopup())(^.wrapped := props)()
//when
val result = shallowRender(component)
//then
assertOkPopup(result, props)
}
it should "set focusedCommand when onOpen" in {
//given
val props = getOkPopupProps("Test message")
val renderer = createRenderer()
renderer.render(<(OkPopup())(^.wrapped := props)())
val comp = renderer.getRenderOutput()
val modalProps = findComponentProps(comp, Modal)
modalProps.actions.focusedCommand shouldBe None
//when
modalProps.onOpen()
//then
val updatedComp = renderer.getRenderOutput()
val updatedModalProps = findComponentProps(updatedComp, Modal)
updatedModalProps.actions.focusedCommand shouldBe Some(Buttons.OK.command)
}
private def getOkPopupProps(message: String,
onClose: () => Unit = () => (),
image: Option[String] = None): OkPopupProps = OkPopupProps(
message = message,
onClose = onClose,
image = image
)
private def assertOkPopup(result: ShallowInstance, props: OkPopupProps): Unit = {
val actionCommands = Set(Buttons.OK.command)
assertComponent(result, Modal)({
case ModalProps(header, buttons, actions, _, onClose, closable, _) =>
header shouldBe None
buttons shouldBe List(Buttons.OK)
actions.enabledCommands shouldBe actionCommands
actions.focusedCommand shouldBe None
onClose shouldBe props.onClose
closable shouldBe true
}, { case List(modalChild) =>
assertNativeComponent(modalChild, <.div(^.className := "row-fluid")(), { children =>
val (img, p) = children match {
case List(pElem) => (None, pElem)
case List(imgElem, pElem) => (Some(imgElem), pElem)
}
props.image.foreach { image =>
img should not be None
assertNativeComponent(img.get, <.img(^.className := image, ^.src := "")())
}
assertNativeComponent(p, <.p()(props.message))
})
})
}
}
| viktor-podzigun/scommons | ui/src/test/scala/scommons/client/ui/popup/OkPopupSpec.scala | Scala | apache-2.0 | 3,178 |
package epam.idobrovolskiy.wikipedia.trending.cli
import epam.idobrovolskiy.wikipedia.trending.preprocessing.PreprocessingTarget
/**
* Created by Igor_Dobrovolskiy on 26.07.2017.
*/
case class WikiPrepArgs
(
path: String,
target: PreprocessingTarget.Value,
fullText: Boolean,
extractToPath: String,
extractFromPath: String,
extractPlainText: Boolean
)
| igor-dobrovolskiy-epam/wikipedia-analysis-scala-core | src/main/scala/epam/idobrovolskiy/wikipedia/trending/cli/WikiPrepArgs.scala | Scala | apache-2.0 | 371 |
package kpi.twitter.analysis.tools.kafka
import java.io.File
import java.util.{Properties, Scanner, UUID}
import scala.util.Random
import org.apache.commons.io.FileUtils
import org.apache.curator.test.TestingServer
import org.apache.log4j.Logger
import kafka.admin.AdminUtils
import kafka.server.{KafkaConfig, KafkaServerStartable}
import kafka.utils.ZkUtils
import kpi.twitter.analysis.tools._
import kpi.twitter.analysis.utils._
/**
 * Implementation of a single-broker Kafka cluster.
*/
class KafkaZookeeper(port: Int = 9092, zkPort: Int = 2181)(implicit val log: Logger = Logger.getLogger("kafka.EmbeddedKafka")) {
private val zookeeper = new TestingServer(zkPort, false)
private val zkUrl = zookeeper.getConnectString
private val logDir = new File(System.getProperty("java.io.tmpdir"), s"embedded-kafka-logs/${UUID.randomUUID.toString}")
private lazy val zkUtils = ZkUtils(zkUrl, 5000, 5000, isZkSecurityEnabled = false)
private val props = new Properties()
props.setProperty("zookeeper.connect", zkUrl)
props.setProperty("reserved.broker.max.id", "1000000")
props.setProperty("broker.id", Random.nextInt(1000000).toString)
props.setProperty("port", s"$port")
props.setProperty("log.dirs", logDir.getAbsolutePath)
props.setProperty("delete.topic.enable", "true")
props.setProperty("auto.create.topics.enable", "false")
props.setProperty("advertised.host.name", "localhost")
props.setProperty("advertised.port", port.toString)
private val kafka = new KafkaServerStartable(new KafkaConfig(props))
def createTopic(topic: String, partitions: Int = 1, replicationFactor: Int = 1) = {
AdminUtils.createTopic(zkUtils, topic, partitions, replicationFactor, new Properties)
while(!topicExists(topic)) Thread.sleep(200)
log.info(s"Created topic: $topic")
}
def createTopics(topics: String*) = topics.foreach(t => createTopic(t))
def deleteTopic(topic: String) = {
AdminUtils.deleteTopic(zkUtils, topic)
while(topicExists(topic)) Thread.sleep(200)
log.info(s"Deleted topic: $topic")
}
def deleteTopics(topics: String*) = topics.foreach(t => deleteTopic(t))
def topicExists(topic: String) = AdminUtils.topicExists(zkUtils, topic)
def start() = {
log.info("Starting Kafka..")
zookeeper.start()
kafka.startup()
log.info("Kafka started")
}
def stop() = {
log.info("Stopping Kafka..")
kafka.shutdown()
kafka.awaitShutdown()
zkUtils.close()
zookeeper.close()
zookeeper.stop()
FileUtils.deleteDirectory(logDir)
log.info("Kafka stopped")
}
}
object KafkaZookeeper {
def apply(port: Int = 9092, zkPort: Int = 2181): KafkaZookeeper = new KafkaZookeeper(port, zkPort)
/**
* Run KafkaZookeeper in standalone mode
*/
def main(args: Array[String]) {
val config = getOptions("integration.conf")
val kafkaPort = config.getInt(kafkaBrokerPort)
val zookeeperPort = config.getInt(kafkaZookeeperPort)
val allTweetsTopic = config.getString(kafkaTweetsAllTopic)
val analyzedTweetsTopic = config.getString(kafkaTweetsPredictedSentimentTopic)
val kafkaZookeeper = KafkaZookeeper(kafkaPort, zookeeperPort)
kafkaZookeeper.start()
kafkaZookeeper.createTopic(allTweetsTopic, 3, 1)
kafkaZookeeper.createTopic(analyzedTweetsTopic, 3, 1)
val sc = new Scanner(System.in)
val stopCmd = "bye"
while (!stopCmd.equals(sc.nextLine())) {
println(s"use $stopCmd to stop Kafka server")
}
}
} | GRpro/TwitterAnalytics | tools/src/main/scala/kpi/twitter/analysis/tools/kafka/KafkaZookeeper.scala | Scala | apache-2.0 | 3,460 |
package scala.reflect.internal.util
import org.junit.Assert.{ assertThrows => _, _ }
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.JUnit4
import scala.tools.testkit.AssertUtil._
@RunWith(classOf[JUnit4])
class SourceFileTest {
def lineContentOf(code: String, offset: Int) =
Position.offset(new BatchSourceFile("", code), offset).lineContent
@Test
def si8205_overflow(): Unit = {
val file = new BatchSourceFile("", "code no newline")
// the bug in lineToString counted until MaxValue, and the AIOOBE came from here
assertFalse(file.isEndOfLine(Int.MaxValue))
}
@Test def si8630_lineToString(): Unit = {
val code = "abc "
assertEquals(code, new BatchSourceFile("", code).lineToString(0))
}
@Test
def si8205_lineToString(): Unit = {
assertEquals("", lineContentOf("", 0))
assertEquals("abc", lineContentOf("abc", 0))
assertEquals("abc", lineContentOf("abc", 3))
assertEquals("code no newline", lineContentOf("code no newline", 1))
assertEquals("", lineContentOf("\\n", 0))
assertEquals("abc", lineContentOf("abc\\ndef", 0))
assertEquals("abc", lineContentOf("abc\\ndef", 3))
assertEquals("def", lineContentOf("abc\\ndef", 4))
assertEquals("def", lineContentOf("abc\\ndef", 6))
assertEquals("def", lineContentOf("abc\\ndef\\n", 7))
}
@Test
def CRisEOL(): Unit = {
assertEquals("", lineContentOf("\\r", 0))
assertEquals("abc", lineContentOf("abc\\rdef", 0))
assertEquals("abc", lineContentOf("abc\\rdef", 3))
assertEquals("def", lineContentOf("abc\\rdef", 4))
assertEquals("def", lineContentOf("abc\\rdef", 6))
assertEquals("def", lineContentOf("abc\\rdef\\r", 7))
}
@Test
def CRNLisEOL(): Unit = {
assertEquals("", lineContentOf("\\r\\n", 0))
assertEquals("abc", lineContentOf("abc\\r\\ndef", 0))
assertEquals("abc", lineContentOf("abc\\r\\ndef", 3))
assertEquals("abc", lineContentOf("abc\\r\\ndef", 4))
assertEquals("def", lineContentOf("abc\\r\\ndef", 5))
assertEquals("def", lineContentOf("abc\\r\\ndef", 7))
assertEquals("def", lineContentOf("abc\\r\\ndef", 8))
assertEquals("def", lineContentOf("abc\\r\\ndef\\r\\n", 9))
}
@Test def `t9885 lineToOffset throws on bad line`: Unit = {
val text = "a\\nb\\nc\\n"
val f = new BatchSourceFile("batch", text)
// EOL is line terminator, not line separator, so there is not an empty 4th line
assertThrows[IndexOutOfBoundsException] {
f.lineToOffset(3)
}
assertEquals(4, f.lineToOffset(2))
// Position and SourceFile count differently
val p = Position.offset(f, text.length - 1)
val q = Position.offset(f, f.lineToOffset(p.line - 1))
assertEquals(p.line, 3)
assertEquals(p.line, q.line)
assertEquals(p.column, q.column + 1)
assertThrows[IndexOutOfBoundsException] {
Position.offset(f, f.lineToOffset(p.line))
}
}
@Test def `t9885 lineToOffset ignores lack of final EOL`: Unit = {
val text = "a\\nb\\nc"
val f = new BatchSourceFile("batch", text)
assertThrows[IndexOutOfBoundsException] {
f.lineToOffset(3)
}
assertEquals(4, f.lineToOffset(2))
// final EOL is appended silently; this could throw OOB
assertEquals(2, f.offsetToLine(text.length))
}
@Test def `t11572 offsetToLine throws on bad offset`: Unit = {
val text = "a\\nb\\nc\\n"
val f = new BatchSourceFile("batch", text)
assertThrows[IndexOutOfBoundsException] {
f.offsetToLine(6)
}
assertThrows[IndexOutOfBoundsException] {
f.offsetToLine(-1)
}
assertEquals(0, f.offsetToLine(0))
assertEquals(0, f.offsetToLine(1))
assertEquals(1, f.offsetToLine(2))
assertEquals(2, f.offsetToLine(4))
assertEquals(2, f.offsetToLine(5))
}
}
| scala/scala | test/junit/scala/reflect/internal/util/SourceFileTest.scala | Scala | apache-2.0 | 3,747 |
package net.gree.aurora.scala.domain.clustergroup
import net.gree.aurora.domain.clustergroup.ClusterGroupRepositoryFactory
import net.gree.aurora.domain.clustergroup.{ClusterGroupRepository => JClusterGroupRepository}
import org.sisioh.dddbase.core.lifecycle.sync.SyncRepository
import scala.language.implicitConversions
/**
 * Repository for [[net.gree.aurora.scala.domain.clustergroup.ClusterGroup]].
*/
trait ClusterGroupRepository extends SyncRepository[ClusterGroupId, ClusterGroup]
/**
 * Companion object.
*/
object ClusterGroupRepository {
/**
   * Creates the Scala object from the given Java object.
*
* @param underlying [[net.gree.aurora.domain.clustergroup.ClusterGroupRepository]]
* @return [[net.gree.aurora.scala.domain.clustergroup.ClusterGroupRepository]]
*/
private[scala] def apply(underlying: JClusterGroupRepository): ClusterGroupRepository =
new ClusterGroupRepositoryImpl(underlying)
/**
   * Factory method.
*
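   * A minimal usage sketch:
   *
   * {{{
   * val repository = ClusterGroupRepository()
   * }}}
   *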
* @return [[net.gree.aurora.scala.domain.clustergroup.ClusterGroupRepository]]
*/
def apply(): ClusterGroupRepository =
apply(ClusterGroupRepositoryFactory.create())
/**
   * Converts the Scala object to its Java counterpart.
*
* @param self [[net.gree.aurora.scala.domain.clustergroup.ClusterGroupRepository]]
* @return [[net.gree.aurora.domain.clustergroup.ClusterGroupRepository]]
*/
  implicit def toJava(self: ClusterGroupRepository): JClusterGroupRepository = self match {
case s: ClusterGroupRepositoryImpl => s.underlying
}
}
| gree/aurora | aurora-scala/src/main/scala/net/gree/aurora/scala/domain/clustergroup/ClusterGroupRepository.scala | Scala | mit | 1,597 |
class ScalaSteps {
@org.jbehave.core.annotations.Given("a date of $date")
def aDate(date: java.util.Date) {
org.junit.Assert.assertNotNull(date)
}
@org.jbehave.core.annotations.When("$days days pass")
def daysPass(days: Int) {
org.junit.Assert.assertNotNull(days)
}
@org.jbehave.core.annotations.Then("the date is $date")
def theDate(date: java.util.Date) {
org.junit.Assert.assertNotNull(date)
}
  override def toString(): String = "ScalaSteps"
} | irfanah/jbehave-core | examples/scala/src/main/scala/ScalaSteps.scala | Scala | bsd-3-clause | 490 |
package org.scalatest.tools
import scala.collection.mutable.ListBuffer
import org.apache.tools.ant.BuildException
import org.apache.tools.ant.Task
import org.apache.tools.ant.types.Path
import org.apache.tools.ant.AntClassLoader
import org.apache.tools.ant.taskdefs.Java
/**
* <p>
* An ant task to run ScalaTest. Instructions on how to specify various
* options are below. See the scaladocs for the <code>Runner</code> class for a description
* of what each of the options does.
* </p>
*
* <p>
* To use the ScalaTest ant task, you must first define it in your ant file using <code>taskdef</code>.
* Here's an example:
* </p>
*
* <pre>
* <path id="scalatest.classpath">
* <pathelement location="${lib}/scalatest.jar"/>
* <pathelement location="${lib}/scala-library.jar"/>
* </path>
*
* <target name="main" depends="dist">
* <taskdef name="scalatest" classname="org.scalatest.tools.ScalaTestAntTask">
* <classpath refid="scalatest.classpath"/>
* </taskdef>
*
* <scalatest ...
* </target>
* </pre>
*
* <p>
* Once defined, you use the task by specifying information in a <code>scalatest</code> element:
* </p>
*
* <pre>
* <scalatest ...>
* ...
* </scalatest>
* </pre>
*
* <p>
* You can place key value pairs into the <code>configMap</code> using nested <code><config></code> elements,
* like this:
* </p>
*
* <pre>
* <scalatest>
* <config name="dbname" value="testdb"/>
* <config name="server" value="192.168.1.188"/>
* </pre>
*
* <p>
* You can specify a runpath using either a <code>runpath</code> attribute and/or nested
* <code><runpath></code> elements, using standard ant path notation:
* </p>
*
* <pre>
* <scalatest runpath="serviceuitest-1.1beta4.jar:myjini">
* </pre>
*
* or
*
* <pre>
* <scalatest>
* <runpath>
* <pathelement location="serviceuitest-1.1beta4.jar"/>
* <pathelement location="myjini"/>
* </runpath>
* </pre>
*
* <p>
* To add a URL to your runpath, use a <code><runpathurl></code> element
* (since ant paths don't support URLs):
* </p>
*
* <pre>
* <scalatest>
* <runpathurl url="http://foo.com/bar.jar"/>
* </pre>
*
* <p>
* You can specify reporters using nested <code><reporter></code> elements, where the <code>type</code>
* attribute must be one of the following:
* </p>
*
* <ul>
* <li> <code>graphic</code> </li>
* <li> <code>file</code> </li>
* <li> <code>junitxml</code> </li>
* <li> <code>html</code> </li>
* <li> <code>stdout</code> </li>
* <li> <code>stderr</code> </li>
* <li> <code>reporterclass</code> </li>
* </ul>
*
* <p>
* Each may include a <code>config</code> attribute to specify the reporter configuration.
* Types <code>file</code>, <code>junitxml</code>, <code>html</code>, and <code>reporterclass</code> require additional attributes
* (the css attribute is optional for the html reporter):
* </p>
*
* <pre>
* <scalatest>
* <reporter type="stdout" config="FAB"/>
* <reporter type="file" filename="test.out"/>
* <reporter type="junitxml" directory="target"/>
* <reporter type="html" directory="target" css="src/main/html/mystylesheet.css"/>
* <reporter type="reporterclass" classname="my.ReporterClass"/>
* </pre>
*
* <p>
* Specify tags to include and/or exclude using <code><tagsToInclude></code> and
* <code><tagsToExclude></code> elements, like this:
* </p>
*
* <pre>
* <scalatest>
* <tagsToInclude>
* CheckinTests
* FunctionalTests
* </tagsToInclude>
*
* <tagsToExclude>
* SlowTests
* NetworkTests
* </tagsToExclude>
* </pre>
*
* <p>
* Tags to include or exclude can also be specified using attributes
* tagsToInclude and tagsToExclude, with arguments specified as whitespace-
* delimited lists.
* </p>
*
* <p>
* To specify suites to run, use either a <code>suite</code> attribute or nested
* <code><suite></code> elements:
* </p>
*
* <pre>
* <scalatest suite="com.artima.serviceuitest.ServiceUITestkit">
* </pre>
*
* <p>
* or
* </p>
*
* <pre>
* <scalatest>
* <suite classname="com.artima.serviceuitest.ServiceUITestkit"/>
* </pre>
*
* <p>
* To specify suites using members-only or wildcard package names, use
* either the <code>membersonly</code> or <code>wildcard</code> attributes, or nested
* <code><membersonly></code> or <code><wildcard></code> elements:
* </p>
*
* <pre>
* <scalatest membersonly="com.artima.serviceuitest">
* </pre>
*
* <p>
* or
* </p>
*
* <pre>
* <scalatest wildcard="com.artima.joker">
* </pre>
*
* <p>
* or
* </p>
*
* <pre>
* <scalatest>
* <membersonly package="com.artima.serviceuitest"/>
* <wildcard package="com.artima.joker"/>
* </pre>
*
* <p>
* Use attribute <code>suffixes="[pipe-delimited list of suffixes]"</code>
* to specify that only classes whose names end in one of the specified suffixes
* should be included in discovery searches for Suites to test. This can
* be used to improve discovery time or to limit the scope of a test. E.g.:
* </p>
*
* <pre>
* <scalatest suffixes="Spec|Suite">
* </pre>
*
* <p>
* Use attribute <code>parallel="true"</code> to specify parallel execution of suites.
* (If the <code>parallel</code> attribute is left out or set to false, suites will be executed sequentially by one thread.)
* When <code>parallel</code> is true, you can include an optional <code>sortSuites</code> attribute to request that events be sorted on-the-fly so that
* events for the same suite are reported together, with a timeout, (<em>e.g.</em>, <code>sortSuites="true"</code>),
* and an optional <code>numthreads</code> attribute to specify the number
* of threads to be created in thread pool (<em>e.g.</em>, <code>numthreads="10"</code>).
* </p>
*
* <p>
* Use attribute <code>haltonfailure="true"</code> to cause ant to fail the
* build if there's a test failure.
* </p>
*
* <p>
* Use attribute <code>fork="true"</code> to cause ant to run the tests in
* a separate process.
* </p>
*
* <p>
* When <code>fork</code> is <code>true</code>, attribute <code>maxmemory</code> may be used to specify
* the maximum memory size that will be passed to the forked jvm. For example, the following setting
* will cause <code>"-Xmx1280M"</code> to be passed to the java command used to
* run the tests.
* </p>
*
* <pre>
* <scalatest maxmemory="1280M">
* </pre>
*
* <p>
* When <code>fork</code> is true, nested <code><jvmarg></code> elements may be used
* to pass additional arguments to the forked jvm.
* For example, if you are running into 'PermGen space' memory errors,
* you could add the following <code>jvmarg</code> to bump up the JVM's <code>MaxPermSize</code> value:
* </p>
*
* <pre>
* <jvmarg value="-XX:MaxPermSize=128m"/>
* </pre>
*
* @author George Berger
*/
class ScalaTestAntTask extends Task {
private var includes: String = ""
private var excludes: String = ""
private var maxMemory: String = null
private var suffixes: String = null
private var parallel = false
private var sortSuites = false
private var haltonfailure = false
private var fork = false
private var spanScaleFactor = 1.0
private var numthreads = 0
private val runpath = new ListBuffer[String]
private val jvmArgs = new ListBuffer[String]
private val suites = new ListBuffer[SuiteElement]
private val membersonlys = new ListBuffer[String]
private val wildcards = new ListBuffer[String]
private val testNGSuites = new ListBuffer[String]
private val chosenStyles = new ListBuffer[String]
private val reporters = new ListBuffer[ReporterElement]
private val properties = new ListBuffer[NameValuePair]
/**
* Executes the task.
*/
override def execute {
val args = new ListBuffer[String]
addSuiteArgs(args)
addReporterArgs(args)
addPropertyArgs(args)
addIncludesArgs(args)
addExcludesArgs(args)
addRunpathArgs(args)
addTestNGSuiteArgs(args)
addParallelArg(args)
addSuffixesArg(args)
addChosenStyles(args)
addSpanScaleFactorArg(args)
val argsArray = args.toArray
val success = if (fork) javaTaskRunner(args.toList)
else Runner.run(argsArray)
if (!success && haltonfailure)
throw new BuildException("ScalaTest run failed.")
}
private def javaTaskRunner(args: List[String]): Boolean = {
val java = new Java
java.bindToOwner(this)
java.init()
java.setFork(true)
java.setClassname("org.scalatest.tools.Runner")
val classLoader = getClass.getClassLoader.asInstanceOf[AntClassLoader]
java.setClasspath(new Path(getProject, classLoader.getClasspath))
if (maxMemory != null) java.createJvmarg.setValue("-Xmx" + maxMemory)
for (jvmArg <- jvmArgs)
java.createJvmarg.setValue(jvmArg)
for (arg <- args)
java.createArg.setValue(arg)
val result = java.executeJava
return (result == 0)
}
//
// Adds '-P runpath' arg pair to args list if a runpath
// element or attribute was specified for task.
//
private def addRunpathArgs(args: ListBuffer[String]) {
if (runpath.size > 0) {
args += "-R"
args += getSpacedOutPathStr(runpath.toList)
}
}
private def addTestNGSuiteArgs(args: ListBuffer[String]) {
if (testNGSuites.size > 0) {
args += "-b"
args += getSpacedOutPathStr(testNGSuites.toList)
}
}
private def addChosenStyles(args: ListBuffer[String]) {
chosenStyles.foreach { style =>
args += "-y"
args += style
}
}
//
// Adds '-C' arg to args list if 'parallel' attribute was
// specified true for task.
//
private def addParallelArg(args: ListBuffer[String]) {
if (parallel) {
args += (if (sortSuites) "-PS" else "-P") + (if (numthreads > 0) ("" + numthreads) else "")
}
}
//
// Add -F arg to args list if spanScaleFactor attribute was
// specified for task
//
private def addSpanScaleFactorArg(args: ListBuffer[String]) {
args += "-F"
args += spanScaleFactor.toString
}
//
// Adds '-q' arg to args list if 'suffixes' attribute was
// specified for task.
//
private def addSuffixesArg(args: ListBuffer[String]) {
if (suffixes != null) {
args += "-q"
args += suffixes
}
}
//
// Adds '-n includes-list' arg pair to args list if a tagsToInclude
// element or attribute was supplied for task.
//
private def addIncludesArgs(args: ListBuffer[String]) {
if ((includes != null) && (includes.trim != "")) {
args += "-n"
args += singleSpace(includes)
}
}
//
// Adds '-l excludes-list' arg pair to args list if a tagsToExclude
// element or attribute was supplied for task.
//
private def addExcludesArgs(args: ListBuffer[String]) {
if ((excludes != null) && (excludes.trim != "")) {
args += "-l"
args += singleSpace(excludes)
}
}
//
// Adds '-Dname=value' argument to args list for each nested
// <property> element supplied for task.
//
private def addPropertyArgs(args: ListBuffer[String]) {
for (pair <- properties)
args += "-D" + pair.getName + "=" + pair.getValue
}
//
// Adds '-s classname' argument to args list for each suite
// specified for task. Adds '-m packagename' for each
// membersonly element specified, and '-w packagename' for
// each wildcard element specified.
//
private def addSuiteArgs(args: ListBuffer[String]) {
for (suite <- suites) {
if (suite == null)
throw new BuildException(
"missing classname attribute for <suite> element")
args += "-s"
args += suite.getClassName
suite.getTestNames.foreach { tn =>
if (tn == null)
throw new BuildException("missing name attribute for <test> element")
args += "-t"
args += tn
}
suite.getNestedSuites.foreach { ns =>
if (ns.getSuiteId == null)
throw new BuildException("missing suiteId attribute for <nested> element")
args += "-i"
args += ns.getSuiteId
ns.getTestNames.foreach { tn =>
if (tn == null)
throw new BuildException("missing name attribute for <test> element")
args += "-t"
args += tn
}
}
}
for (packageName <- membersonlys) {
if (packageName == null)
throw new BuildException(
"missing package attribute for <membersonly> element")
args += "-m"
args += packageName
}
for (packageName <- wildcards) {
if (packageName == null)
throw new BuildException(
"missing package attribute for <wildcard> element")
args += "-w"
args += packageName
}
}
//
// Adds appropriate reporter options to args list for each
// nested reporter element specified for task. Defaults to
// stdout if no reporter specified.
//
private def addReporterArgs(args: ListBuffer[String]) {
if (reporters.size == 0)
args += "-o"
for (reporter <- reporters) {
reporter.getType match {
case "stdout" => addReporterOption(args, reporter, "-o")
case "stderr" => addReporterOption(args, reporter, "-e")
case "graphic" => addReporterOption(args, reporter, "-g")
case "file" => addFileReporter(args, reporter)
case "xml" => addXmlReporter(args, reporter)
case "junitxml" => addJunitXmlReporter(args, reporter)
case "dashboard" => addDashboardReporter(args, reporter)
case "html" => addHtmlReporter(args, reporter)
case "reporterclass" => addReporterClass(args, reporter)
case t =>
throw new BuildException("unexpected reporter type [" + t + "]")
}
}
}
//
// Adds specified option to args for reporter. Appends reporter
// config string to option if specified, e.g. "-eFAB".
//
private def addReporterOption(args: ListBuffer[String],
reporter: ReporterElement,
option: String)
{
val config = reporter.getConfig
if (config == null) args += option
else args += option + config
}
//
// Adds '-f' file reporter option to args. Appends reporter
// config string to option if specified. Adds reporter's
// filename as additional argument, e.g. "-fFAB", "filename".
//
private def addFileReporter(args: ListBuffer[String],
reporter: ReporterElement)
{
addReporterOption(args, reporter, "-f")
if (reporter.getFilename == null)
throw new BuildException(
"reporter type 'file' requires 'filename' attribute")
args += reporter.getFilename
}
//
// Adds '-x' xml reporter option to args. Adds reporter's
// directory as additional argument, e.g. "-x", "directory".
// [disabled for now]
//
private def addXmlReporter(args: ListBuffer[String],
reporter: ReporterElement)
{
addReporterOption(args, reporter, "-x")
if (reporter.getDirectory == null)
throw new BuildException(
"reporter type 'xml' requires 'directory' attribute")
args += reporter.getDirectory
}
//
// Adds '-u' junit xml reporter option to args. Adds reporter's
// directory as additional argument, e.g. "-u", "directory".
//
private def addJunitXmlReporter(args: ListBuffer[String],
reporter: ReporterElement)
{
addReporterOption(args, reporter, "-u")
if (reporter.getDirectory == null)
throw new BuildException(
"reporter type 'junitxml' requires 'directory' attribute")
args += reporter.getDirectory
}
//
// Adds '-d' Dashboard reporter option to args. Adds reporter's
// directory as additional argument, e.g. "-d", "directory".
//
private def addDashboardReporter(args: ListBuffer[String],
reporter: ReporterElement)
{
addReporterOption(args, reporter, "-d")
if (reporter.getDirectory == null)
throw new BuildException(
"reporter type 'dashboard' requires 'directory' attribute")
args += reporter.getDirectory
if (reporter.getNumfiles >= 0) {
args += "-a"
args += reporter.getNumfiles.toString
}
}
//
// Adds '-h' html reporter option to args. Appends reporter
// config string to option if specified. Adds reporter's
// filename as additional argument, e.g. "-hFAB", "filename".
//
private def addHtmlReporter(args: ListBuffer[String],
reporter: ReporterElement)
{
addReporterOption(args, reporter, "-h")
if (reporter.getDirectory == null)
throw new BuildException(
"reporter type 'html' requires 'directory' attribute")
args += reporter.getDirectory
if (reporter.getCss != null) {
args += "-Y"
args += reporter.getCss
}
}
//
// Adds '-R' reporter class option to args. Appends
// reporter config string to option if specified. Adds
// reporter's classname as additional argument, e.g. "-RFAB",
// "my.ReporterClass".
//
private def addReporterClass(args: ListBuffer[String],
reporter: ReporterElement)
{
addReporterOption(args, reporter, "-C")
if (reporter.getClassName == null)
throw new BuildException(
"reporter type 'reporterclass' requires 'classname' attribute")
args += reporter.getClassName
}
/**
* Sets value of the <code>runpath</code> attribute.
*/
def setRunpath(runpath: Path) {
for (element <- runpath.list) {
this.runpath += element
}
}
/**
* Sets value of the <code>tagsToExclude</code> attribute.
*/
def setTagsToExclude(tagsToExclude: String) {
this.excludes += " " + tagsToExclude
}
/**
* Sets value of the <code>tagsToInclude</code> attribute.
*/
def setTagsToInclude(tagsToInclude: String) {
this.includes += " " + tagsToInclude
}
/**
* Sets value of the <code>haltonfailure</code> attribute.
*/
def setHaltonfailure(haltonfailure: Boolean) {
this.haltonfailure = haltonfailure
}
/**
* Sets value of the <code>fork</code> attribute.
*/
def setFork(fork: Boolean) {
this.fork = fork
}
/**
* Sets value of the <code>suffixes</code> attribute.
*/
def setSuffixes(suffixes: String) {
this.suffixes = suffixes
}
/**
* Sets value of the <code>maxmemory</code> attribute.
*/
def setMaxmemory(max: String) {
this.maxMemory = max
}
/**
* Sets value of the <code>testngsuites</code> attribute.
*/
def setTestNGSuites(testNGSuitePath: Path) {
for (element <- testNGSuitePath.list)
this.testNGSuites += element
}
/**
* Sets value of the <code>concurrent</code> attribute.
* <b>Note: The <code>concurrent</code> attribute has been deprecated and will be removed in a future version of ScalaTest.
* Please use the <code>parallel</code> attribute instead.</b>
*/
@deprecated("Please use parallel instead")
def setConcurrent(concurrent: Boolean) {
Console.err.println("WARNING: 'concurrent' attribute is deprecated " +
"- please use 'parallel' instead")
this.parallel = concurrent
}
/**
* Sets value of the <code>numthreads</code> attribute.
*/
def setNumthreads(numthreads: Int) {
this.numthreads = numthreads
}
/**
* Sets value of the <code>parallel</code> attribute.
*/
def setParallel(parallel: Boolean) {
this.parallel = parallel
}
/**
* Sets value of the <code>sortSuites</code> attribute.
*/
def setSortSuites(sortSuites: Boolean) {
this.sortSuites = sortSuites
}
/**
* Sets value of the <code>spanScaleFactor</code> attribute.
*/
def setSpanScaleFactor(spanScaleFactor: Double) {
this.spanScaleFactor = spanScaleFactor
}
/**
* Sets value from nested element <code>runpath</code>.
*/
def addConfiguredRunpath(runpath: Path) {
for (element <- runpath.list)
this.runpath += element
}
/**
* Sets value from nested element <code>testngsuites</code>.
*/
def addConfiguredTestNGSuites(testNGSuitePath: Path) {
for (element <- testNGSuitePath.list)
this.testNGSuites += element
}
/**
* Sets value from nested element <code>runpathurl</code>.
*/
def addConfiguredRunpathUrl(runpathurl: RunpathUrl) {
runpath += runpathurl.getUrl
}
/**
* Sets value from nested element <code>jvmarg</code>.
*/
def addConfiguredJvmArg(arg: JvmArg) {
jvmArgs += arg.getValue
}
/**
* Sets values from nested element <code>property</code>.
* <b>The <code>property</code> attribute has been deprecated and will be removed in a future version of ScalaTest.
* Please use the <code>config</code> attribute instead.</b>
*/
@deprecated("Please use config instead")
def addConfiguredProperty(property: NameValuePair) {
Console.err.println("WARNING: <property> is deprecated - " +
"please use <config> instead [name: " +
property.getName + "]")
properties += property
}
/**
* Sets values from nested element <code>config</code>.
*/
def addConfiguredConfig(config: NameValuePair) {
properties += config
}
/**
* Sets value of <code>suite</code> attribute.
*/
def setSuite(suite: SuiteElement) {
suites += suite
}
/**
* Sets value of <code>membersonly</code> attribute.
*/
def setMembersonly(packageName: String) {
membersonlys += packageName
}
/**
* Sets value of <code>wildcard</code> attribute.
*/
def setWildcard(packageName: String) {
wildcards += packageName
}
/**
* Sets value of <code>style</code> attribute.
*/
def setStyle(style: String) {
chosenStyles += style
}
/**
* Sets value from nested element <code>suite</code>.
*/
def addConfiguredSuite(suite: SuiteElement) {
suites += suite
}
/**
* Sets value from nested element <code>membersonly</code>.
*/
def addConfiguredMembersOnly(membersonly: PackageElement) {
membersonlys += membersonly.getPackage
}
/**
* Sets value from nested element <code>wildcard</code>.
*/
def addConfiguredWildcard(wildcard: PackageElement) {
wildcards += wildcard.getPackage
}
/**
* Sets value from nested element <code>reporter</code>.
*/
def addConfiguredReporter(reporter: ReporterElement) {
reporters += reporter
}
/**
* Sets value from nested element <code>tagsToInclude</code>.
*/
def addConfiguredTagsToInclude(tagsToInclude: TextElement) {
this.includes += " " + tagsToInclude.getText
}
def addConfiguredStyle(style: StyleElement) {
this.chosenStyles += style.getName
}
/**
* Sets value from nested element <code>includes</code>.
* <b>The <code>includes</code> attribute has been deprecated and will be removed in a future version of ScalaTest.
* Please use the <code>tagsToInclude</code> attribute instead.</b>
*/
@deprecated("Please use tagsToInclude instead")
def addConfiguredIncludes(includes: TextElement) {
Console.err.println("WARNING: 'includes' is deprecated - " +
"use 'tagsToInclude' instead [includes: " +
includes.getText + "]")
addConfiguredTagsToInclude(includes)
}
/**
* Sets value from nested element <code>tagsToExclude</code>.
*/
def addConfiguredTagsToExclude(tagsToExclude: TextElement) {
this.excludes += " " + tagsToExclude.getText
}
/**
* Sets value from nested element <code>excludes</code>.
* <b>The <code>excludes</code> attribute has been deprecated and will be removed in a future version of ScalaTest.
* Please use the <code>tagsToExclude</code> attribute instead.</b>
*/
@deprecated("Please use tagsToExclude instead")
def addConfiguredExcludes(excludes: TextElement) {
Console.err.println("WARNING: 'excludes' is deprecated - " +
"use 'tagsToExclude' instead [excludes: " +
excludes.getText + "]")
addConfiguredTagsToExclude(excludes)
}
//
// Translates a list of strings making up a path into a
// single space-delimited string. Uses backslashes to escape
// spaces within individual path elements, since that's what
  // Runner's runpath (-R) option expects.
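  //
  // For example (illustrative values only), List("dir one", "two.jar")
  // becomes "dir\ one two.jar".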
//
private def getSpacedOutPathStr(path: List[String]): String = {
    path.map(_.replaceAll(" ", """\\ """)).mkString("", " ", "")
}
//
// Translates a whitespace-delimited string into a
// whitespace-delimited string, but not the same whitespace. Trims
// off leading and trailing whitespace and converts inter-element
// whitespace to a single space.
//
private def singleSpace(str: String): String = {
    str.trim.replaceAll("\\s+", " ")
}
}
//
// Class to hold data from <style> elements.
//
private class StyleElement {
private var name: String = null
def setName(name: String) {
this.name = name
}
def getName = name
}
//
// Class to hold data from <membersonly> and <wildcard> elements.
//
private class PackageElement {
private var packageName: String = null
def setPackage(packageName: String) {
this.packageName = packageName
}
def getPackage = packageName
}
//
// Class to hold data from <suite> elements.
//
private class SuiteElement {
private var className: String = null
private val testNamesBuffer = new ListBuffer[String]()
private val nestedSuitesBuffer = new ListBuffer[NestedSuiteElement]()
def setClassName(className: String) {
this.className = className
}
def addConfiguredTest(test: TestElement) {
testNamesBuffer += test.getName
}
def addConfiguredNested(nestedSuite: NestedSuiteElement) {
nestedSuitesBuffer += nestedSuite
}
def getClassName = className
def getTestNames = testNamesBuffer.toArray
def getNestedSuites = nestedSuitesBuffer.toArray
}
private class TestElement {
private var name: String = null
def setName(name: String) {
this.name = name
}
def getName = name
}
private class NestedSuiteElement {
private var suiteId: String = null
private val testNamesBuffer = new ListBuffer[String]()
def setSuiteId(suiteId: String) {
this.suiteId = suiteId
}
def addConfiguredTest(test: TestElement) {
testNamesBuffer += test.getName
}
def getSuiteId = suiteId
def getTestNames = testNamesBuffer.toArray
}
//
// Class to hold data from tagsToInclude and tagsToExclude elements.
//
private class TextElement {
private var text: String = null
def addText(text: String) {
this.text = text
}
def getText = text
}
//
// Class to hold data from <property> elements.
//
private class NameValuePair {
private var name : String = null
private var value : String = null
def setName(name : String) { this.name = name }
def setValue(value : String) { this.value = value }
def getName = name
def getValue = value
}
//
// Class to hold data from <runpathurl> elements.
//
private class RunpathUrl {
private var url: String = null
def setUrl(url: String) { this.url = url }
def getUrl = url
}
//
// Class to hold data from <jvmarg> elements.
//
private class JvmArg {
private var value: String = null
def setValue(value: String) { this.value = value }
def getValue = value
}
//
// Class to hold data from <reporter> elements.
//
private class ReporterElement {
private var rtype : String = null
private var config : String = null
private var filename : String = null
private var directory : String = null
private var classname : String = null
private var numfiles : Int = -1
private var css : String = null
def setType(rtype : String) { this.rtype = rtype }
def setConfig(config : String) { this.config = config }
def setFilename(filename : String) { this.filename = filename }
def setDirectory(directory : String) { this.directory = directory }
def setClassName(classname : String) { this.classname = classname }
def setNumfiles(numfiles : Int) { this.numfiles = numfiles }
def setCss(css: String) { this.css = css }
def getType = rtype
def getConfig = config
def getFilename = filename
def getDirectory = directory
def getClassName = classname
def getNumfiles = numfiles
def getCss = css
}
/*
* <li> <code>dashboard</code> </li>
* Types <code>file</code>, <code>junitxml</code>, <code>dashboard</code>, and <code>reporterclass</code> require additional attributes
* <reporter type="dashboard" directory="target"/>
* <p>
* For reporter type 'dashboard', an optional <code>numfiles</code> attribute may be
* included to specify the number of old summary and duration files to be archived.
* Default is 2.
* </p>
*
*/
| svn2github/scalatest | src/main/scala/org/scalatest/tools/ScalaTestAntTask.scala | Scala | apache-2.0 | 29,674 |
package java.util
import scala.collection.mutable
class LinkedHashMap[K, V] private (inner: mutable.LinkedHashMap[Box[K], V],
accessOrder: Boolean) extends HashMap[K, V](inner) {
self =>
def this() =
this(mutable.LinkedHashMap.empty[Box[K], V], false)
def this(initialCapacity: Int, loadFactor: Float, accessOrder: Boolean) = {
this(mutable.LinkedHashMap.empty[Box[K], V], accessOrder)
if (initialCapacity < 0)
throw new IllegalArgumentException("initialCapacity < 0")
    else if (loadFactor <= 0.0)
throw new IllegalArgumentException("loadFactor <= 0.0")
}
def this(initialCapacity: Int, loadFactor: Float) =
this(initialCapacity, loadFactor, false)
def this(initialCapacity: Int) =
this(initialCapacity, LinkedHashMap.DEFAULT_LOAD_FACTOR)
def this(m: Map[_ <: K, _ <: V]) = {
this()
putAll(m)
}
override def get(key: scala.Any): V = {
val value = super.get(key)
if (accessOrder) {
val boxedKey = Box(key.asInstanceOf[K])
if (value != null || containsKey(boxedKey)) {
inner.remove(boxedKey)
inner(boxedKey) = value
}
}
value
}
override def put(key: K, value: V): V = {
val oldValue = super.put(key, value)
val iter = entrySet().iterator()
if (iter.hasNext && removeEldestEntry(iter.next()))
iter.remove()
oldValue
}
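  /**
   * Returns true if this map should remove its eldest entry after the given
   * insertion; the default never evicts. Subclasses can override it to build
   * simple bounded caches. A minimal sketch (the limit of 100 entries is an
   * arbitrary, illustrative choice):
   *
   * {{{
   * class BoundedMap[K, V] extends LinkedHashMap[K, V](16, 0.75f, true) {
   *   override protected def removeEldestEntry(eldest: java.util.Map.Entry[K, V]): Boolean =
   *     size() > 100
   * }
   * }}}
   */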
protected def removeEldestEntry(eldest: Map.Entry[K, V]): Boolean = false
override def clone(): AnyRef = {
new LinkedHashMap(inner.clone(), accessOrder)
}
}
object LinkedHashMap {
private[LinkedHashMap] final val DEFAULT_INITIAL_CAPACITY = 16
private[LinkedHashMap] final val DEFAULT_LOAD_FACTOR = 0.75f
}
| jmnarloch/scala-js | javalib/src/main/scala/java/util/LinkedHashMap.scala | Scala | bsd-3-clause | 1,691 |
import org.specs2.matcher.ParserMatchers
import org.specs2.mutable.Specification
import space.armada.vex.parsers.CalculatorParser
class CalculatorParserSpec extends Specification with ParserMatchers {
val parsers = CalculatorParser
"CalculatorParser.apply" should {
"calculate the addition of two integers" >> { parsers("2 + 3") must beEqualTo(5) }
"calculate the subtraction of two integers resulting in a positive value" >> {
parsers("9 - 3") must beEqualTo(6)
}
"calculate the subtraction of two integers resulting in a negative value" >> {
parsers("3 - 9") must beEqualTo(-6)
}
}
"CalculatorParser.number" should {
"recognize single-digit positive numbers" >> { parsers.number must succeedOn("5") }
"recognize multi-digit positive numbers" >> { parsers.number must succeedOn("42") }
// "recognize negative numbers" >> { parsers.number must succeedOn("-9") }
}
}
| ripta/vex | src/test/scala/space/armada/vex/parsers/calculator_parser_spec.scala | Scala | mit | 915 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package org.apache.eagle.stream.application.scheduler
import akka.actor.{Actor, ActorLogging}
import com.typesafe.config.Config
import org.apache.eagle.service.application.entity.TopologyOperationEntity.OPERATION_STATUS
import org.apache.eagle.stream.application.ApplicationSchedulerAsyncDAO
import scala.collection.JavaConversions
import scala.util.{Failure, Success}
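/**
 * Actor that loads newly created (INITIALIZED) topology operations, marks them
 * PENDING, resolves the matching topology execution and forwards both to the
 * sender as a `SchedulerCommand`; failures are written back to the operation
 * with status FAILED.
 */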
private[scheduler] class AppCommandLoader extends Actor with ActorLogging {
@volatile var _config: Config = null
@volatile var _dao: ApplicationSchedulerAsyncDAO = null
import context.dispatcher
override def receive = {
case InitializationEvent(config: Config) =>
_config = config
_dao = new ApplicationSchedulerAsyncDAO(config, context.dispatcher)
case ClearPendingOperation =>
if(_dao == null) _dao = new ApplicationSchedulerAsyncDAO(_config, context.dispatcher)
_dao.clearPendingOperations()
case CommandLoaderEvent => {
val _sender = sender()
_dao.readOperationsByStatus(OPERATION_STATUS.INITIALIZED) onComplete {
case Success(commands) => {
log.info(s"Load ${commands.size()} new commands")
JavaConversions.collectionAsScalaIterable(commands) foreach { command =>
command.setStatus(OPERATION_STATUS.PENDING)
_dao.updateOperationStatus(command) onComplete {
case Success(response) =>
_dao.loadTopologyExecutionByName(command.getSite, command.getApplication, command.getTopology) onComplete {
case Success(topologyExecution) => {
_sender ! SchedulerCommand(topologyExecution, command)
}
case Failure(ex) =>
log.error(ex.getMessage)
command.setMessage(ex.getMessage)
command.setStatus(OPERATION_STATUS.FAILED)
_dao.updateOperationStatus(command)
}
case Failure(ex) =>
log.error(s"Got an exception to update command status $command: ${ex.getMessage}")
command.setMessage(ex.getMessage)
command.setStatus(OPERATION_STATUS.FAILED)
_dao.updateOperationStatus(command)
}
}
}
case Failure(ex) =>
log.error(s"Failed to get commands due to exception ${ex.getMessage}")
}
}
case TerminatedEvent =>
context.stop(self)
case m@_ => throw new UnsupportedOperationException(s"Event is not supported $m")
}
}
| rlugojr/incubator-eagle | eagle-core/eagle-application-management/eagle-stream-application-manager/src/main/scala/org/apache/eagle/stream/application/scheduler/AppCommandLoader.scala | Scala | apache-2.0 | 3,342 |
package io.aos.ebnf.spl.driver.es
import org.elasticsearch.search.facet.FacetBuilder
import org.elasticsearch.common.xcontent.XContentBuilder
import org.elasticsearch.common.xcontent.ToXContent.Params
import org.elasticsearch.search.builder.SearchSourceBuilderException
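/**
 * Builds the custom "distinct_date_histogram" facet for an Elasticsearch
 * search request. A rough usage sketch (the field names and interval below are
 * purely illustrative):
 *
 * {{{
 * val facet = new DistinctDateHistogramFacetBuilder("visitors")
 *   .keyField("timestamp")
 *   .valueField("visitorId")
 *   .interval("day")
 * }}}
 */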
class DistinctDateHistogramFacetBuilder(name: String) extends FacetBuilder(name) {
final val FacetType = "distinct_date_histogram"
private var keyFieldName: String = null
private var valueFieldName: String = null
private var interval: String = null
def keyField(keyField: String): DistinctDateHistogramFacetBuilder = {
this.keyFieldName = keyField
return this
}
def valueField(valueField: String): DistinctDateHistogramFacetBuilder = {
this.valueFieldName = valueField
return this
}
def interval(interval: String): DistinctDateHistogramFacetBuilder = {
this.interval = interval
return this
}
def toXContent(builder: XContentBuilder, params: Params): XContentBuilder = {
if (keyFieldName == null || valueFieldName == null) {
throw new SearchSourceBuilderException("field name and value must be set on date histogram facet for facet [" + name + "]")
}
if (interval == null) {
throw new SearchSourceBuilderException("interval must be set on date histogram facet for facet [" + name + "]")
}
builder.startObject(name)
builder.startObject(FacetType)
builder.field("key_field", keyFieldName)
builder.field("value_field", valueFieldName)
builder.field("interval", interval)
// if (comparatorType != null) {
// builder.field("comparator", comparatorType.description)
// }
builder.endObject
addFilterFacetAndGlobal(builder, params)
builder.endObject
return builder
}
}
| echalkpad/t4f-data | parser/ebnf/src/main/scala/io/aos/ebnf/spl/driver/es/DistinctDateHistogramFacetBuilder.scala | Scala | apache-2.0 | 1,778 |
package scalariform.lexer
abstract sealed class HiddenToken(val token: Token) {
  lazy val newlineful = token.text contains '\n'
def text = token.text
def rawText = token.rawText
}
case class Whitespace(override val token: Token) extends HiddenToken(token)
sealed abstract class Comment(token: Token) extends HiddenToken(token)
object Comment {
def unapply(comment: Comment) = Some(comment.token)
}
case class SingleLineComment(override val token: Token) extends Comment(token)
case class MultiLineComment(override val token: Token) extends Comment(token)
case class ScalaDocComment(override val token: Token) extends Comment(token)
| tkawachi/scalariform | scalariform/src/main/scala/scalariform/lexer/HiddenToken.scala | Scala | mit | 652 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.benchmark
import java.util.HashMap
import org.apache.spark.SparkConf
import org.apache.spark.benchmark.Benchmark
import org.apache.spark.internal.config._
import org.apache.spark.memory.{TaskMemoryManager, UnifiedMemoryManager}
import org.apache.spark.sql.catalyst.expressions.UnsafeRow
import org.apache.spark.sql.execution.joins.LongToUnsafeRowMap
import org.apache.spark.sql.execution.vectorized.AggregateHashMap
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{LongType, StructType}
import org.apache.spark.unsafe.Platform
import org.apache.spark.unsafe.hash.Murmur3_x86_32
import org.apache.spark.unsafe.map.BytesToBytesMap
/**
* Benchmark to measure performance for aggregate primitives.
* To run this benchmark:
* {{{
* 1. without sbt: bin/spark-submit --class <this class> <spark sql test jar>
* 2. build/sbt "sql/test:runMain <this class>"
* 3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
* Results will be written to "benchmarks/AggregateBenchmark-results.txt".
* }}}
*/
object AggregateBenchmark extends SqlBasedBenchmark {
override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
runBenchmark("aggregate without grouping") {
val N = 500L << 22
codegenBenchmark("agg w/o group", N) {
spark.range(N).selectExpr("sum(id)").collect()
}
}
runBenchmark("stat functions") {
val N = 100L << 20
codegenBenchmark("stddev", N) {
spark.range(N).groupBy().agg("id" -> "stddev").collect()
}
codegenBenchmark("kurtosis", N) {
spark.range(N).groupBy().agg("id" -> "kurtosis").collect()
}
}
runBenchmark("aggregate with linear keys") {
val N = 20 << 22
val benchmark = new Benchmark("Aggregate w keys", N, output = output)
def f(): Unit = {
spark.range(N).selectExpr("(id & 65535) as k").groupBy("k").sum().collect()
}
benchmark.addCase("codegen = F", numIters = 2) { _ =>
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
f()
}
}
benchmark.addCase("codegen = T hashmap = F", numIters = 3) { _ =>
withSQLConf(
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true",
SQLConf.ENABLE_TWOLEVEL_AGG_MAP.key -> "false",
"spark.sql.codegen.aggregate.map.vectorized.enable" -> "false") {
f()
}
}
benchmark.addCase("codegen = T hashmap = T", numIters = 5) { _ =>
withSQLConf(
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true",
SQLConf.ENABLE_TWOLEVEL_AGG_MAP.key -> "true",
"spark.sql.codegen.aggregate.map.vectorized.enable" -> "true") {
f()
}
}
benchmark.run()
}
runBenchmark("aggregate with randomized keys") {
val N = 20 << 22
val benchmark = new Benchmark("Aggregate w keys", N, output = output)
spark.range(N).selectExpr("id", "floor(rand() * 10000) as k")
.createOrReplaceTempView("test")
def f(): Unit = spark.sql("select k, k, sum(id) from test group by k, k").collect()
benchmark.addCase("codegen = F", numIters = 2) { _ =>
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
f()
}
}
benchmark.addCase("codegen = T hashmap = F", numIters = 3) { _ =>
withSQLConf(
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true",
SQLConf.ENABLE_TWOLEVEL_AGG_MAP.key -> "false",
"spark.sql.codegen.aggregate.map.vectorized.enable" -> "false") {
f()
}
}
benchmark.addCase("codegen = T hashmap = T", numIters = 5) { _ =>
withSQLConf(
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true",
SQLConf.ENABLE_TWOLEVEL_AGG_MAP.key -> "true",
"spark.sql.codegen.aggregate.map.vectorized.enable" -> "true") {
f()
}
}
benchmark.run()
}
runBenchmark("aggregate with string key") {
val N = 20 << 20
val benchmark = new Benchmark("Aggregate w string key", N, output = output)
def f(): Unit = spark.range(N).selectExpr("id", "cast(id & 1023 as string) as k")
.groupBy("k").count().collect()
benchmark.addCase("codegen = F", numIters = 2) { _ =>
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
f()
}
}
benchmark.addCase("codegen = T hashmap = F", numIters = 3) { _ =>
withSQLConf(
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true",
SQLConf.ENABLE_TWOLEVEL_AGG_MAP.key -> "false",
"spark.sql.codegen.aggregate.map.vectorized.enable" -> "false") {
f()
}
}
benchmark.addCase("codegen = T hashmap = T", numIters = 5) { _ =>
withSQLConf(
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true",
SQLConf.ENABLE_TWOLEVEL_AGG_MAP.key -> "true",
"spark.sql.codegen.aggregate.map.vectorized.enable" -> "true") {
f()
}
}
benchmark.run()
}
runBenchmark("aggregate with decimal key") {
val N = 20 << 20
val benchmark = new Benchmark("Aggregate w decimal key", N, output = output)
def f(): Unit = spark.range(N).selectExpr("id", "cast(id & 65535 as decimal) as k")
.groupBy("k").count().collect()
benchmark.addCase("codegen = F") { _ =>
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
f()
}
}
benchmark.addCase("codegen = T hashmap = F") { _ =>
withSQLConf(
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true",
SQLConf.ENABLE_TWOLEVEL_AGG_MAP.key -> "false",
"spark.sql.codegen.aggregate.map.vectorized.enable" -> "false") {
f()
}
}
benchmark.addCase("codegen = T hashmap = T") { _ =>
withSQLConf(
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true",
SQLConf.ENABLE_TWOLEVEL_AGG_MAP.key -> "true",
"spark.sql.codegen.aggregate.map.vectorized.enable" -> "true") {
f()
}
}
benchmark.run()
}
runBenchmark("aggregate with multiple key types") {
val N = 20 << 20
val benchmark = new Benchmark("Aggregate w multiple keys", N, output = output)
def f(): Unit = spark.range(N)
.selectExpr(
"id",
"(id & 1023) as k1",
"cast(id & 1023 as string) as k2",
"cast(id & 1023 as int) as k3",
"cast(id & 1023 as double) as k4",
"cast(id & 1023 as float) as k5",
"id > 1023 as k6")
.groupBy("k1", "k2", "k3", "k4", "k5", "k6")
.sum()
.collect()
benchmark.addCase("codegen = F") { _ =>
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
f()
}
}
benchmark.addCase("codegen = T hashmap = F") { _ =>
withSQLConf(
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true",
SQLConf.ENABLE_TWOLEVEL_AGG_MAP.key -> "false",
"spark.sql.codegen.aggregate.map.vectorized.enable" -> "false") {
f()
}
}
benchmark.addCase("codegen = T hashmap = T") { _ =>
withSQLConf(
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true",
SQLConf.ENABLE_TWOLEVEL_AGG_MAP.key -> "true",
"spark.sql.codegen.aggregate.map.vectorized.enable" -> "true") {
f()
}
}
benchmark.run()
}
runBenchmark("max function bytecode size of wholestagecodegen") {
val N = 20 << 15
val benchmark = new Benchmark("max function bytecode size", N, output = output)
def f(): Unit = spark.range(N)
.selectExpr(
"id",
"(id & 1023) as k1",
"cast(id & 1023 as double) as k2",
"cast(id & 1023 as int) as k3",
"case when id > 100 and id <= 200 then 1 else 0 end as v1",
"case when id > 200 and id <= 300 then 1 else 0 end as v2",
"case when id > 300 and id <= 400 then 1 else 0 end as v3",
"case when id > 400 and id <= 500 then 1 else 0 end as v4",
"case when id > 500 and id <= 600 then 1 else 0 end as v5",
"case when id > 600 and id <= 700 then 1 else 0 end as v6",
"case when id > 700 and id <= 800 then 1 else 0 end as v7",
"case when id > 800 and id <= 900 then 1 else 0 end as v8",
"case when id > 900 and id <= 1000 then 1 else 0 end as v9",
"case when id > 1000 and id <= 1100 then 1 else 0 end as v10",
"case when id > 1100 and id <= 1200 then 1 else 0 end as v11",
"case when id > 1200 and id <= 1300 then 1 else 0 end as v12",
"case when id > 1300 and id <= 1400 then 1 else 0 end as v13",
"case when id > 1400 and id <= 1500 then 1 else 0 end as v14",
"case when id > 1500 and id <= 1600 then 1 else 0 end as v15",
"case when id > 1600 and id <= 1700 then 1 else 0 end as v16",
"case when id > 1700 and id <= 1800 then 1 else 0 end as v17",
"case when id > 1800 and id <= 1900 then 1 else 0 end as v18")
.groupBy("k1", "k2", "k3")
.sum()
.collect()
benchmark.addCase("codegen = F") { _ =>
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "false") {
f()
}
}
benchmark.addCase("codegen = T hugeMethodLimit = 10000") { _ =>
withSQLConf(
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true",
SQLConf.WHOLESTAGE_HUGE_METHOD_LIMIT.key -> "10000") {
f()
}
}
benchmark.addCase("codegen = T hugeMethodLimit = 1500") { _ =>
withSQLConf(
SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true",
SQLConf.WHOLESTAGE_HUGE_METHOD_LIMIT.key -> "1500") {
f()
}
}
benchmark.run()
}
runBenchmark("cube") {
val N = 5 << 20
codegenBenchmark("cube", N) {
spark.range(N).selectExpr("id", "id % 1000 as k1", "id & 256 as k2")
.cube("k1", "k2").sum("id").collect()
}
}
runBenchmark("hash and BytesToBytesMap") {
val N = 20 << 20
val benchmark = new Benchmark("BytesToBytesMap", N, output = output)
benchmark.addCase("UnsafeRowhash") { _ =>
var i = 0
val keyBytes = new Array[Byte](16)
val key = new UnsafeRow(1)
key.pointTo(keyBytes, Platform.BYTE_ARRAY_OFFSET, 16)
var s = 0
while (i < N) {
key.setInt(0, i % 1000)
val h = Murmur3_x86_32.hashUnsafeWords(
key.getBaseObject, key.getBaseOffset, key.getSizeInBytes, 42)
s += h
i += 1
}
}
benchmark.addCase("murmur3 hash") { _ =>
var i = 0
val keyBytes = new Array[Byte](16)
val key = new UnsafeRow(1)
key.pointTo(keyBytes, Platform.BYTE_ARRAY_OFFSET, 16)
var p = 524283
var s = 0
while (i < N) {
var h = Murmur3_x86_32.hashLong(i, 42)
key.setInt(0, h)
s += h
i += 1
}
}
benchmark.addCase("fast hash") { _ =>
var i = 0
val keyBytes = new Array[Byte](16)
val key = new UnsafeRow(1)
key.pointTo(keyBytes, Platform.BYTE_ARRAY_OFFSET, 16)
var p = 524283
var s = 0
while (i < N) {
var h = i % p
if (h < 0) {
h += p
}
key.setInt(0, h)
s += h
i += 1
}
}
benchmark.addCase("arrayEqual") { _ =>
var i = 0
val keyBytes = new Array[Byte](16)
val valueBytes = new Array[Byte](16)
val key = new UnsafeRow(1)
key.pointTo(keyBytes, Platform.BYTE_ARRAY_OFFSET, 16)
val value = new UnsafeRow(1)
value.pointTo(valueBytes, Platform.BYTE_ARRAY_OFFSET, 16)
value.setInt(0, 555)
var s = 0
while (i < N) {
key.setInt(0, i % 1000)
if (key.equals(value)) {
s += 1
}
i += 1
}
}
benchmark.addCase("Java HashMap (Long)") { _ =>
var i = 0
val keyBytes = new Array[Byte](16)
val valueBytes = new Array[Byte](16)
val value = new UnsafeRow(1)
value.pointTo(valueBytes, Platform.BYTE_ARRAY_OFFSET, 16)
value.setInt(0, 555)
val map = new HashMap[Long, UnsafeRow]()
while (i < 65536) {
value.setInt(0, i)
map.put(i.toLong, value)
i += 1
}
var s = 0
i = 0
while (i < N) {
if (map.get(i % 100000) != null) {
s += 1
}
i += 1
}
}
benchmark.addCase("Java HashMap (two ints) ") { _ =>
var i = 0
val valueBytes = new Array[Byte](16)
val value = new UnsafeRow(1)
value.pointTo(valueBytes, Platform.BYTE_ARRAY_OFFSET, 16)
value.setInt(0, 555)
val map = new HashMap[Long, UnsafeRow]()
while (i < 65536) {
value.setInt(0, i)
val key = (i.toLong << 32) + Integer.rotateRight(i, 15)
map.put(key, value)
i += 1
}
var s = 0
i = 0
while (i < N) {
val key = ((i & 100000).toLong << 32) + Integer.rotateRight(i & 100000, 15)
if (map.get(key) != null) {
s += 1
}
i += 1
}
}
benchmark.addCase("Java HashMap (UnsafeRow)") { _ =>
var i = 0
val keyBytes = new Array[Byte](16)
val valueBytes = new Array[Byte](16)
val key = new UnsafeRow(1)
key.pointTo(keyBytes, Platform.BYTE_ARRAY_OFFSET, 16)
val value = new UnsafeRow(1)
value.pointTo(valueBytes, Platform.BYTE_ARRAY_OFFSET, 16)
value.setInt(0, 555)
val map = new HashMap[UnsafeRow, UnsafeRow]()
while (i < 65536) {
key.setInt(0, i)
value.setInt(0, i)
map.put(key, value.copy())
i += 1
}
var s = 0
i = 0
while (i < N) {
key.setInt(0, i % 100000)
if (map.get(key) != null) {
s += 1
}
i += 1
}
}
Seq(false, true).foreach { optimized =>
benchmark.addCase(s"LongToUnsafeRowMap (opt=$optimized)") { _ =>
var i = 0
val valueBytes = new Array[Byte](16)
val value = new UnsafeRow(1)
value.pointTo(valueBytes, Platform.BYTE_ARRAY_OFFSET, 16)
value.setInt(0, 555)
val taskMemoryManager = new TaskMemoryManager(
new UnifiedMemoryManager(
new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, "false"),
Long.MaxValue,
Long.MaxValue / 2,
1),
0)
val map = new LongToUnsafeRowMap(taskMemoryManager, 64)
while (i < 65536) {
value.setInt(0, i)
val key = i % 100000
map.append(key, value)
i += 1
}
if (optimized) {
map.optimize()
}
var s = 0
i = 0
while (i < N) {
val key = i % 100000
if (map.getValue(key, value) != null) {
s += 1
}
i += 1
}
}
}
Seq("off", "on").foreach { heap =>
benchmark.addCase(s"BytesToBytesMap ($heap Heap)") { _ =>
val taskMemoryManager = new TaskMemoryManager(
new UnifiedMemoryManager(
new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, s"${heap == "off"}")
.set(MEMORY_OFFHEAP_SIZE.key, "102400000"),
Long.MaxValue,
Long.MaxValue / 2,
1),
0)
val map = new BytesToBytesMap(taskMemoryManager, 1024, 64L << 20)
val keyBytes = new Array[Byte](16)
val valueBytes = new Array[Byte](16)
val key = new UnsafeRow(1)
key.pointTo(keyBytes, Platform.BYTE_ARRAY_OFFSET, 16)
val value = new UnsafeRow(1)
value.pointTo(valueBytes, Platform.BYTE_ARRAY_OFFSET, 16)
var i = 0
val numKeys = 65536
while (i < numKeys) {
key.setInt(0, i % 65536)
val loc = map.lookup(key.getBaseObject, key.getBaseOffset, key.getSizeInBytes,
Murmur3_x86_32.hashLong(i % 65536, 42))
if (!loc.isDefined) {
loc.append(key.getBaseObject, key.getBaseOffset, key.getSizeInBytes,
value.getBaseObject, value.getBaseOffset, value.getSizeInBytes)
}
i += 1
}
i = 0
var s = 0
while (i < N) {
key.setInt(0, i % 100000)
val loc = map.lookup(key.getBaseObject, key.getBaseOffset, key.getSizeInBytes,
Murmur3_x86_32.hashLong(i % 100000, 42))
if (loc.isDefined) {
s += 1
}
i += 1
}
}
}
benchmark.addCase("Aggregate HashMap") { _ =>
var i = 0
val numKeys = 65536
val schema = new StructType()
.add("key", LongType)
.add("value", LongType)
val map = new AggregateHashMap(schema)
while (i < numKeys) {
val row = map.findOrInsert(i.toLong)
row.setLong(1, row.getLong(1) + 1)
i += 1
}
var s = 0
i = 0
while (i < N) {
if (map.find(i % 100000) != -1) {
s += 1
}
i += 1
}
}
benchmark.run()
}
}
}
| WindCanDie/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/AggregateBenchmark.scala | Scala | apache-2.0 | 18,539 |
package mimir.parser
import fastparse._, NoWhitespace._
import sparsity.parser._
object MimirKeyword
{
def any[_:P] = P(
Elements.anyKeyword | (
StringInIgnoreCase(
"ADAPTIVE",
"ANALYZE",
"ASSIGNMENTS",
"COMPARE",
"DRAW",
"FEATURES",
"FEEDBACK",
"INTO",
"LENS",
"LINK",
"LOAD",
"OF",
"PLOT",
"PROVENANCE",
"DEPENDENCY",
"RELOAD",
"SCHEMA",
"STAGING",
"USING",
"SAMPLE",
"FRACTION",
"STRATIFIED"
).! ~ !CharIn("a-zA-Z0-9_")
)
)
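  // Parses any keyword and succeeds only when it matches (case-insensitively)
  // one of the expected keywords, discarding the captured text.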
def apply[_:P](expected: String*) = P(
any.filter { kw => expected.exists { _.equalsIgnoreCase(kw) } }
.map { _ => () }
)
} | UBOdin/mimir | src/main/scala/mimir/parser/MimirKeyword.scala | Scala | apache-2.0 | 768 |
package org.jetbrains.plugins.scala.lang.dfa.invocationInfo
import com.intellij.psi.{PsiElement, PsiMember, PsiNamedElement}
import org.jetbrains.plugins.scala.extensions.{ObjectExt, PsiMemberExt, PsiNamedElementExt}
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.ScSyntheticFunction
import org.jetbrains.plugins.scala.lang.psi.types.api.Any
import org.jetbrains.plugins.scala.lang.psi.types.{ApplicabilityProblem, ScType}
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveResult
case class InvokedElement(psiElement: PsiElement) {
override def toString: String = psiElement match {
case synthetic: ScSyntheticFunction => s"$synthetic: ${synthetic.name}"
case namedMember: PsiNamedElement with PsiMember => Option(namedMember.containingClass) match {
case Some(containingClass) => s"${containingClass.name}#${namedMember.name}"
case _ => s"${namedMember.name}"
}
case _ => s"Invoked element of unknown type: $psiElement"
}
def isSynthetic: Boolean = psiElement.is[ScSyntheticFunction]
def simpleName: Option[String] = psiElement match {
case namedElement: PsiNamedElement => Some(namedElement.name)
case _ => None
}
def qualifiedName: Option[String] = psiElement match {
case namedMember: PsiNamedElement with PsiMember => namedMember.qualifiedNameOpt
case _ => None
}
def returnType: ScType = psiElement match {
case synthetic: ScSyntheticFunction => synthetic.retType
case function: ScFunction => function.returnType.getOrAny
case _ => Any(psiElement.getProject)
}
}
object InvokedElement {
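  /**
   * Builds an [[InvokedElement]] from the resolve target, but only when the
   * call site has no applicability problems.
   */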
def fromTarget(target: Option[ScalaResolveResult], problems: Seq[ApplicabilityProblem]): Option[InvokedElement] = {
if (problems.isEmpty) target.map(_.element).map(InvokedElement(_)) else None
}
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/dfa/invocationInfo/InvokedElement.scala | Scala | apache-2.0 | 1,882 |
package akkaviz.serialization.serializers
import akkaviz.serialization.{AkkaVizSerializer, SerializationContext}
import upickle.Js
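/**
 * Serializes any `Throwable` into a small JSON object carrying its class name
 * and message, e.g. (illustrative) `new RuntimeException("boom")` becomes
 * `{"$type": "java.lang.RuntimeException", "message": "boom"}`.
 */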
case object ThrowableSerializer extends AkkaVizSerializer {
override def canSerialize(obj: scala.Any): Boolean = obj match {
    case _: Throwable => true
case _ => false
}
override def serialize(obj: scala.Any, context: SerializationContext): Js.Value = {
obj match {
case t: Throwable => Js.Obj(
"$type" -> Js.Str(t.getClass.getName),
"message" -> Js.Str(t.getMessage)
)
}
}
}
| blstream/akka-viz | monitoring/src/main/scala/akkaviz/serialization/serializers/ThrowableSerializer.scala | Scala | mit | 572 |
/*
* Copyright 2010-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package common
import scala.language.implicitConversions
import scala.xml._
/*
* This file contains common conversions and other utilities to make
* conversions that are common
*/
/**
* This trait is used to unify `String`s and `[[scala.xml.NodeSeq NodeSeq]]`s
 * into one type. It is used in conjunction with the implicit conversions defined
* in its [[StringOrNodeSeq$ companion object]].
*/
sealed trait StringOrNodeSeq {
def nodeSeq: scala.xml.NodeSeq
}
/**
* Provides implicit conversions to the `StringOrNodeSeq` trait, which can in
* turn be implicitly converted to `[[scala.xml.NodeSeq NodeSeq]]`. This allows
* using a `String` as a natural part of `NodeSeq` APIs without having to
* explicitly wrap it in `scala.xml.Text` or having to write overloads for all
* methods that should accept both.
*
* This is used in certain Lift APIs, for example, to accept either a `String`
* or more complex content. For example, a `button` can have either a simple
* label or complex HTML content. HTML APIs that can do this can accept a
* parameter of type `StringOrNodeSeq` to allow the user to pass either in as
* their needs dictate.
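 *
 * A minimal sketch (the `render` method is hypothetical, shown only to
 * illustrate the conversions):
 *
 * {{{
 * def render(content: StringOrNodeSeq): NodeSeq = content.nodeSeq
 *
 * render("Click me")            // a String is wrapped as Text("Click me")
 * render(<span>Click me</span>) // a NodeSeq is passed through unchanged
 * }}}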
*/
object StringOrNodeSeq {
implicit def strTo[T <% String](str: T): StringOrNodeSeq =
new StringOrNodeSeq {
def nodeSeq: NodeSeq = Text(str)
}
/**
* This is written in terms of a `Seq[Node]` to make sure Scala converts
* everything it should to a `StringOrNodeSeq`. `NodeSeq` is a `Seq[Node]`.`
*/
implicit def nsTo(ns: Seq[Node]): StringOrNodeSeq =
new StringOrNodeSeq {
def nodeSeq: NodeSeq = ns
}
implicit def toNodeSeq(sns: StringOrNodeSeq): NodeSeq = sns.nodeSeq
}
/**
* This trait is used to unify `()=>String` and `String` into one type. It is
* used in conjunction with the implicit conversions defined in its [[StringFunc$
* companion object]].
*/
sealed trait StringFunc {
def func: () => String
}
/**
* Provides implicit conversions to the `StringFunc` trait. This allows using a
* `String` as a natural part of APIs that want to allow the flexibility of a
* `()=>String` without having to write overloads for all methods that should
* accept both.
*
* Lift's Menu API, for example, allows CSS classes to be defined either as
* a `String` or a `()=>String`. The latter could use the current request and
* session state to do more interesting things than a hard-coded `String` would,
* while the former is simpler to use.
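 *
 * A minimal sketch (the `greeting` method is hypothetical, shown only to
 * illustrate the conversions):
 *
 * {{{
 * def greeting(label: StringFunc): String = label.func()
 *
 * greeting("Hello")                                  // constant label
 * greeting(() => "Now: " + System.currentTimeMillis) // recomputed on each call
 * }}}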
*/
object StringFunc {
/**
* Implicit conversion from any type that in turn has an implicit conversion
* to a `String`, to a `StringFunc`. In particular, this means that if a given
* method takes a `StringFunc` as a parameter, it can accept either a `String`
* and any type that has an implicit conversion to `String` in scope.
*/
implicit def strToStringFunc[T](str: T)(implicit f: T => String): StringFunc =
ConstStringFunc(f(str))
/**
* Implicit conversion from any function that produces a type that in turn has
* an implicit conversion to a `String`, to a `StringFunc`. In particular,
* this means that if a given method takes a `StringFunc` as a parameter, it
 * can accept either a function that returns a `String` or a function that
* returns any other type that has an implicit conversion to `String` in
* scope.
*/
implicit def funcToStringFunc[T](func: () => T)(implicit f: T => String): StringFunc =
RealStringFunc(() => f(func()))
}
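// A hedged usage sketch added for illustration: `cssClass` and the sample values are assumptions.
// It shows that both a plain String and a () => String satisfy a StringFunc parameter through the
// implicits above.
private[common] object StringFuncUsageSketch {
  def cssClass(cls: StringFunc): String = cls.func()
  val constant: String = cssClass("nav-item")                      // becomes a ConstStringFunc
  val dynamic: String = cssClass(() => "item-" + System.nanoTime)  // becomes a RealStringFunc
}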
/**
* See `[[StringFunc]]`.
*/
final case class RealStringFunc(func: () => String) extends StringFunc
/**
* See `[[StringFunc]]`.
*/
final case class ConstStringFunc(str: String) extends StringFunc {
lazy val func = () => str
}
/**
* This trait is used to unify `()=>[[scala.xml.NodeSeq NodeSeq]]` and
* `[[scala.xml.NodeSeq NodeSeq]]` into one type. It is used in conjunction
* with the implicit conversions defined in its [[NodeSeqFunc$ companion
* object]].
*/
@deprecated("""Lift now mostly uses `NodeSeq=>NodeSeq` transformations rather
than `NodeSeq` constants; consider doing the same.""","3.0")
sealed trait NodeSeqFunc {
def func: () => NodeSeq
}
/**
* Provides implicit conversions to the `NodeSeqFunc` trait. This allows using a
* `[[scala.xml.NodeSeq NodeSeq]]` as a natural part of APIs that want to allow
* the flexibility of a `()=>[[scala.xml.NodeSeq NodeSeq]]` without having to
* write overloads for all methods that should accept both.
*/
@deprecated("""Lift now mostly uses `NodeSeq=>NodeSeq` transformations rather
than `NodeSeq` constants; consider doing the same.""","3.0")
object NodeSeqFunc {
/**
* If you've got something that can be converted into a `NodeSeq` (a constant)
* but want a `NodeSeqFunc`, this implicit will do the conversion.
*/
implicit def nsToNodeSeqFunc[T](ns: T)(implicit f: T => NodeSeq): NodeSeqFunc =
ConstNodeSeqFunc(f(ns))
/**
* If you've got something that can be converted into a `NodeSeq` function but
* want a `NodeSeqFunc`, this implicit will do the conversion.
*/
implicit def funcToNodeSeqFunc[T](func: () => T)(implicit f: T => NodeSeq): NodeSeqFunc =
RealNodeSeqFunc(() => f(func()))
}
/**
* The case class that holds a `[[scala.xml.NodeSeq NodeSeq]]` function.
*/
@deprecated("""Lift now mostly uses `NodeSeq=>NodeSeq` transformations rather
than `NodeSeq` constants; consider doing the same.""","3.0")
final case class RealNodeSeqFunc(func: () => NodeSeq) extends NodeSeqFunc
/**
* The case class that holds the `[[scala.xml.NodeSeq NodeSeq]]` constant.
*/
@deprecated("""Lift now mostly uses `NodeSeq=>NodeSeq` transformations rather
than `NodeSeq` constants; consider doing the same.""","3.0")
final case class ConstNodeSeqFunc(ns: NodeSeq) extends NodeSeqFunc {
lazy val func = () => ns
}
| lift/framework | core/common/src/main/scala/net/liftweb/common/Conversions.scala | Scala | apache-2.0 | 6,415 |
package com.github.aselab.activerecord
import play.api._
import Play.current
class ActiveRecordPlugin extends Plugin {
implicit val classLoader = Play.application.classloader
lazy val activeRecordTables = current.configuration.getConfig("schema")
.map(_.keys).getOrElse(List("models.Tables")).map(ActiveRecordTables.find)
override def onStart() {
activeRecordTables.foreach(_.initialize)
}
override def onStop() {
activeRecordTables.foreach(_.cleanup)
}
}
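// Hedged illustration (the table object names below are assumptions, not taken from this project):
// one plausible application.conf shape for the lookup above; each key under "schema" names an
// ActiveRecordTables object to initialize, and "models.Tables" is the fallback when no block exists.
//
//   schema {
//     "models.Tables1" = ""
//     "models.Tables2" = ""
//   }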
| xdougx/scala-activerecord | play2/src/main/scala/ActiveRecordPlugin.scala | Scala | mit | 485 |
package com.taig.tmpltr.engine.html.property
import com.taig.tmpltr.Property
trait form
{
class method(method: String) extends Property( method )
object method
{
object get extends method( "GET" )
object post extends method( "POST" )
}
} | Taig/Play-Tmpltr | app/com/taig/tmpltr/engine/html/property/form.scala | Scala | mit | 247 |
package test.scala
import org.specs.Specification
import org.specs.mock.Mockito
import com.protose.resque._
import com.protose.resque.Machine._
import java.net.InetAddress
import java.lang.management.ManagementFactory
object MachineSpec extends Specification with Mockito {
"returns the hostname" in {
hostname must_== InetAddress.getLocalHost.getHostName
}
"returns the pid" in {
val expectedPid = ManagementFactory.getRuntimeMXBean.getName.split("@").first
pid must_== expectedPid
}
}
// vim: set ts=4 sw=4 et:
| jamesgolick/scala-resque-worker | src/test/scala/MachineSpec.scala | Scala | mit | 556 |
package dotty
package tools
package dotc
import scala.language.unsafeNulls
import java.io.{File => JFile}
import java.nio.file.{Files, Path, Paths}
import org.junit.Assume.assumeTrue
import org.junit.{AfterClass, Test}
import org.junit.experimental.categories.Category
import scala.concurrent.duration._
import vulpix._
class IdempotencyTests {
import TestConfiguration._
import IdempotencyTests._
import CompilationTest.aggregateTests
// ignore flaky tests
val filter = FileFilter.NoFilter
@Category(Array(classOf[SlowTests]))
@Test def idempotency: Unit = {
implicit val testGroup: TestGroup = TestGroup("idempotency")
val opt = defaultOptions
val posIdempotency = aggregateTests(
compileFilesInDir("tests/pos", opt, filter)(TestGroup("idempotency/posIdempotency1")),
compileFilesInDir("tests/pos", opt, filter)(TestGroup("idempotency/posIdempotency2")),
)
val orderIdempotency = {
val tests =
for {
testDir <- new JFile("tests/order-idempotency").listFiles() if testDir.isDirectory
} yield {
val sources = TestSources.sources(testDir.toPath)
aggregateTests(
compileList(testDir.getName, sources, opt)(TestGroup("idempotency/orderIdempotency1")),
compileList(testDir.getName, sources.reverse, opt)(TestGroup("idempotency/orderIdempotency2"))
)
}
aggregateTests(tests: _*)
}
def check(name: String) = {
val files = List(s"tests/idempotency/$name.scala", "tests/idempotency/IdempotencyCheck.scala")
compileList(name, files, defaultOptions)(TestGroup("idempotency/check"))
}
val allChecks = aggregateTests(
check("CheckOrderIdempotency"),
// Disabled until strawman is fixed
// check("CheckStrawmanIdempotency"),
check("CheckPosIdempotency")
)
val allTests = aggregateTests(orderIdempotency, posIdempotency)
val tests = allTests.keepOutput.checkCompile()
allChecks.checkRuns()
tests.delete()
}
}
object IdempotencyTests extends ParallelTesting {
// Test suite configuration --------------------------------------------------
def maxDuration = 30.seconds
def numberOfSlaves = 5
def safeMode = Properties.testsSafeMode
def isInteractive = SummaryReport.isInteractive
def testFilter = Properties.testsFilter
def updateCheckFiles: Boolean = Properties.testsUpdateCheckfile
implicit val summaryReport: SummaryReporting = new SummaryReport
@AfterClass def tearDown(): Unit = {
super.cleanup()
summaryReport.echoSummary()
}
}
| lampepfl/dotty | compiler/test/dotty/tools/dotc/IdempotencyTests.scala | Scala | apache-2.0 | 2,583 |
package ohnosequences.bio4j.tests
import java.io._
import org.scalatest._
import ohnosequences.statika.cli.StatikaEC2._
import ohnosequences.awstools.ec2._
import ohnosequences.awstools.ec2.{Tag => Ec2Tag}
import ohnosequences.typesets._
import ohnosequences.statika._
import ohnosequences.statika.aws._
import ohnosequences.statika.ami._
import ohnosequences.bio4j.bundles._
class ApplicationTest extends FunSuite with ParallelTestExecution {
  // to run this test you need to have this credentials file in your project folder
val ec2 = EC2.create(new File("Intercrossing.credentials"))
val dist = ReleaseDistribution
val bundle = Release.GITaxonomyNodes
test("Apply "+bundle.name+" bundle to an instance"){
val userscript = dist.userScript(bundle, RoleCredentials)
println(userscript)
val specs = InstanceSpecs(
instanceType = InstanceType.m1_large
, amiId = dist.ami.id
, keyName = "statika"
, userData = userscript
, instanceProfile = Some("bio4j-releaser")
)
ec2.applyAndWait(bundle.name, specs, 1) match {
case List(inst) => assert(inst.getTagValue("statika-status") == Some("success"))
case _ => assert(false)
}
}
}
| bio4j/modules | modules/src/test/scala/Bio4jReleaseTest.scala | Scala | agpl-3.0 | 1,205 |
package io.finch.petstore
import com.twitter.finagle.Service
import com.twitter.finagle.httpx.{FileElement, Request, RequestBuilder, Response}
import com.twitter.io.{Buf, Reader}
import com.twitter.util.{Duration, Await}
import io.finch.test.ServiceSuite
import org.scalatest.Matchers
import org.scalatest.fixture.FlatSpec
trait PetstorePetServiceSuite { this: FlatSpec with ServiceSuite with Matchers =>
def createService(): Service[Request, Response] = {
val db = new PetstoreDb()
val rover = Pet(None, "Rover", Nil, Some(Category(None, "dog")), Some(Seq(Tag(None, "puppy"),
Tag(None, "white"))), Some(Available))
db.addPet(rover)
db.addPet(Pet(None, "Sadaharu", Nil, Some(Category(None, "inugami")), Some(Nil), Some(Available)))
db.addPet(Pet(None, "Despereaux", Nil, Some(Category(None, "mouse")), Some(Nil), Some(Available)))
db.addPet(Pet(None, "Alexander", Nil, Some(Category(None, "mouse")), Some(Nil), Some(Pending)))
db.addPet(Pet(None, "Wilbur", Nil, Some(Category(None, "pig")), Some(Nil), Some(Adopted)))
db.addPet(Pet(None, "Cheshire Cat", Nil, Some(Category(None, "cat")), Some(Nil), Some(Available)))
db.addPet(Pet(None, "Crookshanks", Nil, Some(Category(None, "cat")), Some(Nil), Some(Available)))
val mouseCircusOrder: Order = Order(None, Some(4), Some(100), Some("2015-07-01T17:36:58.190Z"), Some(Placed),
Some(false))
db.addOrder(mouseCircusOrder)
val coraline: User = User(None, "coraline", Some("Coraline"), Some("Jones"), None, "becarefulwhatyouwishfor", None)
db.addUser(coraline)
endpoint.makeService(db)
}
//getPetEndpt test
"The PetstoreApp" should "return valid pets" in { f =>
val request = Request("/pet/1")
val result: Response = f(request, Duration.fromSeconds(10))
result.statusCode shouldBe 200
}
it should "fail to return invalid pets" in { f =>
val request = Request("/pet/100")
val result: Response = f(request, Duration.fromSeconds(10))
result.statusCode shouldBe 404
}
//addPetEndpt test
it should "add valid pets" in { f =>
val request: Request = RequestBuilder()
.url("http://localhost:8080/pet").buildPost(
Buf.Utf8(s"""
| {
| "name": "Ell",
| "photoUrls":[],
| "category":{"name":"Wyverary"},
| "tags":[{"name":"Wyvern"}, {"name":"Library"}],
| "status":"pending"
| }
""".stripMargin)
)
val result: Response = f(request, Duration.fromSeconds(10))
result.statusCode shouldBe 200
}
it should "fail appropriately when adding invalid pets" in { f =>
val request: Request = RequestBuilder()
.url("http://localhost:8080/pet").buildPost(
Buf.Utf8(s"""
| {
| "id": 0,
| "name": "Ell",
| "photoUrls":[],
| "category":{"name":"Wyverary"},
| "tags":[{"name":"Wyvern"}, {"name":"Library"}],
| "status":"pending"
| }
""".stripMargin)
)
val result: Response = f(request)
result.statusCode shouldBe 404
}
//updatePetEndpt test
it should "update valid pets" in { f =>
val request: Request = RequestBuilder()
.url("http://localhost:8080/pet").buildPut(
Buf.Utf8(s"""
| {
| "id": 0,
| "name": "A-Through-L",
| "photoUrls":[],
| "category":{"name":"Wyverary"},
| "tags":[{"name":"Wyvern"}, {"name":"Library"}],
| "status":"pending"
| }
""".stripMargin))
val result: Response = f(request)
result.statusCode shouldBe 200
}
it should "fail attempts to update pets without specifying an ID to modify" in { f =>
val request: Request = RequestBuilder()
.url("http://localhost:8080/pet").buildPut(
Buf.Utf8(s"""
| {
| "name": "A-Through-L",
| "photoUrls":[],
| "category":{"name":"Wyverary"},
| "tags":[{"name":"Wyvern"}, {"name":"Library"}],
| "status":"pending"
| }
""".stripMargin))
val result: Response = f(request)
result.statusCode shouldBe 404
}
//getPetsByStatusEndpt test
it should "successfully find pets by status" in { f =>
val request: Request = RequestBuilder()
.url("http://localhost:8080/pet/findByStatus?status=available%2C%20pending")
.buildGet
val result: Response = f(request)
result.statusCode shouldBe 200
}
//getPetsByTagEndpt test
it should "successfully find pets by tag" in { f =>
val request: Request = RequestBuilder()
.url("http://localhost:8080/pet/findByTags?tags=puppy%2C%20white")
.buildGet
val result: Response = f(request)
result.statusCode shouldBe 200
}
//deletePetEndpt test
it should "successfully delete existing pets" in { f =>
val request: Request = RequestBuilder()
.url("http://localhost:8080/pet/0").buildDelete
val result: Response = f(request)
result.statusCode shouldBe 204
}
it should "fail to delete nonexistant pets" in { f =>
val request: Request = RequestBuilder()
.url("http://localhost:8080/pet/100").buildDelete
val result: Response = f(request)
result.statusCode shouldBe 404
}
//updatePetViaForm
it should "allow the updating of pets via form data" in { f =>
val formData: Map[String, String] = Map("name" -> "Higgins", "status" -> "pending")
val request: Request = RequestBuilder()
.url("http://localhost:8080/pet/0")
.addFormElement(("name","Higgins"))
.addFormElement(("status","pending"))
.buildFormPost()
val result: Response = f(request)
result.statusCode shouldBe 200
}
//Add image
it should "accept file uploads" in { f =>
val imageDataStream = getClass.getResourceAsStream("/doge.jpg")
    // conversion chain: InputStream -> Reader -> Future[Buf] -> Buf
val imageData: Buf = Await.result(Reader.readAll(Reader.fromStream(imageDataStream)))
val request: Request = RequestBuilder()
.url("http://localhost:8080/pet/1/uploadImage")
.add(FileElement("file", imageData))
.buildFormPost(true)
val result: Response = f(request)
result.statusCode shouldBe 200
//Testing for ability to add more than one file
val totoroDataStream = getClass.getResourceAsStream("/totoro.jpg")
val toroData: Buf = Await.result(Reader.readAll(Reader.fromStream(totoroDataStream)))
val req: Request = RequestBuilder()
.url("http://localhost:8080/pet/1/uploadImage")
.add(FileElement("file", imageData))
.buildFormPost(true)
val outcome: Response = f(req)
outcome.statusCode shouldBe 200
}
it should "be able to add an array of users" in { f =>
val request: Request = RequestBuilder()
.url("http://localhost:8080/user/createWithList").buildPost(
Buf.Utf8(s"""
|[
| {
| "username": "strawberry",
| "firstName": "Gintoki",
| "lastName": "Sakata",
| "email": "[email protected]",
| "password": "independenceDei"
| }
|]
""".stripMargin)
)
val result: Response = f(request)
result.statusCode shouldBe 200
}
}
class PetstorePetServiceSpec extends FlatSpec with ServiceSuite with PetstorePetServiceSuite with Matchers
| yonglehou/finch | petstore/src/test/scala/io/finch/petstore/PetstorePetServiceSpec.scala | Scala | apache-2.0 | 7,612 |
/*
* Copyright (c) 2019. Yuriy Stul
*/
package com.stulsoft.poc.json.json4s
import org.json4s.DefaultFormats
import org.json4s.JsonAST.{JArray, JField, JObject, JString}
import org.json4s.jackson.JsonMethods.parse
import scala.io.Source
/** Finds JSON objects with the same value of the <i>name</i> field
*
* @author Yuriy Stul
*/
object FindDuplicates2 extends App {
implicit val formats: DefaultFormats = DefaultFormats
println("==>FindDuplicates2")
findDuplicates("arrayOfObjects3.json")
def findDuplicates(path: String): Unit = {
println(s"Looking in $path")
try {
val jsonObject = parse(Source.fromResource(path).getLines().mkString)
val names = for {
JObject(child) <- jsonObject
JField("items", items) <- child
JObject(item) <- items
JField("name", JString(name)) <- item
} yield name
val duplicates = names.groupBy(identity).collect { case (x, List(_, _, _*)) => x }
if (duplicates.isEmpty)
println("Do duplicates were found")
else
println(s"Duplicates: $duplicates")
}
catch {
case e: Exception => println(s"sError: ${e.getMessage}")
}
}
}
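// Hedged illustration (the real test resource is not shown here): the for-comprehension above expects
// a document shaped roughly like the one below, for which "b" would be reported as a duplicate name.
//
//   { "items": [ { "name": "a" }, { "name": "b" }, { "name": "b" } ] }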
| ysden123/poc | scala-json/src/main/scala/com/stulsoft/poc/json/json4s/FindDuplicates2.scala | Scala | mit | 1,173 |
/****************************************************************************
* Copyright (C) 2015 Łukasz Szpakowski. *
* *
* This software is licensed under the GNU General Public License *
* v3 or later. See the LICENSE file for the full licensing terms. *
****************************************************************************/
package pl.luckboy.issuenotifier
import android.content.BroadcastReceiver
import android.content.Context
import android.content.Intent
import AndroidUtils._
class BootReceiver extends BroadcastReceiver
{
private val mTag = getClass().getSimpleName()
override def onReceive(context: Context, intent: Intent)
{
log(mTag, "onReceive(): intent.getAction() = " + intent.getAction())
if(Settings(context).startedService) context.startService(new Intent(context, classOf[MainService]))
}
}
| luckboy/IssueNotifier | src/main/scala/pl/luckboy/issuenotifier/BootReceiver.scala | Scala | gpl-3.0 | 972 |
/************************************************************************
Tinaviz
*************************************************************************
This application is part of the Tinasoft project: http://tinasoft.eu
Tinaviz main developer: julian.bilcke @ iscpif.fr (twitter.com/flngr)
Copyright (C) 2009-2011 CREA Lab, CNRS/Ecole Polytechnique UMR 7656 (Fr)
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
************************************************************************/
package eu.tinasoft.tinaviz.graph
import math._
import math.Numeric.DoubleAsIfIntegral
import util.parsing.input.OffsetPosition
import reflect.ValDef
import scala.Math
object Metrics {
/**
* Compute the number of single nodes
*/
def nbSingles(g: Graph): Int = {
var s = 0
g.links.foreach {
case lnk => if (lnk.size == 0) s += 1
}
s
}
/**
* Compute the number of edges
*/
def nbEdges(g: Graph): Int = {
var s = 0
g.links.foreach {
case lnk => s += lnk.size
}
s
}
/**
* Compute the number of nodes
*/
def nbNodes(g: Graph): Int = g.uuid.size
/**
* Compute the out degree array
*/
def outDegree(g: Graph): Array[Int] = {
val _outDegree = g.links.map {
case m => m.size
}
_outDegree.toArray
}
/**
* Compute the in degree array
*/
def inDegree(g: Graph): Array[Int] = {
val _inDegree = g.uuid.zipWithIndex.map {
case (n, i) =>
var d = 0
g.links foreach {
case m => if (m.contains(i)) d += 1
}
d
}
_inDegree.toArray
}
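  /**
   * Builds an increasing list of `nbTicks` weight thresholds for the nodes of the given category,
   * walking the category's weights in sorted order (roughly a quantile scale). The last tick is the
   * maximum weight; an empty list is returned when the category has no nodes.
   */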
def nodeWeightRange(g: Graph, cat:String, nbTicks: Int = 100): List[Double] = {
//println("computing nodeWeightRange")
val sortedByWeight = g.weight.zipWithIndex.filter{ case t: (Double, Int) => cat.equalsIgnoreCase(g.category(t._2)) }.toList.sort {
case (t1: (Double, Int), t2: (Double, Int)) => (t1._1 < t2._1)
}
//println("weights:")
//sortedByWeight.foreach {
// case (weight, id) => println(" - "+weight)
//}
if (sortedByWeight.length == 0) return List.empty[Double]
var maxIndex = 0
var maxValue = 0.0
var remainingNodes = sortedByWeight.length
//println(" remainingNodes: " + remainingNodes)
((0 until nbTicks).map {
case tickId =>
if (tickId == nbTicks-1) {
sortedByWeight.last._1
} else {
//println(" " + tickId + ":")
/*
((g.nbNodes - remainingNodes) + Math.floor(remainingNodes / (nbTicks - tickId))).toInt
((588 - 588) + Math.floor(588 / (100 - 0))).toInt
(0 + Math.floor(remainingNodes / 100)).toInt
(0+5.0).toInt
- tickIndex: 5 sorted weight size: 67
((g.nbNodes - remainingNodes) + Math.floor(remainingNodes / (nbTicks - tickId))).toInt
((588 - 522) + Math.floor(522 / (100 - 1))).toInt
(66 + Math.floor(remainingNodes / 99)).toInt
(66+5.0).toInt
- tickIndex: 71 sorted weight size: 67
*/
//println("((g.nbNodes - remainingNodes) + Math.floor(remainingNodes / (nbTicks - tickId))).toInt")
//println("(("+sortedByWeight.length+" - "+remainingNodes+") + Math.floor("+remainingNodes+" / ("+nbTicks+" - "+tickId+"))).toInt")
//println("("+(sortedByWeight.length - remainingNodes)+" + Math.floor("+remainingNodes+" / "+(nbTicks - tickId)+")).toInt")
//println("("+(sortedByWeight.length - remainingNodes) + "+" + Math.floor(remainingNodes / (nbTicks - tickId))+").toInt")
val tickIndex = ((sortedByWeight.length - remainingNodes) + Math.floor(remainingNodes / (nbTicks - tickId))).toInt
//println(" - tickIndex: " + tickIndex+" sorted weight size: "+sortedByWeight.length)
val t = sortedByWeight(tickIndex)
maxValue = t._1
//println(" - maxValue: " + maxValue)
// trouver l'index maximum qui donne t1
sortedByWeight.zipWithIndex.foreach {
case ((realWeight, nodeId), sortedIndex) =>
if (realWeight <= maxValue) {
maxIndex = sortedIndex
}
}
//println("maxIndex: " + maxIndex)
//println(" remainingNodes before: " + remainingNodes)
remainingNodes = sortedByWeight.length - maxIndex
//println(" remainingNodes after: " + remainingNodes)
maxValue
}
}).toList
}
def edgeWeightRange(g: Graph, cat:String, nbTicks: Int = 100): List[Double] = {
//println("ERROR ERROR ERROR computing edgeWeightRange with wrong values")
val sortedByWeight = g.edgeWeight.zipWithIndex.filter{
case t: (Double, Int) =>
val (sourceId, targetId) = g.edgeIndex(t._2)
val (sourceCat, targetCat) = (g.category(sourceId), g.category(targetId))
// TODO CHECK AND FILTER THE WEIGHT
// check if the source and target are the same category, and if this category must be kept
cat.equalsIgnoreCase(sourceCat) && cat.equalsIgnoreCase(targetCat)
}.toList.sort {
case (t1: (Double, Int), t2: (Double, Int)) =>
(t1._1 < t2._1)
}
//println("weights:")
//sortedByWeight.foreach {
// case (weight, id) => println(" - "+weight)
//}
if (sortedByWeight.length == 0) return List.empty[Double]
var maxIndex = 0
var maxValue = 0.0
var remainingEdges = sortedByWeight.length
//println(" remainingNodes: " + remainingNodes)
((0 until nbTicks).map {
case tickId =>
if (tickId == nbTicks-1) {
sortedByWeight.last._1
} else {
//println(" " + tickId + ":")
//println("(("+sortedByWeight.length+" - "+remainingEdges+") + Math.floor("+remainingEdges+" / ("+nbTicks+" - "+tickId+"))).toInt")
//println("(("+(sortedByWeight.length - remainingEdges)+") + Math.floor("+remainingEdges+" / ("+(nbTicks - tickId)+"))).toInt")
//println("(("+(sortedByWeight.length - remainingEdges)+") + Math.floor("+remainingEdges / (nbTicks - tickId)+")).toInt")
val tickIndex = ((sortedByWeight.length - remainingEdges) + Math.floor(remainingEdges / (nbTicks - tickId))).toInt
//println(" - tickIndex: " + tickIndex+" compared to : "+sortedByWeight.length)
val t = sortedByWeight(tickIndex)
maxValue = t._1
//println(" - maxValue: " + maxValue)
// trouver l'index maximum qui donne t1
sortedByWeight.zipWithIndex.foreach {
case ((realWeight, nodeId), sortedIndex) =>
if (realWeight <= maxValue) {
maxIndex = sortedIndex
}
}
//println("maxIndex: " + maxIndex)
//println(" remainingNodes before: " + remainingNodes)
remainingEdges = sortedByWeight.length - maxIndex
//println(" remainingNodes after: " + remainingNodes)
maxValue
}
}).toList
}
def connectedComponents(g: Graph): Array[Int] = {
// Calcul des partitions
var nb_partition = 0
//var nodesId = Map(g.ids.zipWithIndex: _*).map {
// case (id, id) => (id,id)
//}
var nodesIds: Set[Int] = g.ids.toSet
var partitions = Map.empty[Int, Int]
while (nodesIds.size > 0) {
val target = nodesIds.head
//nodesIds /: remove(0)
nodesIds = nodesIds - target
nb_partition += 1
var current_partition = Set(target)
partitions += target -> nb_partition
// neighbors IDs
val neighborsList = g.neighbours(target).map {
case (a, _) => g.id(a)
}.toList
var neighbors = (neighborsList.toSet -- current_partition.toSet)
while (neighbors.size > 0) {
val target2 = neighbors.head
val neighbors2 = g.neighbours(target2).map {
case (a, b) => g.id(a)
}.toSet
partitions += target2 -> nb_partition
neighbors = neighbors - target2 // only keep the last elements except the first
nodesIds = nodesIds - target2 // remove target2 from nodesIds
current_partition += target2 // append target2 to current_parititon
// do the union of neighbors 1 and 2, then compute the difference with current partition
neighbors = (neighbors | neighbors2) &~ current_partition
}
}
//println("number of partitions: "+nb_partition)
// sort the Map of ( ID -> PARTITION ) then only keep the partition's number'
val res = partitions.toList sortBy {
_._1
} map {
_._2
}
res.toArray
}
/**
* Compute the degree array
*/
def degree(g: Graph): Array[Int] = {
val _degree = g.links.zipWithIndex.map {
case (aLinks, a) =>
// var d = 0
var neighbourMap = Map.empty[Int, Boolean]
aLinks.foreach {
case (b, w) =>
if (b != a) {
val tpl: (Int, Boolean) = (b, true)
neighbourMap += tpl
}
}
g.links.zipWithIndex.map {
case (a2Links, a2) =>
a2Links.foreach {
case (b, w) =>
if (b == a && a != a2) {
// inlink!
val tpl: (Int, Boolean) = (a2, true)
neighbourMap += tpl
}
}
}
neighbourMap.size
}
_degree.toArray
}
/**
* Compute the out degree extremums
*/
def outDegreeExtremums(g: Graph): (Int, Int) = {
if (g.links.size == 0) {
(0, 0)
} else {
var max = Int.MinValue
var min = Int.MaxValue
g.links foreach {
case n =>
val d = n.size
if (d < min) min = d
if (d > max) max = d
}
(min, max)
}
}
/**
* Compute the in degree extremums
*/
def inDegreeExtremums(g: Graph): (Int, Int) = {
if (g.links.size == 0) {
(0, 0)
} else {
var max = Int.MinValue
var min = Int.MaxValue
g.ids foreach {
case id =>
var d = 0
g.links foreach {
case m => if (m.contains(id)) d += 1
}
if (d < min) min = d
if (d > max) max = d
}
(min, max)
}
}
/**
   * Compute the extremums (X max, X min, Y max, Y min)
*/
def extremums(g: Graph): (Double, Double, Double, Double) = {
if (g.position.size == 0) {
(0.0, 0.0, 0.0, 0.0)
} else {
var xMax = Double.MinValue
var xMin = Double.MaxValue
var yMax = Double.MinValue
var yMin = Double.MaxValue
g.position foreach {
case (x, y) =>
if (x < xMin) xMin = x
if (x > xMax) xMax = x
if (y < yMin) yMin = y
if (y > yMax) yMax = y
}
(xMax, xMin, yMax, yMin)
}
}
/**
   * Compute the extremums (X max, X min, Y max, Y min)
*/
def extremumsSelection(g: Graph): (Double, Double, Double, Double) = {
if (g.position.size == 0) {
(0.0, 0.0, 0.0, 0.0)
} else {
val _selectedNodes = g.position.zipWithIndex filter {
case (p, i) => g.selected(i)
}
if (_selectedNodes.size == 0) {
(0.0, 0.0, 0.0, 0.0)
} else {
var xMax = Double.MinValue
var xMin = Double.MaxValue
var yMax = Double.MinValue
var yMin = Double.MaxValue
_selectedNodes foreach {
case ((x, y), i) =>
if (x < xMin) xMin = x
if (x > xMax) xMax = x
if (y < yMin) yMin = y
if (y > yMax) yMax = y
}
(xMax, xMin, yMax, yMin)
}
}
}
// a list of positions + ID
def selectionNeighbourhood(g: Graph) = {
val tmp = g.position.zipWithIndex filter {
case (p, i) => g.selected(i)
}
g.position.zipWithIndex filter {
case (p, i1) =>
tmp.find {
case (p2, i2) =>
(g.hasAnyLink(i1, i2) || i1 == i2)
} match {
case None => false
case any => true
}
}
}
def selectionNeighbourhoodCenter(g: Graph): (Double, Double) = {
var p = (0.0, 0.0)
val N = g.selectionNeighbourhood.size.toDouble
g.selectionNeighbourhood foreach {
case ((x, y), i) => p = (p._1 + x, p._2 + y)
}
if (N != 0) (p._1 / N, p._2 / N) else (0.0, 0.0)
}
/**
   * Compute the extremums (X max, X min, Y max, Y min)
*/
def extremumsSelectionNeighbourhood(g: Graph): (Double, Double, Double, Double) = {
if (g.position.size == 0) {
(0.0, 0.0, 0.0, 0.0)
} else {
if (g.selectionNeighbourhood.size == 0) {
(0.0, 0.0, 0.0, 0.0)
} else {
var xMax = Double.MinValue
var xMin = Double.MaxValue
var yMax = Double.MinValue
var yMin = Double.MaxValue
g.selectionNeighbourhood foreach {
case ((x, y), i) =>
if (x < xMin) xMin = x
if (x > xMax) xMax = x
if (y < yMin) yMin = y
if (y > yMax) yMax = y
}
(xMax, xMin, yMax, yMin)
}
}
}
def notSingleNodesDimension(g: Graph): (Double, Double) = {
if (g.position.size == 0) {
(0.0, 0.0)
} else {
val notSingles = g.position.zipWithIndex filter {
case (pos, i) => !g.isSingle(i)
}
if (notSingles.size == 0.0) {
(0.0, 0.0)
} else {
var xMax = Double.MinValue
var xMin = Double.MaxValue
var yMax = Double.MinValue
var yMin = Double.MaxValue
notSingles foreach {
case ((x, y), i) =>
if (x < xMin) xMin = x
if (x > xMax) xMax = x
if (y < yMin) yMin = y
if (y > yMax) yMax = y
}
(abs(xMax - xMin), abs(yMax - yMin))
}
}
}
/**
   * Compute the node weight extremums per category:
   * (Document min, Document max, non-Document min, non-Document max), returned as a tuple of Double
*/
def nodeWeightExtremums(g: Graph): (Double, Double, Double, Double) = {
if (g.weight.size == 0) {
(0.0, 0.0, 0.0, 0.0)
} else {
var amax = Double.MinValue
var amin = Double.MaxValue
var bmax = Double.MinValue
var bmin = Double.MaxValue
g.weight.zipWithIndex foreach {
case (w, i) =>
if (g.category(i).equalsIgnoreCase("Document")) {
if (w < amin) amin = w
if (w > amax) amax = w
} else {
if (w < bmin) bmin = w
if (w > bmax) bmax = w
}
}
(amin, amax, bmin, bmax)
}
}
/**
   * Compute the edge weight extremums per category:
   * (Document min, Document max, NGram min, NGram max), returned as a tuple of Double
*/
def edgeWeightExtremums(g: Graph): (Double, Double, Double, Double) = {
if (g.links.size == 0) {
(0.0, 0.0, 0.0, 0.0)
} else {
var amax = Double.MinValue
var amin = Double.MaxValue
var bmax = Double.MinValue
var bmin = Double.MaxValue
g.links.zipWithIndex foreach {
case (lnks, i) =>
lnks.foreach {
case (id, weight) =>
g.category(i) match {
case "NGram" =>
if (weight < bmin) bmin = weight
if (weight > bmax) bmax = weight
case "Document" =>
if (weight < amin) amin = weight
if (weight > amax) amax = weight
}
}
}
(amin, amax, bmin, bmax)
}
}
/**
* Compute a graph's center. Very simple - you probably don't want to use that
*/
def basicCenter(g: Graph): (Double, Double) = {
    // extremums returns (xMax, xMin, yMax, yMin); the centre is the midpoint of the bounding box
    val (xmax, xmin, ymax, ymin) = g.extremums
    ((xmax + xmin) / 2.0, (ymax + ymin) / 2.0)
}
/**
* Compute a graph's barycenter
*/
def baryCenter(g: Graph): (Double, Double) = {
var p = (0.0, 0.0)
val N = g.position.size.toDouble
g.position foreach {
case (x, y) => p = (p._1 + x, p._2 + y)
}
if (N != 0) (p._1 / N, p._2 / N) else (0.0, 0.0)
}
/**
* Compute a graph's selection center
*/
def selectionCenter(g: Graph): (Double, Double) = {
var p = (0.0, 0.0)
val _selectedNodes = g.position.zipWithIndex filter {
case (p, i) => g.selected(i)
}
val N = _selectedNodes.size.toDouble
_selectedNodes foreach {
case ((x, y), i) => p = (p._1 + x, p._2 + y)
}
if (N != 0) (p._1 / N, p._2 / N) else (0.0, 0.0)
}
/**
* Compute a graph's bary center, taking only single nodes in account
*/
def singlesCenter(g: Graph): (Double, Double) = {
var p = (0.0, 0.0)
val singles = g.position.zipWithIndex filter {
case (p, i) => g.isSingle(i)
}
val N = singles.size.toDouble
singles foreach {
case ((x, y), i) => p = (p._1 + x, p._2 + y)
}
if (N != 0) (p._1 / N, p._2 / N) else (0.0, 0.0)
}
/**
* Compute a graph's bary center, taking only nodes not singles in account
*/
def notSinglesCenter(g: Graph): (Double, Double) = {
var p = (0.0, 0.0)
val notSingles = g.position.zipWithIndex filter {
case (p, i) => !g.isSingle(i)
}
val N = notSingles.size.toDouble
notSingles foreach {
case ((x, y), i) => p = (p._1 + x, p._2 + y)
}
if (N != 0) (p._1 / N, p._2 / N) else (0.0, 0.0)
}
} | moma/tinaviz | src/main/scala/eu/tinasoft/tinaviz/graph/Metrics.scala | Scala | gpl-3.0 | 17,652 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2.calculations
import uk.gov.hmrc.ct.CATO04
import uk.gov.hmrc.ct.box.{CtInteger, CtTypeConverters}
import uk.gov.hmrc.ct.computations.HmrcAccountingPeriod
import uk.gov.hmrc.ct.ct600.NumberRounding
import uk.gov.hmrc.ct.ct600.calculations.AccountingPeriodHelper._
import uk.gov.hmrc.ct.ct600.calculations.Ct600AnnualConstants._
import uk.gov.hmrc.ct.ct600.calculations.{AccountingPeriodHelper, Ct600AnnualConstants, CtConstants, TaxYear}
import uk.gov.hmrc.ct.ct600.v2._
trait MarginalRateReliefCalculator extends CtTypeConverters with NumberRounding {
def computeMarginalRateRelief(b37: B37, b44: B44, b54: B54, b38: B38, b39: B39, accountingPeriod: HmrcAccountingPeriod): CATO04 = {
validateAccountingPeriod(accountingPeriod)
val fy1: Int = fallsInFinancialYear(accountingPeriod.start.value)
val fy2: Int = fallsInFinancialYear(accountingPeriod.end.value)
val fy1Result = calculateForFinancialYear(fy1, b44, b38, b37, b39, accountingPeriod, constantsForTaxYear(TaxYear(fy1)))
val fy2Result = if (fy2 != fy1) {
calculateForFinancialYear(fy2, b54, b38, b37, b39, accountingPeriod, constantsForTaxYear(TaxYear(fy2)))
} else BigDecimal(0)
CATO04(roundedTwoDecimalPlaces(fy1Result + fy2Result))
}
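  // Reading of the private helper below (no statutory figures are assumed here): for each financial
  // year touched by the accounting period, relief is due only when the apportioned profit is above
  // the pro-rated lower relevant amount and not above the pro-rated upper one (to penny precision);
  // it is then (upper - profit) * (chargeable / profit) * the year's relief fraction, and the
  // per-year amounts are summed by computeMarginalRateRelief above.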
private def calculateForFinancialYear(financialYear: Int,
proRataProfitsChargeable: CtInteger,
b38: B38,
b37: B37,
b39: B39,
accountingPeriod: HmrcAccountingPeriod,
constants: CtConstants): BigDecimal = {
val daysInAccountingPeriod = daysBetween(accountingPeriod.start.value, accountingPeriod.end.value)
val apDaysInFy = accountingPeriodDaysInFinancialYear(financialYear, accountingPeriod)
val apFyRatio = apDaysInFy / daysInAccountingPeriod
val msFyRatio = apDaysInFy / (365 max daysInAccountingPeriod)
val apportionedProfit = (b37 plus b38) * apFyRatio
val proRataLrma = (constants.lowerRelevantAmount * msFyRatio) / (b39.orZero + 1)
val proRataUrma = constants.upperRelevantAmount * msFyRatio / (b39.orZero + 1)
val mscrdueap = if (apportionedProfit > 0 &&
apportionedProfit > proRataLrma &&
apportionedProfit < (proRataUrma + BigDecimal("0.01"))) {
(proRataUrma - apportionedProfit)*(proRataProfitsChargeable.value/apportionedProfit)* constants.reliefFraction
} else {
BigDecimal(0)
}
mscrdueap
}
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v2/calculations/MarginalRateReliefCalculator.scala | Scala | apache-2.0 | 3,269 |
package com.spike.giantdataanalysis.spark
package object streaming {
} | zhoujiagen/giant-data-analysis | temporal-data-and-realtime-algorithm/temporal-apache-spark-streaming/src/main/scala/com/spike/giantdataanalysis/spark/streaming/package.scala | Scala | mit | 74 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.calcite.plan.hep.HepMatchOrder
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
import org.apache.flink.table.planner.plan.optimize.program._
import org.apache.flink.table.planner.plan.rules.{FlinkBatchRuleSets, FlinkStreamRuleSets}
import org.apache.flink.table.planner.runtime.utils.JavaUserDefinedScalarFunctions.{BooleanPythonScalarFunction, PythonScalarFunction}
import org.apache.flink.table.planner.utils.TableTestBase
import org.junit.{Before, Test}
/**
* Test for [[PythonScalarFunctionSplitRule]].
*/
class PythonScalarFunctionSplitRuleTest extends TableTestBase {
private val util = batchTestUtil()
@Before
def setup(): Unit = {
val programs = new FlinkChainedProgram[BatchOptimizeContext]()
programs.addLast(
"logical",
FlinkVolcanoProgramBuilder.newBuilder
.add(FlinkBatchRuleSets.LOGICAL_OPT_RULES)
.setRequiredOutputTraits(Array(FlinkConventions.LOGICAL))
.build())
programs.addLast(
"logical_rewrite",
FlinkHepRuleSetProgramBuilder.newBuilder
.setHepRulesExecutionType(HEP_RULES_EXECUTION_TYPE.RULE_SEQUENCE)
.setHepMatchOrder(HepMatchOrder.BOTTOM_UP)
.add(FlinkStreamRuleSets.LOGICAL_REWRITE)
.build())
util.replaceBatchProgram(programs)
util.addTableSource[(Int, Int, Int)]("MyTable", 'a, 'b, 'c)
util.addFunction("pyFunc1", new PythonScalarFunction("pyFunc1"))
util.addFunction("pyFunc2", new PythonScalarFunction("pyFunc2"))
util.addFunction("pyFunc3", new PythonScalarFunction("pyFunc3"))
util.addFunction("pyFunc4", new BooleanPythonScalarFunction("pyFunc4"))
}
@Test
def testPythonFunctionAsInputOfJavaFunction(): Unit = {
val sqlQuery = "SELECT pyFunc1(a, b) + 1 FROM MyTable"
util.verifyPlan(sqlQuery)
}
@Test
def testPythonFunctionMixedWithJavaFunction(): Unit = {
val sqlQuery = "SELECT pyFunc1(a, b), c + 1 FROM MyTable"
util.verifyPlan(sqlQuery)
}
@Test
def testPythonFunctionMixedWithJavaFunctionInWhereClause(): Unit = {
val sqlQuery = "SELECT pyFunc1(a, b), c + 1 FROM MyTable WHERE pyFunc2(a, c) > 0"
util.verifyPlan(sqlQuery)
}
@Test
def testPythonFunctionInWhereClause(): Unit = {
val sqlQuery = "SELECT pyFunc1(a, b) FROM MyTable WHERE pyFunc4(a, c)"
util.verifyPlan(sqlQuery)
}
@Test
def testChainingPythonFunction(): Unit = {
val sqlQuery = "SELECT pyFunc3(pyFunc2(a + pyFunc1(a, c), b), c) FROM MyTable"
util.verifyPlan(sqlQuery)
}
@Test
def testOnlyOnePythonFunction(): Unit = {
val sqlQuery = "SELECT pyFunc1(a, b) FROM MyTable"
util.verifyPlan(sqlQuery)
}
@Test
def testOnlyOnePythonFunctionInWhereClause(): Unit = {
val sqlQuery = "SELECT a, b FROM MyTable WHERE pyFunc4(a, c)"
util.verifyPlan(sqlQuery)
}
@Test
def testFieldNameUniquify(): Unit = {
util.addTableSource[(Int, Int, Int)]("MyTable2", 'f0, 'f1, 'f2)
val sqlQuery = "SELECT pyFunc1(f1, f2), f0 + 1 FROM MyTable2"
util.verifyPlan(sqlQuery)
}
}
| mbode/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/rules/logical/PythonScalarFunctionSplitRuleTest.scala | Scala | apache-2.0 | 3,984 |
package com.s3dropbox.lambda
import org.json4s.{Formats, NoTypeHints}
import org.json4s.jackson.JsonMethods
import java.io.ByteArrayInputStream
/**
* Manifest contains information about the target Dropbox App folder.
*/
case class Manifest(fileStates: List[FileState]) {
def filenamesToRemove(oldManifest: Manifest): List[String] = oldManifest.filenames.diff(filenames)
def filesToUpdate(oldManifest: Manifest): List[FileState] = fileStates.diff(oldManifest.fileStates)
private def filenames: List[String] = fileStates.map(_.filename)
}
object Manifest {
implicit val formats: Formats = org.json4s.DefaultFormats + NoTypeHints
def apply(): Manifest = Manifest(List[FileState]())
def apply(data: Array[Byte]): Manifest = JsonMethods.parse(new ByteArrayInputStream(data)).extract[Manifest]
}
case class FileState(filename: String, md5sum: String)
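// A hedged usage sketch (file names and checksums are illustrative only): diffing a previous
// manifest against the current one to plan which entries to delete and which to re-upload.
object ManifestDiffSketch {
  val previous = Manifest(List(FileState("a.pdf", "111"), FileState("b.pdf", "222")))
  val current = Manifest(List(FileState("a.pdf", "999")))
  val toRemove: List[String] = current.filenamesToRemove(previous) // List("b.pdf")
  val toUpdate: List[FileState] = current.filesToUpdate(previous)  // List(FileState("a.pdf", "999"))
}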
| ErrorsAndGlitches/S3DropboxLambda | src/main/scala/com/s3dropbox/lambda/Manifest.scala | Scala | mit | 870 |
package org.nikosoft.oanda.bot.scalping
import org.joda.time.Duration
import org.nikosoft.oanda.bot.scalping.Model.PositionType.PositionType
import org.nikosoft.oanda.bot.scalping.Model.TradeType.TradeType
import org.nikosoft.oanda.instruments.Model.CandleStick
import org.nikosoft.oanda.instruments.Oscillators.MACDItem
import scala.math.BigDecimal.RoundingMode
object Model {
val pipsCoef = 100000
implicit class CandleStickPimped(candleStick: CandleStick) {
def macdHistogram = candleStick.indicators("MACDCandleCloseIndicator").asInstanceOf[MACDItem].histogram.getOrElse(BigDecimal(0))
def macdHistogramPips = (candleStick.indicators("MACDCandleCloseIndicator").asInstanceOf[MACDItem].histogram.getOrElse(BigDecimal(0)) * pipsCoef).toInt
def sma(precision: Int) = candleStick.indicators.get(s"SMACandleCloseIndicator_$precision").map(_.asInstanceOf[BigDecimal].setScale(5, RoundingMode.HALF_UP).toDouble)
def cmo = candleStick.indicators.get("CMOCandleCloseIndicator_8").map(_.asInstanceOf[BigDecimal].toDouble).getOrElse(0.0)
}
implicit class BigDecimalPimped(value: BigDecimal) {
def toPips: Int = (value * pipsCoef).toInt
}
implicit class IntegerPimped(value: Int) {
def toRate: BigDecimal = BigDecimal(value) / pipsCoef
}
object PositionType extends Enumeration {
type PositionType = Value
val LongPosition, ShortPosition = Value
}
abstract class Order() {
def chainedOrders: List[Order]
def orderCreatedAt: CandleStick
def findTakeProfitOrder: Option[TakeProfitOrder] = chainedOrders.find(_.getClass == classOf[TakeProfitOrder]).map(_.asInstanceOf[TakeProfitOrder])
def findStopLossOrder: Option[StopLossOrder] = chainedOrders.find(_.getClass == classOf[StopLossOrder]).map(_.asInstanceOf[StopLossOrder])
def findOrderByClass[T <: Order](orderClass: Class[T]): Option[T] = chainedOrders.find(_.getClass == orderClass).map(_.asInstanceOf[T])
}
case class MarketOrder(positionType: PositionType, orderCreatedAt: CandleStick, chainedOrders: List[Order]) extends Order
case class LimitOrder(price: BigDecimal, positionType: PositionType, orderCreatedAt: CandleStick, chainedOrders: List[Order]) extends Order
case class StopLossOrder(orderCreatedAt: CandleStick, stopLossPrice: BigDecimal, positionType: PositionType) extends Order {
val chainedOrders: List[Order] = Nil
}
case class TakeProfitOrder(orderCreatedAt: CandleStick, takeProfitPrice: BigDecimal, positionType: PositionType) extends Order {
val chainedOrders: List[Order] = Nil
}
case class Position(creationOrder: Order,
executionPrice: BigDecimal,
executionCandle: CandleStick,
positionType: PositionType)
object TradeType extends Enumeration {
type TradeType = Value
val TakeProfit, StopLoss, ManualClose = Value
}
case class Trade(commissionPips: Int,
orderClosedAt: CandleStick,
closedAtPrice: BigDecimal,
tradeType: TradeType,
position: Position) {
def profitPips = (position.positionType match {
case PositionType.LongPosition => closedAtPrice - position.executionPrice
case PositionType.ShortPosition => position.executionPrice - closedAtPrice
}).toPips - commissionPips
def duration = new Duration(position.executionCandle.time, orderClosedAt.time).toStandardHours.toString
}
}
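// A hedged sketch (the rates are illustrative): the pimped conversions above move between raw rates
// and integer pips via pipsCoef.
object PipsConversionSketch {
  import Model._
  val asPips: Int = BigDecimal("1.23456").toPips // 123456
  val asRate: BigDecimal = 150.toRate            // 0.0015
}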
| cnnickolay/forex-trader | trading-bot/src/main/scala/org/nikosoft/oanda/bot/scalping/Model.scala | Scala | mit | 3,446 |
package org.clulab.utils
import org.scalatest.FlatSpec
import org.scalatest.Matchers
class TestThreading extends FlatSpec with Matchers {
val threads = 26
val numbers = 0.until(threads)
{
val parNumbers = ThreadUtils.parallelize(numbers, threads)
parNumbers.foreach { number =>
println(number)
}
}
}
| sistanlp/processors | main/src/test/scala/org/clulab/utils/TestThreading.scala | Scala | apache-2.0 | 330 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.visualization.tensorboard
import com.intel.analytics.bigdl.utils.Engine
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.tensorflow
import org.tensorflow.framework.GraphDef
import org.tensorflow.util.Event
/**
* Writes Summary protocol buffers to event files.
* @param logDirectory Support local directory and HDFS directory
* @param flushMillis Interval to flush events queue.
*/
private[bigdl] class FileWriter(val logDirectory : String, flushMillis: Int = 1000) {
private val logPath = new Path(logDirectory)
// write to local disk by default
private val fs = logPath.getFileSystem(new Configuration(false))
require(!fs.exists(logPath) || fs.isDirectory(logPath), s"FileWriter: can not create $logPath")
if (!fs.exists(logPath)) fs.mkdirs(logPath)
private val eventWriter = new EventWriter(logDirectory, flushMillis, fs)
Engine.io.invoke(() => eventWriter.run())
/**
* Adds a Summary protocol buffer to the event file.
* @param summary a Summary protobuf String generated by bigdl.utils.Summary's
* scalar()/histogram().
* @param globalStep a consistent global count of the event.
* @return
*/
def addSummary(summary: tensorflow.framework.Summary, globalStep: Long): this.type = {
val event = Event.newBuilder().setSummary(summary).build()
addEvent(event, globalStep)
this
}
def addGraphDef(graph: GraphDef): this.type = {
val event = Event.newBuilder().setGraphDef(graph.toByteString).build()
eventWriter.addEvent(event)
this
}
/**
* Add a event protocol buffer to the event file.
* @param event A event protobuf contains summary protobuf.
* @param globalStep a consistent global count of the event.
* @return
*/
def addEvent(event: Event, globalStep: Long): this.type = {
eventWriter.addEvent(
event.toBuilder.setWallTime(System.currentTimeMillis() / 1e3).setStep(globalStep).build())
this
}
/**
* Close file writer.
* @return
*/
def close(): Unit = {
eventWriter.close()
fs.close()
}
}
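// A hedged usage sketch (directory, tag and value are assumptions; real callers go through BigDL's
// Summary helpers): writing a single scalar summary event and closing the writer.
private[bigdl] object FileWriterUsageSketch {
  import org.tensorflow.framework.Summary
  def writeOnce(): Unit = {
    val writer = new FileWriter("/tmp/bigdl-logs")
    val summary = Summary.newBuilder()
      .addValue(Summary.Value.newBuilder().setTag("loss").setSimpleValue(0.5f))
      .build()
    writer.addSummary(summary, globalStep = 1L)
    writer.close()
  }
}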
| yiheng/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/visualization/tensorboard/FileWriter.scala | Scala | apache-2.0 | 2,719 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.api.python.{PythonEvalType, PythonFunction}
import org.apache.spark.sql.catalyst.trees.TreePattern.{PYTHON_UDF, TreePattern}
import org.apache.spark.sql.catalyst.util.toPrettySQL
import org.apache.spark.sql.types.DataType
/**
* Helper functions for [[PythonUDF]]
*/
object PythonUDF {
private[this] val SCALAR_TYPES = Set(
PythonEvalType.SQL_BATCHED_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_UDF,
PythonEvalType.SQL_SCALAR_PANDAS_ITER_UDF
)
def isScalarPythonUDF(e: Expression): Boolean = {
e.isInstanceOf[PythonUDF] && SCALAR_TYPES.contains(e.asInstanceOf[PythonUDF].evalType)
}
def isGroupedAggPandasUDF(e: Expression): Boolean = {
e.isInstanceOf[PythonUDF] &&
e.asInstanceOf[PythonUDF].evalType == PythonEvalType.SQL_GROUPED_AGG_PANDAS_UDF
}
  // This is currently the same as GroupedAggPandasUDF, but we might support new types in the future,
  // e.g., N -> N transform.
def isWindowPandasUDF(e: Expression): Boolean = isGroupedAggPandasUDF(e)
}
/**
* A serialized version of a Python lambda function. This is a special expression, which needs a
* dedicated physical operator to execute it, and thus can't be pushed down to data sources.
*/
case class PythonUDF(
name: String,
func: PythonFunction,
dataType: DataType,
children: Seq[Expression],
evalType: Int,
udfDeterministic: Boolean,
resultId: ExprId = NamedExpression.newExprId)
extends Expression with Unevaluable with NonSQLExpression with UserDefinedExpression {
override lazy val deterministic: Boolean = udfDeterministic && children.forall(_.deterministic)
override def toString: String = s"$name(${children.mkString(", ")})"
final override val nodePatterns: Seq[TreePattern] = Seq(PYTHON_UDF)
lazy val resultAttribute: Attribute = AttributeReference(toPrettySQL(this), dataType, nullable)(
exprId = resultId)
override def nullable: Boolean = true
override lazy val canonicalized: Expression = {
val canonicalizedChildren = children.map(_.canonicalized)
// `resultId` can be seen as cosmetic variation in PythonUDF, as it doesn't affect the result.
this.copy(resultId = ExprId(-1)).withNewChildren(canonicalizedChildren)
}
override protected def withNewChildrenInternal(newChildren: IndexedSeq[Expression]): PythonUDF =
copy(children = newChildren)
}
| wangmiao1981/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/PythonUDF.scala | Scala | apache-2.0 | 3,207 |
/*
* Copyright 2014 Lars Edenbrandt
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package se.nimsa.sbx.anonymization
import se.nimsa.sbx.anonymization.AnonymizationOp._
import se.nimsa.sbx.anonymization.AnonymizationProfile._
import se.nimsa.sbx.anonymization.AnonymizationProfiles._
import se.nimsa.sbx.anonymization.ConfidentialityOption._
import scala.collection.mutable
class AnonymizationProfile private(val options: Seq[ConfidentialityOption]) {
private lazy val sortedOptions = options.sortWith(_.rank > _.rank)
/*
A bit of optimization necessary for sufficient performance. Divide ops into one map for the majority of tags where the
tag mask has all bits set. This is equivalent to checking for simple tag equality. Keep a separate map for the minority
with tag masks spanning more than one tag. Lookups in the former map will be fast since this can use a standard map
lookup. In the second map, linear search has to be performed but this is also fast as there are very few such elements.
Once both lookups have been carried out, select the one with highest rank.
*/
private lazy val (activeTagOps, activeMaskedOps): (Map[ConfidentialityOption, Map[Int, AnonymizationOp]], Map[ConfidentialityOption, Map[TagMask, AnonymizationOp]]) = {
val activeOps: Map[ConfidentialityOption, Map[TagMask, AnonymizationOp]] =
profiles.filterKeys(options.contains) ++ (
if (options.contains(RETAIN_SAFE_PRIVATE))
Map(RETAIN_SAFE_PRIVATE -> safePrivateAttributes.map(_ -> KEEP).toMap)
else
Map.empty
)
val tagMap = activeOps.map {
case (option, inner) => option -> inner.filterKeys(_.mask == 0xFFFFFFFF).map {
case (mask, op) => mask.tag -> op
}
}
val maskMap = activeOps.map {
case (option, inner) => option -> inner.filterKeys(_.mask != 0xFFFFFFFF)
}
(tagMap, maskMap)
}
def opOf(tag: Int): Option[AnonymizationOp] = {
var tagOp: Option[(ConfidentialityOption, AnonymizationOp)] = None
for (key <- sortedOptions if tagOp.isEmpty) {
val map = activeTagOps(key)
tagOp = map.get(tag).map(key -> _)
}
var maskOp: Option[(ConfidentialityOption, AnonymizationOp)] = None
for (key <- sortedOptions if maskOp.isEmpty) {
val map = activeMaskedOps(key)
maskOp = map.filterKeys(_.contains(tag)).values.headOption.map(key -> _)
}
(tagOp, maskOp) match {
case (Some(t), Some(m)) if t._1.rank > m._1.rank => Some(t._2)
case (Some(_), Some(m)) => Some(m._2)
case (None, Some(m)) => Some(m._2)
case (Some(t), None) => Some(t._2)
case _ => None
}
}
}
object AnonymizationProfile {
private val cache = mutable.Map.empty[Seq[ConfidentialityOption], AnonymizationProfile]
def apply(options: Seq[ConfidentialityOption]): AnonymizationProfile =
cache.getOrElseUpdate(options, new AnonymizationProfile(options))
case class TagMask(tag: Int, mask: Int) {
def contains(otherTag: Int): Boolean = (otherTag & mask) == tag
}
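  // Hedged illustration (the tag values are illustrative): a mask of 0xFFFFFFFF matches exactly one
  // tag, which is why such entries go into the plain tag map above, while a partial mask covers a
  // whole block of tags and has to be matched linearly.
  object TagMaskSketch {
    val exactMaskMatches: Boolean = TagMask(0x00100010, 0xFFFFFFFF).contains(0x00100010) // true
    val blockMaskMatches: Boolean = TagMask(0x50000000, 0xFF00FFFF).contains(0x50AA0000) // true
  }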
} | slicebox/slicebox | src/main/scala/se/nimsa/sbx/anonymization/AnonymizationProfile.scala | Scala | apache-2.0 | 3,553 |
package com.ecfront.ez.framework.service.auth.manage
import com.ecfront.ez.framework.core.rpc.RPC
import com.ecfront.ez.framework.service.auth.model.EZ_Resource
import com.ecfront.ez.framework.service.jdbc.BaseStorage
import com.ecfront.ez.framework.service.jdbc.scaffold.SimpleRPCService
/**
 * Resource management
*/
@RPC("/ez/auth/manage/resource/","EZ-资源管理","")
object ResourceService extends SimpleRPCService[EZ_Resource] {
override protected val storageObj: BaseStorage[EZ_Resource] = EZ_Resource
} | gudaoxuri/ez-framework | services/auth/src/main/scala/com/ecfront/ez/framework/service/auth/manage/ResourceService.scala | Scala | apache-2.0 | 514 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.sparta.plugin.cube.operator.mode
import java.io.{Serializable => JSerializable}
import org.apache.spark.sql.types.StructType
import scala.util.Try
import com.stratio.sparta.sdk.pipeline.schema.TypeOp._
import com.stratio.sparta.sdk.pipeline.aggregation.operator.{Operator, OperatorProcessMapAsAny}
import com.stratio.sparta.sdk.pipeline.schema.TypeOp
import com.stratio.sparta.sdk._
class ModeOperator(name: String,
val schema: StructType,
properties: Map[String, JSerializable]) extends Operator(name, schema, properties)
with OperatorProcessMapAsAny {
val inputSchema = schema
override val defaultTypeOperation = TypeOp.ArrayString
override def processReduce(values: Iterable[Option[Any]]): Option[Any] = {
val tupla = values.groupBy(x => x).mapValues(_.size)
if (tupla.nonEmpty) {
val max = tupla.values.max
Try(Some(transformValueByTypeOp(returnType, tupla.filter(_._2 == max).flatMap(tuple => tuple._1)))).get
} else Some(List())
}
}
| Frannie-Ludmilla/sparta | plugins/src/main/scala/com/stratio/sparta/plugin/cube/operator/mode/ModeOperator.scala | Scala | apache-2.0 | 1,658 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.variable
import cc.factorie.la._
import cc.factorie.util.Cubbie
import scala.collection.mutable
/** A domain over Tensor values, where the dimensions of the Tensor correspond to a CategoricalDomain.
This trait is often used for the domain of feature vectors.
@author Andrew McCallum */
trait CategoricalVectorDomain[C] extends VectorDomain { thisDomain =>
type CategoryType = C
def dimensionDomain: CategoricalDomain[C] = _dimensionDomain
/** Use for de-serialization */
def stringToCategory(s:String): C = s.asInstanceOf[C]
lazy val _dimensionDomain: CategoricalDomain[C] = new CategoricalDomain[C] {
final override def stringToCategory(s:String): C = CategoricalVectorDomain.this.stringToCategory(s)
}
}
/** A Cubbie for serializing CategoricalVectorDomain.
It stores the sequence of categories.
@author Luke Vilnis */
class CategoricalVectorDomainCubbie[T](val cdtd: CategoricalVectorDomain[T]) extends Cubbie {
val dimensionDomainCubbie = new CategoricalDomainCubbie[T](cdtd.dimensionDomain)
setMap(new mutable.Map[String, Any] {
override def update(key: String, value: Any): Unit = {
if (key == "dimensionDomain") {
val map = value.asInstanceOf[mutable.Map[String, Any]]
for((k,v) <- map) dimensionDomainCubbie._map(k) = v
} else sys.error("Unknown cubbie slot key: \\"%s\\"" format key)
}
def += (kv: (String, Any)): this.type = { update(kv._1, kv._2); this }
def -= (key: String): this.type = sys.error("Can't remove slots from cubbie map!")
def get(key: String): Option[Any] =
if (key == "dimensionDomain") Some(dimensionDomainCubbie._map)
else None
def iterator: Iterator[(String, Any)] = List("dimensionDomain").map(s => (s, get(s).get)).iterator
})
}
/** An abstract variable whose value is a Tensor whose length matches the size of a CategoricalDomain,
and whose dimensions each correspond to a category.
These are commonly used for feature vectors, with String categories. */
trait CategoricalVectorVar[C] extends VectorVar {
def domain: CategoricalVectorDomain[C]
/** If false, then when += is called with a value (or index) outside the Domain, an error is thrown.
If true, then no error is thrown, and request to add the outside-Domain value is simply ignored. */
def skipNonCategories = domain.dimensionDomain.frozen
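  // Resolves the category's index in the dimension domain; unknown categories either raise an
  // error or are silently ignored, depending on skipNonCategories.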
protected def doWithIndexSafely(elt:C, v:Double, update:Boolean): Unit = {
val i = domain.dimensionDomain.index(elt)
if (i == CategoricalDomain.NULL_INDEX) {
if (!skipNonCategories) throw new Error("CategoricalVectorVar.+= " + elt + " not found in domain " + domain)
} else {
if (update) value.update(i, v)
else value.+=(i, v)
}
}
// Consider re-adding this "update" method if necessary, but reconsider its name; should it have a Diff argument?
//def update(elt:C, newValue:Double): Unit = doWithIndexSafely(elt, newValue, true)
def +=(elt:C, incr:Double): Unit = doWithIndexSafely(elt, incr, update = false)
def +=(elt:C): Unit = +=(elt, 1.0)
def ++=(elts:Iterable[C]): Unit = elts.foreach(this.+=(_))
def activeCategories: Seq[C] = value.activeDomain.toSeq.map(i => domain.dimensionDomain.category(i))
}
// TODO we should maybe refactor this to not set the Value type to "Tensor", as this makes things require casting down the road when using this for eg classifiers on tensor1 features -luke
/** An abstract variable whose value is a Tensor whose length matches the size of a CategoricalDomain,
and whose dimensions each correspond to a category.
These are commonly used for feature vectors, with String categories.
The 'dimensionDomain' is abstract.
@author Andrew McCallum */
abstract class CategoricalVectorVariable[C] extends VectorVar with MutableTensorVar with CategoricalVectorVar[C] {
type Value = Tensor1
def this(initialValue:Tensor1) = { this(); set(initialValue)(null) }
}
| patverga/factorie | src/main/scala/cc/factorie/variable/CategoricalVectorVariable.scala | Scala | apache-2.0 | 4,688 |
package org.jetbrains.plugins.scala.editor.importOptimizer
import java.util.regex.Pattern
import com.intellij.openapi.project.Project
import org.jetbrains.plugins.scala.codeInspection.scalastyle.ScalastyleCodeInspection
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
/**
* @author Nikolay.Tropin
*/
case class OptimizeImportSettings(addFullQualifiedImports: Boolean,
isLocalImportsCanBeRelative: Boolean,
sortImports: Boolean,
collectImports: Boolean,
isUnicodeArrow: Boolean,
spacesInImports: Boolean,
classCountToUseImportOnDemand: Int,
importLayout: Array[String],
isAlwaysUsedImport: String => Boolean,
scalastyleSettings: ScalastyleSettings) {
def scalastyleGroups: Option[Seq[Pattern]] = scalastyleSettings.groups
def scalastyleOrder: Boolean = scalastyleSettings.scalastyleOrder
private def this(s: ScalaCodeStyleSettings, scalastyleSettings: ScalastyleSettings) {
this(
s.isAddFullQualifiedImports,
s.isDoNotChangeLocalImportsOnOptimize,
s.isSortImports,
s.isCollectImports,
s.REPLACE_CASE_ARROW_WITH_UNICODE_CHAR,
s.SPACES_IN_IMPORTS,
s.getClassCountToUseImportOnDemand,
s.getImportLayout,
s.isAlwaysUsedImport,
scalastyleSettings
)
}
}
object OptimizeImportSettings {
def apply(project: Project): OptimizeImportSettings = {
val codeStyleSettings = ScalaCodeStyleSettings.getInstance(project)
val scalastyleSettings =
if (codeStyleSettings.isSortAsScalastyle) {
val scalastyleConfig = ScalastyleCodeInspection.configuration(project)
val scalastyleChecker = scalastyleConfig.flatMap(_.checks.find(_.className == ScalastyleSettings.importOrderChecker))
val groups = scalastyleChecker.filter(_.enabled).flatMap(ScalastyleSettings.groups)
ScalastyleSettings(scalastyleOrder = true, groups)
}
else ScalastyleSettings(scalastyleOrder = false, None)
new OptimizeImportSettings(codeStyleSettings, scalastyleSettings)
}
} | ilinum/intellij-scala | src/org/jetbrains/plugins/scala/editor/importOptimizer/OptimizeImportSettings.scala | Scala | apache-2.0 | 2,317 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.examples
import java.io.File
import java.text.SimpleDateFormat
import org.apache.spark.sql.SaveMode
import org.apache.spark.sql.SparkSession
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.examples.util.ExampleUtils
object DataUpdateDeleteExample {
def main(args: Array[String]) {
val spark = ExampleUtils.createSparkSession("DataUpdateDeleteExample")
exampleBody(spark)
spark.close()
}
def exampleBody(spark : SparkSession): Unit = {
// Specify date format based on raw data
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
import spark.implicits._
// Drop table
spark.sql("DROP TABLE IF EXISTS IUD_table1")
spark.sql("DROP TABLE IF EXISTS IUD_table2")
// Simulate data and write to table IUD_table1
val sdf = new SimpleDateFormat("yyyy-MM-dd")
var df = spark.sparkContext.parallelize(1 to 10)
.map(x => (x, new java.sql.Date(sdf.parse("2015-07-" + (x % 10 + 10)).getTime),
"china", "aaa" + x, "phone" + 555 * x, "ASD" + (60000 + x), 14999 + x))
.toDF("IUD_table1_id", "IUD_table1_date", "IUD_table1_country", "IUD_table1_name",
"IUD_table1_phonetype", "IUD_table1_serialname", "IUD_table1_salary")
df.write
.format("carbondata")
.option("tableName", "IUD_table1")
.option("compress", "true")
.mode(SaveMode.Overwrite)
.save()
// Simulate data and write to table IUD_table2
df = spark.sparkContext.parallelize(1 to 10)
.map(x => (x, new java.sql.Date(sdf.parse("2017-07-" + (x % 20 + 1)).getTime),
"usa", "bbb" + x, "phone" + 100 * x, "ASD" + (1000 * x - x), 25000 + x))
.toDF("IUD_table2_id", "IUD_table2_date", "IUD_table2_country", "IUD_table2_name",
"IUD_table2_phonetype", "IUD_table2_serialname", "IUD_table2_salary")
df.write
.format("carbondata")
.option("tableName", "IUD_table2")
.option("tempCSV", "true")
.option("compress", "true")
.mode(SaveMode.Overwrite)
.save()
spark.sql("""
SELECT * FROM IUD_table1 ORDER BY IUD_table1_id
""").show()
spark.sql("""
SELECT * FROM IUD_table2 ORDER BY IUD_table2_id
""").show()
// 1.Update data with simple SET
// Update data where salary < 15003
val dateStr = "2018-08-08"
spark.sql(s"""
UPDATE IUD_table1 SET (IUD_table1_date, IUD_table1_country) = ('$dateStr', 'india')
WHERE IUD_table1_salary < 15003
""").show()
// Query data again after the above update
spark.sql("""
SELECT * FROM IUD_table1 ORDER BY IUD_table1_id
""").show()
spark.sql("""
UPDATE IUD_table1 SET (IUD_table1_salary) = (IUD_table1_salary + 9)
WHERE IUD_table1_name = 'aaa1'
""").show()
// Query data again after the above update
spark.sql("""
SELECT * FROM IUD_table1 ORDER BY IUD_table1_id
""").show()
// 2.Update data with subquery result SET
spark.sql("""
UPDATE IUD_table1
SET (IUD_table1_country, IUD_table1_name) = (SELECT IUD_table2_country, IUD_table2_name
FROM IUD_table2 WHERE IUD_table2_id = 5)
WHERE IUD_table1_id < 5""").show()
spark.sql("""
UPDATE IUD_table1
SET (IUD_table1_date, IUD_table1_serialname, IUD_table1_salary) =
(SELECT '2099-09-09', IUD_table2_serialname, '9999'
FROM IUD_table2 WHERE IUD_table2_id = 5)
WHERE IUD_table1_id < 5""").show()
// Query data again after the above update
spark.sql("""
SELECT * FROM IUD_table1 ORDER BY IUD_table1_id
""").show()
// 3.Update data with join query result SET
spark.sql("""
UPDATE IUD_table1
SET (IUD_table1_country, IUD_table1_salary) =
(SELECT IUD_table2_country, IUD_table2_salary FROM IUD_table2 FULL JOIN IUD_table1 u
WHERE u.IUD_table1_id = IUD_table2_id and IUD_table2_id=6)
WHERE IUD_table1_id >6""").show()
// Query data again after the above update
spark.sql("""
SELECT * FROM IUD_table1 ORDER BY IUD_table1_id
""").show()
// 4.Delete data where salary > 15005
spark.sql("""
DELETE FROM IUD_table1 WHERE IUD_table1_salary > 15005
""").show()
// Query data again after delete data
spark.sql("""
SELECT * FROM IUD_table1 ORDER BY IUD_table1_id
""").show()
// 5.Delete data WHERE id in (1, 2, $key)
val key = 3
spark.sql(s"""
DELETE FROM IUD_table1 WHERE IUD_table1_id in (1, 2, $key)
""").show()
// Query data again after delete data
spark.sql("""
SELECT * FROM IUD_table1 ORDER BY IUD_table1_id
""").show()
CarbonProperties.getInstance().addProperty(
CarbonCommonConstants.CARBON_DATE_FORMAT,
CarbonCommonConstants.CARBON_DATE_DEFAULT_FORMAT)
// Drop table
spark.sql("DROP TABLE IF EXISTS IUD_table1")
spark.sql("DROP TABLE IF EXISTS IUD_table2")
}
}
| zzcclp/carbondata | examples/spark/src/main/scala/org/apache/carbondata/examples/DataUpdateDeleteExample.scala | Scala | apache-2.0 | 6,016 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package statements
import org.jetbrains.plugins.scala.lang.psi.api.base.ScPatternList
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScBindingPattern
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScExpression
/**
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
*/
trait ScVariableDefinition extends ScVariable {
def pList: ScPatternList
def bindings: Seq[ScBindingPattern]
def declaredElements: Seq[ScBindingPattern] = bindings
def expr: Option[ScExpression]
def isSimple: Boolean = pList.simplePatterns && bindings.size == 1
override def isAbstract: Boolean = false
override def accept(visitor: ScalaElementVisitor) {
visitor.visitVariableDefinition(this)
}
}
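/** Illustrative usage sketch (not part of the original source): the `expr` extractor below lets
  * callers pattern-match on a definition's right-hand side, e.g.
  * `definition match { case ScVariableDefinition.expr(rhs) => ... }`, where `rhs` is the ScExpression. */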
object ScVariableDefinition {
object expr {
def unapply(definition: ScVariableDefinition): Option[ScExpression] = Option(definition).flatMap(_.expr)
}
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/api/statements/ScVariableDefinition.scala | Scala | apache-2.0 | 952 |
package org.jetbrains.plugins.scala.testingSupport.scalatest.fileStructureView
import org.jetbrains.plugins.scala.testingSupport.IntegrationTest
import org.jetbrains.plugins.scala.lang.structureView.elements.impl.TestStructureViewElement._
import org.jetbrains.plugins.scala.testingSupport.test.structureView.TestNodeProvider
/**
* @author Roman.Shein
* @since 20.04.2015.
*/
trait FreeSpecFileStructureViewTest extends IntegrationTest {
private val className = "FreeSpecViewTest"
def addFreeSpecViewTest(): Unit = {
addFileToProject(className + ".scala",
"""
|import org.scalatest._
|
|class FreeSpecViewTest extends FreeSpec {
| "level1" - {
| "level1_1" in {}
|
| "level1_2" - {
| "level1_2_1" in {}
| }
|
| "level1_2" is pending
|
| "level1_3" in pending
| }
|
| "level2" ignore {
| "level2_1" in {}
|
| "level2_2" ignore {}
| }
|
| "level3" ignore pending
|}
""".stripMargin)
}
def testFreeSpecNormal(): Unit = {
addFreeSpecViewTest()
runFileStructureViewTest(className, normalStatusId, "\"level1\"", "\"level1_1\"",
"\"level1_2\"", "\"level1_2_1\"")
}
def testFreeSpecHierarchy(): Unit = {
addFreeSpecViewTest()
runFileStructureViewTest(className, "\"level1_1\"", Some("\"level1\""))
runFileStructureViewTest(className, "\"level1_2_1\"", Some("\"level1_2\""))
}
def testFreeSpecIgnoredHierarchy(): Unit = {
addFreeSpecViewTest()
runFileStructureViewTest(className, "\"level2_1\"", Some("\"level2\"" + TestNodeProvider.ignoredSuffix))
runFileStructureViewTest(className, "\"level2_2\"" + TestNodeProvider.ignoredSuffix, Some("\"level2\"" + TestNodeProvider.ignoredSuffix), ignoredStatusId)
}
def testFreeSpecIgnored(): Unit = {
addFreeSpecViewTest()
runFileStructureViewTest(className, ignoredStatusId, "\"level2\"", "\"level2_2\"")
}
def testFreeSpecIgnoredAndPending(): Unit = {
addFreeSpecViewTest()
runFileStructureViewTest(className, ignoredStatusId, "\"level3\"")
}
def testFreeSpecPending(): Unit = {
addFreeSpecViewTest()
runFileStructureViewTest(className, pendingStatusId, "\"level1_2\"", "\"level1_3\"")
}
}
| triggerNZ/intellij-scala | test/org/jetbrains/plugins/scala/testingSupport/scalatest/fileStructureView/FreeSpecFileStructureViewTest.scala | Scala | apache-2.0 | 2,309 |
/**
* Licensed to Gravity.com under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Gravity.com licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package gander.text
/**
* Created by IntelliJ IDEA.
* User: robbie
* Date: 5/13/11
* Time: 3:53 PM
*/
import java.util.regex.Pattern
class StringSplitter {
def this(pattern: String) {
this()
this.pattern = Pattern.compile(pattern)
}
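  // Splits the input on the configured pattern; a null or empty input yields an empty array.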
def split(input: String): Array[String] = {
if (string.isNullOrEmpty(input)) return string.emptyArray
pattern.split(input)
}
private var pattern: Pattern = null
}
| lloydmeta/gander | src/main/scala/gander/text/StringSplitter.scala | Scala | apache-2.0 | 1,238 |
/*
* The MIT License
*
* Copyright (c) 2016 Fulcrum Genomics LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.fulcrumgenomics.cmdline
import com.fulcrumgenomics.sopt.{arg, clp}
import com.fulcrumgenomics.testing.UnitSpec
/* Tis a silly CLP. */
@clp(group=ClpGroups.Utilities, description="A test class")
class TestClp
(
@arg(flag='e', doc="If set, exit with this code.") val exitCode: Option[Int],
@arg(flag='m', doc="If set, fail with this message.") val message: Option[String],
@arg(flag='p', doc="Print this message.") val printMe: Option[String]
) extends FgBioTool {
override def execute(): Unit = {
(exitCode, message) match {
case (Some(ex), Some(msg)) => fail(ex, msg)
case (Some(ex), None ) => fail(ex)
case (None, Some(msg)) => fail(msg)
case (None, None ) => printMe.foreach(println)
}
}
}
/** Some basic test for the CLP classes. */
class ClpTests extends UnitSpec {
"FgBioMain" should "find a CLP and successfully set it up and execute it" in {
new FgBioMain().makeItSo("TestClp --print-me=hello".split(' ')) shouldBe 0
}
it should "fail with the provided exit code" in {
new FgBioMain().makeItSo("TestClp -e 7".split(' ')) shouldBe 7
new FgBioMain().makeItSo("TestClp --exit-code=5".split(' ')) shouldBe 5
new FgBioMain().makeItSo("TestClp --exit-code=9 --message=FailBabyFail".split(' ')) shouldBe 9
new FgBioMain().makeItSo("TestClp --message=FailBabyFail".split(' ')) should not be 0
}
it should "fail and print usage" in {
new FgBioMain().makeItSo("SomeProgram --with-args=that-dont-exist".split(' ')) should not be 0
}
}
| fulcrumgenomics/fgbio | src/test/scala/com/fulcrumgenomics/cmdline/ClpTests.scala | Scala | mit | 2,705 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.learning.reproduction
import de.fuberlin.wiwiss.silk.util.DPair
import de.fuberlin.wiwiss.silk.learning.individual.AggregationNode
import de.fuberlin.wiwiss.silk.learning.reproduction.Utils.crossoverNodes
/**
* A crossover operator which combines the operators of two aggregations.
*/
case class AggregationOperatorsCrossover() extends NodePairCrossoverOperator[AggregationNode] {
override protected def crossover(nodes: DPair[AggregationNode]) = {
nodes.source.copy(operators = crossoverNodes(nodes.source.operators, nodes.target.operators))
}
} | fusepoolP3/p3-silk | silk-learning/src/main/scala/de/fuberlin/wiwiss/silk/learning/reproduction/AggregationOperatorsCrossover.scala | Scala | apache-2.0 | 1,155 |
/*
* Skylark
* http://skylark.io
*
* Copyright 2012-2017 Quantarray, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.quantarray.skylark.measure
/**
* EnergyPrice / () converter.
*
* @author Araik Grigoryan
*/
trait EnergyPricePerDimensionlessConverter extends SameMeasureConverter[RatioMeasure[RatioMeasure[Currency, EnergyMeasure], DimensionlessMeasure]]
{
type From = RatioMeasure[RatioMeasure[Currency, EnergyMeasure], DimensionlessMeasure]
type To = RatioMeasure[RatioMeasure[Currency, EnergyMeasure], DimensionlessMeasure]
implicit val cc1: CanConvert[RatioMeasure[Currency, EnergyMeasure], RatioMeasure[Currency, EnergyMeasure]]
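  // Converts by the ratio of the dimensionless denominators' immediate bases; the price numerator
  // is the same measure on both sides (this is a SameMeasureConverter).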
protected override def convert(from: From, to: To): Option[Double] = Some(to.denominator.immediateBase / from.denominator.immediateBase)
}
object EnergyPricePerDimensionlessConverter
{
def apply(cc: CanConvert[RatioMeasure[Currency, EnergyMeasure], RatioMeasure[Currency, EnergyMeasure]]): EnergyPricePerDimensionlessConverter =
{
val params = cc
new EnergyPricePerDimensionlessConverter
{
implicit val cc1: CanConvert[RatioMeasure[Currency, EnergyMeasure], RatioMeasure[Currency, EnergyMeasure]] = params
}
}
} | quantarray/skylark | skylark-measure/src/main/scala/com/quantarray/skylark/measure/EnergyPricePerDimensionlessConverter.scala | Scala | apache-2.0 | 1,731 |
/*
* Copyright (c) 2012-2016 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.enrich.common
package enrichments.registry
package apirequest
// specs2
import org.specs2.Specification
import org.specs2.scalaz.ValidationMatchers
// json4s
import org.json4s.JObject
import org.json4s.JsonDSL._
class OutputSpec extends Specification with ValidationMatchers { def is =
"This is a specification to test the HTTP API of API Request Enrichment" ^
p^
"Not found value result in Failure" ! e1^
"Successfully generate context" ! e2^
"Successfully generate context out of complex object" ! e3^
end
def e1 = {
val output = Output("iglu:com.snowplowanalytics/some_schema/jsonschema/1-0-0", Some(JsonOutput("$.value")))
output.extract(JObject(Nil)) must beFailing
}
def e2 = {
val output = Output("iglu:com.snowplowanalytics/some_schema/jsonschema/1-0-0", Some(JsonOutput("$.value")))
output.parse("""{"value": 32}""").flatMap(output.extract).map(output.describeJson) must beSuccessful.like {
case context => context must be equalTo(("schema", "iglu:com.snowplowanalytics/some_schema/jsonschema/1-0-0") ~ ("data" -> 32))
}
}
def e3 = {
val output = Output("iglu:com.snowplowanalytics/complex_schema/jsonschema/1-0-0", Some(JsonOutput("$.objects[1].deepNesting[3]")))
output.parse(
"""
|{
| "value": 32,
| "objects":
| [
| {"wrongValue": 11},
| {"deepNesting": [1,2,3,42]},
| {"wrongValue": 10}
| ]
|}
""".stripMargin).flatMap(output.extract).map(output.describeJson) must beSuccessful.like {
case context => context must be equalTo(("schema", "iglu:com.snowplowanalytics/complex_schema/jsonschema/1-0-0") ~ ("data" -> 42))
}
}
}
| simplybusiness/snowplow-fork | 3-enrich/scala-common-enrich/src/test/scala/com.snowplowanalytics.snowplow.enrich.common/enrichments/registry/apirequest/OutputSpec.scala | Scala | apache-2.0 | 2,680 |
package com.twitter.finagle.http2.transport.client
import com.twitter.finagle.Stack
import com.twitter.finagle.http2.MultiplexHandlerBuilder
import com.twitter.finagle.http2.transport.client.H2Pool.OnH2Service
import com.twitter.finagle.http2.transport.common.H2StreamChannelInit
import com.twitter.finagle.netty4.Netty4Listener.BackPressure
import com.twitter.finagle.netty4.http.{Http2CodecName, Http2MultiplexHandlerName}
import com.twitter.finagle.netty4.transport.ChannelTransport
import com.twitter.finagle.param.Stats
import com.twitter.finagle.transport.Transport
import io.netty.channel._
import io.netty.handler.codec.http.HttpClientUpgradeHandler.UpgradeEvent
import io.netty.handler.codec.http.{
FullHttpRequest,
FullHttpResponse,
HttpClientCodec,
HttpClientUpgradeHandler
}
import io.netty.handler.codec.http2.Http2ClientUpgradeCodec
import scala.jdk.CollectionConverters._
/**
* Takes the upgrade result and marks it as something read off the wire to
* expose it to finagle, and manipulates the pipeline to be fit for http/2.
*/
private final class UpgradeRequestHandler(
params: Stack.Params,
onH2Service: OnH2Service,
httpClientCodec: HttpClientCodec,
modifier: Transport[Any, Any] => Transport[Any, Any])
extends ChannelDuplexHandler {
import UpgradeRequestHandler._
private[this] val stats = params[Stats].statsReceiver
private[this] val statsReceiver = stats.scope("upgrade")
private[this] val attemptCounter = statsReceiver.counter("attempt")
private[this] val upgradeCounter = statsReceiver.counter("success")
private[this] val ignoredCounter = statsReceiver.counter("ignored")
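  // Flow: write() attempts an h2c upgrade only for empty-bodied requests; a successful upgrade
  // reshapes the parent pipeline via initializeUpgradeStreamChannel, while a rejected upgrade or a
  // request with a body falls back to plain HTTP/1.1 through noUpgrade().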
// Exposed for testing
def initializeUpgradeStreamChannel(ch: Channel, parentCtx: ChannelHandlerContext): Unit = {
val p = parentCtx.pipeline
cleanPipeline(p)
val pingDetectionHandler = new H2ClientFilter(params)
p.addBefore(HandlerName, H2ClientFilter.HandlerName, pingDetectionHandler)
val streamChannelInit = H2StreamChannelInit.initClient(params)
val clientSession = new ClientSessionImpl(
params,
streamChannelInit,
parentCtx.channel,
() => pingDetectionHandler.status)
upgradeCounter.incr()
// let the Http2UpgradingTransport know that this was an upgrade request
parentCtx.pipeline.remove(this)
ch.pipeline.addLast(streamChannelInit)
val trans = clientSession.newChildTransport(ch)
// We need to make sure that if we close the session, it doesn't
// close everything down until the first stream has finished.
val session = new DeferredCloseSession(clientSession, trans.onClose.unit)
onH2Service(new ClientServiceImpl(session, stats, modifier))
parentCtx.fireChannelRead(
Http2UpgradingTransport.UpgradeSuccessful(new SingleDispatchTransport(trans))
)
}
private[this] def addUpgradeHandler(ctx: ChannelHandlerContext): Unit = {
val upgradeStreamhandler: ChannelHandler = new ChannelInitializer[Channel] {
def initChannel(ch: Channel): Unit = initializeUpgradeStreamChannel(ch, ctx)
}
val (codec, handler) =
MultiplexHandlerBuilder.clientFrameCodec(params, Some(upgradeStreamhandler))
val upgradeCodec = new Http2ClientUpgradeCodec(codec) {
override def upgradeTo(
ctx: ChannelHandlerContext,
upgradeResponse: FullHttpResponse
): Unit = {
// Add the handler to the pipeline.
ctx.pipeline
.addAfter(ctx.name, Http2CodecName, codec)
.addAfter(Http2CodecName, Http2MultiplexHandlerName, handler)
// Reserve local stream for the response with stream id of '1'
codec.onHttpClientUpgrade()
}
}
// The parameter for `HttpClientUpgradeHandler.maxContentLength` can be 0 because
// the HTTP2 spec requires that a 101 request not have a body and for any other
// response status it will remove itself from the pipeline.
val upgradeHandler = new HttpClientUpgradeHandler(httpClientCodec, upgradeCodec, 0)
ctx.pipeline.addBefore(ctx.name, "httpUpgradeHandler", upgradeHandler)
}
override def write(ctx: ChannelHandlerContext, msg: Object, promise: ChannelPromise): Unit = {
msg match {
case req: FullHttpRequest if req.content.readableBytes == 0 =>
// A request we can upgrade from. Reshape our pipeline and keep trucking.
addUpgradeHandler(ctx)
attemptCounter.incr()
super.write(ctx, msg, promise)
case _ =>
// we don't attempt to upgrade when the request may have content, so we remove
// ourselves and let the backend handlers know that we're not going to try upgrading.
ignoredCounter.incr()
noUpgrade(ctx, Http2UpgradingTransport.UpgradeAborted)
ctx.write(msg, promise)
}
}
override def userEventTriggered(ctx: ChannelHandlerContext, event: Any): Unit = event match {
case UpgradeEvent.UPGRADE_ISSUED => // no surprises here.
case UpgradeEvent.UPGRADE_REJECTED =>
noUpgrade(ctx, Http2UpgradingTransport.UpgradeRejected)
case _ =>
super.userEventTriggered(ctx, event)
}
private[this] def noUpgrade(
ctx: ChannelHandlerContext,
result: Http2UpgradingTransport.UpgradeResult
): Unit = {
ctx.pipeline.remove(this)
ctx.fireChannelRead(result)
// Configure the original backpressure strategy since the pipeline started life
// with autoread enabled.
ctx.channel.config.setAutoRead(!params[BackPressure].enabled)
// Make sure we request at least one more message so that we don't starve the
// ChannelTransport.
ctx.read()
}
}
private object UpgradeRequestHandler {
val HandlerName = "pipelineUpgrader"
// Clean out the channel handlers that are only for HTTP/1.x so we don't have
// a bunch of noise in our main pipeline.
private def cleanPipeline(pipeline: ChannelPipeline): Unit = {
pipeline.asScala.toList
// We don't want to remove anything up to the pipeline upgrader which
// are stages like metrics, etc.
.dropWhile(_.getKey != HandlerName)
.drop(1)
// These will be things that operate on HTTP messages which will no longer
// be flowing down the main pipeline. Examples include message aggregators,
// compressors/decompressors, etc.
.takeWhile(_.getKey != ChannelTransport.HandlerName)
.foreach { entry => pipeline.remove(entry.getValue) }
}
}
| twitter/finagle | finagle-http2/src/main/scala/com/twitter/finagle/http2/transport/client/UpgradeRequestHandler.scala | Scala | apache-2.0 | 6,385 |
package org.kanyec.ast
import org.objectweb.asm.MethodVisitor
import org.objectweb.asm.Opcodes._
import org.kanyec.SymbolTable
case class CallReadMethodNode(returnVar: String) extends StatementNode {
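  // Emits bytecode that builds a java.util.Scanner over System.in, reads one int via nextInt(),
  // and stores it into the local-variable slot allocated for returnVar.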
def generate(mv: MethodVisitor, symbolTable: SymbolTable) = {
mv.visitTypeInsn(NEW, "java/util/Scanner");
mv.visitInsn(DUP);
mv.visitFieldInsn(GETSTATIC, "java/lang/System", "in", "Ljava/io/InputStream;")
mv.visitMethodInsn(INVOKESPECIAL, "java/util/Scanner", "<init>", "(Ljava/io/InputStream;)V");
mv.visitMethodInsn(INVOKEVIRTUAL, "java/util/Scanner", "nextInt", "()I")
mv.visitVarInsn(ISTORE, symbolTable.getVariableAddress(returnVar))
}
}
| nicksam112/kanyec | src/main/scala/org/kanyec/ast/CallReadMethodNode.scala | Scala | apache-2.0 | 672 |
/*
* Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.maven
import java.net.URLClassLoader
import javax.inject.{ Inject, Singleton }
import org.eclipse.aether.artifact.Artifact
/**
* Implements sharing of Scala classloaders, to save on memory
*/
@Singleton
class ScalaClassLoaderManager @Inject() (logger: MavenLoggerProxy) {
/**
* The list of Scala libraries. None of these libraries may have a dependency outside of this list, otherwise there
* will be classloading issues.
*
* Note that while adding more libraries to this list will allow more to be shared, it may also mean that classloaders
* can be shared in less cases, since it becomes less likely that there will be an exact match between two projects
* in what can be shared.
*/
private val ScalaLibs = Set(
"org.scala-lang" -> "scala-library",
"org.scala-lang" -> "scala-reflect",
"org.scala-lang.modules" -> "scala-xml",
"org.scala-lang.modules" -> "scala-parser-combinators",
"org.scala-lang.modules" -> "scala-java8-compat"
)
  private val ScalaVersionPattern = "_\\d+\\.\\d+.*$".r
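  // e.g. stripScalaVersion("scala-java8-compat_2.12") == "scala-java8-compat"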
private def stripScalaVersion(artifactId: String) = ScalaVersionPattern.replaceFirstIn(artifactId, "")
private def createCacheKey(artifacts: Seq[Artifact]): String = {
artifacts.map { artifact =>
import artifact._
s"$getGroupId:$getArtifactId:$getVersion"
}.sorted.mkString(",")
}
private var cache = Map.empty[String, ClassLoader]
/**
* Extract a Scala ClassLoader from the given classpath.
*/
def extractScalaClassLoader(artifacts: Seq[Artifact]): ClassLoader = synchronized {
val scalaArtifacts = artifacts.filter { artifact =>
ScalaLibs.contains(artifact.getGroupId -> stripScalaVersion(artifact.getArtifactId))
}
val cacheKey = createCacheKey(scalaArtifacts)
cache.get(cacheKey) match {
case Some(classLoader) =>
logger.debug(s"ScalaClassLoader cache hit - $cacheKey")
classLoader
case None =>
logger.debug(s"ScalaClassLoader cache miss - $cacheKey")
val classLoader = new URLClassLoader(scalaArtifacts.map(_.getFile.toURI.toURL).toArray, null)
cache += (cacheKey -> classLoader)
classLoader
}
}
}
| edouardKaiser/lagom | dev/maven-plugin/src/main/scala/com/lightbend/lagom/maven/ScalaClassLoaderManager.scala | Scala | apache-2.0 | 2,285 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import exceptions.{GeneratorDrivenPropertyCheckFailedException, TableDrivenPropertyCheckFailedException, TestFailedDueToTimeoutException, TestCanceledException}
// SKIP-SCALATESTJS,NATIVE-START
import org.scalatestplus.junit.JUnitTestFailedError
// SKIP-SCALATESTJS,NATIVE-END
import prop.TableDrivenPropertyChecks
import TableDrivenPropertyChecks._
import org.scalatest.exceptions.ModifiableMessage
import org.scalatest.exceptions.StackDepth
import SharedHelpers.EventRecordingReporter
import org.scalactic.exceptions.NullArgumentException
import org.scalactic.source
import org.scalatest.exceptions.StackDepthException
import org.scalatest.exceptions.TestFailedException
import prop.TableFor1
import time.{Second, Span}
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
class ClueSpec extends AnyFlatSpec with Matchers {
def examples: TableFor1[Throwable with ModifiableMessage[_ <: StackDepth]] =
Table(
"exception",
new TestFailedException((_: StackDepthException) => Some("message"), None, source.Position.here),
// SKIP-SCALATESTJS,NATIVE-START
new JUnitTestFailedError("message", 3),
// SKIP-SCALATESTJS,NATIVE-END
new TestFailedDueToTimeoutException((_: StackDepthException) => Some("message"), None, Left(source.Position.here), None, Span(1, Second)),
new TableDrivenPropertyCheckFailedException((_: StackDepthException) => "message", None, source.Position.here, None, "undecMsg", List.empty, List.empty, 3),
new GeneratorDrivenPropertyCheckFailedException((_: StackDepthException) => "message", None, source.Position.here, None, "undecMsg", List.empty, Option(List.empty), List.empty)
)
// TOTEST: clue object with toString. clue object with null toString. all-whitespace clue string
"The modifyMessage method" should "return the an exception with an equal message option if passed a function that returns the same option passed to it" in {
forAll (examples) { e =>
e.modifyMessage(opt => opt) should equal (e)
}
}
it should "return the new exception with the clue string prepended, separated by a space char if passed a function that does that" in {
forAll (examples) { e =>
val clue = "clue"
val fun: (Option[String] => Option[String]) =
opt => opt match {
case Some(msg) => Some(clue + " " + msg)
case None => Some(clue)
}
e.modifyMessage(fun).message.get should be ("clue message")
}
}
// ******* withClue tests *******
"The withClue construct" should "allow any non-ModifiableMessage exception to pass through" in {
val iae = new IllegalArgumentException
val caught = intercept[IllegalArgumentException] {
withClue("howdy") {
throw iae
}
}
caught should be theSameInstanceAs (iae)
}
it should "given an empty clue string, rethrow the same TFE exception" in {
forAll (examples) { e =>
val caught = intercept[Throwable] {
withClue("") {
throw e
}
}
caught should be theSameInstanceAs (e)
}
}
it should "given an all-whitespace clue string, should throw a new TFE with the white space prepended to the old message" in {
forAll (examples) { e =>
val white = " "
val caught = intercept[Throwable with StackDepth] {
withClue(white) {
throw e
}
}
caught should not be theSameInstanceAs (e)
caught.message should equal (Some(white + "message"))
caught.getClass should be theSameInstanceAs (e.getClass)
}
}
it should "given a non-empty clue string with no trailing white space, throw a new instance of the caught TFE exception that has all fields the same except a prepended clue string followed by an extra space" in {
forAll (examples) { e =>
val caught = intercept[Throwable with StackDepth] {
withClue("clue") {
throw e
}
}
caught should not be theSameInstanceAs (e)
caught.message should equal (Some("clue message"))
caught.getClass should be theSameInstanceAs (e.getClass)
}
}
it should "given a non-empty clue string with a trailing space, throw a new instance of the caught TFE exception that has all fields the same except a prepended clue string (followed by no extra space)" in {
forAll (examples) { e =>
val caught = intercept[Throwable with StackDepth] {
withClue("clue ") { // has a trailing space
throw e
}
}
caught should not be theSameInstanceAs (e)
caught.message should equal (Some("clue message"))
caught.getClass should be theSameInstanceAs (e.getClass)
}
}
it should "given a non-empty clue string with a end of line, throw a new instance of the caught TFE exception that has all fields the same except a prepended clue string (followed by no extra space)" in {
forAll (examples) { e =>
val caught = intercept[Throwable with StackDepth] {
withClue("clue\\n") { // has a end of line character
throw e
}
}
caught should not be theSameInstanceAs (e)
caught.message should equal (Some("clue\\nmessage"))
caught.getClass should be theSameInstanceAs (e.getClass)
}
}
// ***** tests with objects other than String *****
it should "given an object with a non-empty clue string with no trailing white space, throw a new instance of the caught TFE exception that has all fields the same except a prepended clue string followed by an extra space" in {
forAll (examples) { e =>
val list = List(1, 2, 3)
val caught = intercept[Throwable with StackDepth] {
withClue(list) {
throw e
}
}
caught should not be theSameInstanceAs (e)
caught.message should equal (Some("List(1, 2, 3) message"))
caught.getClass should be theSameInstanceAs (e.getClass)
}
}
it should "pass the last value back" in {
val result = withClue("hi") { 3 }
result should equal (3)
}
it should "throw NPE if a null clue object is passed" in {
forAll (examples) { e =>
assertThrows[NullArgumentException] {
withClue (null) {
throw e
}
}
}
}
it should "infer the type of the result of the passed in function" in {
val result: Int = withClue("hi") { 22 }
assert(result === 22)
}
it should "be able to accept by-name payload" in {
val result: String = withClue(() => 128) { "hello" }
assert(result === "hello")
}
it should "work when used in withFixture" in {
forAll(examples) { e =>
val a =
new org.scalatest.funspec.FixtureAnyFunSpec {
type FixtureParam = String
override def withFixture(test: OneArgTest) = {
withClue("a clue") {
test("something")
}
}
it("should do something") { p =>
throw e
}
}
val rep = new EventRecordingReporter()
a.run(None, Args(rep))
rep.testFailedEventsReceived.length should be (1)
rep.testFailedEventsReceived(0).message should be ("a clue message")
}
}
it should "return Failed that contains TestFailedException and with prepended clue" in {
val failed = Failed(new TestFailedException((_: StackDepthException) => Some("message"), None, source.Position.here))
val result = withClue("a clue") { failed }
result shouldBe a [Failed]
result.exception shouldBe a [TestFailedException]
result.exception.getMessage shouldBe "a clue message"
}
it should "return original Failed that contains the RuntimeException and without prepended clue" in {
val failed = Failed(new RuntimeException("message"))
val result = withClue("a clue") { failed }
result should be theSameInstanceAs failed
result.exception.getMessage shouldBe "message"
}
it should "return Canceled that contains TestCanceledException and with prepended clue" in {
val canceled = Canceled(new TestCanceledException("message", 3))
val result = withClue("a clue") { canceled }
result shouldBe a [Canceled]
result.exception shouldBe a [TestCanceledException]
result.exception.getMessage shouldBe "a clue message"
}
it should "return original Canceled that contains the RuntimeException and without prepended clue" in {
val re = new RuntimeException("message")
val canceled = Canceled(re)
val result = withClue("a clue") { canceled }
result.exception.getCause should be theSameInstanceAs re
result.exception.getMessage shouldBe "a clue message"
}
it should "return original Pending" in {
val pending = Pending
val result = withClue("a clue") { pending }
result should be theSameInstanceAs pending
}
it should "return original Succeeded" in {
val succeeded = Succeeded
val result = withClue("a clue") { succeeded }
result should be theSameInstanceAs succeeded
}
// SKIP-SCALATESTJS,NATIVE-START
it should "throw Serializable TestFailedDueToTimeoutException thrown from withClue wrapping a failing eventually" in {
import org.scalatest.concurrent.Eventually._
import org.scalatest.exceptions.TestFailedDueToTimeoutException
import SharedHelpers.serializeRoundtrip
val result = intercept[TestFailedDueToTimeoutException] {
withClue("a clue") {
eventually {
"a" should equal ("b")
}
}
}
serializeRoundtrip(result)
}
// SKIP-SCALATESTJS,NATIVE-END
}
| scalatest/scalatest | jvm/scalatest-test/src/test/scala/org/scalatest/ClueSpec.scala | Scala | apache-2.0 | 10,156 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.containerpool.kubernetes
import java.io.IOException
import java.net.SocketTimeoutException
import java.time.{Instant, ZoneId}
import java.time.format.DateTimeFormatterBuilder
import akka.actor.ActorSystem
import akka.http.scaladsl.model.Uri
import akka.http.scaladsl.model.Uri.Path
import akka.http.scaladsl.model.Uri.Query
import akka.stream.{Attributes, Outlet, SourceShape}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
import akka.stream.stage._
import akka.util.ByteString
import io.fabric8.kubernetes.api.model._
import pureconfig.loadConfigOrThrow
import org.apache.openwhisk.common.Logging
import org.apache.openwhisk.common.TransactionId
import org.apache.openwhisk.core.ConfigKeys
import org.apache.openwhisk.core.containerpool.ContainerId
import org.apache.openwhisk.core.containerpool.ContainerAddress
import org.apache.openwhisk.core.containerpool.docker.ProcessRunner
import org.apache.openwhisk.core.entity.ByteSize
import org.apache.openwhisk.core.entity.size._
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.blocking
import scala.util.Failure
import scala.util.Success
import scala.util.Try
import spray.json._
import spray.json.DefaultJsonProtocol._
import collection.JavaConverters._
import io.fabric8.kubernetes.client.ConfigBuilder
import io.fabric8.kubernetes.client.DefaultKubernetesClient
import okhttp3.{Call, Callback, Request, Response}
import okio.BufferedSource
import scala.annotation.tailrec
import scala.collection.mutable
import scala.util.control.NonFatal
/**
* Configuration for kubernetes client command timeouts.
*/
case class KubernetesClientTimeoutConfig(run: Duration, logs: Duration)
/**
* Configuration for kubernetes invoker-agent
*/
case class KubernetesInvokerAgentConfig(enabled: Boolean, port: Int)
/**
* Configuration for node affinity for the pods that execute user action containers
* The key,value pair should match the <key,value> pair with which the invoker worker nodes
* are labeled in the Kubernetes cluster. The default pair is <openwhisk-role,invoker>,
* but a deployment may override this default if needed.
*/
case class KubernetesInvokerNodeAffinity(enabled: Boolean, key: String, value: String)
/**
* General configuration for kubernetes client
*/
case class KubernetesClientConfig(timeouts: KubernetesClientTimeoutConfig,
invokerAgent: KubernetesInvokerAgentConfig,
userPodNodeAffinity: KubernetesInvokerNodeAffinity)
/**
* Serves as an interface to the Kubernetes API by proxying its REST API and/or invoking the kubectl CLI.
*
* Be cautious with the ExecutionContext passed to this, as many
* operations are blocking.
*
* You only need one instance (and you shouldn't get more).
*/
class KubernetesClient(
config: KubernetesClientConfig = loadConfigOrThrow[KubernetesClientConfig](ConfigKeys.kubernetes))(
executionContext: ExecutionContext)(implicit log: Logging, as: ActorSystem)
extends KubernetesApi
with ProcessRunner {
implicit protected val ec = executionContext
implicit protected val am = ActorMaterializer()
implicit protected val kubeRestClient = new DefaultKubernetesClient(
new ConfigBuilder()
.withConnectionTimeout(config.timeouts.logs.toMillis.toInt)
.withRequestTimeout(config.timeouts.logs.toMillis.toInt)
.build())
def run(name: String,
image: String,
memory: ByteSize = 256.MB,
environment: Map[String, String] = Map.empty,
labels: Map[String, String] = Map.empty)(implicit transid: TransactionId): Future[KubernetesContainer] = {
val envVars = environment.map {
case (key, value) => new EnvVarBuilder().withName(key).withValue(value).build()
}.toSeq
val podBuilder = new PodBuilder()
.withNewMetadata()
.withName(name)
.addToLabels("name", name)
.addToLabels(labels.asJava)
.endMetadata()
.withNewSpec()
.withRestartPolicy("Always")
if (config.userPodNodeAffinity.enabled) {
val invokerNodeAffinity = new AffinityBuilder()
.withNewNodeAffinity()
.withNewRequiredDuringSchedulingIgnoredDuringExecution()
.addNewNodeSelectorTerm()
.addNewMatchExpression()
.withKey(config.userPodNodeAffinity.key)
.withOperator("In")
.withValues(config.userPodNodeAffinity.value)
.endMatchExpression()
.endNodeSelectorTerm()
.endRequiredDuringSchedulingIgnoredDuringExecution()
.endNodeAffinity()
.build()
podBuilder.withAffinity(invokerNodeAffinity)
}
val pod = podBuilder
.addNewContainer()
.withNewResources()
.withLimits(Map("memory" -> new Quantity(memory.toMB + "Mi")).asJava)
.endResources()
.withName("user-action")
.withImage(image)
.withEnv(envVars.asJava)
.addNewPort()
.withContainerPort(8080)
.withName("action")
.endPort()
.endContainer()
.endSpec()
.build()
val namespace = kubeRestClient.getNamespace
kubeRestClient.pods.inNamespace(namespace).create(pod)
Future {
blocking {
val createdPod = kubeRestClient.pods
.inNamespace(namespace)
.withName(name)
.waitUntilReady(config.timeouts.run.length, config.timeouts.run.unit)
toContainer(createdPod)
}
}.recoverWith {
case e =>
log.error(this, s"Failed create pod for '$name': ${e.getClass} - ${e.getMessage}")
Future.failed(new Exception(s"Failed to create pod '$name'"))
}
}
def rm(container: KubernetesContainer)(implicit transid: TransactionId): Future[Unit] = {
Future {
blocking {
kubeRestClient
.inNamespace(kubeRestClient.getNamespace)
.pods()
.withName(container.id.asString)
.delete()
}
}.map(_ => ())
}
def rm(key: String, value: String, ensureUnpaused: Boolean = false)(implicit transid: TransactionId): Future[Unit] = {
Future {
blocking {
kubeRestClient
.inNamespace(kubeRestClient.getNamespace)
.pods()
.withLabel(key, value)
.delete()
}
}.map(_ => ())
}
// suspend is a no-op with the basic KubernetesClient
def suspend(container: KubernetesContainer)(implicit transid: TransactionId): Future[Unit] = Future.successful({})
// resume is a no-op with the basic KubernetesClient
def resume(container: KubernetesContainer)(implicit transid: TransactionId): Future[Unit] = Future.successful({})
def logs(container: KubernetesContainer, sinceTime: Option[Instant], waitForSentinel: Boolean = false)(
implicit transid: TransactionId): Source[TypedLogLine, Any] = {
log.debug(this, "Parsing logs from Kubernetes Graph Stage…")
Source
.fromGraph(new KubernetesRestLogSourceStage(container.id, sinceTime, waitForSentinel))
.log("kubernetesLogs")
}
protected def toContainer(pod: Pod): KubernetesContainer = {
val id = ContainerId(pod.getMetadata.getName)
val addr = ContainerAddress(pod.getStatus.getPodIP)
val workerIP = pod.getStatus.getHostIP
// Extract the native (docker or containerd) containerId for the container
// By convention, kubernetes adds a docker:// prefix when using docker as the low-level container engine
val nativeContainerId = pod.getStatus.getContainerStatuses.get(0).getContainerID.stripPrefix("docker://")
implicit val kubernetes = this
new KubernetesContainer(id, addr, workerIP, nativeContainerId)
}
}
object KubernetesClient {
// Necessary, as Kubernetes uses nanosecond precision in logs, but java.time.Instant toString uses milliseconds
//%Y-%m-%dT%H:%M:%S.%N%z
val K8STimestampFormat = new DateTimeFormatterBuilder()
.parseCaseInsensitive()
.appendPattern("u-MM-dd")
.appendLiteral('T')
.appendPattern("HH:mm:ss[.n]")
.appendLiteral('Z')
.toFormatter()
.withZone(ZoneId.of("UTC"))
def parseK8STimestamp(ts: String): Try[Instant] =
Try(Instant.from(K8STimestampFormat.parse(ts)))
def formatK8STimestamp(ts: Instant): Try[String] =
Try(K8STimestampFormat.format(ts))
}
trait KubernetesApi {
def run(name: String,
image: String,
memory: ByteSize,
environment: Map[String, String] = Map.empty,
labels: Map[String, String] = Map.empty)(implicit transid: TransactionId): Future[KubernetesContainer]
def rm(container: KubernetesContainer)(implicit transid: TransactionId): Future[Unit]
def rm(key: String, value: String, ensureUnpaused: Boolean)(implicit transid: TransactionId): Future[Unit]
def suspend(container: KubernetesContainer)(implicit transid: TransactionId): Future[Unit]
def resume(container: KubernetesContainer)(implicit transid: TransactionId): Future[Unit]
def logs(container: KubernetesContainer, sinceTime: Option[Instant], waitForSentinel: Boolean = false)(
implicit transid: TransactionId): Source[TypedLogLine, Any]
}
object KubernetesRestLogSourceStage {
import KubernetesClient.{formatK8STimestamp, parseK8STimestamp}
val retryDelay = 100.milliseconds
sealed trait K8SRestLogTimingEvent
case object K8SRestLogRetry extends K8SRestLogTimingEvent
def constructPath(namespace: String, containerId: String): Path =
Path / "api" / "v1" / "namespaces" / namespace / "pods" / containerId / "log"
def constructQuery(sinceTime: Option[Instant], waitForSentinel: Boolean): Query = {
val sinceTimestamp = sinceTime.flatMap(time => formatK8STimestamp(time).toOption)
Query(Map("timestamps" -> "true") ++ sinceTimestamp.map(time => "sinceTime" -> time))
}
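  // Drains the buffered response source line by line; each line has the form "<timestamp> <message>",
  // and only lines strictly newer than lastTimestamp are kept.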
@tailrec
def readLines(src: BufferedSource,
lastTimestamp: Option[Instant],
lines: Seq[TypedLogLine] = Seq.empty[TypedLogLine]): Seq[TypedLogLine] = {
if (!src.exhausted()) {
(for {
line <- Option(src.readUtf8Line()) if !line.isEmpty
timestampDelimiter = line.indexOf(" ")
// Kubernetes is ignoring nanoseconds in sinceTime, so we have to filter additionally here
rawTimestamp = line.substring(0, timestampDelimiter)
timestamp <- parseK8STimestamp(rawTimestamp).toOption if isRelevantLogLine(lastTimestamp, timestamp)
msg = line.substring(timestampDelimiter + 1)
stream = "stdout" // TODO - when we can distinguish stderr: https://github.com/kubernetes/kubernetes/issues/28167
} yield {
TypedLogLine(timestamp, stream, msg)
}) match {
case Some(logLine) =>
readLines(src, Option(logLine.time), lines :+ logLine)
case None =>
// we may have skipped a line for filtering conditions only; keep going
readLines(src, lastTimestamp, lines)
}
} else {
lines
}
}
def isRelevantLogLine(lastTimestamp: Option[Instant], newTimestamp: Instant): Boolean =
lastTimestamp match {
case Some(last) =>
newTimestamp.isAfter(last)
case None =>
true
}
}
final class KubernetesRestLogSourceStage(id: ContainerId, sinceTime: Option[Instant], waitForSentinel: Boolean)(
implicit val kubeRestClient: DefaultKubernetesClient)
extends GraphStage[SourceShape[TypedLogLine]] { stage =>
import KubernetesRestLogSourceStage._
val out = Outlet[TypedLogLine]("K8SHttpLogging.out")
override val shape: SourceShape[TypedLogLine] = SourceShape.of(out)
override protected def initialAttributes: Attributes = Attributes.name("KubernetesHttpLogSource")
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic =
new TimerGraphStageLogicWithLogging(shape) { logic =>
private val queue = mutable.Queue.empty[TypedLogLine]
private var lastTimestamp = sinceTime
def fetchLogs(): Unit =
try {
val path = constructPath(kubeRestClient.getNamespace, id.asString)
val query = constructQuery(lastTimestamp, waitForSentinel)
log.debug("*** Fetching K8S HTTP Logs w/ Path: {} Query: {}", path, query)
val url = Uri(kubeRestClient.getMasterUrl.toString)
.withPath(path)
.withQuery(query)
val request = new Request.Builder().get().url(url.toString).build
kubeRestClient.getHttpClient.newCall(request).enqueue(new LogFetchCallback())
} catch {
case NonFatal(e) =>
onFailure(e)
throw e
}
def onFailure(e: Throwable): Unit = e match {
case _: SocketTimeoutException =>
log.warning("* Logging socket to Kubernetes timed out.") // this should only happen with follow behavior
case _ =>
log.error(e, "* Retrieving the logs from Kubernetes failed.")
}
val emitCallback: AsyncCallback[Seq[TypedLogLine]] = getAsyncCallback[Seq[TypedLogLine]] {
case lines @ firstLine +: restOfLines =>
if (isAvailable(out)) {
log.debug("* Lines Available & output ready; pushing {} (remaining: {})", firstLine, restOfLines)
pushLine(firstLine)
queue ++= restOfLines
} else {
log.debug("* Output isn't ready; queueing lines: {}", lines)
queue ++= lines
}
case Nil =>
log.debug("* Empty lines returned.")
retryLogs()
}
class LogFetchCallback extends Callback {
override def onFailure(call: Call, e: IOException): Unit = logic.onFailure(e)
override def onResponse(call: Call, response: Response): Unit =
try {
val lines = readLines(response.body.source, lastTimestamp)
log.debug("* Read & decoded lines for K8S HTTP: {}", lines)
response.body.source.close()
lines.lastOption.foreach { line =>
log.debug("* Updating lastTimestamp (sinceTime) to {}", Option(line.time))
lastTimestamp = Option(line.time)
}
emitCallback.invoke(lines)
} catch {
case NonFatal(e) =>
log.error(e, "* Reading Kubernetes HTTP Response failed.")
logic.onFailure(e)
throw e
}
}
def pushLine(line: TypedLogLine): Unit = {
log.debug("* Pushing a chunk of kubernetes logging: {}", line)
push(out, line)
}
setHandler(
out,
new OutHandler {
override def onPull(): Unit = {
// if we still have lines queued up, return those; else make a new HTTP read.
if (queue.nonEmpty) {
log.debug("* onPull, nonEmpty queue... pushing line")
pushLine(queue.dequeue())
} else {
log.debug("* onPull, empty queue... fetching logs")
fetchLogs()
}
}
})
def retryLogs(): Unit = {
// Pause before retrying so we don't thrash Kubernetes w/ HTTP requests
log.debug("* Scheduling a retry of log fetch in {}", retryDelay)
scheduleOnce(K8SRestLogRetry, retryDelay)
}
override protected def onTimer(timerKey: Any): Unit = timerKey match {
case K8SRestLogRetry =>
log.debug("* Timer trigger for log fetch retry")
fetchLogs()
case x =>
log.warning("* Got a timer trigger with an unknown key: {}", x)
}
}
}
protected[core] final case class TypedLogLine(time: Instant, stream: String, log: String) {
import KubernetesClient.formatK8STimestamp
lazy val toJson: JsObject =
JsObject("time" -> formatK8STimestamp(time).getOrElse("").toJson, "stream" -> stream.toJson, "log" -> log.toJson)
lazy val jsonPrinted: String = toJson.compactPrint
lazy val jsonSize: Int = jsonPrinted.length
/**
* Returns a ByteString representation of the json for this Log Line
*/
val toByteString = ByteString(jsonPrinted)
override def toString = s"${formatK8STimestamp(time).get} $stream: ${log.trim}"
}
protected[core] object TypedLogLine {
import KubernetesClient.{parseK8STimestamp, K8STimestampFormat}
def readInstant(json: JsValue): Instant = json match {
case JsString(str) =>
parseK8STimestamp(str) match {
case Success(time) =>
time
case Failure(e) =>
deserializationError(
s"Could not parse a java.time.Instant from $str (Expected in format: $K8STimestampFormat: $e")
}
case _ =>
deserializationError(s"Could not parse a java.time.Instant from $json (Expected in format: $K8STimestampFormat)")
}
implicit val typedLogLineFormat = new RootJsonFormat[TypedLogLine] {
override def write(obj: TypedLogLine): JsValue = obj.toJson
override def read(json: JsValue): TypedLogLine = {
val obj = json.asJsObject
val fields = obj.fields
TypedLogLine(readInstant(fields("time")), fields("stream").convertTo[String], fields("log").convertTo[String])
}
}
}
| starpit/openwhisk | core/invoker/src/main/scala/org/apache/openwhisk/core/containerpool/kubernetes/KubernetesClient.scala | Scala | apache-2.0 | 17,865 |
package com.github.gdefacci.briscola.spec.tournament
import com.github.gdefacci.ddd._
import com.github.gdefacci.bdd._
import testkit.Predicates._
import com.github.gdefacci.briscola.player._
import com.github.gdefacci.briscola.tournament._
import com.github.gdefacci.briscola.competition.SingleMatch
import com.github.gdefacci.briscola.game.TooFewPlayers
import com.github.gdefacci.briscola.game._
object TournamentFeatures extends Features with TournamentSteps {
val `cant start a tournament with just one player` = {
val player1 = Player(PlayerId(1), "1", "1")
scenario(
`Given that logged players are`(player1)
When `issue the command`(StartTournament(Players(Set(player1.id)), SingleMatch))
Then `error is`(`equal to`(TournamentGameError(TooFewPlayers(Set(player1.id), GameState.MIN_PLAYERS)))))
}
val `cant start a tournament with too many players` = {
val player1 = Player(PlayerId(1), "1", "1")
val otherPlayers = 2.to(GameState.MAX_PLAYERS + 1).map(idx => Player(PlayerId(idx), s"$idx", s"$idx")).toSet
val allPlayers = otherPlayers + player1
scenario(
`Given that logged players are`(allPlayers.toSeq: _*)
When `issue the command`(StartTournament(Players(allPlayers.map(_.id)), SingleMatch))
Then `error is`(`equal to`(TournamentGameError(TooManyPlayers(allPlayers.map(_.id), GameState.MAX_PLAYERS)))))
}
val `a tournament can be started` = {
val players = 1.to(3).map(idx => Player(PlayerId(idx), s"$idx", s"$idx")).toSet
val game = ActiveGameState(GameId(1), Card(7, Seed.bastoni), Deck.empty, Seq.empty, Seq(PlayerState(PlayerId(1), Set.empty, Score.empty)), None)
val fgame = FinalGameState(GameId(1), Card(7, Seed.bastoni), Nil, None)
def `the proper tournament started event`: Predicate[TournamentEvent] = predicate {
case TournamentStarted(pls1, SingleMatch) =>
GamePlayers.getPlayers(pls1) == players.map(_.id)
case _ => false
}
scenario(
`Given that logged players are`(players.toSeq: _*)
When `issue the command`(StartTournament(Players(players.map(_.id)), SingleMatch))
Then `events contain`(`the proper tournament started event`)
And `the final state`(`is an active tournament`))
}
val `can bind a game to a tournament` = {
val players = 1.to(3).map(idx => Player(PlayerId(idx), s"$idx", s"$idx")).toSet
val game = ActiveGameState(GameId(1), Card(7, Seed.bastoni), Deck.empty, Seq.empty, Seq(PlayerState(PlayerId(1), Set.empty, Score.empty)), None)
val fgame = FinalGameState(GameId(1), Card(7, Seed.bastoni), Nil, None)
def `the proper tournament game started event`: Predicate[TournamentEvent] = predicate {
case TournamentGameHasStarted(gm1) => game == gm1
case _ => false
}
scenario(
`Given that logged players are`(players.toSeq: _*)
When `issue the command`(StartTournament(Players(players.map(_.id)), SingleMatch))
And `issue the command`(SetTournamentGame(game))
Then `events contain`(`the proper tournament game started event`)
And `the final state`(`is an active tournament`))
}
val `a single match tournament can be completed` = {
val players = 1.to(3).map(idx => Player(PlayerId(idx), s"$idx", s"$idx")).toSet
val game = ActiveGameState(GameId(1), Card(7, Seed.bastoni), Deck.empty, Seq.empty, Seq(PlayerState(PlayerId(1), Set.empty, Score.empty)), None)
val fgame = FinalGameState(GameId(1), Card(7, Seed.bastoni), Nil, None)
def `the proper finished tournament event`: Predicate[TournamentEvent] = predicate {
case TournamentGameHasFinished(gm2) => gm2 == fgame
case _ => false
}
scenario(
`Given that logged players are`(players.toSeq: _*)
When `issue the command`(StartTournament(Players(players.map(_.id)), SingleMatch))
And `issue the command`(SetTournamentGame(game))
And `issue the command`(SetGameOutcome(fgame))
Then `events contain`(`the proper finished tournament event`)
And `the final state`(`is a completed tournament`))
}
lazy val features = new Feature("Tournament features",
`cant start a tournament with just one player`,
`cant start a tournament with too many players`,
`a tournament can be started`,
`can bind a game to a tournament`,
`a single match tournament can be completed`) :: Nil
} | gdefacci/briscola | ddd-briscola/src/test/scala/com/github/gdefacci/briscola/spec/tournament/TournamentFeatures.scala | Scala | bsd-3-clause | 4,423 |
package co.spendabit.webapp.forms.controls
import org.apache.commons.fileupload.FileItem
class FileUploadInput(label: String, name: String)
extends Field[FileItem](label) {
def validate(params: Map[String, Seq[String]],
fileItems: Seq[FileItem]): Either[String, FileItem] = {
fileItems.find(_.getFieldName == name) match {
case Some(fi) => Right(fi)
case None => Left("Please choose a file to upload.")
}
}
def widgetHTML(value: Option[FileItem] = None): xml.NodeSeq =
<input type="file" name={ name } />
}
object FileUploadInput {
def apply(label: String, name: String) = new FileUploadInput(label, name)
}
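// Hypothetical usage sketch (added for illustration; not part of the original file).
// Renders the widget and validates a multipart upload; the "Photo"/"photo" names are
// illustrative only.
object FileUploadInputExample {
  val photoField = FileUploadInput("Photo", "photo")
  def widget: xml.NodeSeq = photoField.widgetHTML()
  def validated(items: Seq[FileItem]): Either[String, FileItem] =
    photoField.validate(Map.empty, items) // Right(file) if "photo" was uploaded, Left(message) otherwise
}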
| spendabit/webapp-tools | src/co/spendabit/webapp/forms/controls/FileUploadInput.scala | Scala | unlicense | 668 |
package com.peim.http.api
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.directives.MarshallingDirectives.{as, entity}
import com.peim.model.Account
import com.peim.model.response.IdWrapper
import com.peim.repository.AccountsRepository
import de.heikoseeberger.akkahttpplayjson.PlayJsonSupport
import scaldi.{Injectable, Injector}
class AccountsServiceApi(implicit inj: Injector) extends Injectable with PlayJsonSupport {
private val accountsRepository = inject[AccountsRepository]
val route: Route = pathPrefix("accounts") {
pathEndOrSingleSlash {
get {
onSuccess(accountsRepository.findAll) {
result => complete(OK, result)
}
} ~
post {
entity(as[Account]) { account =>
onSuccess(accountsRepository.create(account)) {
result => complete(Created, IdWrapper(result))
}
}
}
} ~
path(IntNumber) { id =>
pathEndOrSingleSlash {
get {
onSuccess(accountsRepository.findById(id)) {
case Some(account) => complete(OK, account)
case None => complete(NotFound)
}
} ~
delete {
onSuccess(accountsRepository.delete(id)) {
_ => complete(NoContent)
}
}
}
}
}
}
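// Hypothetical usage notes (added for illustration; not part of the original file).
// The route above exposes a small CRUD surface:
//   GET    /accounts        -> 200 OK with all accounts
//   POST   /accounts        -> 201 Created with the new account id
//   GET    /accounts/{id}   -> 200 OK with the account, or 404 Not Found
//   DELETE /accounts/{id}   -> 204 No Content
// It would typically be combined with the other *ServiceApi routes and bound to an
// Akka HTTP server; the exact wiring lives elsewhere in this project.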
| peim/money-transfer-service | src/main/scala/com/peim/http/api/AccountsServiceApi.scala | Scala | mit | 1,461 |
package llsm
import org.scijava.Context
trait BenchmarkContext {
val context: Context = new Context
}
| keithschulze/llsm | benchmark/src/main/scala/llsm/context.scala | Scala | mit | 107 |
package io.rout
import scalatags.Text._
import scalatags.Text.all._
package object html {
val bs = io.rout.html.BootstrapCss
val tag = scalatags.Text.all
// def scalaTag[A](a: Seq[A]) = tag.a(a.map(e => li(e.toString)))
  def scalaTag[A](f: A => TypedTag[String]) = f
  def scalaModifier[A](f: A => Modifier) = f
  def render(pageTitle: String, a: Seq[Modifier])(
    pageRender: (String, Seq[Modifier]) => TypedTag[String]): TypedTag[String] = pageRender(pageTitle, a)
}
| teodimoff/rOut | html/src/io/rout/html/package.scala | Scala | apache-2.0 | 482 |
package controller
import java.util.Locale
import lib.{ SessionAttribute, Util }
import model.User
import skinny.PermittedStrongParameters
import skinny.controller.feature.{ FlashFeature, LocaleFeature, RequestScopeFeature }
import skinny.filter._
import skinny.micro.SkinnyMicroBase
import skinny.micro.context.SkinnyContext
import skinny.micro.contrib.CSRFTokenSupport
import skinny.validator.MapValidator
/**
* The base feature for controllers.
*/
trait ControllerBase
extends SkinnyMicroBase with RequestScopeFeature with FlashFeature with CSRFTokenSupport with LocaleFeature with SkinnySessionFilter
with TxPerRequestFilter {
override def defaultLocale = Some(Locale.JAPANESE)
def loginUser(implicit ctx: SkinnyContext): Option[User] = skinnySession.getAttribute(SessionAttribute.LoginUser.key).asInstanceOf[Option[User]]
/*
* util
*/
protected def baseURL(implicit ctx: SkinnyContext): String = {
Util.baseURL(request)
}
protected def resetCsrf(implicit ctx: SkinnyContext): Unit = {
if (getFromRequestScope(RequestScopeFeature.ATTR_CSRF_KEY).isEmpty) {
set(RequestScopeFeature.ATTR_CSRF_KEY, csrfKey)
set(RequestScopeFeature.ATTR_CSRF_TOKEN, prepareCsrfToken())
}
}
protected def getRequiredParam[T](name: String)(implicit ctx: SkinnyContext): T = {
params.get(name) match {
case Some(value) => value.asInstanceOf[T]
case _ => throw new IllegalStateException(s"cannot get from params. param-name: '$name'")
}
}
/*
* debug logging
*/
protected def debugLoggingParameters(form: MapValidator, id: Option[Long] = None) = {
if (logger.isDebugEnabled) {
val forId = id.map { id => s" for [id -> ${id}]" }.getOrElse("")
val params = form.paramMap.map { case (name, value) => s"${name} -> '${value}'" }.mkString("[", ", ", "]")
logger.debug(s"Parameters${forId}: ${params}")
}
}
protected def debugLoggingPermittedParameters(parameters: PermittedStrongParameters, id: Option[Long] = None) = {
if (logger.isDebugEnabled) {
val forId = id.map { id => s" for [id -> ${id}]" }.getOrElse("")
val params = parameters.params.map { case (name, (v, t)) => s"${name} -> '${v}' as ${t}" }.mkString("[", ", ", "]")
logger.debug(s"Permitted parameters${forId}: ${params}")
}
}
}
| atware/sharedocs | src/main/scala/controller/ControllerBase.scala | Scala | mit | 2,324 |
package kr.debop.catalina.session.serializer
import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import de.ruedigermoeller.serialization.FSTConfiguration
import kr.debop.catalina.session._
import org.slf4j.LoggerFactory
import scala.util.{Failure, Success, Try}
object FstSerializer {
lazy val defaultCfg = FSTConfiguration.createDefaultConfiguration()
def apply[T](): FstSerializer[T] = new FstSerializer[T]()
}
/**
 * Serializes/deserializes values using Fast-Serialization (FST)
*
* Created by debop on 2014. 3. 30.
*/
class FstSerializer[T] extends Serializer[T] {
private lazy val log = LoggerFactory.getLogger(getClass)
def defaultCfg: FSTConfiguration = FstSerializer.defaultCfg
override def serialize(graph: T): Array[Byte] = {
if (graph == null || graph == None)
return Array.empty[Byte]
using(new ByteArrayOutputStream()) { bos =>
Try(defaultCfg.getObjectOutput(bos)) match {
case Success(oos) =>
oos.writeObject(graph, Seq[Class[_]](): _*)
oos.flush()
bos.toByteArray
case Failure(e) =>
log.error(s"Fail to serialize graph. $graph", e)
Array.empty[Byte]
}
}
}
override def deserialize(bytes: Array[Byte]): T = {
if (bytes == null || bytes.length == 0)
return null.asInstanceOf[T]
using(new ByteArrayInputStream(bytes)) { bis =>
Try(defaultCfg.getObjectInput(bis)) match {
case Success(ois) =>
ois.readObject.asInstanceOf[T]
case Failure(e) =>
log.error(s"Fail to deserialize data.", e)
null.asInstanceOf[T]
}
}
}
}
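// Hypothetical usage sketch (added for illustration; not part of the original file).
// Round-trips a value through the FST-based serializer.
object FstSerializerExample {
  def roundTrip(): String = {
    val serializer = FstSerializer[String]()
    val bytes = serializer.serialize("hello")
    serializer.deserialize(bytes) // returns "hello"
  }
}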
| debop/tomcat-session-redis | src/main/scala/kr/debop/catalina/session/serializer/FstSerializer.scala | Scala | lgpl-3.0 | 1,622 |
package org.jetbrains.plugins.scala.lang.resolve2
/**
* Pavel.Fatin, 02.02.2010
*/
class ScopeElementTest extends ResolveTestBase {
override def folderPath: String = {
super.folderPath + "scope/element/"
}
def testBlock() = doTest()
def testCaseClass() = doTest()
def testClass() = doTest()
def testFunction() = doTest()
def testObject() = doTest()
def testTrait() = doTest()
} | katejim/intellij-scala | test/org/jetbrains/plugins/scala/lang/resolve2/ScopeElementTest.scala | Scala | apache-2.0 | 403 |
package dotty.tools.languageserver.worksheet
import dotty.tools.dotc.core.Contexts.Context
import dotty.tools.dotc.util.SourcePosition
import dotty.tools.dotc.interactive.InteractiveDriver
import dotty.tools.languageserver.DottyLanguageServer
import dotty.tools.languageserver.DottyLanguageServer.range
import org.eclipse.lsp4j.jsonrpc._ // {CancelChecker, CompletableFutures}
import org.eclipse.lsp4j.jsonrpc.services._ // {JsonSegment, JsonRequest}
import java.net.URI
import java.util.concurrent.{CompletableFuture, ConcurrentHashMap}
@JsonSegment("worksheet")
trait WorksheetService { thisServer: DottyLanguageServer =>
@JsonRequest
def run(params: WorksheetRunParams): CompletableFuture[WorksheetRunResult] =
computeAsync(synchronize = false, fun = { cancelChecker =>
val uri = new URI(params.textDocument.getUri)
try {
val driver = driverFor(uri)
val sendMessage = (pos: SourcePosition, msg: String) =>
client.publishOutput(WorksheetRunOutput(params.textDocument, range(pos).get, msg))
runWorksheet(driver, uri, sendMessage, cancelChecker)(driver.currentCtx)
cancelChecker.checkCanceled()
WorksheetRunResult(success = true)
} catch {
case _: Throwable =>
WorksheetRunResult(success = false)
}
})
/**
* Run the worksheet at `uri`.
*
* @param driver The driver for the project that contains the worksheet.
* @param uri The URI of the worksheet.
* @param sendMessage A mean of communicating the results of evaluation back.
* @param cancelChecker Token to check whether evaluation was cancelled
*/
private def runWorksheet(driver: InteractiveDriver,
uri: URI,
sendMessage: (SourcePosition, String) => Unit,
cancelChecker: CancelChecker)(
implicit ctx: Context): Unit = {
val treeOpt = thisServer.synchronized {
driver.openedTrees(uri).headOption
}
treeOpt.foreach(tree => Worksheet.run(tree, thisServer, sendMessage, cancelChecker))
}
}
| som-snytt/dotty | language-server/src/dotty/tools/languageserver/worksheet/WorksheetService.scala | Scala | apache-2.0 | 2,097 |
package edu.gemini.itc.baseline
import edu.gemini.itc.baseline.util._
import edu.gemini.itc.shared.GnirsParameters
import edu.gemini.spModel.gemini.gnirs.GNIRSParams._
import edu.gemini.spModel.core.WavelengthConversions._
/**
* GNIRS baseline test fixtures.
*/
object BaselineGnirs {
lazy val Fixtures = KBandSpectroscopy ++ KBandImaging
private lazy val KBandSpectroscopy = Fixture.kBandSpcFixtures(List(
new GnirsParameters(
PixelScale.PS_005,
None,
Some(Disperser.D_10),
ReadMode.FAINT,
CrossDispersed.LXD,
2.4.microns,
SlitWidth.SW_1,
None,
WellDepth.SHALLOW,
Fixture.NoAltair),
new GnirsParameters(
PixelScale.PS_015,
None,
Some(Disperser.D_111),
ReadMode.VERY_BRIGHT,
CrossDispersed.NO,
2.4.microns,
SlitWidth.SW_3,
None,
WellDepth.SHALLOW,
Fixture.NoAltair),
new GnirsParameters(
PixelScale.PS_005,
None,
Some(Disperser.D_32),
ReadMode.FAINT,
CrossDispersed.LXD,
2.6.microns,
SlitWidth.SW_6,
None,
WellDepth.SHALLOW,
Fixture.NoAltair),
new GnirsParameters(
PixelScale.PS_015,
None,
Some(Disperser.D_32),
ReadMode.VERY_BRIGHT,
CrossDispersed.NO,
2.6.microns,
SlitWidth.SW_8,
None,
WellDepth.SHALLOW,
Fixture.NoAltair)
))
private lazy val KBandImaging = Fixture.kBandImgFixtures(List(
new GnirsParameters(
PixelScale.PS_005,
Some(Filter.K),
None,
ReadMode.FAINT,
CrossDispersed.NO,
2.4.microns,
SlitWidth.ACQUISITION,
None,
WellDepth.SHALLOW,
Fixture.NoAltair),
new GnirsParameters(
PixelScale.PS_005,
Some(Filter.J),
None,
ReadMode.BRIGHT,
CrossDispersed.NO,
2.4.microns,
SlitWidth.ACQUISITION,
None,
WellDepth.SHALLOW,
Fixture.AltairNgs),
new GnirsParameters(
PixelScale.PS_005,
Some(Filter.Y),
None,
ReadMode.VERY_FAINT,
CrossDispersed.NO,
2.4.microns,
SlitWidth.ACQUISITION,
None,
WellDepth.SHALLOW,
Fixture.AltairLgs),
new GnirsParameters(
PixelScale.PS_015,
Some(Filter.ORDER_4),
None,
ReadMode.FAINT,
CrossDispersed.NO,
2.4.microns,
SlitWidth.ACQUISITION,
None,
WellDepth.SHALLOW,
Fixture.AltairNgsFL),
new GnirsParameters(
PixelScale.PS_015,
Some(Filter.K),
None,
ReadMode.VERY_BRIGHT,
CrossDispersed.NO,
2.4.microns,
SlitWidth.ACQUISITION,
None,
WellDepth.SHALLOW,
Fixture.AltairNgs),
new GnirsParameters(
PixelScale.PS_015,
Some(Filter.K),
None,
ReadMode.FAINT,
CrossDispersed.NO,
2.4.microns,
SlitWidth.ACQUISITION,
None,
WellDepth.SHALLOW,
Fixture.AltairLgs)
))
}
| spakzad/ocs | bundle/edu.gemini.itc/src/test/scala/edu/gemini/itc/baseline/BaselineGnirs.scala | Scala | bsd-3-clause | 2,952 |
package com.twitter.finagle.memcached.util
import com.twitter.io.Buf
import scala.collection.mutable.ArrayBuffer
object ParserUtils {
// Used by byteArrayStringToInt. The maximum length of a non-negative Int in chars
private[this] val MaxLengthOfIntString = Int.MaxValue.toString.length
private[this] object isWhitespaceProcessor extends Buf.Processor {
private[this] val TokenDelimiter: Byte = ' '
def apply(byte: Byte): Boolean = byte != TokenDelimiter
}
private[this] object isDigitProcessor extends Buf.Processor {
def apply(byte: Byte): Boolean = byte >= '0' && byte <= '9'
}
/**
   * @return true if the Buf is non-empty and every byte in the Buf is a digit.
*/
def isDigits(buf: Buf): Boolean =
if (buf.isEmpty) false
else -1 == buf.process(isDigitProcessor)
private[memcached] def splitOnWhitespace(bytes: Buf): Seq[Buf] = {
val len = bytes.length
val split = new ArrayBuffer[Buf](6)
var segmentStart = 0
while (segmentStart < len) {
val segmentEnd = bytes.process(segmentStart, len, isWhitespaceProcessor)
if (segmentEnd == -1) {
// At the end
split += bytes.slice(segmentStart, len)
segmentStart = len // terminate loop
} else {
// We don't add an empty Buf instance at the front
if (segmentEnd != 0) {
split += bytes.slice(segmentStart, segmentEnd)
}
segmentStart = segmentEnd + 1
}
}
split.toSeq
}
/**
* Converts the `Buf`, representing a non-negative integer in chars,
* to a base 10 Int.
* Returns -1 if any of the bytes are not digits, or the length is invalid
*/
private[memcached] def bufToInt(buf: Buf): Int = {
val length = buf.length
if (length > MaxLengthOfIntString) -1
else {
var num = 0
var i = 0
while (i < length) {
val b = buf.get(i)
if (b >= '0' && b <= '9')
num = (num * 10) + (b - '0')
else
return -1
i += 1
}
num
}
}
}
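// Hypothetical usage sketch (added for illustration; not part of the original file).
// Exercises the helpers above on small Bufs.
private[util] object ParserUtilsExample {
  def demo(): Unit = {
    ParserUtils.isDigits(Buf.Utf8("12345"))                // true
    ParserUtils.isDigits(Buf.Utf8(""))                     // false (empty Buf)
    ParserUtils.bufToInt(Buf.Utf8("12345"))                // 12345
    ParserUtils.bufToInt(Buf.Utf8("12a45"))                // -1 (non-digit byte)
    ParserUtils.splitOnWhitespace(Buf.Utf8("get foo bar")) // three Bufs: "get", "foo", "bar"
  }
}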
| twitter/finagle | finagle-memcached/src/main/scala/com/twitter/finagle/memcached/util/ParserUtils.scala | Scala | apache-2.0 | 2,034 |
package recipes
import java.util.concurrent.ThreadLocalRandom
trait TimeWindows {
private def show(acc: Long, sec: Long) =
s" ★ ★ ★ ${Thread.currentThread.getName}: count:$acc interval: $sec sec ★ ★ ★ "
def tumblingWindow(acc: fs.State[Long], timeWindow: Long): fs.State[Long] =
if (System.currentTimeMillis - acc.ts > timeWindow) {
println(show(acc.count, timeWindow / 1000))
acc.copy(item = acc.item + 1L, ts = System.currentTimeMillis, count = 0L)
} else acc.copy(item = acc.item + 1L, count = acc.count + 1L)
def slowDown(accLatency: Long, delayPerMsg: Long): Long = {
val latency = accLatency + delayPerMsg
val sleepMillis: Long = 0 + (latency / 1000)
val sleepNanos: Int = (latency % 1000).toInt
Thread.sleep(sleepMillis, sleepNanos)
if (ThreadLocalRandom.current.nextDouble > 0.9995)
println(s"${Thread.currentThread.getName} Sleep:$sleepMillis millis and $sleepNanos nanos")
latency
}
}
| haghard/streams-recipes | src/main/scala/recipes/TimeWindows.scala | Scala | apache-2.0 | 988 |
package org.effechecka.selector
case class OccurrenceSelector(taxonSelector: String = "", wktString: String = "", traitSelector: String = "", uuid: Option[String] = None, ttlSeconds: Option[Int] = None) {
def withUUID: OccurrenceSelector = this.copy(uuid = Some(UuidUtils.uuidFor(this).toString))
}
| effechecka/effechecka-selector | src/main/scala/org/effechecka/selector/OccurrenceSelector.scala | Scala | mit | 302 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.schedulers
import monix.execution.internal.Trampoline
import scala.util.control.NonFatal
import scala.concurrent.{BlockContext, ExecutionContext, ExecutionContextExecutor}
/** A `scala.concurrent.ExecutionContext` implementation
* that executes runnables immediately, on the current thread,
* by means of a trampoline implementation.
*
* Can be used in some cases to keep the asynchronous execution
* on the current thread, as an optimization, but be warned,
* you have to know what you're doing.
*
* The `TrampolineExecutionContext` keeps a reference to another
* `underlying` context, to which it defers for:
*
* - reporting errors
* - deferring the rest of the queue in problematic situations
*
* Deferring the rest of the queue happens:
*
* - in case we have a runnable throwing an exception, the rest
* of the tasks get re-scheduled for execution by using
* the `underlying` context
* - in case we have a runnable triggering a Scala `blocking`
* context, the rest of the tasks get re-scheduled for execution
* on the `underlying` context to prevent any deadlocks
*
* Thus this implementation is compatible with the
* `scala.concurrent.BlockContext`, detecting `blocking` blocks and
* reacting by forking the rest of the queue to prevent deadlocks.
*
  * @param underlying is the `ExecutionContext` to which it defers
  *        in case real asynchronous execution is needed
*/
final class TrampolineExecutionContext private (underlying: ExecutionContext) extends ExecutionContextExecutor {
override def execute(runnable: Runnable): Unit =
TrampolineExecutionContext.trampoline.get().execute(runnable, underlying)
override def reportFailure(t: Throwable): Unit =
underlying.reportFailure(t)
}
object TrampolineExecutionContext {
/** Builds a [[TrampolineExecutionContext]] instance.
*
   * @param underlying is the `ExecutionContext` to which it defers
   *        in case asynchronous or time-delayed execution is needed
*/
def apply(underlying: ExecutionContext): TrampolineExecutionContext =
new TrampolineExecutionContext(underlying)
/** [[TrampolineExecutionContext]] instance that executes everything
* immediately, on the current thread.
*
* Implementation notes:
*
* - if too many `blocking` operations are chained, at some point
* the implementation will trigger a stack overflow error
* - `reportError` re-throws the exception in the hope that it
* will get caught and reported by the underlying thread-pool,
* because there's nowhere it could report that error safely
* (i.e. `System.err` might be routed to `/dev/null` and we'd
* have no way to override it)
*/
val immediate: TrampolineExecutionContext =
TrampolineExecutionContext(new ExecutionContext {
def execute(r: Runnable): Unit = r.run()
def reportFailure(e: Throwable): Unit = throw e
})
/** Returns the `localContext`, allowing us to bypass calling
* `BlockContext.withBlockContext`, as an optimization trick.
*/
private val localContext: ThreadLocal[BlockContext] = {
try {
val methods = BlockContext.getClass.getDeclaredMethods
.filter(m => m.getParameterCount == 0 && m.getReturnType == classOf[ThreadLocal[_]])
.toList
methods match {
case m :: Nil =>
m.setAccessible(true)
m.invoke(BlockContext).asInstanceOf[ThreadLocal[BlockContext]]
case _ =>
throw new NoSuchMethodError("BlockContext.contextLocal")
}
} catch {
case _: NoSuchMethodError => null
case _: SecurityException => null
case NonFatal(_) => null
}
}
private val trampoline =
new ThreadLocal[Trampoline]() {
override def initialValue(): Trampoline =
TrampolineExecutionContext.buildTrampoline()
}
private def buildTrampoline(): Trampoline = {
if (localContext ne null)
new JVMOptimalTrampoline()
else
new JVMNormalTrampoline()
}
private final class JVMOptimalTrampoline extends Trampoline {
override def startLoop(runnable: Runnable, ec: ExecutionContext): Unit = {
val parentContext = localContext.get()
localContext.set(trampolineContext(ec))
try {
super.startLoop(runnable, ec)
} finally {
localContext.set(parentContext)
}
}
}
private class JVMNormalTrampoline extends Trampoline {
override def startLoop(runnable: Runnable, ec: ExecutionContext): Unit = {
BlockContext.withBlockContext(trampolineContext(ec)) {
super.startLoop(runnable, ec)
}
}
}
}
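// Hypothetical usage sketch (added for illustration; not part of the original file).
// Runnables submitted to `immediate` run synchronously on the calling thread, so the
// println below completes before `execute` returns.
private object TrampolineExample {
  def demo(): Unit =
    TrampolineExecutionContext.immediate.execute(new Runnable {
      def run(): Unit = println("running on " + Thread.currentThread().getName)
    })
}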
| monix/monix | monix-execution/jvm/src/main/scala/monix/execution/schedulers/TrampolineExecutionContext.scala | Scala | apache-2.0 | 5,373 |
import sbt._
object Dependencies {
val playJson = "com.typesafe.play" %% "play-json" % "2.6.10" withSources ()
}
| codacy/codacy-api-scala | project/Dependencies.scala | Scala | mit | 116 |
package com.twitter.finagle.http.filter
import com.twitter.finagle._
import com.twitter.finagle.context.Contexts
import com.twitter.finagle.filter.ServerAdmissionControl
import com.twitter.finagle.http.{Fields, Method, Request, Response, Status}
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.service.RetryPolicy
import com.twitter.io.Buf
import com.twitter.util.Future
/**
 * When a server fails with a retryable failure, it sends back a
 * `NackResponse`, i.e. a 503 response with a "finagle-http-nack"
 * header. A non-retryable failure is converted to a 503 with a
 * "finagle-http-nonretryable-nack" header.
*
* Clients who recognize the header can handle the response appropriately.
* Clients who don't recognize the header treat the response the same way as
* other 503 response.
*/
object HttpNackFilter {
/** The `Role` assigned to a `HttpNackFilter` within a `Stack`. */
val role: Stack.Role = Stack.Role("HttpNack")
/** Header name for retryable nack responses */
val RetryableNackHeader: String = "finagle-http-nack"
/** Header name for non-retryable nack responses */
val NonRetryableNackHeader: String = "finagle-http-nonretryable-nack"
/** Header name for requests that have a body and the client can retry */
val RetryableRequestHeader: String = "finagle-http-retryable-request"
/** Response status for a nacked request */
val ResponseStatus: Status = Status.ServiceUnavailable
private val RetryableNackBody =
Buf.Utf8("Request was not processed by the server due to an error and is safe to retry")
private val NonRetryableNackBody =
Buf.Utf8("Request was not processed by the server and should not be retried")
private val NonRetryableNackFlags = FailureFlags.Rejected | FailureFlags.NonRetryable
private val RetryableNackFlags = FailureFlags.Rejected | FailureFlags.Retryable
private[twitter] object RetryableNack {
def unapply(t: Throwable): Boolean = t match {
case f: FailureFlags[_] => f.isFlagged(RetryableNackFlags)
case _ => false
}
}
private[twitter] object NonRetryableNack {
def unapply(t: Throwable): Boolean = t match {
case f: FailureFlags[_] => f.isFlagged(NonRetryableNackFlags)
case _ => false
}
}
// We consider a `Retry-After: 0` header to also represent a retryable nack.
  // We don't consider values other than 0, since interpreting general
  // Retry-After semantics is out of scope for this filter.
private[this] def containsRetryAfter0(rep: Response): Boolean =
rep.headerMap.get(Fields.RetryAfter) match {
case Some("0") => true
case _ => false
}
private[finagle] def isRetryableNack(rep: Response): Boolean =
rep.status == ResponseStatus &&
(rep.headerMap.contains(RetryableNackHeader) || containsRetryAfter0(rep))
private[finagle] def isNonRetryableNack(rep: Response): Boolean =
rep.status == ResponseStatus && rep.headerMap.contains(NonRetryableNackHeader)
private[finagle] def isNack(rep: Response): Boolean =
rep.status == ResponseStatus &&
(rep.headerMap.contains(RetryableNackHeader) || rep.headerMap.contains(
NonRetryableNackHeader
))
private[finagle] def module: Stackable[ServiceFactory[Request, Response]] =
new Stack.Module1[param.Stats, ServiceFactory[Request, Response]] {
val role: Stack.Role = HttpNackFilter.role
val description = "Convert rejected requests to 503s, respecting retryability"
def make(
_stats: param.Stats,
next: ServiceFactory[Request, Response]
): ServiceFactory[Request, Response] = {
val param.Stats(stats) = _stats
new HttpNackFilter(stats).andThen(next)
}
}
/** Construct a new HttpNackFilter */
private[finagle] def newFilter(statsReceiver: StatsReceiver): SimpleFilter[Request, Response] =
new HttpNackFilter(statsReceiver)
private def retryableRequest(req: Request): Boolean = {
    // If the request came in chunked, the finagle HTTP/1.x server implementation will
    // corrupt the session state if it doesn't consume the body, so we avoid nacking
    // these whenever possible.
//
// If it didn't come in chunked, we don't know if the client has retained a copy of
// the body and can retry it if we nack it, so we only allow nacking of requests with
// a body if the client has signaled that it is able to retry them.
!req.isChunked && (req.content.isEmpty || req.headerMap.contains(RetryableRequestHeader))
}
}
private final class HttpNackFilter(statsReceiver: StatsReceiver)
extends SimpleFilter[Request, Response] {
import HttpNackFilter._
private[this] val nackCounts = statsReceiver.counter("nacks")
private[this] val nonRetryableNackCounts = statsReceiver.counter("nonretryable_nacks")
private[this] val standardHandler = makeHandler(includeBody = true)
private[this] val bodylessHandler = makeHandler(includeBody = false)
private[this] def makeHandler(includeBody: Boolean): PartialFunction[Throwable, Response] = {
// For legacy reasons, this captures all RetryableWriteExceptions
case RetryPolicy.RetryableWriteException(_) =>
nackCounts.incr()
val rep = Response(ResponseStatus)
rep.headerMap.set(RetryableNackHeader, "true")
rep.headerMap.set(Fields.RetryAfter, "0")
if (includeBody) {
rep.content = RetryableNackBody
}
rep
case NonRetryableNack() =>
nonRetryableNackCounts.incr()
val rep = Response(ResponseStatus)
rep.headerMap.set(NonRetryableNackHeader, "true")
if (includeBody) {
rep.content = NonRetryableNackBody
}
rep
}
def apply(request: Request, service: Service[Request, Response]): Future[Response] = {
val handler =
if (request.method == Method.Head) bodylessHandler
else standardHandler
val isRetryable = retryableRequest(request)
// We need to strip the header in case this request gets forwarded to another
// endpoint as the marker header is only valid on a hop-by-hop basis.
request.headerMap.remove(RetryableRequestHeader)
if (isRetryable) {
service(request).handle(handler)
} else {
Contexts.local.let(ServerAdmissionControl.NonRetryable, ()) {
service(request).handle(handler)
}
}
}
}
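// Hypothetical illustration (added for clarity; not part of the original file).
// A rejected-but-retryable request converted by this filter looks roughly like:
//
//   HTTP/1.1 503 Service Unavailable
//   finagle-http-nack: true
//   Retry-After: 0
//
//   Request was not processed by the server due to an error and is safe to retry
//
// whereas a non-retryable rejection carries the "finagle-http-nonretryable-nack"
// header instead and should not be retried by the client.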
| luciferous/finagle | finagle-base-http/src/main/scala/com/twitter/finagle/http/filter/HttpNackFilter.scala | Scala | apache-2.0 | 6,292 |
package io.buoyant.namerd
import com.twitter.finagle.http.{MediaType, Request, Response}
import com.twitter.finagle.{Namer, Path, Service, SimpleFilter}
import com.twitter.server.handler.ResourceHandler
import com.twitter.util.Future
import io.buoyant.admin.Admin.{Handler, NavItem}
import io.buoyant.admin.names.DelegateApiHandler
import io.buoyant.admin.{Admin, ConfigHandler, HtmlView, LoggingApiHandler, LoggingHandler, StaticFilter}
import io.buoyant.namer.ConfiguredNamersInterpreter
object NamerdAdmin {
val static: Seq[Handler] = Seq(
Handler("/files/", (StaticFilter andThen ResourceHandler.fromDirectoryOrJar(
baseRequestPath = "/files/",
baseResourcePath = "io/buoyant/admin",
localFilePath = "admin/src/main/resources/io/buoyant/admin"
)))
)
def config(nc: NamerdConfig) = Seq(
Handler("/config.json", new ConfigHandler(nc, NamerdConfig.LoadedInitializers.iter))
)
def dtabs(dtabStore: DtabStore, namers: Map[Path, Namer], adminFilter: NamerdFilter) = Seq(
Handler("/", adminFilter.andThen(new DtabListHandler(dtabStore))),
Handler("/dtab/delegator.json", new DelegateApiHandler(_ => ConfiguredNamersInterpreter(namers.toSeq))),
Handler("/dtab/", adminFilter.andThen(new DtabHandler(dtabStore)))
)
def logging(adminHandler: NamerdAdmin, adminFilter: NamerdFilter): Seq[Handler] = Seq(
Handler("/logging.json", new LoggingApiHandler()),
Handler("/logging", adminFilter.andThen(new LoggingHandler(adminHandler)))
)
def apply(nc: NamerdConfig, namerd: Namerd): Seq[Handler] = {
val handler = new NamerdAdmin(Seq(NavItem("logging", "logging")))
val adminFilter = new NamerdFilter(handler)
static ++ config(nc) ++
dtabs(namerd.dtabStore, namerd.namers, adminFilter) ++ logging(handler, adminFilter) ++
Admin.extractHandlers(namerd.dtabStore +: (namerd.namers.values.toSeq ++ namerd.telemeters))
}
}
private class NamerdAdmin(navItems: Seq[NavItem]) extends HtmlView {
private[this] def navHtml(highlightedItem: String = "") = navItems.map { item =>
val activeClass = if (item.name == highlightedItem) "active" else ""
s"""<li class=$activeClass><a href="/${item.url}">${item.name}</a></li>"""
  }.mkString("\n")
override def html(
content: String,
tailContent: String,
csses: Seq[String],
navHighlight: String,
showRouterDropdown: Boolean
): String =
s"""
<!doctype html>
<html>
<head>
<title>namerd admin</title>
<link type="text/css" href="/files/css/lib/bootstrap.min.css" rel="stylesheet"/>
<link type="text/css" href="/files/css/fonts.css" rel="stylesheet"/>
<link type="text/css" href="/files/css/dashboard.css" rel="stylesheet"/>
<link type="text/css" href="/files/css/logger.css" rel="stylesheet"/>
<link type="text/css" href="/files/css/delegator.css" rel="stylesheet"/>
<link rel="shortcut icon" href="/files/images/favicon.png" />
</head>
<body>
<nav class="navbar navbar-inverse">
<div class="navbar-container">
<div class="navbar-header">
<button type="button" class="navbar-toggle collapsed" data-toggle="collapse" data-target="#navbar" aria-expanded="false">
<span class="sr-only">Toggle navigation</span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
<span class="icon-bar"></span>
</button>
<a class="navbar-brand" href="/">
<div>Namerd</div>
</a>
</div>
<div id="navbar" class="collapse navbar-collapse">
<ul class="nav navbar-nav">
${navHtml(navHighlight)}
</ul>
</div>
</div>
</nav>
<div class="container-fluid">
$content
</div>
$tailContent
<script data-main="/files/js/main-namerd" src="/files/js/lib/require.js"></script>
</body>
</html>
"""
override def mkResponse(
content: String,
mediaType: String
): Future[Response] = {
val response = Response()
response.contentType = mediaType + ";charset=UTF-8"
response.contentString = content
Future.value(response)
}
}
private class NamerdFilter(
adminHandler: NamerdAdmin, css: Seq[String] = Nil
) extends SimpleFilter[Request, Response] {
override def apply(
request: Request,
service: Service[Request, Response]
): Future[Response] = {
service(request).map { rsp =>
val itemToHighlight = request.path.replace("/", "")
if (rsp.contentType.contains(MediaType.Html))
rsp.contentString = adminHandler
.html(rsp.contentString, csses = css, navHighlight = itemToHighlight)
rsp
}
}
}
| linkerd/linkerd | namerd/main/src/main/scala/io/buoyant/namerd/NamerdAdmin.scala | Scala | apache-2.0 | 4,653 |
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package breeze.util
import java.util.TreeSet
import scala.collection.compat._
import scala.jdk.CollectionConverters._
/**
* A Top-K queue keeps a list of the top K elements seen so far as ordered
* by the given comparator.
*/
class TopK[T](k: Int)(implicit ord: Ordering[T]) extends Iterable[T] {
private val keys = new TreeSet[T](ord)
def +=(e: T): this.type = {
if (keys.size < k) {
keys.add(e)
} else if (keys.size > 0 && ord.lt(keys.first, e) && !keys.contains(e)) {
keys.remove(keys.first)
keys.add(e)
}
this
}
override def iterator: Iterator[T] =
keys.descendingIterator.asScala
override def size: Int =
keys.size
}
object TopK {
def apply[T](k: Int, items: IterableOnce[T])(implicit ord: Ordering[T]): TopK[T] = {
val topk = new TopK[T](k)(ord)
items.iterator.foreach(topk += _)
topk
}
def apply[T, U](k: Int, items: IterableOnce[T], scoreFn: T => U)(implicit uord: Ordering[U]): TopK[T] = {
implicit val ord: Ordering[T] = new Ordering[T] {
override def compare(x: T, y: T): Int = uord.compare(scoreFn(x), scoreFn(y))
}
apply(k, items)(ord)
}
}
/**
* A rich iterable extension that adds the topk method.
*/
class TopKIterable[T](val self: Iterable[T]) {
def topk(k: Int)(implicit ord: Ordering[T]): TopK[T] =
TopK(k, self)
def topk[U](k: Int, scoreFn: T => U)(implicit uord: Ordering[U]): TopK[T] =
TopK(k, self, scoreFn)(uord)
}
class TopKIterator[T](val self: Iterator[T]) {
def topk(k: Int)(implicit ord: Ordering[T]): TopK[T] =
TopK(k, self)
def topk[U](k: Int, scoreFn: T => U)(implicit uord: Ordering[U]): TopK[T] =
TopK(k, self, scoreFn)(uord)
}
object TopKImplicits {
implicit def iTopKIterable[T](iterable: Iterable[T]): TopKIterable[T] =
new TopKIterable(iterable)
implicit def iTopKIterator[T](iterator: Iterator[T]): TopKIterator[T] =
new TopKIterator(iterator)
}
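// Hypothetical usage sketch (added for illustration; not part of the original file).
// Keeps only the 3 largest values seen and iterates over them in descending order.
private object TopKExample {
  def demo(): Seq[Int] = {
    val top3 = TopK(3, Seq(5, 1, 9, 7, 3))
    top3.toSeq // Seq(9, 7, 5)
  }
}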
| scalanlp/breeze | math/src/main/scala/breeze/util/TopK.scala | Scala | apache-2.0 | 2,508 |