code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 5-1M | stringlengths 5-109 | stringlengths 6-208 | stringclasses 1 value | stringclasses 15 values | int64 5-1M
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.scaladsl.persistence.cassandra
import akka.actor.ActorSystem
import com.lightbend.lagom.internal.persistence.ReadSideConfig
import com.lightbend.lagom.internal.persistence.cassandra.CassandraReadSideSettings
import com.lightbend.lagom.internal.persistence.cassandra.CassandraOffsetStore
import com.lightbend.lagom.scaladsl.persistence.cassandra.CassandraSession
import scala.concurrent.ExecutionContext
/**
* Internal API
*/
private[lagom] final class ScaladslCassandraOffsetStore(
system: ActorSystem,
session: CassandraSession,
cassandraReadSideSettings: CassandraReadSideSettings,
config: ReadSideConfig
)(implicit ec: ExecutionContext)
extends CassandraOffsetStore(system, session.delegate, cassandraReadSideSettings, config)
| rcavalcanti/lagom | persistence-cassandra/scaladsl/src/main/scala/com/lightbend/lagom/internal/scaladsl/persistence/cassandra/ScaladslCassandraOffsetStore.scala | Scala | apache-2.0 | 870 |
package diameter.Dictionary
case class DictionaryApplication(val id:Long, val name:String) extends DictionaryObject {
override def toString() = {id +"["+name+"]"}
def ==(that:DictionaryApplication):Boolean = {id == that.id}
}
object DictionaryApplication {
def apply(id:Int)(implicit dictionary:GenericDictionary):DictionaryApplication = {
dictionary.collectFirst(
{ case x@diameter.Dictionary.DictionaryApplication(_id, _) if _id == id.toLong => x }:PartialFunction[DictionaryObject,diameter.Dictionary.DictionaryApplication]
).get
}
def apply(name:String)(implicit dictionary:GenericDictionary):DictionaryApplication = {
dictionary.collectFirst(
{ case x@diameter.Dictionary.DictionaryApplication(_, _name) if _name == name => x }:PartialFunction[DictionaryObject,diameter.Dictionary.DictionaryApplication]
).get
}
}
| dbuhryk/DiameterCoder | Diameter/src/main/scala/diameter/Dictionary/DictionaryApplication.scala | Scala | mit | 860 |
package net.shift
package common
import net.shift.security.SecurityFailure
import scala.util.{Failure, Success, Try}
trait Functor[F[_]] {
def unit[A](a: A): F[A]
def fmap[A, B](f: A => B): F[A] => F[B]
}
trait ApplicativeFunctor[F[_]] extends Functor[F] {
def <*>[A, B](f: F[A => B]): F[A] => F[B]
def map2[A, B, C](ma: F[A], mb: F[B])(f: (A, B) => C): F[C] = {
val g: A => B => C = a => b => f(a, b)
<*>(<*>(unit(g))(ma))(mb)
}
}
trait Monad[M[_]] extends Functor[M] {
def flatMap[A, B](f: A => M[B]): M[A] => M[B]
def join[A](mma: M[M[A]]): M[A] = flatMap((ma: M[A]) => ma)(mma)
def map[A, B](f: A => B): M[A] => M[B] = fmap(f)
}
trait Traversing[F[_]] {
def traverse[A, B, M[_]](f: A => M[B])(fa: F[A])(implicit m: ApplicativeFunctor[M]): M[F[B]]
def sequence[A, M[_]](fma: F[M[A]])(implicit m: ApplicativeFunctor[M]): M[F[A]] = traverse((x: M[A]) => x)(fma)
}
trait TraversingSpec {
def listTraverse = new Traversing[List] {
def traverse[A, B, M[_]](f: A => M[B])(fa: List[A])(implicit m: ApplicativeFunctor[M]): M[List[B]] = {
fa.foldRight(m.unit(List[B]()))((a, mbs) => {
m.map2(f(a), mbs)(_ :: _)
})
}
}
}
trait Combinators[M[_]] {
def >=>[A, B, C](f: A => M[B])(g: B => M[C]): A => M[C]
}
trait Semigroup[A] {
def append(a: A, b: A): A
}
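/**
 * A failable state transition: running a `State[S, A]` against an initial state of type S
 * yields, inside a `Try`, the next state together with a result of type A.
 */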
trait State[S, +A] {
import State._
def apply(s: S): Try[(S, A)]
def map[B](f: A => B): State[S, B] = state {
apply(_) map { case (s, a) => (s, f(a)) }
}
def flatMap[B](f: A => State[S, B]): State[S, B] = state {
apply(_) flatMap { case (st, a) => f(a)(st) }
}
def filter(f: A => Boolean): State[S, A] = state {
apply(_) filter { case (s, a) => f(a) }
}
def withFilter(f: A => Boolean): State[S, A] = state {
apply(_) filter { case (s, a) => f(a) }
}
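// Alternative combinator: a SecurityFailure from this state is kept as-is, any other
// failure falls back to `other`, and a success is returned unchanged.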
def |[B >: A](other: State[S, B]): State[S, B] = state {
x =>
apply(x) match {
case f @ Failure(SecurityFailure(msg, _)) => f
case f @ Failure(_) => other apply x
case s => s
}
}
def >=>[B, C](f: A => State[S, B])(g: B => State[S, C]): State[S, C] = state {
apply(_) flatMap {
case (s, a) =>
(for {
b <- f(a)
c <- g(b)
} yield c)(s)
}
}
}
trait Identity[M[_]] {
def unit[A](a: A): M[A]
}
trait Bind[M[_]] {
def flatMap[A, B](f: A => M[B]): M[A] => M[B]
}
trait Flat[F[_]] {
def fmap[A, B](f: A => B): F[A] => F[B]
}
object Monad {
def monad[M[_]](implicit id: Identity[M], flat: Flat[M], b: Bind[M]): Monad[M] = new Monad[M] {
def unit[A](a: A): M[A] = id unit a
def fmap[A, B](f: A => B): M[A] => M[B] = flat fmap f
def flatMap[A, B](f: A => M[B]): M[A] => M[B] = b flatMap f
}
}
object State {
import Monad._
implicit def stateMonad[S] = monad[({ type l[a] = State[S, a] })#l]
implicit def stateBind[S]: Bind[({ type l[a] = State[S, a] })#l] = new Bind[({ type l[a] = State[S, a] })#l] {
def flatMap[A, B](f: A => State[S, B]): State[S, A] => State[S, B] = s => s flatMap f
}
implicit def stateIdentity[S]: Identity[({ type l[a] = State[S, a] })#l] = new Identity[({ type l[a] = State[S, a] })#l] {
def unit[A](a: A): State[S, A] = state {
s => Success((s, a))
}
}
implicit def stateFlat[S]: Flat[({ type l[a] = State[S, a] })#l] = new Flat[({ type l[a] = State[S, a] })#l] {
def fmap[A, B](f: A => B): State[S, A] => State[S, B] = _ map f
}
def state[S, A](f: S => Try[(S, A)]): State[S, A] = new State[S, A] {
def apply(s: S) = f(s)
}
def init[S] = state[S, S] {
s => Success((s, s))
}
def initf[S](f: S => S) = state[S, S] {
s => Success((f(s), f(s)))
}
def put[S, A](a: A) = state[S, A] {
s => Success((s, a))
}
def putOpt[S, A](a: Option[A]) = state[S, A] {
s =>
a match {
case Some(v) => Success((s, v))
case _ => Failure(new RuntimeException with util.control.NoStackTrace)
}
}
def modify[S](f: S => S) = state[S, Unit] {
s => Success((f(s), ()))
}
def gets[S, A](f: S => A) = for (s <- init[S]) yield f(s)
}
| mariusdanciu/shift | shift-common/src/main/scala/net/shift/common/FunctionalDefs.scala | Scala | apache-2.0 | 4,161 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.accounts.MockComputationsRetriever
import uk.gov.hmrc.ct.box.CtValidation
import uk.gov.hmrc.ct.mocks.MockComputationsBoxRetriever
import org.mockito.Mockito._
import uk.gov.hmrc.ct.{CATO02, CATO20, CATO21, CATO22}
class MachineryAndPlantValidationSpec extends WordSpec with Matchers {
val stubBoxRetriever = MockComputationsBoxRetriever()
"CP78 " should {
"validate if present and non-negative or if not present, otherwise fail" in {
CP78(Some(0)).validate(stubBoxRetriever) shouldBe Set()
CP78(None).validate(stubBoxRetriever) shouldBe Set()
CP78(Some(-1)).validate(stubBoxRetriever) shouldBe Set(CtValidation(boxId = Some("CP78"), errorMessageKey = "error.CP78.mustBeZeroOrPositive"))
}
}
"CP666 " should {
"validate if present and non-negative or if not present, otherwise fail" in {
CP666(Some(0)).validate(stubBoxRetriever) shouldBe Set()
CP666(None).validate(stubBoxRetriever) shouldBe Set()
CP666(Some(-1)).validate(stubBoxRetriever) shouldBe Set(CtValidation(boxId = Some("CP666"), errorMessageKey = "error.CP666.mustBeZeroOrPositive"))
}
}
"CP79" should {
"validate if present and non-negative or if not present, otherwise fail" in {
CP79(Some(0)).validate(stubBoxRetriever) shouldBe Set()
CP79(None).validate(stubBoxRetriever) shouldBe Set()
CP79(Some(-1)).validate(stubBoxRetriever) shouldBe Set(CtValidation(boxId = Some("CP79"), errorMessageKey = "error.CP79.mustBeZeroOrPositive"))
}
}
"CP80" should {
"validate if present and non-negative or if not present, otherwise fail" in {
CP80(Some(0)).validate(stubBoxRetriever) shouldBe Set()
CP80(None).validate(stubBoxRetriever) shouldBe Set()
CP80(Some(-1)).validate(stubBoxRetriever) shouldBe Set(CtValidation(boxId = Some("CP80"), errorMessageKey = "error.CP80.mustBeZeroOrPositive"))
}
}
"CP82 " should {
"validate if present and non-negative or if not present, otherwise fail" in {
CP82(Some(0)).validate(stubBoxRetriever) shouldBe Set()
CP82(None).validate(stubBoxRetriever) shouldBe Set()
CP82(Some(-1)).validate(stubBoxRetriever) shouldBe Set(CtValidation(boxId = Some("CP82"), errorMessageKey = "error.CP82.mustBeZeroOrPositive"))
}
}
"CP83 " should {
"validate if present and non-negative or if not present, otherwise fail" in {
CP83(Some(0)).validate(stubBoxRetriever) shouldBe Set()
CP83(None).validate(stubBoxRetriever) shouldBe Set()
CP83(Some(-1)).validate(stubBoxRetriever) shouldBe Set(CtValidation(boxId = Some("CP83"), errorMessageKey = "error.CP83.mustBeZeroOrPositive"))
}
}
"CP674 " should {
"validate if present and non-negative or if not present, otherwise fail" in {
CP674(Some(0)).validate(stubBoxRetriever) shouldBe Set()
CP674(None).validate(stubBoxRetriever) shouldBe Set()
CP674(Some(-1)).validate(stubBoxRetriever) shouldBe Set(CtValidation(boxId = Some("CP674"), errorMessageKey = "error.CP674.mustBeZeroOrPositive"))
}
}
"CP84 " should {
"validate if present and non-negative or if not present, otherwise fail" in {
CP84(Some(0)).validate(stubBoxRetriever) shouldBe Set()
CP84(None).validate(stubBoxRetriever) shouldBe Set()
CP84(Some(-1)).validate(stubBoxRetriever) shouldBe Set(CtValidation(boxId = Some("CP84"), errorMessageKey = "error.CP84.mustBeZeroOrPositive"))
}
}
"CP252" should {
"validate if present and non-negative or if not present, otherwise fail" in {
CP252(Some(0)).validate(stubBoxRetriever) shouldBe Set()
CP252(None).validate(stubBoxRetriever) shouldBe Set()
CP252(Some(-1)).validate(stubBoxRetriever) shouldBe Set(CtValidation(boxId = Some("CP252"), errorMessageKey = "error.CP252.mustBeZeroOrPositive"))
}
}
"CP673 " should {
"validate if present and non-negative or if not present, otherwise fail" in {
CP673(Some(0)).validate(stubBoxRetriever) shouldBe Set()
CP673(None).validate(stubBoxRetriever) shouldBe Set()
CP673(Some(-1)).validate(stubBoxRetriever) shouldBe Set(CtValidation(boxId = Some("CP673"), errorMessageKey = "error.CP673.mustBeZeroOrPositive"))
}
}
"CP667 " should {
"validate if present and non-negative or if not present, otherwise fail" in {
CP667(Some(0)).validate(stubBoxRetriever) shouldBe Set()
CP667(None).validate(stubBoxRetriever) shouldBe Set()
CP667(Some(-1)).validate(stubBoxRetriever) shouldBe Set(CtValidation(boxId = Some("CP667"), errorMessageKey = "error.CP667.mustBeZeroOrPositive"))
}
}
"CP672 " should {
"validate if present and non-negative or if not present, otherwise fail" in {
CP672(Some(0)).validate(stubBoxRetriever) shouldBe Set()
CP672(None).validate(stubBoxRetriever) shouldBe Set()
CP672(Some(-1)).validate(stubBoxRetriever) shouldBe Set(CtValidation(boxId = Some("CP672"), errorMessageKey = "error.CP672.mustBeZeroOrPositive"))
}
}
"CP87Input, given is trading and first Year Allowance Not Greater Than Max FYA" should {
"validate if present and non-negative, otherwise fail" in {
val stubTestComputationsRetriever = MockComputationsBoxRetriever(cpq8Param = Some(false))
CP87Input(Some(0)).validate(stubTestComputationsRetriever) shouldBe Set()
CP87Input(Some(-1)).validate(stubTestComputationsRetriever) shouldBe Set(CtValidation(boxId = Some("CP87Input"), errorMessageKey = "error.CP87Input.mustBeZeroOrPositive"))
}
}
"CP87Input, given is non-negative" should {
"validate correctly when not greater than CP81 CPaux1" in new MockComputationsRetriever {
when(boxRetriever.cp81).thenReturn(CP81(100))
when(boxRetriever.cpAux1()).thenReturn(CPAux1(100))
when(boxRetriever.cp80).thenReturn(CP80(Some(29)))
when(boxRetriever.cp79()).thenReturn(CP79(Some(20)))
when(boxRetriever.cpQ8()).thenReturn(CPQ8(Some(false)))
CP87Input(Some(100)).validate(boxRetriever) shouldBe Set()
}
"fail validation when greater than CP81+ CPaux1" in new MockComputationsRetriever {
when(boxRetriever.cp81).thenReturn(CP81(50))
when(boxRetriever.cpAux1()).thenReturn(CPAux1(50))
when(boxRetriever.cp80).thenReturn(CP80(Some(29)))
when(boxRetriever.cp79()).thenReturn(CP79(Some(20)))
when(boxRetriever.cpQ8()).thenReturn(CPQ8(Some(false)))
CP87Input(Some(101)).validate(boxRetriever) shouldBe Set(CtValidation(boxId = Some("CP87Input"), errorMessageKey = "error.CP87Input.firstYearAllowanceClaimExceedsAllowance", args = Some(Seq("100"))))
}
"validate because FYA defaults to 0 when not entered" in {
val stubTestComputationsRetriever = MockComputationsBoxRetriever(
cpq8Param = Some(true),
cp79Param = Some(20),
cp80Param = Some(29)
// ,
// cpAux1Param = 51
)
CP87Input(None).validate(stubTestComputationsRetriever) shouldBe Set()
}
"fail validation when trading but no value entered" in {
val stubTestComputationsRetriever = MockComputationsBoxRetriever(cpq8Param = Some(false))
CP87Input(None).validate(stubTestComputationsRetriever) shouldBe Set(CtValidation(boxId = Some("CP87Input"), errorMessageKey = "error.CP87Input.fieldMustHaveValueIfTrading"))
}
"validate when ceased trading but no value entered" in {
val stubTestComputationsRetriever = MockComputationsBoxRetriever(cpq8Param = Some(true))
CP87Input(None).validate(stubTestComputationsRetriever) shouldBe Set()
}
"validate when ceased trading not set" in {
val stubTestComputationsRetriever = MockComputationsBoxRetriever()
CP87Input(None).validate(stubTestComputationsRetriever) shouldBe Set()
}
"fails validation when negative" in {
val stubTestComputationsRetriever = MockComputationsBoxRetriever(cpq8Param = Some(false))
CP87Input(-1).validate(stubTestComputationsRetriever) shouldBe Set(CtValidation(boxId = Some("CP87Input"), errorMessageKey = "error.CP87Input.mustBeZeroOrPositive"))
}
}
"CP88(annual investment allowance claimed)" should {
"fail to validate when negative" in new MockComputationsRetriever {
// val stubTestComputationsRetriever = MockComputationsBoxRetriever()
when(boxRetriever.cp83).thenReturn(CP83(None))
when(boxRetriever.cato02()).thenReturn(CATO02(0))
when(boxRetriever.cpQ8()).thenReturn(CPQ8(Some(false)))
CP88(Some(-1)).validate(boxRetriever) shouldBe Set(CtValidation(boxId = Some("CP88"), errorMessageKey = "error.CP88.mustBeZeroOrPositive"))
}
"validate correctly when not greater than the minimum of CATO02 (maxAIA) and CP83 (expenditureQualifyingAnnualInvestmentAllowance)" in new MockComputationsRetriever {
// val stubTestComputationsRetriever = MockComputationsBoxRetriever(
// cp83Param = Some(11)
//// ,
//// cato02Param = 10
// )
when(boxRetriever.cp83).thenReturn(CP83(Some(11)))
when(boxRetriever.cato02()).thenReturn(CATO02(10))
when(boxRetriever.cpQ8()).thenReturn(CPQ8(None))
CP88(Some(10)).validate(boxRetriever) shouldBe Set()
}
"fails validation when greater than the minimum of CATO02 (maxAIA) and CP83 (expenditureQualifyingAnnualInvestmentAllowance)" in new MockComputationsRetriever {
when(boxRetriever.cp83).thenReturn(CP83(Some(11)))
when(boxRetriever.cato02()).thenReturn(CATO02(10))
when(boxRetriever.cpQ8()).thenReturn(CPQ8(None))
CP88(Some(11)).validate(boxRetriever) shouldBe Set(CtValidation(boxId = Some("CP88"), errorMessageKey = "error.CP88.annualInvestmentAllowanceExceeded", args = Some(Seq("10"))))
}
"fails validation when CATO02 (maxAIA) is the minimum" in new MockComputationsRetriever {
when(boxRetriever.cp83).thenReturn(CP83(Some(10)))
when(boxRetriever.cato02()).thenReturn(CATO02(11))
when(boxRetriever.cpQ8()).thenReturn(CPQ8(None))
CP88(Some(11)).validate(boxRetriever) shouldBe Set(CtValidation(boxId = Some("CP88"), errorMessageKey = "error.CP88.annualInvestmentAllowanceExceeded", args = Some(Seq("10"))))
}
"fail validation when trading but no value entered" in new MockComputationsRetriever {
when(boxRetriever.cp83).thenReturn(CP83(Some(11)))
when(boxRetriever.cato02()).thenReturn(CATO02(10))
when(boxRetriever.cpQ8()).thenReturn(CPQ8(Some(false)))
CP88(None).validate(boxRetriever) shouldBe Set(CtValidation(boxId = Some("CP88"), errorMessageKey = "error.CP88.fieldMustHaveValueIfTrading"))
}
"validate when ceased trading but no value entered" in new MockComputationsRetriever {
when(boxRetriever.cp83).thenReturn(CP83(Some(11)))
when(boxRetriever.cato02()).thenReturn(CATO02(10))
when(boxRetriever.cpQ8()).thenReturn(CPQ8(Some(true)))
CP88(None).validate(boxRetriever) shouldBe Set()
}
"validate when ceased trading not set" in new MockComputationsRetriever {
when(boxRetriever.cp83).thenReturn(CP83(None))
when(boxRetriever.cato02()).thenReturn(CATO02(0))
when(boxRetriever.cpQ8()).thenReturn(CPQ8(None))
CP88(None).validate(boxRetriever) shouldBe Set()
}
"fails validation when negative" in new MockComputationsRetriever {
when(boxRetriever.cp83).thenReturn(CP83(None))
when(boxRetriever.cato02()).thenReturn(CATO02(0))
when(boxRetriever.cpQ8()).thenReturn(CPQ8(Some(false)))
CP88(-1).validate(boxRetriever) shouldBe Set(CtValidation(boxId = Some("CP88"), errorMessageKey = "error.CP88.mustBeZeroOrPositive"))
}
}
"CP89 (Writing Down Allowance claimed from Main pool)" should {
"validates correctly when not greater than MAX(0, MainPool% * ( CP78 (Main Pool brought forward) " +
"+ CP82 (Additions Qualifying for Main Pool) + MainRatePool - CP672 (Proceed from Disposals from Main Pool) " +
"+ UnclaimedAIA_FYA (Unclaimed FYA and AIA amounts)) - CATO-2730" in new MockComputationsRetriever {
when(boxRetriever.cp78).thenReturn(CP78(Some(2000)))
when(boxRetriever.cp79).thenReturn(CP79(Some(20)))
when(boxRetriever.cpQ8()).thenReturn(CPQ8(None))
when(boxRetriever.cp81).thenReturn(CP81(50))
when(boxRetriever.cp82).thenReturn(CP82(Some(2000)))
when(boxRetriever.cp83).thenReturn(CP83(None))
when(boxRetriever.cp87).thenReturn(CP87(0))
when(boxRetriever.cp88).thenReturn(CP88(None))
when(boxRetriever.cp87Input()).thenReturn(CP87Input(Some(50)))
when(boxRetriever.cp672()).thenReturn(CP672(Some(1000)))
when(boxRetriever.cpAux1()).thenReturn(CPAux1(0))
when(boxRetriever.cpAux2()).thenReturn(CPAux2(0))
when(boxRetriever.cato21()).thenReturn(CATO21(18))
CP89(549).validate(boxRetriever) shouldBe Set()
CP89(550).validate(boxRetriever) shouldBe Set(CtValidation(boxId = Some("CP89"), errorMessageKey = "error.CP89.mainPoolAllowanceExceeded", Some(Seq("549"))))
}
"validates when greater than MAX(0, MainPool% * ( CP78 (Main Pool brought forward) " +
"+ CP82 (Additions Qualifying for Main Pool) + MainRatePool - CP672 (Proceed from Disposals from Main Pool) " +
"+ LEC14 (Unclaimed FYA and AIA amounts)))" in new MockComputationsRetriever {
when(boxRetriever.cpQ8()).thenReturn(CPQ8(None))
when(boxRetriever.cp78).thenReturn(CP78(Some(100)))
when(boxRetriever.cp79).thenReturn(CP79(None))
when(boxRetriever.cp80).thenReturn(CP80(None))
when(boxRetriever.cp81).thenReturn(CP81(0))
when(boxRetriever.cp82).thenReturn(CP82(Some(100)))
when(boxRetriever.cp83).thenReturn(CP83(None))
when(boxRetriever.cp87).thenReturn(CP87(0))
when(boxRetriever.cp88).thenReturn(CP88(None))
when(boxRetriever.cp87Input()).thenReturn(CP87Input(Some(50)))
when(boxRetriever.cp672()).thenReturn(CP672(Some(100)))
when(boxRetriever.cpAux1()).thenReturn(CPAux1(0))
when(boxRetriever.cpAux2()).thenReturn(CPAux2(50))
when(boxRetriever.cato20()).thenReturn(CATO20(50))
when(boxRetriever.cato21()).thenReturn(CATO21(10))
CP89(15).validate(boxRetriever) shouldBe Set()
CP89(16).validate(boxRetriever) shouldBe Set(CtValidation(boxId = Some("CP89"), errorMessageKey = "error.CP89.mainPoolAllowanceExceeded", Some(Seq("15"))))
}
"validated when CP672 is large enough to make the total -ve and any +ve claim is made" in new MockComputationsRetriever {
when(boxRetriever.cpQ8()).thenReturn(CPQ8(None))
when(boxRetriever.cp78).thenReturn(CP78(Some(100)))
when(boxRetriever.cp79).thenReturn(CP79(None))
when(boxRetriever.cp80).thenReturn(CP80(None))
when(boxRetriever.cp81).thenReturn(CP81(0))
when(boxRetriever.cp82).thenReturn(CP82(Some(100)))
when(boxRetriever.cp83).thenReturn(CP83(None))
when(boxRetriever.cp87).thenReturn(CP87(0))
when(boxRetriever.cp88).thenReturn(CP88(None))
when(boxRetriever.cp87Input()).thenReturn(CP87Input(Some(50)))
when(boxRetriever.cp672()).thenReturn(CP672(Some(1000)))
when(boxRetriever.cpAux1()).thenReturn(CPAux1(0))
when(boxRetriever.cpAux2()).thenReturn(CPAux2(100))
when(boxRetriever.cato20()).thenReturn(CATO20(50))
when(boxRetriever.cato21()).thenReturn(CATO21(10))
CP89(0).validate(boxRetriever) shouldBe Set()
CP89(1).validate(boxRetriever) shouldBe Set(CtValidation(boxId = Some("CP89"), errorMessageKey = "error.CP89.mainPoolAllowanceExceeded", Some(Seq("0"))))
}
"validate when ceased trading but no value entered" in new MockComputationsRetriever {
when(boxRetriever.cpQ8()).thenReturn(CPQ8(Some(true)))
when(boxRetriever.cp78).thenReturn(CP78(Some(100)))
when(boxRetriever.cp79).thenReturn(CP79(None))
when(boxRetriever.cp80).thenReturn(CP80(None))
when(boxRetriever.cp81).thenReturn(CP81(0))
when(boxRetriever.cp82).thenReturn(CP82(Some(100)))
when(boxRetriever.cp83).thenReturn(CP83(None))
when(boxRetriever.cp87).thenReturn(CP87(0))
when(boxRetriever.cp88).thenReturn(CP88(None))
when(boxRetriever.cp87Input()).thenReturn(CP87Input(Some(50)))
when(boxRetriever.cp672()).thenReturn(CP672(Some(1000)))
when(boxRetriever.cpAux1()).thenReturn(CPAux1(0))
when(boxRetriever.cpAux2()).thenReturn(CPAux2(100))
when(boxRetriever.cato20()).thenReturn(CATO20(50))
when(boxRetriever.cato21()).thenReturn(CATO21(10))
CP89(None).validate(boxRetriever) shouldBe Set()
}
"validate when ceased trading not set" in new MockComputationsRetriever {
when(boxRetriever.cpQ8()).thenReturn(CPQ8(Some(true)))
when(boxRetriever.cp78).thenReturn(CP78(Some(100)))
when(boxRetriever.cp79).thenReturn(CP79(None))
when(boxRetriever.cp80).thenReturn(CP80(None))
when(boxRetriever.cp81).thenReturn(CP81(0))
when(boxRetriever.cp82).thenReturn(CP82(Some(100)))
when(boxRetriever.cp83).thenReturn(CP83(None))
when(boxRetriever.cp87).thenReturn(CP87(0))
when(boxRetriever.cp88).thenReturn(CP88(None))
when(boxRetriever.cp87Input()).thenReturn(CP87Input(Some(50)))
when(boxRetriever.cp672()).thenReturn(CP672(Some(1000)))
when(boxRetriever.cpAux1()).thenReturn(CPAux1(0))
when(boxRetriever.cpAux2()).thenReturn(CPAux2(100))
when(boxRetriever.cato20()).thenReturn(CATO20(50))
when(boxRetriever.cato21()).thenReturn(CATO21(10))
CP89(None).validate(boxRetriever) shouldBe Set()
}
"fails validation when negative" in new MockComputationsRetriever {
when(boxRetriever.cpQ8()).thenReturn(CPQ8(Some(false)))
when(boxRetriever.cp78).thenReturn(CP78(Some(100)))
when(boxRetriever.cp79).thenReturn(CP79(None))
when(boxRetriever.cp80).thenReturn(CP80(None))
when(boxRetriever.cp81).thenReturn(CP81(0))
when(boxRetriever.cp82).thenReturn(CP82(Some(100)))
when(boxRetriever.cp83).thenReturn(CP83(None))
when(boxRetriever.cp87).thenReturn(CP87(0))
when(boxRetriever.cp88).thenReturn(CP88(None))
when(boxRetriever.cp87Input()).thenReturn(CP87Input(Some(50)))
when(boxRetriever.cp672()).thenReturn(CP672(Some(1000)))
when(boxRetriever.cpAux1()).thenReturn(CPAux1(0))
when(boxRetriever.cpAux2()).thenReturn(CPAux2(100))
when(boxRetriever.cato20()).thenReturn(CATO20(50))
when(boxRetriever.cato21()).thenReturn(CATO21(10))
CP89(-1).validate(boxRetriever) shouldBe Set(CtValidation(boxId = Some("CP89"), errorMessageKey = "error.CP89.mustBeZeroOrPositive"))
}
}
"(CP668) Writing Down Allowance claimed from Special rate pool" should {
"validates correctly when not greater than MAX( 0, SpecialPool% * ( CP666 + CPaux3 - CP667) )" in new MockComputationsRetriever {
when(boxRetriever.cpQ8()).thenReturn(CPQ8(None))
when(boxRetriever.cp666()).thenReturn(CP666(Some(100)))
when(boxRetriever.cp667()).thenReturn(CP667(Some(100)))
when(boxRetriever.cpAux3()).thenReturn(CPAux3(100))
when(boxRetriever.cato22()).thenReturn(CATO22(10))
CP668(10).validate(boxRetriever) shouldBe Set()
CP668(11).validate(boxRetriever) shouldBe Set(CtValidation(boxId = Some("CP668"), errorMessageKey = "error.CP668.specialRatePoolAllowanceExceeded", Some(Seq("10"))))
}
"fails validation when CP667 is large enough to make the total -ve and any +ve claim is made" in new MockComputationsRetriever {
when(boxRetriever.cpQ8()).thenReturn(CPQ8(None))
when(boxRetriever.cp666()).thenReturn(CP666(Some(100)))
when(boxRetriever.cp667()).thenReturn(CP667(Some(1000)))
when(boxRetriever.cpAux3()).thenReturn(CPAux3(100))
when(boxRetriever.cato22()).thenReturn(CATO22(10))
CP668(0).validate(boxRetriever) shouldBe Set()
CP668(1).validate(boxRetriever) shouldBe Set(CtValidation(boxId = Some("CP668"), errorMessageKey = "error.CP668.specialRatePoolAllowanceExceeded", Some(Seq("0"))))
}
"validate when ceased trading but no value entered" in new MockComputationsRetriever {
when(boxRetriever.cpQ8()).thenReturn(CPQ8(Some(true)))
when(boxRetriever.cp666()).thenReturn(CP666(Some(100)))
when(boxRetriever.cp667()).thenReturn(CP667(Some(1000)))
when(boxRetriever.cpAux3()).thenReturn(CPAux3(100))
when(boxRetriever.cato22()).thenReturn(CATO22(10))
val stubTestComputationsRetriever = MockComputationsBoxRetriever(cpq8Param = Some(true))
CP668(None).validate(boxRetriever) shouldBe Set()
}
"validate when ceased trading not set" in new MockComputationsRetriever {
when(boxRetriever.cpQ8()).thenReturn(CPQ8(None))
when(boxRetriever.cp666()).thenReturn(CP666(Some(100)))
when(boxRetriever.cp667()).thenReturn(CP667(Some(1000)))
when(boxRetriever.cpAux3()).thenReturn(CPAux3(100))
when(boxRetriever.cato22()).thenReturn(CATO22(10))
CP668(None).validate(boxRetriever) shouldBe Set()
}
}
}
| liquidarmour/ct-calculations | src/test/scala/uk/gov/hmrc/ct/computations/MachineryAndPlantValidationSpec.scala | Scala | apache-2.0 | 21,661 |
/*
* Copyright (c) 2013-16 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless
import org.junit.Test
import org.junit.Assert._
import shapeless.test._
import shapeless.test.illTyped
import shapeless.testutil.assertTypedEquals
package SingletonTypeTestsDefns {
class ValueTest(val x: Int) extends AnyVal
}
class SingletonTypesTests {
import SingletonTypeTestsDefns._
import syntax.singleton._
val wTrue = Witness(true)
type True = wTrue.T
val wFalse = Witness(false)
type False = wFalse.T
val w0 = Witness(0)
type _0 = w0.T
val w1 = Witness(1)
type _1 = w1.T
val w2 = Witness(2)
type _2 = w2.T
val w3 = Witness(3)
type _3 = w3.T
val wFoo = Witness(Symbol("foo"))
type Foo = wFoo.T
val wBar = Witness(Symbol("bar"))
type Bar = wBar.T
@Test
def testRefine: Unit = {
val sTrue = true.narrow
val sFalse = false.narrow
sameTyped(sTrue)(sTrue)
sameTyped(sTrue)(true)
illTyped("""
sameTyped(sTrue)(sFalse)
sameTyped(sTrue)(false)
""")
val s13 = 13.narrow
val s23 = 23.narrow
sameTyped(s13)(s13)
sameTyped(s13)(13)
illTyped("""
sameTyped(s13)(s23)
sameTyped(s13)(23)
""")
val sFoo = "foo".narrow
val sBar = "bar".narrow
sameTyped(sFoo)(sFoo)
sameTyped(sFoo)("foo")
illTyped("""
sameTyped(sFoo)(sBar)
sameTyped(sFoo)("bar")
""")
val sFooSym = Symbol("foo").narrow
val sBarSym = Symbol("bar").narrow
sameTyped(sFooSym)(sFooSym)
sameTyped(sFooSym)(Symbol("foo"))
illTyped("""
sameTyped(sFooSym)(sBarSym)
sameTyped(sFooSym)(Symbol("bar"))
""")
}
trait Show[T] {
def show: String
}
object Show {
implicit val showTrue = new Show[True] { def show = "true" }
implicit val showFalse = new Show[False] { def show = "false" }
implicit val showOne = new Show[_1] { def show = "One" }
implicit val showTwo = new Show[_2] { def show = "Two" }
implicit val showThree = new Show[_3] { def show = "Three" }
implicit val showFoo = new Show[Foo] { def show = "'foo" }
implicit val showBar = new Show[Bar] { def show = "'bar" }
}
def show[T](t: T)(implicit s: Show[T]) = s.show
@Test
def testRefinedTypeClass: Unit = {
val sTrue = show(true.narrow)
assertEquals("true", sTrue)
val sFalse = show(false.narrow)
assertEquals("false", sFalse)
val sOne = show(1.narrow)
assertEquals("One", sOne)
val sTwo = show(2.narrow)
assertEquals("Two", sTwo)
val sThree = show(3.narrow)
assertEquals("Three", sThree)
illTyped("""
show(0.narrow)
""")
val sFoo = show(Symbol("foo").narrow)
assertEquals("'foo", sFoo)
val sBar = show(Symbol("bar").narrow)
assertEquals("'bar", sBar)
}
trait LiteralShow[T] {
def show: String
}
object LiteralShow {
implicit val showTrue = new LiteralShow[Witness.`true`.T] { def show = "true" }
implicit val showFalse = new LiteralShow[Witness.`false`.T] { def show = "false" }
implicit val showOne = new LiteralShow[Witness.`1`.T] { def show = "One" }
implicit val showTwo = new LiteralShow[Witness.`2`.T] { def show = "Two" }
implicit val showThree = new LiteralShow[Witness.`3`.T] { def show = "Three" }
implicit val showFoo = new LiteralShow[Witness.`'foo`.T] { def show = "'foo" }
implicit val showBar = new LiteralShow[Witness.`'bar`.T] { def show = "'bar" }
}
def literalShow[T](t: T)(implicit s: LiteralShow[T]) = s.show
@Test
def testRefinedLiteralTypeClass: Unit = {
val sTrue = literalShow(true.narrow)
assertEquals("true", sTrue)
val sFalse = literalShow(false.narrow)
assertEquals("false", sFalse)
val sOne = literalShow(1.narrow)
assertEquals("One", sOne)
val sTwo = literalShow(2.narrow)
assertEquals("Two", sTwo)
val sThree = literalShow(3.narrow)
assertEquals("Three", sThree)
illTyped("""
literalShow(0.narrow)
""")
val sFoo = literalShow(Symbol("foo").narrow)
assertEquals("'foo", sFoo)
val sBar = literalShow(Symbol("bar").narrow)
assertEquals("'bar", sBar)
}
trait LiteralsShow[-T] {
def show: String
}
object LiteralsShow {
implicit val showTrueFalse = new LiteralsShow[HList.`true, false`.T] { def show = "true, false" }
implicit val showOneOrTwoOrThree = new LiteralsShow[Coproduct.`1, 2, 3`.T] { def show = "One | Two | Three" }
implicit val showFooBar = new LiteralsShow[HList.`'foo, 'bar`.T] { def show = "'foo, 'bar" }
}
def literalsShow[T](t: T)(implicit s: LiteralsShow[T]) = s.show
@Test
def testRefinedLiteralsTypeClass: Unit = {
val sTrueFalse = literalsShow(true.narrow :: false.narrow :: HNil)
assertEquals("true, false", sTrueFalse)
val sOne = literalsShow(Inl(1.narrow))
assertEquals("One | Two | Three", sOne)
val sTwo = literalsShow(Inr(Inl(2.narrow)))
assertEquals("One | Two | Three", sTwo)
val sThree = literalsShow(Inr(Inr(Inl(3.narrow))))
assertEquals("One | Two | Three", sThree)
illTyped("""
literalsShow(true :: false :: HNil)
""")
val sFooBar = literalsShow(Symbol("foo").narrow :: Symbol("bar").narrow :: HNil)
assertEquals("'foo, 'bar", sFooBar)
}
@Test
def testWitness: Unit = {
val wTrue = Witness(true)
val wFalse = Witness(false)
sameTyped(wTrue)(wTrue)
illTyped("""
sameTyped(wTrue)(wFalse)
""")
val w13 = Witness(13)
val w23 = Witness(23)
sameTyped(w13)(w13)
illTyped("""
sameTyped(w13)(w23)
""")
val wFoo = Witness("foo")
val wBar = Witness("bar")
sameTyped(wFoo)(wFoo)
illTyped("""
sameTyped(wFoo)(wBar)
""")
val wFooSym = Witness(Symbol("foo"))
val wBarSym = Witness(Symbol("bar"))
sameTyped(wFooSym)(wFooSym)
illTyped("""
sameTyped(wFooSym)(wBarSym)
""")
}
def convert(w: Witness): Witness.Aux[w.T] = w
def boundedConvert2[B](w: Witness.Lt[B]): Witness.Aux[w.T] = w
def testSingletonWitness: Unit = {
trait Bound
object Foo extends Bound
val bar = "bar"
val wFoo = Witness(Foo)
val wBar = Witness(bar)
typed[Foo.type](wFoo.value)
typed[bar.type](wBar.value)
val cFoo = convert(Foo)
val cBar = convert(bar)
sameTyped(cFoo)(Witness(Foo))
sameTyped(cBar)(Witness(bar))
val bcFoo = boundedConvert2[Bound](Foo)
val bcBar = boundedConvert2[String](bar)
sameTyped(bcFoo)(Witness(Foo))
sameTyped(bcBar)(Witness(bar))
}
@Test
def testWitnessConversion: Unit = {
val cTrue = convert(true)
val cFalse = convert(false)
sameTyped(cTrue)(Witness(true))
sameTyped(cFalse)(Witness(false))
illTyped("""
sameTyped(cTrue)(Witness(false))
""")
illTyped("""
sameTyped(cFalse)(Witness(true))
""")
val c13 = convert(13)
val c23 = convert(23)
sameTyped(c13)(Witness(13))
sameTyped(c23)(Witness(23))
illTyped("""
sameTyped(c13)(Witness(23))
""")
illTyped("""
sameTyped(c23)(Witness(13))
""")
val cFoo = convert("foo")
val cBar = convert("bar")
sameTyped(cFoo)(Witness("foo"))
sameTyped(cBar)(Witness("bar"))
illTyped("""
sameTyped(cFoo)(Witness("bar"))
""")
illTyped("""
sameTyped(cBar)(Witness("foo"))
""")
val cFooSym = convert(Symbol("foo"))
val cBarSym = convert(Symbol("bar"))
sameTyped(cFooSym)(Witness(Symbol("foo")))
sameTyped(cBarSym)(Witness(Symbol("bar")))
illTyped("""
sameTyped(cFooSym)(Witness(Symbol("bar")))
""")
illTyped("""
sameTyped(cBarSym)(Witness(Symbol("foo")))
""")
}
def boundedConvert(w: Witness.Lt[Int]): Witness.Aux[w.T] = w
@Test
def testBoundedWitnessConversion: Unit = {
val c13 = boundedConvert(13)
sameTyped(c13)(Witness(13))
illTyped("""
sameTyped(c13)(Witness(23))
""")
illTyped("""
boundedConvert(true)
""")
illTyped("""
boundedConvert("foo")
""")
illTyped("""
boundedConvert(Symbol("foo"))
""")
}
def showLiteral(t: Witness)(implicit s: Show[t.T]) = s.show
@Test
def testLiteralTypeClass: Unit = {
val sTrue = showLiteral(true)
assertEquals("true", sTrue)
val sFalse = showLiteral(false)
assertEquals("false", sFalse)
val sOne = showLiteral(1)
assertEquals("One", sOne)
val sTwo = showLiteral(2)
assertEquals("Two", sTwo)
val sThree = showLiteral(3)
assertEquals("Three", sThree)
val sFooSym = showLiteral(Symbol("foo"))
assertEquals("'foo", sFooSym)
val sBarSym = showLiteral(Symbol("bar"))
assertEquals("'bar", sBarSym)
illTyped("""
showLiteral(0)
""")
}
trait ShowWitness[T] {
def show: String
}
object ShowWitness {
implicit def showWitness[T](implicit w: Witness.Aux[T]) =
new ShowWitness[T] {
def show = w.value.toString
}
}
def showWitness(w: Witness)(implicit s: ShowWitness[w.T]) = s.show
@Test
def testWitnessTypeClass: Unit = {
val sTrue = showWitness(true)
assertEquals("true", sTrue)
val sFalse = showWitness(false)
assertEquals("false", sFalse)
val sOne = showWitness(1)
assertEquals("1", sOne)
val sTwo = showWitness(2)
assertEquals("2", sTwo)
val sThree = showWitness(3)
assertEquals("3", sThree)
val sFooSym = showWitness(Symbol("foo"))
assertEquals("'foo", sFooSym)
val sBarSym = showWitness(Symbol("bar"))
assertEquals("'bar", sBarSym)
}
def showWitnessWith(w: WitnessWith[Show]) = w.instance.show
@Test
def testWitnessWith: Unit = {
val sTrue = showWitnessWith(true)
assertEquals("true", sTrue)
val sFalse = showWitnessWith(false)
assertEquals("false", sFalse)
val sOne = showWitnessWith(1)
assertEquals("One", sOne)
val sTwo = showWitnessWith(2)
assertEquals("Two", sTwo)
val sThree = showWitnessWith(3)
assertEquals("Three", sThree)
val sFooSym = showWitnessWith(Symbol("foo"))
assertEquals("'foo", sFooSym)
val sBarSym = showWitnessWith(Symbol("bar"))
assertEquals("'bar", sBarSym)
}
trait Rel[T] {
type Out
}
object Rel {
implicit def relTrue: Rel[True] { type Out = Int } = new Rel[True] { type Out = Int }
implicit def relFalse: Rel[False] { type Out = String } = new Rel[False] { type Out = String }
}
def check(w: WitnessWith[Rel])(v: w.instance.Out) = v
@Test
def testWitnessWithOut: Unit = {
val relTrue = check(true)(23)
typed[Int](relTrue)
val relFalse = check(false)("foo")
typed[String](relFalse)
illTyped("""
check(true)("foo")
""")
illTyped("""
check(false)(23)
""")
illTyped("""
check(23)(23)
""")
}
@Test
def testValueClass: Unit = {
val x = new ValueTest(5)
val y = new ValueTest(5)
val wX = Witness(x)
val wY = Witness(y)
illTyped("""
implicitly[wX.T =:= wY.T]
""", "Cannot prove that wX.T =:= wY.T.")
}
@Test
def primitiveWiden: Unit = {
{
val w = Widen[Witness.`2`.T]
illTyped(" w(3) ", "type mismatch;.*")
val n = w(2)
val n0: Int = n
illTyped(" val n1: Witness.`2`.T = n ", "type mismatch;.*")
assertTypedEquals[Int](2, n)
}
{
val w = Widen[Witness.`true`.T]
illTyped(" w(false) ", "type mismatch;.*")
val b = w(true)
val b0: Boolean = b
illTyped(" val b1: Witness.`true`.T = b ", "type mismatch;.*")
assertTypedEquals[Boolean](true, b)
}
{
val w = Widen[Witness.`"ab"`.T]
illTyped(""" w("s") """, "type mismatch;.*")
val s = w("ab")
val s0: String = s
illTyped(""" val s1: Witness.`"ab"`.T = s """, "type mismatch;.*")
assertTypedEquals[String]("ab", s)
}
}
@Test
def symbolWiden: Unit = {
// Masks shapeless.syntax.singleton.narrowSymbol.
// Having it in scope makes the illTyped tests fail in an unexpected way.
def narrowSymbol = ???
val w = Widen[Witness.`'ab`.T]
illTyped(""" w(Symbol("s").narrow) """, "type mismatch;.*")
val s = w(Symbol("ab").narrow)
val s0: Symbol = s
illTyped(" val s1: Witness.`'ab`.T = s ", "type mismatch;.*")
assertTypedEquals[Symbol](Symbol("ab"), s)
}
@Test
def aliasWiden: Unit = {
type T = Witness.`2`.T
val w = Widen[T]
illTyped(" w(3) ", "type mismatch;.*")
val n = w(2)
val n0: Int = n
illTyped(" val n1: Witness.`2`.T = n ", "type mismatch;.*")
assertTypedEquals[Int](2, n)
}
trait B
case object A extends B
@Test
def singletonWiden: Unit = {
illTyped(" Widen[A.type] ", "could not find implicit value for parameter widen:.*")
}
@Test
def testWitnessThisType: Unit = {
class ClassThis {
val w1 = Witness(this)
val w2 = Witness[this.type]
}
object ObjectThis {
val w1 = Witness(this)
val w2 = Witness[this.type]
}
val c = new ClassThis
assertTypedEquals[c.type](c.w1.value, c.w2.value)
assertTypedEquals[ObjectThis.type](ObjectThis.w1.value, ObjectThis.w2.value)
}
@Test
def testWitnessTypeRefType: Unit = {
trait B1 {
type T <: B
def getT(implicit w: Witness.Aux[T]): T = w.value
}
case class A1() extends B1 {
type T = A.type
}
assertTypedEquals[A.type](A1().getT, A)
}
class NestingBug {
val o: AnyRef = new Object {}
val wO = {
final class W extends _root_.shapeless.Witness {
type T = o.type
val value: T = o
}
new W
}
val x1: o.type = wO.value
}
class PathDependentSingleton1 {
val o: AnyRef = new Object {}
val wO = Witness(o)
type OT = wO.T
implicitly[OT =:= o.type]
val x0: OT = wO.value
val x1: o.type = wO.value
val x2 = wO.value
typed[o.type](x2)
typed[OT](x2)
}
object PathDependentSingleton2 {
val o: AnyRef = new Object {}
val wO = Witness(o)
type OT = wO.T
implicitly[OT =:= o.type]
val x0: OT = wO.value
val x1: o.type = wO.value
val x2 = wO.value
typed[o.type](x2)
typed[OT](x2)
}
}
package SingletonTypeTestsAux {
class Wrapper {
sealed trait Sealed
object Sealed {
case object A extends Sealed
}
implicitly[Witness.Aux[Sealed.A.type]]
}
}
package UnrefineTest {
import shapeless._
import shapeless.ops.record._
import shapeless.syntax.singleton._
trait Foo[A] {
type Out
def to(a: A): Out
}
object Foo {
type Aux[A, Out0] = Foo[A] { type Out = Out0 }
def apply[A, Out](implicit foo: Aux[A, Out]) = foo
implicit def from[A, Out0](implicit gen: LabelledGeneric.Aux[A, Out0]) =
new Foo[A] {
type Out = Out0
def to(a: A) = gen.to(a)
}
}
class Bar[A, HL <: HList](gen: Foo.Aux[A, HL]) {
def modify[K, V, U, Out0 <: HList](k: K, f: V => U)(implicit modifier: Modifier.Aux[HL, K, V, U, Out0]) =
new Bar[A, Out0](new Foo[A] {
type Out = Out0
def to(a: A): Out = modifier.apply(gen.to(a), f)
})
def keys[Out <: HList](implicit keys: Keys.Aux[HL, Out]): Out = keys.apply()
}
final case class FooBar(x: String, y: Int)
object Test {
new Bar(Foo.from( LabelledGeneric[FooBar] )).modify(Symbol("y").narrow, (_: Int) * 2)
new Bar(Foo.from( LabelledGeneric[FooBar] )).keys
new Bar(Foo.from( LabelledGeneric[FooBar] )).modify(Symbol("y").narrow, (_: Int) * 2).keys
}
}
| lambdista/shapeless | core/src/test/scala/shapeless/singletons.scala | Scala | apache-2.0 | 16,080 |
package jp.co.bizreach.elasticsearch4s.generator
case class ESCodegenConfig(
outputDir: String = "src/main/scala",
packageName: String = "models",
jsonFiles: Seq[String] = Seq("schema.json"),
classMappings: Map[String, String] = Map.empty,
typeMappings: Map[String, String] = Map.empty,
arrayProperties: Map[String, Seq[String]] = Map.empty,
ignoreProperties: Seq[String] = Nil
)
object ESCodegenConfig {
import com.typesafe.config.ConfigFactory
import collection.JavaConverters._
import java.nio.file.Paths
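/**
 * Reads `es-gen.conf` from the current working directory, falling back to the defaults
 * above for any missing key. A hypothetical example of the keys consumed below (the
 * values are illustrative only, not taken from the original project):
 *
 * {{{
 * es-codegen.output.dir = "src/main/scala"
 * es-codegen.package.name = "models"
 * es-codegen.json.files = ["schema.json"]
 * es-codegen.class.mappings = ["book: Book"]
 * es-codegen.type.mappings = ["date: java.time.LocalDate"]
 * es-codegen.array.properties = ["Book: tags, authors"]
 * es-codegen.ignore.properties = ["_internal"]
 * }}}
 */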
def load(): ESCodegenConfig = {
val config = ConfigFactory.parseFileAnySyntax(Paths.get("es-gen.conf").toFile)
ESCodegenConfig(
outputDir = if(config.hasPath("es-codegen.output.dir")) config.getString("es-codegen.output.dir") else "src/main/scala",
packageName = if(config.hasPath("es-codegen.package.name")) config.getString("es-codegen.package.name") else "models",
jsonFiles = if(config.hasPath("es-codegen.json.files")) config.getStringList("es-codegen.json.files").asScala.toSeq else Seq("schema.json"),
classMappings = if(config.hasPath("es-codegen.class.mappings")) config.getStringList("es-codegen.class.mappings").asScala.map { x =>
val array = x.split(":")
val key = array(0).trim
val value = array(1).trim
key -> value
}.toMap else Map.empty,
typeMappings = if(config.hasPath("es-codegen.type.mappings")) config.getStringList("es-codegen.type.mappings").asScala.map { x =>
val array = x.split(":")
val key = array(0).trim
val value = array(1).trim
key -> value
}.toMap else Map.empty,
arrayProperties = if(config.hasPath("es-codegen.array.properties")) config.getStringList("es-codegen.array.properties").asScala.map { x =>
val array = x.split(":")
val key = array(0).trim
val value = array(1).trim
key -> value.split(",").map(_.trim).toSeq
}.toMap else Map.empty,
ignoreProperties = if(config.hasPath("es-codegen.ignore.properties")) config.getStringList("es-codegen.ignore.properties").asScala.toSeq else Nil
)
}
}
| dasoran/elastic-scala-httpclient | elastic-scala-codegen/src/main/scala/jp/co/bizreach/elasticsearch4s/generator/ESCodegenConfig.scala | Scala | apache-2.0 | 2,168 |
package com.mesosphere.cosmos.thirdparty.marathon.model
case class MarathonAppContainer(`type`: String, docker: Option[MarathonAppContainerDocker])
| movicha/cosmos | cosmos-model/src/main/scala/com/mesosphere/cosmos/thirdparty/marathon/model/MarathonAppContainer.scala | Scala | apache-2.0 | 149 |
import http.HttpServer
import stock.mock.MockStockPriceService
// Mock server
object Mock extends App
with HttpServer
with MockStockPriceService
{
// Increment port to allow simultaneous real and mock servers
override def port = super.port + 1
}
| linearregression/akka-streams-http-intro | src/test/scala/Mock.scala | Scala | apache-2.0 | 259 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.nn.internal
import com.intel.analytics.bigdl.dllib.nn.Graph.ModuleNode
import com.intel.analytics.bigdl.dllib.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.dllib.nn.{CAddTable, CAveTable, CMaxTable, CMulTable, CosineDistance, DotProduct, JoinTable, ParallelTable, Sequential => TSequential}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.dllib.utils.{MultiShape, Shape}
import scala.reflect.ClassTag
/**
* Used to merge a list of inputs into a single output, following some merge mode.
* To merge layers, it must take at least two input layers.
*
* When using this layer as the first layer in a model, you need to provide the argument
* inputShape for the input layers (each as a single Shape, not including the batch dimension).
*
* @param layers A list of layer instances. Must be more than one layer.
* @param mode Merge mode. String, must be one of: 'sum', 'mul', 'concat', 'ave', 'cos',
* 'dot', 'max'. Default is 'sum'.
* @param concatAxis Integer, axis to use when concatenating layers. Only specify this when merge
* mode is 'concat'. Default is -1, meaning the last axis of the input.
* @tparam T The numeric type of parameter(e.g. weight, bias). Only support float/double now.
*/
class Merge[T: ClassTag](
val layers: Array[AbstractModule[Activity, Activity, T]] = null,
val mode: String = "sum",
val concatAxis: Int = -1,
// MultiShape isn't directly supported for serialization. Use Shape instead.
val inputShape: Shape = null)(implicit ev: TensorNumeric[T])
extends KerasLayer[Tensor[T], Tensor[T], T](Merge.calcBatchInputShape(inputShape, layers)) {
private val mergeMode = mode.toLowerCase()
private var axis = concatAxis
require(mergeMode == "sum" || mergeMode == "mul" || mergeMode == "concat" || mergeMode == "ave"
|| mergeMode == "cos" || mergeMode == "dot" || mergeMode == "max",
s"Invalid merge mode: $mergeMode")
if (layers != null) {
require(layers.length >= 2, s"Merge must take at least two input layers " +
s"but found ${layers.length}")
this.excludeInvalidLayers(layers)
}
private def computeOutputShapeForConcat(input: List[Shape]): Shape = {
import scala.util.control.Breaks._
val input1 = input.head.toSingle().toArray
val output = input1.clone()
require(Math.abs(concatAxis) < output.length, s"Invalid concat axis $concatAxis")
axis = if (concatAxis < 0) concatAxis + output.length else concatAxis
var i = 1
// Wrap the scan in `breakable` so that `break` only exits this loop; once the size along
// the concat axis is unknown (-1) for any input, the output size along that axis is unknown.
breakable {
while (i < input.length) {
val input_i = input(i).toSingle().toArray
var j = 0
while (j < input_i.length) {
if (j != axis) require(input_i(j)==output(j), s"Incompatible input dimension for merge " +
s"mode concat: (${output.deep.mkString(", ")}), " +
s"(${input_i.deep.mkString(", ")})")
j += 1
}
if (output(axis) == -1 || input_i(axis) == -1) {
output(axis) = -1
break
}
output(axis) = output(axis) + input_i(axis)
i += 1
}
}
Shape(output)
}
private def checkSameInputShape(input: List[Shape]): Unit = {
val input1 = input.head.toSingle().toArray
var i = 1
while (i < input.length) {
val input_i = input(i).toSingle().toArray
require(input_i.sameElements(input1), s"Incompatible input dimension for " +
s"merge mode $mergeMode: (${input1.deep.mkString(", ")}), " +
s"(${input_i.deep.mkString(", ")})")
i += 1
}
}
override def computeOutputShape(inputShape: Shape): Shape = {
val input = inputShape.toMulti()
val input1 = input.head.toSingle().toArray
if (mergeMode == "concat") {
computeOutputShapeForConcat(input)
}
else {
checkSameInputShape(input)
if (mergeMode == "dot" || mergeMode == "cos") {
require(input.head.toSingle().length <=2, s"For merge mode $mergeMode, 3D input " +
s"or above is currently not supported, got input dim ${input.head.toSingle().length}")
require(input.length == 2, s"Merge mode $mergeMode takes exactly two layers, " +
s"but got ${input.length}")
if (mergeMode == "dot") Shape(-1, 1) else Shape(-1, 1, 1)
}
else {
input.head
}
}
}
override def doBuild(inputShape: Shape): AbstractModule[Tensor[T], Tensor[T], T] = {
val input = inputShape.toMulti()
val mergeLayer = mergeMode match {
case "sum" => CAddTable()
case "mul" => CMulTable()
case "max" => CMaxTable()
case "ave" => CAveTable()
case "concat" =>
val input1 = input.head.toSingle().toArray
JoinTable(axis, input1.length -1)
case "dot" =>
val seq = TSequential[T]()
seq.add(DotProduct())
seq.add(com.intel.analytics.bigdl.dllib.nn.Reshape(Array(1), Some(true)))
seq
case "cos" =>
val seq = TSequential[T]()
seq.add(CosineDistance())
seq.add(com.intel.analytics.bigdl.dllib.nn.Reshape(Array(1, 1), Some(true)))
seq
}
if (layers != null) { // In the case `layers != null`, return a ParallelTable to merge layers
val model = TSequential[T]()
val parallel = ParallelTable()
var i = 0
while(i < layers.length) {
parallel.add(layers(i).asInstanceOf[KerasLayer[Activity, Activity, T]].labor)
i += 1
}
model.add(parallel)
model.add(mergeLayer)
model.asInstanceOf[AbstractModule[Tensor[T], Tensor[T], T]]
}
else { // In the case `layers == null`, only return a merge layer to merge nodes not layers.
mergeLayer.asInstanceOf[AbstractModule[Tensor[T], Tensor[T], T]]
}
}
}
object Merge {
def calcBatchInputShape[T: ClassTag](
inputShape: Shape = null,
layers: Array[AbstractModule[Activity, Activity, T]]): Shape = {
val batchInputShape = KerasLayer.addBatch(inputShape)
val actualInputShape = if (layers != null) {
MultiShape(layers.map { layer =>
if (layer.isBuilt()) { // it's possible while reloaded from file
layer.getOutputShape()
} else {
layer.build(layer.getInputShape())
}
}.toList)
} else null
if (batchInputShape != null) {
require(batchInputShape.isInstanceOf[MultiShape],
"Merge requires inputShape to be MultiShape")
require(batchInputShape.toMulti().equals(actualInputShape.toMulti()),
"Actual layer input shapes are not the same as expected layer input shapes")
}
actualInputShape
}
def apply[@specialized(Float, Double) T: ClassTag](
layers: List[AbstractModule[Activity, Activity, T]] = null,
mode: String = "sum",
concatAxis: Int = -1,
inputShape: Shape = null)(implicit ev: TensorNumeric[T]): Merge[T] = {
val layersArray = if (layers != null) layers.toArray else null
new Merge[T](layersArray, mode, concatAxis, inputShape)
}
def merge[@specialized(Float, Double) T: ClassTag](
inputs: List[ModuleNode[T]],
mode: String = "sum",
concatAxis: Int = -1,
name: String = null)(implicit ev: TensorNumeric[T]): ModuleNode[T] = {
val mergeLayer = new Merge[T](mode = mode, concatAxis = concatAxis)
if (name != null) mergeLayer.setName(name)
mergeLayer.inputs(inputs.toArray)
}
}
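// Illustrative usage sketch, not part of the original source: merging two already-built
// Keras-style layers with the "concat" mode documented above. `branch1` and `branch2` are
// placeholders for any two layers with compatible output shapes; only the Merge.apply
// defined in this file is exercised.
object MergeUsageSketch {
def concatBranches[T: ClassTag](
branch1: AbstractModule[Activity, Activity, T],
branch2: AbstractModule[Activity, Activity, T])(implicit ev: TensorNumeric[T]): Merge[T] =
Merge[T](layers = List(branch1, branch2), mode = "concat", concatAxis = -1)
}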
| intel-analytics/BigDL | scala/dllib/src/main/scala/com/intel/analytics/bigdl/dllib/nn/internal/Merge.scala | Scala | apache-2.0 | 8,008 |
/***********************************************************************
* Copyright (c) 2017-2020 IBM
* Copyright (c) 2013-2020 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa
import org.locationtech.geomesa.utils.conf.GeoMesaSystemProperties.SystemProperty
package object cassandra {
case class NamedColumn(name: String, i: Int, cType: String, jType: Class[_], partition: Boolean = false)
case class ColumnSelect(column: NamedColumn, start: Any, end: Any, startInclusive: Boolean, endInclusive: Boolean)
case class RowSelect(clauses: Seq[ColumnSelect])
object CassandraSystemProperties {
val ReadTimeoutMillis = SystemProperty("geomesa.cassandra.read.timeout", "30 seconds")
val ConnectionTimeoutMillis = SystemProperty("geomesa.cassandra.connection.timeout", "30 seconds")
}
}
| ccri/geomesa | geomesa-cassandra/geomesa-cassandra-datastore/src/main/scala/org/locationtech/geomesa/cassandra/package.scala | Scala | apache-2.0 | 1,161 |
/*
* Copyright 2013 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may
* not use this file except in compliance with the License. You may obtain
* a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.algebird.util
import com.twitter.algebird.MonadLaws.monadLaws
import com.twitter.util.{ Await, Future, Throw, Return, Try }
import org.scalatest.{ PropSpec, Matchers }
import org.scalatest.prop.PropertyChecks
import org.scalacheck.{ Arbitrary, Properties }
class UtilAlgebraProperties extends PropSpec with PropertyChecks with Matchers {
import UtilAlgebras._
def toOption[T](f: Future[T]): Option[T] =
try {
Some(Await.result(f))
} catch {
case _: Exception => None
}
implicit def futureA[T: Arbitrary]: Arbitrary[Future[T]] =
Arbitrary {
Arbitrary.arbitrary[T].map { l => Future.value(l) }
}
implicit def returnA[T: Arbitrary]: Arbitrary[Try[T]] =
Arbitrary {
Arbitrary.arbitrary[T].map { l => Return(l) }
}
property("futureMonad") {
monadLaws[Future, Int, String, Long] { (f1, f2) =>
toOption(f1) == toOption(f2)
}
}
property("tryMonad") {
monadLaws[Try, Int, String, Long]()
}
}
| avibryant/algebird | algebird-util/src/test/scala/com/twitter/algebird/util/UtilAlgebraProperties.scala | Scala | apache-2.0 | 1,594 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.storage.BlockManagerId
/**
* :: DeveloperApi ::
* Various possible reasons why a task ended. The low-level TaskScheduler is supposed to retry
* tasks several times for "ephemeral" failures, and only report back failures that require some
* old stages to be resubmitted, such as shuffle map fetch failures.
*/
@DeveloperApi
sealed trait TaskEndReason
@DeveloperApi
case object Success extends TaskEndReason
@DeveloperApi
case object Resubmitted extends TaskEndReason // Task was finished earlier but we've now lost it
@DeveloperApi
case class FetchFailed(
bmAddress: BlockManagerId,
shuffleId: Int,
mapId: Int,
reduceId: Int)
extends TaskEndReason
@DeveloperApi
case class ExceptionFailure(
className: String,
description: String,
stackTrace: Array[StackTraceElement],
metrics: Option[TaskMetrics])
extends TaskEndReason
/**
* :: DeveloperApi ::
* The task finished successfully, but the result was lost from the executor's block manager before
* it was fetched.
*/
@DeveloperApi
case object TaskResultLost extends TaskEndReason
@DeveloperApi
case object TaskKilled extends TaskEndReason
/**
* :: DeveloperApi ::
* The task failed because the executor that it was running on was lost. This may happen because
* the task crashed the JVM.
*/
@DeveloperApi
case object ExecutorLostFailure extends TaskEndReason
/**
* :: DeveloperApi ::
* We don't know why the task ended -- for example, because of a ClassNotFound exception when
* deserializing the task result.
*/
@DeveloperApi
case object UnknownReason extends TaskEndReason
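// Illustrative sketch, not part of the original source: exhaustively matching on a
// TaskEndReason, for example when logging task completion events. Only the case
// classes/objects defined in this file are referenced; the helper itself is hypothetical.
private[spark] object TaskEndReasonSketch {
def describe(reason: TaskEndReason): String = reason match {
case Success => "task completed successfully"
case Resubmitted => "task finished earlier but was lost; it will be resubmitted"
case FetchFailed(bmAddress, shuffleId, mapId, reduceId) =>
s"failed to fetch output of shuffle $shuffleId (map $mapId, reduce $reduceId) from $bmAddress"
case ExceptionFailure(className, description, _, _) => s"exception $className: $description"
case TaskResultLost => "task result was lost before it could be fetched"
case TaskKilled => "task was killed"
case ExecutorLostFailure => "the executor running the task was lost"
case UnknownReason => "unknown failure reason"
}
}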
| yelshater/hadoop-2.3.0 | spark-core_2.10-1.0.0-cdh5.1.0/src/main/scala/org/apache/spark/TaskEndReason.scala | Scala | apache-2.0 | 2,530 |
/**
* Author : Florian Simon <[email protected]>
* Role : Alternate entry point that spawns a preconfigured game
* (used for testing).
*/
import Game.{Character, Facts, Game, Grid, Point, Stat, Team};
import Views.Swing.SwingInterface;
import Controllers.GameView;
import scala.swing.Color;
import scala.util.Random;
/**
* Main object that starts a game directly without any manual configuration.
*/
object PreconfiguredGame extends EntryPoint {
/**
* Generates a new, pre-configured game.
*/
def makeGame : Game = {
/* Creates the teams without players. */
val emptyDelta = Team(new Color(255, 0, 0));
val emptyGamma : Team = Team(new Color(0, 0, 255));
/* Members of the first team. */
lazy val carl = Character("Carl", Stat(20, 20), Stat(20, 20), Stat(20, 20), 0, emptyDelta.ID);
lazy val mark = Character("Mark", Stat(20, 20), Stat(20, 20), Stat(20, 20), 0, emptyDelta.ID);
lazy val lucy = Character("Lucy", Stat(20, 20), Stat(20, 20), Stat(20, 20), 0, emptyDelta.ID);
lazy val sara = Character("Sara", Stat(20, 20), Stat(20, 20), Stat(20, 20), 0, emptyDelta.ID);
/* Members of the second team. */
lazy val lara = Character("Lara", Stat(20, 20), Stat(20, 20), Stat(20, 20), 0, emptyGamma.ID);
lazy val gina = Character("Gina", Stat(20, 20), Stat(20, 20), Stat(20, 20), 0, emptyGamma.ID);
lazy val kile = Character("Kile", Stat(20, 20), Stat(20, 20), Stat(20, 20), 0, emptyGamma.ID);
lazy val gwen = Character("Gwen", Stat(20, 20), Stat(20, 20), Stat(20, 20), 0, emptyGamma.ID);
/* The teams themselves. */
val delta = emptyDelta.withCharacters(List(carl.ID, mark.ID, lucy.ID, sara.ID));
val gamma = emptyGamma.withCharacters(List(lara.ID, gina.ID, kile.ID, gwen.ID));
/* The game grid. */
val grid = Grid(Facts.gridSize.x, Facts.gridSize.y, Map(
/* Placement of the delta team. */
(carl.ID, Point(0, 13)), (mark.ID, Point(0, 12)), (lucy.ID, Point(0, 14)), (sara.ID, Point(1, 13)),
/* Placement of the gamma team. */
(lara.ID, Point(24, 13)), (gina.ID, Point(24, 12)), (kile.ID, Point(24, 14)), (gwen.ID, Point(23, 13))
));
/* The index of all characters. */
val characters = Map(
/* Characters of team delta. */
(carl.ID, carl), (mark.ID, mark), (lucy.ID, lucy), (sara.ID, sara),
/* Characters of team gamma. */
(lara.ID, lara), (gina.ID, gina), (kile.ID, kile), (gwen.ID, gwen)
);
val game = Game(grid, Random, Nil, Map((delta.ID, delta), (gamma.ID, gamma)), characters);
return game;
}
override val interface = new SwingInterface();
override val firstStep = Some(new GameView(this.makeGame));
}
| floriansimon1/learning.trpg | src/PreconfiguredGame.scala | Scala | mit | 2,705 |
package controllers.api.protocol
import play.api.libs.json.Json
/**
* Wrapper to generate a response that has an array of users.
*
* @param users A list of users
*/
case class UsersResponse(users: List[UserProtocolModel])
/**
* The companion object. Provides implicit Json formatting.
*/
object UsersResponse {
implicit val implicitUserListModelFormats = Json.format[UsersResponse]
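  /*
   * Hypothetical usage sketch (not part of the original file): with the implicit format in
   * scope, a UsersResponse renders straight to JSON. It assumes UserProtocolModel provides
   * its own implicit format, which the generated Json.format above already requires.
   */
  private object UsageSketch {
    import play.api.libs.json.JsValue
    def render(users: List[UserProtocolModel]): JsValue = Json.toJson(UsersResponse(users))
  }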
} | HiP-App/HiPCMS | app/controllers/api/protocol/UsersResponse.scala | Scala | apache-2.0 | 400 |
package io.circe
import io.circe.scalajs.convertJsToJson
import scala.scalajs.js.JSON
import scala.util.control.NonFatal
package object parser extends Parser {
final def parse(input: String): Either[ParsingFailure, Json] = (
try convertJsToJson(JSON.parse(input))
catch {
case NonFatal(exception) => Left(ParsingFailure(exception.getMessage, exception))
}
) match {
case r @ Right(_) => r.asInstanceOf[Either[ParsingFailure, Json]]
case Left(exception) => Left(ParsingFailure(exception.getMessage, exception))
}
}
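/*
 * Hypothetical usage sketch (not part of the original file): parsing is total; malformed
 * input is reported as a Left(ParsingFailure) value rather than a thrown exception.
 */
private object ParserUsageSketch {
  val wellFormed: Either[ParsingFailure, Json] = parser.parse("""{"enabled":true,"retries":3}""")
  val malformed: Either[ParsingFailure, Json] = parser.parse("{ not json")
}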
| travisbrown/circe | modules/parser/js/src/main/scala/io/circe/parser/package.scala | Scala | apache-2.0 | 551 |
package spark.storage
import java.nio.ByteBuffer
import akka.actor._
import org.scalatest.FunSuite
import org.scalatest.BeforeAndAfter
import org.scalatest.PrivateMethodTester
import org.scalatest.concurrent.Eventually._
import org.scalatest.concurrent.Timeouts._
import org.scalatest.matchers.ShouldMatchers._
import org.scalatest.time.SpanSugar._
import spark.JavaSerializer
import spark.KryoSerializer
import spark.SizeEstimator
import spark.util.ByteBufferInputStream
class BlockManagerSuite extends FunSuite with BeforeAndAfter with PrivateMethodTester {
var store: BlockManager = null
var store2: BlockManager = null
var actorSystem: ActorSystem = null
var master: BlockManagerMaster = null
var oldArch: String = null
var oldOops: String = null
var oldHeartBeat: String = null
// Reuse a serializer across tests to avoid creating a new thread-local buffer on each test
System.setProperty("spark.kryoserializer.buffer.mb", "1")
val serializer = new KryoSerializer
before {
actorSystem = ActorSystem("test")
master = new BlockManagerMaster(
actorSystem.actorOf(Props(new spark.storage.BlockManagerMasterActor(true))))
// Set the arch to 64-bit and compressedOops to true to get a deterministic test-case
oldArch = System.setProperty("os.arch", "amd64")
oldOops = System.setProperty("spark.test.useCompressedOops", "true")
oldHeartBeat = System.setProperty("spark.storage.disableBlockManagerHeartBeat", "true")
val initialize = PrivateMethod[Unit]('initialize)
SizeEstimator invokePrivate initialize()
}
after {
if (store != null) {
store.stop()
store = null
}
if (store2 != null) {
store2.stop()
store2 = null
}
actorSystem.shutdown()
actorSystem.awaitTermination()
actorSystem = null
master = null
if (oldArch != null) {
System.setProperty("os.arch", oldArch)
} else {
System.clearProperty("os.arch")
}
if (oldOops != null) {
System.setProperty("spark.test.useCompressedOops", oldOops)
} else {
System.clearProperty("spark.test.useCompressedOops")
}
}
test("StorageLevel object caching") {
val level1 = StorageLevel(false, false, false, 3)
val level2 = StorageLevel(false, false, false, 3) // this should return the same object as level1
val level3 = StorageLevel(false, false, false, 2) // this should return a different object
assert(level2 === level1, "level2 is not same as level1")
assert(level2.eq(level1), "level2 is not the same object as level1")
assert(level3 != level1, "level3 is same as level1")
val bytes1 = spark.Utils.serialize(level1)
val level1_ = spark.Utils.deserialize[StorageLevel](bytes1)
val bytes2 = spark.Utils.serialize(level2)
val level2_ = spark.Utils.deserialize[StorageLevel](bytes2)
assert(level1_ === level1, "Deserialized level1 not same as original level1")
    assert(level1_.eq(level1), "Deserialized level1 not the same object as original level1")
assert(level2_ === level2, "Deserialized level2 not same as original level2")
assert(level2_.eq(level1), "Deserialized level2 not the same object as original level1")
}
test("BlockManagerId object caching") {
val id1 = BlockManagerId("e1", "XXX", 1)
val id2 = BlockManagerId("e1", "XXX", 1) // this should return the same object as id1
val id3 = BlockManagerId("e1", "XXX", 2) // this should return a different object
assert(id2 === id1, "id2 is not same as id1")
assert(id2.eq(id1), "id2 is not the same object as id1")
assert(id3 != id1, "id3 is same as id1")
val bytes1 = spark.Utils.serialize(id1)
val id1_ = spark.Utils.deserialize[BlockManagerId](bytes1)
val bytes2 = spark.Utils.serialize(id2)
val id2_ = spark.Utils.deserialize[BlockManagerId](bytes2)
assert(id1_ === id1, "Deserialized id1 is not same as original id1")
assert(id1_.eq(id1), "Deserialized id1 is not the same object as original id1")
assert(id2_ === id2, "Deserialized id2 is not same as original id2")
assert(id2_.eq(id1), "Deserialized id2 is not the same object as original id1")
}
test("master + 1 manager interaction") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 2000)
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
val a3 = new Array[Byte](400)
// Putting a1, a2 and a3 in memory and telling master only about a1 and a2
store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY)
store.putSingle("a2", a2, StorageLevel.MEMORY_ONLY)
store.putSingle("a3", a3, StorageLevel.MEMORY_ONLY, false)
// Checking whether blocks are in memory
assert(store.getSingle("a1") != None, "a1 was not in store")
assert(store.getSingle("a2") != None, "a2 was not in store")
assert(store.getSingle("a3") != None, "a3 was not in store")
// Checking whether master knows about the blocks or not
assert(master.getLocations("a1").size > 0, "master was not told about a1")
assert(master.getLocations("a2").size > 0, "master was not told about a2")
assert(master.getLocations("a3").size === 0, "master was told about a3")
// Drop a1 and a2 from memory; this should be reported back to the master
store.dropFromMemory("a1", null)
store.dropFromMemory("a2", null)
assert(store.getSingle("a1") === None, "a1 not removed from store")
assert(store.getSingle("a2") === None, "a2 not removed from store")
assert(master.getLocations("a1").size === 0, "master did not remove a1")
assert(master.getLocations("a2").size === 0, "master did not remove a2")
}
test("master + 2 managers interaction") {
store = new BlockManager("exec1", actorSystem, master, serializer, 2000)
store2 = new BlockManager("exec2", actorSystem, master, new KryoSerializer, 2000)
val peers = master.getPeers(store.blockManagerId, 1)
assert(peers.size === 1, "master did not return the other manager as a peer")
assert(peers.head === store2.blockManagerId, "peer returned by master is not the other manager")
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY_2)
store2.putSingle("a2", a2, StorageLevel.MEMORY_ONLY_2)
assert(master.getLocations("a1").size === 2, "master did not report 2 locations for a1")
assert(master.getLocations("a2").size === 2, "master did not report 2 locations for a2")
}
test("removing block") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 2000)
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
val a3 = new Array[Byte](400)
// Putting a1, a2 and a3 in memory and telling master only about a1 and a2
store.putSingle("a1-to-remove", a1, StorageLevel.MEMORY_ONLY)
store.putSingle("a2-to-remove", a2, StorageLevel.MEMORY_ONLY)
store.putSingle("a3-to-remove", a3, StorageLevel.MEMORY_ONLY, false)
// Checking whether blocks are in memory and memory size
val memStatus = master.getMemoryStatus.head._2
assert(memStatus._1 == 2000L, "total memory " + memStatus._1 + " should equal 2000")
assert(memStatus._2 <= 1200L, "remaining memory " + memStatus._2 + " should <= 1200")
assert(store.getSingle("a1-to-remove") != None, "a1 was not in store")
assert(store.getSingle("a2-to-remove") != None, "a2 was not in store")
assert(store.getSingle("a3-to-remove") != None, "a3 was not in store")
// Checking whether master knows about the blocks or not
assert(master.getLocations("a1-to-remove").size > 0, "master was not told about a1")
assert(master.getLocations("a2-to-remove").size > 0, "master was not told about a2")
assert(master.getLocations("a3-to-remove").size === 0, "master was told about a3")
// Remove a1 and a2 and a3. Should be no-op for a3.
master.removeBlock("a1-to-remove")
master.removeBlock("a2-to-remove")
master.removeBlock("a3-to-remove")
eventually(timeout(1000 milliseconds), interval(10 milliseconds)) {
store.getSingle("a1-to-remove") should be (None)
master.getLocations("a1-to-remove") should have size 0
}
eventually(timeout(1000 milliseconds), interval(10 milliseconds)) {
store.getSingle("a2-to-remove") should be (None)
master.getLocations("a2-to-remove") should have size 0
}
eventually(timeout(1000 milliseconds), interval(10 milliseconds)) {
store.getSingle("a3-to-remove") should not be (None)
master.getLocations("a3-to-remove") should have size 0
}
eventually(timeout(1000 milliseconds), interval(10 milliseconds)) {
val memStatus = master.getMemoryStatus.head._2
memStatus._1 should equal (2000L)
memStatus._2 should equal (2000L)
}
}
test("reregistration on heart beat") {
val heartBeat = PrivateMethod[Unit]('heartBeat)
store = new BlockManager("<driver>", actorSystem, master, serializer, 2000)
val a1 = new Array[Byte](400)
store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY)
assert(store.getSingle("a1") != None, "a1 was not in store")
assert(master.getLocations("a1").size > 0, "master was not told about a1")
master.removeExecutor(store.blockManagerId.executorId)
assert(master.getLocations("a1").size == 0, "a1 was not removed from master")
store invokePrivate heartBeat()
assert(master.getLocations("a1").size > 0, "a1 was not reregistered with master")
}
test("reregistration on block update") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 2000)
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY)
assert(master.getLocations("a1").size > 0, "master was not told about a1")
master.removeExecutor(store.blockManagerId.executorId)
assert(master.getLocations("a1").size == 0, "a1 was not removed from master")
store.putSingle("a2", a1, StorageLevel.MEMORY_ONLY)
store.waitForAsyncReregister()
assert(master.getLocations("a1").size > 0, "a1 was not reregistered with master")
assert(master.getLocations("a2").size > 0, "master was not told about a2")
}
test("reregistration doesn't dead lock") {
val heartBeat = PrivateMethod[Unit]('heartBeat)
store = new BlockManager("<driver>", actorSystem, master, serializer, 2000)
val a1 = new Array[Byte](400)
val a2 = List(new Array[Byte](400))
// try many times to trigger any deadlocks
for (i <- 1 to 100) {
master.removeExecutor(store.blockManagerId.executorId)
val t1 = new Thread {
override def run() {
store.put("a2", a2.iterator, StorageLevel.MEMORY_ONLY, true)
}
}
val t2 = new Thread {
override def run() {
store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY)
}
}
val t3 = new Thread {
override def run() {
store invokePrivate heartBeat()
}
}
t1.start()
t2.start()
t3.start()
t1.join()
t2.join()
t3.join()
store.dropFromMemory("a1", null)
store.dropFromMemory("a2", null)
store.waitForAsyncReregister()
}
}
test("in-memory LRU storage") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 1200)
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
val a3 = new Array[Byte](400)
store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY)
store.putSingle("a2", a2, StorageLevel.MEMORY_ONLY)
store.putSingle("a3", a3, StorageLevel.MEMORY_ONLY)
assert(store.getSingle("a2") != None, "a2 was not in store")
assert(store.getSingle("a3") != None, "a3 was not in store")
assert(store.getSingle("a1") === None, "a1 was in store")
assert(store.getSingle("a2") != None, "a2 was not in store")
    // At this point a2 was gotten last, so LRU will get rid of a3
store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY)
assert(store.getSingle("a1") != None, "a1 was not in store")
assert(store.getSingle("a2") != None, "a2 was not in store")
assert(store.getSingle("a3") === None, "a3 was in store")
}
test("in-memory LRU storage with serialization") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 1200)
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
val a3 = new Array[Byte](400)
store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY_SER)
store.putSingle("a2", a2, StorageLevel.MEMORY_ONLY_SER)
store.putSingle("a3", a3, StorageLevel.MEMORY_ONLY_SER)
assert(store.getSingle("a2") != None, "a2 was not in store")
assert(store.getSingle("a3") != None, "a3 was not in store")
assert(store.getSingle("a1") === None, "a1 was in store")
assert(store.getSingle("a2") != None, "a2 was not in store")
    // At this point a2 was gotten last, so LRU will get rid of a3
store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY_SER)
assert(store.getSingle("a1") != None, "a1 was not in store")
assert(store.getSingle("a2") != None, "a2 was not in store")
assert(store.getSingle("a3") === None, "a3 was in store")
}
test("in-memory LRU for partitions of same RDD") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 1200)
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
val a3 = new Array[Byte](400)
store.putSingle("rdd_0_1", a1, StorageLevel.MEMORY_ONLY)
store.putSingle("rdd_0_2", a2, StorageLevel.MEMORY_ONLY)
store.putSingle("rdd_0_3", a3, StorageLevel.MEMORY_ONLY)
// Even though we accessed rdd_0_3 last, it should not have replaced partitions 1 and 2
// from the same RDD
assert(store.getSingle("rdd_0_3") === None, "rdd_0_3 was in store")
assert(store.getSingle("rdd_0_2") != None, "rdd_0_2 was not in store")
assert(store.getSingle("rdd_0_1") != None, "rdd_0_1 was not in store")
// Check that rdd_0_3 doesn't replace them even after further accesses
assert(store.getSingle("rdd_0_3") === None, "rdd_0_3 was in store")
assert(store.getSingle("rdd_0_3") === None, "rdd_0_3 was in store")
assert(store.getSingle("rdd_0_3") === None, "rdd_0_3 was in store")
}
test("in-memory LRU for partitions of multiple RDDs") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 1200)
store.putSingle("rdd_0_1", new Array[Byte](400), StorageLevel.MEMORY_ONLY)
store.putSingle("rdd_0_2", new Array[Byte](400), StorageLevel.MEMORY_ONLY)
store.putSingle("rdd_1_1", new Array[Byte](400), StorageLevel.MEMORY_ONLY)
// At this point rdd_1_1 should've replaced rdd_0_1
assert(store.memoryStore.contains("rdd_1_1"), "rdd_1_1 was not in store")
assert(!store.memoryStore.contains("rdd_0_1"), "rdd_0_1 was in store")
assert(store.memoryStore.contains("rdd_0_2"), "rdd_0_2 was not in store")
// Do a get() on rdd_0_2 so that it is the most recently used item
assert(store.getSingle("rdd_0_2") != None, "rdd_0_2 was not in store")
// Put in more partitions from RDD 0; they should replace rdd_1_1
store.putSingle("rdd_0_3", new Array[Byte](400), StorageLevel.MEMORY_ONLY)
store.putSingle("rdd_0_4", new Array[Byte](400), StorageLevel.MEMORY_ONLY)
// Now rdd_1_1 should be dropped to add rdd_0_3, but then rdd_0_2 should *not* be dropped
// when we try to add rdd_0_4.
assert(!store.memoryStore.contains("rdd_1_1"), "rdd_1_1 was in store")
assert(!store.memoryStore.contains("rdd_0_1"), "rdd_0_1 was in store")
assert(!store.memoryStore.contains("rdd_0_4"), "rdd_0_4 was in store")
assert(store.memoryStore.contains("rdd_0_2"), "rdd_0_2 was not in store")
assert(store.memoryStore.contains("rdd_0_3"), "rdd_0_3 was not in store")
}
test("on-disk storage") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 1200)
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
val a3 = new Array[Byte](400)
store.putSingle("a1", a1, StorageLevel.DISK_ONLY)
store.putSingle("a2", a2, StorageLevel.DISK_ONLY)
store.putSingle("a3", a3, StorageLevel.DISK_ONLY)
assert(store.getSingle("a2") != None, "a2 was in store")
assert(store.getSingle("a3") != None, "a3 was in store")
assert(store.getSingle("a1") != None, "a1 was in store")
}
test("disk and memory storage") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 1200)
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
val a3 = new Array[Byte](400)
store.putSingle("a1", a1, StorageLevel.MEMORY_AND_DISK)
store.putSingle("a2", a2, StorageLevel.MEMORY_AND_DISK)
store.putSingle("a3", a3, StorageLevel.MEMORY_AND_DISK)
assert(store.getSingle("a2") != None, "a2 was not in store")
assert(store.getSingle("a3") != None, "a3 was not in store")
assert(store.memoryStore.getValues("a1") == None, "a1 was in memory store")
assert(store.getSingle("a1") != None, "a1 was not in store")
assert(store.memoryStore.getValues("a1") != None, "a1 was not in memory store")
}
test("disk and memory storage with getLocalBytes") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 1200)
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
val a3 = new Array[Byte](400)
store.putSingle("a1", a1, StorageLevel.MEMORY_AND_DISK)
store.putSingle("a2", a2, StorageLevel.MEMORY_AND_DISK)
store.putSingle("a3", a3, StorageLevel.MEMORY_AND_DISK)
assert(store.getLocalBytes("a2") != None, "a2 was not in store")
assert(store.getLocalBytes("a3") != None, "a3 was not in store")
assert(store.memoryStore.getValues("a1") == None, "a1 was in memory store")
assert(store.getLocalBytes("a1") != None, "a1 was not in store")
assert(store.memoryStore.getValues("a1") != None, "a1 was not in memory store")
}
test("disk and memory storage with serialization") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 1200)
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
val a3 = new Array[Byte](400)
store.putSingle("a1", a1, StorageLevel.MEMORY_AND_DISK_SER)
store.putSingle("a2", a2, StorageLevel.MEMORY_AND_DISK_SER)
store.putSingle("a3", a3, StorageLevel.MEMORY_AND_DISK_SER)
assert(store.getSingle("a2") != None, "a2 was not in store")
assert(store.getSingle("a3") != None, "a3 was not in store")
assert(store.memoryStore.getValues("a1") == None, "a1 was in memory store")
assert(store.getSingle("a1") != None, "a1 was not in store")
assert(store.memoryStore.getValues("a1") != None, "a1 was not in memory store")
}
test("disk and memory storage with serialization and getLocalBytes") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 1200)
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
val a3 = new Array[Byte](400)
store.putSingle("a1", a1, StorageLevel.MEMORY_AND_DISK_SER)
store.putSingle("a2", a2, StorageLevel.MEMORY_AND_DISK_SER)
store.putSingle("a3", a3, StorageLevel.MEMORY_AND_DISK_SER)
assert(store.getLocalBytes("a2") != None, "a2 was not in store")
assert(store.getLocalBytes("a3") != None, "a3 was not in store")
assert(store.memoryStore.getValues("a1") == None, "a1 was in memory store")
assert(store.getLocalBytes("a1") != None, "a1 was not in store")
assert(store.memoryStore.getValues("a1") != None, "a1 was not in memory store")
}
test("LRU with mixed storage levels") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 1200)
val a1 = new Array[Byte](400)
val a2 = new Array[Byte](400)
val a3 = new Array[Byte](400)
val a4 = new Array[Byte](400)
// First store a1 and a2, both in memory, and a3, on disk only
store.putSingle("a1", a1, StorageLevel.MEMORY_ONLY_SER)
store.putSingle("a2", a2, StorageLevel.MEMORY_ONLY_SER)
store.putSingle("a3", a3, StorageLevel.DISK_ONLY)
// At this point LRU should not kick in because a3 is only on disk
assert(store.getSingle("a1") != None, "a2 was not in store")
assert(store.getSingle("a2") != None, "a3 was not in store")
assert(store.getSingle("a3") != None, "a1 was not in store")
assert(store.getSingle("a1") != None, "a2 was not in store")
assert(store.getSingle("a2") != None, "a3 was not in store")
assert(store.getSingle("a3") != None, "a1 was not in store")
// Now let's add in a4, which uses both disk and memory; a1 should drop out
store.putSingle("a4", a4, StorageLevel.MEMORY_AND_DISK_SER)
assert(store.getSingle("a1") == None, "a1 was in store")
assert(store.getSingle("a2") != None, "a2 was not in store")
assert(store.getSingle("a3") != None, "a3 was not in store")
assert(store.getSingle("a4") != None, "a4 was not in store")
}
test("in-memory LRU with streams") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 1200)
val list1 = List(new Array[Byte](200), new Array[Byte](200))
val list2 = List(new Array[Byte](200), new Array[Byte](200))
val list3 = List(new Array[Byte](200), new Array[Byte](200))
store.put("list1", list1.iterator, StorageLevel.MEMORY_ONLY, true)
store.put("list2", list2.iterator, StorageLevel.MEMORY_ONLY, true)
store.put("list3", list3.iterator, StorageLevel.MEMORY_ONLY, true)
assert(store.get("list2") != None, "list2 was not in store")
assert(store.get("list2").get.size == 2)
assert(store.get("list3") != None, "list3 was not in store")
assert(store.get("list3").get.size == 2)
assert(store.get("list1") === None, "list1 was in store")
assert(store.get("list2") != None, "list2 was not in store")
assert(store.get("list2").get.size == 2)
    // At this point list2 was gotten last, so LRU will get rid of list3
store.put("list1", list1.iterator, StorageLevel.MEMORY_ONLY, true)
assert(store.get("list1") != None, "list1 was not in store")
assert(store.get("list1").get.size == 2)
assert(store.get("list2") != None, "list2 was not in store")
assert(store.get("list2").get.size == 2)
assert(store.get("list3") === None, "list1 was in store")
}
test("LRU with mixed storage levels and streams") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 1200)
val list1 = List(new Array[Byte](200), new Array[Byte](200))
val list2 = List(new Array[Byte](200), new Array[Byte](200))
val list3 = List(new Array[Byte](200), new Array[Byte](200))
val list4 = List(new Array[Byte](200), new Array[Byte](200))
// First store list1 and list2, both in memory, and list3, on disk only
store.put("list1", list1.iterator, StorageLevel.MEMORY_ONLY_SER, true)
store.put("list2", list2.iterator, StorageLevel.MEMORY_ONLY_SER, true)
store.put("list3", list3.iterator, StorageLevel.DISK_ONLY, true)
// At this point LRU should not kick in because list3 is only on disk
assert(store.get("list1") != None, "list2 was not in store")
assert(store.get("list1").get.size === 2)
assert(store.get("list2") != None, "list3 was not in store")
assert(store.get("list2").get.size === 2)
assert(store.get("list3") != None, "list1 was not in store")
assert(store.get("list3").get.size === 2)
assert(store.get("list1") != None, "list2 was not in store")
assert(store.get("list1").get.size === 2)
assert(store.get("list2") != None, "list3 was not in store")
assert(store.get("list2").get.size === 2)
assert(store.get("list3") != None, "list1 was not in store")
assert(store.get("list3").get.size === 2)
// Now let's add in list4, which uses both disk and memory; list1 should drop out
store.put("list4", list4.iterator, StorageLevel.MEMORY_AND_DISK_SER, true)
assert(store.get("list1") === None, "list1 was in store")
assert(store.get("list2") != None, "list3 was not in store")
assert(store.get("list2").get.size === 2)
assert(store.get("list3") != None, "list1 was not in store")
assert(store.get("list3").get.size === 2)
assert(store.get("list4") != None, "list4 was not in store")
assert(store.get("list4").get.size === 2)
}
test("negative byte values in ByteBufferInputStream") {
val buffer = ByteBuffer.wrap(Array[Int](254, 255, 0, 1, 2).map(_.toByte).toArray)
val stream = new ByteBufferInputStream(buffer)
val temp = new Array[Byte](10)
assert(stream.read() === 254, "unexpected byte read")
assert(stream.read() === 255, "unexpected byte read")
assert(stream.read() === 0, "unexpected byte read")
assert(stream.read(temp, 0, temp.length) === 2, "unexpected number of bytes read")
assert(stream.read() === -1, "end of stream not signalled")
assert(stream.read(temp, 0, temp.length) === -1, "end of stream not signalled")
}
test("overly large block") {
store = new BlockManager("<driver>", actorSystem, master, serializer, 500)
store.putSingle("a1", new Array[Byte](1000), StorageLevel.MEMORY_ONLY)
assert(store.getSingle("a1") === None, "a1 was in store")
store.putSingle("a2", new Array[Byte](1000), StorageLevel.MEMORY_AND_DISK)
assert(store.memoryStore.getValues("a2") === None, "a2 was in memory store")
assert(store.getSingle("a2") != None, "a2 was not in store")
}
test("block compression") {
try {
System.setProperty("spark.shuffle.compress", "true")
store = new BlockManager("exec1", actorSystem, master, serializer, 2000)
store.putSingle("shuffle_0_0_0", new Array[Byte](1000), StorageLevel.MEMORY_ONLY_SER)
assert(store.memoryStore.getSize("shuffle_0_0_0") <= 100, "shuffle_0_0_0 was not compressed")
store.stop()
store = null
System.setProperty("spark.shuffle.compress", "false")
store = new BlockManager("exec2", actorSystem, master, serializer, 2000)
store.putSingle("shuffle_0_0_0", new Array[Byte](1000), StorageLevel.MEMORY_ONLY_SER)
assert(store.memoryStore.getSize("shuffle_0_0_0") >= 1000, "shuffle_0_0_0 was compressed")
store.stop()
store = null
System.setProperty("spark.broadcast.compress", "true")
store = new BlockManager("exec3", actorSystem, master, serializer, 2000)
store.putSingle("broadcast_0", new Array[Byte](1000), StorageLevel.MEMORY_ONLY_SER)
assert(store.memoryStore.getSize("broadcast_0") <= 100, "broadcast_0 was not compressed")
store.stop()
store = null
System.setProperty("spark.broadcast.compress", "false")
store = new BlockManager("exec4", actorSystem, master, serializer, 2000)
store.putSingle("broadcast_0", new Array[Byte](1000), StorageLevel.MEMORY_ONLY_SER)
assert(store.memoryStore.getSize("broadcast_0") >= 1000, "broadcast_0 was compressed")
store.stop()
store = null
System.setProperty("spark.rdd.compress", "true")
store = new BlockManager("exec5", actorSystem, master, serializer, 2000)
store.putSingle("rdd_0_0", new Array[Byte](1000), StorageLevel.MEMORY_ONLY_SER)
assert(store.memoryStore.getSize("rdd_0_0") <= 100, "rdd_0_0 was not compressed")
store.stop()
store = null
System.setProperty("spark.rdd.compress", "false")
store = new BlockManager("exec6", actorSystem, master, serializer, 2000)
store.putSingle("rdd_0_0", new Array[Byte](1000), StorageLevel.MEMORY_ONLY_SER)
assert(store.memoryStore.getSize("rdd_0_0") >= 1000, "rdd_0_0 was compressed")
store.stop()
store = null
// Check that any other block types are also kept uncompressed
store = new BlockManager("exec7", actorSystem, master, serializer, 2000)
store.putSingle("other_block", new Array[Byte](1000), StorageLevel.MEMORY_ONLY)
assert(store.memoryStore.getSize("other_block") >= 1000, "other_block was compressed")
store.stop()
store = null
} finally {
System.clearProperty("spark.shuffle.compress")
System.clearProperty("spark.broadcast.compress")
System.clearProperty("spark.rdd.compress")
}
}
test("block store put failure") {
// Use Java serializer so we can create an unserializable error.
store = new BlockManager("<driver>", actorSystem, master, new JavaSerializer, 1200)
// The put should fail since a1 is not serializable.
class UnserializableClass
val a1 = new UnserializableClass
intercept[java.io.NotSerializableException] {
store.putSingle("a1", a1, StorageLevel.DISK_ONLY)
}
// Make sure get a1 doesn't hang and returns None.
failAfter(1 second) {
assert(store.getSingle("a1") == None, "a1 should not be in store")
}
}
}
| koeninger/spark | core/src/test/scala/spark/storage/BlockManagerSuite.scala | Scala | bsd-3-clause | 28,670 |
/*
* Copyright (c) 2011-15 Miles Sabin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package shapeless.ops
import shapeless._
import labelled._
object maps {
/**
* Type class supporting type safe conversion of Map to Records.
*/
trait FromMap[R <: HList] extends Serializable {
def apply[K, V](m: Map[K, V]): Option[R]
}
/**
* `FromMap` type class instances.
*/
object FromMap {
def apply[R <: HList](implicit fm: FromMap[R]) = fm
implicit def hnilFromMap[T]: FromMap[HNil] =
new FromMap[HNil] {
def apply[K, V](m: Map[K, V]): Option[HNil] = Some(HNil)
}
implicit def hlistFromMap[K0, V0, T <: HList]
(implicit wk: Witness.Aux[K0], tv: Typeable[V0], fmt: FromMap[T]): FromMap[FieldType[K0, V0] :: T] =
new FromMap[FieldType[K0, V0] :: T] {
def apply[K, V](m: Map[K, V]): Option[FieldType[K0, V0] :: T] = {
for {
value <- m.get(wk.value.asInstanceOf[K])
typed <- tv.cast(value)
rest <- fmt(m)
} yield field[K0](typed) :: rest
}
}
}
}
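/*
 * Hypothetical usage sketch (not part of the original file): assuming the Record type syntax
 * from shapeless.record, a FromMap instance turns an untyped Map into a typed record, yielding
 * None when a key is missing or a value has an unexpected runtime type.
 */
private object FromMapUsageSketch {
  import record._
  import maps._

  type PersonRec = Record.`'name -> String, 'age -> Int`.T

  // Both keys present with values of the expected runtime types: Some(record).
  val person: Option[PersonRec] = FromMap[PersonRec].apply(Map('name -> "Ada", 'age -> 36))

  // The 'age key is absent, so the conversion yields None.
  val incomplete: Option[PersonRec] = FromMap[PersonRec].apply(Map[Symbol, Any]('name -> "Ada"))
}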
| liff/shapeless | core/src/main/scala/shapeless/ops/maps.scala | Scala | apache-2.0 | 1,625 |
/*
* Copyright 2009-2010 LinkedIn, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*/
package com.linkedin.norbert
package network
import cluster.Node
/**
* A component which provides a network server.
*/
trait NetworkServerComponent {
val networkServer: NetworkServer
/**
* A <code>NetworkServer</code> listens for incoming messages and processes them using the handler
* registered for that message type with the <code>MessageRegistry</code>.
*/
trait NetworkServer {
/**
* Binds the network server to a port and marks the node associated with this server available.
*/
def bind: Unit
/**
* Binds the network server to a port and, if markAvailable is true, marks the node associated with this
* server available.
*
     * @param markAvailable specifies whether or not to mark the node associated with this server available after
* binding
*/
def bind(markAvailable: Boolean): Unit
/**
* Retrieves the node associated with this server.
*/
def currentNode: Node
/**
* Marks the node associated with this server available. If you call <code>bind(false)</code> you must, eventually,
* call this method before the cluster will start sending this server requests.
*/
def markAvailable: Unit
/**
* Shuts down the <code>NetworkServer</code>. The server will disconnect from the cluster, unbind from the port,
* wait for all currently processing requests to finish, close the sockets that have been accepted and any opened
* client sockets.
*/
def shutdown: Unit
}
}
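/*
 * Hypothetical usage sketch (not part of the original file): given a concrete cake-pattern
 * component that supplies networkServer, a caller might bind without advertising, mark the
 * node available once it is ready, and shut down when done. The ordering is illustrative only.
 */
private[network] object NetworkServerComponentSketch {
  def lifecycle(component: NetworkServerComponent): Unit = {
    val server = component.networkServer
    server.bind(markAvailable = false) // bind the port but stay invisible to the cluster
    server.markAvailable               // now start receiving requests
    server.shutdown                    // disconnect, unbind and drain in-flight requests
  }
}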
| thesiddharth/norbert | network/src/main/scala/com/linkedin/norbert/network/NetworkServerComponent.scala | Scala | apache-2.0 | 2,121 |
package fr.univ.nantes.roomanager
import fr.univ.nantes.roomanager.Origine.Origine
import fr.univ.nantes.roomanager.Titre.Titre
class Demandeur(var no_dem: Int,
var nom: String,
var adresse: Adresse,
var origine: Origine,
var titre: Titre) {
var reservations: Set[Reservation] = Set()
}
| P1erreGaultier/workspace | Roomanager/src/scala/fr/univ/nantes/roomanager/Demandeur.scala | Scala | unlicense | 356 |
/*
* Copyright (c) 2013
*/
package controllers
import play.api.data.Form
import play.api.libs.json._
import play.api.libs.functional.syntax._
abstract class JsResponse(status: String, message: String, data: JsValue = JsString("")) {
def getStatus = status
def getMessage = message
def getData = data
}
case class JsResponseOk(message: String, data: JsValue = JsString(""))
extends JsResponse(JsonResponseType.ok.toString, message, data)
case class JsResponseError[T](message: String, errors: Option[Form[T]] = None)
extends JsResponse(errors.map(_.forField("errorType")(_.value.getOrElse(
JsonResponseType.error.toString))).getOrElse(JsonResponseType.error.toString),
message, errors.map(_.errorsAsJson).getOrElse(JsString(""))) {
def this() = this("Unknown error.")
}
object JsonResponseType extends Enumeration {
val ok = Value
val error = Value
}
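/*
 * Hypothetical usage sketch (not part of the original file): the two typical response shapes,
 * a success carrying a JSON payload and an error built from a failed form. Field names and
 * messages are illustrative only.
 */
private object JsResponseSketch {
  def created(id: Long): JsResponse = JsResponseOk("User created.", Json.obj("id" -> id))
  def invalid[T](badForm: Form[T]): JsResponse = JsResponseError("Validation failed.", Some(badForm))
}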
| kompot/play2sec | test/controllers/JsResponse.scala | Scala | apache-2.0 | 890 |
package com.avsystem.commons
package redis.util
import java.io.{DataInputStream, DataOutputStream}
import akka.util._
import com.avsystem.commons.serialization.{GenCodec, StreamInput, StreamOutput}
/**
* Author: ghik
* Created: 27/09/16.
*/
object ByteStringSerialization {
def write[T: GenCodec](value: T): ByteString = {
val builder = new ByteStringBuilder
GenCodec.write(new StreamOutput(new DataOutputStream(builder.asOutputStream)), value)
builder.result()
}
def read[T: GenCodec](bytes: ByteString): T =
GenCodec.read[T](new StreamInput(new DataInputStream(bytes.iterator.asInputStream)))
}
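/*
 * Hypothetical round-trip sketch (not part of the original file): writes a value with an
 * implicitly available GenCodec and reads it back from the resulting ByteString. It assumes
 * GenCodec's default instances cover Map[String, Int].
 */
private[util] object ByteStringSerializationSketch {
  val payload = Map("retries" -> 3, "timeoutMillis" -> 500)
  val bytes: ByteString = ByteStringSerialization.write(payload)
  val restored: Map[String, Int] = ByteStringSerialization.read[Map[String, Int]](bytes)
  // restored == payload
}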
| AVSystem/scala-commons | commons-redis/src/main/scala/com/avsystem/commons/redis/util/ByteStringSerialization.scala | Scala | mit | 629 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.religions
import io.truthencode.ddo.model.item.weapon.{FavoredWeapon, WeaponCategory}
import io.truthencode.ddo.model.worlds.Eberron
/**
* Created by adarr on 4/9/2017.
*/
trait Amaunator extends Eberron with FavoredWeapon {
override val favoredWeapon: WeaponCategory = WeaponCategory.HeavyMace
}
| adarro/ddo-calc | subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/religions/Amaunator.scala | Scala | apache-2.0 | 1,008 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.models
import com.intel.analytics.bigdl.dllib.nn.{ClassNLLCriterion, GradientChecker}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.utils.RandomGenerator._
import org.scalatest.{BeforeAndAfter, FlatSpec, Matchers}
import scala.util.Random
@com.intel.analytics.bigdl.tags.Parallel
class ModelGraientCheckSpec extends FlatSpec with BeforeAndAfter with Matchers {
private val checkModel = true
"GoogleNet_v1 model in batch mode" should "be good in gradient check for input" in {
val seed = 100
RNG.setSeed(seed)
val start = System.nanoTime()
val input = Tensor[Double](4, 3, 224, 224).apply1(e => Random.nextDouble())
val model = GoogleNet_v1_test(1000)
model.zeroGradParameters()
val checker = new GradientChecker(1e-4).setType(checkModel)
checker.checkLayer(model, input, 1e-2) should be(true)
val scalaTime = System.nanoTime() - start
println("Test Scala time : " + scalaTime / 1e9 + " s")
}
"GoogleNet_v1 model in batch mode" should "be good in gradient check for weight" in {
val seed = 100
RNG.setSeed(seed)
val start = System.nanoTime()
val input = Tensor[Double](4, 3, 224, 224).apply1(e => Random.nextDouble())
val model = GoogleNet_v1_test(1000)
model.zeroGradParameters()
val checker = new GradientChecker(1e-4).setType(checkModel)
checker.checkWeight(model, input, 1e-2) should be(true)
val scalaTime = System.nanoTime() - start
println("Test Scala time : " + scalaTime / 1e9 + " s")
}
"GoogleNet_v1 model" should "init right" in {
val seed = 100
RNG.setSeed(seed)
Random.setSeed(seed)
val input = Tensor[Double](4, 3, 224, 224).apply1(e => Random.nextDouble())
val labels = Tensor[Double](4).apply1(e => Random.nextInt(1000))
val criterion = new ClassNLLCriterion[Double]()
val model = GoogleNet_v1_test(1000)
val output = model.forward(input)
val loss = criterion.forward(output, labels)
loss should be (6.905944392665487)
}
"GoogleNet_v2 model in batch mode" should "be good in gradient check for input" in {
val seed = 100
RNG.setSeed(seed)
val start = System.nanoTime()
val input = Tensor[Double](4, 3, 224, 224).apply1(e => Random.nextDouble())
val model = GoogleNet_v2_test(1000)
model.zeroGradParameters()
val checker = new GradientChecker(1e-4).setType(checkModel)
checker.checkLayer(model, input, 1e-2) should be(true)
val scalaTime = System.nanoTime() - start
println("Test Scala time : " + scalaTime / 1e9 + " s")
}
"GoogleNet_v2 model in batch mode" should "be good in gradient check for weight" in {
val seed = 100
RNG.setSeed(seed)
val start = System.nanoTime()
val input = Tensor[Double](4, 3, 224, 224).apply1(e => Random.nextDouble())
val model = GoogleNet_v2_test.applyNoBn(1000)
model.zeroGradParameters()
val checker = new GradientChecker(1e-4).setType(checkModel)
checker.checkWeight(model, input, 1e-2) should be(true)
val scalaTime = System.nanoTime() - start
println("Test Scala time : " + scalaTime / 1e9 + " s")
}
"VggLike model in batch mode" should "be good in gradient check for input" in {
val seed = 100
RNG.setSeed(seed)
val start = System.nanoTime()
val input = Tensor[Double](8, 3, 32, 32).apply1(e => Random.nextDouble())
val model = VggLike_test(10)
model.zeroGradParameters()
val checker = new GradientChecker(1e-4).setType(checkModel)
checker.checkLayer[Double](model, input, 1e-2) should be(true)
val scalaTime = System.nanoTime() - start
println("Test Scala time : " + scalaTime / 1e9 + " s")
}
"VggLike model in batch mode" should "be good in gradient check for weight" in {
val seed = 100
RNG.setSeed(seed)
val start = System.nanoTime()
val input = Tensor[Double](8, 3, 32, 32).apply1(e => Random.nextDouble())
val model = VggLike_test(10)
model.zeroGradParameters()
val checker = new GradientChecker(1e-4).setType(checkModel)
checker.checkWeight[Double](model, input, 1e-2) should be(true)
val scalaTime = System.nanoTime() - start
println("Test Scala time : " + scalaTime / 1e9 + " s")
}
"LeNet model in batch mode" should "be good in gradient check for input" in {
val seed = 100
RNG.setSeed(seed)
val start = System.nanoTime()
val input = Tensor[Double](8, 1, 28, 28).apply1(e => Random.nextDouble())
val model = LeNet5_test(10)
model.zeroGradParameters()
val checker = new GradientChecker(1e-4).setType(checkModel)
checker.checkLayer[Double](model, input, 1e-2) should be(true)
val scalaTime = System.nanoTime() - start
println("Test Scala time : " + scalaTime / 1e9 + " s")
}
"LeNet model in batch mode" should "be good in gradient check for weight" in {
val seed = 100
RNG.setSeed(seed)
val start = System.nanoTime()
val input = Tensor[Double](8, 1, 28, 28).apply1(e => Random.nextDouble())
val model = LeNet5_test(10)
model.zeroGradParameters()
val checker = new GradientChecker(1e-4).setType(checkModel)
checker.checkWeight(model, input, 1e-2) should be(true)
val scalaTime = System.nanoTime() - start
println("Test Scala time : " + scalaTime / 1e9 + " s")
}
"CNN model in batch mode" should "be good in gradient check for input" in {
val seed = 100
RNG.setSeed(seed)
val start = System.nanoTime()
val input = Tensor[Double](8, 1, 28, 28).apply1(e => Random.nextDouble())
val model = SimpleCNN_test(10)
model.zeroGradParameters()
val checker = new GradientChecker(1e-4).setType(checkModel)
checker.checkLayer[Double](model, input, 1e-2) should be(true)
val scalaTime = System.nanoTime() - start
println("Test Scala time : " + scalaTime / 1e9 + " s")
}
"CNN model in batch mode" should "be good in gradient check for weight" in {
val seed = 100
RNG.setSeed(seed)
val start = System.nanoTime()
val input = Tensor[Double](8, 1, 28, 28).apply1(e => Random.nextDouble())
val model = SimpleCNN_test(10)
model.zeroGradParameters()
val checker = new GradientChecker(1e-4).setType(checkModel)
checker.checkWeight(model, input, 1e-2) should be(true)
val scalaTime = System.nanoTime() - start
println("Test Scala time : " + scalaTime / 1e9 + " s")
}
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/models/ModelGraientCheckSpec.scala | Scala | apache-2.0 | 7,003 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.clustering
import breeze.linalg.{DenseVector => BDV}
import org.apache.hadoop.fs.Path
import org.apache.spark.annotation.{Experimental, Since}
import org.apache.spark.ml.{Estimator, Model}
import org.apache.spark.ml.impl.Utils.EPSILON
import org.apache.spark.ml.linalg._
import org.apache.spark.ml.param._
import org.apache.spark.ml.param.shared._
import org.apache.spark.ml.stat.distribution.MultivariateGaussian
import org.apache.spark.ml.util._
import org.apache.spark.mllib.clustering.{GaussianMixture => MLlibGM}
import org.apache.spark.mllib.linalg.{Matrices => OldMatrices, Matrix => OldMatrix,
Vector => OldVector, Vectors => OldVectors}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, Dataset, Row, SparkSession}
import org.apache.spark.sql.functions.{col, udf}
import org.apache.spark.sql.types.{IntegerType, StructType}
/**
* Common params for GaussianMixture and GaussianMixtureModel
*/
private[clustering] trait GaussianMixtureParams extends Params with HasMaxIter with HasFeaturesCol
with HasSeed with HasPredictionCol with HasProbabilityCol with HasTol {
/**
* Number of independent Gaussians in the mixture model. Must be > 1. Default: 2.
* @group param
*/
@Since("2.0.0")
final val k = new IntParam(this, "k", "Number of independent Gaussians in the mixture model. " +
"Must be > 1.", ParamValidators.gt(1))
/** @group getParam */
@Since("2.0.0")
def getK: Int = $(k)
/**
* Validates and transforms the input schema.
* @param schema input schema
* @return output schema
*/
protected def validateAndTransformSchema(schema: StructType): StructType = {
SchemaUtils.checkColumnType(schema, $(featuresCol), new VectorUDT)
SchemaUtils.appendColumn(schema, $(predictionCol), IntegerType)
SchemaUtils.appendColumn(schema, $(probabilityCol), new VectorUDT)
}
}
/**
* :: Experimental ::
*
* Multivariate Gaussian Mixture Model (GMM) consisting of k Gaussians, where points
* are drawn from each Gaussian i with probability weights(i).
*
* @param weights Weight for each Gaussian distribution in the mixture.
* This is a multinomial probability distribution over the k Gaussians,
* where weights(i) is the weight for Gaussian i, and weights sum to 1.
* @param gaussians Array of [[MultivariateGaussian]] where gaussians(i) represents
* the Multivariate Gaussian (Normal) Distribution for Gaussian i
*/
@Since("2.0.0")
@Experimental
class GaussianMixtureModel private[ml] (
@Since("2.0.0") override val uid: String,
@Since("2.0.0") val weights: Array[Double],
@Since("2.0.0") val gaussians: Array[MultivariateGaussian])
extends Model[GaussianMixtureModel] with GaussianMixtureParams with MLWritable {
@Since("2.0.0")
override def copy(extra: ParamMap): GaussianMixtureModel = {
val copied = new GaussianMixtureModel(uid, weights, gaussians)
copyValues(copied, extra).setParent(this.parent)
}
@Since("2.0.0")
override def transform(dataset: Dataset[_]): DataFrame = {
transformSchema(dataset.schema, logging = true)
val predUDF = udf((vector: Vector) => predict(vector))
val probUDF = udf((vector: Vector) => predictProbability(vector))
dataset.withColumn($(predictionCol), predUDF(col($(featuresCol))))
.withColumn($(probabilityCol), probUDF(col($(featuresCol))))
}
@Since("2.0.0")
override def transformSchema(schema: StructType): StructType = {
validateAndTransformSchema(schema)
}
private[clustering] def predict(features: Vector): Int = {
val r = predictProbability(features)
r.argmax
}
private[clustering] def predictProbability(features: Vector): Vector = {
val probs: Array[Double] =
GaussianMixtureModel.computeProbabilities(features.asBreeze.toDenseVector, gaussians, weights)
Vectors.dense(probs)
}
/**
* Retrieve Gaussian distributions as a DataFrame.
* Each row represents a Gaussian Distribution.
* Two columns are defined: mean and cov.
* Schema:
* {{{
* root
* |-- mean: vector (nullable = true)
* |-- cov: matrix (nullable = true)
* }}}
*/
@Since("2.0.0")
def gaussiansDF: DataFrame = {
val modelGaussians = gaussians.map { gaussian =>
(OldVectors.fromML(gaussian.mean), OldMatrices.fromML(gaussian.cov))
}
SparkSession.builder().getOrCreate().createDataFrame(modelGaussians).toDF("mean", "cov")
}
/**
* Returns a [[org.apache.spark.ml.util.MLWriter]] instance for this ML instance.
*
* For [[GaussianMixtureModel]], this does NOT currently save the training [[summary]].
* An option to save [[summary]] may be added in the future.
*
*/
@Since("2.0.0")
override def write: MLWriter = new GaussianMixtureModel.GaussianMixtureModelWriter(this)
private var trainingSummary: Option[GaussianMixtureSummary] = None
private[clustering] def setSummary(summary: GaussianMixtureSummary): this.type = {
this.trainingSummary = Some(summary)
this
}
/**
* Return true if there exists summary of model.
*/
@Since("2.0.0")
def hasSummary: Boolean = trainingSummary.nonEmpty
/**
* Gets summary of model on training set. An exception is
* thrown if `trainingSummary == None`.
*/
@Since("2.0.0")
def summary: GaussianMixtureSummary = trainingSummary.getOrElse {
throw new RuntimeException(
s"No training summary available for the ${this.getClass.getSimpleName}")
}
}
@Since("2.0.0")
object GaussianMixtureModel extends MLReadable[GaussianMixtureModel] {
@Since("2.0.0")
override def read: MLReader[GaussianMixtureModel] = new GaussianMixtureModelReader
@Since("2.0.0")
override def load(path: String): GaussianMixtureModel = super.load(path)
/** [[MLWriter]] instance for [[GaussianMixtureModel]] */
private[GaussianMixtureModel] class GaussianMixtureModelWriter(
instance: GaussianMixtureModel) extends MLWriter {
private case class Data(weights: Array[Double], mus: Array[OldVector], sigmas: Array[OldMatrix])
override protected def saveImpl(path: String): Unit = {
// Save metadata and Params
DefaultParamsWriter.saveMetadata(instance, path, sc)
// Save model data: weights and gaussians
val weights = instance.weights
val gaussians = instance.gaussians
val mus = gaussians.map(g => OldVectors.fromML(g.mean))
val sigmas = gaussians.map(c => OldMatrices.fromML(c.cov))
val data = Data(weights, mus, sigmas)
val dataPath = new Path(path, "data").toString
sparkSession.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath)
}
}
private class GaussianMixtureModelReader extends MLReader[GaussianMixtureModel] {
/** Checked against metadata when loading model */
private val className = classOf[GaussianMixtureModel].getName
override def load(path: String): GaussianMixtureModel = {
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
val dataPath = new Path(path, "data").toString
val row = sparkSession.read.parquet(dataPath).select("weights", "mus", "sigmas").head()
val weights = row.getSeq[Double](0).toArray
val mus = row.getSeq[OldVector](1).toArray
val sigmas = row.getSeq[OldMatrix](2).toArray
require(mus.length == sigmas.length, "Length of Mu and Sigma array must match")
require(mus.length == weights.length, "Length of weight and Gaussian array must match")
val gaussians = mus.zip(sigmas).map {
case (mu, sigma) =>
new MultivariateGaussian(mu.asML, sigma.asML)
}
val model = new GaussianMixtureModel(metadata.uid, weights, gaussians)
DefaultParamsReader.getAndSetParams(model, metadata)
model
}
}
/**
* Compute the probability (partial assignment) for each cluster for the given data point.
* @param features Data point
* @param dists Gaussians for model
* @param weights Weights for each Gaussian
* @return Probability (partial assignment) for each of the k clusters
*/
private[clustering]
def computeProbabilities(
features: BDV[Double],
dists: Array[MultivariateGaussian],
weights: Array[Double]): Array[Double] = {
val p = weights.zip(dists).map {
case (weight, dist) => EPSILON + weight * dist.pdf(features)
}
val pSum = p.sum
var i = 0
while (i < weights.length) {
p(i) /= pSum
i += 1
}
p
}
}
/**
* :: Experimental ::
* Gaussian Mixture clustering.
*
* This class performs expectation maximization for multivariate Gaussian
* Mixture Models (GMMs). A GMM represents a composite distribution of
* independent Gaussian distributions with associated "mixing" weights
* specifying each's contribution to the composite.
*
* Given a set of sample points, this class will maximize the log-likelihood
* for a mixture of k Gaussians, iterating until the log-likelihood changes by
* less than convergenceTol, or until it has reached the max number of iterations.
* While this process is generally guaranteed to converge, it is not guaranteed
* to find a global optimum.
*
* Note: For high-dimensional data (with many features), this algorithm may perform poorly.
* This is due to high-dimensional data (a) making it difficult to cluster at all (based
* on statistical/theoretical arguments) and (b) numerical issues with Gaussian distributions.
*/
@Since("2.0.0")
@Experimental
class GaussianMixture @Since("2.0.0") (
@Since("2.0.0") override val uid: String)
extends Estimator[GaussianMixtureModel] with GaussianMixtureParams with DefaultParamsWritable {
setDefault(
k -> 2,
maxIter -> 100,
tol -> 0.01)
@Since("2.0.0")
override def copy(extra: ParamMap): GaussianMixture = defaultCopy(extra)
@Since("2.0.0")
def this() = this(Identifiable.randomUID("GaussianMixture"))
/** @group setParam */
@Since("2.0.0")
def setFeaturesCol(value: String): this.type = set(featuresCol, value)
/** @group setParam */
@Since("2.0.0")
def setPredictionCol(value: String): this.type = set(predictionCol, value)
/** @group setParam */
@Since("2.0.0")
def setProbabilityCol(value: String): this.type = set(probabilityCol, value)
/** @group setParam */
@Since("2.0.0")
def setK(value: Int): this.type = set(k, value)
/** @group setParam */
@Since("2.0.0")
def setMaxIter(value: Int): this.type = set(maxIter, value)
/** @group setParam */
@Since("2.0.0")
def setTol(value: Double): this.type = set(tol, value)
/** @group setParam */
@Since("2.0.0")
def setSeed(value: Long): this.type = set(seed, value)
@Since("2.0.0")
override def fit(dataset: Dataset[_]): GaussianMixtureModel = {
transformSchema(dataset.schema, logging = true)
val rdd: RDD[OldVector] = dataset.select(col($(featuresCol))).rdd.map {
case Row(point: Vector) => OldVectors.fromML(point)
}
val algo = new MLlibGM()
.setK($(k))
.setMaxIterations($(maxIter))
.setSeed($(seed))
.setConvergenceTol($(tol))
val parentModel = algo.run(rdd)
val gaussians = parentModel.gaussians.map { case g =>
new MultivariateGaussian(g.mu.asML, g.sigma.asML)
}
val model = copyValues(new GaussianMixtureModel(uid, parentModel.weights, gaussians))
.setParent(this)
val summary = new GaussianMixtureSummary(model.transform(dataset),
$(predictionCol), $(probabilityCol), $(featuresCol), $(k))
model.setSummary(summary)
}
@Since("2.0.0")
override def transformSchema(schema: StructType): StructType = {
validateAndTransformSchema(schema)
}
}
@Since("2.0.0")
object GaussianMixture extends DefaultParamsReadable[GaussianMixture] {
@Since("2.0.0")
override def load(path: String): GaussianMixture = super.load(path)
}
/**
* :: Experimental ::
* Summary of GaussianMixture.
*
* @param predictions [[DataFrame]] produced by [[GaussianMixtureModel.transform()]]
* @param predictionCol Name for column of predicted clusters in `predictions`
* @param probabilityCol Name for column of predicted probability of each cluster in `predictions`
* @param featuresCol Name for column of features in `predictions`
* @param k Number of clusters
*/
@Since("2.0.0")
@Experimental
class GaussianMixtureSummary private[clustering] (
@Since("2.0.0") @transient val predictions: DataFrame,
@Since("2.0.0") val predictionCol: String,
@Since("2.0.0") val probabilityCol: String,
@Since("2.0.0") val featuresCol: String,
@Since("2.0.0") val k: Int) extends Serializable {
/**
* Cluster centers of the transformed data.
*/
@Since("2.0.0")
@transient lazy val cluster: DataFrame = predictions.select(predictionCol)
/**
* Probability of each cluster.
*/
@Since("2.0.0")
@transient lazy val probability: DataFrame = predictions.select(probabilityCol)
/**
* Size of (number of data points in) each cluster.
*/
@Since("2.0.0")
lazy val clusterSizes: Array[Long] = {
val sizes = Array.fill[Long](k)(0)
cluster.groupBy(predictionCol).count().select(predictionCol, "count").collect().foreach {
case Row(cluster: Int, count: Long) => sizes(cluster) = count
}
sizes
}
}
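/*
 * Hypothetical usage sketch (not part of the original file): fits a three-component mixture
 * on a dataset that already has a vector "features" column and reads a few results back.
 * The parameter values and column name are illustrative only.
 */
private[clustering] object GaussianMixtureUsageSketch {
  def fitExample(dataset: Dataset[_]): GaussianMixtureModel = {
    val gmm = new GaussianMixture()
      .setFeaturesCol("features")
      .setK(3)
      .setMaxIter(50)
      .setTol(0.001)
      .setSeed(1L)
    val model = gmm.fit(dataset)
    val weights: Array[Double] = model.weights          // mixing weights, sum to 1
    val sizes: Array[Long] = model.summary.clusterSizes // number of points per cluster
    model
  }
}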
| gioenn/xSpark | mllib/src/main/scala/org/apache/spark/ml/clustering/GaussianMixture.scala | Scala | apache-2.0 | 14,061 |
package io.youi.material.impl
import scala.scalajs.js
@js.native
trait MDCTopAppBarImplementation extends js.Object {
}
| outr/youi | gui/src/main/scala/io/youi/material/impl/MDCTopAppBarImplementation.scala | Scala | mit | 122 |
package pl.combosolutions
object TestTag {
val IntegrationTest = "integration"
val FunctionalTest = "functional"
val UnitTest = "unit"
val DisabledTest = "disabled"
}
| sriramkp/test01 | modules/common/src/test/scala/pl/combosolutions/TestTag.scala | Scala | mit | 176 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.internal
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.internal.SQLConf.SQLConfigBuilder
import org.apache.carbondata.core.constants.{CarbonCommonConstants, CarbonLoadOptionConstants}
import org.apache.carbondata.core.util.CarbonProperties
/**
* To initialize dynamic values default param
*/
class CarbonSQLConf(sparkSession: SparkSession) {
val carbonProperties = CarbonProperties.getInstance()
/**
* To initialize dynamic param defaults along with usage docs
*/
def addDefaultCarbonParams(): Unit = {
val ENABLE_UNSAFE_SORT =
SQLConfigBuilder(CarbonCommonConstants.ENABLE_UNSAFE_SORT)
.doc("To enable/ disable unsafe sort.")
.booleanConf
.createWithDefault(carbonProperties.getProperty(CarbonCommonConstants.ENABLE_UNSAFE_SORT,
CarbonCommonConstants.ENABLE_UNSAFE_SORT_DEFAULT).toBoolean)
val CARBON_CUSTOM_BLOCK_DISTRIBUTION =
SQLConfigBuilder(CarbonCommonConstants.CARBON_CUSTOM_BLOCK_DISTRIBUTION)
.doc("To enable/ disable carbon custom block distribution.")
.booleanConf
.createWithDefault(carbonProperties
.getProperty(CarbonCommonConstants.CARBON_CUSTOM_BLOCK_DISTRIBUTION,
CarbonCommonConstants.CARBON_CUSTOM_BLOCK_DISTRIBUTION_DEFAULT).toBoolean)
val BAD_RECORDS_LOGGER_ENABLE =
SQLConfigBuilder(CarbonLoadOptionConstants.CARBON_OPTIONS_BAD_RECORDS_LOGGER_ENABLE)
.doc("To enable/ disable carbon bad record logger.")
.booleanConf
.createWithDefault(CarbonLoadOptionConstants
.CARBON_OPTIONS_BAD_RECORDS_LOGGER_ENABLE_DEFAULT.toBoolean)
val BAD_RECORDS_ACTION =
SQLConfigBuilder(CarbonLoadOptionConstants.CARBON_OPTIONS_BAD_RECORDS_ACTION)
.doc("To configure the bad records action.")
.stringConf
.createWithDefault(carbonProperties
.getProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION,
CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION_DEFAULT))
val IS_EMPTY_DATA_BAD_RECORD =
SQLConfigBuilder(CarbonLoadOptionConstants.CARBON_OPTIONS_IS_EMPTY_DATA_BAD_RECORD)
.doc("Property to decide weather empty data to be considered bad/ good record.")
.booleanConf
.createWithDefault(CarbonLoadOptionConstants.CARBON_OPTIONS_IS_EMPTY_DATA_BAD_RECORD_DEFAULT
.toBoolean)
val SORT_SCOPE =
SQLConfigBuilder(CarbonLoadOptionConstants.CARBON_OPTIONS_SORT_SCOPE)
.doc("Property to specify sort scope.")
.stringConf
.createWithDefault(carbonProperties.getProperty(CarbonCommonConstants.LOAD_SORT_SCOPE,
CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT))
val BATCH_SORT_SIZE_INMB =
SQLConfigBuilder(CarbonLoadOptionConstants.CARBON_OPTIONS_BATCH_SORT_SIZE_INMB)
.doc("Property to specify batch sort size in MB.")
.stringConf
.createWithDefault(carbonProperties
.getProperty(CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB,
CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB_DEFAULT))
val SINGLE_PASS =
SQLConfigBuilder(CarbonLoadOptionConstants.CARBON_OPTIONS_SINGLE_PASS)
.doc("Property to enable/disable single_pass.")
.booleanConf
.createWithDefault(CarbonLoadOptionConstants.CARBON_OPTIONS_SINGLE_PASS_DEFAULT.toBoolean)
val BAD_RECORD_PATH =
SQLConfigBuilder(CarbonLoadOptionConstants.CARBON_OPTIONS_BAD_RECORD_PATH)
.doc("Property to configure the bad record location.")
.stringConf
.createWithDefault(carbonProperties.getProperty(CarbonCommonConstants.CARBON_BADRECORDS_LOC,
CarbonCommonConstants.CARBON_BADRECORDS_LOC_DEFAULT_VAL))
val GLOBAL_SORT_PARTITIONS =
SQLConfigBuilder(CarbonLoadOptionConstants.CARBON_OPTIONS_GLOBAL_SORT_PARTITIONS)
.doc("Property to configure the global sort partitions.")
.stringConf
.createWithDefault(carbonProperties
.getProperty(CarbonCommonConstants.LOAD_GLOBAL_SORT_PARTITIONS,
CarbonCommonConstants.LOAD_GLOBAL_SORT_PARTITIONS_DEFAULT))
val DATEFORMAT =
SQLConfigBuilder(CarbonLoadOptionConstants.CARBON_OPTIONS_DATEFORMAT)
.doc("Property to configure data format for date type columns.")
.stringConf
.createWithDefault(CarbonLoadOptionConstants.CARBON_OPTIONS_DATEFORMAT_DEFAULT)
}
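  // Sketch (assumption, not in the original source): once registered, these dynamic
  // parameters can be overridden per session through Spark SQL, e.g.
  //   sparkSession.sql(s"SET ${CarbonLoadOptionConstants.CARBON_OPTIONS_SORT_SCOPE}=GLOBAL_SORT")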
/**
* to set the dynamic properties default values
*/
def addDefaultCarbonSessionParams(): Unit = {
sparkSession.conf.set(CarbonCommonConstants.ENABLE_UNSAFE_SORT,
carbonProperties.getProperty(CarbonCommonConstants.ENABLE_UNSAFE_SORT,
CarbonCommonConstants.ENABLE_UNSAFE_SORT_DEFAULT).toBoolean)
sparkSession.conf.set(CarbonCommonConstants.CARBON_CUSTOM_BLOCK_DISTRIBUTION,
carbonProperties
.getProperty(CarbonCommonConstants.CARBON_CUSTOM_BLOCK_DISTRIBUTION,
CarbonCommonConstants.CARBON_CUSTOM_BLOCK_DISTRIBUTION_DEFAULT).toBoolean)
sparkSession.conf.set(CarbonLoadOptionConstants.CARBON_OPTIONS_BAD_RECORDS_LOGGER_ENABLE,
CarbonLoadOptionConstants.CARBON_OPTIONS_BAD_RECORDS_LOGGER_ENABLE_DEFAULT.toBoolean)
sparkSession.conf.set(CarbonLoadOptionConstants.CARBON_OPTIONS_BAD_RECORDS_ACTION,
carbonProperties.getProperty(CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION,
CarbonCommonConstants.CARBON_BAD_RECORDS_ACTION_DEFAULT))
sparkSession.conf.set(CarbonLoadOptionConstants.CARBON_OPTIONS_IS_EMPTY_DATA_BAD_RECORD,
CarbonLoadOptionConstants.CARBON_OPTIONS_IS_EMPTY_DATA_BAD_RECORD_DEFAULT.toBoolean)
sparkSession.conf.set(CarbonLoadOptionConstants.CARBON_OPTIONS_SORT_SCOPE,
carbonProperties.getProperty(CarbonCommonConstants.LOAD_SORT_SCOPE,
CarbonCommonConstants.LOAD_SORT_SCOPE_DEFAULT))
sparkSession.conf.set(CarbonLoadOptionConstants.CARBON_OPTIONS_BATCH_SORT_SIZE_INMB,
carbonProperties.getProperty(CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB,
CarbonCommonConstants.LOAD_BATCH_SORT_SIZE_INMB_DEFAULT))
sparkSession.conf.set(CarbonLoadOptionConstants.CARBON_OPTIONS_SINGLE_PASS,
CarbonLoadOptionConstants.CARBON_OPTIONS_SINGLE_PASS_DEFAULT.toBoolean)
sparkSession.conf.set(CarbonLoadOptionConstants.CARBON_OPTIONS_BAD_RECORD_PATH,
carbonProperties.getProperty(CarbonCommonConstants.CARBON_BADRECORDS_LOC,
CarbonCommonConstants.CARBON_BADRECORDS_LOC_DEFAULT_VAL))
sparkSession.conf.set(CarbonLoadOptionConstants.CARBON_OPTIONS_GLOBAL_SORT_PARTITIONS,
carbonProperties.getProperty(CarbonCommonConstants.LOAD_GLOBAL_SORT_PARTITIONS,
CarbonCommonConstants.LOAD_GLOBAL_SORT_PARTITIONS_DEFAULT))
sparkSession.conf.set(CarbonLoadOptionConstants.CARBON_OPTIONS_DATEFORMAT,
CarbonLoadOptionConstants.CARBON_OPTIONS_DATEFORMAT_DEFAULT)
}
}
| aniketadnaik/carbondataStreamIngest | integration/spark2/src/main/scala/org/apache/spark/sql/internal/CarbonSqlConf.scala | Scala | apache-2.0 | 7,867 |
/*
* Copyright 2017 Zhang Di
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.dizhang.seqspark.ds
import breeze.linalg.{max, min}
/**
* Region on chromosome
 * position is 0-based and the interval is half-open (closed at start, open at end):
* [start, end)
*
* case classes:
* 1. single: for snv
* 2. interval: for cnv
* 3. gene: for region with a name
* */
@SerialVersionUID(7737730001L)
trait Region extends Serializable {
def chr: Byte
def start: Int
def end: Int
override def toString = s"$chr:$start-$end"
def length = end - start
def mid = start + length/2
def overlap(that: ZeroLength): Boolean = false
def overlap(that: Region): Boolean = {
this.chr == that.chr && min(this.end, that.end) > max(this.start, that.start)
}
def intersect(that: Region): Region = {
require(this overlap that, "this region must overlap that")
Region(this.chr, max(this.start, that.start), min(this.end, that.end))
}
def in(that: Region): Boolean = {
this.chr == that.chr && this.start >= that.start && this.end <= that.end
}
def ==(that: Region): Boolean = this.chr == that.chr && this.start == that.start && this.end == that.end
def <(that: Region)(implicit ordering: Ordering[Region]): Boolean = {
if (ordering.compare(this, that) < 0) true else false
}
def <=(that: Region)(implicit ordering: Ordering[Region]): Boolean = {
if (this < that || this == that) true else false
}
def >(that: Region)(implicit ordering: Ordering[Region]): Boolean = {
if (that < this) true else false
}
def >=(that: Region)(implicit ordering: Ordering[Region]): Boolean = {
if (! (this < that)) true else false
}
}
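// Worked examples of the half-open [start, end) semantics above (illustrative, not part
// of the original file):
//   Region("1", 100, 200) overlap Region("1", 199, 300)  // true: 199 lies in [100, 200)
//   Region("1", 100, 200) overlap Region("1", 200, 300)  // false: 200 is excluded
//   Region("chr1:100-200")                                // parses to Interval(1, 100, 200)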
case class Single(chr: Byte, pos: Int) extends Region {
val start = pos
val end = pos + 1
override def toString = s"$chr:$pos"
}
case class ZeroLength(chr: Byte, pos: Int) extends Region {
val start = pos
val end = pos
override def overlap(that: Region) = false
}
case class Interval(chr: Byte, start: Int, end: Int) extends Region
case class Named(chr: Byte, start: Int, end: Int, name: String) extends Region
case class Variation(chr: Byte, pos: Int, ref: String, alt: String, var info: Option[String]) extends Region {
def start = pos
def end = pos + 1
def this(region: Region, ref: String, alt: String, info: Option[String] = None) = {
this(region.chr, region.start, ref, alt, info)
}
def mutType = Variant.mutType(ref, alt)
override def toString = s"$chr:$pos-$end[$ref|$alt]${info match {case None => ""; case Some(i) => i}}"
def toRegion: String = s"$chr:$pos-$end[$ref|$alt]"
//def copy() = Variation(chr, start, end, ref, alt, info)
def ==(that: Variation): Boolean = {
this.asInstanceOf[Region] == that.asInstanceOf[Region] && this.ref == that.ref && this.alt == that.alt
}
def addInfo(k: String, v: String): Variation = {
info match {
case None => this.info = Some(s"$k=$v")
case Some(i) => this.info = Some(s"$i;$k=$v")
}
this
}
def parseInfo: Map[String, String] = info match {
case None => Map[String, String]()
case Some(s) =>
s.split(";").map{x =>
val is = x.split("=")
is(0) -> is(1)
}.toMap
}
}
object Variation {
import Region.Chromosome
def apply(x: String): Variation = {
val p = """(?:chr)?([MTXY0-9]+):(\\d+)-(\\d+)\\[([ATCG]+)\\|([ATCG]+)\\]""".r
x match {
case p(c, s, _, r, a) =>
Variation(c.byte, s.toInt, r, a, None)
}
}
implicit object VariationOrdering extends Ordering[Variation] {
def compare(x: Variation, y: Variation): Int = {
val c = Region.ord.compare(x, y)
if (c == 0) {
x.alt compare y.alt
} else {
c
}
}
}
}
object Single {
implicit object SingleOrdering extends Ordering[Single] {
def compare(x: Single, y: Single): Int = {
if (x.chr != y.chr) {
x.chr compare y.chr
} else {
x.pos compare y.pos
}
}
}
}
object Region {
case object Empty extends Region {
val chr = 0.toByte
val start = 0
val end = 0
}
implicit def ord[A <: Region] = new Ordering[A] {
def compare(x: A, y: A): Int = {
if (x.chr != y.chr) {
x.chr compare y.chr
} else if (x.start != y.start) {
x.start compare y.start
} else {
x.end compare y.end
}
}
}
implicit class Chromosome(val self: String) extends AnyVal {
def byte: Byte = {
val num = """(\\d+)""".r
self match {
case num(x) => x.toByte
case "X" => 23.toByte
case "Y" => 24.toByte
case "M" => 25.toByte
case "MT" => 25.toByte
case "XY" => 26.toByte
case _ => 0.toByte
}
}
}
def apply(c: Byte, p: Int): Region = Single(c, p)
def apply(c: String, p: Int): Region = apply(c.byte, p)
def apply(c: String, s: Int, e: Int): Region = {
apply(c.byte, s, e)
}
def apply(c: Byte, s: Int, e: Int): Region = {
require(e >= s, "end must be larger than or equal to start.")
if (e == s)
ZeroLength(c, s)
else if (e - s > 1)
Interval(c, s, e)
else
Single(c, s)
}
def apply(c: String, s: Int, e: Int, n: String): Region = Named(c.byte, s, e, n)
def apply(pattern: String): Region = {
val onlyChr = """(?i)(?:chr)?([MTXY0-9]+)""".r
val start = """(?i)(?:chr)?([MTXY0-9]+):(\\d+)-""".r
val end = """(?i)(?:chr)?([MTXY0-9]+):-(\\d+)""".r
val full = """(?i)(?:chr)?([MTXY0-9]+):(\\d+)-(\\d+)""".r
pattern match {
case onlyChr(chr) => apply(chr, 0, Int.MaxValue)
case start(chr, s) => apply(chr, s.toInt, Int.MaxValue)
case end(chr, e) => apply(chr, 0, e.toInt)
case full(chr, s, e) => apply(chr, s.toInt, e.toInt)
case _ => Empty
}
}
}
| statgenetics/seqspark | src/main/scala/org/dizhang/seqspark/ds/Region.scala | Scala | apache-2.0 | 6,295 |
package sampler.abc.actor.root.state
import org.scalatest.FreeSpec
class GatheringTest extends FreeSpec {
"Gathering should" - {
"Ignore report completed message" in {
fail("TODO")
}
"Worker failure triggers allocation of new job" in {
fail("TODO")
      // TODO consider ability to reallocate weighing jobs
      // Note that if the failed job was a weighing job
      // then it doesn't get reallocated; we just lose all
      // those scored particles. Not ideal, but if a weighing
      // job caused a failure then resubmitting it probably
      // wouldn't work any better
}
"New scored partilcles added" in {
fail("TODO")
}
"Mix payload particles added" in {
fail("TODO")
}
"Weighted particles added and triggers flush" in {
fail("TODO")
}
"Weighted particles added but not time to flush" in {
fail("TODO")
}
"MixNow message" in {
fail("TODO")
}
}
}
| tearne/Sampler | sampler-abc/src/test/scala/sampler/abc/actor/root/state/GatheringTest.scala | Scala | apache-2.0 | 964 |
package scalakurs.option
sealed trait Gender
case object Female extends Gender
case object Male extends Gender
case class User(id: Int,
firstName: String,
lastName: String,
age: Int,
gender: Option[Gender],
spouseId: Option[Int]) {
lazy val printable = ???
}
object UserRepository {
private lazy val users = Map(
1 -> User(1, "John", "Doe", 32, Some(Male), Some(2)),
2 -> User(2, "Jane", "Doe", 30, Some(Female), Some(1)),
3 -> User(3, "Doni", "Doe", 15, None, None),
4 -> User(4, "Honey", "Ryder", 27, Some(Female), None)
)
def findById(id: Int): Option[User] = ???
def findAll: Iterable[User] = ???
def findAllWithSpouse: Iterable[User] = ???
def findPairById(id: Int): Option[(User, User)] = ???
}
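// One possible way to fill in the ??? stubs above (an illustrative sketch only; the
// stubs appear to be deliberately left open as an exercise in this file):
//   def findById(id: Int): Option[User] = users.get(id)
//   def findAll: Iterable[User] = users.values
//   def findAllWithSpouse: Iterable[User] = users.values.filter(_.spouseId.isDefined)
//   def findPairById(id: Int): Option[(User, User)] =
//     for { u <- findById(id); s <- u.spouseId.flatMap(findById) } yield (u, s)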
| elacin/scala-kurs | oppgaver/src/main/scala/scalakurs/option/UserRepository.scala | Scala | apache-2.0 | 847 |
package org.jetbrains.plugins.scala.annotator.element
import com.intellij.lang.annotation.HighlightSeverity
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.TextRange
import com.intellij.psi.PsiFile
import junit.framework.Test
import org.jetbrains.plugins.scala.annotator.AnnotatorHolderMockBase
import org.jetbrains.plugins.scala.annotator.element.ScStringLiteralAnnotatorTest.{MyAnnotatorHolderMock, MyMessage}
import org.jetbrains.plugins.scala.base.ScalaFileSetTestCase
import org.jetbrains.plugins.scala.extensions.{IteratorExt, PsiElementExt}
import org.jetbrains.plugins.scala.lang.psi.api.base.literals.ScStringLiteral
import org.jetbrains.plugins.scala.lang.surroundWith.SurroundWithTest
import org.junit.runner.RunWith
import org.junit.runners.AllTests
import scala.annotation.nowarn
import scala.math.Ordered.orderingToOrdered
@RunWith(classOf[AllTests])
class ScStringLiteralAnnotatorTest
extends ScalaFileSetTestCase("/annotator/string_literals/") {
override protected def transform(testName: String, fileText: String, project: Project): String = {
val lightFile = createLightFile(fileText, project)
val messages = collectMessages(lightFile)
    messages.mkString("\n")
}
private def collectMessages(file: PsiFile): List[MyMessage] = {
val mock = new MyAnnotatorHolderMock(file)
val literals = file.depthFirst().filterByType[ScStringLiteral].toSeq
literals.foreach(ElementAnnotator.annotate(_)(mock))
mock.annotations
}
}
object ScStringLiteralAnnotatorTest {
def suite: Test = new ScStringLiteralAnnotatorTest
implicit private object TextRangeOrdering extends scala.math.Ordering[TextRange] {
override def compare(x: TextRange, y: TextRange): Int =
(x.getStartOffset, x.getEndOffset) compare (y.getStartOffset, y.getEndOffset)
}
// NOTE: we could try to unify with org.jetbrains.plugins.scala.annotator.Message
  // which currently doesn't test text ranges, but only the test file text (it has its advantages and disadvantages)
sealed abstract class MyMessage extends Ordered[MyMessage] {
def range: TextRange
def message: String
override def compare(that: MyMessage): Int =
(this.range, this.message) compare (that.range, that.message)
}
object MyMessage {
case class Info(override val range: TextRange, override val message: String) extends MyMessage
case class Warning(override val range: TextRange, override val message: String) extends MyMessage
case class Error(override val range: TextRange, override val message: String) extends MyMessage
}
class MyAnnotatorHolderMock(file: PsiFile) extends AnnotatorHolderMockBase[MyMessage](file) {
//noinspection ScalaUnnecessaryParentheses
@nowarn("cat=deprecation")
override def createMockAnnotation(s: HighlightSeverity, range: TextRange, message: String): Option[MyMessage] =
s match {
case HighlightSeverity.ERROR => Some(MyMessage.Error(range, message))
case HighlightSeverity.WARNING |
HighlightSeverity.GENERIC_SERVER_ERROR_OR_WARNING |
HighlightSeverity.WEAK_WARNING => Some(MyMessage.Warning(range, message))
case HighlightSeverity.INFORMATION |
(HighlightSeverity.INFO) => Some(MyMessage.Info(range, message))
case _ => None
}
}
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/annotator/element/ScStringLiteralAnnotatorTest.scala | Scala | apache-2.0 | 3,376
/*
* Copyright © 2014 TU Berlin ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.emmalanguage
package lib.ml.clustering
import api._
import lib.linalg._
import lib.ml._
class KMeansFlinkSpec extends KMeansSpec with FlinkAware {
override val runs = 2
override val iterations = 2
override val overlap = .30
override def run(k: Int, input: String): Set[kMeans.Solution[Long]] =
withDefaultFlinkEnv(implicit flink => emma.onFlink {
// read the input
val points = for (line <- DataBag.readText(input)) yield {
val record = line.split("\\t")
DPoint(record.head.toLong, dense(record.tail.map(_.toDouble)))
}
// do the clustering
val result = kMeans(2, k, runs, iterations)(points)
// return the solution as a local set
result.collect().toSet[kMeans.Solution[Long]]
})
}
| emmalanguage/emma | emma-lib-flink/src/test/scala/org/emmalanguage/lib/ml/clustering/KMeansFlinkSpec.scala | Scala | apache-2.0 | 1,392 |
/*
* Copyright 2011-2014 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.ebpi.yaidom.queryapi
import scala.collection.immutable
import nl.ebpi.yaidom.core.EName
import nl.ebpi.yaidom.core.QName
/**
* Shorthand for `ClarkElemApi[E] with HasQNameApi with HasScopeApi` with some additional methods that
* use the scope for resolving QName-valued text and attribute values. In other words, an element query API typically
* supported by element implementations, because most element implementations know about scopes, QNames, ENames and
* text content, as well as offering the `ElemApi` query API.
*
* '''Generic code abstracting over yaidom element implementations should either use
* this trait, or super-trait `ClarkElemApi`, depending on the abstraction level.'''
*
* ==ScopedElemApi more formally==
*
* Scopes resolve QNames as ENames, so some properties are expected to hold for the element "name":
* {{{
* this.scope.resolveQNameOption(this.qname) == Some(this.resolvedName)
*
* // Therefore:
* this.resolvedName.localPart == this.qname.localPart
*
* this.resolvedName.namespaceUriOption ==
* this.scope.prefixNamespaceMap.get(this.qname.prefixOption.getOrElse(""))
* }}}
*
* For the attribute "name" properties, first define:
* {{{
* val attributeScope = this.scope.withoutDefaultNamespace
*
* val resolvedAttrs = this.attributes map {
* case (attrQName, attrValue) =>
* val resolvedAttrName = attributeScope.resolveQNameOption(attrQName).get
* (resolvedAttrName -> attrValue)
* }
* }}}
* Then the following must hold:
* {{{
* resolvedAttrs.toMap == this.resolvedAttributes.toMap
* }}}
*
* @tparam E The captured element subtype
*
* @author Chris de Vreeze
*/
trait ScopedElemApi[E <: ScopedElemApi[E]] extends ClarkElemApi[E] with HasQNameApi with HasScopeApi { self: E =>
/**
* Returns the QName value of the attribute with the given expanded name, if any, wrapped in an `Option`.
* If the attribute exists, but its value is not a QName, an exception is thrown.
*/
def attributeAsQNameOption(expandedName: EName): Option[QName]
/** Returns the QName value of the attribute with the given expanded name, and throws an exception otherwise */
def attributeAsQName(expandedName: EName): QName
/**
* Returns the resolved QName value (as EName) of the attribute with the given expanded name, if any, wrapped in an `Option`.
* None is returned if the attribute does not exist. If the QName value cannot be resolved given the scope of the element,
* an exception is thrown.
*/
def attributeAsResolvedQNameOption(expandedName: EName): Option[EName]
/**
* Returns the resolved QName value (as EName) of the attribute with the given expanded name, and throws an exception otherwise
*/
def attributeAsResolvedQName(expandedName: EName): EName
/** Returns `QName(text.trim)` */
def textAsQName: QName
/** Returns the equivalent of `scope.resolveQNameOption(textAsQName).get` */
def textAsResolvedQName: EName
}
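// Illustrative example (hypothetical element; a sketch, not part of the original file):
// for an element `elem` with scope [xs -> "http://www.w3.org/2001/XMLSchema"] and an
// attribute type="xs:string",
//   elem.attributeAsQName(EName("type"))          is the QName "xs:string", and
//   elem.attributeAsResolvedQName(EName("type"))  is the EName
//     "{http://www.w3.org/2001/XMLSchema}string",
// resolved against the element scope without its default namespace.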
| EBPI/yaidom | src/main/scala/nl/ebpi/yaidom/queryapi/ScopedElemApi.scala | Scala | apache-2.0 | 3,553 |
package gr.cslab.ece.ntua.musqle.plan.hypergraph
import java.util
import gr.cslab.ece.ntua.musqle.MuSQLEContext
import gr.cslab.ece.ntua.musqle.engine._
import gr.cslab.ece.ntua.musqle.plan.Cache
import gr.cslab.ece.ntua.musqle.plan.spark._
import org.apache.log4j.Logger
import scala.collection.JavaConversions._
import scala.collection.mutable.HashSet
abstract class DPhyp(mc: MuSQLEContext) {
protected var queryInfo: MQueryInfo
protected var numberOfVertices: Int = 0
protected var maxCoverage: Int = 0
protected final val location: util.HashMap[Int, Seq[(Engine, String)]] = new util.HashMap[Int, Seq[(Engine, String)]]()
protected final val edgeGraph: util.TreeMap[Vertex, util.TreeMap[Int, util.BitSet]] =
new util.TreeMap[Vertex, util.TreeMap[Int, util.BitSet]]
protected var dptable: DPTable = new DPTable(Seq())
protected val cacheChecks: Int = 0
protected var totalChecks: Int = 0
protected final val vertices: util.TreeMap[Vertex, List[(Int, Seq[Int])]] =
new util.TreeMap[Vertex, List[(Int, Seq[Int])]]
val logger = Logger.getLogger(classOf[DPhyp])
def plan(): DPJoinPlan = {
generateGraph()
logger.info("Graph generated.")
init()
logger.info("Initialization completed.")
solve()
}
protected def generateGraph()
/* Graph initialization */
protected def init(): Unit ={
var bitSet: util.BitSet = null
var keyToAdj: util.TreeMap[Int, util.BitSet] = null
/* For each vertex create a TreeMap with its
adjacent nodes and the corresponding key */
for (e <- vertices.entrySet()) {
keyToAdj = new util.TreeMap[Int, util.BitSet]()
e.getValue.foreach { pair =>
//Setting adjacent nodes
bitSet = new util.BitSet(numberOfVertices)
pair._2.foreach(bitSet.set(_))
//Add the adjacent nodes with the key connecting them (key: pair._1)
keyToAdj.put(pair._1, bitSet)
}
//Add a new edge
edgeGraph.put(e.getKey, keyToAdj)
}
}
/**
   * Adds a new vertex (scan) to the graph
* @param vertex The new vertex
* @param adj The list of the foreign keys and the connected nodes on each key
* */
protected def addVertex(vertex: Vertex, adj: List[(Int, Seq[Int])]): Unit ={
numberOfVertices += 1
vertices.put(vertex, adj)
location.put(vertex.id, vertex.engines)
}
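  /* Illustrative (hypothetical ids, not part of the original source): a call such as
   * addVertex(v, List((101, Seq(2, 3)))) registers scan vertex v as joinable to
   * vertices 2 and 3 through the join key with id 101; init() later turns this
   * adjacency list into the BitSet-based edge graph that solve() enumerates. */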
/**
* The optimization of the graph is triggered by calling solve()
*/
protected def solve(): DPJoinPlan = {
for (vertex <- edgeGraph.descendingKeySet) {
val b: util.BitSet = new util.BitSet(numberOfVertices)
b.set(vertex.id)
for (engine <- location.get(vertex.id)) {
val scan = new MuSQLEScan(vertex.asInstanceOf[SparkPlanVertex], engine._1, engine._2,queryInfo)
mc.cache.hit(scan.toAbstract())
dptable.checkAndPut(engine._1 , b, scan)
}
}
for (vertex <- edgeGraph.descendingKeySet) {
val b: util.BitSet = new util.BitSet(numberOfVertices)
b.set(vertex.id)
emitCsg(b)
val bv: util.BitSet = new util.BitSet(numberOfVertices)
for (i <- 1 to vertex.id) {
bv.set(i)
}
enumerateCsgRec(b, bv)
}
val b: util.BitSet = new util.BitSet(numberOfVertices)
for (i <- 1 to numberOfVertices) {
b.set(i)
}
val optimal = dptable.getOptimalPlan(b)
optimal.isRoot = true
optimal
}
protected def enumerateCsgRec(b: util.BitSet, bv: util.BitSet) {
val N: util.BitSet = neighBoor(b, bv)
if (N.isEmpty) return
var powerSet: PowerSet = new PowerSet(N)
while (powerSet.hasNext) {
val t = powerSet.next
if (!t.isEmpty) {
t.or(b)
//System.out.println("Check DPtable: "+t);
if (dptable.getAllPlans(t) != null) emitCsg(t)
}
}
powerSet = new PowerSet(N)
while (powerSet.hasNext) {
val t = powerSet.next
if (!t.isEmpty) {
t.or(b)
val Xnew: util.BitSet = new util.BitSet(numberOfVertices)
Xnew.or(bv)
Xnew.or(N)
enumerateCsgRec(t, Xnew)
}
}
}
protected def emitCsg(s1: util.BitSet) {
val X: util.BitSet = new util.BitSet(numberOfVertices)
val mins1: Int = s1.nextSetBit(0)
for (i <- 1 to mins1) {
X.set(i)
}
X.or(s1)
val N: util.BitSet = neighBoor(s1, X)
for (i <- N.size to 1 by -1) {
val s2: util.BitSet = new util.BitSet(numberOfVertices)
if (N.get(i)) {
s2.set(i)
//removed check for connectedness
emitCsgCmp(s1, s2)
enumerateCmpRec(s1, s2, X)
}
}
}
protected def enumerateCmpRec(s1: util.BitSet, s2: util.BitSet, X: util.BitSet) {
var N: util.BitSet = neighBoor(s2, X)
if (N.isEmpty) return
var powerSet: PowerSet = new PowerSet(N)
while (powerSet.hasNext) {
val t = powerSet.next
if (!t.isEmpty) {
t.or(s2)
//System.out.println("Check DPtable: "+t);
if (dptable.getAllPlans(t) != null) emitCsgCmp(s1, t)
}
}
X.or(N)
N = neighBoor(s2, X)
if (N.isEmpty) {
return
}
powerSet = new PowerSet(N)
while (powerSet.hasNext) {
val t = powerSet.next
if (!t.isEmpty) {
t.or(s2)
enumerateCmpRec(s1, t, X)
}
}
}
protected def emitCsgCmp(s1: util.BitSet, s2: util.BitSet) {
//System.out.println("EmitCsgCmp s1:" + s1 + " s2: " + s2)
val vars: HashSet[Int] = findJoinVars(s1, s2)
val s: util.BitSet = new util.BitSet(numberOfVertices)
s.or(s1)
s.or(s2)
totalChecks += 1
for (leftSubPlan <- dptable.getAllPlans(s1).entrySet) {
for (rightSubPlan <- dptable.getAllPlans(s2).entrySet) {
val candidate = new MuSQLEJoin(leftSubPlan.getValue, rightSubPlan.getValue, vars, leftSubPlan.getKey, queryInfo)
val cache = mc.cache
cache.hit(candidate.toAbstract())
if (leftSubPlan.getKey.equals(rightSubPlan.getKey)) {
// leftSubPlan and rightSubPlan are on the same engine.
// No move required
val r: DPJoinPlan = new MuSQLEJoin(leftSubPlan.getValue, rightSubPlan.getValue, vars, leftSubPlan.getKey, queryInfo)
//val r: DPJoinPlan = new Join(left = leftSubPlan.getValue, right = rightSubPlan.getValue, vars = vars, engine = leftSubPlan.getKey)
dptable.checkAndPut(leftSubPlan.getKey, s, r)
} else {
/* A move is required */
          /* Move left to right - check whether the right engine supports moving data from the left engine */
if (rightSubPlan.getKey.supportsMove(leftSubPlan.getKey)) {
val m: DPJoinPlan = new MuSQLEMove(leftSubPlan.getValue, rightSubPlan.getKey, queryInfo)
val r: DPJoinPlan = new MuSQLEJoin(m, rightSubPlan.getValue, vars, rightSubPlan.getKey, queryInfo)
dptable.checkAndPut(rightSubPlan.getKey, s, r)
}
          /* Move right to left - check whether the left engine supports moving data from the right engine */
if (leftSubPlan.getKey.supportsMove(rightSubPlan.getKey)) {
val m = new MuSQLEMove(rightSubPlan.getValue, leftSubPlan.getKey, queryInfo)
val r = new MuSQLEJoin(leftSubPlan.getValue, m, vars, leftSubPlan.getKey, queryInfo)
dptable.checkAndPut(leftSubPlan.getKey, s, r)
}
/* Move all to other engine */
for (engine <- dptable.engines) {
var m1: DPJoinPlan = leftSubPlan.getValue
var m2: DPJoinPlan = rightSubPlan.getValue
if (!leftSubPlan.getKey.equals(engine) && engine.supportsMove(leftSubPlan.getKey)) {
m1 = new MuSQLEMove(m1, engine, queryInfo)
}
if (!rightSubPlan.getKey.equals(engine) && engine.supportsMove(rightSubPlan.getKey)) {
m2 = new MuSQLEMove(m2, engine, queryInfo)
}
val r = new MuSQLEJoin(m1, m2, vars, engine, queryInfo)
dptable.checkAndPut(engine, s, r)
}
}
}
//check for grouping filters on subgraphs
}
}
protected def findJoinVars(s1: util.BitSet, s2: util.BitSet): HashSet[Int] = {
val ret = new HashSet[Int]
for (i <- edgeGraph.keySet()) {
if (s1.get(i.id)) {
for (s <- edgeGraph.get(i).entrySet) {
if (s.getValue.intersects(s2)) ret.add(s.getKey)
}
}
}
ret
}
def neighBoor(S: util.BitSet, X: util.BitSet): util.BitSet = {
val N: util.BitSet = new util.BitSet(numberOfVertices)
for (i <- edgeGraph.keySet()) {
if (S.get(i.id)) {
for (s <- edgeGraph.get(i).entrySet) {
N.or(s.getValue)
}
}
}
N.andNot(X)
N
}
}
| gsvic/MuSQLE | src/main/scala/gr/cslab/ece/ntua/musqle/plan/hypergraph/DPhyp.scala | Scala | apache-2.0 | 8,653
/**
* Copyright (C) 2014 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.stratio.decision.api
import java.util.concurrent.TimeoutException
import com.stratio.decision.api.kafka.KafkaProducer
import com.stratio.decision.api.zookeeper.ZookeeperConsumer
import com.stratio.decision.commons.exceptions.{StratioEngineConnectionException, StratioAPIGenericException, StratioEngineOperationException}
import com.stratio.decision.commons.messages.StratioStreamingMessage
import org.junit.runner.RunWith
import org.mockito.Matchers._
import org.mockito.Mockito
import org.scalatest._
import org.scalatest.junit.JUnitRunner
import org.scalatest.mock.MockitoSugar
import scala.collection.JavaConversions._
import scala.concurrent.Future
@RunWith(classOf[JUnitRunner])
class StreamingAPIListOperationTest extends FunSpec
with GivenWhenThen
with ShouldMatchers
with MockitoSugar {
val kafkaProducerMock = mock[KafkaProducer]
val zookeeperConsumerMock = mock[ZookeeperConsumer]
val stratioStreamingAPIListOperation = new StreamingAPIListOperation(kafkaProducerMock, zookeeperConsumerMock, 2000)
val stratioStreamingMessage = new StratioStreamingMessage(
"theOperation",
"theStreamName",
"sessionId",
"requestId",
"theRequest",
123456,
Seq(),
Seq(),
true)
describe("The Decision API Sync Operation") {
it("should throw no exceptions when the engine returns a proper list") {
Given("a proper streams list")
val streamsList = """{"count":1,"timestamp":1402494388420,"streams":[{"streamName":"unitTestsStream","columns":[{"column":"column1","type":"STRING"}],"queries":[],"activeActions":[],"userDefined":true}]}"""
When("we perform the list operation")
Mockito.doNothing().when(kafkaProducerMock).send(anyString(), anyString())
org.mockito.Mockito.when(zookeeperConsumerMock.zNodeExists(anyString())).thenReturn(true)
org.mockito.Mockito.when(zookeeperConsumerMock.readZNode(anyString())).thenReturn(Future.successful())
org.mockito.Mockito.when(zookeeperConsumerMock.getZNodeData(anyString())).thenReturn(Some(streamsList))
Then("we should not get a StratioAPISecurityException")
try {
stratioStreamingAPIListOperation.getListStreams(stratioStreamingMessage)
} catch {
case _: Throwable => fail()
}
}
it("should throw a StratioAPIGenericException exception when the engine returns a wrong list") {
Given("a wrong streams list")
val streamsList = """{"count":1,"blahblah":1402494388420}"""
When("we perform the list operation")
Mockito.doNothing().when(kafkaProducerMock).send(anyString(), anyString())
org.mockito.Mockito.when(zookeeperConsumerMock.zNodeExists(anyString())).thenReturn(true)
org.mockito.Mockito.when(zookeeperConsumerMock.readZNode(anyString())).thenReturn(Future.successful())
org.mockito.Mockito.when(zookeeperConsumerMock.getZNodeData(anyString())).thenReturn(Some(streamsList))
Then("we should throw a StratioAPIGenericException")
intercept[StratioAPIGenericException] {
stratioStreamingAPIListOperation.getListStreams(stratioStreamingMessage)
}
}
it("should throw a StratioEngineOperationException when the ack time-out expired") {
Given("a time-out exception")
When("we perform the sync operation")
Mockito.doNothing().when(kafkaProducerMock).send(anyString(), anyString())
org.mockito.Mockito.when(zookeeperConsumerMock.zNodeExists(anyString())).thenReturn(true)
org.mockito.Mockito.when(zookeeperConsumerMock.readZNode(anyString())).thenReturn(Future.failed(new TimeoutException()))
Then("we should get a StratioEngineOperationException")
intercept[StratioEngineConnectionException] {
stratioStreamingAPIListOperation.getListStreams(stratioStreamingMessage)
}
}
}
}
| Stratio/streaming-cep-engine | api/src/test/scala/com/stratio/decision/api/StreamingAPIListOperationTest.scala | Scala | apache-2.0 | 4,426 |
/*
* Copyright (C) 2015 Original Work Marios Iliofotou
* Copyright (C) 2016 Modified Work Benjamin Finley
*
* This file is part of ReSurfAlt.
*
* ReSurfAlt is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* ReSurfAlt is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with ReSurfAlt. If not, see <http://www.gnu.org/licenses/>.
*/
package com.resurf.common
import org.apache.log4j.{Logger,ConsoleAppender,PatternLayout}
import org.scalatest.FunSuite
import org.scalatest.BeforeAndAfter
import org.scalatest.Matchers
import org.scalatest.OptionValues._
class TestTemplate extends FunSuite with BeforeAndAfter with Matchers {
protected def log4jToConsoleAndNewLevel(newLevel: org.apache.log4j.Level) = {
val rootLogger = org.apache.log4j.Logger.getRootLogger
rootLogger.removeAllAppenders()
rootLogger.setLevel(newLevel)
rootLogger.addAppender(new ConsoleAppender(new PatternLayout("[%d{dd/MM/yy hh:mm:ss:sss z}] %5p %c{2}: %m%n")))
}
before {
//org.apache.log4j.BasicConfigurator.configure()
log4jToConsoleAndNewLevel(org.apache.log4j.Level.OFF)
}
}
| finleyb/ReSurfAlt | src/test/scala/com/resurf/common/TestTemplate.scala | Scala | gpl-2.0 | 1,543 |
/*
* Copyright (c) 2013-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.snowplow.collectors.scalastream
package monitoring
import java.lang.management.ManagementFactory
import javax.management.{MBeanServer, ObjectName}
import scala.beans.BeanProperty
trait SnowplowMXBean {
def getLatest(): Long
def getRequests(): Int
def getSuccessfulRequests(): Int
def getFailedRequests(): Int
def incrementSuccessfulRequests(): Unit
def incrementFailedRequests(): Unit
}
class SnowplowScalaCollectorMetrics extends SnowplowMXBean {
@BeanProperty
var latest = now()
@BeanProperty
var requests = 0
@BeanProperty
var successfulRequests = 0
@BeanProperty
var failedRequests = 0
override def incrementSuccessfulRequests(): Unit = {
successfulRequests += 1
incrementRequests()
}
override def incrementFailedRequests(): Unit = {
failedRequests += 1
incrementRequests()
}
private def incrementRequests(): Unit = {
requests += 1
latest = now()
}
def now(): Long = System.currentTimeMillis() / 1000L
}
object BeanRegistry {
val collectorBean = new SnowplowScalaCollectorMetrics
val mbs: MBeanServer = ManagementFactory.getPlatformMBeanServer
val mBeanName: ObjectName =
new ObjectName("com.snowplowanalytics.snowplow:type=ScalaCollector")
mbs.registerMBean(collectorBean, mBeanName)
}
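// Illustrative (not part of the original source): once registered, the bean is visible to
// any JMX client (e.g. jconsole) under the ObjectName
// "com.snowplowanalytics.snowplow:type=ScalaCollector", exposing the Latest, Requests,
// SuccessfulRequests and FailedRequests attributes defined by SnowplowMXBean.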
| RetentionGrid/snowplow | 2-collectors/scala-stream-collector/core/src/main/scala/com.snowplowanalytics.snowplow.collectors.scalastream/monitoring/JMX.scala | Scala | apache-2.0 | 2,026 |
/**
* Licensed to the Minutemen Group under one or more contributor license
* agreements. See the COPYRIGHT file distributed with this work for
* additional information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You may
* obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package silhouette.authenticator
import com.typesafe.scalalogging.LazyLogging
import javax.inject.Inject
import silhouette._
import silhouette.authenticator.AuthenticatorProvider._
import silhouette.http.RequestPipeline
import silhouette.provider.RequestProvider
import scala.concurrent.Future
/**
* A request provider implementation that supports authentication with an authenticator.
*
* @param pipeline The authentication pipeline which transforms a request into an [[AuthState]].
* @tparam R The type of the request.
* @tparam I The type of the identity.
*/
class AuthenticatorProvider[R, I <: Identity] @Inject() (
pipeline: silhouette.Reads[RequestPipeline[R], Future[AuthState[I, Authenticator]]]
) extends RequestProvider[R, I] with LazyLogging {
/**
* The type of the credentials.
*/
override type C = Authenticator
/**
* Gets the provider ID.
*
* @return The provider ID.
*/
override def id: String = ID
/**
* Authenticates an identity based on credentials sent in a request.
*
* @param request The request pipeline.
* @return Some login info on successful authentication or None if the authentication was unsuccessful.
*/
override def authenticate(request: RequestPipeline[R]): Future[AuthState[I, Authenticator]] =
pipeline.read(request)
}
/**
* The companion object.
*/
object AuthenticatorProvider {
/**
* The provider constants.
*/
val ID = "authenticator"
}
| mohiva/silhouette | modules/authenticator/src/main/scala/silhouette/authenticator/AuthenticatorProvider.scala | Scala | apache-2.0 | 2,216 |
/*******************************************************************************
* Copyright 2010 Olaf Sebelin
*
* This file is part of Verdandi.
*
* Verdandi is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Verdandi is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Verdandi. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package verdandi.ui.workdayeditor
import verdandi.ui.Icons
import verdandi.ui.TextResources
import verdandi.model._
import scala.swing.{ Action, Reactor }
import scala.swing.event.{ MousePressed, MouseReleased }
import java.util.Date
import com.weiglewilczek.slf4s.Logging
abstract class WorkDayEditorAction(val editor: WorkDayEditor, labelKey: String) extends Action(TextResources.getText(labelKey)) with Logging with Reactor {
listenTo(editor)
var currentRecord: Option[EditableWorkRecord] = None
var currentTime: Date = _
reactions += {
case evt: BeforePopupEvent => {
currentRecord = evt.recordUnderCursor
currentTime = evt.time
handleEvent(evt)
}
}
def handleEvent(evt: BeforePopupEvent)
}
class NewWorkRecordAction(costUnit: CostUnit, _ed: WorkDayEditor) extends WorkDayEditorAction(_ed, costUnit.toString) {
def handleEvent(evt: BeforePopupEvent) = enabled = evt.recordUnderCursor.isEmpty
override def apply() {
logger.debug("New Work record at " + currentTime)
VerdandiModel.workRecordStorage.newWorkRecord(costUnit, currentTime)
}
}
class ChangeWorkRecordsCostunitAction(costUnit: CostUnit, _ed: WorkDayEditor) extends WorkDayEditorAction(_ed, costUnit.toString) {
def handleEvent(evt: BeforePopupEvent) = enabled = evt.recordUnderCursor.isDefined
override def apply() {
currentRecord.get.rec.costUnit = costUnit
VerdandiModel.workRecordStorage.save(currentRecord.get.rec)
}
}
class DeleteWorkRecordAction(_ed: WorkDayEditor) extends WorkDayEditorAction(_ed, "action.delete_workrecord.label") with Reactor {
def handleEvent(evt: BeforePopupEvent) = enabled = evt.recordUnderCursor.isDefined
override def apply() = currentRecord match {
case None => logger.warn("No current work record")
case Some(wr) => VerdandiModel.workRecordStorage.delete(wr.workRecord)
}
}
class StartTrackingCurrentRecordAction(_ed: WorkDayEditor) extends WorkDayEditorAction(_ed, "action.starttrackingcurrentrecord.label") with Reactor {
def handleEvent(evt: BeforePopupEvent) = enabled = evt.recordUnderCursor.isDefined
override def apply() = currentRecord match {
case None => logger.warn("No current work record")
case Some(wr) => {
wr.startTracking()
editor.repaint()
}
}
}
class StopTrackingAction(_ed: WorkDayEditor) extends WorkDayEditorAction(_ed, "action.stoptracking.label") with Reactor {
def handleEvent(evt: BeforePopupEvent) = {
enabled = editor.editableRecords.find(_.tracking == true).isDefined
}
override def apply() = {
editor.stopTracking()
editor.repaint()
}
}
class StartTrackingAction(val editor: WorkDayEditor, costUnit: CostUnit) extends Action(costUnit.toString) with Logging with Reactor {
override def apply() = {
editor.startTracking(costUnit)
}
}
class StopTracking(val editor: WorkDayEditor) extends Action("") {
icon = Icons.getIcon("icon.workdayeditor.track.stop")
override def apply() = {
editor.stopTracking()
editor.repaint()
}
}
| osebelin/verdandi | src/main/scala/verdandi/ui/workdayeditor/WorkDayEditorAction.scala | Scala | gpl-3.0 | 3,891 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/
package com.normation.rudder.web.rest
import net.liftweb.http._
import net.liftweb.http.rest._
import com.normation.rudder.batch.AsyncDeploymentAgent
import com.normation.rudder.batch.ManualStartDeployment
import com.normation.utils.StringUuidGenerator
import com.normation.eventlog.ModificationId
/**
 * A REST API that allows deploying promises.
*
*/
class RestDeploy(
asyncDeploymentAgent: AsyncDeploymentAgent
, uuidGen : StringUuidGenerator
) extends RestHelper {
serve {
case Get("api" :: "deploy" :: "reload" :: Nil, req) =>
asyncDeploymentAgent ! ManualStartDeployment(ModificationId(uuidGen.newUuid), RestUtils.getActor(req), "Policy update asked by REST request")
PlainTextResponse("OK")
}
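  // Illustrative (sketch, not part of the original source): an HTTP GET on
  // /api/deploy/reload triggers an asynchronous policy update and immediately
  // answers "OK" as plain text.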
}
| bmwjanos/rudder | rudder-web/src/main/scala/com/normation/rudder/web/rest/RestDeploy.scala | Scala | gpl-3.0 | 2,438
/*!
* Copyright 2013-2014 Dennis Hörsch.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.dennishoersch.util.dropwizard.views.jasmine
import de.dennishoersch.util.assets.DevelopmentOnlyAssetsBundle
import de.dennishoersch.util.dropwizard.config.{ConfiguredBundle, DeploymentConfiguration}
import io.dropwizard.setup.{Bootstrap, Environment}
object JasmineTestBundle extends ConfiguredBundle[DeploymentConfiguration] {
override def init(bootstrap: Bootstrap[DeploymentConfiguration]): Unit =
bootstrap.addBundle(
DevelopmentOnlyAssetsBundle(
name = "specs",
resourcePath = "/view/specs",
uriPath = "/specs",
indexFile = "index.html"))
override def run(configuration: DeploymentConfiguration, environment: Environment): Unit = {}
}
| dhs3000/dropwizard-scala | src/main/scala/de/dennishoersch/util/dropwizard/views/jasmine/JasmineTestBundle.scala | Scala | apache-2.0 | 1,307 |
package io.getquill.context.mirror
import scala.reflect.ClassTag
import io.getquill.util.Messages.fail
case class Row(data: Any*) {
def add(value: Any) = Row((data :+ value): _*)
def apply[T](index: Int)(implicit t: ClassTag[T]) =
data(index) match {
case v: T => v
case other => fail(s"Invalid column type. Expected '${t.runtimeClass}', but got '$other'")
}
}
| mentegy/quill | quill-core/src/main/scala/io/getquill/context/mirror/Row.scala | Scala | apache-2.0 | 389 |
/**
* Copyright (c) 2002-2012 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.pipes.matching
import org.neo4j.graphdb.{Relationship, Node, Direction, PropertyContainer}
import org.neo4j.cypher.internal.commands.Predicate
import collection.Map
import org.neo4j.cypher.internal.pipes.ExecutionContext
class PatterMatchingBuilder(patternGraph: PatternGraph, predicates: Seq[Predicate]) extends MatcherBuilder {
def getMatches(sourceRow: ExecutionContext): Traversable[ExecutionContext] = {
val bindings: Map[String, Any] = sourceRow.filter(_._2.isInstanceOf[PropertyContainer])
val boundPairs: Map[String, MatchingPair] = extractBoundMatchingPairs(bindings)
val undirectedBoundRelationships: Iterable[PatternRelationship] = bindings.keys.
filter(z => patternGraph.contains(z)).
filter(patternGraph(_).isInstanceOf[PatternRelationship]).
map(patternGraph(_).asInstanceOf[PatternRelationship]).
filter(_.dir == Direction.BOTH)
val mandatoryPattern: Traversable[ExecutionContext] = if (undirectedBoundRelationships.isEmpty) {
createPatternMatcher(boundPairs, false, sourceRow)
} else {
val boundRels: Seq[Map[String, MatchingPair]] = createListOfBoundRelationshipsWithHangingNodes(undirectedBoundRelationships, bindings)
boundRels.map(relMap => createPatternMatcher(relMap ++ boundPairs, false, sourceRow)).reduceLeft(_ ++ _)
}
if (patternGraph.containsOptionalElements)
mandatoryPattern.flatMap(innerMatch => createPatternMatcher(extractBoundMatchingPairs(innerMatch), true, sourceRow))
else
mandatoryPattern
}
private def createListOfBoundRelationshipsWithHangingNodes(undirectedBoundRelationships: Iterable[PatternRelationship], bindings: Map[String, Any]): Seq[Map[String, MatchingPair]] = {
val toList = undirectedBoundRelationships.map(patternRel => {
val rel = bindings(patternRel.key).asInstanceOf[Relationship]
val x = patternRel.key -> MatchingPair(patternRel, rel)
// Outputs the first direction of the pattern relationship
val a1 = patternRel.startNode.key -> MatchingPair(patternRel.startNode, rel.getStartNode)
val a2 = patternRel.endNode.key -> MatchingPair(patternRel.endNode, rel.getEndNode)
// Outputs the second direction of the pattern relationship
val b1 = patternRel.startNode.key -> MatchingPair(patternRel.startNode, rel.getEndNode)
val b2 = patternRel.endNode.key -> MatchingPair(patternRel.endNode, rel.getStartNode)
Seq(Map(x, a1, a2), Map(x, b1, b2))
}).toList
cartesian(toList).map(_.reduceLeft(_ ++ _))
}
private def createNullValuesForOptionalElements(matchedGraph: ExecutionContext): ExecutionContext = {
val m = (patternGraph.keySet -- matchedGraph.keySet).map(_ -> null).toMap
matchedGraph.newWith(m)
}
// This method takes a Seq of Seq and produces the cartesian product of all inner Seqs
// I'm committing this code, but it's all Tobias' doing.
private def cartesian[T](lst: Seq[Seq[T]]): Seq[Seq[T]] =
lst.foldRight(List(List[T]()))(// <- the type T needs to be specified here
(element: Seq[T], result: List[List[T]]) => // types for better readability
result.flatMap(r => element.map(e => e :: r))
).toSeq
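  // Illustrative: cartesian(Seq(Seq(1, 2), Seq(3))) == Seq(Seq(1, 3), Seq(2, 3))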
private def createPatternMatcher(boundPairs: Map[String, MatchingPair], includeOptionals: Boolean, source: ExecutionContext): Traversable[ExecutionContext] = {
val patternMatcher = if (patternGraph.hasDoubleOptionals)
new DoubleOptionalPatternMatcher(boundPairs, predicates, includeOptionals, source, patternGraph.doubleOptionalPaths)
else
new PatternMatcher(boundPairs, predicates, includeOptionals, source)
if (includeOptionals)
patternMatcher.map(matchedGraph => matchedGraph ++ createNullValuesForOptionalElements(matchedGraph))
else
patternMatcher
}
private def extractBoundMatchingPairs(bindings: Map[String, Any]): Map[String, MatchingPair] = bindings.flatMap {
case (key, value: PropertyContainer) if patternGraph.contains(key) =>
val element = patternGraph(key)
value match {
case node: Node => Seq(key -> MatchingPair(element, node))
case rel: Relationship => {
val pr = element.asInstanceOf[PatternRelationship]
val x = pr.dir match {
case Direction.OUTGOING => Some((pr.startNode, pr.endNode))
case Direction.INCOMING => Some((pr.endNode, pr.startNode))
case Direction.BOTH => None
}
//We only want directed bound relationships. Undirected relationship patterns
//have to be treated a little differently
x match {
case Some((a, b)) => {
val t1 = a.key -> MatchingPair(a, rel.getStartNode)
val t2 = b.key -> MatchingPair(b, rel.getEndNode)
val t3 = pr.key -> MatchingPair(pr, rel)
Seq(t1, t2, t3)
}
case None => Seq()
}
}
}
case _ => Seq()
}
}
| dksaputra/community | cypher/src/main/scala/org/neo4j/cypher/internal/pipes/matching/PatterMatchingBuilder.scala | Scala | gpl-3.0 | 5,784 |
package com.codahale.jerkson.ser
import java.lang.reflect.Modifier
import com.codahale.jerkson.JsonSnakeCase
import com.codahale.jerkson.Util._
import com.fasterxml.jackson.core.JsonGenerator
import com.fasterxml.jackson.annotation.{ JsonIgnore, JsonIgnoreProperties, JsonProperty }
import com.fasterxml.jackson.databind.{ SerializerProvider, JsonSerializer }
class CaseClassSerializer[A <: Product](klass: Class[_]) extends JsonSerializer[A] {
private val isSnakeCase = klass.isAnnotationPresent(classOf[JsonSnakeCase])
private val ignoredFields = if (klass.isAnnotationPresent(classOf[JsonIgnoreProperties])) {
klass.getAnnotation(classOf[JsonIgnoreProperties]).value().toSet
} else Set.empty[String]
private val nonIgnoredFields = klass.getDeclaredFields.filterNot { f ⇒
f.getAnnotation(classOf[JsonIgnore]) != null ||
ignoredFields(f.getName) ||
(f.getModifiers & Modifier.TRANSIENT) != 0 ||
f.getName.contains("$")
}
private val methods = klass.getDeclaredMethods
.filter { _.getParameterTypes.isEmpty }
.map { m ⇒ m.getName -> m }.toMap
private val jsonGetters = methods
.filter { _._2.getAnnotation(classOf[JsonProperty]) != null }
.map { m ⇒ m._2.getAnnotation(classOf[JsonProperty]).value -> m._2 }.toMap
def serialize(value: A, json: JsonGenerator, provider: SerializerProvider) {
json.writeStartObject()
for (field ← nonIgnoredFields) {
val methodOpt = methods.get(field.getName)
val getterOpt = jsonGetters.get(field.getName)
val fieldValue: Object = getterOpt.map { _.invoke(value) }.getOrElse(methodOpt.map { _.invoke(value) }.getOrElse(field.get(value)))
if (fieldValue != None) {
val fieldName = methodOpt.map { _.getName }.getOrElse(field.getName)
provider.defaultSerializeField(if (isSnakeCase) snakeCase(fieldName) else fieldName, fieldValue, json)
}
}
json.writeEndObject()
}
}
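// Illustrative behaviour (hypothetical case class and output; a sketch, not part of the
// original file):
//   @JsonSnakeCase case class Person(firstName: String, nickname: Option[String] = None)
//   Person("Jane") would serialize as {"first_name":"Jane"}: None-valued and
//   @JsonIgnore'd fields are skipped, and @JsonSnakeCase snake_cases the field names.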
| mDialog/jerkson | src/main/scala/com/codahale/jerkson/ser/CaseClassSerializer.scala | Scala | mit | 1,934 |
// Project: Default (Template) Project
// Module:
// Description:
// Distributed under the MIT License (see included file LICENSE)
package slogging
/**
 * Common interface for loggers (this interface is compatible with the slf4j logging API)
*/
trait UnderlyingLogger {
def isErrorEnabled: Boolean
def isWarnEnabled: Boolean
def isInfoEnabled: Boolean
def isDebugEnabled: Boolean
def isTraceEnabled: Boolean
// Error
def error(source: String, message: String) : Unit
def error(source: String, message: String, cause: Throwable) : Unit
def error(source: String, message: String, args: Any*) : Unit
// Warn
def warn(source: String, message: String): Unit
def warn(source: String, message: String, cause: Throwable): Unit
def warn(source: String, message: String, args: Any*): Unit
// Info
def info(source: String, message: String) : Unit
def info(source: String, message: String, cause: Throwable) : Unit
def info(source: String, message: String, args: Any*) : Unit
// Debug
def debug(source: String, message: String): Unit
def debug(source: String, message: String, cause: Throwable): Unit
def debug(source: String, message: String, args: Any*): Unit
// Trace
def trace(source: String, message: String): Unit
def trace(source: String, message: String, cause: Throwable): Unit
def trace(source: String, message: String, args: Any*): Unit
}
abstract class AbstractUnderlyingLogger extends UnderlyingLogger {
@inline final def isErrorEnabled: Boolean = LoggerConfig.level >= LogLevel.ERROR
@inline final def isWarnEnabled: Boolean = LoggerConfig.level >= LogLevel.WARN
@inline final def isInfoEnabled: Boolean = LoggerConfig.level >= LogLevel.INFO
@inline final def isDebugEnabled: Boolean = LoggerConfig.level >= LogLevel.DEBUG
@inline final def isTraceEnabled: Boolean = LoggerConfig.level >= LogLevel.TRACE
}
| jokade/slogging | shared/src/main/scala/slogging/UnderlyingLogger.scala | Scala | mit | 1,902 |
package org.automanlang.core.policy.aggregation
import java.io.ObjectInputStream
object PrecompTable {
def load(resource_name: String) : Option[PrecompTable] = {
try {
val is = new ObjectInputStream(getClass.getResourceAsStream(resource_name))
val table = is.readObject().asInstanceOf[PrecompTable]
is.close()
Some(table)
} catch {
case t:Throwable => None
}
}
}
class PrecompTable(val possibilities_sz: Int, val reward_sz: Int) extends Serializable {
private val _store = Array.fill[Int](possibilities_sz * reward_sz)(0)
  // np is numOpts: the number of question possibilities
private def computeIndex(np: Int, reward: BigDecimal) : Int = {
assert(np >= 2) // todo failing here
// convert reward to cents
val cents: Int = (reward * BigDecimal(100)).toInt
// adjust np
val npadj = np - 2
// compute index (-1 is to start at zero)
val index = (reward_sz * npadj) + cents - 1
assert(index < possibilities_sz * reward_sz)
index
}
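  // Worked example (illustrative): with reward_sz = 100, np = 3 and reward = 0.05,
  // cents = 5 and npadj = 1, so the entry lands at index = 100 * 1 + 5 - 1 = 104
  // in the flat backing array.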
def addEntry(np: Int, reward: BigDecimal, num_to_run: Int) : Unit = {
assert(num_to_run != 0)
_store(computeIndex(np, reward)) = num_to_run
}
def getEntryOrNone(np: Int, reward: BigDecimal) : Option[Int] = {
if (computeIndex(np, reward) < possibilities_sz * reward_sz) {
val output = _store(computeIndex(np, reward))
assert(output != 0)
Some(output)
} else {
None
}
}
}
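/*
 * Hypothetical usage sketch (the resource name and query values are assumptions):
 *
 * {{{
 *   PrecompTable.load("/NumToRun.dat") match {
 *     case Some(table) => table.getEntryOrNone(np = 4, reward = BigDecimal("0.06"))
 *     case None        => None  // no precomputed table bundled; compute the estimate at runtime
 *   }
 * }}}
 */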
| dbarowy/AutoMan | libautoman/src/main/scala/org/automanlang/core/policy/aggregation/PrecompTable.scala | Scala | gpl-2.0 | 1,448 |
package debop4s.data.slick3.tests
import debop4s.data.slick3.AbstractSlickFunSuite
import debop4s.data.slick3.TestDatabase._
import debop4s.data.slick3.TestDatabase.driver.api._
import slick.ast.NumericTypedType
/**
* RelationalTypeFunSuite
* @author [email protected]
*/
class RelationalTypeFunSuite extends AbstractSlickFunSuite {
test("numeric") {
def store[T](values: T*)(implicit tm: BaseColumnType[T] with NumericTypedType) = {
class Tbl(tag: Tag) extends Table[(Int, T)](tag, "numeric_t") {
def id = column[Int]("id")
def data = column[T]("data")
def * = (id, data)
}
lazy val tbl = TableQuery[Tbl]
val data = values.zipWithIndex.map { case (d, i) => (i + 1, d) }
val q = tbl.sortBy(_.id)
DBIO.seq(
tbl.schema.drop.asTry,
tbl.schema.create,
tbl ++= data,
q.result.map(_ shouldEqual data),
tbl.schema.drop
)
}
commit {
DBIO.seq(
store[Int](-1, 0, 1, Int.MinValue, Int.MaxValue),
ifCap(rcap.typeLong) { store[Long](-1L, 0L, 1L, Long.MinValue, Long.MaxValue) },
store[Short](-1, 0, 1, Short.MinValue, Short.MaxValue),
store[Byte](-1, 0, 1, Byte.MinValue, Byte.MaxValue),
store[Double](-1.0, 0.0, 1.0),
store[Float](-1.0f, 0.0f, 1.0f),
ifCap(rcap.typeBigDecimal) {
store[BigDecimal](BigDecimal(-1), BigDecimal(0), BigDecimal(1), BigDecimal(Long.MinValue), BigDecimal(Long.MaxValue))
}
)
}
}
private def roundtrip[T: BaseColumnType](tn: String, v: T) = {
class A(tag: Tag) extends Table[(Int, T)](tag, tn) {
def id = column[Int]("id")
def data = column[T]("data")
def * = (id, data)
}
lazy val as = TableQuery[A]
commit {
DBIO.seq(
as.schema.drop.asTry,
as.schema.create,
as +=(1, v),
as.map(_.data).result.map(_ shouldEqual Seq(v)),
as.filter(_.data === v).map(_.id).result.map(_ shouldEqual Seq(1)),
as.filter(_.data =!= v).map(_.id).result.map(_ shouldEqual Nil),
as.filter(_.data === v.bind).map(_.id).result.map(_ shouldEqual Seq(1)),
as.filter(_.data =!= v.bind).map(_.id).result.map(_ shouldEqual Nil),
as.schema.drop
)
}
}
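  // The same helper covers any other type with a BaseColumnType instance; e.g. a
  // hypothetical extra test could be written as:
  //   test("int") { roundtrip[Int]("roundtrip_int", 42) }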
test("boolean") {
roundtrip[Boolean]("boolean_true", true)
roundtrip[Boolean]("boolean_false", false)
}
test("string") {
roundtrip[String]("roundtrip_string", "aaa")
}
test("unit test") {
class T(tag: Tag) extends Table[Int](tag, "unit_t") {
def id = column[Int]("id")
def * = id
}
val ts = TableQuery[T]
commit {
DBIO.seq(
ts.schema.drop.asTry,
ts.schema.create,
ts += 42,
ts.map(_ => ()).result.map(_ shouldEqual Seq(())),
ts.map(a => ((), a)).result.map(_ shouldEqual Seq(((), 42))),
ts.map(a => (a, ())).result.map(_ shouldEqual Seq((42, ()))),
ts.schema.drop
)
}
}
}
| debop/debop4s | debop4s-data-slick3/src/test/scala/debop4s/data/slick3/tests/RelationalTypeFunSuite.scala | Scala | apache-2.0 | 2,986 |
package mesosphere.marathon.core
import akka.actor.ActorSystem
import com.google.inject.Inject
import com.twitter.common.zookeeper.ZooKeeperClient
import mesosphere.marathon.api.LeaderInfo
import mesosphere.marathon.core.auth.AuthModule
import mesosphere.marathon.core.base.{ ActorsModule, Clock, ShutdownHooks }
import mesosphere.marathon.core.flow.FlowModule
import mesosphere.marathon.core.launcher.{ TaskOpFactory, LauncherModule }
import mesosphere.marathon.core.launchqueue.LaunchQueueModule
import mesosphere.marathon.core.leadership.LeadershipModule
import mesosphere.marathon.core.matcher.base.util.StopOnFirstMatchingOfferMatcher
import mesosphere.marathon.core.matcher.manager.OfferMatcherManagerModule
import mesosphere.marathon.core.matcher.reconcile.OfferMatcherReconciliationModule
import mesosphere.marathon.core.plugin.PluginModule
import mesosphere.marathon.core.readiness.ReadinessModule
import mesosphere.marathon.core.task.bus.TaskBusModule
import mesosphere.marathon.core.task.jobs.TaskJobsModule
import mesosphere.marathon.core.task.tracker.TaskTrackerModule
import mesosphere.marathon.core.task.update.TaskUpdateStep
import mesosphere.marathon.metrics.Metrics
import mesosphere.marathon.state.{ GroupRepository, AppRepository, TaskRepository }
import mesosphere.marathon.{ LeadershipAbdication, MarathonConf, MarathonSchedulerDriverHolder }
import scala.util.Random
/**
* Provides the wiring for the core module.
*
* Its parameters represent guice wired dependencies.
* [[CoreGuiceModule]] exports some dependencies back to guice.
*/
class CoreModuleImpl @Inject() (
// external dependencies still wired by guice
zk: ZooKeeperClient,
leader: LeadershipAbdication,
marathonConf: MarathonConf,
metrics: Metrics,
actorSystem: ActorSystem,
marathonSchedulerDriverHolder: MarathonSchedulerDriverHolder,
appRepository: AppRepository,
groupRepository: GroupRepository,
taskRepository: TaskRepository,
taskOpFactory: TaskOpFactory,
leaderInfo: LeaderInfo,
clock: Clock,
taskStatusUpdateSteps: Seq[TaskUpdateStep]) extends CoreModule {
// INFRASTRUCTURE LAYER
private[this] lazy val random = Random
private[this] lazy val shutdownHookModule = ShutdownHooks()
private[this] lazy val actorsModule = new ActorsModule(shutdownHookModule, actorSystem)
override lazy val leadershipModule = LeadershipModule(actorsModule.actorRefFactory, zk, leader)
// TASKS
override lazy val taskBusModule = new TaskBusModule()
override lazy val taskTrackerModule =
new TaskTrackerModule(clock, metrics, marathonConf, leadershipModule, taskRepository, taskStatusUpdateSteps)
override lazy val taskJobsModule = new TaskJobsModule(marathonConf, leadershipModule, clock)
// READINESS CHECKS
lazy val readinessModule = new ReadinessModule(actorSystem)
// OFFER MATCHING AND LAUNCHING TASKS
private[this] lazy val offerMatcherManagerModule = new OfferMatcherManagerModule(
// infrastructure
clock, random, metrics, marathonConf,
leadershipModule
)
private[this] lazy val offerMatcherReconcilerModule =
new OfferMatcherReconciliationModule(
marathonConf,
clock,
actorSystem.eventStream,
taskTrackerModule.taskTracker,
groupRepository,
offerMatcherManagerModule.subOfferMatcherManager,
leadershipModule
)
override lazy val launcherModule = new LauncherModule(
// infrastructure
clock, metrics, marathonConf,
    // external guice dependencies
taskTrackerModule.taskCreationHandler,
marathonSchedulerDriverHolder,
// internal core dependencies
StopOnFirstMatchingOfferMatcher(
offerMatcherReconcilerModule.offerMatcherReconciler,
offerMatcherManagerModule.globalOfferMatcher
)
)
override lazy val appOfferMatcherModule = new LaunchQueueModule(
marathonConf,
leadershipModule, clock,
// internal core dependencies
offerMatcherManagerModule.subOfferMatcherManager,
maybeOfferReviver,
// external guice dependencies
appRepository,
taskTrackerModule.taskTracker,
taskOpFactory
)
// PLUGINS
override lazy val pluginModule = new PluginModule(marathonConf)
override lazy val authModule: AuthModule = new AuthModule(pluginModule.pluginManager)
// FLOW CONTROL GLUE
private[this] lazy val flowActors = new FlowModule(leadershipModule)
flowActors.refillOfferMatcherManagerLaunchTokens(
marathonConf, taskBusModule.taskStatusObservables, offerMatcherManagerModule.subOfferMatcherManager)
/** Combine offersWanted state from multiple sources. */
private[this] lazy val offersWanted =
offerMatcherManagerModule.globalOfferMatcherWantsOffers
.combineLatest(offerMatcherReconcilerModule.offersWantedObservable)
.map { case (managerWantsOffers, reconciliationWantsOffers) => managerWantsOffers || reconciliationWantsOffers }
lazy val maybeOfferReviver = flowActors.maybeOfferReviver(
clock, marathonConf,
actorSystem.eventStream,
offersWanted,
marathonSchedulerDriverHolder)
// GREEDY INSTANTIATION
//
// Greedily instantiate everything.
//
// lazy val allows us to write down object instantiations in any order.
//
// The LeadershipModule requires that all actors have been registered when the controller
// is created. Changing the wiring order for this feels wrong since it is nicer if it
// follows architectural logic. Therefore we instantiate them here explicitly.
taskJobsModule.handleOverdueTasks(
taskTrackerModule.taskTracker,
taskTrackerModule.taskReservationTimeoutHandler,
marathonSchedulerDriverHolder
)
maybeOfferReviver
offerMatcherManagerModule
launcherModule
offerMatcherReconcilerModule.start()
}
| ss75710541/marathon | src/main/scala/mesosphere/marathon/core/CoreModuleImpl.scala | Scala | apache-2.0 | 5,754 |
import sbt._
object Version {
final val Scala = "2.12.1"
final val ScalaTest = "3.0.1"
final val Vertx = "3.4.2"
}
object Library {
val vertx_codegen = "io.vertx" % "vertx-codegen" % Version.Vertx % "provided"
val vertx_lang_scala = "io.vertx" %% "vertx-lang-scala" % Version.Vertx
val vertx_hazelcast = "io.vertx" % "vertx-hazelcast" % Version.Vertx
val vertx_web = "io.vertx" %% "vertx-web-scala" % Version.Vertx
val vertx_mqtt_server = "io.vertx" %% "vertx-mqtt-server-scala" % Version.Vertx
val vertx_sql_common = "io.vertx" %% "vertx-sql-common-scala" % Version.Vertx
val vertx_bridge_common = "io.vertx" %% "vertx-bridge-common-scala" % Version.Vertx
val vertx_jdbc_client = "io.vertx" %% "vertx-jdbc-client-scala" % Version.Vertx
val vertx_mongo_client = "io.vertx" %% "vertx-mongo-client-scala" % Version.Vertx
val vertx_mongo_service = "io.vertx" %% "vertx-mongo-service-scala" % Version.Vertx
val vertx_auth_common = "io.vertx" %% "vertx-auth-common-scala" % Version.Vertx
val vertx_auth_shiro = "io.vertx" %% "vertx-auth-shiro-scala" % Version.Vertx
val vertx_auth_htdigest = "io.vertx" %% "vertx-auth-htdigest-scala" % Version.Vertx
val vertx_auth_oauth2 = "io.vertx" %% "vertx-auth-oauth2-scala" % Version.Vertx
val vertx_auth_mongo = "io.vertx" %% "vertx-auth-mongo-scala" % Version.Vertx
val vertx_auth_jwt = "io.vertx" %% "vertx-auth-jwt-scala" % Version.Vertx
val vertx_auth_jdbc = "io.vertx" %% "vertx-auth-jdbc-scala" % Version.Vertx
val vertx_web_common = "io.vertx" %% "vertx-web-common-scala" % Version.Vertx
val vertx_web_client = "io.vertx" %% "vertx-web-client-scala" % Version.Vertx
val vertx_sockjs_service_proxy = "io.vertx" %% "vertx-sockjs-service-proxy-scala" % Version.Vertx
val vertx_web_templ_freemarker = "io.vertx" %% "vertx-web-templ-freemarker-scala" % Version.Vertx
val vertx_web_templ_handlebars = "io.vertx" %% "vertx-web-templ-handlebars-scala" % Version.Vertx
val vertx_web_templ_jade = "io.vertx" %% "vertx-web-templ-jade-scala" % Version.Vertx
val vertx_web_templ_mvel = "io.vertx" %% "vertx-web-templ-mvel-scala" % Version.Vertx
val vertx_web_templ_pebble = "io.vertx" %% "vertx-web-templ-pebble-scala" % Version.Vertx
val vertx_web_templ_thymeleaf = "io.vertx" %% "vertx-web-templ-thymeleaf-scala" % Version.Vertx
val vertx_mysql_postgresql_client = "io.vertx" %% "vertx-mysql-postgresql-client-scala" % Version.Vertx
val vertx_mail_client = "io.vertx" %% "vertx-mail-client-scala" % Version.Vertx
val vertx_rabbitmq_client = "io.vertx" %% "vertx-rabbitmq-client-scala" % Version.Vertx
val vertx_redis_client = "io.vertx" %% "vertx-redis-client-scala" % Version.Vertx
val vertx_stomp = "io.vertx" %% "vertx-stomp-scala" % Version.Vertx
val vertx_tcp_eventbus_bridge = "io.vertx" %% "vertx-tcp-eventbus-bridge-scala" % Version.Vertx
val vertx_amqp_bridge = "io.vertx" %% "vertx-amqp-bridge-scala" % Version.Vertx
val vertx_dropwizard_metrics = "io.vertx" %% "vertx-dropwizard-metrics-scala" % Version.Vertx
val vertx_hawkular_metrics = "io.vertx" %% "vertx-hawkular-metrics-scala" % Version.Vertx
val vertx_shell = "io.vertx" %% "vertx-shell-scala" % Version.Vertx
val vertx_kafka_client = "io.vertx" %% "vertx-kafka-client-scala" % Version.Vertx
val vertx_circuit_breaker = "io.vertx" %% "vertx-circuit-breaker-scala" % Version.Vertx
val vertx_config = "io.vertx" %% "vertx-config-scala" % Version.Vertx
val vertx_service_discovery = "io.vertx" %% "vertx-service-discovery-scala" % Version.Vertx
val vertx_config_git = "io.vertx" %% "vertx-config-git-scala" % Version.Vertx
val vertx_config_hocon = "io.vertx" %% "vertx-config-hocon-scala" % Version.Vertx
val vertx_config_kubernetes_configmap = "io.vertx" %% "vertx-config-kubernetes-configmap-scala" % Version.Vertx
val vertx_config_redis = "io.vertx" %% "vertx-config-redis-scala" % Version.Vertx
val vertx_config_spring_config_server = "io.vertx" %% "vertx-config-spring-config-server-scala" % Version.Vertx
val vertx_config_yaml = "io.vertx" %% "vertx-config-yaml-scala" % Version.Vertx
val vertx_config_zookeeper = "io.vertx" %% "vertx-config-zookeeper-scala" % Version.Vertx
//non-vert.x deps
val scalaTest = "org.scalatest" %% "scalatest" % Version.ScalaTest
}
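// Illustrative usage from a project definition (the module name and the particular
// dependencies picked below are assumptions of this sketch):
//
//   lazy val frontend = (project in file("frontend")).settings(
//     scalaVersion := Version.Scala,
//     libraryDependencies ++= Seq(
//       Library.vertx_lang_scala,
//       Library.vertx_web,
//       Library.scalaTest % Test
//     )
//   )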
| giampaolotrapasso/vertx-ydl | frontend/project/Dependencies.scala | Scala | apache-2.0 | 4,274 |
package japgolly.scalajs.react
import org.scalajs.dom
import scala.scalajs.js
import js.{Dynamic, UndefOr, ThisFunction, ThisFunction0, Object, Any => JAny, Function => JFn}
import js.annotation.{JSBracketAccess, JSName}
object React extends React
trait React extends Object {
/**
* Create a component given a specification. A component implements a render method which returns one single child.
* That child may have an arbitrarily deep child structure. One thing that makes components different than standard
* prototypal classes is that you don't need to call new on them. They are convenience wrappers that construct
* backing instances (via new) for you.
*/
def createClass[P,S,B,N <: TopNode](spec: ReactComponentSpec[P,S,B,N]): ReactComponentType[P,S,B,N] = js.native
def createFactory[P,S,B,N <: TopNode](t: ReactComponentType[P,S,B,N]): ReactComponentCU[P,S,B,N] = js.native
def createElement[P,S,B,N <: TopNode](t: ReactComponentType[P,S,B,N]): ReactComponentCU[P,S,B,N] = js.native
def createElement(tag: String, props: Object, children: ReactNode*): ReactDOMElement = js.native
def render(e: ReactElement, n: dom.Node): ReactComponentM_[TopNode] = js.native
def render(e: ReactElement, n: dom.Node, callback: ThisFunction): ReactComponentM_[TopNode] = js.native
def render[P,S,B,N <: TopNode](c: ReactComponentU[P,S,B,N], n: dom.Node): ReactComponentM[P,S,B,N] = js.native
def render[P,S,B,N <: TopNode](c: ReactComponentU[P,S,B,N], n: dom.Node, callback: ThisFunction0[ReactComponentM[P,S,B,N], Unit]): ReactComponentM[P,S,B,N] = js.native
/**
* Remove a mounted React component from the DOM and clean up its event handlers and state. If no component was
* mounted in the container, calling this function does nothing. Returns true if a component was unmounted and false
* if there was no component to unmount.
*/
def unmountComponentAtNode(container: dom.Node): Boolean = js.native
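  /*
   * Illustrative sketch (the element and the mount node are assumptions of this comment):
   *
   * {{{
   *   val el: ReactElement = ???
   *   val container = dom.document.getElementById("app")
   *   React.render(el, container)
   *   // ...and later, to tear the component down again:
   *   React.unmountComponentAtNode(container)
   * }}}
   */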
/**
* Render a ReactElement to its initial HTML. This should only be used on the server. React will return an HTML
* string. You can use this method to generate HTML on the server and send the markup down on the initial request for
* faster page loads and to allow search engines to crawl your pages for SEO purposes.
*
* If you call React.render() on a node that already has this server-rendered markup, React will preserve it and only
* attach event handlers, allowing you to have a very performant first-load experience.
*/
def renderToString(e: ReactElement): String = js.native
/**
* Similar to renderToString, except this doesn't create extra DOM attributes such as data-react-id, that React uses
* internally. This is useful if you want to use React as a simple static page generator, as stripping away the extra
* attributes can save lots of bytes.
*/
def renderToStaticMarkup(e: ReactElement): String = js.native
/** Verifies the object is a ReactElement. */
def isValidElement(o: JAny): Boolean = js.native
/** Configure React's event system to handle touch events on mobile devices. */
def initializeTouchEvents(shouldUseTouch: Boolean): Unit = js.native
/**
* React.DOM provides convenience wrappers around React.createElement for DOM components. These should only be used
* when not using JSX. For example, React.DOM.div(null, 'Hello World!')
*/
def DOM: Dynamic = js.native
def addons: Dynamic = js.native
/** React.Children provides utilities for dealing with the this.props.children opaque data structure. */
def Children: ReactChildren = js.native
@deprecated("React.renderComponent will be deprecated in a future version. Use React.render instead.", "React 0.12.0")
def renderComponent(c: ReactComponentU_, n: dom.Node): ReactComponentM_[TopNode] = js.native
@deprecated("React.renderComponent will be deprecated in a future version. Use React.render instead.", "React 0.12.0")
def renderComponent(c: ReactComponentU_, n: dom.Node, callback: ThisFunction): ReactComponentM_[TopNode] = js.native
@deprecated("React.renderComponent will be deprecated in a future version. Use React.render instead.", "React 0.12.0")
def renderComponent[P, S, B, N <: TopNode](c: ReactComponentU[P, S, B, N], n: dom.Node): ReactComponentM[P, S, B, N] = js.native
@deprecated("React.renderComponent will be deprecated in a future version. Use React.render instead.", "React 0.12.0")
def renderComponent[P, S, B, N <: TopNode](c: ReactComponentU[P, S, B, N], n: dom.Node, callback: ThisFunction0[ReactComponentM[P, S, B, N], Unit]): ReactComponentM[P, S, B, N] = js.native
@deprecated("React.renderComponentToString will be deprecated in a future version. Use React.renderToString instead.", "React 0.12.0")
def renderComponentToString(component: ReactComponentU_): String = js.native
@deprecated("React.renderComponentToStaticMarkup will be deprecated in a future version. Use React.renderToStaticMarkup instead.", "React 0.12.0")
def renderComponentToStaticMarkup(component: ReactComponentU_): String = js.native
}
/** `React.Children` */
trait ReactChildren extends Object {
  /** Invoke fn on every immediate child contained within children with this set to context. If children is a nested object or array it will be traversed: fn will never be passed the container objects. If children is null or undefined, this returns null or undefined rather than an empty object. */
def map(c: PropsChildren, fn: js.Function1[ReactNode, JAny]): UndefOr[Object] = js.native
  /** Invoke fn on every immediate child contained within children with this set to context. If children is a nested object or array it will be traversed: fn will never be passed the container objects. If children is null or undefined, this returns null or undefined rather than an empty object. */
def map(c: PropsChildren, fn: js.Function2[ReactNode, Int, JAny]): UndefOr[Object] = js.native
/** Like React.Children.map() but does not return an object. */
def forEach(c: PropsChildren, fn: js.Function1[ReactNode, JAny]): Unit = js.native
/** Like React.Children.map() but does not return an object. */
def forEach(c: PropsChildren, fn: js.Function2[ReactNode, Int, JAny]): Unit = js.native
/** Return the only child in children. Throws otherwise. */
def only(c: PropsChildren): ReactNode = js.native
/** Return the total number of components in children, equal to the number of times that a callback passed to map or forEach would be invoked. */
def count(c: PropsChildren): Int = js.native
}
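/*
 * Illustrative sketch (the helper below is an assumption): inspecting
 * `this.props.children` with the React.Children utilities.
 *
 * {{{
 *   def describeChildren(children: PropsChildren): String =
 *     s"${React.Children.count(children)} child node(s)"
 * }}}
 */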
trait ReactComponentSpec[Props, State, +Backend, +Node <: TopNode] extends Object with ReactComponentTypeAuxJ[Props, State, Backend, Node]
/** The meat in React's createClass-createFactory sandwich. */
trait ReactComponentType[Props, State, +Backend, +Node <: TopNode] extends Object with ReactComponentTypeAuxJ[Props, State, Backend, Node]
/**
* https://facebook.github.io/react/docs/glossary.html indicates children can be a super type of ReactElement.
* Array and null are acceptable, thus this can be 0-n elements.
*/
trait ReactNode extends Object
/** ReactElement = ReactComponentElement | ReactDOMElement */
trait ReactElement extends Object with ReactNode {
def key: UndefOr[String] = js.native
def ref: UndefOr[String] = js.native
}
/** A React virtual DOM element, such as 'div', 'table', etc. */
trait ReactDOMElement extends ReactElement {
def `type`: String = js.native
def props : Object = js.native
}
/** An instance of a React component. Prefer using the subtype ReactComponentU instead. */
trait ReactComponentElement[Props]
extends ReactElement
with ComponentScope_P[Props]
/** A JS function that creates a React component instance. */
trait ReactComponentC_ extends JFn
/** The underlying function that creates a Scala-based React component instance. */
trait ReactComponentCU[Props, State, +Backend, +Node <: TopNode] extends ReactComponentC_ with ReactComponentTypeAuxJ[Props, State, Backend, Node] {
def apply(props: WrapObj[Props], children: ReactNode*): ReactComponentU[Props, State, Backend, Node] = js.native
}
/** An unmounted component. Not guaranteed to have been created by Scala, could be a React addon. */
trait ReactComponentU_ extends ReactElement
/** A mounted component. Not guaranteed to have been created by Scala, could be a React addon. */
trait ReactComponentM_[+Node <: TopNode]
extends ReactComponentU_
with ComponentScope_M[Node]
/** An unmounted Scala component. */
trait ReactComponentU[Props, State, +Backend, +Node <: TopNode]
extends ReactComponentU_
with ReactComponentTypeAuxJ[Props, State, Backend, Node]
/** A mounted Scala component. */
trait ReactComponentM[Props, State, +Backend, +Node <: TopNode]
extends ReactComponentM_[Node]
with ComponentScopeM[Props, State, Backend, Node]
with ReactComponentTypeAuxJ[Props, State, Backend, Node]
// =====================================================================================================================
// Scope
/** Methods always available. */
trait ComponentScope_A extends Object {
def isMounted(): Boolean = js.native
}
trait ComponentScope_P[+Props] extends Object {
@JSName("props") private[react] def _props: WrapObj[Props] with PropsMixedIn = js.native
}
trait ComponentScope_S[+State] extends Object {
@JSName("state") private[react] def _state: WrapObj[State] = js.native
}
trait ComponentScope_SS[State] extends ComponentScope_S[State] {
@JSName("setState") private[react] def _setState(s: WrapObj[State]): Unit = js.native
@JSName("setState") private[react] def _setState(s: WrapObj[State], callback: UndefOr[JFn]): Unit = js.native
}
trait ComponentScope_B[+Backend] extends Object {
def backend: Backend = js.native
}
trait ComponentScope_PS[-Props, +State] extends Object {
@JSName("getInitialState") private[react] def _getInitialState(s: WrapObj[Props]): WrapObj[State] = js.native
}
trait ComponentScope_M[+Node <: TopNode] extends Object {
/** Can be invoked on any mounted component in order to obtain a reference to its rendered DOM node. */
def getDOMNode(): Node = js.native
def refs: RefsObject = js.native
/**
* Can be invoked on any mounted component when you know that some deeper aspect of the component's state has
* changed without using this.setState().
*/
def forceUpdate(): Unit = js.native
}
/** Type of an unmounted component's `this` scope. */
trait ComponentScopeU[Props, State, +Backend]
extends ComponentScope_A
with ComponentScope_PS[Props, State]
with ComponentScope_P[Props]
with ComponentScope_SS[State]
with ComponentScope_B[Backend] {
// prohibits: ComponentScope_M.*
}
/** Type of a component's `this` scope during componentWillUpdate. */
trait ComponentScopeWU[Props, +State, +Backend, +Node <: TopNode]
extends ComponentScope_A
with ComponentScope_PS[Props, State]
with ComponentScope_P[Props]
with ComponentScope_S[State]
with ComponentScope_B[Backend]
with ComponentScope_M[Node]
// prohibits: .setState
/** Type of a mounted component's `this` scope. */
trait ComponentScopeM[Props, State, +Backend, +Node <: TopNode]
extends ComponentScopeU[Props, State, Backend]
with ReactComponentTypeAuxJ[Props, State, Backend, Node]
with ComponentScope_PS[Props, State]
with ComponentScope_M[Node]
/** Type of a component's `this` scope as is available to backends. */
trait BackendScope[Props, State]
extends ComponentScope_A
with ComponentScope_PS[Props, State]
with ComponentScope_P[Props]
with ComponentScope_SS[State]
with ComponentScope_M[TopNode]
// prohibits: .backend
/** Type of `this.refs` */
trait RefsObject extends Object {
@JSBracketAccess
def apply[Node <: TopNode](key: String): UndefOr[ReactComponentM_[Node]] = js.native
}
/** Additional methods that React mixes into `this.props` */
trait PropsMixedIn extends Object {
def children: PropsChildren = js.native
}
/** Type of `this.props.children` */
trait PropsChildren extends Object
// =====================================================================================================================
// Events
/** https://facebook.github.io/react/docs/events.html */
trait SyntheticEvent[+DOMEventTarget <: dom.Node] extends Object {
val bubbles : Boolean = js.native
val cancelable : Boolean = js.native
val currentTarget : DOMEventTarget = js.native
def defaultPrevented: Boolean = js.native
val eventPhase : Double = js.native
val isTrusted : Boolean = js.native
val nativeEvent : dom.Event = js.native
val target : DOMEventTarget = js.native
val timeStamp : js.Date = js.native
/**
* Stops the default action of an element from happening.
* For example: Prevent a submit button from submitting a form Prevent a link from following the URL
*/
def preventDefault(): Unit = js.native
/**
* Stops the bubbling of an event to parent elements, preventing any parent event handlers from being executed.
*/
def stopPropagation(): Unit = js.native
@JSName("type") val eventType: String = js.native
}
/** https://github.com/facebook/react/blob/master/src/browser/syntheticEvents/SyntheticUIEvent.js */
trait SyntheticUIEvent[+DOMEventTarget <: dom.Node] extends SyntheticEvent[DOMEventTarget] {
override val nativeEvent: dom.UIEvent = js.native
/**
* The view attribute identifies the AbstractView from which the event was generated.
* The un-initialized value of this attribute must be null.
*/
def view(event: dom.Event): Object = js.native
/**
* Specifies some detail information about the Event, depending on the type of event.
* The un-initialized value of this attribute must be 0.
*/
def detail(event: dom.Event): Double = js.native
}
/** https://github.com/facebook/react/blob/master/src/browser/syntheticEvents/SyntheticClipboardEvent.js */
trait SyntheticClipboardEvent[+DOMEventTarget <: dom.Node] extends SyntheticEvent[DOMEventTarget] {
/**
* The clipboardData attribute is an instance of the DataTransfer interface which lets a script read and manipulate
* values on the system clipboard during user-initiated copy, cut and paste operations. The associated drag data store
* is a live but filtered view of the system clipboard, exposing data types the implementation knows the script can
* safely access.
*
* The clipboardData object's items and files properties enable processing of multi-part or non-textual data from the
* clipboard.
*
* http://www.w3.org/TR/clipboard-apis/#widl-ClipboardEvent-clipboardData
*/
def clipboardData(event: dom.Event): dom.DataTransfer = js.native
}
/** https://github.com/facebook/react/blob/master/src/browser/syntheticEvents/SyntheticCompositionEvent.js */
trait SyntheticCompositionEvent[+DOMEventTarget <: dom.Node] extends SyntheticEvent[DOMEventTarget] {
override val nativeEvent: dom.CompositionEvent = js.native
/**
* Holds the value of the characters generated by an input method.
* This may be a single Unicode character or a non-empty sequence of Unicode characters [Unicode].
* Characters should be normalized as defined by the Unicode normalization form NFC, defined in [UAX #15].
* This attribute may be null or contain the empty string.
*
* http://www.w3.org/TR/DOM-Level-3-Events/#events-compositionevents
*/
val data: String = js.native
}
/** https://github.com/facebook/react/blob/master/src/browser/syntheticEvents/SyntheticDragEvent.js */
trait SyntheticDragEvent[+DOMEventTarget <: dom.Node] extends SyntheticMouseEvent[DOMEventTarget] {
override val nativeEvent: dom.DragEvent = js.native
val dataTransfer: dom.DataTransfer = js.native
}
/** https://github.com/facebook/react/blob/master/src/browser/syntheticEvents/SyntheticFocusEvent.js */
trait SyntheticFocusEvent[+DOMEventTarget <: dom.Node] extends SyntheticUIEvent[DOMEventTarget] {
override val nativeEvent: dom.FocusEvent = js.native
val relatedTarget: dom.EventTarget = js.native
}
// DISABLED. input.onchange generates SyntheticEvent not SyntheticInputEvent
///** https://github.com/facebook/react/blob/master/src/browser/syntheticEvents/SyntheticInputEvent.js */
//trait SyntheticInputEvent[+DOMEventTarget <: dom.Node] extends SyntheticEvent[DOMEventTarget] {
// /**
// * Holds the value of the characters generated by an input method.
// * This may be a single Unicode character or a non-empty sequence of Unicode characters [Unicode].
// * Characters should be normalized as defined by the Unicode normalization form NFC, defined in [UAX #15].
// * This attribute may be null or contain the empty string.
// *
// * http://www.w3.org/TR/2013/WD-DOM-Level-3-Events-20131105/#events-inputevents
// */
// val data: String = js.native
//}
/** https://github.com/facebook/react/blob/master/src/browser/syntheticEvents/SyntheticKeyboardEvent.js */
trait SyntheticKeyboardEvent[+DOMEventTarget <: dom.Node] extends SyntheticUIEvent[DOMEventTarget] {
override val nativeEvent: dom.KeyboardEvent = js.native
/** See org.scalajs.dom.extensions.KeyValue */
val key : String = js.native
val location : Double = js.native
val altKey : Boolean = js.native
val ctrlKey : Boolean = js.native
val metaKey : Boolean = js.native
val shiftKey : Boolean = js.native
val repeat : Boolean = js.native
val locale : String = js.native
def getModifierState(keyArg: String): Boolean = js.native
// charCode: function(event) {
// keyCode: function(event) {
// which: function(event) {
}
/** https://github.com/facebook/react/blob/master/src/browser/syntheticEvents/SyntheticMouseEvent.js */
trait SyntheticMouseEvent[+DOMEventTarget <: dom.Node] extends SyntheticUIEvent[DOMEventTarget] {
override val nativeEvent: dom.MouseEvent = js.native
val screenX : Double = js.native
val screenY : Double = js.native
val clientX : Double = js.native
val clientY : Double = js.native
val buttons : Int = js.native
val altKey : Boolean = js.native
val ctrlKey : Boolean = js.native
val metaKey : Boolean = js.native
val shiftKey : Boolean = js.native
def getModifierState(keyArg: String) : Boolean = js.native
def relatedTarget (event: dom.Event): dom.EventTarget = js.native
def button (event: dom.Event): Double = js.native
def pageX (event: dom.Event): Double = js.native
def pageY (event: dom.Event): Double = js.native
}
/** https://github.com/facebook/react/blob/master/src/browser/syntheticEvents/SyntheticTouchEvent.js */
trait SyntheticTouchEvent[+DOMEventTarget <: dom.Node] extends SyntheticUIEvent[DOMEventTarget] {
override val nativeEvent: dom.TouchEvent = js.native
val altKey : Boolean = js.native
val ctrlKey : Boolean = js.native
val metaKey : Boolean = js.native
val shiftKey : Boolean = js.native
val touches : dom.TouchList = js.native
val targetTouches : dom.TouchList = js.native
val changedTouches: dom.TouchList = js.native
def getModifierState(keyArg: String): Boolean = js.native
}
/** https://github.com/facebook/react/blob/master/src/browser/syntheticEvents/SyntheticWheelEvent.js */
trait SyntheticWheelEvent[+DOMEventTarget <: dom.Node] extends SyntheticMouseEvent[DOMEventTarget] {
override val nativeEvent: dom.WheelEvent = js.native
def deltaX(event: dom.Event): Double = js.native
def deltaY(event: dom.Event): Double = js.native
val deltaZ: Double = js.native
/**
   * Browsers without "deltaMode" report raw wheel deltas, where one
   * notch on the scroll wheel is always +/- 120, roughly equivalent to pixels.
   * A good approximation of DOM_DELTA_LINE (1) is 5% of the viewport size or
   * ~40 pixels; for DOM_DELTA_SCREEN (2) it is 87.5% of the viewport size.
*/
val deltaMode: Double = js.native
}
| russpowers/scalajs-react | core/src/main/scala/japgolly/scalajs/react/React.scala | Scala | apache-2.0 | 19,996 |
import java.lang.Thread.State
import java.lang.Thread.State._
object Test {
def f(state: State) = state match {
case NEW | WAITING => true
case RUNNABLE => false
// and I forget the rest
}
}
| yusuke2255/dotty | tests/untried/neg/sealed-java-enums.scala | Scala | bsd-3-clause | 215 |
package biz.gsconsulting.play.loadbalancer
import scala.collection.JavaConversions._
import play.api._
import biz.gsconsulting.play.util._
class LoadBalancerPlugin(app: Application) extends Plugin
  with Logs {
private[this] var unsafeLoadBalancers: Option[Map[String, LoadBalancer]] = None
def balancers() = unsafeLoadBalancers getOrElse Map()
def configuration() = app.configuration.getConfig("loadbalancer")
override def enabled() = {
app.configuration.getString("loadbalancerplugin").filter(_ != "disabled").isDefined
}
override def onStart() = {
if (enabled) {
logger.info("Load Balancer Plugin Enabled.")
unsafeLoadBalancers = for {
config <- configuration
balancers <- config.getConfig("balancers")
names = balancers.subKeys
} yield names.foldLeft(Map[String, LoadBalancer]()) {
(acc: Map[String, LoadBalancer], name: String) =>
logger.info(s"Creating load balancer '$name'")
acc.updated(name, LoadBalancer.fromConfiguration(name, config))
}
}
}
}
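/*
 * Illustrative application.conf sketch, inferred from the keys read above (the
 * balancer name and its settings are assumptions):
 *
 *   loadbalancerplugin = "enabled"   # any value other than "disabled" enables the plugin
 *   loadbalancer.balancers.primary {
 *     # settings consumed by LoadBalancer.fromConfiguration("primary", config)
 *   }
 */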
| gregsymons/play-loadbalancer | app/biz/gsconsulting/play/loadbalancer/LoadBalancerPlugin.scala | Scala | apache-2.0 | 1,066 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.controller.test.migration
import scala.Vector
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import common.TestHelpers
import common.WskTestHelpers
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.model.StatusCodes.OK
import akka.http.scaladsl.marshallers.sprayjson.SprayJsonSupport._
import spray.json.DefaultJsonProtocol._
import spray.json._
import whisk.core.controller.WhiskActionsApi
import whisk.core.controller.test.ControllerTestCommon
import whisk.core.controller.test.WhiskAuthHelpers
import whisk.core.entity._
/**
 * Tests migration of a new implementation of sequences: old-style sequences can be updated and retrieved - standalone tests
*/
@RunWith(classOf[JUnitRunner])
class SequenceActionApiMigrationTests extends ControllerTestCommon
with WhiskActionsApi
with TestHelpers
with WskTestHelpers {
behavior of "Sequence Action API Migration"
val creds = WhiskAuthHelpers.newIdentity()
val namespace = EntityPath(creds.subject.asString)
val collectionPath = s"/${EntityPath.DEFAULT}/${collection.path}"
def aname = MakeName.next("seq_migration_tests")
private def seqParameters(seq: Vector[String]) = Parameters("_actions", seq.toJson)
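  // e.g. seqParameters(Vector("/_/a", "/n/x/c")) produces the "_actions" parameter whose
  // value is the JSON array ["/_/a", "/n/x/c"], i.e. the component list that old-style
  // sequences stored as an action parameter.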
it should "list old-style sequence action with explicit namespace" in {
implicit val tid = transid()
val components = Vector("/_/a", "/_/x/b", "/n/a", "/n/x/c").map(stringToFullyQualifiedName(_))
val actions = (1 to 2).map { i =>
WhiskAction(namespace, aname, sequence(components))
}.toList
actions foreach { put(entityStore, _) }
waitOnView(entityStore, WhiskAction, namespace, 2)
Get(s"/$namespace/${collection.path}") ~> Route.seal(routes(creds)) ~> check {
status should be(OK)
val response = responseAs[List[JsObject]]
actions.length should be(response.length)
actions forall { a => response contains a.summaryAsJson } should be(true)
}
}
it should "get old-style sequence action by name in default namespace" in {
implicit val tid = transid()
val components = Vector("/_/a", "/_/x/b", "/n/a", "/n/x/c").map(stringToFullyQualifiedName(_))
val action = WhiskAction(namespace, aname, sequence(components))
put(entityStore, action)
Get(s"$collectionPath/${action.name}") ~> Route.seal(routes(creds)) ~> check {
status should be(OK)
val response = responseAs[WhiskAction]
response should be(action)
}
}
// this test is a repeat from ActionsApiTest BUT with old style sequence
it should "preserve new parameters when changing old-style sequence action to non sequence" in {
implicit val tid = transid()
val components = Vector("/_/a", "/_/x/b", "/n/a", "/n/x/c")
val seqComponents = components.map(stringToFullyQualifiedName(_))
val action = WhiskAction(namespace, aname, sequence(seqComponents), seqParameters(components))
val content = WhiskActionPut(Some(jsDefault("")), parameters = Some(Parameters("a", "A")))
put(entityStore, action, false)
// create an action sequence
Put(s"$collectionPath/${action.name}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
deleteAction(action.docid)
status should be(OK)
val response = responseAs[WhiskAction]
response.exec.kind should be(NODEJS6)
response.parameters should be(Parameters("a", "A"))
}
}
// this test is a repeat from ActionsApiTest BUT with old style sequence
it should "reset parameters when changing old-style sequence action to non sequence" in {
implicit val tid = transid()
val components = Vector("/_/a", "/_/x/b", "/n/a", "/n/x/c")
val seqComponents = components.map(stringToFullyQualifiedName(_))
val action = WhiskAction(namespace, aname, sequence(seqComponents), seqParameters(components))
val content = WhiskActionPut(Some(jsDefault("")))
put(entityStore, action, false)
// create an action sequence
Put(s"$collectionPath/${action.name}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
deleteAction(action.docid)
status should be(OK)
val response = responseAs[WhiskAction]
response.exec.kind should be(NODEJS6)
response.parameters shouldBe Parameters()
}
}
it should "update old-style sequence action with new annotations" in {
implicit val tid = transid()
val components = Vector("/_/a", "/_/x/b", "/n/a", "/n/x/c")
val seqComponents = components.map(stringToFullyQualifiedName(_))
val action = WhiskAction(namespace, aname, sequence(seqComponents))
val content = """{"annotations":[{"key":"old","value":"new"}]}""".parseJson.asJsObject
put(entityStore, action, false)
// create an action sequence
Put(s"$collectionPath/${action.name}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
deleteAction(action.docid)
status should be(OK)
val response = responseAs[String]
// contains the action
components map {c => response should include(c)}
// contains the annotations
response should include("old")
response should include("new")
}
}
it should "update an old-style sequence with new sequence" in {
implicit val tid = transid()
// old sequence
val seqName = EntityName(s"${aname}_new")
val oldComponents = Vector("/_/a", "/_/x/b", "/n/a", "/n/x/c").map(stringToFullyQualifiedName(_))
val oldSequence = WhiskAction(namespace, seqName, sequence(oldComponents))
put(entityStore, oldSequence)
// new sequence
val limit = 5 // count of bogus actions in sequence
val bogus = s"${aname}_bogus"
val bogusActionName = s"/_/${bogus}" // test that default namespace gets properly replaced
// put the action in the entity store so it exists
val bogusAction = WhiskAction(namespace, EntityName(bogus), jsDefault("??"), Parameters("x", "y"))
put(entityStore, bogusAction)
val seqComponents = for (i <- 1 to limit) yield stringToFullyQualifiedName(bogusActionName)
val seqAction = WhiskAction(namespace, seqName, sequence(seqComponents.toVector))
val content = WhiskActionPut(Some(seqAction.exec), Some(Parameters()))
// update an action sequence
Put(s"$collectionPath/${seqName}?overwrite=true", content) ~> Route.seal(routes(creds)) ~> check {
status should be(OK)
val response = responseAs[WhiskAction]
response.exec.kind should be(Exec.SEQUENCE)
response.limits should be(seqAction.limits)
response.publish should be(seqAction.publish)
response.version should be(seqAction.version.upPatch)
}
}
}
| prccaraujo/openwhisk | tests/src/test/scala/whisk/core/controller/test/migration/SequenceActionApiMigrationTests.scala | Scala | apache-2.0 | 7,894 |
/*
Copyright 2020 Coursera Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.coursera.courier.templates
import org.junit.Test
import java.net.URL
import java.io.IOException
import java.nio.ByteBuffer
import com.linkedin.data.DataMap
import com.linkedin.data.template.DataTemplateUtil
import com.linkedin.data.schema.EnumDataSchema
import sun.misc.URLClassPath
class EnumTestBridge {
def value = EnumTemplateTest.VALUE.name
def withName = EnumTemplateTest.withName("VALUE").name
}
class EnumTemplateRaceTest() {
/**
* Make a separate class loader for a test, and load [[EnumTestBridge]].
* The bug occurs while loading classes, hence the need for a class loader per test.
* @return The [[EnumTestBridge]] class in the new class loader.
*/
def createForeignClazz(): Class[_] = {
val systemClassLoader = this.getClass.getClassLoader()
val classLoader = new ClassLoader(systemClassLoader.getParent) {
override def findClass(name: String): Class[_] = {
val path = name.replace('.', '/').concat(".class")
val resOpt = systemClassLoader.getResourceAsStream(path)
Option(resOpt) match {
case Some(res) =>
try {
val bytes = Stream.continually(res.read).takeWhile(_ != -1).map(_.toByte).toArray
defineClass(name, ByteBuffer.wrap(bytes), null)
} catch {
case e: IOException => throw new ClassNotFoundException(name, e)
}
case None => throw new ClassNotFoundException(name)
}
}
override def loadClass(name: String, resolve: Boolean) =
super.loadClass(name, true)
}
classLoader.loadClass(new EnumTestBridge().getClass.getName)
}
class TestSetup() extends Race {
private val bridgeClazz = createForeignClazz()
def method(name: String) = bridgeClazz.getMethod(name)
private val foreignObject = bridgeClazz.getConstructor().newInstance()
def value(): String = s"""${method("value").invoke(foreignObject)}"""
def withName(): String = s"""${method("withName").invoke(foreignObject)}"""
}
@Test
def raceValueValue(): Unit = {
val testSetup = new TestSetup()
assert(testSetup.race(testSetup.value(), testSetup.value()))
}
@Test
def raceValueWithName(): Unit = {
val testSetup = new TestSetup()
assert(testSetup.race(testSetup.value(), testSetup.withName()))
}
@Test
def raceWithNameWithName(): Unit = {
val testSetup = new TestSetup()
assert(testSetup.race(testSetup.withName(), testSetup.withName()))
}
}
/* The remaining test classes and objects resemble those created
by Courier.
*/
sealed abstract class EnumTemplateTest(name: String,
properties: Option[DataMap])
extends ScalaEnumTemplateSymbol(name, properties) {}
object EnumTemplateTest extends ScalaEnumTemplate[EnumTemplateTest] {
case object VALUE extends EnumTemplateTest("VALUE", properties("VALUE"))
val SCHEMA =
DataTemplateUtil.parseSchema("""
{ "type": "enum",
"name": "EnumTypeB",
"namespace": "org.coursera.courier.templates",
"symbols": ["VALUE"]
}""").asInstanceOf[EnumDataSchema]
override def withName(s: String): EnumTemplateTest = {
symbols.find(_.toString == s).get
}
}
| coursera/courier | scala/runtime/src/test/scala/org/coursera/courier/templates/EnumTemplateRaceTest.scala | Scala | apache-2.0 | 3,807 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution
import org.apache.spark.{broadcast, TaskContext}
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.plans.physical.Partitioning
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.aggregate.HashAggregateExec
import org.apache.spark.sql.execution.joins.{BroadcastHashJoinExec, SortMergeJoinExec}
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
* An interface for those physical operators that support codegen.
*/
trait CodegenSupport extends SparkPlan {
/** Prefix used in the current operator's variable names. */
private def variablePrefix: String = this match {
case _: HashAggregateExec => "agg"
case _: BroadcastHashJoinExec => "bhj"
case _: SortMergeJoinExec => "smj"
case _: RDDScanExec => "rdd"
case _: DataSourceScanExec => "scan"
case _ => nodeName.toLowerCase
}
/**
* Creates a metric using the specified name.
*
* @return name of the variable representing the metric
*/
def metricTerm(ctx: CodegenContext, name: String): String = {
ctx.addReferenceObj(name, longMetric(name))
}
/**
   * Whether this SparkPlan supports whole stage codegen or not.
*/
def supportCodegen: Boolean = true
/**
* Which SparkPlan is calling produce() of this one. It's itself for the first SparkPlan.
*/
protected var parent: CodegenSupport = null
/**
   * Returns all the RDDs of InternalRow which generate the input rows.
*
* Note: right now we support up to two RDDs.
*/
def inputRDDs(): Seq[RDD[InternalRow]]
/**
* Returns Java source code to process the rows from input RDD.
*/
final def produce(ctx: CodegenContext, parent: CodegenSupport): String = executeQuery {
this.parent = parent
ctx.freshNamePrefix = variablePrefix
s"""
|${ctx.registerComment(s"PRODUCE: ${this.simpleString}")}
|${doProduce(ctx)}
""".stripMargin
}
/**
   * Generate the Java source code to process the rows; should be overridden by subclasses to support codegen.
*
* doProduce() usually generate the framework, for example, aggregation could generate this:
*
* if (!initialized) {
* # create a hash map, then build the aggregation hash map
* # call child.produce()
* initialized = true;
* }
* while (hashmap.hasNext()) {
* row = hashmap.next();
* # build the aggregation results
* # create variables for results
* # call consume(), which will call parent.doConsume()
* if (shouldStop()) return;
* }
*/
protected def doProduce(ctx: CodegenContext): String
/**
* Consume the generated columns or row from current SparkPlan, call its parent's `doConsume()`.
*/
final def consume(ctx: CodegenContext, outputVars: Seq[ExprCode], row: String = null): String = {
val inputVars =
if (row != null) {
ctx.currentVars = null
ctx.INPUT_ROW = row
output.zipWithIndex.map { case (attr, i) =>
BoundReference(i, attr.dataType, attr.nullable).genCode(ctx)
}
} else {
assert(outputVars != null)
assert(outputVars.length == output.length)
// outputVars will be used to generate the code for UnsafeRow, so we should copy them
outputVars.map(_.copy())
}
val rowVar = if (row != null) {
ExprCode("", "false", row)
} else {
if (outputVars.nonEmpty) {
val colExprs = output.zipWithIndex.map { case (attr, i) =>
BoundReference(i, attr.dataType, attr.nullable)
}
val evaluateInputs = evaluateVariables(outputVars)
// generate the code to create a UnsafeRow
ctx.INPUT_ROW = row
ctx.currentVars = outputVars
val ev = GenerateUnsafeProjection.createCode(ctx, colExprs, false)
val code = s"""
|$evaluateInputs
|${ev.code.trim}
""".stripMargin.trim
ExprCode(code, "false", ev.value)
} else {
// There is no columns
ExprCode("", "false", "unsafeRow")
}
}
ctx.freshNamePrefix = parent.variablePrefix
val evaluated = evaluateRequiredVariables(output, inputVars, parent.usedInputs)
s"""
|${ctx.registerComment(s"CONSUME: ${parent.simpleString}")}
|$evaluated
|${parent.doConsume(ctx, inputVars, rowVar)}
""".stripMargin
}
/**
   * Returns source code to evaluate all the variables, and clears their code, to prevent
   * them from being evaluated twice.
*/
protected def evaluateVariables(variables: Seq[ExprCode]): String = {
val evaluate = variables.filter(_.code != "").map(_.code.trim).mkString("\\n")
variables.foreach(_.code = "")
evaluate
}
/**
* Returns source code to evaluate the variables for required attributes, and clear the code
   * of evaluated variables, to prevent them from being evaluated twice.
*/
protected def evaluateRequiredVariables(
attributes: Seq[Attribute],
variables: Seq[ExprCode],
required: AttributeSet): String = {
val evaluateVars = new StringBuilder
variables.zipWithIndex.foreach { case (ev, i) =>
if (ev.code != "" && required.contains(attributes(i))) {
evaluateVars.append(ev.code.trim + "\\n")
ev.code = ""
}
}
evaluateVars.toString()
}
/**
   * The subset of inputSet that should be evaluated before this plan.
*
* We will use this to insert some code to access those columns that are actually used by current
* plan before calling doConsume().
*/
def usedInputs: AttributeSet = references
/**
* Generate the Java source code to process the rows from child SparkPlan.
*
   * This should be overridden by subclasses to support codegen.
*
* For example, Filter will generate the code like this:
*
* # code to evaluate the predicate expression, result is isNull1 and value2
* if (isNull1 || !value2) continue;
* # call consume(), which will call parent.doConsume()
*
* Note: A plan can either consume the rows as UnsafeRow (row), or a list of variables (input).
*/
def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: ExprCode): String = {
throw new UnsupportedOperationException
}
}
/**
* InputAdapter is used to hide a SparkPlan from a subtree that support codegen.
*
* This is the leaf node of a tree with WholeStageCodegen that is used to generate code
* that consumes an RDD iterator of InternalRow.
*/
case class InputAdapter(child: SparkPlan) extends UnaryExecNode with CodegenSupport {
override def output: Seq[Attribute] = child.output
override def outputPartitioning: Partitioning = child.outputPartitioning
override def outputOrdering: Seq[SortOrder] = child.outputOrdering
override def doExecute(): RDD[InternalRow] = {
child.execute()
}
override def doExecuteBroadcast[T](): broadcast.Broadcast[T] = {
child.doExecuteBroadcast()
}
override def inputRDDs(): Seq[RDD[InternalRow]] = {
child.execute() :: Nil
}
override def doProduce(ctx: CodegenContext): String = {
val input = ctx.freshName("input")
// Right now, InputAdapter is only used when there is one input RDD.
ctx.addMutableState("scala.collection.Iterator", input, s"$input = inputs[0];")
val row = ctx.freshName("row")
s"""
| while ($input.hasNext()) {
| InternalRow $row = (InternalRow) $input.next();
| ${consume(ctx, null, row).trim}
| if (shouldStop()) return;
| }
""".stripMargin
}
override def generateTreeString(
depth: Int,
lastChildren: Seq[Boolean],
builder: StringBuilder,
verbose: Boolean,
prefix: String = ""): StringBuilder = {
child.generateTreeString(depth, lastChildren, builder, verbose, "")
}
}
object WholeStageCodegenExec {
val PIPELINE_DURATION_METRIC = "duration"
}
/**
 * WholeStageCodegen compiles a subtree of plans that support codegen together into a single Java
* function.
*
 * Here is the call graph for generating Java source (plan A supports codegen, but plan B does not):
*
* WholeStageCodegen Plan A FakeInput Plan B
* =========================================================================
*
* -> execute()
* |
* doExecute() ---------> inputRDDs() -------> inputRDDs() ------> execute()
* |
* +-----------------> produce()
* |
* doProduce() -------> produce()
* |
* doProduce()
* |
* doConsume() <--------- consume()
* |
* doConsume() <-------- consume()
*
* SparkPlan A should override doProduce() and doConsume().
*
* doCodeGen() will create a CodeGenContext, which will hold a list of variables for input,
 * used to generate code for BoundReference.
*/
case class WholeStageCodegenExec(child: SparkPlan) extends UnaryExecNode with CodegenSupport {
override def output: Seq[Attribute] = child.output
override def outputPartitioning: Partitioning = child.outputPartitioning
override def outputOrdering: Seq[SortOrder] = child.outputOrdering
override lazy val metrics = Map(
"pipelineTime" -> SQLMetrics.createTimingMetric(sparkContext,
WholeStageCodegenExec.PIPELINE_DURATION_METRIC))
/**
* Generates code for this subtree.
*
* @return the tuple of the codegen context and the actual generated source.
*/
def doCodeGen(): (CodegenContext, CodeAndComment) = {
val ctx = new CodegenContext
val code = child.asInstanceOf[CodegenSupport].produce(ctx, this)
val source = s"""
public Object generate(Object[] references) {
return new GeneratedIterator(references);
}
${ctx.registerComment(s"""Codegend pipeline for\\n${child.treeString.trim}""")}
final class GeneratedIterator extends org.apache.spark.sql.execution.BufferedRowIterator {
private Object[] references;
private scala.collection.Iterator[] inputs;
${ctx.declareMutableStates()}
public GeneratedIterator(Object[] references) {
this.references = references;
}
public void init(int index, scala.collection.Iterator[] inputs) {
partitionIndex = index;
this.inputs = inputs;
${ctx.initMutableStates()}
${ctx.initPartition()}
}
${ctx.declareAddedFunctions()}
protected void processNext() throws java.io.IOException {
${code.trim}
}
}
""".trim
// try to compile, helpful for debug
val cleanedSource = CodeFormatter.stripOverlappingComments(
new CodeAndComment(CodeFormatter.stripExtraNewLines(source), ctx.getPlaceHolderToComments()))
logDebug(s"\\n${CodeFormatter.format(cleanedSource)}")
(ctx, cleanedSource)
}
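  /*
   * Illustrative note (assumes the debug helpers are available on the classpath): the
   * source assembled above can be inspected from user code, e.g.
   *
   *   import org.apache.spark.sql.execution.debug._
   *   df.debugCodegen()   // prints the whole-stage generated Java source for `df`
   */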
override def doExecute(): RDD[InternalRow] = {
val (ctx, cleanedSource) = doCodeGen()
// try to compile and fallback if it failed
try {
CodeGenerator.compile(cleanedSource)
} catch {
case e: Exception if !Utils.isTesting && sqlContext.conf.wholeStageFallback =>
        // We should have already seen the error message
logWarning(s"Whole-stage codegen disabled for this plan:\\n $treeString")
return child.execute()
}
val references = ctx.references.toArray
val durationMs = longMetric("pipelineTime")
val rdds = child.asInstanceOf[CodegenSupport].inputRDDs()
assert(rdds.size <= 2, "Up to two input RDDs can be supported")
if (rdds.length == 1) {
rdds.head.mapPartitionsWithIndex { (index, iter) =>
val clazz = CodeGenerator.compile(cleanedSource)
val buffer = clazz.generate(references).asInstanceOf[BufferedRowIterator]
buffer.init(index, Array(iter))
new Iterator[InternalRow] {
override def hasNext: Boolean = {
val v = buffer.hasNext
if (!v) durationMs += buffer.durationMs()
v
}
override def next: InternalRow = buffer.next()
}
}
} else {
// Right now, we support up to two input RDDs.
rdds.head.zipPartitions(rdds(1)) { (leftIter, rightIter) =>
Iterator((leftIter, rightIter))
// a small hack to obtain the correct partition index
}.mapPartitionsWithIndex { (index, zippedIter) =>
val (leftIter, rightIter) = zippedIter.next()
val clazz = CodeGenerator.compile(cleanedSource)
val buffer = clazz.generate(references).asInstanceOf[BufferedRowIterator]
buffer.init(index, Array(leftIter, rightIter))
new Iterator[InternalRow] {
override def hasNext: Boolean = {
val v = buffer.hasNext
if (!v) durationMs += buffer.durationMs()
v
}
override def next: InternalRow = buffer.next()
}
}
}
}
override def inputRDDs(): Seq[RDD[InternalRow]] = {
throw new UnsupportedOperationException
}
override def doProduce(ctx: CodegenContext): String = {
throw new UnsupportedOperationException
}
override def doConsume(ctx: CodegenContext, input: Seq[ExprCode], row: ExprCode): String = {
val doCopy = if (ctx.copyResult) {
".copy()"
} else {
""
}
s"""
|${row.code}
|append(${row.value}$doCopy);
""".stripMargin.trim
}
override def generateTreeString(
depth: Int,
lastChildren: Seq[Boolean],
builder: StringBuilder,
verbose: Boolean,
prefix: String = ""): StringBuilder = {
child.generateTreeString(depth, lastChildren, builder, verbose, "*")
}
}
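/* A minimal sketch (not part of the original source) of how the code generated for one of these
 * pipelines can be inspected from a Spark shell; `spark` is assumed to be an active SparkSession
 * built against this execution package:
 *
 * {{{
 *   import org.apache.spark.sql.execution.WholeStageCodegenExec
 *   val plan = spark.range(10).filter("id > 5").queryExecution.executedPlan
 *   plan.collectFirst { case w: WholeStageCodegenExec => w }.foreach { w =>
 *     val (_, source) = w.doCodeGen()
 *     println(source.body)   // the generated Java source backing this pipeline
 *   }
 * }}}
 */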
/**
* Find the chained plans that support codegen, collapse them together as WholeStageCodegen.
*/
case class CollapseCodegenStages(conf: SQLConf) extends Rule[SparkPlan] {
private def supportCodegen(e: Expression): Boolean = e match {
case e: LeafExpression => true
// CodegenFallback requires the input to be an InternalRow
case e: CodegenFallback => false
case _ => true
}
private def numOfNestedFields(dataType: DataType): Int = dataType match {
case dt: StructType => dt.fields.map(f => numOfNestedFields(f.dataType)).sum
case m: MapType => numOfNestedFields(m.keyType) + numOfNestedFields(m.valueType)
case a: ArrayType => numOfNestedFields(a.elementType)
case u: UserDefinedType[_] => numOfNestedFields(u.sqlType)
case _ => 1
}
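  // Worked example: a schema of struct<a: int, b: array<struct<c: int, d: string>>> counts as
  // 1 (a) + 2 (c, d) = 3 nested fields when compared against conf.wholeStageMaxNumFields below.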
private def supportCodegen(plan: SparkPlan): Boolean = plan match {
case plan: CodegenSupport if plan.supportCodegen =>
val willFallback = plan.expressions.exists(_.find(e => !supportCodegen(e)).isDefined)
// the generated code will be huge if there are too many columns
val hasTooManyOutputFields =
numOfNestedFields(plan.schema) > conf.wholeStageMaxNumFields
val hasTooManyInputFields =
plan.children.map(p => numOfNestedFields(p.schema)).exists(_ > conf.wholeStageMaxNumFields)
!willFallback && !hasTooManyOutputFields && !hasTooManyInputFields
case _ => false
}
/**
* Inserts an InputAdapter on top of those that do not support codegen.
*/
private def insertInputAdapter(plan: SparkPlan): SparkPlan = plan match {
case j @ SortMergeJoinExec(_, _, _, _, left, right) if j.supportCodegen =>
// The children of SortMergeJoin should do codegen separately.
j.copy(left = InputAdapter(insertWholeStageCodegen(left)),
right = InputAdapter(insertWholeStageCodegen(right)))
case p if !supportCodegen(p) =>
// collapse them recursively
InputAdapter(insertWholeStageCodegen(p))
case p =>
p.withNewChildren(p.children.map(insertInputAdapter))
}
/**
* Inserts a WholeStageCodegen on top of those that support codegen.
*/
private def insertWholeStageCodegen(plan: SparkPlan): SparkPlan = plan match {
    // For operators that will output domain objects, do not insert WholeStageCodegen for them, as
    // domain objects cannot be written into an unsafe row.
case plan if plan.output.length == 1 && plan.output.head.dataType.isInstanceOf[ObjectType] =>
plan.withNewChildren(plan.children.map(insertWholeStageCodegen))
case plan: CodegenSupport if supportCodegen(plan) =>
WholeStageCodegenExec(insertInputAdapter(plan))
case other =>
other.withNewChildren(other.children.map(insertWholeStageCodegen))
}
def apply(plan: SparkPlan): SparkPlan = {
if (conf.wholeStageEnabled) {
insertWholeStageCodegen(plan)
} else {
plan
}
}
}
| Panos-Bletsos/spark-cost-model-optimizer | sql/core/src/main/scala/org/apache/spark/sql/execution/WholeStageCodegenExec.scala | Scala | apache-2.0 | 17,744 |
package net.fwbrasil.bond
import scala.reflect.macros.whitebox.Context
import scala.util.Failure
import scala.util.Success
import scala.util.Try
import net.fwbrasil.smirror.SInstanceMethod
import net.fwbrasil.smirror.runtimeMirror
import net.fwbrasil.smirror.sClassOf
object Macros {
private val classLoader = getClass.getClassLoader
private implicit lazy val mirror = runtimeMirror(classLoader)
def lift[T, U, M](c: Context)(value: c.Expr[U])(
implicit t: c.WeakTypeTag[T], u: c.WeakTypeTag[U], m: c.WeakTypeTag[M]) = {
import c.universe._
c.inferImplicitView(value.tree, value.tree.tpe, t.tpe)
isValid[U, M](c) match {
case Success(true) =>
// ok
case Success(false) =>
c.error(c.enclosingPosition,
s"The lifting of '${u.tpe}'' to '${m.tpe}' is not valid (Bond)")
case Failure(ex) =>
c.error(c.enclosingPosition,
s"'${u.tpe}' is not liftable to '${m.tpe}' - ${ex.getMessage} (Bond)")
}
q"""
$value.asInstanceOf[${value.actualType} with $m]
"""
}
private def isValid[U, M](c: Context)(implicit u: c.WeakTypeTag[U], m: c.WeakTypeTag[M]) =
for {
origin <- extractConstantTypeArg(c)(u.tpe.baseType(m.tpe.baseClasses.head))
target <- extractConstantTypeArg(c)(m.tpe)
valid <- tryFastJavaReflection[U, M](c)(origin, target)
} yield {
valid
}
private def tryFastJavaReflection[U, M](c: Context)(
origin: c.universe.Constant, target: c.universe.Constant)(
implicit u: c.WeakTypeTag[U], m: c.WeakTypeTag[M]) = {
import c.universe._
for {
lift <- findLiftMethod(c)(weakTypeTag[M].tpe)
valid <- validateLift(lift, origin.value, target.value)
} yield {
valid
}
}
private def extractConstantTypeArg(c: Context)(tpe: c.Type) =
Try {
import c.universe._
tpe.typeArgs match {
case List(ConstantType(c @ Constant(value))) =>
c
case _ =>
throw new IllegalStateException(
s"Expected a single constant type arg, but got ${tpe.typeArgs} for $tpe")
}
}
private def findLiftMethod(c: Context)(tpe: c.Type) =
Try {
val cls = classLoader.loadClass(tpe.baseClasses.head.fullName)
val Some(method) = sClassOf(cls).companionObjectOption.flatMap(_.methods.find(_.name == "lift"))
method
}
private def validateLift(method: SInstanceMethod[_], origin: Any, target: Any) =
Try {
method.invoke(origin, target).asInstanceOf[Boolean]
}
} | fwbrasil/bond | src/main/scala/net/fwbrasil/bond/AMacros.scala | Scala | lgpl-2.1 | 2,526 |
package inputdata
import org.apache.spark.mllib.recommendation.Rating
import org.apache.spark.rdd.RDD
/**
* Created by Ondra Fiedler on 8.8.14.
*/
/**
* Basis for DataHolders of Netflix data (http://www.netflixprize.com/)
* @param dataDirectoryPath Directory containing the Netflix data
*/
abstract class NetflixDataHolder(dataDirectoryPath: String) extends DataHolder {
protected val productsIDsToNameMap = loadIDsToProductnameMapFromADirectory()
/**
   * Loads the mapping from movie IDs to titles from the file "movie_titles.txt"
* @return Map: movieID -> title
*/
protected def loadIDsToProductnameMapFromADirectory(): Map[Int, String] = {
val sc = spark.sparkEnvironment.sc
val movies = sc.textFile(dataDirectoryPath + "/movie_titles.txt").map { line =>
val fields = line.split(",")
// format: (movieID, movieName)
(fields(0).toInt, fields(2) + " (" + fields(1) + ")")
}.collect.toMap
movies
}
}
/**
 * Loads Netflix data from one file. Each line has the format: movieID,userID,rating,date.
* @param dataDirectoryPath Directory containing the Netflix data
 * @param filename Short filename of the file with ratings (default: "ratings.txt")
*/
class NetflixInOneFileDataHolder(dataDirectoryPath: String, filename: String = "ratings.txt") extends NetflixDataHolder(dataDirectoryPath) with Serializable {
protected val ratings = {
val sc = spark.sparkEnvironment.sc
val ratingsRDD = sc.textFile(dataDirectoryPath + "/" + filename).map {
line => val fields = line.split(",")
(Rating(fields(1).toInt, fields(0).toInt, fields(2).toDouble))
}
ratingsRDD
}
}
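// A sketch of how one (made-up) ratings line is parsed by the map above:
//   "8,1744889,3,2005-09-06"  =>  Rating(user = 1744889, product = 8, rating = 3.0)
// The trailing date column is simply ignored.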
/**
* Loads Netflix data from the original files.
* @param dataDirectoryPath Directory containing the Netflix data. This directory has to contain a sub-directory "training_set" with ratings files.
*/
class NetflixInManyFilesDataHolder(dataDirectoryPath: String) extends NetflixDataHolder(dataDirectoryPath) with Serializable {
protected val ratings = loadRatingsFromADirectory()
protected def loadRatingsFromADirectory(): RDD[Rating] = {
val dir = new java.io.File(dataDirectoryPath).listFiles.filter(f => f.getName == "training_set")
if (dir.length != 1) throw new WrongInputDataException
val files = dir(0).listFiles
val ratingsRDDsArray = files.map { file => loadRatingsFromOneFile(file.getAbsolutePath)}
val ratings = spark.sparkEnvironment.sc.union(ratingsRDDsArray)
ratings.persist.coalesce(77)
}
protected def loadRatingsFromOneFile(absoluteFilePath: String): RDD[Rating] = {
val ratingsTxtRDD = spark.sparkEnvironment.sc.textFile(absoluteFilePath)
val movieIDLine = ratingsTxtRDD.first()
val movieID = movieIDLine.split(":")(0).toInt
val ratingsRDD = ratingsTxtRDD.map(line => if (line == movieIDLine) {
Rating(-1, -1, -1)
} else {
val fields = line.split(",")
(Rating(fields(0).toInt, movieID, fields(1).toDouble))
})
ratingsRDD.filter(rat => rat.user >= 0)
}
} | OndraFiedler/spark-recommender | src/main/scala/inputdata/NetflixDataHolder.scala | Scala | mit | 2,996 |
package test.scala
import org.specs.Specification
import org.specs.mock.Mockito
import org.mockito.Matchers._
import com.protose.resque._
import com.protose.resque.Machine._
import com.protose.resque.FancySeq._
import com.redis.Redis
import java.util.Date
object WorkerSpec extends Specification with Mockito {
val resque = mock[Resque]
val worker = new Worker(resque, List("someAwesomeQueue", "someOtherAwesomeQueue"))
val startKey = List("resque", "worker", worker.id, "started").join(":")
val job = mock[Job]
"it has a string representation" in {
val expectedId = hostname + ":" + pid + ":" + "someAwesomeQueue,someOtherAwesomeQueue"
worker.id must_== expectedId
}
"starting a worker" in {
worker.start
"registers the worker with resque" in {
resque.register(worker) was called
}
}
"stopping a worker" in {
worker.stop
"unregisters the worker" in {
resque.unregister(worker) was called
}
}
"working off the next job" in {
"when the job succeeds" in {
worker.work(job)
"performs the job" in {
job.perform was called
}
"notifies the job of success" in {
resque.success(job) was called
}
}
"when the job fails" in {
val exception = new NullPointerException("asdf")
job.perform throws exception
worker.work(job)
"it registers a failure" in {
resque.failure(job, exception) was called
}
}
}
}
// vim: set ts=4 sw=4 et:
| jamesgolick/scala-resque-worker | src/test/scala/WorkerSpec.scala | Scala | mit | 1,674 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.camel
package scala
package dsl
import org.apache.camel.model._
import org.apache.camel.processor.aggregate.AggregationStrategy
import org.apache.camel.scala.dsl.builder.RouteBuilder
import spi.Policy
import reflect.{ClassTag, classTag}
import java.util.Comparator
abstract class SAbstractDefinition[P <: ProcessorDefinition[_]] extends DSL with Wrapper[P] with Block {
val target: P
val unwrap = target
implicit val builder: RouteBuilder
implicit def predicateBuilder(predicate: Exchange => Any) = new ScalaPredicate(predicate)
implicit def expressionBuilder(expression: Exchange => Any) = new ScalaExpression(expression)
def apply(block: => Unit) = {
builder.build(this, block)
this
}
/**
* Helper method to return this Scala type instead of creating another wrapper type for the processor
*/
def wrap(block: => Unit): this.type = {
block
this
}
// EIPs
//-----------------------------------------------------------------
def aggregate(expression: Exchange => Any, strategy: AggregationStrategy) = SAggregateDefinition(target.aggregate(expression, strategy))
def as[Target](toType: Class[Target], charset: String = null) = wrap(target.convertBodyTo(toType, charset))
def attempt: STryDefinition = STryDefinition(target.doTry())
def bean(bean: Any) = bean match {
case cls: Class[_] => wrap(target.bean(cls))
case ref: String => wrap(target.bean(ref))
case obj: Any => wrap(target.bean(obj))
}
def choice = SChoiceDefinition(target.choice)
def convertBodyTo[Target](toType: Class[Target], charset: String = null) = wrap(target.convertBodyTo(toType, charset))
def delay(period: Period) = SDelayDefinition(target.delay(period.milliseconds))
def dynamicRouter(expression: Exchange => Any) = wrap(target.dynamicRouter(expression))
def enrich(uri: String, strategy: AggregationStrategy) = wrap(target.enrich(uri, strategy))
def enrich(uri: String, strategy: AggregationStrategy, aggregateOnException: Boolean) = wrap(target.enrich(uri, strategy, aggregateOnException))
def filter(predicate: Exchange => Any) = SFilterDefinition(target.filter(predicateBuilder(predicate)))
def handle[E <: Throwable : ClassTag](block: => Unit) = SOnExceptionDefinition[E](target.onException(classTag[E].runtimeClass.asInstanceOf[Class[E]])).apply(block)
def id(id : String) = wrap(target.id(id))
def idempotentConsumer(expression: Exchange => Any) = SIdempotentConsumerDefinition(target.idempotentConsumer(expression, null))
@Deprecated
def inOnly = wrap(target.inOnly)
@Deprecated
def inOut = wrap(target.inOut)
def loadbalance = SLoadBalanceDefinition(target.loadBalance)
def log(message: String) = wrap(target.log(message))
def log(level: LoggingLevel, message: String) = wrap(target.log(level, message))
def log(level: LoggingLevel, logName: String, message: String) = wrap(target.log(level, logName, message))
def log(level: LoggingLevel, logName: String, marker: String, message: String) = wrap(target.log(level, logName, marker, message))
def loop(expression: Exchange => Any) = SLoopDefinition(target.loop(expression))
def marshal(format: DataFormatDefinition) = wrap(target.marshal(format))
def marshal(dataFormatRef: String) = wrap(target.marshal(dataFormatRef))
def multicast = SMulticastDefinition(target.multicast)
def onCompletion: SOnCompletionDefinition = {
val completion = SOnCompletionDefinition(target.onCompletion)
// let's end the block in the Java DSL, we have a better way of handling blocks here
completion.target.end
completion
}
def onCompletion(predicate: Exchange => Boolean) = onCompletion.when(predicate).asInstanceOf[SOnCompletionDefinition]
def onCompletion(config: Config[SOnCompletionDefinition]) = {
config.configure(onCompletion)
onCompletion
}
def otherwise: SChoiceDefinition = throw new Exception("otherwise is only supported in a choice block or after a when statement")
def pipeline = SPipelineDefinition(target.pipeline)
def policy(policy: Policy) = SPolicyDefinition(target.policy(policy))
def pollEnrich(uri: String, strategy: AggregationStrategy = null, timeout: Long = -1) =
wrap(target.pollEnrich(uri, timeout, strategy))
def pollEnrich(uri: String, strategy: AggregationStrategy, timeout: Long, aggregateOnException: Boolean) =
wrap(target.pollEnrich(uri, timeout, strategy, aggregateOnException))
def process(function: Exchange => Unit) = wrap(target.process(new ScalaProcessor(function)))
def process(processor: Processor) = wrap(target.process(processor))
def recipients(expression: Exchange => Any) = wrap(target.recipientList(expression))
def resequence(expression: Exchange => Any) = SResequenceDefinition(target.resequence(expression))
def removeHeader(name : String) = wrap(target.removeHeader(name))
def removeHeaders(pattern: String) = wrap(target.removeHeaders(pattern))
def removeHeaders(pattern: String, excludePatterns: String*) = wrap(target.removeHeaders(pattern, excludePatterns:_*))
def removeProperty(name: String) = wrap(target.removeProperty(name))
def removeProperties(pattern: String) = wrap(target.removeProperties(pattern))
def removeProperties(pattern: String, excludePatterns: String*) = wrap(target.removeProperties(pattern, excludePatterns:_*))
def rollback = wrap(target.rollback)
def routeId(routeId: String) = wrap(target.routeId(routeId))
def routeDescription(description: String) = wrap(target.routeDescription(description))
@Deprecated
def routingSlip(header: String) = wrap(target.routingSlip(header))
@Deprecated
def routingSlip(header: String, separator: String) = wrap(target.routingSlip(header, separator))
def routingSlip(expression: Exchange => Any, separator: String) = wrap(target.routingSlip(expression, separator))
def routingSlip(expression: Exchange => Any) = wrap(target.routingSlip(expression))
def script(expression: Exchange => Any) = wrap(target.script(expression))
def setBody(expression: Exchange => Any) = wrap(target.setBody(expression))
def setFaultBody(expression: Exchange => Any) = wrap(target.setFaultBody(expression))
def setHeader(name: String, expression: Exchange => Any) = wrap(target.setHeader(name, expression))
def setExchangePattern(mep: ExchangePattern) = wrap(target.setExchangePattern(mep))
def setProperty(name: String, expression: Exchange => Any) = wrap(target.setProperty(name, expression))
def sort[T](expression: (Exchange) => Any, comparator: Comparator[T] = null) = wrap(target.sort(expression, comparator))
def split(expression: Exchange => Any) = SSplitDefinition(target.split(expression))
def startupOrder(startupOrder :Int) = wrap(target.startupOrder(startupOrder))
def stop = wrap(target.stop)
def threads = SThreadsDefinition(target.threads)
def throttle(frequency: Frequency) = SThrottleDefinition(target.throttle(frequency.count).timePeriodMillis(frequency.period.milliseconds))
def throwException(exception: Exception) = wrap(target.throwException(exception))
def throwException(exceptionType: Class[_ <: Exception], message: String) = wrap(target.throwException(exceptionType, message))
def transacted = STransactedDefinition(target.transacted)
def transacted(ref: String) = STransactedDefinition(target.transacted(ref))
def transform(expression: Exchange => Any) = wrap(target.transform(expression))
def unmarshal(format: DataFormatDefinition) = wrap(target.unmarshal(format))
def unmarshal(dataFormatRef: String) = wrap(target.unmarshal(dataFormatRef))
def validate(expression: Exchange => Any) = SValidateDefinition(target.validate(predicateBuilder(expression)))
def when(filter: Exchange => Any): DSL with Block = SChoiceDefinition(target.choice).when(filter)
def wireTap(uri: String) = SWireTapDefinition(target.wireTap(uri))
def wireTap(uri: String, expression: Exchange => Any) = SWireTapDefinition(target.wireTap(uri).newExchangeBody(expression))
def -->(pattern: ExchangePattern, uri: String) = wrap(target.to(pattern, uri))
def -->(uris: String*) = to(uris:_*)
def to(pattern: ExchangePattern, uri: String) = wrap(target.to(pattern, uri))
def to(uris: String*) = {
uris.length match {
case 1 => target.to(uris(0))
case _ =>
val multi = multicast
uris.foreach(multi.to(_))
}
this
}
}
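/* A rough usage sketch (assumed, not part of this file) of the DSL these definitions back; the
 * endpoint URIs are placeholders and the ==> operator comes from the Scala RouteBuilder:
 *
 * {{{
 *   new RouteBuilder {
 *     "direct:start" ==> {
 *       filter(_.getIn.getBody == "keep") {
 *         to("mock:result")
 *       }
 *     }
 *   }
 * }}}
 */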
| grgrzybek/camel | components/camel-scala/src/main/scala/org/apache/camel/scala/dsl/SAbstractDefinition.scala | Scala | apache-2.0 | 9,164 |
package ml.wolfe.nlp.discourse
import ml.wolfe.nlp.CharOffsets
/**
* Created by matko on 3/20/15.
*/
case class DiscourseRelation(arg1: DiscourseArgument,
arg2: DiscourseArgument,
connective: DiscourseArgument,
id: String,
sense: List[String],
typ: String)
case class DiscourseArgument(text: String = "",
charOffsets: List[CharOffsets] = List.empty,
tokens: Seq[(Int, Int)] = Seq.empty) {
def charSpan:CharOffsets = CharOffsets(charOffsets.head.start,charOffsets.last.end + 1)
}
object DiscourseArgument {
val empty = DiscourseArgument()
}
| wolfe-pack/wolfe | wolfe-nlp/src/main/scala/ml/wolfe/nlp/discourse/Discourse.scala | Scala | apache-2.0 | 753 |
package epic.features
import breeze.linalg.Counter
import epic.framework.Feature
import epic.features.LongestFrequentSuffixFeaturizer.LongestFrequentSuffix
import breeze.numerics.I
/**
 * Maps rare words to their longest suffix occurring at least `commonWordThreshold` times in the
 * given counts; words more frequent than the threshold keep a plain indicator feature.
*
* @author dlwh
**/
class LongestFrequentSuffixFeaturizer private (fixedMap: Map[String, Feature],
suffixCounts: Counter[String, Double], commonWordThreshold: Double) extends WordFeaturizer[String] with Serializable {
def anchor(w: IndexedSeq[String]): WordFeatureAnchoring[String] = new WordFeatureAnchoring[String] {
val feats = words.map(w => Array(fixedMap.getOrElse(w, LongestFrequentSuffix(lookup(w)))))
def featuresForWord(pos: Int): Array[Feature] = if (pos < 0 || pos >= w.length) Array(BeginSentFeature) else feats(pos)
def words: IndexedSeq[String] = w
}
def lookupSentence(sent: IndexedSeq[String]) = {
sent.map(w => fixedMap.getOrElse(w, LongestFrequentSuffix(lookup(w))) match {
case LongestFrequentSuffix(s) => "-" + s
case IndicatorFeature(w) => w
})
}
private def lookup(x: String): String = {
x.tails.find(suffixCounts(_) >= commonWordThreshold).getOrElse("-UNK-")
}
}
object LongestFrequentSuffixFeaturizer {
def apply(counts: Counter[String, Double], commonWordThreshold: Int = 100) = {
var suffixCounts = Counter[String, Double]()
for( (k, v) <- counts.iterator; if v <= commonWordThreshold; tail <- k.tails) {
suffixCounts(tail) += v
}
suffixCounts = suffixCounts.mapValues(v => v * I(v >= commonWordThreshold))
def lookup(x: String): String = {
x.tails.find(suffixCounts(_) >= commonWordThreshold).getOrElse("-UNK-")
}
val map = Map.empty ++ (for( (w,v) <- counts.iterator) yield {
if (v > commonWordThreshold)
w -> IndicatorFeature(w)
else
w -> LongestFrequentSuffix(lookup(w))
})
new LongestFrequentSuffixFeaturizer(map, suffixCounts, commonWordThreshold)
}
case class LongestFrequentSuffix(suffix: String) extends Feature
}
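/* A small worked sketch (illustrative numbers only) of the behaviour implemented above:
 *
 * {{{
 *   import breeze.linalg.Counter
 *   val counts = Counter("walked" -> 40.0, "talked" -> 70.0, "the" -> 5000.0)
 *   val feat   = LongestFrequentSuffixFeaturizer(counts, commonWordThreshold = 100)
 *   // "the" (count 5000 > 100) keeps an IndicatorFeature; "walked" falls back to the longest
 *   // suffix whose aggregate count clears the threshold ("alked": 40 + 70 = 110), so the
 *   // sentence below renders as IndexedSeq("the", "-alked").
 *   feat.lookupSentence(IndexedSeq("the", "walked"))
 * }}}
 */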
| langkilde/epic | src/main/scala/epic/features/LongestFrequentSuffixFeaturizer.scala | Scala | apache-2.0 | 2,036 |
package pt1.week4
import org.scalatest.{FunSuite, Matchers}
import pt1.week5.{BondPricing, ShortRateLattice}
class FuturesPricingSpec extends FunSuite with Matchers {
private val sharePriceLattice = SharePriceLattice.generate(initialPrice = 100.0, termInYears = 0.25, volatility = 0.3,
numberOfPeriods = 15, interestRate = 0.02, dividendYield = 0.01)
test("Price should be as expected for futures") {
val futuresPrice = FuturesPricing.calculateForShares(sharePriceLattice, termInYears = 0.25, volatility = 0.3,
numberOfPeriods = 15, interestRate = 0.02, dividendYield = 0.01)
futuresPrice shouldBe (100.25 +- 0.01)
}
test("Price should be as expected for futures on bond") {
val faceValue = 100.0d
val coupon = 0.1d
val shortRateLattice = ShortRateLattice.generate(0.06d, 1.25d, 0.9d, 6)
val bondPricingMatrix = BondPricing.calculatePricingMatrix(shortRateLattice, faceValue, coupon, maturity = 6, q = 0.5d, p = 0.5d)
val noCouponPricingMatrix = bondPricingMatrix - (faceValue * coupon)
val futuresPrice = FuturesPricing.calculate(noCouponPricingMatrix, 4, 0.5d, 0.5d)
futuresPrice shouldBe (103.22 +- 0.01)
}
test("Price should be as expected for futures on zero-coupon bond") {
val shortRateLattice = ShortRateLattice.generate(0.05d, 1.1d, 0.9d, 10)
val bondPricingMatrix = BondPricing.calculatePricingMatrix(shortRateLattice, faceValue = 100.0d, coupon = 0.0d, maturity = 10, q = 0.5d, p = 0.5d)
val futuresPrice = FuturesPricing.calculate(bondPricingMatrix, 4, 0.5d, 0.5d)
futuresPrice shouldBe (74.82 +- 0.01)
}
}
| ligasgr/fe-and-rm | src/test/scala/pt1/week4/FuturesPricingSpec.scala | Scala | apache-2.0 | 1,604 |
import java.net.ServerSocket
import scala.util.Random
object UniquePortGenerator {
private[this] val usingPorts = collection.mutable.HashSet.empty[Int]
def getOpt(): Option[Int] = synchronized {
@annotation.tailrec
def loop(loopCount: Int): Option[Int] = {
val socket = new ServerSocket(0)
val port = try {
socket.getLocalPort
} finally {
socket.close()
}
if (usingPorts(port)) {
if (loopCount == 0) {
None
} else {
Thread.sleep(Random.nextInt(50))
loop(loopCount - 1)
}
} else {
usingPorts += port
Option(port)
}
}
loop(30)
}
def get(): Int = getOpt().getOrElse(sys.error("could not get port"))
}
| eiennohito/ScalaPB | e2e/src/test/scala/UniquePortGenerator.scala | Scala | apache-2.0 | 755 |
/*
* The MIT License
*
* Copyright (c) 2019 Fulcrum Genomics LLC
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package com.fulcrumgenomics.vcf
import com.fulcrumgenomics.FgBioDef.{PathToSequenceDictionary, PathToVcf, SafelyClosable, javaIterableToIterator}
import com.fulcrumgenomics.cmdline.{ClpGroups, FgBioTool}
import com.fulcrumgenomics.commons.util.LazyLogging
import com.fulcrumgenomics.fasta.SequenceDictionary
import com.fulcrumgenomics.sopt.{arg, clp}
import com.fulcrumgenomics.util.{Io, ProgressLogger}
import htsjdk.variant.variantcontext.VariantContextBuilder
import htsjdk.variant.variantcontext.writer.{Options, VariantContextWriterBuilder}
import htsjdk.variant.vcf.{VCFFileReader, VCFHeader}
@clp(description =
"""
|Updates then contig names in a VCF.
|
|The name of each sequence must match one of the names (including aliases) in the given sequence dictionary. The
|new name will be the primary (non-alias) name in the sequence dictionary.
""",
group = ClpGroups.VcfOrBcf)
class UpdateVcfContigNames
(@arg(flag='i', doc="Input VCF.") val input: PathToVcf,
@arg(flag='d', doc="The path to the sequence dictionary with contig aliases.") val dict: PathToSequenceDictionary,
@arg(flag='o', doc="Output VCF.") val output: PathToVcf,
@arg(doc="Skip missing contigs.") val skipMissing: Boolean = false
) extends FgBioTool with LazyLogging {
Io.assertReadable(input)
Io.assertReadable(Seq(input, dict))
Io.assertCanWriteFile(output)
override def execute(): Unit = {
val dict = SequenceDictionary(this.dict)
val reader = new VCFFileReader(this.input)
val header = {
import com.fulcrumgenomics.fasta.Converters.ToSAMSequenceDictionary
val h: VCFHeader = new VCFHeader(reader.getFileHeader)
h.setSequenceDictionary(dict.asSam)
h
}
val writer = {
new VariantContextWriterBuilder()
.setOutputPath(this.output)
.setOption(Options.INDEX_ON_THE_FLY)
.build()
}
writer.writeHeader(header)
// go through all the records
val progress = ProgressLogger(logger, noun = "variants", verb = "written")
reader.foreach { v =>
dict.get(v.getContig) match {
case None =>
if (skipMissing) logger.warning(s"Did not find contig ${v.getContig} in the sequence dictionary.")
else throw new IllegalStateException(s"Did not find contig ${v.getContig} in the sequence dictionary.")
case Some(info) =>
val newV = new VariantContextBuilder(v).chr(info.name).make()
progress.record(newV.getContig, newV.getStart)
writer.add(newV)
}
}
progress.logLast()
reader.safelyClose()
writer.close()
}
}
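// Example invocation (file paths are illustrative; flag names come from the @arg annotations above):
//   fgbio UpdateVcfContigNames -i in.vcf.gz -d reference.dict -o out.vcf.gz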
| fulcrumgenomics/fgbio | src/main/scala/com/fulcrumgenomics/vcf/UpdateVcfContigNames.scala | Scala | mit | 3,743 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import java.io._
import java.nio._
import java.nio.channels.FileChannel
import java.nio.file.StandardOpenOption
/* This code tests the correct function of java's FileChannel.truncate--some platforms don't work. */
object TestTruncate {
def main(args: Array[String]): Unit = {
val name = File.createTempFile("kafka", ".test")
name.deleteOnExit()
val file = FileChannel.open(name.toPath, StandardOpenOption.READ, StandardOpenOption.WRITE)
val buffer = ByteBuffer.allocate(12)
buffer.putInt(4).putInt(4).putInt(4)
buffer.rewind()
file.write(buffer)
println("position prior to truncate: " + file.position)
file.truncate(4)
println("position after truncate to 4: " + file.position)
}
}
| KevinLiLu/kafka | core/src/test/scala/other/kafka/TestTruncate.scala | Scala | apache-2.0 | 1,549 |
package edu.rice.habanero.benchmarks.bndbuffer
import edu.rice.habanero.actors.{HabaneroActor, HabaneroDeclarativeSelector}
import edu.rice.habanero.benchmarks.bndbuffer.ProdConsBoundedBufferConfig._
import edu.rice.habanero.benchmarks.{Benchmark, BenchmarkRunner}
import edu.rice.hj.Module0.finish
import edu.rice.hj.api.HjSuspendable
import scala.collection.mutable.ListBuffer
/**
* @author <a href="http://shams.web.rice.edu/">Shams Imam</a> ([email protected])
*/
object ProdConsHabaneroDeclSelectorBenchmark {
def main(args: Array[String]) {
BenchmarkRunner.runBenchmark(args, new ProdConsHabaneroDeclSelectorBenchmark)
}
private final class ProdConsHabaneroDeclSelectorBenchmark extends Benchmark {
def initialize(args: Array[String]) {
ProdConsBoundedBufferConfig.parseArgs(args)
}
def printArgInfo() {
ProdConsBoundedBufferConfig.printArgs()
}
def runIteration() {
finish(new HjSuspendable {
override def run() = {
val manager = new ManagerActor(
ProdConsBoundedBufferConfig.bufferSize,
ProdConsBoundedBufferConfig.numProducers,
ProdConsBoundedBufferConfig.numConsumers,
ProdConsBoundedBufferConfig.numItemsPerProducer)
manager.start()
}
})
}
def cleanupIteration(lastIteration: Boolean, execTimeMillis: Double) {
}
private class ManagerActor(bufferSize: Int, numProducers: Int, numConsumers: Int, numItemsPerProducer: Int)
extends HabaneroDeclarativeSelector[AnyRef](MessageSource.values().length) {
private val adjustedBufferSize: Int = bufferSize - numProducers
private val pendingData = new ListBuffer[ProdConsBoundedBufferConfig.DataItemMessage]
private var numTerminatedProducers: Int = 0
private val producers = Array.tabulate[ProducerActor](numProducers)(i =>
new ProducerActor(i, this, numItemsPerProducer))
private val consumers = Array.tabulate[ConsumerActor](numConsumers)(i =>
new ConsumerActor(i, this))
// disable processing of request from consumers
disable(MessageSource.CONSUMER)
override def onPostStart() {
consumers.foreach(loopConsumer => {
loopConsumer.start()
})
producers.foreach(loopProducer => {
loopProducer.start()
loopProducer.send(ProduceDataMessage.ONLY)
})
}
override def onPreExit() {
consumers.foreach(loopConsumer => {
loopConsumer.send(ConsumerExitMessage.ONLY)
})
}
override def registerGuards(): Unit = {
guard(MessageSource.PRODUCER, (msg: AnyRef) => pendingData.size < adjustedBufferSize)
guard(MessageSource.CONSUMER, (msg: AnyRef) => pendingData.nonEmpty)
}
override def doProcess(theMsg: AnyRef) {
theMsg match {
case dm: ProdConsBoundedBufferConfig.DataItemMessage =>
val producer: ProducerActor = dm.producer.asInstanceOf[ProducerActor]
pendingData.append(dm)
producer.send(ProduceDataMessage.ONLY)
case cm: ProdConsBoundedBufferConfig.ConsumerAvailableMessage =>
val consumer: ConsumerActor = cm.consumer.asInstanceOf[ConsumerActor]
consumer.send(pendingData.remove(0))
tryExit()
case _: ProdConsBoundedBufferConfig.ProducerExitMessage =>
numTerminatedProducers += 1
tryExit()
case msg =>
val ex = new IllegalArgumentException("Unsupported message: " + msg)
ex.printStackTrace(System.err)
}
}
private def tryExit() {
if (numTerminatedProducers == numProducers && pendingData.isEmpty) {
exit()
}
}
}
private class ProducerActor(id: Int, manager: ManagerActor, numItemsToProduce: Int) extends HabaneroActor[AnyRef] {
private var prodItem: Double = 0.0
private var itemsProduced: Int = 0
private def produceData() {
prodItem = processItem(prodItem, prodCost)
manager.send(MessageSource.PRODUCER, new ProdConsBoundedBufferConfig.DataItemMessage(prodItem, this))
itemsProduced += 1
}
override def process(theMsg: AnyRef) {
if (theMsg.isInstanceOf[ProdConsBoundedBufferConfig.ProduceDataMessage]) {
if (itemsProduced == numItemsToProduce) {
exit()
} else {
produceData()
}
} else {
val ex = new IllegalArgumentException("Unsupported message: " + theMsg)
ex.printStackTrace(System.err)
}
}
override def onPreExit() {
manager.send(MessageSource.PRODUCER, ProducerExitMessage.ONLY)
}
}
private class ConsumerActor(id: Int, manager: ManagerActor) extends HabaneroActor[AnyRef] {
private val consumerAvailableMessage = new ProdConsBoundedBufferConfig.ConsumerAvailableMessage(this)
private var consItem: Double = 0
override def onPostStart() {
manager.send(MessageSource.CONSUMER, consumerAvailableMessage)
}
protected def consumeDataItem(dataToConsume: Double) {
consItem = processItem(consItem + dataToConsume, consCost)
}
override def process(theMsg: AnyRef) {
theMsg match {
case dm: ProdConsBoundedBufferConfig.DataItemMessage =>
consumeDataItem(dm.data)
manager.send(MessageSource.CONSUMER, consumerAvailableMessage)
case _: ProdConsBoundedBufferConfig.ConsumerExitMessage =>
exit()
case msg =>
val ex = new IllegalArgumentException("Unsupported message: " + msg)
ex.printStackTrace(System.err)
}
}
}
}
}
| shamsmahmood/savina | src/main/scala/edu/rice/habanero/benchmarks/bndbuffer/ProdConsHabaneroDeclSelectorBenchmark.scala | Scala | gpl-2.0 | 5,739 |
package org.kongo.kafka.metrics
import java.util.regex.Pattern
import com.yammer.metrics.core.Metric
import com.yammer.metrics.core.MetricName
import com.yammer.metrics.core.MetricPredicate
case class RegexMetricPredicate(include: Option[Pattern], exclude: Option[Pattern]) extends MetricPredicate {
def matches(metricName: String): Boolean = {
val includeMatch = include.forall(p => p.matcher(metricName).matches())
val notExcludeMatch = exclude.forall(p => !p.matcher(metricName).matches())
includeMatch && notExcludeMatch
}
override def matches(name: MetricName, metric: Metric): Boolean = {
val metricName = MetricFormatter.format(name)
matches(metricName)
}
}
| kongo2002/kafka-statsd-reporter | src/main/scala/org/kongo/kafka/metrics/RegexMetricPredicate.scala | Scala | apache-2.0 | 699 |
/*
* This file is part of P2pCore.
*
* Copyright (C) 2012 Timur Mehrvarz, timur.mehrvarz(at)gmail.com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation <http://www.gnu.org/licenses/>, either
* version 3 of the License, or (at your option) any later version.
*/
package timur.p2pCore
import java.security.{ Security, MessageDigest }
object P2pEncrypt {
def main(args:Array[String]): Unit = {
if(args.length<2) {
println("arg1: keyFolderPath")
println("arg2: remotePublicKeyName (san .pub)")
println("arg3: rendezvous-string (optional)")
return
}
new P2pEncrypt(args(0), args(1), if(args.length>2) args(2) else null).start
}
}
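// Example invocation (class path and key names are illustrative), matching the args listed above:
//   scala -cp p2pCore.jar timur.p2pCore.P2pEncrypt keysAlice bob myRendezvousString
// where "keysAlice" is the key folder, "bob" refers to bob.pub inside it, and the last arg is optional.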
class P2pEncrypt(keyFolderPath:String, setRemoteKeyName:String, rendezvous:String) extends P2pBase {
var privKeyLocal:String = null
var pubKeyLocal:String = null
var pubKeyLocalFingerprint:String = null
var pubKeyRemote:String = null
var remoteKeyName = setRemoteKeyName
  /**
   * Reads or generates the local key pair, builds the match strings and starts the p2p session.
   */
override def start() :Int = {
init
val ret = readkeys
if(ret!=0)
return ret
return super.start
}
/**
* prepare org.bouncycastle.crypto.encodings.PKCS1Encoding in RsaEncrypt/RsaDecrypt
*/
def init() {
Security.addProvider(new ext.org.bouncycastle.jce.provider.BouncyCastleProvider())
}
/**
* readkeys will
* try to load privKeyLocal and pubKeyLocal from keyFolderPath
* or: generate a new key pair and set privKeyLocal and pubKeyLocal
* @return 0 if there was no error
*/
def readkeys() :Int = {
if(remoteKeyName.length<=0)
return -1
try {
privKeyLocal = io.Source.fromFile(keyFolderPath+"/key").mkString
val fullLocalKeyName = keyFolderPath+"/key.pub"
log("fullLocalKeyName="+fullLocalKeyName+" used for fingerprint matching")
pubKeyLocal = io.Source.fromFile(fullLocalKeyName).mkString
} catch {
case ex:Exception =>
// generate local key pair
val keyPair = RsaKeyGenerate.rsaKeyGenerate
new java.io.File(keyFolderPath).mkdir
pubKeyLocal = Base64.encode(keyPair.getPublic.getEncoded)
Tools.writeToFile(keyFolderPath+"/key.pub", pubKeyLocal)
privKeyLocal = Base64.encode(keyPair.getPrivate.getEncoded)
Tools.writeToFile(keyFolderPath+"/key", privKeyLocal)
}
if(buildMatchStrings!=0)
return -2
return 0
}
/**
* buildMatchStrings will define matchSource and matchTarget
* based on: rendezvous string - or -
* based on: pubKeyLocalFingerprint and pubKeyRemoteFingerprint
*/
def buildMatchStrings() :Int = {
try {
// create pubKeyLocalFingerprint based on pubKeyLocal
val messageDigest = MessageDigest.getInstance("SHA-1")
messageDigest.update(Base64.decode(pubKeyLocal))
pubKeyLocalFingerprint = RsaEncrypt.getHexString(messageDigest.digest)
} catch {
case ex:Exception =>
logEx("fingerprint setup error ex="+ex)
return -1
}
if(rendezvous!=null && rendezvous.length>0) {
// build match strings based on the rendezvous string
matchSource = rendezvous
matchTarget = rendezvous
log("matching clients with rendezvous string '"+rendezvous+"'")
return 0
}
try {
// create match strings based on fingerprints of the two public keys
val fullRemoteKeyName = keyFolderPath +"/" +remoteKeyName+".pub"
log("fullRemoteKeyName="+fullRemoteKeyName+" used for fingerprint matching")
pubKeyRemote = io.Source.fromFile(fullRemoteKeyName).mkString
val messageDigest = MessageDigest.getInstance("SHA-1")
messageDigest.update(Base64.decode(pubKeyRemote))
val pubKeyRemoteFingerprint = RsaEncrypt.getHexString(messageDigest.digest)
matchSource = pubKeyLocalFingerprint.substring(0,20)
matchTarget = pubKeyRemoteFingerprint.substring(0,20)
} catch {
case ex:Exception =>
logEx("fingerprint setup error ex="+ex)
return -2
}
return 0
}
/**
* p2pSendThread is called when a p2p connection was established
* if relayBasedP2pCommunication is set, p2p is relayed; else it is direct
* if pubKeyRemote is not known, a key fingerprint will be requested using "//requestPubKeyFingerprint"
   * processing will then be forwarded to p2pEncryptedCommunication
*/
override def p2pSendThread() {
if(pubKeyRemote!=null) {
// remote public key is known
log("p2pSendThread -> p2pEncryptedCommunication...")
p2pEncryptedCommunication
} else {
// remote public key is NOT known, request public key fingerprint, so we can check if we have the key stored already
log("p2pSendThread requestPubKeyFingerprint...")
p2pSend("//requestPubKeyFingerprint", udpConnectIpAddr,udpConnectPortInt) // unencrypted request for pubkey fingerprint
// p2pEncryptedCommunication will be called, as soon as we receive "//pubKeyFingerprint=..." in p2pReceiveHandler
}
}
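  /* Message flow sketch when the remote public key is not cached locally (derived from the
   * handlers below); A is this client, B is the peer:
   *   A -> B : "//requestPubKeyFingerprint"          (sent unencrypted)
   *   B -> A : "//pubKeyFingerprint=<sha1 hex>"      (sent unencrypted)
   *   A      : searches keyFolderPath for a stored *.pub file with that fingerprint, aborts if none
   *   A <-> B: RSA-encrypted "rsastr" messages via p2pEncryptedCommunication()
   */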
/**
* p2pReceiveMultiplexHandler will be called with an encoded P2pCore.Message
* will decrypt MsgString (if nesessary) and
* if there was no error, hand over the decrypted string from the other client to p2pReceivePreHandler
*/
override def p2pReceiveMultiplexHandler(protoMultiplex:P2pCore.Message) {
val command = protoMultiplex.getCommand
if(command=="string") {
super.p2pReceiveMultiplexHandler(protoMultiplex)
} else if(command=="rsastr") {
val len = protoMultiplex.getMsgLength.asInstanceOf[Int]
val receivedString = protoMultiplex.getMsgString
//val id = protoMultiplex.getMsgId
try {
// possible exception: ext.org.bouncycastle.crypto.InvalidCipherTextException: unknown block type
// possible exception: ext.org.bouncycastle.crypto.DataLengthException: input too large for RSA cipher
//log("p2pReceiveMultiplexHandler: crypted="+receivedString+" len="+receivedString.length)
val decryptString = RsaDecrypt.decrypt(privKeyLocal, receivedString)
p2pReceivePreHandler(decryptString) // -> p2pReceiveHandler
} catch {
case ex:Exception =>
logEx("p2pReceiveMultiplexHandler "+ex.getMessage)
ex.printStackTrace
}
}
}
/**
   * p2pReceivePreHandler is called as soon as received p2p data has been decrypted;
   * it will process special commands, such as "//requestPubKeyFingerprint", "//pubKeyFingerprint=...", "//check", "//ack", "//quit"
*/
override def p2pReceivePreHandler(str:String) {
if(str=="//requestPubKeyFingerprint") {
log("p2pReceivePreHandler: sending fingerprint of our pubkey on request="+pubKeyLocalFingerprint)
p2pSend("//pubKeyFingerprint="+pubKeyLocalFingerprint, udpConnectIpAddr, udpConnectPortInt)
} else if(str.startsWith("//pubKeyFingerprint=")) {
      val remoteKeyFingerprint = str.substring("//pubKeyFingerprint=".length) // skip the prefix, keep only the hex fingerprint
log("p2pReceivePreHandler: remoteKeyFingerprint="+remoteKeyFingerprint)
// search all stored pub keys for a match to remoteKeyFingerprint
pubKeyRemote = null
val fileArray = new java.io.File(keyFolderPath).listFiles
for(file <- fileArray.iterator.toList) {
if(pubKeyRemote==null) {
val fileName = file.getName.trim
if(fileName.length>4 && fileName.endsWith(".pub") && fileName!="key.pub") {
val key = io.Source.fromFile(keyFolderPath+"/"+fileName).mkString
val messageDigest = MessageDigest.getInstance("SHA-1")
messageDigest.update(Base64.decode(key))
val fingerprint = RsaEncrypt.getHexString(messageDigest.digest)
if(fingerprint==remoteKeyFingerprint) {
log("p2pReceivePreHandler: found stored pubKeyRemote in file "+fileName)
pubKeyRemote = key
}
}
}
}
if(pubKeyRemote==null) {
log("p2pReceivePreHandler: not found stored pubKeyRemote - abort session")
p2pQuitFlag = true
p2pQuit(true)
relayReceiveEncryptionFailed(remoteKeyFingerprint)
return
}
log("p2pReceivePreHandler -> p2pEncryptedCommunication...")
new Thread("datagramSendPublic") { override def run() {
p2pEncryptedCommunication
} }.start
} else {
super.p2pReceivePreHandler(str) // -> p2pReceiveHandler()
}
}
/**
   * storeRemotePublicKey persists a remote public key as keyFolderPath/keyName.pub
*/
def storeRemotePublicKey(keyName:String, keystring:String) {
Tools.writeToFile(keyFolderPath+"/"+keyName+".pub", keystring)
}
/**
* relayReceiveEncryptionFailed will be called after the received fingerprint has been evaluated
* and not matching public key was not found in the local filesystem
*/
def relayReceiveEncryptionFailed(remoteKeyFingerprint:String) {
log("relayReceiveEncryptionFailed failed to load key for remote key fingerprint='"+remoteKeyFingerprint+"'")
}
/**
   * p2pReceiveHandler is called to receive and process decrypted data strings from the other client,
   * data that was sent directly via UDP - or relayed via TCP (relayBasedP2pCommunication=true)
   * if relayBasedP2pCommunication is NOT set, we may use this to disconnect from the relay connection
*/
override def p2pReceiveHandler(str:String, host:String, port:Int) {
log("p2pReceiveHandler decryptString='"+str+"'")
}
/**
* relayReceiveHandler is NOT used in p2p mode; instead all received data goes to p2pReceiveHandler
* exception: if the relay link is being used as a udp-fallback (relayBasedP2pCommunication=true)
* a base64 encoded P2pCore.Message will be received here and will be forwarded to p2pReceiveMultiplexHandler
*/
override def relayReceiveHandler(str:String) {
if(relayBasedP2pCommunication) {
val p2pCoreMessage = Base64.decode(str)
val protoMultiplex = P2pCore.Message.parseFrom(p2pCoreMessage)
p2pReceiveMultiplexHandler(protoMultiplex) // -> p2pReceivePreHandler() -> p2pEncryptedCommunication()
return
}
log("relayReceiveHandler !relayBasedP2pComm str='"+str+"' pubKeyRemote="+pubKeyRemote+" UNEXPECTED IN P2P APP ###########")
}
/**
* p2pEncryptedCommunication is being called when p2p connection has been
* established and encryption has been enabled
*/
def p2pEncryptedCommunication() {
log("p2pEncryptedCommunication...")
for(i <- 0 until 3) {
val unencryptedMessage = "hello "+i // maxSize of unencrypted string ~128 bytes (?)
val encryptedMessage = RsaEncrypt.encrypt(pubKeyRemote, unencryptedMessage)
p2pSend(encryptedMessage, udpConnectIpAddr, udpConnectPortInt, "rsastr")
try { Thread.sleep(1000); } catch { case ex:Exception => }
}
p2pQuit(true)
}
}
| mehrvarz/P2pCore | src/P2pEncrypt.scala | Scala | gpl-3.0 | 10,828 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.blueeyes
package json
import org.scalacheck.Prop
import org.scalacheck.Arbitrary, Arbitrary.arbitrary
import quasar.precog.JsonTestSupport._
object JPathSpec extends Specification with ScalaCheck {
"Parser" should {
"parse all valid JPath strings" in {
prop { (jpath: JPath) =>
JPath(jpath.toString) == jpath
}
}
"forgivingly parse initial field name without leading dot" in {
JPath("foo.bar").nodes mustEqual (JPathField("foo") :: JPathField("bar") :: Nil)
}
}
"Extractor" should {
"extract all existing paths" in {
implicit val arb: Arbitrary[(JValue, List[(JPath, JValue)])] = Arbitrary {
for (jv <- arbitrary[JObject]) yield (jv, jv.flattenWithPath)
}
prop { (testData: (JValue, List[(JPath, JValue)])) =>
testData match {
case (obj, allPathValues) =>
val allProps = allPathValues.map {
case (path, pathValue) => path.extract(obj) == pathValue
}
allProps.foldLeft[Prop](true)(_ && _)
}
}
}
"extract a second level node" in {
val j = JObject(JField("address", JObject(JField("city", JString("B")) :: JField("street", JString("2")) :: Nil)) :: Nil)
JPath("address.city").extract(j) mustEqual (JString("B"))
}
}
"Parent" should {
"return parent" in {
JPath(".foo.bar").parent must beSome(JPath(".foo"))
}
"return Identity for path 1 level deep" in {
JPath(".foo").parent must beSome(NoJPath)
}
"return None when there is no parent" in {
NoJPath.parent mustEqual None
}
}
"Ancestors" should {
"return two ancestors" in {
JPath(".foo.bar.baz").ancestors mustEqual List(JPath(".foo.bar"), JPath(".foo"), NoJPath)
}
"return empty list for identity" in {
NoJPath.ancestors mustEqual Nil
}
}
"dropPrefix" should {
"return just the remainder" in {
JPath(".foo.bar[1].baz").dropPrefix(".foo.bar") must beSome(JPath("[1].baz"))
}
"return none on path mismatch" in {
JPath(".foo.bar[1].baz").dropPrefix(".foo.bar[2]") must beNone
}
}
"Ordering" should {
"sort according to nodes names/indexes" in {
val test = List(
JPath("[1]"),
JPath("[0]"),
JPath("a"),
JPath("a[9]"),
JPath("a[10]"),
JPath("b[10]"),
JPath("a[10].a[1]"),
JPath("b[10].a[1]"),
JPath("b[10].a.x"),
JPath("b[10].a[0]"),
JPath("b[10].a[0].a")
)
val expected = List(1, 0, 2, 3, 4, 6, 5, 9, 10, 7, 8) map test
test.sorted must_== expected
}
}
}
| jedesah/Quasar | blueeyes/src/test/scala/quasar/blueeyes/json/JPathSpec.scala | Scala | apache-2.0 | 3,257 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.data.jdbc.engine
import org.beangle.data.jdbc.meta._
/** RDBMS Dialect
* Focus on ddl and dml sql generation.
*/
trait Dialect {
def createTable(table: Table): String
def dropTable(table: String): String
def query(table: Table): String
def insert(table: Table): String
def alterTableAddColumn(table: Table, col: Column): List[String]
def alterTableDropColumn(table: Table, col: Column): String
  def alterTableRenameColumn(table: Table, col: Column, newName: String): String
def alterTableModifyColumnType(table: Table, col: Column, sqlType: SqlType): String
def alterTableModifyColumnSetNotNull(table: Table, col: Column): String
def alterTableModifyColumnDropNotNull(table: Table, col: Column): String
def alterTableModifyColumnDefault(table: Table, col: Column, v: Option[String]): String
def alterTableAddForeignKey(fk: ForeignKey): String
def alterTableAddUnique(fk: UniqueKey): String
def alterTableAddPrimaryKey(table: Table, pk: PrimaryKey): String
def alterTableDropPrimaryKey(table: Table, pk: PrimaryKey): String
def alterTableDropConstraint(table: Table, name: String): String
def createSequence(seq: Sequence): String
def dropSequence(seq: Sequence): String
/** generate limit sql
*
* @param offset is 0 based
*/
def limit(query: String, offset: Int, limit: Int): (String, List[Int])
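  // Illustrative only (actual engines differ): a PostgreSQL-style implementation might turn
  //   limit("select * from users order by id", offset = 20, limit = 10)
  // into ("select * from users order by id limit ? offset ?", List(10, 20)), where the returned
  // ints are assumed to be the bind parameters in order.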
def commentOnColumn(table: Table, column: Column, comment: Option[String]): Option[String]
def commentsOnTable(table: Table,includeMissing:Boolean): List[String]
def commentOnTable(table: String, comment: Option[String]): Option[String]
def createIndex(i: Index): String
def dropIndex(i: Index): String
def supportSequence:Boolean
}
| beangle/data | jdbc/src/main/scala/org/beangle/data/jdbc/engine/Dialect.scala | Scala | lgpl-3.0 | 2,462 |
package dotty.tools.dotc
package transform
package init
import dotty.tools.dotc._
import ast.tpd
import tpd._
import dotty.tools.dotc.core._
import Contexts._
import Types._
import Symbols._
import StdNames._
import dotty.tools.dotc.transform._
import Phases._
import Semantic._
class Checker extends Phase {
override def phaseName: String = Checker.name
override def description: String = Checker.description
override val runsAfter = Set(Pickler.name)
override def isEnabled(using Context): Boolean =
super.isEnabled && ctx.settings.YcheckInit.value
override def runOn(units: List[CompilationUnit])(using Context): List[CompilationUnit] =
val checkCtx = ctx.fresh.setPhase(this.start)
Semantic.withInitialState {
val traverser = new InitTreeTraverser()
units.foreach { unit => traverser.traverse(unit.tpdTree) }
given Context = checkCtx
Semantic.check()
super.runOn(units)
}
def run(using Context): Unit = {
// ignore, we already called `Semantic.check()` in `runOn`
}
class InitTreeTraverser(using WorkList) extends TreeTraverser {
override def traverse(tree: Tree)(using Context): Unit =
traverseChildren(tree)
tree match {
case mdef: MemberDef =>
// self-type annotation ValDef has no symbol
if mdef.name != nme.WILDCARD then
mdef.symbol.defTree = tree
mdef match
case tdef: TypeDef if tdef.isClassDef =>
val cls = tdef.symbol.asClass
val thisRef = ThisRef(cls)
if shouldCheckClass(cls) then Semantic.addTask(thisRef)
case _ =>
case _ =>
}
}
private def shouldCheckClass(cls: ClassSymbol)(using Context) = {
val instantiable: Boolean =
cls.is(Flags.Module) ||
!cls.isOneOf(Flags.AbstractOrTrait) && {
// see `Checking.checkInstantiable` in typer
val tp = cls.appliedRef
val stp = SkolemType(tp)
val selfType = cls.givenSelfType.asSeenFrom(stp, cls)
!selfType.exists || stp <:< selfType
}
// A concrete class may not be instantiated if the self type is not satisfied
instantiable && cls.enclosingPackageClass != defn.StdLibPatchesPackage.moduleClass
}
}
object Checker:
val name: String = "initChecker"
val description: String = "check initialization of objects"
| dotty-staging/dotty | compiler/src/dotty/tools/dotc/transform/init/Checker.scala | Scala | apache-2.0 | 2,366 |
package play.api
/**
* Contains test helpers.
*/
package object test {
/**
* Provided as an implicit by WithServer and WithBrowser.
*/
type Port = Int
} | michaelahlers/team-awesome-wedding | vendor/play-2.2.1/framework/src/play-test/src/main/scala/play/api/test/package.scala | Scala | mit | 165 |
/*
* Random Access list.
* Copyright (C) 2014 Michael Thorsley
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see [http://www.gnu.org/licenses/].
*/
package com.eigenvektor.collections
import com.eigenvektor.collections.RandomAccessList.CompleteBinaryTree
import scala.collection.SeqLike
/** Implementation of a random access list
*
* This class is a list that supports constant-time head, tail and cons
* operations like a traditional linked list, but also supports access to
* arbitrary elements in the array in O(log(n)) time instead of the usual
* O(n) time for normal linked lists.
*
* This is principally taken from Chris Okasaki's paper, "Purely Functional
* Random-Access Lists".
*/
abstract class RandomAccessList[+A] protected[this] (private val trees:List[CompleteBinaryTree[A]])
extends Seq[A] with SeqLike[A, RandomAccessList[A]] {
import com.eigenvektor.collections.RandomAccessList.Leaf
import com.eigenvektor.collections.RandomAccessList.Node
import com.eigenvektor.collections.RandomAccessList.treeLookup
import com.eigenvektor.collections.RandomAccessList.treeUpdate
import scala.collection.mutable.ListBuffer
import scala.annotation.tailrec
/** Creates a new instance of this type of random access list
*
   * Subclasses should implement this to return an instance of their specific type.
*/
protected[this] def newInstance[B](trees:List[CompleteBinaryTree[B]]):RandomAccessList[B]
/** Gets the head of the list. */
override def head = trees.head.value
/** Tells if the list is empty. */
override def isEmpty = trees.isEmpty
/** The size of the list */
override lazy val size = trees.map(_.size).sum
  /** The length of the list. */
override lazy val length = size;
/** Prepends a value to the list. */
def cons[B >: A](value:B) = {
trees match {
// If we're empty, make a singleton.
case Nil => newInstance[B](Leaf(value) :: Nil)
// If we're a singleton, make a double.
case tree :: Nil => newInstance[B](Leaf(value) :: trees)
case t1 :: t2 :: tail =>
// If the first two trees aren't the same size, we don't need to do anything special.
if (t1.size != t2.size) newInstance[B](Leaf(value) :: trees)
// If they are, we combine them with the new value.
else newInstance[B](Node(value, t1, t2) :: tail)
}
}
/** Prepends a value to the list. */
def ::[B >: A](value:B) = cons(value)
/** Gets the tail of the list. */
override def tail = {
trees match {
// If we're empty, there is no tail.
case Nil => throw new IllegalStateException("Empty list has no tail.")
// If the first tree is a singleton, the tail is just the rest.
case Leaf(value) :: rest => newInstance(rest)
// If the first tree is composite, prepend the subtrees to the rest.
case Node(value, left, right) :: rest => newInstance(left :: right :: rest)
}
}
/** Gets value of the list at a given index. */
def apply(idx:Int):A = {
/** Performs a lookup of an index in a list of trees. */
@tailrec def lookup[B >: A](remainingTrees:List[CompleteBinaryTree[B]], remainingIdx:Int):B = {
if (remainingTrees == Nil) throw new IndexOutOfBoundsException("Index out of bounds")
else if (remainingIdx < remainingTrees.head.size) treeLookup(remainingTrees.head, remainingIdx)
else lookup(remainingTrees.tail, remainingIdx - remainingTrees.head.size)
}
lookup(trees, idx)
}
/** Updates the value at a given index. Returns a new list with the updated element. */
def updated[B >: A](idx:Int, value:B):RandomAccessList[B] = {
/** Gets the updated list of trees for an update. */
@tailrec def getUpdatedTrees[B >: A](
prevTrees:ListBuffer[CompleteBinaryTree[B]], // The trees before the current position.
nextTrees:List[CompleteBinaryTree[B]], // The trees at or after the current position.
remainingIdx:Int, // The index remaining after being shifted to the current position.
value:B // The value to update.
):List[CompleteBinaryTree[B]] = {
if (nextTrees == Nil) throw new IndexOutOfBoundsException("Index out of bounds")
else if (remainingIdx < nextTrees.head.size) {
val newTree = treeUpdate(nextTrees.head, remainingIdx, value)
prevTrees.toList ::: newTree :: nextTrees.tail
}
else {
prevTrees += nextTrees.head
getUpdatedTrees(prevTrees, nextTrees.tail, remainingIdx - nextTrees.head.size, value)
}
}
newInstance[B](getUpdatedTrees(new ListBuffer[CompleteBinaryTree[B]], trees, idx, value))
}
/** Reverses the list in O(N) time */
override def reverse = {
def reverseInto(a:RandomAccessList[A], b:RandomAccessList[A]):RandomAccessList[A] = {
if (a.isEmpty) b
else reverseInto(a.tail, a.head :: b)
}
reverseInto(this, newInstance(Nil))
}
def iterator() = {
if (isEmpty) Iterator.empty
else {
/** A simple iterator for this object that assumes that this is non-empty. */
class RALIterator extends Iterator[A] {
val listIter = trees.iterator
var treeIter = listIter.next().iterator
def hasNext = treeIter.hasNext || listIter.hasNext
def next() = {
if (!treeIter.hasNext) {
treeIter = listIter.next.iterator
}
treeIter.next()
}
}
new RALIterator
}
}
override def reverseIterator() = {
if (isEmpty) Iterator.empty
else {
/** A simple reverse iterator. */
class RALReverseIterator extends Iterator[A] {
val listIter = trees.reverse.iterator
var treeIter = listIter.next().reverseIterator
def hasNext = treeIter.hasNext || listIter.hasNext
def next() = {
if (!treeIter.hasNext) {
treeIter = listIter.next.reverseIterator
}
treeIter.next()
}
}
new RALReverseIterator
}
}
/** Override of the seq method from Iterable. */
override def seq = this
/** Creates a builder for this class. */
override protected[this] def newBuilder = RandomAccessList.newBuilder[A]
override def equals(o:Any) = {
if (!o.isInstanceOf[RandomAccessList[A]]) false
else {
val ral = o.asInstanceOf[RandomAccessList[A]]
ral.trees.equals(this.trees)
}
}
/** Simple toString for debugging. */
override def toString = trees.map(_.size).mkString("RAList(", ",", ")")
}
object RandomAccessList {
import scala.collection.immutable.{Nil => NilList} // Because I'm making my own RandomAccessList Nil here.
import scala.collection.mutable.Builder
import scala.collection.generic.CanBuildFrom
import scala.annotation.tailrec
/** An implementation of a complete binary tree, to be used inside the
* general random access list.
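   *
   * The list as a whole is kept as a sequence of such trees whose sizes are of the
   * form 2^k - 1: cons either prepends a new Leaf or, when the first two trees have
   * equal size, merges them under a new Node (see RandomAccessList.cons). Within a
   * tree, elements are numbered in pre-order: index 0 is the root value, followed by
   * the left subtree and then the right subtree (see treeLookup).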
*/
sealed private[collections] abstract class CompleteBinaryTree[+T] (val value:T) {
def size:Int
def iterator:Iterator[T] = new CBTIterator(this)
def reverseIterator:Iterator[T] = new ReverseCBTIterator(this)
}
private[collections] case class Leaf[+T] (v:T) extends CompleteBinaryTree[T](v) {
    val size = 1
}
private[collections] case class Node[+T] (v:T,
val left:CompleteBinaryTree[T],
val right:CompleteBinaryTree[T]) extends CompleteBinaryTree[T](v) {
require(left.size == right.size)
val size = left.size * 2 + 1
}
/** Finds the element of a complete binary tree at a given index in log(n) time */
@tailrec private[collections] def treeLookup[T](cbt:CompleteBinaryTree[T], idx:Int):T = {
cbt match {
case l: Leaf[T] => if (idx == 0) l.value else throw new IndexOutOfBoundsException("index out of bounds")
case n: Node[T] => if (idx == 0) n.value else {
val sz = n.left.size
if (idx <= sz) treeLookup(n.left, idx-1)
else treeLookup(n.right, idx - 1 - sz)
}
}
}
/** Creates a binary tree that is just like an existing one, but updated at a given index with a given value
* in log(n) time, reusing as much of the structure as possible.
*/
private[collections] def treeUpdate[T, U >: T](cbt:CompleteBinaryTree[T], idx:Int, value:U):CompleteBinaryTree[U] = {
cbt match {
case l: Leaf[T] => if (idx == 0) new Leaf(value) else throw new IndexOutOfBoundsException("index out of bounds")
case n: Node[T] => if (idx == 0) new Node(value, n.left, n.right) else
{
val sz = n.left.size
if (idx <= sz) new Node(n.value, treeUpdate(n.left, idx-1, value), n.right)
else new Node(n.value, n.left, treeUpdate(n.right, idx - 1 - sz, value))
}
}
}
/** A simple iterator for a complete binary tree that iterates in pre-order */
private[collections] final class CBTIterator[T] (cbt:CompleteBinaryTree[T]) extends Iterator[T] {
// The list of subtrees on the current path
private var trees:List[CompleteBinaryTree[T]] = cbt :: NilList
/** Tells if there are more values */
def hasNext = !trees.isEmpty
/** Gets the next value */
def next() = {
val ret = trees.head.value
// Advance the list to the next value.
trees = trees.head match {
case n: Node[T] => {
n.left :: n.right :: trees.tail
}
case Leaf(value) => trees.tail
}
ret
}
}
  /** A simple iterator for a complete binary tree that iterates in the reverse of pre-order (right subtree, then left subtree, then the node) */
private[collections] final class ReverseCBTIterator[T] (cbt:CompleteBinaryTree[T]) extends Iterator[T] {
    // A list of trees, each paired with flags that tell us whether we have already
    // visited its left and right subtrees.
private var trees:List[Tuple3[CompleteBinaryTree[T], Boolean, Boolean]] = (cbt, false, false) :: NilList
// Advance the tree list.
trees = advanceTrees(trees)
/** Tells if there are more values. */
def hasNext = !trees.isEmpty
def next() = {
val ret = trees.head._1.value
trees = advanceTrees(trees.tail)
ret
}
    /** Advance the trees to the next position. I.e. the position where the next node is at the head
     * of the list, and can be popped after use.
*/
@tailrec private def advanceTrees(trees:List[Tuple3[CompleteBinaryTree[T], Boolean, Boolean]])
:List[Tuple3[CompleteBinaryTree[T], Boolean, Boolean]] = {
// If we have an empty tree list, continue to have one.
if (trees.isEmpty) trees
else {
val head = trees.head
head match {
// If the head is a leaf or already visited node, we're already there.
case (Leaf(_), _ , _) => trees
case (Node(_, _, _), true, true) => trees
// If it is a completely unvisited node, advance right.
case (Node(_, _, _), false, false) => {
val n = head._1.asInstanceOf[Node[T]]
advanceTrees((n.right, false, false) :: (n, false, true) :: trees.tail)
}
// If it is visited on the right, but not the left, advance left.
case (Node(_, _, _), false, true) => {
val n = head._1.asInstanceOf[Node[T]]
advanceTrees((n.left, false, false) :: (n, true, true) :: trees.tail)
}
// This should not happen.
case _ => throw new IllegalStateException()
}
}
}
}
/** Creates a new, empty instance. */
def apply[T]():RandomAccessList[T] = new immutable.RandomAccessList[T](NilList)
/** Creates a new instance with elements passed in. */
def apply[T](elems:T*):RandomAccessList[T] =
elems.reverse.foldLeft(new immutable.RandomAccessList[T](NilList):RandomAccessList[T])(_.cons(_))
// A special Nil for this kind of list.
val Nil = new immutable.RandomAccessList[Nothing](NilList)
/** Creates a builder for this class. */
def newBuilder[T]: Builder[T, RandomAccessList[T]] = {
// A very simple builder that just collects its elements into a standard list
// and creates the RandomAccessList at the last moment.
class RALBuilder extends Builder[T, RandomAccessList[T]] {
private var elems:List[T] = NilList
def +=(elem:T) = {
elems = (elem :: elems)
this
}
def clear() = elems = NilList
def result():RandomAccessList[T] =
elems.foldLeft(new immutable.RandomAccessList[T](NilList):RandomAccessList[T])(_.cons(_))
}
new RALBuilder
}
implicit def canBuildFrom[T, U >: T] : CanBuildFrom[RandomAccessList[T], U, RandomAccessList[U]] =
new CanBuildFrom[RandomAccessList[T], U, RandomAccessList[U]] {
def apply():Builder[U, RandomAccessList[U]] = newBuilder[U]
def apply(from:RandomAccessList[T]) = newBuilder[U]
}
} | Vyzen/trout | src/main/scala/com/eigenvektor/collections/RandomAccessList.scala | Scala | gpl-3.0 | 13,667 |
/*
* sbt
* Copyright 2011 - 2018, Lightbend, Inc.
* Copyright 2008 - 2010, Mark Harrah
* Licensed under Apache License 2.0 (see LICENSE)
*/
package sbt.util
import java.util.concurrent.ConcurrentHashMap
import org.apache.logging.log4j.{ LogManager => XLogManager, Level => XLevel }
import org.apache.logging.log4j.core.{ Appender => XAppender, LoggerContext => XLoggerContext }
import org.apache.logging.log4j.core.config.{ AppenderRef, LoggerConfig }
import org.apache.logging.log4j.core.layout.PatternLayout
import sbt.internal.util._
import scala.collection.concurrent
import sjsonnew.JsonFormat
import org.apache.logging.log4j.core.appender.AsyncAppender
// http://logging.apache.org/log4j/2.x/manual/customconfig.html
// https://logging.apache.org/log4j/2.x/log4j-core/apidocs/index.html
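/**
 * Holds the log4j `LoggerContext` and a registry of `ShowLines` string codecs used
 * to render typed log events; most of the logger/appender binding methods below are
 * deprecated in favour of `LoggerContext`.
 */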
sealed abstract class LogExchange {
private[sbt] lazy val context: XLoggerContext = init()
private[sbt] val stringCodecs: concurrent.Map[String, ShowLines[_]] = concurrent.TrieMap()
private[sbt] val builtInStringCodecs: Unit = initStringCodecs()
private[util] val configs = new ConcurrentHashMap[String, LoggerConfig]
private[util] def addConfig(name: String, config: LoggerConfig): Unit =
Util.ignoreResult(configs.putIfAbsent(name, config))
private[util] def removeConfig(name: String): Option[LoggerConfig] = Option(configs.remove(name))
@deprecated("Use LoggerContext to create loggers", "1.4.0")
def logger(name: String): ManagedLogger = logger(name, None, None)
@deprecated("Use LoggerContext to create loggers", "1.4.0")
def logger(name: String, channelName: Option[String], execId: Option[String]): ManagedLogger =
LoggerContext.globalContext.logger(name, channelName, execId)
@deprecated("Use LoggerContext to unbind appenders", "1.4.0")
def unbindLoggerAppenders(loggerName: String): Unit = {
LoggerContext.globalContext.clearAppenders(loggerName)
}
@deprecated("Use LoggerContext to bind appenders", "1.4.0")
def bindLoggerAppenders(
loggerName: String,
appenders: List[(XAppender, Level.Value)]
): Unit = {
appenders.foreach {
case (a, l) =>
LoggerContext.globalContext
.addAppender(loggerName, new ConsoleAppenderFromLog4J(loggerName, a) -> l)
}
}
@deprecated("Use LoggerContext to bind appenders", "1.4.0")
def bindLoggerAppenders(
loggerName: String,
appenders: Seq[(Appender, Level.Value)]
): Unit = bindLoggerAppenders(loggerName, appenders.map { case (a, l) => a.toLog4J -> l }.toList)
@deprecated("unused", "1.4.0")
def loggerConfig(loggerName: String): LoggerConfig = configs.get(loggerName)
@deprecated("unused", "1.4.0")
lazy val asyncStdout = buildAsyncStdout
@deprecated("unused", "1.4.0")
private[sbt] def buildAsyncStdout: AsyncAppender = {
val ctx = XLogManager.getContext(false) match { case x: XLoggerContext => x }
val config = ctx.getConfiguration
val appender = ConsoleAppender("Stdout").toLog4J
// CustomConsoleAppender.createAppender("Stdout", layout, null, null)
appender.start
config.addAppender(appender)
val asyncAppender: AsyncAppender = AsyncAppender
.newBuilder()
.setName("AsyncStdout")
.setAppenderRefs(Array(AppenderRef.createAppenderRef("Stdout", XLevel.DEBUG, null)))
.setBlocking(false)
.setConfiguration(config)
.build
asyncAppender.start
config.addAppender(asyncAppender)
asyncAppender
}
// Construct these StringTypeTags manually, because they're used at the very startup of sbt
  // and we want to avoid initializing the reflection universe, which the StringTypeTag.apply that requires a TypeTag would do.
// A better long-term solution could be to make StringTypeTag.apply a macro.
lazy val stringTypeTagThrowable = StringTypeTag[Throwable]("scala.Throwable")
lazy val stringTypeTagTraceEvent = StringTypeTag[TraceEvent]("sbt.internal.util.TraceEvent")
lazy val stringTypeTagSuccessEvent = StringTypeTag[SuccessEvent]("sbt.internal.util.SuccessEvent")
private[sbt] def initStringCodecs(): Unit = {
import sbt.internal.util.codec.ThrowableShowLines._
import sbt.internal.util.codec.TraceEventShowLines._
import sbt.internal.util.codec.SuccessEventShowLines._
registerStringCodecByStringTypeTag(stringTypeTagThrowable)
registerStringCodecByStringTypeTag(stringTypeTagTraceEvent)
registerStringCodecByStringTypeTag(stringTypeTagSuccessEvent)
}
// This is a dummy layout to avoid casting error during PatternLayout.createDefaultLayout()
// that was originally used for ConsoleAppender.
// The stacktrace shows it's having issue initializing default DefaultConfiguration.
// Since we currently do not use Layout inside ConsoleAppender, the actual pattern is not relevant.
private[sbt] lazy val dummyLayout: PatternLayout = {
val _ = context
val ctx = XLogManager.getContext(false) match { case x: XLoggerContext => x }
val config = ctx.getConfiguration
val lo = PatternLayout.newBuilder
.withConfiguration(config)
.withPattern(PatternLayout.SIMPLE_CONVERSION_PATTERN)
.build
lo
}
@deprecated("It is now necessary to provide a json format instance", "1.4.0")
def jsonCodec[A](tag: String): Option[JsonFormat[A]] = None
@deprecated("Always returns false", "1.4.0")
def hasJsonCodec(tag: String): Boolean = false
@deprecated("This is a no-op", "1.4.0")
def getOrElseUpdateJsonCodec[A](tag: String, v: JsonFormat[A]): JsonFormat[A] = v
@deprecated("The log manager no longer caches jsonCodecs", "1.4.0")
def jsonCodecs(): concurrent.Map[String, JsonFormat[_]] = concurrent.TrieMap.empty
def stringCodec[A](tag: String): Option[ShowLines[A]] =
stringCodecs.get(tag) map { _.asInstanceOf[ShowLines[A]] }
def hasStringCodec(tag: String): Boolean =
stringCodecs.contains(tag)
def getOrElseUpdateStringCodec[A](tag: String, v: ShowLines[A]): ShowLines[A] =
stringCodecs.getOrElseUpdate(tag, v).asInstanceOf[ShowLines[A]]
@deprecated("Prefer macro based registerStringCodec", "1.4.0")
def registerStringCodec[A](
st: ShowLines[A],
tt: scala.reflect.runtime.universe.TypeTag[A]
): Unit = {
registerStringCodecByStringTypeTag(StringTypeTag.apply[A](tt))(st)
}
private[sbt] def registerStringCodec[A: ShowLines: StringTypeTag]: Unit = {
registerStringCodecByStringTypeTag(implicitly[StringTypeTag[A]])
}
private[sbt] def registerStringCodecByStringTypeTag[A: ShowLines](tag: StringTypeTag[A]): Unit = {
val ev = implicitly[ShowLines[A]]
val _ = getOrElseUpdateStringCodec(tag.key, ev)
}
private[sbt] def init(): XLoggerContext = {
import org.apache.logging.log4j.core.config.builder.api.ConfigurationBuilderFactory
import org.apache.logging.log4j.core.config.Configurator
val builder = ConfigurationBuilderFactory.newConfigurationBuilder
builder.setConfigurationName("sbt.util.logging")
val ctx = Configurator.initialize(builder.build())
ctx match { case x: XLoggerContext => x }
}
private[sbt] def init(name: String): XLoggerContext = new XLoggerContext(name)
}
object LogExchange extends LogExchange
| xuwei-k/xsbt | internal/util-logging/src/main/scala/sbt/util/LogExchange.scala | Scala | apache-2.0 | 7,122 |
package com.mesosphere.cosmos.jsonschema
import com.github.fge.jsonschema.main.JsonSchemaFactory
import io.circe.Json
import io.circe.JsonObject
import io.circe.jawn.parse
import io.circe.syntax._
import org.scalatest.FreeSpec
import org.scalatest.Tag
import scala.io.Source
import scala.util.Right
class JsonSchemaSpec extends FreeSpec {
private[this] implicit val jsf = JsonSchemaFactory.byDefault()
"JsonSchema should" - {
"be able to validate a document against a schema" - {
// the draft v4 json schema itself should be able to validate itself
val jsonSchemaDraftV4String = classpathJsonString("/draftv4/schema")
"as io.circe.JsonObject" in {
val Right(parsedJson: Json) = parse(jsonSchemaDraftV4String)
val xor = JsonSchema.jsonMatchesSchema(parsedJson, parsedJson)
assert(xor.isRight)
}
"as io.circe.Json" in {
val Right(parsedJson: Json) = parse(jsonSchemaDraftV4String)
val jObject: JsonObject = parsedJson.asObject.get
val xor = JsonSchema.jsonObjectMatchesSchema(jObject, jObject)
assert(xor.isRight)
}
}
"be able to extract default property values from a schema" - {
val expected = JsonObject.fromMap(Map(
"prop1" -> 57.asJson,
"prop2" -> Json.obj(
"sub1" -> "ta-da".asJson
)
))
"when schema does not use definition refs" in {
val s = classpathJsonString("/com/mesosphere/cosmos/jsonschema/no-definition-ref-used.json")
val Right(schema) = parse(s)
val defaults = JsonSchema.extractDefaultsFromSchema(schema.asObject.get)
assertResult(expected)(defaults)
}
"when schema does use definition refs" taggedAs Tag("https://mesosphere.atlassian.net/browse/DCOS-10455") ignore {
val s = classpathJsonString("/com/mesosphere/cosmos/jsonschema/definition-ref-used.json")
val Right(schema) = parse(s)
val defaults = JsonSchema.extractDefaultsFromSchema(schema.asObject.get)
assertResult(expected)(defaults)
}
}
}
private[this] def classpathJsonString(resourceName: String): String = {
Option(this.getClass.getResourceAsStream(resourceName)) match {
case Some(is) => Source.fromInputStream(is).mkString
case _ => throw new IllegalStateException(s"Unable to load classpath resource: $resourceName")
}
}
}
| dcos/cosmos | cosmos-test-common/src/test/scala/com/mesosphere/cosmos/jsonschema/JsonSchemaSpec.scala | Scala | apache-2.0 | 2,389 |
package scorex.network
import akka.actor.Actor.Receive
import akka.actor.Cancellable
import scorex.app.Application
import scorex.block.Block
import scorex.block.Block._
import scorex.crypto.encode.Base58.encode
import scorex.network.Coordinator.{AddBlock, SyncFinished}
import scorex.network.NetworkController.DataFromPeer
import scorex.network.message.Message
import scorex.transaction.History._
import scorex.transaction.{BlockSeq, History}
import scorex.utils.ScorexLogging
import shapeless.syntax.typeable._
import scala.concurrent.ExecutionContext.Implicits.global
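/**
 * Drives the download of a blockchain extension from peers. Roughly: `Idle` waits
 * for a `GetExtension` request, `GettingExtension` asks peers for the signatures
 * following our last block ids, `GettingExtensionTail` keeps extending that id list
 * in overlapping chunks, and `GettingBlocks` downloads the fork's blocks; the
 * outcome is reported to the Coordinator as a `SyncFinished` and the actor returns
 * to idle.
 */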
class BlockchainSynchronizer(application: Application) extends ViewSynchronizer with ScorexLogging {
import BlockchainSynchronizer._
import Coordinator.SyncFinished._
import application.basicMessagesSpecsRepo._
override val messageSpecs = Seq(SignaturesSpec, BlockMessageSpec)
protected override lazy val networkControllerRef = application.networkController
private lazy val coordinator = application.coordinator
private val gettingBlockTimeout = application.settings.historySynchronizerTimeout
private val forkMaxLength = application.settings.forkMaxLength
private val operationRetries = application.settings.operationRetries
private val pinToInitialPeer = application.settings.pinToInitialPeer
private val minForkChunks = application.settings.minForkChunks
private var timeoutData = Option.empty[Cancellable]
override def receive: Receive = idle
def idle: Receive = state(Idle) {
case GetExtension(lastIds, peerScores) =>
start("gettingExtension") { _ =>
val msg = Message(GetSignaturesSpec, Right(lastIds), None)
networkControllerRef ! NetworkController.SendToNetwork(msg, SendToChosen(peerScores.keys.toSeq))
gettingExtension(lastIds.map(InnerId), peerScores.map(peer => peer._1 -> Peer(peer._2)))
}
}
def gettingExtension(requestedIds: InnerIds, peers: Peers): Receive =
state(GettingExtension, acceptSignaturesSpecOnlyFrom(peers.keySet)) {
case SignaturesFromPeer(blockIds, connectedPeer) =>
log.debug(s"Got blockIds: $blockIds")
blockIdsToStartDownload(blockIds, application.history) match {
case None =>
log.warn(s"Strange blockIds: $blockIds")
finishUnsuccessfully()
case Some((_, toDownload)) if toDownload.isEmpty =>
log.debug(s"All blockIds are already in the local blockchain: $blockIds")
finish(withEmptyResult)
case Some((commonBlockId, tail)) if requestedIds.contains(commonBlockId) =>
implicit val peerSet = PeerSet(
connectedPeer, if (pinToInitialPeer) peers.filterKeys(_ == connectedPeer) else peers)
gotoGettingExtensionTail(DownloadInfo(commonBlockId), tail)
case Some((commonBlockId, _)) =>
blacklistPeer(s"Block id: $commonBlockId has not been requested", connectedPeer)
finishUnsuccessfully()
}
}
private def gotoGettingExtensionTail(downloadInfo: DownloadInfo, tail: InnerIds)(implicit peers: PeerSet): Unit = {
val activePeer = peers.active
val blockIdsToDownload = downloadInfo.blockIds ++ tail
val noMoreBlockIds = tail.isEmpty
if (blockIdsToDownload.size > forkMaxLength || noMoreBlockIds) {
val fork = blockIdsToDownload.take(forkMaxLength)
fork.find(id => application.history.contains(id.blockId)) match {
case Some(suspiciousBlockId) =>
blacklistPeer(s"Suspicious block id: $suspiciousBlockId among blocks to be downloaded", activePeer)
finishUnsuccessfully()
case None =>
val forkStorage = application.blockStorage.blockSeq
val lastCommonBlockId = downloadInfo.lastCommon.blockId
val initialScore = application.history.scoreOf(lastCommonBlockId)
forkStorage.initialize(fork, initialScore)
run("gettingBlocks") { updatedPeerData =>
gettingBlocks(forkStorage, lastCommonBlockId, updatedPeerData)
}
}
} else {
val withTail = downloadInfo.copy(blockIds = blockIdsToDownload)
val overlap = withTail.lastTwoBlockIds
run("gettingExtensionTail") { updatedPeersData =>
val msg = Message(GetSignaturesSpec, Right(overlap.reverse.map(_.blockId)), None)
networkControllerRef ! NetworkController.SendToNetwork(msg, SendToChosen(updatedPeersData.active))
gettingExtensionTail(withTail, overlap, updatedPeersData)
}
}
}
def gettingExtensionTail(downloadInfo: DownloadInfo, overlap: InnerIds, peers: PeerSet): Receive =
state(GettingExtensionTail, acceptSignaturesSpecOnlyFrom(peers.active)) {
case SignaturesFromPeer(tail, connectedPeer) =>
log.debug(s"Got tail blockIds: $tail")
if (tail == overlap) {
gotoGettingExtensionTail(downloadInfo, Seq.empty)(peers)
} else if (tail.indexOf(overlap.last) == 0) {
gotoGettingExtensionTail(downloadInfo, tail.tail)(peers)
} else if (tail.lastOption.exists(downloadInfo.blockIds.contains)) {
log.warn(s"Tail blockIds have been already recieved - possible msg duplication: $tail")
} else {
blacklistPeer(s"Tail does not correspond to the overlap $overlap: $tail", connectedPeer)
finishUnsuccessfully()
}
}
def gettingBlocks(forkStorage: BlockSeq,
lastCommonBlockId: BlockId,
peers: PeerSet): Receive = {
val before = forkStorage.numberOfBlocks
val requestedIds = forkStorage.idsWithoutBlock.take(minForkChunks)
log.info(s"Going to request ${requestedIds.size} blocks, peer: ${peers.active}")
requestedIds.foreach { blockId =>
val msg = Message(GetBlockSpec, Right(blockId.blockId), None)
networkControllerRef ! NetworkController.SendToNetwork(msg, SendToChosen(peers.active))
}
state(GettingBlocks) {
case BlockFromPeer(block, connectedPeer) if peers.active == connectedPeer && forkStorage.containsBlockId(block.uniqueId) =>
if (forkStorage.addIfNotContained(block)) {
log.info("Got block: " + block.encodedId)
val currentScore = application.history.score()
val forkScore = forkStorage.cumulativeBlockScore
val author = Some(connectedPeer).filterNot(_ => peers.activeChanged)
val allBlocksAreLoaded = forkStorage.noIdsWithoutBlock
if (forkScore > currentScore) {
if (forkStorage.numberOfBlocks >= minForkChunks || allBlocksAreLoaded) {
finish(SyncFinished(success = true, Some(lastCommonBlockId, forkStorage.blocksInOrder, author)))
}
} else if (allBlocksAreLoaded) {
author.foreach { blacklistPeer("All blocks are loaded, but still not enough score", _) }
finish(SyncFinished.unsuccessfully)
}
} else if (forkStorage.numberOfBlocks - before >= minForkChunks) {
gettingBlocks(forkStorage, lastCommonBlockId, peers)
}
}
}
private def state(status: Status, stopFilter: StopFilter = noFilter)(logic: Receive): Receive = {
    // combine the state-specific logic with the handling common to all the states
ignoreFor(stopFilter) orElse logic orElse {
case GetStatus =>
sender() ! status
case BlockFromPeer(block, peer) =>
coordinator ! AddBlock(block, Some(peer))
case SignaturesFromPeer(_, _) =>
case t @ TimeoutExceeded(_, _, _, _) =>
if (timeoutData.exists(!_.isCancelled)) handleTimeout(t)
case GetExtension(_, _) => // ignore if not idle
// the signal to initialize
case Unit =>
case nonsense: Any =>
log.warn(s"Got something strange in ${status.name}: $nonsense")
}
}
private def handleTimeout(t: TimeoutExceeded): Unit = {
val TimeoutExceeded(s, f, peerSet, runs) = t
log.debug(s"Attempt #$runs to rerun $s")
val updated = if (runs < operationRetries) updatedPeerSet(peerSet)
else {
log.info(s"Max number of retries ($operationRetries) is reached")
None
}
updated match {
case Some(updatedPeerSet) =>
log.info(s"New active peer is ${updatedPeerSet.active}" +
s" (was ${peerSet.map(_.active.toString).getOrElse("no one")})")
run(s, updated, runs + 1) { f }
case None => finishUnsuccessfully()
}
}
private def updatedPeerSet(peerSet: Option[PeerSet]): Option[PeerSet] =
peerSet.flatMap {
case PeerSet(active, peers, activeChanged) =>
log.debug("Trying to find a new active peer...")
val peerData @ Peer(score, retries) = peers(active)
val updatedRetries = retries + 1
val updatedPeers = (if (updatedRetries >= application.settings.retriesBeforeBlacklisted) {
if (pinToInitialPeer) blacklistPeer("Timeout exceeded", active)
peers - active
} else peers + (active -> peerData.copy(retries = updatedRetries))).filterNot(_._2.score < score)
val sortedByScore = updatedPeers.toSeq.sortBy(_._2.score).map(_._1)
sortedByScore.filterNot(_ == active).headOption
.orElse(sortedByScore.headOption)
.map(newActive => PeerSet(newActive, updatedPeers, activeChanged || newActive != active))
}
private def ignoreFor(stopFilter: StopFilter): Receive = {
case data @ DataFromPeer(msgId, _, connectedPeer) if stopFilter(msgId, connectedPeer) =>
log.debug(s"Ignoring data: $data")
}
private def acceptSignaturesSpecOnlyFrom(peer: ConnectedPeer): StopFilter =
acceptSignaturesSpecOnlyFrom(Set(peer))
private def acceptSignaturesSpecOnlyFrom(peers: Set[ConnectedPeer]): StopFilter = {
(code, peer) => (!peers.contains(peer)) && SignaturesSpec.messageCode == code
}
private def finishUnsuccessfully(): Unit = finish(unsuccessfully)
private def finish(result: SyncFinished): Unit = {
log.debug(s"Transition to idle, success == ${result.success}")
cancelPreviousTimeoutCountdown()
context become idle
coordinator ! result
}
private def run(status: String)(f: RepeatableCodeBlock)(implicit peers: PeerSet): Unit =
run(status, Some(peers), 1)(f)
private def start(status: String)(f: RepeatableCodeBlock): Unit = run(status, None, 1)(f)
private def run(status: String, initialPeerSet: Option[PeerSet], runs: Int)(f: RepeatableCodeBlock): Unit = {
log.debug(s"Transition to $status")
cancelPreviousTimeoutCountdown()
val behaviour = f(initialPeerSet.orNull)
val timeoutInfo = TimeoutExceeded(status, f, initialPeerSet, runs)
val cancellable = context.system.scheduler.schedule(gettingBlockTimeout, gettingBlockTimeout, self, timeoutInfo)
timeoutData = Some(cancellable)
context become behaviour
}
private def cancelPreviousTimeoutCountdown(): Unit = {
timeoutData.foreach(_.cancel())
timeoutData = None
}
private def blacklistPeer(reason: String, connectedPeer: ConnectedPeer): Unit = {
log.warn(s"$reason, blacklisted peer: $connectedPeer")
connectedPeer.handlerRef ! PeerConnectionHandler.Blacklist
}
private object BlockFromPeer {
def unapply(dataFromPeer: DataFromPeer[_]): Option[(Block, ConnectedPeer)] = {
if (dataFromPeer.messageType == BlockMessageSpec.messageCode) {
dataFromPeer match {
case DataFromPeer(msgId, block: Block, connectedPeer) if block.cast[Block].isDefined =>
Some((block, connectedPeer))
case _ =>
None
}
} else None
}
}
private object SignaturesFromPeer {
def unapply(dataFromPeer: DataFromPeer[_]): Option[(InnerIds, ConnectedPeer)] = {
if (dataFromPeer.messageType == SignaturesSpec.messageCode) {
dataFromPeer match {
case DataFromPeer(msgId, blockIds: Seq[Block.BlockId]@unchecked, connectedPeer) =>
Some((blockIds.map(InnerId), connectedPeer))
case _ =>
None
}
} else None
}
}
}
object BlockchainSynchronizer {
sealed trait Status {
val name: String
}
case object GettingExtension extends Status {
override val name = "getting extension"
}
case object GettingExtensionTail extends Status {
override val name = "getting extension tail"
}
case object GettingBlocks extends Status {
override val name = "getting blocks"
}
case object Idle extends Status {
override val name = "idle"
}
case object GetStatus
case class GetExtension(lastBlockIds: BlockIds, peerScores: Map[ConnectedPeer, BlockchainScore])
case class InnerId(blockId: BlockId) {
override def equals(obj: Any): Boolean = {
import shapeless.syntax.typeable._
obj.cast[InnerId].exists(_.blockId.sameElements(this.blockId))
}
override def hashCode(): Int = scala.util.hashing.MurmurHash3.seqHash(blockId)
override def toString: String = encode(blockId)
}
type InnerIds = Seq[InnerId]
def blockIdsToStartDownload(blockIds: InnerIds, history: History): Option[(InnerId, InnerIds)] = {
val (common, toDownload) = blockIds.span(id => history.contains(id.blockId))
if (common.nonEmpty) Some((common.last, toDownload)) else None
}
case class DownloadInfo(lastCommon: InnerId, blockIds: InnerIds = Seq.empty) {
def lastTwoBlockIds: InnerIds = if (blockIds.size > 1) blockIds.takeRight(2) else lastCommon +: blockIds
}
private type StopFilter = (Message.MessageCode, ConnectedPeer) => Boolean
private def noFilter: StopFilter = (_, _) => false
private type RepeatableCodeBlock = PeerSet => Receive
private case class TimeoutExceeded(status: String, f: RepeatableCodeBlock, peers: Option[PeerSet], runs: Integer)
private case class Peer(score: BlockchainScore, retries: Int = 0)
private type Peers = Map[ConnectedPeer, Peer]
private case class PeerSet(active: ConnectedPeer, peers: Peers, activeChanged: Boolean = false)
}
| alexeykiselev/WavesScorex | scorex-basics/src/main/scala/scorex/network/BlockchainSynchronizer.scala | Scala | cc0-1.0 | 13,891 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import java.util.Locale
import org.apache.spark.sql.{AnalysisException, SaveMode, SparkSession}
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.catalog._
import org.apache.spark.sql.catalyst.expressions.{Expression, InputFileBlockLength, InputFileBlockStart, InputFileName, RowOrdering}
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.connector.expressions.{FieldReference, RewritableTransform}
import org.apache.spark.sql.errors.QueryCompilationErrors
import org.apache.spark.sql.execution.command.DDLUtils
import org.apache.spark.sql.execution.datasources.{CreateTable => CreateTableV1}
import org.apache.spark.sql.execution.datasources.v2.FileDataSourceV2
import org.apache.spark.sql.sources.InsertableRelation
import org.apache.spark.sql.types.{AtomicType, StructType}
import org.apache.spark.sql.util.PartitioningUtils.normalizePartitionSpec
import org.apache.spark.sql.util.SchemaUtils
/**
* Replaces [[UnresolvedRelation]]s if the plan is for direct query on files.
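 *
 * This covers direct queries on files, e.g. {{{SELECT * FROM parquet.`/path/to/table`}}},
 * where the two-part identifier is interpreted as a data source name plus a path
 * (see `resolveDataSource`).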
*/
class ResolveSQLOnFile(sparkSession: SparkSession) extends Rule[LogicalPlan] {
private def maybeSQLFile(u: UnresolvedRelation): Boolean = {
conf.runSQLonFile && u.multipartIdentifier.size == 2
}
private def resolveDataSource(ident: Seq[String]): DataSource = {
val dataSource = DataSource(sparkSession, paths = Seq(ident.last), className = ident.head)
// `dataSource.providingClass` may throw ClassNotFoundException, the caller side will try-catch
// it and return the original plan, so that the analyzer can report table not found later.
val isFileFormat = classOf[FileFormat].isAssignableFrom(dataSource.providingClass)
if (!isFileFormat ||
dataSource.className.toLowerCase(Locale.ROOT) == DDLUtils.HIVE_PROVIDER) {
throw QueryCompilationErrors.unsupportedDataSourceTypeForDirectQueryOnFilesError(
dataSource.className)
}
dataSource
}
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case r @ RelationTimeTravel(u: UnresolvedRelation, timestamp, _)
if maybeSQLFile(u) && timestamp.forall(_.resolved) =>
// If we successfully look up the data source, then this is a path-based table, so we should
// fail to time travel. Otherwise, this is some other catalog table that isn't resolved yet,
// so we should leave it be for now.
try {
resolveDataSource(u.multipartIdentifier)
throw QueryCompilationErrors.timeTravelUnsupportedError("path-based tables")
} catch {
case _: ClassNotFoundException => r
}
case u: UnresolvedRelation if maybeSQLFile(u) =>
try {
val ds = resolveDataSource(u.multipartIdentifier)
LogicalRelation(ds.resolveRelation())
} catch {
case _: ClassNotFoundException => u
case e: Exception =>
// the provider is valid, but failed to create a logical plan
u.failAnalysis(e.getMessage, e)
}
}
}
/**
* Preprocess [[CreateTable]], to do some normalization and checking.
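 *
 * Normalization here means resolving partition, bucket and sort column names against
 * the table schema w.r.t. the case-sensitivity setting, checking them for duplicates
 * and unsupported types, and reordering the schema (or the CTAS query output) so that
 * partition columns come last; the per-case comments below spell out the details.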
*/
case class PreprocessTableCreation(sparkSession: SparkSession) extends Rule[LogicalPlan] {
// catalog is a def and not a val/lazy val as the latter would introduce a circular reference
private def catalog = sparkSession.sessionState.catalog
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
// When we CREATE TABLE without specifying the table schema, we should fail the query if
// bucketing information is specified, as we can't infer bucketing from data files currently.
// Since the runtime inferred partition columns could be different from what user specified,
// we fail the query if the partitioning information is specified.
case c @ CreateTableV1(tableDesc, _, None) if tableDesc.schema.isEmpty =>
if (tableDesc.bucketSpec.isDefined) {
failAnalysis("Cannot specify bucketing information if the table schema is not specified " +
"when creating and will be inferred at runtime")
}
if (tableDesc.partitionColumnNames.nonEmpty) {
failAnalysis("It is not allowed to specify partition columns when the table schema is " +
"not defined. When the table schema is not provided, schema and partition columns " +
"will be inferred.")
}
c
// When we append data to an existing table, check if the given provider, partition columns,
// bucket spec, etc. match the existing table, and adjust the columns order of the given query
// if necessary.
case c @ CreateTableV1(tableDesc, SaveMode.Append, Some(query))
if query.resolved && catalog.tableExists(tableDesc.identifier) =>
// This is guaranteed by the parser and `DataFrameWriter`
assert(tableDesc.provider.isDefined)
val db = tableDesc.identifier.database.getOrElse(catalog.getCurrentDatabase)
val tableIdentWithDB = tableDesc.identifier.copy(database = Some(db))
val tableName = tableIdentWithDB.unquotedString
val existingTable = catalog.getTableMetadata(tableIdentWithDB)
if (existingTable.tableType == CatalogTableType.VIEW) {
throw QueryCompilationErrors.saveDataIntoViewNotAllowedError()
}
// Check if the specified data source match the data source of the existing table.
val existingProvider = DataSource.lookupDataSource(existingTable.provider.get, conf)
val specifiedProvider = DataSource.lookupDataSource(tableDesc.provider.get, conf)
// TODO: Check that options from the resolved relation match the relation that we are
// inserting into (i.e. using the same compression).
      // If one of the providers is [[FileDataSourceV2]] and the other one is its corresponding
// [[FileFormat]], the two providers are considered compatible.
if (fallBackV2ToV1(existingProvider) != fallBackV2ToV1(specifiedProvider)) {
throw QueryCompilationErrors.mismatchedTableFormatError(
tableName, existingProvider, specifiedProvider)
}
tableDesc.storage.locationUri match {
case Some(location) if location.getPath != existingTable.location.getPath =>
throw QueryCompilationErrors.mismatchedTableLocationError(
tableIdentWithDB, existingTable, tableDesc)
case _ =>
}
if (query.schema.length != existingTable.schema.length) {
throw QueryCompilationErrors.mismatchedTableColumnNumberError(
tableName, existingTable, query)
}
val resolver = conf.resolver
val tableCols = existingTable.schema.map(_.name)
// As we are inserting into an existing table, we should respect the existing schema and
// adjust the column order of the given dataframe according to it, or throw exception
// if the column names do not match.
val adjustedColumns = tableCols.map { col =>
query.resolve(Seq(col), resolver).getOrElse {
val inputColumns = query.schema.map(_.name).mkString(", ")
throw QueryCompilationErrors.cannotResolveColumnGivenInputColumnsError(col, inputColumns)
}
}
// Check if the specified partition columns match the existing table.
val specifiedPartCols = CatalogUtils.normalizePartCols(
tableName, tableCols, tableDesc.partitionColumnNames, resolver)
if (specifiedPartCols != existingTable.partitionColumnNames) {
val existingPartCols = existingTable.partitionColumnNames.mkString(", ")
throw QueryCompilationErrors.mismatchedTablePartitionColumnError(
tableName, specifiedPartCols, existingPartCols)
}
// Check if the specified bucketing match the existing table.
val specifiedBucketSpec = tableDesc.bucketSpec.map { bucketSpec =>
CatalogUtils.normalizeBucketSpec(tableName, tableCols, bucketSpec, resolver)
}
if (specifiedBucketSpec != existingTable.bucketSpec) {
val specifiedBucketString =
specifiedBucketSpec.map(_.toString).getOrElse("not bucketed")
val existingBucketString =
existingTable.bucketSpec.map(_.toString).getOrElse("not bucketed")
throw QueryCompilationErrors.mismatchedTableBucketingError(
tableName, specifiedBucketString, existingBucketString)
}
val newQuery = if (adjustedColumns != query.output) {
Project(adjustedColumns, query)
} else {
query
}
c.copy(
tableDesc = existingTable,
query = Some(TableOutputResolver.resolveOutputColumns(
tableDesc.qualifiedName, existingTable.schema.toAttributes, newQuery,
byName = true, conf)))
// Here we normalize partition, bucket and sort column names, w.r.t. the case sensitivity
// config, and do various checks:
// * column names in table definition can't be duplicated.
// * partition, bucket and sort column names must exist in table definition.
// * partition, bucket and sort column names can't be duplicated.
// * can't use all table columns as partition columns.
// * partition columns' type must be AtomicType.
// * sort columns' type must be orderable.
// * reorder table schema or output of query plan, to put partition columns at the end.
case c @ CreateTableV1(tableDesc, _, query) if query.forall(_.resolved) =>
if (query.isDefined) {
assert(tableDesc.schema.isEmpty,
"Schema may not be specified in a Create Table As Select (CTAS) statement")
val analyzedQuery = query.get
val normalizedTable = normalizeCatalogTable(analyzedQuery.schema, tableDesc)
DDLUtils.checkTableColumns(tableDesc.copy(schema = analyzedQuery.schema))
val output = analyzedQuery.output
val partitionAttrs = normalizedTable.partitionColumnNames.map { partCol =>
output.find(_.name == partCol).get
}
val newOutput = output.filterNot(partitionAttrs.contains) ++ partitionAttrs
val reorderedQuery = if (newOutput == output) {
analyzedQuery
} else {
Project(newOutput, analyzedQuery)
}
c.copy(tableDesc = normalizedTable, query = Some(reorderedQuery))
} else {
DDLUtils.checkTableColumns(tableDesc)
val normalizedTable = normalizeCatalogTable(tableDesc.schema, tableDesc)
val partitionSchema = normalizedTable.partitionColumnNames.map { partCol =>
normalizedTable.schema.find(_.name == partCol).get
}
val reorderedSchema =
StructType(normalizedTable.schema.filterNot(partitionSchema.contains) ++ partitionSchema)
c.copy(tableDesc = normalizedTable.copy(schema = reorderedSchema))
}
case create: V2CreateTablePlan if create.childrenResolved =>
val schema = create.tableSchema
val partitioning = create.partitioning
val identifier = create.tableName
val isCaseSensitive = conf.caseSensitiveAnalysis
// Check that columns are not duplicated in the schema
val flattenedSchema = SchemaUtils.explodeNestedFieldNames(schema)
SchemaUtils.checkColumnNameDuplication(
flattenedSchema,
s"in the table definition of $identifier",
isCaseSensitive)
// Check that columns are not duplicated in the partitioning statement
SchemaUtils.checkTransformDuplication(
partitioning, "in the partitioning", isCaseSensitive)
if (schema.isEmpty) {
if (partitioning.nonEmpty) {
throw QueryCompilationErrors.specifyPartitionNotAllowedWhenTableSchemaNotDefinedError()
}
create
} else {
// Resolve and normalize partition columns as necessary
val resolver = conf.resolver
val normalizedPartitions = partitioning.map {
case transform: RewritableTransform =>
val rewritten = transform.references().map { ref =>
// Throws an exception if the reference cannot be resolved
val position = SchemaUtils.findColumnPosition(ref.fieldNames(), schema, resolver)
FieldReference(SchemaUtils.getColumnName(position, schema))
}
transform.withReferences(rewritten)
case other => other
}
create.withPartitioning(normalizedPartitions)
}
}
private def fallBackV2ToV1(cls: Class[_]): Class[_] = cls.newInstance match {
case f: FileDataSourceV2 => f.fallbackFileFormat
case _ => cls
}
private def normalizeCatalogTable(schema: StructType, table: CatalogTable): CatalogTable = {
SchemaUtils.checkSchemaColumnNameDuplication(
schema,
"in the table definition of " + table.identifier,
conf.caseSensitiveAnalysis)
val normalizedPartCols = normalizePartitionColumns(schema, table)
val normalizedBucketSpec = normalizeBucketSpec(schema, table)
normalizedBucketSpec.foreach { spec =>
for (bucketCol <- spec.bucketColumnNames if normalizedPartCols.contains(bucketCol)) {
throw QueryCompilationErrors.bucketingColumnCannotBePartOfPartitionColumnsError(
bucketCol, normalizedPartCols)
}
for (sortCol <- spec.sortColumnNames if normalizedPartCols.contains(sortCol)) {
throw QueryCompilationErrors.bucketSortingColumnCannotBePartOfPartitionColumnsError(
sortCol, normalizedPartCols)
}
}
table.copy(partitionColumnNames = normalizedPartCols, bucketSpec = normalizedBucketSpec)
}
private def normalizePartitionColumns(schema: StructType, table: CatalogTable): Seq[String] = {
val normalizedPartitionCols = CatalogUtils.normalizePartCols(
tableName = table.identifier.unquotedString,
tableCols = schema.map(_.name),
partCols = table.partitionColumnNames,
resolver = conf.resolver)
SchemaUtils.checkColumnNameDuplication(
normalizedPartitionCols,
"in the partition schema",
conf.resolver)
if (schema.nonEmpty && normalizedPartitionCols.length == schema.length) {
if (DDLUtils.isHiveTable(table)) {
// When we hit this branch, it means users didn't specify schema for the table to be
// created, as we always include partition columns in table schema for hive serde tables.
// The real schema will be inferred at hive metastore by hive serde, plus the given
// partition columns, so we should not fail the analysis here.
} else {
failAnalysis("Cannot use all columns for partition columns")
}
}
schema.filter(f => normalizedPartitionCols.contains(f.name)).map(_.dataType).foreach {
case _: AtomicType => // OK
case other => failAnalysis(s"Cannot use ${other.catalogString} for partition column")
}
normalizedPartitionCols
}
private def normalizeBucketSpec(schema: StructType, table: CatalogTable): Option[BucketSpec] = {
table.bucketSpec match {
case Some(bucketSpec) =>
val normalizedBucketSpec = CatalogUtils.normalizeBucketSpec(
tableName = table.identifier.unquotedString,
tableCols = schema.map(_.name),
bucketSpec = bucketSpec,
resolver = conf.resolver)
SchemaUtils.checkColumnNameDuplication(
normalizedBucketSpec.bucketColumnNames,
"in the bucket definition",
conf.resolver)
SchemaUtils.checkColumnNameDuplication(
normalizedBucketSpec.sortColumnNames,
"in the sort definition",
conf.resolver)
normalizedBucketSpec.sortColumnNames.map(schema(_)).map(_.dataType).foreach {
case dt if RowOrdering.isOrderable(dt) => // OK
case other => failAnalysis(s"Cannot use ${other.catalogString} for sorting column")
}
Some(normalizedBucketSpec)
case None => None
}
}
private def failAnalysis(msg: String) = throw new AnalysisException(msg)
}
/**
* Preprocess the [[InsertIntoStatement]] plan. Throws exception if the number of columns mismatch,
* or specified partition columns are different from the existing partition columns in the target
* table. It also does data type casting and field renaming, to make sure that the columns to be
* inserted have the correct data type and fields have the correct names.
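 *
 * For example (roughly): with a table `t` partitioned by `p`, an
 * `INSERT INTO t PARTITION (p = 1) SELECT ...` carries the static value `p -> Some("1")`,
 * whereas `INSERT INTO t PARTITION (p) SELECT ...` (or no PARTITION clause at all)
 * leaves `p` dynamic, which this rule records as `p -> None`.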
*/
object PreprocessTableInsertion extends Rule[LogicalPlan] {
private def preprocess(
insert: InsertIntoStatement,
tblName: String,
partColNames: StructType,
catalogTable: Option[CatalogTable]): InsertIntoStatement = {
val normalizedPartSpec = normalizePartitionSpec(
insert.partitionSpec, partColNames, tblName, conf.resolver)
val staticPartCols = normalizedPartSpec.filter(_._2.isDefined).keySet
val expectedColumns = insert.table.output.filterNot(a => staticPartCols.contains(a.name))
if (expectedColumns.length != insert.query.schema.length) {
throw QueryCompilationErrors.mismatchedInsertedDataColumnNumberError(
tblName, insert, staticPartCols)
}
val partitionsTrackedByCatalog = catalogTable.isDefined &&
catalogTable.get.partitionColumnNames.nonEmpty &&
catalogTable.get.tracksPartitionsInCatalog
if (partitionsTrackedByCatalog && normalizedPartSpec.nonEmpty) {
// empty partition column value
if (normalizedPartSpec.values.flatten.exists(v => v != null && v.isEmpty)) {
val spec = normalizedPartSpec.map(p => p._1 + "=" + p._2).mkString("[", ", ", "]")
throw QueryCompilationErrors.invalidPartitionSpecError(
s"The spec ($spec) contains an empty partition column value")
}
}
val newQuery = TableOutputResolver.resolveOutputColumns(
tblName, expectedColumns, insert.query, byName = false, conf)
if (normalizedPartSpec.nonEmpty) {
if (normalizedPartSpec.size != partColNames.length) {
throw QueryCompilationErrors.requestedPartitionsMismatchTablePartitionsError(
tblName, normalizedPartSpec, partColNames)
}
insert.copy(query = newQuery, partitionSpec = normalizedPartSpec)
} else {
// All partition columns are dynamic because the InsertIntoTable command does
// not explicitly specify partitioning columns.
insert.copy(query = newQuery, partitionSpec = partColNames.map(_.name).map(_ -> None).toMap)
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan resolveOperators {
case i @ InsertIntoStatement(table, _, _, query, _, _) if table.resolved && query.resolved =>
table match {
case relation: HiveTableRelation =>
val metadata = relation.tableMeta
preprocess(i, metadata.identifier.quotedString, metadata.partitionSchema,
Some(metadata))
case LogicalRelation(h: HadoopFsRelation, _, catalogTable, _) =>
val tblName = catalogTable.map(_.identifier.quotedString).getOrElse("unknown")
preprocess(i, tblName, h.partitionSchema, catalogTable)
case LogicalRelation(_: InsertableRelation, _, catalogTable, _) =>
val tblName = catalogTable.map(_.identifier.quotedString).getOrElse("unknown")
preprocess(i, tblName, new StructType(), catalogTable)
case _ => i
}
}
}
/**
* A rule to check whether the functions are supported only when Hive support is enabled
*/
object HiveOnlyCheck extends (LogicalPlan => Unit) {
def apply(plan: LogicalPlan): Unit = {
plan.foreach {
case CreateTableV1(tableDesc, _, _) if DDLUtils.isHiveTable(tableDesc) =>
throw QueryCompilationErrors.ddlWithoutHiveSupportEnabledError(
"CREATE Hive TABLE (AS SELECT)")
case i: InsertIntoDir if DDLUtils.isHiveTable(i.provider) =>
throw QueryCompilationErrors.ddlWithoutHiveSupportEnabledError(
"INSERT OVERWRITE DIRECTORY with the Hive format")
case _ => // OK
}
}
}
/**
* A rule to do various checks before reading a table.
*/
object PreReadCheck extends (LogicalPlan => Unit) {
def apply(plan: LogicalPlan): Unit = {
plan.foreach {
case operator: LogicalPlan =>
operator transformExpressionsUp {
case e @ (_: InputFileName | _: InputFileBlockLength | _: InputFileBlockStart) =>
checkNumInputFileBlockSources(e, operator)
e
}
}
}
private def checkNumInputFileBlockSources(e: Expression, operator: LogicalPlan): Int = {
operator match {
case _: HiveTableRelation => 1
case _ @ LogicalRelation(_: HadoopFsRelation, _, _, _) => 1
case _: LeafNode => 0
// UNION ALL has multiple children, but these children do not concurrently use InputFileBlock.
case u: Union =>
if (u.children.map(checkNumInputFileBlockSources(e, _)).sum >= 1) 1 else 0
case o =>
val numInputFileBlockSources = o.children.map(checkNumInputFileBlockSources(e, _)).sum
if (numInputFileBlockSources > 1) {
e.failAnalysis(s"'${e.prettyName}' does not support more than one sources")
} else {
numInputFileBlockSources
}
}
}
}
/**
* A rule to do various checks before inserting into or writing to a data source table.
*/
object PreWriteCheck extends (LogicalPlan => Unit) {
def failAnalysis(msg: String): Unit = { throw new AnalysisException(msg) }
def apply(plan: LogicalPlan): Unit = {
plan.foreach {
case InsertIntoStatement(l @ LogicalRelation(relation, _, _, _), partition, _, query, _, _) =>
// Get all input data source relations of the query.
val srcRelations = query.collect {
case LogicalRelation(src, _, _, _) => src
}
if (srcRelations.contains(relation)) {
failAnalysis("Cannot insert into table that is also being read from.")
} else {
// OK
}
relation match {
case _: HadoopFsRelation => // OK
// Right now, we do not support insert into a non-file-based data source table with
// partition specs.
case _: InsertableRelation if partition.nonEmpty =>
failAnalysis(s"Insert into a partition is not allowed because $l is not partitioned.")
case _ => failAnalysis(s"$relation does not allow insertion.")
}
case InsertIntoStatement(t, _, _, _, _, _)
if !t.isInstanceOf[LeafNode] ||
t.isInstanceOf[Range] ||
t.isInstanceOf[OneRowRelation] ||
t.isInstanceOf[LocalRelation] =>
failAnalysis(s"Inserting into an RDD-based table is not allowed.")
case _ => // OK
}
}
}
| shaneknapp/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/rules.scala | Scala | apache-2.0 | 23,353 |
package org.jetbrains.plugins.scala
package codeInsight.intention.controlflow
import com.intellij.codeInsight.intention.PsiElementBaseIntentionAction
import com.intellij.openapi.command.CommandProcessor
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.project.Project
import com.intellij.openapi.ui.popup.JBPopupFactory
import com.intellij.psi.tree.IElementType
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.psi.{PsiDocumentManager, PsiElement, ResolveState}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScCaseClause
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory._
import org.jetbrains.plugins.scala.lang.resolve.processor.CompletionProcessor
import org.jetbrains.plugins.scala.lang.resolve.{ScalaResolveResult, StdKinds}
import org.jetbrains.plugins.scala.project.ProjectContext
import scala.collection.Set
/**
* Nikolay.Tropin
* 4/17/13
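 *
 * Roughly, the intention rewrites
 * {{{
 * do { body } while (cond)
 * }}}
 * into
 * {{{
 * body
 * while (cond) { body }
 * }}}
 * executing the body once up front; if hoisting the body would clash with names
 * already declared in the enclosing scope, the user is asked to confirm first.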
*/
object ReplaceDoWhileWithWhileIntention {
def familyName = "Replace do while with while"
}
class ReplaceDoWhileWithWhileIntention extends PsiElementBaseIntentionAction {
def getFamilyName: String = ReplaceDoWhileWithWhileIntention.familyName
override def getText: String = ReplaceDoWhileWithWhileIntention.familyName
def isAvailable(project: Project, editor: Editor, element: PsiElement): Boolean = {
for {
doStmt <- Option(PsiTreeUtil.getParentOfType(element, classOf[ScDoStmt], false))
condition <- doStmt.condition
body <- doStmt.getExprBody
} {
val offset = editor.getCaretModel.getOffset
//offset is on the word "do" or "while"
if ((offset >= doStmt.getTextRange.getStartOffset && offset < body.getTextRange.getStartOffset) ||
(offset > body.getTextRange.getEndOffset && offset < condition.getTextRange.getStartOffset))
return true
}
false
}
override def invoke(project: Project, editor: Editor, element: PsiElement) {
implicit val ctx: ProjectContext = project
//check for name conflicts
for {
doStmt <- Option(PsiTreeUtil.getParentOfType(element, classOf[ScDoStmt]))
body <- doStmt.getExprBody
doStmtParent <- doStmt.parent
} {
val nameConflict = (declaredNames(body) intersect declaredNames(doStmtParent)).nonEmpty
if (nameConflict) {
val message = "This action will cause name conflict."
showNotification(message)
return
}
}
doReplacement()
def showNotification(text: String) {
val popupFactory = JBPopupFactory.getInstance
popupFactory.createConfirmation(text, "Continue", "Cancel", new Runnable {
//action on confirmation
def run() {
//to make action Undoable
CommandProcessor.getInstance().executeCommand(project, new Runnable() {
def run() { doReplacement() }
}, null, null)
}
}, 0).showInBestPositionFor(editor)
}
def doReplacement() {
for {
doStmt <- Option(PsiTreeUtil.getParentOfType(element, classOf[ScDoStmt]))
condition <- doStmt.condition
body <- doStmt.getExprBody
doStmtParent <- doStmt.parent
} {
val bodyText = body.getText
val newWhileStmt = createExpressionFromText(s"while (${condition.getText}) $bodyText")
val newBody = createExpressionFromText(bodyText)
val parentBlockHasBraces: Boolean = doStmt.getParent.children.map(_.getNode.getElementType).contains(ScalaTokenTypes.tLBRACE)
val parentBlockNeedBraces: Boolean = doStmtParent match {
case _: ScalaFile => false
case block: ScBlock => block.getParent match {
case _: ScCaseClause => false
case _ => true
}
case _ => true
}
inWriteAction {
val newDoStmt =
if (!parentBlockHasBraces && parentBlockNeedBraces) {
val doStmtInBraces =
doStmt.replaceExpression(createBlockFromExpr(doStmt), removeParenthesis = true)
PsiTreeUtil.findChildOfType(doStmtInBraces, classOf[ScDoStmt], true)
} else doStmt
val newExpression: ScExpression = newDoStmt.replaceExpression(newWhileStmt, removeParenthesis = true)
val parent = newExpression.getParent
val bodyElements = newBody match {
case _: ScBlock => newBody.children
case _: ScExpression => Iterator(newBody)
}
for (elem <- bodyElements) {
val elementType: IElementType = elem.getNode.getElementType
if (elementType != ScalaTokenTypes.tLBRACE && elementType != ScalaTokenTypes.tRBRACE)
parent.addBefore(elem, newExpression)
}
parent.addBefore(createNewLine(), newExpression)
PsiDocumentManager.getInstance(project).commitDocument(editor.getDocument)
}
}
}
}
def declaredNames(element: PsiElement): Set[String] = {
implicit val ctx: ProjectContext = element
val firstChild: PsiElement = element.getFirstChild
val processor: CompletionProcessor = new CompletionProcessor(StdKinds.refExprLastRef, firstChild, collectImplicits = true)
element.processDeclarations(processor, ResolveState.initial(), firstChild, firstChild)
val candidates: Set[ScalaResolveResult] = processor.candidatesS
candidates.map(_.name)
}
} | ilinum/intellij-scala | src/org/jetbrains/plugins/scala/codeInsight/intention/controlflow/ReplaceDoWhileWithWhileIntention.scala | Scala | apache-2.0 | 5,597 |
package dregex
import dregex.impl.Util
import org.scalatest.funsuite.AnyFunSuite
import scala.collection.immutable.Seq
class PerformanceTest extends AnyFunSuite {
test("slow regexs") {
val (regexes, elapsed1) = Util.time {
Regex.compile(
Seq(
"qwertyuiopasd",
"/aaaaaa/(?!xxc)(?!xxd)(?!xxe)(?!xxf)(?!xxg)(?!xxh)[a-zA-Z0-9]{7}.*",
"/aaaaaa/(?!x+c)(?!x+d)(?!x+e)(?!x+f)(?!x+g)(?!x+h)[a-zA-Z0-9]{7}.*",
"/aaaaaa/(?!x+c|x+d|x+e|x+f|x+g|x+h)[a-zA-Z0-9]{7}.*",
"/aaaaaa/(?!xxc)a(?!xxd)b(?!xxx)c(?!xxf)[a-zA-Z0-9]{7}.*", // disables lookahead combinations
"/aaaaaa/(?!xxc|xxd|xxe|xxf|xxg|xxh)[a-zA-Z0-9]{7}.*",
"/aaaaaa/(?!xxc.*)(?!xxd.*)(?!xxe.*)(?!xxf.*)(?!xxg.*)[a-zA-Z0-9]{7}.*"
))
}
info(s"compilation time: $elapsed1")
val elapsed2 = Util.time {
regexes.tail.foreach(_ doIntersect regexes.head)
}
info(s"intersection time: $elapsed2")
}
test("large character classes") {
val (regex, elapsed) = Util.time {
Regex.compile("""[\x{0}-\x{10FFFF}]""")
}
info(s"compilation time: $elapsed")
}
}
| marianobarrios/dregex | src/test/scala/dregex/PerformanceTest.scala | Scala | bsd-2-clause | 1,141 |
class B(x : () => Int)
class A(i : Int) extends B(() => i) { i }
| yusuke2255/dotty | tests/untried/pos/t803.scala | Scala | bsd-3-clause | 65 |
package com.ing.baker.runtime.recipe_manager
import java.util.UUID
import java.util.concurrent.TimeUnit
import _root_.akka.actor.ActorSystem
import _root_.akka.testkit.{TestKit, TestProbe}
import _root_.akka.util.Timeout
import com.ing.baker.il.CompiledRecipe
import com.ing.baker.runtime.akka.AkkaBakerConfig.Timeouts
import com.ing.baker.runtime.akka.actor.recipe_manager.RecipeManagerProtocol._
import com.ing.baker.runtime.common.RecipeRecord
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AsyncWordSpecLike
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll}
import org.scalatestplus.mockito.MockitoSugar
import scala.concurrent.duration.{Duration, FiniteDuration}
class RecipeManagerActorImplSpec extends TestKit(ActorSystem("MySpec"))
with AsyncWordSpecLike
with Matchers
with MockitoSugar
with BeforeAndAfter
with BeforeAndAfterAll {
private val timeout: FiniteDuration = FiniteDuration(5, TimeUnit.SECONDS)
private val timeouts: Timeouts = new Timeouts(timeout, timeout, timeout ,timeout ,timeout)
override def afterAll(): Unit = {
TestKit.shutdownActorSystem(system)
}
"RecipeManagerActorImpl" should {
"implement add" in {
val actor = TestProbe()
val manager = new ActorBasedRecipeManager(actor.ref, timeouts)
val recipe = mock[CompiledRecipe]
val eventualString = manager.put(RecipeRecord.of(recipe, System.currentTimeMillis()))
actor.expectMsg(AddRecipe(recipe))
val id = UUID.randomUUID().toString
actor.reply(AddRecipeResponse(id))
eventualString.map(_ shouldBe id)
}
"implement get" in {
val actor = TestProbe()
val manager = new ActorBasedRecipeManager(actor.ref, timeouts)
val recipe = mock[CompiledRecipe]
val id1 = UUID.randomUUID().toString
val eventualNotFound = manager.get(id1)
actor.expectMsg(GetRecipe(id1))
actor.reply(NoRecipeFound(id1))
val id2 = UUID.randomUUID().toString
val eventualFound = manager.get(id2)
actor.expectMsg(GetRecipe(id2))
val compiledRecipe = mock[CompiledRecipe]
      val timestamp = 42L
actor.reply(RecipeFound(compiledRecipe, timestamp))
for {
_ <- eventualNotFound.map(_ shouldBe(None))
_ <- eventualFound.map(_.get.recipe.shouldBe(compiledRecipe))
} yield succeed
}
"implement getAll" in {
val actor = TestProbe()
val manager = new ActorBasedRecipeManager(actor.ref, timeouts)
val recipe = mock[CompiledRecipe]
val eventualString = manager.all
actor.expectMsg(GetAllRecipes)
      val timestamp = 42L
actor.reply(AllRecipes(Seq(RecipeInformation(recipe, timestamp))))
eventualString.map(_ shouldBe Seq(RecipeRecord.of(recipe, timestamp)))
}
}
}
| ing-bank/baker | core/akka-runtime/src/test/scala/com/ing/baker/runtime/recipe_manager/RecipeManagerActorImplSpec.scala | Scala | mit | 2,800 |
import stainless.equations._
import stainless.annotation._
import stainless.lang._
object Equations1 {
@extern
def makeEqual(x: BigInt, y: BigInt): Unit = {
(??? : Unit)
} ensuring(_ => x == y)
def f(x: BigInt, y: BigInt) = {
x ==:| makeEqual(x,y) |:
y ==:| trivial |:
x
}
}
| epfl-lara/stainless | frontends/benchmarks/verification/invalid/Equations1.scala | Scala | apache-2.0 | 303 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze.optimizerexitcodes
import edu.latrobe._
import edu.latrobe.blaze._
import scala.util.hashing.MurmurHash3
/**
* For breeze:
*
* FirstOrderMinimizer.FunctionValuesConverged
* or
* FirstOrderMinimizer.GradientConverged
*/
final class ThirdParty(override val description: String,
override val indicatesConvergence: Boolean,
override val indicatesFailure: Boolean)
extends IndependentOptimizerExitCode {
override def toString
: String = {
s"ThirdParty[$description, $indicatesConvergence, $indicatesFailure]"
}
override def hashCode()
: Int = {
var tmp = super.hashCode()
tmp = MurmurHash3.mix(tmp, description.hashCode)
tmp = MurmurHash3.mix(tmp, indicatesConvergence.hashCode)
tmp = MurmurHash3.mix(tmp, indicatesFailure.hashCode)
tmp
}
override def canEqual(that: Any)
: Boolean = that.isInstanceOf[ThirdParty]
override protected def doEquals(other: Equatable)
: Boolean = super.doEquals(other) && (other match {
case other: ThirdParty =>
description == other.description &&
indicatesConvergence == other.indicatesConvergence &&
indicatesFailure == other.indicatesFailure
case _ =>
false
})
}
object ThirdParty {
final def apply(description: String,
indicatesConvergence: Boolean,
indicatesFailure: Boolean)
: ThirdParty = new ThirdParty(
description,
indicatesConvergence,
indicatesFailure
)
final def convergence(description: String)
: ThirdParty = apply(
description,
indicatesConvergence = true,
indicatesFailure = false
)
final def failure(description: String)
: ThirdParty = apply(
description,
indicatesConvergence = false,
indicatesFailure = true
)
final def neutral(description: String)
: ThirdParty = apply(
description,
indicatesConvergence = false,
indicatesFailure = false
)
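  // Illustrative sketch (not from the original source): mapping a hypothetical
  // breeze FirstOrderMinimizer convergence reason onto an exit code; `reason`
  // is a placeholder value.
  //
  //   val exitCode =
  //     if (reason.toString.contains("Converged")) ThirdParty.convergence(reason.toString)
  //     else ThirdParty.failure(reason.toString)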
}
| bashimao/ltudl | blaze/src/main/scala/edu/latrobe/blaze/optimizerexitcodes/ThirdParty.scala | Scala | apache-2.0 | 2,703 |
package calculator
import math.pow
import math.sqrt
object Polynomial {
def computeDelta(a: Signal[Double], b: Signal[Double],
c: Signal[Double]): Signal[Double] = {
Signal[Double](pow(b(), 2) - 4*a()*c())
}
def computeSolutions(a: Signal[Double], b: Signal[Double],
c: Signal[Double], delta: Signal[Double]): Signal[Set[Double]] = {
Signal[Set[Double]](delta() match {
case x if x > 0 => Set((-b() + sqrt(x)) / (2.0*a()), (-b() - sqrt(x)) / (2.0*a()))
case _ => Set()
})
}
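  // Hedged example (not part of the original assignment code): wiring the two
  // computations together, assuming the course's Var[T] implementation of
  // Signal is available in this package.
  //
  //   val a = Var(1.0); val b = Var(-3.0); val c = Var(2.0)
  //   val delta = computeDelta(a, b, c)
  //   val roots = computeSolutions(a, b, c, delta)
  //   roots()       // Set(2.0, 1.0)
  //   b() = 2.0     // delta and roots recompute automatically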
}
| tanderegg/reactive-class | week2/calculator/src/main/scala/calculator/Polynomial.scala | Scala | mit | 531 |
import Dependencies._
import sbt.Keys._
import sbt._
object Shared {
lazy val sparkVersion = SettingKey[String]("x-spark-version")
lazy val hadoopVersion = SettingKey[String]("x-hadoop-version")
lazy val jets3tVersion = SettingKey[String]("x-jets3t-version")
lazy val jlineDef = SettingKey[(String, String)]("x-jline-def")
lazy val withHive = SettingKey[Boolean]("x-with-hive")
lazy val sharedSettings: Seq[Def.Setting[_]] = Seq(
publishArtifact in Test := false,
publishMavenStyle := true,
organization := MainProperties.organization,
scalaVersion := defaultScalaVersion,
sparkVersion := defaultSparkVersion,
hadoopVersion := defaultHadoopVersion,
jets3tVersion := defaultJets3tVersion,
jlineDef := (if (defaultScalaVersion.startsWith("2.10")) {
("org.scala-lang", defaultScalaVersion)
} else {
("jline", "2.12")
}),
withHive := defaultWithHive,
libraryDependencies += guava,
libraryDependencies += jerseyClient
)
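  // Hedged sketch (not from the original file): a build definition would
  // typically splice these settings into a project, e.g.
  //
  //   lazy val core = (project in file("core"))
  //     .settings(Shared.sharedSettings: _*)
  //     .settings(Shared.sparkSettings: _*)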
val gisSettings: Seq[Def.Setting[_]] = Seq(
libraryDependencies ++= geometryDeps
)
val repl: Seq[Def.Setting[_]] = Seq(
libraryDependencies += sparkRepl(sparkVersion.value)
)
val hive: Seq[Def.Setting[_]] = Seq(
libraryDependencies ++= {
if(withHive.value) Seq(sparkHive(sparkVersion.value)) else Seq.empty
}
)
val yarnWebProxy: Seq[Def.Setting[_]] = Seq(
libraryDependencies ++= {
val hv = hadoopVersion.value
if (!hv.startsWith("1")) Seq(yarnProxy(hv)) else Seq.empty
}
)
lazy val sparkSettings: Seq[Def.Setting[_]] = Seq(
libraryDependencies ++= {
val jets3tVersion = sys.props.get("jets3t.version") match {
case Some(jv) => jets3t(Some(jv), None)
case None => jets3t(None, Some(hadoopVersion.value))
}
val jettyVersion = "8.1.14.v20131031"
val libs = Seq(
sparkCore(sparkVersion.value),
sparkYarn(sparkVersion.value),
sparkSQL(sparkVersion.value),
hadoopClient(hadoopVersion.value),
jets3tVersion,
commonsCodec
) ++ (
if (!scalaVersion.value.startsWith("2.10")) {
// in 2.11
//Boot.scala → HttpServer → eclipse
// eclipse → provided boohooo :'-(
Seq(
"org.eclipse.jetty" % "jetty-http" % jettyVersion,
"org.eclipse.jetty" % "jetty-continuation" % jettyVersion,
"org.eclipse.jetty" % "jetty-servlet" % jettyVersion,
"org.eclipse.jetty" % "jetty-util" % jettyVersion,
"org.eclipse.jetty" % "jetty-security" % jettyVersion,
"org.eclipse.jetty" % "jetty-plus" % jettyVersion,
"org.eclipse.jetty" % "jetty-server" % jettyVersion
)
} else Nil
) ++ sparkMesos(sparkVersion.value) ++ sparkKubernetes(sparkVersion.value)
libs
}
) ++ repl ++ hive ++ yarnWebProxy
}
| andypetrella/spark-notebook | project/Shared.scala | Scala | apache-2.0 | 2,997 |
package com.thoughtworks.datacommons.prepbuddy.analyzers.completeness
import com.thoughtworks.datacommons.prepbuddy.exceptions.ApplicationException
import org.apache.spark.sql.types._
import org.apache.spark.sql.{DataFrame, Row, SparkSession}
import org.scalatest.FunSuite
class RowCompletenessRuleTest extends FunSuite {
private val spark: SparkSession = SparkSession.builder()
.appName(getClass.getCanonicalName)
.master("local[2]")
.getOrCreate()
def getPersonsWithSchema: Array[Row] = {
object Person {
private val firstName = StructField("firstName", StringType)
private val middleName = StructField("middleName", StringType)
private val lastName = StructField("lastName", StringType)
private val age = StructField("age", IntegerType)
private val married = StructField("married", BooleanType)
def getSchema: StructType = StructType(Array(firstName, middleName, lastName, age, married))
}
val data: List[Row] = List(
Row("John", "-", "-", 28, true),
Row("Stiven", "", "Smith", null, true),
Row("Prasun", "Kumar", "Pal", 20, true),
Row("Ram", "Lal", "Panwala", null, true),
Row("Babu", "Lal", "Phoolwala", 57, null)
)
val personData: DataFrame = spark.createDataFrame(spark.sparkContext.parallelize(data), Person.getSchema)
personData.collect()
}
private val persons: Array[Row] = getPersonsWithSchema
private val john = persons(0)
private val stiven = persons(1)
private val prasun = persons(2)
private val ramlal = persons(3)
private val babulal = persons(4)
test("should return false when the specified column values is/are null") {
val completenessRule: RowCompletenessRule = new RowCompletenessRule(
EvaluationMode.CUSTOM,
List("middleName", "age"),
List("N/A", "-")
)
assert(!completenessRule.isComplete(john))
assert(!completenessRule.isComplete(stiven))
assert(!completenessRule.isComplete(ramlal))
assert(completenessRule.isComplete(prasun))
assert(completenessRule.isComplete(babulal))
}
test("should return false when any of the column value is null") {
val completenessRule: RowCompletenessRule = new RowCompletenessRule(
EvaluationMode.STRICT,
possibleNullValues = List("empty", "-")
)
assert(!completenessRule.isComplete(john))
assert(!completenessRule.isComplete(stiven))
assert(!completenessRule.isComplete(ramlal))
assert(!completenessRule.isComplete(babulal))
assert(completenessRule.isComplete(prasun))
}
test("should throw exception when Evaluation mode is CUSTOM but mandatory columns are not specified") {
val resultException: ApplicationException = intercept[ApplicationException] {
new RowCompletenessRule(
EvaluationMode.CUSTOM,
possibleNullValues = List("empty", "-", "N/A")
)
}
assert("Requirement did not matched" == resultException.getMessage)
}
}
| data-commons/prep-buddy | src/test/scala/com/thoughtworks/datacommons/prepbuddy/analyzers/completeness/RowCompletenessRuleTest.scala | Scala | apache-2.0 | 3,211 |
package dpla.ingestion3.enrichments.normalizations.filters
import dpla.ingestion3.enrichments.normalizations.FilterList
import dpla.ingestion3.enrichments.normalizations.FilterRegex._
/**
* List of type terms that are allowed in the type field because they can be mapped to boarder DCMIType terms
*/
object TypeAllowList extends FilterList {
lazy val termList: Set[String] = getTermsFromFiles
.map(line => line.split(",")(0))
.map(_.allowListRegex)
override val files: Seq[String] = Seq(
"/types/dpla-type-normalization.csv"
)
}
| dpla/ingestion3 | src/main/scala/dpla/ingestion3/enrichments/normalizations/filters/TypeAllowList.scala | Scala | mit | 555 |
package models
case class Note(id: String, contents: String)
| Technius/noteit | app/models/Models.scala | Scala | mit | 62 |
package org.jetbrains.plugins.scala
package lang.refactoring
import com.intellij.codeInsight.editorActions.moveUpDown.StatementUpDownMover.MoveInfo
import com.intellij.codeInsight.editorActions.moveUpDown.{LineMover, LineRange}
import com.intellij.openapi.editor.Editor
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.psi.{PsiComment, PsiElement, PsiFile, PsiWhiteSpace}
import org.jetbrains.plugins.scala.extensions.{PsiElementExt, _}
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns.ScCaseClause
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports.ScImportStmt
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScMember
/**
* Pavel Fatin
*/
class ScalaStatementMover extends LineMover {
private type ElementClass = Class[_ <: PsiElement]
override def checkAvailable(editor: Editor, file: PsiFile, info: MoveInfo, down: Boolean): Boolean = {
if(!super.checkAvailable(editor, file, info, down)) return false
if(editor.getSelectionModel.hasSelection) return false
if(!file.isInstanceOf[ScalaFile]) return false
def aim(sourceClass: ElementClass, predicate: PsiElement => Boolean, canUseLineAsTarget: Boolean = true): Option[(PsiElement, LineRange)] = {
findSourceOf(sourceClass).map { source =>
val targetRange = findTargetRangeFor(source, predicate).getOrElse {
if (canUseLineAsTarget) nextLineRangeFor(source) else null
}
(source, targetRange)
}
}
def findSourceOf(aClass: ElementClass) = findElementAt(aClass, editor, file, info.toMove.startLine)
def findTargetRangeFor(source: PsiElement, predicate: PsiElement => Boolean): Option[LineRange] = {
val siblings = if(down) source.nextSiblings else source.prevSiblings
siblings.filter(!_.isInstanceOf[PsiComment] )
.takeWhile(it => it.isInstanceOf[PsiWhiteSpace] || it.isInstanceOf[PsiComment] || it.isInstanceOf[ScImportStmt] || predicate(it))
.find(predicate)
.map(rangeOf(_, editor))
}
def nextLineRangeFor(source: PsiElement): LineRange = {
val range = rangeOf(source, editor)
if (down) {
val maxLine = editor.offsetToLogicalPosition(editor.getDocument.getTextLength).line
if (range.endLine < maxLine) new LineRange(range.endLine, range.endLine + 1) else null
} else {
new LineRange(range.startLine - 1, range.startLine)
}
}
val pair = aim(classOf[ScCaseClause], _.isInstanceOf[ScCaseClause], canUseLineAsTarget = false)
.orElse(aim(classOf[ScMember], it => it.isInstanceOf[ScMember] || it.isInstanceOf[ScImportStmt]))
.orElse(aim(classOf[ScIfStmt], _ => false))
.orElse(aim(classOf[ScForStatement], _ => false))
.orElse(aim(classOf[ScMatchStmt], _ => false))
.orElse(aim(classOf[ScTryStmt], _ => false))
.orElse(aim(classOf[ScMethodCall], isControlStructureLikeCall).filter(p => isControlStructureLikeCall(p._1)))
pair.foreach { it =>
info.toMove = rangeOf(it._1, editor)
info.toMove2 = it._2
}
pair.isDefined
}
private def isControlStructureLikeCall(element: PsiElement): Boolean = element match {
case call: ScMethodCall => call.argumentExpressions.lastOption.exists(_.isInstanceOf[ScBlockExpr])
case _ => false
}
private def rangeOf(e: PsiElement, editor: Editor) = {
val begin = editor.offsetToLogicalPosition(e.getTextRange.getStartOffset).line
val end = editor.offsetToLogicalPosition(e.getTextRange.getEndOffset).line + 1
new LineRange(begin, end)
}
private def findElementAt(cl: ElementClass, editor: Editor, file: PsiFile, line: Int): Option[PsiElement] = {
val edges = edgeLeafsOf(line, editor, file)
val left = edges._1.flatMap(PsiTreeUtil.getParentOfType(_, cl, false).toOption)
val right = edges._2.flatMap(PsiTreeUtil.getParentOfType(_, cl, false).toOption)
left.zip(right)
.collect { case (l, r) if l.withParentsInFile.contains(r) => r }
.find(it => editor.offsetToLogicalPosition(it.getTextOffset).line == line)
}
private def edgeLeafsOf(line: Int, editor: Editor, file: PsiFile): (Option[PsiElement], Option[PsiElement]) = {
val document = editor.getDocument
val start = document.getLineStartOffset(line)
val end = start.max(document.getLineEndOffset(line) - 1)
val span = start.to(end)
def firstLeafOf(seq: Seq[Int]) = seq.view.flatMap(file.getNode.findLeafElementAt(_).toOption.toSeq)
.filter(!_.getPsi.isInstanceOf[PsiWhiteSpace]).map(_.getPsi).headOption
(firstLeafOf(span), firstLeafOf(span.reverse))
}
} | ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/refactoring/ScalaStatementMover.scala | Scala | apache-2.0 | 4,765 |
/*
* Copyright 2014 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s.blaze
package http
package http2
import java.nio.ByteBuffer
import org.http4s.blaze.util.BufferTools
import scala.collection.mutable.ArrayBuffer
/** A more humane interface for writing HTTP messages. */
private final class FrameEncoder(remoteSettings: Http2Settings, headerEncoder: HeaderEncoder) {
// Just a shortcut
private[this] def maxFrameSize: Int = remoteSettings.maxFrameSize
/** Set the max table size of the header encoder */
def setMaxTableSize(size: Int): Unit =
headerEncoder.maxTableSize(size)
/** Generate a window update frame for the session flow window */
def sessionWindowUpdate(size: Int): ByteBuffer =
streamWindowUpdate(0, size)
/** Generate a window update frame for the specified stream flow window */
def streamWindowUpdate(streamId: Int, size: Int): ByteBuffer =
FrameSerializer.mkWindowUpdateFrame(streamId, size)
/** Generate a ping frame */
def pingFrame(data: Array[Byte]): ByteBuffer =
FrameSerializer.mkPingFrame(false, data)
/** Generate a ping ack frame with the specified data */
def pingAck(data: Array[Byte]): ByteBuffer =
FrameSerializer.mkPingFrame(true, data)
/** Generate a RST frame with the specified stream id and error code */
def rstFrame(streamId: Int, errorCode: Long): ByteBuffer =
FrameSerializer.mkRstStreamFrame(streamId, errorCode)
/** Generate stream data frame(s) for the specified data
*
* If the data exceeds the peers MAX_FRAME_SIZE setting, it is fragmented into a series of
* frames.
*/
def dataFrame(streamId: Int, endStream: Boolean, data: ByteBuffer): collection.Seq[ByteBuffer] = {
val limit = maxFrameSize
if (data.remaining <= limit)
FrameSerializer.mkDataFrame(streamId, endStream, padding = 0, data)
else { // need to fragment
val acc = new ArrayBuffer[ByteBuffer]
while (data.hasRemaining) {
val thisData =
BufferTools.takeSlice(data, math.min(data.remaining, limit))
val eos = endStream && !data.hasRemaining
acc ++= FrameSerializer.mkDataFrame(streamId, eos, padding = 0, thisData)
}
acc
}
}
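  // Hedged illustration (not in the original source): assuming `encoder` is a
  // FrameEncoder whose peer advertised a 16KiB MAX_FRAME_SIZE, a 40000-byte
  // body is fragmented into three DATA frames, and only the final frame
  // carries the END_STREAM flag.
  //
  //   val buffers = encoder.dataFrame(streamId = 1, endStream = true,
  //     data = ByteBuffer.allocate(40000))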
/** Generate stream header frames from the provided header sequence
*
* If the compressed representation of the headers exceeds the MAX_FRAME_SIZE setting of the
* peer, it will be broken into a HEADERS frame and a series of CONTINUATION frames.
*/
def headerFrame(
streamId: Int,
priority: Priority,
endStream: Boolean,
headers: Headers
): collection.Seq[ByteBuffer] = {
val rawHeaders = headerEncoder.encodeHeaders(headers)
val limit = maxFrameSize
val headersPrioritySize =
if (priority.isDefined) 5 else 0 // priority(4) + weight(1), padding = 0
if (rawHeaders.remaining() + headersPrioritySize <= limit)
FrameSerializer.mkHeaderFrame(
streamId,
priority,
endHeaders = true,
endStream,
padding = 0,
rawHeaders)
else {
// need to fragment
val acc = new ArrayBuffer[ByteBuffer]
val headersBuf =
BufferTools.takeSlice(rawHeaders, limit - headersPrioritySize)
acc ++= FrameSerializer.mkHeaderFrame(
streamId,
priority,
endHeaders = false,
endStream,
padding = 0,
headersBuf)
while (rawHeaders.hasRemaining) {
val size = math.min(limit, rawHeaders.remaining)
val continueBuf = BufferTools.takeSlice(rawHeaders, size)
val endHeaders = !rawHeaders.hasRemaining
acc ++= FrameSerializer.mkContinuationFrame(streamId, endHeaders, continueBuf)
}
acc
}
}
}
| http4s/blaze | http/src/main/scala/org/http4s/blaze/http/http2/FrameEncoder.scala | Scala | apache-2.0 | 4,256 |
/*
* Copyright 2016 Nikolay Smelik
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalabot.common.extensions
import org.json4s.DefaultFormats
import org.json4s.native.JsonMethods._
import scalabot.common.chat.Chat
import scalabot.common.message.Intent
import spray.client.pipelining._
import spray.http._
import spray.caching.{Cache, LruCache}
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits._
import scala.concurrent.Future
/**
* Created by Nikolay.Smelik on 8/3/2016.
*/
trait SocketExtension extends BotExtension {
implicit private val formats = DefaultFormats
val cache: Cache[Any] = LruCache(timeToLive = cacheExpiration)
val pipeline: HttpRequest => Future[HttpResponse] = sendReceive
def cacheExpiration: Duration = 1 day
final def makeRequest[T](intent: SocketIntent)(implicit manifest: Manifest[T]): Unit = {
val uri = Uri(intent.url).withQuery(intent.requestParams.params)
if (intent.requestParams.canCache) {
val result = cache(uri) {
pipeline(HttpRequest(intent.requestParams.method, uri, intent.requestParams.headers))
.map(_.entity.asString)
.map(parse(_).extract[T])
}
result.foreach(result => self ! ResultIntent(intent.sender, result))
} else {
pipeline(HttpRequest(intent.requestParams.method, uri, intent.requestParams.headers))
.map(_.entity.asString)
.map(parse(_).extract[T])
.foreach(result => self ! ResultIntent(intent.sender, result))
}
}
final def makeRequest(intent: SocketIntent): Unit = {
val uri = Uri(intent.url).withQuery(intent.requestParams.params)
if (intent.requestParams.canCache) {
val result = cache(uri) {
pipeline(HttpRequest(intent.requestParams.method, uri, intent.requestParams.headers))
.map(_.entity.asString)
}
result.foreach(result => self ! ResultIntent(intent.sender, result))
} else {
pipeline(HttpRequest(intent.requestParams.method, uri, intent.requestParams.headers))
.map(_.entity.asString)
.foreach(result => self ! ResultIntent(intent.sender, result))
}
}
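  // Hedged usage sketch (not in the original file): a bot would typically send
  // itself a SocketIntent and handle the ResultIntent later. The URL, the
  // `chat` value and the `MySearchResult` type are placeholders.
  //
  //   val params = RequestParams(canCache = true)
  //     .putParam("q", "scala")
  //     .putHeader(HttpHeaders.Accept(MediaTypes.`application/json`))
  //   makeRequest[MySearchResult](SocketIntent(chat, "https://example.org/search", params))
  //   // ... later in the receive loop: case ResultIntent(sender, result: MySearchResult) => ...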
}
case class SocketIntent(sender: Chat, url: String, requestParams: RequestParams = RequestParams()) extends Intent
case class ResultIntent(sender: Chat, result: Any) extends Intent
case class RequestParams(method: HttpMethod = HttpMethods.GET, canCache: Boolean = false) {
var params: Map[String, String] = Map.empty
var headers: List[HttpHeader] = List.empty
def putParam(name: String, value: String): RequestParams = {
params = params + (name -> value)
this
}
def putHeader(httpHeader: HttpHeader): RequestParams = {
headers = headers :+ httpHeader
this
}
def ++=(requestParams: RequestParams): Unit = {
params = params ++ requestParams.params
}
} | kerzok/ScalaBot | BotApi/src/main/scala/scalabot/common/extensions/SocketExtension.scala | Scala | apache-2.0 | 3,360 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.gearpump.streaming.task
import org.apache.gearpump.streaming.partitioner.PartitionerDescription
import org.apache.gearpump.streaming.{DAG, LifeTime}
/**
* Each processor can have multiple downstream subscribers.
*
* For example: When processor A subscribe to processor B, then the output of B will be
* pushed to processor A.
*
* @param processorId subscriber processor Id
* @param partitionerDescription subscriber partitioner
*/
case class Subscriber(processorId: Int, partitionerDescription: PartitionerDescription,
parallelism: Int, lifeTime: LifeTime)
object Subscriber {
/**
*
* List subscriptions of a processor.
* The topology information is retrieved from dag
*
* @param processorId the processor to list
* @param dag the DAG
* @return the subscribers of this processor
*/
def of(processorId: Int, dag: DAG): List[Subscriber] = {
val edges = dag.graph.outgoingEdgesOf(processorId)
edges.foldLeft(List.empty[Subscriber]) { (list, nodeEdgeNode) =>
val (_, partitioner, downstreamProcessorId) = nodeEdgeNode
val downstreamProcessor = dag.processors(downstreamProcessorId)
list :+ Subscriber(downstreamProcessorId, partitioner,
downstreamProcessor.parallelism, downstreamProcessor.life)
}
}
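  // Hedged usage sketch (not from the original source): for a DAG in which
  // processor 0 feeds processors 1 and 2, `of` yields one Subscriber per
  // outgoing edge. The `dag` value is assumed to be built elsewhere with the
  // project's Graph/DAG constructors.
  //
  //   val subscribers: List[Subscriber] = Subscriber.of(processorId = 0, dag)
  //   // e.g. List(Subscriber(1, partitioner01, parallelism1, life1),
  //   //           Subscriber(2, partitioner02, parallelism2, life2))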
}
| manuzhang/incubator-gearpump | streaming/src/main/scala/org/apache/gearpump/streaming/task/Subscriber.scala | Scala | apache-2.0 | 2,119 |
package scala.macros.internal
package trees
import scala.macros.internal.prettyprinters._
import scala.macros.Universe
trait TreeStructure { self: Universe =>
private[macros] implicit def treeStructure[T <: Tree]: Structure[T] = Structure { (p, tree) =>
// TODO: implement this
???
}
}
| xeno-by/scalamacros | core/src/main/scala/scala/macros/internal/trees/TreeStructure.scala | Scala | bsd-3-clause | 301 |
package com.xantoria.flippy.serialization
import net.liftweb.json._
import org.scalatest._
import com.xantoria.flippy.BaseSpec
import com.xantoria.flippy.condition.{Condition, NamespacedCondition}
class SerializerSpec extends BaseSpec {
val contextSerializer = new ContextValueSerializer()
val engine = SerializationEngine()
implicit val formats = DefaultFormats + contextSerializer + engine
"The serialization engine" should "error if the condition type is unrecognised" in {
val data = """
{
"condition_type": "emerald_weapon",
"wat": "this is bogus"
}
"""
assume(!engine.conditionTypes.contains("emerald_weapon"))
a [MappingException] should be thrownBy parse(data).extract[Condition]
}
"Equals serialiser" should "deserialize correctly" in {
val data = """
{
"condition_type": "equals",
"value": "Ms. Cloud"
}
"""
val extracted = parse(data).extract[Condition]
extracted shouldBe a [Condition.Equals]
extracted.appliesTo("Ms. Cloud") should be (true)
extracted.appliesTo("Ms. Barrett") should be (false)
}
it should "serialize correctly" in {
val c = Condition.Equals("Emerald Weapon")
val expected = """{"condition_type":"equals","value":"Emerald Weapon"}"""
val actual = Serialization.write(c)
actual should be (expected)
}
"And serializer" should "deserialize correctly" in {
val data = """
{
"condition_type": "and",
"conditions": [
{
"condition_type": "equals",
"value": "Tifa"
},
{
"condition_type": "equals",
"value": "Yuffie"
}
]
}
"""
val extracted = parse(data).extract[Condition]
extracted shouldBe a [Condition.And]
extracted.appliesTo("Tifa") should be (false)
extracted.appliesTo("Yuffie") should be (false)
}
it should "fail if malformed" in {
val noConditions = """{"condition_type": "and"}"""
val weirdConditionsType = """{"condition_type": "and", "conditions": 123}"""
a [MappingException] should be thrownBy parse(
noConditions
).extract[Condition]
a [MappingException] should be thrownBy parse(
weirdConditionsType
).extract[Condition]
}
it should "serialize correctly" in {
val c = Condition.And(List(
Condition.Equals("Emerald Weapon"), Condition.Equals("Ruby Weapon")
))
val expected = {
"""{"condition_type":"and","conditions":[""" +
"""{"condition_type":"equals","value":"Emerald Weapon"},""" +
"""{"condition_type":"equals","value":"Ruby Weapon"}]}"""
}
val actual = Serialization.write(c)
actual should be (expected)
}
"Or serializer" should "deserialize correctly" in {
val data = """
{
"condition_type": "or",
"conditions": [
{
"condition_type": "equals",
"value": "Cloud"
},
{
"condition_type": "equals",
"value": "Tifa"
}
]
}
"""
val extracted = parse(data).extract[Condition]
extracted shouldBe a [Condition.Or]
extracted.appliesTo("Cloud") should be (true)
extracted.appliesTo("Tifa") should be (true)
extracted.appliesTo("Yuffie") should be (false)
}
it should "fail if malformed" in {
val noConditions = """{"condition_type": "or", "conditions": null}"""
val weirdConditionsType = """{"condition_type": "or", "conditions": 321}"""
a [MappingException] should be thrownBy parse(
noConditions
).extract[Condition]
a [MappingException] should be thrownBy parse(
weirdConditionsType
).extract[Condition]
}
it should "serialize correctly" in {
val c = Condition.Or(List(Condition.Equals("Emerald Weapon"), Condition.Equals("Ruby Weapon")))
val expected = {
"""{"condition_type":"or","conditions":[""" +
"""{"condition_type":"equals","value":"Emerald Weapon"},""" +
"""{"condition_type":"equals","value":"Ruby Weapon"}]}"""
}
val actual = Serialization.write(c)
actual should be (expected)
}
"Not serializer" should "deserialize correctly" in {
val data = """
{
"condition_type": "not",
"condition": {
"condition_type": "equals",
"value": "Ms. Cloud"
}
}
"""
val parsed = parse(data).extract[Condition]
parsed shouldBe a [Condition.Not]
parsed.appliesTo("Ms. Cloud") should be (false)
parsed.appliesTo("Yuffie") should be (true)
}
it should "serialize correctly" in {
val c = Condition.Not(Condition.Equals("Midgar"))
val expected = {
"""{"condition_type":"not","condition":""" +
"""{"condition_type":"equals","value":"Midgar"}}"""
}
val actual = Serialization.write(c)
actual should be (expected)
}
"True/false serializers" should "deserialize correctly" in {
val trueData = """{"condition_type":"true"}"""
val falseData = """{"condition_type":"false"}"""
val parsedTrue = parse(trueData).extract[Condition]
val parsedFalse = parse(falseData).extract[Condition]
parsedTrue should be (Condition.True)
parsedFalse should be (Condition.False)
}
it should "serialize correctly" in {
val expectedTrue = """{"condition_type":"true"}"""
val expectedFalse = """{"condition_type":"false"}"""
val actualTrue = Serialization.write(Condition.True)
val actualFalse = Serialization.write(Condition.False)
actualTrue should be (expectedTrue)
actualFalse should be (expectedFalse)
}
"Namespaced serializer" should "deserialize correctly" in {
val data = """
{
"condition_type": "namespaced",
"attr": "name",
"fallback": false,
"condition": {
"condition_type": "equals",
"value": "Ms. Cloud"
}
}
"""
val cloud = Map("name" -> "Ms. Cloud")
val tifa = Map("name" -> "Tifa")
val namelessOne = Map("always_silent" -> true)
val parsed = parse(data).extract[Condition]
parsed shouldBe a [NamespacedCondition]
parsed.appliesTo(cloud) should be (true)
parsed.appliesTo(tifa) should be (false)
parsed.appliesTo(namelessOne) should be (false)
}
it should "respect fallback properly" in {
val data = """
{
"condition_type": "namespaced",
"attr": "name",
"fallback": true,
"condition": {
"condition_type": "equals",
"value": "Ms. Cloud"
}
}
"""
val cloud = Map("name" -> "Ms. Cloud")
val tifa = Map("name" -> "Tifa")
val namelessOne = Map("always_silent" -> true)
val parsed = parse(data).extract[Condition]
parsed shouldBe a [NamespacedCondition]
parsed.appliesTo(cloud) should be (true)
parsed.appliesTo(tifa) should be (false)
parsed.appliesTo(namelessOne) should be (true)
}
it should "serialize correctly" in {
val c = Condition.Equals("Cloud") on "name"
val expected = {
"""{"condition_type":"namespaced","attr":"name","condition":""" +
"""{"condition_type":"equals","value":"Cloud"},""" +
""""fallback":false}"""
}
val actual = Serialization.write(c)
actual should be (expected)
}
"Proportion serializer" should "deserialize correctly" in {
val data = """
{
"condition_type": "proportion",
"proportion": 0.2274
}
"""
val extracted = parse(data).extract[Condition]
extracted shouldBe a [Condition.Proportion]
extracted.asInstanceOf[Condition.Proportion].prop should be (0.2274)
}
it should "serialize correctly" in {
val c = Condition.Proportion(0.1143)
val expected = """{"condition_type":"proportion","proportion":0.1143}"""
val actual = Serialization.write(c)
actual should be (expected)
}
"Complex serialization conditions" should "work" in {
// A nice complicated condition: this should match anyone named "Cloud" or who is
// 17 years old, but only if they come from the Final Fantasy game franchise.
val data = """
{
"condition_type": "and",
"conditions": [
{
"condition_type": "namespaced",
"attr": "game_franchise",
"condition": {
"condition_type": "equals",
"value": "Final Fantasy"
}
},
{
"condition_type": "or",
"conditions": [
{
"condition_type": "namespaced",
"attr": "name",
"condition": {
"condition_type": "equals",
"value": "Cloud"
}
},
{
"condition_type": "namespaced",
"attr": "age",
"condition": {
"condition_type": "equals",
"value": 17
}
}
]
}
]
}
"""
val tifa = Map(
"name" -> "Tifa",
"age" -> 20,
"game_franchise" -> "Final Fantasy"
)
val cloud = Map(
"name" -> "Cloud",
"age" -> 21,
"game_franchise" -> "Final Fantasy"
)
val yuffie = Map(
"name" -> "Yuffie",
"age" -> 17,
"game_franchise" -> "Final Fantasy"
)
val kid = Map(
"name" -> "Kid",
"age" -> 17, // She's actually 16 in the game but never mind
"game_franchise" -> "Chrono series"
)
val parsed = parse(data).extract[Condition]
parsed.appliesTo(tifa) should be (false) // Correct franchise, wrong name, wrong age
parsed.appliesTo(cloud) should be (true) // Correct franchise, correct name, wrong age
parsed.appliesTo(yuffie) should be (true) // Correct franchise, wrong name, correct age
parsed.appliesTo(kid) should be (false) // Wrong franchise, wrong name, correct age
}
}
| giftig/flippy | core/src/test/scala/com/xantoria/flippy/serialization/SerializerSpec.scala | Scala | mit | 9,933 |
/***********************************************************************
* Copyright (c) 2013-2017 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.curve
import org.locationtech.geomesa.curve.XZ2SFC.{QueryWindow, XElement}
import org.locationtech.sfcurve.IndexRange
import scala.collection.mutable.ArrayBuffer
/**
* Extended Z-order curve implementation used for efficiently storing polygons.
*
* Based on 'XZ-Ordering: A Space-Filling Curve for Objects with Spatial Extension'
* by Christian Böhm, Gerald Klump and Hans-Peter Kriegel
*
* @param g resolution level of the curve - i.e. how many times the space will be recursively quartered
*/
class XZ2SFC(g: Short, xBounds: (Double, Double), yBounds: (Double, Double)) {
// TODO see what the max value of g can be where we can use Ints instead of Longs and possibly refactor to use Ints
private val xLo = xBounds._1
private val xHi = xBounds._2
private val yLo = yBounds._1
private val yHi = yBounds._2
private val xSize = xHi - xLo
private val ySize = yHi - yLo
/**
* Index a polygon by it's bounding box
*
* @param bounds (xmin, ymin, xmax, ymax)
* @return z value for the bounding box
*/
def index(bounds: (Double, Double, Double, Double)): Long = index(bounds._1, bounds._2, bounds._3, bounds._4)
/**
* Index a polygon by it's bounding box
*
* @param xmin min x value in xBounds
* @param ymin min y value in yBounds
* @param xmax max x value in xBounds, must be >= xmin
* @param ymax max y value in yBounds, must be >= ymin
* @param lenient standardize boundaries to valid values, or raise an exception
* @return z value for the bounding box
*/
def index(xmin: Double, ymin: Double, xmax: Double, ymax: Double, lenient: Boolean = false): Long = {
// normalize inputs to [0,1]
val (nxmin, nymin, nxmax, nymax) = normalize(xmin, ymin, xmax, ymax, lenient)
// calculate the length of the sequence code (section 4.1 of XZ-Ordering paper)
val maxDim = math.max(nxmax - nxmin, nymax - nymin)
// l1 (el-one) is a bit confusing to read, but corresponds with the paper's definitions
val l1 = math.floor(math.log(maxDim) / XZSFC.LogPointFive).toInt
// the length will either be (l1) or (l1 + 1)
val length = if (l1 >= g) { g } else {
val w2 = math.pow(0.5, l1 + 1) // width of an element at resolution l2 (l1 + 1)
// predicate for checking how many axis the polygon intersects
// math.floor(min / w2) * w2 == start of cell containing min
def predicate(min: Double, max: Double): Boolean = max <= (math.floor(min / w2) * w2) + (2 * w2)
if (predicate(nxmin, nxmax) && predicate(nymin, nymax)) l1 + 1 else l1
}
sequenceCode(nxmin, nymin, length)
}
/**
* Determine XZ-curve ranges that will cover a given query window
*
* @param query a window to cover in the form (xmin, ymin, xmax, ymax) where: all values are in user space
* @return
*/
def ranges(query: (Double, Double, Double, Double)): Seq[IndexRange] = ranges(Seq(query))
/**
* Determine XZ-curve ranges that will cover a given query window
*
* @param query a window to cover in the form (xmin, ymin, xmax, ymax) where all values are in user space
* @param maxRanges a rough upper limit on the number of ranges to generate
* @return
*/
def ranges(query: (Double, Double, Double, Double), maxRanges: Option[Int]): Seq[IndexRange] =
ranges(Seq(query), maxRanges)
/**
* Determine XZ-curve ranges that will cover a given query window
*
* @param xmin min x value in user space
* @param ymin min y value in user space
* @param xmax max x value in user space, must be >= xmin
* @param ymax max y value in user space, must be >= ymin
* @return
*/
def ranges(xmin: Double, ymin: Double, xmax: Double, ymax: Double): Seq[IndexRange] =
ranges(Seq((xmin, ymin, xmax, ymax)))
/**
* Determine XZ-curve ranges that will cover a given query window
*
* @param xmin min x value in user space
* @param ymin min y value in user space
* @param xmax max x value in user space, must be >= xmin
* @param ymax max y value in user space, must be >= ymin
* @param maxRanges a rough upper limit on the number of ranges to generate
* @return
*/
def ranges(xmin: Double, ymin: Double, xmax: Double, ymax: Double, maxRanges: Option[Int]): Seq[IndexRange] =
ranges(Seq((xmin, ymin, xmax, ymax)), maxRanges)
/**
* Determine XZ-curve ranges that will cover a given query window
*
* @param queries a sequence of OR'd windows to cover. Each window is in the form
* (xmin, ymin, xmax, ymax) where all values are in user space
* @param maxRanges a rough upper limit on the number of ranges to generate
* @return
*/
def ranges(queries: Seq[(Double, Double, Double, Double)], maxRanges: Option[Int] = None): Seq[IndexRange] = {
// normalize inputs to [0,1]
val windows = queries.map { case (xmin, ymin, xmax, ymax) =>
val (nxmin, nymin, nxmax, nymax) = normalize(xmin, ymin, xmax, ymax, lenient = false)
QueryWindow(nxmin, nymin, nxmax, nymax)
}
ranges(windows.toArray, maxRanges.getOrElse(Int.MaxValue))
}
/**
* Determine XZ-curve ranges that will cover a given query window
*
* @param query a sequence of OR'd windows to cover, normalized to [0,1]
* @param rangeStop a rough max value for the number of ranges to return
* @return
*/
private def ranges(query: Array[QueryWindow], rangeStop: Int): Seq[IndexRange] = {
import XZ2SFC.{LevelOneElements, LevelTerminator}
// stores our results - initial size of 100 in general saves us some re-allocation
val ranges = new java.util.ArrayList[IndexRange](100)
// values remaining to process - initial size of 100 in general saves us some re-allocation
val remaining = new java.util.ArrayDeque[XElement](100)
// checks if a quad is contained in the search space
def isContained(quad: XElement): Boolean = {
var i = 0
while (i < query.length) {
if (quad.isContained(query(i))) {
return true
}
i += 1
}
false
}
// checks if a quad overlaps the search space
def isOverlapped(quad: XElement): Boolean = {
var i = 0
while (i < query.length) {
if (quad.overlaps(query(i))) {
return true
}
i += 1
}
false
}
// checks a single value and either:
// eliminates it as out of bounds
// adds it to our results as fully matching, or
// adds it to our results as partial matching and queues up it's children for further processing
def checkValue(quad: XElement, level: Short): Unit = {
if (isContained(quad)) {
// whole range matches, happy day
val (min, max) = sequenceInterval(quad.xmin, quad.ymin, level, partial = false)
ranges.add(IndexRange(min, max, contained = true))
} else if (isOverlapped(quad)) {
// some portion of this range is excluded
// add the partial match and queue up each sub-range for processing
val (min, max) = sequenceInterval(quad.xmin, quad.ymin, level, partial = true)
ranges.add(IndexRange(min, max, contained = false))
quad.children.foreach(remaining.add)
}
}
// initial level
LevelOneElements.foreach(remaining.add)
remaining.add(LevelTerminator)
// level of recursion
var level: Short = 1
while (level < g && !remaining.isEmpty && ranges.size < rangeStop) {
val next = remaining.poll
if (next.eq(LevelTerminator)) {
// we've fully processed a level, increment our state
if (!remaining.isEmpty) {
level = (level + 1).toShort
remaining.add(LevelTerminator)
}
} else {
checkValue(next, level)
}
}
// bottom out and get all the ranges that partially overlapped but we didn't fully process
while (!remaining.isEmpty) {
val quad = remaining.poll
if (quad.eq(LevelTerminator)) {
level = (level + 1).toShort
} else {
val (min, max) = sequenceInterval(quad.xmin, quad.ymin, level, partial = false)
ranges.add(IndexRange(min, max, contained = false))
}
}
// we've got all our ranges - now reduce them down by merging overlapping values
// note: we don't bother reducing the ranges as in the XZ paper, as accumulo handles lots of ranges fairly well
ranges.sort(IndexRange.IndexRangeIsOrdered)
var current = ranges.get(0) // note: should always be at least one range
val result = ArrayBuffer.empty[IndexRange]
var i = 1
while (i < ranges.size()) {
val range = ranges.get(i)
if (range.lower <= current.upper + 1) {
// merge the two ranges
current = IndexRange(current.lower, math.max(current.upper, range.upper), current.contained && range.contained)
} else {
// append the last range and set the current range for future merging
result.append(current)
current = range
}
i += 1
}
// append the last range - there will always be one left that wasn't added
result.append(current)
result
}
/**
* Computes the sequence code for a given point - for polygons this is the lower-left corner.
*
* Based on Definition 2 from the XZ-Ordering paper
*
* @param x normalized x value [0,1]
* @param y normalized y value [0,1]
* @param length length of the sequence code that will be generated
* @return
*/
private def sequenceCode(x: Double, y: Double, length: Int): Long = {
var xmin = 0.0
var ymin = 0.0
var xmax = 1.0
var ymax = 1.0
var cs = 0L
var i = 0
while (i < length) {
val xCenter = (xmin + xmax) / 2.0
val yCenter = (ymin + ymax) / 2.0
(x < xCenter, y < yCenter) match {
case (true, true) => cs += 1L ; xmax = xCenter; ymax = yCenter
case (false, true) => cs += 1L + 1L * (math.pow(4, g - i).toLong - 1L) / 3L; xmin = xCenter; ymax = yCenter
case (true, false) => cs += 1L + 2L * (math.pow(4, g - i).toLong - 1L) / 3L; xmax = xCenter; ymin = yCenter
case (false, false) => cs += 1L + 3L * (math.pow(4, g - i).toLong - 1L) / 3L; xmin = xCenter; ymin = yCenter
}
i += 1
}
cs
}
/**
* Computes an interval of sequence codes for a given point - for polygons this is the lower-left corner.
*
* @param x normalized x value [0,1]
* @param y normalized y value [0,1]
* @param length length of the sequence code that will used as the basis for this interval
* @param partial true if the element partially intersects the query window, false if it is fully contained
* @return
*/
private def sequenceInterval(x: Double, y: Double, length: Short, partial: Boolean): (Long, Long) = {
val min = sequenceCode(x, y, length)
// if a partial match, we just use the single sequence code as an interval
// if a full match, we have to match all sequence codes starting with the single sequence code
val max = if (partial) { min } else {
// from lemma 3 in the XZ-Ordering paper
min + (math.pow(4, g - length + 1).toLong - 1L) / 3L
}
(min, max)
}
/**
* Normalize user space values to [0,1]
*
* @param xmin min x value in user space
* @param ymin min y value in user space
* @param xmax max x value in user space, must be >= xmin
* @param ymax max y value in user space, must be >= ymin
* @param lenient standardize boundaries to valid values, or raise an exception
* @return
*/
private def normalize(xmin: Double,
ymin: Double,
xmax: Double,
ymax: Double,
lenient: Boolean): (Double, Double, Double, Double) = {
require(xmin <= xmax && ymin <= ymax, s"Bounds must be ordered: [$xmin $xmax] [$ymin $ymax]")
try {
require(xmin >= xLo && xmax <= xHi && ymin >= yLo && ymax <= yHi,
s"Values out of bounds ([$xLo $xHi] [$yLo $yHi]): [$xmin $xmax] [$ymin $ymax]")
val nxmin = (xmin - xLo) / xSize
val nymin = (ymin - yLo) / ySize
val nxmax = (xmax - xLo) / xSize
val nymax = (ymax - yLo) / ySize
(nxmin, nymin, nxmax, nymax)
} catch {
case _: IllegalArgumentException if lenient =>
val bxmin = if (xmin < xLo) { xLo } else if (xmin > xHi) { xHi } else { xmin }
val bymin = if (ymin < yLo) { yLo } else if (ymin > yHi) { yHi } else { ymin }
val bxmax = if (xmax < xLo) { xLo } else if (xmax > xHi) { xHi } else { xmax }
val bymax = if (ymax < yLo) { yLo } else if (ymax > yHi) { yHi } else { ymax }
val nxmin = (bxmin - xLo) / xSize
val nymin = (bymin - yLo) / ySize
val nxmax = (bxmax - xLo) / xSize
val nymax = (bymax - yLo) / ySize
(nxmin, nymin, nxmax, nymax)
}
}
}
object XZ2SFC {
// the initial level of quads
private val LevelOneElements = XElement(0.0, 0.0, 1.0, 1.0, 1.0).children
// indicator that we have searched a full level of the quad/oct tree
private val LevelTerminator = XElement(-1.0, -1.0, -1.0, -1.0, 0)
private val cache = new java.util.concurrent.ConcurrentHashMap[Short, XZ2SFC]()
def apply(g: Short): XZ2SFC = {
var sfc = cache.get(g)
if (sfc == null) {
sfc = new XZ2SFC(g, (-180.0, 180.0), (-90.0, 90.0))
cache.put(g, sfc)
}
sfc
}
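  // Illustrative sketch only (not part of the original file): indexing one
  // bounding box and planning query ranges with the cached lon/lat curve.
  // The resolution (12) and coordinates are arbitrary example values.
  //
  //   val sfc = XZ2SFC(12)
  //   val z: Long = sfc.index(xmin = -75.2, ymin = 40.1, xmax = -75.1, ymax = 40.2)
  //   val ranges: Seq[IndexRange] = sfc.ranges(-76.0, 39.0, -74.0, 41.0)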
/**
* Region being queried. Bounds are normalized to [0-1].
*
* @param xmin x lower bound in [0-1]
* @param ymin y lower bound in [0-1]
* @param xmax x upper bound in [0-1], must be >= xmin
* @param ymax y upper bound in [0-1], must be >= ymin
*/
private case class QueryWindow(xmin: Double, ymin: Double, xmax: Double, ymax: Double)
/**
* An extended Z curve element. Bounds refer to the non-extended z element for simplicity of calculation.
*
* An extended Z element refers to a normal Z curve element that has it's upper bounds expanded by double it's
* width/height. By convention, an element is always square.
*
* @param xmin x lower bound in [0-1]
* @param ymin y lower bound in [0-1]
* @param xmax x upper bound in [0-1], must be >= xmin
* @param ymax y upper bound in [0-1], must be >= ymin
* @param length length of the non-extended side (note: by convention width should be equal to height)
*/
private case class XElement(xmin: Double, ymin: Double, xmax: Double, ymax: Double, length: Double) {
// extended x and y bounds
lazy val xext = xmax + length
lazy val yext = ymax + length
def isContained(window: QueryWindow): Boolean =
window.xmin <= xmin && window.ymin <= ymin && window.xmax >= xext && window.ymax >= yext
def overlaps(window: QueryWindow): Boolean =
window.xmax >= xmin && window.ymax >= ymin && window.xmin <= xext && window.ymin <= yext
def children: Seq[XElement] = {
val xCenter = (xmin + xmax) / 2.0
val yCenter = (ymin + ymax) / 2.0
val len = length / 2.0
val c0 = copy(xmax = xCenter, ymax = yCenter, length = len)
val c1 = copy(xmin = xCenter, ymax = yCenter, length = len)
val c2 = copy(xmax = xCenter, ymin = yCenter, length = len)
val c3 = copy(xmin = xCenter, ymin = yCenter, length = len)
Seq(c0, c1, c2, c3)
}
}
}
| ronq/geomesa | geomesa-z3/src/main/scala/org/locationtech/geomesa/curve/XZ2SFC.scala | Scala | apache-2.0 | 15,903 |
/*
* Copyright (C) 2012-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package scala
import reflect._
import tools.reflect.{ToolBox, ToolBoxError}
package object async {
implicit class objectops(obj: Any) {
def mustBe(other: Any) = assert(obj == other, obj + " is not " + other)
def mustEqual(other: Any) = mustBe(other)
}
implicit class stringops(text: String) {
def mustContain(substring: String) = assert(text contains substring, text)
def mustStartWith(prefix: String) = assert(text startsWith prefix, text)
}
implicit class listops(list: List[String]) {
def mustStartWith(prefixes: List[String]) = {
assert(list.length == prefixes.size, ("expected = " + prefixes.length + ", actual = " + list.length, list))
list.zip(prefixes).foreach{ case (el, prefix) => el mustStartWith prefix }
}
}
def intercept[T <: Throwable : ClassTag](body: => Any): T = {
try {
body
throw new Exception(s"Exception of type ${classTag[T]} was not thrown")
} catch {
case t: Throwable =>
if (classTag[T].runtimeClass != t.getClass) throw t
else t.asInstanceOf[T]
}
}
def eval(code: String, compileOptions: String = ""): Any = {
val tb = mkToolbox(compileOptions)
tb.eval(tb.parse(code))
}
def mkToolbox(compileOptions: String = ""): ToolBox[_ <: scala.reflect.api.Universe] = {
val m = scala.reflect.runtime.currentMirror
import scala.tools.reflect.ToolBox
m.mkToolBox(options = compileOptions)
}
import scala.tools.nsc._, reporters._
def mkGlobal(compileOptions: String = ""): Global = {
val settings = new Settings()
settings.processArgumentString(compileOptions)
val initClassPath = settings.classpath.value
settings.embeddedDefaults(getClass.getClassLoader)
if (initClassPath == settings.classpath.value)
      settings.usejavacp.value = true // not running under SBT, try to use the Java classpath instead
val reporter = new StoreReporter
new Global(settings, reporter)
}
def scalaBinaryVersion: String = {
val PreReleasePattern = """.*-(M|RC).*""".r
val Pattern = """(\\d+\\.\\d+)\\..*""".r
val SnapshotPattern = """(\\d+\\.\\d+\\.\\d+)-\\d+-\\d+-.*""".r
scala.util.Properties.versionNumberString match {
case s @ PreReleasePattern(_) => s
case SnapshotPattern(v) => v + "-SNAPSHOT"
case Pattern(v) => v
case _ => ""
}
}
def toolboxClasspath = {
val f = new java.io.File(s"target/scala-${scalaBinaryVersion}/classes")
if (!f.exists) sys.error(s"output directory ${f.getAbsolutePath} does not exist.")
f.getAbsolutePath
}
def expectError(errorSnippet: String, compileOptions: String = "",
baseCompileOptions: String = s"-cp ${toolboxClasspath}")(code: String) {
intercept[ToolBoxError] {
eval(code, compileOptions + " " + baseCompileOptions)
}.getMessage mustContain errorSnippet
}
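  // Hedged example (not part of the original file): asserting that a snippet
  // fails to compile with a given message. Both the import and the expected
  // message below are illustrative only.
  //
  //   expectError("`await` must be enclosed in an `async` block") {
  //     """
  //       | import _root_.scala.async.internal.AsyncId._
  //       | await(1)
  //     """.stripMargin
  //   }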
}
| anand-singh/async | src/test/scala/scala/async/package.scala | Scala | bsd-3-clause | 2,945 |
package org.zalando.jsonapi.json.circe
import io.circe.generic.auto._
import io.circe.parser._
import io.circe.syntax._
import org.zalando.jsonapi.model.RootObject
import spray.http.ContentTypes
import spray.http.MediaTypes._
import spray.httpx.marshalling.Marshaller
import spray.httpx.unmarshalling.Unmarshaller
trait CirceJsonapiSupport extends CirceJsonapiEncoders with CirceJsonapiDecoders {
implicit val circeJsonapiMarshaller = Marshaller.delegate[RootObject, String](
`application/vnd.api+json`,
`application/json`,
ContentTypes.`application/json`
)(_.asJson.noSpaces)
implicit val circeJsonapiUnmarshaller = Unmarshaller.delegate[String, RootObject](
`application/vnd.api+json`,
`application/json`
)(decode[RootObject](_).toOption.get)
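  // Hedged usage sketch (not in the original file): mixing this trait into a
  // spray-routing service lets RootObject values marshal to
  // application/vnd.api+json automatically; `myRootObject` is a placeholder.
  //
  //   class Api extends HttpServiceActor with CirceJsonapiSupport {
  //     def receive = runRoute {
  //       get { complete(myRootObject) }
  //     }
  //   }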
}
object CirceJsonapiSupport extends CirceJsonapiSupport
| wlk/scala-jsonapi | src/main/scala/org/zalando/jsonapi/json/circe/CirceJsonapiSupport.scala | Scala | mit | 833 |
package soal.util
import org.scalatest.FlatSpec
import org.scalatest.Matchers
import java.util.Random
/*
* Created by plofgren on 4/16/15.
*/
class DiscreteAliasSamplerSpec extends FlatSpec with Matchers {
val random = new Random(1)
def testDistribution(unnormalizedProbabilities: Array[Float],
values: Seq[Int],
nSamples: Int = 10000
): Unit = {
val probabilities = unnormalizedProbabilities map { _ / unnormalizedProbabilities.sum }
val n = unnormalizedProbabilities.size
val valueToIndex = (values zip (0 until n)).toMap
val sampler = new DiscreteAliasSampler(values, unnormalizedProbabilities, random)
val sampleCounts = Array.fill(n)(0)
val tol = 4.0f / math.sqrt(nSamples).toFloat
for (i <- 0 until nSamples) {
val v = sampler.sample()
sampleCounts(valueToIndex(v)) += 1
}
for (i <- 0 until n) {
sampleCounts(i).toFloat / nSamples should equal (probabilities(i) +- tol)
}
def f(v: Int): Float = v.toFloat * v.toFloat // compute expectation of v => v^2
val trueExpectation = ((probabilities zip values) map { case (p, v) => p * v * v }).sum
sampler.expectation(f) shouldEqual (trueExpectation +- trueExpectation * 1.00001f)
}
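  // Illustrative sketch (not part of the original spec): drawing directly from
  // the sampler outside the test harness, with made-up weights.
  //
  //   val sampler = new DiscreteAliasSampler(Array(2, 3, 5), Array(0.2f, 0.3f, 0.5f), random)
  //   val draw: Int = sampler.sample()   // 2, 3, or 5 with probability 0.2 / 0.3 / 0.5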
"A Discrete Distribution" should "support sampling" in {
testDistribution(Array(575.6355f, 89.733475f, 86.90718f, 721.26416f), Array(2, 3, 5, 7))
testDistribution(Array(2.0f, 5.0f, 3.0f), Array(17, 11, 13))
testDistribution(Array(1.0f, 1.0f, 1.0f, 1.0f), Array(-2, 3, -5, 7))
testDistribution(Array(0.9f, 0.1f), Array(19, 17))
an[IllegalArgumentException] should be thrownBy {
new DiscreteAliasSampler(Array(1), Array(1.0f, 2.0f))
}
}
}
| plofgren/bidirectional-random-walk | src/test/scala/soal/util/DiscreteAliasSamplerSpec.scala | Scala | mit | 1,739 |
package com.twitter.finagle.loadbalancer.aperture
import com.twitter.conversions.DurationOps._
import com.twitter.finagle.Address
import com.twitter.finagle.loadbalancer.{EndpointFactory, LazyEndpointFactory}
import com.twitter.finagle.ServiceFactoryProxy
import com.twitter.finagle.stats.{StatsReceiver, InMemoryStatsReceiver}
import com.twitter.util._
import org.scalatest.fixture.FunSuite
class ExpirationTest extends FunSuite with ApertureSuite {
/**
* An aperture load balancer which mixes in expiration but no
* controller or load metric. We manually have to adjust the
* aperture to test for nodes falling in and out of the window.
*/
private class ExpiryBal(
val idleTime: Duration = 1.minute,
val mockTimer: MockTimer = new MockTimer,
val stats: InMemoryStatsReceiver = new InMemoryStatsReceiver)
extends TestBal
with Expiration[Unit, Unit] {
def expired: Long = stats.counters(Seq("expired"))
def noExpired: Boolean = stats.counters(Seq("expired")) == 0
protected def endpointIdleTime: Duration = idleTime / 2
protected def statsReceiver: StatsReceiver = stats
private[this] val expiryTask = newExpiryTask(mockTimer)
case class Node(factory: EndpointFactory[Unit, Unit])
extends ServiceFactoryProxy[Unit, Unit](factory)
with ExpiringNode
with ApertureNode {
def load: Double = 0
def pending: Int = 0
override val token: Int = 0
}
protected def newNode(factory: EndpointFactory[Unit, Unit]): Node = Node(factory)
protected def failingNode(cause: Throwable): Node = ???
override def close(when: Time) = {
expiryTask.cancel()
super.close(when)
}
}
private def newLazyEndpointFactory(sf: Factory) =
new LazyEndpointFactory(() => sf, Address.Failed(new Exception))
case class FixtureParam(tc: TimeControl)
def withFixture(test: OneArgTest) =
Time.withCurrentTimeFrozen { tc =>
test(FixtureParam(tc))
}
test("does not expire uninitialized nodes") { f =>
val bal = new ExpiryBal
val ep0, ep1 = Factory(0)
bal.update(Vector(ep0, ep1))
assert(bal.aperturex == 1)
f.tc.advance(bal.idleTime)
bal.mockTimer.tick()
assert(bal.noExpired)
}
test("expired counter is incremented once per close") { f =>
val bal = new ExpiryBal
val eps = Vector(Factory(0), Factory(1))
bal.update(eps.map(newLazyEndpointFactory))
bal.adjustx(1)
assert(bal.aperturex == 2)
(0 to 10).foreach { _ =>
Await.result(bal(), 5.seconds).close()
}
bal.adjustx(-1)
assert(bal.aperturex == 1)
f.tc.advance(bal.idleTime)
bal.mockTimer.tick()
assert(bal.expired == 1)
assert(eps.map(_.numCloses).sum == 1)
// Although calling `remake` on an already expired node is harmless,
// it makes the expired counter hard to reason about, so we want to
// ensure that we only increment it once per expiration.
(0 to 100).foreach { _ =>
f.tc.advance(bal.idleTime)
bal.mockTimer.tick()
assert(bal.expired == 1)
assert(eps.map(_.numCloses).sum == 1)
}
}
test("expires nodes outside of aperture") { f =>
val bal = new ExpiryBal
val eps = Vector.tabulate(10) { i =>
Factory(i)
}
bal.update(eps.map(newLazyEndpointFactory))
bal.adjustx(eps.size)
assert(bal.aperturex == eps.size)
// we rely on p2c to ensure that each endpoint gets
// a request for service acquisition.
def checkoutLoop(): Unit = (0 to 100).foreach { _ =>
Await.result(bal(), 5.seconds).close()
}
checkoutLoop()
assert(eps.filter(_.total > 0).size == eps.size)
// since our aperture covers all nodes no endpoint should go idle
f.tc.advance(bal.idleTime)
bal.mockTimer.tick()
assert(bal.noExpired)
eps.foreach(_.clear())
// set idle time on each node.
checkoutLoop()
// shrink aperture so some nodes qualify for expiration.
bal.adjustx(-eps.size / 2)
// tick the timer partially and no expirations
f.tc.advance(bal.idleTime / 4)
bal.mockTimer.tick()
assert(bal.noExpired)
// tick time fully
f.tc.advance(bal.idleTime)
bal.mockTimer.tick()
assert(bal.expired == eps.size / 2)
assert(eps.map(_.numCloses).sum == eps.size / 2)
}
test("idle time measured only on last response") { f =>
val bal = new ExpiryBal
val eps = Vector(Factory(0), Factory(1))
bal.update(eps)
bal.adjustx(1)
assert(bal.aperturex == 2)
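    // Acquire 100 services spread across both endpoints while the aperture
    // still covers them, keeping them all outstanding.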
val svcs = for (_ <- 0 until 100) yield { Await.result(bal(), 5.seconds) }
bal.adjustx(-1)
assert(bal.aperturex == 1)
assert(eps.map(_.outstanding).sum == 100)
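    // Separate the acquired services by which endpoint produced them.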
val svcs0 = svcs.collect { case svc if svc.toString == "Service(0)" => svc }
val svcs1 = svcs.collect { case svc if svc.toString == "Service(1)" => svc }
for (svc <- svcs0 ++ svcs1.init) {
Await.result(svc.close(), 5.seconds)
f.tc.advance(bal.idleTime)
bal.mockTimer.tick()
assert(bal.noExpired)
}
assert(eps.map(_.outstanding).sum == 1)
Await.result(svcs1.last.close(), 5.seconds)
assert(eps.map(_.outstanding).sum == 0)
f.tc.advance(bal.idleTime)
bal.mockTimer.tick()
assert(bal.expired == 1)
}
}
| luciferous/finagle | finagle-core/src/test/scala/com/twitter/finagle/loadbalancer/aperture/ExpirationTest.scala | Scala | apache-2.0 | 5,247 |
package es.bernal.sparkmongoiot
import java.io.File
import com.mongodb.spark.MongoSpark
import es.bernal.sparkmongoiot.types.{DataPoint, DataPointCnt, DataPointDct, DsTime}
import es.bernal.sparkmongoiot.utils.Constants
import net.liftweb.json.DefaultFormats
import net.liftweb.json.Serialization.write
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.bson.Document
import scala.collection.mutable.ListBuffer
/**
* Created by bernal on 4/5/17.
*/
object DataLoader extends App {
println("DataLoader application")
val ss = SparkSession.builder()
.master("local")
.appName("DataLoader")
.config("spark.mongodb.output.uri", "mongodb://" + Constants.user + ":"+ Constants.password + "@" + Constants.ip + "/" + Constants.database + "." + Constants.collectionOut)
.getOrCreate()
def files: List[(String,String)] = getFiles
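  // For each (datastream type, csv path) pair: parse the lines into data points,
  // serialise them to BSON documents and save the batch to MongoDB.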
files.foreach(f => {
// Load Data
val dsRDD: RDD[DataPoint] = ss.sparkContext.textFile(f._2).map(a => parseDataPoint(f._1,a))
    // document extraction: serialise each data point to JSON and parse it into a BSON Document
val dsDocs: RDD[Document] = dsRDD
.map(dp => {
implicit val formats = DefaultFormats
val jsonStr = write(dp)
jsonStr
})
.map( i => Document.parse(i) )
    // dump to MongoDB
MongoSpark.save(dsDocs)
})
ss.close()
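  // Scan the bundled /data resource folder for CSV files (ignoring GPRS session dumps)
  // and pair each file path with the datastream type inferred from its name.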
def getFiles(): List[(String,String)] = {
    val files = new ListBuffer[String]()
    val typeFilesMap = new ListBuffer[(String, String)]()
val path = getClass.getResource("/data")
val folder = new File(path.getPath)
if (folder.exists && folder.isDirectory)
folder.listFiles
.toList
.filter(f => (!f.getName.toLowerCase.contains(Constants.GPRS_SESSION) && f.getName.contains(Constants.CSV)))
.foreach(file => files+=file.toURI.getPath)
files.foreach(file => typeFilesMap += Tuple2(getTypeOfFile(file),file))
typeFilesMap.toList
}
// DataPoints Parser
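  // Each line is split on ';' into fields; the payload field is further split on '|'
  // into value and timestamp. Only the first DsTime component is populated here.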
def parseDataPoint(datastream: String, line: String): DataPoint = {
    val a: Array[String] = line.split(';')
    datastream match {
      case Constants.PRESENCE =>
        val b = a(5).split('|')
        DataPointDct(null, datastream, a(1), Constants.ORG, a(0), DsTime(b(1).toLong, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), null, b(0))
      case Constants.STATUS =>
        val b = a(4).split('|')
        DataPointCnt(null, Constants.COVERAGE, a(1), Constants.ORG, a(0), DsTime(b(1).toLong, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), null, b(0).toDouble)
      case Constants.INVENTORY_ICC =>
        val b = a(4).split('|')
        DataPointDct(null, datastream, a(1), Constants.ORG, a(0), DsTime(b(1).toLong, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), null, b(0))
      case Constants.INVENTORY_IMEI =>
        val b = a(4).split('|')
        DataPointDct(null, datastream, a(1), Constants.ORG, a(0), DsTime(b(1).toLong, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), null, b(0))
      case Constants.INVENTORY_IMSI =>
        val b = a(4).split('|')
        DataPointDct(null, datastream, a(1), Constants.ORG, a(0), DsTime(b(1).toLong, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), null, b(0))
      case Constants.INVENTORY_MANUFACTURER =>
        val b = a(4).split('|')
        DataPointDct(null, datastream, a(1), Constants.ORG, a(0), DsTime(b(2).toLong, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), null, b(0))
      case Constants.INVENTORY_FIRMWARE =>
        val b = a(4).split('|')
        DataPointDct(null, datastream, a(1), Constants.ORG, a(0), DsTime(b(1).toLong, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), null, b(0))
      case Constants.LOCATION =>
        val b = a(4).split('|')
        DataPointDct(null, datastream, a(1), Constants.ORG, a(0), DsTime(b(1).toLong, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0), null, b(0))
      case _ => null
    }
}
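  // Infer the datastream type from the file name; anything unrecognised maps to UNDEFINED.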
  def getTypeOfFile(filename: String): String = {
    val name = filename.toLowerCase()
    if (name.contains(Constants.PRESENCE)) {
      Constants.PRESENCE
    } else if (name.contains(Constants.INVENTORY)) {
      if (name.contains(Constants.INVENTORY_IMEI)) Constants.INVENTORY_IMEI
      else if (name.contains(Constants.INVENTORY_IMSI)) Constants.INVENTORY_IMSI
      else if (name.contains(Constants.INVENTORY_MANUFACTURER)) Constants.INVENTORY_MANUFACTURER
      else if (name.contains(Constants.INVENTORY_ICC)) Constants.INVENTORY_ICC
      else Constants.UNDEFINED
    } else if (name.contains(Constants.STATUS)) {
      Constants.STATUS
    } else if (name.contains(Constants.LOCATION)) {
      Constants.LOCATION
    } else {
      Constants.UNDEFINED
    }
  }
}
| giorbernal/spark-mongo-iot | src/main/scala/es/bernal/sparkmongoiot/DataLoader.scala | Scala | mit | 5,049 |