| code (string, 5–1M chars) | repo_name (string, 5–109 chars) | path (string, 6–208 chars) | language (1 class) | license (15 classes) | size (int64, 5–1M) |
|---|---|---|---|---|---|
// Jubatus: Online machine learning framework for distributed environment
// Copyright (C) 2014-2015 Preferred Networks and Nippon Telegraph and Telephone Corporation.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License version 2.1 as published by the Free Software Foundation.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
package us.jubat.yarn.test
import us.jubat.yarn.common.{Location, LearningMachineType}
import us.jubat.yarn.client.{Resource, JubatusYarnApplication}
import java.net.InetAddress
import org.apache.hadoop.fs.Path
import scala.util.{Success, Failure}
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import scala.concurrent.ExecutionContext.Implicits.global
// Alternative approach
object Test13 extends App {
def testcase(configString: String): Unit = {
println("アプリケーションを起動します")
val tApplicationFuture = JubatusYarnApplication.start(
"shogun",
LearningMachineType.Classifier,
List(Location(InetAddress.getLocalHost, 2181)),
configString,
Resource(priority = 0, memory = 512, virtualCores = 1),
1
).andThen {
case Failure(e) =>
println(e.getMessage)
e.printStackTrace()
case Success(tApplication) =>
try {
println(
"アプリケーションが起動しました\\n"
+ s"\\t${tApplication.jubatusProxy}\\n"
+ s"\\t${tApplication.jubatusServers}"
)
println("アプリケーションの状態を取得します")
val tStatus = tApplication.status
println(s"\\t${tStatus.jubatusProxy}")
println(s"\\t${tStatus.jubatusServers}")
println(s"\\t${tStatus.yarnApplication}")
println("モデルデータを保存します")
tApplication.saveModel(new Path("hdfs:///tmp/"), "test").get
// Thread.sleep(1000)
println("モデルデータを読み込みます")
tApplication.loadModel(new Path("hdfs:///tmp/"), "test").get
} finally {
println("アプリケーションを停止します")
Await.ready(
tApplication.stop().andThen {
case Failure(e) =>
println(e.getMessage)
e.printStackTrace()
case Success(_) =>
},
Duration.Inf
)
println("アプリケーションを停止しました")
}
}
Await.ready(tApplicationFuture, Duration.Inf)
Thread.sleep(1000)
}
println("==========================================================")
println("No1")
testcase(
"""
{
"method": "AROW",
"converter": {
"num_filter_types": {},
"num_filter_rules": [],
"string_filter_types": {},
"string_filter_rules": [],
"num_types": {},
"num_rules": [],
"string_types": {
"unigram": { "method": "ngram", "char_num": "1" }
},
"string_rules": [
{ "key": "*", "type": "unigram", "sample_weight": "bin", "global_weight": "bin" }
]
},
"parameter": {
"regularization_weight" : 1.0
}
}
"""
)
println("プログラムを終了します")
System.exit(0)
}
| jubatus/jubatus-on-yarn | jubatusonyarn/jubatus-on-yarn-test/src/main/scala/us/jubat/yarn/test/Test13.scala | Scala | lgpl-2.1 | 3,816 |
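// NOTE: the next row is a Dotty compiler fuzzing test case (tests/fuzzy/...); its code is
// intentionally invalid Scala.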
object i0 {
(null: Any) match {
case 1 , i1 @ (false)('i2', 'i3') <- flatMap(i4: Int, ) =>
true
type i0[i1] >: i2 <: i1[i2]
val i9: i2[scala] = i9
lazy i10 :: i7 === i8
i10[i3]('.i9', i7 == i10)
def <(i6: List[i1]): i4[i9, i10]
def i2(i3: i0 { type i6 <: mutable ; def Float[i77: i8]; i10, i18) }
trait i11[i4] {
def apply[i3 >: i14, i4 <: i60i4]: i3[i11, i10] = new i15[i2] {
def i2[i9](i11: i4, i15: String)(val i13: i7[i5 mkString { type i6[i7] = i3] })#i6] = Some(i11) i12 } | som-snytt/dotty | tests/fuzzy/f901d61a9bc6053c910245552c74c4c73e43efe0.scala | Scala | apache-2.0 | 478 |
package vm.interpreter.impl
import org.apache.bcel.generic.CHECKCAST
import sai.vm.Reference
import vm.Frame
import vm.interpreter.{InstructionInterpreter, InterpreterBuilder}
private[interpreter] object CheckcastInterpreter extends InterpreterBuilder[CHECKCAST] {
override def apply(i: CHECKCAST): InstructionInterpreter = {
case frame: Frame =>
val updatedStack = frame.stack.peek match {
case r: Reference =>
// we perform any cast, i.e., we do not check if the cast is legal
frame.stack.pop.push(Reference(i.getLoadClassType(frame.cpg), r.node))
case _ =>
frame.stack
}
frame.copy(stack = updatedStack)
}
}
| oliverhaase/sai | src/sai/vm/interpreter/impl/CheckcastInterpreter.scala | Scala | mit | 686 |
package net.categoricaldata.category
trait Category { category =>
type O
type M
def identity(o: O): M
def source(m: M): O
def target(m: M): O
def compose(m1: M, m2: M): M
// and now some convenience methods for composing many morphisms
def compose(m0: M, ms: M*): M = ms.fold(m0)(compose _)
def compose(o: O, ms: List[M]): M = {
ms match {
case Nil => identity(o)
case m :: Nil => m
case h :: t => compose(h, t: _*)
}
}
def exponentiate(m: M, k: Int): M = {
require(source(m) == target(m))
k match {
case 0 => identity(source(m))
case 1 => m
case k if k >= 2 => compose(source(m), List.fill(k)(m))
case k if k < 0 => throw new IllegalArgumentException
}
}
protected trait FunctorFrom extends Functor {
override val source: category.type = category // The source of a functor is generally a def, but here it's a val, namely "this".
}
protected trait NaturalTransformationFrom extends NaturalTransformation {
override val source: FunctorFrom
override val target: FunctorFrom
}
protected trait FunctorTo extends Functor {
override val target: category.type = category
}
protected trait NaturalTransformationTo extends NaturalTransformation {
override val source: FunctorTo
override val target: FunctorTo
}
protected trait EndoFunctor extends FunctorFrom with FunctorTo //This composes traits together: an endofunctor is a functorFrom that's also a functorTo.
trait Identity extends EndoFunctor {
override def onObjects(o: O) = o
override def onMorphisms(m: M) = m
}
object identityFunctor extends Identity
}
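// Illustrative instance (not part of the original file): the monoid (Int, +, 0) viewed as a
// category with a single object, which exercises identity/compose/exponentiate above,
// e.g. IntAdditionCategory.exponentiate(3, 4) == 12.
object IntAdditionCategory extends Category {
  type O = Unit
  type M = Int
  override def identity(o: Unit): Int = 0
  override def source(m: Int): Unit = ()
  override def target(m: Int): Unit = ()
  override def compose(m1: Int, m2: Int): Int = m1 + m2
}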
| JasonGross/categoricaldata | src/main/scala/net/categoricaldata/category/Category.scala | Scala | mit | 1,656 |
/*
* Skylark
* http://skylark.io
*
* Copyright 2012-2017 Quantarray, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.quantarray.skylark.measure
/**
* Measure provider.
*
* @author Araik Grigoryan
*/
trait MeasureProvider
{
trait MeasureReader
{
def apply(name: String): Option[AnyMeasure]
}
def read: MeasureReader
}
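// Illustrative sketch (not part of the original file): a provider backed by an in-memory map.
// The map contents are an assumption; AnyMeasure is the measure type from this package.
trait MapBackedMeasureProvider extends MeasureProvider
{
  protected def measuresByName: Map[String, AnyMeasure]

  def read: MeasureReader = new MeasureReader
  {
    def apply(name: String): Option[AnyMeasure] = measuresByName.get(name)
  }
}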
| quantarray/skylark | skylark-measure/src/main/scala/com/quantarray/skylark/measure/MeasureProvider.scala | Scala | apache-2.0 | 870 |
/*
* Copyright 2014 Heiko Seeberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.heikoseeberger.akkasse
import akka.actor.ActorSystem
import akka.http.marshalling.ToResponseMarshallable
import akka.http.model.HttpRequest
import akka.stream.ActorFlowMaterializer
import akka.stream.scaladsl.Source
import org.scalatest.{ BeforeAndAfterAll, Matchers, WordSpec }
import scala.concurrent.Await
import scala.concurrent.duration.DurationInt
class EventStreamMarshallingSpec extends WordSpec with Matchers with BeforeAndAfterAll with EventStreamMarshalling {
import system.dispatcher
implicit val system = ActorSystem()
implicit val flowMaterializer = ActorFlowMaterializer()
"A source of elements which can be viewed as ServerSentEvents" should {
"be marshallable to a HTTP response" in {
implicit def intToServerSentEvent(n: Int): ServerSentEvent = ServerSentEvent(n.toString)
val elements = 1 to 666
val marshallable = Source(elements): ToResponseMarshallable
val response = marshallable(HttpRequest()).flatMap {
_.entity.dataBytes
.map(_.utf8String)
.runFold(Vector.empty[String])(_ :+ _)
}
val actual = Await.result(response, 1 second)
val expected = elements.map(n => ServerSentEvent(n.toString).toString)
actual shouldBe expected
}
}
override protected def afterAll() = {
super.afterAll()
system.shutdown()
system.awaitTermination()
}
}
| huntc/akka-sse | src/test/scala/de/heikoseeberger/akkasse/EventStreamMarshallingSpec.scala | Scala | apache-2.0 | 1,969 |
package com.ubirch.backend.chain.model
/**
* author: cvandrei
* since: 2016-09-07
*/
object MetaModel {
val version: String = "1.0"
}
| ubirch/ubirch-storage-service | model/src/main/scala/com/ubirch/backend/chain/model/MetaModel.scala | Scala | apache-2.0 | 145 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalactic.Equality
import org.scalactic.Uniformity
import org.scalactic.Prettifier
import org.scalactic.StringNormalizations._
import SharedHelpers._
import FailureMessages.decorateToStringValue
import Matchers._
import exceptions.TestFailedException
class ListShouldContainNoElementsOfLogicalOrSpec extends FunSpec {
private val prettifier = Prettifier.default
//ADDITIONAL//
val invertedStringEquality =
new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a != b
}
val invertedListOfStringEquality =
new Equality[List[String]] {
def areEqual(a: List[String], b: Any): Boolean = a != b
}
val upperCaseStringEquality =
new Equality[String] {
def areEqual(a: String, b: Any): Boolean = a.toUpperCase == b
}
val upperCaseListOfStringEquality =
new Equality[List[String]] {
def areEqual(a: List[String], b: Any): Boolean = a.map(_.toUpperCase) == b
}
private def upperCase(value: Any): Any =
value match {
case l: List[_] => l.map(upperCase(_))
case s: String => s.toUpperCase
case c: Char => c.toString.toUpperCase.charAt(0)
case (s1: String, s2: String) => (s1.toUpperCase, s2.toUpperCase)
case e: java.util.Map.Entry[_, _] =>
(e.getKey, e.getValue) match {
case (k: String, v: String) => Entry(k.toUpperCase, v.toUpperCase)
case _ => value
}
case _ => value
}
val fileName: String = "ListShouldContainNoElementsOfLogicalOrSpec.scala"
describe("a List") {
val fumList: List[String] = List("fum")
val toList: List[String] = List("to")
describe("when used with (contain noElementsOf Seq(..) or contain noElementsOf Seq(..))") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
fumList should (contain noElementsOf Seq("fee", "fie", "foe", "fam") or contain noElementsOf Seq("fie", "fee", "fam", "foe"))
fumList should (contain noElementsOf Seq("fee", "fie", "foe", "fam") or contain noElementsOf Seq("fie", "fee", "fum", "foe"))
fumList should (contain noElementsOf Seq("fee", "fie", "foe", "fum") or contain noElementsOf Seq("fie", "fee", "fam", "foe"))
val e1 = intercept[TestFailedException] {
fumList should (contain noElementsOf Seq("fee", "fie", "foe", "fum") or contain noElementsOf Seq("fie", "fee", "fum", "foe"))
}
checkMessageStackDepth(e1, FailureMessages.containedAtLeastOneElementOf(prettifier, fumList, Seq("fee", "fie", "foe", "fum")) + ", and " + FailureMessages.containedAtLeastOneElementOf(prettifier, fumList, Seq("fie", "fee", "fum", "foe")), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
fumList should (contain noElementsOf Seq("fee", "fie", "foe", "fum") or contain noElementsOf Seq("fee", "fie", "fum", "foe"))
fumList should (contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM") or contain noElementsOf Seq("fee", "fie", "fum", "foe"))
fumList should (contain noElementsOf Seq("fee", "fie", "foe", "fum") or contain noElementsOf Seq("FEE", "FIE", "FUM", "FOE"))
val e1 = intercept[TestFailedException] {
fumList should (contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM") or (contain noElementsOf Seq("FEE", "FIE", "FUM", "FOE")))
}
checkMessageStackDepth(e1, FailureMessages.containedAtLeastOneElementOf(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")) + ", and " + FailureMessages.containedAtLeastOneElementOf(prettifier, fumList, Seq("FEE", "FIE", "FUM", "FOE")), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(fumList should (contain noElementsOf Seq("fee", "fie", "foe", "fum") or contain noElementsOf Seq("fee", "fie", "fum", "foe"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM") or contain noElementsOf Seq("fee", "fie", "fum", "foe"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (contain noElementsOf Seq("fee", "fie", "foe", "fum") or contain noElementsOf Seq("FEE", "FIE", "FUM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM") or contain noElementsOf Seq("FEE", "FIE", "FUM", "FOE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, FailureMessages.containedAtLeastOneElementOf(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")) + ", and " + FailureMessages.containedAtLeastOneElementOf(prettifier, fumList, Seq("FEE", "FIE", "FUM", "FOE")), fileName, thisLineNumber - 2)
(fumList should (contain noElementsOf Seq(" FEE ", " FIE ", " FOE ", " FAM ") or contain noElementsOf Seq(" FEE ", " FIE ", " FOE ", " FAM "))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
it("should allow RHS to contain duplicated value") {
fumList should (contain noElementsOf Seq("fee", "fie", "foe", "fie", "fum") or contain noElementsOf Seq("fie", "fee", "fam", "foe"))
fumList should (contain noElementsOf Seq("fie", "fee", "fam", "foe") or contain noElementsOf Seq("fee", "fie", "foe", "fie", "fum"))
}
}
describe("when used with (equal (..) and contain noElementsOf Seq(..))") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
fumList should (equal (fumList) or contain noElementsOf Seq("fie", "fee", "fam", "foe"))
fumList should (equal (toList) or contain noElementsOf Seq("fie", "fee", "fam", "foe"))
fumList should (equal (fumList) or contain noElementsOf Seq("fee", "fie", "foe", "fum"))
val e1 = intercept[TestFailedException] {
fumList should (equal (toList) or contain noElementsOf Seq("fee", "fie", "foe", "fum"))
}
checkMessageStackDepth(e1, FailureMessages.didNotEqual(prettifier, fumList, toList) + ", and " + FailureMessages.containedAtLeastOneElementOf(prettifier, fumList, Seq("fee", "fie", "foe", "fum")), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
fumList should (equal (fumList) or contain noElementsOf Seq("fie", "fee", "fum", "foe"))
fumList should (equal (toList) or contain noElementsOf Seq("fie", "fee", "fum", "foe"))
fumList should (equal (fumList) or contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM"))
val e1 = intercept[TestFailedException] {
fumList should (equal (toList) or (contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM")))
}
checkMessageStackDepth(e1, FailureMessages.didNotEqual(prettifier, fumList, toList) + ", and " + FailureMessages.containedAtLeastOneElementOf(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(fumList should (equal (toList) or contain noElementsOf Seq("fie", "fee", "fum", "foe"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (equal (fumList) or contain noElementsOf Seq("fie", "fee", "fum", "foe"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (equal (toList) or contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (equal (fumList) or contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM"))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, FailureMessages.didNotEqual(prettifier, fumList, fumList) + ", and " + FailureMessages.containedAtLeastOneElementOf(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")), fileName, thisLineNumber - 2)
(fumList should (equal (toList) or contain noElementsOf Seq(" FEE ", " FIE ", " FOE ", " FAM "))) (decided by invertedListOfStringEquality, after being lowerCased and trimmed)
}
it("should allow RHS to contain duplicated value") {
fumList should (equal (fumList) or contain noElementsOf Seq("fee", "fie", "foe", "fie", "fum"))
}
}
describe("when used with (be (..) and contain noElementsOf Seq(..))") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
fumList should (be (fumList) or contain noElementsOf Seq("fie", "fee", "fam", "foe"))
fumList should (be (toList) or contain noElementsOf Seq("fie", "fee", "fam", "foe"))
fumList should (be (fumList) or contain noElementsOf Seq("fie", "fee", "fum", "foe"))
val e1 = intercept[TestFailedException] {
fumList should (be (toList) or contain noElementsOf Seq("fie", "fee", "fum", "foe"))
}
checkMessageStackDepth(e1, FailureMessages.wasNotEqualTo(prettifier, fumList, toList) + ", and " + FailureMessages.containedAtLeastOneElementOf(prettifier, fumList, Seq("fie", "fee", "fum", "foe")), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
fumList should (be (fumList) or contain noElementsOf Seq("fee", "fie", "foe", "fum"))
fumList should (be (toList) or contain noElementsOf Seq("fee", "fie", "foe", "fum"))
fumList should (be (fumList) or contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM"))
val e1 = intercept[TestFailedException] {
fumList should (be (toList) or (contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM")))
}
checkMessageStackDepth(e1, FailureMessages.wasNotEqualTo(prettifier, fumList, toList) + ", and " + FailureMessages.containedAtLeastOneElementOf(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(fumList should (be (fumList) or contain noElementsOf Seq("fee", "fie", "foe", "fum"))) (decided by upperCaseStringEquality)
(fumList should (be (toList) or contain noElementsOf Seq("fee", "fie", "foe", "fum"))) (decided by upperCaseStringEquality)
(fumList should (be (fumList) or contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (be (toList) or contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, FailureMessages.wasNotEqualTo(prettifier, fumList, toList) + ", and " + FailureMessages.containedAtLeastOneElementOf(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")), fileName, thisLineNumber - 2)
(fumList should (be (fumList) or contain noElementsOf Seq(" FEE ", " FIE ", " FOE ", " FAM "))) (after being lowerCased and trimmed)
}
it("should allow RHS to contain duplicated value") {
fumList should (be (fumList) or contain noElementsOf Seq("fee", "fie", "foe", "fie", "fum"))
}
}
describe("when used with (contain noElementsOf Seq(..) and be (..))") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
fumList should (contain noElementsOf Seq("fie", "fee", "fam", "foe") or be (fumList))
fumList should (contain noElementsOf Seq("fie", "fee", "fum", "foe") or be (fumList))
fumList should (contain noElementsOf Seq("fie", "fee", "fam", "foe") or be (toList))
val e1 = intercept[TestFailedException] {
fumList should (contain noElementsOf Seq("fie", "fee", "fum", "foe") or be (toList))
}
checkMessageStackDepth(e1, FailureMessages.containedAtLeastOneElementOf(prettifier, fumList, Seq("fie", "fee", "fum", "foe")) + ", and " + FailureMessages.wasNotEqualTo(prettifier, fumList, toList), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
fumList should (contain noElementsOf Seq("fee", "fie", "foe", "fum") or be (fumList))
fumList should (contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM") or be (fumList))
fumList should (contain noElementsOf Seq("fee", "fie", "foe", "fum") or be (toList))
val e1 = intercept[TestFailedException] {
fumList should (contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM") or be (toList))
}
checkMessageStackDepth(e1, FailureMessages.containedAtLeastOneElementOf(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")) + ", and " + FailureMessages.wasNotEqualTo(prettifier, fumList, toList), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(fumList should (contain noElementsOf Seq("fee", "fie", "foe", "fum") or be (fumList))) (decided by upperCaseStringEquality)
(fumList should (contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM") or be (fumList))) (decided by upperCaseStringEquality)
(fumList should (contain noElementsOf Seq("fee", "fie", "foe", "fum") or be (toList))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (contain noElementsOf Seq("FEE", "FIE", "FOE", "FUM") or be (toList))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, FailureMessages.containedAtLeastOneElementOf(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")) + ", and " + FailureMessages.wasNotEqualTo(prettifier, fumList, toList), fileName, thisLineNumber - 2)
(fumList should (contain noElementsOf Seq(" FEE ", " FIE ", " FOE ", " FUM ") or be (fumList))) (after being lowerCased and trimmed)
}
it("should allow RHS to contain duplicated value") {
fumList should (contain noElementsOf Seq("fee", "fie", "foe", "fie", "fum") or be (fumList))
}
}
describe("when used with (not contain noElementsOf Seq(..) and not contain noElementsOf Seq(..))") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
fumList should (not contain noElementsOf (Seq("fee", "fie", "foe", "fum")) or not contain noElementsOf (Seq("fee", "fie", "fum", "foe")))
fumList should (not contain noElementsOf (Seq("fee", "fie", "foe", "fam")) or not contain noElementsOf (Seq("fee", "fie", "fum", "foe")))
fumList should (not contain noElementsOf (Seq("fee", "fie", "foe", "fum")) or not contain noElementsOf (Seq("fee", "fie", "fam", "foe")))
val e1 = intercept[TestFailedException] {
fumList should (not contain noElementsOf (Seq("fee", "fie", "foe", "fam")) or not contain noElementsOf (Seq("fee", "fie", "fam", "foe")))
}
checkMessageStackDepth(e1, FailureMessages.didNotContainAtLeastOneElementOf(prettifier, fumList, Seq("fee", "fie", "foe", "fam")) + ", and " + FailureMessages.didNotContainAtLeastOneElementOf(prettifier, fumList, Seq("fee", "fie", "fam", "foe")), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
fumList should (not contain noElementsOf (Seq("FEE", "FIE", "FOE", "FUM")) or not contain noElementsOf (Seq("FEE", "FIE", "FUM", "FOE")))
fumList should (not contain noElementsOf (Seq("fee", "fie", "foe", "fum")) or not contain noElementsOf (Seq("FEE", "FIE", "FUM", "FOE")))
fumList should (not contain noElementsOf (Seq("FEE", "FIE", "FOE", "FUM")) or not contain noElementsOf (Seq("fee", "fie", "fum", "foe")))
val e1 = intercept[TestFailedException] {
fumList should (not contain noElementsOf (Seq("fee", "fie", "foe", "fum")) or not contain noElementsOf (Seq("fee", "fie", "fum", "foe")))
}
checkMessageStackDepth(e1, FailureMessages.didNotContainAtLeastOneElementOf(prettifier, fumList, Seq("fee", "fie", "foe", "fum")) + ", and " + FailureMessages.didNotContainAtLeastOneElementOf(prettifier, fumList, Seq("fee", "fie", "fum", "foe")), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(fumList should (not contain noElementsOf (Seq("FEE", "FIE", "FOE", "FUM")) or not contain noElementsOf (Seq("FEE", "FIE", "FUM", "FOE")))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (not contain noElementsOf (Seq("fee", "fie", "foe", "fum")) or not contain noElementsOf (Seq("FEE", "FIE", "FUM", "FOE")))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(fumList should (not contain noElementsOf (Seq("FEE", "FIE", "FOE", "FUM")) or not contain noElementsOf (Seq("fee", "fie", "fum", "foe")))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not contain noElementsOf (Seq("fee", "fie", "foe", "fum")) or not contain noElementsOf (Seq("fee", "fie", "fum", "foe")))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, FailureMessages.didNotContainAtLeastOneElementOf(prettifier, fumList, Seq("fee", "fie", "foe", "fum")) + ", and " + FailureMessages.didNotContainAtLeastOneElementOf(prettifier, fumList, Seq("fee", "fie", "fum", "foe")), fileName, thisLineNumber - 2)
}
it("should allow RHS to contain duplicated value") {
fumList should (not contain noElementsOf (Seq("fee", "fie", "foe", "fie", "fum")) or not contain noElementsOf (Seq("fee", "fie", "fum", "foe")))
fumList should (not contain noElementsOf (Seq("fee", "fie", "fum", "foe")) or not contain noElementsOf (Seq("fee", "fie", "foe", "fie", "fum")))
}
}
describe("when used with (not equal (..) and not contain noElementsOf Seq(..))") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
fumList should (not equal (toList) or not contain noElementsOf (Seq("fee", "fie", "foe", "fum")))
fumList should (not equal (fumList) or not contain noElementsOf (Seq("fee", "fie", "foe", "fum")))
fumList should (not equal (toList) or not contain noElementsOf (Seq("fee", "fie", "foe", "fam")))
val e1 = intercept[TestFailedException] {
fumList should (not equal (fumList) or not contain noElementsOf (Seq("fee", "fie", "foe", "fam")))
}
checkMessageStackDepth(e1, FailureMessages.equaled(prettifier, fumList, fumList) + ", and " + FailureMessages.didNotContainAtLeastOneElementOf(prettifier, fumList, Seq("fee", "fie", "foe", "fam")), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
fumList should (not equal (toList) or not contain noElementsOf (Seq("FIE", "FEE", "FUM", "FOE")))
fumList should (not equal (fumList) or not contain noElementsOf (Seq("FIE", "FEE", "FUM", "FOE")))
fumList should (not equal (toList) or not contain noElementsOf (Seq("fie", "fee", "fum", "foe")))
val e2 = intercept[TestFailedException] {
fumList should (not equal (fumList) or (not contain noElementsOf (Seq("fie", "fee", "fum", "foe"))))
}
checkMessageStackDepth(e2, FailureMessages.equaled(prettifier, fumList, fumList) + ", and " + FailureMessages.didNotContainAtLeastOneElementOf(prettifier, fumList, Seq("fie", "fee", "fum", "foe")), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(fumList should (not equal (fumList) or not contain noElementsOf (Seq("FIE", "FEE", "FUM", "FOE")))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (not equal (toList) or not contain noElementsOf (Seq("FIE", "FEE", "FUM", "FOE")))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
(fumList should (not equal (fumList) or not contain noElementsOf (Seq("fie", "fee", "fum", "foe")))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not equal (toList) or not contain noElementsOf (Seq("fie", "fee", "fum", "foe")))) (decided by invertedListOfStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, FailureMessages.equaled(prettifier, fumList, toList) + ", and " + FailureMessages.didNotContainAtLeastOneElementOf(prettifier, fumList, Seq("fie", "fee", "fum", "foe")), fileName, thisLineNumber - 2)
(fumList should (not contain noElementsOf (Seq(" FEE ", " FIE ", " FOE ", " FUM ")) or not contain noElementsOf (Seq(" FEE ", " FIE ", " FOE ", " FUM ")))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
it("should allow RHS to contain duplicated value") {
fumList should (not equal (toList) or not contain noElementsOf (Seq("fee", "fie", "foe", "fie", "fum")))
}
}
describe("when used with (not be (..) and not contain noElementsOf Seq(..))") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
fumList should (not be (toList) or not contain noElementsOf (Seq("fee", "fie", "foe", "fum")))
fumList should (not be (fumList) or not contain noElementsOf (Seq("fee", "fie", "foe", "fum")))
fumList should (not be (toList) or not contain noElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))
val e1 = intercept[TestFailedException] {
fumList should (not be (fumList) or not contain noElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))
}
checkMessageStackDepth(e1, FailureMessages.wasEqualTo(prettifier, fumList, fumList) + ", and " + FailureMessages.didNotContainAtLeastOneElementOf(prettifier, fumList, Seq("FEE", "FIE", "FOE", "FUM")), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
fumList should (not be (toList) or not contain noElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))
fumList should (not be (fumList) or not contain noElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))
fumList should (not be (toList) or not contain noElementsOf (Seq("fee", "fie", "foe", "fum")))
val e1 = intercept[TestFailedException] {
fumList should (not be (fumList) or (not contain noElementsOf (Seq("fee", "fie", "foe", "fum"))))
}
checkMessageStackDepth(e1, FailureMessages.wasEqualTo(prettifier, fumList, fumList) + ", and " + FailureMessages.didNotContainAtLeastOneElementOf(prettifier, fumList, Seq("fee", "fie", "foe", "fum")), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(fumList should (not be (toList) or not contain noElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))) (decided by upperCaseStringEquality)
(fumList should (not be (fumList) or not contain noElementsOf (Seq("FEE", "FIE", "FOE", "FUM")))) (decided by upperCaseStringEquality)
(fumList should (not be (toList) or not contain noElementsOf (Seq("fee", "fie", "foe", "fum")))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(fumList should (not be (fumList) or not contain noElementsOf (Seq("fee", "fie", "foe", "fum")))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, FailureMessages.wasEqualTo(prettifier, fumList, fumList) + ", and " + FailureMessages.didNotContainAtLeastOneElementOf(prettifier, fumList, Seq("fee", "fie", "foe", "fum")), fileName, thisLineNumber - 2)
(fumList should (not contain noElementsOf (Seq(" FEE ", " FIE ", " FOE ", " FUM ")) or not contain noElementsOf (Seq(" FEE ", " FIE ", " FOE ", " FUM ")))) (after being lowerCased and trimmed, after being lowerCased and trimmed)
}
it("should allow RHS to contain duplicated value") {
fumList should (not be (toList) or not contain noElementsOf (Seq("fee", "fie", "foe", "fie", "fum")))
}
}
}
describe("collection of Lists") {
val list1s: Vector[List[Int]] = Vector(List(1), List(1), List(1))
val lists: Vector[List[Int]] = Vector(List(1), List(1), List(2))
val nils: Vector[List[Int]] = Vector(Nil, Nil, Nil)
val listsNil: Vector[List[Int]] = Vector(List(1), List(1), Nil)
val hiLists: Vector[List[String]] = Vector(List("hi"), List("hi"), List("hi"))
val toLists: Vector[List[String]] = Vector(List("to"), List("to"), List("to"))
def allErrMsg(index: Int, message: String, lineNumber: Int, left: Any): String =
"'all' inspection failed, because: \\n" +
" at index " + index + ", " + message + " (" + fileName + ":" + (lineNumber) + ") \\n" +
"in " + decorateToStringValue(prettifier, left)
describe("when used with (contain noElementsOf Seq(..) and contain noElementsOf Seq(..))") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
all (list1s) should (contain noElementsOf Seq(3, 6, 9) or contain noElementsOf Seq(2, 6, 8))
all (list1s) should (contain noElementsOf Seq(1, 2, 3) or contain noElementsOf Seq(2, 6, 8))
all (list1s) should (contain noElementsOf Seq(3, 6, 9) or contain noElementsOf Seq(1, 2, 3))
atLeast (2, lists) should (contain noElementsOf Seq(2, 6, 8) or contain noElementsOf Seq(3, 6, 9))
atLeast (2, lists) should (contain noElementsOf Seq(1, 2, 3) or contain noElementsOf Seq(3, 6, 9))
atLeast (2, lists) should (contain noElementsOf Seq(2, 6, 8) or contain noElementsOf Seq(1, 2, 3))
val e1 = intercept[TestFailedException] {
all (lists) should (contain noElementsOf Seq(2, 6, 8) or contain noElementsOf Seq(2, 3, 5))
}
checkMessageStackDepth(e1, allErrMsg(2, FailureMessages.containedAtLeastOneElementOf(prettifier, lists(2), Seq(2, 6, 8)) + ", and " + FailureMessages.containedAtLeastOneElementOf(prettifier, lists(2), Seq(2, 3, 5)), thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
all (hiLists) should (contain noElementsOf Seq("hi", "he") or contain noElementsOf Seq("hi", "he"))
all (hiLists) should (contain noElementsOf Seq("hi", "he") or contain noElementsOf Seq("HI", "HE"))
all (hiLists) should (contain noElementsOf Seq("HI", "HE") or contain noElementsOf Seq("hi", "he"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (contain noElementsOf Seq("HI", "HE") or contain noElementsOf Seq("HI", "HE"))
}
checkMessageStackDepth(e1, allErrMsg(0, FailureMessages.containedAtLeastOneElementOf(prettifier, hiLists(0), Seq("HI", "HE")) + ", and " + FailureMessages.containedAtLeastOneElementOf(prettifier, hiLists(0), Seq("HI", "HE")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(all (hiLists) should (contain noElementsOf Seq("hi", "he") or contain noElementsOf Seq("hi", "he"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (contain noElementsOf Seq("hi", "he") or contain noElementsOf Seq("HI", "HE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (contain noElementsOf Seq("HI", "HE") or contain noElementsOf Seq("hi", "he"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (contain noElementsOf Seq("HI", "HE") or contain noElementsOf Seq("HI", "HE"))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, FailureMessages.containedAtLeastOneElementOf(prettifier, hiLists(0), Seq("HI", "HE")) + ", and " + FailureMessages.containedAtLeastOneElementOf(prettifier, hiLists(0), Seq("HI", "HE")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should allow RHS to contain duplicated value") {
all (list1s) should (contain noElementsOf Seq(1, 2, 2, 3) or contain noElementsOf Seq(2, 6, 8))
all (list1s) should (contain noElementsOf Seq(2, 6, 8) or contain noElementsOf Seq(1, 2, 2, 3))
}
}
describe("when used with (be (..) and contain noElementsOf Seq(..))") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
all (list1s) should (be (List(1)) or contain noElementsOf Seq(2, 6, 8))
all (list1s) should (be (List(2)) or contain noElementsOf Seq(2, 6, 8))
all (list1s) should (be (List(1)) or contain noElementsOf Seq(1, 2, 3))
val e1 = intercept[TestFailedException] {
all (list1s) should (be (List(2)) or contain noElementsOf Seq(1, 2, 3))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(prettifier, List(1)) + " was not equal to " + decorateToStringValue(prettifier, List(2)) + ", and " + FailureMessages.containedAtLeastOneElementOf(prettifier, list1s(0), Seq(1, 2, 3)), thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
all (hiLists) should (be (List("hi")) or contain noElementsOf Seq("hi", "he"))
all (hiLists) should (be (List("ho")) or contain noElementsOf Seq("hi", "he"))
all (hiLists) should (be (List("hi")) or contain noElementsOf Seq("HI", "HE"))
val e1 = intercept[TestFailedException] {
all (hiLists) should (be (List("ho")) or contain noElementsOf Seq("HI", "HE"))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(prettifier, List("hi")) + " was not equal to " + decorateToStringValue(prettifier, List("ho")) + ", and " + FailureMessages.containedAtLeastOneElementOf(prettifier, hiLists(0), Seq("HI", "HE")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(all (hiLists) should (be (List("hi")) or contain noElementsOf Seq("hi", "he"))) (decided by upperCaseStringEquality)
(all (hiLists) should (be (List("ho")) or contain noElementsOf Seq("hi", "he"))) (decided by upperCaseStringEquality)
(all (hiLists) should (be (List("hi")) or contain noElementsOf Seq("HI", "HE"))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (be (List("ho")) or contain noElementsOf Seq("HI", "HE"))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(prettifier, List("hi")) + " was not equal to " + decorateToStringValue(prettifier, List("ho")) + ", and " + FailureMessages.containedAtLeastOneElementOf(prettifier, hiLists(0), Seq("HI", "HE")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should allow RHS to contain duplicated value") {
all (list1s) should (be (List(1)) or contain noElementsOf Seq(1, 2, 2, 3))
}
}
describe("when used with (not contain noElementsOf Seq(..) and not contain noElementsOf Seq(..))") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
all (list1s) should (not contain noElementsOf (Seq(1, 2, 3)) or not contain noElementsOf (Seq(1, 6, 8)))
all (list1s) should (not contain noElementsOf (Seq(2, 6, 8)) or not contain noElementsOf (Seq(1, 6, 8)))
all (list1s) should (not contain noElementsOf (Seq(1, 2, 3)) or not contain noElementsOf (Seq(2, 6, 8)))
val e1 = intercept[TestFailedException] {
all (lists) should (not contain noElementsOf (Seq(1, 6, 8)) or not contain noElementsOf (Seq(1, 3, 5)))
}
checkMessageStackDepth(e1, allErrMsg(2, FailureMessages.didNotContainAtLeastOneElementOf(prettifier, lists(2), Seq(1, 6, 8)) + ", and " + FailureMessages.didNotContainAtLeastOneElementOf(prettifier, lists(2), Seq(1, 3, 5)), thisLineNumber - 2, lists), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
all (hiLists) should (not contain noElementsOf (Seq("HI", "HE")) or not contain noElementsOf (Seq("HI", "HE")))
all (hiLists) should (not contain noElementsOf (Seq("hi", "he")) or not contain noElementsOf (Seq("HI", "HE")))
all (hiLists) should (not contain noElementsOf (Seq("HI", "HE")) or not contain noElementsOf (Seq("hi", "he")))
val e1 = intercept[TestFailedException] {
all (hiLists) should (not contain noElementsOf (Seq("hi", "he")) or not contain noElementsOf (Seq("hi", "he")))
}
checkMessageStackDepth(e1, allErrMsg(0, FailureMessages.didNotContainAtLeastOneElementOf(prettifier, hiLists(0), Seq("hi", "he")) + ", and " + FailureMessages.didNotContainAtLeastOneElementOf(prettifier, hiLists(0), Seq("hi", "he")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(all (hiLists) should (not contain noElementsOf (Seq("HI", "HE")) or not contain noElementsOf (Seq("HI", "HE")))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (not contain noElementsOf (Seq("hi", "he")) or not contain noElementsOf (Seq("HI", "HE")))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
(all (hiLists) should (not contain noElementsOf (Seq("HI", "HE")) or not contain noElementsOf (Seq("hi", "he")))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (not contain noElementsOf (Seq("hi", "he")) or not contain noElementsOf (Seq("hi", "he")))) (decided by upperCaseStringEquality, decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, FailureMessages.didNotContainAtLeastOneElementOf(prettifier, hiLists(0), Seq("hi", "he")) + ", and " + FailureMessages.didNotContainAtLeastOneElementOf(prettifier, hiLists(0), Seq("hi", "he")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should allow RHS to contain duplicated value") {
all (list1s) should (not contain noElementsOf (Seq(1, 2, 2, 3)) or not contain noElementsOf (Seq(1, 6, 8)))
all (list1s) should (not contain noElementsOf (Seq(1, 6, 8)) or not contain noElementsOf (Seq(1, 2, 2, 3)))
}
}
describe("when used with (not be (..) and not contain noElementsOf Seq(..))") {
it("should do nothing if valid, else throw a TFE with an appropriate error message") {
all (list1s) should (not be (List(2)) or not contain noElementsOf (Seq(1, 6, 8)))
all (list1s) should (not be (List(1)) or not contain noElementsOf (Seq(1, 6, 8)))
all (list1s) should (not be (List(2)) or not contain noElementsOf (Seq(2, 6, 8)))
val e1 = intercept[TestFailedException] {
all (list1s) should (not be (List(1)) or not contain noElementsOf (Seq(2, 6, 8)))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(prettifier, List(1)) + " was equal to " + decorateToStringValue(prettifier, List(1)) + ", and " + FailureMessages.didNotContainAtLeastOneElementOf(prettifier, list1s(0), Seq(2, 6, 8)), thisLineNumber - 2, list1s), fileName, thisLineNumber - 2)
}
it("should use the implicit Equality in scope") {
implicit val ise = upperCaseStringEquality
all (hiLists) should (not be (List("ho")) or not contain noElementsOf (Seq("HI", "HE")))
all (hiLists) should (not be (List("hi")) or not contain noElementsOf (Seq("HI", "HE")))
all (hiLists) should (not be (List("ho")) or not contain noElementsOf (Seq("hi", "he")))
val e1 = intercept[TestFailedException] {
all (hiLists) should (not be (List("hi")) or not contain noElementsOf (Seq("hi", "he")))
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(prettifier, List("hi")) + " was equal to " + decorateToStringValue(prettifier, List("hi")) + ", and " + FailureMessages.didNotContainAtLeastOneElementOf(prettifier, hiLists(0), Seq("hi", "he")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should use an explicitly provided Equality") {
(all (hiLists) should (not be (List("ho")) or not contain noElementsOf (Seq("HI", "HE")))) (decided by upperCaseStringEquality)
(all (hiLists) should (not be (List("hi")) or not contain noElementsOf (Seq("HI", "HE")))) (decided by upperCaseStringEquality)
(all (hiLists) should (not be (List("ho")) or not contain noElementsOf (Seq("hi", "he")))) (decided by upperCaseStringEquality)
val e1 = intercept[TestFailedException] {
(all (hiLists) should (not be (List("hi")) or not contain noElementsOf (Seq("hi", "he")))) (decided by upperCaseStringEquality)
}
checkMessageStackDepth(e1, allErrMsg(0, decorateToStringValue(prettifier, List("hi")) + " was equal to " + decorateToStringValue(prettifier, List("hi")) + ", and " + FailureMessages.didNotContainAtLeastOneElementOf(prettifier, hiLists(0), Seq("hi", "he")), thisLineNumber - 2, hiLists), fileName, thisLineNumber - 2)
}
it("should allow RHS to contain duplicated value") {
all (list1s) should (not be (List(2)) or not contain noElementsOf (Seq(1, 2, 2, 3)))
}
}
}
}
| dotty-staging/scalatest | scalatest-test/src/test/scala/org/scalatest/ListShouldContainNoElementsOfLogicalOrSpec.scala | Scala | apache-2.0 | 38,869 |
package types
trait Refs {
trait T
type TYPEREF = Int
type TERMREFpkg = /**/scala./**/Long
private[types] val TYPEREFpkg: Int = ???
type TYPEREFsymbol = T
def TYPEREFdirect[T]: T
def TERMREFdirect(x: Int): x.type
} | JetBrains/intellij-scala | tasty/runtime/data/types/Refs.scala | Scala | apache-2.0 | 235 |
package entities
import slick.lifted.{ForeignKeyQuery, ProvenShape}
import utils.DbModule
case class SubIssue(id: Long, issueId: Long, summary: String, assigneeId: Long, reporterId: Long) extends BaseEntity {
def toSimple = SimpleSubIssue(issueId, summary, assigneeId, reporterId)
def toVerySimple = VerySimpleSubIssue(summary, assigneeId, reporterId)
}
case class SimpleSubIssue(issueId: Long, summary: String, assigneeId: Long, reporterId: Long)
case class VerySimpleSubIssue(summary: String, assigneeId: Long, reporterId: Long) {
/**
* returns SubIssue with same values and id=0, issueId = 0
*/
def toNormal = SubIssue(id = 0, issueId = 0, summary, assigneeId, reporterId)
}
trait SubIssuesTableComponent extends BaseTableComponent {
this: DbModule with IssuesTableComponent =>
import profile.api._
class SubIssuesTable(tag: Tag) extends BaseTable[SubIssue](tag, "SUBISSUES") {
def issueId: Rep[Long] = column[Long]("issueId")
def summary: Rep[String] = column[String]("summary")
def assigneeId: Rep[Long] = column[Long]("assigneeId")
def reporterId: Rep[Long] = column[Long]("reporterId")
def issue: ForeignKeyQuery[IssuesTable, Issue] =
foreignKey("FK_SUBISSUES_ISSUES", issueId, TableQuery[IssuesTable])(_.id)
override def * : ProvenShape[SubIssue] =
(id, issueId, summary, assigneeId, reporterId) <> (SubIssue.tupled, SubIssue.unapply)
}
} | Kanris826/spray-slick-swagger | src/main/scala/entities/SubIssues.scala | Scala | apache-2.0 | 1,422 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package base
package types
import com.intellij.lang.ASTNode
import com.intellij.psi.{PsiElement, ResolveState}
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.base.types._
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScTypeAliasDeclaration, ScValueDeclaration}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition
import org.jetbrains.plugins.scala.lang.psi.api.{ScalaElementVisitor, ScalaPsiElement}
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.api.designator.ScDesignatorType
import org.jetbrains.plugins.scala.lang.psi.types.api.{Nothing, Singleton}
import org.jetbrains.plugins.scala.lang.psi.types.result._
/**
* @author Alexander Podkhalyuzin
* Date: 13.03.2008
*/
class ScExistentialTypeElementImpl(node: ASTNode) extends ScalaPsiElementImpl(node) with ScExistentialTypeElement {
override protected def innerType: TypeResult = {
/** From SLS 3.2.10
*
* Existential Quantification over Values
*
* As a syntactic convenience, the bindings clause in an existential type may also contain
* value declarations val x: T.
* An existential type T forSome { Q; val x: S; Q′ } is treated as a shorthand
* for the type T′ forSome { Q; type t <: S with Singleton; Q′ },
* where t is a fresh type name and T′ results from T by replacing every occurrence of
* x.type with t.
*/
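// Illustrative example (an assumption, not from the original file): under this rule
//   Seq[x.type] forSome { val x: AnyRef }
// behaves like
//   Seq[t] forSome { type t <: AnyRef with Singleton }
// which is what withDesugaredValTypes below reproduces by replacing the designator type of the
// declared value with a fresh ScExistentialArgument named "x$type", bounded by the declared
// type with Singleton.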
def withDesugaredValTypes(quantified: ScType): ScType = {
val valDeclarations = clause.declarations.filterByType[ScValueDeclaration]
if (valDeclarations.isEmpty) quantified
else quantified.updateLeaves {
case des @ ScDesignatorType(named: ScTypedDefinition) =>
val valueDeclaration = valDeclarations.find(_.declaredElements.contains(named))
val valType = valueDeclaration.flatMap(_.typeElement).map(_.`type`().getOrAny)
valType match {
case Some(tp) =>
val compound = ScCompoundType(Seq(tp, Singleton))
val name = s"${named.name}$$type"
ScExistentialArgument(name, Nil, Nothing, compound)
case None => des
}
}
}
quantified.`type`().map { qt =>
ScExistentialType(withDesugaredValTypes(qt))
}
}
import com.intellij.psi.scope.PsiScopeProcessor
override def processDeclarations(processor: PsiScopeProcessor,
state: ResolveState,
lastParent: PsiElement,
place: PsiElement): Boolean = {
if (lastParent == quantified || (lastParent.isInstanceOf[ScalaPsiElement] &&
lastParent.asInstanceOf[ScalaPsiElement].getDeepSameElementInContext == quantified)) {
for (decl <- clause.declarations) {
decl match {
case alias: ScTypeAliasDeclaration => if (!processor.execute(alias, state)) return false
case valDecl: ScValueDeclaration =>
for (declared <- valDecl.declaredElements) if (!processor.execute(declared, state)) return false
}
}
}
true
}
override protected def acceptScala(visitor: ScalaElementVisitor): Unit = {
visitor.visitExistentialTypeElement(this)
}
} | JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/base/types/ScExistentialTypeElementImpl.scala | Scala | apache-2.0 | 3,402 |
import scala.tools.partest.nest.FileManager._
object Test extends dotty.runtime.LegacyApp {
val cm = reflect.runtime.currentMirror
val u = cm.universe
import u._
val JavaUniverseTpe = typeOf[reflect.runtime.JavaUniverse]
val DefinitionsModule = JavaUniverseTpe.member(TermName("definitions"))
def forceCode(prefix: String, tp: Type): String = {
def isLazyAccessorOrObject(sym: Symbol) = (
(sym.isMethod && sym.asMethod.isLazy)
|| sym.isModule
)
val forcables = tp.members.sorted.filter(isLazyAccessorOrObject)
forcables.map {
sym =>
val path = s"$prefix.${sym.name}"
" " + (
if (sym.isPrivate || sym.isProtected) s"// inaccessible: $path"
else path
)
}.mkString("\n")
}
val code =
s"""|// Generated Code, validated by run/t6240-universe-code-gen.scala
|package scala.reflect
|package runtime
|
|trait JavaUniverseForce { self: runtime.JavaUniverse =>
| def force() {
| Literal(Constant(42)).duplicate
| nme.flattenedName()
| nme.raw
| WeakTypeTag
| TypeTag
| TypeTag.Byte.tpe
| TypeTag.Short.tpe
| TypeTag.Char.tpe
| TypeTag.Int.tpe
| TypeTag.Long.tpe
| TypeTag.Float.tpe
| TypeTag.Double.tpe
| TypeTag.Boolean.tpe
| TypeTag.Unit.tpe
| TypeTag.Any.tpe
| TypeTag.AnyVal.tpe
| TypeTag.AnyRef.tpe
| TypeTag.Object.tpe
| TypeTag.Nothing.tpe
| TypeTag.Null.tpe
|
|${forceCode("this", JavaUniverseTpe)}
|${forceCode("definitions", DefinitionsModule.info)}
|${forceCode("refChecks", typeOf[scala.reflect.internal.transform.RefChecks])}
|${forceCode("uncurry", typeOf[scala.reflect.internal.transform.UnCurry])}
|${forceCode("erasure", typeOf[scala.reflect.internal.transform.Erasure])}
| }
|}""".stripMargin
import java.io.File
val testFile = new File(sys.props("partest.test-path"))
val actualFile = new java.io.File(testFile.getParent + "/../../../src/reflect/scala/reflect/runtime/JavaUniverseForce.scala").getCanonicalFile
val actual = scala.io.Source.fromFile(actualFile)
val actualLines = actual.getLines.toList
val generatedLines = code.lines.toList
if (actualLines != generatedLines) {
val msg = s"""|${actualFile} must be updated.
|===========================================================
| DIFF:
|===========================================================
|${compareContents(actualLines, generatedLines)}
|===========================================================
| NEW CONTENTS:
|===========================================================
|${code}""".stripMargin
assert(false, msg)
}
}
| yusuke2255/dotty | tests/pending/run/t6240-universe-code-gen.scala | Scala | bsd-3-clause | 3,008 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.exceptions
import scala.runtime.AbstractFunction1
/**
* A composite exception represents a list of exceptions
* that were caught while delaying errors.
*/
final class CompositeException(val errors: Seq[Throwable])
extends RuntimeException() with Serializable {
override def toString: String = {
getClass.getName + (
if (errors.isEmpty) "" else {
val (first, last) = errors.splitAt(2)
val str = first.map(_.getClass.getName).mkString(", ")
val reasons = if (last.nonEmpty) str + "..." else str
"(" + reasons + ")"
})
}
}
object CompositeException extends AbstractFunction1[Seq[Throwable], CompositeException] {
/** Builder for [[CompositeException]]. */
def apply(errors: Seq[Throwable]): CompositeException =
new CompositeException(errors.toList)
/** For pattern matching [[CompositeException]] references. */
def unapply(ref: CompositeException): Option[List[Throwable]] =
Some(ref.errors.toList)
}
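// Illustrative sketch (not part of the original file): pattern matching on the aggregated
// errors via the unapply defined above.
private object CompositeExceptionUsage {
  def summarize(t: Throwable): String = t match {
    case CompositeException(errors) => s"${errors.size} delayed error(s)"
    case other => other.toString
  }
}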
| Wogan/monix | monix-execution/shared/src/main/scala/monix/execution/exceptions/CompositeException.scala | Scala | apache-2.0 | 1,680 |
/* Copyright (C) 2011 by John A. De Goes
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package rosetta.json.dispatch
import dispatch.json._
import rosetta.json._
import rosetta.json.dispatch._
object JsonDispatchTest extends JsonTest[JsValue] {
val jsonImplementation = JsonDispatch
tuplesCanBeSerialized
basicCollectionsCanBeSerialized
invertibleSerializationForAllJson
getForAllObjects
mapDownWithIdentity
mapUpWithIdentity
} | jdegoes/RosettaJson | src/test/scala/rosetta/json/dispatch/JsonDispatchTest.scala | Scala | mit | 1,483 |
package com.ambrosoft.learning
/**
* Created by jacek on 3/12/17.
*/
case class Point(x: Int, y: Int) {
lazy val dist: Double = euclideanDist(x, y)
def dist(p: Point): Double = euclideanDist(p.x - x, p.y - y)
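  // euclideanDist is not defined in this file; the original project presumably supplies it
  // elsewhere (e.g. a package object). A minimal definition matching its use here would be:
  private def euclideanDist(dx: Int, dy: Int): Double =
    math.sqrt(dx.toDouble * dx + dy.toDouble * dy)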
}
object Zero extends Point(0, 0)
object PointMain extends App {
println(Zero)
println(Zero.x)
println(Point(3, 4))
} | JacekAmbroziak/Ambrosoft | src/main/scala/com/ambrosoft/learning/Point.scala | Scala | apache-2.0 | 344 |
package com.twitter.finagle.thrift
import org.jboss.netty.channel.{SimpleChannelHandler, ChannelEvent, ChannelHandlerContext}
import org.jboss.netty.handler.codec.frame.{LengthFieldBasedFrameDecoder, LengthFieldPrepender}
class ThriftFrameCodec extends SimpleChannelHandler {
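  // Thrift framed transport: every message is preceded by a 4-byte length prefix.
  // Inbound, LengthFieldBasedFrameDecoder(maxFrameLength = 0x7FFFFFFF, lengthFieldOffset = 0,
  // lengthFieldLength = 4, lengthAdjustment = 0, initialBytesToStrip = 4) reassembles a full
  // frame and strips the prefix; outbound, LengthFieldPrepender(4) prepends it again.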
private[this] val decoder = new LengthFieldBasedFrameDecoder(0x7FFFFFFF, 0, 4, 0, 4)
private[this] val encoder = new LengthFieldPrepender(4)
override def handleUpstream(ctx: ChannelHandlerContext, e: ChannelEvent) =
decoder.handleUpstream(ctx, e)
override def handleDownstream(ctx: ChannelHandlerContext, e: ChannelEvent) =
encoder.handleDownstream(ctx, e)
}
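
// Illustrative sketch (not part of the original file): installing the codec above at
// the head of a Netty 3 pipeline so thrift messages get a 4-byte length prefix.
private object ThriftFrameCodecPipelineSketch {
  import org.jboss.netty.channel.{ChannelPipeline, Channels}

  def framedPipeline(): ChannelPipeline = {
    val pipeline = Channels.pipeline()
    pipeline.addLast("thriftFrameCodec", new ThriftFrameCodec)
    // Handlers that encode/decode the actual thrift payload would be added after this one.
    pipeline
  }
}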
| foursquare/finagle | finagle-thrift/src/main/scala/com/twitter/finagle/thrift/ThriftFrameCodec.scala | Scala | apache-2.0 | 655 |
package scuff.concurrent
import java.util.concurrent.{ ConcurrentLinkedQueue, CountDownLatch, RejectedExecutionException, ThreadFactory }
import scala.concurrent.{ ExecutionContextExecutor, Future }
import scala.util.control.NonFatal
object LockFreeExecutionContext {
/**
* Queue abstraction.
*/
trait RunQueue {
/** Get next or `null`. */
def poll(): Runnable
/** Add to queue, if possible. */
def offer(r: Runnable): Boolean
}
private class DefaultQueue extends RunQueue {
private[this] val queue = new ConcurrentLinkedQueue[Runnable]
def poll(): Runnable = queue.poll()
def offer(r: Runnable): Boolean = queue.offer(r)
}
def apply(
numThreads: Int,
tf: ThreadFactory,
failureReporter: Throwable => Unit,
whenIdle: => Unit = Thread.`yield`,
queue: RunQueue = new DefaultQueue) = {
val svc = new LockFreeExecutionContext(numThreads, tf, failureReporter, whenIdle, queue)
svc.start()
svc
}
}
/**
 * High-throughput executor. Use this for temporary processing
 * of predictably high load, and shut it down when done, since its
 * lock-free design relies on spinning threads.
* NOTE: This class is safe to use for multiple producers
* (that is, if the `RunQueue` implementation supports it;
* the default one does), but cannot safely be shut down unless
* all producers have stopped, so ensure shutdown coordination.
*/
final class LockFreeExecutionContext private (
consumerThreads: Int,
tf: ThreadFactory,
failureReporter: Throwable => Unit,
whenIdle: => Unit,
queue: LockFreeExecutionContext.RunQueue)
extends ExecutionContextExecutor {
require(consumerThreads > 0, s"Must have at least 1 consumer thread. Received $consumerThreads")
@volatile private[this] var isShutdown = false
private[this] val activeThreads = new CountDownLatch(consumerThreads)
private[LockFreeExecutionContext] def start() = threads.foreach(_.start)
private[this] val threads =
for (_ <- 1 to consumerThreads) yield tf newThread new Runnable {
def run = try pollQueue() finally activeThreads.countDown()
private def pollQueue(): Unit = {
while (!Thread.currentThread.isInterrupted) {
queue.poll() match {
case null =>
if (isShutdown) Thread.currentThread.interrupt()
else whenIdle
case r =>
try r.run() catch {
case NonFatal(e) => reportFailure(e)
}
}
}
}
}
@annotation.tailrec
def execute(runnable: Runnable): Unit = {
if (isShutdown) throw new RejectedExecutionException("Has been shut down")
if (!queue.offer(runnable)) {
execute(runnable)
}
}
def reportFailure(cause: Throwable): Unit = failureReporter(cause)
/**
* Shut down executor, completing jobs already submitted.
* @return Shutdown completion future
*/
def shutdown(): Future[Unit] = {
isShutdown = true
activeThreads.getCount match {
case 0 => Future.unit
case _ =>
Threads.onBlockingThread(s"Awaiting ${classOf[LockFreeExecutionContext].getName} shutdown")(activeThreads.await)
}
}
}
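
// Illustrative sketch (not part of the original file): a typical lifecycle of the
// executor above. The thread factory and failure reporter are arbitrary choices.
private object LockFreeExecutionContextUsageSketch {
  import java.util.concurrent.Executors

  def runOnce(): Future[Unit] = {
    val ec = LockFreeExecutionContext(
      numThreads = 2,
      tf = Executors.defaultThreadFactory(),
      failureReporter = _.printStackTrace())
    ec.execute(new Runnable {
      def run(): Unit = println(s"ran on ${Thread.currentThread.getName}")
    })
    // Only shut down after every producer has stopped submitting work.
    ec.shutdown()
  }
}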
| nilskp/scuff | src/main/scala/scuff/concurrent/LockFreeExecutionContext.scala | Scala | mit | 3,194 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.compression.internal.operators
import java.util.zip.{CRC32, DataFormatException, Inflater}
import java.{util => ju}
import monix.execution.Ack
import monix.execution.Ack.{Continue, Stop}
import monix.reactive.Observable.Operator
import monix.reactive.compression.internal.operators.Gunzipper._
import monix.reactive.compression.{
gzipCompressionMethod,
gzipFlag,
gzipMagicFirstByte,
gzipMagicSecondByte,
CompressionException
}
import monix.reactive.observers.Subscriber
import scala.annotation.tailrec
import scala.concurrent.Future
import scala.util.Success
import scala.util.control.NonFatal
private[compression] final class GunzipOperator(bufferSize: Int) extends Operator[Array[Byte], Array[Byte]] {
def apply(out: Subscriber[Array[Byte]]): Subscriber[Array[Byte]] =
new Subscriber[Array[Byte]] {
implicit val scheduler = out.scheduler
private[this] var isDone = false
private[this] var ack: Future[Ack] = _
private[this] val gunzipper = new Gunzipper(bufferSize)
def onNext(elem: Array[Byte]): Future[Ack] = {
if (isDone) {
Stop
} else {
try {
val result = gunzipper.onChunk(elem)
// signaling downstream
ack = out.onNext(result)
ack
} catch {
case e if NonFatal(e) =>
onError(e)
Stop
}
}
}
def onError(ex: Throwable): Unit =
if (!isDone) {
isDone = true
gunzipper.close()
out.onError(ex)
}
def onComplete(): Unit =
if (!isDone) {
isDone = true
if (ack == null) ack = Continue
ack.syncOnComplete {
case Success(Continue) =>
// Protect against contract violations - we are only allowed to
// call onError if no other terminal event has been called.
var streamErrors = true
try {
val lastArray = gunzipper.finish()
streamErrors = false
out.onNext(lastArray)
out.onComplete()
} catch {
case NonFatal(e) if streamErrors =>
out.onError(e)
} finally {
gunzipper.close()
}
case _ => gunzipper.close()
}
}
}
}
// https://github.com/zio/zio/blob/master/streams/jvm/src/main/scala/zio/stream/compression/Gunzipper.scala
/**
 * Parses the header in a few steps, then decompresses the body and checks the trailer.
 * With a reasonably chosen bufferSize there should not be many array concatenations.
*/
private final class Gunzipper(bufferSize: Int) {
private var state: State =
new ParseHeaderStep(Array.emptyByteArray, new CRC32)
def close(): Unit = state.close()
def onChunk(c: Array[Byte]): Array[Byte] = {
try {
val (newState, output) = state.feed(c)
state = newState
output
} catch {
case e: DataFormatException => throw CompressionException(e)
}
}
def finish(): Array[Byte] =
if (state.isInProgress) {
throw CompressionException("Stream closed before completion.")
} else {
Array.emptyByteArray
}
private def nextStep(
acc: Array[Byte],
checkCrc16: Boolean,
crc32: CRC32,
parseExtra: Boolean,
commentsToSkip: Int
): State =
if (parseExtra) new ParseExtraStep(acc, crc32, checkCrc16, commentsToSkip)
else if (commentsToSkip > 0)
new SkipCommentsStep(checkCrc16, crc32, commentsToSkip)
else if (checkCrc16)
new CheckCrc16Step(Array.emptyByteArray, crc32.getValue())
else new Decompress()
private class ParseHeaderStep(acc: Array[Byte], crc32: CRC32) extends State {
override def feed(chunkBytes: Array[Byte]): (State, Array[Byte]) = {
val bytes = acc ++ chunkBytes
if (bytes.length < fixedHeaderLength)
(new ParseHeaderStep(bytes, crc32), Array.emptyByteArray)
else {
val (header, leftover) = bytes.splitAt(fixedHeaderLength)
crc32.update(header)
if (header(0) != gzipMagicFirstByte || header(1) != gzipMagicSecondByte) {
throw CompressionException("Invalid GZIP header")
} else if (header(2) != gzipCompressionMethod.DEFLATE) {
throw CompressionException(
s"Only deflate (8) compression method is supported, present: ${header(2)}"
)
} else {
val flags = header(3)
val checkCrc16 = gzipFlag.fhcrc(flags)
val hasExtra = gzipFlag.fextra(flags)
val skipFileName = gzipFlag.fname(flags)
val skipFileComment = gzipFlag.fcomment(flags)
val commentsToSkip =
(if (skipFileName) 1 else 0) + (if (skipFileComment) 1 else 0)
nextStep(header, checkCrc16, crc32, hasExtra, commentsToSkip).feed(
leftover
)
}
}
}
override def isInProgress: Boolean = acc.nonEmpty
}
private class ParseExtraStep(
acc: Array[Byte],
crc32: CRC32,
checkCrc16: Boolean,
commentsToSkip: Int
) extends State {
override def feed(chunkBytes: Array[Byte]): (State, Array[Byte]) = {
val bytes = acc ++ chunkBytes
if (bytes.length < 12) {
(
new ParseExtraStep(bytes, crc32, checkCrc16, commentsToSkip),
Array.emptyByteArray
)
} else {
        val xlenLength = 2
        val extraBytes: Int =
          u16(bytes(fixedHeaderLength), bytes(fixedHeaderLength + 1))
        val headerWithExtraLength = fixedHeaderLength + xlenLength + extraBytes
if (bytes.length < headerWithExtraLength)
(
new ParseExtraStep(bytes, crc32, checkCrc16, commentsToSkip),
Array.emptyByteArray
)
else {
val (headerWithExtra, leftover) = bytes.splitAt(headerWithExtraLength)
crc32.update(headerWithExtra.drop(fixedHeaderLength))
nextStep(headerWithExtra, checkCrc16, crc32, false, commentsToSkip)
.feed(leftover)
}
}
}
}
private class SkipCommentsStep(
checkCrc16: Boolean,
crc32: CRC32,
commentsToSkip: Int
) extends State {
override def feed(chunkBytes: Array[Byte]): (State, Array[Byte]) = {
val idx = chunkBytes.indexOf(0)
val (upTo0, leftover) =
if (idx == -1) (chunkBytes, Array.emptyByteArray)
else chunkBytes.splitAt(idx + 1)
crc32.update(upTo0)
nextStep(
Array.emptyByteArray,
checkCrc16,
crc32,
false,
commentsToSkip - 1
).feed(leftover)
}
}
private class CheckCrc16Step(pastCrc16Bytes: Array[Byte], crcValue: Long) extends State {
override def feed(chunkBytes: Array[Byte]): (State, Array[Byte]) = {
val (crc16Bytes, leftover) = (pastCrc16Bytes ++ chunkBytes).splitAt(2)
//Unlikely but possible that chunk was 1 byte only, leftover is empty.
if (crc16Bytes.length < 2) {
(new CheckCrc16Step(crc16Bytes, crcValue), Array.emptyByteArray)
} else {
val computedCrc16 = (crcValue & 0xffffL).toInt
val expectedCrc = u16(crc16Bytes(0), crc16Bytes(1))
if (computedCrc16 != expectedCrc)
throw CompressionException("Invalid header CRC16")
else new Decompress().feed(leftover)
}
}
}
private class Decompress extends State {
private val inflater = new Inflater(true)
private val crc32: CRC32 = new CRC32
private val buffer: Array[Byte] = new Array[Byte](bufferSize)
private def pullOutput(
inflater: Inflater,
buffer: Array[Byte]
): Array[Byte] = {
@tailrec
def next(prev: Array[Byte]): Array[Byte] = {
val read = inflater.inflate(buffer)
val newBytes = ju.Arrays.copyOf(buffer, read)
crc32.update(newBytes)
val pulled = prev ++ newBytes
if (read > 0 && inflater.getRemaining > 0) next(pulled) else pulled
}
if (inflater.needsInput()) Array.emptyByteArray
else next(Array.emptyByteArray)
}
override def close(): Unit = inflater.end()
override def feed(chunkBytes: Array[Byte]): (State, Array[Byte]) = {
inflater.setInput(chunkBytes)
val newChunk = pullOutput(inflater, buffer)
if (inflater.finished()) {
val leftover = chunkBytes.takeRight(inflater.getRemaining())
val (state, nextChunks) =
new CheckTrailerStep(
Array.emptyByteArray,
crc32.getValue(),
inflater.getBytesWritten()
).feed(leftover)
(state, newChunk ++ nextChunks)
} else (this, newChunk)
}
}
private class CheckTrailerStep(
acc: Array[Byte],
expectedCrc32: Long,
expectedIsize: Long
) extends State {
private def readInt(a: Array[Byte]): Int = u32(a(0), a(1), a(2), a(3))
override def feed(chunkBytes: Array[Byte]): (State, Array[Byte]) = {
val bytes = acc ++ chunkBytes
if (bytes.length < 8) {
(
new CheckTrailerStep(
bytes,
expectedCrc32,
expectedIsize
),
Array.emptyByteArray
) // need more input
} else {
val (trailerBytes, leftover) = bytes.splitAt(8)
val crc32 = readInt(trailerBytes.take(4))
val isize = readInt(trailerBytes.drop(4))
if (expectedCrc32.toInt != crc32)
throw CompressionException("Invalid CRC32")
else if (expectedIsize.toInt != isize)
throw CompressionException("Invalid ISIZE")
else
new ParseHeaderStep(Array.emptyByteArray, new CRC32()).feed(leftover)
}
}
}
}
private object Gunzipper {
private val fixedHeaderLength = 10
private sealed trait State {
def close(): Unit = ()
def feed(chunkBytes: Array[Byte]): (State, Array[Byte])
def isInProgress: Boolean = true
}
private def u8(b: Byte): Int = b & 0xff
private def u16(b1: Byte, b2: Byte): Int = u8(b1) | (u8(b2) << 8)
private def u32(b1: Byte, b2: Byte, b3: Byte, b4: Byte) =
u16(b1, b2) | (u16(b3, b4) << 16)
}
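
// Illustrative sketch (not part of the original file): exercising the Gunzipper state
// machine directly via a round trip through java.util.zip.GZIPOutputStream.
private object GunzipperRoundTripSketch {
  import java.io.ByteArrayOutputStream
  import java.util.zip.GZIPOutputStream

  def roundTrip(payload: Array[Byte]): Array[Byte] = {
    val bos = new ByteArrayOutputStream()
    val gzip = new GZIPOutputStream(bos)
    gzip.write(payload)
    gzip.close()

    val gunzipper = new Gunzipper(bufferSize = 64)
    // Everything is fed as a single chunk here; real usage feeds arbitrary chunk sizes.
    val out = gunzipper.onChunk(bos.toByteArray)
    gunzipper.finish() // throws if the stream ended mid-frame
    gunzipper.close()
    out
  }
}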
| monifu/monifu | monix-reactive/jvm/src/main/scala/monix/reactive/compression/internal/operators/GunzipOperator.scala | Scala | apache-2.0 | 10,823 |
package com.twitter.finagle.exp.zookeeper.integration.standalone.v3_5.command
import com.twitter.finagle.exp.zookeeper.ZookeeperDefs.CreateMode
import com.twitter.finagle.exp.zookeeper.data.Ids
import com.twitter.finagle.exp.zookeeper.integration.standalone.StandaloneIntegrationConfig
import com.twitter.util.Await
import org.scalatest.FunSuite
class CheckWatcherTest extends FunSuite with StandaloneIntegrationConfig {
test("Should check a watcher without error") {
newClient()
connect()
Await.result {
for {
exists <- client.get.exists("/hello", true)
check <- client.get.checkWatcher(exists.watcher.get)
} yield check
}
disconnect()
Await.ready(client.get.close())
}
test("Should not check a watcher correctly") {
newClient()
connect()
intercept[Exception] {
Await.result {
for {
exists <- client.get.exists("/hello", true)
create <- client.get.create("/hello", "".getBytes, Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL)
check <- client.get.checkWatcher(exists.watcher.get)
} yield check
}
}
disconnect()
Await.ready(client.get.close())
}
} | finagle/finagle-zookeeper | integration/src/test/scala/com/twitter/finagle/exp/zookeeper/integration/standalone/v3_5/command/CheckWatcherTest.scala | Scala | apache-2.0 | 1,190 |
import sbt._, Keys._
object Dependencies {
private val home = "file://" + Path.userHome.absolutePath
object Resolvers {
val typesafe = "typesafe.com" at "http://repo.typesafe.com/typesafe/releases/"
val sonatype = "sonatype" at "https://oss.sonatype.org/content/repositories/releases"
val sonatypeS = "sonatype snapshots" at "https://oss.sonatype.org/content/repositories/snapshots"
val t2v = "t2v.jp repo" at "http://www.t2v.jp/maven-repo/"
val jgitMaven = "jgit-maven" at "http://download.eclipse.org/jgit/maven"
val awesomepom = "awesomepom" at "https://raw.github.com/jibs/maven-repo-scala/master"
val sprayRepo = "spray repo" at "http://repo.spray.io"
val local = "local repo" at home + "/local-repo"
val roundeights = "RoundEights" at "http://maven.spikemark.net/roundeights"
val prismic = "Prismic.io kits" at "https://s3.amazonaws.com/prismic-maven-kits/repository/maven/"
val commons = Seq(
local,
sonatypeS,
sonatype,
awesomepom,
typesafe,
roundeights,
prismic,
t2v, jgitMaven, sprayRepo)
}
val scalaz = "org.scalaz" %% "scalaz-core" % "7.1.1"
val scalalib = "com.github.ornicar" %% "scalalib" % "5.3"
val config = "com.typesafe" % "config" % "1.3.0-M1"
val apache = "org.apache.commons" % "commons-lang3" % "3.3.2"
val guava = "com.google.guava" % "guava" % "18.0"
val findbugs = "com.google.code.findbugs" % "jsr305" % "2.0.3"
val hasher = "com.roundeights" %% "hasher" % "1.0.0"
val jgit = "org.eclipse.jgit" % "org.eclipse.jgit" % "3.2.0.201312181205-r"
val jodaTime = "joda-time" % "joda-time" % "2.7"
val elastic4s = "com.sksamuel.elastic4s" %% "elastic4s" % "1.3.2"
val RM = "org.reactivemongo" %% "reactivemongo" % "0.10.5.0.akka23"
val PRM = "org.reactivemongo" %% "play2-reactivemongo" % "0.10.5.0.akka23"
val maxmind = "com.sanoma.cda" %% "maxmind-geoip2-scala" % "1.2.3-THIB"
val prismic = "io.prismic" %% "scala-kit" % "1.3.1"
object play {
val version = "2.3.8"
val api = "com.typesafe.play" %% "play" % version
val test = "com.typesafe.play" %% "play-test" % version
}
object spray {
val version = "1.3.2"
val caching = "io.spray" %% "spray-caching" % version
val util = "io.spray" %% "spray-util" % version
}
}
| danilovsergey/i-bur | project/Dependencies.scala | Scala | mit | 2,301 |
package scala.quoted
package staging
import dotty.tools.dotc.CompilationUnit
import dotty.tools.dotc.util.NoSource
/** Compilation unit containing the contents of a quoted expression */
private class ExprCompilationUnit(val exprBuilder: QuoteContext => Expr[_]) extends CompilationUnit(NoSource)
| som-snytt/dotty | staging/src/scala/quoted/staging/ExprCompilationUnit.scala | Scala | apache-2.0 | 298 |
package org.embulk.input.dynamodb.operation
import java.util.Optional
import com.amazonaws.services.dynamodbv2.model.AttributeValue
import com.amazonaws.services.dynamodbv2.AmazonDynamoDB
import org.embulk.config.{
Config,
ConfigDefault,
ConfigException,
Task => EmbulkTask
}
object DynamodbOperationProxy {
trait Task extends EmbulkTask {
@Config("scan")
@ConfigDefault("null")
def getScan: Optional[DynamodbScanOperation.Task]
@Config("query")
@ConfigDefault("null")
def getQuery: Optional[DynamodbQueryOperation.Task]
@Config("table")
def getTable: String
}
def apply(task: Task): DynamodbOperationProxy = {
if (task.getScan.isPresent && task.getQuery.isPresent)
throw new ConfigException("You can use either \"scan\" or \"query\".")
if (!task.getScan.isPresent && !task.getQuery.isPresent)
throw new ConfigException("You must set either \"scan\" or \"query\".")
task.getScan.ifPresent(_.setTableName(task.getTable))
task.getQuery.ifPresent(_.setTableName(task.getTable))
new DynamodbOperationProxy(task)
}
}
case class DynamodbOperationProxy(task: DynamodbOperationProxy.Task)
extends EmbulkDynamodbOperation {
private def getOperation: EmbulkDynamodbOperation = {
task.getScan.ifPresent(t => return DynamodbScanOperation(t))
task.getQuery.ifPresent(t => return DynamodbQueryOperation(t))
throw new IllegalStateException()
}
private val operation: EmbulkDynamodbOperation = getOperation
override def getEmbulkTaskCount: Int = operation.getEmbulkTaskCount
override def run(
dynamodb: AmazonDynamoDB,
embulkTaskIndex: Int,
f: Seq[Map[String, AttributeValue]] => Unit
): Unit = operation.run(dynamodb, embulkTaskIndex, f)
}
| lulichn/embulk-input-dynamodb | src/main/scala/org/embulk/input/dynamodb/operation/DynamodbOperationProxy.scala | Scala | mit | 1,764 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.api.validation
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.table.api.internal.TableEnvironmentInternal
import org.apache.flink.table.api.{EnvironmentSettings, TableSchema, Types, ValidationException}
import org.apache.flink.table.sources._
import org.apache.flink.table.sources.tsextractors.ExistingField
import org.apache.flink.table.sources.wmstrategies.AscendingTimestamps
import org.apache.flink.table.utils.{TableTestBase, TestTableSourceWithTime}
import org.apache.flink.types.Row
import org.junit.Test
import java.util
import java.util.Collections
class TableSourceValidationTest extends TableTestBase {
val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
env.setStreamTimeCharacteristic(TimeCharacteristic.EventTime)
val settings: EnvironmentSettings = EnvironmentSettings.newInstance().useOldPlanner().build()
val tEnv: StreamTableEnvironment = StreamTableEnvironment.create(env, settings)
@Test(expected = classOf[ValidationException])
def testUnresolvedSchemaField(): Unit = {
val schema = new TableSchema(
Array("id", "name", "amount", "value"),
Array(Types.LONG, Types.STRING, Types.INT, Types.DOUBLE))
val rowType = new RowTypeInfo(
Array(Types.LONG, Types.STRING, Types.INT).asInstanceOf[Array[TypeInformation[_]]],
Array("id", "name", "amount"))
val ts = new TestTableSourceWithTime(schema, rowType, Seq[Row]())
// should fail because schema field "value" cannot be resolved in result type
tEnv.asInstanceOf[TableEnvironmentInternal].registerTableSourceInternal("testTable", ts)
}
@Test(expected = classOf[ValidationException])
def testNonMatchingFieldTypes(): Unit = {
val schema = new TableSchema(
Array("id", "name", "amount"),
Array(Types.LONG, Types.INT, Types.INT))
val rowType = new RowTypeInfo(
Array(Types.LONG, Types.STRING, Types.INT).asInstanceOf[Array[TypeInformation[_]]],
Array("id", "name", "amount"))
val ts = new TestTableSourceWithTime(schema, rowType, Seq[Row]())
// should fail because types of "name" fields are different
tEnv.asInstanceOf[TableEnvironmentInternal].registerTableSourceInternal("testTable", ts)
}
@Test(expected = classOf[ValidationException])
def testMappingToUnknownField(): Unit = {
val schema = new TableSchema(
Array("id", "name", "amount"),
Array(Types.LONG, Types.STRING, Types.DOUBLE))
val rowType = new RowTypeInfo(Types.LONG, Types.STRING, Types.DOUBLE)
val mapping = Map("id" -> "f3", "name" -> "f1", "amount" -> "f2")
val ts = new TestTableSourceWithTime(schema, rowType, Seq[Row](), mapping = mapping)
// should fail because mapping maps field "id" to unknown field
tEnv.asInstanceOf[TableEnvironmentInternal].registerTableSourceInternal("testTable", ts)
}
@Test(expected = classOf[ValidationException])
def testMappingWithInvalidFieldType(): Unit = {
val schema = new TableSchema(
Array("id", "name", "amount"),
Array(Types.LONG, Types.STRING, Types.DOUBLE))
val rowType = new RowTypeInfo(Types.LONG, Types.STRING, Types.INT)
val mapping = Map("id" -> "f0", "name" -> "f1", "amount" -> "f2")
val ts = new TestTableSourceWithTime(schema, rowType, Seq[Row](), mapping = mapping)
// should fail because mapping maps fields with different types
tEnv.asInstanceOf[TableEnvironmentInternal].registerTableSourceInternal("testTable", ts)
}
@Test(expected = classOf[ValidationException])
def testNonTimestampProctimeField(): Unit = {
val schema = new TableSchema(
Array("id", "name", "amount", "ptime"),
Array(Types.LONG, Types.STRING, Types.INT, Types.LONG))
val rowType = new RowTypeInfo(
Array(Types.LONG, Types.STRING, Types.INT).asInstanceOf[Array[TypeInformation[_]]],
Array("id", "name", "amount"))
val ts = new TestTableSourceWithTime(schema, rowType, Seq[Row](), proctime = "ptime")
// should fail because processing time field has invalid type
tEnv.asInstanceOf[TableEnvironmentInternal].registerTableSourceInternal("testTable", ts)
}
@Test
def testDefinedRowtimeDoesNotExist(): Unit = {
expectedException.expect(classOf[ValidationException])
expectedException
.expectMessage(
"Found a rowtime attribute for field 'rowtime' but it does not exist in the Table")
val schema = new TableSchema(
Array("id", "name", "amount"),
Array(Types.LONG, Types.STRING, Types.INT))
val rowType = new RowTypeInfo(
Array(Types.LONG, Types.STRING, Types.SQL_TIMESTAMP(), Types.INT)
.asInstanceOf[Array[TypeInformation[_]]],
Array("id", "name", "rowtime", "amount"))
val ts = new TestTableSourceWithTime(schema, rowType, Seq[Row](), rowtime = "rowtime")
// should fail because rowtime field does not exist in the TableSchema
tEnv.asInstanceOf[TableEnvironmentInternal].registerTableSourceInternal("testTable", ts)
}
@Test
def testDefinedProctimeDoesNotExist(): Unit = {
expectedException.expect(classOf[ValidationException])
expectedException
.expectMessage(
"Found a proctime attribute for field 'proctime' but it does not exist in the Table")
val schema = new TableSchema(
Array("id", "name", "amount"),
Array(Types.LONG, Types.STRING, Types.INT))
val rowType = new RowTypeInfo(
Array(Types.LONG, Types.STRING, Types.SQL_TIMESTAMP(), Types.INT)
.asInstanceOf[Array[TypeInformation[_]]],
Array("id", "name", "proctime", "amount"))
val ts = new TestTableSourceWithTime(schema, rowType, Seq[Row](), proctime = "proctime")
// should fail because proctime field does not exist in the TableSchema
tEnv.asInstanceOf[TableEnvironmentInternal].registerTableSourceInternal("testTable", ts)
}
@Test(expected = classOf[ValidationException])
def testNonTimestampRowtimeField(): Unit = {
val schema = new TableSchema(
Array("id", "name", "amount", "rtime"),
Array(Types.LONG, Types.STRING, Types.INT, Types.LONG))
val rowType = new RowTypeInfo(
Array(Types.LONG, Types.STRING, Types.LONG, Types.INT)
.asInstanceOf[Array[TypeInformation[_]]],
Array("id", "name", "rtime", "amount"))
val ts = new TestTableSourceWithTime(schema, rowType, Seq[Row](), rowtime = "rtime")
// should fail because rowtime field has invalid type
tEnv.asInstanceOf[TableEnvironmentInternal].registerTableSourceInternal("testTable", ts)
}
@Test(expected = classOf[ValidationException])
def testFieldRowtimeAndProctime(): Unit = {
val schema = new TableSchema(
Array("id", "name", "amount", "time"),
Array(Types.LONG, Types.STRING, Types.INT, Types.SQL_TIMESTAMP))
val rowType = new RowTypeInfo(
Array(Types.LONG, Types.STRING, Types.LONG, Types.INT)
.asInstanceOf[Array[TypeInformation[_]]],
Array("id", "name", "time", "amount"))
val ts =
new TestTableSourceWithTime(schema, rowType, Seq[Row](), rowtime = "time", proctime = "time")
    // should fail because the same field is used as both rowtime and proctime
tEnv.asInstanceOf[TableEnvironmentInternal].registerTableSourceInternal("testTable", ts)
}
@Test(expected = classOf[ValidationException])
def testUnknownTimestampExtractorArgField(): Unit = {
val schema = new TableSchema(
Array("id", "name", "amount", "rtime"),
Array(Types.LONG, Types.STRING, Types.INT, Types.SQL_TIMESTAMP))
val rowType = new RowTypeInfo(
Array(Types.LONG, Types.STRING, Types.LONG, Types.INT)
.asInstanceOf[Array[TypeInformation[_]]],
Array("id", "name", "rtime", "amount"))
val ts =
new TestTableSourceWithTime(schema, rowType, Seq[Row]()) {
override def getRowtimeAttributeDescriptors: util.List[RowtimeAttributeDescriptor] = {
Collections.singletonList(new RowtimeAttributeDescriptor(
"rtime",
new ExistingField("doesNotExist"),
new AscendingTimestamps))
}
}
// should fail because timestamp extractor argument field does not exist
tEnv.asInstanceOf[TableEnvironmentInternal].registerTableSourceInternal("testTable", ts)
}
@Test(expected = classOf[ValidationException])
def testFailingTimestampExtractorValidation(): Unit = {
val fieldNames = Array("id", "name", "amount")
val rowType = new RowTypeInfo(
Array(Types.LONG, Types.STRING, Types.INT).asInstanceOf[Array[TypeInformation[_]]],
fieldNames)
val schema = new TableSchema(
fieldNames,
Array(Types.LONG, Types.SQL_TIMESTAMP, Types.INT))
val ts = new TestTableSourceWithTime(schema, rowType, Seq[Row](), rowtime = "amount")
// should fail because configured rowtime field is not of type Long or Timestamp
tEnv.asInstanceOf[TableEnvironmentInternal].registerTableSourceInternal("testTable", ts)
}
// CsvTableSource Tests
@Test(expected = classOf[IllegalArgumentException])
def testCsvTableSourceBuilderWithNullPath(): Unit = {
CsvTableSource.builder()
.field("myfield", Types.STRING)
// should fail, path is not defined
.build()
}
@Test(expected = classOf[IllegalArgumentException])
def testCsvTableSourceBuilderWithDuplicateFieldName(): Unit = {
CsvTableSource.builder()
.path("/path/to/csv")
.field("myfield", Types.STRING)
      // should fail, field names must not be duplicated
.field("myfield", Types.INT)
}
@Test(expected = classOf[IllegalArgumentException])
def testCsvTableSourceBuilderWithEmptyField(): Unit = {
CsvTableSource.builder()
.path("/path/to/csv")
      // should fail, fields must not be empty
.build()
}
}
| tzulitai/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/api/validation/TableSourceValidationTest.scala | Scala | apache-2.0 | 10,788 |
/*
elm-scala: an implementation of ELM in Scala using MTJ
Copyright (C) 2014 Davi Pereira dos Santos
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package ml.classifiers
import ml.Pattern
import ml.models.{ELMIncModel, Model}
import ml.mtj.DenseMatrix2
import ml.neural.elm.{ConvergentELM, ELMUtils}
import no.uib.cipr.matrix.{DenseMatrix, DenseVector}
/**
  * build() is resumable, i.e. it internally simulates an incremental model.
*/
trait ConvergentIncremental extends ConvergentELM {
def update(model: Model, fastAndCheap: Boolean = false, semcrescer: Boolean = false)(pattern: Pattern) = {
val m = cast(model)
val Alfat = m.Alfat
val biases = m.biases
val P0 = m.P
val rnd = m.rnd
val Beta0 = m.Beta //LxO
val x = pattern.arraymtj
val (h, hm) = ELMUtils.feedHiddenv(x, Alfat, biases) //h: Lx1; H: NxL
val L = h.size()
val O = Beta0.numColumns()
val y = pattern.weighted_label_array //y: Ox1; Y: LxO
val ym = new DenseMatrix2(y)
ym.resize(1, O)
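    // Rank-one recursive least-squares (OS-ELM) update computed below
    // (exact on the default, non-fastAndCheap path):
    //   P1    = P0 - (P0 h h^T P0) / (1 + h^T P0 h)
    //   Beta1 = Beta0 + P1 h (y^T - h^T Beta0)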
//P1
val tmpLx1 = new DenseVector(P0.numRows())
val tmpLx1m = new DenseMatrix(tmpLx1, false)
P0.mult(h, tmpLx1) //Lx1
val tmp = h.dot(tmpLx1)
val factor = -1 / (1 + tmp)
val P0hht = new DenseMatrix(L, L)
tmpLx1m.mult(hm, P0hht) //LxL
val deltaP = new DenseMatrix(L, L)
P0hht.mult(P0, deltaP) //LxL
deltaP.scale(factor)
val P1 = if (fastAndCheap) {
P0.add(deltaP)
P0
} else {
deltaP.add(P0)
deltaP
}
//Beta1
val parens = new DenseMatrix(1, O)
hm.mult(Beta0, parens) //1xO
parens.scale(-1)
parens.add(ym)
deltaP.mult(h, tmpLx1)
val tmpLxO = new DenseMatrix(L, O)
tmpLx1m.mult(parens, tmpLxO)
val Beta1 = if (fastAndCheap) {
Beta0.add(tmpLxO)
Beta0
} else {
tmpLxO.add(Beta0)
tmpLxO
}
//All of this is useless for OS-only ELM
val newXt = new DenseMatrix(m.Xt.numRows(), m.Xt.numColumns() + 1)
System.arraycopy(m.Xt.getData, 0, newXt.getData, 0, m.Xt.getData.size)
System.arraycopy(x.getData, 0, newXt.getData, m.Xt.getData.size, x.getData.size)
val newY = new DenseMatrix(m.Y.numRows() + 1, m.Y.numColumns())
var i = 0
while (i < m.Y.numRows()) {
var j = 0
while (j < m.Y.numColumns()) {
newY.set(i, j, m.Y.get(i, j))
j += 1
}
i += 1
}
var j = 0
while (j < m.Y.numColumns()) {
newY.set(i, j, y(j))
j += 1
}
ELMIncModel(rnd, Alfat, biases, Beta1, P1, m.N + 1, newXt, newY)
}
// def updateAll(model: Model, fastAndCheap: Boolean = false)(patterns: Seq[Pattern]) = patterns.foldLeft(model)((m, p) => update(m, fastAndCheap = true)(p))
} | Crespo911/elm-scala | src/main/scala/ml/classifiers/ConvergentIncremental.scala | Scala | gpl-3.0 | 3,449 |
package io.iohk.ethereum.faucet.jsonrpc
import io.iohk.ethereum.faucet.jsonrpc.FaucetDomain.{SendFundsRequest, SendFundsResponse, StatusRequest, StatusResponse}
import io.iohk.ethereum.jsonrpc.JsonMethodsImplicits
import io.iohk.ethereum.jsonrpc.JsonRpcError.InvalidParams
import io.iohk.ethereum.jsonrpc.serialization.JsonMethodDecoder.NoParamsMethodDecoder
import io.iohk.ethereum.jsonrpc.serialization.{JsonEncoder, JsonMethodDecoder}
import org.json4s.JsonAST.{JArray, JObject, JString}
object FaucetMethodsImplicits extends JsonMethodsImplicits {
implicit val sendFundsRequestDecoder: JsonMethodDecoder[SendFundsRequest] = {
case Some(JArray((input: JString) :: Nil)) => extractAddress(input).map(SendFundsRequest)
case _ => Left(InvalidParams())
}
implicit val sendFundsResponseEncoder: JsonEncoder[SendFundsResponse] = (t: SendFundsResponse) => encodeAsHex(t.txId)
implicit val statusRequestDecoder: JsonMethodDecoder[StatusRequest] = new NoParamsMethodDecoder(StatusRequest())
implicit val statusEncoder: JsonEncoder[StatusResponse] = (t: StatusResponse) =>
JObject(
"status" -> JString(t.status.toString)
)
}
| input-output-hk/etc-client | src/main/scala/io/iohk/ethereum/faucet/jsonrpc/FaucetMethodsImplicits.scala | Scala | mit | 1,156 |
/*
* Copyright (C) 2009-2017 Lightbend Inc. <https://www.lightbend.com>
*/
//###replace: package tasks
package scalaguide.scheduling
import play.api.ApplicationLoader.Context
import play.api.routing.Router
import play.api.{BuiltInComponentsFromContext, NoHttpFiltersComponents}
class MyBuiltInComponentsFromContext(context: Context)
extends BuiltInComponentsFromContext(context)
with NoHttpFiltersComponents {
override def router: Router = Router.empty
  // The task is initialized here
initialize()
private def initialize(): Unit = {
new CodeBlockTask(actorSystem)
}
}
| Shruti9520/playframework | documentation/manual/working/commonGuide/schedule/code/scalaguide/scheduling/MyBuiltInComponentsFromContext.scala | Scala | apache-2.0 | 590 |
object Test extends App {
import Macros._
println("2".toOptionOfInt)
} | som-snytt/dotty | tests/disabled/macro/run/macro-term-declared-in-implicit-class/Test_2.scala | Scala | apache-2.0 | 74 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.mxnet
import org.scalatest.{BeforeAndAfterAll, FunSuite}
class KVStoreSuite extends FunSuite with BeforeAndAfterAll {
test("init and pull") {
val kv = KVStore.create()
val shape = Shape(2, 1)
val ndArray = NDArray.zeros(shape)
kv.init("3", NDArray.ones(shape))
kv.pull("3", ndArray)
assert(ndArray.toArray === Array(1f, 1f))
}
test("push and pull") {
val kv = KVStore.create()
val shape = Shape(2, 1)
val ndArray = NDArray.zeros(shape)
kv.init("3", NDArray.ones(shape))
kv.push("3", NDArray.ones(shape) * 4)
kv.pull("3", ndArray)
assert(ndArray.toArray === Array(4f, 4f))
}
test("test aggregate") {
val shape = Shape(4, 4)
val keys = Array("b", "c", "d")
val kv = KVStore.create()
kv.init("a", NDArray.zeros(shape))
kv.init(keys, Array.fill(keys.length)(NDArray.zeros(shape)))
val numDevs = 4
val devs = (0 until numDevs).map(Context.cpu(_))
val vals = devs.map(d => NDArray.ones(shape, d)).toArray
kv.push("a", vals)
kv.pull("a", outs = vals)
assert(vals.map(v => v.toArray.map(x => x - numDevs).sum).sum == 0f)
val valss = keys.map { k =>
val tmpVals = devs.map(d => NDArray.ones(shape, d) * 2f).toArray
kv.push(k, tmpVals)
kv.pull(k, outs = tmpVals)
tmpVals
}.flatten
assert(valss.map(v => v.toArray.map(x => x - numDevs * 2f).sum).sum == 0f)
}
test("updater runs when push") {
val kv = KVStore.create()
val updater = new MXKVStoreUpdater {
override def update(key: Int, input: NDArray, stored: NDArray): Unit = {
stored += input * 2
}
override def dispose(): Unit = {}
}
kv.setUpdater(updater)
val shape = Shape(2, 1)
val ndArray = NDArray.zeros(shape)
kv.init("3", NDArray.ones(shape) * 4)
kv.pull("3", ndArray)
assert(ndArray.toArray === Array(4f, 4f))
kv.push("3", NDArray.ones(shape))
kv.pull("3", ndArray)
assert(ndArray.toArray === Array(6f, 6f))
}
test("get type") {
val kv = KVStore.create("local")
assert(kv.`type` === "local")
}
test("get numWorkers and rank") {
val kv = KVStore.create("local")
assert(kv.numWorkers === 1)
assert(kv.rank === 0)
}
}
| eric-haibin-lin/mxnet | scala-package/core/src/test/scala/org/apache/mxnet/KVStoreSuite.scala | Scala | apache-2.0 | 3,048 |
package model
import play.api.libs.json._
/**
* Represents the Swagger definition for ClassesByClass.
* @param additionalProperties Any additional properties this model may have.
*/
@javax.annotation.Generated(value = Array("org.openapitools.codegen.languages.ScalaPlayFrameworkServerCodegen"), date = "2022-02-13T02:38:35.589632Z[Etc/UTC]")
case class ClassesByClass(
classes: Option[List[String]],
  `class`: Option[String],
  additionalProperties: JsObject
)
object ClassesByClass {
implicit lazy val classesByClassJsonFormat: Format[ClassesByClass] = {
val realJsonFormat = Json.format[ClassesByClass]
val declaredPropNames = Set("classes", "`class`")
Format(
Reads {
case JsObject(xs) =>
val declaredProps = xs.filterKeys(declaredPropNames)
val additionalProps = JsObject(xs -- declaredPropNames)
val restructuredProps = declaredProps + ("additionalProperties" -> additionalProps)
val newObj = JsObject(restructuredProps)
realJsonFormat.reads(newObj)
case _ =>
JsError("error.expected.jsobject")
},
Writes { classesByClass =>
val jsObj = realJsonFormat.writes(classesByClass)
val additionalProps = jsObj.value("additionalProperties").as[JsObject]
val declaredProps = jsObj - "additionalProperties"
val newObj = declaredProps ++ additionalProps
newObj
}
)
}
}
| cliffano/swaggy-jenkins | clients/scala-play-server/generated/app/model/ClassesByClass.scala | Scala | mit | 1,432 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.accumulo.data
import java.util
import com.typesafe.scalalogging.LazyLogging
import org.apache.accumulo.core.client.mock.MockInstance
import org.apache.accumulo.core.client.security.tokens.PasswordToken
import org.geotools.data.DataStoreFinder
import org.geotools.data.simple.SimpleFeatureSource
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.accumulo.index.{RecordIndex, XZ2Index}
import org.locationtech.geomesa.accumulo.iterators.TestData
import org.locationtech.geomesa.accumulo.iterators.TestData._
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.opengis.filter._
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConversions._
@RunWith(classOf[JUnitRunner])
class TableSharingTest extends Specification with LazyLogging {
sequential
val tableName = "sharingTest"
val ds = {
import AccumuloDataStoreParams._
DataStoreFinder.getDataStore(Map(
InstanceIdParam.key -> "mycloud",
ZookeepersParam.key -> "zoo1:2181,zoo2:2181,zoo3:2181",
UserParam.key -> "myuser",
PasswordParam.key -> "mypassword",
AuthsParam.key -> "A,B,C",
CatalogParam.key -> tableName,
MockParam.key -> "true")).asInstanceOf[AccumuloDataStore]
}
// Check existence of tables?
val mockInstance = new MockInstance("mycloud")
val c = mockInstance.getConnector("myuser", new PasswordToken("mypassword".getBytes("UTF8")))
// Three datasets. Each with a common field: attr2?
val sft1 = TestData.getFeatureType("1", tableSharing = true)
val sft2 = TestData.getFeatureType("2", tableSharing = true)
val sft3 = TestData.getFeatureType("3", tableSharing = false)
// Load up data
val mediumData1 = mediumData.map(createSF(_, sft1))
val mediumData2 = mediumData.map(createSF(_, sft2))
val mediumData3 = mediumData.map(createSF(_, sft3))
val fs1 = getFeatureStore(ds, sft1, mediumData1)
val fs2 = getFeatureStore(ds, sft2, mediumData2)
val fs3 = getFeatureStore(ds, sft3, mediumData3)
// TODO: Add tests to check if the correct tables exist and if the metadata is all correct.
// Check the sft's indexschema
val retrievedSFT1 = ds.getSchema(sft1.getTypeName)
val list2: util.SortedSet[String] = c.tableOperations().list
// At least three queries: st, attr, id.
def filterCount(f: Filter) = mediumData1.count(f.evaluate)
// note: size returns an estimated amount, instead we need to actually count the features
def queryCount(f: Filter, fs: SimpleFeatureSource) = SelfClosingIterator(fs.getFeatures(f)).length
val id = "IN(100001, 100011)"
val st = "INTERSECTS(geom, POLYGON ((41 28, 42 28, 42 29, 41 29, 41 28)))"
val at = "attr2 = '2nd100001'"
// This function compares the number of returned results.
def compare(fs: String, step: Int, featureStore2: SimpleFeatureSource = fs2) = {
val f = ECQL.toFilter(fs)
val fc = filterCount(f)
val q1 = queryCount(f, fs1)
val q3 = queryCount(f, fs3)
step match {
case 1 => check(q3)
case 2 =>
case 3 => check(0) // Feature source #2 should be empty
case 4 => check(q3)
}
// Checks feature source #2's query count against the input.
def check(count: Int) = {
val q2 = queryCount(f, featureStore2)
s"fs2 must get $count results from filter $fs" >> {
q2 mustEqual count
}
}
s"fc and fs1 get the same results from filter $fs" >> { fc mustEqual q1 }
s"fs1 and fs3 get the same results from filter $fs" >> { q1 mustEqual q3 }
}
"all three queries" should {
"work for all three features (after setup) " >> {
compare(id, 1)
compare(st, 1)
compare(at, 1)
}
}
// Delete one shared table feature to ensure that deleteSchema works.
s"Removing ${sft2.getTypeName}" should {
val sft2Scanner = ds.connector.createScanner(XZ2Index.getTableName(sft2.getTypeName, ds), ds.auths)
val sft2RecordScanner = ds.connector.createScanner(RecordIndex.getTableName(sft2.getTypeName, ds), ds.auths)
ds.removeSchema(sft2.getTypeName)
// TODO: Add tests to measure what tables exist, etc.
// TODO: test ds.getNames.
// TODO: Observe that this kind of collection is empty.
sft2Scanner.setRange(new org.apache.accumulo.core.data.Range())
sft2Scanner.iterator
.map(e => s"ST Key: ${e.getKey}")
.filter(_.contains("feature2"))
.take(10)
.foreach(s => logger.debug(s))
sft2RecordScanner.setRange(new org.apache.accumulo.core.data.Range())
sft2RecordScanner.iterator.take(10).foreach { e => logger.debug(s"Record Key: ${e.getKey}") }
s"result in FeatureStore named ${sft2.getTypeName} being gone" >> {
ds.getNames.contains(sft2.getTypeName) must beFalse
}
}
// Query again.
"all three queries" should {
"work for all three features (after delete) " >> {
compare(id, 2)
compare(st, 2)
compare(at, 2)
}
}
// Query again after recreating just the SFT for feature source 2.
"all three queries" should {
ds.createSchema(sft2)
val newfs2 = ds.getFeatureSource(sft2.getTypeName)
"work for all three features (after recreating the schema for SFT2) " >> {
compare(id, 3, newfs2)
compare(st, 3, newfs2)
compare(at, 3, newfs2)
}
}
// Query again after reingesting a feature source #2.
"all three queries" should {
val fs2ReIngested = getFeatureStore(ds, sft2, mediumData2)
"work for all three features (after re-ingest) " >> {
compare(id, 4, featureStore2 = fs2ReIngested)
compare(st, 4, featureStore2 = fs2ReIngested)
compare(at, 4, featureStore2 = fs2ReIngested)
}
}
}
| jahhulbert-ccri/geomesa | geomesa-accumulo/geomesa-accumulo-datastore/src/test/scala/org/locationtech/geomesa/accumulo/data/TableSharingTest.scala | Scala | apache-2.0 | 6,253 |
package org.mybatis.caches.rediscala.serializer
import org.xerial.snappy.Snappy
private[rediscala] object SnappyRedisSerializer {
def apply[T](inner: RedisSerializer[T] = FstRedisSerializer[T]()): SnappyRedisSerializer[T] = {
new SnappyRedisSerializer[T](inner)
}
}
/**
 * Serializes an object and then compresses the result using the Snappy compression algorithm.
* Created by debop on 2014. 3. 14.
*/
private[rediscala] class SnappyRedisSerializer[T](val innerSerializer: RedisSerializer[T]) extends RedisSerializer[T] {
override def serialize(graph: T): Array[Byte] = {
if (graph == null)
return EMPTY_BYTES
Snappy.compress(innerSerializer.serialize(graph))
}
override def deserialize(bytes: Array[Byte]): T = {
if (bytes == null || bytes.length == 0)
return null.asInstanceOf[T]
innerSerializer.deserialize(Snappy.uncompress(bytes))
}
}
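
// Illustrative sketch (not part of the original file): a round trip through the
// Snappy-wrapped serializer, using the default inner serializer.
private[rediscala] object SnappyRedisSerializerSketch {
  def roundTrip(value: String): String = {
    val serializer = SnappyRedisSerializer[String]()
    serializer.deserialize(serializer.serialize(value))
  }
}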
| debop/mybatis-redis | src/main/scala/org/mybatis/caches/rediscala/serializer/SnappyRedisSerializer.scala | Scala | apache-2.0 | 948 |
package epic.features
import epic.framework.Feature
/**
*
* @author dlwh
*/
@SerialVersionUID(1L)
case class MultiSurfaceFeaturizer[W](feats: IndexedSeq[SurfaceFeaturizer[W]]) extends SurfaceFeaturizer[W] with Serializable {
def this(feats: SurfaceFeaturizer[W]*) = this(feats.toArray)
def anchor(w: IndexedSeq[W]): SurfaceFeatureAnchoring[W] = new SurfaceFeatureAnchoring[W] {
val anchs = feats.map(_.anchor(w)).toArray
def words: IndexedSeq[W] = w
def featuresForSpan(beg: Int, end: Int): Array[Feature] = anchs.flatMap(_.featuresForSpan(beg, end))
}
}
| maxim-rabinovich/epic | src/main/scala/epic/features/MultiSurfaceFeaturizer.scala | Scala | apache-2.0 | 580 |
import actors._
import com.google.inject.AbstractModule
import play.api.libs.concurrent.AkkaGuiceSupport
class Module extends AbstractModule with AkkaGuiceSupport {
override def configure(): Unit = {
bindActor[StocksActor]("stocksActor")
bindActor[UserParentActor]("userParentActor")
bindActorFactory[UserActor, UserActor.Factory]
}
} | BijanVan/play-scala-websocket-demo | app/Module.scala | Scala | mit | 351 |
import scala.collection.mutable.HashMap
object AssertionError extends AnyRef with App
{
abstract class A {}
object A1 extends A {}
object A2 extends A {}
class Manager
{
final class B {}
val map = new HashMap[A, B]
}
def test[T](f: => T): Unit = { f }
test {
val manager = new Manager
// This line is illegal and causes a compiler crash with Scala 2.3.1
assert(manager.map(A2) == List(manager.map(A2, A1)))
}
}
| lampepfl/dotty | tests/untried/neg/t876.scala | Scala | apache-2.0 | 497 |
package dazzle.waffle.adapter
import java.io.InputStream
import java.nio.file.Path
import scala.util.Try
trait Adapter {
/**
* Reads the content of the file
*
* @param key key
* @return input stream
*/
def read(key: String): Try[InputStream]
/**
* Writes the given content into the file
*
* @param key key
* @param content an instance of path
*/
def write(key: String, content: Path): Try[Long]
/**
* Writes the given content into the file
*
* @param key key
* @param content an instance of input stream
*/
def write(key: String, content: InputStream, length: Long): Try[Long]
/**
* Deletes the file
*
* @param key key
   * @return unit wrapped in Try on success
*/
def delete(key: String): Try[Unit]
/**
* Moves the file
*
* @param sourceKey source file path
* @param targetKey target file path
   * @return unit wrapped in Try on success
*/
def move(sourceKey: String, targetKey: String): Try[Unit]
/**
   * Gets the last modified time
   *
   * @param key key
   * @return last modified time in milliseconds since the epoch
*/
def mtime(key: String): Try[Long]
/**
* Indicates whether the file exists
*
* @param key file path
* @return boolean
*/
def exists(key: String): Boolean
}
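
// Illustrative sketch (not part of the original file): a minimal in-memory adapter
// implementing the trait above, e.g. for tests. Not thread-safe.
class InMemoryAdapterSketch extends Adapter {
  import java.io.{ByteArrayInputStream, EOFException, FileNotFoundException}
  import java.nio.file.Files
  import scala.collection.mutable
  import scala.util.{Failure, Success}

  private case class Entry(bytes: Array[Byte], mtime: Long)
  private val entries = mutable.Map.empty[String, Entry]

  override def read(key: String): Try[InputStream] =
    entries.get(key) match {
      case Some(e) => Success(new ByteArrayInputStream(e.bytes))
      case None    => Failure(new FileNotFoundException(key))
    }

  override def write(key: String, content: Path): Try[Long] = Try {
    val bytes = Files.readAllBytes(content)
    entries(key) = Entry(bytes, System.currentTimeMillis())
    bytes.length.toLong
  }

  override def write(key: String, content: InputStream, length: Long): Try[Long] = Try {
    val bytes = new Array[Byte](length.toInt)
    var off = 0
    // Simplified: assumes the stream delivers exactly `length` bytes.
    while (off < bytes.length) {
      val n = content.read(bytes, off, bytes.length - off)
      if (n < 0) throw new EOFException(key)
      off += n
    }
    entries(key) = Entry(bytes, System.currentTimeMillis())
    length
  }

  override def delete(key: String): Try[Unit] = Try {
    entries.remove(key)
    ()
  }

  override def move(sourceKey: String, targetKey: String): Try[Unit] = Try {
    val entry = entries.remove(sourceKey).getOrElse(throw new FileNotFoundException(sourceKey))
    entries(targetKey) = entry
  }

  override def mtime(key: String): Try[Long] =
    entries.get(key) match {
      case Some(e) => Success(e.mtime)
      case None    => Failure(new FileNotFoundException(key))
    }

  override def exists(key: String): Boolean = entries.contains(key)
}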
| dazzle-lab/waffle | src/main/scala/dazzle/waffle/adapter/Adapter.scala | Scala | mit | 1,216 |
import sbt._
import Keys._
import play.Project._
import scala.Some
object ApplicationBuild extends Build {
val appName = fromEnv("project.artifactId").getOrElse("play-maven-test")
val appVersion = fromEnv("project.version").getOrElse("1.0-SNAPSHOT")
val appDependencies = Seq.empty
val main = play.Project(appName, appVersion, appDependencies).settings(defaultJavaSettings:_*).settings(
// Source folders
// sourceDirectory in Compile <<= baseDirectory / "src/main/java",
// sourceDirectory in Test <<= baseDirectory / "src/test/java",
//
// scalaSource in Compile <<= baseDirectory / "src/main/scala",
// scalaSource in Test <<= baseDirectory / "src/test/scala",
//
// javaSource in Compile <<= baseDirectory / "src/main/java",
// javaSource in Test <<= baseDirectory / "src/test/java",
//
// confDirectory <<= baseDirectory / "src/main/conf",
// resourceDirectory in Compile <<= baseDirectory / "src/main/conf",
// resolvers ++= Seq(
// Resolver.url("sbt-plugin-releases", new URL("http://repo.scala-sbt.org/scalasbt/sbt-plugin-releases"))(Resolver.ivyStylePatterns))
)
private def fromEnv(name: String) = System.getenv(name) match {
case null => None
case value => Some(value)
}
} | cpcundill/nanoko-maven-play2-plugin-test | project/Build.scala | Scala | apache-2.0 | 1,286 |
package com.atomist.rug.runtime.lang.js
import javax.script.ScriptEngineManager
import jdk.nashorn.api.scripting.{JSObject, ScriptObjectMirror}
import org.scalatest.{FlatSpec, Matchers}
object NashornConstructorTest {
val SimpleJavascriptEditor: String =
"""
|"use strict";
|var RugOperation_1 = require('@atomist/rug/operations/RugOperation');
|var SimpleEditor = (function () {
| function SimpleEditor() {
| this.__kind = "editor"
| this.__name = "Simple";
| this.__description = "My simple editor";
| this.__parameters = [{"name": "content", "description": "desc", "pattern": "@any"}]
| }
| SimpleEditor.prototype.edit = function (project) {
| project.addFile("src/from/typescript", "Anders Hjelsberg is God");
| };
| return SimpleEditor;
|}());
|var editor = new SimpleEditor();
|exports.editor = editor;
|""".stripMargin
}
class NashornConstructorTest extends FlatSpec with Matchers {
private val engine = new ScriptEngineManager(null).getEngineByName("nashorn")
it should "inject a constructor param" in {
val withConstructor =
"""var ConstructedEditor = (function () {
| function ConstructedEditor(eng) {
| //print("cons:" + eng);
| this._eng = eng;
| }
| ConstructedEditor.prototype.edit = function () {
| //print("blah:" + this._eng);
| return this._eng.split(".");
| };
| return ConstructedEditor;
|}());
|
|""".stripMargin
engine.eval(withConstructor)
val eObj = engine.eval("ConstructedEditor").asInstanceOf[JSObject]
val newEditor = eObj.newObject("blah")
engine.put("X", newEditor)
engine.get("X").asInstanceOf[ScriptObjectMirror].callMember("edit")
}
}
| atomist/rug | src/test/scala/com/atomist/rug/runtime/lang/js/NashornConstructorTest.scala | Scala | gpl-3.0 | 1,908 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.util.concurrent.{CountDownLatch, TimeUnit}
import scala.util.Random
import com.google.common.util.concurrent.Uninterruptibles
import org.apache.spark.SparkFunSuite
class UninterruptibleThreadSuite extends SparkFunSuite {
/** Sleep millis and return true if it's interrupted */
private def sleep(millis: Long): Boolean = {
try {
Thread.sleep(millis)
false
} catch {
case _: InterruptedException =>
true
}
}
test("interrupt when runUninterruptibly is running") {
val enterRunUninterruptibly = new CountDownLatch(1)
@volatile var hasInterruptedException = false
@volatile var interruptStatusBeforeExit = false
val t = new UninterruptibleThread("test") {
override def run(): Unit = {
runUninterruptibly {
enterRunUninterruptibly.countDown()
hasInterruptedException = sleep(1000)
}
interruptStatusBeforeExit = Thread.interrupted()
}
}
t.start()
assert(enterRunUninterruptibly.await(10, TimeUnit.SECONDS), "await timeout")
t.interrupt()
t.join()
assert(hasInterruptedException === false)
assert(interruptStatusBeforeExit === true)
}
test("interrupt before runUninterruptibly runs") {
val interruptLatch = new CountDownLatch(1)
@volatile var hasInterruptedException = false
@volatile var interruptStatusBeforeExit = false
val t = new UninterruptibleThread("test") {
override def run(): Unit = {
Uninterruptibles.awaitUninterruptibly(interruptLatch, 10, TimeUnit.SECONDS)
try {
runUninterruptibly {
assert(false, "Should not reach here")
}
} catch {
case _: InterruptedException => hasInterruptedException = true
}
interruptStatusBeforeExit = Thread.interrupted()
}
}
t.start()
t.interrupt()
interruptLatch.countDown()
t.join()
assert(hasInterruptedException === true)
assert(interruptStatusBeforeExit === false)
}
test("nested runUninterruptibly") {
val enterRunUninterruptibly = new CountDownLatch(1)
val interruptLatch = new CountDownLatch(1)
@volatile var hasInterruptedException = false
@volatile var interruptStatusBeforeExit = false
val t = new UninterruptibleThread("test") {
override def run(): Unit = {
runUninterruptibly {
enterRunUninterruptibly.countDown()
Uninterruptibles.awaitUninterruptibly(interruptLatch, 10, TimeUnit.SECONDS)
hasInterruptedException = sleep(1)
runUninterruptibly {
if (sleep(1)) {
hasInterruptedException = true
}
}
if (sleep(1)) {
hasInterruptedException = true
}
}
interruptStatusBeforeExit = Thread.interrupted()
}
}
t.start()
assert(enterRunUninterruptibly.await(10, TimeUnit.SECONDS), "await timeout")
t.interrupt()
interruptLatch.countDown()
t.join()
assert(hasInterruptedException === false)
assert(interruptStatusBeforeExit === true)
}
test("stress test") {
@volatile var hasInterruptedException = false
val t = new UninterruptibleThread("test") {
override def run(): Unit = {
for (i <- 0 until 100) {
try {
runUninterruptibly {
if (sleep(Random.nextInt(10))) {
hasInterruptedException = true
}
runUninterruptibly {
if (sleep(Random.nextInt(10))) {
hasInterruptedException = true
}
}
if (sleep(Random.nextInt(10))) {
hasInterruptedException = true
}
}
Uninterruptibles.sleepUninterruptibly(Random.nextInt(10), TimeUnit.MILLISECONDS)
// 50% chance to clear the interrupted status
if (Random.nextBoolean()) {
Thread.interrupted()
}
} catch {
case _: InterruptedException =>
// The first runUninterruptibly may throw InterruptedException if the interrupt status
// is set before running `f`.
}
}
}
}
t.start()
for (i <- 0 until 400) {
Thread.sleep(Random.nextInt(10))
t.interrupt()
}
t.join()
assert(hasInterruptedException === false)
}
}
| bOOm-X/spark | core/src/test/scala/org/apache/spark/util/UninterruptibleThreadSuite.scala | Scala | apache-2.0 | 5,227 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.clustering
import java.util.Random
import scala.annotation.tailrec
import scala.collection.mutable
import org.apache.spark.annotation.Since
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.internal.Logging
import org.apache.spark.mllib.linalg.{BLAS, Vector, Vectors}
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
/**
* A bisecting k-means algorithm based on the paper "A comparison of document clustering techniques"
* by Steinbach, Karypis, and Kumar, with modification to fit Spark.
* The algorithm starts from a single cluster that contains all points.
* Iteratively it finds divisible clusters on the bottom level and bisects each of them using
* k-means, until there are `k` leaf clusters in total or no leaf clusters are divisible.
* The bisecting steps of clusters on the same level are grouped together to increase parallelism.
 * If bisecting all divisible clusters on the bottom level would result in more than `k` leaf clusters,
* larger clusters get higher priority.
*
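 * An illustrative usage sketch (not from the original source); `trainingData`
 * is an assumed `RDD[Vector]` of input points:
 * {{{
 *   val bkm = new BisectingKMeans()
 *     .setK(4)
 *     .setMaxIterations(20)
 *   val model = bkm.run(trainingData)
 * }}}
 *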
* @param k the desired number of leaf clusters (default: 4). The actual number could be smaller if
* there are no divisible leaf clusters.
* @param maxIterations the max number of k-means iterations to split clusters (default: 20)
* @param minDivisibleClusterSize the minimum number of points (if greater than or equal 1.0) or
* the minimum proportion of points (if less than 1.0) of a divisible
* cluster (default: 1)
* @param seed a random seed (default: hash value of the class name)
*
* @see <a href="http://glaros.dtc.umn.edu/gkhome/fetch/papers/docclusterKDDTMW00.pdf">
* Steinbach, Karypis, and Kumar, A comparison of document clustering techniques,
* KDD Workshop on Text Mining, 2000.</a>
*/
@Since("1.6.0")
class BisectingKMeans private (
private var k: Int,
private var maxIterations: Int,
private var minDivisibleClusterSize: Double,
private var seed: Long) extends Logging {
import BisectingKMeans._
/**
* Constructs with the default configuration
*/
@Since("1.6.0")
def this() = this(4, 20, 1.0, classOf[BisectingKMeans].getName.##)
/**
* Sets the desired number of leaf clusters (default: 4).
* The actual number could be smaller if there are no divisible leaf clusters.
*/
@Since("1.6.0")
def setK(k: Int): this.type = {
require(k > 0, s"k must be positive but got $k.")
this.k = k
this
}
/**
* Gets the desired number of leaf clusters.
*/
@Since("1.6.0")
def getK: Int = this.k
/**
* Sets the max number of k-means iterations to split clusters (default: 20).
*/
@Since("1.6.0")
def setMaxIterations(maxIterations: Int): this.type = {
require(maxIterations > 0, s"maxIterations must be positive but got $maxIterations.")
this.maxIterations = maxIterations
this
}
/**
* Gets the max number of k-means iterations to split clusters.
*/
@Since("1.6.0")
def getMaxIterations: Int = this.maxIterations
/**
* Sets the minimum number of points (if greater than or equal to `1.0`) or the minimum proportion
* of points (if less than `1.0`) of a divisible cluster (default: 1).
*/
@Since("1.6.0")
def setMinDivisibleClusterSize(minDivisibleClusterSize: Double): this.type = {
require(minDivisibleClusterSize > 0.0,
s"minDivisibleClusterSize must be positive but got $minDivisibleClusterSize.")
this.minDivisibleClusterSize = minDivisibleClusterSize
this
}
/**
* Gets the minimum number of points (if greater than or equal to `1.0`) or the minimum proportion
* of points (if less than `1.0`) of a divisible cluster.
*/
@Since("1.6.0")
def getMinDivisibleClusterSize: Double = minDivisibleClusterSize
/**
* Sets the random seed (default: hash value of the class name).
*/
@Since("1.6.0")
def setSeed(seed: Long): this.type = {
this.seed = seed
this
}
/**
* Gets the random seed.
*/
@Since("1.6.0")
def getSeed: Long = this.seed
/**
* Runs the bisecting k-means algorithm.
* @param input RDD of vectors
* @return model for the bisecting kmeans
*/
@Since("1.6.0")
def run(input: RDD[Vector]): BisectingKMeansModel = {
if (input.getStorageLevel == StorageLevel.NONE) {
logWarning(s"The input RDD ${input.id} is not directly cached, which may hurt performance if"
+ " its parent RDDs are also not cached.")
}
val d = input.map(_.size).first()
logInfo(s"Feature dimension: $d.")
// Compute and cache vector norms for fast distance computation.
val norms = input.map(v => Vectors.norm(v, 2.0)).persist(StorageLevel.MEMORY_AND_DISK)
val vectors = input.zip(norms).map { case (x, norm) => new VectorWithNorm(x, norm) }
var assignments = vectors.map(v => (ROOT_INDEX, v))
var activeClusters = summarize(d, assignments)
val rootSummary = activeClusters(ROOT_INDEX)
val n = rootSummary.size
logInfo(s"Number of points: $n.")
logInfo(s"Initial cost: ${rootSummary.cost}.")
val minSize = if (minDivisibleClusterSize >= 1.0) {
math.ceil(minDivisibleClusterSize).toLong
} else {
math.ceil(minDivisibleClusterSize * n).toLong
}
logInfo(s"The minimum number of points of a divisible cluster is $minSize.")
var inactiveClusters = mutable.Seq.empty[(Long, ClusterSummary)]
val random = new Random(seed)
var numLeafClustersNeeded = k - 1
var level = 1
var preIndices: RDD[Long] = null
var indices: RDD[Long] = null
while (activeClusters.nonEmpty && numLeafClustersNeeded > 0 && level < LEVEL_LIMIT) {
// Divisible clusters are sufficiently large and have non-trivial cost.
var divisibleClusters = activeClusters.filter { case (_, summary) =>
(summary.size >= minSize) && (summary.cost > MLUtils.EPSILON * summary.size)
}
// If we don't need all divisible clusters, take the larger ones.
if (divisibleClusters.size > numLeafClustersNeeded) {
divisibleClusters = divisibleClusters.toSeq.sortBy { case (_, summary) =>
-summary.size
}.take(numLeafClustersNeeded)
.toMap
}
if (divisibleClusters.nonEmpty) {
val divisibleIndices = divisibleClusters.keys.toSet
logInfo(s"Dividing ${divisibleIndices.size} clusters on level $level.")
var newClusterCenters = divisibleClusters.flatMap { case (index, summary) =>
val (left, right) = splitCenter(summary.center, random)
Iterator((leftChildIndex(index), left), (rightChildIndex(index), right))
        }.map(identity) // workaround for a Scala bug (SI-7005) that produces a non-serializable map
var newClusters: Map[Long, ClusterSummary] = null
var newAssignments: RDD[(Long, VectorWithNorm)] = null
for (iter <- 0 until maxIterations) {
newAssignments = updateAssignments(assignments, divisibleIndices, newClusterCenters)
.filter { case (index, _) =>
divisibleIndices.contains(parentIndex(index))
}
newClusters = summarize(d, newAssignments)
newClusterCenters = newClusters.mapValues(_.center).map(identity)
}
if (preIndices != null) {
preIndices.unpersist(false)
}
preIndices = indices
indices = updateAssignments(assignments, divisibleIndices, newClusterCenters).keys
.persist(StorageLevel.MEMORY_AND_DISK)
assignments = indices.zip(vectors)
inactiveClusters ++= activeClusters
activeClusters = newClusters
numLeafClustersNeeded -= divisibleClusters.size
} else {
logInfo(s"None active and divisible clusters left on level $level. Stop iterations.")
inactiveClusters ++= activeClusters
activeClusters = Map.empty
}
level += 1
}
if (preIndices != null) {
preIndices.unpersist(false)
}
if (indices != null) {
indices.unpersist(false)
}
norms.unpersist(false)
val clusters = activeClusters ++ inactiveClusters
val root = buildTree(clusters)
new BisectingKMeansModel(root)
}
/**
* Java-friendly version of `run()`.
*/
def run(data: JavaRDD[Vector]): BisectingKMeansModel = run(data.rdd)
}
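// --------------------------------------------------------------------------
// Added illustration (not part of the original file): a minimal usage sketch
// of the bisecting k-means API defined above. The parameter values and the
// `data` argument are hypothetical; only the builder-style setters and `run`
// come from the class itself.
private[clustering] object BisectingKMeansUsageExample {
  def fit(data: RDD[Vector]): BisectingKMeansModel = {
    val bkm = new BisectingKMeans()
      .setK(8)                          // ask for at most 8 leaf clusters
      .setMaxIterations(20)             // k-means iterations per split
      .setMinDivisibleClusterSize(1.0)  // absolute size threshold
      .setSeed(42L)
    bkm.run(data)                       // train and return the model
  }
}
// --------------------------------------------------------------------------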
private object BisectingKMeans extends Serializable {
/** The index of the root node of a tree. */
private val ROOT_INDEX: Long = 1
private val MAX_DIVISIBLE_CLUSTER_INDEX: Long = Long.MaxValue / 2
private val LEVEL_LIMIT = math.log10(Long.MaxValue) / math.log10(2)
/** Returns the left child index of the given node index. */
private def leftChildIndex(index: Long): Long = {
require(index <= MAX_DIVISIBLE_CLUSTER_INDEX, s"Child index out of bound: 2 * $index.")
2 * index
}
/** Returns the right child index of the given node index. */
private def rightChildIndex(index: Long): Long = {
require(index <= MAX_DIVISIBLE_CLUSTER_INDEX, s"Child index out of bound: 2 * $index + 1.")
2 * index + 1
}
/** Returns the parent index of the given node index, or 0 if the input is 1 (root). */
private def parentIndex(index: Long): Long = {
index / 2
}
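  // Added illustration: with ROOT_INDEX = 1 the cluster indices form an implicit
  // binary-heap layout, e.g. leftChildIndex(1) == 2, rightChildIndex(1) == 3,
  // leftChildIndex(3) == 6, rightChildIndex(3) == 7 and parentIndex(6) == 3.
  // A node on level L (the root is level 1) therefore has an index in [2^(L-1), 2^L - 1].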
/**
* Summarizes data by each cluster as Map.
* @param d feature dimension
* @param assignments pairs of point and its cluster index
* @return a map from cluster indices to corresponding cluster summaries
*/
private def summarize(
d: Int,
assignments: RDD[(Long, VectorWithNorm)]): Map[Long, ClusterSummary] = {
assignments.aggregateByKey(new ClusterSummaryAggregator(d))(
seqOp = (agg, v) => agg.add(v),
combOp = (agg1, agg2) => agg1.merge(agg2)
).mapValues(_.summary)
.collect().toMap
}
/**
* Cluster summary aggregator.
* @param d feature dimension
*/
private class ClusterSummaryAggregator(val d: Int) extends Serializable {
private var n: Long = 0L
private val sum: Vector = Vectors.zeros(d)
private var sumSq: Double = 0.0
/** Adds a point. */
def add(v: VectorWithNorm): this.type = {
n += 1L
// TODO: use a numerically stable approach to estimate cost
sumSq += v.norm * v.norm
BLAS.axpy(1.0, v.vector, sum)
this
}
/** Merges another aggregator. */
def merge(other: ClusterSummaryAggregator): this.type = {
n += other.n
sumSq += other.sumSq
BLAS.axpy(1.0, other.sum, sum)
this
}
/** Returns the summary. */
def summary: ClusterSummary = {
val mean = sum.copy
if (n > 0L) {
BLAS.scal(1.0 / n, mean)
}
val center = new VectorWithNorm(mean)
val cost = math.max(sumSq - n * center.norm * center.norm, 0.0)
new ClusterSummary(n, center, cost)
}
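    // Added note: the cost above uses the standard identity
    //   sum_i ||x_i - mean||^2 = sum_i ||x_i||^2 - n * ||mean||^2,
    // so only the running count, the coordinate sum and the squared-norm sum need to be kept.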
}
/**
* Bisects a cluster center.
*
* @param center current cluster center
* @param random a random number generator
* @return initial centers
*/
private def splitCenter(
center: VectorWithNorm,
random: Random): (VectorWithNorm, VectorWithNorm) = {
val d = center.vector.size
val norm = center.norm
val level = 1e-4 * norm
val noise = Vectors.dense(Array.fill(d)(random.nextDouble()))
val left = center.vector.copy
BLAS.axpy(-level, noise, left)
val right = center.vector.copy
BLAS.axpy(level, noise, right)
(new VectorWithNorm(left), new VectorWithNorm(right))
}
/**
* Updates assignments.
* @param assignments current assignments
* @param divisibleIndices divisible cluster indices
* @param newClusterCenters new cluster centers
* @return new assignments
*/
private def updateAssignments(
assignments: RDD[(Long, VectorWithNorm)],
divisibleIndices: Set[Long],
newClusterCenters: Map[Long, VectorWithNorm]): RDD[(Long, VectorWithNorm)] = {
assignments.map { case (index, v) =>
if (divisibleIndices.contains(index)) {
val children = Seq(leftChildIndex(index), rightChildIndex(index))
val newClusterChildren = children.filter(newClusterCenters.contains(_))
if (newClusterChildren.nonEmpty) {
val selected = newClusterChildren.minBy { child =>
EuclideanDistanceMeasure.fastSquaredDistance(newClusterCenters(child), v)
}
(selected, v)
} else {
(index, v)
}
} else {
(index, v)
}
}
}
/**
* Builds a clustering tree by re-indexing internal and leaf clusters.
* @param clusters a map from cluster indices to corresponding cluster summaries
* @return the root node of the clustering tree
*/
private def buildTree(clusters: Map[Long, ClusterSummary]): ClusteringTreeNode = {
var leafIndex = 0
var internalIndex = -1
/**
* Builds a subtree from this given node index.
*/
def buildSubTree(rawIndex: Long): ClusteringTreeNode = {
val cluster = clusters(rawIndex)
val size = cluster.size
val center = cluster.center
val cost = cluster.cost
val isInternal = clusters.contains(leftChildIndex(rawIndex))
if (isInternal) {
val index = internalIndex
internalIndex -= 1
val leftIndex = leftChildIndex(rawIndex)
val rightIndex = rightChildIndex(rawIndex)
val indexes = Seq(leftIndex, rightIndex).filter(clusters.contains(_))
val height = math.sqrt(indexes.map { childIndex =>
EuclideanDistanceMeasure.fastSquaredDistance(center, clusters(childIndex).center)
}.max)
val children = indexes.map(buildSubTree(_)).toArray
new ClusteringTreeNode(index, size, center, cost, height, children)
} else {
val index = leafIndex
leafIndex += 1
val height = 0.0
new ClusteringTreeNode(index, size, center, cost, height, Array.empty)
}
}
buildSubTree(ROOT_INDEX)
}
/**
* Summary of a cluster.
*
* @param size the number of points within this cluster
* @param center the center of the points within this cluster
* @param cost the sum of squared distances to the center
*/
private case class ClusterSummary(size: Long, center: VectorWithNorm, cost: Double)
}
/**
* Represents a node in a clustering tree.
*
* @param index node index, negative for internal nodes and non-negative for leaf nodes
* @param size size of the cluster
* @param centerWithNorm cluster center with norm
* @param cost cost of the cluster, i.e., the sum of squared distances to the center
* @param height height of the node in the dendrogram. Currently this is defined as the max distance
 *               from the center to the centers of its children, but subject to change.
* @param children children nodes
*/
@Since("1.6.0")
private[clustering] class ClusteringTreeNode private[clustering] (
val index: Int,
val size: Long,
private[clustering] val centerWithNorm: VectorWithNorm,
val cost: Double,
val height: Double,
val children: Array[ClusteringTreeNode]) extends Serializable {
/** Whether this is a leaf node. */
val isLeaf: Boolean = children.isEmpty
require((isLeaf && index >= 0) || (!isLeaf && index < 0))
/** Cluster center. */
def center: Vector = centerWithNorm.vector
/** Predicts the leaf cluster node index that the input point belongs to. */
def predict(point: Vector): Int = {
val (index, _) = predict(new VectorWithNorm(point))
index
}
/** Returns the full prediction path from root to leaf. */
def predictPath(point: Vector): Array[ClusteringTreeNode] = {
predictPath(new VectorWithNorm(point)).toArray
}
/** Returns the full prediction path from root to leaf. */
private def predictPath(pointWithNorm: VectorWithNorm): List[ClusteringTreeNode] = {
if (isLeaf) {
this :: Nil
} else {
val selected = children.minBy { child =>
EuclideanDistanceMeasure.fastSquaredDistance(child.centerWithNorm, pointWithNorm)
}
selected :: selected.predictPath(pointWithNorm)
}
}
/**
* Computes the cost (squared distance to the predicted leaf cluster center) of the input point.
*/
def computeCost(point: Vector): Double = {
val (_, cost) = predict(new VectorWithNorm(point))
cost
}
/**
* Predicts the cluster index and the cost of the input point.
*/
private def predict(pointWithNorm: VectorWithNorm): (Int, Double) = {
predict(pointWithNorm,
EuclideanDistanceMeasure.fastSquaredDistance(centerWithNorm, pointWithNorm))
}
/**
* Predicts the cluster index and the cost of the input point.
* @param pointWithNorm input point
* @param cost the cost to the current center
* @return (predicted leaf cluster index, cost)
*/
@tailrec
private def predict(pointWithNorm: VectorWithNorm, cost: Double): (Int, Double) = {
if (isLeaf) {
(index, cost)
} else {
val (selectedChild, minCost) = children.map { child =>
(child, EuclideanDistanceMeasure.fastSquaredDistance(child.centerWithNorm, pointWithNorm))
}.minBy(_._2)
selectedChild.predict(pointWithNorm, minCost)
}
}
/**
* Returns all leaf nodes from this node.
*/
def leafNodes: Array[ClusteringTreeNode] = {
if (isLeaf) {
Array(this)
} else {
children.flatMap(_.leafNodes)
}
}
}
| esi-mineset/spark | mllib/src/main/scala/org/apache/spark/mllib/clustering/BisectingKMeans.scala | Scala | apache-2.0 | 18,165 |
play {
val leastChange = LeastChange.ar(a = 394.84225, b = 327.47897)
val gbmanL = GbmanL.ar(freq = Seq(394.84225, 394.85), xi = 742.4219, yi = -2726.2134)
val fBSineL = FBSineL.ar(freq = Seq(859.40076, 859.41), im = 742.4219, fb = -0.0029116, a = 327.47897, c = 0.26494086, xi = 327.47897, yi = -0.46837935)
val lo = Pulse.ar(freq = Seq(3988.951, 3988.9), width = 327.47897)
// TIRand.ar(lo = lo, hi = 636.34235, trig = -0.0032175202)
val levelScale = FreeVerb2.ar(inL = 327.47897, inR = 20.02571, mix = -0.4713431, room = 1531.113, damp = fBSineL)
import Curve._
val envGen = EnvGen.ar(envelope = Env(0.0,Vector(
Env.Segment(TIRand.ar(Pulse.ar(3988.951,327.47897),636.34235,-0.0032175202), 327.47897, parametric(-4.0f)),
Env.Segment(3988.951, 0.0, parametric(-4.0f))),1.0,-99.0),
gate = 3988.951, levelScale = levelScale, levelBias = 859.40076, timeScale = 9.444879E-4, doneAction = doNothing)
val slope = Slope.ar(1.2822516)
val lag3 = Lag3.ar(636.34235, time = fBSineL)
val bRF = BRF.ar(Seq.fill(2)(636.34235), freq = -0.0029116, rq = -49.179382)
val mix_0 = Mix(Seq[GE](bRF, lag3, slope, envGen, gbmanL, leastChange))
val mix_1 = mix_0 // Mix.mono(mix_0)
val bad = CheckBadValues.ar(mix_1, id = 0.0, post = 0.0)
val gate_0 = Gate.ar(mix_1, gate = bad sig_== 0.0)
val lim = LeakDC.ar(Limiter.ar(LeakDC.ar(gate_0, coeff = 0.995), level = 1.0, dur = 0.01), coeff = 0.995)
// val fade = DelayN.ar(FadeIn(audio, "fade-in"), maxDelayTime = 0.02, delayTime = 0.02) * FadeOut(audio, "fade-out") * 1.0 - Attribute(control, "mute", 0.0) * Attribute(control, "gain", 1.0)
// ScanOut("out", lim * fade)
lim // Pan2.ar(lim)
}
| Sciss/Grenzwerte | individual_sounds/T1-L14-542-765.scala | Scala | gpl-3.0 | 1,701 |
package com.wixpress.guineapig.web
import com.wixpress.guineapig.drivers.SpecificationWithEnvSupport
import org.specs2.specification.Scope
class MetaDataControllerIT extends SpecificationWithEnvSupport {
trait Context extends Scope
"MetaData controller" should {
"fetch languages" in new Context {
httpDriver.get("http://localhost:9901/v1/languages").getBodyRaw must
(contain("en") and contain("English") and contain("de") and contain("German"))
}
}
}
| wix/petri | guineapig-webapp-os/src/it/java/com/wixpress/guineapig/web/MetaDataControllerIT.scala | Scala | bsd-3-clause | 487 |
/*
* Copyright 2016 Coursera Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.coursera.naptime.ari.fetcher
import javax.inject.Inject
import com.typesafe.scalalogging.StrictLogging
import org.coursera.naptime.NaptimeActionException
import org.coursera.naptime.actions.RestAction
import org.coursera.naptime.ari.FetcherApi
import org.coursera.naptime.ari.FetcherError
import org.coursera.naptime.ari.Request
import org.coursera.naptime.router2.NaptimeRoutes
import play.api.libs.json.JsArray
import play.api.libs.json.JsBoolean
import play.api.libs.json.JsNull
import play.api.libs.json.JsNumber
import play.api.libs.json.JsObject
import play.api.libs.json.JsString
import play.api.libs.json.JsValue
import play.api.libs.json.Json
import play.api.mvc.AnyContentAsEmpty
import play.api.mvc.Headers
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
/**
* Executes data requests against local Naptime resources (requires the use of Engine2 engines).
*
* @param naptimeRoutes The routing data structures required for handling requests.
*/
class LocalFetcher @Inject()(naptimeRoutes: NaptimeRoutes) extends FetcherApi with StrictLogging {
private[this] val schemas = naptimeRoutes.routerBuilders.map(_.schema)
private[this] val models = naptimeRoutes.routerBuilders.flatMap(_.types).map(_.tuple).toMap
private[this] val routers = naptimeRoutes.buildersToRouters.map {
case (builder, router) =>
naptimeRoutes.className(builder) -> router
}
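  // Added note (descriptive): `data` resolves the requested resource against the locally
  // registered Naptime routers by synthesizing a GET request (stripping Content-Type and
  // Content-Length so the body is not re-parsed) and invoking the matching RestAction
  // directly; failures are surfaced as FetcherError values rather than thrown.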
override def data(request: Request, isDebugMode: Boolean)(
implicit executionContext: ExecutionContext): Future[FetcherResponse] = {
val resourceSchemaOpt = schemas.find { resourceSchema =>
// TODO: Handle nested resources.
resourceSchema.name == request.resource.topLevelName &&
resourceSchema.version.contains(request.resource.version)
}
val queryString = request.arguments.toMap.mapValues(arg => List(stringifyArg(arg)))
val url = s"/api/${request.resource.identifier}?" +
queryString.map { case (key, value) => key + "=" + value.mkString(",") }.mkString("&")
(for {
resourceSchema <- resourceSchemaOpt
router <- routers.get(resourceSchema.className)
path = s"/${request.resource.identifier}"
fakePlayRequestTarget = request.requestHeader.target
.withUriString(request.resource.identifier)
.withQueryString(queryString)
// We need to make a Headers object that does not have have Content-Type or Content-Length,
// because Content-Type and Content-Length headers cause the handler to attempt to parse the body and fail.
// The request.requestHeader.headers is of type play.core.server.akkahttp.AkkaHeadersWrapper
// which always adds back the Content-Type and Content-Length of the original request
// so we have to make our own clean play.api.mvc.Headers headers.
fakePlayRequestHeaders = Headers(request.requestHeader.headers.headers: _*)
.remove("Content-Type", "Content-Length")
fakePlayRequest = request.requestHeader
.withMethod("GET")
.withTarget(fakePlayRequestTarget)
.withHeaders(fakePlayRequestHeaders)
.withBody(())
// TODO: handle header filtering more properly
handler <- router.routeRequest(path, fakePlayRequest)
} yield {
logger.info(
s"Making local request to ${request.resource.identifier} / ${fakePlayRequest.queryString}")
val taggedRequest = handler.tagRequest(fakePlayRequest)
handler match {
case naptimeAction: RestAction[_, _, _, _, _, _] =>
naptimeAction
.localRun(fakePlayRequest, request.resource)
.map(response => Right(response.copy(url = Some(url))))
.recoverWith {
case actionException: NaptimeActionException =>
Future.successful(
Left(FetcherError(actionException.httpCode, actionException.toString, Some(url))))
case e: Throwable => throw e
}
case _ =>
val msg = "Handler was not a RestAction, or Get attempted"
logger.error(msg)
Future.successful(Left(FetcherError(404, msg, Some(url))))
}
}).getOrElse {
val msg = s"Unknown resource: ${request.resource}"
logger.warn(msg)
Future.successful(Left(FetcherError(404, msg, Some(url))))
}
}
private[this] def stringifyArg(value: JsValue): String = {
value match {
case JsArray(arrayElements) =>
arrayElements.map(stringifyArg).mkString(",")
case stringValue: JsString =>
stringValue.as[String]
case number: JsNumber =>
number.toString
case boolean: JsBoolean =>
boolean.toString
case jsObject: JsObject =>
Json.stringify(jsObject)
case JsNull =>
""
}
}
}
| coursera/naptime | naptime/src/main/scala/org/coursera/naptime/ari/fetcher/LocalFetcher.scala | Scala | apache-2.0 | 5,370 |
package cz.kamenitxan.jakon.webui.functions
import java.util
import com.mitchellbosecke.pebble.extension.Function
import cz.kamenitxan.jakon.core.template.pebble.PebbleExtension
/**
* Created by tomaspavel on 6.10.16.
*/
class AdminPebbleExtension extends PebbleExtension {
override def getFunctions: util.Map[String, Function] = {
val extensions = super.getFunctions
extensions.put("getAttr", new GetAttributeFun)
extensions.put("getAttrType", new GetAttributeTypeFun)
extensions.put("i18n", new I18nFun)
extensions.put("splitMessages", new SplitMessagesFun)
extensions.put("objectExtensions", new ObjectExtensionFun)
extensions.put("getAdminControllers", new GetAdminControllers)
extensions
}
} | kamenitxan/Jakon | modules/backend/src/main/scala/cz/kamenitxan/jakon/webui/functions/AdminPebbleExtension.scala | Scala | bsd-3-clause | 720 |
package net.bhardy.braintree.scala.gw
import net.bhardy.braintree.scala.exceptions.NotFoundException
import net.bhardy.braintree.scala.util.Http
import scala.math.BigDecimal
import net.bhardy.braintree.scala._
import search.SubscriptionSearchRequest
/**
 * Provides methods to interact with {@link Subscription Subscriptions},
 * including create, find, update, cancel, etc.
* This class does not need to be instantiated directly.
* Instead, use {@link BraintreeGateway#subscription()} to get an instance of this class:
*
* <pre>
* BraintreeGateway gateway = new BraintreeGateway(...);
* gateway.subscription().create(...)
* </pre>
*
* For more detailed information on {@link Subscription Subscriptions}, see <a href="http://www.braintreepayments.com/gateway/subscription-api" target="_blank">http://www.braintreepaymentsolutions.com/gateway/subscription-api</a>
*/
class SubscriptionGateway(http: Http) {
/**
* Cancels the {@link Subscription} with the given id.
* @param id of the { @link Subscription} to cancel.
* @return a { @link Result}.
*/
def cancel(id: String): Result[Subscription] = {
val node = http.put("/subscriptions/" + id + "/cancel")
Result.subscription(node)
}
/**
* Creates a {@link Subscription}.
* @param request the request.
* @return a { @link Result}.
*/
def create(request: SubscriptionRequest): Result[Subscription] = {
val node = http.post("/subscriptions", request)
Result.subscription(node)
}
def delete(customerId: String, id: String): Result[Subscription] = {
http.delete("/subscriptions/" + id)
Result.deleted
}
/**
* Finds a {@link Subscription} by id.
* @param id the id of the { @link Subscription}.
* @return the { @link Subscription} or raises a { @link net.bhardy.braintree.scala.exceptions.NotFoundException}.
*/
def find(id: String): Subscription = {
if (id == null || (id.trim == "")) throw new NotFoundException
new Subscription(http.get("/subscriptions/" + id))
}
/**
* Updates a {@link Subscription}.
* @param id the id of the { @link Subscription}.
* @param request the request.
* @return a { @link Result}.
*/
def update(id: String, request: SubscriptionRequest): Result[Subscription] = {
val node = http.put("/subscriptions/" + id, request)
Result.subscription(node)
}
/**
* Search for a {@link Subscription}.
* @param searchRequest the { @link SubscriptionSearchRequest}.
* @return a { @link Result}.
*/
def search(searchRequest: SubscriptionSearchRequest): ResourceCollection[Subscription] = {
val node = http.post("/subscriptions/advanced_search_ids", searchRequest)
new ResourceCollection[Subscription](Pager.subscription(this, searchRequest), node)
}
private[braintree] def fetchSubscriptions(search: SubscriptionSearchRequest, ids: List[String]): List[Subscription] = {
search.ids.in(ids)
val response = http.post("/subscriptions/advanced_search", search)
response.findAll("subscription").map{new Subscription(_)}.toList
}
private def retryCharge(txnRequest: SubscriptionTransactionRequest): Result[Transaction] = {
val response = http.post("/transactions", txnRequest)
Result.transaction(response)
}
def retryCharge(subscriptionId: String): Result[Transaction] = {
retryCharge(new SubscriptionTransactionRequest().subscriptionId(subscriptionId))
}
def retryCharge(subscriptionId: String, amount: BigDecimal): Result[Transaction] = {
retryCharge(new SubscriptionTransactionRequest().subscriptionId(subscriptionId).amount(amount))
}
} | benhardy/braintree-scala | src/main/scala/gw/SubscriptionGateway.scala | Scala | mit | 3,599 |
package org.pignat.bwatnwa
trait PointListener {
def point(s:Int)
} | RandomReaper/bwatnwa | src/main/scala/org/pignat/bwatnwa/PointListener.scala | Scala | agpl-3.0 | 70 |
package org.libss.util.helpers
/**
* date: 02.06.2016 22:44
* author: Kaa
*
* Gender enumeration class
*/
object Gender extends Enumeration {
type Gender = Value
val Male = Value("male")
val Female = Value("female")
}
| kanischev/libss | libss-utils/src/main/scala/org/libss/util/helpers/Gender.scala | Scala | apache-2.0 | 235 |
package breeze
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import io.{CSVWriter, CSVReader}
import linalg.operators._
import breeze.linalg.support.CanCopy
import math.Semiring
import storage.DefaultArrayValue
import java.io.{File, FileReader}
import scala.reflect.ClassTag
/**
* This package contains everything relating to Vectors, Matrices, Tensors, etc.
*
* If you're doing basic work, you probably want [[breeze.linalg.DenseVector]] and [[breeze.linalg.DenseMatrix]],
* which support most operations. We also have [[breeze.linalg.SparseVector]]s and (basic!) support
* for a sparse matrix ([[breeze.linalg.CSCMatrix]]).
*
* This package object contains Matlab-esque functions for interacting with tensors and matrices.
*
* @author dlwh
*/
package object linalg {
/**
* Computes y += x * a, possibly doing less work than actually doing that operation
*/
def axpy[A, X, Y](a: A, x: X, y: Y)(implicit axpy: scaleAdd.InPlaceImpl3[Y, A, X]) { axpy(y, a, x) }
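  // Added illustration: in-place update of a dense vector, e.g.
  //   val y = DenseVector(1.0, 1.0); axpy(2.0, DenseVector(1.0, 2.0), y)
  // leaves y == DenseVector(3.0, 5.0).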
/**
* Generates a vector of linearly spaced values between a and b (inclusive).
* The returned vector will have length elements, defaulting to 100.
*/
def linspace(a : Double, b : Double, length : Int = 100) : DenseVector[Double] = {
val increment = (b - a) / (length - 1)
DenseVector.tabulate(length)(i => a + increment * i)
}
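  // Added illustration: linspace(0.0, 1.0, 5) == DenseVector(0.0, 0.25, 0.5, 0.75, 1.0).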
/**
* Copy a T. Most tensor objects have a CanCopy implicit, which is what this farms out to.
*/
def copy[T](t: T)(implicit canCopy: CanCopy[T]): T = canCopy(t)
// io stuff
/**
* Reads in a DenseMatrix from a CSV File
*/
def csvread(file: File,
separator: Char=',',
quote: Char='"',
              escape: Char='\\',
skipLines: Int = 0): DenseMatrix[Double] = {
val input = new FileReader(file)
var mat = CSVReader.read(input, separator, quote, escape, skipLines)
mat = mat.takeWhile(line => line.length != 0 && line.head.nonEmpty) // empty lines at the end
input.close()
if(mat.length == 0) {
DenseMatrix.zeros[Double](0,0)
} else {
DenseMatrix.tabulate(mat.length,mat.head.length)((i,j)=>mat(i)(j).toDouble)
}
}
def csvwrite(file: File, mat: Matrix[Double],
separator: Char=',',
               quote: Char='\0',
               escape: Char='\\',
skipLines: Int = 0) {
CSVWriter.writeFile(file, IndexedSeq.tabulate(mat.rows,mat.cols)(mat(_,_).toString), separator, quote, escape)
}
object RangeExtender {
val All = new Range(0, -1, 1)
}
implicit class RangeExtender(val re: Range) extends Range(re.start, re.end, re.step) {
def getRangeWithoutNegativeIndexes(totalLength: Int): Range = {
if(re.isInclusive){
val (actualStart: Int, actualEnd: Int) =
(
if ( re.start < 0 ) totalLength + re.start else re.start , //actualStart will be given as argument to inclusive range "to"
if ( re.end < 0 ) totalLength + re.end else re.end //actualEnd will be given as argument to inclusive range "to"
)
(actualStart to actualEnd by re.step)
} else if( re.end < 0 || re.start < 0) {
throw new IllegalArgumentException("cannot use negative end indexing with 'until', due to ambiguities from Range.end being exclusive")
} else {
re
}
}
}
import math.Ring
import com.github.fommil.netlib.LAPACK.{getInstance=>lapack}
/**
* Basic linear algebraic operations.
*
* @author dlwh,dramage,retronym,afwlehmann,lancelet
*/
// import breeze.linalg._
private[linalg] def requireNonEmptyMatrix[V](mat: Matrix[V]) =
if (mat.cols == 0 || mat.rows == 0)
throw new MatrixEmptyException
private[linalg] def requireSquareMatrix[V](mat: Matrix[V]) =
if (mat.rows != mat.cols)
throw new MatrixNotSquareException
private[linalg] def requireSymmetricMatrix[V](mat: Matrix[V]) = {
requireSquareMatrix(mat)
for (i <- 0 until mat.rows; j <- 0 until i)
if (mat(i,j) != mat(j,i))
throw new MatrixNotSymmetricException
}
/**
* Vector cross product of 3D vectors a and b.
*/
def cross[V1](a: DenseVector[V1], b: DenseVector[V1])(implicit ring: Ring[V1], man: ClassTag[V1]): DenseVector[V1] = {
require(a.length == 3)
require(b.length == 3)
DenseVector(
ring.-(ring.*(a(1), b(2)), ring.*(a(2), b(1))),
ring.-(ring.*(a(2), b(0)), ring.*(a(0), b(2))),
ring.-(ring.*(a(0), b(1)), ring.*(a(1), b(0)))
)
}
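  // Added illustration: the cross product of the x and y unit vectors is the z unit vector,
  //   cross(DenseVector(1.0, 0.0, 0.0), DenseVector(0.0, 1.0, 0.0)) == DenseVector(0.0, 0.0, 1.0).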
/**
* Returns the rank of each element in the given vector, adjusting for
* ties.
*/
def ranks[V:Ordering](x : Vector[V]): Array[Double] = {
val a = x
val as = argsort(a)
val rv = new Array[Double](as.length)
var i = 0
while (i < as.length) {
// count number of tied values at rank i
var numTiedValuesAtI = 1
while (i + numTiedValuesAtI < as.length && a(as(i + numTiedValuesAtI)) == a(as(i))) {
numTiedValuesAtI += 1
}
// set return value for next numTiedValuesAtI indexes in as
val rank = 1 + i + (numTiedValuesAtI - 1) / 2.0
var j = 0
while (j < numTiedValuesAtI) {
rv(as(i + j)) = rank
j += 1
}
i += numTiedValuesAtI
}
rv
}
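  // Added illustration: ties share the average of the ranks they occupy, e.g.
  //   ranks(DenseVector(3.0, 1.0, 4.0, 1.0, 5.0)) == Array(3.0, 1.5, 4.0, 1.5, 5.0)
  // because the two smallest values tie for ranks 1 and 2.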
/**
* The lower triangular portion of the given real quadratic matrix X. Note
* that no check will be performed regarding the symmetry of X.
*/
def lowerTriangular[T: Semiring: ClassTag:DefaultArrayValue](X: Matrix[T]): DenseMatrix[T] = {
val N = X.rows
DenseMatrix.tabulate(N, N)( (i, j) =>
if(j <= i) X(i,j)
else implicitly[Semiring[T]].zero
)
}
/**
* The upper triangular portion of the given real quadratic matrix X. Note
* that no check will be performed regarding the symmetry of X.
*/
def upperTriangular[T: Semiring: ClassTag: DefaultArrayValue](X: Matrix[T]): DenseMatrix[T] = {
val N = X.rows
DenseMatrix.tabulate(N, N)( (i, j) =>
if(j >= i) X(i,j)
else implicitly[Semiring[T]].zero
)
}
/**
* Performs a principal components analysis on the given numeric data
* matrix and returns the results as an object of class PCA.
*
   * If no covariance matrix is supplied, one obtained from the given
* data is used.
*/
def princomp(
x: DenseMatrix[Double],
covmatOpt: Option[DenseMatrix[Double]] = None
) = {
covmatOpt match {
case Some(covmat) => new PCA(x, covmat)
case None => new PCA(x, cov(x))
}
}
/**
* A generic function (based on the R function of the same name) whose
* default method centers and/or scales the columns of a numeric matrix.
*
* If ‘scale’ is ‘TRUE’ then scaling is done by dividing the (centered)
* columns of ‘x’ by their standard deviations if ‘center’ is ‘TRUE’, and
* the root mean square otherwise. If ‘scale’ is ‘FALSE’, no scaling is
* done.
*/
def scale(
x: DenseMatrix[Double],
center: Boolean = true,
scale: Boolean = false
) = {
import breeze.stats.{mean, stddev}
if (center) {
val xc = x(*,::) - mean(x, Axis._0).toDenseVector
if (scale)
xc(*,::) :/ stddev(x(::, *)).toDenseVector
else
xc
} else {
if (scale)
x(*,::) :/ columnRMS(x)
else
x
}
}
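  // Added illustration: scale(x) only centers the columns (subtracts the column means);
  // scale(x, center = true, scale = true) additionally divides each centered column by
  // its standard deviation (column-wise standardization).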
/**
   * Computes the sample covariance matrix from the given data, centering
   * the columns first if requested. This is the plain, unoptimized computation.
*/
def cov(x: DenseMatrix[Double], center: Boolean = true) = {
val xc = scale(x,center,false)
(xc.t * xc) /= xc.rows - 1.0
}
/**
* Helper function to compute the root-mean-square of the columns of a
* matrix. Feel free to make this more general.
*/
private def columnRMS(x: DenseMatrix[Double]) =
(sum(x:*x,Axis._0) / (x.rows-1.0)).map(scala.math.sqrt).toDenseVector
}
| wavelets/breeze | src/main/scala/breeze/linalg/package.scala | Scala | apache-2.0 | 8,426 |
package com.pointr.tensorflow
import java.io.File
import com.pointr.tcp.util.Logger.{debug, error, warn}
import com.pointr.tcp.util.{FileUtils, Logger}
import com.pointr.tcp.rpc.TcpParams
import com.pointr.tensorflow.GpuClient.{GpuAlternate, GpuClientInfo, GpuInfo, GpusInfo}
import com.pointr.util.TfAppConfig
import scala.collection.mutable.{ArrayBuffer => AB}
case class FailoverInfo(appConfig: TfAppConfig, gpuInfos: GpusInfo)
class GpuFailoverService(failoverInfo: FailoverInfo, tfClientFailoverCallback: TfClientFailoverCallback) extends GpuRegistrationService with GpuLogger {
val appConfig = failoverInfo.appConfig
val gpuInfos = failoverInfo.gpuInfos
val alternates = AB[GpuAlternate](gpuInfos.alternates: _*)
var alternatesUnderflow = alternates.isEmpty
private val _broken = AB[GpuInfo]()
private val newGpus = AB[GpuInfo]()
private val _gpus = AB[GpuInfo](gpuInfos.gpus:_*)
override def gpus = Seq(_gpus:_*)
override def broken = Seq(_broken:_*)
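  // Added note (descriptive): registerAlternate ignores hosts that are already active or
  // already registered as alternates; otherwise it either immediately activates the new
  // alternate in place of the oldest broken GPU (returning a fresh TfClient) or simply
  // queues it as a spare when nothing is broken.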
override def registerAlternate(host: String, port: Int): Option[TfClient] = {
val hostPort = s"$host:$port"
val optTfClient = if (_gpus.map(_.tfServerHostAndPort).contains(hostPort)) {
warn(s"RegisterAlternate: we already have an active gpu with $hostPort")
None
} else if (alternates.map(_.tfServerHostAndPort).contains(hostPort)) {
warn(s"RegisterAlternate: we already have an alternate gpu with $hostPort")
None
} else {
val alternate = GpuAlternate(hostPort)
if (_broken.nonEmpty) {
val toRecover = _broken.remove(0)
alternates += alternate
val gpuInfo = activate(alternate, toRecover)
val tfClient = TfClient(appConfig, gpuInfo.parseTfServerHostAndPort._1, gpuInfo.parseTfServerHostAndPort._2)
tfClientFailoverCallback.failoverClient(gpuInfo.gpuNum, tfClient)
warn(s"Successfully replaced broken GPU $toRecover with alternate GPU $hostPort")
Some(tfClient)
}
else {
alternates += alternate
warn(s"Successfully added alternate GPU $hostPort")
None
}
}
optTfClient
}
def activate(gpuAlternate: GpuAlternate, baseGpu: GpuInfo) = {
assert(alternates.contains(gpuAlternate),s"Alternates did not contain $gpuAlternate ?")
alternates -= gpuAlternate
val gpuInfo = GpuInfo(gpuAlternate, baseGpu)
_gpus += gpuInfo
gpuInfo
}
def fail(gci: GpuClientInfo, msg: String, e: Option[Exception] = None) = this.synchronized {
val gi = gci.gpuInfo
if (broken.map(_.gpuNum).contains(gi.gpuNum)) {
warn(s"Got another FAIL on the BROKEN GPU $gi - $msg ${e.map(Logger.toString(_))}")
(gci, false)
} else {
error(s"Got a BROKEN GPU $gi - $msg ${e.map(Logger.toString(_))}")
val brokenGpu = _gpus.find { g => g.gpuNum == gi.gpuNum }.head
assert(brokenGpu != null, s"Why did we not find the brokenGpu for ${gi.gpuNum} in ${
_gpus.map {
_.gpuNum
}.mkString(",")
} ?")
_gpus -= brokenGpu
if (alternates.isEmpty) {
_broken += brokenGpu
throw new IllegalStateException(s"No more alternates available. We're cooked.")
} else {
val alt = alternates.head
val newGpu = brokenGpu.copy(tfServerHostAndPort = alt.tfServerHostAndPort)
newGpus += newGpu
alternates -= alt
// alternates -= alternates.filter(_.tfServerHostAndPort != alt.tfServerHostAndPort).head
warn(s"Successfully failed over from $brokenGpu to $newGpu")
_gpus += newGpu
debug(s"gpus=${gpus.mkString(",")} alternates=${alternates.mkString(",")}")
val newClientInfo = gci.copy(gpuInfo = newGpu)
resetFiles(newGpu.gpuNum, newClientInfo)
(newClientInfo,true)
}
}
}
def resetFiles(gpuNum: Int, c: GpuClientInfo) = {
val inDirProcessing = new File(s"${c.gpuInfo.dir}/processing")
if (inDirProcessing.exists && inDirProcessing.list.nonEmpty) {
val startDir = c.gpuInfo.dir
txError(gpuNum, s"Found non-empty processing dir $inDirProcessing: will move entries to $startDir..")
inDirProcessing.list.foreach(f => FileUtils.mv(s"$inDirProcessing/$f", s"$startDir/${FileUtils.fileName(f)}"))
}
}
def checkProcessingDir(gpuNum: Int, c: GpuClient) = {
val inDirProcessing = new File(s"${c.gci.gpuInfo.dir}/processing")
if (inDirProcessing.exists && inDirProcessing.list.nonEmpty) {
// val outDir = s"/tmp/imageProcessing/$gpuNum" // /processing"
val outDir = c.gci.gpuInfo.dir // /processing"
FileUtils.mkdirs(outDir)
txError(gpuNum, s"Found non-empty processing dir $inDirProcessing: will move entries to $outDir..")
inDirProcessing.list.foreach(f => FileUtils.mv(s"$inDirProcessing/$f", s"$outDir/${FileUtils.fileName(f)}"))
}
}
}
trait TfClientFailoverCallback {
def failoverClient(gpuNum: Int, newTfClient: TfClient)
}
| OpenChaiSpark/OCspark | tf/src/main/scala/com/pointr/tensorflow/GpuFailoverService.scala | Scala | apache-2.0 | 4,924 |
package com.github.slackey.codecs.responses
import com.github.slackey.codecs.types._
case class ImOpen(
channel: Channel
)
| slackey/slackey | src/main/scala/com/github/slackey/codecs/responses/ImOpen.scala | Scala | mit | 127 |
package dk.gp.gpc
import breeze.optimize.ApproximateGradientFunction
import breeze.linalg.DenseVector
import breeze.optimize.LBFGS
import dk.gp.gpc.util.calcGPCLoglik
import breeze.linalg._
import util._
import breeze.optimize._
object gpcTrain {
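  // Added note (descriptive, based on the code below): the covariance function parameters
  // and the GP mean are packed into a single vector and optimized with L-BFGS against the
  // GPC lower-bound objective (GpcLowerboundDiffFunction); the trained model is a copy of
  // the input model carrying the optimized values.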
def apply(gpcModel: GpcModel, maxIter: Int = 100): GpcModel = {
val diffFunc = GpcLowerboundDiffFunction(gpcModel)
val initialParams = DenseVector(gpcModel.covFuncParams.toArray :+ gpcModel.gpMean)
val optimizer = new LBFGS[DenseVector[Double]](maxIter, m = 6, tolerance = 1.0E-6)
val optIterations = optimizer.iterations(diffFunc, initialParams).toList
val newParams = optIterations.last.x
val newCovFuncParams = DenseVector(newParams.toArray.dropRight(1))
val newGPMean = newParams.toArray.last
val trainedModel = gpcModel.copy(covFuncParams = newCovFuncParams, gpMean = newGPMean)
trainedModel
}
} | danielkorzekwa/bayes-scala-gp | src/main/scala/dk/gp/gpc/gpcTrain.scala | Scala | bsd-2-clause | 897 |
/*
* Copyright ixias.net All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license
* For the full copyright and license information,
* please view the LICENSE file that was distributed with this source code.
*/
package ixias.play.api.mvc
import play.api.mvc.Results._
import play.api.mvc.{ Request, AnyContent, Result }
import play.api.libs.json.{ Reads, Writes, JsSuccess, JsError }
import ixias.util.Logger
// Helper for JSON
//~~~~~~~~~~~~~~~~~~
object JsonHelper {
// -- [ Properties ]----------------------------------------------------------
protected lazy val logger = Logger.apply
// -- [ Methods ]-------------------------------------------------------------
/**
* Build a result object as JSON response.
*/
def toJson[T](o: T)(implicit tjs: Writes[T]): Result =
Ok(play.api.libs.json.Json.toJson(o))
/**
* To bind request data to a `T` component.
*/
def bindFromRequest[T](implicit request: Request[AnyContent], rds: Reads[T]): Either[Result, T] =
request.body.asJson match {
case None => Left(BadRequest)
case Some(json) => json.validate[T] match {
case JsSuccess(v, _) => Right(v)
case JsError(errs) => {
logger.error(JsError.toJson(errs).toString())
Left(BadRequest(JsError.toJson(errs)))
}
}
}
}
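// Added illustration (not part of the original file): a minimal, hypothetical sketch of how
// a controller action could combine bindFromRequest and toJson. The type parameter A and its
// Reads/Writes instances stand in for an application model such as a User DTO.
object JsonHelperUsageExample {
  def echo[A: Reads: Writes](implicit request: Request[AnyContent]): Result =
    JsonHelper.bindFromRequest[A] match {
      case Left(badRequest) => badRequest                // 400 with the validation errors
      case Right(value)     => JsonHelper.toJson(value)  // 200 with the re-serialized value
    }
}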
| sp1rytus/ixias | framework/ixias-play-core/src/main/scala/ixias/play/api/mvc/JsonHelper.scala | Scala | mit | 1,357 |
package io.github.mandar2812.PlasmaML.helios.core
import ammonite.ops._
import io.github.mandar2812.dynaml.utils.annotation.Experimental
import io.github.mandar2812.dynaml.DynaMLPipe._
import io.github.mandar2812.dynaml.tensorflow._
import io.github.mandar2812.dynaml.models._
import io.github.mandar2812.dynaml.utils
import io.github.mandar2812.dynaml.evaluation.Performance
import io.github.mandar2812.dynaml.pipes._
import io.github.mandar2812.dynaml.tensorflow.data.{DataSet, TFDataSet}
import org.json4s._
import org.json4s.jackson.Serialization.{read => read_json, write => write_json}
import org.platanios.tensorflow.api.core.types.{IsFloatOrDouble, TF}
import org.platanios.tensorflow.api._
/**
* <h3>Probabilistic Dynamic Time Lag Model</h3>
*
* @param time_window The size of the time window in steps.
* @param modelFunction Generates a tensorflow model instance
* from hyper-parameters.
* @param model_config_func Generates model training configuration
* from hyper-parameters.
* @param hyp_params A collection of hyper-parameters.
* @param persistent_hyp_params The subset of the hyper-parameters which
* are not updated.
 * @param params_to_mutable_params A one-to-one invertible mapping between
 *                                 the loss function parameters and the
 *                                 canonical parameters "alpha" and "sigma_sq".
* @param training_data The training data collection.
* @param tf_data_handle_ops An instance of [[dtflearn.model.Ops]], describes
* how the data patterns should be loaded into a
* Tensorflow dataset handle.
* @param fitness_to_scalar A function which processes all the computed metrics
* and returns a single fitness score.
* @param validation_data An optional validation data collection.
*
* @param data_split_func An optional data pipeline which divides the
* training collection into a train and validation split.
*
* */
class PDTModel[
Pattern,
In,
IT,
ID,
IS,
T: TF: IsFloatOrDouble,
Loss: TF: IsFloatOrDouble
](val time_window: Int,
override val modelFunction: TunableTFModel.ModelFunc[
In,
Output[T],
(Output[T], Output[T]),
Loss,
IT,
ID,
IS,
Tensor[T],
DataType[T],
Shape,
(Tensor[T], Tensor[T]),
(DataType[T], DataType[T]),
(Shape, Shape)
],
val model_config_func: dtflearn.tunable_tf_model.ModelConfigFunction[
In,
Output[T]
],
override val hyp_params: Seq[String],
val persistent_hyp_params: Seq[String],
val params_to_mutable_params: Encoder[
dtflearn.tunable_tf_model.HyperParams,
dtflearn.tunable_tf_model.HyperParams
],
override protected val training_data: DataSet[Pattern],
override val tf_data_handle_ops: dtflearn.model.TFDataHandleOps[
Pattern,
(IT, Tensor[T]),
(Tensor[T], Tensor[T]),
(In, Output[T])
],
override val fitness_to_scalar: DataPipe[Seq[Tensor[Float]], Double] =
DataPipe[Seq[Tensor[Float]], Double](m =>
m.map(_.scalar.toDouble).sum / m.length),
override protected val validation_data: Option[DataSet[Pattern]] = None,
override protected val data_split_func: Option[DataPipe[Pattern, Boolean]] =
None)
extends TunableTFModel[Pattern, In, Output[T], (Output[T], Output[T]), Loss, IT, ID, IS, Tensor[
T
], DataType[T], Shape, (Tensor[T], Tensor[T]), (DataType[T], DataType[T]), (Shape, Shape)](
modelFunction,
model_config_func,
hyp_params,
training_data,
tf_data_handle_ops,
Seq(PDTModel.s0, PDTModel.c1, PDTModel.c2),
fitness_to_scalar,
validation_data,
data_split_func
) {
type Model = TFModel[
In,
Output[T],
(Output[T], Output[T]),
Loss,
IT,
ID,
IS,
Tensor[T],
DataType[T],
Shape,
(Tensor[T], Tensor[T]),
(DataType[T], DataType[T]),
(Shape, Shape)
]
val mutable_params_to_metric_functions: DataPipe[
dtflearn.tunable_tf_model.HyperParams,
Seq[
DataPipe2[(Output[T], Output[T]), Output[T], Output[Float]]
]
] =
DataPipe(
(c: Map[String, Double]) =>
Seq(
PDTModel.s0,
PDTModel.c1(
c("alpha").asInstanceOf[T],
c("sigma_sq").asInstanceOf[T],
time_window
),
PDTModel.c2(
c("alpha").asInstanceOf[T],
c("sigma_sq").asInstanceOf[T],
time_window
)
)
)
val params_to_metric_funcs: DataPipe[
dtflearn.tunable_tf_model.HyperParams,
Seq[
DataPipe2[(Output[T], Output[T]), Output[T], Output[Float]]
]
] = params_to_mutable_params > mutable_params_to_metric_functions
val metrics_to_mutable_params
: DataPipe[Seq[Tensor[Float]], dtflearn.tunable_tf_model.HyperParams] =
DataPipe((s: Seq[Tensor[Float]]) => {
val s0 = s(0).scalar.toDouble
val c1 = s(1).scalar.toDouble / s0
Map(
"alpha" -> math
.max(time_window * (1d - c1) / (c1 * (time_window - 1)), 0d),
"sigma_sq" -> s0 * (time_window - c1) / (time_window - 1)
)
})
def write_data_sets(
directory: Path,
pattern_to_str: DataPipe[Pattern, (String, String)]
): Unit = {
println("Writing training data set")
val train_features_file = directory / "train_split_features.csv"
val validation_features_file = directory / "validation_split_features.csv"
val train_targets_file = directory / "train_split_targets.csv"
val validation_targets_file = directory / "validation_split_targets.csv"
train_split.data.foreach(pattern => {
val (input_pattern, target_pattern) = pattern_to_str(pattern)
write.append(train_features_file, input_pattern)
write.append(train_targets_file, target_pattern)
})
println("Writing validation data sets")
validation_split.data.foreach(pattern => {
val (input_pattern, target_pattern) = pattern_to_str(pattern)
write.append(validation_features_file, input_pattern)
write.append(validation_targets_file, target_pattern)
})
}
def write_model_outputs(
model_instance: Option[Model],
train_config: dtflearn.model.Config[(In, Output[T])],
iden: String
): Unit = {
val handle_ops: dtflearn.model.TFDataHandleOps[
Pattern,
IT,
(Tensor[T], Tensor[T]),
In
] = dtflearn.model.tf_data_handle_ops[Pattern, IT, (Tensor[T], Tensor[T]), In](
bufferSize = tf_data_handle_ops.bufferSize,
patternToTensor = Some(tf_data_handle_ops.patternToTensor.get > tup2_1[IT, Tensor[T]])
)
val training_data_preds
: Either[(Tensor[T], Tensor[T]), DataSet[(Tensor[T], Tensor[T])]] =
model_instance.get.infer_batch(
train_split,
handle_ops
)
val intermediate_results_dir = train_config.summaryDir
training_data_preds match {
case Left(outputs) => {
timelag.utils
.write_model_outputs[T](
outputs,
intermediate_results_dir,
s"train_split_${iden}"
)
}
case Right(collection) => {
collection.data.foreach(
batch =>
timelag.utils.write_model_outputs[T](
batch,
intermediate_results_dir,
s"train_split_${iden}",
append = true
)
)
}
}
}
private def update(
p: Map[String, Double],
h: Map[String, Double],
iteration_index: Int,
config: Option[dtflearn.model.Config[(In, Output[T])]] = None,
eval_trigger: Option[Int] = None,
log_predictors: Boolean = false
): Map[String, Double] = {
//Create the training configuration based on the given hyper-parameters
val train_config = config match {
case None => modelConfigFunc(p.toMap)
case Some(config) => config
}
val stability_metrics = params_to_metric_funcs(h)
.zip(PDTModel.stability_quantities)
.map(fitness_function => {
Performance[((Output[T], Output[T]), (In, Output[T]))](
fitness_function._2,
DataPipe[
((Output[T], Output[T]), (In, Output[T])),
Output[Float]
](
c => fitness_function._1(c._1, c._2._2)
)
)
})
val eval_metrics = eval_trigger match {
case None => None
case Some(t) =>
Some(
PDTModel.stability_quantities
.zip(params_to_metric_funcs(h))
)
}
val (model_instance, updated_params): (Option[Model], Map[String, Double]) =
try {
//Train model instance
val model: Model = train_model(
p ++ h,
Some(train_config),
evaluation_metrics = eval_metrics,
eval_trigger,
evaluate_train = false
)
println("Computing PDT stability metrics.")
//Compute stability metrics s0, c1 & c2
val metrics = model.evaluate(
train_data_tf,
train_split.size,
stability_metrics,
train_config.data_processing.copy(shuffleBuffer = 0, repeat = 0),
true,
null
)
//Return updated loss function parameters alpha and sigma^2
val new_state =
(metrics_to_mutable_params > params_to_mutable_params.i)(metrics)
(Some(model), new_state)
} catch {
case e: java.lang.IllegalStateException =>
(None, h)
case e: Throwable =>
e.printStackTrace()
(None, h)
}
if (log_predictors && model_instance.isDefined) {
write_model_outputs(
model_instance,
train_config,
s"pdtit_${iteration_index}"
)
model_instance.map(_.close())
}
println("\\nUpdated Parameters: ")
pprint.pprintln(p ++ updated_params)
println()
updated_params
}
def solve(
pdt_iterations: Int,
hyper_params: TunableTFModel.HyperParams,
config: Option[dtflearn.model.Config[(In, Output[T])]] = None,
eval_trigger: Option[Int] = None,
log_predictors: Boolean = false
): Map[String, Double] = {
val (p, t) =
hyper_params.toSeq.partition(kv => persistent_hyp_params.contains(kv._1))
if (pdt_iterations > 0) (1 to pdt_iterations).foldLeft(t.toMap)((s, it) => {
val buffstr = if (it >= 10) "=" * (math.log10(it).toInt) else ""
println()
println(s"╔=═════════════════════════════════════════${buffstr}═╗")
println(s"║ PDT Alternate Optimization - Iteration: ${it} ║")
println(s"╚══════════════════════════════════════════${buffstr}=╝")
println()
update(p.toMap, s, it, config, eval_trigger, log_predictors)
})
else t.toMap
}
def build(
pdt_iterations: Int,
hyper_params: dtflearn.tunable_tf_model.HyperParams,
config: Option[dtflearn.model.Config[(In, Output[T])]] = None,
eval_trigger: Option[Int] = None,
log_predictors: Boolean = false,
pattern_to_str: Option[DataPipe[Pattern, (String, String)]] = None
) = {
val p = hyper_params.filterKeys(persistent_hyp_params.contains _)
//Train and evaluate the model on the given hyper-parameters
//Start by loading the model configuration,
//which depends only on the `persistent`
//hyper-parameters.
val train_config = config match {
case None => modelConfigFunc(p.toMap)
case Some(config) => config
}
if (pattern_to_str.isDefined)
write_data_sets(train_config.summaryDir, pattern_to_str.get)
//Run the hyper-parameter refinement procedure.
val final_config: Map[String, Double] =
solve(
pdt_iterations,
hyper_params,
Some(train_config),
eval_trigger,
log_predictors
)
val stability_metrics = params_to_metric_funcs(final_config)
.zip(PDTModel.stability_quantities)
.map(fitness_function => {
Performance[((Output[T], Output[T]), (In, Output[T]))](
fitness_function._2,
DataPipe[
((Output[T], Output[T]), (In, Output[T])),
Output[Float]
](
c => fitness_function._1(c._1, c._2._2)
)
)
})
val eval_metrics = eval_trigger match {
case None => None
case Some(t) =>
Some(
PDTModel.stability_quantities
.zip(params_to_metric_funcs(final_config))
)
}
println("\\nTraining model based on final chosen parameters")
pprint.pprintln(p.toMap ++ final_config)
println()
val model = train_model(
p.toMap ++ final_config,
Some(train_config),
evaluation_metrics = eval_metrics,
eval_trigger,
evaluate_train = false
)
if (log_predictors)
write_model_outputs(
Some(model),
train_config,
s"pdtit_${pdt_iterations + 1}"
)
(model, final_config)
}
override def energy(
hyper_params: TunableTFModel.HyperParams,
options: Map[String, String]
): Double = {
val p = hyper_params.filterKeys(persistent_hyp_params.contains _)
//Train and evaluate the model on the given hyper-parameters
//Start by loading the model configuration,
//which depends only on the `persistent`
//hyper-parameters.
val train_config = modelConfigFunc(p.toMap)
//The number of times the mutable hyper-parameters
//will be updated.
val loop_count = options.getOrElse("loops", "2").toInt
//Now compute the model fitness score.
val (fitness, comment, final_config) = try {
//Run the refinement procedure.
val (model, final_config) = build(
loop_count,
hyper_params,
eval_trigger = options.get("evalTrigger").map(_.toInt)
)
val stability_metrics = params_to_metric_funcs(final_config)
.zip(PDTModel.stability_quantities)
.map(fitness_function => {
Performance[((Output[T], Output[T]), (In, Output[T]))](
fitness_function._2,
DataPipe[
((Output[T], Output[T]), (In, Output[T])),
Output[Float]
](
c => fitness_function._1(c._1, c._2._2)
)
)
})
println("Computing Energy.")
val e = fitness_to_scalar(
model.evaluate(
validation_data_tf,
validation_split.size,
stability_metrics,
train_config.data_processing.copy(shuffleBuffer = 0, repeat = 0),
true,
null
)
)
model.close()
(e, None, final_config)
} catch {
case e: java.lang.IllegalStateException =>
(Double.PositiveInfinity, Some(e.getMessage), hyper_params)
case e: Throwable =>
e.printStackTrace()
(Double.PositiveInfinity, Some(e.getMessage), hyper_params)
}
//Append the model fitness to the hyper-parameter configuration
val hyp_config_json = write_json(
p.toMap ++ final_config ++ Map(
"energy" -> fitness,
"comment" -> comment.getOrElse("")
)
)
//Write the configuration along with its fitness into the model
//instance's summary directory
    write.append(train_config.summaryDir / "state.json", hyp_config_json + "\n")
//Return the model fitness.
fitness
}
}
object PDTModel {
final val mutable_params: Seq[String] = Seq("alpha", "sigma_sq")
val stability_quantities = Seq("s0", "c1", "c2")
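  // Added note (descriptive, based on the definitions below): for a prediction/probability
  // pair over a time window of size n, s0 is the mean squared error across the window,
  // c1 is the probability-weighted sum of squared errors, and c2 is the probability-weighted
  // variance of the squared errors around c1; the overloads taking (alpha, sigma_sq) first
  // re-weight the probabilities by the saddle-point factor before computing c1 and c2.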
def s0[T: TF: IsFloatOrDouble] =
DataPipe2[(Output[T], Output[T]), Output[T], Output[Float]](
(outputs, targets) => {
val (preds, probs) = outputs
val sq_errors = preds.subtract(targets).square
sq_errors.mean(axes = 1).castTo[Float]
}
)
def c1[T: TF: IsFloatOrDouble] =
DataPipe2[(Output[T], Output[T]), Output[T], Output[Float]](
(outputs, targets) => {
val (preds, probs) = outputs
val sq_errors = preds.subtract(targets).square
probs.multiply(sq_errors).sum(axes = 1).castTo[Float]
}
)
def c2[T: TF: IsFloatOrDouble] =
DataPipe2[(Output[T], Output[T]), Output[T], Output[Float]](
(outputs, targets) => {
val (preds, probs) = outputs
val sq_errors = preds.subtract(targets).square
val c1 = probs.multiply(sq_errors).sum(axes = 1, keepDims = true)
probs
.multiply(sq_errors.subtract(c1).square)
.sum(axes = 1)
.castTo[Float]
}
)
def c1[T: TF: IsFloatOrDouble](alpha: T, sigma_sq: T, n: Int) =
DataPipe2[(Output[T], Output[T]), Output[T], Output[Float]](
(outputs, targets) => {
val (preds, probs) = outputs
val sq_errors = preds.subtract(targets).square
val one = Tensor(1d).toOutput.castTo[T]
val two = Tensor(2d).toOutput.castTo[T]
val un_p = probs * (
tf.exp(
tf.log(one + alpha) / two - (sq_errors * alpha) / (two * sigma_sq)
)
)
//Calculate the saddle point probability
val p = un_p / un_p.sum(axes = 1, keepDims = true)
val c1 = p.multiply(sq_errors).sum(axes = 1).castTo[Float]
c1
}
)
def c2[T: TF: IsFloatOrDouble](alpha: T, sigma_sq: T, n: Int) =
DataPipe2[(Output[T], Output[T]), Output[T], Output[Float]](
(outputs, targets) => {
val (preds, probs) = outputs
val sq_errors = preds.subtract(targets).square
val one = Tensor(1d).toOutput.castTo[T]
val two = Tensor(2d).toOutput.castTo[T]
val un_p = probs * (
tf.exp(
tf.log(one + alpha) / two - (sq_errors * alpha) / (two * sigma_sq)
)
)
//Calculate the saddle point probability
val p = un_p / un_p.sum(axes = 1, keepDims = true)
val c1 = p.multiply(sq_errors).sum(axes = 1, keepDims = true)
p.multiply(sq_errors.subtract(c1).square)
.sum(axes = 1)
.castTo[Float]
}
)
def apply[
Pattern,
In,
IT,
ID,
IS,
T: TF: IsFloatOrDouble,
Loss: TF: IsFloatOrDouble
](time_window: Int,
modelFunction: TunableTFModel.ModelFunc[
In,
Output[T],
(Output[T], Output[T]),
Loss,
IT,
ID,
IS,
Tensor[T],
DataType[T],
Shape,
(Tensor[T], Tensor[T]),
(DataType[T], DataType[T]),
(Shape, Shape)
],
model_config_func: dtflearn.tunable_tf_model.ModelConfigFunction[
In,
Output[T]
],
hyp_params: Seq[String],
persistent_hyp_params: Seq[String],
params_to_mutable_params: Encoder[
dtflearn.tunable_tf_model.HyperParams,
dtflearn.tunable_tf_model.HyperParams
],
training_data: DataSet[Pattern],
tf_data_handle_ops: dtflearn.model.TFDataHandleOps[
Pattern,
(IT, Tensor[T]),
(Tensor[T], Tensor[T]),
(In, Output[T])
],
fitness_to_scalar: DataPipe[Seq[Tensor[Float]], Double] =
DataPipe[Seq[Tensor[Float]], Double](m =>
m.map(_.scalar.toDouble).sum / m.length),
validation_data: Option[DataSet[Pattern]] = None,
data_split_func: Option[DataPipe[Pattern, Boolean]] = None
) = new PDTModel[Pattern, In, IT, ID, IS, T, Loss](
time_window,
modelFunction,
model_config_func,
hyp_params,
persistent_hyp_params,
params_to_mutable_params,
training_data,
tf_data_handle_ops,
fitness_to_scalar,
validation_data,
data_split_func
)
}
| mandar2812/PlasmaML | helios/src/main/scala/io/github/mandar2812/PlasmaML/helios/PDTModel.scala | Scala | lgpl-2.1 | 19,795 |
package ghtorrent
case class GHTorrentException(message: String) extends Exception(message)
| PRioritizer/PRioritizer-analyzer | src/main/scala/ghtorrent/GHTorrentException.scala | Scala | mit | 94 |
/**
* Copyright 2016, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.utils
import spray.json.JsObject
import io.deepsense.deeplang.InnerWorkflowParser
import io.deepsense.deeplang.doperables.{CustomTransformer, ParamWithValues}
import io.deepsense.deeplang.params.custom.PublicParam
object CustomTransformerFactory {
def createCustomTransformer(
innerWorkflowParser: InnerWorkflowParser,
innerWorkflowJson: JsObject): CustomTransformer = {
val innerWorkflow = innerWorkflowParser.parse(innerWorkflowJson)
val selectedParams: Seq[ParamWithValues[_]] =
innerWorkflow.publicParams.flatMap {
case PublicParam(nodeId, paramName, publicName) =>
innerWorkflow.graph.nodes.find(_.id == nodeId)
.flatMap(node => node.value.params.find(_.name == paramName)
.map(p => {
ParamWithValues(
param = p.replicate(publicName),
defaultValue = node.value.getDefault(p),
setValue = node.value.get(p))
}))
}
CustomTransformer(innerWorkflow, selectedParams)
}
}
| deepsense-io/seahorse-workflow-executor | deeplang/src/main/scala/io/deepsense/deeplang/utils/CustomTransformerFactory.scala | Scala | apache-2.0 | 1,641 |
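CustomTransformerFactory resolves each PublicParam by locating its node in the inner workflow graph, finding the matching parameter, and republishing it under the public name together with its default and set values. The sketch below reproduces that lookup with simplified stand-in types; every class here is hypothetical and only mirrors the shape of the real deeplang API.

object PublicParamLookupSketch {
  case class Param(name: String) { def replicate(newName: String): Param = Param(newName) }
  case class Node(id: String, params: Seq[Param], values: Map[String, String])
  case class PublicParam(nodeId: String, paramName: String, publicName: String)
  case class ParamWithValues(param: Param, setValue: Option[String])

  def resolve(nodes: Seq[Node], publicParams: Seq[PublicParam]): Seq[ParamWithValues] =
    publicParams.flatMap { case PublicParam(nodeId, paramName, publicName) =>
      nodes.find(_.id == nodeId).flatMap { node =>
        node.params.find(_.name == paramName).map { p =>
          // Replicate the inner param under its public name and carry over the value set on the node.
          ParamWithValues(p.replicate(publicName), node.values.get(paramName))
        }
      }
    }

  def main(args: Array[String]): Unit = {
    val nodes = Seq(Node("n1", Seq(Param("ratio")), Map("ratio" -> "0.7")))
    println(resolve(nodes, Seq(PublicParam("n1", "ratio", "split ratio"))))
  }
}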
/*******************************************************************************
* (C) Copyright 2015 ADP, LLC.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*******************************************************************************/
package unicorn.unibase
import java.nio.ByteBuffer
import unicorn.bigtable.{Column, ColumnFamily}
import unicorn.json._
/** Document serializer. By default, document key size is up to 64KB, column size is up to 10MB.
*
* @author Haifeng Li
*/
class DocumentSerializer(
val keySerializer: BsonSerializer = new BsonSerializer(ByteBuffer.allocate(65536)),
val valueSerializer: ColumnarJsonSerializer = new ColumnarJsonSerializer(ByteBuffer.allocate(10485760))) {
/** Serialize document data. */
def serialize(json: JsObject): Seq[Column] = {
valueSerializer.serialize(json).map { case (path, value) =>
Column(valueSerializer.str2Bytes(path), value)
}.toSeq
}
/** Serialize document id. */
def serialize(tenant: JsValue, id: JsValue): Array[Byte] = {
keySerializer.clear
keySerializer.put(tenant)
keySerializer.put(id)
keySerializer.toBytes
}
/** Return the row prefix of a tenant. */
def tenantRowKeyPrefix(tenant: JsValue): Array[Byte] = {
keySerializer.clear
keySerializer.put(tenant)
keySerializer.toBytes
}
/** Deserialize document key. */
def deserialize(key: Array[Byte]): (JsValue, JsValue) = {
val buffer = ByteBuffer.wrap(key)
val tenant = keySerializer.deserialize(buffer)
val id = keySerializer.deserialize(buffer)
(tenant, id)
}
/** Assembles the document from multi-column family data. */
def deserialize(data: Seq[ColumnFamily]): Option[JsObject] = {
val objects = data.map { case ColumnFamily(family, columns) =>
val map = columns.map { case Column(qualifier, value, _) =>
(new String(qualifier, valueSerializer.charset), value.bytes)
}.toMap
val json = valueSerializer.deserialize(map)
json.asInstanceOf[JsObject]
}
if (objects.size == 0)
None
else if (objects.size == 1)
Some(objects(0))
else {
val fold = objects.foldLeft(JsObject()) { (doc, family) =>
doc.fields ++= family.fields
doc
}
Some(fold)
}
}
}
| adplabs/unicorn | unibase/src/main/scala/unicorn/unibase/DocumentSerializer.scala | Scala | apache-2.0 | 2,783 |
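The last deserialize method above rebuilds a document by folding the JsObjects decoded from each column family into a single object. The following sketch shows the same merge idea on plain Scala Maps; it is a conceptual illustration, not the unicorn JSON API.

object ColumnFamilyMergeSketch {
  type Fields = Map[String, Any]

  // Mirror of the fold in deserialize: no families yields no document,
  // one family is returned as-is, several families are merged field by field.
  def merge(families: Seq[Fields]): Option[Fields] =
    families match {
      case Seq()       => None
      case Seq(single) => Some(single)
      case many        => Some(many.foldLeft(Map.empty[String, Any])(_ ++ _))
    }

  def main(args: Array[String]): Unit = {
    val doc = merge(Seq(Map("name" -> "alice"), Map("score" -> 42)))
    println(doc) // Some(Map(name -> alice, score -> 42))
  }
}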
package nest.sparkle.util
import scala.concurrent.Promise
import org.scalatest.{Matchers, FunSuite}
import rx.lang.scala.Observable
import nest.sparkle.util.ObservableFuture._
import nest.sparkle.util.FutureAwait.Implicits._
import scala.concurrent.duration._
class TestObservableUtil extends FunSuite with Matchers {
test("headTail") {
val obs = Observable.from(Seq(1,2,3))
val (head, tail) = ObservableUtil.headTail(obs)
head.toBlocking.toList shouldBe Seq(1)
tail.toBlocking.toList shouldBe Seq(2,3)
}
test("headTail on empty observable") {
val (head,tail) = ObservableUtil.headTail(Observable.empty)
head.toBlocking.toList shouldBe Seq()
tail.toBlocking.toList shouldBe Seq()
}
test("headTail on ongoing observable") {
val obs = Observable.from(Seq(3,2)) ++ Observable.interval(20.milliseconds)
val (head,tail) = ObservableUtil.headTail(obs)
head.toBlocking.toList shouldBe Seq(3)
tail.take(4).toBlocking.toList shouldBe Seq(2,0,1,2)
}
test("reduceSafe on empty observable") {
val result = ObservableUtil.reduceSafe(Observable.empty){(a:Int,b:Int) => a}
result.toBlocking.toList shouldBe Seq()
}
test("reduceSafe") {
val obs = Observable.from(Seq(1,2,3))
val result = ObservableUtil.reduceSafe(obs){ _ + _ }
result.toBlocking.toList shouldBe Seq(6)
}
}
| mighdoll/sparkle | util-tests/src/test/java/nest/sparkle/util/TestObservableUtil.scala | Scala | apache-2.0 | 1,352 |
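These tests pin down the intended semantics of ObservableUtil.headTail and reduceSafe: the head stream carries at most one element, and reduceSafe yields an empty result instead of failing on an empty input. The sketch below restates those semantics on plain Seqs purely for illustration; it is not the rx-based implementation.

object ObservableUtilSemanticsSketch {
  // headTail: at most one element in the head, everything else in the tail.
  def headTail[A](xs: Seq[A]): (Seq[A], Seq[A]) = (xs.take(1), xs.drop(1))

  // reduceSafe: like reduce, but an empty input yields an empty result instead of an error.
  def reduceSafe[A](xs: Seq[A])(f: (A, A) => A): Seq[A] =
    if (xs.isEmpty) Seq.empty else Seq(xs.reduce(f))

  def main(args: Array[String]): Unit = {
    val (head, tail) = headTail(Seq(1, 2, 3))
    assert(head == Seq(1) && tail == Seq(2, 3))
    assert(reduceSafe(Seq.empty[Int])(_ + _) == Seq.empty)
    assert(reduceSafe(Seq(1, 2, 3))(_ + _) == Seq(6))
    println("semantics hold on Seq")
  }
}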
/**
---------------------------------------------------------------------------
Copyright (c) 2011 Dan Simpson
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
---------------------------------------------------------------------------
**/
package org.ds.satchel.processors
class JstProcessor extends SatchelProcessor {
override def process(content:String):String = {
"JST={};" + content
}
override def process(path:String, content:String):String = {
"JST[\\"" + path.replace('\\\\','/') + "\\"] = \\"" + clean(content) + "\\";";
}
private def clean(content:String) = {
content
.replaceAll("\\"", "\\\\\\\\\\"")
.replaceAll(""">\\s+<""","><")
.replaceAll("\\n","")
}
} | dansimpson/satchel | core/src/main/scala/org/ds/satchel/processors/JstProcessor.scala | Scala | mit | 1,717 |
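A small usage sketch for the processor above, showing what process produces for a made-up template path and content; the expected outputs follow directly from the string building and clean rules in the class.

object JstProcessorExample extends App {
  val processor = new org.ds.satchel.processors.JstProcessor

  // Whitespace between tags is collapsed and the template is registered on the JST object.
  val compiled = processor.process("views/hello", "<ul> <li>Hi</li> </ul>")
  println(compiled) // JST["views/hello"] = "<ul><li>Hi</li></ul>";

  // The no-path overload just prepends the JST namespace declaration.
  println(processor.process("var x = 1;")) // JST={};var x = 1;
}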
package pl.touk.nussknacker.openapi.functional
import com.typesafe.scalalogging.LazyLogging
import org.asynchttpclient.DefaultAsyncHttpClient
import org.scalatest._
import pl.touk.nussknacker.engine.api._
import pl.touk.nussknacker.engine.api.test.EmptyInvocationCollector.Instance
import pl.touk.nussknacker.engine.api.typed.TypedMap
import pl.touk.nussknacker.engine.lite.api.runtimecontext.LiteEngineRuntimeContextPreparer
import pl.touk.nussknacker.engine.util.service.EagerServiceWithStaticParametersAndReturnType
import pl.touk.nussknacker.openapi.enrichers.{SwaggerEnricherCreator, SwaggerEnrichers}
import pl.touk.nussknacker.openapi.http.backend.FixedAsyncHttpClientBackendProvider
import pl.touk.nussknacker.openapi.parser.SwaggerParser
import pl.touk.nussknacker.openapi.{ApiKeyConfig, OpenAPIServicesConfig}
import pl.touk.nussknacker.test.PatientScalaFutures
import java.net.URL
import scala.collection.JavaConverters._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.io.Source
class OpenAPIServiceSpec extends fixture.FunSuite with BeforeAndAfterAll with Matchers with LazyLogging with PatientScalaFutures {
implicit val metaData: MetaData = MetaData("testProc", StreamMetaData())
implicit val contextId: ContextId = ContextId("testContextId")
type FixtureParam = EagerServiceWithStaticParametersAndReturnType
def withFixture(test: OneArgTest): Outcome = {
val definition = Source.fromInputStream(getClass.getClassLoader.getResourceAsStream("customer-swagger.json")).mkString
val client = new DefaultAsyncHttpClient()
try {
StubService.withCustomerService { port =>
val securities = Map("apikey" -> ApiKeyConfig("TODO"))
val config = OpenAPIServicesConfig(securities = Some(securities),
rootUrl = Some(new URL(s"http://localhost:$port")))
val services = SwaggerParser.parse(definition, config)
val enricher = new SwaggerEnrichers(Some(new URL(s"http://localhost:$port")), new SwaggerEnricherCreator(new FixedAsyncHttpClientBackendProvider(client)))
.enrichers(services, Nil, Map.empty).head.service.asInstanceOf[EagerServiceWithStaticParametersAndReturnType]
enricher.open(LiteEngineRuntimeContextPreparer.noOp
.prepare(JobData(metaData, ProcessVersion.empty)))
withFixture(test.toNoArgTest(enricher))
}
} finally {
client.close()
}
}
test("service returns customers") { service =>
val valueWithChosenFields = service.invoke(Map("customer_id" -> "10")).futureValue.asInstanceOf[TypedMap].asScala
valueWithChosenFields shouldEqual Map("name" -> "Robert Wright", "id" -> 10, "category" -> "GOLD")
}
}
| TouK/nussknacker | components/openapi/src/it/scala/pl/touk/nussknacker/openapi/functional/OpenAPIServiceSpec.scala | Scala | apache-2.0 | 2,687 |
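The spec above uses ScalaTest's loan-fixture pattern: withFixture builds the enricher, lends it to each test through toNoArgTest, and tears everything down afterwards. Below is a minimal, generic illustration of that pattern with a throwaway StringBuilder standing in for the HTTP client and enricher.

import org.scalatest.{fixture, Outcome}

class LoanFixtureSketch extends fixture.FunSuite {
  type FixtureParam = StringBuilder

  def withFixture(test: OneArgTest): Outcome = {
    val sb = new StringBuilder("fixture ready")   // set up the resource
    try withFixture(test.toNoArgTest(sb))         // lend it to the test body
    finally sb.clear()                            // tear down afterwards
  }

  test("tests receive the prepared fixture") { sb =>
    assert(sb.toString == "fixture ready")
  }
}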
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.tasks
import java.io.IOException
import scala.collection.mutable
import org.apache.carbondata.core.cache.dictionary.{Dictionary, DictionaryColumnUniqueIdentifier}
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.metadata.{CarbonTableIdentifier, ColumnIdentifier}
import org.apache.carbondata.core.metadata.schema.table.column.ColumnSchema
import org.apache.carbondata.core.service.CarbonCommonFactory
import org.apache.carbondata.core.util.DataTypeUtil
import org.apache.carbondata.core.writer.CarbonDictionaryWriter
/**
 * Task that sorts the accumulated distinct values of a column and appends the
 * values not yet present in the dictionary to the dictionary file.
 *
 * @param valuesBuffer distinct values collected for the column
 * @param dictionary existing dictionary of the column, used to skip already known values
 * @param dictionaryColumnUniqueIdentifier unique identifier of the column dictionary
 * @param columnSchema schema of the column being processed
 * @param isDictionaryFileExist whether a dictionary file already exists for the column
 * @param writer dictionary writer; created in `execute` when not supplied
 */
class DictionaryWriterTask(valuesBuffer: mutable.HashSet[String],
dictionary: Dictionary,
dictionaryColumnUniqueIdentifier: DictionaryColumnUniqueIdentifier,
columnSchema: ColumnSchema,
isDictionaryFileExist: Boolean,
var writer: CarbonDictionaryWriter = null) {
  /**
   * Execute the task: sort the values and write the ones missing from the
   * existing dictionary to the dictionary file.
   *
   * @return the list of distinct values written to the dictionary
   */
def execute(): java.util.List[String] = {
val values = valuesBuffer.toArray
java.util.Arrays.sort(values, Ordering[String])
val dictService = CarbonCommonFactory.getDictionaryService
writer = dictService.getDictionaryWriter(dictionaryColumnUniqueIdentifier)
val distinctValues: java.util.List[String] = new java.util.ArrayList()
try {
if (!isDictionaryFileExist) {
writer.write(CarbonCommonConstants.MEMBER_DEFAULT_VAL)
distinctValues.add(CarbonCommonConstants.MEMBER_DEFAULT_VAL)
}
if (values.length >= 1) {
if (isDictionaryFileExist) {
for (value <- values) {
val parsedValue = DataTypeUtil.normalizeColumnValueForItsDataType(value,
columnSchema)
if (null != parsedValue && dictionary.getSurrogateKey(parsedValue) ==
CarbonCommonConstants.INVALID_SURROGATE_KEY) {
writer.write(parsedValue)
distinctValues.add(parsedValue)
}
}
} else {
for (value <- values) {
val parsedValue = DataTypeUtil.normalizeColumnValueForItsDataType(value,
columnSchema)
if (null != parsedValue) {
writer.write(parsedValue)
distinctValues.add(parsedValue)
}
}
}
}
} catch {
case ex: IOException =>
throw ex
} finally {
if (null != writer) {
writer.close()
}
}
distinctValues
}
/**
* update dictionary metadata
*/
def updateMetaData() {
if (null != writer) {
writer.commit()
}
}
}
| sgururajshetty/carbondata | integration/spark-common/src/main/scala/org/apache/carbondata/spark/tasks/DictionaryWriterTask.scala | Scala | apache-2.0 | 3,596 |
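DictionaryWriterTask.execute sorts the incoming values and writes only those the existing dictionary does not already contain, preceded by a default member when no dictionary file exists yet. The sketch below captures that selection logic in memory, with a Set standing in for the dictionary; value normalization is omitted and the default-member string is a stand-in, not the CarbonData constant.

object DictionaryWriteSketch {
  def newDistinctValues(
    incoming: Set[String],
    existingDictionary: Set[String],
    dictionaryFileExists: Boolean): List[String] = {
    val sorted = incoming.toList.sorted
    // Stand-in for CarbonCommonConstants.MEMBER_DEFAULT_VAL, written only for a brand new dictionary.
    val defaultMember = if (dictionaryFileExists) Nil else List("<default-member>")
    defaultMember ++ sorted.filterNot(existingDictionary.contains)
  }

  def main(args: Array[String]): Unit = {
    val written = newDistinctValues(
      incoming = Set("banana", "apple", "cherry"),
      existingDictionary = Set("apple"),
      dictionaryFileExists = true)
    println(written) // List(banana, cherry)
  }
}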
package org.vitrivr.adampro.data.index.structures.va
import breeze.linalg._
import org.apache.spark.ml.feature.PCA
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{DataFrame, Dataset}
import org.vitrivr.adampro.config.AttributeNames
import org.apache.spark.ml.linalg.{DenseVector, Vectors}
import org.vitrivr.adampro.data.datatypes.vector.Vector._
import org.vitrivr.adampro.utils.exception.QueryNotConformException
import org.vitrivr.adampro.query.tracker.QueryTracker
import org.vitrivr.adampro.data.index.Index._
import org.vitrivr.adampro.data.index.structures.IndexTypes
import org.vitrivr.adampro.data.index.structures.va.marks.VAPlusMarksGenerator
import org.vitrivr.adampro.data.index.structures.va.signature.VariableSignatureGenerator
import org.vitrivr.adampro.data.index.{IndexGenerator, IndexGeneratorFactory, IndexingTaskTuple, ParameterInfo}
import org.vitrivr.adampro.process.SharedComponentContext
import org.vitrivr.adampro.query.distance.{DistanceFunction, MinkowskiDistance}
/**
* ADAMpro
*
* Ivan Giangreco
* September 2016
*
* see H. Ferhatosmanoglu, E. Tuncel, D. Agrawal, A. El Abbadi (2006): High dimensional nearest neighbor searching. Information Systems.
*/
class VAPlusIndexGenerator(totalNumOfBits: Option[Int], ndims : Option[Int], trainingSize: Int, distance: MinkowskiDistance)(@transient implicit val ac: SharedComponentContext) extends IndexGenerator {
override val indextypename: IndexTypeName = IndexTypes.VAPLUSINDEX
/**
*
* @param data raw data to index
* @return
*/
override def index(data: DataFrame, attribute : String)(tracker : QueryTracker): (DataFrame, Serializable) = {
log.trace("VA-File (plus) started indexing")
val meta = train(getSample(math.max(trainingSize, MINIMUM_NUMBER_OF_TUPLE), attribute)(data), data, attribute)
val pcaBc = ac.sc.broadcast(meta.pca)
val cellUDF = udf((c: DenseVector) => {
getCells(c.values, meta.marks).map(_.toShort)
})
val transformed = pcaBc.value.setInputCol(attribute).setOutputCol("ap_" + attribute + "pca").transform(data.withColumn(attribute, toVecUDF(data(attribute)))).drop(attribute).withColumnRenamed("ap_" + attribute + "pca", attribute)
val indexed = transformed.withColumn(AttributeNames.featureIndexColumnName, cellUDF(transformed(attribute)))
log.trace("VA-File (plus) finished indexing")
(indexed, meta)
}
/**
*
* @param array
* @return
*/
private def getMaxIndex(array: Array[Double]): Int = {
var maxIndex = -1
var max = Double.MinValue
for (index <- 0 until array.length) {
val element = array(index)
if (element > max) {
max = element
maxIndex = index
}
}
maxIndex
}
/**
*
* @param trainData training data
* @return
*/
private def train(trainData: Seq[IndexingTaskTuple], data : DataFrame, attribute : String): VAPlusIndexMetaData = {
log.trace("VA-File (plus) started training")
val dim = ndims.getOrElse(trainData.head.ap_indexable.size)
val pca = new PCA().setInputCol(attribute + "_vec").setK(dim).fit(data.withColumn(attribute + "_vec", toVecUDF(data(attribute))))
//data
val dTrainData = trainData.map(x => x.ap_indexable.map(x => x.toDouble).toArray)
val dataMatrix = DenseMatrix(dTrainData.toList: _*)
// pca
val variance = diag(cov(dataMatrix, center = true)).toArray
var k = 0
var modes = Seq.fill(dim)(0).toArray
    //based on results from the paper and from Weber/Böhm (2000): Trading Quality for Time with Nearest Neighbor Search
val nbits = totalNumOfBits.getOrElse(dim * math.max(5, math.ceil(5 + 0.5 * math.log(dim / 10) / math.log(2)).toInt))
while (k < nbits) {
val j = getMaxIndex(variance)
modes(j) += 1
variance(j) = variance(j) / 4.0
k += 1
}
val signatureGenerator = new VariableSignatureGenerator(modes)
val marks = VAPlusMarksGenerator.getMarks(trainData, modes.map(x => math.min(math.max(1, 2 << (x - 1)), Short.MaxValue)).toSeq)
log.trace("VA-File (variable) finished training")
new VAPlusIndexMetaData(marks, signatureGenerator, pca, dim > pca.getK)
}
val toVecUDF = udf((c: DenseSparkVector) => {
Vectors.dense(c.map(_.toDouble).toArray)
})
/**
*
*/
@inline private def getCells(f: Iterable[Double], marks: Seq[Seq[VectorBase]]): Seq[Int] = {
f.zip(marks).map {
case (x, l) =>
val index = l.toArray.indexWhere(p => p >= x, 1)
if (index == -1) l.length - 1 - 1 else index - 1
}.toSeq
}
}
class VAPlusIndexGeneratorFactory extends IndexGeneratorFactory {
/**
* @param distance distance function
* @param properties indexing properties
*/
def getIndexGenerator(distance: DistanceFunction, properties: Map[String, String] = Map[String, String]())(implicit ac: SharedComponentContext): IndexGenerator = {
if (!distance.isInstanceOf[MinkowskiDistance]) {
throw new QueryNotConformException("VAF index only supports Minkowski distance")
}
val nbits = if (properties.get("signature-nbits").isDefined) {
Some(properties.get("signature-nbits").get.toInt)
} else {
None
}
val trainingSize = properties.getOrElse("ntraining", "1000").toInt
val ndims = properties.get("ndims").map(_.toInt)
new VAPlusIndexGenerator(nbits, ndims, trainingSize, distance.asInstanceOf[MinkowskiDistance])
}
/**
*
* @return
*/
override def parametersInfo: Seq[ParameterInfo] = Seq(
new ParameterInfo("ntraining", "number of training tuples", Seq[String]()),
new ParameterInfo("signature-nbits", "number of bits for the complete signature", Seq(32, 64, 128, 256, 1024).map(_.toString)),
new ParameterInfo("ndims", "distribution of marks", Seq(64, 128, 256, 512, 1024).map(_.toString)) //TODO: this should rather be a function based on the ndims
)
} | dbisUnibas/ADAMpro | src/main/scala/org/vitrivr/adampro/data/index/structures/va/VAPlusIndexGenerator.scala | Scala | mit | 5,916 |
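The bit-allocation loop in train above is a greedy scheme: each of the nbits is handed to the dimension with the highest remaining variance, and that dimension's variance is then divided by four. A standalone restatement of just that loop:

object VariableBitAllocationSketch {
  def allocateBits(variancePerDim: Array[Double], totalBits: Int): Array[Int] = {
    val variance = variancePerDim.clone()
    val bits = Array.fill(variancePerDim.length)(0)
    var assigned = 0
    while (assigned < totalBits) {
      val j = variance.indexOf(variance.max) // dimension with the largest remaining variance
      bits(j) += 1
      variance(j) = variance(j) / 4.0
      assigned += 1
    }
    bits
  }

  def main(args: Array[String]): Unit = {
    // A high-variance first dimension receives more bits than the others.
    println(allocateBits(Array(16.0, 4.0, 1.0), totalBits = 8).toList) // List(4, 3, 1)
  }
}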
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.kafka010
import java.{util => ju}
import java.util.{Locale, UUID}
import scala.collection.JavaConverters._
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.kafka.clients.producer.ProducerConfig
import org.apache.kafka.common.serialization.{ByteArrayDeserializer, ByteArraySerializer}
import org.apache.spark.internal.Logging
import org.apache.spark.kafka010.KafkaConfigUpdater
import org.apache.spark.sql.{AnalysisException, DataFrame, SaveMode, SQLContext}
import org.apache.spark.sql.catalyst.util.CaseInsensitiveMap
import org.apache.spark.sql.execution.streaming.{Sink, Source}
import org.apache.spark.sql.sources._
import org.apache.spark.sql.sources.v2._
import org.apache.spark.sql.sources.v2.TableCapability._
import org.apache.spark.sql.sources.v2.reader.{Batch, Scan, ScanBuilder}
import org.apache.spark.sql.sources.v2.reader.streaming.{ContinuousStream, MicroBatchStream}
import org.apache.spark.sql.sources.v2.writer.{BatchWrite, WriteBuilder}
import org.apache.spark.sql.sources.v2.writer.streaming.StreamingWrite
import org.apache.spark.sql.streaming.OutputMode
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.util.CaseInsensitiveStringMap
/**
* The provider class for all Kafka readers and writers. It is designed such that it throws
* IllegalArgumentException when the Kafka Dataset is created, so that it can catch
* missing options even before the query is started.
*/
private[kafka010] class KafkaSourceProvider extends DataSourceRegister
with StreamSourceProvider
with StreamSinkProvider
with RelationProvider
with CreatableRelationProvider
with TableProvider
with Logging {
import KafkaSourceProvider._
override def shortName(): String = "kafka"
/**
* Returns the name and schema of the source. In addition, it also verifies whether the options
* are correct and sufficient to create the [[KafkaSource]] when the query is started.
*/
override def sourceSchema(
sqlContext: SQLContext,
schema: Option[StructType],
providerName: String,
parameters: Map[String, String]): (String, StructType) = {
validateStreamOptions(parameters)
require(schema.isEmpty, "Kafka source has a fixed schema and cannot be set with a custom one")
(shortName(), KafkaOffsetReader.kafkaSchema)
}
override def createSource(
sqlContext: SQLContext,
metadataPath: String,
schema: Option[StructType],
providerName: String,
parameters: Map[String, String]): Source = {
validateStreamOptions(parameters)
// Each running query should use its own group id. Otherwise, the query may be only assigned
// partial data since Kafka will assign partitions to multiple consumers having the same group
// id. Hence, we should generate a unique id for each query.
val uniqueGroupId = streamingUniqueGroupId(parameters, metadataPath)
val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
val specifiedKafkaParams = convertToSpecifiedParams(parameters)
val startingStreamOffsets = KafkaSourceProvider.getKafkaOffsetRangeLimit(caseInsensitiveParams,
STARTING_OFFSETS_OPTION_KEY, LatestOffsetRangeLimit)
val kafkaOffsetReader = new KafkaOffsetReader(
strategy(caseInsensitiveParams),
kafkaParamsForDriver(specifiedKafkaParams),
parameters,
driverGroupIdPrefix = s"$uniqueGroupId-driver")
new KafkaSource(
sqlContext,
kafkaOffsetReader,
kafkaParamsForExecutors(specifiedKafkaParams, uniqueGroupId),
parameters,
metadataPath,
startingStreamOffsets,
failOnDataLoss(caseInsensitiveParams))
}
override def getTable(options: CaseInsensitiveStringMap): KafkaTable = {
new KafkaTable
}
/**
* Returns a new base relation with the given parameters.
*
* @note The parameters' keywords are case insensitive and this insensitivity is enforced
* by the Map that is passed to the function.
*/
override def createRelation(
sqlContext: SQLContext,
parameters: Map[String, String]): BaseRelation = {
validateBatchOptions(parameters)
val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
val specifiedKafkaParams = convertToSpecifiedParams(parameters)
val startingRelationOffsets = KafkaSourceProvider.getKafkaOffsetRangeLimit(
caseInsensitiveParams, STARTING_OFFSETS_OPTION_KEY, EarliestOffsetRangeLimit)
assert(startingRelationOffsets != LatestOffsetRangeLimit)
val endingRelationOffsets = KafkaSourceProvider.getKafkaOffsetRangeLimit(caseInsensitiveParams,
ENDING_OFFSETS_OPTION_KEY, LatestOffsetRangeLimit)
assert(endingRelationOffsets != EarliestOffsetRangeLimit)
new KafkaRelation(
sqlContext,
strategy(caseInsensitiveParams),
sourceOptions = parameters,
specifiedKafkaParams = specifiedKafkaParams,
failOnDataLoss = failOnDataLoss(caseInsensitiveParams),
startingOffsets = startingRelationOffsets,
endingOffsets = endingRelationOffsets)
}
override def createSink(
sqlContext: SQLContext,
parameters: Map[String, String],
partitionColumns: Seq[String],
outputMode: OutputMode): Sink = {
val defaultTopic = parameters.get(TOPIC_OPTION_KEY).map(_.trim)
val specifiedKafkaParams = kafkaParamsForProducer(parameters)
new KafkaSink(sqlContext, specifiedKafkaParams, defaultTopic)
}
override def createRelation(
outerSQLContext: SQLContext,
mode: SaveMode,
parameters: Map[String, String],
data: DataFrame): BaseRelation = {
mode match {
case SaveMode.Overwrite | SaveMode.Ignore =>
throw new AnalysisException(s"Save mode $mode not allowed for Kafka. " +
s"Allowed save modes are ${SaveMode.Append} and " +
s"${SaveMode.ErrorIfExists} (default).")
case _ => // good
}
val topic = parameters.get(TOPIC_OPTION_KEY).map(_.trim)
val specifiedKafkaParams = kafkaParamsForProducer(parameters)
KafkaWriter.write(outerSQLContext.sparkSession, data.queryExecution, specifiedKafkaParams,
topic)
    /* This method is supposed to return a relation that reads the data that was written.
* We cannot support this for Kafka. Therefore, in order to make things consistent,
* we return an empty base relation.
*/
new BaseRelation {
override def sqlContext: SQLContext = unsupportedException
override def schema: StructType = unsupportedException
override def needConversion: Boolean = unsupportedException
override def sizeInBytes: Long = unsupportedException
override def unhandledFilters(filters: Array[Filter]): Array[Filter] = unsupportedException
private def unsupportedException =
throw new UnsupportedOperationException("BaseRelation from Kafka write " +
"operation is not usable.")
}
}
private def strategy(caseInsensitiveParams: Map[String, String]) =
caseInsensitiveParams.find(x => STRATEGY_OPTION_KEYS.contains(x._1)).get match {
case (ASSIGN, value) =>
AssignStrategy(JsonUtils.partitions(value))
case (SUBSCRIBE, value) =>
SubscribeStrategy(value.split(",").map(_.trim()).filter(_.nonEmpty))
case (SUBSCRIBE_PATTERN, value) =>
SubscribePatternStrategy(value.trim())
case _ =>
// Should never reach here as we are already matching on
// matched strategy names
throw new IllegalArgumentException("Unknown option")
}
private def failOnDataLoss(caseInsensitiveParams: Map[String, String]) =
caseInsensitiveParams.getOrElse(FAIL_ON_DATA_LOSS_OPTION_KEY, "true").toBoolean
private def validateGeneralOptions(parameters: Map[String, String]): Unit = {
// Validate source options
val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
val specifiedStrategies =
caseInsensitiveParams.filter { case (k, _) => STRATEGY_OPTION_KEYS.contains(k) }.toSeq
if (specifiedStrategies.isEmpty) {
throw new IllegalArgumentException(
"One of the following options must be specified for Kafka source: "
+ STRATEGY_OPTION_KEYS.mkString(", ") + ". See the docs for more details.")
} else if (specifiedStrategies.size > 1) {
throw new IllegalArgumentException(
"Only one of the following options can be specified for Kafka source: "
+ STRATEGY_OPTION_KEYS.mkString(", ") + ". See the docs for more details.")
}
caseInsensitiveParams.find(x => STRATEGY_OPTION_KEYS.contains(x._1)).get match {
case (ASSIGN, value) =>
if (!value.trim.startsWith("{")) {
throw new IllegalArgumentException(
"No topicpartitions to assign as specified value for option " +
s"'assign' is '$value'")
}
case (SUBSCRIBE, value) =>
val topics = value.split(",").map(_.trim).filter(_.nonEmpty)
if (topics.isEmpty) {
throw new IllegalArgumentException(
"No topics to subscribe to as specified value for option " +
s"'subscribe' is '$value'")
}
case (SUBSCRIBE_PATTERN, value) =>
val pattern = caseInsensitiveParams(SUBSCRIBE_PATTERN).trim()
if (pattern.isEmpty) {
throw new IllegalArgumentException(
"Pattern to subscribe is empty as specified value for option " +
s"'subscribePattern' is '$value'")
}
case _ =>
// Should never reach here as we are already matching on
// matched strategy names
throw new IllegalArgumentException("Unknown option")
}
// Validate minPartitions value if present
if (caseInsensitiveParams.contains(MIN_PARTITIONS_OPTION_KEY)) {
val p = caseInsensitiveParams(MIN_PARTITIONS_OPTION_KEY).toInt
if (p <= 0) throw new IllegalArgumentException("minPartitions must be positive")
}
// Validate user-specified Kafka options
if (caseInsensitiveParams.contains(s"kafka.${ConsumerConfig.GROUP_ID_CONFIG}")) {
logWarning(CUSTOM_GROUP_ID_ERROR_MESSAGE)
if (caseInsensitiveParams.contains(GROUP_ID_PREFIX)) {
logWarning("Option 'groupIdPrefix' will be ignored as " +
s"option 'kafka.${ConsumerConfig.GROUP_ID_CONFIG}' has been set.")
}
}
if (caseInsensitiveParams.contains(s"kafka.${ConsumerConfig.AUTO_OFFSET_RESET_CONFIG}")) {
throw new IllegalArgumentException(
s"""
|Kafka option '${ConsumerConfig.AUTO_OFFSET_RESET_CONFIG}' is not supported.
|Instead set the source option '$STARTING_OFFSETS_OPTION_KEY' to 'earliest' or 'latest'
|to specify where to start. Structured Streaming manages which offsets are consumed
|internally, rather than relying on the kafkaConsumer to do it. This will ensure that no
|data is missed when new topics/partitions are dynamically subscribed. Note that
|'$STARTING_OFFSETS_OPTION_KEY' only applies when a new Streaming query is started, and
|that resuming will always pick up from where the query left off. See the docs for more
|details.
""".stripMargin)
}
if (caseInsensitiveParams.contains(s"kafka.${ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG}")) {
throw new IllegalArgumentException(
s"Kafka option '${ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG}' is not supported as keys "
+ "are deserialized as byte arrays with ByteArrayDeserializer. Use DataFrame operations "
+ "to explicitly deserialize the keys.")
}
if (caseInsensitiveParams.contains(s"kafka.${ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG}"))
{
throw new IllegalArgumentException(
s"Kafka option '${ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG}' is not supported as "
+ "values are deserialized as byte arrays with ByteArrayDeserializer. Use DataFrame "
+ "operations to explicitly deserialize the values.")
}
val otherUnsupportedConfigs = Seq(
ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, // committing correctly requires new APIs in Source
ConsumerConfig.INTERCEPTOR_CLASSES_CONFIG) // interceptors can modify payload, so not safe
otherUnsupportedConfigs.foreach { c =>
if (caseInsensitiveParams.contains(s"kafka.$c")) {
throw new IllegalArgumentException(s"Kafka option '$c' is not supported")
}
}
if (!caseInsensitiveParams.contains(s"kafka.${ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG}")) {
throw new IllegalArgumentException(
s"Option 'kafka.${ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG}' must be specified for " +
s"configuring Kafka consumer")
}
}
private def validateStreamOptions(caseInsensitiveParams: Map[String, String]) = {
// Stream specific options
caseInsensitiveParams.get(ENDING_OFFSETS_OPTION_KEY).map(_ =>
throw new IllegalArgumentException("ending offset not valid in streaming queries"))
validateGeneralOptions(caseInsensitiveParams)
}
private def validateBatchOptions(caseInsensitiveParams: Map[String, String]) = {
// Batch specific options
KafkaSourceProvider.getKafkaOffsetRangeLimit(
caseInsensitiveParams, STARTING_OFFSETS_OPTION_KEY, EarliestOffsetRangeLimit) match {
case EarliestOffsetRangeLimit => // good to go
case LatestOffsetRangeLimit =>
throw new IllegalArgumentException("starting offset can't be latest " +
"for batch queries on Kafka")
case SpecificOffsetRangeLimit(partitionOffsets) =>
partitionOffsets.foreach {
case (tp, off) if off == KafkaOffsetRangeLimit.LATEST =>
throw new IllegalArgumentException(s"startingOffsets for $tp can't " +
"be latest for batch queries on Kafka")
case _ => // ignore
}
}
KafkaSourceProvider.getKafkaOffsetRangeLimit(
caseInsensitiveParams, ENDING_OFFSETS_OPTION_KEY, LatestOffsetRangeLimit) match {
case EarliestOffsetRangeLimit =>
throw new IllegalArgumentException("ending offset can't be earliest " +
"for batch queries on Kafka")
case LatestOffsetRangeLimit => // good to go
case SpecificOffsetRangeLimit(partitionOffsets) =>
partitionOffsets.foreach {
case (tp, off) if off == KafkaOffsetRangeLimit.EARLIEST =>
throw new IllegalArgumentException(s"ending offset for $tp can't be " +
"earliest for batch queries on Kafka")
case _ => // ignore
}
}
validateGeneralOptions(caseInsensitiveParams)
// Don't want to throw an error, but at least log a warning.
if (caseInsensitiveParams.get(MAX_OFFSET_PER_TRIGGER.toLowerCase(Locale.ROOT)).isDefined) {
logWarning("maxOffsetsPerTrigger option ignored in batch queries")
}
}
class KafkaTable extends Table with SupportsRead with SupportsWrite {
override def name(): String = "KafkaTable"
override def schema(): StructType = KafkaOffsetReader.kafkaSchema
override def capabilities(): ju.Set[TableCapability] = {
// ACCEPT_ANY_SCHEMA is needed because of the following reasons:
// * Kafka writer validates the schema instead of the SQL analyzer (the schema is fixed)
// * Read schema differs from write schema (please see Kafka integration guide)
Set(BATCH_READ, BATCH_WRITE, MICRO_BATCH_READ, CONTINUOUS_READ, STREAMING_WRITE,
ACCEPT_ANY_SCHEMA).asJava
}
override def newScanBuilder(options: CaseInsensitiveStringMap): ScanBuilder =
() => new KafkaScan(options)
override def newWriteBuilder(options: CaseInsensitiveStringMap): WriteBuilder = {
new WriteBuilder {
private var inputSchema: StructType = _
private val topic = Option(options.get(TOPIC_OPTION_KEY)).map(_.trim)
private val producerParams = kafkaParamsForProducer(options.asScala.toMap)
override def withInputDataSchema(schema: StructType): WriteBuilder = {
this.inputSchema = schema
this
}
override def buildForBatch(): BatchWrite = {
assert(inputSchema != null)
new KafkaBatchWrite(topic, producerParams, inputSchema)
}
override def buildForStreaming(): StreamingWrite = {
assert(inputSchema != null)
new KafkaStreamingWrite(topic, producerParams, inputSchema)
}
}
}
}
class KafkaScan(options: CaseInsensitiveStringMap) extends Scan {
override def readSchema(): StructType = KafkaOffsetReader.kafkaSchema
override def toBatch(): Batch = {
val caseInsensitiveOptions = CaseInsensitiveMap(options.asScala.toMap)
validateBatchOptions(caseInsensitiveOptions)
val specifiedKafkaParams = convertToSpecifiedParams(caseInsensitiveOptions)
val startingRelationOffsets = KafkaSourceProvider.getKafkaOffsetRangeLimit(
caseInsensitiveOptions, STARTING_OFFSETS_OPTION_KEY, EarliestOffsetRangeLimit)
val endingRelationOffsets = KafkaSourceProvider.getKafkaOffsetRangeLimit(
caseInsensitiveOptions, ENDING_OFFSETS_OPTION_KEY, LatestOffsetRangeLimit)
new KafkaBatch(
strategy(caseInsensitiveOptions),
caseInsensitiveOptions,
specifiedKafkaParams,
failOnDataLoss(caseInsensitiveOptions),
startingRelationOffsets,
endingRelationOffsets)
}
override def toMicroBatchStream(checkpointLocation: String): MicroBatchStream = {
val parameters = options.asScala.toMap
validateStreamOptions(parameters)
// Each running query should use its own group id. Otherwise, the query may be only assigned
// partial data since Kafka will assign partitions to multiple consumers having the same group
// id. Hence, we should generate a unique id for each query.
val uniqueGroupId = streamingUniqueGroupId(parameters, checkpointLocation)
val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
val specifiedKafkaParams = convertToSpecifiedParams(parameters)
val startingStreamOffsets = KafkaSourceProvider.getKafkaOffsetRangeLimit(
caseInsensitiveParams, STARTING_OFFSETS_OPTION_KEY, LatestOffsetRangeLimit)
val kafkaOffsetReader = new KafkaOffsetReader(
strategy(parameters),
kafkaParamsForDriver(specifiedKafkaParams),
parameters,
driverGroupIdPrefix = s"$uniqueGroupId-driver")
new KafkaMicroBatchStream(
kafkaOffsetReader,
kafkaParamsForExecutors(specifiedKafkaParams, uniqueGroupId),
options,
checkpointLocation,
startingStreamOffsets,
failOnDataLoss(caseInsensitiveParams))
}
override def toContinuousStream(checkpointLocation: String): ContinuousStream = {
val parameters = options.asScala.toMap
validateStreamOptions(parameters)
// Each running query should use its own group id. Otherwise, the query may be only assigned
// partial data since Kafka will assign partitions to multiple consumers having the same group
// id. Hence, we should generate a unique id for each query.
val uniqueGroupId = streamingUniqueGroupId(parameters, checkpointLocation)
val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
val specifiedKafkaParams =
parameters
.keySet
.filter(_.toLowerCase(Locale.ROOT).startsWith("kafka."))
.map { k => k.drop(6).toString -> parameters(k) }
.toMap
val startingStreamOffsets = KafkaSourceProvider.getKafkaOffsetRangeLimit(
caseInsensitiveParams, STARTING_OFFSETS_OPTION_KEY, LatestOffsetRangeLimit)
val kafkaOffsetReader = new KafkaOffsetReader(
strategy(caseInsensitiveParams),
kafkaParamsForDriver(specifiedKafkaParams),
parameters,
driverGroupIdPrefix = s"$uniqueGroupId-driver")
new KafkaContinuousStream(
kafkaOffsetReader,
kafkaParamsForExecutors(specifiedKafkaParams, uniqueGroupId),
options,
checkpointLocation,
startingStreamOffsets,
failOnDataLoss(caseInsensitiveParams))
}
}
}
private[kafka010] object KafkaSourceProvider extends Logging {
private val ASSIGN = "assign"
private val SUBSCRIBE_PATTERN = "subscribepattern"
private val SUBSCRIBE = "subscribe"
private val STRATEGY_OPTION_KEYS = Set(SUBSCRIBE, SUBSCRIBE_PATTERN, ASSIGN)
private[kafka010] val STARTING_OFFSETS_OPTION_KEY = "startingoffsets"
private[kafka010] val ENDING_OFFSETS_OPTION_KEY = "endingoffsets"
private val FAIL_ON_DATA_LOSS_OPTION_KEY = "failondataloss"
private[kafka010] val MIN_PARTITIONS_OPTION_KEY = "minpartitions"
private[kafka010] val MAX_OFFSET_PER_TRIGGER = "maxOffsetsPerTrigger"
private[kafka010] val FETCH_OFFSET_NUM_RETRY = "fetchOffset.numRetries"
private[kafka010] val FETCH_OFFSET_RETRY_INTERVAL_MS = "fetchOffset.retryIntervalMs"
private[kafka010] val CONSUMER_POLL_TIMEOUT = "kafkaConsumer.pollTimeoutMs"
private val GROUP_ID_PREFIX = "groupidprefix"
val TOPIC_OPTION_KEY = "topic"
val INSTRUCTION_FOR_FAIL_ON_DATA_LOSS_FALSE =
"""
|Some data may have been lost because they are not available in Kafka any more; either the
| data was aged out by Kafka or the topic may have been deleted before all the data in the
| topic was processed. If you want your streaming query to fail on such cases, set the source
| option "failOnDataLoss" to "true".
""".stripMargin
val INSTRUCTION_FOR_FAIL_ON_DATA_LOSS_TRUE =
"""
|Some data may have been lost because they are not available in Kafka any more; either the
| data was aged out by Kafka or the topic may have been deleted before all the data in the
| topic was processed. If you don't want your streaming query to fail on such cases, set the
| source option "failOnDataLoss" to "false".
""".stripMargin
val CUSTOM_GROUP_ID_ERROR_MESSAGE =
s"""Kafka option 'kafka.${ConsumerConfig.GROUP_ID_CONFIG}' has been set on this query, it is
| not recommended to set this option. This option is unsafe to use since multiple concurrent
| queries or sources using the same group id will interfere with each other as they are part
| of the same consumer group. Restarted queries may also suffer interference from the
| previous run having the same group id. The user should have only one query per group id,
| and/or set the option 'kafka.session.timeout.ms' to be very small so that the Kafka
| consumers from the previous query are marked dead by the Kafka group coordinator before the
| restarted query starts running.
""".stripMargin
private val serClassName = classOf[ByteArraySerializer].getName
private val deserClassName = classOf[ByteArrayDeserializer].getName
def getKafkaOffsetRangeLimit(
params: Map[String, String],
offsetOptionKey: String,
defaultOffsets: KafkaOffsetRangeLimit): KafkaOffsetRangeLimit = {
params.get(offsetOptionKey).map(_.trim) match {
case Some(offset) if offset.toLowerCase(Locale.ROOT) == "latest" =>
LatestOffsetRangeLimit
case Some(offset) if offset.toLowerCase(Locale.ROOT) == "earliest" =>
EarliestOffsetRangeLimit
case Some(json) => SpecificOffsetRangeLimit(JsonUtils.partitionOffsets(json))
case None => defaultOffsets
}
}
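  // Illustrative values for the offset options parsed above:
  //   "startingOffsets" / "endingOffsets" -> "earliest", "latest", or a JSON map such as
  //   {"topicA":{"0":23,"1":-1},"topicB":{"0":-2}}
  // where -1 stands for the latest and -2 for the earliest offset of a partition
  // (see KafkaOffsetRangeLimit.LATEST / EARLIEST).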
def kafkaParamsForDriver(specifiedKafkaParams: Map[String, String]): ju.Map[String, Object] =
KafkaConfigUpdater("source", specifiedKafkaParams)
.set(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, deserClassName)
.set(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, deserClassName)
// Set to "earliest" to avoid exceptions. However, KafkaSource will fetch the initial
// offsets by itself instead of counting on KafkaConsumer.
.set(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
// So that consumers in the driver does not commit offsets unnecessarily
.set(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
// So that the driver does not pull too much data
.set(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, java.lang.Integer.valueOf(1))
// If buffer config is not set, set it to reasonable value to work around
// buffer issues (see KAFKA-3135)
.setIfUnset(ConsumerConfig.RECEIVE_BUFFER_CONFIG, 65536: java.lang.Integer)
.build()
def kafkaParamsForExecutors(
specifiedKafkaParams: Map[String, String],
uniqueGroupId: String): ju.Map[String, Object] =
KafkaConfigUpdater("executor", specifiedKafkaParams)
.set(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, deserClassName)
.set(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, deserClassName)
// Make sure executors do only what the driver tells them.
.set(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "none")
// So that consumers in executors do not mess with any existing group id
.setIfUnset(ConsumerConfig.GROUP_ID_CONFIG, s"$uniqueGroupId-executor")
// So that consumers in executors does not commit offsets unnecessarily
.set(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false")
// If buffer config is not set, set it to reasonable value to work around
// buffer issues (see KAFKA-3135)
.setIfUnset(ConsumerConfig.RECEIVE_BUFFER_CONFIG, 65536: java.lang.Integer)
.build()
/**
* Returns a unique batch consumer group (group.id), allowing the user to set the prefix of
* the consumer group
*/
private[kafka010] def batchUniqueGroupId(parameters: Map[String, String]): String = {
val groupIdPrefix = parameters
.getOrElse(GROUP_ID_PREFIX, "spark-kafka-relation")
s"${groupIdPrefix}-${UUID.randomUUID}"
}
/**
* Returns a unique streaming consumer group (group.id), allowing the user to set the prefix of
* the consumer group
*/
private def streamingUniqueGroupId(
parameters: Map[String, String],
metadataPath: String): String = {
val groupIdPrefix = parameters
.getOrElse(GROUP_ID_PREFIX, "spark-kafka-source")
s"${groupIdPrefix}-${UUID.randomUUID}-${metadataPath.hashCode}"
}
private[kafka010] def kafkaParamsForProducer(
parameters: Map[String, String]): ju.Map[String, Object] = {
val caseInsensitiveParams = parameters.map { case (k, v) => (k.toLowerCase(Locale.ROOT), v) }
if (caseInsensitiveParams.contains(s"kafka.${ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG}")) {
throw new IllegalArgumentException(
s"Kafka option '${ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG}' is not supported as keys "
+ "are serialized with ByteArraySerializer.")
}
if (caseInsensitiveParams.contains(s"kafka.${ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG}")) {
throw new IllegalArgumentException(
s"Kafka option '${ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG}' is not supported as "
+ "value are serialized with ByteArraySerializer.")
}
val specifiedKafkaParams = convertToSpecifiedParams(parameters)
KafkaConfigUpdater("executor", specifiedKafkaParams)
.set(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, serClassName)
.set(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, serClassName)
.build()
}
private def convertToSpecifiedParams(parameters: Map[String, String]): Map[String, String] = {
parameters
.keySet
.filter(_.toLowerCase(Locale.ROOT).startsWith("kafka."))
.map { k => k.drop(6).toString -> parameters(k) }
.toMap
}
}
| actuaryzhang/spark | external/kafka-0-10-sql/src/main/scala/org/apache/spark/sql/kafka010/KafkaSourceProvider.scala | Scala | apache-2.0 | 28,494 |
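For reference, an end-to-end sketch of the options this provider validates, using the public DataFrame reader API. It assumes a Kafka broker at localhost:9092 and a topic named events; both are placeholders.

import org.apache.spark.sql.SparkSession

object KafkaSourceUsageSketch {
  def main(args: Array[String]): Unit = {
    val spark = SparkSession.builder()
      .appName("kafka-source-sketch")
      .master("local[*]")
      .getOrCreate()

    // Streaming read: exactly one of assign / subscribe / subscribePattern must be set.
    val stream = spark.readStream
      .format("kafka")
      .option("kafka.bootstrap.servers", "localhost:9092")
      .option("subscribe", "events")
      .option("startingOffsets", "latest")
      .load()

    // Keys and values arrive as binary columns; deserialize them explicitly.
    val decoded = stream.selectExpr("CAST(key AS STRING)", "CAST(value AS STRING)")

    val query = decoded.writeStream.format("console").start()
    query.awaitTermination()
  }
}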
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.matchers
import org.scalatest._
import org.scalatest.prop.Checkers
import org.scalacheck._
import Arbitrary._
import Prop._
import org.scalatest.exceptions.TestFailedException
class ShouldEndWithSubstringSpec extends Spec with ShouldMatchers with Checkers with ReturnsNormallyThrowsAssertion {
object `The endWith substring syntax` {
def `should do nothing if the string ends with the specified substring` {
"1.78" should endWith (".78")
"21.7" should endWith ("7")
"21.78" should endWith ("21.78")
check((s: String, t: String) => returnsNormally(s + t should endWith (t)))
}
def `should do nothing if the string does not end with the specified substring when used with not` {
"eight" should not { endWith ("1.7") }
"eight" should not endWith ("1.7")
check((s: String, t: String) => !(s + t).endsWith(s) ==> returnsNormally(s + t should not (endWith (s))))
check((s: String, t: String) => !(s + t).endsWith(s) ==> returnsNormally(s + t should not endWith (s)))
}
    def `should do nothing if the string ends with the specified substring when used in a logical-and expression` {
"1.7b" should ((endWith ("1.7b")) and (endWith ("7b")))
"1.7b" should (endWith ("1.7b") and (endWith ("7b")))
"1.7b" should (endWith ("1.7b") and endWith ("7b"))
check((s: String, t: String) => returnsNormally(s + t should (endWith (t) and endWith (""))))
}
    def `should do nothing if the string ends with the specified substring when used in a logical-or expression` {
"1.7b" should (endWith ("hello") or (endWith ("1.7b")))
"1.7b" should ((endWith ("hello")) or (endWith ("1.7b")))
"1.7b" should (endWith ("hello") or endWith ("1.7b"))
"1.7b" should (endWith ("hello") or (endWith ("7b")))
"1.7b" should ((endWith ("hello")) or (endWith ("7b")))
"1.7b" should (endWith ("hello") or endWith ("7b"))
check((s: String, t: String) => returnsNormally(s + t should (endWith ("hi") or endWith (t))))
}
def `should do nothing if the string does not end with the specified substring when used in a logical-and expression with not` {
"fred" should (not (endWith ("fre")) and not (endWith ("1.7")))
"fred" should ((not endWith ("fre")) and (not endWith ("1.7")))
"fred" should (not endWith ("fre") and not endWith ("1.7"))
check((s: String) => !(s endsWith "bob") && !(s endsWith "1.7") ==> returnsNormally(s should (not endWith ("bob") and not endWith ("1.7"))))
}
def `should do nothing if the string does not end with the specified substring when used in a logical-or expression with not` {
"fred" should (not (endWith ("fred")) or not (endWith ("1.7")))
"fred" should ((not endWith ("fred")) or (not endWith ("1.7")))
"fred" should (not endWith ("fred") or not endWith ("1.7"))
check((s: String) => s.indexOf("a") != 0 || s.indexOf("b") != 0 ==> returnsNormally(s should (not endWith ("a") or not endWith ("b"))))
}
def `should throw TestFailedException if the string does not match the specified substring` {
val caught1 = intercept[TestFailedException] {
"1.7" should endWith ("1.78")
}
assert(caught1.getMessage === "\\"1.7\\" did not end with substring \\"1.78\\"")
val caught2 = intercept[TestFailedException] {
"1.7" should endWith ("21.7")
}
assert(caught2.getMessage === "\\"1.7\\" did not end with substring \\"21.7\\"")
val caught3 = intercept[TestFailedException] {
"1.78" should endWith ("1.7")
}
assert(caught3.getMessage === "\\"1.78\\" did not end with substring \\"1.7\\"")
val caught6 = intercept[TestFailedException] {
"eight" should endWith ("1.7")
}
assert(caught6.getMessage === "\\"eight\\" did not end with substring \\"1.7\\"")
val caught7 = intercept[TestFailedException] {
"one.eight" should endWith ("1.7")
}
assert(caught7.getMessage === "\\"one.eight\\" did not end with substring \\"1.7\\"")
val caught8 = intercept[TestFailedException] {
"onedoteight" should endWith ("1.7")
}
assert(caught8.getMessage === "\\"onedoteight\\" did not end with substring \\"1.7\\"")
val caught9 = intercept[TestFailedException] {
"***" should endWith ("1.7")
}
assert(caught9.getMessage === "\\"***\\" did not end with substring \\"1.7\\"")
check((s: String) => !(s endsWith "1.7") ==> throwsTestFailedException(s should endWith ("1.7")))
}
    def `should throw TestFailedException if the string matches the specified substring when used with not` {
val caught1 = intercept[TestFailedException] {
"1.7" should not { endWith ("1.7") }
}
assert(caught1.getMessage === "\\"1.7\\" ended with substring \\"1.7\\"")
val caught2 = intercept[TestFailedException] {
"1.7" should not { endWith ("7") }
}
assert(caught2.getMessage === "\\"1.7\\" ended with substring \\"7\\"")
val caught3 = intercept[TestFailedException] {
"-1.8" should not { endWith (".8") }
}
assert(caught3.getMessage === "\\"-1.8\\" ended with substring \\".8\\"")
val caught4 = intercept[TestFailedException] {
"8b" should not { endWith ("b") }
}
assert(caught4.getMessage === "\\"8b\\" ended with substring \\"b\\"")
val caught5 = intercept[TestFailedException] {
"1." should not { endWith ("1.") }
}
assert(caught5.getMessage === "\\"1.\\" ended with substring \\"1.\\"")
val caught11 = intercept[TestFailedException] {
"1.7" should not endWith (".7")
}
assert(caught11.getMessage === "\\"1.7\\" ended with substring \\".7\\"")
val caught13 = intercept[TestFailedException] {
"-1.8" should not endWith ("8")
}
assert(caught13.getMessage === "\\"-1.8\\" ended with substring \\"8\\"")
val caught14 = intercept[TestFailedException] {
"8" should not endWith ("")
}
assert(caught14.getMessage === "\\"8\\" ended with substring \\"\\"")
val caught15 = intercept[TestFailedException] {
"1." should not endWith ("1.")
}
assert(caught15.getMessage === "\\"1.\\" ended with substring \\"1.\\"")
val caught21 = intercept[TestFailedException] {
"1.7a" should not { endWith ("7a") }
}
assert(caught21.getMessage === "\\"1.7a\\" ended with substring \\"7a\\"")
val caught22 = intercept[TestFailedException] {
"b1.7" should not { endWith ("1.7") }
}
assert(caught22.getMessage === "\\"b1.7\\" ended with substring \\"1.7\\"")
val caught23 = intercept[TestFailedException] {
"ba-1.8" should not { endWith ("a-1.8") }
}
assert(caught23.getMessage === "\\"ba-1.8\\" ended with substring \\"a-1.8\\"")
check((s: String) => s.length != 0 ==> throwsTestFailedException(s should not endWith (s.substring(s.length - 1, s.length))))
}
def `should throw TestFailedException if the string ends with the specified substring when used in a logical-and expression` {
val caught1 = intercept[TestFailedException] {
"1.7" should (endWith ("1.7") and (endWith ("1.8")))
}
assert(caught1.getMessage === "\\"1.7\\" ended with substring \\"1.7\\", but \\"1.7\\" did not end with substring \\"1.8\\"")
val caught2 = intercept[TestFailedException] {
"1.7" should ((endWith ("7")) and (endWith ("1.8")))
}
assert(caught2.getMessage === "\\"1.7\\" ended with substring \\"7\\", but \\"1.7\\" did not end with substring \\"1.8\\"")
val caught3 = intercept[TestFailedException] {
"1.7" should (endWith (".7") and endWith ("1.8"))
}
assert(caught3.getMessage === "\\"1.7\\" ended with substring \\".7\\", but \\"1.7\\" did not end with substring \\"1.8\\"")
// Check to make sure the error message "short circuits" (i.e., just reports the left side's failure)
val caught4 = intercept[TestFailedException] {
"one.eight" should (endWith ("1.7") and (endWith ("1.8")))
}
assert(caught4.getMessage === "\\"one.eight\\" did not end with substring \\"1.7\\"")
val caught5 = intercept[TestFailedException] {
"one.eight" should ((endWith ("1.7")) and (endWith ("1.8")))
}
assert(caught5.getMessage === "\\"one.eight\\" did not end with substring \\"1.7\\"")
val caught6 = intercept[TestFailedException] {
"one.eight" should (endWith ("1.7") and endWith ("1.8"))
}
assert(caught6.getMessage === "\\"one.eight\\" did not end with substring \\"1.7\\"")
check((s: String, t: String, u: String) => !((s + u) endsWith t) ==> throwsTestFailedException(s + u should (endWith (u) and endWith (t))))
}
def `should throw TestFailedException if the string ends with the specified substring when used in a logical-or expression` {
val caught1 = intercept[TestFailedException] {
"one.seven" should (endWith ("1.7") or (endWith ("1.8")))
}
assert(caught1.getMessage === "\\"one.seven\\" did not end with substring \\"1.7\\", and \\"one.seven\\" did not end with substring \\"1.8\\"")
val caught2 = intercept[TestFailedException] {
"one.seven" should ((endWith ("1.7")) or (endWith ("1.8")))
}
assert(caught2.getMessage === "\\"one.seven\\" did not end with substring \\"1.7\\", and \\"one.seven\\" did not end with substring \\"1.8\\"")
val caught3 = intercept[TestFailedException] {
"one.seven" should (endWith ("1.7") or endWith ("1.8"))
}
assert(caught3.getMessage === "\\"one.seven\\" did not end with substring \\"1.7\\", and \\"one.seven\\" did not end with substring \\"1.8\\"")
check(
(s: String, t: String, u: String, v: String) => {
(t.length != 0 && v.length != 0 && !(s + u).endsWith(t) && !(s + u).endsWith(v)) ==>
throwsTestFailedException(s + u should (endWith (t) or endWith (v)))
}
)
}
def `should throw TestFailedException if the string ends with the specified substring when used in a logical-and expression used with not` {
val caught1 = intercept[TestFailedException] {
"1.7" should (not endWith ("1.8") and (not endWith ("1.7")))
}
assert(caught1.getMessage === "\\"1.7\\" did not end with substring \\"1.8\\", but \\"1.7\\" ended with substring \\"1.7\\"")
val caught2 = intercept[TestFailedException] {
"1.7" should ((not endWith ("1.8")) and (not endWith ("1.7")))
}
assert(caught2.getMessage === "\\"1.7\\" did not end with substring \\"1.8\\", but \\"1.7\\" ended with substring \\"1.7\\"")
val caught3 = intercept[TestFailedException] {
"1.7" should (not endWith ("1.8") and not endWith ("1.7"))
}
assert(caught3.getMessage === "\\"1.7\\" did not end with substring \\"1.8\\", but \\"1.7\\" ended with substring \\"1.7\\"")
val caught4 = intercept[TestFailedException] {
"a1.7" should (not endWith ("1.8") and (not endWith ("a1.7")))
}
assert(caught4.getMessage === "\\"a1.7\\" did not end with substring \\"1.8\\", but \\"a1.7\\" ended with substring \\"a1.7\\"")
val caught5 = intercept[TestFailedException] {
"b1.7" should ((not endWith ("1.8")) and (not endWith ("1.7")))
}
assert(caught5.getMessage === "\\"b1.7\\" did not end with substring \\"1.8\\", but \\"b1.7\\" ended with substring \\"1.7\\"")
val caught6 = intercept[TestFailedException] {
"a1.7b" should (not endWith ("1.8") and not endWith ("1.7b"))
}
assert(caught6.getMessage === "\\"a1.7b\\" did not end with substring \\"1.8\\", but \\"a1.7b\\" ended with substring \\"1.7b\\"")
check(
(s: String, t: String, u: String) =>
(s + t + u).indexOf("hi") != 0 ==>
throwsTestFailedException(s + t + u should (not endWith ("hi") and not endWith (u)))
)
}
def `should throw TestFailedException if the string ends with the specified substring when used in a logical-or expression used with not` {
val caught1 = intercept[TestFailedException] {
"1.7" should (not endWith ("1.7") or (not endWith ("1.7")))
}
assert(caught1.getMessage === "\\"1.7\\" ended with substring \\"1.7\\", and \\"1.7\\" ended with substring \\"1.7\\"")
val caught2 = intercept[TestFailedException] {
"1.7" should ((not endWith ("1.7")) or (not endWith ("1.7")))
}
assert(caught2.getMessage === "\\"1.7\\" ended with substring \\"1.7\\", and \\"1.7\\" ended with substring \\"1.7\\"")
val caught3 = intercept[TestFailedException] {
"1.7" should (not endWith ("1.7") or not endWith ("1.7"))
}
assert(caught3.getMessage === "\\"1.7\\" ended with substring \\"1.7\\", and \\"1.7\\" ended with substring \\"1.7\\"")
val caught4 = intercept[TestFailedException] {
"1.7" should (not (endWith ("1.7")) or not (endWith ("1.7")))
}
assert(caught4.getMessage === "\\"1.7\\" ended with substring \\"1.7\\", and \\"1.7\\" ended with substring \\"1.7\\"")
val caught5 = intercept[TestFailedException] {
"a1.7" should (not endWith (".7") or (not endWith ("a1.7")))
}
assert(caught5.getMessage === "\\"a1.7\\" ended with substring \\".7\\", and \\"a1.7\\" ended with substring \\"a1.7\\"")
val caught6 = intercept[TestFailedException] {
"b1.7" should ((not endWith ("1.7")) or (not endWith ("1.7")))
}
assert(caught6.getMessage === "\\"b1.7\\" ended with substring \\"1.7\\", and \\"b1.7\\" ended with substring \\"1.7\\"")
val caught7 = intercept[TestFailedException] {
"a1.7b" should (not endWith ("1.7b") or not endWith ("7b"))
}
assert(caught7.getMessage === "\\"a1.7b\\" ended with substring \\"1.7b\\", and \\"a1.7b\\" ended with substring \\"7b\\"")
val caught8 = intercept[TestFailedException] {
"a1.7b" should (not (endWith ("1.7b")) or not (endWith ("7b")))
}
assert(caught8.getMessage === "\\"a1.7b\\" ended with substring \\"1.7b\\", and \\"a1.7b\\" ended with substring \\"7b\\"")
check(
(s: String, t: String) =>
throwsTestFailedException(s + t should (not endWith (t) or not endWith ("")))
)
}
}
}
| travisbrown/scalatest | src/test/scala/org/scalatest/matchers/ShouldEndWithSubstringSpec.scala | Scala | apache-2.0 | 14,804 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.rdd
import org.apache.spark.SparkContext._
import org.apache.spark.RangePartitioner
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.models.{ ReferencePosition, SequenceRecord, SequenceDictionary }
import org.bdgenomics.adam.projections.Projection
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.util.ADAMFunSuite
import org.bdgenomics.formats.avro.{ AlignmentRecord, Contig }
import scala.util.Random
class GenomicPositionPartitionerSuite extends ADAMFunSuite {
test("partitions the UNMAPPED ReferencePosition into the top partition") {
val parter = GenomicPositionPartitioner(10, SequenceDictionary(record("foo", 1000)))
assert(parter.numPartitions === 11)
assert(parter.getPartition(ReferencePosition.UNMAPPED) === 10)
}
test("if we do not have a contig for a record, we throw an IAE") {
val parter = GenomicPositionPartitioner(10, SequenceDictionary(record("foo", 1000)))
assert(parter.numPartitions === 11)
intercept[IllegalArgumentException] {
parter.getPartition(ReferencePosition("chrFoo", 10))
}
}
test("partitioning into N pieces on M total sequence length, where N > M, results in M partitions") {
val parter = GenomicPositionPartitioner(10, SequenceDictionary(record("foo", 9)))
assert(parter.numPartitions === 10)
}
test("correctly partitions a single dummy sequence into two pieces") {
val parter = GenomicPositionPartitioner(2, SequenceDictionary(record("foo", 10)))
assert(parter.getPartition(ReferencePosition("foo", 3)) === 0)
assert(parter.getPartition(ReferencePosition("foo", 7)) === 1)
}
test("correctly counts cumulative lengths") {
val parter = GenomicPositionPartitioner(3, SequenceDictionary(record("foo", 20), record("bar", 10)))
assert(parter.cumulativeLengths("bar") === 0)
assert(parter.cumulativeLengths("foo") === 10)
}
test("correctly partitions positions across two dummy sequences") {
val parter = GenomicPositionPartitioner(3, SequenceDictionary(record("bar", 20), record("foo", 10)))
// check easy examples
assert(parter.getPartition(ReferencePosition("foo", 8)) === 2)
assert(parter.getPartition(ReferencePosition("foo", 18)) === 3)
assert(parter.getPartition(ReferencePosition("bar", 18)) === 1)
assert(parter.getPartition(ReferencePosition("bar", 8)) === 0)
// check edge cases
assert(parter.getPartition(ReferencePosition("foo", 0)) === 2)
assert(parter.getPartition(ReferencePosition("foo", 10)) === 3)
assert(parter.getPartition(ReferencePosition("bar", 0)) === 0)
}
sparkTest("test that we can range partition ADAMRecords") {
val rand = new Random(1000L)
val count = 1000
val pos = sc.parallelize((1 to count).map(i => adamRecord("chr1", "read_%d".format(i), rand.nextInt(100), readMapped = true)), 1)
val parts = 200
val pairs = pos.map(p => (ReferencePosition(p.getContigName, p.getStart), p))
val parter = new RangePartitioner(parts, pairs)
val partitioned = pairs.sortByKey().partitionBy(parter)
assert(partitioned.count() === count)
// check here to make sure that we have at least increased the number of partitions
// as of spark 1.1.0, range partitioner does not guarantee that you will receive a
// number of partitions equal to the number requested
assert(partitioned.partitions.length > 1)
}
sparkTest("test that we can range partition ADAMRecords indexed by sample") {
val rand = new Random(1000L)
val count = 1000
val pos = sc.parallelize((1 to count).map(i => adamRecord("chr1", "read_%d".format(i), rand.nextInt(100), readMapped = true)), 1)
val parts = 200
val pairs = pos.map(p => ((ReferencePosition(p.getContigName, p.getStart), "sample"), p))
val parter = new RangePartitioner(parts, pairs)
val partitioned = pairs.sortByKey().partitionBy(parter)
assert(partitioned.count() === count)
assert(partitioned.partitions.length > 1)
}
sparkTest("test that simple partitioning works okay on a reasonable set of ADAMRecords") {
val filename = resourcePath("reads12.sam")
val parts = 1
val p = {
import org.bdgenomics.adam.projections.AlignmentRecordField._
Projection(contigName, start, readName, readMapped)
}
val gRdd = sc.loadAlignments(filename, projection = Some(p))
val rdd = gRdd.rdd
val parter = GenomicPositionPartitioner(parts, gRdd.sequences)
assert(rdd.count() === 200)
val keyed =
rdd.map(rec => (ReferencePosition(rec.getContigName, rec.getStart), rec)).sortByKey()
val keys = keyed.map(_._1).collect()
assert(!keys.exists(rp => parter.getPartition(rp) < 0 || parter.getPartition(rp) >= parts))
val partitioned = keyed.partitionBy(parter)
assert(partitioned.count() === 200)
val partSizes = partitioned.mapPartitions {
itr =>
List(itr.size).iterator
}
assert(partSizes.count() === parts + 1)
}
sparkTest("test indexed ReferencePosition partitioning works on a set of indexed ADAMRecords") {
val filename = resourcePath("reads12.sam")
val parts = 10
val gRdd = sc.loadAlignments(filename)
val rdd = gRdd.rdd
val parter = GenomicPositionPartitioner(parts, gRdd.sequences)
val p = {
import org.bdgenomics.adam.projections.AlignmentRecordField._
Projection(contigName, start, readName, readMapped)
}
assert(rdd.count() === 200)
val keyed =
rdd.keyBy(rec => (ReferencePosition(rec.getContigName, rec.getStart), "sample")).sortByKey()
val keys = keyed.map(_._1).collect()
assert(!keys.exists(rp => parter.getPartition(rp) < 0 || parter.getPartition(rp) >= parts))
val partitioned = keyed.partitionBy(parter)
assert(partitioned.count() === 200)
val partSizes = partitioned.mapPartitions {
itr =>
List(itr.size).iterator
}
assert(partSizes.count() === parts + 1)
}
def adamRecord(referenceName: String, readName: String, start: Long, readMapped: Boolean) = {
val contig = Contig.newBuilder
.setContigName(referenceName)
.build
AlignmentRecord.newBuilder()
.setContigName(contig.getContigName)
.setReadName(readName)
.setReadMapped(readMapped)
.setStart(start)
.build()
}
def record(name: String, length: Long) = SequenceRecord(name.toString, length.toInt)
}
class PositionKeyed[U <: Serializable] extends Serializable {
}
class SerializableIterator[U](itr: Iterator[U]) extends Iterator[U] with Serializable {
def hasNext: Boolean = itr.hasNext
def next(): U = itr.next()
}
| tdanford/adam | adam-core/src/test/scala/org/bdgenomics/adam/rdd/GenomicPositionPartitionerSuite.scala | Scala | apache-2.0 | 7,417 |
import scala.tools.nsc.interactive.tests._
import scala.reflect.internal.util._
object Test extends InteractiveTest {
import compiler._, definitions._
override def runDefaultTests(): Unit = {
def resolveTypeTagHyperlink(): Unit = {
val sym = compiler.askForResponse(() => compiler.currentRun.runDefinitions.TypeTagClass).get.swap.getOrElse(???)
val r = new Response[Position]
compiler.askLinkPos(sym, new BatchSourceFile("", source), r)
r.get
}
def checkTypeTagSymbolConsistent(): Unit = {
compiler.askForResponse {
() => {
val runDefinitions = currentRun.runDefinitions
import runDefinitions._
import Predef._
assert(TypeTagsClass.map(sym => getMemberClass(sym, tpnme.TypeTag)) == TypeTagClass)
assert(TypeTagsClass.map(sym => getMemberClass(sym, tpnme.WeakTypeTag)) == WeakTypeTagClass)
assert(TypeTagsClass.map(sym => getMemberModule(sym, nme.WeakTypeTag)) == WeakTypeTagModule)
assert(getMemberMethod(ReflectPackage, nme.materializeClassTag) == materializeClassTag)
assert(ReflectApiPackage.map(sym => getMemberMethod(sym, nme.materializeWeakTypeTag)) == materializeWeakTypeTag)
assert(ReflectApiPackage.map(sym => getMemberMethod(sym, nme.materializeTypeTag)) == materializeTypeTag)
()
}
}.get match {
case Right(t) => t.printStackTrace
case Left(_) =>
}
}
resolveTypeTagHyperlink()
// The presentation compiler loads TypeTags from source; we'll get new symbols for its members.
// Need to make sure we didn't cache the old ones in Definitions.
checkTypeTagSymbolConsistent()
}
def source =
"""
|package scala
|package reflect
|package api
|
|trait TypeTags { self: Universe =>
| import definitions._
|
| @annotation.implicitNotFound(msg = "No WeakTypeTag available for ${T}")
| trait WeakTypeTag[T] extends Equals with Serializable {
| val mirror: Mirror
| def in[U <: Universe with Singleton](otherMirror: scala.reflect.api.Mirror[U]): U # WeakTypeTag[T]
| def tpe: Type
| }
| object WeakTypeTag
|
| trait TypeTag[T] extends WeakTypeTag[T] with Equals with Serializable {
| }
| object TypeTag
|
""".stripMargin
}
| scala/scala | test/files/presentation/t7678/Runner.scala | Scala | apache-2.0 | 2,378 |
package org.bitcoins.core.dlc.accounting
import org.bitcoins.core.currency.{CurrencyUnit, CurrencyUnits}
/** Similar to [[org.bitcoins.core.dlc.accounting.DLCAccounting]], but
* represents the entire accounting for the wallet
*/
case class DLCWalletAccounting(
myCollateral: CurrencyUnit,
theirCollateral: CurrencyUnit,
myPayout: CurrencyUnit,
theirPayout: CurrencyUnit)
extends PayoutAccounting
object DLCWalletAccounting {
def fromDLCAccounting(
accountings: Vector[DLCAccounting]): DLCWalletAccounting = {
val myCollateral =
accountings.foldLeft(CurrencyUnits.zero)(_ + _.myCollateral)
val theirCollateral =
accountings.foldLeft(CurrencyUnits.zero)(_ + _.theirCollateral)
val myPayouts = accountings.foldLeft(CurrencyUnits.zero)(_ + _.myPayout)
val theirPayouts =
accountings.foldLeft(CurrencyUnits.zero)(_ + _.theirPayout)
DLCWalletAccounting(myCollateral, theirCollateral, myPayouts, theirPayouts)
}
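  // Illustrative only (amounts are hypothetical): given two per-DLC accountings whose
  // myCollateral values are 1 BTC and 2 BTC, fromDLCAccounting folds each field with +,
  // so the wallet-level myCollateral is 3 BTC; the other three fields sum the same way.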
}
| bitcoin-s/bitcoin-s | core/src/main/scala/org/bitcoins/core/dlc/accounting/DLCWalletAccounting.scala | Scala | mit | 985 |
package dbtarzan.gui.tabletabs
import dbtarzan.db.DBTableStructure
/*
Normally it shows the name of the table.
If the tab is derived from a foreign key, it is in the form:
[table] < [origin table]
where origin table is the table on the other side of the foreign key
If a filter (where) has been applied, a star (*) character is shown at the end of the text
*/
object TableStructureText {
def buildTabText(structure : DBTableStructure) : String = {
val description = structure.description
description.name + description.origin.map("<"+_).getOrElse("") + starForFilter(structure)
}
private def starForFilter(structure : DBTableStructure): String =
if(DBTableStructure.hasFilter(structure))
" *"
else
""
}
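// Illustrative only (table names are hypothetical): a table "orders" opened through a foreign
// key from "customers" with a WHERE filter applied renders as "orders<customers *";
// with no origin and no filter the tab text is simply "orders".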
| aferrandi/dbtarzan | src/main/scala/dbtarzan/gui/tabletabs/TableStructureText.scala | Scala | apache-2.0 | 756 |
/*******************************************************************************
* This file is part of tiscaf.
*
* tiscaf is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
 * tiscaf is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with tiscaf. If not, see <http://www.gnu.org/licenses/>.
******************************************************************************/
package tiscaf
import javax.net.ssl._
import java.security._
import java.nio.channels._
import java.nio._
import scala.collection.{ mutable => mute }
/** tiscaf SSL context. It manages the session cache as well as SSL settings. */
trait HSslContext {
//---------------------- to implement ------------------------------
/** SSL ports. */
def port: Int
/** Keystore passphrase. */
def passphrase: String
/** Keystore containing server certificate and CA certificate(s). */
def keystore: KeyStore
//---------------------- to override ------------------------------
/** Require client authentication. By default `false`. */
def clientAuth: HClientAuth.Value = HClientAuth.None // client authentication
/** Trusted client certificates and CA certificates. By default `None`. */
def truststore: Option[KeyStore] = None // trusted client certificates and CA certificates
/** Trusted client certificate depth according to CA certificate(s). By default `1`. */
def trustDepth = 1 // trust depth for client certificate according to CA certificates in the truststore
/** The protocol. By default `SSL` */
def protocol = "SSL"
/** Specific JCE provider name if one wants to use it.
* By default `None` which means that the default provider is used.
*/
def provider: Option[String] = None
/** SSL session timeout in minutes. By default `5`. */
def sslSessionTimeoutMin: Int = 5
//---------------------- internals ------------------------------
private val keyManagers = {
val factory = provider match {
case Some(p) =>
KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm, p)
case None =>
KeyManagerFactory.getInstance(KeyManagerFactory.getDefaultAlgorithm)
}
factory.init(keystore, passphrase.toCharArray)
factory.getKeyManagers
}
private val trustManagers =
truststore match {
case Some(ts) =>
val factory = provider match {
case Some(p) =>
TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm, p)
case None =>
TrustManagerFactory.getInstance(TrustManagerFactory.getDefaultAlgorithm)
}
factory.init(ts)
factory.getTrustManagers
case None => null
}
private[tiscaf] val sslContext = {
val context = provider match {
case Some(p) => SSLContext.getInstance(protocol, p)
case None => SSLContext.getInstance(protocol)
}
context.init(keyManagers, trustManagers, new SecureRandom)
context
}
private[tiscaf] def engine(host: String, port: Int) = {
val engine = sslContext.createSSLEngine(host, port)
// we are on the server side
engine.setUseClientMode(false)
// configure client authentication
clientAuth match {
case HClientAuth.Accepted => engine.setWantClientAuth(true)
case HClientAuth.Required => engine.setNeedClientAuth(true)
case HClientAuth.None => // ...
}
engine.setEnabledProtocols(Array("SSLv3", "TLSv1"))
engine
}
}
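// Minimal sketch of a concrete context (the keystore path and passphrase are assumptions,
// not part of tiscaf); only the three abstract members must be supplied:
//
//   object ExampleSslContext extends HSslContext {
//     def port = 8443
//     def passphrase = "changeit"
//     def keystore = {
//       val ks = KeyStore.getInstance("JKS")
//       val in = new java.io.FileInputStream("server.jks")
//       try ks.load(in, passphrase.toCharArray) finally in.close()
//       ks
//     }
//   }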
private[tiscaf] object HSsl {
// do the SSL stuffs
  /* Performs the SSL handshake, blocking until it completes */
def handshake(channel: SocketChannel, engine: SSLEngine) {
// get the ssl session
val session = engine.getSession
// determine buffer sizes from session
// Create byte buffers to use for holding application data
val myAppData = ByteBuffer.allocate(session.getApplicationBufferSize)
val myNetData = ByteBuffer.allocate(session.getPacketBufferSize)
val peerAppData = ByteBuffer.allocate(session.getApplicationBufferSize)
val peerNetData = ByteBuffer.allocate(session.getPacketBufferSize)
// Begin handshake
engine.beginHandshake
var hs = engine.getHandshakeStatus
// Process handshaking message
import SSLEngineResult.HandshakeStatus._
while (hs != FINISHED && hs != NOT_HANDSHAKING) {
hs match {
case NEED_UNWRAP =>
// Receive handshaking data from peer
if (channel.read(peerNetData) >= 0) {
// Process incoming handshaking data
peerNetData.flip
val res = engine.unwrap(peerNetData, peerAppData)
peerNetData.compact
// set the new status
hs = res.getHandshakeStatus
// Check status
res.getStatus match {
case SSLEngineResult.Status.BUFFER_OVERFLOW =>
// clear the buffer
peerAppData.clear
case _ => // on OK, BUFFER_UNDERFLOW or CLOSED, just continue
}
}
case NEED_WRAP =>
// Empty the local network packet buffer.
myNetData.clear
// Generate handshaking data
val res = engine.wrap(myAppData, myNetData)
hs = res.getHandshakeStatus
// Check status
res.getStatus match {
case SSLEngineResult.Status.OK =>
myNetData.flip
// Send the handshaking data to peer
while (myNetData.hasRemaining) {
if (channel.write(myNetData) < 0) {
// TODO Handle closed channel
}
}
case SSLEngineResult.Status.BUFFER_OVERFLOW =>
// compact net buffer
myNetData.compact
case _ =>
throw new Exception("Is it possible that this happens?")
}
case NEED_TASK =>
// blocking tasks in another thread
var runnable = engine.getDelegatedTask
while (runnable != null) {
runnable.run
runnable = engine.getDelegatedTask
}
hs = engine.getHandshakeStatus
case _ =>
throw new Exception("Is it possible that this happens?")
}
}
}
}
/** Indicates whether client authentication is accepted, required,
* or if none is needed.
*/
object HClientAuth extends Enumeration {
val None, Accepted, Required = Value
}
| gnieh/tiscaf | core/src/main/scala/tiscaf/HSslContext.scala | Scala | lgpl-3.0 | 6,842 |
package assets.mustache.forces
import uk.gov.gds.ier.transaction.forces.nationality.NationalityMustache
import uk.gov.gds.ier.test._
class NationalityTemplateTest
extends TemplateTestSuite
with NationalityMustache {
it should "properly render all properties from the model" in {
running(FakeApplication()) {
val data = NationalityModel(
question = Question(postUrl = "/whatever-url",
title = "nationality title"
),
nationality = FieldSet("nationalityClass"),
britishOption = Field(
id = "britishOptionId",
name = "britishOptionName",
attributes = "foo=\\"foo\\""
),
irishOption = Field(
id = "irishOptionId",
name = "irishOptionName",
attributes = "foo=\\"foo\\""
),
hasOtherCountryOption = Field(
id = "hasOtherCountryOptionId",
name = "hasOtherCountryOptionName",
attributes = "foo=\\"foo\\""
),
otherCountry = FieldSet("otherCountryClass"),
otherCountries0 = Field(
id = "otherCountries0Id",
name = "otherCountries0Name",
value = "otherCountries0Value",
classes = "otherCountries0Class"
),
otherCountries1 = Field(
id = "otherCountries1Id",
name = "otherCountries1Name",
value = "otherCountries1Value",
classes = "otherCountries1Class"
),
otherCountries2 = Field(
id = "otherCountries2Id",
name = "otherCountries2Name",
value = "otherCountries2Value",
classes = "otherCountries2Class"
),
noNationalityReason = Field (
id = "noNationalityReasonId",
name = "noNationalityReasonName",
value = "noNationalityReasonValue"
),
noNationalityReasonShowFlag = "noNationalityReasonShowFlag",
emailField = Field(
id = "emailFieldId",
name = "emailFieldName",
classes = "emailFieldClass",
value = "emailFieldValue"
)
)
val html = Mustache.render("forces/nationality", data)
val doc = Jsoup.parse(html.toString)
val nationalityFieldSet = doc.select("fieldset").first()
nationalityFieldSet.attr("class") should include("nationalityClass")
val britishOptionInput = doc.select("input[id=britishOptionId]").first()
britishOptionInput.attr("id") should be("britishOptionId")
britishOptionInput.attr("name") should be("britishOptionName")
britishOptionInput.attr("foo") should be("foo")
val irishOptionInput = doc.select("input[id=irishOptionId]").first()
irishOptionInput.attr("id") should be("irishOptionId")
irishOptionInput.attr("name") should be("irishOptionName")
irishOptionInput.attr("foo") should be("foo")
val otherCountryValidation = doc.select("div").first()
otherCountryValidation.attr("class") should include("otherCountryClass")
val otherCountry0Label = doc.select("label[for=otherCountries0Id]").first()
otherCountry0Label.attr("for") should be("otherCountries0Id")
val otherCountry0Input = doc.select("input[id=otherCountries0Id]").first()
otherCountry0Input.attr("id") should be("otherCountries0Id")
otherCountry0Input.attr("name") should be("otherCountries0Name")
otherCountry0Input.attr("value") should be("otherCountries0Value")
otherCountry0Input.attr("class") should include("otherCountries0Class")
val otherCountry1Label = doc.select("label[for=otherCountries1Id]").first()
otherCountry1Label.attr("for") should be("otherCountries1Id")
val otherCountry1Input = doc.select("input[id=otherCountries1Id]").first()
otherCountry1Input.attr("id") should be("otherCountries1Id")
otherCountry1Input.attr("name") should be("otherCountries1Name")
otherCountry1Input.attr("value") should be("otherCountries1Value")
otherCountry1Input.attr("class") should include("otherCountries1Class")
val otherCountry2Label = doc.select("label[for=otherCountries2Id]").first()
otherCountry2Label.attr("for") should be("otherCountries2Id")
val otherCountry2Input = doc.select("input[id=otherCountries2Id]").first()
otherCountry2Input.attr("id") should be("otherCountries2Id")
otherCountry2Input.attr("name") should be("otherCountries2Name")
otherCountry2Input.attr("value") should be("otherCountries2Value")
otherCountry2Input.attr("class") should include("otherCountries2Class")
}
}
}
| alphagov/ier-frontend | test/assets/mustache/forces/NationalityTemplateTest.scala | Scala | mit | 4,569 |
package filodb.coordinator.client
import filodb.core.query.{ColumnFilter, QueryContext}
import filodb.query.{LogicalPlan => LogicalPlan2, QueryCommand}
object QueryCommands {
import filodb.core._
// These correspond to the ColumnStore PartitionScan methods, but take in raw data ie strings, ints
// Which partitions should I query?
sealed trait PartitionQuery
final case class SinglePartitionQuery(key: Seq[Any]) extends PartitionQuery
final case class MultiPartitionQuery(keys: Seq[Seq[Any]]) extends PartitionQuery
final case class FilteredPartitionQuery(filters: Seq[ColumnFilter]) extends PartitionQuery
/**
   * Returns a Seq[String] of the first *limit* tags or columns indexed,
   * or Nil if the dataset is not found.
*/
final case class GetIndexNames(dataset: DatasetRef,
limit: Int = 10,
submitTime: Long = System.currentTimeMillis()) extends QueryCommand
/**
   * Returns a Seq[(String, Int)] of the top *limit* most popular values indexed for a given
   * tag/column, or Nil if the dataset or indexName is not found.
   * @param shard the shard to query for index values
*/
final case class GetIndexValues(dataset: DatasetRef,
indexName: String,
shard: Int,
limit: Int = 100,
submitTime: Long = System.currentTimeMillis()) extends QueryCommand
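  // Request sketch (dataset and index names are hypothetical):
  //
  //   GetIndexNames(DatasetRef("timeseries"), limit = 5)
  //   GetIndexValues(DatasetRef("timeseries"), indexName = "job", shard = 0, limit = 10)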
final case class GetTopkCardinality(dataset: DatasetRef,
shards: Seq[Int],
shardKeyPrefix: Seq[String],
depth: Int,
k: Int,
addInactive: Boolean,
submitTime: Long = System.currentTimeMillis()) extends QueryCommand
final case class StaticSpreadProvider(spreadChange: SpreadChange = SpreadChange()) extends SpreadProvider {
def spreadFunc(filter: Seq[ColumnFilter]): Seq[SpreadChange] = {
Seq(spreadChange)
}
}
case class SpreadAssignment(shardKeysMap: collection.Map[String, String], spread: Int)
/**
   * Serialize with care! Serialization would be based on the provided function.
* @param f a function that would act as the spread provider
*/
final case class FunctionalSpreadProvider(f: Seq[ColumnFilter] => Seq[SpreadChange] = { _ => Seq(SpreadChange()) })
extends SpreadProvider {
def spreadFunc(filter: Seq[ColumnFilter]): Seq[SpreadChange] = {
f (filter)
}
}
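  // Usage sketch: a provider that always answers with the default spread; a real one would
  // inspect the incoming filters (e.g. the shard-key columns) to pick a spread per series.
  //
  //   val provider = FunctionalSpreadProvider { filters =>
  //     Seq(SpreadChange())   // vary the returned SpreadChange based on `filters` as needed
  //   }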
final case class FunctionalTargetSchemaProvider(f: Seq[ColumnFilter] => Seq[TargetSchemaChange] = { _ => Seq.empty})
extends TargetSchemaProvider {
def targetSchemaFunc(filter: Seq[ColumnFilter]): Seq[TargetSchemaChange] = f(filter)
}
/**
* Executes a query using a LogicalPlan and returns the result as one message to the client.
   * Depending on queryOptions, the query will fan out to multiple nodes and shards as needed to gather
* results.
* @param dataset the dataset (and possibly database) to query
* @param logicalPlan the LogicalPlan for the query to run
* @param qContext options to control routing of query
* @return AggregateResponse, or BadQuery, BadArgument, WrongNumberOfArgs, UndefinedColumns
*/
final case class LogicalPlan2Query(dataset: DatasetRef,
logicalPlan: LogicalPlan2,
qContext: QueryContext = QueryContext(),
submitTime: Long = System.currentTimeMillis()) extends QueryCommand
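  // Construction sketch (the dataset name is a placeholder and `somePlan` stands for a
  // LogicalPlan produced by the query parser/planner, not defined here):
  //
  //   LogicalPlan2Query(DatasetRef("timeseries"), somePlan, QueryContext())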
final case class ExplainPlan2Query(dataset: DatasetRef,
logicalPlan: LogicalPlan2,
qContext: QueryContext = QueryContext(),
submitTime: Long = System.currentTimeMillis()) extends QueryCommand
// Error responses from query
final case class UndefinedColumns(undefined: Set[String]) extends ErrorResponse
final case class BadArgument(msg: String) extends ErrorResponse with QueryResponse
final case class BadQuery(msg: String) extends ErrorResponse with QueryResponse
final case class WrongNumberOfArgs(actual: Int, expected: Int) extends ErrorResponse with QueryResponse
}
| filodb/FiloDB | coordinator/src/main/scala/filodb.coordinator/client/QueryCommands.scala | Scala | apache-2.0 | 4,422 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.adaptive
import org.apache.spark.MapOutputStatistics
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.rules.Rule
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.sql.internal.SQLConf
/**
* A rule to coalesce the shuffle partitions based on the map output statistics, which can
* avoid many small reduce tasks that hurt performance.
*/
case class CoalesceShufflePartitions(session: SparkSession) extends Rule[SparkPlan] {
import CoalesceShufflePartitions._
private def conf = session.sessionState.conf
override def apply(plan: SparkPlan): SparkPlan = {
if (!conf.coalesceShufflePartitionsEnabled) {
return plan
}
if (!plan.collectLeaves().forall(_.isInstanceOf[QueryStageExec])
|| plan.find(_.isInstanceOf[CustomShuffleReaderExec]).isDefined) {
// If not all leaf nodes are query stages, it's not safe to reduce the number of
// shuffle partitions, because we may break the assumption that all children of a spark plan
      // have the same number of output partitions.
return plan
}
def collectShuffleStages(plan: SparkPlan): Seq[ShuffleQueryStageExec] = plan match {
case stage: ShuffleQueryStageExec => Seq(stage)
case _ => plan.children.flatMap(collectShuffleStages)
}
val shuffleStages = collectShuffleStages(plan)
// ShuffleExchanges introduced by repartition do not support changing the number of partitions.
// We change the number of partitions in the stage only if all the ShuffleExchanges support it.
if (!shuffleStages.forall(_.shuffle.canChangeNumPartitions)) {
plan
} else {
val shuffleMetrics = shuffleStages.map { stage =>
assert(stage.resultOption.isDefined, "ShuffleQueryStageExec should already be ready")
stage.resultOption.get.asInstanceOf[MapOutputStatistics]
}
// `ShuffleQueryStageExec` gives null mapOutputStatistics when the input RDD has 0 partitions,
// we should skip it when calculating the `partitionStartIndices`.
val validMetrics = shuffleMetrics.filter(_ != null)
      // We may have different pre-shuffle partition numbers; don't reduce the shuffle partition
      // number in that case. For example, when we union fully aggregated data (arranged into a
      // single partition) with the result of a SortMergeJoin (multiple partitions).
val distinctNumPreShufflePartitions =
validMetrics.map(stats => stats.bytesByPartitionId.length).distinct
if (validMetrics.nonEmpty && distinctNumPreShufflePartitions.length == 1) {
// We fall back to Spark default parallelism if the minimum number of coalesced partitions
// is not set, so to avoid perf regressions compared to no coalescing.
val minPartitionNum = conf.getConf(SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM)
.getOrElse(session.sparkContext.defaultParallelism)
val partitionSpecs = ShufflePartitionsUtil.coalescePartitions(
validMetrics.toArray,
advisoryTargetSize = conf.getConf(SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES),
minNumPartitions = minPartitionNum)
// This transformation adds new nodes, so we must use `transformUp` here.
val stageIds = shuffleStages.map(_.id).toSet
plan.transformUp {
// even for shuffle exchange whose input RDD has 0 partition, we should still update its
// `partitionStartIndices`, so that all the leaf shuffles in a stage have the same
// number of output partitions.
case stage: ShuffleQueryStageExec if stageIds.contains(stage.id) =>
CustomShuffleReaderExec(stage, partitionSpecs)
}
} else {
plan
}
}
}
}
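// Configuration sketch (keys as of Spark 3.0; values are illustrative): this rule only runs
// when adaptive query execution and partition coalescing are both enabled.
//
//   spark.conf.set("spark.sql.adaptive.enabled", "true")
//   spark.conf.set("spark.sql.adaptive.coalescePartitions.enabled", "true")
//   spark.conf.set("spark.sql.adaptive.coalescePartitions.minPartitionNum", "8")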
| matthewfranglen/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/adaptive/CoalesceShufflePartitions.scala | Scala | mit | 4,577 |
/*
* Copyright (c) 2015-2019 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics
package snowplow
package enrich
package common
package adapters
package registry
// Java
import com.fasterxml.jackson.core.JsonParseException
// Scalaz
import scalaz.Scalaz._
// json4s
import org.json4s._
import org.json4s.jackson.JsonMethods._
// Iglu
import com.snowplowanalytics.iglu.client.{Resolver, SchemaKey}
// Joda Time
import org.joda.time.{DateTime, DateTimeZone}
import org.joda.time.format.{DateTimeFormat, DateTimeFormatter}
// This project
import com.snowplowanalytics.snowplow.enrich.common.loaders.CollectorPayload
import com.snowplowanalytics.snowplow.enrich.common.utils.{JsonUtils => JU}
/**
* Transforms a collector payload which conforms to
* a known version of the UrbanAirship Connect API
* into raw events.
*/
object UrbanAirshipAdapter extends Adapter {
// Vendor name for Failure Message
private val VendorName = "UrbanAirship"
// Tracker version for an UrbanAirship Connect API
private val TrackerVersion = "com.urbanairship.connect-v1"
// Schemas for reverse-engineering a Snowplow unstructured event
private val EventSchemaMap = Map(
"CLOSE" -> SchemaKey("com.urbanairship.connect", "CLOSE", "jsonschema", "1-0-0").toSchemaUri,
"CUSTOM" -> SchemaKey("com.urbanairship.connect", "CUSTOM", "jsonschema", "1-0-0").toSchemaUri,
"FIRST_OPEN" -> SchemaKey("com.urbanairship.connect", "FIRST_OPEN", "jsonschema", "1-0-0").toSchemaUri,
"IN_APP_MESSAGE_DISPLAY" -> SchemaKey("com.urbanairship.connect", "IN_APP_MESSAGE_DISPLAY", "jsonschema", "1-0-0").toSchemaUri,
"IN_APP_MESSAGE_EXPIRATION" -> SchemaKey("com.urbanairship.connect",
"IN_APP_MESSAGE_EXPIRATION",
"jsonschema",
"1-0-0").toSchemaUri,
"IN_APP_MESSAGE_RESOLUTION" -> SchemaKey("com.urbanairship.connect",
"IN_APP_MESSAGE_RESOLUTION",
"jsonschema",
"1-0-0").toSchemaUri,
"LOCATION" -> SchemaKey("com.urbanairship.connect", "LOCATION", "jsonschema", "1-0-0").toSchemaUri,
"OPEN" -> SchemaKey("com.urbanairship.connect", "OPEN", "jsonschema", "1-0-0").toSchemaUri,
"PUSH_BODY" -> SchemaKey("com.urbanairship.connect", "PUSH_BODY", "jsonschema", "1-0-0").toSchemaUri,
"REGION" -> SchemaKey("com.urbanairship.connect", "REGION", "jsonschema", "1-0-0").toSchemaUri,
"RICH_DELETE" -> SchemaKey("com.urbanairship.connect", "RICH_DELETE", "jsonschema", "1-0-0").toSchemaUri,
"RICH_DELIVERY" -> SchemaKey("com.urbanairship.connect", "RICH_DELIVERY", "jsonschema", "1-0-0").toSchemaUri,
"RICH_HEAD" -> SchemaKey("com.urbanairship.connect", "RICH_HEAD", "jsonschema", "1-0-0").toSchemaUri,
"SEND" -> SchemaKey("com.urbanairship.connect", "SEND", "jsonschema", "1-0-0").toSchemaUri,
"TAG_CHANGE" -> SchemaKey("com.urbanairship.connect", "TAG_CHANGE", "jsonschema", "1-0-0").toSchemaUri,
"UNINSTALL" -> SchemaKey("com.urbanairship.connect", "UNINSTALL", "jsonschema", "1-0-0").toSchemaUri
)
/**
   * Converts a payload into a single validated event.
   * Expects valid JSON and returns a failure if the body cannot be parsed.
*
* @param body_json json payload as a string
* @param payload other payload details
* @return a validated event - a success will contain the corresponding RawEvent, failures will
* contain a reason for failure
*/
private def payloadBodyToEvent(body_json: String, payload: CollectorPayload): Validated[RawEvent] = {
def toTtmFormat(jsonTimestamp: String) =
"%d".format(new DateTime(jsonTimestamp).getMillis)
try {
val parsed = parse(body_json)
val eventType = (parsed \ "type").extractOpt[String]
val trueTimestamp = (parsed \ "occurred").extractOpt[String]
val eid = (parsed \ "id").extractOpt[String]
val collectorTimestamp = (parsed \ "processed").extractOpt[String]
lookupSchema(eventType, VendorName, EventSchemaMap) map { schema =>
RawEvent(
api = payload.api,
parameters = toUnstructEventParams(TrackerVersion,
toMap(payload.querystring) ++ Map("ttm" -> toTtmFormat(trueTimestamp.get),
"eid" -> eid.get),
schema,
parsed,
"srv"),
contentType = payload.contentType,
source = payload.source,
context = payload.context.copy(timestamp = Some(new DateTime(collectorTimestamp.get, DateTimeZone.UTC)))
)
}
} catch {
case e: JsonParseException => {
val exception = JU.stripInstanceEtc(e.toString).orNull
s"$VendorName event failed to parse into JSON: [$exception]".failNel
}
}
}
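  // Shape of an accepted body (values are placeholders; only "type", "occurred", "id" and
  // "processed" are read here, and the whole document becomes the unstructured event payload):
  //
  //   {"id": "<uuid>", "occurred": "2015-11-13T16:31:52.393Z",
  //    "processed": "2015-11-13T16:31:52.393Z", "type": "CLOSE", ...}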
/**
* Converts a CollectorPayload instance into raw events.
   * An UrbanAirship Connect API payload only contains a single event.
* We expect the name parameter to match the supported events, else
* we have an unsupported event type.
*
* @param payload The CollectorPayload containing one or more
* raw events as collected by a Snowplow collector
* @param resolver (implicit) The Iglu resolver used for
* schema lookup and validation. Not used
* @return a Validation boxing either a NEL of RawEvents on
* Success, or a NEL of Failure Strings
*/
def toRawEvents(payload: CollectorPayload)(implicit resolver: Resolver): ValidatedRawEvents =
(payload.body, payload.contentType) match {
case (None, _) => s"Request body is empty: no ${VendorName} event to process".failNel
case (_, Some(ct)) => s"Content type of ${ct} provided, expected None for ${VendorName}".failNel
case (Some(body), _) => {
val event = payloadBodyToEvent(body, payload)
rawEventsListProcessor(List(event))
}
}
}
| RetentionGrid/snowplow | 3-enrich/scala-common-enrich/src/main/scala/com.snowplowanalytics.snowplow.enrich/common/adapters/registry/UrbanAirshipAdapter.scala | Scala | apache-2.0 | 6,930 |
package org.jetbrains.plugins.scala.testingSupport.test.scalatest
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
import org.jetbrains.plugins.scala.testingSupport.test.TestConfigurationUtil.isInheritor
import org.jetbrains.plugins.scala.testingSupport.test.scalatest.ScalaTestMigrationUtils.MigrationOps._
/**
* NOTE!!!
* see [[ScalaTestMigrationUtils]] and [[ScalaTestMigrationUtils.MigrationOps.SetOps.withMigrated]]
*/
object ScalaTestUtil {
val itWordFqns: Set[String] = {
val flatSpecItWordFqns = Set(
"org.scalatest.FlatSpecLike.ItWord",
"org.scalatest.FlatSpecLike.ItVerbStringTaggedAs",
"org.scalatest.FlatSpecLike.ItVerbString",
"org.scalatest.fixture.FlatSpecLike.ItWord",
"org.scalatest.fixture.FlatSpecLike.ItVerbStringTaggedAs",
"org.scalatest.fixture.FlatSpecLike.ItVerbString"
)
val asyncFlatSpecItWordFqns = Set(
"org.scalatest.AsyncFlatSpecLike.ItWord",
"org.scalatest.AsyncFlatSpecLike.ItVerbStringTaggedAs",
"org.scalatest.AsyncFlatSpecLike.ItVerbString",
"org.scalatest.fixture.AsyncFlatSpecLike.ItWord",
"org.scalatest.fixture.AsyncFlatSpecLike.ItVerbStringTaggedAs",
"org.scalatest.fixture.AsyncFlatSpecLike.ItVerbString",
)
flatSpecItWordFqns ++ asyncFlatSpecItWordFqns ++ Set(
"org.scalatest.FunSpecLike.ItWord",
"org.scalatest.fixture.FunSpecLike.ItWord",
"org.scalatest.path.FunSpecLike.ItWord",
"org.scalatest.WordSpecLike.ItWord",
"org.scalatest.fixture.WordSpecLike.ItWord",
)
}.withMigrated
val theyWordFqns: Set[String] = {
val flatSpecTheyWordFqns = Set(
"org.scalatest.FlatSpecLike.TheyWord",
"org.scalatest.FlatSpecLike.TheyVerbStringTaggedAs",
"org.scalatest.FlatSpecLike.TheyVerbString",
"org.scalatest.fixture.FlatSpecLike.TheyWord",
"org.scalatest.fixture.FlatSpecLike.TheyVerbStringTaggedAs",
"org.scalatest.fixture.FlatSpecLike.TheyVerbString"
)
val asyncFlatSpecTheyWordFqns = Set(
"org.scalatest.AsyncFlatSpecLike.TheyWord",
"org.scalatest.AsyncFlatSpecLike.TheyVerbStringTaggedAs",
"org.scalatest.AsyncFlatSpecLike.TheyVerbString",
"org.scalatest.fixture.AsyncFlatSpecLike.TheyWord",
"org.scalatest.fixture.AsyncFlatSpecLike.TheyVerbStringTaggedAs",
"org.scalatest.fixture.AsyncFlatSpecLike.TheyVerbString",
)
flatSpecTheyWordFqns ++ asyncFlatSpecTheyWordFqns ++ Set(
"org.scalatest.FunSpecLike.TheyWord",
"org.scalatest.fixture.FunSpecLike.TheyWord",
"org.scalatest.path.FunSpecLike.TheyWord",
"org.scalatest.WordSpecLike.TheyWord",
"org.scalatest.fixture.WordSpecLike.TheyWord",
)
}.withMigrated
val funSuiteBases: List[String] = List(
"org.scalatest.FunSuite",
"org.scalatest.FunSuiteLike",
"org.scalatest.fixture.FunSuite",
"org.scalatest.fixture.FunSuiteLike",
"org.scalatest.fixture.FixtureFunSuite",
"org.scalatest.fixture.MultipleFixtureFunSuite"
).withMigrated
private val featureSpecOldBases = List(
"org.scalatest.FeatureSpec",
"org.scalatest.FeatureSpecLike",
"org.scalatest.fixture.FeatureSpec",
"org.scalatest.fixture.FeatureSpecLike",
"org.scalatest.fixture.FixtureFeatureSpec",
"org.scalatest.fixture.MultipleFixtureFeatureSpec"
)
private val featureSpecNewBases = featureSpecOldBases.migrated
val featureSpecBases: List[String] = featureSpecOldBases ++ featureSpecNewBases
val freeSpecBases: List[String] = List(
"org.scalatest.FreeSpec",
"org.scalatest.FreeSpecLike",
"org.scalatest.fixture.FreeSpec",
"org.scalatest.fixture.FreeSpecLike",
"org.scalatest.fixture.FixtureFreeSpec",
"org.scalatest.fixture.MultipleFixtureFreeSpec",
"org.scalatest.path.FreeSpec",
"org.scalatest.path.FreeSpecLike"
).withMigrated
val JUnit3SuiteBases: List[String] = List(
"org.scalatest.junit.JUnit3Suite"
).withMigrated
val JUnitSuiteBases: List[String] = List(
"org.scalatest.junit.JUnitSuite",
"org.scalatest.junit.JUnitSuiteLike"
).withMigrated
val propSpecBases: List[String] = List(
"org.scalatest.PropSpec",
"org.scalatest.PropSpecLike",
"org.scalatest.fixture.PropSpec",
"org.scalatest.fixture.PropSpecLike",
"org.scalatest.fixture.FixturePropSpec",
"org.scalatest.fixture.MultipleFixturePropSpec"
).withMigrated
val funSpecBasesPre2_0: List[String] = List(
"org.scalatest.Spec",
"org.scalatest.SpecLike",
"org.scalatest.fixture.Spec",
"org.scalatest.fixture.SpecLike",
"org.scalatest.fixture.FixtureSpec",
"org.scalatest.fixture.MultipleFixtureSpec"
).withMigrated
val funSpecBasesPost2_0: List[String] = List(
"org.scalatest.FunSpec",
"org.scalatest.FunSpecLike",
"org.scalatest.fixture.FunSpec",
"org.scalatest.fixture.FunSpecLike",
"org.scalatest.path.FunSpec",
"org.scalatest.path.FunSpecLike"
).withMigrated
val testNGSuiteBases: List[String] = List(
"org.scalatest.testng.TestNGSuite",
"org.scalatest.testng.TestNGSuiteLike"
).withMigrated
val flatSpecBases: List[String] = List(
"org.scalatest.FlatSpec",
"org.scalatest.FlatSpecLike",
"org.scalatest.fixture.FlatSpec",
"org.scalatest.fixture.FlatSpecLike",
"org.scalatest.fixture.FixtureFlatSpec",
"org.scalatest.fixture.MultipleFixtureFlatSpec"
).withMigrated
val wordSpecBases: List[String] = List(
"org.scalatest.WordSpec",
"org.scalatest.WordSpecLike",
"org.scalatest.fixture.WordSpec",
"org.scalatest.fixture.WordSpecLike",
"org.scalatest.fixture.FixtureWordSpec",
"org.scalatest.fixture.MultipleFixtureWordSpec"
).withMigrated
val suitePaths: List[String] = List(
"org.scalatest.Suite"
).withMigrated
def isFeatureSpecOld(typeDef: ScTypeDefinition): Boolean = isInheritor(typeDef, featureSpecOldBases)
def isFeatureSpecNew(typeDef: ScTypeDefinition): Boolean = isInheritor(typeDef, featureSpecNewBases)
def isFlatSpec(typeDef: ScTypeDefinition): Boolean = isInheritor(typeDef, flatSpecBases)
def isFreeSpec(typeDef: ScTypeDefinition): Boolean = isInheritor(typeDef, freeSpecBases)
def isFunSpec(typeDef: ScTypeDefinition): Boolean = isInheritor(typeDef, funSpecBasesPost2_0)
def isFunSuite(typeDef: ScTypeDefinition): Boolean = isInheritor(typeDef, funSuiteBases)
def isPropSpec(typeDef: ScTypeDefinition): Boolean = isInheritor(typeDef, propSpecBases)
def isWorldSpec(typeDef: ScTypeDefinition): Boolean = isInheritor(typeDef, wordSpecBases)
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/testingSupport/test/scalatest/ScalaTestUtil.scala | Scala | apache-2.0 | 6,568 |
object Test {
final class Tag[T]
def foo[Z >: Int <: Int, Y >: Z <: Z, X >: Y <: Y, T]: Tag[T] => T = {
case _ : Tag[X] => 0
}
}
| lampepfl/dotty | tests/pos/i11682.scala | Scala | apache-2.0 | 141 |
package com.datastax.driver.spark.connector
import java.lang.reflect.{Proxy, Method, InvocationHandler}
import com.datastax.driver.core.Session
/** Wraps a `Session` and intercepts the `close` method to invoke the `afterClose` handler. */
class SessionProxy(session: Session, afterClose: Session => Any) extends InvocationHandler {
private var closed = false
override def invoke(proxy: Any, method: Method, args: Array[AnyRef]) = {
try {
method.invoke(session, args: _*)
}
finally {
if (method.getName == "close" && !closed) {
closed = true
afterClose(session)
}
}
}
}
object SessionProxy {
/** Registers a callback on `Session#close` method.
* @param afterClose code to be invoked after the session has been closed */
def withCloseAction(session: Session)(afterClose: Session => Any): Session =
Proxy.newProxyInstance(
session.getClass.getClassLoader,
Array(classOf[Session]),
new SessionProxy(session, afterClose)).asInstanceOf[Session]
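  // Usage sketch (`cluster` is assumed to be an already-built com.datastax.driver.core.Cluster):
  //
  //   val session = SessionProxy.withCloseAction(cluster.connect()) { s =>
  //     println("session closed, releasing resources")
  //   }
  //   session.close()   // the callback above runs exactly once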
} | bovigny/cassandra-driver-spark | src/main/scala/com/datastax/driver/spark/connector/SessionProxy.scala | Scala | apache-2.0 | 1,024 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.batch.table.validation
import org.apache.flink.api.scala._
import org.apache.flink.table.api.internal.TableEnvironmentImpl
import org.apache.flink.table.api.scala._
import org.apache.flink.table.api.{EnvironmentSettings, ValidationException}
import org.apache.flink.table.planner.runtime.utils.CollectionBatchExecTable
import org.apache.flink.table.planner.utils.TableTestBase
import org.junit._
class SetOperatorsValidationTest extends TableTestBase {
@Test(expected = classOf[ValidationException])
def testUnionDifferentColumnSize(): Unit = {
val util = batchTestUtil()
val ds1 = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
val ds2 = util.addTableSource[(Int, Long, Int, String, Long)]("Table5", 'a, 'b, 'd, 'c, 'e)
// must fail. Union inputs have different column size.
ds1.unionAll(ds2)
}
@Test(expected = classOf[ValidationException])
def testUnionDifferentFieldTypes(): Unit = {
val util = batchTestUtil()
val ds1 = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
val ds2 = util.addTableSource[(Int, Long, Int, String, Long)]("Table5", 'a, 'b, 'c, 'd, 'e)
.select('a, 'b, 'c)
// must fail. Union inputs have different field types.
ds1.unionAll(ds2)
}
@Test(expected = classOf[ValidationException])
def testUnionTablesFromDifferentEnvs(): Unit = {
val settings = EnvironmentSettings.newInstance().inBatchMode().build()
val tEnv1 = TableEnvironmentImpl.create(settings)
val tEnv2 = TableEnvironmentImpl.create(settings)
val ds1 = CollectionBatchExecTable.getSmall3TupleDataSet(tEnv1)
val ds2 = CollectionBatchExecTable.getSmall3TupleDataSet(tEnv2)
// Must fail. Tables are bound to different TableEnvironments.
ds1.unionAll(ds2).select('c)
}
@Test(expected = classOf[ValidationException])
def testMinusDifferentFieldTypes(): Unit = {
val util = batchTestUtil()
val ds1 = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
val ds2 = util.addTableSource[(Int, Long, Int, String, Long)]("Table5", 'a, 'b, 'c, 'd, 'e)
.select('a, 'b, 'c)
// must fail. Minus inputs have different field types.
ds1.minus(ds2)
}
@Test(expected = classOf[ValidationException])
def testMinusAllTablesFromDifferentEnvs(): Unit = {
val settings = EnvironmentSettings.newInstance().inBatchMode().build()
val tEnv1 = TableEnvironmentImpl.create(settings)
val tEnv2 = TableEnvironmentImpl.create(settings)
val ds1 = CollectionBatchExecTable.getSmall3TupleDataSet(tEnv1)
val ds2 = CollectionBatchExecTable.getSmall3TupleDataSet(tEnv2)
// Must fail. Tables are bound to different TableEnvironments.
ds1.minusAll(ds2).select('c)
}
@Test(expected = classOf[ValidationException])
def testIntersectWithDifferentFieldTypes(): Unit = {
val util = batchTestUtil()
val ds1 = util.addTableSource[(Int, Long, String)]("Table3",'a, 'b, 'c)
val ds2 = util.addTableSource[(Int, Long, Int, String, Long)]("Table5", 'a, 'b, 'c, 'd, 'e)
.select('a, 'b, 'c)
// must fail. Intersect inputs have different field types.
ds1.intersect(ds2)
}
@Test(expected = classOf[ValidationException])
def testIntersectTablesFromDifferentEnvs(): Unit = {
val settings = EnvironmentSettings.newInstance().inBatchMode().build()
val tEnv1 = TableEnvironmentImpl.create(settings)
val tEnv2 = TableEnvironmentImpl.create(settings)
val ds1 = CollectionBatchExecTable.getSmall3TupleDataSet(tEnv1)
val ds2 = CollectionBatchExecTable.getSmall3TupleDataSet(tEnv2)
// Must fail. Tables are bound to different TableEnvironments.
ds1.intersect(ds2).select('c)
}
}
| hequn8128/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/batch/table/validation/SetOperatorsValidationTest.scala | Scala | apache-2.0 | 4,527 |
class MotherClass extends MixinWithSymbol {
def foo = Symbol("sym1")
}
object Test {
def main(args: Array[String]): Unit = {
(new MotherClass).symbolFromTrait
}
}
| som-snytt/dotty | tests/run/t8933b/Test.scala | Scala | apache-2.0 | 174 |
package me.laiseca.restcale.methodcall
import scala.reflect.runtime.{universe => ru}
import scala.reflect.runtime.universe._
class TypeTransformer {
def supports(tpe: Type):Boolean = TypeTransformer.TYPE_TRANSFORMERS.contains(tpe)
def transform(stringValue: String, tpe: Type):Option[Any] =
Option.apply(transformValue(stringValue, tpe))
private def transformValue(stringValue: String, tpe: Type):Any = {
val transformer = TypeTransformer.TYPE_TRANSFORMERS.get(tpe)
if(transformer.isDefined) {
try {
(transformer.get)(stringValue)
} catch {
case e@(_:IllegalArgumentException | _:NumberFormatException)
=> throw new IllegalValueException(stringValue, e)
}
} else {
null
}
}
}
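// Usage sketch (values are illustrative):
//
//   val transformer = new TypeTransformer
//   transformer.transform("42", ru.typeTag[Int].tpe)         // Some(42)
//   transformer.transform("y", ru.typeTag[Char].tpe)         // Some('y')
//   transformer.transform("42", ru.typeTag[BigDecimal].tpe)  // None (unsupported type)
//   transformer.transform("oops", ru.typeTag[Int].tpe)       // throws IllegalValueException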
private object TypeTransformer {
val TYPE_TRANSFORMERS:Map[Type,(String) => Any] = Map(
ru.typeTag[Byte].tpe -> ((value:String) => value.toByte),
ru.typeTag[Short].tpe -> ((value:String) => value.toShort),
ru.typeTag[Int].tpe -> ((value:String) => value.toInt),
ru.typeTag[Long].tpe -> ((value:String) => value.toLong),
ru.typeTag[Float].tpe -> ((value:String) => value.toFloat),
ru.typeTag[Double].tpe -> ((value:String) => value.toDouble),
ru.typeTag[Char].tpe -> ((value:String) => {
if(value.length() == 1) {
value.charAt(0)
} else {
throw new IllegalArgumentException
}
}),
ru.typeTag[String].tpe -> ((value:String) => value),
ru.typeTag[java.lang.String].tpe -> ((value:String) => value),
ru.typeTag[Boolean].tpe -> ((value:String) => value.toBoolean)
)
} | xabierlaiseca/restcale | core/src/main/scala/me/laiseca/restcale/methodcall/TypeTransformer.scala | Scala | apache-2.0 | 1,611 |
/**
* Copyright (C) 2009-2011 the original author or authors.
* See the notice.md file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.fusesource.scalate
package filter
import org.fusesource.scalate.{TemplateEngine, TemplateEngineAddOn}
import org.fusesource.scalamd.Markdown
/**
* Renders markdown syntax.
*
* @author <a href="http://hiramchirino.com">Hiram Chirino</a>
*/
object ScalaMarkdownFilter extends Filter with TemplateEngineAddOn {
def filter(context: RenderContext, content: String) = {
Markdown.apply(content).stripLineEnd
}
/**
* Add the markdown filter to the template engine.
*/
def apply(te: TemplateEngine) = {
te.filters += "markdown"->ScalaMarkdownFilter
te.pipelines += "md"->List(ScalaMarkdownFilter)
te.pipelines += "markdown"->List(ScalaMarkdownFilter)
}
} | dnatic09/scalate | scalate-core/src/main/scala/org/fusesource/scalate/filter/ScalaMarkdownFilter.scala | Scala | apache-2.0 | 1,436 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalog.v2
import org.apache.spark.SparkException
import org.apache.spark.annotation.Experimental
@Experimental
class CatalogNotFoundException(message: String, cause: Throwable)
extends SparkException(message, cause) {
def this(message: String) = this(message, null)
}
| aosagie/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalog/v2/CatalogNotFoundException.scala | Scala | apache-2.0 | 1,107 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import kafka.utils.Logging
class KafkaServerStartable(val serverConfig: KafkaConfig) extends Logging {
private var server : KafkaServer = null
init
private def init() {
server = new KafkaServer(serverConfig)
}
def startup() {
try {
server.startup()
}
catch {
case e =>
fatal("Fatal error during KafkaServerStable startup. Prepare to shutdown", e)
shutdown()
}
}
def shutdown() {
try {
server.shutdown()
}
catch {
case e =>
fatal("Fatal error during KafkaServerStable shutdown. Prepare to halt", e)
Runtime.getRuntime.halt(1)
}
}
def awaitShutdown() {
server.awaitShutdown
}
}
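// Typical lifecycle sketch (`props` is assumed to be a java.util.Properties holding broker settings):
//
//   val startable = new KafkaServerStartable(new KafkaConfig(props))
//   startable.startup()
//   Runtime.getRuntime.addShutdownHook(new Thread { override def run() { startable.shutdown() } })
//   startable.awaitShutdown()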
| piavlo/operations-debs-kafka | core/src/main/scala/kafka/server/KafkaServerStartable.scala | Scala | apache-2.0 | 1,526 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.calculations.IntangibleAssetsCalculator
import uk.gov.hmrc.ct.accounts.frs102.retriever.{Frs102AccountsBoxRetriever, FullAccountsBoxRetriever}
import uk.gov.hmrc.ct.box._
case class AC121A(value: Option[Int]) extends CtBoxIdentifier(name = "Intangible assets - Goodwill - Amortisation - Amortisation at [POA END]")
with CtOptionalInteger
with Input
with ValidatableBox[Frs102AccountsBoxRetriever]
with Validators {
override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] = {
collectErrors(
validateMoney(value, min = 0)
)
}
}
object AC121A extends Calculated[AC121A, FullAccountsBoxRetriever]
with IntangibleAssetsCalculator {
override def calculate(boxRetriever: FullAccountsBoxRetriever): AC121A = {
import boxRetriever._
calculateAC121A(ac118A(), ac119A(), ac120A(), ac211A())
}
}
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC121A.scala | Scala | apache-2.0 | 1,545 |
package tabula
import Tabula._
import shapeless._
import shapeless.ops.hlist._
/** A column that has a name, most commonly produced by calling the
* `[[Column.@@]]` method on a column.
*/
class NamedColumn[F, T, C, Col](val name: Cell[String], val underlying: Col)(
implicit ev: Col <:< Column[F, T, C])
extends Column[F, T, C](underlying.f)(underlying.cz, underlying.mf)
with ColFun[F, T, C]
object NamedColumn {
def names(cols: List[Column[_, _, _]]): List[Option[String]] =
cols.flatMap {
case list: ListColumn[_, _, _, _] => names(list.underlying)
case named: NamedColumn[_, _, _, _] =>
named.underlying match {
case list: ListColumn[_, _, _, _] => names(list.underlying)
case _ => named.name.value :: Nil
}
case namer: Namer => Some(namer.simpleName) :: Nil
case _ => None :: Nil
}
def names[F, T, C, NcT <: HList, Col](cols: Col :: NcT)(
implicit ev: Col <:< Column[F, T, C],
tl: ToList[Col :: NcT, Column[_, _, _]]): List[Option[String]] =
names(cols.toList[Column[_, _, _]])
}
| maxaf/tabula | core/src/main/scala/tabula/NamedColumn.scala | Scala | mit | 1,092 |
package scalasthlm.jms
import javax.jms.ConnectionFactory
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.alpakka.jms.JmsSinkSettings
import akka.stream.alpakka.jms.scaladsl.JmsSink
import akka.stream.scaladsl.{Sink, Source}
class BaseTrait {
implicit val actorSystem = ActorSystem()
implicit val actorMaterializer = ActorMaterializer()
implicit val executionContext = actorSystem.dispatcher
def wait(seconds: Int) = Thread.sleep(seconds * 1000)
}
| ScalaSthlm/alpakka-integration-patterns | module-template/src/main/scala/scalasthlm/template/BaseTrait.scala | Scala | apache-2.0 | 518 |
package me.yingrui.segment.core
import org.junit.Assert._
import org.junit.Test
class SegmentResultTest {
@Test
def should_return_original_index_of_word {
val segmentResult = SegmentWorker(
"segment.lang.en" -> "true",
"segment.lang.en.stemming" -> "true").segment("我们,He loves us!")
println(segmentResult)
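    // Expected word boundaries over "我们,He loves us!" as (start, end) offsets:
    //   我们 -> [0, 2), "," -> [2, 3), "He" -> [3, 5), "loves" -> [6, 11),
    //   "us" -> [12, 14), "!" -> [14, 15); the space characters are skipped.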
assertEquals(0, segmentResult.getWordStartAt(0))
assertEquals(2, segmentResult.getWordStartAt(1))
assertEquals(3, segmentResult.getWordStartAt(2))
assertEquals(6, segmentResult.getWordStartAt(3))
assertEquals(12, segmentResult.getWordStartAt(4))
assertEquals(14, segmentResult.getWordStartAt(5))
assertEquals(2, segmentResult.getWordEndAt(0))
assertEquals(3, segmentResult.getWordEndAt(1))
assertEquals(5, segmentResult.getWordEndAt(2))
assertEquals(11, segmentResult.getWordEndAt(3))
assertEquals(14, segmentResult.getWordEndAt(4))
assertEquals(15, segmentResult.getWordEndAt(5))
}
}
| yingrui/mahjong | lib-segment/src/test/scala/me/yingrui/segment/core/SegmentResultTest.scala | Scala | gpl-3.0 | 978 |
package com.emotioncity.soriento
import com.orientechnologies.orient.core.command.OCommandRequest
import com.orientechnologies.orient.core.db.ODatabaseRecordThreadLocal
import com.orientechnologies.orient.core.db.document.{ODatabaseDocument, ODatabaseDocumentTx}
import com.orientechnologies.orient.core.record.impl.ODocument
import com.orientechnologies.orient.core.sql.OCommandSQL
import com.orientechnologies.orient.core.sql.query.OSQLSynchQuery
import scala.collection.JavaConversions._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.{ExecutionContext, Future}
/**
* Created by stream on 31.03.15.
*/
object RichODatabaseDocumentImpl {
implicit class RichODatabaseDocumentTx(db: ODatabaseDocument) {
def queryBySql(sql: String): List[ODocument] = {
val results: java.util.List[ODocument] = db.query(new OSQLSynchQuery[ODocument](sql))
results.toList
}
def queryBySql[T](query: String)(implicit reader: ODocumentReader[T]): List[T] = {
val results: java.util.List[ODocument] = db.query(new OSQLSynchQuery[ODocument](query))
results.toList.map(document => reader.read(document))
}
def command(query: String) = {
      db.command[OCommandRequest](new OCommandSQL(query)).execute() // result type depends on the command being executed
}
def queryDoc(sql: String): List[ODocument] = {
val results: java.util.List[ODocument] = db.query(new OSQLSynchQuery[ODocument](sql))
results.toList
}
protected def asyncCall[T](x: ODatabaseDocumentTx => T): Future[T] = {
val instance = ODatabaseRecordThreadLocal.INSTANCE.get
Future {
val internalDb = instance.asInstanceOf[ODatabaseDocumentTx].copy
try {
x(internalDb)
} finally {
if (internalDb != null) {
internalDb.close()
}
}
}
}
def asyncQueryBySql(sql: String): Future[List[ODocument]] = asyncCall { internalDb =>
val results: java.util.List[ODocument] = internalDb.query(new OSQLSynchQuery[ODocument](sql))
results.toList
}
def asyncQueryBySql[T](query: String)(implicit reader: ODocumentReader[T]): Future[List[T]] = asyncCall { internalDb =>
val results: java.util.List[ODocument] = internalDb.query(new OSQLSynchQuery[ODocument](query))
results.toList.map(document => reader.read(document))
}
}
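
  // Usage sketch (class and field names in the queries are hypothetical):
  //   import com.emotioncity.soriento.RichODatabaseDocumentImpl._
  //
  //   val docs: List[ODocument] = db.queryBySql("select from Person where name = 'Alice'")
  //   val people: List[Person] = db.queryBySql[Person]("select from Person")  // needs an implicit ODocumentReader[Person]
  //   val async: Future[List[ODocument]] = db.asyncQueryBySql("select from Person")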
}
| b0c1/Soriento | src/main/scala/com/emotioncity/soriento/RichODatabaseDocumentImpl.scala | Scala | apache-2.0 | 2,384 |
package scalan.it
import scalan._
import scalan.compilation.{GraphVizConfig, Compiler}
import scalan.util.FileUtil
import scalan.util.FileUtil.file
// extracted so it can be used with different suite styles
trait ItTestsUtil[Prog <: Scalan] extends TestsUtil {
override def testOutDir = "it-out"
// can be overridden
def defaultGraphVizConfig = GraphVizConfig.default
type ProgCompiler = Compiler[_ <: Prog with ScalanDslExp]
class CompilerWithConfig private (val compiler: ProgCompiler)(_config: ProgCompiler#CompilerConfig) {
def config = _config.asInstanceOf[compiler.CompilerConfig]
}
object CompilerWithConfig {
def apply(compiler: ProgCompiler)(config: compiler.CompilerConfig) = new CompilerWithConfig(compiler)(config)
}
implicit def compilerWithDefaultConfig(compiler: ProgCompiler): CompilerWithConfig =
CompilerWithConfig(compiler)(compiler.defaultCompilerConfig)
def cwc(compiler: ProgCompiler)(compilerConfig: compiler.CompilerConfig) =
CompilerWithConfig(compiler)(compilerConfig)
def defaultCompilers: Seq[CompilerWithConfig]
val progStd: Prog with ScalanStd
/** Utility method to be used when defining [[defaultCompilers]]. */
def compilers(cs: CompilerWithConfig*) = cs
private def sourceDir(functionName: String) =
file(prefix, functionName)
// gives each compiler a subfolder of sourceDir(functionName) to make them unique
private def compilersWithSourceDirs(compilers: Seq[CompilerWithConfig], functionName: String, deleteBaseDir: Boolean = true) = {
val baseDir = sourceDir(functionName)
if (deleteBaseDir) {
FileUtil.deleteIfExist(baseDir)
}
compilers match {
case Seq(onlyCompiler) =>
Seq(onlyCompiler -> baseDir)
case _ => compilers.zipWithIndex.map {
case (cwc, index) => (cwc, file(baseDir, s"${index + 1}_${cwc.compiler.name}"))
}
}
}
def assertFileContentCheck(name: String): Unit =
FileUtil.read(file(prefix, name)) should be(FileUtil.read(file(prefix, name + ".check")))
def buildGraphs[A, B](f: Prog => Prog#Rep[A => B],
compilers: Seq[CompilerWithConfig] = defaultCompilers,
graphVizConfig: GraphVizConfig = defaultGraphVizConfig,
functionName: String = currentTestNameAsFileName) =
compilersWithSourceDirs(compilers, functionName).foreach { case (cwc, dir) =>
cwc.compiler.buildGraph(dir, functionName,
f(cwc.compiler.scalan).asInstanceOf[cwc.compiler.Exp[A => B]], graphVizConfig)(cwc.config)
}
def compileSource[A, B](f: Prog => Prog#Rep[A => B],
compilers: Seq[CompilerWithConfig] = defaultCompilers,
graphVizConfig: GraphVizConfig = defaultGraphVizConfig,
functionName: String = currentTestNameAsFileName) =
compilersWithSourceDirs(compilers, functionName).map { case (cwc, dir) =>
cwc.compiler.buildExecutable(dir, functionName,
f(cwc.compiler.scalan).asInstanceOf[cwc.compiler.Exp[A => B]], graphVizConfig)(cwc.config)
}
def getStagedOutput[A, B](f: Prog => Prog#Rep[A => B],
compilers: Seq[CompilerWithConfig] = defaultCompilers,
graphVizConfig: GraphVizConfig = defaultGraphVizConfig,
functionName: String = currentTestNameAsFileName)(inputs: A*) = {
val compiled = compilersWithSourceDirs(compilers, functionName).map { case (cwc, dir) =>
val out = cwc.compiler.buildExecutable(dir, functionName,
f(cwc.compiler.scalan).asInstanceOf[cwc.compiler.Exp[A => B]],
graphVizConfig)(cwc.config)
(cwc.compiler, out)
}
inputs.map { input =>
compiled.map { case (compiler, out) =>
compiler.execute(out.asInstanceOf[compiler.CompilerOutput[A, B]], input)
}
}
}
def compareOutputWithStd[A, B](f: Prog => Prog#Rep[A => B],
compilers: Seq[CompilerWithConfig] = defaultCompilers,
graphVizConfig: GraphVizConfig = defaultGraphVizConfig,
functionName: String = currentTestNameAsFileName)(inputs: A*) = {
val fStd = f(progStd).asInstanceOf[A => B]
val expectedOutputs = inputs.map { x => (x, fStd(x)) }
compareOutputWithExpected(f, compilers, graphVizConfig, functionName)(expectedOutputs: _*)
}
def compareOutputWithExpected[A, B](f: Prog => Prog#Rep[A => B],
compilers: Seq[CompilerWithConfig] = defaultCompilers,
graphVizConfig: GraphVizConfig = defaultGraphVizConfig,
functionName: String = currentTestNameAsFileName)
(expectedOutputs: (A, B)*) = {
val compiled = compilersWithSourceDirs(compilers, functionName).map { case (cwc, dir) =>
val out = cwc.compiler.buildExecutable(dir, functionName,
f(cwc.compiler.scalan).asInstanceOf[cwc.compiler.Exp[A => B]], graphVizConfig)(cwc.config)
(cwc.compiler, out)
}
for {
(input, expected) <- expectedOutputs
(compiler, out_) <- compiled
} {
val out = out_.asInstanceOf[compiler.CompilerOutput[A, B]]
val output = compiler.execute(out, input)
assert(expected === output, s"Compiler: $compiler,\\n input: $input,\\n expected: $expected,\\n got: $output")
}
}
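
  // Usage sketch from a test (the staged function is hypothetical and assumes the DSL's `fun`
  // lifting method is in scope on the program cake):
  //   compareOutputWithStd((p: Prog) => p.fun { x: p.Rep[Int] => x + 1 })(1, 2, 3)
  // This compiles the staged function with every entry in `defaultCompilers`, runs it on the
  // inputs 1, 2 and 3, and checks each result against the unstaged evaluation on `progStd`.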
// Note: deprecated API will be removed before next release (0.2.11 or 0.3.0)
final class GetStagedOutput[S <: Scalan, Back <: Compiler[S with ScalanDslExp]](val back: Back) {
def apply[A, B](f: S => S#Rep[A => B], functionName: String, input: A, compilerConfig: back.CompilerConfig = back.defaultCompilerConfig): B = {
val compiled = compileSource[S](back)(f, functionName, compilerConfig)
back.execute(compiled, input)
}
@deprecated("Use the overload taking f: S => S#Rep[A => B] instead", "0.2.10")
def apply[A, B](f: back.scalan.Exp[A => B], functionName: String, input: A): B =
getStagedOutputConfig(back)(f, functionName, input, back.defaultCompilerConfig)
}
// TODO still used in LmsMSTItTests
// @deprecated("Use overload taking compilers instead", "0.2.11")
def getStagedOutput[S <: Scalan](back: Compiler[S with ScalanDslExp]) = new GetStagedOutput[S, back.type](back)
@deprecated("Use getStagedOutput with f: S => S#Rep[A => B] instead", "0.2.10")
def getStagedOutputConfig[A, B](back: Compiler[_ <: ScalanDslExp])(f: back.scalan.Exp[A => B], functionName: String, input: A, compilerConfig: back.CompilerConfig): B = {
val compiled = compileSource(back)(f, functionName, compilerConfig)
back.execute(compiled, input)
}
final class CompileSource[S <: Scalan, Back <: Compiler[S with ScalanDslExp]](val back: Back) {
def apply[A, B](f: S => S#Rep[A => B], functionName: String, compilerConfig: back.CompilerConfig = back.defaultCompilerConfig): back.CompilerOutput[A, B] = {
back.buildExecutable(sourceDir(functionName), functionName, f(back.scalan).asInstanceOf[back.Exp[A => B]], defaultGraphVizConfig)(compilerConfig)
}
@deprecated("Use the overload taking f: S => S#Rep[A => B] instead", "0.2.10")
def apply[A, B](f: back.scalan.Exp[A => B], functionName: String, compilerConfig: back.CompilerConfig) : back.CompilerOutput[A, B] = {
back.buildExecutable(sourceDir(functionName), functionName, f, defaultGraphVizConfig)(compilerConfig)
}
}
// TODO still used in UniCompilerItTests
// @deprecated("Use overload taking compilers instead", "0.2.11")
def compileSource[S <: Scalan](back: Compiler[S with ScalanDslExp]) = new CompileSource[S, back.type](back)
implicit def defaultComparator[A](expected: A, actual: A): Unit = {
actual should equal(expected)
}
@deprecated("Use overload taking compilers instead", "0.2.11")
final class CompareOutputWithSequential[S <: Scalan, Back <: Compiler[S with ScalanDslExp]](val back: Back, forth: S with ScalanDslStd) {
def apply[A, B](f: S => S#Rep[A => B], functionName: String, input: A, compilerConfig: back.CompilerConfig = back.defaultCompilerConfig)
(implicit comparator: (B, B) => Unit) = {
compareOutputWithExpected[S](back)(f(forth).asInstanceOf[A => B](input), f, functionName, input, compilerConfig)
}
}
@deprecated("Use overload taking compilers instead", "0.2.11")
def compareOutputWithStd[S <: Scalan](back: Compiler[S with ScalanDslExp], forth: S with ScalanDslStd) = new CompareOutputWithSequential[S, back.type](back, forth)
@deprecated("Use the overload taking f: S => S#Rep[A => B] instead", "0.2.10")
def compareOutputWithStd[A, B](back: Compiler[_ <: ScalanDslExp])
(fSeq: A => B, f: back.scalan.Exp[A => B], functionName: String, input: A)
(implicit comparator: (B, B) => Unit) {
compareOutputWithSequentialConfig(back)(fSeq, f, functionName, input, back.defaultCompilerConfig)
}
@deprecated("Use compareOutputWithSequential with f: S => S#Rep[A => B] instead", "0.2.10")
def compareOutputWithSequentialConfig[A, B](back: Compiler[_ <: ScalanDslExp])
(fSeq: A => B, f: back.scalan.Exp[A => B], functionName: String, input: A, config: back.CompilerConfig)
(implicit comparator: (B, B) => Unit) {
compareOutputWithExpectedConfig(back)(fSeq(input), f, functionName, input, config)
}
@deprecated("Use overload taking compilers instead", "0.2.11")
final class CompareOutputWithExpected[S <: Scalan, Back <: Compiler[S with ScalanDslExp]](val back: Back) {
def apply[A, B](expected: B, f: S => S#Rep[A => B], functionName: String, input: A, compilerConfig: back.CompilerConfig = back.defaultCompilerConfig)
(implicit comparator: (B, B) => Unit) = {
val actual = getStagedOutput[S](back)(f, functionName, input, compilerConfig)
comparator(expected, actual)
}
@deprecated("Use the overload taking f: S => S#Rep[A => B] instead", "0.2.10")
def apply[A, B](expected: B, f: back.scalan.Exp[A => B], functionName: String, input: A)
(implicit comparator: (B, B) => Unit) = {
compareOutputWithExpectedConfig(back)(expected, f, functionName, input, back.defaultCompilerConfig)
}
}
@deprecated("Use overload taking compilers instead", "0.2.11")
def compareOutputWithExpected[S <: Scalan](back: Compiler[S with ScalanDslExp]) = new CompareOutputWithExpected[S, back.type](back)
@deprecated("Use compareOutputWithExpected with f: S => S#Rep[A => B] instead", "0.2.10")
def compareOutputWithExpectedConfig[A, B](back: Compiler[_ <: ScalanDslExp])
(expected: B, f: back.scalan.Exp[A => B], functionName: String, input: A, config: back.CompilerConfig)
(implicit comparator: (B, B) => Unit) {
val actual = getStagedOutputConfig(back)(f, functionName, input, config)
comparator(expected, actual)
}
def untyped[S <: Scalan](f: S => S#Rep[_ => _]) = f.asInstanceOf[S => S#Rep[Any => Any]]
}
| scalan/scalan | core/src/test/scala/scalan/it/ItTestsUtil.scala | Scala | apache-2.0 | 11,222 |
package co.ledger.wallet.core.wallet.ripple.api
import co.ledger.wallet.core.device.utils.{EventEmitter, EventReceiver}
import co.ledger.wallet.core.net.{WebSocket, WebSocketFactory}
import co.ledger.wallet.core.wallet.ripple.XRP
import co.ledger.wallet.core.wallet.ripple.events.{NewBlock, NewTransaction}
import co.ledger.wallet.web.ripple.core.event.JsEventEmitter
import co.ledger.wallet.web.ripple.core.utils.ChromeGlobalPreferences
import co.ledger.wallet.web.ripple.wallet.RippleLibApi.LedgerEvent
import co.ledger.wallet.web.ripple.wallet.RippleWalletClient
import exceptions.{DisconnectedException, MissingTagException, RippleException}
import io.circe.JsonObject
import org.json.{JSONArray, JSONObject}
import org.scalajs.dom
import org.scalajs.dom.{CloseEvent, ErrorEvent}
import scala.collection.mutable.ArrayBuffer
import scala.scalajs.js.timers
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.scalajs.js
import scala.util.{Failure, Success, Try}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.scalajs.js.timers.{SetTimeoutHandle, clearTimeout, setTimeout}
import co.ledger.wallet.web.ripple.sentry.SentryManager
import co.ledger.wallet.web.ripple.logz.LogzManager
/**
* Created by alix on 5/2/17.
*/
class WebSocketRipple(factory: WebSocketFactory,
addresses: Array[String],
wallet: RippleWalletClient) {
import WebSocketRipple._
def start(): Unit = {
if (_socket.isEmpty) {
connect()
}
}
var connecting = Promise[Unit]()
var connected = false
val emmiter = new JsEventEmitter
private var _ws: Option[WebSocket] = None
js.Dynamic.global.gui.Window.get().on("close", () => {
try {
if(isRunning && _ws.isDefined){
_ws.get.close()
_socket = None
}
} finally {
js.Dynamic.global.gui.Window.get().close(true)
}
})
private def connect(): Unit = {
println("connecting socket")
_socket = Some(factory.connect(""))
_socket.get onComplete {
case Success(ws) =>
println("success socket")
_ws = Some(ws)
ws.onJsonMessage(onMessage _)
val subscribeMessage = js.Dynamic.literal(
command = "subscribe",
accounts = js.Array(addresses(0))) //TODO: change in case of multi account
send(subscribeMessage) map {(msg) =>
if (!connected) {
println("Subscribed")
connected = true
            connecting.success(())
}
}
ws onClose { (ex) =>
println("close websocket: "+ex.getMessage())
LogzManager.log("Web socket closed: "+ex.getMessage())
connecting = Promise[Unit]()
connected = false
emmiter.emit(WebSocketDisconnectedEvent())
if (isRunning)
connect()
}
ws onError { (ex) =>
println("websocket error: "+ex.getMessage())
SentryManager.log("[Websocket error]: "+ex.getMessage())
}
case Failure(ex) =>
println("failure websocket"+ex.getMessage())
ex.printStackTrace()
SentryManager.log("[Failure connecting] "+ex.getMessage())
if (isRunning)
connect()
}
}
var promisesTable: Map[Int,Promise[JSONObject]] = Map.empty
def onMessage(msg: JSONObject): Unit = {
println("received",msg.toString.substring(0,400))
if (msg.has("id")) {
val callId = msg.getInt("id")
val p = promisesTable.get(callId).get
promisesTable -= callId
if (!p.isCompleted){
p success msg
}
}
if (msg.optString("type", "") == "transaction" && msg.optBoolean("validated", false) && msg.optString("engine_result", "") == "tesSUCCESS") {
if (msg.optJSONObject("transaction").optString("Account", addresses(0)) != addresses(0)) {
wallet.synchronize()
}
}
if (msg.optString("type","") == "transaction" && msg.optBoolean("validated", false) && msg.optJSONObject("transaction") != null) {
if (msg.optJSONObject("transaction").optString("Account", "") == addresses(0)) {
msg.optJSONObject("meta").optString("TransactionResult", "") match {
case "tesSUCCESS" => {
println("transactions received")
emmiter.emit(WebSocketTransactionSentEvent(msg.optJSONObject("transaction").optString("TxnSignature", "")))
}
case other => {
println("error with tx")
emmiter.emit(WebSocketErrorEvent(other, msg.optJSONObject("transaction").optString("TxnSignature", "")))
}
}
}
}
}
/*private def onMessage(json: JSONObject): Unit = {
println(json.toString.substring(0,200))
if (json.optString("type", "") == "transaction" && json.optBoolean("validated", false) && json.optString("engine_result", "") == "tesSUCCESS") {
setTimeout(2000) {
wallet.synchronize()
}
}
if (json.optString("type","") == "response" && json.optString("status", "") == "success" ) {
if (json.optJSONObject("result").has("account_data")) {
if (json.optJSONObject("result").getJSONObject("account_data").optString("Account","") == addresses(0)) {
println("balance received")
emmiter.emit(WebSocketResponseEvent("balance", json))
}
}
}
if (json.toString == "{\\"result\\":{},\\"status\\":\\"success\\",\\"type\\":\\"response\\"}") {
if (!connected) {
println("Subscribed")
connected = true
connecting.success()
}
}
if (json.optString("type","") == "transaction" && json.optBoolean("validated", false) && json.optJSONObject("transaction") != null) {
if (json.optJSONObject("transaction").optString("Account", "") == addresses(0) &&
json.optJSONObject("meta").optString("TransactionResult", "") == "tesSUCCESS") {
println("transactions received")
emmiter.emit(WebSocketTransactionSentEvent(json.optJSONObject("transaction").optString("TxnSignature", "")))
}
}
if (json.optString("type","") == "transaction" && json.optBoolean("validated", false) && json.optJSONObject("transaction") != null) {
if (json.optJSONObject("transaction").optString("Account", "") == addresses(0) &&
json.optJSONObject("meta").optString("TransactionResult", "") == "tecDST_TAG_NEEDED") {
emmiter.emit(WebSocketErrorEvent("tecDST_TAG_NEEDED", json.optJSONObject("transaction").optString("TxnSignature", "")))
}
}
if (json.optString("type","") == "response" && json.optString("status", "") == "success" ) {
if (json.optJSONObject("result").optString("account","") == addresses(0) &&
json.optJSONObject("result").has("transactions")) {
println("transactions received")
println(json.getJSONObject("result").getJSONArray("transactions").length())
emmiter.emit(WebSocketResponseEvent("transactions", json))
}
}
}*/
private var callCounter=0
private def _callId = {
callCounter+=1
callCounter
}
def send(json: js.Dynamic) = {
val callId = _callId +10
val p = Promise[JSONObject]()
json.updateDynamic("id")(callId)
promisesTable += (callId->p)
println("Sending", js.JSON.stringify(json))
_ws.get.send(js.JSON.stringify(json))
setTimeout(2000) {
if (!p.isCompleted) {
p failure(RippleException())
}
}
p.future
}
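
  // Usage sketch: `send` correlates a request with its response through the generated call id and
  // the promise table, so callers simply map over the returned future (command name taken from the
  // rippled websocket API):
  //   send(js.Dynamic.literal(command = "server_info")) map { msg =>
  //     msg.optJSONObject("result")
  //   }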
def stop(): Unit = {
_socket foreach {(s) =>
_socket = None
s.foreach(_.close())
}
}
def balance(account: String = ""): Future[XRP] = {
if (!connected) {
Future.failed(DisconnectedException())
} else {
var target = account
if (account == ""){
target = addresses(0)
}
val balance = js.Dynamic.literal(
command = "account_info",
account = target)
println("target",target)
send(balance) map {(msg) =>
if (msg.optString("status","error")=="success"){
XRP(msg.optJSONObject("result").getJSONObject("account_data").optString("Balance", ""))
} else {
XRP.Zero
}
}
}
}
def fee(): Future[XRP] = {
if (!connected) {
Future.failed(DisconnectedException())
} else {
val fee = js.Dynamic.literal(
command = "fee" )
send(fee) map {(msg) =>
if (msg.optString("status","error")=="success"){
XRP(msg.optJSONObject("result").getJSONObject("drops").optString("base_fee", "10"))
} else {
XRP(10)
}
}
}
}
def transactions(ledger_min: Long = 0): Future[Array[JsonTransaction]] = {
if (!connected) {
Future.failed(DisconnectedException())
} else {
var offset: Int = 0
var transactionsBuffer = ArrayBuffer[JsonTransaction]()
def iterate(off: Int = 0): Future[Array[JsonTransaction]] = {
val txs = js.Dynamic.literal(
command = "account_tx",
account = addresses(0),
ledger_index_min = ledger_min,
forward = true,
offset = off
)
send(txs) flatMap {(json) =>
println("length received",json.getJSONObject("result").getJSONArray("transactions").length())
if (json.getJSONObject("result").getJSONArray("transactions").length() > 0) {
val txs = json.getJSONObject("result").getJSONArray("transactions")
(0 until txs.length()) map { (index: Int) =>
if (txs.getJSONObject(index).getJSONObject("meta").getString("TransactionResult") == "tesSUCCESS") {
transactionsBuffer.append(new JsonTransaction(txs.getJSONObject(index)))
println(transactionsBuffer.last)
}
}
println("buffer length",transactionsBuffer.length)
offset = offset + json.getJSONObject("result").getJSONArray("transactions").length()
iterate(offset)
} else {
Future.successful(transactionsBuffer.toArray)
}
}
}
iterate()
}
}
def isRunning = _socket.isDefined
private var _socket: Option[Future[WebSocket]] = None
}
object WebSocketRipple {
case class WebSocketDisconnectedEvent()
case class WebSocketErrorEvent(name: String, data: String)
case class WebSocketTransactionSentEvent(txn: String)
case class WebSocketTransactionReceivedEvent(txn: String, tx: JSONObject)
case class WebSocketResponseEvent(name: String, response: JSONObject)
}
| LedgerHQ/ledger-wallet-ripple | src/main/scala/co/ledger/wallet/core/wallet/ripple/api/WebSocketRipple.scala | Scala | mit | 10,478 |
package BIDMach.models
import BIDMat.{Mat,SBMat,CMat,DMat,FMat,IMat,HMat,GMat,GIMat,GSMat,SMat,SDMat}
import BIDMat.MatFunctions._
import BIDMat.SciFunctions._
import BIDMach.datasources._
import BIDMach.updaters._
import BIDMach._
import java.text.NumberFormat
import edu.berkeley.bid.CUMACH._
import scala.collection.mutable._
/**
* A Bayesian Network implementation with fast parallel Gibbs Sampling (e.g., for MOOC data).
*
* Haoyu Chen and Daniel Seita are building off of Huasha Zhao's original code.
*
* The input needs to be (1) a graph, (2) a sparse data matrix, and (3) a states-per-node file.
* Make sure the dag and states files are aligned, and that variables are in a topological ordering!
*/
class BayesNet(val dag:Mat,
val states:Mat,
override val opts:BayesNet.Opts = new BayesNet.Options) extends Model(opts) {
var mm:Mat = null // Local copy of the cpt
var cptOffset:Mat = null // For global variable offsets
var graph:Graph = null // Data structure representing the DAG
var iproject:Mat = null // Local CPT offset matrix
var pproject:Mat = null // Parent tracking matrix
var statesPerNode:Mat = null // Variables can have an arbitrary number of states
var colorInfo:Array[ColorGroup] = null // Gives us, for each color, a colorStuff class (of arrays)
var zeroMap:HashMap[(Int,Int),Mat] = null // Map from (nr,nc) -> a zero matrix (to avoid allocation)
var randMap:HashMap[(Int,Int),Mat] = null // Map from (nr,nc) -> a rand matrix (to avoid allocation)
var normConstMatrix:Mat = null // Normalizes the cpt. Do cpt / (cpt.t * nConstMat).t
var useGPUnow:Boolean = false
var batchSize:Int = -1
var counts:Mat = null
/**
* Performs a series of initialization steps.
*
* - Builds iproject/pproject for local offsets and computing probabilities, respectively.
* - For each color group, determine some necessary matrices for uupdate later.
* - Build the CPT, which is actually counts, not probabilities. I initialize it randomly.
*
* Note that the randomization of the input data to be put back in the data is done in uupdate.
*/
override def init() = {
useGPUnow = opts.useGPU && (Mat.hasCUDA > 0)
// Establish the states per node, the (colored) Graph data structure, and its projection matrices.
statesPerNode = IMat(states)
graph = new Graph(dag, opts.dim, statesPerNode)
graph.color
iproject = if (useGPUnow) GSMat((graph.iproject).t) else (graph.iproject).t
pproject = if (useGPUnow) GSMat(graph.pproject) else graph.pproject
// Build the CPT. For now, it stores counts, and to avoid div-by-zero errors, initialize randomly.
val numSlotsInCpt = IMat(exp(ln(FMat(statesPerNode).t) * SMat(pproject)) + 1e-4)
cptOffset = izeros(graph.n, 1)
cptOffset(1 until graph.n) = cumsum(numSlotsInCpt)(0 until graph.n-1)
cptOffset = convertMat(cptOffset)
val lengthCPT = sum(numSlotsInCpt).dv.toInt
val cpt = convertMat(rand(lengthCPT,1))
// To finish building CPT, we normalize it based on the batch size and normalizing constant matrix.
normConstMatrix = getNormConstMatrix(lengthCPT)
cpt <-- ( cpt / (cpt.t * normConstMatrix).t )
val mats = datasource.next
cpt <-- (cpt * mats(0).ncols)
datasource.reset
setmodelmats(new Array[Mat](1))
modelmats(0) = cpt
mm = modelmats(0)
updatemats = new Array[Mat](1)
updatemats(0) = mm.zeros(mm.nrows, mm.ncols)
// For each color group, pre-compute most relevant matrices we need later (this does a lot!)
colorInfo = new Array[ColorGroup](graph.ncolors)
for (c <- 0 until graph.ncolors) {
colorInfo(c) = computeAllColorGroupInfo(c)
}
zeroMap = new HashMap[(Int,Int),Mat]()
randMap = new HashMap[(Int,Int),Mat]()
// Finally, create/convert a few matrices, reset some variables, and add some debugging info
counts = mm.izeros(mm.length, 1)
statesPerNode = convertMat(statesPerNode)
batchSize = -1
}
/** Calls a uupdate/mupdate sequence. Known data is in gmats(0), sampled data is in gmats(1). */
override def dobatch(gmats:Array[Mat], ipass:Int, here:Long) = {
uupdate(gmats(0), gmats(1), ipass)
mupdate(gmats(0), gmats(1), ipass)
}
/** Calls a uupdate/evalfun sequence. Known data is in gmats(0), sampled data is in gmats(1). */
override def evalbatch(gmats:Array[Mat], ipass:Int, here:Long):FMat = {
uupdate(gmats(0), gmats(1), ipass)
evalfun(gmats(0), gmats(1))
}
/**
* Computes an update for the conditional probability table by sampling each variable once (for now).
*
* In the first ipass, it randomizes the user matrix except for those values are already known from
* sdata. It also establishes various matrices to be put in the colorInfo array or the hash maps (for
* caching purposes). For each data batch, it iterates through color groups and samples in parallel.
*
* @param sdata The sparse data matrix for this batch (0s = unknowns), which the user matrix shifts by -1.
* @param user A data matrix with the same dimensions as sdata, and whose columns represent various iid
* assignments to all the variables. The known values of sdata are inserted in the same spots in this
* matrix, but the unknown values are randomized to be in {0,1,...,k}.
* @param ipass The current pass over the full data source (not the Gibbs sampling iteration number).
*/
def uupdate(sdata:Mat, user:Mat, ipass:Int):Unit = {
var numGibbsIterations = opts.samplingRate
// For the first pass, we need to create a lot of matrices that rely on knowledge of the batch size.
if (ipass == 0) {
numGibbsIterations = numGibbsIterations + opts.numSamplesBurn
establishMatrices(sdata.ncols)
val state = convertMat(rand(sdata.nrows, sdata.ncols))
state <-- float( min( int(statesPerNode ∘ state), statesPerNode-1 ) ) // Need an extra float() outside
val data = full(sdata)
val select = data > 0
user ~ (select ∘ (data-1)) + ((1-select) ∘ state)
}
val usertrans = user.t
for (k <- 0 until numGibbsIterations) {
for (c <- 0 until graph.ncolors) {
// Prepare our data by establishing the appropriate offset matrices for the entire CPT blocks
usertrans(?, colorInfo(c).idsInColor) = zeroMap( (usertrans.nrows, colorInfo(c).numNodes) )
val offsetMatrix = usertrans * colorInfo(c).iprojectSliced + (colorInfo(c).globalOffsetVector).t
val replicatedOffsetMatrix = int(offsetMatrix * colorInfo(c).replicationMatrix) + colorInfo(c).strideVector
val logProbs = ln(mm(replicatedOffsetMatrix))
val nonExponentiatedProbs = (logProbs * colorInfo(c).combinationMatrix).t
// Establish matrices needed for the multinomial sampling
val keys = if (user.ncols == batchSize) colorInfo(c).keysMatrix else colorInfo(c).keysMatrixLast
val bkeys = if (user.ncols == batchSize) colorInfo(c).bkeysMatrix else colorInfo(c).bkeysMatrixLast
val bkeysOff = if (user.ncols == batchSize) colorInfo(c).bkeysOffsets else colorInfo(c).bkeysOffsetsLast
val randIndices = if (user.ncols == batchSize) colorInfo(c).randMatrixIndices else colorInfo(c).randMatrixIndicesLast
val sampleIndices = if (user.ncols == batchSize) colorInfo(c).sampleIDindices else colorInfo(c).sampleIDindicesLast
// Parallel multinomial sampling. Check the colorInfo matrices since they contain a lot of info.
val maxInGroup = cummaxByKey(nonExponentiatedProbs, keys)(bkeys)
val probs = exp(nonExponentiatedProbs - maxInGroup)
val cumprobs = cumsumByKey(probs, keys)
val normedProbs = cumprobs / cumprobs(bkeys)
// With cumulative probabilities set up in normedProbs matrix, create a random matrix and sample
val randMatrix = randMap( (colorInfo(c).numNodes, usertrans.nrows) )
rand(randMatrix)
randMatrix <-- randMatrix * 0.99999f
val lessThan = normedProbs < randMatrix(randIndices)
val sampleIDs = cumsumByKey(lessThan, keys)(sampleIndices)
usertrans(?, colorInfo(c).idsInColor) = sampleIDs.t
// After we finish sampling with this color group, we override the known values.
val data = full(sdata)
val select = data > 0
usertrans ~ (select *@ (data-1)).t + ((1-select) *@ usertrans.t).t
}
}
user <-- usertrans.t
}
/**
* After one set of Gibbs sampling iterations, we put the sampled counts in the updatemats(0)
* value so that it gets "averaged into" the cpt (mm or modelmats(0)), from the IncNorm updater.
* Also, if the uupdate call involved more than one Gibbs sampling iterations, then the mupdate
* effectively "thins" the sampler by only taking results from every n^th^ sample.
*
* @param sdata The sparse data matrix for this batch (0s = unknowns), which we do not use here.
* @param user A data matrix with the same dimensions as sdata, and whose columns represent various
* iid assignments to all the variables. The known values of sdata are inserted in the same spots
* in this matrix, but the unknown values are randomized to be in {0,1,...,k}.
* @param ipass The current pass over the full data source (not the Gibbs sampling iteration number).
*/
def mupdate(sdata:Mat, user:Mat, ipass:Int):Unit = {
val index = int(cptOffset + (user.t * iproject).t)
val linearIndices = index(?)
counts <-- accum(linearIndices, 1, counts.length, 1)
updatemats(0) <-- (float(counts) + opts.alpha)
}
/**
* Evaluates the log-likelihood of the data (per column, or per full assignment of all variables).
* First, we get the index matrix, which indexes into the CPT for each column's variable assignment.
   * Then, using the normalized CPT, we find the log probabilities of the user matrix, and sum
   * vertically (i.e., over the variables, which is valid because the joint factorizes over the CPTs)
   * and then horizontally (i.e., over the samples, which we can do since the columns are assumed i.i.d.).
*/
def evalfun(sdata:Mat, user:Mat):FMat = {
val index = int(cptOffset + (user.t * iproject).t)
val cptNormalized = mm / (mm.t * normConstMatrix).t
val result = FMat( sum(sum(ln(cptNormalized(index)),1),2) ) / user.ncols
return result
}
// -----------------------------------
// Various debugging or helper methods
// -----------------------------------
/**
* Determines a variety of information for this color group, and stores it in a ColorGroup object.
* First, it establishes some basic information from each color group. Then it computes the more
* complicated replication matrices, stride vectors, and combination matrices. Check the colorInfo
* class for details on what the individual matrices represent.
*
* @param c The integer index of the given color group.
*/
def computeAllColorGroupInfo(c:Int) : ColorGroup = {
val cg = new ColorGroup
cg.idsInColor = find(IMat(graph.colors) == c)
cg.numNodes = cg.idsInColor.length
cg.chIdsInColor = find(FMat(sum(SMat(pproject)(cg.idsInColor,?),1)))
cg.numNodesCh = cg.chIdsInColor.length
cg.iprojectSliced = SMat(iproject)(?,cg.chIdsInColor)
cg.globalOffsetVector = convertMat(FMat(cptOffset(cg.chIdsInColor))) // Need FMat to avoid GMat+GIMat
val startingIndices = izeros(cg.numNodes,1)
startingIndices(1 until cg.numNodes) = cumsum(IMat(statesPerNode(cg.idsInColor)))(0 until cg.numNodes-1)
cg.startingIndices = convertMat(startingIndices)
// Gather useful information for determining the replication, stride, and combination matrices
var ncols = 0
val numOnes = izeros(1,cg.numNodesCh) // Determine how many 1s to have
val strideFactors = izeros(1,cg.numNodesCh) // Get stride factors for the stride vector
val parentOf = izeros(1,cg.numNodesCh) // Get index of parent (or itself) in idsInColor
val fullIproject = full(iproject)
for (i <- 0 until cg.numNodesCh) {
var nodeIndex = cg.chIdsInColor(i).dv.toInt
if (IMat(cg.idsInColor).data.contains(nodeIndex)) { // This node is in the color group
numOnes(i) = statesPerNode(nodeIndex)
ncols = ncols + statesPerNode(nodeIndex).dv.toInt
strideFactors(i) = 1
parentOf(i) = IMat(cg.idsInColor).data.indexOf(nodeIndex)
} else { // This node is a child of a node in the color group
val parentIndices = find( FMat( sum(SMat(pproject)(?,nodeIndex),2) ) )
var parentIndex = -1
var k = 0
while (parentIndex == -1 && k < parentIndices.length) {
if (IMat(cg.idsInColor).data.contains(parentIndices(k))) {
parentIndex = parentIndices(k)
parentOf(i) = IMat(cg.idsInColor).data.indexOf(parentIndices(k))
}
k = k + 1
}
if (parentIndex == -1) {
throw new RuntimeException("Node at index " +nodeIndex+ " is missing a parent in its color group.")
}
numOnes(i) = statesPerNode(parentIndex)
ncols = ncols + statesPerNode(parentIndex).dv.toInt
strideFactors(i) = fullIproject(parentIndex,IMat(nodeIndex)).dv.toInt
}
}
// Form the replication (the dim is (#-of-ch_id-variables x ncols)) and stride matrices
var col = 0
val strideVector = izeros(1, ncols)
val ii = izeros(ncols, 1)
for (i <- 0 until cg.numNodesCh) {
val num = numOnes(i)
ii(col until col+num) = i
strideVector(col until col+num) = (0 until num)*strideFactors(i)
col = col + num
}
val jj = icol(0 until ncols)
val vv = ones(ncols, 1)
cg.strideVector = convertMat(strideVector)
cg.replicationMatrix = if (useGPUnow) GSMat(sparse(ii,jj,vv)) else sparse(ii,jj,vv)
// Form keys and ikeys vectors
val numStatesIds = statesPerNode(cg.idsInColor)
val ncolsCombo = sum(numStatesIds).dv.toInt
val keys = izeros(1, ncolsCombo)
val scaledKeys = izeros(1, ncolsCombo)
val ikeys = izeros(1, cg.numNodes)
var keyIndex = 0
for (i <- 0 until cg.numNodes) {
val nodeIndex = cg.idsInColor(i)
val numStates = statesPerNode(nodeIndex).dv.toInt
keys(keyIndex until keyIndex+numStates) = nodeIndex * iones(1,numStates)
scaledKeys(keyIndex until keyIndex+numStates) = i * iones(1,numStates)
keyIndex += numStates
ikeys(i) = keyIndex-1
}
cg.scaledKeys = convertMat(scaledKeys)
cg.keys = convertMat(keys)
cg.ikeys = convertMat(ikeys)
cg.bkeys = cg.ikeys(cg.scaledKeys)
// Form the combination matrix (# of rows is # of columns of replication matrix)
val indicesColumns = izeros(1,cg.numNodes)
indicesColumns(1 until cg.numNodes) = cumsum(numStatesIds.asInstanceOf[IMat])(0 until cg.numNodes-1)
val nrowsCombo = ncols
val indicesRows = izeros(1,cg.numNodesCh)
indicesRows(1 until cg.numNodesCh) = cumsum(numOnes)(0 until numOnes.length-1)
val iii = izeros(nrowsCombo,1)
val jjj = izeros(nrowsCombo,1)
val vvv = ones(nrowsCombo,1)
for (i <- 0 until cg.numNodesCh) {
val p = parentOf(i) // Index into the node itself or its parent if it isn't in the color group
iii(indicesRows(i) until indicesRows(i)+numOnes(i)) = indicesRows(i) until indicesRows(i)+numOnes(i)
jjj(indicesRows(i) until indicesRows(i)+numOnes(i)) = indicesColumns(p) until indicesColumns(p)+numOnes(i)
}
cg.combinationMatrix = if (useGPUnow) {
GSMat(sparse(iii,jjj,vvv,nrowsCombo,ncolsCombo))
} else {
sparse(iii,jjj,vvv,nrowsCombo,ncolsCombo)
}
cg.idsInColor = convertMat(cg.idsInColor)
cg.chIdsInColor = convertMat(cg.chIdsInColor)
if (useGPUnow) {
cg.iprojectSliced = GSMat(cg.iprojectSliced.asInstanceOf[SMat])
}
return cg
}
/**
* Called during the first pass over the data to set up matrices for later. These matrices are
* used in future uupdate calls, and they depend on the batch size, hence why we can only form
* these during the pass over the data, and not in init().
*
* There are several types of matrices we create:
*
* - zero matrices to put in zeroMap, for clearing out usertrans
* - "rand" matries to put in randMap, for containers to randomize values during sampling
* - five colorInfo(c) matrices for the purposes of sampling
*
* In the very likely case that the last batch does not have the same number of columns as the
* first n-1 batches, then we need to repeat this process for that batch.
*
* @param ncols The number of columns in the current data, or the batch size.
*/
def establishMatrices(ncols:Int) = {
if (batchSize == -1) { // Only true if we're on the first mini-batch of ipass = 0.
batchSize = ncols
val onesVector = mm.ones(1, ncols)
val untilVector = convertMat( float(0 until ncols) )
for (c <- 0 until graph.ncolors) {
val numVars = colorInfo(c).numNodes
val randOffsets = int(untilVector * numVars)
zeroMap += ((ncols,numVars) -> mm.zeros(ncols,numVars))
randMap += ((numVars,ncols) -> mm.zeros(numVars,ncols))
colorInfo(c).keysMatrix = (colorInfo(c).keys).t * onesVector
colorInfo(c).bkeysOffsets = int(untilVector * colorInfo(c).keys.ncols)
colorInfo(c).bkeysMatrix = int(colorInfo(c).bkeys.t * onesVector) + colorInfo(c).bkeysOffsets
colorInfo(c).randMatrixIndices = int((colorInfo(c).scaledKeys).t * onesVector) + randOffsets
colorInfo(c).sampleIDindices = int((colorInfo(c).ikeys).t * onesVector) + colorInfo(c).bkeysOffsets
}
}
else if (ncols != batchSize) { // On the last batch of ipass = 0 w/different # of columns
val onesVectorLast = mm.ones(1, ncols)
val untilVectorLast = convertMat( float(0 until ncols) )
for (c <- 0 until graph.ncolors) {
val numVars = colorInfo(c).numNodes
val randOffsets = int(untilVectorLast * numVars)
zeroMap += ((ncols,numVars) -> mm.zeros(ncols,numVars))
randMap += ((numVars,ncols) -> mm.zeros(numVars,ncols))
colorInfo(c).keysMatrixLast = (colorInfo(c).keys).t * onesVectorLast
colorInfo(c).bkeysOffsetsLast = int(untilVectorLast * colorInfo(c).keys.ncols)
colorInfo(c).bkeysMatrixLast = int(colorInfo(c).bkeys.t * onesVectorLast) + colorInfo(c).bkeysOffsetsLast
colorInfo(c).randMatrixIndicesLast = int((colorInfo(c).scaledKeys).t * onesVectorLast) + randOffsets
colorInfo(c).sampleIDindicesLast = int((colorInfo(c).ikeys).t * onesVectorLast) + colorInfo(c).bkeysOffsetsLast
}
}
}
/**
* Creates normalizing matrix N that we can then multiply with the CPT to get a column vector
* of the same length as the CPT but such that it has normalized probabilties, not counts.
*
* Specific usage: our CPT is a column vector of counts. To normalize and get probabilities, use
* CPT / (CPT.t * N).t
*
* Alternatively, one could avoid those two transposes by making CPT a row vector, but since the
* code assumes it's a column vector, it makes sense to maintain that convention.
*/
def getNormConstMatrix(cptLength : Int) : Mat = {
var ii = izeros(1,1)
var jj = izeros(1,1)
for (i <- 0 until graph.n-1) {
var offset = IMat(cptOffset)(i)
val endOffset = IMat(cptOffset)(i+1)
val ns = statesPerNode(i).dv.toInt
var indices = find2(ones(ns,ns))
while (offset < endOffset) {
ii = ii on (indices._1 + offset)
jj = jj on (indices._2 + offset)
offset = offset + ns
}
}
var offsetLast = IMat(cptOffset)(graph.n-1)
var indices = find2(ones(statesPerNode.asInstanceOf[IMat](graph.n-1), statesPerNode.asInstanceOf[IMat](graph.n-1)))
while (offsetLast < cptLength) {
ii = ii on (indices._1 + offsetLast)
jj = jj on (indices._2 + offsetLast)
offsetLast = offsetLast + statesPerNode.asInstanceOf[IMat](graph.n-1)
}
val res = sparse(ii(1 until ii.length), jj(1 until jj.length), ones(ii.length-1,1), cptLength, cptLength)
if (useGPUnow) { // Note that here we have to transpose!
return GSMat(res.t)
} else {
return res.t
}
}
/** A debugging method to print matrices, without being constrained by the command line's cropping. */
def printMatrix(mat: Mat) = {
for(i <- 0 until mat.nrows) {
for (j <- 0 until mat.ncols) {
print(mat(IMat(i),IMat(j)) + " ")
}
println()
}
}
}
/**
* There are three things the BayesNet needs as input:
*
* - A states per node array. Each value needs to be an integer that is at least two.
* - A DAG array, in which column i represents node i and its parents.
* - A sparse data matrix, where 0 indicates an unknown element, and rows are variables.
*
* That's it. Other settings, such as the number of Gibbs iterations, are set in "opts".
*/
object BayesNet {
trait Opts extends Model.Opts {
var alpha = 0.1f
var samplingRate = 1
var numSamplesBurn = 0
}
class Options extends Opts {}
/**
* A learner with a matrix data source, with states per node, and with a dag prepared. Call this
* using (assuming proper names):
*
* val (nn,opts) = BayesNet.learner(loadIMat("states.lz4"), loadSMat("dag.lz4"), loadSMat("sdata.lz4"))
*/
def learner(statesPerNode:Mat, dag:Mat, data:Mat) = {
class xopts extends Learner.Options with BayesNet.Opts with MatDS.Opts with IncNorm.Opts
val opts = new xopts
opts.dim = dag.ncols
opts.batchSize = math.min(100000, data.ncols/20 + 1)
opts.useGPU = false
opts.npasses = 2
opts.isprob = false // Our CPT should NOT be normalized across their (one) column.
opts.putBack = 1 // Because this stores samples across ipasses, as required by Gibbs sampling
val secondMatrix = data.zeros(data.nrows,data.ncols)
val nn = new Learner(
new MatDS(Array(data:Mat, secondMatrix), opts),
new BayesNet(SMat(dag), statesPerNode, opts),
null,
new IncNorm(opts),
opts)
(nn, opts)
}
}
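
// A short end-to-end sketch building on the learner example above (same placeholder file names;
// `nn.train` and `nn.modelmats` are the usual BIDMach Learner entry points, stated here as an
// assumption rather than something defined in this file):
//   val (nn, opts) = BayesNet.learner(loadIMat("states.lz4"), loadSMat("dag.lz4"), loadSMat("sdata.lz4"))
//   opts.npasses = 10            // more passes (Gibbs sweeps) over the data
//   opts.alpha = 0.1f            // smoothing added to the counts in mupdate
//   nn.train                     // runs init() and the dobatch/evalbatch loop
//   val cpt = nn.modelmats(0)    // the learned (unnormalized) CPT counts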
/**
* A graph structure for Bayesian Networks. Includes features for:
*
 * (1) moralizing graphs: a 1 at (i,j) in the 'moral' matrix means node i is connected to node j
 * (2) coloring moralized graphs; maxColor only bounds the size of the color-count buffer used by the greedy coloring
*
* @param dag An adjacency matrix with a 1 at (i,j) if node i has an edge TOWARDS node j.
* @param n The number of vertices in the graph.
* @param statesPerNode A column vector where elements denote number of states for corresponding variables.
*/
class Graph(val dag: Mat, val n: Int, val statesPerNode: Mat) {
var mrf: Mat = null
var colors: Mat = null
var ncolors = 0
val maxColor = 100
/**
* Connects the parents of a certain node, a single step in the process of moralizing the graph.
*
* Iterates through the parent indices and insert 1s in the 'moral' matrix to indicate an edge.
*
* @param moral A matrix that represents an adjacency matrix "in progress" in the sense that it
* is continually getting updated each iteration from the "moralize" method.
* @param parents An array representing the parent indices of the node of interest.
*/
def connectParents(moral: FMat, parents: IMat) = {
val l = parents.length
for (i <- 0 until l) {
for (j <- 0 until l) {
if (parents(i) != parents(j)) {
moral(parents(i), parents(j)) = 1f
}
}
}
moral
}
/** Forms the pproject matrix (dag + identity) used for computing model parameters. */
def pproject : SMat = {
return SMat(dag) + sparse(IMat(0 until n), IMat(0 until n), ones(1, n))
}
/**
* Forms the iproject matrix, which is left-multiplied to send a Pr(X_i | parents) query to its
* appropriate spot in the cpt via LOCAL offsets for X_i.
*/
def iproject : SMat = {
var res = (pproject.copy).t
for (i <- 0 until n) {
val parents = find(SMat(pproject(?, i)))
var cumRes = 1f
val parentsLen = parents.length
for (j <- 1 until parentsLen) {
cumRes = cumRes * IMat(statesPerNode)(parents(parentsLen - j))
res.asInstanceOf[SMat](i, parents(parentsLen - j - 1)) = cumRes
}
}
return SMat(res)
}
/**
* Moralize the graph.
*
* This means we convert the graph from directed to undirected and connect parents of nodes in
* the directed graph. First, copy the dag to the moral graph because all 1s in the dag matrix
* are 1s in the moral matrix (these are adjacency matrices). For each node, find its parents,
* connect them, and update the matrix. Then make it symmetric because the graph is undirected.
*/
def moralize = {
var moral = full(dag)
for (i <- 0 until n) {
var parents = find(SMat(dag(?, i)))
moral = connectParents(FMat(moral), parents)
}
mrf = ((moral + moral.t) > 0)
}
/**
* Sequentially colors the moralized graph of the dag so that one can run parallel Gibbs sampling.
*
* Steps: first, moralize the graph. Then iterate through each node, find its neighbors, and apply a
* "color mask" to ensure current node doesn't have any of those colors. Then find the legal color
* with least count (a useful heuristic). If that's not possible, then increase "ncolor".
*/
def color = {
moralize
var colorCount = izeros(maxColor, 1)
colors = -1 * iones(n, 1)
ncolors = 0
// Access nodes sequentially. Find the color map of its neighbors, then find the legal color w/least count
val seq = IMat(0 until n)
// Can also access nodes randomly
// val r = rand(n, 1); val (v, seq) = sort2(r)
for (i <- 0 until n) {
var node = seq(i)
var nbs = find(FMat(mrf(?, node)))
var colorMap = iones(ncolors, 1)
for (j <- 0 until nbs.length) {
if (colors(nbs(j)).dv.toInt > -1) {
colorMap(colors(nbs(j))) = 0
}
}
var c = -1
var minc = 999999
for (k <- 0 until ncolors) {
if ((colorMap(k) > 0) && (colorCount(k) < minc)) {
c = k
minc = colorCount(k)
}
}
if (c == -1) {
c = ncolors
ncolors = ncolors + 1
}
colors(node) = c
colorCount(c) += 1
}
colors
}
}
/**
* This will store a lot of pre-computed variables (mostly matrices) for each color group.
*
* A high-level description of the categories:
*
* - numNodes and numNodesCh are the number of nodes, and the number of nodes and children
* in this color group, respectively.
* - idsInColor and chIdsInColor are indices of the variables in this color group, and in
* this color group plus children of those nodes, respectively.
* - replicationMatrix is a sparse matrix of rows of ones, used to replicate columns
* - strideVector is a vector where groups are (0 until k)*stride(x) where k is determined
* by the node or its parent, and stride(x) is 1 if the node is in the color group.
* - combinationMatrix is a sparse identity matrix that combines parents with children for
* probability computations
 * - keys, scaledKeys, ikeys, and bkeys index the per-state slots of each variable so that the
 *   by-key cumulative operations (cummaxByKey/cumsumByKey) used in parallel sampling work per variable
 * - the remaining ten (!) matrices rely on knowledge of the batch size, so they are filled in by
 *   establishMatrices during the first pass over the data
*/
class ColorGroup {
var numNodes:Int = -1
var numNodesCh:Int = -1
var idsInColor:Mat = null
var chIdsInColor:Mat = null
var globalOffsetVector:Mat = null
var iprojectSliced:Mat = null
var startingIndices:Mat = null
var replicationMatrix:Mat = null
var strideVector:Mat = null
var combinationMatrix:Mat = null
var keys:Mat = null
var scaledKeys:Mat = null
var ikeys:Mat = null
var bkeys:Mat = null
var keysMatrix:Mat = null
var keysMatrixLast:Mat = null
var bkeysMatrix:Mat = null
var bkeysMatrixLast:Mat = null
var bkeysOffsets:Mat = null
var bkeysOffsetsLast:Mat = null
var sampleIDindices:Mat = null
var sampleIDindicesLast:Mat = null
var randMatrixIndices:Mat = null
var randMatrixIndicesLast:Mat = null
}
| ypkang/BIDMach | src/main/scala/BIDMach/models/BayesNet.scala | Scala | bsd-3-clause | 28,204 |
package controllers.s_employment
import play.api.Play._
import scala.language.reflectiveCalls
import play.api.mvc.Controller
import play.api.data.Form
import play.api.data.Forms._
import models.view.{Navigable, CachedClaim}
import models.domain.{BeenEmployed, PensionAndExpenses}
import utils.helpers.CarersForm._
import Employment._
import utils.helpers.PastPresentLabelHelper._
import controllers.mappings.Mappings._
import play.api.data.FormError
import controllers.CarersForms._
import models.yesNo.YesNoWithText
import play.api.i18n._
object GPensionAndExpenses extends Controller with CachedClaim with Navigable with I18nSupport {
  override val messagesApi: MessagesApi = current.injector.instanceOf[MessagesApi]
val payPensionScheme =
"payPensionScheme" -> mapping (
"answer" -> nonEmptyText.verifying(validYesNo),
"text" -> optional(carersText(minLength=1, maxLength = 300))
)(YesNoWithText.apply)(YesNoWithText.unapply)
.verifying("payPensionScheme.text.required", YesNoWithText.validateOnYes _)
val haveExpensesForJob =
"haveExpensesForJob" -> mapping (
"answer" -> nonEmptyText.verifying(validYesNo),
"text" -> optional(carersText(minLength=1, maxLength = 300))
)(YesNoWithText.apply)(YesNoWithText.unapply)
.verifying("haveExpensesForJob.text.required", YesNoWithText.validateOnYes _)
val payForThings =
"payForThings" -> mapping (
"answer" -> nonEmptyText.verifying(validYesNo),
"text" -> optional(carersText(minLength=1, maxLength = 300))
)(YesNoWithText.apply)(YesNoWithText.unapply)
.verifying("payForThings.text.required", YesNoWithText.validateOnYes _)
val form = Form(mapping(
"iterationID" -> nonEmptyText,
payPensionScheme,
payForThings,
haveExpensesForJob
)(PensionAndExpenses.apply)(PensionAndExpenses.unapply))
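
  // The mapping above binds request keys of the following shape (each free-text field is only
  // required when its matching answer is "yes", per the verifying rules above):
  //   iterationID, payPensionScheme.answer, payPensionScheme.text,
  //   payForThings.answer, payForThings.text, haveExpensesForJob.answer, haveExpensesForJob.text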
def present(iterationID: String) = claimingWithCheck { implicit claim => implicit request => implicit request2lang =>
track(PensionAndExpenses) { implicit claim => Ok(views.html.s_employment.g_pensionAndExpenses(form.fillWithJobID(PensionAndExpenses, iterationID))) }
}
def submit = claimingWithCheckInIteration { iterationID => implicit claim => implicit request => implicit request2lang =>
form.bindEncrypted.fold(
formWithErrors => {
val formWithErrorsUpdate = formWithErrors
.replaceError("payPensionScheme.answer",errorRequired,FormError("payPensionScheme.answer",errorRequired, Seq(labelForEmployment(claim, request2lang, "payPensionScheme.answer", iterationID))))
.replaceError("payPensionScheme","payPensionScheme.text.required",FormError("payPensionScheme.text",errorRequired, Seq(labelForEmployment(claim, request2lang, "payPensionScheme.text", iterationID))))
.replaceError("payPensionScheme","payPensionScheme.text.maxLength",FormError("payPensionScheme.text",maxLengthError, Seq(labelForEmployment(claim, request2lang, "payPensionScheme.text", iterationID))))
.replaceError("payPensionScheme.text",errorRestrictedCharacters,FormError("payPensionScheme.text",errorRestrictedCharacters, Seq(labelForEmployment(claim, request2lang, "payPensionScheme.text", iterationID))))
.replaceError("payForThings.answer",errorRequired,FormError("payForThings.answer",errorRequired, Seq(labelForEmployment(claim, request2lang, "payForThings.answer", iterationID))))
.replaceError("payForThings","payForThings.text.required",FormError("payForThings.text",errorRequired, Seq(labelForEmployment(claim, request2lang, "payForThings.text", iterationID))))
.replaceError("payForThings","payForThings.text.maxLength",FormError("payForThings.text",maxLengthError, Seq(labelForEmployment(claim, request2lang, "payForThings.text", iterationID))))
.replaceError("payForThings.text",errorRestrictedCharacters,FormError("payForThings.text",errorRestrictedCharacters, Seq(labelForEmployment(claim, request2lang, "payForThings.text", iterationID))))
.replaceError("haveExpensesForJob.answer",errorRequired,FormError("haveExpensesForJob.answer",errorRequired, Seq(labelForEmployment(claim, request2lang, "haveExpensesForJob.answer", iterationID))))
.replaceError("haveExpensesForJob","haveExpensesForJob.text.required",FormError("haveExpensesForJob.text",errorRequired, Seq(labelForEmployment(claim, request2lang, "haveExpensesForJob.text", iterationID))))
.replaceError("haveExpensesForJob","haveExpensesForJob.text..maxLength",FormError("haveExpensesForJob.text",maxLengthError, Seq(labelForEmployment(claim, request2lang, "haveExpensesForJob.text", iterationID))))
.replaceError("haveExpensesForJob.text",errorRestrictedCharacters,FormError("haveExpensesForJob.text",errorRestrictedCharacters, Seq(labelForEmployment(claim, request2lang, "haveExpensesForJob.text", iterationID))))
BadRequest(views.html.s_employment.g_pensionAndExpenses(formWithErrorsUpdate))
},
// Must delete the BeenEmployed question group so it doesn't prepopulate the
// question 'Have you had any more employments...'
aboutExpenses => claim.update(jobs.update(aboutExpenses).completeJob(iterationID)).delete(BeenEmployed) -> Redirect(routes.GBeenEmployed.present()))
}
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/app/controllers/s_employment/GPensionAndExpenses.scala | Scala | mit | 5,260 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.parquet.thrift
import cascading.scheme.Scheme
import com.twitter.scalding._
import com.twitter.scalding.parquet.{
StrictColumnProjectionString,
DeprecatedColumnProjectionString,
HasColumnProjection,
HasFilterPredicate,
ParquetValueScheme
}
import com.twitter.scalding.source.{ DailySuffixSource, HourlySuffixSource }
import java.io.Serializable
import org.apache.thrift.{ TBase, TFieldIdEnum }
import scala.reflect.ClassTag
object ParquetThrift extends Serializable {
type ThriftBase = TBase[_ <: TBase[_, _], _ <: TFieldIdEnum]
}
trait ParquetThriftBase[T] extends LocalTapSource with HasFilterPredicate with HasColumnProjection {
implicit def ct: ClassTag[T]
def config: ParquetValueScheme.Config[T] = {
val clazz = ct.runtimeClass.asInstanceOf[Class[T]]
val config = new ParquetValueScheme.Config[T].withRecordClass(clazz)
val configWithFp = withFilter match {
case Some(fp) => config.withFilterPredicate(fp)
case None => config
}
val configWithProjection = columnProjectionString match {
case Some(s @ DeprecatedColumnProjectionString(_)) => configWithFp.withProjectionString(s.asSemicolonString)
case Some(s @ StrictColumnProjectionString(_)) => configWithFp.withStrictProjectionString(s.asSemicolonString)
case None => configWithFp
}
configWithProjection
}
}
trait ParquetThriftBaseFileSource[T] extends FileSource with ParquetThriftBase[T] with SingleMappable[T] with TypedSink[T] {
override def setter[U <: T] = TupleSetter.asSubSetter[T, U](TupleSetter.singleSetter[T])
}
trait ParquetThrift[T <: ParquetThrift.ThriftBase] extends ParquetThriftBaseFileSource[T] {
override def hdfsScheme = {
// See docs in Parquet346TBaseScheme
val scheme = new Parquet346TBaseScheme[T](this.config)
HadoopSchemeInstance(scheme.asInstanceOf[Scheme[_, _, _, _, _]])
}
}
/**
* When Using these sources or creating subclasses of them, you can
* provide a filter predicate and / or a set of fields (columns) to keep (project).
*
* The filter predicate will be pushed down to the input format, potentially
* making the filter significantly more efficient than a filter applied to
* a TypedPipe (parquet push-down filters can skip reading entire chunks of data off disk).
*
* For data with a large schema (many fields / columns), providing the set of columns
* you intend to use can also make your job significantly more efficient (parquet column projection
* push-down will skip reading unused columns from disk).
* The columns are specified in the format described here:
* https://github.com/apache/parquet-mr/blob/master/parquet_cascading.md#21-projection-pushdown-with-thriftscrooge-records
*
* These settings are defined in the traits [[com.twitter.scalding.parquet.HasFilterPredicate]]
* and [[com.twitter.scalding.parquet.HasColumnProjection]]
*
* Here are two ways you can use these in a parquet source:
*
* {{{
* class MyParquetSource(dr: DateRange) extends DailySuffixParquetThrift("/a/path", dr)
*
* val mySourceFilteredAndProjected = new MyParquetSource(dr) {
* override val withFilter: Option[FilterPredicate] = Some(myFp)
* override val withColumnProjections: Set[String] = Set("a.b.c", "x.y")
* }
* }}}
*
* The other way is to add these as constructor arguments:
*
* {{{
* class MyParquetSource(
* dr: DateRange,
* override val withFilter: Option[FilterPredicate] = None
* override val withColumnProjections: Set[String] = Set()
* ) extends DailySuffixParquetThrift("/a/path", dr)
*
* val mySourceFilteredAndProjected = new MyParquetSource(dr, Some(myFp), Set("a.b.c", "x.y"))
* }}}
*/
class DailySuffixParquetThrift[T <: ParquetThrift.ThriftBase](
path: String,
dateRange: DateRange)(implicit override val ct: ClassTag[T])
extends DailySuffixSource(path, dateRange) with ParquetThrift[T]
class HourlySuffixParquetThrift[T <: ParquetThrift.ThriftBase](
path: String,
dateRange: DateRange)(implicit override val ct: ClassTag[T])
extends HourlySuffixSource(path, dateRange) with ParquetThrift[T]
class FixedPathParquetThrift[T <: ParquetThrift.ThriftBase](paths: String*)(implicit override val ct: ClassTag[T])
extends FixedPathSource(paths: _*) with ParquetThrift[T]
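// Minimal usage sketch (added for illustration, not part of the library): `MyThriftStruct`,
// the field names and the output path are hypothetical, and the FilterApi import assumes the
// org.apache.parquet package layout used by recent parquet-mr releases.
//
//   import org.apache.parquet.filter2.predicate.{ FilterApi, FilterPredicate }
//
//   class MyJob(args: Args) extends Job(args) {
//     val source = new FixedPathParquetThrift[MyThriftStruct]("/logs/2017/01/01") {
//       override val withFilter: Option[FilterPredicate] =
//         Some(FilterApi.gt(FilterApi.intColumn("user_id"), Int.box(0)))
//       override val withColumnProjections: Set[String] = Set("user_id", "event.name")
//     }
//     TypedPipe.from(source).map(_.toString).write(TypedTsv[String](args("output")))
//   }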
| tresata/scalding | scalding-parquet/src/main/scala/com/twitter/scalding/parquet/thrift/ParquetThrift.scala | Scala | apache-2.0 | 4,826 |
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO2
package com.google.protobuf.descriptor
/** Describes the relationship between generated code and its original source
* file. A GeneratedCodeInfo message is associated with only one generated
* source file, but may contain references to different source .proto files.
*
* @param annotation
* An Annotation connects some span of text in generated code to an element
* of its generating .proto file.
*/
@SerialVersionUID(0L)
final case class GeneratedCodeInfo(
annotation: _root_.scala.Seq[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation] = _root_.scala.Seq.empty,
unknownFields: _root_.scalapb.UnknownFieldSet = _root_.scalapb.UnknownFieldSet.empty
) extends scalapb.GeneratedMessage with scalapb.lenses.Updatable[GeneratedCodeInfo] {
@transient
private[this] var __serializedSizeCachedValue: _root_.scala.Int = 0
private[this] def __computeSerializedValue(): _root_.scala.Int = {
var __size = 0
annotation.foreach { __item =>
val __value = __item
__size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
}
__size += unknownFields.serializedSize
__size
}
override def serializedSize: _root_.scala.Int = {
var read = __serializedSizeCachedValue
if (read == 0) {
read = __computeSerializedValue()
__serializedSizeCachedValue = read
}
read
}
def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
annotation.foreach { __v =>
val __m = __v
_output__.writeTag(1, 2)
_output__.writeUInt32NoTag(__m.serializedSize)
__m.writeTo(_output__)
};
unknownFields.writeTo(_output__)
}
def clearAnnotation = copy(annotation = _root_.scala.Seq.empty)
def addAnnotation(__vs: com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation*): GeneratedCodeInfo = addAllAnnotation(__vs)
def addAllAnnotation(__vs: Iterable[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation]): GeneratedCodeInfo = copy(annotation = annotation ++ __vs)
def withAnnotation(__v: _root_.scala.Seq[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation]): GeneratedCodeInfo = copy(annotation = __v)
def withUnknownFields(__v: _root_.scalapb.UnknownFieldSet) = copy(unknownFields = __v)
def discardUnknownFields = copy(unknownFields = _root_.scalapb.UnknownFieldSet.empty)
def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
(__fieldNumber: @_root_.scala.unchecked) match {
case 1 => annotation
}
}
def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = {
_root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
(__field.number: @_root_.scala.unchecked) match {
case 1 => _root_.scalapb.descriptors.PRepeated(annotation.iterator.map(_.toPMessage).toVector)
}
}
def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this)
def companion = com.google.protobuf.descriptor.GeneratedCodeInfo
}
object GeneratedCodeInfo extends scalapb.GeneratedMessageCompanion[com.google.protobuf.descriptor.GeneratedCodeInfo] {
implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.google.protobuf.descriptor.GeneratedCodeInfo] = this
def merge(`_message__`: com.google.protobuf.descriptor.GeneratedCodeInfo, `_input__`: _root_.com.google.protobuf.CodedInputStream): com.google.protobuf.descriptor.GeneratedCodeInfo = {
val __annotation = (_root_.scala.collection.immutable.Vector.newBuilder[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation] ++= `_message__`.annotation)
var `_unknownFields__`: _root_.scalapb.UnknownFieldSet.Builder = null
var _done__ = false
while (!_done__) {
val _tag__ = _input__.readTag()
_tag__ match {
case 0 => _done__ = true
case 10 =>
__annotation += _root_.scalapb.LiteParser.readMessage(_input__, com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation.defaultInstance)
case tag =>
if (_unknownFields__ == null) {
_unknownFields__ = new _root_.scalapb.UnknownFieldSet.Builder(_message__.unknownFields)
}
_unknownFields__.parseField(tag, _input__)
}
}
com.google.protobuf.descriptor.GeneratedCodeInfo(
annotation = __annotation.result(),
unknownFields = if (_unknownFields__ == null) _message__.unknownFields else _unknownFields__.result()
)
}
implicit def messageReads: _root_.scalapb.descriptors.Reads[com.google.protobuf.descriptor.GeneratedCodeInfo] = _root_.scalapb.descriptors.Reads{
case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
_root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage eq scalaDescriptor), "FieldDescriptor does not match message type.")
com.google.protobuf.descriptor.GeneratedCodeInfo(
annotation = __fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).map(_.as[_root_.scala.Seq[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation]]).getOrElse(_root_.scala.Seq.empty)
)
case _ => throw new RuntimeException("Expected PMessage")
}
def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = DescriptorProtoCompanion.javaDescriptor.getMessageTypes().get(20)
def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = DescriptorProtoCompanion.scalaDescriptor.messages(20)
def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = {
var __out: _root_.scalapb.GeneratedMessageCompanion[_] = null
(__number: @_root_.scala.unchecked) match {
case 1 => __out = com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation
}
__out
}
lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] =
Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]](
_root_.com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation
)
def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber)
lazy val defaultInstance = com.google.protobuf.descriptor.GeneratedCodeInfo(
annotation = _root_.scala.Seq.empty
)
/** @param path
* Identifies the element in the original source .proto file. This field
* is formatted the same as SourceCodeInfo.Location.path.
* @param sourceFile
* Identifies the filesystem path to the original source .proto.
* @param begin
* Identifies the starting offset in bytes in the generated code
* that relates to the identified object.
* @param end
* Identifies the ending offset in bytes in the generated code that
* relates to the identified offset. The end offset should be one past
* the last relevant byte (so the length of the text = end - begin).
*/
@SerialVersionUID(0L)
final case class Annotation(
path: _root_.scala.Seq[_root_.scala.Int] = _root_.scala.Seq.empty,
sourceFile: _root_.scala.Option[_root_.scala.Predef.String] = _root_.scala.None,
begin: _root_.scala.Option[_root_.scala.Int] = _root_.scala.None,
end: _root_.scala.Option[_root_.scala.Int] = _root_.scala.None,
unknownFields: _root_.scalapb.UnknownFieldSet = _root_.scalapb.UnknownFieldSet.empty
) extends scalapb.GeneratedMessage with scalapb.lenses.Updatable[Annotation] {
private[this] def pathSerializedSize = {
if (__pathSerializedSizeField == 0) __pathSerializedSizeField = {
var __s: _root_.scala.Int = 0
path.foreach(__i => __s += _root_.com.google.protobuf.CodedOutputStream.computeInt32SizeNoTag(__i))
__s
}
__pathSerializedSizeField
}
@transient private[this] var __pathSerializedSizeField: _root_.scala.Int = 0
@transient
private[this] var __serializedSizeCachedValue: _root_.scala.Int = 0
private[this] def __computeSerializedValue(): _root_.scala.Int = {
var __size = 0
if(path.nonEmpty) {
val __localsize = pathSerializedSize
__size += 1 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__localsize) + __localsize
}
if (sourceFile.isDefined) {
val __value = sourceFile.get
__size += _root_.com.google.protobuf.CodedOutputStream.computeStringSize(2, __value)
};
if (begin.isDefined) {
val __value = begin.get
__size += _root_.com.google.protobuf.CodedOutputStream.computeInt32Size(3, __value)
};
if (end.isDefined) {
val __value = end.get
__size += _root_.com.google.protobuf.CodedOutputStream.computeInt32Size(4, __value)
};
__size += unknownFields.serializedSize
__size
}
override def serializedSize: _root_.scala.Int = {
var read = __serializedSizeCachedValue
if (read == 0) {
read = __computeSerializedValue()
__serializedSizeCachedValue = read
}
read
}
def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
if (path.nonEmpty) {
_output__.writeTag(1, 2)
_output__.writeUInt32NoTag(pathSerializedSize)
path.foreach(_output__.writeInt32NoTag)
};
sourceFile.foreach { __v =>
val __m = __v
_output__.writeString(2, __m)
};
begin.foreach { __v =>
val __m = __v
_output__.writeInt32(3, __m)
};
end.foreach { __v =>
val __m = __v
_output__.writeInt32(4, __m)
};
unknownFields.writeTo(_output__)
}
def clearPath = copy(path = _root_.scala.Seq.empty)
def addPath(__vs: _root_.scala.Int*): Annotation = addAllPath(__vs)
def addAllPath(__vs: Iterable[_root_.scala.Int]): Annotation = copy(path = path ++ __vs)
def withPath(__v: _root_.scala.Seq[_root_.scala.Int]): Annotation = copy(path = __v)
def getSourceFile: _root_.scala.Predef.String = sourceFile.getOrElse("")
def clearSourceFile: Annotation = copy(sourceFile = _root_.scala.None)
def withSourceFile(__v: _root_.scala.Predef.String): Annotation = copy(sourceFile = Option(__v))
def getBegin: _root_.scala.Int = begin.getOrElse(0)
def clearBegin: Annotation = copy(begin = _root_.scala.None)
def withBegin(__v: _root_.scala.Int): Annotation = copy(begin = Option(__v))
def getEnd: _root_.scala.Int = end.getOrElse(0)
def clearEnd: Annotation = copy(end = _root_.scala.None)
def withEnd(__v: _root_.scala.Int): Annotation = copy(end = Option(__v))
def withUnknownFields(__v: _root_.scalapb.UnknownFieldSet) = copy(unknownFields = __v)
def discardUnknownFields = copy(unknownFields = _root_.scalapb.UnknownFieldSet.empty)
def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
(__fieldNumber: @_root_.scala.unchecked) match {
case 1 => path
case 2 => sourceFile.orNull
case 3 => begin.orNull
case 4 => end.orNull
}
}
def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = {
_root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
(__field.number: @_root_.scala.unchecked) match {
case 1 => _root_.scalapb.descriptors.PRepeated(path.iterator.map(_root_.scalapb.descriptors.PInt).toVector)
case 2 => sourceFile.map(_root_.scalapb.descriptors.PString).getOrElse(_root_.scalapb.descriptors.PEmpty)
case 3 => begin.map(_root_.scalapb.descriptors.PInt).getOrElse(_root_.scalapb.descriptors.PEmpty)
case 4 => end.map(_root_.scalapb.descriptors.PInt).getOrElse(_root_.scalapb.descriptors.PEmpty)
}
}
def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this)
def companion = com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation
}
object Annotation extends scalapb.GeneratedMessageCompanion[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation] {
implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation] = this
def merge(`_message__`: com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation, `_input__`: _root_.com.google.protobuf.CodedInputStream): com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation = {
val __path = (_root_.scala.collection.immutable.Vector.newBuilder[_root_.scala.Int] ++= `_message__`.path)
var __sourceFile = `_message__`.sourceFile
var __begin = `_message__`.begin
var __end = `_message__`.end
var `_unknownFields__`: _root_.scalapb.UnknownFieldSet.Builder = null
var _done__ = false
while (!_done__) {
val _tag__ = _input__.readTag()
_tag__ match {
case 0 => _done__ = true
case 8 =>
__path += _input__.readInt32()
case 10 => {
val length = _input__.readRawVarint32()
val oldLimit = _input__.pushLimit(length)
while (_input__.getBytesUntilLimit > 0) {
__path += _input__.readInt32()
}
_input__.popLimit(oldLimit)
}
case 18 =>
__sourceFile = Option(_input__.readStringRequireUtf8())
case 24 =>
__begin = Option(_input__.readInt32())
case 32 =>
__end = Option(_input__.readInt32())
case tag =>
if (_unknownFields__ == null) {
_unknownFields__ = new _root_.scalapb.UnknownFieldSet.Builder(_message__.unknownFields)
}
_unknownFields__.parseField(tag, _input__)
}
}
com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation(
path = __path.result(),
sourceFile = __sourceFile,
begin = __begin,
end = __end,
unknownFields = if (_unknownFields__ == null) _message__.unknownFields else _unknownFields__.result()
)
}
implicit def messageReads: _root_.scalapb.descriptors.Reads[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation] = _root_.scalapb.descriptors.Reads{
case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
_root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage eq scalaDescriptor), "FieldDescriptor does not match message type.")
com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation(
path = __fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).map(_.as[_root_.scala.Seq[_root_.scala.Int]]).getOrElse(_root_.scala.Seq.empty),
sourceFile = __fieldsMap.get(scalaDescriptor.findFieldByNumber(2).get).flatMap(_.as[_root_.scala.Option[_root_.scala.Predef.String]]),
begin = __fieldsMap.get(scalaDescriptor.findFieldByNumber(3).get).flatMap(_.as[_root_.scala.Option[_root_.scala.Int]]),
end = __fieldsMap.get(scalaDescriptor.findFieldByNumber(4).get).flatMap(_.as[_root_.scala.Option[_root_.scala.Int]])
)
case _ => throw new RuntimeException("Expected PMessage")
}
def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = com.google.protobuf.descriptor.GeneratedCodeInfo.javaDescriptor.getNestedTypes().get(0)
def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = com.google.protobuf.descriptor.GeneratedCodeInfo.scalaDescriptor.nestedMessages(0)
def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = throw new MatchError(__number)
lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = Seq.empty
def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__fieldNumber)
lazy val defaultInstance = com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation(
path = _root_.scala.Seq.empty,
sourceFile = _root_.scala.None,
begin = _root_.scala.None,
end = _root_.scala.None
)
implicit class AnnotationLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation](_l) {
def path: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[_root_.scala.Int]] = field(_.path)((c_, f_) => c_.copy(path = f_))
def sourceFile: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Predef.String] = field(_.getSourceFile)((c_, f_) => c_.copy(sourceFile = Option(f_)))
def optionalSourceFile: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[_root_.scala.Predef.String]] = field(_.sourceFile)((c_, f_) => c_.copy(sourceFile = f_))
def begin: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Int] = field(_.getBegin)((c_, f_) => c_.copy(begin = Option(f_)))
def optionalBegin: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[_root_.scala.Int]] = field(_.begin)((c_, f_) => c_.copy(begin = f_))
def end: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Int] = field(_.getEnd)((c_, f_) => c_.copy(end = Option(f_)))
def optionalEnd: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[_root_.scala.Int]] = field(_.end)((c_, f_) => c_.copy(end = f_))
}
final val PATH_FIELD_NUMBER = 1
final val SOURCE_FILE_FIELD_NUMBER = 2
final val BEGIN_FIELD_NUMBER = 3
final val END_FIELD_NUMBER = 4
def of(
path: _root_.scala.Seq[_root_.scala.Int],
sourceFile: _root_.scala.Option[_root_.scala.Predef.String],
begin: _root_.scala.Option[_root_.scala.Int],
end: _root_.scala.Option[_root_.scala.Int]
): _root_.com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation = _root_.com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation(
path,
sourceFile,
begin,
end
)
// @@protoc_insertion_point(GeneratedMessageCompanion[google.protobuf.GeneratedCodeInfo.Annotation])
}
implicit class GeneratedCodeInfoLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.descriptor.GeneratedCodeInfo]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.google.protobuf.descriptor.GeneratedCodeInfo](_l) {
def annotation: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation]] = field(_.annotation)((c_, f_) => c_.copy(annotation = f_))
}
final val ANNOTATION_FIELD_NUMBER = 1
def of(
annotation: _root_.scala.Seq[com.google.protobuf.descriptor.GeneratedCodeInfo.Annotation]
): _root_.com.google.protobuf.descriptor.GeneratedCodeInfo = _root_.com.google.protobuf.descriptor.GeneratedCodeInfo(
annotation
)
// @@protoc_insertion_point(GeneratedMessageCompanion[google.protobuf.GeneratedCodeInfo])
}
| trueaccord/ScalaPB | scalapb-runtime/src/main/scalajs/com/google/protobuf/descriptor/GeneratedCodeInfo.scala | Scala | apache-2.0 | 19,393 |
package com.lljv.analytics.hadoopengine
import java.util.HashMap
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.clients.producer.{KafkaProducer, ProducerConfig, ProducerRecord}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import scala.util.control.NonFatal
class KafkaEngine(val settings: KafkaSettings) extends Serializable {
var producer: Option[KafkaProducer[String, String]] = None
def getStreamingParameters(): Map[String, String] = {
val parameters = Map[String, String](
"metadata.broker.list" -> settings.kafkaBroker,
"bootstrap.servers" -> settings.kafkaBroker,
"key.serializer" -> settings.stringSerializerType,
"value.serializer" -> settings.stringSerializerType,
"key.deserializer" -> settings.stringDeserializerType,
"value.deserializer" -> settings.stringDeserializerType,
"group.id" -> settings.consumerGroupId
)
return parameters
}
def startStream(
topicName: String,
sparkEngine: SparkStreamEngine
): Option[InputDStream[ConsumerRecord[String, String]]] =
{
val stream: Option[InputDStream[ConsumerRecord[String, String]]] = try {
Some(KafkaUtils.createDirectStream[String,String](
sparkEngine.streamingContext.get,
PreferConsistent,
Subscribe[String, String](Array(topicName), this.getStreamingParameters())
))
} catch {
case NonFatal(exc) => {
// printf(exc.getMessage())
// TODO: logging
None
}
}
return stream
}
  /**
   * Initializes the engine's Producer instance. If there is a problem starting the
   * producer, no instance is stored and this method returns false.
   *
   * @return
   * True if the producer was started successfully, false otherwise.
   */
def startProducer(): Boolean = {
val serverConfig = ProducerConfig.BOOTSTRAP_SERVERS_CONFIG
val valueSerializer = ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG
val keySerializer = ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG
val properties = new HashMap[String, Object]()
properties.put(serverConfig, settings.kafkaBroker)
properties.put(keySerializer, settings.stringSerializerType)
properties.put(valueSerializer, settings.stringSerializerType)
val newProducer: Option[KafkaProducer[String, String]] = try {
Some(new KafkaProducer[String, String](properties))
} catch {
case _: Throwable => {
// TODO: logging
None
}
}
if (newProducer.isEmpty)
return false
this.producer = newProducer
return true
}
/**
*
* @return
* False if the Producer instance could not be closed.
*/
/*
def closeProducer(): Boolean = {
producer.get.close()
if (this.producer.isEmpty)
return true
return false
}
*/
def writeToTopic(topicName: String, message: String, kafkaProducerKey: String): Boolean = {
try {
val producerMessage = new ProducerRecord[String, String](topicName, kafkaProducerKey, message)
this.producer.get.send(producerMessage)
} catch {
case _: Throwable => {
// TODO: logging
}
return false
}
return true
}
/*
def writeToTopic(
topicName: String,
jsonStream: DStream[(String, String)],
kafkaBroker: String,
stringSerializerType: String
): Unit =
{
val serverConfig = ProducerConfig.BOOTSTRAP_SERVERS_CONFIG
val valueSerializer = ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG
val keySerializer = ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG
val topic: String = settings.topicOut
jsonStream.foreachRDD(rdd =>
rdd.foreachPartition(partition =>
partition.foreach {
case record: (String, String) => {
val properties = new HashMap[String, Object]()
properties.put(serverConfig, kafkaBroker)
properties.put(keySerializer, stringSerializerType)
properties.put(valueSerializer, stringSerializerType)
val producer: Option[KafkaProducer[String, String]] = try {
Some(new KafkaProducer[String, String](properties))
} catch {
case _: Throwable => {
// TODO: logging
None
}
}
val kafkaConsumerKey = record._1
val messageOut = record._2
// println("================\\nKAFKA CONSUMER KEY: " + kafkaConsumerKey + "\\n================")
val message = new ProducerRecord[String, String](topic, kafkaConsumerKey, messageOut)
if (!producer.isEmpty) {
producer.get.send(message)
producer.get.close()
}
}
}
)
)
}
*/
}
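// Minimal usage sketch (an assumption added for clarity, not part of the original file):
// `settings` stands for a KafkaSettings instance and `settings.topicOut` for its output topic.
//
//   val engine = new KafkaEngine(settings)
//   if (engine.startProducer()) {
//     engine.writeToTopic(settings.topicOut, """{"status": "ok"}""", "some-key")
//   }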
| dotdeb/Science-Finder | Analytics/HadoopEngine/src/main/scala/com/lljv/analytics/hadoopengine/KafkaEngine.scala | Scala | apache-2.0 | 5,081 |
package troy.tast
sealed trait SimpleSelection
object SimpleSelection {
sealed trait ColumnName[columnName <: Identifier] extends SimpleSelection
sealed trait ColumnNameOf[columnName <: Identifier, term <: Term] extends SimpleSelection
sealed trait ColumnNameDot[columnName <: Identifier, fieldName <: String] extends SimpleSelection
}
| schemasafe/troy | typelevel-ast/src/main/scala/troy/tast/SimpleSelection.scala | Scala | apache-2.0 | 343 |
/*****************************************************************************************
* Copyright 2017 Capital One Services, LLC and Bitwise, Inc.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
* http://www.apache.org/licenses/LICENSE-2.0
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
****************************************************************************************/
package hydrograph.engine.spark.components
import java.sql.SQLException
import hydrograph.engine.core.component.entity.InputRDBMSEntity
import hydrograph.engine.spark.components.base.InputComponentBase
import hydrograph.engine.spark.components.platform.BaseComponentParams
import hydrograph.engine.spark.components.utils.{SchemaCreator, SchemaUtils, TeradataTableUtils}
import org.apache.spark.sql._
import org.apache.spark.sql.types._
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.JavaConverters._
/**
* The Class InputTeradataComponent.
*
* @author Bitwise
*
*/
class InputTeradataComponent(inputRDBMSEntity: InputRDBMSEntity,
iComponentsParams: BaseComponentParams) extends InputComponentBase {
val LOG: Logger = LoggerFactory.getLogger(classOf[InputTeradataComponent])
val driverName = null
override def createComponent(): Map[String, DataFrame] = {
val schemaField = SchemaCreator(inputRDBMSEntity).makeSchema()
val sparkSession = iComponentsParams.getSparkSession()
val numPartitions: Int = inputRDBMSEntity getNumPartitionsValue
val upperBound: Int = inputRDBMSEntity getUpperBound
val lowerBound: Int = inputRDBMSEntity getLowerBound
val fetchSizeValue: String = inputRDBMSEntity getFetchSize match {
case null => "1000"
case _ => inputRDBMSEntity getFetchSize
}
val columnForPartitioning: String = inputRDBMSEntity.getColumnName
val extraUrlParams: String = inputRDBMSEntity.getExtraUrlParameters match {
case null => ""
case _ => ","+inputRDBMSEntity.getExtraUrlParameters
}
val properties = inputRDBMSEntity.getRuntimeProperties
properties.setProperty("user", inputRDBMSEntity.getUsername)
properties.setProperty("password", inputRDBMSEntity.getPassword)
properties.setProperty("fetchsize", fetchSizeValue)
val driverName = "com.teradata.jdbc.TeraDriver"
if (inputRDBMSEntity.getJdbcDriver().equals("TeraJDBC4")) {
properties.setProperty("driver", driverName)
}
LOG.info("Created Input Teradata Component '" + inputRDBMSEntity.getComponentId
+ "' in Batch " + inputRDBMSEntity.getBatch
+ " with output socket " + inputRDBMSEntity.getOutSocketList.get(0).getSocketId)
val selectQuery = if (inputRDBMSEntity.getTableName == null) {
LOG.debug("Select query : " + inputRDBMSEntity.getSelectQuery)
"(" + inputRDBMSEntity.getSelectQuery + ") as aliass"
}
else "(" + TeradataTableUtils()
.getSelectQuery(inputRDBMSEntity.getFieldsList.asScala.toList,inputRDBMSEntity.getTableName) + ") as aliass"
if (inputRDBMSEntity.getTableName != null)
LOG.debug("Component Id '" + inputRDBMSEntity.getComponentId
+ "' in Batch " + inputRDBMSEntity.getBatch
+ " having schema: [ " + inputRDBMSEntity.getFieldsList.asScala.mkString(",") + " ]"
+ " reading data from '" + selectQuery + "' table")
else
LOG.debug("Component Id '" + inputRDBMSEntity.getComponentId
+ "' in Batch " + inputRDBMSEntity.getBatch
+ " having schema: [ " + inputRDBMSEntity.getFieldsList.asScala.mkString(",") + " ]"
+ " reading data from '" + selectQuery + "' query")
val connectionURL: String = "jdbc:teradata://" + inputRDBMSEntity.getHostName() + "/DBS_PORT=" + inputRDBMSEntity.getPort() + ",DATABASE=" +
inputRDBMSEntity.getDatabaseName()+",TYPE=DEFAULT"+extraUrlParams
/*+inputRDBMSEntity.get_interface()+*/
LOG.info("Connection url for Teradata input component: " + connectionURL)
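    // Added note: createJdbcDataframe appears to treat Int.MinValue as a sentinel for
    // "no partition count configured", falling back to a single unpartitioned JDBC read;
    // any other value performs a partitioned read on columnForPartitioning between lowerBound and upperBound.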
def createJdbcDataframe: Int => DataFrame = (partitionValue:Int) => partitionValue match {
case Int.MinValue => sparkSession.read.jdbc(connectionURL, selectQuery, properties)
case (partitionValues: Int) => sparkSession.read.jdbc(connectionURL,
selectQuery,
columnForPartitioning,
lowerBound,
upperBound,
partitionValues,
properties)
}
try {
val df: DataFrame = createJdbcDataframe(numPartitions)
SchemaUtils().compareSchema(getMappedSchema(schemaField),df.schema.toList)
val key = inputRDBMSEntity.getOutSocketList.get(0).getSocketId
Map(key -> df)
} catch {
case e: SQLException =>
LOG.error("\\"Error in Input Teradata input component '" + inputRDBMSEntity.getComponentId + "', Error" + e.getMessage, e)
throw TableDoesNotExistException("\\"Error in Input Teradata input component '" + inputRDBMSEntity.getComponentId + "', Error" + e.getMessage, e)
case e: Exception =>
LOG.error("Error in Input Teradata input component '" + inputRDBMSEntity.getComponentId + "', Error" + e.getMessage, e)
throw new RuntimeException("Error in Input Teradata Component " + inputRDBMSEntity.getComponentId, e)
}
}
def getMappedSchema(schema:StructType) : List[StructField] = schema.toList.map(stuctField=> new StructField(stuctField.name,getDataType(stuctField.dataType).getOrElse(stuctField.dataType)))
private def getDataType(dataType: DataType): Option[DataType] = {
dataType.typeName.toUpperCase match {
case "DOUBLE" => Option(FloatType)
      /** In Teradata, if we create a table with a field of type Double, Teradata builds the
        * schema with its own Float datatype in place of Double. Conversely, if we attempt to read
        * from a Teradata table using an output schema declared as Double, execution fails because
        * the data is exported from Teradata as Float. To still obtain Double-typed data when
        * reading from a Teradata database, fields declared as Double are mapped to FloatType here. */
case "SHORT" => Option(IntegerType)
case "BOOLEAN" => Option(IntegerType)
case _ => None
}
}
}
case class TableDoesNotExistException(errorMessage: String, e: Exception) extends RuntimeException(errorMessage,e)
| capitalone/Hydrograph | hydrograph.engine/hydrograph.engine.spark/src/main/scala/hydrograph/engine/spark/components/InputTeradataComponent.scala | Scala | apache-2.0 | 6,730
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package xlang
import leon.purescala.Common._
import leon.purescala.Definitions._
import leon.purescala.Expressions._
import leon.purescala.Extractors._
import leon.purescala.Constructors._
import leon.purescala.ExprOps._
import leon.purescala.TypeOps.leastUpperBound
import leon.purescala.Types._
import leon.xlang.Expressions._
object ImperativeCodeElimination extends UnitPhase[Program] {
val name = "Imperative Code Elimination"
val description = "Transform imperative constructs into purely functional code"
def apply(ctx: LeonContext, pgm: Program): Unit = {
for {
fd <- pgm.definedFunctions
body <- fd.body if exists(requireRewriting)(body)
} {
val (res, scope, _) = toFunction(body)(State(fd, Set(), Map()))
fd.body = Some(scope(res))
}
    //probably not the cleanest way to do it, but if somehow we still have Old
    //expressions at that point, they can be safely removed as the object is
    //equal to its original value
for {
fd <- pgm.definedFunctions
} {
fd.postcondition = fd.postcondition.map(post => {
preMap{
case Old(v) => Some(v.toVariable)
case _ => None
}(post)
})
}
}
  /* varsInScope refers to variables declared in the same level scope.
Typically, when entering a nested function body, the scope should be
reset to empty */
private case class State(
parent: FunDef,
varsInScope: Set[Identifier],
funDefsMapping: Map[FunDef, (FunDef, List[Identifier])]
) {
def withVar(i: Identifier) = copy(varsInScope = varsInScope + i)
def withFunDef(fd: FunDef, nfd: FunDef, ids: List[Identifier]) =
copy(funDefsMapping = funDefsMapping + (fd -> (nfd, ids)))
}
  //return a "scope" consisting of purely functional code that defines potentially needed
  //new variables (val, not var) and a mapping from each modified variable (var, not val :) )
  //to its new name defined in the scope. The first returned value is the value of the expression
  //that should be introduced as such in the returned scope (the val already refers to the new names)
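  // Illustrative sketch (added, not from the original source): for an input such as
  //   x = x + 1            -- an Assignment, with `x` a var already in scope
  // toFunction returns roughly
  //   value : UnitLiteral()
  //   scope : body => Let(x1, Plus(x, 1), body)   -- x1 is a fresh identifier
  //   map   : Map(x -> x1)
  // so callers thread the fresh name x1 through the rest of the translation via replaceNames.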
private def toFunction(expr: Expr)(implicit state: State): (Expr, Expr => Expr, Map[Identifier, Identifier]) = {
import state._
expr match {
case LetVar(id, e, b) =>
val newId = id.freshen
val (rhsVal, rhsScope, rhsFun) = toFunction(e)
val (bodyRes, bodyScope, bodyFun) = toFunction(b)(state.withVar(id))
val scope = (body: Expr) => rhsScope(Let(newId, rhsVal, replaceNames(rhsFun + (id -> newId), bodyScope(body))).copiedFrom(expr))
(bodyRes, scope, (rhsFun + (id -> newId)) ++ bodyFun)
case Assignment(id, e) =>
assert(varsInScope.contains(id))
val newId = id.freshen
val (rhsVal, rhsScope, rhsFun) = toFunction(e)
val scope = (body: Expr) => rhsScope(Let(newId, rhsVal, body).copiedFrom(expr))
(UnitLiteral(), scope, rhsFun + (id -> newId))
case ite@IfExpr(cond, tExpr, eExpr) =>
val (cRes, cScope, cFun) = toFunction(cond)
val (tRes, tScope, tFun) = toFunction(tExpr)
val (eRes, eScope, eFun) = toFunction(eExpr)
val iteRType = leastUpperBound(tRes.getType, eRes.getType)
val modifiedVars: Seq[Identifier] = (tFun.keys ++ eFun.keys).toSet.intersect(varsInScope).toSeq
val resId = FreshIdentifier("res", iteRType)
val freshIds = modifiedVars.map( _.freshen )
val iteType = tupleTypeWrap(resId.getType +: freshIds.map(_.getType))
val thenVal = tupleWrap(tRes +: modifiedVars.map(vId => tFun.getOrElse(vId, vId).toVariable))
val elseVal = tupleWrap(eRes +: modifiedVars.map(vId => eFun.getOrElse(vId, vId).toVariable))
val iteExpr = IfExpr(cRes, replaceNames(cFun, tScope(thenVal)), replaceNames(cFun, eScope(elseVal))).copiedFrom(ite)
val scope = (body: Expr) => {
val tupleId = FreshIdentifier("t", iteType)
cScope(Let(tupleId, iteExpr, Let(
resId,
tupleSelect(tupleId.toVariable, 1, modifiedVars.nonEmpty),
freshIds.zipWithIndex.foldLeft(body)((b, id) =>
Let(id._1, tupleSelect(tupleId.toVariable, id._2 + 2, true), b)
))
).copiedFrom(expr))
}
(resId.toVariable, scope, cFun ++ modifiedVars.zip(freshIds).toMap)
case m @ MatchExpr(scrut, cses) =>
val csesRhs = cses.map(_.rhs) //we can ignore pattern, and the guard is required to be pure
val (csesRes, csesScope, csesFun) = csesRhs.map(toFunction).unzip3
val (scrutRes, scrutScope, scrutFun) = toFunction(scrut)
val modifiedVars: Seq[Identifier] = csesFun.toSet.flatMap((m: Map[Identifier, Identifier]) => m.keys).intersect(varsInScope).toSeq
val resId = FreshIdentifier("res", m.getType)
val freshIds = modifiedVars.map(id => FreshIdentifier(id.name, id.getType))
val matchType = tupleTypeWrap(resId.getType +: freshIds.map(_.getType))
val csesVals = csesRes.zip(csesFun).map {
case (cRes, cFun) => tupleWrap(cRes +: modifiedVars.map(vId => cFun.getOrElse(vId, vId).toVariable))
}
val newRhs = csesVals.zip(csesScope).map {
case (cVal, cScope) => replaceNames(scrutFun, cScope(cVal))
}
val matchE = matchExpr(scrutRes, cses.zip(newRhs).map {
case (mc @ MatchCase(pat, guard, _), newRhs) =>
//guard need to update ids (substitution of new names, and new fundef)
//but wont have side effects
val finalGuard = guard.map(g => {
val (resGuard, scopeGuard, _) = toFunction(g)
replaceNames(scrutFun, scopeGuard(resGuard))
})
MatchCase(pat, finalGuard, newRhs).setPos(mc)
}).setPos(m)
val scope = (body: Expr) => {
val tupleId = FreshIdentifier("t", matchType)
scrutScope(
Let(tupleId, matchE,
Let(resId, tupleSelect(tupleId.toVariable, 1, freshIds.nonEmpty),
freshIds.zipWithIndex.foldLeft(body)((b, id) =>
Let(id._1, tupleSelect(tupleId.toVariable, id._2 + 2, true), b)
)
)
)
)
}
(resId.toVariable, scope, scrutFun ++ modifiedVars.zip(freshIds).toMap)
case wh@While(cond, body) =>
val whileFunDef = new FunDef(parent.id.duplicate(name = (parent.id.name + "While")), Nil, Nil, UnitType).setPos(wh)
whileFunDef.addFlag(IsLoop(parent))
whileFunDef.body = Some(
IfExpr(cond,
Block(Seq(body), FunctionInvocation(whileFunDef.typed, Seq()).setPos(wh)),
UnitLiteral()))
whileFunDef.precondition = wh.invariant
whileFunDef.postcondition = Some(
Lambda(
Seq(ValDef(FreshIdentifier("bodyRes", UnitType))),
and(Not(getFunctionalResult(cond)), wh.invariant.getOrElse(BooleanLiteral(true))).setPos(wh)
).setPos(wh)
)
val newExpr = LetDef(Seq(whileFunDef), FunctionInvocation(whileFunDef.typed, Seq()).setPos(wh)).setPos(wh)
toFunction(newExpr)
case Block(Seq(), expr) =>
toFunction(expr)
case Block(exprs, expr) =>
val (scope, fun) = exprs.foldRight((body: Expr) => body, Map[Identifier, Identifier]())((e, acc) => {
val (accScope, accFun) = acc
val (rVal, rScope, rFun) = toFunction(e)
val scope = (body: Expr) => {
rVal match {
case FunctionInvocation(tfd, args) =>
rScope(replaceNames(rFun, Let(FreshIdentifier("tmp", tfd.returnType), rVal, accScope(body))))
case _ =>
rScope(replaceNames(rFun, accScope(body)))
}
}
(scope, rFun ++ accFun)
})
val (lastRes, lastScope, lastFun) = toFunction(expr)
val finalFun = fun ++ lastFun
(
replaceNames(finalFun, lastRes),
(body: Expr) => scope(replaceNames(fun, lastScope(body))),
finalFun
)
//pure expression (that could still contain side effects as a subexpression) (evaluation order is from left to right)
case Let(id, e, b) =>
val (bindRes, bindScope, bindFun) = toFunction(e)
val (bodyRes, bodyScope, bodyFun) = toFunction(b)
(
bodyRes,
(b2: Expr) => bindScope(Let(id, bindRes, replaceNames(bindFun, bodyScope(b2))).copiedFrom(expr)),
bindFun ++ bodyFun
)
//a function invocation can update variables in scope.
case fi@FunctionInvocation(tfd, args) =>
val (recArgs, argScope, argFun) = args.foldRight((Seq[Expr](), (body: Expr) => body, Map[Identifier, Identifier]()))((arg, acc) => {
val (accArgs, accScope, accFun) = acc
val (argVal, argScope, argFun) = toFunction(arg)
val newScope = (body: Expr) => argScope(replaceNames(argFun, accScope(body)))
(argVal +: accArgs, newScope, argFun ++ accFun)
})
val fd = tfd.fd
state.funDefsMapping.get(fd) match {
case Some((newFd, modifiedVars)) => {
val newInvoc = FunctionInvocation(newFd.typed, recArgs ++ modifiedVars.map(id => id.toVariable)).setPos(fi)
val freshNames = modifiedVars.map(id => id.freshen)
val tmpTuple = FreshIdentifier("t", newFd.returnType)
val scope = (body: Expr) => {
argScope(Let(tmpTuple, newInvoc,
freshNames.zipWithIndex.foldRight(body)((p, b) =>
Let(p._1, TupleSelect(tmpTuple.toVariable, p._2 + 2), b))
))
}
val newMap = argFun ++ modifiedVars.zip(freshNames).toMap
(TupleSelect(tmpTuple.toVariable, 1), scope, newMap)
}
case None =>
(FunctionInvocation(tfd, recArgs).copiedFrom(fi), argScope, argFun)
}
case LetDef(fds, b) =>
if(fds.size > 1) {
//TODO: no support for true mutual recursion
toFunction(LetDef(Seq(fds.head), LetDef(fds.tail, b)))
} else {
val fd = fds.head
def fdWithoutSideEffects = {
fd.body.foreach { bd =>
val (fdRes, fdScope, _) = toFunction(bd)
fd.body = Some(fdScope(fdRes))
}
val (bodyRes, bodyScope, bodyFun) = toFunction(b)
(bodyRes, (b2: Expr) => LetDef(Seq(fd), bodyScope(b2)).setPos(fd).copiedFrom(expr), bodyFun)
}
fd.body match {
case Some(bd) => {
//we take any vars in scope needed (even for read only).
//if read only, we only need to capture it without returning, but
//returning it simplifies the code (more consistent) and should
//not have a big impact on performance
val modifiedVars: List[Identifier] = {
val freeVars = variablesOf(fd.fullBody)
val transitiveVars = collect[Identifier]({
case FunctionInvocation(tfd, _) => state.funDefsMapping.get(tfd.fd).map(p => p._2.toSet).getOrElse(Set())
case _ => Set()
})(fd.fullBody)
(freeVars ++ transitiveVars).intersect(state.varsInScope).toList
}
//val modifiedVars: List[Identifier] =
// collect[Identifier]({
// case Assignment(v, _) => Set(v)
// case FunctionInvocation(tfd, _) => state.funDefsMapping.get(tfd.fd).map(p => p._2.toSet).getOrElse(Set())
// case _ => Set()
// })(bd).intersect(state.varsInScope).toList
if(modifiedVars.isEmpty) fdWithoutSideEffects else {
val freshNames: List[Identifier] = modifiedVars.map(id => id.freshen)
val newParams: Seq[ValDef] = fd.params ++ freshNames.map(n => ValDef(n))
val freshVarDecls: List[Identifier] = freshNames.map(id => id.freshen)
val rewritingMap: Map[Identifier, Identifier] =
modifiedVars.zip(freshVarDecls).toMap
val freshBody =
preMap({
case Assignment(v, e) => rewritingMap.get(v).map(nv => Assignment(nv, e))
case Variable(id) => rewritingMap.get(id).map(nid => Variable(nid))
case _ => None
})(bd)
val wrappedBody = freshNames.zip(freshVarDecls).foldLeft(freshBody)((body, p) => {
LetVar(p._2, Variable(p._1), body)
})
val newReturnType = TupleType(fd.returnType :: modifiedVars.map(_.getType))
val newFd = new FunDef(fd.id.freshen, fd.tparams, newParams, newReturnType).setPos(fd)
newFd.addFlags(fd.flags)
val (fdRes, fdScope, fdFun) =
toFunction(wrappedBody)(
State(state.parent,
Set(),
state.funDefsMapping.map{case (fd, (nfd, mvs)) => (fd, (nfd, mvs.map(v => rewritingMap.getOrElse(v, v))))} +
(fd -> ((newFd, freshVarDecls))))
)
val newRes = Tuple(fdRes :: freshVarDecls.map(vd => fdFun(vd).toVariable))
val newBody = fdScope(newRes)
newFd.body = Some(newBody)
newFd.precondition = fd.precondition.map(prec => {
val fresh = replace(modifiedVars.zip(freshNames).map(p => (p._1.toVariable, p._2.toVariable)).toMap, prec)
//still apply recursively to update all function invocation
val (res, scope, _) = toFunction(fresh)
scope(res)
})
newFd.postcondition = fd.postcondition.map(post => {
val Lambda(Seq(res), postBody) = post
val newRes = ValDef(FreshIdentifier(res.id.name, newFd.returnType))
val newBody =
replace(
modifiedVars.zipWithIndex.map{ case (v, i) =>
(v.toVariable, TupleSelect(newRes.toVariable, i+2)): (Expr, Expr)}.toMap ++
modifiedVars.zip(freshNames).map{ case (ov, nv) =>
(Old(ov), nv.toVariable)}.toMap +
(res.toVariable -> TupleSelect(newRes.toVariable, 1)),
postBody)
val (r, scope, _) = toFunction(newBody)
Lambda(Seq(newRes), scope(r)).setPos(post)
})
val (bodyRes, bodyScope, bodyFun) = toFunction(b)(state.withFunDef(fd, newFd, modifiedVars))
(bodyRes, (b2: Expr) => LetDef(Seq(newFd), bodyScope(b2)).copiedFrom(expr), bodyFun)
}
}
case None => fdWithoutSideEffects
}
}
//TODO: handle vars in scope, just like LetDef
case ld@Lambda(params, body) =>
val (bodyVal, bodyScope, bodyFun) = toFunction(body)
(Lambda(params, bodyScope(bodyVal)).copiedFrom(ld), (e: Expr) => e, Map())
case c @ Choose(b) =>
//Recall that Choose cannot mutate variables from the scope
(c, (b2: Expr) => b2, Map())
case And(args) =>
val ifExpr = args.reduceRight((el, acc) => IfExpr(el, acc, BooleanLiteral(false)))
toFunction(ifExpr)
case Or(args) =>
val ifExpr = args.reduceRight((el, acc) => IfExpr(el, BooleanLiteral(true), acc))
toFunction(ifExpr)
//TODO: this should be handled properly by the Operator case, but there seems to be a subtle bug in the way Let's are lifted
      // which leads to Assert referring to the wrong value of a var in some cases.
case a@Assert(cond, msg, body) =>
val (condVal, condScope, condFun) = toFunction(cond)
val (bodyRes, bodyScope, bodyFun) = toFunction(body)
val scope = (body: Expr) => condScope(Assert(condVal, msg, replaceNames(condFun, bodyScope(body))).copiedFrom(a))
(bodyRes, scope, condFun ++ bodyFun)
case n @ Operator(args, recons) =>
val (recArgs, scope, fun) = args.foldRight((Seq[Expr](), (body: Expr) => body, Map[Identifier, Identifier]()))((arg, acc) => {
val (accArgs, accScope, accFun) = acc
val (argVal, argScope, argFun) = toFunction(arg)
val newScope = (body: Expr) => argScope(replaceNames(argFun, accScope(body)))
(argVal +: accArgs, newScope, argFun ++ accFun)
})
(recons(recArgs).copiedFrom(n), scope, fun)
case _ =>
sys.error("not supported: " + expr)
}
}
def replaceNames(fun: Map[Identifier, Identifier], expr: Expr) = replaceFromIDs(fun mapValues Variable, expr)
/* Extract functional result value. Useful to remove side effect from conditions when moving it to post-condition */
private def getFunctionalResult(expr: Expr): Expr = {
preMap({
case Block(_, res) => Some(res)
case _ => None
})(expr)
}
private def requireRewriting(expr: Expr) = expr match {
case (e: Block) => true
case (e: Assignment) => true
case (e: While) => true
case (e: LetVar) => true
case _ => false
}
}
| regb/leon | src/main/scala/leon/xlang/ImperativeCodeElimination.scala | Scala | gpl-3.0 | 17,364 |
package kpi.twitter.analysis
import com.typesafe.config.{Config, ConfigFactory}
package object utils {
/**
   * The kpi.twitter.analysis.BuildInfo class is auto-generated before the
   * compilation phase by all other modules that depend on this one.
*/
lazy val buildInfo = Class.forName("kpi.twitter.analysis.BuildInfo")
.newInstance
.asInstanceOf[{ val info: Map[String, String] }]
.info
def getOptions(fileName: String = "application.conf"): Config =
ConfigFactory.load(fileName)
/*
Project specific configuration keys for all project
modules should be declared and explained here to
enhance readability
*/
// Twitter OAuth credentials
val twitterConsumerKey = "twitter.consumerKey"
val twitterConsumerSecret = "twitter.consumerSecret"
val twitterAccessToken = "twitter.accessToken"
val twitterAccessTokenSecret = "twitter.accessTokenSecret"
/**
* The time interval at which streaming data will be divided into batches
*/
val batchDurationMs = "batch.duration.ms"
/**
* Comma-separated list of hash-tags, e.g.
* @example #spark,#bigdata
*/
val hashTagsFilter = "filter.hashtags"
/**
* Initial set of Kafka brokers to discover Kafka cluster from
*/
val kafkaBootstrapServers = "kafka.bootstrap.servers"
/**
* Topic to store all read tweets
*/
val kafkaTweetsAllTopic = "kafka.tweets.all.topic"
val trainingPath = "training.path"
val modelPath = "model.path"
val kafkaTweetsPredictedSentimentTopic = "kafka.tweets.predicted.sentiment.topic"
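  /*
   * Minimal usage sketch (added, not from the original source): these keys are plain
   * Typesafe Config paths, so a module would typically resolve them as
   *
   *   val options = getOptions()
   *   val consumerKey = options.getString(twitterConsumerKey)
   *   val batchMs = options.getLong(batchDurationMs)
   */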
}
| GRpro/TwitterAnalytics | lib/utils/src/main/scala/kpi/twitter/analysis/utils/package.scala | Scala | apache-2.0 | 1,562 |
/*
* Copyright (C) 2015 Language Technology Group
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package model.queryable.impl
import scalikejdbc.NamedDB
import testFactories.{DatabaseRollback, FlatSpecWithDatabaseTrait}
// scalastyle:off
import scalikejdbc._
// scalastyle:on
class RelationshipQueryableImplTest extends FlatSpecWithDatabaseTrait with DatabaseRollback {
override def testDatabase: NamedDB = NamedDB('newsleakTestDB)
// Mocking setup
final class RelationshipQueryableTestable extends RelationshipQueryableImpl {
override def connector: NamedDB = testDatabase
}
val uut = new RelationshipQueryableTestable
override def beforeAll(): Unit = {
testDatabase.localTx { implicit session =>
sql"INSERT INTO relationship VALUES (1, 1, 2, 3, false)".update.apply()
sql"INSERT INTO relationship VALUES (2, 3, 4, 10, true)".update.apply()
sql"INSERT INTO entity VALUES (1, ${"Angela Merkel"}, ${"PER"}, 7, true)".update.apply()
}
}
"deleteRelationship" should "set the backlist flag to true" in {
uut.delete(1)
val actual = testDatabase.readOnly { implicit session =>
sql"SELECT isBlacklisted FROM relationship WHERE id = 1".map(_.boolean("isBlacklisted")).single().apply()
}.getOrElse(fail)
assert(actual)
}
"getById" should "not return blacklisted relationships" in {
val actual = uut.getById(2)
assert(!actual.isDefined)
}
"getById" should "not return relationships if one participating entity is blacklisted" in {
val actual = uut.getById(1)
assert(!actual.isDefined)
}
"getByEntity" should "not return relationship if entity is blacklisted" in {
val actual = uut.getByEntity(1)
assert(actual.isEmpty)
}
}
| tudarmstadt-lt/newsleak | common/src/test/scala/model/queryable/impl/RelationshipQueryableImplTest.scala | Scala | agpl-3.0 | 2,362 |
/*
* Copyright 2015 Simin You
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spatialspark.partition.udp
/**
* Created by Simin You on 10/30/14.
*/
/**
 * Users can bring in their own predefined partition.
*/
object UserDefinedPartition {
}
| manueltimita/SpatialSpark | src/main/scala/spatialspark/partition/udp/UserDefinedPartition.scala | Scala | apache-2.0 | 790 |
package org.dmonix.area51.akka.extension
import java.util.concurrent.atomic.AtomicLong
import akka.actor.{Actor, ActorSystem, ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider, Props}
object DummyExtension extends ExtensionId[DummyExtension]
with ExtensionIdProvider {
//The lookup method is required by ExtensionIdProvider,
// so we return ourselves here, this allows us
// to configure our extension to be loaded when
// the ActorSystem starts up
override def lookup = DummyExtension
//This method will be called by Akka
// to instantiate our Extension
override def createExtension(system: ExtendedActorSystem) = new DummyExtension
/**
* Java API: retrieve the Count extension for the given system.
*/
override def get(system: ActorSystem): DummyExtension = super.get(system)
}
/**
* @author Peter Nerg
*/
class DummyExtension extends Extension {
//Since this Extension is a shared instance
// per ActorSystem we need to be threadsafe
private val counter = new AtomicLong(0)
//This is the operation this Extension provides
def increment() = counter.incrementAndGet()
}
/**
* @author Peter Nerg
*/
class DummyActor extends Actor {
private val dummyExtension = DummyExtension(context.system)
override def receive:Receive = {
case _ =>
println(self.path+":"+dummyExtension.increment())
}
}
/**
* Dummy application to start the [[DummyActor]] and perform some testing of the Extension
* @author Peter Nerg
*/
object DummyApp extends App {
import org.dmonix.area51.akka.Configuration._
private val actorSystem = ActorSystem("TestingExtensions", cfg)
private val dummyActor1 = actorSystem.actorOf(Props(new DummyActor), "DummyActor-1")
private val dummyActor2 = actorSystem.actorOf(Props(new DummyActor), "DummyActor-2")
dummyActor1 ! "whatever"
dummyActor2 ! "whatever"
dummyActor1 ! "whatever"
Thread.sleep(1000)
actorSystem.terminate()
private def cfg = s"""akka {
    |extensions = ["org.dmonix.area51.akka.extension.DummyExtension"]
|}""".asCfg
}
| pnerg/area51-akka | src/main/scala/org/dmonix/area51/akka/extension/DummyExtension.scala | Scala | apache-2.0 | 2,146 |
package hasheq
import org.scalacheck.Properties
import org.scalatest.FunSuite
import org.scalatest.prop.Checkers
trait TestSuite extends FunSuite with Checkers {
protected def checkAll(props: Properties): Unit =
for ((name, prop) <- props.properties) {
test(name) { check(prop) }
}
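  /*
   * Minimal usage sketch (added, not from the original source), assuming a ScalaCheck
   * Properties object is available:
   *
   *   object SetSpec extends Properties("HashSet") { ... }
   *
   *   class SetSuite extends TestSuite {
   *     checkAll(SetSpec)  // registers each ScalaCheck property as a ScalaTest test
   *   }
   */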
}
| TomasMikula/hasheq | src/test/scala/hasheq/TestSuite.scala | Scala | bsd-3-clause | 303