code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 5–1M | stringlengths 5–109 | stringlengths 6–208 | stringclasses 1 value | stringclasses 15 values | int64 5–1M
package com.alvinalexander.breakandcontinue
import util.control.Breaks._
import org.apache.spark.SparkContext._
import org.apache.spark.rdd._
import org.apache.spark._
import scala.collection.immutable.Set
import java.text.SimpleDateFormat
import java.util.Date
import java.io._
/*
REF: http://wanghuanming.com/2014/10/spark-apriori/
*/
object Sapriori{
def main(args: Array[String]){
var stime = System.currentTimeMillis()
val DATA_PATH = "hdfs://zhm01:9000/user/root/data/webdocs.dat"
val minSup = 0.2
val conf = new SparkConf().setAppName("Sapriori")
.set("spark.master","spark://zhm01:7077") //.set("spark.master","yarn")
.set("spark.driver.memory","28g")
.set("spark.executor.memory","18g")
.set("spark.cores.max","80")//.set("spark.executor.cores","8") //.set("spark.executor.instances","12")
.set("spark.default.parallelism","80")
val sc = new SparkContext(conf)
var FIs = collection.mutable.ArrayBuffer[Array[(Set[Int], Int)]]()
val rawTrans = sc.textFile(DATA_PATH,80).map(_.split(" ").map(_.trim.toInt)).cache() // map: to each line; flatMap: to all lines
// rawTrans: RDD[Array[Int]]
// .textFile->Array[String]; map->String,flatMap->Char
// val broadTrans = sc.broadcast(rawTrans.collect()) // broadTrans: Array[Array[Int]]
// val lenTrans = sc.broadcast(rawTrans.count())
val minCount = sc.broadcast(rawTrans.count()*minSup)
val oneFIS = rawTrans.flatMap(line=>line).map((_,1)).reduceByKey(_ + _).filter(_._2 > minCount.value) //RDD[(Int, Int)]
var freqItems = Array[(Set[Int], Int)]()
var cdds = scala.collection.immutable.Set[Set[Int]]()
// println(s"\\nlenOfTrans:${lenTrans.value}\\n")
// println(s"\\nrawTrans:$rawTrans\\n")
def getNowDate():String = {new SimpleDateFormat("yyyy-MM-dd HH:mm:ss").format(new Date())}
/////////////Fk-1 -> Ck////////////////
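// Join step (F(k-1) -> Ck): two frequent (k-1)-itemsets merge into a k-itemset
// candidate iff their first (k-2) sorted items coincide.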
def genCdds(freqItems:Array[(Set[Int], Int)], k:Int):Set[Set[Int]] = {
//if(k<2)
var fsets:Array[Set[Int]] = freqItems.map(x=>x._1) // with flatMap this would yield 'Array[Int]', not 'Array[Set[Int]]'
var cdds = Set[Set[Int]]()
for{
s1<-fsets
s2<-fsets
if s1!=s2
if s1.toList.sorted.take(k-2)==s2.toList.sorted.take(k-2) // compare the first (k-2) items, not (k-1)! Otherwise no candidates would be generated. e.g. to gen 3C from 2F, compare 1 item. // Fk->Ck+1: shared (k-1)-prefix rule
}cdds+=s1.union(s2)
cdds
}
////////////Ck->Fk////////////////////
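// Count step: every transaction emits a (candidate, 0|1) support pair for each
// candidate; reduceByKey sums them and the filter keeps those above minCount.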
def supOrNot(trans:Set[Int], cdd:Set[Int]):Int = {if(cdd.subsetOf(trans)) 1 else 0}
def validateCdds3(cdds:Set[Set[Int]]) = {
rawTrans.flatMap(line => {
var tmp = Set[(Set[Int], Int)]()
for(cdd <- cdds){
tmp += cdd -> {if(cdd.subsetOf(line.toSet)) 1 else 0}
}
tmp
}).reduceByKey(_ + _).filter(_._2 > minCount.value)
}
//////
def validateCdds(cdds:Set[Set[Int]]) = { // validateCdds(): RDD[(Set[Int], Int)]
rawTrans.flatMap(line => { // rawTrans -> sc.parallelize(broadTrans.value,12)
// flatMap:map+flatten-> Set[(Set[Int], Int)].flatten-> ( (Set[Int], Int), (Set[Int], Int), ...), not flatten first.//Get elements in "Set[]".
var tmp = Set[(Set[Int], Int)]()
for(cdd <- cdds){
tmp += cdd -> supOrNot(line.toSet, cdd)
}
tmp // i.e. return tmp
}).reduceByKey(_ + _).filter(_._2 > minCount.value)
}
////////////////////////////////
//1F: RDD[(Int,Int)]-> Array[(Set[Int], Int)], like Array((Set(36),6812),...)
freqItems = oneFIS.map(x => (Set(x._1),x._2)).collect()
val keySet = freqItems.map(_._1)
FIs += freqItems
// generate 2C
println(s"Gen 2C. ${getNowDate}")
for{i <- keySet;j <- keySet;if i != j}cdds += i.union(j) //cdds += Set(i, j)
println(s"GG! Size of 2C is ${cdds.size}. ${getNowDate}")
// 2C->2F
println(s"2C -> 2F. ${getNowDate}")
freqItems = validateCdds3(cdds).collect() //freqItems:RDD[(Set[Int], Int)].collect()-> Array[(Set[Int], Int)] //results like "(Set(86, 76),0.53)"
println(s"GG! Size of 2F is ${freqItems.size}. ${getNowDate}\\n")
var k:Int = 3
breakable{
while(freqItems.size > 0){ // optionally cap iterations with: && k<=6 (collect() never returns null, so checking emptiness is enough)
FIs += freqItems
//->Ck //genCdds(freqItems:Array[(Set[Int], Int)], k:Int): Set[Set[Int]] //
var tmpS1 = System.currentTimeMillis();println(s"Gen ${k}C.")
var lcdds:org.apache.spark.broadcast.Broadcast[scala.collection.immutable.Set[scala.collection.immutable.Set[Int]]] = null
if(freqItems.size<=40000){
var rcdds = sc.parallelize(freqItems.map(x=>x._1),20).cartesian(sc.parallelize(freqItems.map(x=>x._1),20)).map(x=>(x._1++x._2)).filter(_.size==k)
lcdds = sc.broadcast(rcdds.collect().toSet)
}else{
lcdds = sc.broadcast(genCdds(freqItems, k))
}
var tmpE1 = System.currentTimeMillis()
println(s"GG! Size of ${k}C is ${lcdds.value.size}.Timecost: ${(tmpE1-tmpS1)/1000}s.")
//Ck->Fk //def validateCdds(cdds:Set[Set[Int]]): RDD[(Set[Int], Int)]
var tmpS2 = System.currentTimeMillis()
println(s"${k}C -> ${k}F.")
freqItems = validateCdds3(lcdds.value).collect()
//// def supOrNot(trans:Set[Int], cdd:Set[Int]):Int = {if(cdd.subsetOf(trans)) 1 else 0}
// freqItems = rcdds.repartition(100).map(cdd => {
// var count:Int = 0
// for(trans <- broadTrans.value){
// if(cdd.subsetOf(trans.toSet)) count += 1;
// }
// cdd->count
//}).reduceByKey(_ + _).filter(_._2 > minCount.value).collect()
////
lcdds.destroy()
var tmpE2 = System.currentTimeMillis()
println(s"GG! Size of ${k}F is ${freqItems.size}.Timecost: ${(tmpE2-tmpS2)/1000}s.")
println(s"This iteration cost ${(tmpE1-tmpS1+tmpE2-tmpS2)/1000}s\\n")
k += 1
}
}
//////
//FIs: ArrayBuffer[Array[(Set[Int], Int)]]
var etime = System.currentTimeMillis()
println(s"Timecost: ${(etime-stime)/1000}s.")
val file = new File("/home/zhm/sparkAprioriV7E/FIs.txt")
val bw = new BufferedWriter(new FileWriter(file))
for(vl<-FIs.flatMap(line=>line).sortBy(-_._2)){
bw.write(vl.toString+"\n")
}
println(s"Write into $file")
bw.close()
//
sc.stop()
}
}
| heming621/postgraduate- | sparkAprioriV7E/src/main/scala/Sapriori.scala | Scala | mit | 7,191 |
package serialization
import com.mongodb.DBObject
import com.mongodb.casbah.commons.Implicits._
import com.mongodb.casbah.commons.MongoDBObject
import com.novus.salat.transformers.CustomTransformer
import models._
trait Bson {
object SourceTransformer extends CustomTransformer[Source, DBObject] {
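// Polymorphic mapping keyed on the "type" discriminator stored in each document.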
def deserialize(dbo: DBObject) = {
dbo.as[String]("type") match {
case "gnip" => GnipSource(dbo.as[List[String]]("rules"))
case "datasift" => DatasiftSource(dbo.as[String]("csdl"))
case _ => throw new Exception("unknown source")
}
}
def serialize(source: Source) = {
source match {
case g: GnipSource => MongoDBObject("type" -> "gnip", "rules" -> g.rules)
case d: DatasiftSource => MongoDBObject("type" -> "datasift", "csdl" -> d.csdl)
case _ => MongoDBObject("type" -> "unknown")
}
}
}
}
| fernando-romero/salat-test | app/serialization/Bson.scala | Scala | mit | 885 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler.cluster.k8s
import java.util.concurrent.TimeUnit
import io.fabric8.kubernetes.api.model.PodListBuilder
import io.fabric8.kubernetes.client.KubernetesClient
import org.jmock.lib.concurrent.DeterministicScheduler
import org.mockito.{Mock, MockitoAnnotations}
import org.mockito.Mockito.{verify, when}
import org.scalatest.BeforeAndAfter
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.deploy.k8s.Config._
import org.apache.spark.deploy.k8s.Constants._
import org.apache.spark.deploy.k8s.Fabric8Aliases._
import org.apache.spark.scheduler.cluster.k8s.ExecutorLifecycleTestUtils._
class ExecutorPodsPollingSnapshotSourceSuite extends SparkFunSuite with BeforeAndAfter {
private val sparkConf = new SparkConf
private val pollingInterval = sparkConf.get(KUBERNETES_EXECUTOR_API_POLLING_INTERVAL)
@Mock
private var kubernetesClient: KubernetesClient = _
@Mock
private var podOperations: PODS = _
@Mock
private var appIdLabeledPods: LABELED_PODS = _
@Mock
private var executorRoleLabeledPods: LABELED_PODS = _
@Mock
private var eventQueue: ExecutorPodsSnapshotsStore = _
private var pollingExecutor: DeterministicScheduler = _
private var pollingSourceUnderTest: ExecutorPodsPollingSnapshotSource = _
before {
MockitoAnnotations.initMocks(this)
pollingExecutor = new DeterministicScheduler()
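// jMock's DeterministicScheduler runs submitted tasks only when tick() is
// invoked, so starting the source before stubbing the mocks below is safe.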
pollingSourceUnderTest = new ExecutorPodsPollingSnapshotSource(
sparkConf,
kubernetesClient,
eventQueue,
pollingExecutor)
pollingSourceUnderTest.start(TEST_SPARK_APP_ID)
when(kubernetesClient.pods()).thenReturn(podOperations)
when(podOperations.withLabel(SPARK_APP_ID_LABEL, TEST_SPARK_APP_ID))
.thenReturn(appIdLabeledPods)
when(appIdLabeledPods.withLabel(SPARK_ROLE_LABEL, SPARK_POD_EXECUTOR_ROLE))
.thenReturn(executorRoleLabeledPods)
}
test("Items returned by the API should be pushed to the event queue") {
when(executorRoleLabeledPods.list())
.thenReturn(new PodListBuilder()
.addToItems(
runningExecutor(1),
runningExecutor(2))
.build())
pollingExecutor.tick(pollingInterval, TimeUnit.MILLISECONDS)
verify(eventQueue).replaceSnapshot(Seq(runningExecutor(1), runningExecutor(2)))
}
}
| pgandhi999/spark | resource-managers/kubernetes/core/src/test/scala/org/apache/spark/scheduler/cluster/k8s/ExecutorPodsPollingSnapshotSourceSuite.scala | Scala | apache-2.0 | 3,106 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.moneyservicebusiness
import models.Country
import org.scalatestplus.play.PlaySpec
import jto.validation._
import jto.validation.forms.UrlFormEncoded
import jto.validation.ValidationError
import play.api.libs.json.{JsPath, JsSuccess, Json}
class MostTransactionsSpec extends PlaySpec {
"MostTransactions" must {
val rule: Rule[UrlFormEncoded, MostTransactions] = implicitly
val write: Write[MostTransactions, UrlFormEncoded] = implicitly
"roundtrip through json" in {
val model: MostTransactions =
MostTransactions(Seq(Country("United Kingdom", "GB")))
Json.fromJson[MostTransactions](Json.toJson(model)) mustEqual JsSuccess(model, JsPath \ "mostTransactionsCountries")
}
"roundtrip through forms" in {
val model: MostTransactions =
MostTransactions(Seq(Country("United Kingdom", "GB")))
rule.validate(write.writes(model)) mustEqual Valid(model)
}
"fail to validate when there are no countries" in {
val form: UrlFormEncoded = Map(
"mostTransactionsCountries" -> Seq.empty
)
rule.validate(form) mustEqual Invalid(
Seq((Path \\ "mostTransactionsCountries") -> Seq(ValidationError("error.required.countries.msb.most.transactions")))
)
}
"fail to validate when there are more than 3 countries" in {
// scalastyle:off magic.number
val form: UrlFormEncoded = Map(
"mostTransactionsCountries[]" -> Seq.fill(4)("GB")
)
rule.validate(form) mustEqual Invalid(
Seq((Path \\ "mostTransactionsCountries") -> Seq(ValidationError("error.maxLength", 3)))
)
}
}
"MostTransactions Form Writes" when {
"an item is repeated" must {
"serialise all items correctly" in {
MostTransactions.formW.writes(MostTransactions(List(
Country("Country2", "BB"),
Country("Country1", "AA"),
Country("Country1", "AA")
))) must be (
Map(
"mostTransactionsCountries[0]" -> List("BB"),
"mostTransactionsCountries[1]" -> List("AA"),
"mostTransactionsCountries[2]" -> List("AA")
)
)
}
}
}
"Most Transactions Form Reads" when {
"all countries are valid" must {
"Successfully read from the form" in {
MostTransactions.formR.validate(
Map(
"mostTransactionsCountries[0]" -> Seq("GB"),
"mostTransactionsCountries[1]" -> Seq("MK"),
"mostTransactionsCountries[2]" -> Seq("JO")
)
) must be(Valid(MostTransactions(Seq(
Country("United Kingdom", "GB"),
Country("Macedonia, the Former Yugoslav Republic of", "MK"),
Country("Jordan", "JO")
))))
}
}
"the second country is invalid" must {
"fail validation" in {
val x: VA[MostTransactions] = MostTransactions.formR.validate(
Map(
"mostTransactionsCountries[0]" -> Seq("GB"),
"mostTransactionsCountries[1]" -> Seq("hjjkhjkjh"),
"mostTransactionsCountries[2]" -> Seq("MK")
)
)
x must be (Invalid(Seq((Path \ "mostTransactionsCountries" \ 1) -> Seq(ValidationError("error.invalid.country")))))
}
}
}
}
| hmrc/amls-frontend | test/models/moneyservicebusiness/MostTransactionsSpec.scala | Scala | apache-2.0 | 3,872 |
package de.choffmeister.akkavsnode.models
import reactivemongo.api.collections.default.BSONCollection
import reactivemongo.api.indexes._
import reactivemongo.bson._
import scala.concurrent._
case class User(
id: BSONObjectID = BSONObjectID("00" * 12),
name: String,
description: String) extends BaseModel
class UserTable(database: Database, collection: BSONCollection)(implicit executor: ExecutionContext) extends Table[User](database, collection) {
implicit val reader = UserBSONFormat.Reader
implicit val writer = UserBSONFormat.Writer
}
object UserBSONFormat {
implicit object Reader extends BSONDocumentReader[User] {
def read(doc: BSONDocument): User = User(
id = doc.getAs[BSONObjectID]("_id").get,
name = doc.getAs[String]("name").get,
description = doc.getAs[String]("description").get
)
}
implicit object Writer extends BSONDocumentWriter[User] {
def write(obj: User): BSONDocument = BSONDocument(
"_id" -> obj.id,
"name" -> obj.name,
"description" -> obj.description
)
}
}
| choffmeister/akka-vs-node | akka/src/main/scala/de/choffmeister/akkavsnode/models/User.scala | Scala | mit | 1,059 |
import quoted._
import scala.quoted.staging._
object Test {
given Toolbox = Toolbox.make(getClass.getClassLoader)
def main(args: Array[String]): Unit = withQuoteContext {
val q = '{
type T[X] = List[X]
val x = "foo"
${
val y = 'x
'{ val z: T[String] = List($y) }
}
x
}
println(q.show)
}
}
| som-snytt/dotty | tests/run-staging/quote-nested-6.scala | Scala | apache-2.0 | 356 |
package TwentyOneToThirty
/**
* Created by Farrell on 5/21/15.
*/
object P22 {
}
| Spinlocks/99Problems | src/TwentyOneToThirty/P22.scala | Scala | apache-2.0 | 85 |
/*
* Copyright (C) 2016-2017 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.javadsl.persistence.cassandra
import java.util.concurrent.{ CompletableFuture, CompletionStage }
import java.util.function.BiFunction
import java.util.{ Collections, Optional, UUID, List => JList }
import com.datastax.driver.core.BoundStatement
import com.lightbend.lagom.javadsl.persistence.{ AggregateEvent, AggregateEventTag }
/**
* Consume events produced by [[com.lightbend.lagom.javadsl.persistence.PersistentEntity]]
* instances and update one or more tables in Cassandra that are optimized for queries.
* The events belong to a [[com.lightbend.lagom.javadsl.persistence.AggregateEventTag]], e.g. all
* persistent events of all `Order` entities.
*/
@deprecated("Use ReadSideProcessor instead with CassandraReadSide builder", "1.2.0")
abstract class CassandraReadSideProcessor[Event <: AggregateEvent[Event]] {
case class EventHandlers(handlers: Map[Class[_ <: Event], BiFunction[_ <: Event, UUID, CompletionStage[JList[BoundStatement]]]])
/**
* Mutable builder for defining event handlers.
*/
class EventHandlersBuilder {
private var handlers: Map[Class[_ <: Event], BiFunction[_ <: Event, UUID, CompletionStage[JList[BoundStatement]]]] = Map.empty
/**
* Define the event handler that will be used for events of a given class.
*/
def setEventHandler[E <: Event](eventClass: Class[E], handler: BiFunction[E, UUID, CompletionStage[JList[BoundStatement]]]): Unit =
handlers = handlers.updated(eventClass, handler)
/**
* When all event handlers have been defined the immutable
* `ReadSideHandler` is created with this method.
*/
def build(): EventHandlers = new EventHandlers(handlers)
}
/**
* The processed events belong to a [[com.lightbend.lagom.javadsl.persistence.AggregateEventTag]]
* that is specified by this method, e.g. all persistent events of all `Order` entities.
*/
def aggregateTag: AggregateEventTag[Event]
/**
* First you must tell where in the event stream the processing should start,
* i.e. return the offset. The current offset is a parameter to the event
* handler for each event and it should typically be stored so that it can be
* restored with a `select` statement here. Use the [[CassandraSession]]
* to get the stored offset.
*
* Other things that is typically performed in this method is to create
* prepared statements that are later used when processing the events.
* Use [[CassandraSession#prepare]] to create the prepared statements.
*
* Return [[#noOffset]] if you want to processes all events, e.g. when
* starting the first time or if the number of events are known to be small
* enough to processes all events.
*/
def prepare(session: CassandraSession): CompletionStage[Optional[UUID]]
/**
* Define the event handlers that are to be used. Use the supplied
* `builder` to define the event handlers. One handler for each event class.
* A handler is a `BiFunction` that takes the event and the offset as
* parameters and returns zero or more bound statements that will be executed
* before processing next event.
*/
def defineEventHandlers(builder: EventHandlersBuilder): EventHandlers
/**
* Convenience method to create an already completed `CompletionStage`
* with one `BoundStatement`.
*/
final def completedStatement(stmt: BoundStatement): CompletionStage[JList[BoundStatement]] =
CompletableFuture.completedFuture(Collections.singletonList(stmt))
/**
* Convenience method to create an already completed `CompletionStage`
* with several `BoundStatement`.
*/
final def completedStatements(stmts: JList[BoundStatement]): CompletionStage[JList[BoundStatement]] =
CompletableFuture.completedFuture(stmts)
/**
* Convenience method to create an already completed `CompletionStage`
* with zero `BoundStatement`.
*/
final val emptyStatements: CompletionStage[JList[BoundStatement]] =
CompletableFuture.completedFuture(Collections.emptyList[BoundStatement]())
final val noOffset: CompletionStage[Optional[UUID]] =
CompletableFuture.completedFuture(Optional.empty())
}
| edouardKaiser/lagom | persistence-cassandra/javadsl/src/main/scala/com/lightbend/lagom/javadsl/persistence/cassandra/CassandraReadSideProcessor.scala | Scala | apache-2.0 | 4,227 |
package akashic.storage
import akashic.storage.admin.User
import org.apache.commons.codec.binary.Base64
import org.apache.http.client.entity.EntityBuilder
import org.apache.http.client.methods.{HttpPut, HttpGet, HttpPost}
import org.apache.http.impl.client.HttpClients
import org.apache.http.message.BasicHeader
import scala.xml.XML
class AdminTest extends ServerTestBase {
case class FixtureParam()
override protected def withFixture(test: OneArgTest) = {
test(FixtureParam())
}
def rootURL = s"http://${server.address}/admin/user"
val authHeader = new BasicHeader("Authorization", s"Basic ${Base64.encodeBase64URLSafeString("admin:passwd".getBytes)}")
test("post -> get") { p =>
// POST
val postReq = new HttpPost(rootURL)
postReq.addHeader(authHeader)
val postRes = HttpClients.createDefault.execute(postReq)
assert(postRes.getStatusLine.getStatusCode === 200)
val user = User.fromXML(XML.load(postRes.getEntity.getContent))
// GET
val getReq = new HttpGet(s"${rootURL}/${user.id}")
getReq.addHeader(authHeader)
val getRes = HttpClients.createDefault.execute(getReq)
assert(getRes.getStatusLine.getStatusCode === 200)
val gotUser = User.fromXML(XML.load(getRes.getEntity.getContent))
assert(user === gotUser)
}
test("post -> put -> get") { p =>
// POST
val postReq = new HttpPost(rootURL)
postReq.addHeader(authHeader)
val postRes = HttpClients.createDefault.execute(postReq)
assert(postRes.getStatusLine.getStatusCode === 200)
val user = User.fromXML(XML.load(postRes.getEntity.getContent))
assert(user.name !== "hige")
assert(user.email !== "[email protected]")
// PUT
val xml =
<User>
<Name>hige</Name>
<Email>[email protected]</Email>
</User>
val putReq = new HttpPut(s"${rootURL}/${user.id}")
putReq.addHeader(authHeader)
putReq.setEntity(EntityBuilder.create.setText(xml.toString).build)
val putRes = HttpClients.createDefault.execute(putReq)
assert(putRes.getStatusLine.getStatusCode === 200)
// GET
val getReq = new HttpGet(s"${rootURL}/${user.id}")
getReq.addHeader(authHeader)
val getRes = HttpClients.createDefault.execute(getReq)
assert(postRes.getStatusLine.getStatusCode === 200)
val gotUser = User.fromXML(XML.load(getRes.getEntity.getContent))
assert(gotUser.name === "hige")
assert(gotUser.email === "[email protected]")
}
}
| akiradeveloper/fss3 | src/test/scala/akashic/storage/AdminTest.scala | Scala | apache-2.0 | 2,441 |
package com.gh.helper.domain
// import slick.driver.MySQLDriver.simple._
import slick.model.ForeignKeyAction
import slick.driver.MySQLDriver.api._
import scala.concurrent.ExecutionContext.Implicits.global
import java.sql.Timestamp
case class PersonList(id: Option[Long], userId: String, listId: Long, isOwner: Boolean)
class PersonLists(tag: Tag) extends Table[PersonList](tag, "userlist") {
def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
def userId = column[String]("userId", O.Length(64))
def listId = column[Long]("listId")
def isOwner = column[Boolean]("isOwner")
def * = (id.?, userId, listId, isOwner) <> ((PersonList.apply _).tupled, PersonList.unapply)
def user = foreignKey("useruserlist_FK", userId, TableQuery[Users])(_.fbId)
def list = foreignKey("listuserlist_FK", listId, TableQuery[GroceryLists])(_.id)
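// One membership row per (user, list) pair; the unique index prevents duplicates.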
def uniqueConst = index("unique_idx", (userId, listId), unique = true)
}
| saetar/grocery-backend | src/main/scala/com/gh/helper/domain/PersonsLists.scala | Scala | unlicense | 926 |
package com.evojam.mongodb.client.cursor
import scala.concurrent.ExecutionContext
import com.mongodb.ReadPreference
import org.bson.codecs.{ Codec, Encoder }
import com.evojam.mongodb.client.ObservableOperationExecutor
import com.evojam.mongodb.client.model.operation.ListCollectionOperation
private[client] case class ListCollectionsCursor[T: Encoder](
dbName: String,
readPreference: ReadPreference,
executor: ObservableOperationExecutor,
filter: Option[T] = None,
maxTime: Option[Long] = None,
batchSize: Option[Int] = None) extends Cursor {
require(dbName != null, "dbName cannot be null")
require(dbName.nonEmpty, "dbName cannot be empty")
require(readPreference != null, "readPreference cannot be null")
require(filter != null, "filter cannot be null")
require(maxTime != null, "maxTime cannot be null")
require(batchSize != null, "batchSize cannot be null")
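// A negative batch size asks MongoDB for at most that many documents and then
// closes the cursor, which is exactly what a single head() lookup needs.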
override protected def rawHead[R: Codec]()(implicit exc: ExecutionContext) =
cursor(queryOperation.copy(batchSize = Some(-1)))
.head()
override protected def rawForeach[R: Codec](f: R => Unit)(implicit exc: ExecutionContext) =
cursor().foreach(f)
override protected def rawObservable[R: Codec]()(implicit exc: ExecutionContext) =
cursor().observable()
override protected def rawObservable[R: Codec](batchSize: Int)(implicit exc: ExecutionContext) =
cursor().observable(batchSize)
def filter(filter: T): ListCollectionsCursor[T] = {
require(filter != null, "filter cannot be null")
this.copy(filter = Some(filter))
}
def maxTime(time: Long): ListCollectionsCursor[T] = {
require(time >= 0L, "time cannot be negative")
this.copy(maxTime = Some(time))
}
def batchSize(size: Int): ListCollectionsCursor[T] = {
require(size >= 0, "size cannot be negative")
this.copy(batchSize = Some(size))
}
private def cursor[R: Codec](): OperationCursor[R] =
cursor(queryOperation[R])
private def cursor[R: Codec](lco: ListCollectionOperation[T, R]): OperationCursor[R] =
OperationCursor(lco, readPreference, executor)
private def queryOperation[R]()(implicit c: Codec[R]) =
ListCollectionOperation[T, R](dbName, c, filter, batchSize, maxTime)
}
| evojam/mongodb-driver-scala | src/main/scala/com/evojam/mongodb/client/cursor/ListCollectionsCursor.scala | Scala | apache-2.0 | 2,216 |
package com.cloudera.hue.livy.server
import javax.servlet.ServletContext
import com.cloudera.hue.livy.{Utils, Logging, LivyConf, WebServer}
import org.scalatra._
import org.scalatra.servlet.ScalatraListener
object Main {
val SESSION_KIND = "livy-server.session.kind"
val THREAD_SESSION = "thread"
val PROCESS_SESSION = "process"
val YARN_SESSION = "yarn"
def main(args: Array[String]): Unit = {
val livyConf = new LivyConf()
Utils.loadDefaultLivyProperties(livyConf)
val host = livyConf.get("livy.server.host", "0.0.0.0")
val port = livyConf.getInt("livy.server.port", 8998)
val server = new WebServer(host, port)
server.context.setResourceBase("src/main/com/cloudera/hue/livy/server")
server.context.setInitParameter(ScalatraListener.LifeCycleKey, classOf[ScalatraBootstrap].getCanonicalName)
server.context.addEventListener(new ScalatraListener)
server.start()
try {
System.setProperty("livy.server.callback-url", f"http://${server.host}:${server.port}")
} finally {
server.join()
server.stop()
// Make sure to close all our outstanding http requests.
dispatch.Http.shutdown()
}
}
}
class ScalatraBootstrap extends LifeCycle with Logging {
var sessionManager: SessionManager = null
override def init(context: ServletContext): Unit = {
val livyConf = new LivyConf()
val sessionFactoryKind = livyConf.get("livy.server.session.factory", "thread")
info(f"Using $sessionFactoryKind sessions")
val sessionFactory = sessionFactoryKind match {
case "thread" => new ThreadSessionFactory(livyConf)
case "process" => new ProcessSessionFactory(livyConf)
case "yarn" => new YarnSessionFactory(livyConf)
case _ =>
println(f"Unknown session factory: $sessionFactoryKind}")
sys.exit(1)
}
sessionManager = new SessionManager(sessionFactory)
context.mount(new WebApp(sessionManager), "/*")
}
override def destroy(context: ServletContext): Unit = {
if (sessionManager != null) {
sessionManager.shutdown()
}
}
}
| dulems/hue | apps/spark/java/livy-server/src/main/scala/com/cloudera/hue/livy/server/Main.scala | Scala | apache-2.0 | 2,096 |
package mesosphere.marathon.core.flow.impl
import akka.actor.ActorSystem
import akka.testkit.TestActorRef
import mesosphere.marathon.MarathonSpec
import mesosphere.marathon.core.flow.LaunchTokenConfig
import mesosphere.marathon.core.matcher.manager.OfferMatcherManager
import mesosphere.marathon.core.task.bus.{
TaskStatusUpdateTestHelper,
MarathonTaskStatusTestHelper,
TaskStatusObservables
}
import mesosphere.marathon.core.task.bus.TaskStatusObservables.TaskStatusUpdate
import org.mockito.Mockito
import rx.lang.scala.{ Observable, Subject }
import rx.lang.scala.subjects.PublishSubject
class OfferMatcherLaunchTokensActorTest extends MarathonSpec {
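// Every test first verifies the two interactions that happen at actor startup
// (subscribing to task status updates and setting the initial launch tokens).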
test("initially setup tokens") {
Mockito.verify(taskStatusObservables).forAll
Mockito.verify(offerMatcherManager).setLaunchTokens(conf.launchTokens())
}
test("refill on running tasks without health info") {
// startup
Mockito.verify(taskStatusObservables).forAll
Mockito.verify(offerMatcherManager).setLaunchTokens(conf.launchTokens())
allObservable.onNext(TaskStatusUpdateTestHelper.running.wrapped)
Mockito.verify(offerMatcherManager).addLaunchTokens(1)
}
test("refill on running healthy task") {
// startup
Mockito.verify(taskStatusObservables).forAll
Mockito.verify(offerMatcherManager).setLaunchTokens(conf.launchTokens())
allObservable.onNext(TaskStatusUpdateTestHelper.runningHealthy.wrapped)
Mockito.verify(offerMatcherManager).addLaunchTokens(1)
}
test("DO NOT refill on running UNhealthy task") {
// startup
Mockito.verify(taskStatusObservables).forAll
Mockito.verify(offerMatcherManager).setLaunchTokens(conf.launchTokens())
allObservable.onNext(TaskStatusUpdateTestHelper.runningUnhealthy.wrapped)
}
private[this] implicit var actorSystem: ActorSystem = _
private[this] var allObservable: Subject[TaskStatusUpdate] = _
private[this] var conf: LaunchTokenConfig = _
private[this] var taskStatusObservables: TaskStatusObservables = _
private[this] var offerMatcherManager: OfferMatcherManager = _
private[this] var actorRef: TestActorRef[OfferMatcherLaunchTokensActor] = _
before {
actorSystem = ActorSystem()
conf = new LaunchTokenConfig {}
conf.afterInit()
allObservable = PublishSubject[TaskStatusObservables.TaskStatusUpdate]()
taskStatusObservables = mock[TaskStatusObservables]
Mockito.when(taskStatusObservables.forAll).thenReturn(allObservable)
offerMatcherManager = mock[OfferMatcherManager]
actorRef = TestActorRef[OfferMatcherLaunchTokensActor](
OfferMatcherLaunchTokensActor.props(conf, taskStatusObservables, offerMatcherManager)
)
}
after {
Mockito.verifyNoMoreInteractions(taskStatusObservables)
Mockito.verifyNoMoreInteractions(offerMatcherManager)
actorSystem.shutdown()
actorSystem.awaitTermination()
}
}
| EasonYi/marathon | src/test/scala/mesosphere/marathon/core/flow/impl/OfferMatcherLaunchTokensActorTest.scala | Scala | apache-2.0 | 2,866 |
package scala.tools.nsc
package symtab
import org.junit.Assert._
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.runners.JUnit4
import scala.tools.testing.BytecodeTesting
@RunWith(classOf[JUnit4])
class FlagsTest extends BytecodeTesting {
object symbolTable extends SymbolTableForUnitTesting
import symbolTable._
import Flags._
def sym = NoSymbol.newTermSymbol(nme.EMPTY)
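// Evaluates `body` inside a dummy phase whose flagMask is `mask`; flag
// visibility in scalac depends on the current phase's mask.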
def withFlagMask[A](mask: Long)(body: => A): A = enteringPhase(new Phase(NoPhase) {
override def flagMask = mask
def name = ""
def run() = ()
})(body)
def testTimedFlag(flag: Long, test: Symbol => Boolean, enabling: Boolean) = {
assertEquals(withFlagMask(InitialFlags)(test(sym.setFlag(flag))), !enabling)
assertEquals(withFlagMask(InitialFlags | flag)(test(sym.setFlag(flag))), enabling)
}
def testLate(flag: Long, test: Symbol => Boolean) = testTimedFlag(flag, test, enabling = true)
def testNot(flag: Long, test: Symbol => Boolean) = testTimedFlag(flag, test, enabling = false)
@Test
def testTimedFlags(): Unit = {
testNot(PROTECTED | notPROTECTED, _.isProtected)
testNot(PRIVATE | notPRIVATE, _.isPrivate)
assertFalse(withFlagMask(AllFlags)(sym.setFlag(PRIVATE | notPRIVATE).isPrivate))
assertEquals(withFlagMask(InitialFlags)(sym.setFlag(PRIVATE | notPRIVATE).flags & PRIVATE), PRIVATE)
assertEquals(withFlagMask(AllFlags)(sym.setFlag(PRIVATE | notPRIVATE).flags & PRIVATE), 0)
}
@Test
def normalLateOverlap(): Unit = {
// late flags are shifted by LateShift == 47.
// however, the first late flag is lateDEFERRED, which is DEFERRED << 47 == (1 << 4) << 47 == 1 << 51
// the flags from 1 << 47 to 1 << 50 are not late flags. this is ensured by the LateFlags mask.
for (i <- 0 to 3) {
val f = 1L << i
assertEquals(withFlagMask(AllFlags)(sym.setFlag(f << LateShift).flags & f), 0) // not treated as late flag
}
for (i <- 4 to 8) {
val f = 1L << i
assertEquals(withFlagMask(AllFlags)(sym.setFlag(f << LateShift).flags & f), f) // treated as late flag
}
}
@Test
def normalAnti(): Unit = {
for (i <- 0 to 2) {
val f = 1L << i
assertEquals(withFlagMask(AllFlags)(sym.setFlag(f | (f << AntiShift)).flags & f), 0) // negated flags
}
for (i <- 3 to 7) {
val f = 1L << i
assertEquals(withFlagMask(AllFlags)(sym.setFlag(f | (f << AntiShift)).flags & f), f) // not negated
}
}
@Test
def lateAntiCrossCheck(): Unit = {
val allButNegatable = AllFlags & ~(PROTECTED | OVERRIDE | PRIVATE)
val lateable = 0L | DEFERRED | FINAL | INTERFACE | METHOD | MODULE
val lateFlags = lateable << LateShift
val allButLateable = AllFlags & ~lateable
assertEquals(withFlagMask(AllFlags)(sym.setFlag(AllFlags).flags), allButNegatable)
assertEquals(withFlagMask(AllFlags)(sym.setFlag(allButLateable).flags), allButNegatable)
assertEquals(withFlagMask(AllFlags)(sym.setFlag(lateFlags).flags), lateFlags | lateable)
}
@Test
def javaClassMirrorAnnotationFlag(): Unit = {
import scala.reflect.runtime.universe._
val dep = typeOf[java.lang.Deprecated].typeSymbol
assertTrue(dep.isJavaAnnotation && dep.isJava)
}
@Test
def interfaceFlag(): Unit = {
// scala traits are `isInterface` if they have only type defs and abstract methods / fields.
// java interfaces are always `isInterface`.
val scalaCode =
"""package p
|trait T1 {
| import scala.collection
| def m: Int
| val f: Int
| type T <: AnyRef
|}
|trait T2 {
| def m = 1
|}
|trait T3 {
| val f = 1
|}
|trait T4 {
| println()
|}
""".stripMargin
val javaI1 = "package p; interface I1 { int m(); }"
val javaI2 = "package p; interface I2 { default int m() { return 1; } }"
compiler.compileClasses(code = scalaCode, javaCode = (javaI1, "I1.java") :: (javaI2, "I2.java") :: Nil)
import compiler.global.rootMirror._
assert( getRequiredClass("p.T1").isInterface)
assert(!getRequiredClass("p.T2").isInterface)
assert(!getRequiredClass("p.T3").isInterface)
assert(!getRequiredClass("p.T4").isInterface)
assert( getRequiredClass("p.I1").isInterface)
assert( getRequiredClass("p.I2").isInterface)
}
}
| felixmulder/scala | test/junit/scala/tools/nsc/symtab/FlagsTest.scala | Scala | bsd-3-clause | 4,357 |
package scorex.network
import java.net.{InetAddress, InetSocketAddress}
import org.scalacheck.Gen
import org.scalatest.prop.{GeneratorDrivenPropertyChecks, PropertyChecks}
import org.scalatest.{Matchers, PropSpec}
import play.api.libs.json.{JsObject, Json}
import scorex.network.peer.{PeerDatabaseImpl, PeerInfo}
import scorex.settings.Settings
class PeerDatabaseSpecification extends PropSpec with PropertyChecks with GeneratorDrivenPropertyChecks
with Matchers {
object TestSettings extends Settings {
override lazy val settingsJSON: JsObject = Json.obj()
override val filename: String = ""
}
val db = new PeerDatabaseImpl(TestSettings, None)
val pi = PeerInfo(System.currentTimeMillis(), None, None)
val portGen = Gen.choose(1, 0xFFFF)
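// Dotted-quad addresses with each octet in 1..255, i.e. syntactically valid IPv4.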
val hostGen = for {
a1: Int <- Gen.choose(1, 255)
a2: Int <- Gen.choose(1, 255)
a3: Int <- Gen.choose(1, 255)
a4: Int <- Gen.choose(1, 255)
} yield s"$a1.$a2.$a3.$a4"
property("peer blacklisting") {
forAll(hostGen, portGen) { (host: String, port: Int) =>
val address = new InetSocketAddress(InetAddress.getByName(host), port)
db.addOrUpdateKnownPeer(address, pi)
db.knownPeers(false).contains(address) shouldBe true
db.blacklist(address)
db.knownPeers(false).contains(address) shouldBe false
db.blacklisted.contains(address) shouldBe true
}
}
} | alexeykiselev/WavesScorex | scorex-basics/src/test/scala/scorex/network/PeerDatabaseSpecification.scala | Scala | cc0-1.0 | 1,380 |
import scala.math.pow
object main extends App{
/*
* Foreach.
*/
val someNumber = List(-11, -10, -5, 0, 5, 10, 11)
someNumber.foreach((x: Int) => println(pow(x, 2)))
/*
* Partially applied function
* 1st version:
*/
someNumber.foreach(println(_))
// 2nd version
someNumber.foreach(println _)
// Filter
val onlyPositiveNumbers = someNumber.filter((x: Int) => x > 0)
println(onlyPositiveNumbers)
// Filter with placeholder syntax
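// `_ > 0` is shorthand for the anonymous function `x => x > 0`.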
val onlyPositiveNumberPlaceholder = someNumber.filter(_ > 0)
} | arcyfelix/Courses | 18-10-18-Programming-in-Scala-by-Martin-Odersky-Lex-Spoon-and-Bill-Venners/27-ForeachAndFilterAndPartiallyAppliedFunctions/src/main.scala | Scala | apache-2.0 | 533 |
class MotherClass extends MixinWithSymbol {
def foo = 'sym1
}
object Test {
def main(args: Array[String]): Unit = {
(new MotherClass).symbolFromTrait
}
}
| yusuke2255/dotty | tests/run/t8933b/Test.scala | Scala | bsd-3-clause | 165 |
import sbt._
object Dependencies {
object Versions {
val akka = "2.3.4"
val apacheCommonsEmail = "1.3.2"
val aspectj = "1.7.4"
val scalajHttp = "0.3.16"
val typesafeConfig = "1.2.0"
val scalaTest = "2.2.0"
val scalaMock = "3.1.RC1"
val scalaCheck = "1.11.4"
}
val akkaActor = "com.typesafe.akka" %% "akka-actor" % Versions.akka
val akkaTest = "com.typesafe.akka" %% "akka-testkit" % Versions.akka
val apacheCommonsEMail = "org.apache.commons" % "commons-email" % Versions.apacheCommonsEmail
val scalajHttp = "org.scalaj" %% "scalaj-http" % Versions.scalajHttp
val typesafeConfig = "com.typesafe" % "config" % Versions.typesafeConfig
val scalaTest = "org.scalatest" %% "scalatest" % Versions.scalaTest
val scalaMock = "org.scalamock" %% "scalamock-scalatest-support" % Versions.scalaMock
val scalaCheck = "org.scalacheck" %% "scalacheck" % Versions.scalaCheck
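// Helpers that tag a group of modules with a single sbt configuration scope.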
def compile (modules: ModuleID*): Seq[ModuleID] = modules map (_ % "compile")
def provided (modules: ModuleID*): Seq[ModuleID] = modules map (_ % "provided")
def test (modules: ModuleID*): Seq[ModuleID] = modules map (_ % "test")
def runtime (modules: ModuleID*): Seq[ModuleID] = modules map (_ % "runtime")
def container (modules: ModuleID*): Seq[ModuleID] = modules map (_ % "container")
}
| Coiney/akka-mailer | project/Dependencies.scala | Scala | bsd-3-clause | 1,625 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.spark.api.java
import java.util
import java.util.AbstractMap.SimpleEntry
import java.util.Map.Entry
import org.apache.hadoop.conf.Configuration
import org.apache.spark.api.java.JavaRDD._
import org.apache.spark.api.java._
import org.apache.spark.rdd.RDD
import org.geotools.data.Query
import org.locationtech.geomesa.spark.{GeoMesaSpark, Schema, SpatialRDD, SpatialRDDProvider}
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
object JavaGeoMesaSpark {
def apply(params: java.util.Map[String, _ <: java.io.Serializable]) =
JavaSpatialRDDProvider(GeoMesaSpark.apply(params))
}
object JavaSpatialRDDProvider {
def apply(provider: SpatialRDDProvider) = new JavaSpatialRDDProvider(provider)
}
class JavaSpatialRDDProvider(provider: SpatialRDDProvider) {
import scala.collection.JavaConverters._
def rdd(
conf: Configuration,
jsc: JavaSparkContext,
params: java.util.Map[String, String],
query: Query): JavaSpatialRDD =
provider.rdd(conf, jsc.sc, params.asScala.toMap, query)
def save(jrdd: JavaRDD[SimpleFeature], params: java.util.Map[String, String], typeName: String): Unit =
provider.save(jrdd, params.asScala.toMap, typeName)
}
object JavaSpatialRDD {
import scala.collection.JavaConverters._
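// Implicit conversions that reshape the SimpleFeatures of a SpatialRDD into
// Java-friendly collections (value lists, key/value entries, maps, GeoJSON).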
def apply(rdd: SpatialRDD): JavaSpatialRDD = new JavaSpatialRDD(rdd)
implicit def toJavaSpatialRDD(rdd: SpatialRDD):JavaSpatialRDD = JavaSpatialRDD(rdd)
implicit def toValueList(in: RDD[SimpleFeature] with Schema): RDD[java.util.List[AnyRef]] =
in.map(_.getAttributes)
implicit def toKeyValueEntryList(in: RDD[SimpleFeature] with Schema): RDD[java.util.List[Entry[String, AnyRef]]] = {
in.map { sf =>
val entries = new java.util.ArrayList[Entry[String, AnyRef]](sf.getAttributeCount)
sf.getProperties.asScala.foreach(p => entries.add(new SimpleEntry(p.getName.getLocalPart, p.getValue)))
entries
}
}
implicit def toKeyValueArrayList(in: RDD[SimpleFeature] with Schema): RDD[java.util.List[Array[AnyRef]]] = {
in.map { sf =>
val entries = new java.util.ArrayList[Array[AnyRef]](sf.getAttributeCount)
sf.getProperties.asScala.foreach(p => entries.add(Array[AnyRef](p.getName.getLocalPart, p.getValue)))
entries
}
}
implicit def toKeyValueJavaMap(in: RDD[SimpleFeature] with Schema): RDD[java.util.Map[String, AnyRef]] =
SpatialRDD.toKeyValueMap(in).map(_.asJava)
implicit def toGeoJSONString(in: RDD[SimpleFeature] with Schema): RDD[String] =
SpatialRDD.toGeoJSONString(in)
}
class JavaSpatialRDD(val srdd: SpatialRDD) extends JavaRDD[SimpleFeature](srdd) with Schema {
import JavaSpatialRDD._
def schema: SimpleFeatureType = srdd.schema
def asValueList: JavaRDD[util.List[Object]] = toValueList(srdd)
def asKeyValueEntryList: JavaRDD[util.List[util.Map.Entry[String, Object]]] = toKeyValueEntryList(srdd)
def asKeyValueArrayList: JavaRDD[util.List[Array[AnyRef]]] = toKeyValueArrayList(srdd)
def asKeyValueMap: JavaRDD[util.Map[String, Object]] = toKeyValueJavaMap(srdd)
def asGeoJSONString: JavaRDD[String] = toGeoJSONString(srdd)
@deprecated
def asKeyValueList = asKeyValueEntryList
}
| elahrvivaz/geomesa | geomesa-spark/geomesa-spark-core/src/main/scala/org/locationtech/geomesa/spark/api/java/JavaGeoMesaSpark.scala | Scala | apache-2.0 | 3,784 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.holdenkarau.spark.testing
import scala.collection.mutable
import scala.language.implicitConversions
import scala.reflect.ClassTag
import org.apache.spark.Logging
import org.apache.spark.streaming.dstream.DStream
import org.scalactic.Equality
import org.scalatest.{BeforeAndAfterAll, Suite}
/**
* This is the base trait for Spark Streaming testsuites. This provides basic functionality
* to run user-defined set of input on user-defined stream operations, and verify the output.
*/
trait StreamingSuiteBase extends BeforeAndAfterAll with Logging
with StreamingSuiteCommon with SharedSparkContext {
self: Suite =>
// Default before function for any streaming test suite. Override this
// if you want to add your stuff to "before" (i.e., don't call before { } )
override def beforeAll() {
setupClock()
super.beforeAll()
}
// Default after function for any streaming test suite. Override this
// if you want to add your stuff to "after" (i.e., don't call after { } )
override def afterAll() {
System.clearProperty("spark.streaming.clock")
super.afterAll()
}
/**
* Verify whether the output values after running a DStream operation
* is same as the expected output values, by comparing the output
* collections either as lists (order matters) or sets (order does not matter)
*
* @param ordered Compare output values with expected output values
* within the same output batch ordered or unordered.
* Comparing doubles may not work well in case of unordered.
*/
def verifyOutput[V: ClassTag](
output: Seq[Seq[V]],
expectedOutput: Seq[Seq[V]],
ordered: Boolean
) (implicit equality: Equality[V]): Unit = {
logInfo("--------------------------------")
logInfo("output.size = " + output.size)
logInfo("output")
output.foreach(x => logInfo("[" + x.mkString(",") + "]"))
logInfo("expected output.size = " + expectedOutput.size)
logInfo("expected output")
expectedOutput.foreach(x => logInfo("[" + x.mkString(",") + "]"))
logInfo("--------------------------------")
// Match the output with the expected output
assert(output.size === expectedOutput.size, "Number of outputs do not match")
if (ordered) {
for (i <- output.indices)
equalsOrdered(output(i), expectedOutput(i))
} else {
for (i <- output.indices)
equalsUnordered(output(i), expectedOutput(i))
}
logInfo("Output verified successfully")
}
private def equalsUnordered[V](output: Seq[V], expected: Seq[V])(implicit equality: Equality[V]) = {
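// Greedy matching: each output element claims the first not-yet-claimed
// expected element it compares equal to; fail fast when none remains.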
assert(output.length === expected.length)
val length = output.length
val set = new mutable.BitSet(length)
for (i <- 0 until length) {
val equalElements = (0 until length).filter(x => (!set.contains(x) && output(i) === expected(x))).take(1)
if (equalElements.isEmpty)
assert(output === expected) // only to show the two unequal lists to user
set += equalElements(0)
}
}
private def equalsOrdered[V](output: Seq[V], expected: Seq[V])(implicit equality: Equality[V]) = {
assert(output.length === expected.length)
for (i <- output.indices)
assert(output(i) === expected(i))
}
// Wrappers with ordered = false
def testOperation[U: ClassTag, V: ClassTag](
input: Seq[Seq[U]],
operation: DStream[U] => DStream[V],
expectedOutput: Seq[Seq[V]]
) (implicit equality: Equality[V]): Unit = {
testOperation(input, operation, expectedOutput, false)
}
def testOperation[U: ClassTag, V: ClassTag, W: ClassTag](
input1: Seq[Seq[U]],
input2: Seq[Seq[V]],
operation: (DStream[U], DStream[V]) => DStream[W],
expectedOutput: Seq[Seq[W]]
) (implicit equality: Equality[W]): Unit = {
testOperation(input1, input2, operation, expectedOutput, false)
}
/**
* Test a unary DStream operation with a list of inputs, running as many
* batches as there are input collections.
* Each input batch is a List of values; pass null to simulate an empty batch.
*
* @param input Sequence of input collections
* @param operation Binary DStream operation to be applied to the 2 inputs
* @param expectedOutput Sequence of expected output collections
* @param ordered Compare output values with expected output values
* within the same output batch ordered or unordered.
* Comparing doubles may not work well in case of unordered.
*/
def testOperation[U: ClassTag, V: ClassTag](
input: Seq[Seq[U]],
operation: DStream[U] => DStream[V],
expectedOutput: Seq[Seq[V]],
ordered: Boolean
) (implicit equality: Equality[V]): Unit = {
val numBatches = input.size
withOutputAndStreamingContext(setupStreams[U, V](input, operation)) { (outputStream, ssc) =>
val output: Seq[Seq[V]] = runStreams[V](outputStream, ssc, numBatches, expectedOutput.size)
verifyOutput[V](output, expectedOutput, ordered)
}
}
/**
* Test a binary DStream operation with two lists of inputs, running as many
* batches as there are input collections. The two input lists must have the same size.
* Each input batch is a List of values; pass null to simulate an empty batch.
*
* @param input1 First sequence of input collections
* @param input2 Second sequence of input collections
* @param operation Binary DStream operation to be applied to the 2 inputs
* @param expectedOutput Sequence of expected output collections
* @param ordered Compare output values with expected output values
* within the same output batch, ordered or unordered.
* Comparing doubles may not work well in case of unordered.
*/
def testOperation[U: ClassTag, V: ClassTag, W: ClassTag](
input1: Seq[Seq[U]],
input2: Seq[Seq[V]],
operation: (DStream[U], DStream[V]) => DStream[W],
expectedOutput: Seq[Seq[W]],
ordered: Boolean
) (implicit equality: Equality[W]): Unit = {
assert(input1.length === input2.length, "Length of the input lists are not equal")
val numBatches = input1.size
withOutputAndStreamingContext(setupStreams[U, V, W](input1, input2, operation)) {
(outputStream, ssc) =>
val output = runStreams[W](outputStream, ssc, numBatches, expectedOutput.size)
verifyOutput[W](output, expectedOutput, ordered)
}
}
}
| mahmoudhanafy/spark-testing-base | src/main/1.3/scala/com/holdenkarau/spark/testing/StreamingSuiteBase.scala | Scala | apache-2.0 | 7,367 |
package uima.rs.en
import org.apache.uima.jcas.JCas
import uima.rs.MultiLingualQuestion
import us.feliscat.m17n.English
import us.feliscat.types.Question
import us.feliscat.util.uima.JCasID
/**
* <pre>
* Created on 2017/03/21.
* </pre>
*
* @author K.Sakamoto
*/
class EnglishQuestion(casId: JCasID,
aJCas: JCas,
question: Question) extends MultiLingualQuestion(casId, aJCas, question) with English | ktr-skmt/FelisCatusZero-multilingual | src/main/scala/uima/rs/en/EnglishQuestion.scala | Scala | apache-2.0 | 455 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.master
import org.apache.spark.deploy.ApplicationDescription
import java.util.Date
import akka.actor.ActorRef
import scala.collection.mutable
private[spark] class ApplicationInfo(
val startTime: Long,
val id: String,
val desc: ApplicationDescription,
val submitDate: Date,
val driver: ActorRef,
val appUiUrl: String)
extends Serializable {
@transient var state: ApplicationState.Value = _
@transient var executors: mutable.HashMap[Int, ExecutorInfo] = _
@transient var coresGranted: Int = _
@transient var endTime: Long = _
@transient var appSource: ApplicationSource = _
@transient private var nextExecutorId: Int = _
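// Transient fields are not serialized, so this state is rebuilt both at
// construction time and after Java deserialization (see readObject).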
init()
private def readObject(in: java.io.ObjectInputStream) : Unit = {
in.defaultReadObject()
init()
}
private def init() {
state = ApplicationState.WAITING
executors = new mutable.HashMap[Int, ExecutorInfo]
coresGranted = 0
endTime = -1L
appSource = new ApplicationSource(this)
nextExecutorId = 0
}
private def newExecutorId(useID: Option[Int] = None): Int = {
useID match {
case Some(id) =>
nextExecutorId = math.max(nextExecutorId, id + 1)
id
case None =>
val id = nextExecutorId
nextExecutorId += 1
id
}
}
def addExecutor(worker: WorkerInfo, cores: Int, useID: Option[Int] = None): ExecutorInfo = {
val exec = new ExecutorInfo(newExecutorId(useID), this, worker, cores, desc.memoryPerSlave)
executors(exec.id) = exec
coresGranted += cores
exec
}
def removeExecutor(exec: ExecutorInfo) {
if (executors.contains(exec.id)) {
executors -= exec.id
coresGranted -= exec.cores
}
}
def coresLeft: Int = desc.maxCores - coresGranted
private var _retryCount = 0
def retryCount = _retryCount
def incrementRetryCount = {
_retryCount += 1
_retryCount
}
def markFinished(endState: ApplicationState.Value) {
state = endState
endTime = System.currentTimeMillis()
}
def duration: Long = {
if (endTime != -1) {
endTime - startTime
} else {
System.currentTimeMillis() - startTime
}
}
}
| mkolod/incubator-spark | core/src/main/scala/org/apache/spark/deploy/master/ApplicationInfo.scala | Scala | apache-2.0 | 2,988 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.dataset
import org.apache.calcite.plan._
class DataSetConvention extends Convention {
override def toString: String = getName
override def useAbstractConvertersForConversion(
fromTraits: RelTraitSet,
toTraits: RelTraitSet): Boolean = false
override def canConvertConvention(toConvention: Convention): Boolean = false
def getInterface: Class[_] = classOf[DataSetRel]
def getName: String = "DATASET"
def getTraitDef: RelTraitDef[_ <: RelTrait] = ConventionTraitDef.INSTANCE
def satisfies(`trait`: RelTrait): Boolean = this eq `trait`
def register(planner: RelOptPlanner): Unit = { }
}
object DataSetConvention {
val INSTANCE = new DataSetConvention
}
| DieBauer/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/plan/nodes/dataset/DataSetConvention.scala | Scala | apache-2.0 | 1,536 |
package com.twitter.finagle
import com.twitter.finagle.dispatch.{GenSerialClientDispatcher, SerialClientDispatcher, SerialServerDispatcher}
import com.twitter.finagle.netty3.transport.ChannelTransport
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.tracing.TraceInitializerFilter
import com.twitter.finagle.transport.Transport
import com.twitter.util.Closable
import java.net.{InetSocketAddress, SocketAddress}
import org.jboss.netty.channel.{Channel, ChannelPipeline, ChannelPipelineFactory}
/**
* Codecs provide protocol encoding and decoding via netty pipelines
* as well as a standard filter stack that is applied to services
* from this codec.
*/
trait Codec[Req, Rep] {
/**
* The pipeline factory that implements the protocol.
*/
def pipelineFactory: ChannelPipelineFactory
/* Note: all of the below interfaces are scheduled for deprecation in favor of
* clients/servers
*/
/**
* Prepare a factory for usage with the codec. Used to allow codec
* modifications to the service at the top of the network stack.
*/
def prepareServiceFactory(
underlying: ServiceFactory[Req, Rep]
): ServiceFactory[Req, Rep] =
underlying
/**
* Prepare a connection factory. Used to allow codec modifications
* to the service at the bottom of the stack (connection level).
*/
final def prepareConnFactory(underlying: ServiceFactory[Req, Rep]): ServiceFactory[Req, Rep] =
prepareConnFactory(underlying, Stack.Params.empty)
def prepareConnFactory(
underlying: ServiceFactory[Req, Rep],
params: Stack.Params
): ServiceFactory[Req, Rep] = underlying
/**
* Note: the below ("raw") interfaces are low level, and require a
* good understanding of finagle internals to implement correctly.
* Proceed with care.
*/
def newClientTransport(ch: Channel, statsReceiver: StatsReceiver): Transport[Any, Any] =
new ChannelTransport(ch)
final def newClientDispatcher(transport: Transport[Any, Any]): Service[Req, Rep] =
newClientDispatcher(transport, Stack.Params.empty)
def newClientDispatcher(
transport: Transport[Any, Any],
params: Stack.Params
): Service[Req, Rep] = {
// In order to not break the Netty 3 API, we provide some 'alternative facts'
// and continue without our dynamic check
val clazz = classOf[Any].asInstanceOf[Class[Rep]]
new SerialClientDispatcher(
Transport.cast[Req, Rep](clazz, transport),
params[param.Stats].statsReceiver.scope(GenSerialClientDispatcher.StatsScope)
)
}
def newServerDispatcher(
transport: Transport[Any, Any],
service: Service[Req, Rep]
): Closable = {
// In order to not break the Netty 3 API, we provide some 'alternative facts'
// and continue without our dynamic check
val clazz = classOf[Any].asInstanceOf[Class[Req]]
new SerialServerDispatcher[Req, Rep](Transport.cast[Rep, Req](clazz, transport), service)
}
/**
* Is this Codec OK for failfast? This is a temporary hack to
* disable failFast for codecs for which it isn't well-behaved.
*/
def failFastOk = true
/**
* A hack to allow for overriding the TraceInitializerFilter when using
* Client/Server Builders rather than stacks.
*/
def newTraceInitializer: Stackable[ServiceFactory[Req, Rep]] = TraceInitializerFilter.clientModule[Req, Rep]
/**
* A protocol library name to use for displaying which protocol library this client or server is using.
*/
def protocolLibraryName: String = "not-specified"
}
/**
* An abstract class version of the above for java compatibility.
*/
abstract class AbstractCodec[Req, Rep] extends Codec[Req, Rep]
object Codec {
def ofPipelineFactory[Req, Rep](makePipeline: => ChannelPipeline) =
new Codec[Req, Rep] {
def pipelineFactory = new ChannelPipelineFactory {
def getPipeline = makePipeline
}
}
def ofPipeline[Req, Rep](p: ChannelPipeline) = new Codec[Req, Rep] {
def pipelineFactory = new ChannelPipelineFactory {
def getPipeline = p
}
}
}
/**
* Codec factories create codecs given some configuration.
*/
/**
* Clients
*/
case class ClientCodecConfig(serviceName: String)
/**
* Servers
*/
case class ServerCodecConfig(serviceName: String, boundAddress: SocketAddress) {
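// Non-inet addresses (e.g. local sockets) fall back to a wildcard ephemeral address.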
def boundInetSocketAddress = boundAddress match {
case ia: InetSocketAddress => ia
case _ => new InetSocketAddress(0)
}
}
/**
* A combined codec factory provides both client and server codec
* factories in one (when available).
*/
trait CodecFactory[Req, Rep] {
type Client = ClientCodecConfig => Codec[Req, Rep]
type Server = ServerCodecConfig => Codec[Req, Rep]
def client: Client
def server: Server
/**
* A protocol library name to use for displaying which protocol library this client or server is using.
*/
def protocolLibraryName: String = "not-specified"
}
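// A hedged sketch of a CodecFactory implementation; `MyCodec` stands in for a
// hypothetical Codec[String, String] and is not defined in this file:
//
//   object MyProtocol extends CodecFactory[String, String] {
//     def client: Client = config => new MyCodec(config.serviceName)
//     def server: Server = config => new MyCodec(config.serviceName)
//     override def protocolLibraryName: String = "my-protocol"
//   }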
| spockz/finagle | finagle-core/src/main/scala/com/twitter/finagle/Codec.scala | Scala | apache-2.0 | 4,888 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.loadBalancer
import akka.actor.{ActorRef, ActorSystem, Props}
import akka.stream.ActorMaterializer
import org.apache.openwhisk.common._
import org.apache.openwhisk.core.WhiskConfig._
import org.apache.openwhisk.core.connector._
import org.apache.openwhisk.core.containerpool.ContainerPoolConfig
import org.apache.openwhisk.core.entity.ControllerInstanceId
import org.apache.openwhisk.core.entity._
import org.apache.openwhisk.core.invoker.InvokerProvider
import org.apache.openwhisk.core.{ConfigKeys, WhiskConfig}
import org.apache.openwhisk.spi.SpiLoader
import org.apache.openwhisk.utils.ExecutionContextFactory
import pureconfig._
import pureconfig.generic.auto._
import org.apache.openwhisk.core.entity.size._
import scala.concurrent.Future
/**
 * Lean load balancer implementation.
 *
 * Communicates with the invoker directly, without Kafka in the middle. The invoker does not
 * exist as a separate entity; it is built together with the controller. Uses
 * LeanMessagingProvider to replace Kafka with an in-memory queue.
*/
class LeanBalancer(config: WhiskConfig,
feedFactory: FeedFactory,
controllerInstance: ControllerInstanceId,
implicit val messagingProvider: MessagingProvider = SpiLoader.get[MessagingProvider])(
implicit actorSystem: ActorSystem,
logging: Logging,
materializer: ActorMaterializer)
extends CommonLoadBalancer(config, feedFactory, controllerInstance) {
  /** Load balancer interface methods */
override def invokerHealth(): Future[IndexedSeq[InvokerHealth]] = Future.successful(IndexedSeq.empty[InvokerHealth])
override def clusterSize: Int = 1
val poolConfig: ContainerPoolConfig = loadConfigOrThrow[ContainerPoolConfig](ConfigKeys.containerPool)
val invokerName = InvokerInstanceId(0, None, None, poolConfig.userMemory)
/** 1. Publish a message to the loadbalancer */
override def publish(action: ExecutableWhiskActionMetaData, msg: ActivationMessage)(
implicit transid: TransactionId): Future[Future[Either[ActivationId, WhiskActivation]]] = {
    /** 2. Update local state with the activation to be scheduled. */
val activationResult = setupActivation(msg, action, invokerName)
sendActivationToInvoker(messageProducer, msg, invokerName).map(_ => activationResult)
}
/** Creates an invoker for executing user actions. There is only one invoker in the lean model. */
private def makeALocalThreadedInvoker(): Unit = {
implicit val ec = ExecutionContextFactory.makeCachedThreadPoolExecutionContext()
val limitConfig: ConcurrencyLimitConfig = loadConfigOrThrow[ConcurrencyLimitConfig](ConfigKeys.concurrencyLimit)
SpiLoader.get[InvokerProvider].instance(config, invokerName, messageProducer, poolConfig, limitConfig)
}
makeALocalThreadedInvoker()
override protected val invokerPool: ActorRef = actorSystem.actorOf(Props.empty)
override protected def releaseInvoker(invoker: InvokerInstanceId, entry: ActivationEntry) = {
// Currently do nothing
}
override protected def emitMetrics() = {
super.emitMetrics()
}
}
object LeanBalancer extends LoadBalancerProvider {
override def instance(whiskConfig: WhiskConfig, instance: ControllerInstanceId)(
implicit actorSystem: ActorSystem,
logging: Logging,
materializer: ActorMaterializer): LoadBalancer = {
new LeanBalancer(whiskConfig, createFeedFactory(whiskConfig, instance), instance)
}
def requiredProperties =
ExecManifest.requiredProperties ++
wskApiHost
}
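// Sketch of how this provider is typically selected via the SPI mechanism; the
// configuration block below is an assumed deployment setting, not defined here:
//
//   whisk.spi {
//     LoadBalancerProvider = org.apache.openwhisk.core.loadBalancer.LeanBalancer
//   }
//
// SpiLoader.get[LoadBalancerProvider] then resolves to the object above, and the
// controller calls its instance(...) method to construct the balancer.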
| jasonpet/openwhisk | core/controller/src/main/scala/org/apache/openwhisk/core/loadBalancer/LeanBalancer.scala | Scala | apache-2.0 | 4,338 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.history
import java.io.{FileNotFoundException, IOException, OutputStream}
import java.util.UUID
import java.util.concurrent.{Executors, ExecutorService, Future, TimeUnit}
import java.util.zip.{ZipEntry, ZipOutputStream}
import scala.collection.mutable
import scala.xml.Node
import com.google.common.io.ByteStreams
import com.google.common.util.concurrent.{MoreExecutors, ThreadFactoryBuilder}
import org.apache.hadoop.fs.{FileStatus, FileSystem, Path}
import org.apache.hadoop.hdfs.DistributedFileSystem
import org.apache.hadoop.hdfs.protocol.HdfsConstants
import org.apache.hadoop.security.AccessControlException
import org.apache.spark.{SecurityManager, SparkConf, SparkException}
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.Logging
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.ReplayListenerBus._
import org.apache.spark.ui.SparkUI
import org.apache.spark.util.{Clock, SystemClock, ThreadUtils, Utils}
/**
* A class that provides application history from event logs stored in the file system.
* This provider checks for new finished applications in the background periodically and
* renders the history application UI by parsing the associated event logs.
*
* == How new and updated attempts are detected ==
*
* - New attempts are detected in [[checkForLogs]]: the log dir is scanned, and any
* entries in the log dir whose modification time is greater than the last scan time
* are considered new or updated. These are replayed to create a new [[FsApplicationAttemptInfo]]
* entry and update or create a matching [[FsApplicationHistoryInfo]] element in the list
* of applications.
* - Updated attempts are also found in [[checkForLogs]] -- if the attempt's log file has grown, the
* [[FsApplicationAttemptInfo]] is replaced by another one with a larger log size.
* - When [[updateProbe()]] is invoked to check if a loaded [[SparkUI]]
* instance is out of date, the log size of the cached instance is checked against the app last
* loaded by [[checkForLogs]].
*
* The use of log size, rather than simply relying on modification times, is needed to
* address the following issues
* - some filesystems do not appear to update the `modtime` value whenever data is flushed to
* an open file output stream. Changes to the history may not be picked up.
* - the granularity of the `modtime` field may be 2+ seconds. Rapid changes to the FS can be
* missed.
*
* Tracking filesize works given the following invariant: the logs get bigger
* as new events are added. If a format was used in which this did not hold, the mechanism would
* break. Simple streaming of JSON-formatted events, as is implemented today, implicitly
* maintains this invariant.
*/
private[history] class FsHistoryProvider(conf: SparkConf, clock: Clock)
extends ApplicationHistoryProvider with Logging {
def this(conf: SparkConf) = {
this(conf, new SystemClock())
}
import FsHistoryProvider._
// Interval between safemode checks.
private val SAFEMODE_CHECK_INTERVAL_S = conf.getTimeAsSeconds(
"spark.history.fs.safemodeCheck.interval", "5s")
// Interval between each check for event log updates
private val UPDATE_INTERVAL_S = conf.getTimeAsSeconds("spark.history.fs.update.interval", "10s")
// Interval between each cleaner checks for event logs to delete
private val CLEAN_INTERVAL_S = conf.getTimeAsSeconds("spark.history.fs.cleaner.interval", "1d")
// Number of threads used to replay event logs.
private val NUM_PROCESSING_THREADS = conf.getInt(SPARK_HISTORY_FS_NUM_REPLAY_THREADS,
Math.ceil(Runtime.getRuntime.availableProcessors() / 4f).toInt)
private val logDir = conf.getOption("spark.history.fs.logDirectory")
.map { d => Utils.resolveURI(d).toString }
.getOrElse(DEFAULT_LOG_DIR)
private val hadoopConf = SparkHadoopUtil.get.newConfiguration(conf)
private val fs = Utils.getHadoopFileSystem(logDir, hadoopConf)
// Used by check event thread and clean log thread.
// Scheduled thread pool size must be one, otherwise it will have concurrent issues about fs
// and applications between check task and clean task.
private val pool = Executors.newScheduledThreadPool(1, new ThreadFactoryBuilder()
.setNameFormat("spark-history-task-%d").setDaemon(true).build())
// The modification time of the newest log detected during the last scan. Currently only
  // used for logging messages (logs are re-scanned based on file size, rather than modtime)
private val lastScanTime = new java.util.concurrent.atomic.AtomicLong(-1)
// Mapping of application IDs to their metadata, in descending end time order. Apps are inserted
// into the map in order, so the LinkedHashMap maintains the correct ordering.
@volatile private var applications: mutable.LinkedHashMap[String, FsApplicationHistoryInfo]
= new mutable.LinkedHashMap()
val fileToAppInfo = new mutable.HashMap[Path, FsApplicationAttemptInfo]()
// List of application logs to be deleted by event log cleaner.
private var attemptsToClean = new mutable.ListBuffer[FsApplicationAttemptInfo]
private val pendingReplayTasksCount = new java.util.concurrent.atomic.AtomicInteger(0)
/**
* Return a runnable that performs the given operation on the event logs.
* This operation is expected to be executed periodically.
*/
private def getRunner(operateFun: () => Unit): Runnable = {
new Runnable() {
override def run(): Unit = Utils.tryOrExit {
operateFun()
}
}
}
/**
* Fixed size thread pool to fetch and parse log files.
*/
private val replayExecutor: ExecutorService = {
if (!conf.contains("spark.testing")) {
ThreadUtils.newDaemonFixedThreadPool(NUM_PROCESSING_THREADS, "log-replay-executor")
} else {
MoreExecutors.sameThreadExecutor()
}
}
// Conf option used for testing the initialization code.
val initThread = initialize()
private[history] def initialize(): Thread = {
if (!isFsInSafeMode()) {
startPolling()
null
} else {
startSafeModeCheckThread(None)
}
}
private[history] def startSafeModeCheckThread(
errorHandler: Option[Thread.UncaughtExceptionHandler]): Thread = {
// Cannot probe anything while the FS is in safe mode, so spawn a new thread that will wait
// for the FS to leave safe mode before enabling polling. This allows the main history server
// UI to be shown (so that the user can see the HDFS status).
val initThread = new Thread(new Runnable() {
override def run(): Unit = {
try {
while (isFsInSafeMode()) {
logInfo("HDFS is still in safe mode. Waiting...")
val deadline = clock.getTimeMillis() +
TimeUnit.SECONDS.toMillis(SAFEMODE_CHECK_INTERVAL_S)
clock.waitTillTime(deadline)
}
startPolling()
} catch {
case _: InterruptedException =>
}
}
})
initThread.setDaemon(true)
initThread.setName(s"${getClass().getSimpleName()}-init")
initThread.setUncaughtExceptionHandler(errorHandler.getOrElse(
new Thread.UncaughtExceptionHandler() {
override def uncaughtException(t: Thread, e: Throwable): Unit = {
logError("Error initializing FsHistoryProvider.", e)
System.exit(1)
}
}))
initThread.start()
initThread
}
private def startPolling(): Unit = {
// Validate the log directory.
val path = new Path(logDir)
try {
if (!fs.getFileStatus(path).isDirectory) {
throw new IllegalArgumentException(
"Logging directory specified is not a directory: %s".format(logDir))
}
} catch {
case f: FileNotFoundException =>
var msg = s"Log directory specified does not exist: $logDir"
if (logDir == DEFAULT_LOG_DIR) {
msg += " Did you configure the correct one through spark.history.fs.logDirectory?"
}
throw new FileNotFoundException(msg).initCause(f)
}
// Disable the background thread during tests.
if (!conf.contains("spark.testing")) {
// A task that periodically checks for event log updates on disk.
logDebug(s"Scheduling update thread every $UPDATE_INTERVAL_S seconds")
pool.scheduleWithFixedDelay(getRunner(checkForLogs), 0, UPDATE_INTERVAL_S, TimeUnit.SECONDS)
if (conf.getBoolean("spark.history.fs.cleaner.enabled", false)) {
// A task that periodically cleans event logs on disk.
pool.scheduleWithFixedDelay(getRunner(cleanLogs), 0, CLEAN_INTERVAL_S, TimeUnit.SECONDS)
}
} else {
logDebug("Background update thread disabled for testing")
}
}
override def getListing(): Iterator[FsApplicationHistoryInfo] = applications.values.iterator
override def getApplicationInfo(appId: String): Option[FsApplicationHistoryInfo] = {
applications.get(appId)
}
override def getEventLogsUnderProcess(): Int = pendingReplayTasksCount.get()
override def getLastUpdatedTime(): Long = lastScanTime.get()
override def getAppUI(appId: String, attemptId: Option[String]): Option[LoadedAppUI] = {
try {
applications.get(appId).flatMap { appInfo =>
appInfo.attempts.find(_.attemptId == attemptId).flatMap { attempt =>
val replayBus = new ReplayListenerBus()
val ui = {
val conf = this.conf.clone()
val appSecManager = new SecurityManager(conf)
SparkUI.createHistoryUI(conf, replayBus, appSecManager, appInfo.name,
HistoryServer.getAttemptURI(appId, attempt.attemptId), attempt.startTime)
// Do not call ui.bind() to avoid creating a new server for each application
}
val fileStatus = fs.getFileStatus(new Path(logDir, attempt.logPath))
val appListener = replay(fileStatus, isApplicationCompleted(fileStatus), replayBus)
if (appListener.appId.isDefined) {
val uiAclsEnabled = conf.getBoolean("spark.history.ui.acls.enable", false)
ui.getSecurityManager.setAcls(uiAclsEnabled)
// make sure to set admin acls before view acls so they are properly picked up
ui.getSecurityManager.setAdminAcls(appListener.adminAcls.getOrElse(""))
ui.getSecurityManager.setViewAcls(attempt.sparkUser,
appListener.viewAcls.getOrElse(""))
ui.getSecurityManager.setAdminAclsGroups(appListener.adminAclsGroups.getOrElse(""))
ui.getSecurityManager.setViewAclsGroups(appListener.viewAclsGroups.getOrElse(""))
Some(LoadedAppUI(ui, updateProbe(appId, attemptId, attempt.fileSize)))
} else {
None
}
}
}
} catch {
case e: FileNotFoundException => None
}
}
override def getEmptyListingHtml(): Seq[Node] = {
<p>
Did you specify the correct logging directory? Please verify your setting of
<span style="font-style:italic">spark.history.fs.logDirectory</span>
listed above and whether you have the permissions to access it.
<br/>
It is also possible that your application did not run to
completion or did not stop the SparkContext.
</p>
}
override def getConfig(): Map[String, String] = {
val safeMode = if (isFsInSafeMode()) {
Map("HDFS State" -> "In safe mode, application logs not available.")
} else {
Map()
}
Map("Event log directory" -> logDir.toString) ++ safeMode
}
override def stop(): Unit = {
if (initThread != null && initThread.isAlive()) {
initThread.interrupt()
initThread.join()
}
}
/**
* Builds the application list based on the current contents of the log directory.
* Tries to reuse as much of the data already in memory as possible, by not reading
* applications that haven't been updated since last time the logs were checked.
*/
private[history] def checkForLogs(): Unit = {
try {
val newLastScanTime = getNewLastScanTime()
logDebug(s"Scanning $logDir with lastScanTime==$lastScanTime")
val statusList = Option(fs.listStatus(new Path(logDir))).map(_.toSeq)
.getOrElse(Seq[FileStatus]())
// scan for modified applications, replay and merge them
val logInfos: Seq[FileStatus] = statusList
.filter { entry =>
try {
val prevFileSize = fileToAppInfo.get(entry.getPath()).map{_.fileSize}.getOrElse(0L)
!entry.isDirectory() &&
// FsHistoryProvider generates a hidden file which can't be read. Accidentally
// reading a garbage file is safe, but we would log an error which can be scary to
// the end-user.
!entry.getPath().getName().startsWith(".") &&
prevFileSize < entry.getLen()
} catch {
case e: AccessControlException =>
// Do not use "logInfo" since these messages can get pretty noisy if printed on
// every poll.
logDebug(s"No permission to read $entry, ignoring.")
false
}
}
.sortWith { case (entry1, entry2) =>
entry1.getModificationTime() >= entry2.getModificationTime()
}
if (logInfos.nonEmpty) {
logDebug(s"New/updated attempts found: ${logInfos.size} ${logInfos.map(_.getPath)}")
}
var tasks = mutable.ListBuffer[Future[_]]()
try {
for (file <- logInfos) {
tasks += replayExecutor.submit(new Runnable {
override def run(): Unit = mergeApplicationListing(file)
})
}
} catch {
// let the iteration over logInfos break, since an exception on
// replayExecutor.submit (..) indicates the ExecutorService is unable
// to take any more submissions at this time
case e: Exception =>
logError(s"Exception while submitting event log for replay", e)
}
pendingReplayTasksCount.addAndGet(tasks.size)
tasks.foreach { task =>
try {
// Wait for all tasks to finish. This makes sure that checkForLogs
// is not scheduled again while some tasks are already running in
// the replayExecutor.
task.get()
} catch {
case e: InterruptedException =>
throw e
case e: Exception =>
logError("Exception while merging application listings", e)
} finally {
pendingReplayTasksCount.decrementAndGet()
}
}
lastScanTime.set(newLastScanTime)
} catch {
case e: Exception => logError("Exception in checking for event log updates", e)
}
}
private def getNewLastScanTime(): Long = {
val fileName = "." + UUID.randomUUID().toString
val path = new Path(logDir, fileName)
val fos = fs.create(path)
try {
fos.close()
fs.getFileStatus(path).getModificationTime
} catch {
case e: Exception =>
logError("Exception encountered when attempting to update last scan time", e)
lastScanTime.get()
} finally {
if (!fs.delete(path, true)) {
logWarning(s"Error deleting ${path}")
}
}
}
override def writeEventLogs(
appId: String,
attemptId: Option[String],
zipStream: ZipOutputStream): Unit = {
/**
* This method compresses the files passed in, and writes the compressed data out into the
* [[OutputStream]] passed in. Each file is written as a new [[ZipEntry]] with its name being
* the name of the file being compressed.
*/
def zipFileToStream(file: Path, entryName: String, outputStream: ZipOutputStream): Unit = {
val fs = file.getFileSystem(hadoopConf)
val inputStream = fs.open(file, 1 * 1024 * 1024) // 1MB Buffer
try {
outputStream.putNextEntry(new ZipEntry(entryName))
ByteStreams.copy(inputStream, outputStream)
outputStream.closeEntry()
} finally {
inputStream.close()
}
}
applications.get(appId) match {
case Some(appInfo) =>
try {
// If no attempt is specified, or there is no attemptId for attempts, return all attempts
appInfo.attempts.filter { attempt =>
attempt.attemptId.isEmpty || attemptId.isEmpty || attempt.attemptId.get == attemptId.get
}.foreach { attempt =>
val logPath = new Path(logDir, attempt.logPath)
zipFileToStream(logPath, attempt.logPath, zipStream)
}
} finally {
zipStream.close()
}
case None => throw new SparkException(s"Logs for $appId not found.")
}
}
/**
* Replay the log files in the list and merge the list of old applications with new ones
*/
private def mergeApplicationListing(fileStatus: FileStatus): Unit = {
val newAttempts = try {
val eventsFilter: ReplayEventsFilter = { eventString =>
eventString.startsWith(APPL_START_EVENT_PREFIX) ||
eventString.startsWith(APPL_END_EVENT_PREFIX)
}
val logPath = fileStatus.getPath()
val appCompleted = isApplicationCompleted(fileStatus)
val appListener = replay(fileStatus, appCompleted, new ReplayListenerBus(), eventsFilter)
// Without an app ID, new logs will render incorrectly in the listing page, so do not list or
// try to show their UI.
if (appListener.appId.isDefined) {
val attemptInfo = new FsApplicationAttemptInfo(
logPath.getName(),
appListener.appName.getOrElse(NOT_STARTED),
appListener.appId.getOrElse(logPath.getName()),
appListener.appAttemptId,
appListener.startTime.getOrElse(-1L),
appListener.endTime.getOrElse(-1L),
fileStatus.getModificationTime(),
appListener.sparkUser.getOrElse(NOT_STARTED),
appCompleted,
fileStatus.getLen()
)
fileToAppInfo(logPath) = attemptInfo
logDebug(s"Application log ${attemptInfo.logPath} loaded successfully: $attemptInfo")
Some(attemptInfo)
} else {
logWarning(s"Failed to load application log ${fileStatus.getPath}. " +
"The application may have not started.")
None
}
} catch {
case e: Exception =>
logError(
s"Exception encountered when attempting to load application log ${fileStatus.getPath}",
e)
None
}
if (newAttempts.isEmpty) {
return
}
// Build a map containing all apps that contain new attempts. The app information in this map
// contains both the new app attempt, and those that were already loaded in the existing apps
// map. If an attempt has been updated, it replaces the old attempt in the list.
val newAppMap = new mutable.HashMap[String, FsApplicationHistoryInfo]()
applications.synchronized {
newAttempts.foreach { attempt =>
val appInfo = newAppMap.get(attempt.appId)
.orElse(applications.get(attempt.appId))
.map { app =>
val attempts =
app.attempts.filter(_.attemptId != attempt.attemptId) ++ List(attempt)
new FsApplicationHistoryInfo(attempt.appId, attempt.name,
attempts.sortWith(compareAttemptInfo))
}
.getOrElse(new FsApplicationHistoryInfo(attempt.appId, attempt.name, List(attempt)))
newAppMap(attempt.appId) = appInfo
}
// Merge the new app list with the existing one, maintaining the expected ordering (descending
// end time). Maintaining the order is important to avoid having to sort the list every time
// there is a request for the log list.
val newApps = newAppMap.values.toSeq.sortWith(compareAppInfo)
val mergedApps = new mutable.LinkedHashMap[String, FsApplicationHistoryInfo]()
def addIfAbsent(info: FsApplicationHistoryInfo): Unit = {
if (!mergedApps.contains(info.id)) {
mergedApps += (info.id -> info)
}
}
val newIterator = newApps.iterator.buffered
val oldIterator = applications.values.iterator.buffered
while (newIterator.hasNext && oldIterator.hasNext) {
if (newAppMap.contains(oldIterator.head.id)) {
oldIterator.next()
} else if (compareAppInfo(newIterator.head, oldIterator.head)) {
addIfAbsent(newIterator.next())
} else {
addIfAbsent(oldIterator.next())
}
}
newIterator.foreach(addIfAbsent)
oldIterator.foreach(addIfAbsent)
applications = mergedApps
}
}
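  // Worked example of the merge above (hypothetical ids and end times): if the
  // existing list is [A(end=30), B(end=20), C(end=10)] and B gained an updated
  // attempt ending at 40, then newApps is [B'(40)]. The buffered walk emits
  // B'(40), then A(30), skips the stale B entry, then C(10) -- preserving the
  // descending end-time order without re-sorting the whole list.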
/**
* Delete event logs from the log directory according to the clean policy defined by the user.
*/
private[history] def cleanLogs(): Unit = {
try {
val maxAge = conf.getTimeAsSeconds("spark.history.fs.cleaner.maxAge", "7d") * 1000
val now = clock.getTimeMillis()
val appsToRetain = new mutable.LinkedHashMap[String, FsApplicationHistoryInfo]()
def shouldClean(attempt: FsApplicationAttemptInfo): Boolean = {
now - attempt.lastUpdated > maxAge && attempt.completed
}
// Scan all logs from the log directory.
// Only completed applications older than the specified max age will be deleted.
applications.values.foreach { app =>
val (toClean, toRetain) = app.attempts.partition(shouldClean)
attemptsToClean ++= toClean
if (toClean.isEmpty) {
appsToRetain += (app.id -> app)
} else if (toRetain.nonEmpty) {
appsToRetain += (app.id ->
new FsApplicationHistoryInfo(app.id, app.name, toRetain.toList))
}
}
applications = appsToRetain
val leftToClean = new mutable.ListBuffer[FsApplicationAttemptInfo]
attemptsToClean.foreach { attempt =>
try {
fs.delete(new Path(logDir, attempt.logPath), true)
} catch {
case e: AccessControlException =>
logInfo(s"No permission to delete ${attempt.logPath}, ignoring.")
case t: IOException =>
logError(s"IOException in cleaning ${attempt.logPath}", t)
leftToClean += attempt
}
}
attemptsToClean = leftToClean
} catch {
case t: Exception => logError("Exception in cleaning logs", t)
}
}
/**
* Comparison function that defines the sort order for the application listing.
*
* @return Whether `i1` should precede `i2`.
*/
private def compareAppInfo(
i1: FsApplicationHistoryInfo,
i2: FsApplicationHistoryInfo): Boolean = {
val a1 = i1.attempts.head
val a2 = i2.attempts.head
if (a1.endTime != a2.endTime) a1.endTime >= a2.endTime else a1.startTime >= a2.startTime
}
/**
* Comparison function that defines the sort order for application attempts within the same
* application. Order is: attempts are sorted by descending start time.
 * The state of the most recent attempt matches the current state of the app.
*
* Normally applications should have a single running attempt; but failure to call sc.stop()
* may cause multiple running attempts to show up.
*
* @return Whether `a1` should precede `a2`.
*/
private def compareAttemptInfo(
a1: FsApplicationAttemptInfo,
a2: FsApplicationAttemptInfo): Boolean = {
a1.startTime >= a2.startTime
}
/**
* Replays the events in the specified log file on the supplied `ReplayListenerBus`. Returns
* an `ApplicationEventListener` instance with event data captured from the replay.
* `ReplayEventsFilter` determines what events are replayed and can therefore limit the
* data captured in the returned `ApplicationEventListener` instance.
*/
private def replay(
eventLog: FileStatus,
appCompleted: Boolean,
bus: ReplayListenerBus,
eventsFilter: ReplayEventsFilter = SELECT_ALL_FILTER): ApplicationEventListener = {
val logPath = eventLog.getPath()
logInfo(s"Replaying log path: $logPath")
// Note that the eventLog may have *increased* in size since when we grabbed the filestatus,
// and when we read the file here. That is OK -- it may result in an unnecessary refresh
// when there is no update, but will not result in missing an update. We *must* prevent
    // an error the other way -- if we report a size bigger (i.e. later) than the file that is
// actually read, we may never refresh the app. FileStatus is guaranteed to be static
// after it's created, so we get a file size that is no bigger than what is actually read.
val logInput = EventLoggingListener.openEventLog(logPath, fs)
try {
val appListener = new ApplicationEventListener
bus.addListener(appListener)
bus.replay(logInput, logPath.toString, !appCompleted, eventsFilter)
appListener
} finally {
logInput.close()
}
}
/**
* Return true when the application has completed.
*/
private def isApplicationCompleted(entry: FileStatus): Boolean = {
!entry.getPath().getName().endsWith(EventLoggingListener.IN_PROGRESS)
}
/**
* Checks whether HDFS is in safe mode.
*
* Note that DistributedFileSystem is a `@LimitedPrivate` class, which for all practical reasons
* makes it more public than not.
*/
private[history] def isFsInSafeMode(): Boolean = fs match {
case dfs: DistributedFileSystem =>
isFsInSafeMode(dfs)
case _ =>
false
}
private[history] def isFsInSafeMode(dfs: DistributedFileSystem): Boolean = {
/* true to check only for Active NNs status */
dfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_GET, true)
}
/**
* String description for diagnostics
* @return a summary of the component state
*/
override def toString: String = {
val header = s"""
| FsHistoryProvider: logdir=$logDir,
| last scan time=$lastScanTime
      | Cached application count = ${applications.size}
""".stripMargin
val sb = new StringBuilder(header)
    applications.foreach(entry => sb.append(entry._2).append("\n"))
sb.toString
}
/**
* Look up an application attempt
* @param appId application ID
* @param attemptId Attempt ID, if set
* @return the matching attempt, if found
*/
def lookup(appId: String, attemptId: Option[String]): Option[FsApplicationAttemptInfo] = {
applications.get(appId).flatMap { appInfo =>
appInfo.attempts.find(_.attemptId == attemptId)
}
}
/**
* Return true iff a newer version of the UI is available. The check is based on whether the
* fileSize for the currently loaded UI is smaller than the file size the last time
* the logs were loaded.
*
* This is a very cheap operation -- the work of loading the new attempt was already done
* by [[checkForLogs]].
* @param appId application to probe
* @param attemptId attempt to probe
* @param prevFileSize the file size of the logs for the currently displayed UI
*/
private def updateProbe(
appId: String,
attemptId: Option[String],
prevFileSize: Long)(): Boolean = {
lookup(appId, attemptId) match {
case None =>
logDebug(s"Application Attempt $appId/$attemptId not found")
false
case Some(latest) =>
prevFileSize < latest.fileSize
}
}
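  // Sketch of the probe's lifecycle: getAppUI captures the attempt's log size at
  // load time and returns the partially applied updateProbe(appId, attemptId,
  // fileSize) closure; the server invokes it later, and a true result (the log
  // grew) triggers a reload of the cached UI. E.g. with a cached size of 1024
  // bytes and a rescanned size of 2048 bytes, the probe returns true.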
}
private[history] object FsHistoryProvider {
val DEFAULT_LOG_DIR = "file:/tmp/spark-events"
private val NOT_STARTED = "<Not Started>"
private val SPARK_HISTORY_FS_NUM_REPLAY_THREADS = "spark.history.fs.numReplayThreads"
  private val APPL_START_EVENT_PREFIX = "{\"Event\":\"SparkListenerApplicationStart\""
  private val APPL_END_EVENT_PREFIX = "{\"Event\":\"SparkListenerApplicationEnd\""
}
/**
* Application attempt information.
*
* @param logPath path to the log file, or, for a legacy log, its directory
* @param name application name
* @param appId application ID
* @param attemptId optional attempt ID
* @param startTime start time (from playback)
* @param endTime end time (from playback). -1 if the application is incomplete.
* @param lastUpdated the modification time of the log file when this entry was built by replaying
* the history.
* @param sparkUser user running the application
* @param completed flag to indicate whether or not the application has completed.
* @param fileSize the size of the log file the last time the file was scanned for changes
*/
private class FsApplicationAttemptInfo(
val logPath: String,
val name: String,
val appId: String,
attemptId: Option[String],
startTime: Long,
endTime: Long,
lastUpdated: Long,
sparkUser: String,
completed: Boolean,
val fileSize: Long)
extends ApplicationAttemptInfo(
attemptId, startTime, endTime, lastUpdated, sparkUser, completed) {
/** extend the superclass string value with the extra attributes of this class */
override def toString: String = {
s"FsApplicationAttemptInfo($name, $appId," +
s" ${super.toString}, source=$logPath, size=$fileSize"
}
}
/**
* Application history information
* @param id application ID
* @param name application name
* @param attempts list of attempts, most recent first.
*/
private class FsApplicationHistoryInfo(
id: String,
override val name: String,
override val attempts: List[FsApplicationAttemptInfo])
extends ApplicationHistoryInfo(id, name, attempts)
| Panos-Bletsos/spark-cost-model-optimizer | core/src/main/scala/org/apache/spark/deploy/history/FsHistoryProvider.scala | Scala | apache-2.0 | 30,167 |
package fpinscala.exercises.ch04errorhandling
import org.scalatest.FunSuite
/**
* Created by hjmao on 17-3-3.
*/
class EitherTest extends FunSuite {
test("testMap with right value") {
val me = Right(1)
val f = (x: Int) => x + 1
val expected = Right(2)
val actual = me.map(f)
assert(actual == expected)
}
test("testMap with error value") {
val me = Left("Error")
val f = (x: Int) => x + 1
val expected = Left("Error")
val actual = me.map(f)
assert(actual == expected)
}
test("testFlatMap with right value") {
val me = Right(1)
val f = (x: Int) => Right(x + 1)
val expected = Right(2)
val actual = me.flatMap(f)
assert(actual == expected)
}
test("testFlatMap with error value") {
val me = Left("Error")
val f = (x: Int) => Right(x + 1)
val expected = Left("Error")
val actual = me.flatMap(f)
assert(actual == expected)
}
test("testOrElse with right value") {
val me = Right(1)
val b = Right(2)
val expected = Right(1)
val actual = me.orElse(b)
assert(actual == expected)
}
test("testOrElse with error value") {
val me = Left("Error")
val b = Right(1)
val expected = Right(1)
val actual = me.orElse(b)
assert(actual == expected)
}
test("testOrElse with two error values") {
val me = Left("Error")
val b = Left(1)
val expected = Left(1)
val actual = me.orElse(b)
assert(actual == expected)
}
test("testMap2 with right values") {
val me = Right(1)
val b = Right(2)
val f = (a: Int, b: Int) => a + b
val expected = Right(3)
val actual = me.map2(b)(f)
assert(actual == expected)
}
test("testMap2 with one error value") {
val me = Right(1)
val b = Left(2)
val f = (a: Int, b: Int) => a + b
val expected = Left(2)
val actual = me.map2(b)(f)
assert(actual == expected)
}
test("testMap2 with two error values") {
val me = Left(1)
val b = Left(2)
val f = (a: Int, b: Int) => a + b
val expected = Left(1)
val actual = me.map2(b)(f)
assert(actual == expected)
}
test("testTraverse with right values") {
val as = List(1, 2, 3, 4)
val f = (x: Int) => Right(x + 1)
val expected = Right(List(2, 3, 4, 5))
val actual = Either.traverse(as)(f)
assert(actual == expected)
}
test("testTraverse with string values") {
val as = List("1", "2", "3", "4")
val f = (x: String) => Either.Try(x.toInt + 1)
val expected = Right(List(2, 3, 4, 5))
val actual = Either.traverse(as)(f)
assert(actual == expected)
}
test("testTraverse with error string value") {
val as = List("x", "2", "y", "4")
val f = (x: String) => Either.Try(x.toInt + 1)
val expected = Either.Try("x".toInt).toString
val actual = Either.traverse(as)(f).toString
assert(actual == expected)
}
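  test("testTraverse with empty list") {
    // A minimal sanity check, assuming the usual traverse implementation that
    // folds an empty list into a Right of the empty list.
    val as = List[String]()
    val f = (x: String) => Either.Try(x.toInt)
    val expected = Right(List())
    val actual = Either.traverse(as)(f)
    assert(actual == expected)
  }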
}
| huajianmao/fpinscala | src/test/scala/fpinscala/exercises/ch04errorhandling/EitherTest.scala | Scala | mit | 2,869 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.jobs
import java.util.Locale
import javax.servlet.http.HttpServletRequest
import scala.collection.mutable.{Buffer, ListBuffer}
import scala.xml.{Node, NodeSeq, Unparsed, Utility}
import org.apache.commons.lang3.StringEscapeUtils
import org.apache.spark.JobExecutionStatus
import org.apache.spark.scheduler._
import org.apache.spark.status.AppStatusStore
import org.apache.spark.status.api.v1
import org.apache.spark.ui._
/** Page showing statistics and stage list for a given job */
private[ui] class JobPage(parent: JobsTab, store: AppStatusStore) extends WebUIPage("job") {
private val STAGES_LEGEND =
<div class="legend-area"><svg width="150px" height="85px">
<rect class="completed-stage-legend"
x="5px" y="5px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="17px">Completed</text>
<rect class="failed-stage-legend"
x="5px" y="30px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="42px">Failed</text>
<rect class="active-stage-legend"
x="5px" y="55px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="67px">Active</text>
</svg></div>.toString.filter(_ != '\n')
private val EXECUTORS_LEGEND =
<div class="legend-area"><svg width="150px" height="55px">
<rect class="executor-added-legend"
x="5px" y="5px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="17px">Added</text>
<rect class="executor-removed-legend"
x="5px" y="30px" width="20px" height="15px" rx="2px" ry="2px"></rect>
<text x="35px" y="42px">Removed</text>
</svg></div>.toString.filter(_ != '\n')
private def makeStageEvent(stageInfos: Seq[v1.StageData]): Seq[String] = {
stageInfos.map { stage =>
val stageId = stage.stageId
val attemptId = stage.attemptId
val name = stage.name
val status = stage.status.toString.toLowerCase(Locale.ROOT)
val submissionTime = stage.submissionTime.get.getTime()
val completionTime = stage.completionTime.map(_.getTime())
.getOrElse(System.currentTimeMillis())
// The timeline library treats contents as HTML, so we have to escape them. We need to add
      // extra layers of escaping in order to embed this in a JavaScript string literal.
val escapedName = Utility.escape(name)
val jsEscapedName = StringEscapeUtils.escapeEcmaScript(escapedName)
s"""
|{
| 'className': 'stage job-timeline-object ${status}',
| 'group': 'stages',
| 'start': new Date(${submissionTime}),
| 'end': new Date(${completionTime}),
| 'content': '<div class="job-timeline-content" data-toggle="tooltip"' +
| 'data-placement="top" data-html="true"' +
| 'data-title="${jsEscapedName} (Stage ${stageId}.${attemptId})<br>' +
| 'Status: ${status.toUpperCase(Locale.ROOT)}<br>' +
| 'Submitted: ${UIUtils.formatDate(submissionTime)}' +
| '${
if (status != "running") {
s"""<br>Completed: ${UIUtils.formatDate(completionTime)}"""
} else {
""
}
}">' +
| '${jsEscapedName} (Stage ${stageId}.${attemptId})</div>',
|}
""".stripMargin
}
}
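  // For reference, one emitted entry renders roughly as the following JavaScript
  // object literal (times and the stage name are illustrative):
  //
  //   {
  //     'className': 'stage job-timeline-object complete',
  //     'group': 'stages',
  //     'start': new Date(1500000000000),
  //     'end': new Date(1500000060000),
  //     'content': '<div class="job-timeline-content" ...>map at Foo.scala:10 (Stage 3.0)</div>',
  //   }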
def makeExecutorEvent(executors: Seq[v1.ExecutorSummary]): Seq[String] = {
val events = ListBuffer[String]()
executors.foreach { e =>
val addedEvent =
s"""
|{
| 'className': 'executor added',
| 'group': 'executors',
| 'start': new Date(${e.addTime.getTime()}),
| 'content': '<div class="executor-event-content"' +
| 'data-toggle="tooltip" data-placement="bottom"' +
| 'data-title="Executor ${e.id}<br>' +
| 'Added at ${UIUtils.formatDate(e.addTime)}"' +
| 'data-html="true">Executor ${e.id} added</div>'
|}
""".stripMargin
events += addedEvent
e.removeTime.foreach { removeTime =>
val removedEvent =
s"""
|{
| 'className': 'executor removed',
| 'group': 'executors',
| 'start': new Date(${removeTime.getTime()}),
| 'content': '<div class="executor-event-content"' +
| 'data-toggle="tooltip" data-placement="bottom"' +
| 'data-title="Executor ${e.id}<br>' +
| 'Removed at ${UIUtils.formatDate(removeTime)}' +
| '${
e.removeReason.map { reason =>
s"""<br>Reason: ${reason.replace("\n", " ")}"""
}.getOrElse("")
}"' +
| 'data-html="true">Executor ${e.id} removed</div>'
|}
""".stripMargin
events += removedEvent
}
}
events.toSeq
}
private def makeTimeline(
stages: Seq[v1.StageData],
executors: Seq[v1.ExecutorSummary],
appStartTime: Long): Seq[Node] = {
val stageEventJsonAsStrSeq = makeStageEvent(stages)
val executorsJsonAsStrSeq = makeExecutorEvent(executors)
val groupJsonArrayAsStr =
s"""
|[
| {
| 'id': 'executors',
| 'content': '<div>Executors</div>${EXECUTORS_LEGEND}',
| },
| {
| 'id': 'stages',
| 'content': '<div>Stages</div>${STAGES_LEGEND}',
| }
|]
""".stripMargin
val eventArrayAsStr =
(stageEventJsonAsStrSeq ++ executorsJsonAsStrSeq).mkString("[", ",", "]")
<span class="expand-job-timeline">
<span class="expand-job-timeline-arrow arrow-closed"></span>
<a data-toggle="tooltip" title={ToolTips.STAGE_TIMELINE} data-placement="right">
Event Timeline
</a>
</span> ++
<div id="job-timeline" class="collapsed">
<div class="control-panel">
<div id="job-timeline-zoom-lock">
<input type="checkbox"></input>
<span>Enable zooming</span>
</div>
</div>
</div> ++
<script type="text/javascript">
{Unparsed(s"drawJobTimeline(${groupJsonArrayAsStr}, ${eventArrayAsStr}, " +
s"${appStartTime}, ${UIUtils.getTimeZoneOffset()});")}
</script>
}
def render(request: HttpServletRequest): Seq[Node] = {
// stripXSS is called first to remove suspicious characters used in XSS attacks
val parameterId = UIUtils.stripXSS(request.getParameter("id"))
require(parameterId != null && parameterId.nonEmpty, "Missing id parameter")
val jobId = parameterId.toInt
val (jobData, sqlExecutionId) = store.asOption(store.jobWithAssociatedSql(jobId)).getOrElse {
val content =
<div id="no-info">
<p>No information to display for job {jobId}</p>
</div>
return UIUtils.headerSparkPage(
request, s"Details for Job $jobId", content, parent)
}
val isComplete = jobData.status != JobExecutionStatus.RUNNING
val stages = jobData.stageIds.map { stageId =>
// This could be empty if the listener hasn't received information about the
// stage or if the stage information has been garbage collected
store.asOption(store.lastStageAttempt(stageId)).getOrElse {
new v1.StageData(
v1.StageStatus.PENDING,
stageId,
0, 0, 0, 0, 0, 0, 0,
0L, 0L, None, None, None, None,
0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L, 0L,
"Unknown",
None,
"Unknown",
null,
Nil,
Nil,
None,
None,
Map())
}
}
val activeStages = Buffer[v1.StageData]()
val completedStages = Buffer[v1.StageData]()
// If the job is completed, then any pending stages are displayed as "skipped":
val pendingOrSkippedStages = Buffer[v1.StageData]()
val failedStages = Buffer[v1.StageData]()
for (stage <- stages) {
if (stage.submissionTime.isEmpty) {
pendingOrSkippedStages += stage
} else if (stage.completionTime.isDefined) {
if (stage.status == v1.StageStatus.FAILED) {
failedStages += stage
} else {
completedStages += stage
}
} else {
activeStages += stage
}
}
val basePath = "jobs/job"
val pendingOrSkippedTableId =
if (isComplete) {
"pending"
} else {
"skipped"
}
val activeStagesTable =
new StageTableBase(store, request, activeStages, "active", "activeStage", parent.basePath,
basePath, parent.isFairScheduler,
killEnabled = parent.killEnabled, isFailedStage = false)
val pendingOrSkippedStagesTable =
new StageTableBase(store, request, pendingOrSkippedStages, pendingOrSkippedTableId,
"pendingStage", parent.basePath, basePath, parent.isFairScheduler,
killEnabled = false, isFailedStage = false)
val completedStagesTable =
new StageTableBase(store, request, completedStages, "completed", "completedStage",
parent.basePath, basePath, parent.isFairScheduler,
killEnabled = false, isFailedStage = false)
val failedStagesTable =
new StageTableBase(store, request, failedStages, "failed", "failedStage", parent.basePath,
basePath, parent.isFairScheduler,
killEnabled = false, isFailedStage = true)
val shouldShowActiveStages = activeStages.nonEmpty
val shouldShowPendingStages = !isComplete && pendingOrSkippedStages.nonEmpty
val shouldShowCompletedStages = completedStages.nonEmpty
val shouldShowSkippedStages = isComplete && pendingOrSkippedStages.nonEmpty
val shouldShowFailedStages = failedStages.nonEmpty
val summary: NodeSeq =
<div>
<ul class="unstyled">
<li>
            <strong>Status:</strong>
{jobData.status}
</li>
{
if (sqlExecutionId.isDefined) {
<li>
<strong>Associated SQL Query: </strong>
{<a href={"%s/SQL/execution/?id=%s".format(
UIUtils.prependBaseUri(request, parent.basePath),
sqlExecutionId.get)
}>{sqlExecutionId.get}</a>}
</li>
}
}
{
if (jobData.jobGroup.isDefined) {
<li>
<strong>Job Group:</strong>
{jobData.jobGroup.get}
</li>
}
}
{
if (shouldShowActiveStages) {
<li>
<a href="#active"><strong>Active Stages:</strong></a>
{activeStages.size}
</li>
}
}
{
if (shouldShowPendingStages) {
<li>
<a href="#pending">
<strong>Pending Stages:</strong>
</a>{pendingOrSkippedStages.size}
</li>
}
}
{
if (shouldShowCompletedStages) {
<li>
<a href="#completed"><strong>Completed Stages:</strong></a>
{completedStages.size}
</li>
}
}
{
if (shouldShowSkippedStages) {
<li>
<a href="#skipped"><strong>Skipped Stages:</strong></a>
{pendingOrSkippedStages.size}
</li>
}
}
{
if (shouldShowFailedStages) {
<li>
<a href="#failed"><strong>Failed Stages:</strong></a>
{failedStages.size}
</li>
}
}
</ul>
</div>
var content = summary
val appStartTime = store.applicationInfo().attempts.head.startTime.getTime()
content ++= makeTimeline(activeStages ++ completedStages ++ failedStages,
store.executorList(false), appStartTime)
val operationGraphContent = store.asOption(store.operationGraphForJob(jobId)) match {
case Some(operationGraph) => UIUtils.showDagVizForJob(jobId, operationGraph)
case None =>
<div id="no-info">
<p>No DAG visualization information to display for job {jobId}</p>
</div>
}
content ++= operationGraphContent
if (shouldShowActiveStages) {
content ++=
<span id="active" class="collapse-aggregated-activeStages collapse-table"
onClick="collapseTable('collapse-aggregated-activeStages','aggregated-activeStages')">
<h4>
<span class="collapse-table-arrow arrow-open"></span>
<a>Active Stages ({activeStages.size})</a>
</h4>
</span> ++
<div class="aggregated-activeStages collapsible-table">
{activeStagesTable.toNodeSeq}
</div>
}
if (shouldShowPendingStages) {
content ++=
<span id="pending" class="collapse-aggregated-pendingOrSkippedStages collapse-table"
onClick="collapseTable('collapse-aggregated-pendingOrSkippedStages',
'aggregated-pendingOrSkippedStages')">
<h4>
<span class="collapse-table-arrow arrow-open"></span>
<a>Pending Stages ({pendingOrSkippedStages.size})</a>
</h4>
</span> ++
<div class="aggregated-pendingOrSkippedStages collapsible-table">
{pendingOrSkippedStagesTable.toNodeSeq}
</div>
}
if (shouldShowCompletedStages) {
content ++=
<span id="completed" class="collapse-aggregated-completedStages collapse-table"
onClick="collapseTable('collapse-aggregated-completedStages',
'aggregated-completedStages')">
<h4>
<span class="collapse-table-arrow arrow-open"></span>
<a>Completed Stages ({completedStages.size})</a>
</h4>
</span> ++
<div class="aggregated-completedStages collapsible-table">
{completedStagesTable.toNodeSeq}
</div>
}
if (shouldShowSkippedStages) {
content ++=
<span id="skipped" class="collapse-aggregated-pendingOrSkippedStages collapse-table"
onClick="collapseTable('collapse-aggregated-pendingOrSkippedStages',
'aggregated-pendingOrSkippedStages')">
<h4>
<span class="collapse-table-arrow arrow-open"></span>
<a>Skipped Stages ({pendingOrSkippedStages.size})</a>
</h4>
</span> ++
<div class="aggregated-pendingOrSkippedStages collapsible-table">
{pendingOrSkippedStagesTable.toNodeSeq}
</div>
}
if (shouldShowFailedStages) {
content ++=
<span id ="failed" class="collapse-aggregated-failedStages collapse-table"
onClick="collapseTable('collapse-aggregated-failedStages','aggregated-failedStages')">
<h4>
<span class="collapse-table-arrow arrow-open"></span>
<a>Failed Stages ({failedStages.size})</a>
</h4>
</span> ++
<div class="aggregated-failedStages collapsible-table">
{failedStagesTable.toNodeSeq}
</div>
}
UIUtils.headerSparkPage(
request, s"Details for Job $jobId", content, parent, showVisualization = true)
}
}
| guoxiaolongzte/spark | core/src/main/scala/org/apache/spark/ui/jobs/JobPage.scala | Scala | apache-2.0 | 16,115 |
package samples
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.{Matchers, Suite, BeforeAndAfterAll}
trait TestBase extends BeforeAndAfterAll with Matchers with ScalaFutures {
this: Suite =>
implicit val actorSystem = ActorSystem()
implicit val flowMaterializer = ActorMaterializer.create(actorSystem)
override protected def afterAll(): Unit =
actorSystem.shutdown()
}
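// A minimal sketch of a suite mixing in this trait; the FunSuite style and the
// stream below are illustrative assumptions:
//
//   import akka.stream.scaladsl.{Sink, Source}
//   import org.scalatest.FunSuite
//
//   class MyStreamSpec extends FunSuite with TestBase {
//     test("folds a source") {
//       val sum = Source(1 to 3).runWith(Sink.fold(0)(_ + _))
//       whenReady(sum) { _ shouldBe 6 }
//     }
//   }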
| fsat/akka-http-messing-about | src/test/scala/samples/TestBase.scala | Scala | mit | 470 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.kafka
import java.util.Properties
import java.util.concurrent.{ThreadPoolExecutor, ConcurrentHashMap}
import scala.collection.{Map, mutable}
import scala.reflect.{ClassTag, classTag}
import kafka.common.TopicAndPartition
import kafka.consumer.{Consumer, ConsumerConfig, ConsumerConnector, KafkaStream}
import kafka.message.MessageAndMetadata
import kafka.serializer.Decoder
import kafka.utils.{VerifiableProperties, ZKGroupTopicDirs, ZKStringSerializer, ZkUtils}
import org.I0Itec.zkclient.ZkClient
import org.apache.spark.{Logging, SparkEnv}
import org.apache.spark.storage.{StorageLevel, StreamBlockId}
import org.apache.spark.streaming.receiver.{BlockGenerator, BlockGeneratorListener, Receiver}
import org.apache.spark.util.ThreadUtils
/**
* ReliableKafkaReceiver offers the ability to reliably store data into BlockManager without loss.
* It is turned off by default and will be enabled when
* spark.streaming.receiver.writeAheadLog.enable is true. The difference compared to KafkaReceiver
* is that this receiver manages topic-partition/offset itself and updates the offset information
 * after data is reliably stored in the write-ahead log. Offsets will only be updated when data is
* reliably stored, so the potential data loss problem of KafkaReceiver can be eliminated.
*
 * Note: ReliableKafkaReceiver will set auto.commit.enable to false to turn off the automatic
 * offset commit mechanism in the Kafka consumer, so setting this configuration manually within
 * kafkaParams will not take effect.
*/
private[streaming]
class ReliableKafkaReceiver[
K: ClassTag,
V: ClassTag,
U <: Decoder[_]: ClassTag,
T <: Decoder[_]: ClassTag](
kafkaParams: Map[String, String],
topics: Map[String, Int],
storageLevel: StorageLevel)
extends Receiver[(K, V)](storageLevel) with Logging {
private val groupId = kafkaParams("group.id")
private val AUTO_OFFSET_COMMIT = "auto.commit.enable"
private def conf = SparkEnv.get.conf
/** High level consumer to connect to Kafka. */
private var consumerConnector: ConsumerConnector = null
/** zkClient to connect to Zookeeper to commit the offsets. */
private var zkClient: ZkClient = null
/**
 * A HashMap to manage the offset for each topic/partition. This HashMap is only accessed
 * inside synchronized blocks, so the mutable HashMap does not run into concurrency issues.
*/
private var topicPartitionOffsetMap: mutable.HashMap[TopicAndPartition, Long] = null
/** A concurrent HashMap to store the stream block id and related offset snapshot. */
private var blockOffsetMap: ConcurrentHashMap[StreamBlockId, Map[TopicAndPartition, Long]] = null
/**
 * Manage the BlockGenerator in the receiver itself, for better control over block storage
 * and offset commits.
*/
private var blockGenerator: BlockGenerator = null
  /** Thread pool running the handlers for receiving messages from multiple topics and partitions. */
private var messageHandlerThreadPool: ThreadPoolExecutor = null
override def onStart(): Unit = {
logInfo(s"Starting Kafka Consumer Stream with group: $groupId")
// Initialize the topic-partition / offset hash map.
topicPartitionOffsetMap = new mutable.HashMap[TopicAndPartition, Long]
// Initialize the stream block id / offset snapshot hash map.
blockOffsetMap = new ConcurrentHashMap[StreamBlockId, Map[TopicAndPartition, Long]]()
// Initialize the block generator for storing Kafka message.
blockGenerator = new BlockGenerator(new GeneratedBlockHandler, streamId, conf)
if (kafkaParams.contains(AUTO_OFFSET_COMMIT) && kafkaParams(AUTO_OFFSET_COMMIT) == "true") {
logWarning(s"$AUTO_OFFSET_COMMIT should be set to false in ReliableKafkaReceiver, " +
"otherwise we will manually set it to false to turn off auto offset commit in Kafka")
}
val props = new Properties()
kafkaParams.foreach(param => props.put(param._1, param._2))
// Manually set "auto.commit.enable" to "false" no matter user explicitly set it to true,
// we have to make sure this property is set to false to turn off auto commit mechanism in
// Kafka.
props.setProperty(AUTO_OFFSET_COMMIT, "false")
val consumerConfig = new ConsumerConfig(props)
assert(!consumerConfig.autoCommitEnable)
logInfo(s"Connecting to Zookeeper: ${consumerConfig.zkConnect}")
consumerConnector = Consumer.create(consumerConfig)
logInfo(s"Connected to Zookeeper: ${consumerConfig.zkConnect}")
zkClient = new ZkClient(consumerConfig.zkConnect, consumerConfig.zkSessionTimeoutMs,
consumerConfig.zkConnectionTimeoutMs, ZKStringSerializer)
messageHandlerThreadPool = ThreadUtils.newDaemonFixedThreadPool(
topics.values.sum, "KafkaMessageHandler")
blockGenerator.start()
val keyDecoder = classTag[U].runtimeClass.getConstructor(classOf[VerifiableProperties])
.newInstance(consumerConfig.props)
.asInstanceOf[Decoder[K]]
val valueDecoder = classTag[T].runtimeClass.getConstructor(classOf[VerifiableProperties])
.newInstance(consumerConfig.props)
.asInstanceOf[Decoder[V]]
val topicMessageStreams = consumerConnector.createMessageStreams(
topics, keyDecoder, valueDecoder)
topicMessageStreams.values.foreach { streams =>
streams.foreach { stream =>
messageHandlerThreadPool.submit(new MessageHandler(stream))
}
}
}
override def onStop(): Unit = {
if (messageHandlerThreadPool != null) {
messageHandlerThreadPool.shutdown()
messageHandlerThreadPool = null
}
if (consumerConnector != null) {
consumerConnector.shutdown()
consumerConnector = null
}
if (zkClient != null) {
zkClient.close()
zkClient = null
}
if (blockGenerator != null) {
blockGenerator.stop()
blockGenerator = null
}
if (topicPartitionOffsetMap != null) {
topicPartitionOffsetMap.clear()
topicPartitionOffsetMap = null
}
if (blockOffsetMap != null) {
blockOffsetMap.clear()
blockOffsetMap = null
}
}
/** Store a Kafka message and the associated metadata as a tuple. */
private def storeMessageAndMetadata(
msgAndMetadata: MessageAndMetadata[K, V]): Unit = {
val topicAndPartition = TopicAndPartition(msgAndMetadata.topic, msgAndMetadata.partition)
val data = (msgAndMetadata.key, msgAndMetadata.message)
val metadata = (topicAndPartition, msgAndMetadata.offset)
blockGenerator.addDataWithCallback(data, metadata)
}
  /** Update the stored offset for the given topic and partition. */
private def updateOffset(topicAndPartition: TopicAndPartition, offset: Long): Unit = {
topicPartitionOffsetMap.put(topicAndPartition, offset)
}
/**
* Remember the current offsets for each topic and partition. This is called when a block is
* generated.
*/
private def rememberBlockOffsets(blockId: StreamBlockId): Unit = {
// Get a snapshot of current offset map and store with related block id.
val offsetSnapshot = topicPartitionOffsetMap.toMap
blockOffsetMap.put(blockId, offsetSnapshot)
topicPartitionOffsetMap.clear()
}
/**
* Store the ready-to-be-stored block and commit the related offsets to zookeeper. This method
* will try a fixed number of times to push the block. If the push fails, the receiver is stopped.
*/
private def storeBlockAndCommitOffset(
blockId: StreamBlockId, arrayBuffer: mutable.ArrayBuffer[_]): Unit = {
var count = 0
var pushed = false
var exception: Exception = null
while (!pushed && count <= 3) {
try {
store(arrayBuffer.asInstanceOf[mutable.ArrayBuffer[(K, V)]])
pushed = true
} catch {
case ex: Exception =>
count += 1
exception = ex
}
}
if (pushed) {
Option(blockOffsetMap.get(blockId)).foreach(commitOffset)
blockOffsetMap.remove(blockId)
} else {
stop("Error while storing block into Spark", exception)
}
}
/**
 * Commit the offset for a Kafka topic/partition; the commit mechanism follows Kafka 0.8.x's
 * metadata schema in ZooKeeper.
*/
private def commitOffset(offsetMap: Map[TopicAndPartition, Long]): Unit = {
if (zkClient == null) {
val thrown = new IllegalStateException("Zookeeper client is unexpectedly null")
stop("Zookeeper client is not initialized before commit offsets to ZK", thrown)
return
}
for ((topicAndPart, offset) <- offsetMap) {
try {
val topicDirs = new ZKGroupTopicDirs(groupId, topicAndPart.topic)
val zkPath = s"${topicDirs.consumerOffsetDir}/${topicAndPart.partition}"
ZkUtils.updatePersistentPath(zkClient, zkPath, offset.toString)
} catch {
case e: Exception =>
logWarning(s"Exception during commit offset $offset for topic" +
s"${topicAndPart.topic}, partition ${topicAndPart.partition}", e)
}
logInfo(s"Committed offset $offset for topic ${topicAndPart.topic}, " +
s"partition ${topicAndPart.partition}")
}
}
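  // For example, with groupId "spark-group", topic "events" and partition 0, the
  // path written above is "/consumers/spark-group/offsets/events/0" -- the layout
  // ZKGroupTopicDirs encodes for Kafka 0.8.x high-level consumers.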
/** Class to handle received Kafka message. */
private final class MessageHandler(stream: KafkaStream[K, V]) extends Runnable {
override def run(): Unit = {
while (!isStopped) {
try {
val streamIterator = stream.iterator()
while (streamIterator.hasNext) {
storeMessageAndMetadata(streamIterator.next)
}
} catch {
case e: Exception =>
reportError("Error handling message", e)
}
}
}
}
/** Class to handle blocks generated by the block generator. */
private final class GeneratedBlockHandler extends BlockGeneratorListener {
def onAddData(data: Any, metadata: Any): Unit = {
// Update the offset of the data that was added to the generator
if (metadata != null) {
val (topicAndPartition, offset) = metadata.asInstanceOf[(TopicAndPartition, Long)]
updateOffset(topicAndPartition, offset)
}
}
def onGenerateBlock(blockId: StreamBlockId): Unit = {
// Remember the offsets of topics/partitions when a block has been generated
rememberBlockOffsets(blockId)
}
def onPushBlock(blockId: StreamBlockId, arrayBuffer: mutable.ArrayBuffer[_]): Unit = {
// Store block and commit the blocks offset
storeBlockAndCommitOffset(blockId, arrayBuffer)
}
def onError(message: String, throwable: Throwable): Unit = {
reportError(message, throwable)
}
}
}
| andrewor14/iolap | external/kafka/src/main/scala/org/apache/spark/streaming/kafka/ReliableKafkaReceiver.scala | Scala | apache-2.0 | 11,289 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package collection
package immutable
import scala.collection.Stepper.EfficientSplit
import scala.collection.generic.DefaultSerializable
import scala.collection.mutable.ReusableBuilder
import scala.runtime.Statics.releaseFence
/** $factoryInfo
* @define Coll `Vector`
* @define coll vector
*/
@SerialVersionUID(3L)
object Vector extends StrictOptimizedSeqFactory[Vector] {
def empty[A]: Vector[A] = NIL
def from[E](it: collection.IterableOnce[E]): Vector[E] =
it match {
case as: ArraySeq[E] if as.length <= 32 && as.unsafeArray.isInstanceOf[Array[AnyRef]] =>
if (as.isEmpty) NIL
else {
val v = new Vector(0, as.length, 0)
v.display0 = as.unsafeArray.asInstanceOf[Array[AnyRef]]
v.depth = 1
releaseFence()
v
}
case v: Vector[E] => v
case _ =>
val knownSize = it.knownSize
if (knownSize == 0) empty[E]
else if (knownSize > 0 && knownSize <= 32) {
val display0 = new Array[AnyRef](knownSize)
var i = 0
val iterator = it.iterator
while (iterator.hasNext) {
display0(i) = iterator.next().asInstanceOf[AnyRef]
i += 1
}
val v = new Vector[E](0, knownSize, 0)
v.depth = 1
v.display0 = display0
releaseFence()
v
} else {
(newBuilder ++= it).result()
}
}
def newBuilder[A]: ReusableBuilder[A, Vector[A]] = new VectorBuilder[A]
  /** Creates a Vector of one element. Not safe for publication; the caller is responsible for calling `releaseFence`. */
private def single[A](elem: A): Vector[A] = {
val s = new Vector[A](0, 1, 0)
s.depth = 1
s.display0 = Array[AnyRef](elem.asInstanceOf[AnyRef])
s
}
@transient
private val NIL = new Vector[Nothing](0, 0, 0)
private val defaultApplyPreferredMaxLength: Int =
try System.getProperty("scala.collection.immutable.Vector.defaultApplyPreferredMaxLength",
"1024").toInt
catch {
case _: SecurityException => 1024
}
// Constants governing concat strategy for performance
private final val Log2ConcatFaster = 5
private final val TinyAppendFaster = 2
}
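// Illustrative sketch (added): exercising the `from` fast paths above. The object name
// and sample values are hypothetical; this is not part of the library.
private[immutable] object VectorFromExample {
  def demo(): Unit = {
    val small = Vector.from(1 to 10)   // knownSize <= 32: copied into a single display0 block
    val large = Vector.from(1 to 1000) // larger inputs fall through to (newBuilder ++= it).result()
    assert(small.length == 10 && large.length == 1000)
  }
}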
// in principle, most members should be private. however, access privileges must
// be carefully chosen to not prevent method inlining
/** Vector is a general-purpose, immutable data structure. It provides random access and updates
* in effectively constant time, as well as very fast append and prepend. Because vectors strike
* a good balance between fast random selections and fast random functional updates, they are
* currently the default implementation of immutable indexed sequences. It is backed by a little
* endian bit-mapped vector trie with a branching factor of 32. Locality is very good, but not
* contiguous, which is good for very large sequences.
*
* @see [[http://docs.scala-lang.org/overviews/collections/concrete-immutable-collection-classes.html#vectors "Scala's Collection Library overview"]]
* section on `Vectors` for more information.
*
* @tparam A the element type
*
* @define Coll `Vector`
* @define coll vector
* @define orderDependent
* @define orderDependentFold
* @define mayNotTerminateInf
* @define willNotTerminateInf
*/
final class Vector[+A] private[immutable] (private[collection] val startIndex: Int, private[collection] val endIndex: Int, private[immutable] val focus: Int)
extends AbstractSeq[A]
with IndexedSeq[A]
with IndexedSeqOps[A, Vector, Vector[A]]
with StrictOptimizedSeqOps[A, Vector, Vector[A]]
with IterableFactoryDefaults[A, Vector]
with VectorPointer[A]
with DefaultSerializable { self =>
override def iterableFactory: SeqFactory[Vector] = Vector
  // Code paths that mutate `dirty` _must_ call `Statics.releaseFence()` before returning from
  // the public method.
private[immutable] var dirty = false
  // While most JDKs would implicitly add this fence because of >= 1 final field, the spec only
  // mandates it if all fields are final, so let's add this in explicitly.
releaseFence()
def length: Int = endIndex - startIndex
private[collection] def initIterator[B >: A](s: VectorIterator[B]): Unit = {
s.initFrom(this)
if (dirty) s.stabilize(focus)
if (s.depth > 1) s.gotoPos(startIndex, startIndex ^ focus)
}
override def iterator: Iterator[A] = {
if(isEmpty)
Iterator.empty
else {
val s = new VectorIterator[A](startIndex, endIndex)
initIterator(s)
s
}
}
override def stepper[S <: Stepper[_]](implicit shape: StepperShape[A, S]): S with EfficientSplit = {
import convert.impl._
var depth = -1
val displaySource: VectorPointer[A] =
if (dirty) iterator.asInstanceOf[VectorIterator[A]]
else this
val trunk: Array[AnyRef] =
if (endIndex <= (1 << 5)) { depth = 0; displaySource.display0 }
else if (endIndex <= (1 << 10)) { depth = 1; displaySource.display1.asInstanceOf[Array[AnyRef]] }
else if (endIndex <= (1 << 15)) { depth = 2; displaySource.display2.asInstanceOf[Array[AnyRef]] }
else if (endIndex <= (1 << 20)) { depth = 3; displaySource.display3.asInstanceOf[Array[AnyRef]] }
else if (endIndex <= (1 << 25)) { depth = 4; displaySource.display4.asInstanceOf[Array[AnyRef]] }
else /* endIndex <= 1 << 30*/ { depth = 5; displaySource.display5.asInstanceOf[Array[AnyRef]] }
val s = shape.shape match {
case StepperShape.IntShape => new IntVectorStepper (startIndex, endIndex, depth, trunk)
case StepperShape.LongShape => new LongVectorStepper (startIndex, endIndex, depth, trunk)
case StepperShape.DoubleShape => new DoubleVectorStepper(startIndex, endIndex, depth, trunk)
case _ => shape.parUnbox(new AnyVectorStepper[A](startIndex, endIndex, depth, trunk))
}
s.asInstanceOf[S with EfficientSplit]
}
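  // Depth selection above (added note): endIndex <= 1 << 5 means display0 itself is the trunk
  // (depth 0); each further level multiplies capacity by 32, up to 1 << 30 at depth 5.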
// Ideally, clients will inline calls to map all the way down, including the iterator/builder methods.
// In principle, escape analysis could even remove the iterator/builder allocations and do it
// with local variables exclusively. But we're not quite there yet ...
@throws[IndexOutOfBoundsException]
def apply(index: Int): A = {
val idx = checkRangeConvert(index)
getElem(idx, idx ^ focus)
}
@throws[IndexOutOfBoundsException]
private def checkRangeConvert(index: Int) = {
val idx = index + startIndex
if (index >= 0 && idx < endIndex)
idx
else
throw new IndexOutOfBoundsException(s"$index is out of bounds (min 0, max ${endIndex-1})")
}
// requires structure is at pos oldIndex = xor ^ index
private final def getElem(index: Int, xor: Int): A = {
if (xor < (1 << 5)) { // level = 0
(display0
(index & 31).asInstanceOf[A])
} else if (xor < (1 << 10)) { // level = 1
(display1
((index >>> 5) & 31)
(index & 31).asInstanceOf[A])
} else if (xor < (1 << 15)) { // level = 2
(display2
((index >>> 10) & 31)
((index >>> 5) & 31)
(index & 31).asInstanceOf[A])
} else if (xor < (1 << 20)) { // level = 3
(display3
((index >>> 15) & 31)
((index >>> 10) & 31)
((index >>> 5) & 31)
(index & 31).asInstanceOf[A])
} else if (xor < (1 << 25)) { // level = 4
(display4
((index >>> 20) & 31)
((index >>> 15) & 31)
((index >>> 10) & 31)
((index >>> 5) & 31)
(index & 31).asInstanceOf[A])
} else if (xor < (1 << 30)) { // level = 5
(display5
((index >>> 25) & 31)
((index >>> 20) & 31)
((index >>> 15) & 31)
((index >>> 10) & 31)
((index >>> 5) & 31)
(index & 31).asInstanceOf[A])
} else { // level = 6
throw new IllegalArgumentException()
}
}
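  // Worked example (added): with focus 0 and index 1000, xor = 1000 < 1 << 10, so the
  // level-1 branch reads element 1000 & 31 = 8 of the block display1((1000 >>> 5) & 31)
  // = display1(31); indeed 1000 = 31 * 32 + 8.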
override def updated[B >: A](index: Int, elem: B): Vector[B] = updateAt(index, elem)
override def take(n: Int): Vector[A] = {
if (n <= 0)
Vector.empty
else if (startIndex < endIndex - n)
dropBack0(startIndex + n)
else
this
}
override def drop(n: Int): Vector[A] = {
if (n <= 0)
this
else if (startIndex < endIndex - n)
dropFront0(startIndex + n)
else
Vector.empty
}
override def takeRight(n: Int): Vector[A] = {
if (n <= 0)
Vector.empty
else if (endIndex - n > startIndex)
dropFront0(endIndex - n)
else
this
}
override def dropRight(n: Int): Vector[A] = {
if (n <= 0)
this
else if (endIndex - n > startIndex)
dropBack0(endIndex - n)
else
Vector.empty
}
override def head: A = {
if (isEmpty) throw new NoSuchElementException("empty.head")
apply(0)
}
override def tail: Vector[A] = {
if (isEmpty) throw new UnsupportedOperationException("empty.tail")
drop(1)
}
override def last: A = {
if (isEmpty) throw new UnsupportedOperationException("empty.last")
apply(length - 1)
}
override def init: Vector[A] = {
if (isEmpty) throw new UnsupportedOperationException("empty.init")
dropRight(1)
}
override def appendedAll[B >: A](suffix: collection.IterableOnce[B]): Vector[B] =
suffix match {
case v: Vector[B] =>
val thisLength = this.length
val thatLength = v.length
if (thisLength == 0) v
else if (thatLength == 0) this
else if (thatLength <= Vector.TinyAppendFaster) {
// Often it's better to append small numbers of elements (or prepend if RHS is a vector)
var v0: Vector[B] = this
var i = 0
while (i < thatLength) {
v0 = v0.appended(v(i))
i += 1
}
v0
} else {
if (thisLength < (thatLength >>> Vector.Log2ConcatFaster)) {
var v0 = v
val iter = this.reverseIterator
while(iter.hasNext) {
v0 = iter.next() +: v0
}
v0
} else {
new VectorBuilder[B]().addAll(this).addAll(suffix).result()
}
}
case _ =>
val thatKnownSize = suffix.knownSize
if (thatKnownSize == 0) this
else if (thatKnownSize > 0 && thatKnownSize <= Vector.TinyAppendFaster) {
var v0: Vector[B] = this
val iter = suffix.iterator
while (iter.hasNext) {
v0 = v0.appended(iter.next())
}
v0
} else {
val iter = suffix.iterator
if (iter.hasNext) {
new VectorBuilder[B]().addAll(this).addAll(suffix).result()
} else this
}
}
override def prependedAll[B >: A](prefix: collection.IterableOnce[B]): Vector[B] = {
    // Implementation similar to `appendedAll`: when one of the collections to concatenate (either `this`
    // or `prefix`) has a small number of elements compared to the other, we add them using `:+` or `+:` in a loop
import Vector.{Log2ConcatFaster, TinyAppendFaster}
if (prefix.iterator.isEmpty) this
else {
prefix match {
case prefix: collection.Iterable[B] =>
prefix.size match {
case n if n <= TinyAppendFaster || n < (this.size >>> Log2ConcatFaster) =>
var v: Vector[B] = this
val it = prefix.toIndexedSeq.reverseIterator
while (it.hasNext) v = it.next() +: v
v
case n if this.size < (n >>> Log2ConcatFaster) && prefix.isInstanceOf[Vector[_]] =>
var v = prefix.asInstanceOf[Vector[B]]
val it = this.iterator
while (it.hasNext) v = v :+ it.next()
v
case _ => super.prependedAll(prefix)
}
case _ =>
super.prependedAll(prefix)
}
}
}
// semi-private api
private[immutable] def updateAt[B >: A](index: Int, elem: B): Vector[B] = {
val idx = checkRangeConvert(index)
val s = new Vector[B](startIndex, endIndex, idx)
s.initFrom(this)
s.dirty = dirty
s.gotoPosWritable(focus, idx, focus ^ idx) // if dirty commit changes; go to new pos and prepare for writing
s.display0(idx & 31) = elem.asInstanceOf[AnyRef]
releaseFence()
s
}
private def gotoPosWritable(oldIndex: Int, newIndex: Int, xor: Int) = if (dirty) {
gotoPosWritable1(oldIndex, newIndex, xor)
} else {
gotoPosWritable0(newIndex, xor)
dirty = true
}
private def gotoFreshPosWritable(oldIndex: Int, newIndex: Int, xor: Int) = if (dirty) {
gotoFreshPosWritable1(oldIndex, newIndex, xor)
} else {
gotoFreshPosWritable0(oldIndex, newIndex, xor)
dirty = true
}
override def prepended[B >: A](value: B): Vector[B] = {
val thisLength = length
val result =
if (depth == 1 && thisLength < 32) {
val s = new Vector(0, thisLength + 1, 0)
s.depth = 1
val newDisplay0 = new Array[AnyRef](thisLength + 1)
System.arraycopy(display0, startIndex, newDisplay0, 1, thisLength)
newDisplay0(0) = value.asInstanceOf[AnyRef]
s.display0 = newDisplay0
s
} else if (thisLength > 0) {
val blockIndex = (startIndex - 1) & ~31
val lo = (startIndex - 1) & 31
if (startIndex != blockIndex + 32) {
val s = new Vector(startIndex - 1, endIndex, blockIndex)
s.initFrom(this)
s.dirty = dirty
s.gotoPosWritable(focus, blockIndex, focus ^ blockIndex)
s.display0(lo) = value.asInstanceOf[AnyRef]
s
} else {
val freeSpace = (1 << (5 * depth)) - endIndex // free space at the right given the current tree-structure depth
val shift = freeSpace & ~((1 << (5 * (depth - 1))) - 1) // number of elements by which we'll shift right (only move at top level)
val shiftBlocks = freeSpace >>> (5 * (depth - 1)) // number of top-level blocks
if (shift != 0) {
// case A: we can shift right on the top level
if (depth > 1) {
val newBlockIndex = blockIndex + shift
val newFocus = focus + shift
val s = new Vector(startIndex - 1 + shift, endIndex + shift, newBlockIndex)
s.initFrom(this)
s.dirty = dirty
s.shiftTopLevel(0, shiftBlocks) // shift right by n blocks
s.gotoFreshPosWritable(newFocus, newBlockIndex, newFocus ^ newBlockIndex) // maybe create pos; prepare for writing
s.display0(lo) = value.asInstanceOf[AnyRef]
s
} else {
val newBlockIndex = blockIndex + 32
val newFocus = focus
val s = new Vector(startIndex - 1 + shift, endIndex + shift, newBlockIndex)
s.initFrom(this)
s.dirty = dirty
s.shiftTopLevel(0, shiftBlocks) // shift right by n elements
s.gotoPosWritable(newFocus, newBlockIndex, newFocus ^ newBlockIndex) // prepare for writing
s.display0(shift - 1) = value.asInstanceOf[AnyRef]
s
}
} else if (blockIndex < 0) {
// case B: we need to move the whole structure
val move = (1 << (5 * (depth + 1))) - (1 << (5 * depth))
val newBlockIndex = blockIndex + move
val newFocus = focus + move
val s = new Vector(startIndex - 1 + move, endIndex + move, newBlockIndex)
s.initFrom(this)
s.dirty = dirty
s.gotoFreshPosWritable(newFocus, newBlockIndex, newFocus ^ newBlockIndex) // could optimize: we know it will create a whole branch
s.display0(lo) = value.asInstanceOf[AnyRef]
s
} else {
val newBlockIndex = blockIndex
val newFocus = focus
val s = new Vector(startIndex - 1, endIndex, newBlockIndex)
s.initFrom(this)
s.dirty = dirty
s.gotoFreshPosWritable(newFocus, newBlockIndex, newFocus ^ newBlockIndex)
s.display0(lo) = value.asInstanceOf[AnyRef]
s
}
}
} else Vector.single(value)
releaseFence()
result
}
override def appended[B >: A](value: B): Vector[B] = {
val thisLength = length
val result =
if (depth == 1 && thisLength < 32) {
val s = new Vector(0, thisLength + 1, 0)
s.depth = 1
val newDisplay0 = new Array[AnyRef](thisLength + 1)
System.arraycopy(display0, startIndex, newDisplay0, 0, thisLength)
newDisplay0(thisLength) = value.asInstanceOf[AnyRef]
s.display0 = newDisplay0
s
} else if (thisLength > 0) {
val blockIndex = endIndex & ~31 // round down to nearest 32
val lo = endIndex & 31 // remainder of blockIndex / 32
if (endIndex != blockIndex) {
val s = new Vector(startIndex, endIndex + 1, blockIndex)
s.initFrom(this)
s.dirty = dirty
s.gotoPosWritable(focus, blockIndex, focus ^ blockIndex)
s.display0(lo) = value.asInstanceOf[AnyRef]
s
} else {
val shift = startIndex & ~((1 << (5 * (depth - 1))) - 1)
val shiftBlocks = startIndex >>> (5 * (depth - 1))
if (shift != 0) {
if (depth > 1) {
val newBlockIndex = blockIndex - shift
val newFocus = focus - shift
val s = new Vector(startIndex - shift, endIndex + 1 - shift, newBlockIndex)
s.initFrom(this)
s.dirty = dirty
s.shiftTopLevel(shiftBlocks, 0) // shift left by n blocks
s.gotoFreshPosWritable(newFocus, newBlockIndex, newFocus ^ newBlockIndex)
s.display0(lo) = value.asInstanceOf[AnyRef]
s
} else {
val newBlockIndex = blockIndex - 32
val newFocus = focus
val s = new Vector(startIndex - shift, endIndex + 1 - shift, newBlockIndex)
s.initFrom(this)
s.dirty = dirty
            s.shiftTopLevel(shiftBlocks, 0) // shift left by n elements
s.gotoPosWritable(newFocus, newBlockIndex, newFocus ^ newBlockIndex)
if (s.display0.length < 32 - shift - 1) {
val newDisplay0 = new Array[AnyRef](32 - shift - 1)
s.display0.copyToArray(newDisplay0)
s.display0 = newDisplay0
}
s.display0(32 - shift) = value.asInstanceOf[AnyRef]
s
}
} else {
val newBlockIndex = blockIndex
val newFocus = focus
val s = new Vector(startIndex, endIndex + 1, newBlockIndex)
s.initFrom(this)
s.dirty = dirty
s.gotoFreshPosWritable(newFocus, newBlockIndex, newFocus ^ newBlockIndex)
s.display0(lo) = value.asInstanceOf[AnyRef]
s
}
}
} else Vector.single(value)
releaseFence()
result
}
// low-level implementation (needs cleanup, maybe move to util class)
private def shiftTopLevel(oldLeft: Int, newLeft: Int) = (depth - 1) match {
case 0 => display0 = copyRange(display0, oldLeft, newLeft)
case 1 => display1 = copyRange(display1, oldLeft, newLeft)
case 2 => display2 = copyRange(display2, oldLeft, newLeft)
case 3 => display3 = copyRange(display3, oldLeft, newLeft)
case 4 => display4 = copyRange(display4, oldLeft, newLeft)
case 5 => display5 = copyRange(display5, oldLeft, newLeft)
}
private def zeroLeft(array: Array[AnyRef], index: Int): Unit = {
var i = 0
while (i < index) {
array(i) = null
i += 1
}
}
private def zeroRight(array: Array[AnyRef], index: Int): Unit = {
var i = index
while (i < array.length) {
array(i) = null
i += 1
}
}
private def copyLeft[T <: AnyRef](array: Array[T], right: Int): Array[T] = {
val copy = array.clone()
java.util.Arrays.fill(copy.asInstanceOf[Array[AnyRef]], right, array.length, null)
copy
}
private def copyRight[T <: AnyRef](array: Array[T], left: Int): Array[T] = {
val copy = array.clone()
java.util.Arrays.fill(copy.asInstanceOf[Array[AnyRef]], 0, left, null)
copy
}
// requires structure is at index cutIndex and writable at level 0
private def cleanLeftEdge(cutIndex: Int) = {
if (cutIndex < (1 << 5)) {
zeroLeft(display0, cutIndex)
} else if (cutIndex < (1 << 10)) {
zeroLeft(display0, cutIndex & 31)
display1 = copyRight(display1, cutIndex >>> 5)
} else if (cutIndex < (1 << 15)) {
zeroLeft(display0, cutIndex & 31)
display1 = copyRight(display1, (cutIndex >>> 5) & 31)
display2 = copyRight(display2, cutIndex >>> 10)
} else if (cutIndex < (1 << 20)) {
zeroLeft(display0, cutIndex & 31)
display1 = copyRight(display1, (cutIndex >>> 5) & 31)
display2 = copyRight(display2, (cutIndex >>> 10) & 31)
display3 = copyRight(display3, cutIndex >>> 15)
} else if (cutIndex < (1 << 25)) {
zeroLeft(display0, cutIndex & 31)
display1 = copyRight(display1, (cutIndex >>> 5) & 31)
display2 = copyRight(display2, (cutIndex >>> 10) & 31)
display3 = copyRight(display3, (cutIndex >>> 15) & 31)
display4 = copyRight(display4, cutIndex >>> 20)
} else if (cutIndex < (1 << 30)) {
zeroLeft(display0, cutIndex & 31)
display1 = copyRight(display1, (cutIndex >>> 5) & 31)
display2 = copyRight(display2, (cutIndex >>> 10) & 31)
display3 = copyRight(display3, (cutIndex >>> 15) & 31)
display4 = copyRight(display4, (cutIndex >>> 20) & 31)
display5 = copyRight(display5, cutIndex >>> 25)
} else {
throw new IllegalArgumentException()
}
}
// requires structure is writable and at index cutIndex
private def cleanRightEdge(cutIndex: Int) = {
// we're actually sitting one block left if cutIndex lies on a block boundary
// this means that we'll end up erasing the whole block!!
if (cutIndex <= (1 << 5)) {
zeroRight(display0, cutIndex)
} else if (cutIndex <= (1 << 10)) {
zeroRight(display0, ((cutIndex - 1) & 31) + 1)
display1 = copyLeft(display1, cutIndex >>> 5)
} else if (cutIndex <= (1 << 15)) {
zeroRight(display0, ((cutIndex - 1) & 31) + 1)
display1 = copyLeft(display1, (((cutIndex - 1) >>> 5) & 31) + 1)
display2 = copyLeft(display2, cutIndex >>> 10)
} else if (cutIndex <= (1 << 20)) {
zeroRight(display0, ((cutIndex - 1) & 31) + 1)
display1 = copyLeft(display1, (((cutIndex - 1) >>> 5) & 31) + 1)
display2 = copyLeft(display2, (((cutIndex - 1) >>> 10) & 31) + 1)
display3 = copyLeft(display3, cutIndex >>> 15)
} else if (cutIndex <= (1 << 25)) {
zeroRight(display0, ((cutIndex - 1) & 31) + 1)
display1 = copyLeft(display1, (((cutIndex - 1) >>> 5) & 31) + 1)
display2 = copyLeft(display2, (((cutIndex - 1) >>> 10) & 31) + 1)
display3 = copyLeft(display3, (((cutIndex - 1) >>> 15) & 31) + 1)
display4 = copyLeft(display4, cutIndex >>> 20)
} else if (cutIndex <= (1 << 30)) {
zeroRight(display0, ((cutIndex - 1) & 31) + 1)
display1 = copyLeft(display1, (((cutIndex - 1) >>> 5) & 31) + 1)
display2 = copyLeft(display2, (((cutIndex - 1) >>> 10) & 31) + 1)
display3 = copyLeft(display3, (((cutIndex - 1) >>> 15) & 31) + 1)
display4 = copyLeft(display4, (((cutIndex - 1) >>> 20) & 31) + 1)
display5 = copyLeft(display5, cutIndex >>> 25)
} else {
throw new IllegalArgumentException()
}
}
private def requiredDepth(xor: Int) = {
if (xor < (1 << 5)) 1
else if (xor < (1 << 10)) 2
else if (xor < (1 << 15)) 3
else if (xor < (1 << 20)) 4
else if (xor < (1 << 25)) 5
else if (xor < (1 << 30)) 6
else throw new IllegalArgumentException()
}
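  // Examples (added): requiredDepth(0) == 1 (both indices fall in one 32-slot block), and
  // requiredDepth(1 << 12) == 3 (the indices differ at the third radix-32 digit).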
private def dropFront0(cutIndex: Int): Vector[A] = {
val blockIndex = cutIndex & ~31
val xor = cutIndex ^ (endIndex - 1)
val d = requiredDepth(xor)
val shift = cutIndex & ~((1 << (5 * d)) - 1)
// need to init with full display iff going to cutIndex requires swapping block at level >= d
val s = new Vector(cutIndex - shift, endIndex - shift, blockIndex - shift)
s.initFrom(this)
s.dirty = dirty
s.gotoPosWritable(focus, blockIndex, focus ^ blockIndex)
s.preClean(d)
s.cleanLeftEdge(cutIndex - shift)
releaseFence()
s
}
private def dropBack0(cutIndex: Int): Vector[A] = {
val blockIndex = (cutIndex - 1) & ~31
val xor = startIndex ^ (cutIndex - 1)
val d = requiredDepth(xor)
val shift = startIndex & ~((1 << (5 * d)) - 1)
val s = new Vector(startIndex - shift, cutIndex - shift, blockIndex - shift)
s.initFrom(this)
s.dirty = dirty
s.gotoPosWritable(focus, blockIndex, focus ^ blockIndex)
s.preClean(d)
s.cleanRightEdge(cutIndex - shift)
releaseFence()
s
}
override protected def applyPreferredMaxLength: Int = Vector.defaultApplyPreferredMaxLength
override def equals(o: Any): Boolean = o match {
case that: Vector[_] =>
if (this eq that) true
else if (this.length != that.length) false
else if ( //
this.startIndex == that.startIndex && //
this.endIndex == that.endIndex && //
(this.display0 eq that.display0) && //
(this.display1 eq that.display1) && //
(this.display2 eq that.display2) && //
(this.display3 eq that.display3) && //
(this.display4 eq that.display4) && //
(this.display5 eq that.display5) //
) true
else super.equals(o)
case _ => super.equals(o)
}
override def copyToArray[B >: A](xs: Array[B], start: Int, len: Int): Int = iterator.copyToArray(xs, start, len)
override def toVector: Vector[A] = this
override protected[this] def className = "Vector"
}
//TODO: When making this class private, make it final as well.
@deprecated("This class is not intended for public consumption and will be made private in the future.","2.13.0")
class VectorIterator[+A](_startIndex: Int, private[this] var endIndex: Int)
extends AbstractIterator[A]
with VectorPointer[A] {
private[this] final var blockIndex: Int = _startIndex & ~31
private[this] final var lo: Int = _startIndex & 31
private[this] final var endLo = Math.min(endIndex - blockIndex, 32)
override def hasNext: Boolean = _hasNext
private[this] final var _hasNext = blockIndex + lo < endIndex
private[this] def advanceToNextBlockIfNecessary(): Unit = {
if (lo == endLo) {
if (blockIndex + lo < endIndex) {
val newBlockIndex = blockIndex + 32
gotoNextBlockStart(newBlockIndex, blockIndex ^ newBlockIndex)
blockIndex = newBlockIndex
endLo = Math.min(endIndex - blockIndex, 32)
lo = 0
} else {
_hasNext = false
}
}
}
override def take(n: Int): Iterator[A] = {
if(n <= 0) {
_hasNext = false
endIndex = 0
} else {
val dropR = remainingElementCount - n
if(dropR > 0) {
endIndex -= dropR
endLo = Math.min(endIndex - blockIndex, 32)
_hasNext = blockIndex + lo < endIndex
}
}
this
}
override def drop(n: Int): Iterator[A] = {
if (n > 0) {
val longLo = lo.toLong + n
if (blockIndex + longLo < endIndex) {
// We only need to adjust the block if we are outside the current block
// We know that we are within the collection as < endIndex
lo = longLo.toInt
if (lo >= 32) {
blockIndex = (blockIndex + lo) & ~31
gotoNewBlockStart(blockIndex, depth)
endLo = Math.min(endIndex - blockIndex, 32)
lo = lo & 31
}
} else {
_hasNext = false
endIndex = 0
}
}
this
}
override def slice(from: Int, until: Int): Iterator[A] = {
val _until =
if(from > 0) {
drop(from)
until - from
} else until
take(_until)
}
override def next(): A = {
if (!_hasNext) throw new NoSuchElementException("reached iterator end")
val res = display0(lo).asInstanceOf[A]
lo += 1
advanceToNextBlockIfNecessary()
res
}
override def copyToArray[B >: A](xs: Array[B], start: Int, len: Int): Int = {
val xsLen = xs.length
val totalToBeCopied = IterableOnce.elemsToCopyToArray(remainingElementCount, xsLen, start, len)
var totalCopied = 0
while (hasNext && totalCopied < totalToBeCopied) {
val _start = start + totalCopied
val toBeCopied = IterableOnce.elemsToCopyToArray(endLo - lo, xsLen, _start, len - totalCopied)
Array.copy(display0, lo, xs, _start, toBeCopied)
totalCopied += toBeCopied
lo += toBeCopied
advanceToNextBlockIfNecessary()
}
totalCopied
}
private[collection] def remainingElementCount: Int = (endIndex - (blockIndex + lo)) max 0
override def knownSize: Int = remainingElementCount
/** Creates a new vector which consists of elements remaining in this iterator.
* Such a vector can then be split into several vectors using methods like `take` and `drop`.
*/
private[collection] def remainingVector: Vector[A] = {
if(!_hasNext) Vector.empty
else {
val v = new Vector(blockIndex + lo, endIndex, blockIndex + lo)
v.initFrom(this)
v
}
}
}
/** A class to build instances of `Vector`. This builder is reusable. */
final class VectorBuilder[A]() extends ReusableBuilder[A, Vector[A]] with VectorPointer[A] {
// possible alternative: start with display0 = null, blockIndex = -32, lo = 32
// to avoid allocating initial array if the result will be empty anyways
display0 = new Array[AnyRef](32)
depth = 1
/** The index within the final Vector of `this.display0(0)` */
private[this] var blockIndex = 0
/** The index within `this.display0` which is the next available index to write to.
* This value may be equal to display0.length, in which case before writing, a new block
* should be created (see advanceToNextBlockIfNecessary)*/
private[this] var lo = 0
/** Indicates an offset of the final vector from the actual underlying array elements. This is
* used for example in `drop(1)` where instead of copying the entire Vector, only the startIndex is changed.
*
   * This is present in the Builder because we may be able to share structure with a Vector that is
   * `addAll`'d to this, in which case we must track that Vector's startIndex offset.
   */
private[this] var startIndex = 0
def size: Int = (blockIndex & ~31) + lo - startIndex
def isEmpty: Boolean = size == 0
def nonEmpty: Boolean = size != 0
override def knownSize: Int = size
private[this] def advanceToNextBlockIfNecessary(): Unit = {
if (lo >= display0.length) {
val newBlockIndex = blockIndex + 32
gotoNextBlockStartWritable(newBlockIndex, blockIndex ^ newBlockIndex)
blockIndex = newBlockIndex
lo = 0
}
}
def addOne(elem: A): this.type = {
advanceToNextBlockIfNecessary()
display0(lo) = elem.asInstanceOf[AnyRef]
lo += 1
this
}
override def addAll(xs: IterableOnce[A]): this.type = {
xs match {
case v: Vector[A] if this.isEmpty && v.length >= 32 =>
depth = v.depth
blockIndex = (v.endIndex - 1) & ~31
lo = v.endIndex - blockIndex
startIndex = v.startIndex
/** `initFrom` will overwrite display0. Keep reference to it so we can reuse the array.*/
val initialDisplay0 = display0
initFrom(v)
stabilize(v.focus)
gotoPosWritable1(v.focus, blockIndex, v.focus ^ blockIndex, initialDisplay0)
depth match {
case 2 =>
display1((blockIndex >>> 5) & 31) = display0
case 3 =>
display1((blockIndex >>> 5) & 31) = display0
display2((blockIndex >>> 10) & 31) = display1
case 4 =>
display1((blockIndex >>> 5) & 31) = display0
display2((blockIndex >>> 10) & 31) = display1
display3((blockIndex >>> 15) & 31) = display2
case 5 =>
display1((blockIndex >>> 5) & 31) = display0
display2((blockIndex >>> 10) & 31) = display1
display3((blockIndex >>> 15) & 31) = display2
display4((blockIndex >>> 20) & 31) = display3
case 6 =>
display1((blockIndex >>> 5) & 31) = display0
display2((blockIndex >>> 10) & 31) = display1
display3((blockIndex >>> 15) & 31) = display2
display4((blockIndex >>> 20) & 31) = display3
display5((blockIndex >>> 25) & 31) = display4
case _ => ()
}
case _ =>
val it = (xs.iterator : Iterator[A]).asInstanceOf[Iterator[AnyRef]]
while (it.hasNext) {
advanceToNextBlockIfNecessary()
lo += it.copyToArray(xs = display0, start = lo, len = display0.length - lo)
}
}
this
}
def result(): Vector[A] = {
val size = this.size
if (size == 0)
return Vector.empty
val s = new Vector[A](startIndex, blockIndex + lo, 0) // should focus front or back?
s.initFrom(this)
if (depth > 1) s.gotoPos(startIndex, blockIndex + lo - 1) // we're currently focused to size - 1, not size!
releaseFence()
s
}
def clear(): Unit = {
preClean(1)
display0 = new Array[AnyRef](32)
blockIndex = 0
lo = 0
startIndex = 0
}
}
private[immutable] trait VectorPointer[+T] {
private[immutable] var depth: Int = _
private[immutable] var display0: Array[AnyRef] = _
private[immutable] var display1: Array[Array[AnyRef]] = _
private[immutable] var display2: Array[Array[Array[AnyRef]]] = _
private[immutable] var display3: Array[Array[Array[Array[AnyRef]]]] = _
private[immutable] var display4: Array[Array[Array[Array[Array[AnyRef]]]]] = _
private[immutable] var display5: Array[Array[Array[Array[Array[Array[AnyRef]]]]]] = _
protected def preClean(depth: Int): Unit = {
this.depth = depth
(depth - 1) match {
case 0 =>
display1 = null
display2 = null
display3 = null
display4 = null
display5 = null
case 1 =>
display2 = null
display3 = null
display4 = null
display5 = null
case 2 =>
display3 = null
display4 = null
display5 = null
case 3 =>
display4 = null
display5 = null
case 4 =>
display5 = null
case 5 =>
}
}
// used
private[immutable] final def initFrom[U](that: VectorPointer[U]): Unit = initFrom(that, that.depth)
private[immutable] final def initFrom[U](that: VectorPointer[U], depth: Int) = {
this.depth = depth
(depth - 1) match {
case -1 =>
case 0 =>
display0 = that.display0
case 1 =>
display1 = that.display1
display0 = that.display0
case 2 =>
display2 = that.display2
display1 = that.display1
display0 = that.display0
case 3 =>
display3 = that.display3
display2 = that.display2
display1 = that.display1
display0 = that.display0
case 4 =>
display4 = that.display4
display3 = that.display3
display2 = that.display2
display1 = that.display1
display0 = that.display0
case 5 =>
display5 = that.display5
display4 = that.display4
display3 = that.display3
display2 = that.display2
display1 = that.display1
display0 = that.display0
}
}
// go to specific position
// requires structure is at pos oldIndex = xor ^ index,
// ensures structure is at pos index
private[immutable] final def gotoPos(index: Int, xor: Int): Unit = {
if (xor < (1 << 5)) { // level = 0
// we're already at the block start pos
} else if (xor < (1 << 10)) { // level = 1
display0 = display1((index >>> 5) & 31)
} else if (xor < (1 << 15)) { // level = 2
display1 = display2((index >>> 10) & 31)
display0 = display1((index >>> 5) & 31)
} else if (xor < (1 << 20)) { // level = 3
display2 = display3((index >>> 15) & 31)
display1 = display2((index >>> 10) & 31)
display0 = display1((index >>> 5) & 31)
} else if (xor < (1 << 25)) { // level = 4
display3 = display4((index >>> 20) & 31)
display2 = display3((index >>> 15) & 31)
display1 = display2((index >>> 10) & 31)
display0 = display1((index >>> 5) & 31)
} else if (xor < (1 << 30)) { // level = 5
display4 = display5((index >>> 25) & 31)
display3 = display4((index >>> 20) & 31)
display2 = display3((index >>> 15) & 31)
display1 = display2((index >>> 10) & 31)
display0 = display1((index >>> 5) & 31)
} else { // level = 6
throw new IllegalArgumentException()
}
}
// USED BY ITERATOR
// xor: oldIndex ^ index
private[immutable] final def gotoNextBlockStart(index: Int, xor: Int): Unit = { // goto block start pos
if (xor < (1 << 10)) { // level = 1
display0 = display1((index >>> 5) & 31)
} else if (xor < (1 << 15)) { // level = 2
display1 = display2((index >>> 10) & 31)
display0 = display1(0)
} else if (xor < (1 << 20)) { // level = 3
display2 = display3((index >>> 15) & 31)
display1 = display2(0)
display0 = display1(0)
} else if (xor < (1 << 25)) { // level = 4
display3 = display4((index >>> 20) & 31)
display2 = display3(0)
display1 = display2(0)
display0 = display1(0)
} else if (xor < (1 << 30)) { // level = 5
display4 = display5((index >>> 25) & 31)
display3 = display4(0)
display2 = display3(0)
display1 = display2(0)
display0 = display1(0)
} else { // level = 6
throw new IllegalArgumentException()
}
}
private[immutable] final def gotoNewBlockStart(index: Int, depth: Int): Unit = {
if (depth > 5) display4 = display5((index >>> 25) & 31)
if (depth > 4) display3 = display4((index >>> 20) & 31)
if (depth > 3) display2 = display3((index >>> 15) & 31)
if (depth > 2) display1 = display2((index >>> 10) & 31)
if (depth > 1) display0 = display1((index >>> 5) & 31)
}
// USED BY BUILDER
// xor: oldIndex ^ index
private[immutable] final def gotoNextBlockStartWritable(index: Int, xor: Int): Unit = { // goto block start pos
if (xor < (1 << 10)) { // level = 1
if (depth == 1) { display1 = new Array(32); display1(0) = display0; depth += 1 }
display0 = new Array(32)
display1((index >>> 5) & 31) = display0
} else if (xor < (1 << 15)) { // level = 2
if (depth == 2) { display2 = new Array(32); display2(0) = display1; depth += 1 }
display0 = new Array(32)
display1 = new Array(32)
display1((index >>> 5) & 31) = display0
display2((index >>> 10) & 31) = display1
} else if (xor < (1 << 20)) { // level = 3
if (depth == 3) { display3 = new Array(32); display3(0) = display2; depth += 1 }
display0 = new Array(32)
display1 = new Array(32)
display2 = new Array(32)
display1((index >>> 5) & 31) = display0
display2((index >>> 10) & 31) = display1
display3((index >>> 15) & 31) = display2
} else if (xor < (1 << 25)) { // level = 4
if (depth == 4) { display4 = new Array(32); display4(0) = display3; depth += 1 }
display0 = new Array(32)
display1 = new Array(32)
display2 = new Array(32)
display3 = new Array(32)
display1((index >>> 5) & 31) = display0
display2((index >>> 10) & 31) = display1
display3((index >>> 15) & 31) = display2
display4((index >>> 20) & 31) = display3
} else if (xor < (1 << 30)) { // level = 5
if (depth == 5) { display5 = new Array(32); display5(0) = display4; depth += 1 }
display0 = new Array(32)
display1 = new Array(32)
display2 = new Array(32)
display3 = new Array(32)
display4 = new Array(32)
display1((index >>> 5) & 31) = display0
display2((index >>> 10) & 31) = display1
display3((index >>> 15) & 31) = display2
display4((index >>> 20) & 31) = display3
display5((index >>> 25) & 31) = display4
} else { // level = 6
throw new IllegalArgumentException()
}
}
// STUFF BELOW USED BY APPEND / UPDATE
  /** Sets array(index) to null and returns an array with the same contents as what was previously at array(index)
*
* If `destination` array is not null, original contents of array(index) will be copied to it, and it will be returned.
* Otherwise array(index).clone() is returned
*/
private[immutable] final def nullSlotAndCopy[T <: AnyRef](array: Array[Array[T]], index: Int, destination: Array[T] = null): Array[T] = {
val x = array(index)
array(index) = null
if (destination == null) x.clone()
else {
x.copyToArray(destination, 0)
destination
}
}
// make sure there is no aliasing
// requires structure is at pos index
// ensures structure is clean and at pos index and writable at all levels except 0
private[immutable] final def stabilize(index: Int) = (depth - 1) match {
case 5 =>
display5 = display5.clone()
display4 = display4.clone()
display3 = display3.clone()
display2 = display2.clone()
display1 = display1.clone()
display5((index >>> 25) & 31) = display4
display4((index >>> 20) & 31) = display3
display3((index >>> 15) & 31) = display2
display2((index >>> 10) & 31) = display1
display1((index >>> 5) & 31) = display0
case 4 =>
display4 = display4.clone()
display3 = display3.clone()
display2 = display2.clone()
display1 = display1.clone()
display4((index >>> 20) & 31) = display3
display3((index >>> 15) & 31) = display2
display2((index >>> 10) & 31) = display1
display1((index >>> 5) & 31) = display0
case 3 =>
display3 = display3.clone()
display2 = display2.clone()
display1 = display1.clone()
display3((index >>> 15) & 31) = display2
display2((index >>> 10) & 31) = display1
display1((index >>> 5) & 31) = display0
case 2 =>
display2 = display2.clone()
display1 = display1.clone()
display2((index >>> 10) & 31) = display1
display1((index >>> 5) & 31) = display0
case 1 =>
display1 = display1.clone()
display1((index >>> 5) & 31) = display0
case 0 =>
}
/// USED IN UPDATE AND APPEND BACK
// prepare for writing at an existing position
// requires structure is clean and at pos oldIndex = xor ^ newIndex,
// ensures structure is dirty and at pos newIndex and writable at level 0
private[immutable] final def gotoPosWritable0(newIndex: Int, xor: Int): Unit = (depth - 1) match {
case 5 =>
display5 = display5.clone()
display4 = nullSlotAndCopy(display5, (newIndex >>> 25) & 31)
display3 = nullSlotAndCopy(display4, (newIndex >>> 20) & 31)
display2 = nullSlotAndCopy(display3, (newIndex >>> 15) & 31)
display1 = nullSlotAndCopy(display2, (newIndex >>> 10) & 31)
display0 = nullSlotAndCopy(display1, (newIndex >>> 5) & 31)
case 4 =>
display4 = display4.clone()
display3 = nullSlotAndCopy(display4, (newIndex >>> 20) & 31)
display2 = nullSlotAndCopy(display3, (newIndex >>> 15) & 31)
display1 = nullSlotAndCopy(display2, (newIndex >>> 10) & 31)
display0 = nullSlotAndCopy(display1, (newIndex >>> 5) & 31)
case 3 =>
display3 = display3.clone()
display2 = nullSlotAndCopy(display3, (newIndex >>> 15) & 31)
display1 = nullSlotAndCopy(display2, (newIndex >>> 10) & 31)
display0 = nullSlotAndCopy(display1, (newIndex >>> 5) & 31)
case 2 =>
display2 = display2.clone()
display1 = nullSlotAndCopy(display2, (newIndex >>> 10) & 31)
display0 = nullSlotAndCopy(display1, (newIndex >>> 5) & 31)
case 1 =>
display1 = display1.clone()
display0 = nullSlotAndCopy(display1, (newIndex >>> 5) & 31)
case 0 =>
display0 = display0.clone()
}
// requires structure is dirty and at pos oldIndex,
// ensures structure is dirty and at pos newIndex and writable at level 0
private[immutable] final def gotoPosWritable1(oldIndex: Int, newIndex: Int, xor: Int, reuseDisplay0: Array[AnyRef] = null): Unit = {
if (xor < (1 << 5)) { // level = 0
display0 = display0.clone()
} else if (xor < (1 << 10)) { // level = 1
display1 = display1.clone()
display1((oldIndex >>> 5) & 31) = display0
display0 = nullSlotAndCopy(display1, (newIndex >>> 5) & 31)
} else if (xor < (1 << 15)) { // level = 2
display1 = display1.clone()
display2 = display2.clone()
display1((oldIndex >>> 5) & 31) = display0
display2((oldIndex >>> 10) & 31) = display1
display1 = nullSlotAndCopy(display2, (newIndex >>> 10) & 31)
display0 = nullSlotAndCopy(display1, (newIndex >>> 5) & 31, reuseDisplay0)
} else if (xor < (1 << 20)) { // level = 3
display1 = display1.clone()
display2 = display2.clone()
display3 = display3.clone()
display1((oldIndex >>> 5) & 31) = display0
display2((oldIndex >>> 10) & 31) = display1
display3((oldIndex >>> 15) & 31) = display2
display2 = nullSlotAndCopy(display3, (newIndex >>> 15) & 31)
display1 = nullSlotAndCopy(display2, (newIndex >>> 10) & 31)
display0 = nullSlotAndCopy(display1, (newIndex >>> 5) & 31, reuseDisplay0)
} else if (xor < (1 << 25)) { // level = 4
display1 = display1.clone()
display2 = display2.clone()
display3 = display3.clone()
display4 = display4.clone()
display1((oldIndex >>> 5) & 31) = display0
display2((oldIndex >>> 10) & 31) = display1
display3((oldIndex >>> 15) & 31) = display2
display4((oldIndex >>> 20) & 31) = display3
display3 = nullSlotAndCopy(display4, (newIndex >>> 20) & 31)
display2 = nullSlotAndCopy(display3, (newIndex >>> 15) & 31)
display1 = nullSlotAndCopy(display2, (newIndex >>> 10) & 31)
display0 = nullSlotAndCopy(display1, (newIndex >>> 5) & 31, reuseDisplay0)
} else if (xor < (1 << 30)) { // level = 5
display1 = display1.clone()
display2 = display2.clone()
display3 = display3.clone()
display4 = display4.clone()
display5 = display5.clone()
display1((oldIndex >>> 5) & 31) = display0
display2((oldIndex >>> 10) & 31) = display1
display3((oldIndex >>> 15) & 31) = display2
display4((oldIndex >>> 20) & 31) = display3
display5((oldIndex >>> 25) & 31) = display4
display4 = nullSlotAndCopy(display5, (newIndex >>> 25) & 31)
display3 = nullSlotAndCopy(display4, (newIndex >>> 20) & 31)
display2 = nullSlotAndCopy(display3, (newIndex >>> 15) & 31)
display1 = nullSlotAndCopy(display2, (newIndex >>> 10) & 31)
display0 = nullSlotAndCopy(display1, (newIndex >>> 5) & 31, reuseDisplay0)
} else { // level = 6
throw new IllegalArgumentException()
}
}
// USED IN DROP
private[immutable] final def copyRange[T <: AnyRef](array: Array[T], oldLeft: Int, newLeft: Int) = {
val elems = java.lang.reflect.Array.newInstance(array.getClass.getComponentType, 32).asInstanceOf[Array[T]]
java.lang.System.arraycopy(array, oldLeft, elems, newLeft, 32 - Math.max(newLeft, oldLeft))
elems
}
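  // Worked example (added): copyRange(a, 2, 0) copies a(2..31) into elems(0..29), while
  // copyRange(a, 0, 2) copies a(0..29) into elems(2..31); the length is 32 - max(newLeft, oldLeft).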
// USED IN APPEND
// create a new block at the bottom level (and possibly nodes on its path) and prepares for writing
// requires structure is clean and at pos oldIndex,
// ensures structure is dirty and at pos newIndex and writable at level 0
private[immutable] final def gotoFreshPosWritable0(oldIndex: Int, newIndex: Int, xor: Int): Unit = { // goto block start pos
if (xor < (1 << 5)) { // level = 0
// we're already at the block start
} else if (xor < (1 << 10)) { // level = 1
if (depth == 1) {
display1 = new Array(32)
display1((oldIndex >>> 5) & 31) = display0
depth += 1
}
display0 = new Array(32)
} else if (xor < (1 << 15)) { // level = 2
if (depth == 2) {
display2 = new Array(32)
display2((oldIndex >>> 10) & 31) = display1
depth += 1
}
display1 = display2((newIndex >>> 10) & 31)
if (display1 == null) display1 = new Array(32)
display0 = new Array(32)
} else if (xor < (1 << 20)) { // level = 3
if (depth == 3) {
display3 = new Array(32)
display3((oldIndex >>> 15) & 31) = display2
depth += 1
}
display2 = display3((newIndex >>> 15) & 31)
if (display2 == null) display2 = new Array(32)
display1 = display2((newIndex >>> 10) & 31)
if (display1 == null) display1 = new Array(32)
display0 = new Array(32)
} else if (xor < (1 << 25)) { // level = 4
if (depth == 4) {
display4 = new Array(32)
display4((oldIndex >>> 20) & 31) = display3
depth += 1
}
display3 = display4((newIndex >>> 20) & 31)
if (display3 == null) display3 = new Array(32)
display2 = display3((newIndex >>> 15) & 31)
if (display2 == null) display2 = new Array(32)
display1 = display2((newIndex >>> 10) & 31)
if (display1 == null) display1 = new Array(32)
display0 = new Array(32)
} else if (xor < (1 << 30)) { // level = 5
if (depth == 5) {
display5 = new Array(32)
display5((oldIndex >>> 25) & 31) = display4
depth += 1
}
display4 = display5((newIndex >>> 25) & 31)
if (display4 == null) display4 = new Array(32)
display3 = display4((newIndex >>> 20) & 31)
if (display3 == null) display3 = new Array(32)
display2 = display3((newIndex >>> 15) & 31)
if (display2 == null) display2 = new Array(32)
display1 = display2((newIndex >>> 10) & 31)
if (display1 == null) display1 = new Array(32)
display0 = new Array(32)
} else { // level = 6
throw new IllegalArgumentException()
}
}
// requires structure is dirty and at pos oldIndex,
// ensures structure is dirty and at pos newIndex and writable at level 0
private[immutable] final def gotoFreshPosWritable1(oldIndex: Int, newIndex: Int, xor: Int): Unit = {
stabilize(oldIndex)
gotoFreshPosWritable0(oldIndex, newIndex, xor)
}
}
| martijnhoekstra/scala | src/library/scala/collection/immutable/Vector.scala | Scala | apache-2.0 | 51,747 |
/* Copyright 2009-2016 EPFL, Lausanne */
import leon.annotation._
object IfExpr1 {
@pure
def foo(): Int = {
var a = 1
var b = 2
if({a = a + 1; a != b})
a = a + 3
else
b = a + b
a
} ensuring(_ == 2)
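  // Trace (added): a = 1, b = 2; evaluating the condition first sets a = 2, then 2 != 2 is
  // false, so the else branch sets b = 4 and a is returned unchanged, satisfying a == 2.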
}
| epfl-lara/leon | src/test/resources/regression/verification/xlang/valid/IfExpr1.scala | Scala | gpl-3.0 | 241 |
package fpinscala.parsing
import language.higherKinds
import language.implicitConversions
trait JSON
object JSON {
case object JNull extends JSON
case class JNumber(get: Double) extends JSON
case class JString(get: String) extends JSON
case class JBool(get: Boolean) extends JSON
case class JArray(get: IndexedSeq[JSON]) extends JSON
case class JObject(get: Map[String, JSON]) extends JSON
def jsonParser[Parser[+_]](P: Parsers[Parser]): Parser[JSON] = {
import P._
// val spaces = char(' ').many.slice
def array: Parser[JSON] = surround("[", "]")(value sep "," map (vs => JArray(vs.toIndexedSeq)))
def obj: Parser[JSON] = surround("{", "}")(keyval sep "," map (kvs => JObject(kvs.toMap)))
def keyval = escapedQuoted ** (":" *> value)
def lit: Parser[JSON] =
"null".as(JNull) |
double.map(JNumber) |
escapedQuoted.map(JString) |
"true".as(JBool(true)) |
"false".as(JBool(false))
def value: Parser[JSON] = lit | obj | array
obj | array
}
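  // Hypothetical usage sketch (added): running the parser requires a concrete `Parsers`
  // implementation; the `Reference` name below is an assumption drawn from the book's answers.
  //   def parse(json: String) = Reference.run(jsonParser(Reference))(json)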
}
| Tillaert/fpinscala | exercises/src/main/scala/fpinscala/parsing/JSON.scala | Scala | mit | 1,027 |
package bank
import vdf.VDF
class Link(val id: ID, var shape: Array[GeoPos], val from: Node, val to: Node, var function: VDF,
var atts: scala.collection.mutable.Map[String, Any]) extends NetworElement {
def network = from.network
} | frohfroh/SPARCAS | src/main/scala/bank/Link.scala | Scala | gpl-3.0 | 239 |
object Test {
def main(args: Array[String]) {
val gs = for (x <- (1 to 5)) yield { if (x % 2 == 0) List(1).seq else List(1).par }
println(gs.flatten)
println(gs.transpose)
val s = Stream(Vector(1).par, Vector(2).par)
println(s.flatten.toList)
println(s.transpose.map(_.toList).toList)
}
}
| felixmulder/scala | test/files/run/t4761.scala | Scala | bsd-3-clause | 318 |
package com.sksamuel.elastic4s.requests.searches.queries
import com.sksamuel.elastic4s.ElasticDsl._
import com.sksamuel.elastic4s.requests.common.DistanceUnit
import com.sksamuel.elastic4s.requests.searches.queries.geo.GeoDistanceQuery
import com.sksamuel.elastic4s.requests.searches.sort.{GeoDistanceSort, SortOrder}
import com.sksamuel.elastic4s.requests.searches.{GeoPoint, SearchBodyBuilderFn}
import com.sksamuel.elastic4s.ext.OptionImplicits._
import org.scalatest.funsuite.AnyFunSuite
import org.scalatest.matchers.should.Matchers
class SearchBodyBuilderFnTest extends AnyFunSuite with Matchers {
test("highlight with 'matchedMatchedFields' generates proper 'matched_fields' field as array field.") {
val request = search("example") highlighting {
highlight("text")
.matchedFields("text", "text.ngram", "text.japanese")
}
SearchBodyBuilderFn(request).string() shouldBe
"""{"highlight":{"fields":{"text":{"matched_fields":["text","text.ngram","text.japanese"]}}}}"""
}
test("highlight with 'highlighterType' generates 'type' field.") {
val request = search("example") highlighting {
highlight("text")
.highlighterType("fvh")
}
SearchBodyBuilderFn(request).string() shouldBe
"""{"highlight":{"fields":{"text":{"type":"fvh"}}}}"""
}
test("highlight with 'boundaryChars' generates 'boundary_chars' field.") {
val request = search("example") highlighting {
highlight("text")
.boundaryChars("test")
}
SearchBodyBuilderFn(request).string() shouldBe
"""{"highlight":{"fields":{"text":{"boundary_chars":"test"}}}}"""
}
test("geo distance query with sort") {
val geoDistanceQueryDefinition = GeoDistanceQuery(
field = "location",
point = Some(43.65435, -79.38871),
distanceStr = "100km".some
)
val req = search("partner-location") limit 100 query geoDistanceQueryDefinition sortBy GeoDistanceSort(
field = "location",
points = Seq(GeoPoint(43.65435, -79.38871)),
order = Some(SortOrder.ASC),
unit = Some(DistanceUnit.KILOMETERS)
)
SearchBodyBuilderFn(req).string shouldBe
"""{"query":{"geo_distance":{"distance":"100km","location":[-79.38871,43.65435]}},"size":100,"sort":[{"_geo_distance":{"location":[[-79.38871,43.65435]],"order":"asc","unit":"km"}}]}"""
}
}
| sksamuel/elastic4s | elastic4s-core/src/test/scala/com/sksamuel/elastic4s/requests/searches/queries/SearchBodyBuilderFnTest.scala | Scala | apache-2.0 | 2,343 |
package org.dbpedia.spotlight.db
import opennlp.tools.chunker.{ChunkerME, ChunkerModel}
import opennlp.tools.namefind.{NameFinderME, TokenNameFinderModel}
import opennlp.tools.util.Span
import org.dbpedia.spotlight.db.model.SurfaceFormStore
import org.dbpedia.spotlight.model._
/**
* @author Joachim Daiber
*
 * OpenNLP-based Spotter that performs NP chunking and selects the longest sub-chunk found in the dictionary of surface forms.
*
* This is similar to OpenNLPNGramSpotter but a bit simpler and uses a dictionary of known surface forms.
*
*/
class OpenNLPSpotter(
chunkerModel: Option[ChunkerModel],
nerModels: List[TokenNameFinderModel],
surfaceFormStore: SurfaceFormStore,
stopwords: Set[String],
spotFeatureWeights: Option[Seq[Double]],
phraseTags: Set[String] = Set("NP"),
nnTag: String = "NN"
) extends DBSpotter(surfaceFormStore, spotFeatureWeights, stopwords) {
val chunker = chunkerModel match {
case Some(m) => Some(new ChunkerME(m))
case None => None
}
val ners = nerModels.map{ m: TokenNameFinderModel =>
new NameFinderME(m)
}
def generateCandidates(sentence: List[Token]): Seq[Span] = {
val tokens = sentence.map(_.token).toArray
var spans = findUppercaseSequences(tokens)
chunker match {
case Some(c) => {
val tags = sentence.map(_.featureValue[String]("pos").get).toArray
this.synchronized {
spans ++= c.chunkAsSpans(tokens, tags).filter(chunkSpan => phraseTags.contains(chunkSpan.getType))
}
}
case None =>
}
    if (ners.nonEmpty)
this.synchronized {
spans ++= ners.flatMap(_.find(tokens))
}
spans
}
def typeOrder = Array.concat(Array("person", "organization", "location", "misc"), phraseTags.toArray, Array("Capital_Sequences"))
private var name = "Spotter based on an OpenNLP NP chunker and a simple spot dictionary."
def getName = name
def setName(name: String) {
this.name = name
}
}
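// Construction sketch (added): the model path, surface-form store, and stopword set below
// are placeholders, not values shipped with the project.
//   val spotter = new OpenNLPSpotter(
//     chunkerModel = Some(new ChunkerModel(new java.io.FileInputStream("en-chunker.bin"))),
//     nerModels = List.empty,
//     surfaceFormStore = surfaceForms,
//     stopwords = Set("the", "a", "of"),
//     spotFeatureWeights = None)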
| Skunnyk/dbpedia-spotlight-model | core/src/main/scala/org/dbpedia/spotlight/db/OpenNLPSpotter.scala | Scala | apache-2.0 | 1,970 |
package io.fintrospect.parameters
import io.fintrospect.util.{Extracted, Extraction, Extractor}
sealed trait FormFieldExtractor {
def apply(fields: Seq[Extractor[Form, _]], f: Form): Extraction[Form]
}
object WebFormFieldExtractor extends FormFieldExtractor {
override def apply(fields: Seq[Extractor[Form, _]], t: Form): Extraction[Form] = Extracted(t)
}
object StrictFormFieldExtractor extends FormFieldExtractor {
override def apply(fields: Seq[Extractor[Form, _]], form: Form): Extraction[Form] =
Extraction.combine(fields.map(_.extract(form))).map(_ => form)
}
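// Design note (added): the web variant accepts the form as-is so browser-facing endpoints
// can re-render it with per-field errors, while the strict variant combines every field's
// extraction and rejects the whole form if any single field is invalid.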
| daviddenton/fintrospect | core/src/main/scala/io/fintrospect/parameters/FormFieldExtractor.scala | Scala | apache-2.0 | 581 |
package io.stored.server.common
import io.viper.core.server.router.JsonResponse
import collection.mutable.ListBuffer
import org.json.{JSONObject, JSONArray}
object JsonUtil {
  def toJsonArray[A](list: List[A]) : JSONArray = {
    val arr = new JSONArray
    list.foreach{x: A => arr.put(x)}
    arr
  }
  // `B` is the mapped element type; JSONArray.put accepts any boxed value.
  def toJsonArray[A, B](list: List[A], f: A => B) : JSONArray = {
    val arr = new JSONArray
    list.foreach{x: A => arr.put(f(x))}
    arr
  }
def jsonResponse(args: AnyRef*) : JsonResponse = {
if (args.length % 2 != 0) throw new RuntimeException("expecting key value pairs")
val obj = new JSONObject
(0 until args.length by 2).foreach(i => obj.put(args(i).toString(), args(i+1)))
new JsonResponse(obj)
}
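  // Example (added): jsonResponse("status", "ok", "count", Integer.valueOf(3)) produces a
  // body containing "status":"ok" and "count":3; arguments must be AnyRef, hence the boxed Int.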
def toJsonArray(set: Set[Int]) : JSONArray = {
val jsonArray = new JSONArray
set.foreach(jsonArray.put)
jsonArray
}
def intSetFromJsonArray(rawJsonArray : String) : Set[Int] = {
val ja = new JSONArray(rawJsonArray)
(0 until ja.length()).map(i => ja.getInt(i)).toSet
}
}
| briangu/stored.io | src/main/scala/io/stored/server/common/JsonUtil.scala | Scala | apache-2.0 | 1,056 |
package asobu.distributed
import akka.actor.ActorSystem
import scala.collection.JavaConverters._
/**
* Validate if the distributed data is configured correctly.
*/
object SystemValidator {
def validate(system: ActorSystem): Either[String, Unit] = {
val cfg = system.settings.config
val rolePath = "akka.cluster.distributed-data.role"
if (!cfg.hasPath(rolePath))
Left("akka.distributed-data must be enabled")
else {
val ddRole = cfg.getString(rolePath)
val roles = cfg.getStringList("akka.cluster.roles").asScala
if (!ddRole.isEmpty && !roles.contains(ddRole))
Left(s"cluster roles (${roles.mkString}) must contain distributed-data scope role $ddRole")
else
Right(())
}
}
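  // Usage sketch (added): fail fast during startup; the handling shown is an assumption.
  //   SystemValidator.validate(system) match {
  //     case Left(err) => sys.error(err)
  //     case Right(()) => // configuration is consistent, continue booting
  //   }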
}
| iheartradio/asobu | distributed/src/main/scala/asobu/distributed/SystemValidator.scala | Scala | apache-2.0 | 747 |
package org.scalajs.core.compiler.test.util
import java.io._
import scala.tools.nsc._
import reporters.{Reporter, ConsoleReporter}
import org.junit.Assert._
import scala.util.matching.Regex
trait TestHelpers extends DirectTest {
private[this] val errBuffer = new CharArrayWriter
override def newReporter(settings: Settings): Reporter = {
val in = new BufferedReader(new StringReader(""))
val out = new PrintWriter(errBuffer)
new ConsoleReporter(settings, in, out)
}
/** will be prefixed to every code that is compiled. use for imports */
def preamble: String = ""
  /** Enriches a string so it can be compiled and checked with the assertions below */
implicit class CompileTests(val code: String) {
def hasErrors(expected: String): Unit = {
val reps = repResult {
assertFalse("snippet shouldn't compile", compileString(preamble + code))
}
assertEquals("should have right errors",
expected.stripMargin.trim, reps.trim)
}
def hasWarns(expected: String): Unit = {
val reps = repResult {
assertTrue("snippet should compile", compileString(preamble + code))
}
assertEquals("should have right warnings",
expected.stripMargin.trim, reps.trim)
}
def containsWarns(expected: String): Unit = {
val reps = repResult {
assertTrue("snippet should compile", compileString(preamble + code))
}
assertTrue("should contain the right warnings",
reps.trim.contains(expected.stripMargin.trim))
}
def hasNoWarns(): Unit = {
val reps = repResult {
assertTrue("snippet should compile", compileString(preamble + code))
}
assertTrue("should not have warnings", reps.isEmpty)
}
def fails(): Unit =
assertFalse("snippet shouldn't compile", compileString(preamble + code))
def warns(): Unit = {
val reps = repResult {
assertTrue("snippet should compile", compileString(preamble + code))
}
assertFalse("should have warnings", reps.isEmpty)
}
def succeeds(): Unit =
assertTrue("snippet should compile", compileString(preamble + code))
private def repResult(body: => Unit) = {
errBuffer.reset()
body
errBuffer.toString.replaceAll("\\r\\n?", "\\n")
}
}
implicit class CodeWrappers(sc: StringContext) {
def expr(): CompileTests =
new CompileTests(s"class A { ${sc.parts.mkString} }")
}
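  // Illustrative use of the DSL above (added); the snippets are hypothetical:
  //   expr"val x: Int = 1".succeeds()
  //   expr"def f: Int = true".fails()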
}
| lrytz/scala-js | compiler/src/test/scala/org/scalajs/core/compiler/test/util/TestHelpers.scala | Scala | bsd-3-clause | 2,436 |
package rest
import scala.concurrent.duration._
import akka.util.Timeout
import spray.http.StatusCodes._
import spray.http.MediaTypes._
import spray.json._
import spray.httpx.SprayJsonSupport
import scala.concurrent.Future
import concurrent.ExecutionContext.Implicits.global
import util.transform.json.DemonstratorProtocol._
import util.csv.CsvReader
import client.aws.S3Client
trait Debugger {
import org.slf4j._
val log = LoggerFactory.getLogger("Debugger")
}
import akka.actor.{Props, Actor}
import akka.pattern.ask
import spray.routing.HttpService
import spray.routing.directives.CachingDirectives
import spray.http._
import dal.mongodb._
import core._
class DatalizeServiceActor extends Actor with DatalizeService{
def actorRefFactory = context
def receive = runRoute(departuresRoute)
}
trait DatalizeService extends HttpService {
implicit val timeout = Timeout(20 seconds)
val departuresRoute = {
pathPrefix(""){
getFromDirectory("src/main/webapp/")
}~
path(""){
respondWithMediaType(`text/html`){
getFromFile("src/main/webapp/index.html")
}
}~
pathPrefix("api"){
dataUpload("")
}/*~
pathPrefix("scripts"){
getFromDirectory("target/scala-2.9.2/resource_managed/main/js/")
}~
pathPrefix("stylesheets"){
getFromDirectory("target/scala-2.9.2/resource_managed/main/webapp/css/")
}*/
}
private def dataUpload(user: String) = {
pathPrefix("upload"){
post{
path(""){
entity(as[String]) { data =>
  // Create one DAO actor and reuse it for every row, instead of spawning
  // a new actor per CSV row. The actor is deliberately not stopped here,
  // since its mailbox may still contain unprocessed rows.
  val dataHandler = actorRefFactory.actorOf(Props[DataItemsDAO])
  CsvReader(data).entries.foreach { row =>
    dataHandler ! DataItem(row.head, row.tail.map { case (header, value) =>
      (header, value.toInt)
    }.toVector)
  }
  complete("Saving to db")
}
}~
path("s3"){
entity(as[Array[Byte]]) { data =>
S3Client(data).save()
complete("Saving to S3")
}
}
}
}
}
}
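// Hedged boot sketch (illustrative; the actor-system name, interface and port are
// assumptions — the repository may wire this up differently):
//
// object Boot extends App {
//   implicit val system = akka.actor.ActorSystem("datalize")
//   val service = system.actorOf(akka.actor.Props[DatalizeServiceActor], "datalize-service")
//   akka.io.IO(spray.can.Http) ! spray.can.Http.Bind(service, interface = "localhost", port = 8080)
// }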
| javierg1975/Datamation | src/main/scala/rest/Routes.scala | Scala | mit | 2,199 |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.eval.stream
import akka.NotUsed
import akka.http.scaladsl.model.Uri
import akka.stream.Attributes
import akka.stream.FlowShape
import akka.stream.Inlet
import akka.stream.Outlet
import akka.stream.scaladsl.Source
import akka.stream.stage.GraphStage
import akka.stream.stage.GraphStageLogic
import akka.stream.stage.InHandler
import akka.stream.stage.OutHandler
import com.netflix.atlas.akka.DiagnosticMessage
import com.netflix.atlas.core.model.DataExpr
import com.netflix.atlas.core.model.EvalContext
import com.netflix.atlas.core.model.StatefulExpr
import com.netflix.atlas.core.model.StyleExpr
import com.netflix.atlas.core.model.TimeSeries
import com.netflix.atlas.core.util.IdentityMap
import com.netflix.atlas.eval.model.AggrDatapoint
import com.netflix.atlas.eval.model.TimeGroup
import com.netflix.atlas.eval.model.TimeSeriesMessage
import com.netflix.atlas.eval.stream.Evaluator.DataSources
import com.netflix.atlas.eval.stream.Evaluator.MessageEnvelope
import com.typesafe.scalalogging.StrictLogging
import scala.collection.mutable
/**
* Takes the set of data sources and time grouped partial aggregates as input and performs
* the final evaluation step.
*
* @param interpreter
* Used for evaluating the expressions.
*/
private[stream] class FinalExprEval(interpreter: ExprInterpreter)
extends GraphStage[FlowShape[AnyRef, Source[MessageEnvelope, NotUsed]]]
with StrictLogging {
private val in = Inlet[AnyRef]("FinalExprEval.in")
private val out = Outlet[Source[MessageEnvelope, NotUsed]]("FinalExprEval.out")
override val shape: FlowShape[AnyRef, Source[MessageEnvelope, NotUsed]] = FlowShape(in, out)
override def createLogic(inheritedAttributes: Attributes): GraphStageLogic = {
new GraphStageLogic(shape) with InHandler with OutHandler {
// Maintains the state for each expression we need to evaluate. TODO: implement
// limits to sanity-check against running out of memory
private val states =
scala.collection.mutable.AnyRefMap.empty[StyleExpr, Map[StatefulExpr, Any]]
// Step size for datapoints flowing through; it is determined by the first
// data-sources message that arrives and should be consistent for the life of this stage
private var step = -1L
// Each expression matched with a list of data source ids that should receive
// the data for it
private var recipients = List.empty[(StyleExpr, List[String])]
// Track the set of DataExprs per DataSource
private var dataSourceIdToDataExprs = Map.empty[String, Set[DataExpr]]
// Empty data map used as base to account for expressions that do not have any
// matches for a given time interval
private var noData = Map.empty[DataExpr, List[TimeSeries]]
private def error(expr: String, hint: String, t: Throwable): DiagnosticMessage = {
val str = s"$hint [[$expr]]: ${t.getClass.getSimpleName}: ${t.getMessage}"
DiagnosticMessage.error(str)
}
// Updates the recipients list
private def handleDataSources(ds: DataSources): Unit = {
import scala.jdk.CollectionConverters._
val sources = ds.getSources.asScala.toList
step = ds.stepSize()
// Get set of expressions before we update the list
val previous = recipients.map(t => t._1 -> t._1).toMap
// Error messages for invalid expressions
val errors = List.newBuilder[MessageEnvelope]
// Compute the new set of expressions
recipients = sources
.flatMap { s =>
try {
val exprs = interpreter.eval(Uri(s.getUri))
// Reuse the previously evaluated expression if available. States for the stateful
// expressions are maintained in an IdentityHashMap so if the instances change
// the state will be reset.
exprs.map(e => previous.getOrElse(e, e) -> s.getId)
} catch {
case e: Exception =>
errors += new MessageEnvelope(s.getId, error(s.getUri, "invalid expression", e))
Nil
}
}
.groupBy(_._1)
.map(t => t._1 -> t._2.map(_._2))
.toList
dataSourceIdToDataExprs = recipients
.flatMap(styleExprAndIds =>
styleExprAndIds._2.map(id => id -> styleExprAndIds._1.expr.dataExprs.toSet)
)
// Fold to mutable map to avoid creating new Map on every update
.foldLeft(mutable.Map.empty[String, Set[DataExpr]]) {
case (map, (id, dataExprs)) => {
map += map.get(id).fold(id -> dataExprs) { vs =>
id -> (dataExprs ++ vs)
}
}
}
.toMap
// Cleanup state for any expressions that are no longer needed
val removed = previous.keySet -- recipients.map(_._1).toSet
removed.foreach { expr =>
states -= expr
}
// Setup no data map
noData = recipients
.flatMap(_._1.expr.dataExprs)
.distinct
.map {
// If there is no grouping, then use a no data line, otherwise use an empty set
case e if e.finalGrouping.isEmpty =>
e -> List(TimeSeries.noData(e.query, step))
case e =>
e -> Nil
}
.toMap
push(out, Source(errors.result()))
}
// Generate a no data line for a full expression. Use the tagging information from the
// first data expression that is found.
private def noData(expr: StyleExpr): TimeSeries = {
expr.expr.dataExprs.headOption match {
case Some(e) => TimeSeries.noData(e.query, step)
case None => TimeSeries.noData(step)
}
}
// Perform the final evaluation and create a source with the TimeSeriesMessages
// addressed to each recipient
private def handleData(group: TimeGroup): Unit = {
// Finalize the DataExprs, needed as input for further evaluation
val timestamp = group.timestamp
val groupedDatapoints = group.dataExprValues
val dataExprToDatapoints = noData ++ groupedDatapoints.map {
case (k, vs) =>
k -> AggrDatapoint.aggregate(vs.values).map(_.toTimeSeries)
}
// Collect input and intermediate data size per DataSource
val rateCollector = new EvalDataRateCollector(timestamp, step)
dataSourceIdToDataExprs.foreach {
case (id, dataExprSet) =>
dataExprSet.foreach(dataExpr => {
group.dataExprValues.get(dataExpr).foreach { info =>
rateCollector.incrementInput(id, dataExpr, info.numRawDatapoints)
rateCollector.incrementIntermediate(id, dataExpr, info.values.size)
}
})
}
// Generate the time series and diagnostic output
val output = recipients.flatMap {
case (styleExpr, ids) =>
// Use an identity map for the state to ensure that multiple equivalent stateful
// expressions, e.g. derivative(a) + derivative(a), will have isolated state.
val state = states.getOrElse(styleExpr, IdentityMap.empty[StatefulExpr, Any])
val context = EvalContext(timestamp, timestamp + step, step, state)
try {
val result = styleExpr.expr.eval(context, dataExprToDatapoints)
states(styleExpr) = result.state
val data = if (result.data.isEmpty) List(noData(styleExpr)) else result.data
val msgs = data.map { t =>
TimeSeriesMessage(styleExpr, context, t.withLabel(styleExpr.legend(t)))
}
// Collect final data size per DataSource
ids.foreach(rateCollector.incrementOutput(_, data.size))
ids.flatMap { id =>
msgs.map { msg =>
new MessageEnvelope(id, msg)
}
}
} catch {
case e: Exception =>
val msg = error(styleExpr.toString, "final eval failed", e)
ids.map { id =>
new MessageEnvelope(id, msg)
}
}
}
val rateMessages = rateCollector.getAll.map {
case (id, rate) => new MessageEnvelope(id, rate)
}.toList
push(out, Source(output ++ rateMessages))
}
override def onPush(): Unit = {
grab(in) match {
case ds: DataSources => handleDataSources(ds)
case data: TimeGroup => handleData(data)
case v => throw new MatchError(v)
}
}
override def onPull(): Unit = {
pull(in)
}
override def onUpstreamFinish(): Unit = {
completeStage()
}
setHandlers(in, out, this)
}
}
}
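// Hedged wiring sketch (illustrative; how the ExprInterpreter instance is obtained
// is an assumption). FinalExprEval emits a Source per input element, so the stream
// is typically flattened afterwards:
//
// val finalEval = akka.stream.scaladsl.Flow.fromGraph(new FinalExprEval(interpreter))
// val messages: Source[MessageEnvelope, NotUsed] =
//   input.via(finalEval).flatMapConcat(identity)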
| brharrington/atlas | atlas-eval/src/main/scala/com/netflix/atlas/eval/stream/FinalExprEval.scala | Scala | apache-2.0 | 9,499 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala
package reflect
package internal
/** Additions to the type checker that can be added at
* run time. Typically these are added by
* compiler plugins. */
trait AnnotationCheckers {
self: SymbolTable =>
/** An additional checker for annotations on types.
* Typically these are registered by compiler plugins
* with the addAnnotationChecker method. */
trait AnnotationChecker {
/**
* Selectively activate this annotation checker. When using both an annotation checker
* and an analyzer plugin, it is common to run both of them only during selected
* compiler phases. See documentation in AnalyzerPlugin.isActive.
*/
def isActive(): Boolean = true
/** Check that the annotations on two types conform. */
def annotationsConform(tpe1: Type, tpe2: Type): Boolean
/** Refine the computed least upper bound of a list of types.
* All this should do is add annotations. */
def annotationsLub(tp: Type, ts: List[Type]): Type = tp
/** Refine the computed greatest lower bound of a list of types.
* All this should do is add annotations. */
def annotationsGlb(tp: Type, ts: List[Type]): Type = tp
/** Refine the bounds on type parameters to the given type arguments. */
def adaptBoundsToAnnotations(bounds: List[TypeBounds], tparams: List[Symbol],
targs: List[Type]): List[TypeBounds] = bounds
/**
* Modify the type that has thus far been inferred for a tree. All this should
* do is add annotations.
*/
@deprecatedOverriding("create an AnalyzerPlugin and use pluginsTyped", "2.10.1")
def addAnnotations(tree: Tree, tpe: Type): Type = tpe
/**
* Decide whether this analyzer plugin can adapt a tree that has an annotated type to the
* given type tp, taking into account the given mode (see method adapt in trait Typers).
*/
@deprecatedOverriding("create an AnalyzerPlugin and use canAdaptAnnotations", "2.10.1")
def canAdaptAnnotations(tree: Tree, mode: Mode, pt: Type): Boolean = false
/**
* Adapt a tree that has an annotated type to the given type tp, taking into account the given
* mode (see method adapt in trait Typers).
*
* An implementation cannot rely on canAdaptAnnotations being called before. If the implementing
* class cannot do the adapting, it should return the tree unchanged.
*/
@deprecatedOverriding("create an AnalyzerPlugin and use adaptAnnotations", "2.10.1")
def adaptAnnotations(tree: Tree, mode: Mode, pt: Type): Tree = tree
/**
* Adapt the type of a return expression. The decision of a typer plugin whether the type
* should be adapted is based on the type of the expression which is returned, as well as the
* result type of the method (pt).
*
* By default, this method simply returns the passed `default` type.
*/
@deprecatedOverriding(
"Create an AnalyzerPlugin and use pluginsTypedReturn. Note: the 'tree' argument here is\\n"+
"the 'expr' of a Return tree; 'pluginsTypedReturn' takes the Return tree itself as argument", "2.10.1")
def adaptTypeOfReturn(tree: Tree, pt: Type, default: => Type): Type = default
}
// Syncnote: Annotation checkers are inaccessible to reflection, so no synchronization of the var is necessary.
/** The list of annotation checkers that have been registered */
private[this] var annotationCheckers: List[AnnotationChecker] = Nil
/** Register an annotation checker. Typically these are added by compiler plugins. */
def addAnnotationChecker(checker: AnnotationChecker): Unit = {
if (!(annotationCheckers contains checker))
annotationCheckers = checker :: annotationCheckers
}
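  // Hedged usage sketch (illustrative): a compiler plugin could register a
  // minimal checker that treats all annotations as conforming:
  //
  //   addAnnotationChecker(new AnnotationChecker {
  //     def annotationsConform(tpe1: Type, tpe2: Type): Boolean = true
  //   })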
/** Remove all annotation checkers */
def removeAllAnnotationCheckers(): Unit = {
annotationCheckers = Nil
}
/** @see AnnotationChecker.annotationsConform */
def annotationsConform(tp1: Type, tp2: Type): Boolean =
if (annotationCheckers.isEmpty || (tp1.annotations.isEmpty && tp2.annotations.isEmpty)) true
else annotationCheckers.forall(checker => {
!checker.isActive() || checker.annotationsConform(tp1,tp2)
})
/** @see AnnotationChecker.annotationsLub */
def annotationsLub(tpe: Type, ts: List[Type]): Type =
if (annotationCheckers.isEmpty) tpe
else annotationCheckers.foldLeft(tpe)((tpe, checker) =>
if (!checker.isActive()) tpe else checker.annotationsLub(tpe, ts))
/** @see AnnotationChecker.annotationsGlb */
def annotationsGlb(tpe: Type, ts: List[Type]): Type =
if (annotationCheckers.isEmpty) tpe
else annotationCheckers.foldLeft(tpe)((tpe, checker) =>
if (!checker.isActive()) tpe else checker.annotationsGlb(tpe, ts))
/** @see AnnotationChecker.adaptBoundsToAnnotations */
def adaptBoundsToAnnotations(bounds: List[TypeBounds], tparams: List[Symbol],
targs: List[Type]): List[TypeBounds] =
if (annotationCheckers.isEmpty) bounds
else annotationCheckers.foldLeft(bounds)((bounds, checker) =>
if (!checker.isActive()) bounds else checker.adaptBoundsToAnnotations(bounds, tparams, targs))
/* The following methods will be removed along with the deprecated methods in AnnotationChecker. */
def addAnnotations(tree: Tree, tpe: Type): Type =
if (annotationCheckers.isEmpty) tpe
else annotationCheckers.foldLeft(tpe)((tpe, checker) =>
if (!checker.isActive()) tpe else checker.addAnnotations(tree, tpe))
def canAdaptAnnotations(tree: Tree, mode: Mode, pt: Type): Boolean =
if (annotationCheckers.isEmpty) false
else annotationCheckers.exists(checker => {
checker.isActive() && checker.canAdaptAnnotations(tree, mode, pt)
})
def adaptAnnotations(tree: Tree, mode: Mode, pt: Type): Tree =
if (annotationCheckers.isEmpty) tree
else annotationCheckers.foldLeft(tree)((tree, checker) =>
if (!checker.isActive()) tree else checker.adaptAnnotations(tree, mode, pt))
def adaptTypeOfReturn(tree: Tree, pt: Type, default: => Type): Type =
if (annotationCheckers.isEmpty) default
else annotationCheckers.foldLeft(default)((tpe, checker) =>
if (!checker.isActive()) tpe else checker.adaptTypeOfReturn(tree, pt, tpe))
}
| scala/scala | src/reflect/scala/reflect/internal/AnnotationCheckers.scala | Scala | apache-2.0 | 6,512 |
package org.jetbrains.plugins.scala.testingSupport.scalatest.staticStringTest
import org.jetbrains.plugins.scala.testingSupport.IntegrationTest
/**
* @author Roman.Shein
* @since 26.06.2015.
*/
trait FunSuiteStaticStringTest extends IntegrationTest {
val funSuiteClassName = "FunSuiteStringTest"
val funSuiteFileName = funSuiteClassName + ".scala"
def addFunSuite(): Unit = {
addFileToProject(funSuiteFileName,
"""
|import org.scalatest._
|
|class FunSuiteStringTest extends FunSuite {
|
| val constName = "consts"
| test("should" + " work with sums") {
| }
|
| test(constName) {
| }
|
| test("should sum " + constName) {
| }
|}
""".stripMargin.trim()
)
}
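  // Note (illustrative, based on the tests below): the test locator is expected to
  // statically evaluate constant string expressions, so "should" + " work with sums"
  // resolves to the test name "should work with sums", and the val `constName`
  // resolves to "consts".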
def testFunSuiteSum() = {
addFunSuite()
assert(checkConfigAndSettings(createTestFromLocation(5, 10, funSuiteFileName), funSuiteClassName,
"should work with sums"))
}
def testFunSuiteVal() = {
addFunSuite()
assert(checkConfigAndSettings(createTestFromLocation(8, 10, funSuiteFileName), funSuiteClassName,
"consts"))
}
def testFunSuiteValSum() = {
addFunSuite()
assert(checkConfigAndSettings(createTestFromLocation(11, 10, funSuiteFileName), funSuiteClassName,
"should sum consts"))
}
}
| triggerNZ/intellij-scala | test/org/jetbrains/plugins/scala/testingSupport/scalatest/staticStringTest/FunSuiteStaticStringTest.scala | Scala | apache-2.0 | 1,355 |
package com.twitter.finagle.http2
import com.twitter.finagle.Stack
import com.twitter.finagle.http2.transport.server.H2ServerFilter
import com.twitter.finagle.netty4.http.handler.UriValidatorHandler
import com.twitter.finagle.netty4.transport.ChannelTransport
import com.twitter.finagle.param.Timer
import io.netty.channel.ChannelPipeline
import io.netty.handler.codec.http2.Http2MultiplexHandler
import scala.jdk.CollectionConverters._
private[http2] object Http2PipelineInitializer {
/**
* Install Finagle specific filters and handlers common across all HTTP/2 only pipelines
*
* @param pipeline which to operate on.
* @param params used to configure the server.
*/
def setupServerPipeline(pipeline: ChannelPipeline, params: Stack.Params): Unit = {
    // We insert immediately after Http2MultiplexHandler#0, which we know is the
    // last HTTP/2 handler before frames are converted to HTTP/1.1.
val timer = params[Timer].timer
val codecName = pipeline
.context(classOf[Http2MultiplexHandler])
.name
pipeline
.addAfter(codecName, H2ServerFilter.HandlerName, new H2ServerFilter(timer, pipeline.channel))
.remove(UriValidatorHandler)
pruneDeadHandlers(pipeline)
}
private[this] def pruneDeadHandlers(pipeline: ChannelPipeline): Unit = {
val deadPipelineEntries =
pipeline.iterator.asScala
.map(_.getKey)
.dropWhile(_ != H2ServerFilter.HandlerName)
.drop(1) // Now we're past the H2ServerFilter
.takeWhile(_ != ChannelTransport.HandlerName)
.toList
deadPipelineEntries.foreach(pipeline.remove(_))
}
}
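// Hedged usage sketch (illustrative; a real server installs the HTTP/2 codec and
// Http2MultiplexHandler before calling this):
//
// val initializer = new io.netty.channel.ChannelInitializer[io.netty.channel.Channel] {
//   def initChannel(ch: io.netty.channel.Channel): Unit = {
//     // ... install Http2FrameCodec and Http2MultiplexHandler here ...
//     Http2PipelineInitializer.setupServerPipeline(ch.pipeline(), params)
//   }
// }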
| twitter/finagle | finagle-http2/src/main/scala/com/twitter/finagle/http2/Http2PipelineInitializer.scala | Scala | apache-2.0 | 1,628 |
package KeYmaeraD
/*
abstract class Rational
case class ExactInt(n) extends Rational
case class
*/
// exact nums are either rationals or integers. Is this worth it?
object Exact {
val zero : Num = Integer(0);
val one : Num = Integer(1);
val negone : Num = Integer(-1);
trait Num {
def +(that: Num): Num
def -(that: Num): Num
def unary_- : Num
def *(that: Num): Num
def /(that: Num): Num
def <(that: Num): Boolean
def <=(that: Num): Boolean
def >(that: Num): Boolean
def >=(that: Num): Boolean
def ==(that: Num): Boolean
def is_positive : Boolean
def is_zero : Boolean
def is_one : Boolean
def intValue : Int
def compare(that: Num): Int
}
case class Rational(p: BigInt, q: BigInt) extends Num {
// This check eats a lot of time!
// require(q != 0);
def this(p: Int, q: Int) = this(BigInt(p),BigInt(q));
def this(n: Int) = this(BigInt(n),BigInt(1));
def this(n: BigInt) = this(n,BigInt(1));
def this(s: String) = this(BigInt(s),1);
def +(that: Num): Num = that match {
case Rational(p1,q1) =>
new Rational(p * q1 + p1 * q, q * q1)
case Integer(m) => new Rational(p + m * q, q)
}
def -(that: Num): Num = that match {
case Rational(p1,q1) =>
new Rational(p * q1 - p1 * q, q * q1)
case Integer(m) => new Rational(p - m*q, q)
}
def unary_- : Num = {
(new Rational( - p, q)).reduce;
}
def *(that: Num): Num = that match {
case Rational(p1,q1) => new Rational(p * p1, q * q1)
// case num@Int(m) if num.is_one => this
// case num@Int(m) if num.is_zero => num
case Integer(m) => new Rational(p * m, q)
}
def /(that: Num): Num = that match {
case Rational(p1,q1) => new Rational(p * q1, q * p1)
// case num@Int(m) if num.is_one => this
case Integer(m) => new Rational(p , q * m)
}
def <(that: Num): Boolean = {
(that - this).is_positive
}
def <=(that: Num): Boolean = {
val v = that - this;
v.is_positive || v.is_zero
}
def >(that: Num): Boolean = {
(this - that).is_positive
}
def >=(that: Num): Boolean = {
val v = this - that;
v.is_positive || v.is_zero
}
def ==(that: Num): Boolean = that match {
case Rational(p1,q1) => p * q1 == q * p1
case Integer(m) => m * q == p
}
def is_positive : Boolean = {
(p * q).signum == 1
}
def is_zero : Boolean = {
p.signum == 0;
}
def is_one : Boolean = {
p == q;
}
def intValue : Int = {
p.intValue / q.intValue
}
def reduce : Num = {
val g = p gcd q;
if(g == q) new Integer(p/g)
else new Rational(p/g, q/g)
}
def compare(that: Num): Int = {
val d = this - that;
if(d.is_positive) 1
else if(d.is_zero) 0
else -1
}
override def toString = {
if(q == BigInt(1)) p.toString
else {p.toString + "/" + q.toString}
}
}
case class Integer(n: BigInt) extends Num {
// This check eats a lot of time!
// require(q != 0);
def this(n: Int) = this(BigInt(n));
def this(s: String) = this(BigInt(s));
def +(that: Num): Num = that match {
case Rational(p,q) => new Rational(q * n + p, q)
case Integer(m) => new Integer(n + m)
}
def -(that: Num): Num = that match {
case Rational(p,q) => new Rational(q * n - p, q)
case Integer(m) => new Integer(n - m)
}
def unary_- : Num = {
new Integer(-n)
}
def *(that: Num): Num = that match {
case Rational(p,q) => new Rational(p * n, q)
case Integer(m) => new Integer(n * m)
}
def /(that: Num): Num = that match {
case Rational(p,q) => new Rational(q * n, p)
case Integer(m) => new Rational(n, m)
}
def <(that: Num): Boolean = {
(that - this).is_positive
}
def <=(that: Num): Boolean = {
val v = that - this;
v.is_positive || v.is_zero
}
def >(that: Num): Boolean = {
(this - that).is_positive
}
def >=(that: Num): Boolean = {
val v = this - that;
v.is_positive || v.is_zero
}
def ==(that: Num): Boolean = that match {
case Rational(p,q) => n * q == p
case Integer(m) => n == m
}
def is_positive : Boolean = {
n.signum == 1
}
def is_zero : Boolean = {
n.signum == 0;
}
def is_one : Boolean = {
n == BigInt(1);
}
def intValue : Int = {
n.intValue
}
def compare(that: Num): Int = {
val d = this - that;
if(d.is_positive) 1
else if(d.is_zero) 0
else -1
}
override def toString = {
n.toString
}
}
}
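// Hedged usage sketch (illustrative):
//
// import KeYmaeraD.Exact._
// val half = new Rational(1, 2) // Int constructor
// val third = new Rational(1, 3)
// assert((half + third) == new Rational(5, 6)) // 1/2 + 1/3 = 5/6
// assert((Integer(2) * half).is_one)           // 2 * 1/2 = 1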
| keymaerad/KeYmaeraD | rational.scala | Scala | bsd-3-clause | 4,659 |
import scala.reflect.runtime.universe._
import scala.reflect.runtime.{currentMirror => cm}
import scala.reflect.ClassTag
object R { override def toString = "R" }
class Foo {
import Test._
def foo = {
val classTag = implicitly[ClassTag[R.type]]
val sym = cm.moduleSymbol(classTag.runtimeClass)
val cls = cm.reflectModule(sym)
cls.instance
}
}
object Test extends dotty.runtime.LegacyApp {
val foo = new Foo
println(foo.foo)
}
| yusuke2255/dotty | tests/pending/run/reflection-modulemirror-toplevel-good.scala | Scala | bsd-3-clause | 441 |
/*
* ecalogic: a tool for performing energy consumption analysis.
*
* Copyright (c) 2013, J. Neutelings, D. Peelen, M. Schoolderman
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification,
* are permitted provided that the following conditions are met:
*
* Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* Redistributions in binary form must reproduce the above copyright notice, this
* list of conditions and the following disclaimer in the documentation and/or
* other materials provided with the distribution.
*
* Neither the name of the Radboud University Nijmegen nor the names of its
* contributors may be used to endorse or promote products derived from
* this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
* ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package nl.ru.cs.ecalogic
package util
import scala.collection.mutable._
import java.io.{File, PrintWriter}
import java.net.URI
import scala.util.control.NonFatal
/** Base trait for error handlers.
*
* @author Jascha Neutelings
*/
trait ErrorHandler {
/** Resets the error handler to its original state. */
def reset()
/** Reports a fatal error.
*
* @param exception error to report
*/
def fatalError(exception: ECAException): Nothing
/** Reports an error.
*
* @param exception error to report
*/
def error(exception: ECAException)
/** Reports a warning.
*
* @param exception error to report
*/
def warning(exception: ECAException)
/** Returns whether an error has occurred. */
def errorOccurred: Boolean
/** Tries to evaluate the given expression, returning the result wrapped in Some on success; otherwise reports the error and returns None.
*
* @param expression expression to evaluate
* @tparam A type of the expression
* @return optional result
*/
def tryCatch[A](expression: => A): Option[A] = try Some(expression) catch {
case e: ECAException =>
error(e)
None
}
/** Does nothing if no errors occurred; otherwise, reports and throws a fatal exception
*
* @param complaint explanation of the error condition
*/
def successOrElse(complaint: String) {
if(errorOccurred) fatalError(new ECAException(complaint))
}
/** Reports all exceptions thrown by ''block''.
*
* Caught [[nl.ru.cs.ecalogic.ECAException]]s that have been reported before won't be reported again. Other non-fatal
* exceptions as defined by [[scala.util.control.NonFatal]] are wrapped inside an ''ECAException'' and reported. Any
* exceptions that have been caught are thrown again.
*
* @param block code block to execute
* @tparam A result type of code block
* @return result of code block
*/
def report[A](block: => A): A = {
try {
reset()
block
} catch {
case e: ECAException if e.reported => throw e
case e: ECAException => fatalError(e)
case NonFatal(e) => fatalError(new ECAException(e.toString, e))
}
}
/** Reports all exceptions thrown by ''block'' and reports a fatal error given by the ''complaint'' message
* if any non-fatal errors were reported by ''block''.
*
* @param complaint message to report if non-fatal errors occurred
* @param block code block to execute
* @tparam A result type of code block
* @return result of code block
*/
def reportAll[A](complaint: String)(block: => A): A = report {
val res = block
successOrElse(complaint)
res
}
}
/** Default implementation for error handlers.
*
* Prints error messages to a `java.io.PrintWriter`. Counts the number of errors reported and throws an exception
* if the maximum number is exceeded. If file name and/or source string are provided more informative messages will
* be generated. Throws an exception on a fatal error after reporting it first.
*
* @param maxErrorCount maximum number of error messages
* @param writer output
* @param sourceText optional input
* @param sourceURI optional uri
*
* @author Jascha Neutelings
*/
class DefaultErrorHandler(maxErrorCount: Int = 10,
writer: PrintWriter = new PrintWriter(Console.err),
sourceText: Option[String] = None,
sourceURI: Option[URI] = None) extends ErrorHandler {
import ECAException.StackTrace
private var errorCount = 0
private def printMessage(tpe: String, message: String, position: Option[Position], stackTrace: StackTrace) {
writer.print(tpe)
sourceURI.filter(_ => position.isDefined).foreach(u => writer.print(s" in '$u'"))
position.foreach(p => writer.print(s" at line ${p.line}, column ${p.column}"))
writer.printf(":%n %s%n", message)
sourceText.map(_ + "\\n").zip(position).foreach { case (s, Position(l, c)) =>
val line = s.lines.drop(l - 1).next()
val trimmedLine = line.dropWhile(_ <= ' ')
if (!trimmedLine.isEmpty) {
val n = c - line.takeWhile(_ <= ' ').length
writer.printf("%n %s", trimmedLine)
writer.printf("%n %" + n + "s%n", "^")
}
}
if (!stackTrace.isEmpty) {
writer.println("Stacktrace:")
stackTrace.foreach { case (name, position) =>
writer.println(s" $name${position.fold("")(p => s" [$p]")}")
}
}
writer.flush()
}
def reset() {
errorCount = 0
}
def errorOccurred: Boolean = errorCount > 0
def fatalError(exception: ECAException) = {
//printMessage("Fatal error", exception.message, exception.position, exception.stackTrace)
//throw new ECAException(s"Fatal error occurred: ${exception.message}", exception).markReported
throw exception
}
def error(exception: ECAException) {
printMessage("Error", exception.message, exception.position, exception.stackTrace)
errorCount += 1
if (maxErrorCount > 0 && errorCount >= maxErrorCount) {
fatalError(new ECAException("Maximum number of errors reached."))
}
}
def warning(exception: ECAException) {
printMessage("Warning", exception.message, exception.position, exception.stackTrace)
}
}
/** Error handler that caches error and prints them on demand in the order they occurred.
*
* Wraps around an existing error handler.
*
* @param output error handler to wrap
*
* @author Jascha Neutelings
*/
class CachingErrorHandler(val output: ErrorHandler = new DefaultErrorHandler) extends ErrorHandler {
private val errors = Queue.empty[(ECAException, Boolean)]
def reset() {
errors.clear()
output.reset()
}
def errorOccurred: Boolean = output.errorOccurred || errors.exists(!_._2)
def fatalError(exception: ECAException) = {
flush()
output.fatalError(exception)
}
def error(exception: ECAException) {
errors += ((exception, false))
}
def warning(exception: ECAException) {
errors += ((exception, true))
}
/** Flushes all error messages to the underlying error handler and clears the buffer. */
def flush() {
errors.dequeueAll(_ => true).foreach { case (e, w) =>
if (w)
output.warning(e)
else
output.error(e)
}
}
}
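// Hedged usage sketch (illustrative; `parse` and its result type are assumptions):
//
// val errorHandler = new DefaultErrorHandler(sourceText = Some(source), sourceURI = Some(file.toURI))
// val program = errorHandler.reportAll("Parsing failed.") {
//   parse(source)
// }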
| jangroothuijse/ecalogic-c | src/main/scala/nl/ru/cs/ecalogic/util/ErrorHandler.scala | Scala | bsd-3-clause | 8,094 |
package app.flashcard.service
import java.util.UUID
import akka.NotUsed
import akka.stream.FlowShape
import akka.stream.scaladsl.{Broadcast, Flow, GraphDSL, Sink, Source, ZipWith}
import app.flashcard.repository.UserRepository
import app.flashcard.repository.UserRepository.User
import app.flashcard.service.MailService.{Mail, MailTemplate}
import scala.concurrent.duration._
class UserService(userRepository: UserRepository, mailService: MailService) {
private def charge: Flow[UUID, BigDecimal, NotUsed] = Flow[UUID].map { _ => BigDecimal(4) }
  /**
   *                 ---> only language ---> mailService.template --->
   * User as input   ---> only id ---------> charge ----------------->  Mail as output
   *                 ------------------------------------------------>
   *
   * Broadcast splits the user data into three streams (mapping a Broadcast outlet is allowed).
   * ZipWith[MailTemplate, BigDecimal, User, Mail] zips the MailTemplate, BigDecimal and User
   * back into a Mail; mailService.fillBody can be used for that.
   */
private def calculate: Flow[User, Mail, NotUsed] = Flow.fromGraph(GraphDSL.create() { implicit builder: GraphDSL.Builder[NotUsed] =>
import GraphDSL.Implicits._
val user = builder.add(Broadcast[User](3))
val zip = builder.add(ZipWith[MailTemplate, BigDecimal, User, Mail](mailService.fillBody))
//@formatter:off
user.map(_.language) ~> mailService.template ~> zip.in0
user.map(_.id) ~> charge ~> zip.in1
user ~> zip.in2
//@formatter:on
FlowShape(user.in, zip.out)
})
  /**
   * Periodically loads all users, builds their summary mails and sends them:
   * Source.tick fires after an initial 10-second delay and every 30 seconds thereafter.
   */
def sendSummary = Source.tick(10 second, 30 seconds, ())
.mapConcat(_ => userRepository.find)
.via(calculate)
.via(mailService.send)
.to(Sink.ignore)
}
object UserService {
def apply(userRepository: UserRepository, mailService: MailService): UserService =
new UserService(userRepository, mailService)
}
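// Hedged run sketch (illustrative; the materializer setup is an assumption):
//
// implicit val system = akka.actor.ActorSystem("flashcard")
// implicit val materializer = akka.stream.ActorMaterializer()
// UserService(userRepository, mailService).sendSummary.run()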
| mateuszjancy/intro-to-akka-stream | service/src/main/scala/app/flashcard/service/UserService.scala | Scala | apache-2.0 | 1,961 |
/*
* Copyright 2016-2020 Daniel Urban and contributors listed in AUTHORS
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.example.invariant
import spire.math.Polynomial
import spire.std.float._
import spire.syntax.eq._
import io.circe._
import io.circe.syntax._
import dev.tauri.seals.Reified
import dev.tauri.seals.circe.Codecs._
object Main extends App with PolySupport {
  /** x² + 2.0x + 4.0 */
val original: Polynomial[Float] =
Polynomial(Map(2 -> 1.0f, 1 -> 2.0f, 0 -> 4.0f))
val serialized: Json =
original.asJson
val deserialized: Polynomial[Float] =
serialized.as[Polynomial[Float]].getOrElse(throw new Exception)
implicitly[spire.algebra.Eq[Polynomial[Float]]]
assert(original === deserialized)
println(s"Original: ${original}")
println(s"Deserialized: ${deserialized}")
}
trait PolySupport {
implicit val polyReified: Reified[Polynomial[Float]] = {
Reified[List[(Int, Float)]].imap[Polynomial[Float]] { lst =>
Polynomial[Float](lst.toMap)
} { poly =>
poly.terms.map(_.toTuple)
}
}
}
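// Note (illustrative): Reified#imap derives the Polynomial codec by round-tripping
// through List[(Int, Float)] — (exponent, coefficient) pairs — so the existing
// Reified instance for that list shape does the actual serialization work.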
| durban/seals | examples/invariant/src/main/scala/com/example/invariant/main.scala | Scala | apache-2.0 | 1,591 |
package org.bitcoins.testkitcore.gen
import org.scalacheck.Gen
import org.bitcoins.core.hd._
import scala.util.Try
/** Generators related to HD wallet functionality
*/
object HDGenerators {
/** Generates a BIP 32 path segment
*/
def bip32Child: Gen[BIP32Node] = Gen.oneOf(softBip32Child, hardBip32Child)
/** Generates a non-hardened BIP 32 path segment
*/
def softBip32Child: Gen[BIP32Node] =
for {
index <- NumberGenerator.positiveInts
} yield BIP32Node(index, hardened = false)
/** Generates a hardened BIP 32 path segment
*/
def hardBip32Child: Gen[BIP32Node] =
for {
soft <- softBip32Child
} yield soft.copy(hardened = true)
/** Generates a BIP32 path
*/
def bip32Path: Gen[BIP32Path] =
for {
children <- Gen.listOf(bip32Child)
} yield BIP32Path(children.toVector)
/** Generates a non-hardened BIP 32 path
*/
def softBip32Path: Gen[BIP32Path] =
for {
children <- Gen.listOf(softBip32Child)
} yield BIP32Path(children.toVector)
/** Generates a valid BIP44 chain type (external/internal change)
*/
def hdChainType: Gen[HDChainType] =
Gen.oneOf(HDChainType.Change, HDChainType.External)
/** Generates a valid BIP44 chain path
*/
def hdChain: Gen[HDChain] =
for {
chainType <- hdChainType
account <- hdAccount
} yield HDChain(chainType, account)
/** Generates a valid HD coin type
*/
def hdCoinType: Gen[HDCoinType] =
Gen.oneOf(HDCoinType.Testnet, HDCoinType.Bitcoin)
/** Generates a valid HD purpose path */
def hdPurpose: Gen[HDPurpose] =
Gen.oneOf(HDPurposes.Legacy, HDPurposes.NestedSegWit, HDPurposes.SegWit)
def hdCoin: Gen[HDCoin] =
for {
purpose <- hdPurpose
coinType <- hdCoinType
} yield HDCoin(purpose, coinType)
/** Generates a valid HD account path
*/
def hdAccount: Gen[HDAccount] =
for {
coin <- hdCoin
int <- NumberGenerator.positiveInts
} yield HDAccount(coin = coin, index = int)
/** Generates a valid HD adddress path
*/
def hdAddress: Gen[HDAddress] =
for {
chain <- hdChain
int <- NumberGenerator.positiveInts
} yield HDAddress(chain, int)
/** Generates a valid BIP44 path
*/
def legacyHdPath: Gen[LegacyHDPath] =
for {
coinType <- hdCoinType
purpose = HDPurposes.Legacy
accountIndex <- NumberGenerator.positiveInts
addressIndex <- NumberGenerator.positiveInts
chainType <- hdChainType
} yield LegacyHDPath(coinType = coinType,
addressIndex = addressIndex,
accountIndex = accountIndex,
chainType = chainType)
  def segwitHdPath: Gen[SegWitHDPath] =
for {
coinType <- hdCoinType
accountIndex <- NumberGenerator.positiveInts
addressIndex <- NumberGenerator.positiveInts
chainType <- hdChainType
} yield SegWitHDPath(coinType = coinType,
addressIndex = addressIndex,
accountIndex = accountIndex,
chainType = chainType)
  def nestedSegwitHdPath: Gen[NestedSegWitHDPath] =
for {
coinType <- hdCoinType
accountIndex <- NumberGenerator.positiveInts
addressIndex <- NumberGenerator.positiveInts
chainType <- hdChainType
} yield NestedSegWitHDPath(coinType = coinType,
addressIndex = addressIndex,
accountIndex = accountIndex,
chainType = chainType)
def hdPath: Gen[HDPath] =
    Gen.oneOf(legacyHdPath, segwitHdPath, nestedSegwitHdPath)
type HDPathConstructor = Vector[BIP32Node] => Try[HDPath]
def hdPathWithConstructor: Gen[(HDPath, HDPathConstructor)] =
for {
path <- hdPath
} yield path match {
case legacy: LegacyHDPath => (legacy, LegacyHDPath(_))
case nested: NestedSegWitHDPath => (nested, NestedSegWitHDPath(_))
case segwit: SegWitHDPath => (segwit, SegWitHDPath(_))
}
/** Generates a pair of paths that can be diffed.
*
* In code, this means that this is always true:
* {{{
* diffableHDPaths.map {
    * case (short, long) => short.diff(long).isDefined
* }
* }}}
*/
def diffableHDPaths: Gen[(BIP32Path, BIP32Path)] = {
for {
path <- bip32Path.suchThat(_.path.length > 1)
n <- Gen.chooseNum(0, path.path.length - 1)
} yield (BIP32Path(path.path.dropRight(n)), path)
}
}
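// Hedged usage sketch (illustrative): these generators plug directly into
// ScalaCheck properties, e.g.
//
// import org.scalacheck.Prop.forAll
// val diffProp = forAll(HDGenerators.diffableHDPaths) { case (short, long) =>
//   short.diff(long).isDefined
// }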
| bitcoin-s/bitcoin-s | testkit-core/src/main/scala/org/bitcoins/testkitcore/gen/HDGenerators.scala | Scala | mit | 4,518 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.graphframes
import java.io.File
import com.google.common.io.Files
import org.apache.commons.io.FileUtils
import org.apache.hadoop.fs.Path
import org.apache.spark.graphx.{Edge, Graph}
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.{IntegerType, StringType}
import org.apache.spark.sql.{DataFrame, Row}
import org.graphframes.examples.Graphs
class GraphFrameSuite extends SparkFunSuite with GraphFrameTestSparkContext {
import GraphFrame._
var vertices: DataFrame = _
val localVertices = Map(1L -> "A", 2L -> "B", 3L -> "C")
val localEdges = Map((1L, 2L) -> "love", (2L, 1L) -> "hate", (2L, 3L) -> "follow")
var edges: DataFrame = _
var tempDir: File = _
override def beforeAll(): Unit = {
super.beforeAll()
tempDir = Files.createTempDir()
vertices = sqlContext.createDataFrame(localVertices.toSeq).toDF("id", "name")
edges = sqlContext.createDataFrame(localEdges.toSeq.map {
case ((src, dst), action) =>
(src, dst, action)
}).toDF("src", "dst", "action")
}
override def afterAll(): Unit = {
FileUtils.deleteQuietly(tempDir)
super.afterAll()
}
test("construction from DataFrames") {
val g = GraphFrame(vertices, edges)
g.vertices.collect().foreach { case Row(id: Long, name: String) =>
assert(localVertices(id) === name)
}
g.edges.collect().foreach { case Row(src: Long, dst: Long, action: String) =>
assert(localEdges((src, dst)) === action)
}
intercept[IllegalArgumentException] {
val badVertices = vertices.select(col("id").as("uid"), col("name"))
GraphFrame(badVertices, edges)
}
intercept[IllegalArgumentException] {
val badEdges = edges.select(col("src").as("srcId"), col("dst"), col("action"))
GraphFrame(vertices, badEdges)
}
intercept[IllegalArgumentException] {
val badEdges = edges.select(col("src"), col("dst").as("dstId"), col("action"))
GraphFrame(vertices, badEdges)
}
}
test("construction from edge DataFrame") {
val g = GraphFrame.fromEdges(edges)
assert(g.vertices.columns === Array("id"))
val idsFromVertices = g.vertices.select("id").rdd.map(_.getLong(0)).collect()
val idsFromVerticesSet = idsFromVertices.toSet
assert(idsFromVertices.length === idsFromVerticesSet.size)
val idsFromEdgesSet = g.edges.select("src", "dst").rdd.flatMap { case Row(src: Long, dst: Long) =>
Seq(src, dst)
}.collect().toSet
assert(idsFromVerticesSet === idsFromEdgesSet)
}
test("construction from GraphX") {
val vv: RDD[(Long, String)] = vertices.rdd.map { case Row(id: Long, name: String) =>
(id, name)
}
val ee: RDD[Edge[String]] = edges.rdd.map { case Row(src: Long, dst: Long, action: String) =>
Edge(src, dst, action)
}
val g = Graph(vv, ee)
val gf = GraphFrame.fromGraphX(g)
gf.vertices.select("id", "attr").collect().foreach { case Row(id: Long, name: String) =>
assert(localVertices(id) === name)
}
gf.edges.select("src", "dst", "attr").collect().foreach {
case Row(src: Long, dst: Long, action: String) =>
assert(localEdges((src, dst)) === action)
}
}
test("convert to GraphX: Long IDs") {
val gf = GraphFrame(vertices, edges)
val g = gf.toGraphX
g.vertices.collect().foreach { case (id0, Row(id1: Long, name: String)) =>
assert(id0 === id1)
assert(localVertices(id0) === name)
}
g.edges.collect().foreach {
case Edge(src0, dst0, Row(src1: Long, dst1: Long, action: String)) =>
assert(src0 === src1)
assert(dst0 === dst1)
assert(localEdges((src0, dst0)) === action)
}
}
test("convert to GraphX: Int IDs") {
val vv = vertices.select(col("id").cast(IntegerType).as("id"), col("name"))
val ee = edges.select(col("src").cast(IntegerType).as("src"),
col("dst").cast(IntegerType).as("dst"), col("action"))
val gf = GraphFrame(vv, ee)
val g = gf.toGraphX
// Int IDs should be directly cast to Long, so ID values should match.
val vCols = gf.vertexColumnMap
val eCols = gf.edgeColumnMap
g.vertices.collect().foreach { case (id0: Long, attr: Row) =>
val id1 = attr.getInt(vCols("id"))
val name = attr.getString(vCols("name"))
assert(id0 === id1)
assert(localVertices(id0) === name)
}
g.edges.collect().foreach {
case Edge(src0: Long, dst0: Long, attr: Row) =>
val src1 = attr.getInt(eCols("src"))
val dst1 = attr.getInt(eCols("dst"))
val action = attr.getString(eCols("action"))
assert(src0 === src1)
assert(dst0 === dst1)
assert(localEdges((src0, dst0)) === action)
}
}
test("convert to GraphX: String IDs") {
try {
val vv = vertices.select(col("id").cast(StringType).as("id"), col("name"))
val ee = edges.select(col("src").cast(StringType).as("src"),
col("dst").cast(StringType).as("dst"), col("action"))
val gf = GraphFrame(vv, ee)
val g = gf.toGraphX
// String IDs will be re-indexed, so ID values may not match.
val vCols = gf.vertexColumnMap
val eCols = gf.edgeColumnMap
// First, get index.
val new2oldID: Map[Long, String] = g.vertices.map { case (id: Long, attr: Row) =>
(id, attr.getString(vCols("id")))
}.collect().toMap
// Same as in test with Int IDs, but with re-indexing
g.vertices.collect().foreach { case (id0: Long, attr: Row) =>
val id1 = attr.getString(vCols("id"))
val name = attr.getString(vCols("name"))
assert(new2oldID(id0) === id1)
assert(localVertices(new2oldID(id0).toInt) === name)
}
g.edges.collect().foreach {
case Edge(src0: Long, dst0: Long, attr: Row) =>
val src1 = attr.getString(eCols("src"))
val dst1 = attr.getString(eCols("dst"))
val action = attr.getString(eCols("action"))
assert(new2oldID(src0) === src1)
assert(new2oldID(dst0) === dst1)
assert(localEdges((new2oldID(src0).toInt, new2oldID(dst0).toInt)) === action)
}
} catch {
case e: Exception =>
e.printStackTrace()
throw e
}
}
test("save/load") {
val g0 = GraphFrame(vertices, edges)
val vPath = new Path(tempDir.getPath, "vertices").toString
val ePath = new Path(tempDir.getPath, "edges").toString
g0.vertices.write.parquet(vPath)
g0.edges.write.parquet(ePath)
val v1 = sqlContext.read.parquet(vPath)
val e1 = sqlContext.read.parquet(ePath)
val g1 = GraphFrame(v1, e1)
g1.vertices.collect().foreach { case Row(id: Long, name: String) =>
assert(localVertices(id) === name)
}
g1.edges.collect().foreach { case Row(src: Long, dst: Long, action: String) =>
assert(localEdges((src, dst)) === action)
}
}
test("degree metrics") {
val g = GraphFrame(vertices, edges)
assert(g.outDegrees.columns === Seq("id", "outDegree"))
val outDegrees = g.outDegrees.collect().map { case Row(id: Long, outDeg: Int) =>
(id, outDeg)
}.toMap
assert(outDegrees === Map(1L -> 1, 2L -> 2))
assert(g.inDegrees.columns === Seq("id", "inDegree"))
val inDegrees = g.inDegrees.collect().map { case Row(id: Long, inDeg: Int) =>
(id, inDeg)
}.toMap
assert(inDegrees === Map(1L -> 1, 2L -> 1, 3L -> 1))
assert(g.degrees.columns === Seq("id", "degree"))
val degrees = g.degrees.collect().map { case Row(id: Long, deg: Int) =>
(id, deg)
}.toMap
assert(degrees === Map(1L -> 2, 2L -> 3, 3L -> 1))
}
test("cache") {
val g = GraphFrame(vertices, edges)
g.persist(StorageLevel.MEMORY_ONLY)
g.unpersist()
    // org.apache.spark.sql.execution.columnar.InMemoryRelation is private and not
    // accessible, which has prevented us from validating that DataFrames are cached.
}
test("basic operations on an empty graph") {
for (empty <- Seq(Graphs.empty[Int], Graphs.empty[Long], Graphs.empty[String])) {
assert(empty.inDegrees.count() === 0L)
assert(empty.outDegrees.count() === 0L)
assert(empty.degrees.count() === 0L)
assert(empty.triplets.count() === 0L)
}
}
test("skewed long ID assignments") {
val sqlContext = this.sqlContext
import sqlContext.implicits._
val n = 5
// union a star graph and a chain graph and cast integral IDs to strings
val star = Graphs.star(n)
val chain = Graphs.chain(n + 1)
val vertices = star.vertices.select(col(ID).cast("string").as(ID))
val edges =
star.edges.select(col(SRC).cast("string").as(SRC), col(DST).cast("string").as(DST))
.unionAll(
chain.edges.select(col(SRC).cast("string").as(SRC), col(DST).cast("string").as(DST)))
val localVertices = vertices.select(ID).as[String].collect().toSet
val localEdges = edges.select(SRC, DST).as[(String, String)].collect().toSet
val defaultThreshold = GraphFrame.broadcastThreshold
assert(defaultThreshold === 1000000,
s"Default broadcast threshold should be 1000000 but got $defaultThreshold.")
for (threshold <- Seq(0, 4, 10)) {
GraphFrame.setBroadcastThreshold(threshold)
val g = GraphFrame(vertices, edges)
g.persist(StorageLevel.MEMORY_AND_DISK)
val indexedVertices = g.indexedVertices.select(ID, LONG_ID).as[(String, Long)].collect().toMap
assert(indexedVertices.keySet === localVertices)
assert(indexedVertices.values.toSeq.distinct.size === localVertices.size)
val origEdges = g.indexedEdges.select(SRC, DST).as[(String, String)].collect().toSet
assert(origEdges === localEdges)
g.indexedEdges
.select(SRC, LONG_SRC, DST, LONG_DST).as[(String, Long, String, Long)]
.collect()
.foreach {
case (src, longSrc, dst, longDst) =>
assert(indexedVertices(src) === longSrc)
assert(indexedVertices(dst) === longDst)
}
}
GraphFrame.setBroadcastThreshold(defaultThreshold)
}
}
| graphframes/graphframes | src/test/scala/org/graphframes/GraphFrameSuite.scala | Scala | apache-2.0 | 10,874 |
package org.scalafmt.config
import metaconfig._
case class SortSettings(
order: List[SortSettings.ModKey]
)
object SortSettings {
implicit val SortSettingsModKeyReader: ConfCodec[ModKey] =
ReaderUtil.oneOfIgnoreBackticks[ModKey](
`implicit`,
`final`,
`sealed`,
`abstract`,
`override`,
`private`,
`protected`,
`lazy`
)
val defaultOrder: List[ModKey] = List(
`implicit`,
//
`final`,
`sealed`,
`abstract`,
//
`override`,
//
`private`,
`protected`,
//
`lazy`
)
implicit val surface: generic.Surface[SortSettings] = generic.deriveSurface
implicit lazy val encoder: ConfEncoder[SortSettings] =
generic.deriveEncoder
implicit val reader: ConfDecoder[SortSettings] =
generic.deriveDecoder(SortSettings(defaultOrder)).flatMap { result =>
      // The order must mention each of the modifiers exactly once (8 in total).
      if (result.order.distinct.length != defaultOrder.length) {
val diff = defaultOrder.diff(result.order.distinct)
ConfError
.message(
s"Incomplete 'sortModifiers.order', missing values: ${diff.mkString(", ")}. " +
s"If specified, it has to contain all of the following values in the order you wish them sorted:" +
"""["private", "protected" , "abstract", "final", "sealed", "implicit", "override", "lazy"]"""
)
.notOk
} else {
Configured.ok(result)
}
}
def default: SortSettings =
SortSettings(defaultOrder)
sealed trait ModKey extends Product
case object `private` extends ModKey
case object `protected` extends ModKey
case object `final` extends ModKey
case object `sealed` extends ModKey
case object `abstract` extends ModKey
case object `implicit` extends ModKey
case object `override` extends ModKey
case object `lazy` extends ModKey
}
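// Note (illustrative): in a .scalafmt.conf this setting is the `sortModifiers.order`
// list referenced by the error message above (typically nested under `rewrite`,
// depending on the scalafmt version), e.g.
//
//   rewrite.sortModifiers.order = [
//     "implicit", "final", "sealed", "abstract",
//     "override", "private", "protected", "lazy"
//   ]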
| olafurpg/scalafmt | scalafmt-core/shared/src/main/scala/org/scalafmt/config/SortSettings.scala | Scala | apache-2.0 | 1,826 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
import org.scalatest.events._
import java.io.File
import org.scalatest.exceptions.StackDepthException
import scala.annotation.tailrec
import scala.collection.GenMap
import scala.collection.GenTraversable
import scala.collection.SortedMap
import scala.collection.SortedSet
import FailureMessages.decorateToStringValue
import java.util.concurrent.Executors
import org.scalactic.Prettifier
import org.scalactic.ArrayHelper.deep
object SharedHelpers extends Assertions with LineNumberHelper {
object SilentReporter extends Reporter {
def apply(event: Event): Unit = ()
}
object NoisyReporter extends Reporter {
def apply(event: Event): Unit = { println(event) }
}
class TestDurationReporter extends Reporter {
var testSucceededWasFiredAndHadADuration = false
var testFailedWasFiredAndHadADuration = false
override def apply(event: Event): Unit = {
event match {
case event: TestSucceeded => testSucceededWasFiredAndHadADuration = event.duration.isDefined
case event: TestFailed => testFailedWasFiredAndHadADuration = event.duration.isDefined
case _ =>
}
}
}
class SuiteDurationReporter extends Reporter {
var suiteCompletedWasFiredAndHadADuration = false
var suiteAbortedWasFiredAndHadADuration = false
override def apply(event: Event): Unit = {
event match {
case event: SuiteCompleted => suiteCompletedWasFiredAndHadADuration = event.duration.isDefined
case event: SuiteAborted => suiteAbortedWasFiredAndHadADuration = event.duration.isDefined
case _ =>
}
}
}
class PendingReporter extends Reporter {
var testPendingWasFired = false
override def apply(event: Event): Unit = {
event match {
case _: TestPending => testPendingWasFired = true
case _ =>
}
}
}
// This now needs to be thread safe, because I'm setting it in one thread
// and asserting using it from a different thread in Async tests.
class EventRecordingReporter extends Reporter {
private var eventList: List[Event] = List()
def eventsReceived = synchronized { eventList.reverse }
def testSucceededEventsReceived: List[TestSucceeded] = {
synchronized {
eventsReceived filter {
case event: TestSucceeded => true
case _ => false
} map {
case event: TestSucceeded => event
case _ => throw new RuntimeException("should never happen")
}
}
}
def testStartingEventsReceived: List[TestStarting] = {
synchronized {
eventsReceived filter {
case event: TestStarting => true
case _ => false
} map {
case event: TestStarting => event
case _ => throw new RuntimeException("should never happen")
}
}
}
// Why doesn't this work:
// for (event: TestSucceeded <- eventsReceived) yield event
def infoProvidedEventsReceived: List[InfoProvided] = {
synchronized {
eventsReceived filter {
case event: InfoProvided => true
case _ => false
} map {
case event: InfoProvided => event
case _ => throw new RuntimeException("should never happen")
}
}
}
def noteProvidedEventsReceived: List[NoteProvided] = {
synchronized {
eventsReceived filter {
case event: NoteProvided => true
case _ => false
} map {
case event: NoteProvided => event
case _ => throw new RuntimeException("should never happen")
}
}
}
def alertProvidedEventsReceived: List[AlertProvided] = {
synchronized {
eventsReceived filter {
case event: AlertProvided => true
case _ => false
} map {
case event: AlertProvided => event
case _ => throw new RuntimeException("should never happen")
}
}
}
def markupProvidedEventsReceived: List[MarkupProvided] = {
synchronized {
eventsReceived filter {
case event: MarkupProvided => true
case _ => false
} map {
case event: MarkupProvided => event
case _ => throw new RuntimeException("should never happen")
}
}
}
def scopeOpenedEventsReceived: List[ScopeOpened] = {
synchronized {
eventsReceived filter {
case event: ScopeOpened => true
case _ => false
} map {
case event: ScopeOpened => event
case _ => throw new RuntimeException("should never happen")
}
}
}
def scopeClosedEventsReceived: List[ScopeClosed] = {
synchronized {
eventsReceived filter {
case event: ScopeClosed => true
case _ => false
} map {
case event: ScopeClosed => event
case _ => throw new RuntimeException("should never happen")
}
}
}
def scopePendingEventsReceived: List[ScopePending] = {
synchronized {
eventsReceived filter {
case event: ScopePending => true
case _ => false
} map {
case event: ScopePending => event
case _ => throw new RuntimeException("should never happen")
}
}
}
def testPendingEventsReceived: List[TestPending] = {
synchronized {
eventsReceived filter {
case event: TestPending => true
case _ => false
} map {
case event: TestPending => event
case _ => throw new RuntimeException("should never happen")
}
}
}
def testCanceledEventsReceived: List[TestCanceled] = {
synchronized {
eventsReceived filter {
case event: TestCanceled => true
case _ => false
} map {
case event: TestCanceled => event
case _ => throw new RuntimeException("should never happen")
}
}
}
def testFailedEventsReceived: List[TestFailed] = {
synchronized {
eventsReceived filter {
case event: TestFailed => true
case _ => false
} map {
case event: TestFailed => event
case _ => throw new RuntimeException("should never happen")
}
}
}
def testIgnoredEventsReceived: List[TestIgnored] = {
synchronized {
eventsReceived filter {
case event: TestIgnored => true
case _ => false
} map {
case event: TestIgnored => event
case _ => throw new RuntimeException("should never happen")
}
}
}
def suiteStartingEventsReceived: List[SuiteStarting] = {
synchronized {
eventsReceived filter {
case event: SuiteStarting => true
case _ => false
} map {
case event: SuiteStarting => event
case _ => throw new RuntimeException("should never happen")
}
}
}
def suiteCompletedEventsReceived: List[SuiteCompleted] = {
synchronized {
eventsReceived filter {
case event: SuiteCompleted => true
case _ => false
} map {
case event: SuiteCompleted => event
case _ => throw new RuntimeException("should never happen")
}
}
}
def suiteAbortedEventsReceived: List[SuiteAborted] = {
synchronized {
eventsReceived filter {
case event: SuiteAborted => true
case _ => false
} map {
case event: SuiteAborted => event
case _ => throw new RuntimeException("should never happen")
}
}
}
def apply(event: Event): Unit = {
synchronized {
eventList ::= event
}
}
}
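  // Hedged usage sketch (illustrative; `MySuite` is an assumed suite under test):
  //
  //   val rep = new EventRecordingReporter
  //   (new MySuite).run(None, Args(rep))
  //   assert(rep.testSucceededEventsReceived.map(_.testName) === List("my test"))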
def getIndexesForTestInformerEventOrderTests(suite: Suite, testName: String, infoMsg: String): (Int, Int) = {
val myRep = new EventRecordingReporter
suite.run(None, Args(myRep))
val indexedList = myRep.eventsReceived.zipWithIndex
val testStartingOption = indexedList.find(_._1.isInstanceOf[TestStarting])
val testSucceededOption = indexedList.find(_._1.isInstanceOf[TestSucceeded])
assert(testStartingOption.isDefined, "TestStarting for Suite='" + suite.suiteId + "', testName='" + testName + "' not defined.")
assert(testSucceededOption.isDefined, "TestSucceeded for Suite='" + suite.suiteId + "', testName='" + testName + "' not defined.")
val testStartingIndex = testStartingOption.get._2
val testSucceededIndex = testSucceededOption.get._2
val testStarting = testStartingOption.get._1.asInstanceOf[TestStarting]
val testSucceeded = testSucceededOption.get._1.asInstanceOf[TestSucceeded]
val recordedEvents = testSucceeded.recordedEvents
val infoProvidedOption = recordedEvents.find {
case event: InfoProvided => event.message == infoMsg
case _ => false
}
assert(infoProvidedOption.isDefined, "InfoProvided for Suite='" + suite.suiteId + "', testName='" + testName + "' not defined.")
(testStartingIndex, testSucceededIndex)
}
def getIndexesForInformerEventOrderTests(suite: Suite, testName: String, infoMsg: String): (Int, Int, Int) = {
val myRep = new EventRecordingReporter
suite.run(None, Args(myRep))
val indexedList = myRep.eventsReceived.zipWithIndex
val testStartingOption = indexedList.find(_._1.isInstanceOf[TestStarting])
val infoProvidedOption = indexedList.find {
case (event: InfoProvided, index) => event.message == infoMsg
case _ => false
}
val testSucceededOption = indexedList.find(_._1.isInstanceOf[TestSucceeded])
assert(testStartingOption.isDefined, "TestStarting for Suite='" + suite.suiteId + "', testName='" + testName + "' not defined.")
assert(infoProvidedOption.isDefined, "InfoProvided for Suite='" + suite.suiteId + "', testName='" + testName + "' not defined.")
assert(testSucceededOption.isDefined, "TestSucceeded for Suite='" + suite.suiteId + "', testName='" + testName + "' not defined.")
val testStartingIndex = testStartingOption.get._2
val infoProvidedIndex = infoProvidedOption.get._2
val testSucceededIndex = testSucceededOption.get._2
val testStarting = testStartingOption.get._1.asInstanceOf[TestStarting]
val infoProvided = infoProvidedOption.get._1.asInstanceOf[InfoProvided]
val testSucceeded = testSucceededOption.get._1.asInstanceOf[TestSucceeded]
assert(testStarting.testName === testName, "TestStarting.testName expected to be '" + testName + "', but got '" + testStarting.testName + "'.")
    assert(infoProvided.message === infoMsg, "InfoProvided.message expected to be '" + infoMsg + "', but got '" + infoProvided.message + "'.")
assert(testSucceeded.testName === testName, "TestSucceeded.testName expected to be '" + testName + "', but got '" + testSucceeded.testName + "'.")
(infoProvidedIndex, testStartingIndex, testSucceededIndex)
}
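  // Runs the suite and returns the IndentedText formatter attached to the first
  // InfoProvided event, failing if no such event (or no such formatter) is found.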
def getIndentedTextFromInfoProvided(suite: Suite): IndentedText = {
val myRep = new EventRecordingReporter
suite.run(None, Args(myRep))
val infoProvidedOption = myRep.eventsReceived.find(_.isInstanceOf[InfoProvided])
infoProvidedOption match {
case Some(infoProvided: InfoProvided) =>
infoProvided.formatter match {
case Some(indentedText: IndentedText) => indentedText
case _ => fail("An InfoProvided was received that didn't include an IndentedText formatter: " + infoProvided.formatter)
}
case _ => fail("No InfoProvided was received by the Reporter during the run.")
}
}
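  // Runs the suite, locates the first test-completion event (succeeded, failed, pending,
  // or canceled), and returns the IndentedText formatter of the single InfoProvided
  // event recorded inside it.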
def getIndentedTextFromTestInfoProvided(suite: Suite): IndentedText = {
val myRep = new EventRecordingReporter
suite.run(None, Args(myRep))
    val recordedEvents: Seq[Event] =
      myRep.eventsReceived.collectFirst {
        case testSucceeded: TestSucceeded => testSucceeded.recordedEvents
        case testFailed: TestFailed => testFailed.recordedEvents
        case testPending: TestPending => testPending.recordedEvents
        case testCanceled: TestCanceled => testCanceled.recordedEvents
      } getOrElse fail("A test-completion event was expected, but none was received.")
assert(recordedEvents.size === 1)
recordedEvents(0) match {
case ip: InfoProvided =>
ip.formatter match {
case Some(indentedText: IndentedText) => indentedText
case _ => fail("An InfoProvided was received that didn't include an IndentedText formatter: " + ip.formatter)
}
case _ => fail("No InfoProvided was received by the Reporter during the run.")
}
}
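  // The two helpers below run the suite and assert that a TestFailed event was fired
  // for the given test name (and, for the second, with the expected message).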
def ensureTestFailedEventReceived(suite: Suite, testName: String): Unit = {
val reporter = new EventRecordingReporter
suite.run(None, Args(reporter))
val testFailedEvent = reporter.eventsReceived.find(_.isInstanceOf[TestFailed])
assert(testFailedEvent.isDefined)
assert(testFailedEvent.get.asInstanceOf[TestFailed].testName === testName)
}
def ensureTestFailedEventReceivedWithCorrectMessage(suite: Suite, testName: String, expectedMessage: String): Unit = {
val reporter = new EventRecordingReporter
suite.run(None, Args(reporter))
val testFailedEvent = reporter.eventsReceived.find(_.isInstanceOf[TestFailed])
assert(testFailedEvent.isDefined)
assert(testFailedEvent.get.asInstanceOf[TestFailed].testName == testName)
assert(testFailedEvent.get.asInstanceOf[TestFailed].message == expectedMessage)
}
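  // A minimal reporter that only tracks whether a TestIgnored event was received,
  // keeping the most recent one for inspection.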
class TestIgnoredTrackingReporter extends Reporter {
var testIgnoredReceived = false
var lastEvent: Option[TestIgnored] = None
def apply(event: Event): Unit = {
event match {
case event: TestIgnored =>
testIgnoredReceived = true
lastEvent = Some(event)
case _ =>
}
}
}
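  // Returns the zero-based index of the first element equal to value, or -1 if none.
  // The overloads below do the same for Java collections, Java maps, and map keys.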
def getIndex[T](xs: GenTraversable[T], value: T): Int = {
@tailrec
    def getIndexAcc(itr: Iterator[T], count: Int): Int = {
if (itr.hasNext) {
val next = itr.next
if (next == value)
count
else
getIndexAcc(itr, count + 1)
}
else
-1
}
getIndexAcc(xs.toIterator, 0)
}
def getKeyIndex[K, V](xs: GenMap[K, V], value: K): Int = {
@tailrec
    def getIndexAcc(itr: Iterator[(K, V)], count: Int): Int = {
if (itr.hasNext) {
val next = itr.next
if (next._1 == value)
count
else
getIndexAcc(itr, count + 1)
}
else
-1
}
getIndexAcc(xs.toIterator, 0)
}
def getIndex(xs: java.util.Collection[_], value: Any): Int = {
@tailrec
def getIndexAcc(itr: java.util.Iterator[_], count: Int): Int = {
if (itr.hasNext) {
val next = itr.next
if (next == value)
count
else
getIndexAcc(itr, count + 1)
}
else
-1
}
getIndexAcc(xs.iterator, 0)
}
def getIndex[K, V](xs: java.util.Map[K, V], value: java.util.Map.Entry[K, V]): Int = {
@tailrec
def getIndexAcc(itr: java.util.Iterator[java.util.Map.Entry[K, V]], count: Int): Int = {
if (itr.hasNext) {
val next = itr.next
if (next == value)
count
else
getIndexAcc(itr, count + 1)
}
else
-1
}
getIndexAcc(xs.entrySet.iterator, 0)
}
def getKeyIndex[K, V](xs: java.util.Map[K, V], value: K): Int = {
@tailrec
    def getIndexAcc(itr: java.util.Iterator[java.util.Map.Entry[K, V]], count: Int): Int = {
if (itr.hasNext) {
val next = itr.next
if (next.getKey == value)
count
else
getIndexAcc(itr, count + 1)
}
else
-1
}
getIndexAcc(xs.entrySet.iterator, 0)
}
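  // Returns the zero-based indexes of every element of xs that also appears in values,
  // preserving iteration order.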
def getIndexes[T](xs: GenTraversable[T], values: GenTraversable[T]): GenTraversable[Int] = {
@tailrec
    def getIndexesAcc(itr: Iterator[T], indexes: IndexedSeq[Int], count: Int): IndexedSeq[Int] = {
if (itr.hasNext) {
val next = itr.next
if (values.exists(_ == next))
getIndexesAcc(itr, indexes :+ count, count + 1)
else
getIndexesAcc(itr, indexes, count + 1)
}
else
indexes
}
val itr = xs.toIterator
getIndexesAcc(itr, IndexedSeq.empty, 0)
}
def getIndexesInJavaCol[T](xs: java.util.Collection[T], values: java.util.Collection[T]): GenTraversable[Int] = {
import collection.JavaConverters._
val javaValues = values.asScala
@tailrec
    def getIndexesAcc(itr: java.util.Iterator[T], indexes: IndexedSeq[Int], count: Int): IndexedSeq[Int] = {
if (itr.hasNext) {
val next = itr.next
if (javaValues.exists(_ == next))
getIndexesAcc(itr, indexes :+ count, count + 1)
else
getIndexesAcc(itr, indexes, count + 1)
}
else
indexes
}
val itr = xs.iterator
getIndexesAcc(itr, IndexedSeq.empty, 0)
}
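  // Linear-search helpers: getNext/getFirst return the first element satisfying the
  // predicate, and getNextNot/getFirstNot the first that does not. They assume a match
  // exists; if none does, the underlying iterator is exhausted and throws.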
@tailrec
final def getNext[T](itr: Iterator[T], predicate: T => Boolean): T = {
val next = itr.next
if (predicate(next))
next
else
getNext(itr, predicate)
}
final def getNextInString(itr: Iterator[Char], predicate: Char => Boolean) =
getNext[Char](itr, predicate)
@tailrec
final def getNextInJavaIterator[T](itr: java.util.Iterator[T], predicate: T => Boolean): T = {
val next = itr.next
if (predicate(next))
next
else
getNextInJavaIterator(itr, predicate)
}
final def getNextInJavaMap[K, V](itr: java.util.Iterator[java.util.Map.Entry[K, V]], predicate: java.util.Map.Entry[K, V] => Boolean): java.util.Map.Entry[K, V] =
getNextInJavaIterator(itr, predicate)
def getFirst[T](col: GenTraversable[T], predicate: T => Boolean): T =
getNext(col.toIterator, predicate)
def getFirstInJavaCol[T](col: java.util.Collection[T], predicate: T => Boolean): T =
getNextInJavaIterator(col.iterator, predicate)
def getFirstInJavaMap[K, V](map: java.util.Map[K, V], predicate: java.util.Map.Entry[K, V] => Boolean): java.util.Map.Entry[K, V] =
getNextInJavaIterator(map.entrySet.iterator, predicate)
def getFirstInString(str: String, predicate: Char => Boolean): Char =
getNext(str.toCharArray.iterator, predicate)
@tailrec
final def getNextNot[T](itr: Iterator[T], predicate: T => Boolean): T = {
val next = itr.next
if (!predicate(next))
next
else
getNextNot(itr, predicate)
}
@tailrec
final def getNextNotInJavaCol[T](itr: java.util.Iterator[T], predicate: T => Boolean): T = {
val next = itr.next
if (!predicate(next))
next
else
getNextNotInJavaCol(itr, predicate)
}
def getFirstNot[T](col: GenTraversable[T], predicate: T => Boolean): T =
getNextNot(col.toIterator, predicate)
def getFirstEqual[T](col: GenTraversable[T], right: T): T =
getFirst[T](col, _ == right)
def getFirstNotEqual[T](col: GenTraversable[T], right: T): T =
getFirst[T](col, _ != right)
def getFirstEqual[K, V](col: java.util.Map[K, V], right: java.util.Map.Entry[K, V]): java.util.Map.Entry[K, V] =
getFirstInJavaMap[K, V](col, (e: java.util.Map.Entry[K, V]) => e.getKey == right.getKey && e.getValue == right.getValue)
def getFirstNotEqual[K, V](col: java.util.Map[K, V], right: java.util.Map.Entry[K, V]): java.util.Map.Entry[K, V] =
getFirstInJavaMap[K, V](col, (e: java.util.Map.Entry[K, V]) => e.getKey != right.getKey || e.getValue != right.getValue)
def getFirstMoreThanEqual(col: GenTraversable[Int], right: Int): Int =
getFirst[Int](col, _ >= right)
def getFirstLessThanEqual(col: GenTraversable[Int], right: Int): Int =
getFirst[Int](col, _ <= right)
def getFirstMoreThan(col: GenTraversable[Int], right: Int): Int =
getFirst[Int](col, _ > right)
def getFirstLessThan(col: GenTraversable[Int], right: Int): Int =
getFirst[Int](col, _ < right)
  def getFirstIsEmpty(col: GenTraversable[String], right: String): String = // right is not used, but kept for consistency with the others to ease code generation
    getFirst[String](col, _.isEmpty)
  def getFirstIsNotEmpty(col: GenTraversable[String], right: String): String = // right is not used, but kept for consistency with the others to ease code generation
    getFirst[String](col, !_.isEmpty)
def getFirstLengthEqual(col: GenTraversable[String], right: Int): String =
getFirst[String](col, _.length == right)
def getFirstLengthNotEqual(col: GenTraversable[String], right: Int): String =
getFirst[String](col, _.length != right)
def getFirstLengthNotEqualLength(col: GenTraversable[String], right: Int): String =
getFirst[String](col, _.length != right)
def getFirstSizeEqual(col: GenTraversable[String], right: Int): String =
getFirst[String](col, _.size == right)
def getFirstSizeNotEqual(col: GenTraversable[String], right: Int): String =
getFirst[String](col, _.size != right)
def getFirstRefEqual[T <: AnyRef](col: GenTraversable[T], right: T): T =
getFirst[T](col, _ eq right)
def getFirstNotRefEqual[T <: AnyRef](col: GenTraversable[T], right: T): T =
getFirst[T](col, _ ne right)
def getFirstStartsWith(col: GenTraversable[String], right: String): String =
getFirst[String](col, _.startsWith(right))
def getFirstNotStartsWith(col: GenTraversable[String], right: String): String =
getFirst[String](col, !_.startsWith(right))
def getFirstEndsWith(col: GenTraversable[String], right: String): String =
getFirst[String](col, _.endsWith(right))
def getFirstNotEndsWith(col: GenTraversable[String], right: String): String =
getFirst[String](col, !_.endsWith(right))
def getFirstInclude(col: GenTraversable[String], right: String): String =
getFirst[String](col, _.indexOf(right) >= 0)
def getFirstNotInclude(col: GenTraversable[String], right: String): String =
getFirst[String](col, _.indexOf(right) < 0)
def getFirstMatches(col: GenTraversable[String], right: String): String =
getFirst[String](col, _.matches(right))
def getFirstNotMatches(col: GenTraversable[String], right: String): String =
getFirst[String](col, !_.matches(right))
def getFirstNot[T](col: java.util.Collection[T], predicate: T => Boolean): T =
getNextNotInJavaCol(col.iterator, predicate)
def getFirstEqual[T](col: java.util.Collection[T], right: T): T =
getFirstInJavaCol[T](col, _ == right)
def getFirstNotEqual[T](col: java.util.Collection[T], right: T): T =
getFirstInJavaCol[T](col, _ != right)
def getFirstMoreThanEqual(col: java.util.Collection[Int], right: Int): Int =
getFirstInJavaCol[Int](col, _ >= right)
def getFirstLessThanEqual(col: java.util.Collection[Int], right: Int): Int =
getFirstInJavaCol[Int](col, _ <= right)
def getFirstMoreThan(col: java.util.Collection[Int], right: Int): Int =
getFirstInJavaCol[Int](col, _ > right)
def getFirstLessThan(col: java.util.Collection[Int], right: Int): Int =
getFirstInJavaCol[Int](col, _ < right)
  def getFirstIsEmpty(col: java.util.Collection[String], right: String): String = // right is not used, but kept for consistency with the others to ease code generation
    getFirstInJavaCol[String](col, _.isEmpty)
  def getFirstIsNotEmpty(col: java.util.Collection[String], right: String): String = // right is not used, but kept for consistency with the others to ease code generation
    getFirstInJavaCol[String](col, !_.isEmpty)
def getFirstLengthEqual(col: java.util.Collection[String], right: Int): String =
getFirstInJavaCol[String](col, _.length == right)
def getFirstLengthNotEqual(col: java.util.Collection[String], right: Int): String =
getFirstInJavaCol[String](col, _.length != right)
def getFirstLengthNotEqualLength(col: java.util.Collection[String], right: Int): String =
getFirstInJavaCol[String](col, _.length != right)
def getFirstSizeEqual(col: java.util.Collection[String], right: Int): String =
getFirstInJavaCol[String](col, _.size == right)
def getFirstSizeNotEqual(col: java.util.Collection[String], right: Int): String =
getFirstInJavaCol[String](col, _.size != right)
def getFirstRefEqual[T <: AnyRef](col: java.util.Collection[T], right: T): T =
getFirstInJavaCol[T](col, _ eq right)
def getFirstNotRefEqual[T <: AnyRef](col: java.util.Collection[T], right: T): T =
getFirstInJavaCol[T](col, _ ne right)
def getFirstStartsWith(col: java.util.Collection[String], right: String): String =
getFirstInJavaCol[String](col, _.startsWith(right))
def getFirstNotStartsWith(col: java.util.Collection[String], right: String): String =
getFirstInJavaCol[String](col, !_.startsWith(right))
def getFirstEndsWith(col: java.util.Collection[String], right: String): String =
getFirstInJavaCol[String](col, _.endsWith(right))
def getFirstNotEndsWith(col: java.util.Collection[String], right: String): String =
getFirstInJavaCol[String](col, !_.endsWith(right))
def getFirstInclude(col: java.util.Collection[String], right: String): String =
getFirstInJavaCol[String](col, _.indexOf(right) >= 0)
def getFirstNotInclude(col: java.util.Collection[String], right: String): String =
getFirstInJavaCol[String](col, _.indexOf(right) < 0)
def getFirstMatches(col: java.util.Collection[String], right: String): String =
getFirstInJavaCol[String](col, _.matches(right))
def getFirstNotMatches(col: java.util.Collection[String], right: String): String =
getFirstInJavaCol[String](col, !_.matches(right))
def getFirstSizeEqualGenTraversable[T](col: GenTraversable[GenTraversable[T]], right: Int): GenTraversable[T] =
getFirst[GenTraversable[T]](col, _.size == right)
def getFirstSizeNotEqualGenTraversable[T](col: GenTraversable[GenTraversable[T]], right: Int): GenTraversable[T] =
getFirst[GenTraversable[T]](col, _.size != right)
def getFirstSizeEqualGenTraversableArray[T](col: GenTraversable[Array[T]], right: Int): Array[T] =
getFirst[Array[T]](col, _.size == right)
def getFirstSizeNotEqualGenTraversableArray[T](col: GenTraversable[Array[T]], right: Int): Array[T] =
getFirst[Array[T]](col, _.size != right)
def getFirstIsEmpty[T](col: GenTraversable[GenTraversable[T]], right: T): GenTraversable[T] =
getFirst[GenTraversable[T]](col, _.isEmpty)
def getFirstNotIsEmpty[T](col: GenTraversable[GenTraversable[T]], right: T): GenTraversable[T] =
getFirst[GenTraversable[T]](col, !_.isEmpty)
def getFirstContainGenTraversable[T](col: GenTraversable[GenTraversable[T]], right: T): GenTraversable[T] =
getFirst[GenTraversable[T]](col, _.exists(_ == right))
def getFirstNotContainGenTraversable[T](col: GenTraversable[GenTraversable[T]], right: T): GenTraversable[T] =
getFirst[GenTraversable[T]](col, !_.exists(_ == right))
def getFirstContainGenTraversableArray[T](col: GenTraversable[Array[T]], right: T): Array[T] =
getFirst[Array[T]](col, _.exists(_ == right))
def getFirstNotContainGenTraversableArray[T](col: GenTraversable[Array[T]], right: T): Array[T] =
getFirst[Array[T]](col, !_.exists(_ == right))
def getFirstContainKey[K, V](col: GenTraversable[GenMap[K, V]], right: K): GenMap[K, V] =
getFirst[GenMap[K, V]](col, _.exists(_._1 == right))
def getFirstNotContainKey[K, V](col: GenTraversable[GenMap[K, V]], right: K): GenMap[K, V] =
getFirst[GenMap[K, V]](col, !_.exists(_._1 == right))
def getFirstContainValue[K, V](col: GenTraversable[GenMap[K, V]], right: V): GenMap[K, V] =
getFirst[GenMap[K, V]](col, _.exists(_._2 == right))
def getFirstNotContainValue[K, V](col: GenTraversable[GenMap[K, V]], right: V): GenMap[K, V] =
getFirst[GenMap[K, V]](col, !_.exists(_._2 == right))
import scala.language.higherKinds
  def getFirstJavaMapIsEmpty[K, V, JMAP[k, v] <: java.util.Map[_, _]](col: java.util.Collection[JMAP[K, V]], right: Int = 0): java.util.Map[K, V] = // right is not used, but kept for consistency with the others to ease code generation
    getFirstInJavaCol[java.util.Map[K, V]](col.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.isEmpty)
  def getFirstJavaMapNotIsEmpty[K, V, JMAP[k, v] <: java.util.Map[_, _]](col: java.util.Collection[JMAP[K, V]], right: Int = 0): java.util.Map[K, V] = // right is not used, but kept for consistency with the others to ease code generation
    getFirstInJavaCol[java.util.Map[K, V]](col.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], !_.isEmpty)
def getFirstJavaMapContainKey[K, V, JMAP[k, v] <: java.util.Map[_, _]](col: java.util.Collection[JMAP[K, V]], right: K): java.util.Map[K, V] =
getFirstInJavaCol[java.util.Map[K, V]](col.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.containsKey(right))
def getFirstJavaMapNotContainKey[K, V, JMAP[k, v] <: java.util.Map[_, _]](col: java.util.Collection[JMAP[K, V]], right: K): java.util.Map[K, V] =
getFirstInJavaCol[java.util.Map[K, V]](col.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], !_.containsKey(right))
def getFirstJavaMapContainValue[K, V, JMAP[k, v] <: java.util.Map[_, _]](col: java.util.Collection[JMAP[K, V]], right: V): java.util.Map[K, V] =
getFirstInJavaCol[java.util.Map[K, V]](col.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.containsValue(right))
def getFirstJavaMapNotContainValue[K, V, JMAP[k, v] <: java.util.Map[_, _]](col: java.util.Collection[JMAP[K, V]], right: V): java.util.Map[K, V] =
getFirstInJavaCol[java.util.Map[K, V]](col.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], !_.containsValue(right))
def getFirstJavaMapSizeEqual[K, V, JMAP[k, v] <: java.util.Map[_, _]](col: java.util.Collection[JMAP[K, V]], right: Int): java.util.Map[K, V] =
getFirstInJavaCol[java.util.Map[K, V]](col.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.size == right)
def getFirstJavaMapSizeNotEqual[K, V, JMAP[k, v] <: java.util.Map[_, _]](col: java.util.Collection[JMAP[K, V]], right: Int): java.util.Map[K, V] =
getFirstInJavaCol[java.util.Map[K, V]](col.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.size != right)
def getFirstJavaColSizeEqual[T, C[t] <: java.util.Collection[_]](col: java.util.Collection[C[T]], right: Int): java.util.Collection[T] =
getFirstInJavaCol[java.util.Collection[T]](col.asInstanceOf[java.util.Collection[java.util.Collection[T]]], _.size == right) // Safe cast, but ugly, can we do without it?
def getFirstJavaColSizeNotEqual[T, C[t] <: java.util.Collection[_]](col: java.util.Collection[C[T]], right: Int): java.util.Collection[T] =
getFirstInJavaCol[java.util.Collection[T]](col.asInstanceOf[java.util.Collection[java.util.Collection[T]]], _.size != right) // Safe cast, but ugly, can we do without it?
def getFirstJavaColContain[T, C[t] <: java.util.Collection[_]](col: java.util.Collection[C[T]], right: T): java.util.Collection[T] =
getFirstInJavaCol[java.util.Collection[T]](col.asInstanceOf[java.util.Collection[java.util.Collection[T]]], _.contains(right)) // Safe cast, but ugly, can we do without it?
def getFirstJavaColNotContain[T, C[t] <: java.util.Collection[_]](col: java.util.Collection[C[T]], right: T): java.util.Collection[T] =
getFirstInJavaCol[java.util.Collection[T]](col.asInstanceOf[java.util.Collection[java.util.Collection[T]]], !_.contains(right)) // Safe cast, but ugly, can we do without it?
  def getFirstJavaColIsEmpty[T, C[t] <: java.util.Collection[_]](col: java.util.Collection[C[T]], right: Int = 0): java.util.Collection[T] = // right is not used, but kept for consistency with the others to ease code generation
    getFirstInJavaCol[java.util.Collection[T]](col.asInstanceOf[java.util.Collection[java.util.Collection[T]]], _.isEmpty) // Safe cast, but ugly, can we do without it?
  def getFirstJavaColNotIsEmpty[T, C[t] <: java.util.Collection[_]](col: java.util.Collection[C[T]], right: Int = 0): java.util.Collection[T] = // right is not used, but kept for consistency with the others to ease code generation
    getFirstInJavaCol[java.util.Collection[T]](col.asInstanceOf[java.util.Collection[java.util.Collection[T]]], !_.isEmpty) // Safe cast, but ugly, can we do without it?
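  // The indexElement* helpers locate the first element satisfying errorFun and return
  // the pieces used to build inspector error messages: its index (or key, for maps)
  // and its prettified string form, plus a length/size where relevant.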
def indexElement[T](itr: Iterator[T], xs: GenTraversable[T], errorFun: T => Boolean): Array[String] = {
val element = getNext[T](itr, errorFun)
val indexOrKey =
xs match {
case map: GenMap[_, _] => element.asInstanceOf[Tuple2[_, _]]._1
case genTrv: GenTraversable[_] => getIndex(xs, element)
}
Array(indexOrKey.toString, decorateToStringValue(Prettifier.default, element))
}
def indexElementForJavaIterator[T](itr: java.util.Iterator[T], xs: java.util.Collection[T], errorFun: T => Boolean): Array[String] = {
val element = getNextInJavaIterator[T](itr, errorFun)
val indexOrKey =
xs match {
case map: java.util.Map[_, _] => element.asInstanceOf[java.util.Map.Entry[_, _]].getKey
case genTrv: java.util.Collection[_] => getIndex(xs, element)
}
Array(indexOrKey.toString, decorateToStringValue(Prettifier.default, element))
}
def indexElementForJavaIterator[K, V](itr: java.util.Iterator[java.util.Map.Entry[K, V]], xs: java.util.Map[K, V], errorFun: java.util.Map.Entry[K, V] => Boolean): Array[String] = {
val element = getNextInJavaIterator[java.util.Map.Entry[K, V]](itr, errorFun)
val indexOrKey = element.asInstanceOf[java.util.Map.Entry[_, _]].getKey
Array(indexOrKey.toString, decorateToStringValue(Prettifier.default, element))
}
def indexLengthElement[T](itr: Iterator[String], xs: GenTraversable[String], errorFun: String => Boolean): Array[String] = {
val element = getNext[String](itr, errorFun)
val indexOrKey =
xs match {
case map: GenMap[_, _] => element.asInstanceOf[Tuple2[_, _]]._1
case genTrv: GenTraversable[_] => getIndex(xs, element)
}
Array(indexOrKey.toString, element.length.toString, (if (element != null && element.isInstanceOf[Array[_]]) deep(element.asInstanceOf[Array[T]]).toString else element.toString))
}
def indexLengthElement[T](itr: java.util.Iterator[String], xs: java.util.Collection[String], errorFun: String => Boolean): Array[String] = {
val element = getNextInJavaIterator[String](itr, errorFun)
val indexOrKey =
xs match {
case map: java.util.Map[_, _] => element.asInstanceOf[java.util.Map.Entry[_, _]].getKey
case genTrv: java.util.Collection[_] => getIndex(xs, element)
}
Array(indexOrKey.toString, element.length.toString, (if (element != null && element.isInstanceOf[Array[_]]) deep(element.asInstanceOf[Array[T]]).toString else element.toString))
}
def indexElementLengthString[T](itr: Iterator[String], xs: GenTraversable[String], errorFun: String => Boolean): Array[String] = {
val element = getNext[String](itr, errorFun)
val indexOrKey =
xs match {
case map: GenMap[_, _] => element.asInstanceOf[Tuple2[_, _]]._1
case genTrv: GenTraversable[_] => getIndex(xs, element)
}
Array(indexOrKey.toString, decorateToStringValue(Prettifier.default, element), element.length.toString)
}
def indexElementLengthString[T](itr: java.util.Iterator[String], xs: java.util.Collection[String], errorFun: String => Boolean): Array[String] = {
val element = getNextInJavaIterator[String](itr, errorFun)
val indexOrKey =
xs match {
case map: java.util.Map[_, _] => element.asInstanceOf[java.util.Map.Entry[_, _]].getKey
case genTrv: java.util.Collection[_] => getIndex(xs, element)
}
Array(indexOrKey.toString, decorateToStringValue(Prettifier.default, element), element.length.toString)
}
def indexElementLengthGenTraversable[T](itr: Iterator[GenTraversable[T]], xs: GenTraversable[GenTraversable[T]], errorFun: GenTraversable[T] => Boolean): Array[String] = {
val element = getNext[GenTraversable[T]](itr, errorFun)
val indexOrKey =
xs match {
case map: GenMap[_, _] => element.asInstanceOf[Tuple2[_, _]]._1
case genTrv: GenTraversable[_] => getIndex(xs, element)
}
Array(indexOrKey.toString, decorateToStringValue(Prettifier.default, element), element.size.toString)
}
def indexElementLengthArray[T](itr: Iterator[Array[T]], xs: GenTraversable[Array[T]], errorFun: Array[T] => Boolean): Array[String] = {
val element = getNext[Array[T]](itr, errorFun)
val indexOrKey =
xs match {
case map: GenMap[_, _] => element.asInstanceOf[Tuple2[_, _]]._1
case genTrv: GenTraversable[_] => getIndex(xs, element)
}
Array(indexOrKey.toString, decorateToStringValue(Prettifier.default, element), element.size.toString)
}
def indexElementLengthJavaCol[T, C[t] <: java.util.Collection[_]](itr: java.util.Iterator[C[T]], xs: java.util.Collection[C[T]], errorFun: java.util.Collection[T] => Boolean): Array[String] = {
val element = getNextInJavaIterator[java.util.Collection[T]](itr.asInstanceOf[java.util.Iterator[java.util.Collection[T]]], errorFun)
val indexOrKey =
xs match {
case map: java.util.Map[_, _] => element.asInstanceOf[java.util.Map.Entry[_, _]].getKey
case genTrv: java.util.Collection[_] => getIndex(xs, element)
}
Array(indexOrKey.toString, decorateToStringValue(Prettifier.default, element), element.size.toString)
}
def indexElementLengthJavaMap[K, V, JMAP[k, v] <: java.util.Map[_, _]](itr: java.util.Iterator[JMAP[K, V]], xs: java.util.Collection[java.util.Map[K, V]], errorFun: java.util.Map[K, V] => Boolean): Array[String] = {
val element = getNextInJavaIterator[java.util.Map[K, V]](itr.asInstanceOf[java.util.Iterator[java.util.Map[K, V]]], errorFun)
val indexOrKey =
xs match {
case map: java.util.Map[_, _] => element.asInstanceOf[java.util.Map.Entry[_, _]].getKey
case genTrv: java.util.Collection[_] => getIndex(xs, element)
}
Array(indexOrKey.toString, decorateToStringValue(Prettifier.default, element), element.size.toString)
}
def indexElementEqual[T](itr: Iterator[T], xs: GenTraversable[T], right: T): Array[String] =
indexElement[T](itr, xs, _ == right)
def indexElementNotEqual[T](itr: Iterator[T], xs: GenTraversable[T], right: T): Array[String] =
indexElement[T](itr, xs, _ != right)
def indexElementMoreThan(itr: Iterator[Int], xs: GenTraversable[Int], right: Int): Array[String] =
indexElement[Int](itr, xs, _ > right)
def indexElementMoreThanEqual(itr: Iterator[Int], xs: GenTraversable[Int], right: Int): Array[String] =
indexElement[Int](itr, xs, _ >= right)
def indexElementLessThan(itr: Iterator[Int], xs: GenTraversable[Int], right: Int): Array[String] =
indexElement[Int](itr, xs, _ < right)
def indexElementLessThanEqual(itr: Iterator[Int], xs: GenTraversable[Int], right: Int): Array[String] =
indexElement[Int](itr, xs, _ <= right)
  def indexElementIsEmpty(itr: Iterator[String], xs: GenTraversable[String], right: String): Array[String] = // right is not used, but kept for consistency with the others to ease code generation
    indexElement[String](itr, xs, _.isEmpty)
  def indexElementIsNotEmpty(itr: Iterator[String], xs: GenTraversable[String], right: String): Array[String] = // right is not used, but kept for consistency with the others to ease code generation
    indexElement[String](itr, xs, !_.isEmpty)
def indexElementLengthEqual(itr: Iterator[String], xs: GenTraversable[String], right: Int): Array[String] =
indexElement[String](itr, xs, _.length == right)
def indexElementLengthNotEqual(itr: Iterator[String], xs: GenTraversable[String], right: Int): Array[String] =
indexElementLengthString[String](itr, xs, (e: String) => e.length != right)
def indexElementSizeEqual(itr: Iterator[String], xs: GenTraversable[String], right: Int): Array[String] =
indexElement[String](itr, xs, _.size == right)
def indexElementSizeNotEqual(itr: Iterator[String], xs: GenTraversable[String], right: Int): Array[String] =
indexElementLengthString[String](itr, xs, (e: String) => e.size != right)
def indexElementLengthNotEqualLength(itr: Iterator[String], xs: GenTraversable[String], right: Int): Array[String] =
indexLengthElement[String](itr, xs, (e: String) => e.length != right)
def indexElementStartsWith(itr: Iterator[String], xs: GenTraversable[String], right: String): Array[String] =
indexElement[String](itr, xs, _.startsWith(right))
def indexElementNotStartsWith(itr: Iterator[String], xs: GenTraversable[String], right: String): Array[String] =
indexElement[String](itr, xs, !_.startsWith(right))
def indexElementEndsWith(itr: Iterator[String], xs: GenTraversable[String], right: String): Array[String] =
indexElement[String](itr, xs, _.endsWith(right))
def indexElementNotEndsWith(itr: Iterator[String], xs: GenTraversable[String], right: String): Array[String] =
indexElement[String](itr, xs, !_.endsWith(right))
def indexElementInclude(itr: Iterator[String], xs: GenTraversable[String], right: String): Array[String] =
indexElement[String](itr, xs, _.indexOf(right) >= 0)
def indexElementNotInclude(itr: Iterator[String], xs: GenTraversable[String], right: String): Array[String] =
indexElement[String](itr, xs, _.indexOf(right) < 0)
def indexElementMatches(itr: Iterator[String], xs: GenTraversable[String], right: String): Array[String] =
indexElement[String](itr, xs, _.matches(right))
def indexElementNotMatches(itr: Iterator[String], xs: GenTraversable[String], right: String): Array[String] =
indexElement[String](itr, xs, !_.matches(right))
//##################################
// SKIP-SCALATESTJS,NATIVE-START
def javaMapEntry[K, V](key: K, value: V): java.util.Map.Entry[K, V] = org.scalatest.Entry(key, value)
// SKIP-SCALATESTJS,NATIVE-END
def indexElementEqual[K, V](itr: java.util.Iterator[java.util.Map.Entry[K, V]], xs: java.util.Map[K, V], right: java.util.Map.Entry[K, V]): Array[String] =
indexElementForJavaIterator[K, V](itr, xs, (e: java.util.Map.Entry[K, V]) => e.getKey == right.getKey && e.getValue == right.getValue)
def indexElementNotEqual[K, V](itr: java.util.Iterator[java.util.Map.Entry[K, V]], xs: java.util.Map[K, V], right: java.util.Map.Entry[K, V]): Array[String] =
indexElementForJavaIterator[K, V](itr, xs, (e: java.util.Map.Entry[K, V]) => e.getKey != right.getKey || e.getValue != right.getValue)
def indexElementEqual[T](itr: java.util.Iterator[T], xs: java.util.Collection[T], right: T): Array[String] =
indexElementForJavaIterator[T](itr, xs, _ == right)
def indexElementNotEqual[T](itr: java.util.Iterator[T], xs: java.util.Collection[T], right: T): Array[String] =
indexElementForJavaIterator[T](itr, xs, _ != right)
def indexElementMoreThan(itr: java.util.Iterator[Int], xs: java.util.Collection[Int], right: Int): Array[String] =
indexElementForJavaIterator[Int](itr, xs, _ > right)
def indexElementMoreThanEqual(itr: java.util.Iterator[Int], xs: java.util.Collection[Int], right: Int): Array[String] =
indexElementForJavaIterator[Int](itr, xs, _ >= right)
def indexElementLessThan(itr: java.util.Iterator[Int], xs: java.util.Collection[Int], right: Int): Array[String] =
indexElementForJavaIterator[Int](itr, xs, _ < right)
def indexElementLessThanEqual(itr: java.util.Iterator[Int], xs: java.util.Collection[Int], right: Int): Array[String] =
indexElementForJavaIterator[Int](itr, xs, _ <= right)
  def indexElementIsEmpty(itr: java.util.Iterator[String], xs: java.util.Collection[String], right: String): Array[String] = // right is not used, but kept for consistency with the others to ease code generation
    indexElementForJavaIterator[String](itr, xs, _.isEmpty)
  def indexElementIsNotEmpty(itr: java.util.Iterator[String], xs: java.util.Collection[String], right: String): Array[String] = // right is not used, but kept for consistency with the others to ease code generation
    indexElementForJavaIterator[String](itr, xs, !_.isEmpty)
def indexElementLengthEqual(itr: java.util.Iterator[String], xs: java.util.Collection[String], right: Int): Array[String] =
indexElementForJavaIterator[String](itr, xs, _.length == right)
def indexElementLengthNotEqual(itr: java.util.Iterator[String], xs: java.util.Collection[String], right: Int): Array[String] =
indexElementLengthString[String](itr, xs, (e: String) => e.length != right)
def indexElementSizeEqual(itr: java.util.Iterator[String], xs: java.util.Collection[String], right: Int): Array[String] =
indexElementForJavaIterator[String](itr, xs, _.size == right)
def indexElementSizeNotEqual(itr: java.util.Iterator[String], xs: java.util.Collection[String], right: Int): Array[String] =
indexElementLengthString[String](itr, xs, (e: String) => e.size != right)
def indexElementLengthNotEqualLength(itr: java.util.Iterator[String], xs: java.util.Collection[String], right: Int): Array[String] =
indexLengthElement[String](itr, xs, (e: String) => e.length != right)
def indexElementStartsWith(itr: java.util.Iterator[String], xs: java.util.Collection[String], right: String): Array[String] =
indexElementForJavaIterator[String](itr, xs, _.startsWith(right))
def indexElementNotStartsWith(itr: java.util.Iterator[String], xs: java.util.Collection[String], right: String): Array[String] =
indexElementForJavaIterator[String](itr, xs, !_.startsWith(right))
def indexElementEndsWith(itr: java.util.Iterator[String], xs: java.util.Collection[String], right: String): Array[String] =
indexElementForJavaIterator[String](itr, xs, _.endsWith(right))
def indexElementNotEndsWith(itr: java.util.Iterator[String], xs: java.util.Collection[String], right: String): Array[String] =
indexElementForJavaIterator[String](itr, xs, !_.endsWith(right))
def indexElementInclude(itr: java.util.Iterator[String], xs: java.util.Collection[String], right: String): Array[String] =
indexElementForJavaIterator[String](itr, xs, _.indexOf(right) >= 0)
def indexElementNotInclude(itr: java.util.Iterator[String], xs: java.util.Collection[String], right: String): Array[String] =
indexElementForJavaIterator[String](itr, xs, _.indexOf(right) < 0)
def indexElementMatches(itr: java.util.Iterator[String], xs: java.util.Collection[String], right: String): Array[String] =
indexElementForJavaIterator[String](itr, xs, _.matches(right))
def indexElementNotMatches(itr: java.util.Iterator[String], xs: java.util.Collection[String], right: String): Array[String] =
indexElementForJavaIterator[String](itr, xs, !_.matches(right))
//##################################
def indexElementSizeEqualGenTraversable[T](itr: Iterator[GenTraversable[T]], xs: GenTraversable[GenTraversable[T]], right: Int): Array[String] =
indexElement[GenTraversable[T]](itr, xs, _.size == right)
def indexElementSizeNotEqualGenTraversable[T](itr: Iterator[GenTraversable[T]], xs: GenTraversable[GenTraversable[T]], right: Int): Array[String] =
indexElementLengthGenTraversable[T](itr, xs, _.size != right)
def indexElementSizeEqualGenTraversableArray[T](itr: Iterator[Array[T]], xs: GenTraversable[Array[T]], right: Int): Array[T] =
indexElement[Array[T]](itr, xs, _.size == right).asInstanceOf[Array[T]]
def indexElementSizeNotEqualGenTraversableArray[T](itr: Iterator[Array[T]], xs: GenTraversable[Array[T]], right: Int): Array[T] =
indexElementLengthArray[T](itr, xs, _.size != right).asInstanceOf[Array[T]]
def indexElementContainGenTraversable[T](itr: Iterator[GenTraversable[T]], xs: GenTraversable[GenTraversable[T]], right: T): Array[String] =
indexElement[GenTraversable[T]](itr, xs, _.exists(_ == right))
def indexElementNotContainGenTraversable[T](itr: Iterator[GenTraversable[T]], xs: GenTraversable[GenTraversable[T]], right: T): Array[String] =
indexElement[GenTraversable[T]](itr, xs, !_.exists(_ == right))
def indexElementContainGenTraversableArray[T](itr: Iterator[Array[T]], xs: GenTraversable[Array[T]], right: T): Array[T] =
indexElement[Array[T]](itr, xs, _.exists(_ == right)).asInstanceOf[Array[T]]
def indexElementNotContainGenTraversableArray[T](itr: Iterator[Array[T]], xs: GenTraversable[Array[T]], right: T): Array[T] =
indexElement[Array[T]](itr, xs, !_.exists(_ == right)).asInstanceOf[Array[T]]
def indexElementRefEqual[T <: AnyRef](itr: Iterator[T], xs: GenTraversable[T], right: T): Array[String] =
indexElement[T](itr, xs, _ eq right)
def indexElementNotRefEqual[T <: AnyRef](itr: Iterator[T], xs: GenTraversable[T], right: T): Array[String] =
indexElement[T](itr, xs, _ ne right)
def indexElementRefEqual[T <: AnyRef](itr: java.util.Iterator[T], xs: java.util.Collection[T], right: T): Array[String] =
indexElementForJavaIterator[T](itr, xs, _ eq right)
def indexElementNotRefEqual[T <: AnyRef](itr: java.util.Iterator[T], xs: java.util.Collection[T], right: T): Array[String] =
indexElementForJavaIterator[T](itr, xs, _ ne right)
def indexElementContainKey[K, V](itr: Iterator[GenMap[K, V]], xs: GenTraversable[GenMap[K, V]], right: K): Array[String] =
indexElement[GenMap[K, V]](itr, xs, _.exists(_._1 == right))
def indexElementNotContainKey[K, V](itr: Iterator[GenMap[K, V]], xs: GenTraversable[GenMap[K, V]], right: K): Array[String] =
indexElement[GenMap[K, V]](itr, xs, !_.exists(_._1 == right))
def indexElementContainValue[K, V](itr: Iterator[GenMap[K, V]], xs: GenTraversable[GenMap[K, V]], right: V): Array[String] =
indexElement[GenMap[K, V]](itr, xs, _.exists(_._2 == right))
def indexElementNotContainValue[K, V](itr: Iterator[GenMap[K, V]], xs: GenTraversable[GenMap[K, V]], right: V): Array[String] =
indexElement[GenMap[K, V]](itr, xs, !_.exists(_._2 == right))
  def indexElementJavaMapIsEmpty[K, V, JMAP[k, v] <: java.util.Map[_, _]](itr: java.util.Iterator[JMAP[K, V]], xs: java.util.Collection[JMAP[K, V]], right: Int = 0): Array[String] = // right is not used, but kept for consistency with the others to ease code generation
    indexElementForJavaIterator[java.util.Map[K, V]](itr.asInstanceOf[java.util.Iterator[java.util.Map[K, V]]], xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.isEmpty)
  def indexElementJavaMapNotIsEmpty[K, V, JMAP[k, v] <: java.util.Map[_, _]](itr: java.util.Iterator[JMAP[K, V]], xs: java.util.Collection[JMAP[K, V]], right: Int = 0): Array[String] = // right is not used, but kept for consistency with the others to ease code generation
    indexElementForJavaIterator[java.util.Map[K, V]](itr.asInstanceOf[java.util.Iterator[java.util.Map[K, V]]], xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], !_.isEmpty)
def indexElementJavaMapContainKey[K, V, JMAP[k, v] <: java.util.Map[_, _]](itr: java.util.Iterator[JMAP[K, V]], xs: java.util.Collection[JMAP[K, V]], right: K): Array[String] =
indexElementForJavaIterator[java.util.Map[K, V]](itr.asInstanceOf[java.util.Iterator[java.util.Map[K, V]]], xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.containsKey(right))
def indexElementJavaMapNotContainKey[K, V, JMAP[k, v] <: java.util.Map[_, _]](itr: java.util.Iterator[JMAP[K, V]], xs: java.util.Collection[JMAP[K, V]], right: K): Array[String] =
indexElementForJavaIterator[java.util.Map[K, V]](itr.asInstanceOf[java.util.Iterator[java.util.Map[K, V]]], xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], !_.containsKey(right))
def indexElementJavaMapContainValue[K, V, JMAP[k, v] <: java.util.Map[_, _]](itr: java.util.Iterator[JMAP[K, V]], xs: java.util.Collection[JMAP[K, V]], right: V): Array[String] =
indexElementForJavaIterator[java.util.Map[K, V]](itr.asInstanceOf[java.util.Iterator[java.util.Map[K, V]]], xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.containsValue(right))
def indexElementJavaMapNotContainValue[K, V, JMAP[k, v] <: java.util.Map[_, _]](itr: java.util.Iterator[JMAP[K, V]], xs: java.util.Collection[JMAP[K, V]], right: V): Array[String] =
indexElementForJavaIterator[java.util.Map[K, V]](itr.asInstanceOf[java.util.Iterator[java.util.Map[K, V]]], xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], !_.containsValue(right))
def indexElementJavaMapSizeEqual[K, V, JMAP[k, v] <: java.util.Map[_, _]](itr: java.util.Iterator[JMAP[K, V]], xs: java.util.Collection[JMAP[K, V]], right: Int): Array[String] =
indexElementForJavaIterator[java.util.Map[K, V]](itr.asInstanceOf[java.util.Iterator[java.util.Map[K, V]]], xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.size == right)
def indexElementJavaMapSizeNotEqual[K, V, JMAP[k, v] <: java.util.Map[_, _]](itr: java.util.Iterator[JMAP[K, V]], xs: java.util.Collection[JMAP[K, V]], right: Int): Array[String] =
indexElementLengthJavaMap[K, V, java.util.Map](itr.asInstanceOf[java.util.Iterator[java.util.Map[K, V]]], xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.size != right)
def indexElementJavaColSizeEqual[T, C[t] <: java.util.Collection[_]](itr: java.util.Iterator[C[T]], xs: java.util.Collection[C[T]], right: Int): Array[String] =
indexElementForJavaIterator[java.util.Collection[T]](itr.asInstanceOf[java.util.Iterator[java.util.Collection[T]]], xs.asInstanceOf[java.util.Collection[java.util.Collection[T]]], _.size == right)
def indexElementJavaColSizeNotEqual[T, C[t] <: java.util.Collection[_]](itr: java.util.Iterator[C[T]], xs: java.util.Collection[C[T]], right: Int): Array[String] =
indexElementLengthJavaCol[T, java.util.Collection](itr.asInstanceOf[java.util.Iterator[java.util.Collection[T]]], xs.asInstanceOf[java.util.Collection[java.util.Collection[T]]], _.size != right)
def indexElementJavaColContain[T, C[t] <: java.util.Collection[_]](itr: java.util.Iterator[C[T]], xs: java.util.Collection[C[T]], right: T): Array[String] =
indexElementForJavaIterator[java.util.Collection[T]](itr.asInstanceOf[java.util.Iterator[java.util.Collection[T]]], xs.asInstanceOf[java.util.Collection[java.util.Collection[T]]], _.contains(right))
def indexElementJavaColNotContain[T, C[t] <: java.util.Collection[_]](itr: java.util.Iterator[C[T]], xs: java.util.Collection[C[T]], right: T): Array[String] =
indexElementForJavaIterator[java.util.Collection[T]](itr.asInstanceOf[java.util.Iterator[java.util.Collection[T]]], xs.asInstanceOf[java.util.Collection[java.util.Collection[T]]], !_.contains(right))
  def indexElementJavaColIsEmpty[T, C[t] <: java.util.Collection[_]](itr: java.util.Iterator[C[T]], xs: java.util.Collection[C[T]], right: Int = 0): Array[String] = // right is not used, but kept for consistency with the others to ease code generation
    indexElementForJavaIterator[java.util.Collection[T]](itr.asInstanceOf[java.util.Iterator[java.util.Collection[T]]], xs.asInstanceOf[java.util.Collection[java.util.Collection[T]]], _.isEmpty)
  def indexElementJavaColNotIsEmpty[T, C[t] <: java.util.Collection[_]](itr: java.util.Iterator[C[T]], xs: java.util.Collection[C[T]], right: Int = 0): Array[String] = // right is not used, but kept for consistency with the others to ease code generation
    indexElementForJavaIterator[java.util.Collection[T]](itr.asInstanceOf[java.util.Iterator[java.util.Collection[T]]], xs.asInstanceOf[java.util.Collection[java.util.Collection[T]]], !_.isEmpty)
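  // Builds the "index 0, 1 and 2" (or "key ...") fragment listing every element that
  // satisfied the predicate, for use in expected failure messages.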
private def succeededIndexes[T](xs: GenTraversable[T], filterFun: T => Boolean): String = {
xs match {
case map: GenMap[_, _] =>
        val passedList = map.toList.filter(e => filterFun(e.asInstanceOf[T])).map(_._1)
if (passedList.size > 1)
"key " + passedList.dropRight(1).mkString(", ") + " and " + passedList.last
else if (passedList.size == 1)
"key " + passedList.last.toString
else
""
case _ =>
val passedList = getIndexes(xs, xs.toList.filter(e => filterFun(e))).toList
if (passedList.size > 1)
"index " + passedList.dropRight(1).mkString(", ") + " and " + passedList.last
else if (passedList.size == 1)
"index " + passedList.last.toString
else
""
}
}
private def succeededIndexesInJavaCol[T](xs: java.util.Collection[T], filterFun: T => Boolean): String = {
import collection.JavaConverters._
val passedList = getIndexes(xs.asScala, xs.asScala.toList.filter(e => filterFun(e))).toList
if (passedList.size > 1)
"index " + passedList.dropRight(1).mkString(", ") + " and " + passedList.last
else if (passedList.size == 1)
"index " + passedList.last.toString
else
""
}
// SKIP-SCALATESTJS,NATIVE-START
private def succeededIndexesInJavaMap[K, V](xs: java.util.Map[K, V], filterFun: java.util.Map.Entry[K, V] => Boolean): String = {
import collection.JavaConverters._
    val passedList = xs.asScala.toList.filter(e => filterFun(org.scalatest.Entry(e._1, e._2))).map(_._1)
if (passedList.size > 1)
"key " + passedList.dropRight(1).mkString(", ") + " and " + passedList.last
else if (passedList.size == 1)
"key " + passedList.last.toString
else
""
}
// SKIP-SCALATESTJS,NATIVE-END
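  // Same as succeededIndexes above, but stops after maxSucceed matches, mirroring
  // inspectors that fail early.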
private def failEarlySucceededIndexes[T](xs: GenTraversable[T], filterFun: T => Boolean, maxSucceed: Int): String = {
xs match {
case map: GenMap[_, _] =>
        val passedList = map.toList.filter(e => filterFun(e.asInstanceOf[T])).take(maxSucceed).map(_._1)
if (passedList.size > 1)
"key " + passedList.dropRight(1).mkString(", ") + " and " + passedList.last
else if (passedList.size == 1)
"key " + passedList.last.toString
else
""
case _ =>
val passedList = getIndexes(xs, xs.toList.filter(e => filterFun(e))).take(maxSucceed).toList
if (passedList.size > 1)
"index " + passedList.dropRight(1).mkString(", ") + " and " + passedList.last
else if (passedList.size == 1)
"index " + passedList.last.toString
else
""
}
}
private def failEarlySucceededIndexesInJavaCol[T](xs: java.util.Collection[T], filterFun: T => Boolean, maxSucceed: Int): String = {
import collection.JavaConverters._
val passedList = getIndexes(xs.asScala, xs.asScala.toList.filter(e => filterFun(e))).take(maxSucceed).toList
if (passedList.size > 1)
"index " + passedList.dropRight(1).mkString(", ") + " and " + passedList.last
else if (passedList.size == 1)
"index " + passedList.last.toString
else
""
}
// SKIP-SCALATESTJS,NATIVE-START
private def failEarlySucceededIndexesInJavaMap[K, V](xs: java.util.Map[K, V], filterFun: java.util.Map.Entry[K, V] => Boolean, maxSucceed: Int): String = {
import collection.JavaConverters._
    val passedList = xs.asScala.toList.filter(e => filterFun(org.scalatest.Entry(e._1, e._2))).take(maxSucceed).map(_._1)
if (passedList.size > 1)
"key " + passedList.dropRight(1).mkString(", ") + " and " + passedList.last
else if (passedList.size == 1)
"key " + passedList.last.toString
else
""
}
// SKIP-SCALATESTJS,NATIVE-END
def succeededIndexesEqualBoolean[T](xs: GenTraversable[T], value: Boolean): String =
succeededIndexes(xs, (e: T) => value)
def succeededIndexesNotEqualBoolean[T](xs: GenTraversable[T], value: Boolean): String =
succeededIndexes(xs, (e: T) => !value)
def succeededIndexesEqual[T](xs: GenTraversable[T], value: T): String =
succeededIndexes(xs, (e: T) => e == value)
def succeededIndexesNotEqual[T](xs: GenTraversable[T], value: T): String =
succeededIndexes(xs, (e: T) => e != value)
// SKIP-SCALATESTJS,NATIVE-START
def succeededIndexesEqual[K, V](xs: java.util.Map[K, V], value: java.util.Map.Entry[K, V]): String =
succeededIndexesInJavaMap(xs, (e: java.util.Map.Entry[K, V]) => e.getKey == value.getKey && e.getValue == value.getValue)
def succeededIndexesNotEqual[K, V](xs: java.util.Map[K, V], value: java.util.Map.Entry[K, V]): String =
succeededIndexesInJavaMap(xs, (e: java.util.Map.Entry[K, V]) => e.getKey != value.getKey || e.getValue != value.getValue)
// SKIP-SCALATESTJS,NATIVE-END
def succeededIndexesLessThanEqual(xs: GenTraversable[Int], value: Int): String =
succeededIndexes(xs, (e: Int) => e <= value)
def succeededIndexesLessThan(xs: GenTraversable[Int], value: Int): String =
succeededIndexes(xs, (e: Int) => e < value)
def succeededIndexesMoreThanEqual(xs: GenTraversable[Int], value: Int): String =
succeededIndexes(xs, (e: Int) => e >= value)
def succeededIndexesMoreThan(xs: GenTraversable[Int], value: Int): String =
succeededIndexes(xs, (e: Int) => e > value)
def succeededIndexesIsEmpty(xs: GenTraversable[String], value: String): String =
succeededIndexes(xs, (e: String) => e.isEmpty)
def succeededIndexesIsNotEmpty(xs: GenTraversable[String], value: String): String =
succeededIndexes(xs, (e: String) => !e.isEmpty)
def succeededIndexesSizeEqual(xs: GenTraversable[String], value: Int): String =
succeededIndexes(xs, (e: String) => e.size == value)
def succeededIndexesSizeNotEqual(xs: GenTraversable[String], value: Int): String =
succeededIndexes(xs, (e: String) => e.size != value)
def succeededIndexesLengthEqual(xs: GenTraversable[String], value: Int): String =
succeededIndexes(xs, (e: String) => e.length == value)
def succeededIndexesLengthNotEqual(xs: GenTraversable[String], value: Int): String =
succeededIndexes(xs, (e: String) => e.length != value)
def succeededIndexesStartsWith(xs: GenTraversable[String], value: String): String =
succeededIndexes(xs, (e: String) => e.startsWith(value))
def succeededIndexesNotStartsWith(xs: GenTraversable[String], value: String): String =
succeededIndexes(xs, (e: String) => !e.startsWith(value))
def succeededIndexesEndsWith(xs: GenTraversable[String], value: String): String =
succeededIndexes(xs, (e: String) => e.endsWith(value))
def succeededIndexesNotEndsWith(xs: GenTraversable[String], value: String): String =
succeededIndexes(xs, (e: String) => !e.endsWith(value))
def succeededIndexesInclude(xs: GenTraversable[String], value: String): String =
succeededIndexes(xs, (e: String) => e.indexOf(value) >= 0)
def succeededIndexesNotInclude(xs: GenTraversable[String], value: String): String =
succeededIndexes(xs, (e: String) => e.indexOf(value) < 0)
def succeededIndexesSizeEqualGenTraversable[T](xs: GenTraversable[GenTraversable[T]], value: Int): String =
succeededIndexes(xs, (e: GenTraversable[T]) => e.size == value)
def succeededIndexesSizeNotEqualGenTraversable[T](xs: GenTraversable[GenTraversable[T]], value: Int): String =
succeededIndexes(xs, (e: GenTraversable[T]) => e.size != value)
def succeededIndexesSizeEqualGenTraversableArray[T](xs: GenTraversable[Array[T]], value: Int): String =
succeededIndexes(xs, (e: Array[T]) => e.size == value)
def succeededIndexesSizeNotEqualGenTraversableArray[T](xs: GenTraversable[Array[T]], value: Int): String =
succeededIndexes(xs, (e: Array[T]) => e.size != value)
def succeededIndexesMatches(xs: GenTraversable[String], value: String): String =
succeededIndexes(xs, (e: String) => e.matches(value))
def succeededIndexesNotMatches(xs: GenTraversable[String], value: String): String =
succeededIndexes(xs, (e: String) => !e.matches(value))
def succeededIndexesEqualBoolean[T](xs: java.util.Collection[T], value: Boolean): String =
succeededIndexesInJavaCol(xs, (e: T) => value)
def succeededIndexesNotEqualBoolean[T](xs: java.util.Collection[T], value: Boolean): String =
succeededIndexesInJavaCol(xs, (e: T) => !value)
def succeededIndexesEqual[T](xs: java.util.Collection[T], value: T): String =
succeededIndexesInJavaCol(xs, (e: T) => e == value)
def succeededIndexesNotEqual[T](xs: java.util.Collection[T], value: T): String =
succeededIndexesInJavaCol(xs, (e: T) => e != value)
def succeededIndexesLessThanEqual(xs: java.util.Collection[Int], value: Int): String =
succeededIndexesInJavaCol(xs, (e: Int) => e <= value)
def succeededIndexesLessThan(xs: java.util.Collection[Int], value: Int): String =
succeededIndexesInJavaCol(xs, (e: Int) => e < value)
def succeededIndexesMoreThanEqual(xs: java.util.Collection[Int], value: Int): String =
succeededIndexesInJavaCol(xs, (e: Int) => e >= value)
def succeededIndexesMoreThan(xs: java.util.Collection[Int], value: Int): String =
succeededIndexesInJavaCol(xs, (e: Int) => e > value)
def succeededIndexesIsEmpty(xs: java.util.Collection[String], value: String): String =
succeededIndexesInJavaCol(xs, (e: String) => e.isEmpty)
def succeededIndexesIsNotEmpty(xs: java.util.Collection[String], value: String): String =
succeededIndexesInJavaCol(xs, (e: String) => !e.isEmpty)
def succeededIndexesSizeEqual(xs: java.util.Collection[String], value: Int): String =
succeededIndexesInJavaCol(xs, (e: String) => e.size == value)
def succeededIndexesSizeNotEqual(xs: java.util.Collection[String], value: Int): String =
succeededIndexesInJavaCol(xs, (e: String) => e.size != value)
def succeededIndexesLengthEqual(xs: java.util.Collection[String], value: Int): String =
succeededIndexesInJavaCol(xs, (e: String) => e.length == value)
def succeededIndexesLengthNotEqual(xs: java.util.Collection[String], value: Int): String =
succeededIndexesInJavaCol(xs, (e: String) => e.length != value)
def succeededIndexesStartsWith(xs: java.util.Collection[String], value: String): String =
succeededIndexesInJavaCol(xs, (e: String) => e.startsWith(value))
def succeededIndexesNotStartsWith(xs: java.util.Collection[String], value: String): String =
succeededIndexesInJavaCol(xs, (e: String) => !e.startsWith(value))
def succeededIndexesEndsWith(xs: java.util.Collection[String], value: String): String =
succeededIndexesInJavaCol(xs, (e: String) => e.endsWith(value))
def succeededIndexesNotEndsWith(xs: java.util.Collection[String], value: String): String =
succeededIndexesInJavaCol(xs, (e: String) => !e.endsWith(value))
def succeededIndexesInclude(xs: java.util.Collection[String], value: String): String =
succeededIndexesInJavaCol(xs, (e: String) => e.indexOf(value) >= 0)
def succeededIndexesNotInclude(xs: java.util.Collection[String], value: String): String =
succeededIndexesInJavaCol(xs, (e: String) => e.indexOf(value) < 0)
def succeededIndexesMatches(xs: java.util.Collection[String], value: String): String =
succeededIndexesInJavaCol(xs, (e: String) => e.matches(value))
def succeededIndexesNotMatches(xs: java.util.Collection[String], value: String): String =
succeededIndexesInJavaCol(xs, (e: String) => !e.matches(value))
def succeededIndexesContainGenTraversable[T](xs: GenTraversable[GenTraversable[T]], right: T): String =
succeededIndexes[GenTraversable[T]](xs, _.exists(_ == right))
def succeededIndexesNotContainGenTraversable[T](xs: GenTraversable[GenTraversable[T]], right: T): String =
succeededIndexes[GenTraversable[T]](xs, !_.exists(_ == right))
def succeededIndexesContainGenTraversableArray[T](xs: GenTraversable[Array[T]], right: T): String =
succeededIndexes[Array[T]](xs, _.exists(_ == right))
def succeededIndexesNotContainGenTraversableArray[T](xs: GenTraversable[Array[T]], right: T): String =
succeededIndexes[Array[T]](xs, !_.exists(_ == right))
def succeededIndexesRefEqual[T <: AnyRef](xs: GenTraversable[T], value: T): String =
succeededIndexes[T](xs, _ eq value)
def succeededIndexesNotRefEqual[T <: AnyRef](xs: GenTraversable[T], value: T): String =
succeededIndexes[T](xs, _ ne value)
//#################################
def succeededIndexesRefEqual[T <: AnyRef](xs: java.util.Collection[T], value: T): String =
succeededIndexesInJavaCol[T](xs, _ eq value)
def succeededIndexesNotRefEqual[T <: AnyRef](xs: java.util.Collection[T], value: T): String =
succeededIndexesInJavaCol[T](xs, _ ne value)
//#################################
def succeededIndexesContainKey[K, V](xs: GenTraversable[GenMap[K, V]], right: K): String =
succeededIndexes[GenMap[K, V]](xs, _.exists(_._1 == right))
def succeededIndexesNotContainKey[K, V](xs: GenTraversable[GenMap[K, V]], right: K): String =
succeededIndexes[GenMap[K, V]](xs, !_.exists(_._1 == right))
def succeededIndexesContainValue[K, V](xs: GenTraversable[GenMap[K, V]], right: V): String =
succeededIndexes[GenMap[K, V]](xs, _.exists(_._2 == right))
def succeededIndexesNotContainValue[K, V](xs: GenTraversable[GenMap[K, V]], right: V): String =
succeededIndexes[GenMap[K, V]](xs, !_.exists(_._2 == right))
  def succeededIndexesJavaMapIsEmpty[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: Int = 0): String = // right is not used, but kept for consistency with the others to ease code generation
    succeededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.isEmpty)
  def succeededIndexesJavaMapNotIsEmpty[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: Int = 0): String = // right is not used, but kept for consistency with the others to ease code generation
    succeededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], !_.isEmpty)
def succeededIndexesJavaMapContainKey[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: K): String =
succeededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.containsKey(right))
def succeededIndexesJavaMapNotContainKey[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: K): String =
succeededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], !_.containsKey(right))
def succeededIndexesJavaMapContainValue[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: V): String =
succeededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.containsValue(right))
def succeededIndexesJavaMapNotContainValue[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: V): String =
succeededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], !_.containsValue(right))
def succeededIndexesJavaMapSizeEqual[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: Int): String =
succeededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.size == right)
def succeededIndexesJavaMapSizeNotEqual[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: Int): String =
succeededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.size != right)
def succeededIndexesJavaColSizeEqual[E, C[e] <: java.util.Collection[_]](xs: java.util.Collection[C[E]], right: Int): String =
succeededIndexesInJavaCol[java.util.Collection[E]](xs.asInstanceOf[java.util.Collection[java.util.Collection[E]]], _.size == right)
def succeededIndexesJavaColSizeNotEqual[E, C[e] <: java.util.Collection[_]](xs: java.util.Collection[C[E]], right: Int): String =
succeededIndexesInJavaCol[java.util.Collection[E]](xs.asInstanceOf[java.util.Collection[java.util.Collection[E]]], _.size != right)
def succeededIndexesJavaColContain[E, C[e] <: java.util.Collection[_]](xs: java.util.Collection[C[E]], right: E): String =
succeededIndexesInJavaCol[java.util.Collection[E]](xs.asInstanceOf[java.util.Collection[java.util.Collection[E]]], _.contains(right))
def succeededIndexesJavaColNotContain[E, C[e] <: java.util.Collection[_]](xs: java.util.Collection[C[E]], right: E): String =
succeededIndexesInJavaCol[java.util.Collection[E]](xs.asInstanceOf[java.util.Collection[java.util.Collection[E]]], !_.contains(right))
  def succeededIndexesJavaColIsEmpty[E, C[e] <: java.util.Collection[_]](xs: java.util.Collection[C[E]], right: Int = 0): String = // right is not used, but kept for consistency with the others to simplify code generation
succeededIndexesInJavaCol[java.util.Collection[E]](xs.asInstanceOf[java.util.Collection[java.util.Collection[E]]], _.isEmpty)
  def succeededIndexesJavaColNotIsEmpty[E, C[e] <: java.util.Collection[_]](xs: java.util.Collection[C[E]], right: Int = 0): String = // right is not used, but kept for consistency with the others to simplify code generation
succeededIndexesInJavaCol[java.util.Collection[E]](xs.asInstanceOf[java.util.Collection[java.util.Collection[E]]], !_.isEmpty)
def failEarlySucceededIndexesEqualBoolean[T](xs: GenTraversable[T], value: Boolean, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: T) => value, maxSucceed)
def failEarlySucceededIndexesNotEqualBoolean[T](xs: GenTraversable[T], value: Boolean, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: T) => !value, maxSucceed)
def failEarlySucceededIndexesEqual[T](xs: GenTraversable[T], value: T, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: T) => e == value, maxSucceed)
def failEarlySucceededIndexesNotEqual[T](xs: GenTraversable[T], value: T, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: T) => e != value, maxSucceed)
def failEarlySucceededIndexesLessThanEqual(xs: GenTraversable[Int], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: Int) => e <= value, maxSucceed)
def failEarlySucceededIndexesLessThan(xs: GenTraversable[Int], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: Int) => e < value, maxSucceed)
def failEarlySucceededIndexesMoreThanEqual(xs: GenTraversable[Int], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: Int) => e >= value, maxSucceed)
def failEarlySucceededIndexesMoreThan(xs: GenTraversable[Int], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: Int) => e > value, maxSucceed)
def failEarlySucceededIndexesIsEmpty(xs: GenTraversable[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: String) => e.isEmpty, maxSucceed)
def failEarlySucceededIndexesIsNotEmpty(xs: GenTraversable[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: String) => !e.isEmpty, maxSucceed)
def failEarlySucceededIndexesSizeEqual(xs: GenTraversable[String], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: String) => e.size == value, maxSucceed)
def failEarlySucceededIndexesSizeNotEqual(xs: GenTraversable[String], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: String) => e.size != value, maxSucceed)
def failEarlySucceededIndexesLengthEqual(xs: GenTraversable[String], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: String) => e.length == value, maxSucceed)
def failEarlySucceededIndexesLengthNotEqual(xs: GenTraversable[String], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: String) => e.length != value, maxSucceed)
def failEarlySucceededIndexesStartsWith(xs: GenTraversable[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: String) => e.startsWith(value), maxSucceed)
def failEarlySucceededIndexesNotStartsWith(xs: GenTraversable[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: String) => !e.startsWith(value), maxSucceed)
def failEarlySucceededIndexesEndsWith(xs: GenTraversable[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: String) => e.endsWith(value), maxSucceed)
def failEarlySucceededIndexesNotEndsWith(xs: GenTraversable[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: String) => !e.endsWith(value), maxSucceed)
def failEarlySucceededIndexesInclude(xs: GenTraversable[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: String) => e.indexOf(value) >= 0, maxSucceed)
def failEarlySucceededIndexesNotInclude(xs: GenTraversable[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: String) => e.indexOf(value) < 0, maxSucceed)
//################################################
// SKIP-SCALATESTJS,NATIVE-START
def failEarlySucceededIndexesEqualBoolean[T](xs: java.util.Collection[T], value: Boolean, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: T) => value, maxSucceed)
def failEarlySucceededIndexesNotEqualBoolean[T](xs: java.util.Collection[T], value: Boolean, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: T) => !value, maxSucceed)
def failEarlySucceededIndexesEqual[T](xs: java.util.Collection[T], value: T, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: T) => e == value, maxSucceed)
def failEarlySucceededIndexesNotEqual[T](xs: java.util.Collection[T], value: T, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: T) => e != value, maxSucceed)
def failEarlySucceededIndexesEqual[K, V](xs: java.util.Map[K, V], value: java.util.Map.Entry[K, V], maxSucceed: Int): String =
failEarlySucceededIndexesInJavaMap(xs, (e: java.util.Map.Entry[K, V]) => e.getKey == value.getKey && e.getValue == value.getValue, maxSucceed)
def failEarlySucceededIndexesNotEqual[K, V](xs: java.util.Map[K, V], value: java.util.Map.Entry[K, V], maxSucceed: Int): String =
failEarlySucceededIndexesInJavaMap(xs, (e: java.util.Map.Entry[K, V]) => e.getKey != value.getKey || e.getValue != value.getValue, maxSucceed)
def failEarlySucceededIndexesLessThanEqual(xs: java.util.Collection[Int], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: Int) => e <= value, maxSucceed)
def failEarlySucceededIndexesLessThan(xs: java.util.Collection[Int], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: Int) => e < value, maxSucceed)
def failEarlySucceededIndexesMoreThanEqual(xs: java.util.Collection[Int], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: Int) => e >= value, maxSucceed)
def failEarlySucceededIndexesMoreThan(xs: java.util.Collection[Int], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: Int) => e > value, maxSucceed)
def failEarlySucceededIndexesIsEmpty(xs: java.util.Collection[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: String) => e.isEmpty, maxSucceed)
def failEarlySucceededIndexesIsNotEmpty(xs: java.util.Collection[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: String) => !e.isEmpty, maxSucceed)
def failEarlySucceededIndexesSizeEqual(xs: java.util.Collection[String], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: String) => e.size == value, maxSucceed)
def failEarlySucceededIndexesSizeNotEqual(xs: java.util.Collection[String], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: String) => e.size != value, maxSucceed)
def failEarlySucceededIndexesLengthEqual(xs: java.util.Collection[String], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: String) => e.length == value, maxSucceed)
def failEarlySucceededIndexesLengthNotEqual(xs: java.util.Collection[String], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: String) => e.length != value, maxSucceed)
def failEarlySucceededIndexesStartsWith(xs: java.util.Collection[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: String) => e.startsWith(value), maxSucceed)
def failEarlySucceededIndexesNotStartsWith(xs: java.util.Collection[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: String) => !e.startsWith(value), maxSucceed)
def failEarlySucceededIndexesEndsWith(xs: java.util.Collection[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: String) => e.endsWith(value), maxSucceed)
def failEarlySucceededIndexesNotEndsWith(xs: java.util.Collection[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: String) => !e.endsWith(value), maxSucceed)
def failEarlySucceededIndexesInclude(xs: java.util.Collection[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: String) => e.indexOf(value) >= 0, maxSucceed)
def failEarlySucceededIndexesNotInclude(xs: java.util.Collection[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: String) => e.indexOf(value) < 0, maxSucceed)
// SKIP-SCALATESTJS,NATIVE-END
//################################################
def failEarlySucceededIndexesSizeEqualGenTraversable[T](xs: GenTraversable[GenTraversable[T]], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexes[GenTraversable[T]](xs, _.size == value, maxSucceed)
def failEarlySucceededIndexesSizeNotEqualGenTraversable[T](xs: GenTraversable[GenTraversable[T]], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexes[GenTraversable[T]](xs, _.size != value, maxSucceed)
def failEarlySucceededIndexesSizeEqualGenTraversableArray[T](xs: GenTraversable[Array[T]], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: Array[T]) => e.size == value, maxSucceed)
def failEarlySucceededIndexesSizeNotEqualGenTraversableArray[T](xs: GenTraversable[Array[T]], value: Int, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: Array[T]) => e.size != value, maxSucceed)
def failEarlySucceededIndexesMatches(xs: GenTraversable[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: String) => e.matches(value), maxSucceed)
def failEarlySucceededIndexesNotMatches(xs: GenTraversable[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexes(xs, (e: String) => !e.matches(value), maxSucceed)
def failEarlySucceededIndexesMatches(xs: java.util.Collection[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: String) => e.matches(value), maxSucceed)
def failEarlySucceededIndexesNotMatches(xs: java.util.Collection[String], value: String, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol(xs, (e: String) => !e.matches(value), maxSucceed)
def failEarlySucceededIndexesContainGenTraversable[T](xs: GenTraversable[GenTraversable[T]], right: T, maxSucceed: Int): String =
failEarlySucceededIndexes[GenTraversable[T]](xs, _.exists(_ == right), maxSucceed)
def failEarlySucceededIndexesNotContainGenTraversable[T](xs: GenTraversable[GenTraversable[T]], right: T, maxSucceed: Int): String =
failEarlySucceededIndexes[GenTraversable[T]](xs, !_.exists(_ == right), maxSucceed)
def failEarlySucceededIndexesContainGenTraversableArray[T](xs: GenTraversable[Array[T]], right: T, maxSucceed: Int): String =
failEarlySucceededIndexes[Array[T]](xs, _.exists(_ == right), maxSucceed)
def failEarlySucceededIndexesNotContainGenTraversableArray[T](xs: GenTraversable[Array[T]], right: T, maxSucceed: Int): String =
failEarlySucceededIndexes[Array[T]](xs, !_.exists(_ == right), maxSucceed)
def failEarlySucceededIndexesRefEqual[T <: AnyRef](xs: GenTraversable[T], value: T, maxSucceed: Int): String =
failEarlySucceededIndexes[T](xs, _ eq value, maxSucceed)
def failEarlySucceededIndexesNotRefEqual[T <: AnyRef](xs: GenTraversable[T], value: T, maxSucceed: Int): String =
failEarlySucceededIndexes[T](xs, _ ne value, maxSucceed)
def failEarlySucceededIndexesRefEqual[T <: AnyRef](xs: java.util.Collection[T], value: T, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol[T](xs, _ eq value, maxSucceed)
def failEarlySucceededIndexesNotRefEqual[T <: AnyRef](xs: java.util.Collection[T], value: T, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol[T](xs, _ ne value, maxSucceed)
def failEarlySucceededIndexesContainKey[K, V](xs: GenTraversable[GenMap[K, V]], right: K, maxSucceed: Int): String =
failEarlySucceededIndexes[GenMap[K, V]](xs, _.exists(_._1 == right), maxSucceed)
def failEarlySucceededIndexesNotContainKey[K, V](xs: GenTraversable[GenMap[K, V]], right: K, maxSucceed: Int): String =
failEarlySucceededIndexes[GenMap[K, V]](xs, !_.exists(_._1 == right), maxSucceed)
def failEarlySucceededIndexesContainValue[K, V](xs: GenTraversable[GenMap[K, V]], right: V, maxSucceed: Int): String =
failEarlySucceededIndexes[GenMap[K, V]](xs, _.exists(_._2 == right), maxSucceed)
def failEarlySucceededIndexesNotContainValue[K, V](xs: GenTraversable[GenMap[K, V]], right: V, maxSucceed: Int): String =
failEarlySucceededIndexes[GenMap[K, V]](xs, !_.exists(_._2 == right), maxSucceed)
  def failEarlySucceededIndexesJavaMapIsEmpty[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: Int = 0, maxSucceed: Int): String = // right is not used, but kept for consistency with the others to simplify code generation
failEarlySucceededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.isEmpty, maxSucceed)
  def failEarlySucceededIndexesJavaMapNotIsEmpty[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: Int = 0, maxSucceed: Int): String = // right is not used, but kept for consistency with the others to simplify code generation
failEarlySucceededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], !_.isEmpty, maxSucceed)
def failEarlySucceededIndexesJavaMapContainKey[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: K, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.containsKey(right), maxSucceed)
def failEarlySucceededIndexesJavaMapNotContainKey[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: K, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], !_.containsKey(right), maxSucceed)
def failEarlySucceededIndexesJavaMapContainValue[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: V, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.containsValue(right), maxSucceed)
def failEarlySucceededIndexesJavaMapNotContainValue[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: V, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], !_.containsValue(right), maxSucceed)
def failEarlySucceededIndexesJavaMapSizeEqual[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: Int, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.size == right, maxSucceed)
def failEarlySucceededIndexesJavaMapSizeNotEqual[K, V, JMAP[k, v] <: java.util.Map[_, _]](xs: java.util.Collection[JMAP[K, V]], right: Int, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol[java.util.Map[K, V]](xs.asInstanceOf[java.util.Collection[java.util.Map[K, V]]], _.size != right, maxSucceed)
def failEarlySucceededIndexesJavaColSizeEqual[E, C[e] <: java.util.Collection[_]](xs: java.util.Collection[C[E]], right: Int, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol[java.util.Collection[E]](xs.asInstanceOf[java.util.Collection[java.util.Collection[E]]], _.size == right, maxSucceed)
def failEarlySucceededIndexesJavaColSizeNotEqual[E, C[e] <: java.util.Collection[_]](xs: java.util.Collection[C[E]], right: Int, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol[java.util.Collection[E]](xs.asInstanceOf[java.util.Collection[java.util.Collection[E]]], _.size != right, maxSucceed)
def failEarlySucceededIndexesJavaColContain[E, C[e] <: java.util.Collection[_]](xs: java.util.Collection[C[E]], right: E, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol[java.util.Collection[E]](xs.asInstanceOf[java.util.Collection[java.util.Collection[E]]], _.contains(right), maxSucceed)
def failEarlySucceededIndexesJavaColNotContain[E, C[e] <: java.util.Collection[_]](xs: java.util.Collection[C[E]], right: E, maxSucceed: Int): String =
failEarlySucceededIndexesInJavaCol[java.util.Collection[E]](xs.asInstanceOf[java.util.Collection[java.util.Collection[E]]], !_.contains(right), maxSucceed)
  def failEarlySucceededIndexesJavaColIsEmpty[E, C[e] <: java.util.Collection[_]](xs: java.util.Collection[C[E]], right: Int = 0, maxSucceed: Int): String = // right is not used, but kept for consistency with the others to simplify code generation
failEarlySucceededIndexesInJavaCol[java.util.Collection[E]](xs.asInstanceOf[java.util.Collection[java.util.Collection[E]]], _.isEmpty, maxSucceed)
  def failEarlySucceededIndexesJavaColNotIsEmpty[E, C[e] <: java.util.Collection[_]](xs: java.util.Collection[C[E]], right: Int = 0, maxSucceed: Int): String = // right is not used, but kept for consistency with the others to simplify code generation
failEarlySucceededIndexesInJavaCol[java.util.Collection[E]](xs.asInstanceOf[java.util.Collection[java.util.Collection[E]]], !_.isEmpty, maxSucceed)
private val TEMP_DIR_ATTEMPTS = 10000
// This is based on createTempDir here (Apache License): http://code.google.com/p/guava-libraries/source/browse/guava/src/com/google/common/io/Files.java
  // java.nio.file.Files#createTempDirectory(), which exists since Java 7, should be preferred once we no longer support Java 5/6.
def createTempDirectory(): File = {
val baseDir = new File(System.getProperty("java.io.tmpdir"))
val baseName = System.currentTimeMillis + "-"
@tailrec
def tryCreateTempDirectory(counter: Int): Option[File] = {
val tempDir = new File(baseDir, baseName + counter)
if (tempDir.mkdir())
Some(tempDir)
else if (counter < TEMP_DIR_ATTEMPTS)
tryCreateTempDirectory(counter + 1)
else
None
}
tryCreateTempDirectory(0) match {
case Some(tempDir) => tempDir
case None =>
throw new IllegalStateException(
"Failed to create directory within " +
TEMP_DIR_ATTEMPTS + " attempts (tried " +
baseName + "0 to " + baseName +
          (TEMP_DIR_ATTEMPTS - 1) + ')')
}
}
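  // Added sketch (not part of the original file): once Java 7+ can be assumed, the
  // retry loop above collapses to the NIO call referenced in the comment. The
  // "scalatest-" prefix here is an arbitrary illustrative choice.
  def createTempDirectoryNio(): File =
    java.nio.file.Files.createTempDirectory("scalatest-").toFile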
def javaSet[T](elements: T*): java.util.Set[T] = {
val javaSet = new java.util.HashSet[T]()
elements.foreach(javaSet.add(_))
javaSet
}
def javaList[T](elements: T*): java.util.List[T] = {
val javaList = new java.util.ArrayList[T]()
elements.foreach(javaList.add(_))
javaList
}
def javaMap[K, V](elements: Entry[K, V]*): java.util.LinkedHashMap[K, V] = {
val m = new java.util.LinkedHashMap[K, V]
elements.foreach(e => m.put(e.getKey, e.getValue))
m
}
// This gives a comparator that compares based on the value in the passed in order map
private def orderMapComparator[T](orderMap: Map[T, Int]): java.util.Comparator[T] =
new java.util.Comparator[T] {
def compare(x: T, y: T): Int = {
        // When both x and y are defined in the order map, use their corresponding
        // values to compare (which, in the usage below, are the indices of insertion order)
        if (orderMap.get(x).isDefined && orderMap.get(y).isDefined)
          orderMap(x) compare orderMap(y)
        else {
          // It can happen that the comparator is used by the equals method to check
          // whether two elements are equal. In the use-case below, orderMap only contains
          // elements within the TreeSet/TreeMap itself, but in the equals method elements
          // from other instances of TreeSet/TreeMap can be passed in to check for equality.
          // So the code below handles elements of type Int and String, which is enough
          // for our tests. hashCode will be used for other types of objects; in the
          // future, special handling for other types can be added if necessary.
          // The relationship and behavior of comparator/ordering/equals is well defined
          // in the JavaDoc of java.lang.Comparable:
          // http://docs.oracle.com/javase/6/docs/api/java/lang/Comparable.html
x match {
case xInt: Int =>
y match {
case yInt: Int => xInt compare yInt
case _ => x.hashCode compare y.hashCode
}
case xStr: String =>
y match {
case yStr: String => xStr compare yStr
case _ => x.hashCode compare y.hashCode
}
case _ => x.hashCode compare y.hashCode
}
}
}
}
def sortedSet[T](elements: T*): SortedSet[T] = {
val orderMap = Map.empty[T, Int] ++ elements.zipWithIndex
val comparator = orderMapComparator(orderMap)
implicit val ordering: Ordering[T] = new Ordering[T] {
def compare(x: T, y: T): Int = comparator.compare(x, y)
}
SortedSet.empty[T] ++ elements
}
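  // Added sketch (not part of the original file): because orderMapComparator orders
  // by insertion index, sortedSet preserves the order in which elements were passed.
  // The values below are arbitrary illustrations.
  def sortedSetInsertionOrderExample(): Unit =
    assert(sortedSet(3, 1, 2).toList == List(3, 1, 2))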
def sortedMap[K, V](elements: (K, V)*): SortedMap[K, V] = {
val orderMap = Map.empty[K, Int] ++ elements.map(_._1).zipWithIndex
val comparator = orderMapComparator(orderMap)
implicit val ordering: Ordering[K] = new Ordering[K] {
def compare(x: K, y: K): Int = comparator.compare(x, y)
}
SortedMap.empty[K, V] ++ elements
}
def javaSortedSet[T](elements: T*): java.util.SortedSet[T] = {
val orderMap = Map.empty[T, Int] ++ elements.zipWithIndex
val comparator = orderMapComparator(orderMap)
val sortedSet = new java.util.TreeSet[T](comparator)
elements.foreach(sortedSet.add(_))
sortedSet
}
def javaSortedMap[K, V](elements: Entry[K, V]*): java.util.SortedMap[K, V] = {
val orderMap = Map.empty[K, Int] ++ elements.map(_.getKey).zipWithIndex
val comparator = orderMapComparator(orderMap)
val sortedMap = new java.util.TreeMap[K, V](comparator)
elements.foreach(e => sortedMap.put(e.getKey, e.getValue))
sortedMap
}
// SKIP-SCALATESTJS,NATIVE-START
def serializeRoundtrip[A](a: A): A = {
val baos = new java.io.ByteArrayOutputStream
val oos = new java.io.ObjectOutputStream(baos)
oos.writeObject(a)
oos.flush()
val ois = new java.io.ObjectInputStream(new java.io.ByteArrayInputStream(baos.toByteArray))
ois.readObject.asInstanceOf[A]
}
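  // Added sketch (not part of the original file): typical round-trip usage, assuming
  // the value is Serializable; the copy is equal to, but independent of, the original.
  def serializeRoundtripExample(): Unit = {
    val original = List("a", "b", "c")
    assert(serializeRoundtrip(original) == original)
  }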
// SKIP-SCALATESTJS,NATIVE-END
def checkMessageStackDepth(exception: StackDepthException, message: String, fileName: String, lineNumber: Int): Unit = {
assert(exception.message === Some(message))
assert(exception.failedCodeFileName === Some(fileName))
assert(exception.failedCodeLineNumber === Some(lineNumber))
}
def prettifyAst(str: String): String = {
import scala.annotation.tailrec
    @tailrec
    def getUntilNextDoubleQuote(itr: BufferedIterator[Char], buf: StringBuilder = new StringBuilder): String = {
if (itr.hasNext) {
val next = itr.next
buf.append(next)
if (next != '\"')
getUntilNextDoubleQuote(itr, buf)
else
buf.toString
}
else
throw new IllegalStateException("Expecting closing \", but none of them found")
}
val brackets = Set('(', ')')
@tailrec
def getNextBracket(itr: BufferedIterator[Char], buf: StringBuilder = new StringBuilder): (Char, String) = {
if (itr.hasNext) {
if (brackets.contains(itr.head))
(itr.head, buf.toString)
else {
val next = itr.next
buf.append(next)
if (next == '\"')
buf.append(getUntilNextDoubleQuote(itr))
getNextBracket(itr, buf)
}
}
else
throw new IllegalStateException("Expecting '(' or ')', but none of them found")
}
@tailrec
def transform(itr: BufferedIterator[Char], openBracket: Int, builder: StringBuilder, multilineBracket: Boolean = false): Unit = {
if (itr.hasNext) {
val next = itr.next
val (newOpenBracket, newMultilineBracket) =
next match {
case '(' =>
val (nextBracket, textWithin) = getNextBracket(itr)
if (nextBracket == '(') {
builder.append("(\n")
val newOpenBracket = openBracket + 1
builder.append(" " * newOpenBracket)
builder.append(textWithin)
(newOpenBracket, true)
}
else {
builder.append("(")
builder.append(textWithin)
(openBracket, false)
}
case ')' =>
val newOpenBracket =
if (multilineBracket) {
builder.append("\n")
val newOpenBracket =
if (openBracket > 0)
openBracket - 1
else
openBracket
builder.append(" " * newOpenBracket)
newOpenBracket
}
else
openBracket
if (itr.hasNext && itr.head == ',') {
itr.next
builder.append("),\n")
builder.append(" " * newOpenBracket)
}
else
builder.append(")")
if (itr.hasNext && itr.head == ' ')
itr.next
if (newOpenBracket == 0)
builder.append("\n")
(newOpenBracket, true)
case '\n' =>
builder.append("\n")
builder.append(" " * openBracket)
(openBracket, multilineBracket)
case other => builder.append(other)
(openBracket, multilineBracket)
}
transform(itr, newOpenBracket, builder, newMultilineBracket)
}
}
val itr = str.toCharArray.iterator.buffered
val builder = new StringBuilder
transform(itr, 0, builder)
builder.toString
}
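  // Illustrative behaviour (an assumption about typical input, not from the original
  // file): given a one-line AST dump such as
  //   Apply(Select(Ident("x"), "plus"), List(Literal(1)))
  // prettifyAst breaks each nested "(" group onto its own indented line, while text
  // inside double quotes is copied through untouched.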
}
| dotty-staging/scalatest | common-test.dotty/src/main/scala/org/scalatest/SharedHelpers.scala | Scala | apache-2.0 | 100,095 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Outworkers Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.builder.serializers
import com.websudos.phantom.builder.query.QueryBuilderTest
import com.websudos.phantom.dsl._
import com.websudos.phantom.tables.TestDatabase
class CreateQuerySerialisationTest extends QueryBuilderTest {
"The CREATE query builder" - {
"should generate clustering keys for a schema queries" - {
"generate a descending order clustering key on a table with a single clustering key" in {
val qb = TestDatabase.timeSeriesTable.create.qb.queryString
qb shouldEqual "CREATE TABLE phantom.timeSeriesTable (id uuid, name text, unixTimestamp timestamp, " +
"PRIMARY KEY (id, unixTimestamp)) WITH CLUSTERING ORDER BY (unixTimestamp DESC)"
}
}
}
}
| levinson/phantom | phantom-dsl/src/test/scala/com/websudos/phantom/builder/serializers/CreateQuerySerialisationTest.scala | Scala | bsd-2-clause | 2,233 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.magic.builtin
import java.io.OutputStream
import java.net.URL
import java.nio.file.{FileSystems, Files}
import org.apache.toree.interpreter.Interpreter
import org.apache.toree.magic.dependencies.{IncludeConfig, IncludeOutputStream, IncludeInterpreter, IncludeSparkContext}
import com.typesafe.config.ConfigFactory
import org.apache.spark.SparkContext
import org.scalatest.{Matchers, FunSpec}
import org.scalatest.mock.MockitoSugar
import org.mockito.Mockito._
import org.mockito.Matchers._
import org.apache.toree.magic.MagicLoader
class AddJarSpec extends FunSpec with Matchers with MockitoSugar {
describe("AddJar"){
describe("#execute") {
it("should call addJar on the provided SparkContext and addJars on the " +
"provided interpreter") {
val mockSparkContext = mock[SparkContext]
val mockInterpreter = mock[Interpreter]
val mockOutputStream = mock[OutputStream]
val mockMagicLoader = mock[MagicLoader]
val testConfig = ConfigFactory.load()
val addJarMagic = new AddJar
with IncludeSparkContext
with IncludeInterpreter
with IncludeOutputStream
with IncludeConfig
{
override val sparkContext: SparkContext = mockSparkContext
override val interpreter: Interpreter = mockInterpreter
override val outputStream: OutputStream = mockOutputStream
override lazy val magicLoader: MagicLoader = mockMagicLoader
override val config = testConfig
override def downloadFile(fileUrl: URL, destinationUrl: URL): URL =
new URL("file://someFile") // Cannot mock URL
}
addJarMagic.execute("""http://www.example.com/someJar.jar""")
verify(mockSparkContext).addJar(anyString())
verify(mockInterpreter).addJars(any[URL])
verify(mockMagicLoader, times(0)).addJar(any())
}
it("should raise exception if jar file does not end in .jar or .zip") {
val mockOutputStream = mock[OutputStream]
val addJarMagic = new AddJar
with IncludeOutputStream
{
override val outputStream: OutputStream = mockOutputStream
}
intercept[IllegalArgumentException] {
addJarMagic.execute("""http://www.example.com/""")
}
intercept[IllegalArgumentException] {
addJarMagic.execute("""http://www.example.com/not_a_jar""")
}
}
it("should extract jar file name from jar URL") {
val mockOutputStream = mock[OutputStream]
val addJarMagic = new AddJar
with IncludeOutputStream
{
override val outputStream: OutputStream = mockOutputStream
}
var url = """http://www.example.com/someJar.jar"""
var jarName = addJarMagic.getFileFromLocation(url)
assert(jarName == "someJar.jar")
url = """http://www.example.com/remotecontent?filepath=/path/to/someJar.jar"""
jarName = addJarMagic.getFileFromLocation(url)
assert(jarName == "someJar.jar")
url = """http://www.example.com/"""
jarName = addJarMagic.getFileFromLocation(url)
assert(jarName == "")
}
it("should use a cached jar if the force option is not provided") {
val mockSparkContext = mock[SparkContext]
val mockInterpreter = mock[Interpreter]
val mockOutputStream = mock[OutputStream]
var downloadFileCalled = false // Used to verify that downloadFile
// was or was not called in this test
val testConfig = ConfigFactory.load()
val addJarMagic = new AddJar
with IncludeSparkContext
with IncludeInterpreter
with IncludeOutputStream
with IncludeConfig
{
override val sparkContext: SparkContext = mockSparkContext
override val interpreter: Interpreter = mockInterpreter
override val outputStream: OutputStream = mockOutputStream
override val config = testConfig
override def downloadFile(fileUrl: URL, destinationUrl: URL): URL = {
downloadFileCalled = true
new URL("file://someFile") // Cannot mock URL
}
}
// Create a temporary file representing our jar to fake the cache
val tmpFilePath = Files.createTempFile(
FileSystems.getDefault.getPath(AddJar.getJarDir(testConfig)),
"someJar",
".jar"
)
addJarMagic.execute(
"""http://www.example.com/""" + tmpFilePath.getFileName)
tmpFilePath.toFile.delete()
downloadFileCalled should be (false)
verify(mockSparkContext).addJar(anyString())
verify(mockInterpreter).addJars(any[URL])
}
it("should not use a cached jar if the force option is provided") {
val mockSparkContext = mock[SparkContext]
val mockInterpreter = mock[Interpreter]
val mockOutputStream = mock[OutputStream]
var downloadFileCalled = false // Used to verify that downloadFile
// was or was not called in this test
val testConfig = ConfigFactory.load()
val addJarMagic = new AddJar
with IncludeSparkContext
with IncludeInterpreter
with IncludeOutputStream
with IncludeConfig
{
override val sparkContext: SparkContext = mockSparkContext
override val interpreter: Interpreter = mockInterpreter
override val outputStream: OutputStream = mockOutputStream
override val config = testConfig
override def downloadFile(fileUrl: URL, destinationUrl: URL): URL = {
downloadFileCalled = true
new URL("file://someFile") // Cannot mock URL
}
}
// Create a temporary file representing our jar to fake the cache
val tmpFilePath = Files.createTempFile(
FileSystems.getDefault.getPath(AddJar.getJarDir(testConfig)),
"someJar",
".jar"
)
addJarMagic.execute(
"""-f http://www.example.com/""" + tmpFilePath.getFileName)
tmpFilePath.toFile.delete()
downloadFileCalled should be (true)
verify(mockSparkContext).addJar(anyString())
verify(mockInterpreter).addJars(any[URL])
}
it("should add magic jar to magicloader and not to interpreter and spark"+
"context") {
val mockSparkContext = mock[SparkContext]
val mockInterpreter = mock[Interpreter]
val mockOutputStream = mock[OutputStream]
val mockMagicLoader = mock[MagicLoader]
val testConfig = ConfigFactory.load()
val addJarMagic = new AddJar
with IncludeSparkContext
with IncludeInterpreter
with IncludeOutputStream
with IncludeConfig
{
override val sparkContext: SparkContext = mockSparkContext
override val interpreter: Interpreter = mockInterpreter
override val outputStream: OutputStream = mockOutputStream
override lazy val magicLoader: MagicLoader = mockMagicLoader
override val config = testConfig
override def downloadFile(fileUrl: URL, destinationUrl: URL): URL =
new URL("file://someFile") // Cannot mock URL
}
addJarMagic.execute(
"""--magic http://www.example.com/someJar.jar""")
verify(mockMagicLoader).addJar(any())
verify(mockSparkContext, times(0)).addJar(anyString())
verify(mockInterpreter, times(0)).addJars(any[URL])
}
}
}
}
| asorianostratio/incubator-toree | kernel/src/test/scala/org/apache/toree/magic/builtin/AddJarSpec.scala | Scala | apache-2.0 | 8,466 |
package skuber.batch
import skuber.ResourceSpecification.{Names, Scope}
import skuber.{ObjectReference, LabelSelector, NonCoreResourceSpecification, ObjectMeta, ObjectResource, Pod, ResourceDefinition, Timestamp}
/**
* @author David O'Riordan
*/
case class CronJob(kind: String ="CronJob",
override val apiVersion: String = "batch/v1beta1",
metadata: ObjectMeta = ObjectMeta(),
spec: Option[CronJob.Spec] = None,
status: Option[CronJob.Status] = None) extends ObjectResource
object CronJob {
implicit val cronjobDef = new ResourceDefinition[CronJob] {
def spec = NonCoreResourceSpecification(
apiGroup="batch",
version="v2alpha1",
scope = Scope.Namespaced,
names=Names(
plural = "cronjobs",
singular = "cronjob",
kind = "CronJob",
shortNames = Nil)
)
}
def apply(name: String) = new CronJob(metadata=ObjectMeta(name=name))
def apply(name: String, schedule: String, jobTemplateSpec: JobTemplate.Spec) =
new CronJob(metadata=ObjectMeta(name=name),spec=Some(Spec(schedule=schedule, jobTemplate = jobTemplateSpec)))
def apply(name: String, schedule: String, podTemplateSpec: Pod.Template.Spec) =
new CronJob(
metadata=ObjectMeta(name=name),
spec=Some(
Spec(
schedule=schedule,
jobTemplate = JobTemplate.Spec(
spec = Job.Spec(
template = Some(podTemplateSpec)
)
)
)
)
)
case class Spec(
schedule: String,
jobTemplate: JobTemplate.Spec,
startingDeadlineSeconds: Option[Long] = None,
concurrencyPolicy: Option[String] = None, // can be "Allow" (implied if None), "Forbid" or "Replace"
suspend: Option[Boolean] = None,
successfulJobsHistoryLimit: Option[Int] = None,
failedJobsHistoryLimit: Option[Int] = None)
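  // Illustrative construction (a sketch, not in the original file; the schedule and
  // template are hypothetical): a spec that runs nightly and forbids overlapping runs.
  //   Spec(
  //     schedule = "0 0 * * *",
  //     jobTemplate = someJobTemplateSpec, // assumed defined elsewhere
  //     concurrencyPolicy = Some("Forbid") // "Allow" (implied if None), "Forbid" or "Replace"
  //   )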
case class Status(
lastScheduleTime: Option[Timestamp],
active: List[ObjectReference]
)
}
| doriordan/skuber | client/src/main/scala/skuber/batch/CronJob.scala | Scala | apache-2.0 | 1,945 |
package org.datacleaner.beans.valuedist
import org.datacleaner.result.html.{FlotChartLocator, HeadElement, HtmlRenderingContext}
/**
* Defines reusable script parts for value distribution results
*/
object ValueDistributionReusableScriptHeadElement extends HeadElement {
  override def toHtml(context: HtmlRenderingContext): String = {
    // Note: flotBaseLocation is computed but not referenced by the emitted script.
    val flotBaseLocation = FlotChartLocator.getFlotBaseUrl
    """<script type="text/javascript">
//<![CDATA[
function draw_value_distribution_bar(chartElement, chartData, retries) {
require(['jquery', 'jquery.flot'], function ($) {
var elem = document.getElementById(chartElement);
try {
jQuery.plot(elem, chartData, {
series: {
bars: {
align: "center",
horizontal: true,
show: 0.5,
fill: 1,
lineWidth: 1,
barWidth: 0.9
}
},
grid: {
borderWidth: 1
},
xaxis: {
ticks: null
},
yaxis: {
show: false
},
legend: {
sorted: null,
position: "se"
}
});
} catch (err) {
// error can sometimes occur due to load time issues
if (retries > 0) {
retries = retries-1;
setTimeout(function() {draw_value_distribution_bar(chartElement, chartData, retries)}, 100);
}
}
});
}
//]]>
</script>
<style type="text/css">
.valueDistributionChart {
width: 400px;
}
</style>"""
}
}
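// Illustrative client-side call (an assumption, not in the original file; the element
// id and data are hypothetical): a page rendered with this head element would invoke
//   draw_value_distribution_bar("chart1", [{label: "<blank>", data: [[42, 0]]}], 5)
// where the last argument is the number of retries allowed while Flot finishes loading.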
| kaspersorensen/DataCleaner | components/value-distribution/src/main/scala/org/datacleaner/beans/valuedist/ValueDistributionReusableScriptHeadElement.scala | Scala | lgpl-3.0 | 1,922 |
package spire
package optional
import scala.collection.IterableLike
import scala.collection.generic.CanBuildFrom
import spire.algebra.{Semigroup, Group}
import spire.algebra.partial.{Semigroupoid, Groupoid}
import spire.util._
final class IterableSemigroupoid[A, SA <: IterableLike[A, SA]](implicit cbf: CanBuildFrom[SA, A, SA], A: Semigroup[A]) extends Semigroupoid[SA] {
override def opIsDefined(x: SA, y: SA): Boolean = x.size == y.size
def partialOp(x: SA, y: SA): Opt[SA] =
if (opIsDefined(x, y)) Opt({
val xIt = x.iterator
val yIt = y.iterator
val builder = cbf()
while (xIt.nonEmpty) {
assert(yIt.nonEmpty)
builder += A.op(xIt.next, yIt.next)
}
builder.result()
}) else Opt.empty[SA]
}
final class IterableGroupoid[A, SA <: IterableLike[A, SA]](implicit cbf: CanBuildFrom[SA, A, SA], A: Group[A]) extends Groupoid[SA] {
override def opIsDefined(x: SA, y: SA): Boolean = x.size == y.size
def partialOp(x: SA, y: SA): Opt[SA] =
if (opIsDefined(x, y)) Opt({
val xIt = x.iterator
val yIt = y.iterator
val builder = cbf()
while (xIt.nonEmpty) {
assert(yIt.nonEmpty)
builder += A.op(xIt.next, yIt.next)
}
builder.result()
}) else Opt.empty[SA]
def inverse(a: SA): SA = a.map(A.inverse(_))(cbf)
override def leftId(a: SA): SA = a.map(x => A.id)(cbf)
override def rightId(a: SA): SA = a.map(x => A.id)(cbf)
}
trait PartialIterable0 {
implicit def IterableSemigroupoid[A: Semigroup, CC[A] <: IterableLike[A, CC[A]]](implicit cbf: CanBuildFrom[CC[A], A, CC[A]]): Semigroupoid[CC[A]] = new IterableSemigroupoid[A, CC[A]]
}
trait PartialIterable1 extends PartialIterable0 {
implicit def IterableGroupoid[A: Group, CC[A] <: IterableLike[A, CC[A]]](implicit cbf: CanBuildFrom[CC[A], A, CC[A]]): Groupoid[CC[A]] = new IterableGroupoid[A, CC[A]]
}
object partialIterable extends PartialIterable1
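// Illustrative usage (a sketch, not in the original file; assumes an additive
// Semigroup[Int] is available in implicit scope):
//   val sg = new IterableSemigroupoid[Int, List[Int]]
//   sg.partialOp(List(1, 2), List(3, 4)) // Opt(List(4, 6)) under addition
//   sg.partialOp(List(1), List(2, 3))    // Opt.empty: sizes differ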
| tixxit/spire | core/shared/src/main/scala/spire/optional/partialIterable.scala | Scala | mit | 1,932 |
package temportalist.esotericraft.emulation.common.ability
import net.minecraft.entity.EntityLivingBase
import net.minecraft.nbt.NBTTagByte
import net.minecraft.potion.Potion
import net.minecraft.util.ResourceLocation
import temportalist.esotericraft.api.emulation.IAbility
import temportalist.esotericraft.api.emulation.IAbility.Ability
import temportalist.esotericraft.api.emulation.ability.IAbilityResistanceWither
/**
*
* Created by TheTemportalist on 5/18/2016.
*
* @author TheTemportalist
*/
@Ability(id = "witherResistance")
class AbilityResistanceWither extends AbilityBase[NBTTagByte] with IAbilityResistanceWither {
private val WITHER_KEY = new ResourceLocation("wither")
// ~~~~~ Naming
override def getName: String = "Resistance to Wither"
// ~~~~~ Entity Handling
override def onUpdate(entity: EntityLivingBase): Unit = {
val potion = Potion.REGISTRY.getObject(this.WITHER_KEY)
if (entity.isPotionActive(potion)) entity.removePotionEffect(potion)
}
}
| TheTemportalist/EsoTeriCraft | src/main/scala/temportalist/esotericraft/emulation/common/ability/AbilityResistanceWither.scala | Scala | apache-2.0 | 992 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.metrics.reporter
import java.util.{Collections, HashMap, Map}
import scala.collection.JavaConverters._
object Metrics {
def fromMap(map: Map[String, Map[String, Object]]): Metrics = {
new Metrics(map)
}
}
/**
* Immutable metrics snapshot.
*/
class Metrics(metrics: Map[String, Map[String, Object]]) {
val immutableMetrics = new HashMap[String, Map[String, Object]]
for ((groupKey, groupValue) <- metrics.asScala) {
val immutableMetricGroup = new HashMap[String, Object]
for ((metricKey, metricValue) <- groupValue.asScala) {
immutableMetricGroup.put(metricKey, metricValue)
}
immutableMetrics.put(groupKey, Collections.unmodifiableMap(immutableMetricGroup))
}
def get[T](group: String, metricName: String) =
immutableMetrics.get(group).get(metricName).asInstanceOf[T]
def get(group: String) = immutableMetrics.get(group)
def getAsMap(): Map[String, Map[String, Object]] = Collections.unmodifiableMap(immutableMetrics)
// default constructor to enable deserialization by MetricsSnapshotSerdeV2
def this() {
this(new HashMap[String, Map[String, Object]]())
}
}
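// Illustrative usage (a sketch, not in the original file; the group and metric names
// are hypothetical):
//   val snapshot = Metrics.fromMap(rawJavaMap)
//   val commits: Long = snapshot.get("org.apache.samza.container", "commit-calls")
//   val group = snapshot.get("org.apache.samza.container") // unmodifiable view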
| lhaiesp/samza | samza-core/src/main/scala/org/apache/samza/metrics/reporter/Metrics.scala | Scala | apache-2.0 | 1,961 |
package chandu0101.scalajs.react.components
package materialui
import chandu0101.macros.tojs.JSMacro
import japgolly.scalajs.react._
import japgolly.scalajs.react.raw.React
import japgolly.scalajs.react.vdom.VdomNode
import scala.scalajs.js
/**
* This file is generated - submit issues instead of PR against it
*/
case class MuiDialog(key: js.UndefOr[String] = js.undefined,
ref: js.UndefOr[String] = js.undefined,
/* Action buttons to display below the Dialog content (`children`).
This property accepts either a React element, or an array of React elements. */
actions: js.UndefOr[VdomNode] = js.undefined,
/* The `className` to add to the actions container's root element. */
actionsContainerClassName: js.UndefOr[String] = js.undefined,
/* Overrides the inline-styles of the actions container's root element. */
actionsContainerStyle: js.UndefOr[CssProperties] = js.undefined,
/* If set to true, the height of the `Dialog` will be auto detected. A max height
will be enforced so that the content does not extend beyond the viewport. */
autoDetectWindowHeight: js.UndefOr[Boolean] = js.undefined,
/* If set to true, the body content of the `Dialog` will be scrollable. */
autoScrollBodyContent: js.UndefOr[Boolean] = js.undefined,
/* The `className` to add to the content's root element under the title. */
bodyClassName: js.UndefOr[String] = js.undefined,
/* Overrides the inline-styles of the content's root element under the title. */
bodyStyle: js.UndefOr[CssProperties] = js.undefined,
/* The css class name of the root element. */
className: js.UndefOr[String] = js.undefined,
/* The `className` to add to the content container. */
contentClassName: js.UndefOr[String] = js.undefined,
/* Overrides the inline-styles of the content container. */
contentStyle: js.UndefOr[CssProperties] = js.undefined,
/* Force the user to use one of the actions in the `Dialog`.
Clicking outside the `Dialog` will not trigger the `onRequestClose`. */
modal: js.UndefOr[Boolean] = js.undefined,
/* Fired when the `Dialog` is requested to be closed by a click outside the `Dialog` or on the buttons.
@param {bool} buttonClicked Determines whether a button click triggered this request. */
onRequestClose: js.UndefOr[Boolean => Callback] = js.undefined,
/* Controls whether the Dialog is opened or not. */
open: Boolean,
/* The `className` to add to the `Overlay` component that is rendered behind the `Dialog`. */
overlayClassName: js.UndefOr[String] = js.undefined,
/* Overrides the inline-styles of the `Overlay` component that is rendered behind the `Dialog`. */
overlayStyle: js.UndefOr[CssProperties] = js.undefined,
/* Determines whether the `Dialog` should be repositioned when it's contents are updated. */
repositionOnUpdate: js.UndefOr[Boolean] = js.undefined,
/* Override the inline-styles of the root element. */
style: js.UndefOr[CssProperties] = js.undefined,
/* The title to display on the `Dialog`. Could be number, string, element or an array containing these types. */
title: js.UndefOr[VdomNode] = js.undefined,
/* The `className` to add to the title's root container element. */
titleClassName: js.UndefOr[String] = js.undefined,
/* Overrides the inline-styles of the title's root container element. */
titleStyle: js.UndefOr[CssProperties] = js.undefined) {
/**
* @param children The contents of the `Dialog`.
*/
def apply(children: VdomNode*) = {
val props = JSMacro[MuiDialog](this)
val component = JsComponent[js.Object, Children.Varargs, Null](Mui.Dialog)
component(props)(children: _*)
}
}
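// Illustrative usage (a sketch, not in the original file; the strings and close
// handler are hypothetical):
//   MuiDialog(
//     open = true,
//     title = "Delete item?": VdomNode,
//     onRequestClose = (buttonClicked: Boolean) => Callback.log(s"button: $buttonClicked")
//   )("This action cannot be undone.")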
| rleibman/scalajs-react-components | core/src/main/scala/chandu0101/scalajs/react/components/materialui/MuiDialog.scala | Scala | apache-2.0 | 4,426 |
import sbt._
class ScuartzBuild(info : ProjectInfo) extends DefaultProject(info) {
val quartz = "org.quartz-scheduler" % "quartz" % "1.8.4"
val specs = "org.scala-tools.testing" % "specs" % "1.6.2.1"
val slf4j = "org.slf4j" % "slf4j-simple" % "1.6.1"
}
| dchenbecker/Scuartz | project/build/ScuartzBuild.scala | Scala | apache-2.0 | 260 |
package uk.gov.gds.ier.transaction.ordinary.otherAddress
import uk.gov.gds.ier.test.FormTestSuite
import uk.gov.gds.ier.model.OtherAddress
class OtherAddressFormTests
extends FormTestSuite
with OtherAddressForms {
it should "error out on empty json" in {
val js = JsNull
otherAddressForm.bind(js).fold(
hasErrors => {
hasErrors.errors.size should be(2)
hasErrors.globalErrorMessages should be(Seq("ordinary_otheraddr_error_pleaseAnswer"))
hasErrors.errorMessages("otherAddress") should be(Seq("ordinary_otheraddr_error_pleaseAnswer"))
},
success => fail("Should have thrown an error")
)
}
it should "error out on empty values" in {
val js = Json.toJson(
Map(
"otherAddress.hasOtherAddress" -> ""
)
)
otherAddressForm.bind(js).fold(
hasErrors => {
hasErrors.errors.size should be(2)
hasErrors.globalErrorMessages should be(Seq("ordinary_otheraddr_error_pleaseAnswer"))
hasErrors.errorMessages("otherAddress") should be(Seq("ordinary_otheraddr_error_pleaseAnswer"))
},
success => fail("Should have thrown an error")
)
}
it should "error out on invalid values" in {
val js = Json.toJson(
Map(
"otherAddress.hasOtherAddress" -> "bleurch"
)
)
otherAddressForm.bind(js).fold(
hasErrors => {
hasErrors.errors.size should be(2)
hasErrors.globalErrorMessages should be(Seq("bleurch is not a valid other address type"))
hasErrors.errorMessages("otherAddress.hasOtherAddress") should be(
Seq("bleurch is not a valid other address type"))
},
success => fail("Should have thrown an error")
)
}
it should "successfully bind (student)" in {
val js = Json.toJson(
Map(
"otherAddress.hasOtherAddress" -> "student"
)
)
otherAddressForm.bind(js).fold(
hasErrors => fail(serialiser.toJson(hasErrors.prettyPrint)),
success => {
success.otherAddress.isDefined should be(true)
val otherAddress = success.otherAddress.get
otherAddress.otherAddressOption should be(OtherAddress.StudentOtherAddress)
}
)
}
it should "successfully bind (second home)" in {
val js = Json.toJson(
Map(
"otherAddress.hasOtherAddress" -> "secondHome"
)
)
otherAddressForm.bind(js).fold(
hasErrors => fail(serialiser.toJson(hasErrors.prettyPrint)),
success => {
success.otherAddress.isDefined should be(true)
val otherAddress = success.otherAddress.get
otherAddress.otherAddressOption should be(OtherAddress.HomeOtherAddress)
}
)
}
it should "successfully bind(false)" in {
val js = Json.toJson(
Map(
"otherAddress.hasOtherAddress" -> "none"
)
)
otherAddressForm.bind(js).fold(
hasErrors => fail(serialiser.toJson(hasErrors.prettyPrint)),
success => {
success.otherAddress.isDefined should be(true)
val otherAddress = success.otherAddress.get
otherAddress.otherAddressOption should be(OtherAddress.NoOtherAddress)
}
)
}
}
| alphagov/ier-frontend | test/uk/gov/gds/ier/transaction/ordinary/otherAddress/OtherAddressFormTests.scala | Scala | mit | 3,164 |
/*
Collector is a tool for obtaining bioactivity data from the Open PHACTS platform.
Copyright (C) 2013 UPF
    Contributed by Manuel Pastor([email protected]) and Oriol López-Massaguer([email protected]).
This file is part of Collector.
Collector is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Collector is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Collector. If not, see <http://www.gnu.org/licenses/>.
*/
package es.imim.phi.collector.compounds
class CompoundFilterValidAtoms extends CompoundFilter {
override def toString = "FilterAtoms:"+validAtoms
val validAtoms = Set("H","C","N","O","S","P","Cl","I","Br","F")
def filterPass(compound: Compound): Boolean = { compound.containsValidAtoms(validAtoms) }
} | OriolLopezMassaguer/Collector | app/es/imim/phi/collector/compounds/CompoundFilterValidAtoms.scala | Scala | gpl-3.0 | 1,227 |
package sangria.validation.rules
import sangria.ast
import sangria.ast.AstVisitorCommand
import sangria.validation._
import scala.collection.mutable.{ListBuffer, Set => MutableSet}
/** No unused fragments
*
* A GraphQL document is only valid if all fragment definitions are spread within operations, or
* spread within other fragments spread within operations.
*/
class NoUnusedFragments extends ValidationRule {
override def visitor(ctx: ValidationContext) = new AstValidatingVisitor {
val fragmentDefs = ListBuffer[ast.FragmentDefinition]()
val operationDefs = ListBuffer[ast.OperationDefinition]()
override val onEnter: ValidationVisit = {
case od: ast.OperationDefinition =>
operationDefs += od
AstVisitorCommand.RightSkip
case fd: ast.FragmentDefinition =>
fragmentDefs += fd
AstVisitorCommand.RightSkip
}
override def onLeave: ValidationVisit = { case ast.Document(_, _, _, _) =>
val fragmentNameUsed = MutableSet[String]()
operationDefs.foreach(operation =>
ctx.documentAnalyzer
.getRecursivelyReferencedFragments(operation)
.foreach(fragment => fragmentNameUsed += fragment.name))
val errors = fragmentDefs.toVector
.filter(fd => !fragmentNameUsed.contains(fd.name))
.map(fd => UnusedFragmentViolation(fd.name, ctx.sourceMapper, fd.location.toList))
if (errors.nonEmpty) Left(errors) else AstVisitorCommand.RightContinue
}
}
}
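// Illustrative document (a sketch, not in the original file): the rule would flag
// fragment `b` below as unused, since it is never reachable from any operation.
//   query Q { user { ...a } }
//   fragment a on User { name }
//   fragment b on User { email }   // flagged with UnusedFragmentViolation("b", ...)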
| OlegIlyenko/sangria | modules/core/src/main/scala/sangria/validation/rules/NoUnusedFragments.scala | Scala | apache-2.0 | 1,492 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v3
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtInteger, Linked}
import uk.gov.hmrc.ct.computations.CP258
case class B165(value: Int) extends CtBoxIdentifier(name = "Net trading profits") with CtInteger
object B165 extends Linked[CP258, B165] {
override def apply(source: CP258): B165 = B165(source.value)
}
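// Illustrative behaviour (a sketch; CP258 is assumed to be a simple value box):
//   B165(CP258(1500)) == B165(1500) // B165 mirrors the linked computations box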
| pncampbell/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v3/B165.scala | Scala | apache-2.0 | 949 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.kernel.protocol.v5
package object content {
// Provide an ExecuteReplyOk type and object representing a
// partially-completed ExecuteReply
//
// TODO: Is there a way to wrap the Option arguments in Some(...)?
// E.g. ExecuteReplyOk(3, [], {}) =>
  //            ExecuteReply("ok", 3, Some([]), Some({}), None, None, None)
type ExecuteReplyOk = ExecuteReply
val ExecuteReplyOk = ExecuteReply(
"ok", _: Int, _: Option[Payloads],
_: Option[UserExpressions], None, None, None
)
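  // Illustrative completion (a sketch, not in the original file): supplying the
  // remaining arguments yields a full ExecuteReply.
  //   val reply: ExecuteReply = ExecuteReplyOk(3, None, None)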
// Provide an ExecuteReplyError type and object representing a
// partially-completed ExecuteReply
type ExecuteReplyError = ExecuteReply
val ExecuteReplyError = ExecuteReply(
"error", _: Int, None, None, _: Option[String],
_: Option[String], _: Option[List[String]]
)
// Provide an ExecuteReplyAbort type and object representing a
// partially-completed ExecuteReply
type ExecuteReplyAbort = ExecuteReply
val ExecuteReplyAbort = ExecuteReply(
"abort", _: Int, None, None, None, None, None
)
// Provide an InspectReplyOk type and object representing a
// partially-completed InspectReply
type InspectReplyOk = InspectReply
val InspectReplyOk = InspectReply(
"ok", _: Data, _: Metadata, None, None, None
)
  // Provide an InspectReplyError type and object representing a
// partially-completed InspectReply
type InspectReplyError = InspectReply
val InspectReplyError = InspectReply(
"error", _: Data, _: Metadata, _: Option[String],
_: Option[String], _: Option[List[String]]
)
// Provide an CompleteReplyOk type and object representing a
// partially-completed CompleteReply
type CompleteReplyOk = CompleteReply
val CompleteReplyOk = CompleteReply(
_: List[String], _: Int, _: Int, _: Metadata, "ok", None, None, None
)
// Provide an CompleteReplyError type and object representing a
// partially-completed CompleteReply
type CompleteReplyError = CompleteReply
val CompleteReplyError = CompleteReply(
_: List[String], _: Int, _: Int, _: Metadata, "error", _: Option[String],
_: Option[String], _: Option[List[String]]
)
}
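// Editor's illustrative sketch (not part of the original file): each val above
// is a partially-applied function, so supplying the remaining arguments yields
// a complete ExecuteReply. The field values below are hypothetical.
object ExecuteReplyExamples {
  import com.ibm.spark.kernel.protocol.v5.content._
  val ok: ExecuteReply    = ExecuteReplyOk(1, None, None)
  val abort: ExecuteReply = ExecuteReplyAbort(1)
}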
| yeghishe/spark-kernel | protocol/src/main/scala/com/ibm/spark/kernel/protocol/v5/content/package.scala | Scala | apache-2.0 | 2,739 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.streaming.aliyun.logservice
import com.aliyun.ms.MetaClient
import com.aliyun.ms.utils.EndpointEnum
import com.aliyun.openservices.log.Client
import com.aliyun.openservices.loghub.client.config.LogHubCursorPosition
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.receiver.Receiver
class LoghubInputDStream(
@transient _ssc: StreamingContext,
logServiceProject: String,
logStoreName: String,
loghubConsumerGroupName: String,
loghubInstanceNameBase: String,
var loghubEndpoint: String,
var accessKeyId: String,
var accessKeySecret: String,
storageLevel: StorageLevel,
cursorPosition: LogHubCursorPosition,
mLoghubCursorStartTime: Int,
forceSpecial: Boolean)
extends ReceiverInputDStream[Array[Byte]](_ssc){
val mConsumeInOrder =
_ssc.sc.getConf.getBoolean("spark.logservice.fetch.inOrder", true)
val mHeartBeatIntervalMillis =
_ssc.sc.getConf.getLong("spark.logservice.heartbeat.interval.millis", 30000L)
val dataFetchIntervalMillis =
_ssc.sc.getConf.getLong("spark.logservice.fetch.interval.millis", 200L)
val batchInterval = _ssc.graph.batchDuration.milliseconds
var securityToken: String = null
@transient lazy val slsClient =
if (accessKeyId == null || accessKeySecret == null) {
accessKeyId = MetaClient.getRoleAccessKeyId
accessKeySecret = MetaClient.getRoleAccessKeySecret
securityToken = MetaClient.getRoleSecurityToken
loghubEndpoint = if (loghubEndpoint == null) {
val region = MetaClient.getClusterRegionName
val nType = MetaClient.getClusterNetworkType
val endpointBase = EndpointEnum.getEndpoint("log", region, nType)
s"$logServiceProject.$endpointBase"
} else {
loghubEndpoint
}
val client = new Client(loghubEndpoint, accessKeyId, accessKeySecret)
client.SetSecurityToken(securityToken)
client
} else {
new Client(loghubEndpoint, accessKeyId, accessKeySecret)
}
if (forceSpecial && cursorPosition.toString.equals(
LogHubCursorPosition.SPECIAL_TIMER_CURSOR.toString)) {
try {
slsClient.DeleteConsumerGroup(logServiceProject, logStoreName,
loghubConsumerGroupName)
} catch {
case e: Exception =>
// In case of expired token
if (securityToken != null) {
try {
accessKeyId = MetaClient.getRoleAccessKeyId
accessKeySecret = MetaClient.getRoleAccessKeySecret
securityToken = MetaClient.getRoleSecurityToken
val client = new Client(loghubEndpoint, accessKeyId, accessKeySecret)
client.SetSecurityToken(securityToken)
client.DeleteConsumerGroup(logServiceProject, logStoreName,
loghubConsumerGroupName)
} catch {
case e: Exception =>
logError(s"Failed to delete consumer group, ${e.getMessage}", e)
throw e
}
} else {
logError(s"Failed to delete consumer group, ${e.getMessage}", e)
throw e
}
}
}
override def getReceiver(): Receiver[Array[Byte]] =
new LoghubReceiver(
mConsumeInOrder,
mHeartBeatIntervalMillis,
dataFetchIntervalMillis,
batchInterval,
logServiceProject,
logStoreName,
loghubConsumerGroupName,
loghubInstanceNameBase,
loghubEndpoint,
accessKeyId,
accessKeySecret,
storageLevel,
cursorPosition,
mLoghubCursorStartTime)
def this(@transient _ssc: StreamingContext,
logServiceProject: String,
logStoreName: String,
loghubConsumerGroupName: String,
loghubInstanceNameBase: String,
loghubEndpoint: String,
accessKeyId: String,
accessKeySecret: String,
storageLevel: StorageLevel) = {
this(_ssc, logServiceProject, logStoreName, loghubConsumerGroupName,
loghubInstanceNameBase, loghubEndpoint, accessKeyId, accessKeySecret,
storageLevel, LogHubCursorPosition.END_CURSOR, -1, false)
}
}
| uncleGen/aliyun-emapreduce-sdk | external/emr-logservice/src/main/scala/org/apache/spark/streaming/aliyun/logservice/LoghubInputDStream.scala | Scala | artistic-2.0 | 5,097 |
/*
* Copyright 2011-2017 Chris de Vreeze
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package eu.cdevreeze.yaidom.queryapitests.indexed
import eu.cdevreeze.yaidom.parse.DocumentParserUsingDom
import eu.cdevreeze.yaidom.queryapi.BackingNodes
import eu.cdevreeze.yaidom.queryapitests.AbstractSubtypeAwareElemLikeQueryTest
/**
* Query test case for an XML dialect using indexed elements.
*
* @author Chris de Vreeze
*/
class SubtypeAwareElemLikeQueryTest extends AbstractSubtypeAwareElemLikeQueryTest {
protected val wrappedDocumentContent: BackingNodes.Elem = {
val docParser = DocumentParserUsingDom.newInstance()
val docUri = classOf[AbstractSubtypeAwareElemLikeQueryTest].getResource("content.xml").toURI
val doc = docParser.parse(docUri)
eu.cdevreeze.yaidom.indexed.Document(doc.withUriOption(Some(docUri))).documentElement
}
}
| dvreeze/yaidom | jvm/src/test/scala/eu/cdevreeze/yaidom/queryapitests/indexed/SubtypeAwareElemLikeQueryTest.scala | Scala | apache-2.0 | 1,373 |
package dotty.tools.scaladoc
package tasty
package comments
import scala.quoted._
import org.junit.{Test, Rule}
import org.junit.Assert.{assertSame, assertTrue, assertEquals}
import dotty.tools.scaladoc.tasty.util._
import dotty.tools.scaladoc.tasty.TastyParser
class CommentExpanderTests {
def check(using Quotes)(): Unit =
assertCommentEquals(
reflect.Symbol.requiredClass("tests.B").methodMember("otherMethod").head,
"/** This is my foo: Bar, actually. */",
)
assertCommentEquals(
reflect.Symbol.requiredClass("tests.C"),
"/** This is foo: Foo expanded. */",
)
assertCommentEquals(
reflect.Symbol.requiredModule("tests.O").methodMember("method").head,
"/** This is foo: O's foo. */",
)
def assertCommentEquals(
using Quotes
)(
rsym: reflect.Symbol,
str: String
): Unit =
import dotty.tools.dotc
given ctx: dotc.core.Contexts.Context = quotes.asInstanceOf[scala.quoted.runtime.impl.QuotesImpl].ctx
val sym = rsym.asInstanceOf[dotc.core.Symbols.Symbol]
val comment = CommentExpander.cookComment(sym).get
assertEquals(comment.expanded.get, str)
@Test
def test(): Unit = {
import scala.tasty.inspector.OldTastyInspector
class Inspector extends OldTastyInspector:
def processCompilationUnit(using Quotes)(root: reflect.Tree): Unit = ()
override def postProcess(using Quotes): Unit =
check()
Inspector().inspectTastyFiles(TestUtils.listOurClasses())
}
}
| dotty-staging/dotty | scaladoc/test/dotty/tools/scaladoc/tasty/comments/CommentExpanderTests.scala | Scala | apache-2.0 | 1,499 |
/**
 * HelloWorld.scala
* @author Andrew Hanes [email protected]
*
* Written for Programming Language Concepts
*
 * This program demos "Hello World" in Scala.
*/
object HelloWorld {
/**
* Main method
* @param args Command line arguments
*/
def main(args: Array[String]) {
println("Hello World");
}
}
| AndrewHanes/Scala-Presentation | Examples/HelloWorld/HelloWorld.scala | Scala | mit | 325 |
package me.yingrui.segment.concept
import org.junit.Assert
import org.junit.Test
class ConceptTest {
val rootNounConcept: Concept = new Concept(100000, "noun")
buildTestData()
@Test
def should_have_children() {
val children = rootNounConcept.getChildren()
    Assert.assertEquals(4, children.size) // four child concepts
Assert.assertEquals("object", children(0).getName())
Assert.assertEquals(100001, children(0).getId())
Assert.assertEquals(rootNounConcept, children(0).getParent())
Assert.assertEquals(children, children(0).getSiblings())
Assert.assertEquals("process", children(1).getName())
Assert.assertEquals(100002, children(1).getId())
Assert.assertEquals("space", children(2).getName())
Assert.assertEquals(100003, children(2).getId())
Assert.assertEquals("time", children(3).getName())
Assert.assertEquals(100004, children(3).getId())
}
@Test
def should_have_parent() {
val children = rootNounConcept.getChildren()
Assert.assertEquals(rootNounConcept, children(0).getParent())
}
@Test
def should_have_siblings() {
val children = rootNounConcept.getChildren()
Assert.assertEquals(children, children(0).getSiblings())
}
private def buildTestData() {
val nounObject = new Concept(100001, "object")
val nounProcess = new Concept(100002, "process")
val nounSpace = new Concept(100003, "space")
val nounTime = new Concept(100004, "time")
rootNounConcept.addChild(nounTime)
rootNounConcept.addChild(nounSpace)
rootNounConcept.addChild(nounProcess)
rootNounConcept.addChild(nounObject)
}
}
| yingrui/mahjong | lib-segment/src/test/scala/me/yingrui/segment/concept/ConceptTest.scala | Scala | gpl-3.0 | 1,608 |
/**
* Copyright 2015 Thomson Reuters
*
* Licensed under the Apache License, Version 2.0 (the โLicenseโ); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an โAS ISโ BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.tools.neptune.export
import java.io._
import java.nio.file.{Files, Paths}
import java.util.Properties
import net.liftweb.json.DefaultFormats
object PropertiesStore{
implicit val formats = DefaultFormats
val directory = "/tmp/cm-well/"
val fileName = "config.properties"
val AUTOMATIC_UPDATE_MODE = "automaticUpdateMode"
val POSITION = "position"
val START_TIME = "start_time"
def persistPosition(position: String):Unit = {
    val prop = new Properties()
var output: OutputStream = null
try {
prop.setProperty(PropertiesStore.POSITION, position)
readKey(START_TIME).foreach(x => prop.setProperty(START_TIME, x))
readKey(PropertiesStore.AUTOMATIC_UPDATE_MODE).foreach(x => prop.setProperty(PropertiesStore.AUTOMATIC_UPDATE_MODE, x))
output = new FileOutputStream(fileName)
prop.store(output, null)
} catch {
case io: IOException =>
        io.printStackTrace()
} finally {
if (output != null) {
try {
output.close()
} catch {
case e: IOException => e.printStackTrace()
}
}
}
}
def persistStartTime(startTime: String):Unit = {
    val prop = new Properties()
var output: OutputStream = null
try {
prop.setProperty(START_TIME, startTime)
readKey(PropertiesStore.POSITION).foreach(x => prop.setProperty(PropertiesStore.POSITION, x))
readKey(PropertiesStore.AUTOMATIC_UPDATE_MODE).foreach(x => prop.setProperty(PropertiesStore.AUTOMATIC_UPDATE_MODE, x))
output = new FileOutputStream(fileName)
      prop.store(output, null)
} catch {
      case io: IOException => io.printStackTrace()
} finally {
if (output != null) {
try {
output.close()
} catch {
case e: IOException => e.printStackTrace()
}
}
}
}
def persistAutomaticUpdateMode(automaticUpdateMode: Boolean): Unit = {
    val prop = new Properties()
var output: OutputStream = null
try {
prop.setProperty(PropertiesStore.AUTOMATIC_UPDATE_MODE, automaticUpdateMode.toString)
readKey(PropertiesStore.POSITION).foreach(x => prop.setProperty(PropertiesStore.POSITION, x))
readKey(START_TIME).foreach(x => prop.setProperty(START_TIME, x))
output = new FileOutputStream(fileName)
      prop.store(output, null)
} catch {
case io: IOException =>
        io.printStackTrace()
} finally {
if (output != null) {
try {
output.close()
} catch {
case e: IOException => e.printStackTrace()
}
}
}
}
  def readKey(key: String): Option[String] = {
    if (!isPropertyFileExists) return None
    val prop = new Properties()
    val input: InputStream = new FileInputStream(fileName)
    try {
      prop.load(input)
      Option(prop.getProperty(key))
    } finally {
      input.close()
    }
  }
def retreivePosition():Option[String] = {
readKey(POSITION)
}
def retrieveStartTime():Option[String] = {
readKey(START_TIME)
}
def isPropertyPersist(property:String):Boolean ={
isPropertyFileExists && readKey(property).isDefined
}
def isAutomaticUpdateModePersist():Boolean = {
isPropertyPersist(AUTOMATIC_UPDATE_MODE)
}
def isPositionPersist():Boolean = {
isPropertyPersist(POSITION)
}
def isStartTimePersist():Boolean = {
isPropertyPersist(START_TIME)
}
def isPropertyFileExists:Boolean ={
Files.exists(Paths.get(fileName))
}
def createDirectoryIfNotExists() = {
val cmWellDirectory = new File(directory)
if (!cmWellDirectory.exists)
cmWellDirectory.mkdirs()
}
} | e-orz/CM-Well | tools/export-neptune-tool/src/main/scala/cmwell/tools/neptune/export/PropertiesStore.scala | Scala | apache-2.0 | 4,263 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.benchmark
import org.apache.spark.SparkConf
import org.apache.spark.benchmark.Benchmark
import org.apache.spark.internal.Logging
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.catalyst.catalog.HiveTableRelation
import org.apache.spark.sql.catalyst.plans.logical.SubqueryAlias
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.execution.datasources.LogicalRelation
/**
* Benchmark to measure TPCDS query performance.
* To run this:
* {{{
* 1. without sbt:
* bin/spark-submit --class <this class> <spark sql test jar> --data-location <location>
* 2. build/sbt "sql/test:runMain <this class> --data-location <TPCDS data location>"
* 3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt
* "sql/test:runMain <this class> --data-location <location>"
* Results will be written to "benchmarks/TPCDSQueryBenchmark-results.txt".
* }}}
*/
object TPCDSQueryBenchmark extends SqlBasedBenchmark {
override def getSparkSession: SparkSession = {
val conf = new SparkConf()
.setMaster("local[1]")
.setAppName("test-sql-context")
.set("spark.sql.parquet.compression.codec", "snappy")
.set("spark.sql.shuffle.partitions", "4")
.set("spark.driver.memory", "3g")
.set("spark.executor.memory", "3g")
.set("spark.sql.autoBroadcastJoinThreshold", (20 * 1024 * 1024).toString)
.set("spark.sql.crossJoin.enabled", "true")
SparkSession.builder.config(conf).getOrCreate()
}
val tables = Seq("catalog_page", "catalog_returns", "customer", "customer_address",
"customer_demographics", "date_dim", "household_demographics", "inventory", "item",
"promotion", "store", "store_returns", "catalog_sales", "web_sales", "store_sales",
"web_returns", "web_site", "reason", "call_center", "warehouse", "ship_mode", "income_band",
"time_dim", "web_page")
def setupTables(dataLocation: String): Map[String, Long] = {
tables.map { tableName =>
spark.read.parquet(s"$dataLocation/$tableName").createOrReplaceTempView(tableName)
tableName -> spark.table(tableName).count()
}.toMap
}
def runTpcdsQueries(
queryLocation: String,
queries: Seq[String],
tableSizes: Map[String, Long],
nameSuffix: String = ""): Unit = {
queries.foreach { name =>
val queryString = resourceToString(s"$queryLocation/$name.sql",
classLoader = Thread.currentThread().getContextClassLoader)
// This is an indirect hack to estimate the size of each query's input by traversing the
// logical plan and adding up the sizes of all tables that appear in the plan.
val queryRelations = scala.collection.mutable.HashSet[String]()
spark.sql(queryString).queryExecution.analyzed.foreach {
case SubqueryAlias(alias, _: LogicalRelation) =>
queryRelations.add(alias.identifier)
case LogicalRelation(_, _, Some(catalogTable), _) =>
queryRelations.add(catalogTable.identifier.table)
case HiveTableRelation(tableMeta, _, _, _) =>
queryRelations.add(tableMeta.identifier.table)
case _ =>
}
val numRows = queryRelations.map(tableSizes.getOrElse(_, 0L)).sum
val benchmark = new Benchmark(s"TPCDS Snappy", numRows, 2, output = output)
benchmark.addCase(s"$name$nameSuffix") { _ =>
spark.sql(queryString).noop()
}
benchmark.run()
}
}
def filterQueries(
origQueries: Seq[String],
args: TPCDSQueryBenchmarkArguments): Seq[String] = {
if (args.queryFilter.nonEmpty) {
origQueries.filter(args.queryFilter.contains)
} else {
origQueries
}
}
override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
val benchmarkArgs = new TPCDSQueryBenchmarkArguments(mainArgs)
// List of all TPC-DS v1.4 queries
val tpcdsQueries = Seq(
"q1", "q2", "q3", "q4", "q5", "q6", "q7", "q8", "q9", "q10", "q11",
"q12", "q13", "q14a", "q14b", "q15", "q16", "q17", "q18", "q19", "q20",
"q21", "q22", "q23a", "q23b", "q24a", "q24b", "q25", "q26", "q27", "q28", "q29", "q30",
"q31", "q32", "q33", "q34", "q35", "q36", "q37", "q38", "q39a", "q39b", "q40",
"q41", "q42", "q43", "q44", "q45", "q46", "q47", "q48", "q49", "q50",
"q51", "q52", "q53", "q54", "q55", "q56", "q57", "q58", "q59", "q60",
"q61", "q62", "q63", "q64", "q65", "q66", "q67", "q68", "q69", "q70",
"q71", "q72", "q73", "q74", "q75", "q76", "q77", "q78", "q79", "q80",
"q81", "q82", "q83", "q84", "q85", "q86", "q87", "q88", "q89", "q90",
"q91", "q92", "q93", "q94", "q95", "q96", "q97", "q98", "q99")
// This list only includes TPC-DS v2.7 queries that are different from v1.4 ones
val tpcdsQueriesV2_7 = Seq(
"q5a", "q6", "q10a", "q11", "q12", "q14", "q14a", "q18a",
"q20", "q22", "q22a", "q24", "q27a", "q34", "q35", "q35a", "q36a", "q47", "q49",
"q51a", "q57", "q64", "q67a", "q70a", "q72", "q74", "q75", "q77a", "q78",
"q80a", "q86a", "q98")
// If `--query-filter` defined, filters the queries that this option selects
val queriesV1_4ToRun = filterQueries(tpcdsQueries, benchmarkArgs)
val queriesV2_7ToRun = filterQueries(tpcdsQueriesV2_7, benchmarkArgs)
if ((queriesV1_4ToRun ++ queriesV2_7ToRun).isEmpty) {
throw new RuntimeException(
s"Empty queries to run. Bad query name filter: ${benchmarkArgs.queryFilter}")
}
val tableSizes = setupTables(benchmarkArgs.dataLocation)
runTpcdsQueries(queryLocation = "tpcds", queries = queriesV1_4ToRun, tableSizes)
runTpcdsQueries(queryLocation = "tpcds-v2.7.0", queries = queriesV2_7ToRun, tableSizes,
nameSuffix = "-v2.7")
}
}
| ptkool/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/TPCDSQueryBenchmark.scala | Scala | apache-2.0 | 6,554 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.integration
import java.util.Properties
import kafka.server.KafkaConfig
import kafka.utils.{Logging, TestUtils}
import scala.collection.JavaConverters.mapAsScalaMapConverter
import org.junit.{Before, Test}
import com.yammer.metrics.Metrics
import com.yammer.metrics.core.Gauge
class MetricsDuringTopicCreationDeletionTest extends KafkaServerTestHarness with Logging {
private val nodesNum = 3
private val topicName = "topic"
private val topicNum = 2
private val replicationFactor = 3
private val partitionNum = 3
private val createDeleteIterations = 3
private val overridingProps = new Properties
overridingProps.put(KafkaConfig.DeleteTopicEnableProp, "true")
overridingProps.put(KafkaConfig.AutoCreateTopicsEnableProp, "false")
// speed up the test for UnderReplicatedPartitions
// which relies on the ISR expiry thread to execute concurrently with topic creation
overridingProps.put(KafkaConfig.ReplicaLagTimeMaxMsProp, "2000")
private val testedMetrics = List("OfflinePartitionsCount","PreferredReplicaImbalanceCount","UnderReplicatedPartitions")
private val topics = List.tabulate(topicNum) (n => topicName + n)
@volatile private var running = true
override def generateConfigs = TestUtils.createBrokerConfigs(nodesNum, zkConnect)
.map(KafkaConfig.fromProps(_, overridingProps))
@Before
override def setUp {
// Do some Metrics Registry cleanup by removing the metrics that this test checks.
// This is a test workaround to the issue that prior harness runs may have left a populated registry.
// see https://issues.apache.org/jira/browse/KAFKA-4605
for (m <- testedMetrics) {
val metricName = Metrics.defaultRegistry.allMetrics.asScala.keys.find(_.getName.endsWith(m))
metricName.foreach(Metrics.defaultRegistry.removeMetric)
}
super.setUp
}
/*
* checking all metrics we care in a single test is faster though it would be more elegant to have 3 @Test methods
*/
@Test
def testMetricsDuringTopicCreateDelete() {
// For UnderReplicatedPartitions, because of https://issues.apache.org/jira/browse/KAFKA-4605
// we can't access the metrics value of each server. So instead we directly invoke the method
// replicaManager.underReplicatedPartitionCount() that defines the metrics value.
@volatile var underReplicatedPartitionCount = 0
// For OfflinePartitionsCount and PreferredReplicaImbalanceCount even with https://issues.apache.org/jira/browse/KAFKA-4605
// the test has worked reliably because the metric that gets triggered is the one generated by the first started server (controller)
val offlinePartitionsCountGauge = getGauge("OfflinePartitionsCount")
@volatile var offlinePartitionsCount = offlinePartitionsCountGauge.value
assert(offlinePartitionsCount == 0)
val preferredReplicaImbalanceCountGauge = getGauge("PreferredReplicaImbalanceCount")
@volatile var preferredReplicaImbalanceCount = preferredReplicaImbalanceCountGauge.value
assert(preferredReplicaImbalanceCount == 0)
// Thread checking the metric continuously
running = true
val thread = new Thread(new Runnable {
def run() {
while (running) {
for ( s <- servers if running) {
underReplicatedPartitionCount = s.replicaManager.underReplicatedPartitionCount
if (underReplicatedPartitionCount > 0) {
running = false
}
}
preferredReplicaImbalanceCount = preferredReplicaImbalanceCountGauge.value
if (preferredReplicaImbalanceCount > 0) {
running = false
}
offlinePartitionsCount = offlinePartitionsCountGauge.value
if (offlinePartitionsCount > 0) {
running = false
}
}
}
})
thread.start
// breakable loop that creates and deletes topics
createDeleteTopics()
    // if the thread checking the gauges is still running, stop it
    running = false
thread.join
assert(offlinePartitionsCount==0, "OfflinePartitionCount not 0: "+ offlinePartitionsCount)
assert(preferredReplicaImbalanceCount==0, "PreferredReplicaImbalanceCount not 0: " + preferredReplicaImbalanceCount)
assert(underReplicatedPartitionCount==0, "UnderReplicatedPartitionCount not 0: " + underReplicatedPartitionCount)
}
private def getGauge(metricName: String) = {
Metrics.defaultRegistry.allMetrics.asScala
.filterKeys(k => k.getName.endsWith(metricName))
.headOption
.getOrElse { fail( "Unable to find metric " + metricName ) }
._2.asInstanceOf[Gauge[Int]]
}
private def createDeleteTopics() {
for (l <- 1 to createDeleteIterations if running) {
// Create topics
for (t <- topics if running) {
try {
createTopic(t, partitionNum, replicationFactor)
} catch {
case e: Exception => e.printStackTrace
}
}
// Delete topics
for (t <- topics if running) {
try {
adminZkClient.deleteTopic(t)
TestUtils.verifyTopicDeletion(zkClient, t, partitionNum, servers)
} catch {
case e: Exception => e.printStackTrace
}
}
}
}
}
| mihbor/kafka | core/src/test/scala/unit/kafka/integration/MetricsDuringTopicCreationDeletionTest.scala | Scala | apache-2.0 | 6,053 |
package ai.akka.service.client
import ai.akka.service.client.Model.ClientDetails
/**
 * Trait defining the client details service.
*/
trait ClientDetailsService {
/**
* The method finds client details by client identity
* @param clientId client identity
* @return client details
*/
def findClientDetailsByClientId(clientId: String): ClientDetails
/**
* The method register client with specified client details
* @param clientDetails client details
* @return client details
*/
def addClient(clientDetails: ClientDetails): ClientDetails
/**
* The method removes client details by client identity
* @param clientId client identity
* @return client details
*/
def removeClient(clientId: String): ClientDetails
}
| andrew--i/spray-oauth2 | src/main/scala/ai/akka/service/client/ClientDetailsService.scala | Scala | apache-2.0 | 750 |
package fr.ramiro.sfuzzy.dsl
/**
* Created by Ramiro on 12/05/2017.
*/
object Rule {
def IF(condition: Condition): IfClause = ???
def NOT(clause: Condition): Condition = ???
}
| rrramiro/sFuzzyLogic | src/main/scala/fr/ramiro/sfuzzy/dsl/Rule.scala | Scala | apache-2.0 | 184 |
package filodb.prometheus.ast
/**
* Time durations are specified as a number
* followed immediately by one of the following units:
* s - seconds
* m - minutes
* h - hours
* d - days
* w - weeks
* y - years
* i - factor of step (aka interval)
* */
sealed trait TimeUnit {
def millis(step: Long): Long
}
case object Second extends TimeUnit {
override def millis(step: Long): Long = 1000L
}
case object Minute extends TimeUnit {
override def millis(step: Long): Long = Second.millis(step) * 60
}
case object Hour extends TimeUnit {
override def millis(step: Long): Long = Minute.millis(step) * 60
}
case object Day extends TimeUnit {
override def millis(step: Long): Long = Hour.millis(step) * 24
}
case object Week extends TimeUnit {
override def millis(step: Long): Long = Day.millis(step) * 7
}
case object Year extends TimeUnit {
override def millis(step: Long): Long = Week.millis(step) * 52
}
case object IntervalMultiple extends TimeUnit {
override def millis(step: Long): Long = {
require(step > 0, "Interval multiple notation was used in lookback/range without valid step")
step
}
}
case class Duration(scale: Double, timeUnit: TimeUnit) {
if (scale <= 0) throw new IllegalArgumentException("Duration should be greater than zero")
def millis(step: Long): Long = (scale * timeUnit.millis(step)).toLong
}
case class Offset(duration: Duration)
case class SimpleLookback(duration: Duration)
case class SubqueryClause(window: Duration, step: Option[Duration])
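// Editor's illustrative sketch (not part of the original file): converting
// durations to milliseconds. IntervalMultiple scales the query step instead
// of a fixed unit, and rejects a non-positive step.
object DurationExamples {
  val ninetySeconds: Long = Duration(1.5, Minute).millis(step = 0L)       // 90000
  val twoSteps: Long      = Duration(2, IntervalMultiple).millis(30000L)  // 60000
}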
| filodb/FiloDB | prometheus/src/main/scala/filodb/prometheus/ast/TimeUnits.scala | Scala | apache-2.0 | 1,525 |
// Project Euler #10: sum all primes below two million.
// Trial division by every integer up to sqrt(num) suffices to test primality.
def isPrime(num: Int) = {
  Stream.range(2, math.sqrt(num.toDouble).toInt + 1).forall(num % _ != 0)
}
val results = (2 until 2000000).filter(isPrime).map(BigInt(_)).sum
println(results)
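// Editor's note: Stream is deprecated on Scala 2.13+; an equivalent check is
//   def isPrime(num: Int) = (2 to math.sqrt(num.toDouble).toInt).forall(num % _ != 0)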
| brandonhorst/project-euler-scala | 010.scala | Scala | mit | 188 |
package lib.gitgithub
import com.madgag.scalagithub.GitHub._
import com.madgag.scalagithub.model.{PullRequest, Repo}
import lib.{Bot, Delayer, LabelledState, RepoSnapshot}
import play.api.Logger
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
object IssueUpdater {
val logger = Logger(getClass)
}
/**
*
* @tparam IssueType Pull Request or Issue
* @tparam PersistableState State that can be converted to and from GitHub issue labels, ie a set of Strings
* @tparam Snapshot A present-state snapshot that can yield a PersistableState
*/
trait IssueUpdater[IssueType <: PullRequest, PersistableState, Snapshot <: StateSnapshot[PersistableState]] {
implicit val github = Bot.github
val repo: Repo
val repoSnapshot: RepoSnapshot
val labelToStateMapping:LabelMapping[PersistableState]
def ignoreItemsWithExistingState(existingState: PersistableState): Boolean
def snapshot(oldState: PersistableState, issue: IssueType): Future[Snapshot]
def actionTaker(snapshot: Snapshot)
def process(issueLike: IssueType): Future[Option[Snapshot]] = {
logger.trace(s"handling ${issueLike.prId.slug}")
for {
oldLabels <- new LabelledState(issueLike, repoSnapshot.allPossibleCheckpointPRLabels).currentLabelsF
snapshot <- takeSnapshotOf(issueLike, oldLabels)
} yield snapshot
}
def takeSnapshotOf(issueLike: IssueType, oldLabels: Set[String]): Future[Option[Snapshot]] = {
val existingPersistedState: PersistableState = labelToStateMapping.stateFrom(oldLabels)
if (!ignoreItemsWithExistingState(existingPersistedState)) {
for (currentSnapshot <- snapshot(existingPersistedState, issueLike)) yield {
val newPersistableState = currentSnapshot.newPersistableState
val stateChanged = newPersistableState != existingPersistedState
logger.debug(s"handling ${issueLike.prId.slug} : state: existing=$existingPersistedState new=$newPersistableState stateChanged=$stateChanged")
if (stateChanged) {
logger.info(s"#${issueLike.prId.slug} state-change: $existingPersistedState -> $newPersistableState")
val newLabels: Set[String] = labelToStateMapping.labelsFor(newPersistableState)
assert(oldLabels != newLabels, s"Labels should differ for differing states. labels=$oldLabels oldState=$existingPersistedState newState=$newPersistableState")
issueLike.labels.replace(newLabels.toSeq)
Delayer.doAfterSmallDelay {
actionTaker(currentSnapshot)
}
}
Some(currentSnapshot)
}
} else Future.successful(None)
}
}
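// Editor's illustrative sketch (not part of the original file): a minimal
// LabelMapping for a boolean "checked" state, assuming the trait exposes only
// the two methods used above (stateFrom / labelsFor).
object CheckedLabelMapping extends LabelMapping[Boolean] {
  def labelsFor(state: Boolean): Set[String] =
    if (state) Set("Checked") else Set.empty
  def stateFrom(labels: Set[String]): Boolean = labels.contains("Checked")
}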
| guardian/prout | app/lib/gitgithub/IssueUpdater.scala | Scala | apache-2.0 | 2,621 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import org.scalatestplus.mockito.MockitoSugar
import org.scalatest.{Matchers, WordSpec}
import play.api.libs.json.Json
import uk.gov.hmrc.ct.box.CtValidation
import uk.gov.hmrc.ct.computations.formats._
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
import uk.gov.hmrc.ct.utils.CatoInputBounds._
class CP8Spec extends WordSpec with Matchers with MockitoSugar {
implicit val format = Json.format[CP8Holder]
"CP8 to json" should {
"create valid json for int value" in {
val json = Json.toJson(CP8Holder(CP8(Some(1234))))
json.toString shouldBe """{"cp8":1234}"""
}
"create valid json for -ve int" in {
val json = Json.toJson(CP8Holder(CP8(Some(-1234))))
json.toString shouldBe """{"cp8":-1234}"""
}
"create valid json for None" in {
val json = Json.toJson(CP8Holder(CP8(None)))
json.toString shouldBe """{"cp8":null}"""
}
}
"CP8 from json" should {
"create +ve int from valid json" in {
val json = Json.parse("""{"cp8":1234}""")
Json.fromJson[CP8Holder](json).get shouldBe CP8Holder(cp8 = new CP8(Some(1234)))
}
"create -ve int from valid json" in {
val json = Json.parse("""{"cp8":-1234}""")
Json.fromJson[CP8Holder](json).get shouldBe CP8Holder(cp8 = new CP8(Some(-1234)))
}
"create None from valid json" in {
val json = Json.parse("""{"cp8":null}""")
Json.fromJson[CP8Holder](json).get shouldBe CP8Holder(cp8 = new CP8(None))
}
}
val boxRetriever = mock[ComputationsBoxRetriever]
"CP8 validation" should {
"pass when zero" in {
CP8(Some(0)).validate(boxRetriever) shouldBe empty
}
"pass when at max" in {
CP8(Some(99999999)).validate(boxRetriever) shouldBe empty
}
"pass when at min" in {
CP8(Some(-99999999)).validate(boxRetriever) shouldBe empty
}
"fail when below min" in {
CP8(Some(-100000000)).validate(boxRetriever) shouldBe Set(CtValidation(Some("CP8"), "error.CP8.below.min", Some(Seq(oldMinWithCommas, oldMaxWithCommas))))
}
"fail when above max" in {
CP8(Some(100000000)).validate(boxRetriever) shouldBe Set(CtValidation(Some("CP8"), "error.CP8.above.max", Some(Seq(oldMinWithCommas, oldMaxWithCommas))))
}
"fail when empty" in {
CP8(None).validate(boxRetriever) shouldBe Set(CtValidation(Some("CP8"), "error.CP8.required"))
}
}
}
case class CP8Holder(cp8: CP8)
| hmrc/ct-calculations | src/test/scala/uk/gov/hmrc/ct/computations/CP8Spec.scala | Scala | apache-2.0 | 3,069 |
package ch.ethz.dalab.dissolve.classification
import ch.ethz.dalab.dissolve.regression.LabeledObject
import breeze.linalg._
import ch.ethz.dalab.dissolve.optimization.SolverOptions
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import java.io.FileWriter
import ch.ethz.dalab.dissolve.optimization.SolverUtils
import scala.reflect.ClassTag
import ch.ethz.dalab.dissolve.optimization.DBCFWSolverTuned
import ch.ethz.dalab.dissolve.optimization.DissolveFunctions
class StructSVMWithDBCFW[X, Y](
val data: RDD[LabeledObject[X, Y]],
val dissolveFunctions: DissolveFunctions[X, Y],
val solverOptions: SolverOptions[X, Y]) {
def trainModel()(implicit m: ClassTag[Y]): StructSVMModel[X, Y] = {
val (trainedModel, debugInfo) = new DBCFWSolverTuned[X, Y](
data,
dissolveFunctions,
solverOptions,
miniBatchEnabled = false).optimize()
// Dump debug information into a file
val fw = new FileWriter(solverOptions.debugInfoPath)
// Write the current parameters being used
fw.write(solverOptions.toString())
fw.write("\n")
// Write spark-specific parameters
fw.write(SolverUtils.getSparkConfString(data.context.getConf))
fw.write("\n")
// Write values noted from the run
fw.write(debugInfo)
fw.close()
print(debugInfo)
// Return the trained model
trainedModel
}
}
object StructSVMWithDBCFW {
def train[X, Y](data: RDD[LabeledObject[X, Y]],
dissolveFunctions: DissolveFunctions[X, Y],
solverOptions: SolverOptions[X, Y])(implicit m: ClassTag[Y]): StructSVMModel[X, Y] = {
val (trainedModel, debugInfo) = new DBCFWSolverTuned[X, Y](
data,
dissolveFunctions,
solverOptions,
miniBatchEnabled = false).optimize()
// Dump debug information into a file
val fw = new FileWriter(solverOptions.debugInfoPath)
// Write the current parameters being used
fw.write(solverOptions.toString())
fw.write("\n")
// Write spark-specific parameters
fw.write(SolverUtils.getSparkConfString(data.context.getConf))
fw.write("\n")
// Write values noted from the run
fw.write(debugInfo)
fw.close()
print(debugInfo)
// Return the trained model
trainedModel
}
} | dalab/dissolve-struct | dissolve-struct-lib/src/main/scala/ch/ethz/dalab/dissolve/classification/StructSVMWithDBCFW.scala | Scala | apache-2.0 | 2,271 |
package mesosphere.marathon.core.task.tracker.impl
import javax.inject.Inject
import mesosphere.marathon.MarathonSchedulerDriverHolder
import mesosphere.marathon.Protos.MarathonTask
import mesosphere.marathon.core.base.Clock
import mesosphere.marathon.core.task.tracker.{ TaskStatusUpdateProcessor, TaskStatusUpdateStep }
import mesosphere.marathon.metrics.Metrics.Timer
import mesosphere.marathon.metrics.{ MetricPrefixes, Metrics }
import mesosphere.marathon.state.{ PathId, Timestamp }
import mesosphere.marathon.tasks.{ TaskIdUtil, TaskTracker }
import org.apache.mesos.Protos.{ TaskID, TaskStatus }
import org.slf4j.LoggerFactory
import scala.concurrent.Future
/**
* Executes the given TaskStatusUpdateSteps for every update.
*/
class TaskStatusUpdateProcessorImpl @Inject() (
metrics: Metrics,
clock: Clock,
taskIdUtil: TaskIdUtil,
taskTracker: TaskTracker,
driverHolder: MarathonSchedulerDriverHolder,
steps: Seq[TaskStatusUpdateStep]) extends TaskStatusUpdateProcessor {
import scala.concurrent.ExecutionContext.Implicits.global
private[this] val log = LoggerFactory.getLogger(getClass)
private[this] val publishFutureTimer: Timer =
metrics.timer(metrics.name(MetricPrefixes.SERVICE, getClass, "publishFuture"))
private[this] val killUnknownTaskTimer: Timer =
metrics.timer(metrics.name(MetricPrefixes.SERVICE, getClass, "killUnknownTask"))
private[this] val stepTimers: Map[String, Timer] = steps.map { step =>
step.name -> metrics.timer(metrics.name(MetricPrefixes.SERVICE, getClass, s"step-${step.name}"))
}.toMap
log.info("Started status update processor with steps:\\n{}", steps.map(step => s"* ${step.name}").mkString("\\n"))
override def publish(status: TaskStatus): Future[Unit] = publishFutureTimer.timeFuture {
val now = clock.now()
val taskId = status.getTaskId
val appId = taskIdUtil.appId(taskId)
val maybeTask = taskTracker.fetchTask(taskId.getValue)
maybeTask match {
case Some(task) =>
processUpdate(
timestamp = now,
appId = appId,
task = task,
mesosStatus = status
).map(_ => acknowledge(status))
case None =>
killUnknownTaskTimer {
killTask(taskId)
acknowledge(status)
Future.successful(())
}
}
}
private[this] def acknowledge(taskStatus: TaskStatus): Unit = {
driverHolder.driver.foreach(_.acknowledgeStatusUpdate(taskStatus))
}
private[this] def killTask(taskId: TaskID): Unit = {
driverHolder.driver.foreach(_.killTask(taskId))
}
private[this] def processUpdate(
timestamp: Timestamp,
appId: PathId,
task: MarathonTask,
mesosStatus: TaskStatus): Future[Unit] = {
steps.foldLeft(Future.successful(())) { (resultSoFar, nextStep) =>
stepTimers(nextStep.name).timeFuture {
resultSoFar.flatMap { _ =>
log.debug("Executing {} for [{}]", Array[Object](nextStep.name, mesosStatus.getTaskId.getValue): _*)
nextStep.processUpdate(timestamp, appId, task, mesosStatus).map { _ =>
log.debug(
"Done with executing {} for [{}]",
Array[Object](nextStep.name, mesosStatus.getTaskId.getValue): _*
)
}
}
}
}
}
}
| Kosta-Github/marathon | src/main/scala/mesosphere/marathon/core/task/tracker/impl/TaskStatusUpdateProcessorImpl.scala | Scala | apache-2.0 | 3,286 |
package dao.postgres.common
import java.sql.Connection
trait ConnectionPool {
def fetchConnection(): Connection
def withConnection[T](f: Connection => T) = {
val connection = fetchConnection()
connection.setAutoCommit(false)
try {
f(connection)
} finally {
connection.close()
}
}
}
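// Editor's illustrative sketch (not part of the original file): withConnection
// disables auto-commit but never commits, so the caller must call conn.commit()
// itself. The table name and pool implementation here are hypothetical.
object ConnectionPoolExample {
  def insertJobName(pool: ConnectionPool, name: String): Unit =
    pool.withConnection { conn =>
      val st = conn.prepareStatement("INSERT INTO jobs(name) VALUES (?)")
      try {
        st.setString(1, name)
        st.executeUpdate()
        conn.commit() // auto-commit was disabled by withConnection
      } finally {
        st.close()
      }
    }
}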
| gilt/sundial | app/dao/postgres/common/ConnectionPool.scala | Scala | mit | 322 |
package org.qirx.cms.construction
sealed trait System[T]
case class Return[ReturnType](result: ReturnType) extends System[ReturnType]
trait DirectAction[ReturnType] extends System[ReturnType] {
  def result: ReturnType
}
| EECOLOR/play-cms | cms/src/main/scala/org/qirx/cms/construction/System.scala | Scala | mit | 223 |
package com.twitter.finagle.client
import com.twitter.finagle.socks.SocksProxyFlags
import com.twitter.finagle.Stack
import com.twitter.finagle.transport.Transport
import com.twitter.util.Duration
import com.twitter.util.Future
import java.net.SocketAddress
/**
* Transporters are simple functions from a `SocketAddress` to a
* `Future[Transport[In, Out]]`. They represent a transport layer session from a
* client to a server. Transporters are symmetric to the server-side
* [[com.twitter.finagle.server.Listener]].
*/
trait Transporter[In, Out] {
def apply(addr: SocketAddress): Future[Transport[In, Out]]
}
/**
* A collection of [[com.twitter.finagle.Stack.Param Stack.Params]] useful for configuring
* a [[com.twitter.finagle.client.Transporter]].
*
* @define $param a [[com.twitter.finagle.Stack.Param]] used to configure
*/
object Transporter {
import com.twitter.conversions.time._
/**
* $param a `SocketAddress` that a `Transporter` connects to.
*/
case class EndpointAddr(addr: SocketAddress) {
def mk(): (EndpointAddr, Stack.Param[EndpointAddr]) =
(this, EndpointAddr.param)
}
object EndpointAddr {
implicit val param = Stack.Param(EndpointAddr(new SocketAddress {
override def toString = "noaddr"
}))
}
/**
* $param the connect timeout of a `Transporter`.
*
* @param howlong A maximum amount of time a transport
* is allowed to spend connecting.
*/
case class ConnectTimeout(howlong: Duration) {
def mk(): (ConnectTimeout, Stack.Param[ConnectTimeout]) =
(this, ConnectTimeout.param)
}
object ConnectTimeout {
implicit val param = Stack.Param(ConnectTimeout(1.second))
}
/**
* $param hostname verification, if TLS is enabled.
* @see [[com.twitter.finagle.transport.Transport#TLSEngine]]
*/
case class TLSHostname(hostname: Option[String]) {
def mk(): (TLSHostname, Stack.Param[TLSHostname]) =
(this, TLSHostname.param)
}
object TLSHostname {
implicit val param = Stack.Param(TLSHostname(None))
}
/**
* $param a SocksProxy as the endpoint for a `Transporter`.
*/
case class SocksProxy(sa: Option[SocketAddress], credentials: Option[(String, String)]) {
def mk(): (SocksProxy, Stack.Param[SocksProxy]) =
(this, SocksProxy.param)
}
object SocksProxy {
implicit val param = Stack.Param(SocksProxy(
SocksProxyFlags.socksProxy,
SocksProxyFlags.socksUsernameAndPassword
))
}
/**
* $param a HttpProxy as the endpoint for a `Transporter`.
* @see http://www.w3.org/Protocols/rfc2616/rfc2616-sec9.html#9.9
*/
case class HttpProxy(sa: Option[SocketAddress], credentials: Option[Credentials]) {
def mk(): (HttpProxy, Stack.Param[HttpProxy]) =
(this, HttpProxy.param)
def this(sa: Option[SocketAddress]) = this(sa, None)
}
object HttpProxy {
implicit val param = Stack.Param(HttpProxy(None, None))
}
/**
* This class wraps the username, password that we use for http proxy auth
*/
case class Credentials(username: String, password: String)
}
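// Editor's illustrative sketch (not part of the original file): params are
// typically carried in a Stack.Params collection and read back via the
// implicit Stack.Param instances defined above.
object TransporterParamExamples {
  import com.twitter.conversions.time._
  val params: Stack.Params = Stack.Params.empty + Transporter.ConnectTimeout(2.seconds)
  val Transporter.ConnectTimeout(timeout) = params[Transporter.ConnectTimeout]
}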
| kingtang/finagle | finagle-core/src/main/scala/com/twitter/finagle/client/Transporter.scala | Scala | apache-2.0 | 3,067 |
package org.jetbrains.plugins.scala.lang.formatting.settings.inference
import com.intellij.application.options.codeStyle.CodeStyleSchemesModel
import com.intellij.ide.startup.StartupManagerEx
import com.intellij.openapi.components.{PersistentStateComponent, _}
import com.intellij.openapi.diagnostic.Logger
import com.intellij.openapi.project.{DumbService, Project}
import com.intellij.psi.codeStyle.CodeStyleSettings
import com.intellij.util.indexing.FileBasedIndex
import org.jetbrains.plugins.scala.ScalaFileType
import org.jetbrains.plugins.scala.extensions.{executeOnPooledThread, inReadAction}
import org.jetbrains.plugins.scala.finder.SourceFilterScope
import org.jetbrains.plugins.scala.lang.formatting.settings.ScalaCodeStyleSettings
import org.jetbrains.plugins.scala.lang.formatting.settings.inference.ScalaDocAsteriskAlignStyleIndexer.AsteriskAlignStyle
import org.jetbrains.plugins.scala.lang.formatting.settings.inference.ScalaDocAsteriskAlignStyleIndexer.AsteriskAlignStyle.AlignByColumnThree
import scala.beans.BeanProperty
import scala.jdk.CollectionConverters._
@State(
name = "CodeStyleSettingsInfer",
storages = Array[Storage](new Storage(value = StoragePathMacros.WORKSPACE_FILE))
)
final class CodeStyleSettingsInferService(private val project: Project)
extends PersistentStateComponent[CodeStyleSettingsInferService.State] {
private val Log = Logger.getInstance(getClass)
private var state = new CodeStyleSettingsInferService.State
override def getState: CodeStyleSettingsInferService.State = state
override def loadState(state: CodeStyleSettingsInferService.State): Unit = this.state = state
def init(): Unit = {
if (state.done) {
Log.info("settings inference skipped: already done")
} else {
StartupManagerEx.getInstanceEx(project).runWhenProjectIsInitialized { () =>
DumbService.getInstance(project).runWhenSmart { () =>
executeOnPooledThread {
inferSettings()
}
state.done = true
}
}
}
}
private def inferSettings(): Unit = {
modifyCodeStyleSettings { settings =>
inferBestScaladocAsteriskAlignStyle(settings)
}
}
private def inferBestScaladocAsteriskAlignStyle(settings: CodeStyleSettings): Boolean = {
val fileIndex = FileBasedIndex.getInstance()
val indexId = ScalaDocAsteriskAlignStyleIndexer.Id
val sourcesScope = SourceFilterScope(Seq(ScalaFileType.INSTANCE))(project)
val alignTypeCounts: Map[AsteriskAlignStyle, Int] =
inReadAction {
val styles = fileIndex.getAllKeys(indexId, project).asScala
styles.map { alignType =>
val occurrences = fileIndex.getValues(indexId, alignType, sourcesScope).asScala
alignType -> occurrences.foldLeft(0)(_ + _)
}.filter(_._2 > 0).toMap
}
if (alignTypeCounts.nonEmpty) {
val mostUsedStyle = alignTypeCounts.maxBy(_._2)._1
Log.info(s"Scaladoc: most used align type: $mostUsedStyle ($alignTypeCounts)")
val scalaSettings = settings.getCustomSettings(classOf[ScalaCodeStyleSettings])
scalaSettings.USE_SCALADOC2_FORMATTING = mostUsedStyle == AlignByColumnThree
true
} else {
Log.info(s"Scaladoc: no comments detected")
false
}
}
private def modifyCodeStyleSettings(modifier: CodeStyleSettings => Boolean): Unit = {
val codeStyleSchemesModel = new CodeStyleSchemesModel(project)
val selectedScheme = codeStyleSchemesModel.getSelectedScheme
val projectScheme =
if (codeStyleSchemesModel.isProjectScheme(selectedScheme)) {
selectedScheme
} else {
codeStyleSchemesModel.copyToProject(selectedScheme)
codeStyleSchemesModel.getProjectScheme
}
val settings = projectScheme.getCodeStyleSettings
if (modifier.apply(settings)) {
codeStyleSchemesModel.apply()
}
}
}
object CodeStyleSettingsInferService {
class State {
@BeanProperty
var done: Boolean = false
}
} | JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/formatting/settings/inference/CodeStyleSettingsInferService.scala | Scala | apache-2.0 | 3,971 |
package com.gilt.thehand.rules.typed
import com.gilt.thehand.{Context, AbstractRuleSpec}
class LongEqSpec extends AbstractRuleSpec {
val testCases = Map(
LongEq(54) -> (
Set(Context(54), Context(BigDecimal("54")), Context(54.0)),
// '6' is important here because its char code is 54.
Set(Context(55), Context(BigDecimal("54.001")), Context(54.01), Context(true), Context('1'), Context("544"), Context('6'))
),
LongEq(1) -> (
Set(Context(BigDecimal("1.000")), Context(1.00), Context(true), Context('1')),
Set(Context(0), Context(BigDecimal("1.001")), Context(1.01), Context(false), Context("11"), Context('2'))
),
LongEq(2147483648L) -> ( // This is Int.MaxValue.toLong + 1
Set(Context(2147483648L), Context(BigDecimal("2147483648")), Context(BigDecimal("2147483648.00")), Context(2147483648.0)),
Set(Context(Int.MaxValue), Context(BigDecimal("2147483648.001")), Context(2147483648.01), Context(true), Context("2147483649"))
)
)
runTests(testCases)
}
| gilt/the-hand | src/test/scala/com/gilt/thehand/rules/typed/LongEqSpec.scala | Scala | apache-2.0 | 1,035 |
package play.api.libs {
/**
* The Iteratee monad provides strict, safe, and functional I/O.
*/
package object iteratee {
type K[E, A] = Input[E] => Iteratee[E, A]
}
}
package play.api.libs.iteratee {
private[iteratee] object internal {
import scala.concurrent.ExecutionContext
import java.util.concurrent.Executors
import java.util.concurrent.ThreadFactory
import java.util.concurrent.atomic.AtomicInteger
implicit lazy val defaultExecutionContext: scala.concurrent.ExecutionContext = {
val numberOfThreads = try {
com.typesafe.config.ConfigFactory.load().getInt("iteratee-threadpool-size")
} catch { case e: com.typesafe.config.ConfigException.Missing => Runtime.getRuntime.availableProcessors }
val threadFactory = new ThreadFactory {
val threadNo = new AtomicInteger()
val backingThreadFactory = Executors.defaultThreadFactory()
def newThread(r: Runnable) = {
val thread = backingThreadFactory.newThread(r)
thread.setName("iteratee-execution-context-" + threadNo.incrementAndGet())
thread
}
}
ExecutionContext.fromExecutorService(Executors.newFixedThreadPool(numberOfThreads, threadFactory))
}
}
}
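// Editor's note: the pool size is read via Typesafe Config, so it can be
// overridden with a JVM system property, e.g. -Diteratee-threadpool-size=8
// (system properties take precedence over application.conf in ConfigFactory.load()).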
| noel-yap/setter-for-catan | play-2.1.1/framework/src/iteratees/src/main/scala/play/api/libs/iteratee/package.scala | Scala | apache-2.0 | 1,253 |
/*
* Copyright 2015 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtInteger, Input}
case class CP111(value: Int) extends CtBoxIdentifier(name = "Employees remuneration previously disallowed") with CtInteger with Input
| keithhall/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP111.scala | Scala | apache-2.0 | 839 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.algorithms.consensus
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.adam.rich.RichAlignmentRecord
import org.bdgenomics.adam.util.ADAMFunSuite
import org.bdgenomics.formats.avro.AlignmentRecord
class ConsensusGeneratorFromKnownsSuite extends ADAMFunSuite {
def cg(sc: SparkContext): ConsensusGenerator = {
val path = testFile("random.vcf")
ConsensusGenerator.fromKnownIndels(sc.loadVariants(path))
}
sparkTest("no consensuses for empty target") {
val c = cg(sc)
assert(c.findConsensus(Iterable.empty).isEmpty)
}
sparkTest("no consensuses for reads that don't overlap a target") {
val c = cg(sc)
val read = AlignmentRecord.newBuilder
.setStart(1L)
.setEnd(2L)
.setContigName("notAContig")
.build
assert(c.findConsensus(Iterable(new RichAlignmentRecord(read))).isEmpty)
}
sparkTest("return a consensus for read overlapping a single target") {
val c = cg(sc)
val read = AlignmentRecord.newBuilder
.setStart(19189L)
.setEnd(19191L)
.setContigName("2")
.build
val consensuses = c.findConsensus(Iterable(new RichAlignmentRecord(read)))
assert(consensuses.size === 1)
assert(consensuses.head.consensus === "")
assert(consensuses.head.index.referenceName === "2")
assert(consensuses.head.index.start === 19190L)
assert(consensuses.head.index.end === 19192L)
}
}
| massie/adam | adam-core/src/test/scala/org/bdgenomics/adam/algorithms/consensus/ConsensusGeneratorFromKnownsSuite.scala | Scala | apache-2.0 | 2,296 |
import sbt._
import sbt.Keys._
import less.Plugin._
import sbtlivescript.SbtLiveScriptPlugin._
import LiveScriptKeys._
import sbtclosure.SbtClosurePlugin._
import spray.revolver.RevolverPlugin._
import sbtassembly.Plugin.AssemblyKeys._
import sbtassembly.Plugin._
import com.mojolly.scalate.ScalatePlugin._
import ScalateKeys._
trait WithDependencies {
val dependenciesBuild = Seq(
// "com.logikujo" %% "unfilteredm-core" % "0.2-SNAPSHOT",
// "com.logikujo" %% "unfilteredm-mail" % "0.2-SNAPSHOT",
// "com.logikujo" %% "unfilteredm-captcha" % "0.2-SNAPSHOT",
// "com.logikujo" %% "unfilteredm-blog" % "0.2-SNAPSHOT"
// "org.reactivemongo" %% "reactivemongo" % "0.11.0-SNAPSHOT"
)
}
trait WithResolvers {
val resolversBuild = Seq(
"java m2" at "http://download.java.net/maven/2",
"sonatype-public" at "https://oss.sonatype.org/content/groups/public",
"Typesafe repository releases" at "http://repo.typesafe.com/typesafe/releases/"
)
}
object BuildSettings {
private def mergeFunc(old: String => MergeStrategy): (String) => MergeStrategy = {
case PathList("META-INF", xs@_*) =>
(xs map {
_.toLowerCase
}) match {
case ("eclipsef.sf" :: Nil) => MergeStrategy.rename
case ("mime.types" :: Nil) => MergeStrategy.discard
case ("manifest.mf" :: Nil) => MergeStrategy.discard
case ("dependencies" :: Nil) => MergeStrategy.discard
case ("license" :: Nil) | ("license.txt" :: Nil) => MergeStrategy.discard
case ("notice" :: Nil) | ("notice.txt" :: Nil) => MergeStrategy.discard
case _ => MergeStrategy.deduplicate
}
case x => old(x)
}
val buildTime = SettingKey[String]("build-time")
val basicSettings = Defaults.defaultSettings ++ Seq(
name := "AppTest",
version := "0.1-SNAPSHOT",
organization := "com.logikujo",
scalaVersion := "2.10.4",
scalacOptions <<= scalaVersion map { sv: String =>
if (sv.startsWith("2.10."))
Seq("-deprecation", "-unchecked", "-feature", "-language:postfixOps", "-language:implicitConversions")
else
Seq("-deprecation", "-unchecked")
},
javaOptions ++= List("-Dcom.logikujo.atuinsapp.scalate.runMode=production"),
fork in run := true
)
val lessSettingsBuild = lessSettings ++ Seq(
(LessKeys.filter in (Compile, LessKeys.less)) := "*styles.less",
(LessKeys.mini in (Compile, LessKeys.less)) := true,
(resourceManaged in (Compile, LessKeys.less)) <<= (resourceDirectory in Compile)(_ / "www" / "css")
)
val liveScriptSettingsBuild = liveScriptSettings ++ Seq(
(outputDirectory in (Compile, livescript)) := (sourceDirectory in (Compile, ClosureKeys.closure)).value / "ls"
)
val closureSettingsBuild = closureSettings ++ Seq(
(ClosureKeys.closure in Compile) := ((ClosureKeys.closure in Compile) dependsOn (livescript in (Compile, livescript))).value,
(resourceManaged in (Compile, ClosureKeys.closure)) <<= (resourceDirectory in Compile)(_ / "www" / "js"),
(ClosureKeys.prettyPrint in (Compile, ClosureKeys.closure)) := true
)
val revolverSettingsBuild = Revolver.settings ++ Seq(
javaOptions in Revolver.reStart += "-Djavax.net.ssl.trustStore=src/main/resources/cacerts.jks",
javaOptions in Revolver.reStart += "-Djavax.net.ssl.trustStorePassword=changeit",
javaOptions in Revolver.reStart += "-Dcom.logikujo.apptest.scalate.runMode=development",
javaOptions in Revolver.reStart += "-Dcom.logikujo.apptest.scalate.prefix=src/main/resources/scalate"
)
val assemblySettingsBuild = assemblySettings ++ Seq(
mainClass in assembly := Some("com.logikujo.www.AppTest"),
mergeStrategy in assembly <<= (mergeStrategy in assembly)(mergeFunc(_))
)
val scalateSettingsBuild = scalateSettings ++ Seq(
scalateTemplateConfig in Compile := {
val base = sourceDirectory.in(Compile).value
Seq(
TemplateConfig(
base / "resources" / "scalate",
Seq(
),
Seq(
),
Some("webTmpl")
))
})
val webAppSettings = basicSettings ++
closureSettingsBuild ++
revolverSettingsBuild ++
assemblySettingsBuild ++
liveScriptSettingsBuild ++
lessSettingsBuild ++
scalateSettingsBuild
}
| AitorATuin/UnfilteredM | project/BuildSettings.scala | Scala | mit | 4,258 |
package models
import java.util.UUID
import org.joda.time.DateTime
import play.api.libs.json.Json
/**
* Created by Biacco42 on 2016/04/19.
*/
case class MailToken(
id: UUID,
userId: UUID,
expirationDate: DateTime,
tokenKind: String)
object MailToken {
/**
   * Creates a MailToken instance for the given user.
   * @param user The user.
   * @param tokenKind The token kind, either "confirm" or "reset".
   * @return A new mail token instance, expiring one day from now.
*/
def create(user: User, tokenKind: String): MailToken = MailToken(UUID.randomUUID(), user.userID, new DateTime().plusDays(1), tokenKind)
/**
   * Converts the [[MailToken]] object to JSON and vice versa.
*/
implicit val jsonFormat = Json.format[MailToken]
}
| Biacco42/play-silhouette-mail-confirm-seed | app/models/MailToken.scala | Scala | apache-2.0 | 734 |
package monocle.function
import monocle.MonocleSuite
import monocle.law.discipline.function.EmptyTests
class EmptySpec extends MonocleSuite {
implicit val slistEmpty: Empty[CList] = Empty.fromIso(CList.toList)
checkAll("fromIso", EmptyTests[CList])
}
| NightRa/Monocle | test/src/test/scala/monocle/function/EmptySpec.scala | Scala | mit | 261 |
package skinny.orm.feature
import org.joda.time.DateTime
import org.scalatest.{ Matchers, fixture }
import scalikejdbc.scalatest.AutoRollback
import skinny.dbmigration.DBSeeds
import skinny.orm._
import scalikejdbc._
class TimestampsFeatureSpec extends fixture.FunSpec with Matchers
with Connection
with DBSeeds
with Formatter
with AutoRollback {
addSeedSQL(
sql"""
create table with_id (
id bigint auto_increment primary key not null,
created_at timestamp not null,
updated_at timestamp
)""",
sql"""
create table no_id (
a int not null,
b int not null,
created_at timestamp not null,
updated_at timestamp,
primary key (a, b)
)""",
sql"""
create table my_with_id (
id bigint auto_increment primary key not null,
my_created_at timestamp not null,
my_updated_at timestamp
)"""
)
run()
case class WithId(id: Long, createdAt: DateTime, updatedAt: DateTime)
object WithId extends SkinnyCRUDMapper[WithId] with TimestampsFeature[WithId] {
override def defaultAlias = createAlias("wid")
override def extract(rs: WrappedResultSet, n: ResultName[WithId]) = autoConstruct(rs, n)
}
case class NoId(a: Int, b: Int, createdAt: DateTime, updatedAt: DateTime)
object NoId extends SkinnyNoIdCRUDMapper[NoId] with NoIdTimestampsFeature[NoId] {
override def defaultAlias = createAlias("noid")
override def extract(rs: WrappedResultSet, n: ResultName[NoId]) = autoConstruct(rs, n)
}
case class MyWithId(id: Long, myCreatedAt: DateTime, myUpdatedAt: DateTime)
object MyWithId extends SkinnyCRUDMapper[MyWithId] with TimestampsFeature[MyWithId] {
override def defaultAlias = createAlias("wid")
override def extract(rs: WrappedResultSet, n: ResultName[MyWithId]) = autoConstruct(rs, n)
override def createdAtFieldName = "myCreatedAt"
override def updatedAtFieldName = "myUpdatedAt"
}
val (t1, t2) = (new DateTime(2000, 1, 1, 0, 0, 0), new DateTime(2000, 1, 2, 0, 0, 0))
describe("WithId") {
it("assigns/updates timestamps") { implicit session =>
info("It automatically assigns createdAt and updatedAt.")
val id = WithId.createWithAttributes()
val loaded1 = WithId.findById(id).get
loaded1.createdAt should be(loaded1.updatedAt)
loaded1.createdAt shouldNot be(null)
info("Only updatedAt should be changed by update.")
WithId.updateById(id).withAttributes()
val loaded2 = WithId.findById(id).get
loaded2.createdAt should be(loaded1.createdAt)
loaded2.updatedAt shouldNot be(loaded1.updatedAt)
info("Specified value is used when a user wants.")
WithId.updateById(id).withAttributes('createdAt -> t1, 'updatedAt -> t2)
val loaded3 = WithId.findById(id).get
loaded3.createdAt should be(t1)
loaded3.updatedAt should be(t2)
}
}
describe("WithoutId") {
it("assigns/updates timestamps") { implicit session =>
info("It automatically assigns createdAt and updatedAt.")
NoId.createWithAttributes('a -> 1, 'b -> 2)
val findCond = sqls.eq(NoId.column.a, 1).and.eq(NoId.column.b, 2)
val loaded1 = NoId.findBy(findCond).get
loaded1.createdAt should be(loaded1.updatedAt)
loaded1.createdAt shouldNot be(null)
info("Only updatedAt should be changed by update.")
NoId.updateBy(findCond).withAttributes()
val loaded2 = NoId.findBy(findCond).get
loaded2.createdAt should be(loaded1.createdAt)
loaded2.updatedAt shouldNot be(loaded1.updatedAt)
info("Specified value is used when a user wants.")
NoId.updateBy(findCond).withAttributes('createdAt -> t1, 'updatedAt -> t2)
val loaded3 = NoId.findBy(findCond).get
loaded3.createdAt should be(t1)
loaded3.updatedAt should be(t2)
}
}
describe("MyWithId") {
it("assigns timestamps for custom fields") { implicit session =>
val id = MyWithId.createWithAttributes()
val loaded1 = MyWithId.findById(id).get
loaded1.myCreatedAt should be(loaded1.myUpdatedAt)
loaded1.myCreatedAt shouldNot be(null)
}
}
}
| holycattle/skinny-framework | orm/src/test/scala/skinny/orm/feature/TimestampsFeatureSpec.scala | Scala | mit | 4,068 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.columnar.compression
import java.nio.ByteBuffer
import java.nio.ByteOrder
import scala.collection.mutable
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.SpecificInternalRow
import org.apache.spark.sql.execution.columnar._
import org.apache.spark.sql.execution.vectorized.WritableColumnVector
import org.apache.spark.sql.types._
private[columnar] case object PassThrough extends CompressionScheme {
override val typeId = 0
override def supports(columnType: ColumnType[_]): Boolean = true
override def encoder[T <: AtomicType](columnType: NativeColumnType[T]): Encoder[T] = {
new this.Encoder[T](columnType)
}
override def decoder[T <: AtomicType](
buffer: ByteBuffer, columnType: NativeColumnType[T]): Decoder[T] = {
new this.Decoder(buffer, columnType)
}
class Encoder[T <: AtomicType](columnType: NativeColumnType[T]) extends compression.Encoder[T] {
override def uncompressedSize: Int = 0
override def compressedSize: Int = 0
override def compress(from: ByteBuffer, to: ByteBuffer): ByteBuffer = {
// Writes compression type ID and copies raw contents
to.putInt(PassThrough.typeId).put(from).rewind()
to
}
}
class Decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T])
extends compression.Decoder[T] {
override def next(row: InternalRow, ordinal: Int): Unit = {
columnType.extract(buffer, row, ordinal)
}
override def hasNext: Boolean = buffer.hasRemaining
private def putBooleans(
columnVector: WritableColumnVector, pos: Int, bufferPos: Int, len: Int): Unit = {
for (i <- 0 until len) {
columnVector.putBoolean(pos + i, (buffer.get(bufferPos + i) != 0))
}
}
private def putBytes(
columnVector: WritableColumnVector, pos: Int, bufferPos: Int, len: Int): Unit = {
columnVector.putBytes(pos, len, buffer.array, bufferPos)
}
private def putShorts(
columnVector: WritableColumnVector, pos: Int, bufferPos: Int, len: Int): Unit = {
columnVector.putShorts(pos, len, buffer.array, bufferPos)
}
private def putInts(
columnVector: WritableColumnVector, pos: Int, bufferPos: Int, len: Int): Unit = {
columnVector.putInts(pos, len, buffer.array, bufferPos)
}
private def putLongs(
columnVector: WritableColumnVector, pos: Int, bufferPos: Int, len: Int): Unit = {
columnVector.putLongs(pos, len, buffer.array, bufferPos)
}
private def putFloats(
columnVector: WritableColumnVector, pos: Int, bufferPos: Int, len: Int): Unit = {
columnVector.putFloats(pos, len, buffer.array, bufferPos)
}
private def putDoubles(
columnVector: WritableColumnVector, pos: Int, bufferPos: Int, len: Int): Unit = {
columnVector.putDoubles(pos, len, buffer.array, bufferPos)
}
private def decompress0(
columnVector: WritableColumnVector,
capacity: Int,
unitSize: Int,
putFunction: (WritableColumnVector, Int, Int, Int) => Unit): Unit = {
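// Walks positions 0 until capacity. The nulls buffer holds the null count
// followed by the null indices, so each run of non-null values between
// consecutive null indices is copied in bulk via putFunction, and each
// recorded null index becomes a putNull call.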
val nullsBuffer = buffer.duplicate().order(ByteOrder.nativeOrder())
nullsBuffer.rewind()
val nullCount = ByteBufferHelper.getInt(nullsBuffer)
var nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else capacity
var pos = 0
var seenNulls = 0
var bufferPos = buffer.position()
while (pos < capacity) {
if (pos != nextNullIndex) {
val len = nextNullIndex - pos
assert(len * unitSize < Int.MaxValue)
putFunction(columnVector, pos, bufferPos, len)
bufferPos += len * unitSize
pos += len
} else {
seenNulls += 1
nextNullIndex = if (seenNulls < nullCount) {
ByteBufferHelper.getInt(nullsBuffer)
} else {
capacity
}
columnVector.putNull(pos)
pos += 1
}
}
}
override def decompress(columnVector: WritableColumnVector, capacity: Int): Unit = {
columnType.dataType match {
case _: BooleanType =>
val unitSize = 1
decompress0(columnVector, capacity, unitSize, putBooleans)
case _: ByteType =>
val unitSize = 1
decompress0(columnVector, capacity, unitSize, putBytes)
case _: ShortType =>
val unitSize = 2
decompress0(columnVector, capacity, unitSize, putShorts)
case _: IntegerType =>
val unitSize = 4
decompress0(columnVector, capacity, unitSize, putInts)
case _: LongType =>
val unitSize = 8
decompress0(columnVector, capacity, unitSize, putLongs)
case _: FloatType =>
val unitSize = 4
decompress0(columnVector, capacity, unitSize, putFloats)
case _: DoubleType =>
val unitSize = 8
decompress0(columnVector, capacity, unitSize, putDoubles)
}
}
}
}
private[columnar] case object RunLengthEncoding extends CompressionScheme {
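// Encodes each run of equal consecutive values as the value followed by a
// 4-byte run length, so sorted or low-cardinality columns compress well.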
override val typeId = 1
override def encoder[T <: AtomicType](columnType: NativeColumnType[T]): Encoder[T] = {
new this.Encoder[T](columnType)
}
override def decoder[T <: AtomicType](
buffer: ByteBuffer, columnType: NativeColumnType[T]): Decoder[T] = {
new this.Decoder(buffer, columnType)
}
override def supports(columnType: ColumnType[_]): Boolean = columnType match {
case INT | LONG | SHORT | BYTE | STRING | BOOLEAN => true
case _ => false
}
class Encoder[T <: AtomicType](columnType: NativeColumnType[T]) extends compression.Encoder[T] {
private var _uncompressedSize = 0
private var _compressedSize = 0
// Using `SpecificInternalRow` to store the last value to avoid boxing/unboxing cost.
private val lastValue = new SpecificInternalRow(Seq(columnType.dataType))
private var lastRun = 0
override def uncompressedSize: Int = _uncompressedSize
override def compressedSize: Int = _compressedSize
override def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = {
val value = columnType.getField(row, ordinal)
val actualSize = columnType.actualSize(row, ordinal)
_uncompressedSize += actualSize
if (lastValue.isNullAt(0)) {
columnType.copyField(row, ordinal, lastValue, 0)
lastRun = 1
_compressedSize += actualSize + 4
} else {
if (columnType.getField(lastValue, 0) == value) {
lastRun += 1
} else {
_compressedSize += actualSize + 4
columnType.copyField(row, ordinal, lastValue, 0)
lastRun = 1
}
}
}
override def compress(from: ByteBuffer, to: ByteBuffer): ByteBuffer = {
to.putInt(RunLengthEncoding.typeId)
if (from.hasRemaining) {
val currentValue = new SpecificInternalRow(Seq(columnType.dataType))
var currentRun = 1
val value = new SpecificInternalRow(Seq(columnType.dataType))
columnType.extract(from, currentValue, 0)
while (from.hasRemaining) {
columnType.extract(from, value, 0)
if (value.get(0, columnType.dataType) == currentValue.get(0, columnType.dataType)) {
currentRun += 1
} else {
// Writes current run
columnType.append(currentValue, 0, to)
to.putInt(currentRun)
// Resets current run
columnType.copyField(value, 0, currentValue, 0)
currentRun = 1
}
}
// Writes the last run
columnType.append(currentValue, 0, to)
to.putInt(currentRun)
}
to.rewind()
to
}
}
class Decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T])
extends compression.Decoder[T] {
private var run = 0
private var valueCount = 0
private var currentValue: T#InternalType = _
override def next(row: InternalRow, ordinal: Int): Unit = {
if (valueCount == run) {
currentValue = columnType.extract(buffer)
run = ByteBufferHelper.getInt(buffer)
valueCount = 1
} else {
valueCount += 1
}
columnType.setField(row, ordinal, currentValue)
}
override def hasNext: Boolean = valueCount < run || buffer.hasRemaining
private def putBoolean(columnVector: WritableColumnVector, pos: Int, value: Long): Unit = {
columnVector.putBoolean(pos, value == 1)
}
private def getByte(buffer: ByteBuffer): Long = {
buffer.get().toLong
}
private def putByte(columnVector: WritableColumnVector, pos: Int, value: Long): Unit = {
columnVector.putByte(pos, value.toByte)
}
private def getShort(buffer: ByteBuffer): Long = {
buffer.getShort().toLong
}
private def putShort(columnVector: WritableColumnVector, pos: Int, value: Long): Unit = {
columnVector.putShort(pos, value.toShort)
}
private def getInt(buffer: ByteBuffer): Long = {
buffer.getInt().toLong
}
private def putInt(columnVector: WritableColumnVector, pos: Int, value: Long): Unit = {
columnVector.putInt(pos, value.toInt)
}
private def getLong(buffer: ByteBuffer): Long = {
buffer.getLong()
}
private def putLong(columnVector: WritableColumnVector, pos: Int, value: Long): Unit = {
columnVector.putLong(pos, value)
}
private def decompress0(
columnVector: WritableColumnVector,
capacity: Int,
getFunction: (ByteBuffer) => Long,
putFunction: (WritableColumnVector, Int, Long) => Unit): Unit = {
val nullsBuffer = buffer.duplicate().order(ByteOrder.nativeOrder())
nullsBuffer.rewind()
val nullCount = ByteBufferHelper.getInt(nullsBuffer)
var nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1
var pos = 0
var seenNulls = 0
var runLocal = 0
var valueCountLocal = 0
var currentValueLocal: Long = 0
while (valueCountLocal < runLocal || (pos < capacity)) {
if (pos != nextNullIndex) {
if (valueCountLocal == runLocal) {
currentValueLocal = getFunction(buffer)
runLocal = ByteBufferHelper.getInt(buffer)
valueCountLocal = 1
} else {
valueCountLocal += 1
}
putFunction(columnVector, pos, currentValueLocal)
} else {
seenNulls += 1
if (seenNulls < nullCount) {
nextNullIndex = ByteBufferHelper.getInt(nullsBuffer)
}
columnVector.putNull(pos)
}
pos += 1
}
}
override def decompress(columnVector: WritableColumnVector, capacity: Int): Unit = {
columnType.dataType match {
case _: BooleanType =>
decompress0(columnVector, capacity, getByte, putBoolean)
case _: ByteType =>
decompress0(columnVector, capacity, getByte, putByte)
case _: ShortType =>
decompress0(columnVector, capacity, getShort, putShort)
case _: IntegerType =>
decompress0(columnVector, capacity, getInt, putInt)
case _: LongType =>
decompress0(columnVector, capacity, getLong, putLong)
case _ => throw new IllegalStateException("Unsupported type in RunLengthEncoding.")
}
}
}
}
private[columnar] case object DictionaryEncoding extends CompressionScheme {
override val typeId = 2
// 32K unique values allowed
val MAX_DICT_SIZE = Short.MaxValue
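// Compressed layout: [typeId][dictionary size][dictionary values...] followed
// by one 2-byte dictionary id per element. As an illustration, the values
// (10, 20, 10) would be written as the dictionary (10, 20) plus ids 0, 1, 0.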
override def decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T])
: Decoder[T] = {
new this.Decoder(buffer, columnType)
}
override def encoder[T <: AtomicType](columnType: NativeColumnType[T]): Encoder[T] = {
new this.Encoder[T](columnType)
}
override def supports(columnType: ColumnType[_]): Boolean = columnType match {
case INT | LONG | STRING => true
case _ => false
}
class Encoder[T <: AtomicType](columnType: NativeColumnType[T]) extends compression.Encoder[T] {
// Size of the input, uncompressed, in bytes. Note that we only count until the dictionary
// overflows.
private var _uncompressedSize = 0
// If the number of distinct elements is too large, we discard the use of dictionary encoding
// and set the overflow flag to true.
private var overflow = false
// Total number of elements.
private var count = 0
// The reverse mapping of _dictionary, i.e. mapping encoded integer to the value itself.
private var values = new mutable.ArrayBuffer[T#InternalType](1024)
// The dictionary that maps a value to the encoded short integer.
private val dictionary = mutable.HashMap.empty[Any, Short]
// Size of the serialized dictionary in bytes. Initialized to 4 since we need at least an `Int`
// to store dictionary element count.
private var dictionarySize = 4
override def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = {
val value = columnType.getField(row, ordinal)
if (!overflow) {
val actualSize = columnType.actualSize(row, ordinal)
count += 1
_uncompressedSize += actualSize
if (!dictionary.contains(value)) {
if (dictionary.size < MAX_DICT_SIZE) {
val clone = columnType.clone(value)
values += clone
dictionarySize += actualSize
dictionary(clone) = dictionary.size.toShort
} else {
overflow = true
values.clear()
dictionary.clear()
}
}
}
}
override def compress(from: ByteBuffer, to: ByteBuffer): ByteBuffer = {
if (overflow) {
throw new IllegalStateException(
"Dictionary encoding should not be used because of dictionary overflow.")
}
to.putInt(DictionaryEncoding.typeId)
.putInt(dictionary.size)
var i = 0
while (i < values.length) {
columnType.append(values(i), to)
i += 1
}
while (from.hasRemaining) {
to.putShort(dictionary(columnType.extract(from)))
}
to.rewind()
to
}
override def uncompressedSize: Int = _uncompressedSize
override def compressedSize: Int = if (overflow) Int.MaxValue else dictionarySize + count * 2
}
class Decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T])
extends compression.Decoder[T] {
val elementNum = ByteBufferHelper.getInt(buffer)
private val dictionary: Array[Any] = new Array[Any](elementNum)
private var intDictionary: Array[Int] = null
private var longDictionary: Array[Long] = null
columnType.dataType match {
case _: IntegerType =>
intDictionary = new Array[Int](elementNum)
for (i <- 0 until elementNum) {
val v = columnType.extract(buffer).asInstanceOf[Int]
intDictionary(i) = v
dictionary(i) = v
}
case _: LongType =>
longDictionary = new Array[Long](elementNum)
for (i <- 0 until elementNum) {
val v = columnType.extract(buffer).asInstanceOf[Long]
longDictionary(i) = v
dictionary(i) = v
}
case _: StringType =>
for (i <- 0 until elementNum) {
val v = columnType.extract(buffer).asInstanceOf[Any]
dictionary(i) = v
}
}
override def next(row: InternalRow, ordinal: Int): Unit = {
columnType.setField(row, ordinal, dictionary(buffer.getShort()).asInstanceOf[T#InternalType])
}
override def hasNext: Boolean = buffer.hasRemaining
override def decompress(columnVector: WritableColumnVector, capacity: Int): Unit = {
val nullsBuffer = buffer.duplicate().order(ByteOrder.nativeOrder())
nullsBuffer.rewind()
val nullCount = ByteBufferHelper.getInt(nullsBuffer)
var nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1
var pos = 0
var seenNulls = 0
columnType.dataType match {
case _: IntegerType =>
val dictionaryIds = columnVector.reserveDictionaryIds(capacity)
columnVector.setDictionary(new ColumnDictionary(intDictionary))
while (pos < capacity) {
if (pos != nextNullIndex) {
dictionaryIds.putInt(pos, buffer.getShort())
} else {
seenNulls += 1
if (seenNulls < nullCount) nextNullIndex = ByteBufferHelper.getInt(nullsBuffer)
columnVector.putNull(pos)
}
pos += 1
}
case _: LongType =>
val dictionaryIds = columnVector.reserveDictionaryIds(capacity)
columnVector.setDictionary(new ColumnDictionary(longDictionary))
while (pos < capacity) {
if (pos != nextNullIndex) {
dictionaryIds.putInt(pos, buffer.getShort())
} else {
seenNulls += 1
if (seenNulls < nullCount) {
nextNullIndex = ByteBufferHelper.getInt(nullsBuffer)
}
columnVector.putNull(pos)
}
pos += 1
}
case _ => throw new IllegalStateException("Unsupported type in DictionaryEncoding.")
}
}
}
}
private[columnar] case object BooleanBitSet extends CompressionScheme {
override val typeId = 3
val BITS_PER_LONG = 64
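// Packs 64 boolean values per long word, least-significant bit first,
// preceded by the total element count.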
override def decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T])
: compression.Decoder[T] = {
new this.Decoder(buffer).asInstanceOf[compression.Decoder[T]]
}
override def encoder[T <: AtomicType](columnType: NativeColumnType[T]): compression.Encoder[T] = {
(new this.Encoder).asInstanceOf[compression.Encoder[T]]
}
override def supports(columnType: ColumnType[_]): Boolean = columnType == BOOLEAN
class Encoder extends compression.Encoder[BooleanType.type] {
private var _uncompressedSize = 0
override def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = {
_uncompressedSize += BOOLEAN.defaultSize
}
override def compress(from: ByteBuffer, to: ByteBuffer): ByteBuffer = {
to.putInt(BooleanBitSet.typeId)
// Total element count (1 byte per Boolean value)
.putInt(from.remaining)
while (from.remaining >= BITS_PER_LONG) {
var word = 0: Long
var i = 0
while (i < BITS_PER_LONG) {
if (BOOLEAN.extract(from)) {
word |= (1: Long) << i
}
i += 1
}
to.putLong(word)
}
if (from.hasRemaining) {
var word = 0: Long
var i = 0
while (from.hasRemaining) {
if (BOOLEAN.extract(from)) {
word |= (1: Long) << i
}
i += 1
}
to.putLong(word)
}
to.rewind()
to
}
override def uncompressedSize: Int = _uncompressedSize
override def compressedSize: Int = {
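// ceil(count / 64) words of 8 bytes each, plus 4 bytes for the element count.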
val extra = if (_uncompressedSize % BITS_PER_LONG == 0) 0 else 1
(_uncompressedSize / BITS_PER_LONG + extra) * 8 + 4
}
}
class Decoder(buffer: ByteBuffer) extends compression.Decoder[BooleanType.type] {
private val count = ByteBufferHelper.getInt(buffer)
private var currentWord = 0: Long
private var visited: Int = 0
override def next(row: InternalRow, ordinal: Int): Unit = {
val bit = visited % BITS_PER_LONG
visited += 1
if (bit == 0) {
currentWord = ByteBufferHelper.getLong(buffer)
}
row.setBoolean(ordinal, ((currentWord >> bit) & 1) != 0)
}
override def hasNext: Boolean = visited < count
override def decompress(columnVector: WritableColumnVector, capacity: Int): Unit = {
val countLocal = count
var currentWordLocal: Long = 0
var visitedLocal: Int = 0
val nullsBuffer = buffer.duplicate().order(ByteOrder.nativeOrder())
nullsBuffer.rewind()
val nullCount = ByteBufferHelper.getInt(nullsBuffer)
var nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1
var pos = 0
var seenNulls = 0
while (visitedLocal < countLocal) {
if (pos != nextNullIndex) {
val bit = visitedLocal % BITS_PER_LONG
visitedLocal += 1
if (bit == 0) {
currentWordLocal = ByteBufferHelper.getLong(buffer)
}
columnVector.putBoolean(pos, ((currentWordLocal >> bit) & 1) != 0)
} else {
seenNulls += 1
if (seenNulls < nullCount) {
nextNullIndex = ByteBufferHelper.getInt(nullsBuffer)
}
columnVector.putNull(pos)
}
pos += 1
}
}
}
}
private[columnar] case object IntDelta extends CompressionScheme {
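// Stores each value as a 1-byte delta from its predecessor when the delta
// fits in (Byte.MinValue, Byte.MaxValue]; otherwise writes the escape marker
// Byte.MinValue followed by the full 4-byte value. For example, the inputs
// 100, 101, 105 encode (after the type id) as [marker, 100 (4 bytes), 1, 4].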
override def typeId: Int = 4
override def decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T])
: compression.Decoder[T] = {
new Decoder(buffer, INT).asInstanceOf[compression.Decoder[T]]
}
override def encoder[T <: AtomicType](columnType: NativeColumnType[T]): compression.Encoder[T] = {
(new Encoder).asInstanceOf[compression.Encoder[T]]
}
override def supports(columnType: ColumnType[_]): Boolean = columnType == INT
class Encoder extends compression.Encoder[IntegerType.type] {
protected var _compressedSize: Int = 0
protected var _uncompressedSize: Int = 0
override def compressedSize: Int = _compressedSize
override def uncompressedSize: Int = _uncompressedSize
private var prevValue: Int = _
override def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = {
val value = row.getInt(ordinal)
val delta = value - prevValue
_compressedSize += 1
// If this is the first integer to be compressed, or the delta is out of byte range, then give
// up compressing this integer.
if (_uncompressedSize == 0 || delta <= Byte.MinValue || delta > Byte.MaxValue) {
_compressedSize += INT.defaultSize
}
_uncompressedSize += INT.defaultSize
prevValue = value
}
override def compress(from: ByteBuffer, to: ByteBuffer): ByteBuffer = {
to.putInt(typeId)
if (from.hasRemaining) {
var prev = from.getInt()
to.put(Byte.MinValue)
to.putInt(prev)
while (from.hasRemaining) {
val current = from.getInt()
val delta = current - prev
prev = current
if (Byte.MinValue < delta && delta <= Byte.MaxValue) {
to.put(delta.toByte)
} else {
to.put(Byte.MinValue)
to.putInt(current)
}
}
}
to.rewind().asInstanceOf[ByteBuffer]
}
}
class Decoder(buffer: ByteBuffer, columnType: NativeColumnType[IntegerType.type])
extends compression.Decoder[IntegerType.type] {
private var prev: Int = _
override def hasNext: Boolean = buffer.hasRemaining
override def next(row: InternalRow, ordinal: Int): Unit = {
val delta = buffer.get()
prev = if (delta > Byte.MinValue) prev + delta else ByteBufferHelper.getInt(buffer)
row.setInt(ordinal, prev)
}
override def decompress(columnVector: WritableColumnVector, capacity: Int): Unit = {
var prevLocal: Int = 0
val nullsBuffer = buffer.duplicate().order(ByteOrder.nativeOrder())
nullsBuffer.rewind()
val nullCount = ByteBufferHelper.getInt(nullsBuffer)
var nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1
var pos = 0
var seenNulls = 0
while (pos < capacity) {
if (pos != nextNullIndex) {
val delta = buffer.get()
prevLocal = if (delta > Byte.MinValue) prevLocal + delta else ByteBufferHelper.getInt(buffer)
columnVector.putInt(pos, prevLocal)
} else {
seenNulls += 1
if (seenNulls < nullCount) {
nextNullIndex = ByteBufferHelper.getInt(nullsBuffer)
}
columnVector.putNull(pos)
}
pos += 1
}
}
}
}
private[columnar] case object LongDelta extends CompressionScheme {
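// The 8-byte analogue of IntDelta: 1-byte deltas with the same Byte.MinValue
// escape marker followed by the full long value.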
override def typeId: Int = 5
override def decoder[T <: AtomicType](buffer: ByteBuffer, columnType: NativeColumnType[T])
: compression.Decoder[T] = {
new Decoder(buffer, LONG).asInstanceOf[compression.Decoder[T]]
}
override def encoder[T <: AtomicType](columnType: NativeColumnType[T]): compression.Encoder[T] = {
(new Encoder).asInstanceOf[compression.Encoder[T]]
}
override def supports(columnType: ColumnType[_]): Boolean = columnType == LONG
class Encoder extends compression.Encoder[LongType.type] {
protected var _compressedSize: Int = 0
protected var _uncompressedSize: Int = 0
override def compressedSize: Int = _compressedSize
override def uncompressedSize: Int = _uncompressedSize
private var prevValue: Long = _
override def gatherCompressibilityStats(row: InternalRow, ordinal: Int): Unit = {
val value = row.getLong(ordinal)
val delta = value - prevValue
_compressedSize += 1
// If this is the first long integer to be compressed, or the delta is out of byte range, then
// give up compressing this long integer.
if (_uncompressedSize == 0 || delta <= Byte.MinValue || delta > Byte.MaxValue) {
_compressedSize += LONG.defaultSize
}
_uncompressedSize += LONG.defaultSize
prevValue = value
}
override def compress(from: ByteBuffer, to: ByteBuffer): ByteBuffer = {
to.putInt(typeId)
if (from.hasRemaining) {
var prev = from.getLong()
to.put(Byte.MinValue)
to.putLong(prev)
while (from.hasRemaining) {
val current = from.getLong()
val delta = current - prev
prev = current
if (Byte.MinValue < delta && delta <= Byte.MaxValue) {
to.put(delta.toByte)
} else {
to.put(Byte.MinValue)
to.putLong(current)
}
}
}
to.rewind().asInstanceOf[ByteBuffer]
}
}
class Decoder(buffer: ByteBuffer, columnType: NativeColumnType[LongType.type])
extends compression.Decoder[LongType.type] {
private var prev: Long = _
override def hasNext: Boolean = buffer.hasRemaining
override def next(row: InternalRow, ordinal: Int): Unit = {
val delta = buffer.get()
prev = if (delta > Byte.MinValue) prev + delta else ByteBufferHelper.getLong(buffer)
row.setLong(ordinal, prev)
}
override def decompress(columnVector: WritableColumnVector, capacity: Int): Unit = {
var prevLocal: Long = 0
val nullsBuffer = buffer.duplicate().order(ByteOrder.nativeOrder())
nullsBuffer.rewind()
val nullCount = ByteBufferHelper.getInt(nullsBuffer)
var nextNullIndex = if (nullCount > 0) ByteBufferHelper.getInt(nullsBuffer) else -1
var pos = 0
var seenNulls = 0
while (pos < capacity) {
if (pos != nextNullIndex) {
val delta = buffer.get()
prevLocal = if (delta > Byte.MinValue) prevLocal + delta else ByteBufferHelper.getLong(buffer)
columnVector.putLong(pos, prevLocal)
} else {
seenNulls += 1
if (seenNulls < nullCount) {
nextNullIndex = ByteBufferHelper.getInt(nullsBuffer)
}
columnVector.putNull(pos)
}
pos += 1
}
}
}
}
| ron8hu/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/compression/compressionSchemes.scala | Scala | apache-2.0 | 28,330 |
package scalaprops
import scalaz._
import scalaz.Id.Id
import scalaz.Maybe.Just
import scalaz.std.anyVal._
import scalaz.syntax.equal._
import Property.forAll
import ScalapropsScalaz._
object IMapTest extends Scalaprops {
val testLaws =
Properties.list(
scalazlaws.bind.all[({ type l[a] = Int ==>> a })#l],
scalazlaws.align.all[({ type l[a] = Int ==>> a })#l],
scalazlaws.zip.all[({ type l[a] = Int ==>> a })#l],
scalazlaws.traverse.all[({ type l[a] = Int ==>> a })#l]
)
val bifoldable = scalazlaws.bifoldable.all[==>>]
val order = scalazlaws.order.all[Int ==>> Int]
val laws2 = Properties.list(
scalazlaws.monoid.all[Int ==>> Int],
scalazlaws.semilattice.all[Int ==>> ISet[Int]]
)
val conjunction = {
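// Under Tags.Conjunction the semigroup combines maps by key intersection
// (IMap.mapIntersection), as opposed to the default Monoid, which unions keys.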
implicit def imapConjunctionGen[A: Gen: Order, B: Gen]: Gen[((A ==>> B) @@ Tags.Conjunction)] =
Tag.subst(Gen[A ==>> B])
implicit val s: Semigroup[(Int ==>> Int) @@ Tags.Conjunction] = IMap.mapIntersection[Int, Int]
implicit val e: Equal[(Int ==>> Int) @@ Tags.Conjunction] = Tags.Conjunction.subst(Equal[Int ==>> Int])
scalazlaws.semigroup.all[(Int ==>> Int) @@ Tags.Conjunction]
}
val intersectionWithKey = forAll { (a: Int ==>> Int, b: Int ==>> Int, f: (Int, Int, Int) => Int) =>
val aa = a.toList.toMap
val bb = b.toList.toMap
a.intersectionWithKey(b)(f).toList == scalaz.std.map.intersectWithKey(aa, bb)(f).toList.sorted
}
val mapKeys = {
type KEY = Short
type VALUE = Byte
forAll { (a: KEY ==>> VALUE, f: KEY => KEY) => a.mapKeys(f) == ==>>.fromList(a.toList.map(x => (f(x._1), x._2))) }
}
val insertWithKey = {
type KEY = Short
type VALUE = Byte
Property.forAll { (a: KEY ==>> VALUE, k: KEY, v: VALUE, f: (KEY, VALUE, VALUE) => VALUE) =>
val m = a.toList.toMap
val i = if (m contains k) {
k -> f(k, v, m(k))
} else {
k -> v
}
val x = a.insertWithKey(f, k, v)
val y = ==>>.fromList((m + i).toList)
Equal[KEY ==>> VALUE].equal(x, y)
}.toProperties((), Param.minSuccessful(2000))
}
val insertWith = {
import scalaz.syntax.std.function2._
type KEY = Short
type VALUE = Byte
Property.forAll { (a: KEY ==>> VALUE, k: KEY, v: VALUE, f: (VALUE, VALUE) => VALUE) =>
val m = a.toList.toMap
val x = a.insertWith(f.flip, k, v)
val y = ==>>.fromList(scalaz.std.map.insertWith(m, k, v)(f).toList)
Equal[KEY ==>> VALUE].equal(x, y)
}.toProperties((), Param.minSuccessful(2000))
}
val updateWithKey = {
type KEY = Byte
type VAL = Byte
val E = Equal[KEY ==>> VAL]
Property.forAll { (a: KEY ==>> VAL, k: KEY, f: (KEY, VAL) => Maybe[VAL]) =>
val r = a.updateWithKey(k, f)
a.lookup(k) match {
case Just(v1) =>
f(k, v1) match {
case Just(v2) =>
E.equal(a.delete(k).insert(k, v2), r)
case Maybe.Empty() =>
E.equal(a.delete(k), r)
}
case Maybe.Empty() =>
E.equal(a, r)
}
}.toProperties((), Param.minSuccessful(5000))
}
val updateLookupWithKey = {
type KEY = Byte
type VAL = Byte
val E = Equal[KEY ==>> VAL]
Property.forAll { (a: KEY ==>> VAL, k: KEY, f: (KEY, VAL) => Maybe[VAL]) =>
val (o, r) = a.updateLookupWithKey(k, f)
assert(E.equal(r, a.updateWithKey(k, f)))
a.lookup(k) match {
case Just(v1) =>
f(k, v1) match {
case Just(v2) =>
E.equal(a.delete(k).insert(k, v2), r) && (Maybe.just(v2) === o)
case Maybe.Empty() =>
E.equal(a.delete(k), r) && (Maybe.just(v1) === o)
}
case Maybe.Empty() =>
E.equal(a, r) && o.isEmpty
}
}.toProperties((), Param.minSuccessful(5000))
}
val alter = {
type KEY = Byte
type VAL = Byte
val E = Equal[KEY ==>> VAL]
Property.forAll { (a: KEY ==>> VAL, k: KEY, f: Maybe[VAL] => Maybe[VAL]) =>
val r = a.alter(k, f)
a.lookup(k) match {
case Just(v1) =>
f(Maybe.just(v1)) match {
case Just(v2) =>
E.equal(a.insert(k, v2), r)
case Maybe.Empty() =>
E.equal(a.delete(k), r)
}
case Maybe.Empty() =>
f(Maybe.empty) match {
case Just(v2) =>
E.equal(a.insert(k, v2), r)
case Maybe.Empty() =>
E.equal(a, r)
}
}
}.toProperties((), Param.minSuccessful(5000))
}
val updateAt = {
type KEY = Byte
type VAL = Byte
val E = Equal[KEY ==>> VAL]
Property.NoShrink.property2 { (a0: NonEmptyList[(KEY, VAL)], f: (KEY, VAL) => Maybe[VAL]) =>
val a = IMap.fromFoldable(a0)
Property.forAllG(Gen.choose(0, a.size - 1)) { i =>
val r = a.updateAt(i, f)
a.elemAt(i) match {
case Just((k, v1)) =>
f(k, v1) match {
case Just(v2) =>
E.equal(r, a.update(k, _ => Maybe.just(v2)))
case Maybe.Empty() =>
E.equal(a.deleteAt(i), r) && E.equal(a.delete(k), r)
}
case Maybe.Empty() =>
E.equal(a, r)
}
}
}.toProperties((), Param.minSuccessful(5000))
}
val updateMinWithKey = {
type KEY = Byte
type VAL = Byte
val E = Equal[KEY ==>> VAL]
Property.forAll { (a: (KEY ==>> VAL), f: (KEY, VAL) => Maybe[VAL]) =>
val b = a.updateMinWithKey(f)
a.minViewWithKey match {
case Just(((k, v1), c)) =>
f(k, v1) match {
case Just(v2) =>
(a.size == b.size) && E.equal(b, c.insert(k, v2))
case Maybe.Empty() =>
((a.size - 1) == b.size) && E.equal(b, c)
}
case Maybe.Empty() =>
a.isEmpty && b.isEmpty
}
}.toProperties((), Param.minSuccessful(5000))
}
val updateMaxWithKey = {
type KEY = Byte
type VAL = Byte
val E = Equal[KEY ==>> VAL]
Property.forAll { (a: (KEY ==>> VAL), f: (KEY, VAL) => Maybe[VAL]) =>
val b = a.updateMaxWithKey(f)
a.maxViewWithKey match {
case Just(((k, v1), c)) =>
f(k, v1) match {
case Just(v2) =>
(a.size == b.size) && E.equal(b, c.insert(k, v2))
case Maybe.Empty() =>
((a.size - 1) == b.size) && E.equal(b, c)
}
case Maybe.Empty() =>
a.isEmpty && b.isEmpty
}
}.toProperties((), Param.minSuccessful(5000))
}
val unionWithKey = {
type KEY = Byte
type VAL = Byte
val E = Equal[KEY ==>> VAL]
Property.forAll { (a: KEY ==>> VAL, b: KEY ==>> VAL, f: (KEY, VAL, VAL) => VAL) =>
val c = a.unionWithKey(b)(f)
val aa = a.toList.toMap
val bb = b.toList.toMap
val cc = scalaz.std.map.unionWithKey(aa, bb)(f)
E.equal(IMap.fromList(cc.toList), c)
}.toProperties((), Param.minSuccessful(5000))
}
val traverseWithKey = {
type KEY = Byte
type VAL = Int
type C = Short
val T = Traverse[({ type l[a] = KEY ==>> a })#l]
def test[F[_]: Applicative](implicit E: Equal[F[KEY ==>> C]], G: Gen[F[C]]) =
Property.forAll { (a: KEY ==>> VAL, f: VAL => F[C]) =>
val g: (KEY, VAL) => F[C] = (_, v) => f(v)
val x = T.traverse(a)(f)
val y = a.traverseWithKey(g)
E.equal(x, y)
}
Properties
.list(
test[Id].toProperties("Id"),
test[Maybe].toProperties("Maybe"),
test[IList].toProperties("IList", Param.maxSize(5))
)
.andThenParam(Param.minSuccessful(5000))
}
val foldMapWithKey = {
type KEY = Byte
type VAL = Int
type C = IList[Byte]
val F = Foldable[({ type l[a] = KEY ==>> a })#l]
Property.forAll { (a: KEY ==>> VAL, f: VAL => C) =>
val g: (KEY, VAL) => C = (_, v) => f(v)
val x = F.foldMap(a)(f)
val y = a.foldMapWithKey(g)
val z = Foldable[IList].foldMap(a.values)(f)
Equal[C].equal(x, y) && Equal[C].equal(y, z)
}.toProperties((), Param.minSuccessful(5000) andThen Param.maxSize(3))
}
}
| scalaprops/scalaprops | scalaz/src/test/scala/scalaprops/IMapTest.scala | Scala | mit | 8,086 |
package org.joda.time.field
import org.joda.time.Chronology
import org.joda.time.DateTimeField
import org.joda.time.DateTimeFieldType
import org.joda.time.IllegalFieldValueException
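/**
* Delegates to another datetime field while skipping a single value, most
* useful for skipping year zero: with skip = 0 the externally visible values
* run ..., -2, -1, 1, 2, ... and attempting to set the skipped value throws
* an IllegalFieldValueException.
*/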
@SerialVersionUID(-8869148464118507846L)
class SkipDateTimeField(private val chronology: Chronology,
field: DateTimeField,
private val skip: Int)
extends DelegatedDateTimeField(field) {
private var iChronology: Chronology = null
private var iSkip: Int = _
iChronology = chronology
val min = super.getMinimumValue
@transient private val iMinValue: Int =
if (min < skip) min - 1 else if (min == skip) skip + 1 else min
iSkip = skip
def this(chronology: Chronology, field: DateTimeField) {
this(chronology, field, 0)
}
override def get(millis: Long): Int = {
var value = super.get(millis)
if (value <= iSkip) {
value -= 1
}
value
}
override def set(millis: Long, value: Int): Long = {
var _value: Int = value
FieldUtils.verifyValueBounds(this, _value, iMinValue, getMaximumValue)
if (_value <= iSkip) {
if (_value == iSkip) {
throw IllegalFieldValueException.create(DateTimeFieldType.year(),
Integer.valueOf(_value),
null,
null)
}
_value += 1
}
super.set(millis, _value)
}
override def getMinimumValue(): Int = iMinValue
private def readResolve(): AnyRef = getType.getField(iChronology)
}
| mdedetrich/soda-time | shared/src/main/scala/org/joda/time/field/SkipDateTimeField.scala | Scala | bsd-2-clause | 1,580 |