code stringlengths 5 to 1M | repo_name stringlengths 5 to 109 | path stringlengths 6 to 208 | language stringclasses 1 value | license stringclasses 15 values | size int64 5 to 1M |
---|---|---|---|---|---|
package wandou.math.random
/**
* Strategy interface for seeding random number generators.
* @author Daniel Dyer
*/
trait SeedGenerator {
/**
* Generate a seed value for a random number generator.
* @param length The length of the seed to generate (in bytes).
* @return A byte array containing the seed data.
* @throws SeedException If a seed cannot be generated for any reason.
*/
@throws(classOf[SeedException])
def generateSeed(length: Int): Array[Byte]
}
object SeedGenerator extends SeedGenerator {
private val DEBUG_PROPERTY = "wandou.math.random.debug"
/** Delegate generators. */
private val GENERATORS = List[SeedGenerator](
DevRandomSeedGenerator,
RandomDotOrgSeedGenerator,
SecureRandomSeedGenerator)
/**
* Generates a seed by trying each of the available strategies in
* turn until one succeeds. Tries the most suitable strategy first
* and eventually degrades to the least suitable (but guaranteed to
* work) strategy.
* @param length The length (in bytes) of the seed.
* @return A random seed of the requested length.
*/
def generateSeed(length: Int): Array[Byte] = {
for (generator <- GENERATORS) {
try {
val seed = generator.generateSeed(length)
try {
val debug = System.getProperty(DEBUG_PROPERTY, "false").equals("true")
if (debug) {
val seedString = BinaryUtils.convertBytesToHexString(seed)
System.out.println(seed.length + " bytes of seed data acquired from " + generator + ":")
System.out.println(" " + seedString)
}
} catch {
case ex: SecurityException => // Ignore: without permission to read system properties we simply skip the debug output.
}
return seed
} catch {
case ex: SeedException => // This strategy failed; fall through and try the next generator.
}
}
// This shouldn't happen, as at least one of the generators should be
// able to generate a seed.
throw new IllegalStateException("All available seed generation strategies failed.")
}
}
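/*
 * Illustrative usage sketch (not part of the original source): ask the companion object for a
 * 16-byte seed and print it as hex. The hex formatting below deliberately avoids assuming
 * anything about BinaryUtils.
 */
object SeedGeneratorExample extends App {
  // Falls through /dev/random -> random.org -> SecureRandom until one strategy succeeds.
  val seed = SeedGenerator.generateSeed(16)
  println(seed.map("%02x".format(_)).mkString)
}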
trait SeedException extends Exception
object SeedException {
def apply(message: String, cause: Throwable) = new Exception(message, cause) with SeedException
def apply(message: String) = new Exception(message) with SeedException
} | wandoulabs/wandou-math | wandou-math/src/main/scala/wandou/math/random/SeedGenerator.scala | Scala | apache-2.0 | 2,178 |
package controllers.dht
sealed trait DHTPutError {
val description: String
}
object DHTPutNotInitialized extends DHTPutError {
val description: String = "The DHT has not been initialized."
}
object DHTPutUnknownError extends DHTPutError {
val description = "Could not insert data into the DHT due to an unknown error."
} | windymelt/p2p2ch | app/controllers/dht/DHTPutError.scala | Scala | bsd-3-clause | 351 |
/*
* Copyright 2015 Nicolas Rinaudo
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kantan.csv.laws
import kantan.csv.{rfc, RowCodec}
import kantan.csv.engine.ReaderEngine
import kantan.csv.laws.KnownFormatsReaderLaws.Car
import kantan.csv.ops._
import scala.io.Codec
trait KnownFormatsReaderLaws {
implicit def engine: ReaderEngine
implicit val carFormat: RowCodec[Car] = RowCodec.caseCodec(1, 2, 3, 4, 0)(Car.apply)(Car.unapply)
def read(res: String): List[Car] = {
implicit val codec: Codec = Codec.UTF8
getClass.getResource(s"/known_formats/$res.csv").unsafeReadCsv[List, Car](rfc.withHeader(true))
}
lazy val reference: List[Car] = read("raw")
def excelMac120: Boolean = read("excel_mac_12_0") == reference
def numbers103: Boolean = read("numbers_1_0_3") == reference
def googleDocs: Boolean = read("google_docs") == reference
}
object KnownFormatsReaderLaws {
final case class Car(make: String, model: String, description: Option[String], price: Int, year: Int)
}
| nrinaudo/tabulate | laws/jvm/src/main/scala/kantan/csv/laws/KnownFormatsReaderLaws.scala | Scala | mit | 1,530 |
package controllers
import java.io.File
import javax.inject.{Singleton, Inject}
import play.api.Play
import play.api.Play.current
import services.UUIDGenerator
import org.slf4j.{LoggerFactory, Logger}
import play.api.mvc._
/**
* Instead of declaring an object of Application as per the template project, we must declare a class given that
* the application context is going to be responsible for creating it and wiring it up with the UUID generator service.
* @param uuidGenerator the UUID generator service we wish to receive.
*/
@Singleton
class Application @Inject() (uuidGenerator: UUIDGenerator) extends Controller {
private final val logger: Logger = LoggerFactory.getLogger(classOf[Application])
def catchall(string: String) = index
def index = Action {
val javascripts = {
if (Play.isDev) {
// Load all .js and .coffeescript files within app/assets
Option(Play.getFile("app/assets")).
filter(_.exists).
map(findScripts).
getOrElse(Nil)
} else {
// Concatenated and minified by UglifyJS
"concat.min.js" :: Nil
}
}
Ok(views.html.index(javascripts))
}
private def findScripts(base: File): Seq[String] = {
val baseUri = base.toURI
directoryFlatMap(base, scriptMapper).
map(f => baseUri.relativize(f.toURI).getPath)
}
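// Maps an asset source file to the script name served to the client: .js files pass through,
// while .coffee files are referenced by the .js name they compile to (e.g. "app.coffee" -> "app.js").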
private def scriptMapper(file: File): Option[File] = {
val name = file.getName
if (name.endsWith(".js")) Some(file)
else if (name.endsWith(".coffee")) Some(new File(file.getParent, name.dropRight(6) + "js"))
else None
}
private def directoryFlatMap[A](in: File, fileFun: File => Option[A]): Seq[A] = {
in.listFiles.flatMap {
case f if f.isDirectory => directoryFlatMap(f, fileFun)
case f if f.isFile => fileFun(f)
}
}
def randomUUID = Action {
logger.info("calling UUIDGenerator...")
Ok(uuidGenerator.generate.toString)
}
}
| ranraj/reactive-play-angular | app/controllers/Application.scala | Scala | apache-2.0 | 1,928 |
/*
* Copyright 2014 IBM Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ibm.spark.kernel.protocol.v5.magic
import java.io.OutputStream
import akka.actor.Actor
import com.ibm.spark.interpreter.ExecuteError
import com.ibm.spark.magic.{MagicLoader, MagicOutput}
import com.ibm.spark.utils.LogLike
import scala.util.parsing.combinator.RegexParsers
case class ValidateMagicMessage(message: String) {
override def toString = message
}
case class ExecuteMagicMessage(message: String) {
override def toString = message
}
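/*
 * Illustrative usage sketch (not part of the original source): querying a running MagicManager
 * actor with the ask pattern. The actor reference is assumed to have been created elsewhere,
 * e.g. via system.actorOf(Props(classOf[MagicManager], magicLoader)).
 */
object MagicManagerUsageExample {
  import akka.actor.ActorRef
  import akka.pattern.ask
  import akka.util.Timeout
  import scala.concurrent.Future
  import scala.concurrent.duration._

  def isMagic(magicManager: ActorRef, line: String): Future[Boolean] = {
    implicit val timeout: Timeout = Timeout(5.seconds)
    // Replies with true when the line starts with %magic or %%magic.
    (magicManager ? ValidateMagicMessage(line)).mapTo[Boolean]
  }
}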
class MagicManager(magicLoader: MagicLoader)
extends Actor with LogLike with RegexParsers
{
/**
* Regular expression to match cases of %magic or %%magic as starting
* sequence.
*/
private val magicRegex = """^[%]{1,2}(\w+)""".r
override def receive: Receive = {
case message: ValidateMagicMessage =>
sender ! magicRegex.findFirstIn(message.toString).nonEmpty
// TODO: Support using the provided output stream, which sends messages
// dynamically to the KernelMessageRelay, to output magic-related
// print messages
case (message: ExecuteMagicMessage, outputStream: OutputStream) =>
val matchData =
magicRegex.findFirstMatchIn(message.toString).get
val (magicName, code) = (
matchData.group(1), matchData.after(1).toString.trim
)
val isCell = message.toString.startsWith("%%")
var result: Either[MagicOutput, ExecuteError] = null
if (magicLoader.hasMagic(magicName)) {
// TODO: Offload this to another actor
try {
val magicClassName = magicLoader.magicClassName(magicName)
// Set output stream to use for this magic
magicLoader.dependencyMap.setOutputStream(outputStream)
val output: MagicOutput =
magicLoader.executeMagic(magicClassName, code, isCell)
result = Left(output)
} catch {
case ex: Throwable =>
result = Right(ExecuteError(
ex.getClass.getName,
ex.getLocalizedMessage,
ex.getStackTrace.map(_.toString).toList
))
}
} else {
result = Right(ExecuteError(
"Missing Magic", s"Magic $magicName does not exist!", List()
))
}
sender ! result
}
} | bpburns/spark-kernel | kernel/src/main/scala/com/ibm/spark/kernel/protocol/v5/magic/MagicManager.scala | Scala | apache-2.0 | 2,834 |
package akka.persistence.couchbase
import java.util.concurrent.TimeUnit
import java.util.Date
import akka.actor.{ExtendedActorSystem, Extension, ExtensionId, ExtensionIdProvider}
import akka.event.Logging
import com.couchbase.client.java._
import com.couchbase.client.java.document.json.JsonObject
import com.couchbase.client.java.env.{CouchbaseEnvironment, DefaultCouchbaseEnvironment}
import com.couchbase.client.java.util.Blocking
import com.couchbase.client.java.view.DesignDocument
import scala.util.{Failure, Try}
trait Couchbase extends Extension {
def environment: CouchbaseEnvironment
def environmentConfig: CouchbaseEnvironmentConfig
def journalBucket: Bucket
def journalConfig: CouchbaseJournalConfig
def snapshotStoreBucket: Bucket
def snapshotStoreConfig: CouchbaseSnapshotStoreConfig
}
private class DefaultCouchbase(val system: ExtendedActorSystem) extends Couchbase {
private val log = Logging(system, getClass.getName)
override val environmentConfig = CouchbaseEnvironmentConfig(system)
override val journalConfig = CouchbaseJournalConfig(system)
override val snapshotStoreConfig = CouchbaseSnapshotStoreConfig(system)
override val environment = DefaultCouchbaseEnvironment
.builder()
.kvTimeout(environmentConfig.kvTimeout.toMillis)
.connectTimeout(environmentConfig.connectTimeout.toMillis)
.socketConnectTimeout(environmentConfig.socketConnectTimeout.toMillis.toInt)
.maxRequestLifetime(environmentConfig.maxRequestLifetime.toMillis)
.build();
private val journalCluster = journalConfig.createCluster(environment)
override val journalBucket = openBucketWithRetry(journalConfig, journalCluster)
private val snapshotStoreCluster = snapshotStoreConfig.createCluster(environment)
override val snapshotStoreBucket = openBucketWithRetry(snapshotStoreConfig, snapshotStoreCluster)
updateJournalDesignDocs()
updateSnapshotStoreDesignDocs()
def shutdown(): Unit = {
attemptSafely("Closing journal bucket")(journalBucket.close())
attemptSafely("Shutting down journal cluster")(journalCluster.disconnect())
attemptSafely("Closing snapshot store bucket")(snapshotStoreBucket.close())
attemptSafely("Shutting down snapshot store cluster")(snapshotStoreCluster.disconnect())
attemptSafely("Shutting down environment") {
Blocking.blockForSingle(environment.shutdownAsync().single(), 30, TimeUnit.SECONDS)
}
}
private def openBucketWithRetry(config: DefaultCouchbasePluginConfig, cluster: Cluster): Bucket = {
if(environmentConfig.openBucketRetryTimeout.toSeconds == 0) {
return config.openBucket(cluster)
}
var bucket: Bucket = null
val end: Date = new Date(System.currentTimeMillis() + environmentConfig.openBucketRetryTimeout.toMillis)
while(bucket == null) {
try {
bucket = config.openBucket(cluster)
} catch {
case e: Throwable => {
val now: Date = new Date()
if(now.after(end)) {
throw e // give up once the retry timeout has elapsed
} else {
log.warning("Could not connect to bucket. Retrying in {} seconds...", environmentConfig.openBucketRetryInterval.toSeconds)
TimeUnit.SECONDS.sleep(environmentConfig.openBucketRetryInterval.toSeconds)
}
}
}
}
return bucket
}
private def attemptSafely(message: String)(block: => Unit): Unit = {
log.debug(message)
Try(block) recoverWith {
case e =>
log.error(e, message)
Failure(e)
}
}
/**
* Initializes all design documents.
*/
private def updateJournalDesignDocs(): Unit = {
val designDocs = JsonObject.create()
.put("views", JsonObject.create()
.put("by_sequenceNr", JsonObject.create()
.put("map",
"""
|function (doc, meta) {
| if (doc.dataType === 'journal-messages') {
| var messages = doc.messages;
| for (var i = 0, l = messages.length; i < l; i++) {
| var message = messages[i];
| emit([message.persistenceId, message.sequenceNr], message);
| }
| }
|}
""".stripMargin
)
)
)
updateDesignDocuments(journalBucket, "journal", designDocs)
}
/**
* Initializes all design documents.
*/
private def updateSnapshotStoreDesignDocs(): Unit = {
val designDocs = JsonObject.create()
.put("views", JsonObject.create()
.put("by_sequenceNr", JsonObject.create()
.put("map",
"""
|function (doc) {
| if (doc.dataType === 'snapshot-message') {
| emit([doc.persistenceId, doc.sequenceNr], null);
| }
|}
""".stripMargin
)
)
.put("by_timestamp", JsonObject.create()
.put("map",
"""
|function (doc) {
| if (doc.dataType === 'snapshot-message') {
| emit([doc.persistenceId, doc.timestamp], null);
| }
|}
""".stripMargin
)
)
.put("all", JsonObject.create()
.put("map",
"""
|function (doc) {
| if (doc.dataType === 'snapshot-message') {
| emit(doc.persistenceId, null);
| }
|}
""".stripMargin
)
)
)
updateDesignDocuments(snapshotStoreBucket, "snapshots", designDocs)
}
private def updateDesignDocuments(bucket: Bucket, name: String, raw: JsonObject): Unit = {
Try {
val designDocument = DesignDocument.from(name, raw)
bucket.bucketManager.upsertDesignDocument(designDocument)
} recoverWith {
case e =>
log.error(e, "Update design docs with name: {}", name)
Failure(e)
}
}
}
object CouchbaseExtension extends ExtensionId[Couchbase] with ExtensionIdProvider {
override def lookup(): ExtensionId[Couchbase] = CouchbaseExtension
override def createExtension(system: ExtendedActorSystem): Couchbase = {
val couchbase = new DefaultCouchbase(system)
system.registerOnTermination(couchbase.shutdown())
couchbase
}
}
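/*
 * Illustrative usage sketch (not part of the original source): looking the extension up through
 * Akka's extension mechanism. Configuration is assumed to come from the application's
 * reference.conf/application.conf as usual.
 */
object CouchbaseExtensionUsageExample {
  def journalBucketName(system: akka.actor.ActorSystem): String = {
    val couchbase: Couchbase = CouchbaseExtension(system)
    // Buckets and the environment are closed when the actor system terminates, because
    // createExtension registers couchbase.shutdown() via registerOnTermination.
    couchbase.journalBucket.name()
  }
}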
| Product-Foundry/akka-persistence-couchbase | src/main/scala/akka/persistence/couchbase/CouchbaseExtension.scala | Scala | apache-2.0 | 6,442 |
package at.logic.gapt.proofs.lksk
import at.logic.gapt.expr._
import at.logic.gapt.proofs.lk.{ BinaryLKProof, CutRule, UnaryLKProof, LKToExpansionProof }
import at.logic.gapt.proofs.lk.base.LKProof
import at.logic.gapt.proofs.expansionTrees.{ merge => mergeTree, _ }
import at.logic.gapt.proofs.occurrences.FormulaOccurrence
/**
* Extends expansion tree extraction to lksk.
*/
object LKskToExpansionProof extends LKskToExpansionProof;
class LKskToExpansionProof extends LKToExpansionProof {
override def apply( proof: LKProof ): ExpansionSequent = {
val map = extract( proof )
mergeTree( ( proof.root.antecedent.map( fo => map( fo ) ), proof.root.succedent.map( fo => map( fo ) ) ) )
}
def extract( proof: LKProof ): Map[FormulaOccurrence, ExpansionTreeWithMerges] = proof match {
case Axiom( r ) =>
handleAxiom( r )
case WeakeningRightRule( parent, r, p ) =>
val map = extract( parent )
val contextmap = getMapOfContext( ( r.antecedent ++ r.succedent ).toSet - p, map )
contextmap + ( ( p, ETWeakening( p.formula ) ) )
case WeakeningLeftRule( parent, r, p ) =>
val map = extract( parent )
val contextmap = getMapOfContext( ( r.antecedent ++ r.succedent ).toSet - p, map )
contextmap + ( ( p, ETWeakening( p.formula ) ) )
case ForallSkLeftRule( parent, r, a, p, t ) =>
val map = extract( parent )
val contextmap = getMapOfContext( ( r.antecedent ++ r.succedent ).toSet - p, map )
contextmap + ( ( p, ETWeakQuantifier( p.formula, List( Tuple2( map( a ), t ) ) ) ) )
case ExistsSkRightRule( parent, r, a, p, t ) =>
val map = extract( parent )
val contextmap = getMapOfContext( ( r.antecedent ++ r.succedent ).toSet - p, map )
contextmap + ( ( p, ETWeakQuantifier( p.formula, List( Tuple2( map( a ), t ) ) ) ) )
case ForallSkRightRule( parent, r, a, p, skt ) =>
val map = extract( parent )
val contextmap = getMapOfContext( ( r.antecedent ++ r.succedent ).toSet - p, map )
contextmap + ( ( p, ETSkolemQuantifier( p.formula, skt, map( a ) ) ) )
case ExistsSkLeftRule( parent, r, a, p, skt ) =>
val map = extract( parent )
val contextmap = getMapOfContext( ( r.antecedent ++ r.succedent ).toSet - p, map )
contextmap + ( ( p, ETSkolemQuantifier( p.formula, skt, map( a ) ) ) )
case UnaryLKProof( _, up, r, _, p ) =>
val map = extract( up )
handleUnary( r, p, map, proof )
case CutRule( up1, up2, r, _, _ ) =>
getMapOfContext( ( r.antecedent ++ r.succedent ).toSet, extract( up1 ) ++ extract( up2 ) )
case BinaryLKProof( _, up1, up2, r, a1, a2, Some( p ) ) =>
val map = extract( up1 ) ++ extract( up2 )
handleBinary( r, map, proof, a1, a2, p )
}
}
| loewenheim/gapt | src/main/scala/at/logic/gapt/proofs/lksk/LKskToExpansionProof.scala | Scala | gpl-3.0 | 2,752 |
package fpgatidbits.streams
import Chisel._
import fpgatidbits.axi._
// takes in two streams (<element> and <repCnt>) and repeats each
// element in <element> <repCnt> times. example:
// <element> = A B C D E F
// <repCnt> = 2 1 0 3
// <out> = A A B D D D
object StreamRepeatElem {
def apply(inElem: DecoupledIO[UInt], inRepCnt: DecoupledIO[UInt]):
DecoupledIO[UInt] = {
val repgen = Module(new StreamRepeatElem(inElem.bits.getWidth(),
inRepCnt.bits.getWidth())).io
repgen.inElem <> inElem
repgen.inRepCnt <> inRepCnt
repgen.out
}
def apply[Te <: Data](gen: Te, inElem: DecoupledIO[Te], inRepCnt: DecoupledIO[UInt]):
DecoupledIO[Te] = {
val repgen = Module(new StreamRepeatElem(gen.getWidth(),
inRepCnt.bits.getWidth())).io
val ret = Decoupled(gen)
repgen.inElem.bits := inElem.bits.toBits
repgen.inElem.valid := inElem.valid
inElem.ready := repgen.inElem.ready
repgen.inRepCnt <> inRepCnt
ret.valid := repgen.out.valid
ret.bits := gen.fromBits(repgen.out.bits)
repgen.out.ready := ret.ready
ret
}
}
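/*
 * Illustrative usage sketch (not part of the original source): wiring the helper inside a parent
 * module that exposes two incoming Decoupled streams. The widths are assumed to match the
 * streams handed to the companion apply.
 */
class StreamRepeatElemExample(dataWidth: Int, repWidth: Int) extends Module {
  val io = new Bundle {
    val elems = Decoupled(UInt(width = dataWidth)).flip
    val reps = Decoupled(UInt(width = repWidth)).flip
    val out = Decoupled(UInt(width = dataWidth))
  }
  // Each element arriving on io.elems is emitted io.reps times on io.out.
  io.out <> StreamRepeatElem(io.elems, io.reps)
}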
class StreamRepeatElem(dataWidth: Int, repWidth: Int) extends Module {
val io = new Bundle {
val inElem = new AXIStreamSlaveIF(UInt(width = dataWidth))
val inRepCnt = new AXIStreamSlaveIF(UInt(width = repWidth))
val out = new AXIStreamMasterIF(UInt(width = dataWidth))
}
io.inElem.ready := Bool(false)
io.inRepCnt.ready := Bool(false)
io.out.valid := Bool(false)
val regElem = Reg(init = UInt(0, dataWidth))
val regRep = Reg(init = UInt(0, repWidth))
io.out.bits := regElem
val bothValid = io.inElem.valid & io.inRepCnt.valid
when(regRep === UInt(0)) {
when (bothValid) {
regElem := io.inElem.bits
regRep := io.inRepCnt.bits
io.inElem.ready := Bool(true)
io.inRepCnt.ready := Bool(true)
}
}
.otherwise {
io.out.valid := Bool(true)
when(io.out.ready) {
regRep := regRep - UInt(1)
// last repetition? try to prefetch the next element and repeat count
when(regRep === UInt(1)) {
// prefetch elem and repcount, if possible
when (bothValid) {
regElem := io.inElem.bits
regRep := io.inRepCnt.bits
io.inElem.ready := Bool(true)
io.inRepCnt.ready := Bool(true)
}
}
}
}
}
class StreamRepeatElemTester(c: StreamRepeatElem) extends Tester(c) {
var elems = Array(100, 200, 300, 400)
var reps = Array(3, 0, 2, 1)
val l = elems.size
var golden: Array[Int] = Array[Int]()
for(i <- 0 until l) {
val r = reps(i)
for(j <- 0 until r) {
golden = golden ++ Array(elems(i))
}
}
var res: Array[Int] = Array[Int]()
def streamToArray(s: DecoupledIO[UInt], a: Array[Int]): Array[Int] = {
if(peek(s.valid) == 1 && peek(s.ready)==1) {
return a ++ Array[Int](peek(s.bits).toInt)
} else return a
}
def arrayToStream_preStep(s: DecoupledIO[UInt], a: Array[Int]) = {
if(a.size > 0) {
poke(s.bits, a(0))
poke(s.valid, 1)
}
}
def arrayToStream_postStep(s: DecoupledIO[UInt], a: Array[Int]): Array[Int] = {
if(peek(s.valid) == 1 && peek(s.ready)==1) {
return a.drop(1)
} else return a
}
poke(c.io.out.ready, 1)
while(res.size != golden.size) {
arrayToStream_preStep(c.io.inElem, elems)
arrayToStream_preStep(c.io.inRepCnt, reps)
step(1)
res=streamToArray(c.io.out, res)
elems=arrayToStream_postStep(c.io.inElem, elems)
reps=arrayToStream_postStep(c.io.inRepCnt, reps)
}
println(res.deep.mkString(", "))
println(golden.deep.mkString(", "))
expect(res.deep == golden.deep, "Result equals golden")
}
| maltanar/fpga-tidbits | src/main/scala/fpgatidbits/streams/StreamRepeatElem.scala | Scala | bsd-2-clause | 3,666 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans.logical
import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
import org.apache.spark.sql.catalyst.catalog.{CatalogStorageFormat, CatalogTable}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate.AggregateExpression
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.physical.{HashPartitioning, Partitioning,
RangePartitioning, RoundRobinPartitioning}
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
import org.apache.spark.util.random.RandomSampler
/**
* When planning take() or collect() operations, this special node is inserted at the top of
* the logical plan before invoking the query planner.
*
* Rules can pattern-match on this node in order to apply transformations that only take effect
* at the top of the logical query plan.
*/
case class ReturnAnswer(child: LogicalPlan) extends UnaryNode {
override def output: Seq[Attribute] = child.output
}
/**
* This node is inserted at the top of a subquery when it is optimized. This makes sure we can
* recognize a subquery as such, and it allows us to write subquery aware transformations.
*/
case class Subquery(child: LogicalPlan) extends UnaryNode {
override def output: Seq[Attribute] = child.output
}
case class Project(projectList: Seq[NamedExpression], child: LogicalPlan) extends UnaryNode {
override def output: Seq[Attribute] = projectList.map(_.toAttribute)
override def maxRows: Option[Long] = child.maxRows
override lazy val resolved: Boolean = {
val hasSpecialExpressions = projectList.exists ( _.collect {
case agg: AggregateExpression => agg
case generator: Generator => generator
case window: WindowExpression => window
}.nonEmpty
)
!expressions.exists(!_.resolved) && childrenResolved && !hasSpecialExpressions
}
override def validConstraints: Set[Expression] =
child.constraints.union(getAliasedConstraints(projectList))
}
/**
* Applies a [[Generator]] to a stream of input rows, combining the
* output of each into a new stream of rows. This operation is similar to a `flatMap` in functional
* programming with one important additional feature, which allows the input rows to be joined with
* their output.
*
* @param generator the generator expression
* @param join when true, each output row is implicitly joined with the input tuple that produced
* it.
* @param outer when true, each input row will be output at least once, even if the output of the
* given `generator` is empty.
* @param qualifier Qualifier for the attributes of generator(UDTF)
* @param generatorOutput The output schema of the Generator.
* @param child Children logical plan node
*/
case class Generate(
generator: Generator,
join: Boolean,
outer: Boolean,
qualifier: Option[String],
generatorOutput: Seq[Attribute],
child: LogicalPlan)
extends UnaryNode {
/** The set of all attributes produced by this node. */
def generatedSet: AttributeSet = AttributeSet(generatorOutput)
override lazy val resolved: Boolean = {
generator.resolved &&
childrenResolved &&
generator.elementSchema.length == generatorOutput.length &&
generatorOutput.forall(_.resolved)
}
override def producedAttributes: AttributeSet = AttributeSet(generatorOutput)
def qualifiedGeneratorOutput: Seq[Attribute] = {
val qualifiedOutput = qualifier.map { q =>
// prepend the new qualifier to the existed one
generatorOutput.map(a => a.withQualifier(Some(q)))
}.getOrElse(generatorOutput)
val nullableOutput = qualifiedOutput.map {
// if outer, make all attributes nullable, otherwise keep existing nullability
a => a.withNullability(outer || a.nullable)
}
nullableOutput
}
def output: Seq[Attribute] = {
if (join) child.output ++ qualifiedGeneratorOutput else qualifiedGeneratorOutput
}
}
case class Filter(condition: Expression, child: LogicalPlan)
extends UnaryNode with PredicateHelper {
override def output: Seq[Attribute] = child.output
override def maxRows: Option[Long] = child.maxRows
override protected def validConstraints: Set[Expression] = {
val predicates = splitConjunctivePredicates(condition)
.filterNot(SubqueryExpression.hasCorrelatedSubquery)
child.constraints.union(predicates.toSet)
}
}
abstract class SetOperation(left: LogicalPlan, right: LogicalPlan) extends BinaryNode {
def duplicateResolved: Boolean = left.outputSet.intersect(right.outputSet).isEmpty
protected def leftConstraints: Set[Expression] = left.constraints
protected def rightConstraints: Set[Expression] = {
require(left.output.size == right.output.size)
val attributeRewrites = AttributeMap(right.output.zip(left.output))
right.constraints.map(_ transform {
case a: Attribute => attributeRewrites(a)
})
}
override lazy val resolved: Boolean =
childrenResolved &&
left.output.length == right.output.length &&
left.output.zip(right.output).forall { case (l, r) =>
l.dataType.sameType(r.dataType)
} && duplicateResolved
}
object SetOperation {
def unapply(p: SetOperation): Option[(LogicalPlan, LogicalPlan)] = Some((p.left, p.right))
}
case class Intersect(left: LogicalPlan, right: LogicalPlan) extends SetOperation(left, right) {
override def output: Seq[Attribute] =
left.output.zip(right.output).map { case (leftAttr, rightAttr) =>
leftAttr.withNullability(leftAttr.nullable && rightAttr.nullable)
}
override protected def validConstraints: Set[Expression] =
leftConstraints.union(rightConstraints)
override def maxRows: Option[Long] = {
if (children.exists(_.maxRows.isEmpty)) {
None
} else {
Some(children.flatMap(_.maxRows).min)
}
}
}
case class Except(left: LogicalPlan, right: LogicalPlan) extends SetOperation(left, right) {
/** We don't use right.output because those rows get excluded from the set. */
override def output: Seq[Attribute] = left.output
override protected def validConstraints: Set[Expression] = leftConstraints
}
/** Factory for constructing new `Union` nodes. */
object Union {
def apply(left: LogicalPlan, right: LogicalPlan): Union = {
Union (left :: right :: Nil)
}
}
/**
* Logical plan for unioning two plans, without a distinct. This is UNION ALL in SQL.
*/
case class Union(children: Seq[LogicalPlan]) extends LogicalPlan {
override def maxRows: Option[Long] = {
if (children.exists(_.maxRows.isEmpty)) {
None
} else {
Some(children.flatMap(_.maxRows).sum)
}
}
/**
* Note that this definition assumes a particular physical implementation of union.
*/
override def maxRowsPerPartition: Option[Long] = {
if (children.exists(_.maxRowsPerPartition.isEmpty)) {
None
} else {
Some(children.flatMap(_.maxRowsPerPartition).sum)
}
}
// updating nullability to make all the children consistent
override def output: Seq[Attribute] =
children.map(_.output).transpose.map(attrs =>
attrs.head.withNullability(attrs.exists(_.nullable)))
override lazy val resolved: Boolean = {
// allChildrenCompatible needs to be evaluated after childrenResolved
def allChildrenCompatible: Boolean =
children.tail.forall( child =>
// compare the attribute number with the first child
child.output.length == children.head.output.length &&
// compare the data types with the first child
child.output.zip(children.head.output).forall {
case (l, r) => l.dataType.sameType(r.dataType)
})
children.length > 1 && childrenResolved && allChildrenCompatible
}
/**
* Maps the constraints containing a given (original) sequence of attributes to those with a
* given (reference) sequence of attributes. Given the nature of union, we expect that the
* mapping between the original and reference sequences are symmetric.
*/
private def rewriteConstraints(
reference: Seq[Attribute],
original: Seq[Attribute],
constraints: Set[Expression]): Set[Expression] = {
require(reference.size == original.size)
val attributeRewrites = AttributeMap(original.zip(reference))
constraints.map(_ transform {
case a: Attribute => attributeRewrites(a)
})
}
private def merge(a: Set[Expression], b: Set[Expression]): Set[Expression] = {
val common = a.intersect(b)
// A constraint with only one reference can easily be inferred as a predicate.
// Group the constraints by their references so we can combine constraints that share the
// same reference.
val othera = a.diff(common).filter(_.references.size == 1).groupBy(_.references.head)
val otherb = b.diff(common).filter(_.references.size == 1).groupBy(_.references.head)
// Loosen the constraints: A1 && B1 || A2 && B2 -> (A1 || A2) && (B1 || B2)
val others = (othera.keySet intersect otherb.keySet).map { attr =>
Or(othera(attr).reduceLeft(And), otherb(attr).reduceLeft(And))
}
common ++ others
}
override protected def validConstraints: Set[Expression] = {
children
.map(child => rewriteConstraints(children.head.output, child.output, child.constraints))
.reduce(merge(_, _))
}
}
case class Join(
left: LogicalPlan,
right: LogicalPlan,
joinType: JoinType,
condition: Option[Expression])
extends BinaryNode with PredicateHelper {
override def output: Seq[Attribute] = {
joinType match {
case j: ExistenceJoin =>
left.output :+ j.exists
case LeftExistence(_) =>
left.output
case LeftOuter =>
left.output ++ right.output.map(_.withNullability(true))
case RightOuter =>
left.output.map(_.withNullability(true)) ++ right.output
case FullOuter =>
left.output.map(_.withNullability(true)) ++ right.output.map(_.withNullability(true))
case _ =>
left.output ++ right.output
}
}
override protected def validConstraints: Set[Expression] = {
joinType match {
case _: InnerLike if condition.isDefined =>
left.constraints
.union(right.constraints)
.union(splitConjunctivePredicates(condition.get).toSet)
case LeftSemi if condition.isDefined =>
left.constraints
.union(splitConjunctivePredicates(condition.get).toSet)
case j: ExistenceJoin =>
left.constraints
case _: InnerLike =>
left.constraints.union(right.constraints)
case LeftExistence(_) =>
left.constraints
case LeftOuter =>
left.constraints
case RightOuter =>
right.constraints
case FullOuter =>
Set.empty[Expression]
}
}
def duplicateResolved: Boolean = left.outputSet.intersect(right.outputSet).isEmpty
// Joins are only resolved if they don't introduce ambiguous expression ids.
// NaturalJoin should be ready for resolution only if everything else is resolved here
lazy val resolvedExceptNatural: Boolean = {
childrenResolved &&
expressions.forall(_.resolved) &&
duplicateResolved &&
condition.forall(_.dataType == BooleanType)
}
// if not a natural join, use `resolvedExceptNatural`. if it is a natural join or
// using join, we still need to eliminate natural or using before we mark it resolved.
override lazy val resolved: Boolean = joinType match {
case NaturalJoin(_) => false
case UsingJoin(_, _) => false
case _ => resolvedExceptNatural
}
}
/**
* Insert some data into a table. Note that this plan is unresolved and has to be replaced by the
* concrete implementations during analysis.
*
* @param table the logical plan representing the table. In the future this should be a
* [[org.apache.spark.sql.catalyst.catalog.CatalogTable]] once we converge Hive tables
* and data source tables.
* @param partition a map from the partition key to the partition value (optional). If the partition
* value is optional, dynamic partition insert will be performed.
* As an example, `INSERT INTO tbl PARTITION (a=1, b=2) AS ...` would have
* Map('a' -> Some('1'), 'b' -> Some('2')),
* and `INSERT INTO tbl PARTITION (a=1, b) AS ...`
* would have Map('a' -> Some('1'), 'b' -> None).
* @param query the logical plan representing data to write to.
* @param overwrite overwrite existing table or partitions.
* @param ifPartitionNotExists If true, only write if the partition does not exist.
* Only valid for static partitions.
*/
case class InsertIntoTable(
table: LogicalPlan,
partition: Map[String, Option[String]],
query: LogicalPlan,
overwrite: Boolean,
ifPartitionNotExists: Boolean)
extends LogicalPlan {
// IF NOT EXISTS is only valid in INSERT OVERWRITE
assert(overwrite || !ifPartitionNotExists)
// IF NOT EXISTS is only valid in static partitions
assert(partition.values.forall(_.nonEmpty) || !ifPartitionNotExists)
// We don't want `table` in children as sometimes we don't want to transform it.
override def children: Seq[LogicalPlan] = query :: Nil
override def output: Seq[Attribute] = Seq.empty
override lazy val resolved: Boolean = false
}
/**
* Insert query result into a directory.
*
* @param isLocal Indicates whether the specified directory is local directory
* @param storage Info about output file, row and what serialization format
* @param provider Specifies what data source to use; only used for data source file.
* @param child The query to be executed
* @param overwrite If true, the existing directory will be overwritten
*
* Note that this plan is unresolved and has to be replaced by the concrete implementations
* during analysis.
*/
case class InsertIntoDir(
isLocal: Boolean,
storage: CatalogStorageFormat,
provider: Option[String],
child: LogicalPlan,
overwrite: Boolean = true)
extends UnaryNode {
override def output: Seq[Attribute] = Seq.empty
override lazy val resolved: Boolean = false
}
/**
* A container for holding the view description(CatalogTable), and the output of the view. The
* child should be a logical plan parsed from the `CatalogTable.viewText`, should throw an error
* if the `viewText` is not defined.
* This operator will be removed at the end of analysis stage.
*
* @param desc A view description(CatalogTable) that provides necessary information to resolve the
* view.
* @param output The output of a view operator, this is generated during planning the view, so that
* we are able to decouple the output from the underlying structure.
* @param child The logical plan of a view operator, it should be a logical plan parsed from the
* `CatalogTable.viewText`, should throw an error if the `viewText` is not defined.
*/
case class View(
desc: CatalogTable,
output: Seq[Attribute],
child: LogicalPlan) extends LogicalPlan with MultiInstanceRelation {
override lazy val resolved: Boolean = child.resolved
override def children: Seq[LogicalPlan] = child :: Nil
override def newInstance(): LogicalPlan = copy(output = output.map(_.newInstance()))
override def simpleString: String = {
s"View (${desc.identifier}, ${output.mkString("[", ",", "]")})"
}
}
/**
* A container for holding named common table expressions (CTEs) and a query plan.
* This operator will be removed during analysis and the relations will be substituted into child.
*
* @param child The final query of this CTE.
* @param cteRelations A sequence of pair (alias, the CTE definition) that this CTE defined
* Each CTE can see the base tables and the previously defined CTEs only.
*/
case class With(child: LogicalPlan, cteRelations: Seq[(String, SubqueryAlias)]) extends UnaryNode {
override def output: Seq[Attribute] = child.output
override def simpleString: String = {
val cteAliases = Utils.truncatedString(cteRelations.map(_._1), "[", ", ", "]")
s"CTE $cteAliases"
}
override def innerChildren: Seq[LogicalPlan] = cteRelations.map(_._2)
}
case class WithWindowDefinition(
windowDefinitions: Map[String, WindowSpecDefinition],
child: LogicalPlan) extends UnaryNode {
override def output: Seq[Attribute] = child.output
}
/**
* @param order The ordering expressions
* @param global True means global sorting apply for entire data set,
* False means sorting only apply within the partition.
* @param child Child logical plan
*/
case class Sort(
order: Seq[SortOrder],
global: Boolean,
child: LogicalPlan) extends UnaryNode {
override def output: Seq[Attribute] = child.output
override def maxRows: Option[Long] = child.maxRows
}
/** Factory for constructing new `Range` nodes. */
object Range {
def apply(start: Long, end: Long, step: Long,
numSlices: Option[Int], isStreaming: Boolean = false): Range = {
val output = StructType(StructField("id", LongType, nullable = false) :: Nil).toAttributes
new Range(start, end, step, numSlices, output, isStreaming)
}
def apply(start: Long, end: Long, step: Long, numSlices: Int): Range = {
Range(start, end, step, Some(numSlices))
}
}
case class Range(
start: Long,
end: Long,
step: Long,
numSlices: Option[Int],
output: Seq[Attribute],
override val isStreaming: Boolean)
extends LeafNode with MultiInstanceRelation {
require(step != 0, s"step ($step) cannot be 0")
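// Worked example (illustration only): Range(start = 0, end = 10, step = 3) contains 0, 3, 6, 9;
// (10 - 0) / 3 = 3 and the non-zero remainder has the same sign as the step, so 1 is added below.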
val numElements: BigInt = {
val safeStart = BigInt(start)
val safeEnd = BigInt(end)
if ((safeEnd - safeStart) % step == 0 || (safeEnd > safeStart) != (step > 0)) {
(safeEnd - safeStart) / step
} else {
// the remainder has the same sign as the range, so add 1 more
(safeEnd - safeStart) / step + 1
}
}
def toSQL(): String = {
if (numSlices.isDefined) {
s"SELECT id AS `${output.head.name}` FROM range($start, $end, $step, ${numSlices.get})"
} else {
s"SELECT id AS `${output.head.name}` FROM range($start, $end, $step)"
}
}
override def newInstance(): Range = copy(output = output.map(_.newInstance()))
override def simpleString: String = {
s"Range ($start, $end, step=$step, splits=$numSlices)"
}
override def computeStats(): Statistics = {
Statistics(sizeInBytes = LongType.defaultSize * numElements)
}
}
case class Aggregate(
groupingExpressions: Seq[Expression],
aggregateExpressions: Seq[NamedExpression],
child: LogicalPlan)
extends UnaryNode {
override lazy val resolved: Boolean = {
val hasWindowExpressions = aggregateExpressions.exists ( _.collect {
case window: WindowExpression => window
}.nonEmpty
)
!expressions.exists(!_.resolved) && childrenResolved && !hasWindowExpressions
}
override def output: Seq[Attribute] = aggregateExpressions.map(_.toAttribute)
override def maxRows: Option[Long] = child.maxRows
override def validConstraints: Set[Expression] = {
val nonAgg = aggregateExpressions.filter(_.find(_.isInstanceOf[AggregateExpression]).isEmpty)
child.constraints.union(getAliasedConstraints(nonAgg))
}
}
case class Window(
windowExpressions: Seq[NamedExpression],
partitionSpec: Seq[Expression],
orderSpec: Seq[SortOrder],
child: LogicalPlan) extends UnaryNode {
override def output: Seq[Attribute] =
child.output ++ windowExpressions.map(_.toAttribute)
def windowOutputSet: AttributeSet = AttributeSet(windowExpressions.map(_.toAttribute))
}
object Expand {
/**
* Build a bit mask from the attributes of the selected grouping set. Each bit in the bitmask
* corresponds to an attribute in the group-by attribute sequence; a selected attribute has its
* bit set to 0 and every other attribute has its bit set to 1. For example, if the GroupBy
* attributes are (a, b, c, d), the bitmask 5 (whose binary form is 0101) represents grouping
* set (a, c).
*
* @param groupingSetAttrs The attributes of selected grouping set
* @param attrMap Mapping group by attributes to its index in attributes sequence
* @return The bitmask which represents the selected attributes out of group by attributes.
*/
private def buildBitmask(
groupingSetAttrs: Seq[Attribute],
attrMap: Map[Attribute, Int]): Int = {
val numAttributes = attrMap.size
val mask = (1 << numAttributes) - 1
// Calculate the attribute masks of the selected grouping set. For example, if we have GroupBy
// attributes (a, b, c, d), grouping set (a, c) will produce the following sequence:
// (15, 7, 13), whose binary form is (1111, 0111, 1101)
val masks = (mask +: groupingSetAttrs.map(attrMap).map(index =>
// 0 means that the column at the given index is a grouping column, 1 means it is not,
// so we unset the bit in bitmap.
~(1 << (numAttributes - 1 - index))
))
// Reduce masks to generate a bitmask for the selected grouping set.
masks.reduce(_ & _)
}
/**
* Apply all of the GroupExpressions to every input row, hence we will get
* multiple output rows for an input row.
*
* @param groupingSetsAttrs The attributes of grouping sets
* @param groupByAliases The aliased original group by expressions
* @param groupByAttrs The attributes of aliased group by expressions
* @param gid Attribute of the grouping id
* @param child Child operator
*/
def apply(
groupingSetsAttrs: Seq[Seq[Attribute]],
groupByAliases: Seq[Alias],
groupByAttrs: Seq[Attribute],
gid: Attribute,
child: LogicalPlan): Expand = {
val attrMap = groupByAttrs.zipWithIndex.toMap
// Create an array of Projections for the child projection, and replace the projections'
// expressions which equal GroupBy expressions with Literal(null), if those expressions
// are not set for this grouping set.
val projections = groupingSetsAttrs.map { groupingSetAttrs =>
child.output ++ groupByAttrs.map { attr =>
if (!groupingSetAttrs.contains(attr)) {
// if the input attribute is not selected by this grouping set,
// replace it with constant null
Literal.create(null, attr.dataType)
} else {
attr
}
// groupingId is the last output, here we use the bit mask as the concrete value for it.
} :+ Literal.create(buildBitmask(groupingSetAttrs, attrMap), IntegerType)
}
// `groupByAttrs` has a different meaning in `Expand.output`: it could be the original
// grouping expression or null, so we create a new instance of it here.
val output = child.output ++ groupByAttrs.map(_.newInstance) :+ gid
Expand(projections, output, Project(child.output ++ groupByAliases, child))
}
}
/**
* Apply a number of projections to every input row, hence we will get multiple output rows for
* an input row.
*
* @param projections to apply
* @param output of all projections.
* @param child operator.
*/
case class Expand(
projections: Seq[Seq[Expression]],
output: Seq[Attribute],
child: LogicalPlan) extends UnaryNode {
override def references: AttributeSet =
AttributeSet(projections.flatten.flatMap(_.references))
// This operator can reuse attributes (for example making them null when doing a roll up) so
// the constraints of the child may no longer be valid.
override protected def validConstraints: Set[Expression] = Set.empty[Expression]
}
/**
* A GROUP BY clause with GROUPING SETS can generate a result set equivalent to one generated
* by a UNION ALL of multiple simple GROUP BY clauses.
*
* We will transform GROUPING SETS into logical plan Aggregate(.., Expand) in Analyzer
*
* @param selectedGroupByExprs A sequence of selected GroupBy expressions, all exprs should
* exist in groupByExprs.
* @param groupByExprs The Group By expressions candidates.
* @param child Child operator
* @param aggregations The aggregation expressions; non-selected group-by expressions that
* appear in them are treated as constant null
*/
case class GroupingSets(
selectedGroupByExprs: Seq[Seq[Expression]],
groupByExprs: Seq[Expression],
child: LogicalPlan,
aggregations: Seq[NamedExpression]) extends UnaryNode {
override def output: Seq[Attribute] = aggregations.map(_.toAttribute)
// Needs to be unresolved before it is translated to Aggregate + Expand because output attributes
// will change in analysis.
override lazy val resolved: Boolean = false
}
case class Pivot(
groupByExprs: Seq[NamedExpression],
pivotColumn: Expression,
pivotValues: Seq[Literal],
aggregates: Seq[Expression],
child: LogicalPlan) extends UnaryNode {
override def output: Seq[Attribute] = groupByExprs.map(_.toAttribute) ++ aggregates match {
case agg :: Nil => pivotValues.map(value => AttributeReference(value.toString, agg.dataType)())
case _ => pivotValues.flatMap{ value =>
aggregates.map(agg => AttributeReference(value + "_" + agg.sql, agg.dataType)())
}
}
}
/**
* A constructor for creating a logical limit, which is split into two separate logical nodes:
* a [[LocalLimit]], which is a partition local limit, followed by a [[GlobalLimit]].
*
* This muddies the waters of clean logical/physical separation, and is done for better limit pushdown.
* In distributed query processing, a non-terminal global limit is actually an expensive operation
* because it requires coordination (in Spark this is done using a shuffle).
*
* In most cases when we want to push down limit, it is often better to only push some partition
* local limit. Consider the following:
*
* GlobalLimit(Union(A, B))
*
* It is better to do
* GlobalLimit(Union(LocalLimit(A), LocalLimit(B)))
*
* than
* Union(GlobalLimit(A), GlobalLimit(B)).
*
* So we introduced LocalLimit and GlobalLimit in the logical plan node for limit pushdown.
*/
object Limit {
def apply(limitExpr: Expression, child: LogicalPlan): UnaryNode = {
GlobalLimit(limitExpr, LocalLimit(limitExpr, child))
}
def unapply(p: GlobalLimit): Option[(Expression, LogicalPlan)] = {
p match {
case GlobalLimit(le1, LocalLimit(le2, child)) if le1 == le2 => Some((le1, child))
case _ => None
}
}
}
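/**
 * Illustrative sketch (not part of the original source): applying the `Limit` factory wraps a
 * plan in the two-node form described above, so only the cheap partition-local part needs to be
 * pushed below operators such as Union. The `LimitExample` name is purely for illustration.
 */
object LimitExample {
  // Limit(Literal(10), plan) expands to GlobalLimit(Literal(10), LocalLimit(Literal(10), plan)).
  def limitTen(plan: LogicalPlan): UnaryNode = Limit(Literal(10), plan)
}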
/**
* A global (coordinated) limit. This operator can emit at most `limitExpr` rows in total.
*
* See [[Limit]] for more information.
*/
case class GlobalLimit(limitExpr: Expression, child: LogicalPlan) extends UnaryNode {
override def output: Seq[Attribute] = child.output
override def maxRows: Option[Long] = {
limitExpr match {
case IntegerLiteral(limit) => Some(limit)
case _ => None
}
}
}
/**
* A partition-local (non-coordinated) limit. This operator can emit at most `limitExpr`
* tuples on each physical partition.
*
* See [[Limit]] for more information.
*/
case class LocalLimit(limitExpr: Expression, child: LogicalPlan) extends UnaryNode {
override def output: Seq[Attribute] = child.output
override def maxRowsPerPartition: Option[Long] = {
limitExpr match {
case IntegerLiteral(limit) => Some(limit)
case _ => None
}
}
}
/**
* Aliased subquery.
*
* @param alias the alias name for this subquery.
* @param child the logical plan of this subquery.
*/
case class SubqueryAlias(
alias: String,
child: LogicalPlan)
extends UnaryNode {
override def doCanonicalize(): LogicalPlan = child.canonicalized
override def output: Seq[Attribute] = child.output.map(_.withQualifier(Some(alias)))
}
/**
* Sample the dataset.
*
* @param lowerBound Lower-bound of the sampling probability (usually 0.0)
* @param upperBound Upper-bound of the sampling probability. The expected fraction sampled
* will be ub - lb.
* @param withReplacement Whether to sample with replacement.
* @param seed the random seed
* @param child the LogicalPlan
*/
case class Sample(
lowerBound: Double,
upperBound: Double,
withReplacement: Boolean,
seed: Long,
child: LogicalPlan) extends UnaryNode {
val eps = RandomSampler.roundingEpsilon
val fraction = upperBound - lowerBound
if (withReplacement) {
require(
fraction >= 0.0 - eps,
s"Sampling fraction ($fraction) must be nonnegative with replacement")
} else {
require(
fraction >= 0.0 - eps && fraction <= 1.0 + eps,
s"Sampling fraction ($fraction) must be on interval [0, 1] without replacement")
}
override def output: Seq[Attribute] = child.output
}
/**
* Returns a new logical plan that dedups input rows.
*/
case class Distinct(child: LogicalPlan) extends UnaryNode {
override def maxRows: Option[Long] = child.maxRows
override def output: Seq[Attribute] = child.output
}
/**
* A base interface for [[RepartitionByExpression]] and [[Repartition]]
*/
abstract class RepartitionOperation extends UnaryNode {
def shuffle: Boolean
def numPartitions: Int
override def output: Seq[Attribute] = child.output
}
/**
* Returns a new RDD that has exactly `numPartitions` partitions. Differs from
* [[RepartitionByExpression]] as this method is called directly by DataFrames, because the user
* asked for `coalesce` or `repartition`. [[RepartitionByExpression]] is used when the consumer
* of the output requires some specific ordering or distribution of the data.
*/
case class Repartition(numPartitions: Int, shuffle: Boolean, child: LogicalPlan)
extends RepartitionOperation {
require(numPartitions > 0, s"Number of partitions ($numPartitions) must be positive.")
}
/**
* This method repartitions data using [[Expression]]s into `numPartitions`, and receives
* information about the number of partitions during execution. Used when a specific ordering or
* distribution is expected by the consumer of the query result. Use [[Repartition]] for RDD-like
* `coalesce` and `repartition`.
*/
case class RepartitionByExpression(
partitionExpressions: Seq[Expression],
child: LogicalPlan,
numPartitions: Int) extends RepartitionOperation {
require(numPartitions > 0, s"Number of partitions ($numPartitions) must be positive.")
val partitioning: Partitioning = {
val (sortOrder, nonSortOrder) = partitionExpressions.partition(_.isInstanceOf[SortOrder])
require(sortOrder.isEmpty || nonSortOrder.isEmpty,
s"${getClass.getSimpleName} expects that either all its `partitionExpressions` are of type " +
"`SortOrder`, which means `RangePartitioning`, or none of them are `SortOrder`, which " +
"means `HashPartitioning`. In this case we have:" +
s"""
|SortOrder: $sortOrder
|NonSortOrder: $nonSortOrder
""".stripMargin)
if (sortOrder.nonEmpty) {
RangePartitioning(sortOrder.map(_.asInstanceOf[SortOrder]), numPartitions)
} else if (nonSortOrder.nonEmpty) {
HashPartitioning(nonSortOrder, numPartitions)
} else {
RoundRobinPartitioning(numPartitions)
}
}
override def maxRows: Option[Long] = child.maxRows
override def shuffle: Boolean = true
}
/**
* A relation with one row. This is used in "SELECT ..." without a from clause.
*/
case class OneRowRelation() extends LeafNode {
override def maxRows: Option[Long] = Some(1)
override def output: Seq[Attribute] = Nil
override def computeStats(): Statistics = Statistics(sizeInBytes = 1)
/** [[org.apache.spark.sql.catalyst.trees.TreeNode.makeCopy()]] does not support 0-arg ctor. */
override def makeCopy(newArgs: Array[AnyRef]): OneRowRelation = OneRowRelation()
}
/** A logical plan for `dropDuplicates`. */
case class Deduplicate(
keys: Seq[Attribute],
child: LogicalPlan) extends UnaryNode {
override def output: Seq[Attribute] = child.output
}
/**
* A logical plan for setting a barrier of analysis.
*
* The SQL Analyzer goes through the whole query plan even when most of it has already been
* analyzed. This increases the time spent on query analysis, especially for long ML pipelines.
*
* This logical plan wraps an analyzed logical plan to prevent it from being analyzed again. The barrier
* is applied to the analyzed logical plan in Dataset. It won't change the output of wrapped
* logical plan and just acts as a wrapper to hide it from analyzer. New operations on the dataset
* will be put on the barrier, so only the new nodes created will be analyzed.
*
* This analysis barrier will be removed at the end of analysis stage.
*/
case class AnalysisBarrier(child: LogicalPlan) extends LeafNode {
override def output: Seq[Attribute] = child.output
override def isStreaming: Boolean = child.isStreaming
override def doCanonicalize(): LogicalPlan = child.canonicalized
}
| ron8hu/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/basicLogicalOperators.scala | Scala | apache-2.0 | 33,702 |
/**
* Copyright (C) 2011 Havoc Pennington
*
* Derived from mongo-java-driver,
*
* Copyright (C) 2008 10gen Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.beaucatcher.bson
import java.net.NetworkInterface
import java.nio.ByteBuffer
import java.util.concurrent.atomic.AtomicInteger
import java.util.Date
import scala.collection.JavaConversions.enumerationAsScalaIterator
import org.beaucatcher.util.toHex
/**
* A globally unique identifier for objects.
* <p>Consists of 12 bytes, divided as follows:
* <blockquote><pre>
* <table border="1">
* <tr><td>0</td><td>1</td><td>2</td><td>3</td><td>4</td><td>5</td><td>6</td>
* <td>7</td><td>8</td><td>9</td><td>10</td><td>11</td></tr>
* <tr><td colspan="4">time</td><td colspan="3">machine</td>
* <td colspan="2">pid</td><td colspan="3">inc</td></tr>
* </table>
* </pre></blockquote>
*/
case class ObjectId(time: Int, machine: Int, inc: Int) {
/** Time of the ID in milliseconds (the regular time field is in seconds) */
def timeMillis = time * 1000L
// we cache the generated string
private lazy val string = ObjectId.assembleString(time, machine, inc)
// code definitely relies on this, e.g. when generating json
override def toString = string
}
object ObjectId {
private[bson] case class ObjectIdParts(time: Int, machine: Int, inc: Int)
def apply(date: Date, machine: Int, inc: Int): ObjectId = {
val time = (date.getTime() / 1000).intValue
ObjectId(time, machine, inc)
}
def apply(string: String): ObjectId = {
val parts = disassembleString(string)
ObjectId(parts.time, parts.machine, parts.inc)
}
def apply(): ObjectId = {
val time = (System.currentTimeMillis / 1000).intValue
ObjectId(time, machine, nextInc)
}
private[bson] def assembleBytes(time: Int, machine: Int, inc: Int) = {
val b = new Array[Byte](12)
val bb = ByteBuffer.wrap(b)
// by default BB is big endian like we need
bb.putInt(time);
bb.putInt(machine);
bb.putInt(inc);
b
}
private[bson] def disassembleBytes(bytes: Array[Byte]) = {
if (bytes.length != 12)
throw new IllegalArgumentException("BSON object ID byte[] has length " + bytes.length + " should be 12")
val bb = ByteBuffer.wrap(bytes)
ObjectIdParts(bb.getInt(), bb.getInt(), bb.getInt())
}
private[bson] def assembleString(time: Int, machine: Int, inc: Int) = {
val bytes = assembleBytes(time, machine, inc)
toHex(bytes)
}
private[bson] def disassembleString(string: String): ObjectIdParts = {
if (string.length != 24)
throw new IllegalArgumentException("BSON object ID string has length " + string.length + " should be 24")
val bytes = new Array[Byte](12)
for (i <- 0 to 11) {
val parsed = try {
Integer.parseInt(string.substring(i * 2, i * 2 + 2), 16)
} catch {
case nfe: NumberFormatException =>
throw new IllegalArgumentException("BSON object ID string contains invalid hex: " + string)
}
if (parsed < 0 || parsed > 255)
throw new IllegalArgumentException("BSON object ID contains invalid number: " + parsed)
val b: Byte = parsed.byteValue
bytes.update(i, b)
}
disassembleBytes(bytes)
}
private[bson] val _nextInc = new AtomicInteger((new java.util.Random()).nextInt())
private[bson] def nextInc = _nextInc.getAndIncrement()
private[bson] lazy val machine = {
// build a 2-byte machine piece based on NICs info
val machinePiece = {
val sb = new StringBuilder()
for (iface <- NetworkInterface.getNetworkInterfaces()) {
sb.append(iface.toString)
}
sb.toString.hashCode << 16
}
// add a 2 byte process piece. It must represent not only the JVM but the class loader.
// Since static vars belong to the class loader, there could be collisions otherwise
val processPiece = {
val jvmId = try {
java.lang.management.ManagementFactory.getRuntimeMXBean().getName().hashCode()
} catch {
case _ => new java.util.Random().nextInt()
}
val loader = this.getClass().getClassLoader
val loaderId = if (loader != null) System.identityHashCode(loader) else 0
val sb = new StringBuilder()
sb.append(Integer.toHexString(jvmId))
sb.append(Integer.toHexString(loaderId))
sb.toString.hashCode & 0xFFFF
}
machinePiece | processPiece
}
}
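/**
 * Illustrative usage sketch (not part of the original source): generate an id, round-trip it
 * through its 24-character hex string form, and recover the creation time.
 */
object ObjectIdExample extends App {
  val id = ObjectId()
  val parsed = ObjectId(id.toString)
  assert(parsed == id)
  println(s"id=$id createdAt=${new java.util.Date(id.timeMillis)}")
}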
| havocp/beaucatcher | base/src/main/scala/org/beaucatcher/bson/ObjectId.scala | Scala | apache-2.0 | 5,299 |
/*
* Copyright (C) 2016 Christopher Batey and Dogan Narinc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scassandra.server.e2e.query
import java.net.InetAddress
import java.util.{Date, UUID}
import com.datastax.driver.core._
import org.scassandra.codec.datatype.{DataType => DType}
import org.scassandra.server.AbstractIntegrationTest
import org.scassandra.server.priming.json.Success
import org.scassandra.server.priming.query.When
import scala.collection.JavaConverters._
class TuplePriming extends AbstractIntegrationTest {
def tt(dataType: DataType*): TupleType =
TupleType.of(ProtocolVersion.NEWEST_SUPPORTED, CodecRegistry.DEFAULT_INSTANCE, dataType: _*)
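  // Hedged note (editor's addition): `tt` just builds the Java driver's TupleType used for the
  // expected values below, e.g. tt(DataType.cint(), DataType.text()) describes tuple<int, text>.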
test("Test a tuple<varchar, ascii>") {
val tuple = ("one", "two")
val whenQuery = "Test prime with tuple<varchar, ascii>"
val rows: List[Map[String, Any]] = List(Map("field" -> tuple))
val columnTypes = Map("field" -> DType.Tuple(DType.Varchar, DType.Ascii))
prime(When(query = Some(whenQuery)), rows, Success, columnTypes)
val result = session.execute(whenQuery)
val singleRow = result.one()
val tupleType = tt(DataType.varchar(), DataType.ascii())
val expectedTuple = tupleType.newValue(tuple._1, tuple._2)
singleRow.getColumnDefinitions.getType("field") should equal(tupleType)
singleRow.getTupleValue("field") should equal(expectedTuple)
}
test("Test a tuple<int, long, boolean, counter, decimal, double, float>") {
// A reasonably wide tuple.
val tuple = (1, 2L, false, 3L, BigDecimal("1.2"), 3.02, 3.01f)
val whenQuery = "test prime with tuple<int, long, boolean, counter, decimal, double, float>"
val rows: List[Map[String, Any]] = List(Map("field" -> tuple))
val columnTypes = Map("field" -> DType.Tuple(DType.Int, DType.Bigint, DType.Boolean, DType.Counter, DType.Decimal, DType.Double, DType.Float))
prime(When(query = Some(whenQuery)), rows, Success, columnTypes)
val result = session.execute(whenQuery)
val singleRow = result.one()
val tupleType: TupleType = tt(DataType.cint(), DataType.bigint(), DataType.cboolean(), DataType.counter(), DataType.decimal(),
DataType.cdouble(), DataType.cfloat())
val expectedTuple = tupleType.newValue(
new java.lang.Integer(tuple._1),
new java.lang.Long(tuple._2),
new java.lang.Boolean(tuple._3),
new java.lang.Long(tuple._4),
tuple._5.bigDecimal,
new java.lang.Double(tuple._6),
new java.lang.Float(tuple._7))
singleRow.getColumnDefinitions.getType("field") should equal(tupleType)
singleRow.getTupleValue("field") should equal(expectedTuple)
}
test("Test a tuple<inet, timestamp, uuid, timeuuid, varint> using a list as input") {
val inet = InetAddress.getLocalHost
val date = new Date().getTime
val uuid = UUID.randomUUID
val timeuuid = UUID.fromString("1c0e8c70-754b-11e4-ac06-4b05b98cc84c")
val varint = BigInt("2")
val tuple = inet :: date :: uuid :: timeuuid :: varint :: Nil
val whenQuery = "test prime with tuple<inet, timestamp, uuid, timeuuid, varint>"
val rows: List[Map[String, Any]] = List(Map("field" -> tuple))
val columnTypes = Map("field" -> DType.Tuple(DType.Inet, DType.Timestamp, DType.Uuid, DType.Timeuuid, DType.Varint))
prime(When(query = Some(whenQuery)), rows, Success, columnTypes)
val result = session.execute(whenQuery)
val singleRow = result.one()
val tupleType: TupleType = tt(DataType.inet(), DataType.timestamp(), DataType.uuid(), DataType.timeuuid(), DataType.varint())
val expectedTuple = tupleType.newValue(
inet,
new Date(date),
uuid,
timeuuid,
varint.bigInteger
)
singleRow.getColumnDefinitions.getType("field") should equal(tupleType)
singleRow.getTupleValue("field") should equal(expectedTuple)
}
test("Test a tuple<tuple<date,list<smallint>>,map<time,tinyint>>") {
// A somewhat complicated tuple with nested collections.
val date = LocalDate.fromYearMonthDay(2014, 9, 17)
val smallints = List[Short](10, 28, 38)
val timemap = Map[Long, Byte](10L -> 0x5.toByte, 8674L -> 0x7.toByte)
    // The CQL date type encodes days as an unsigned value centered on the epoch at 2^31
val dateVal = math.pow(2, 31).toLong + date.getDaysSinceEpoch
val tuple = ((dateVal, smallints), timemap)
val whenQuery = "test prime with tuple<tuple<date,list<smallint>>,map<time,tinyint>>"
val rows: List[Map[String, Any]] = List(Map("field" -> tuple))
val columnTypes = Map("field" -> DType.Tuple(DType.Tuple(DType.Date, DType.List(DType.Smallint)), DType.Map(DType.Time, DType.Tinyint)))
prime(When(query = Some(whenQuery)), rows, Success, columnTypes)
val result = session.execute(whenQuery)
val singleRow = result.one()
val innerTuple = tt(DataType.date(), DataType.list(DataType.smallint()))
val tupleType: TupleType = tt(innerTuple, DataType.map(DataType.time(), DataType.tinyint()))
val expectedTuple = tupleType.newValue(
innerTuple.newValue(date, smallints.asJava),
timemap.asJava
)
singleRow.getColumnDefinitions.getType("field") should equal(tupleType)
singleRow.getTupleValue("field") should equal(expectedTuple)
}
}
| mikefero/cpp-driver | gtests/src/integration/scassandra/server/server/src/test/scala/org/scassandra/server/e2e/query/TuplePriming.scala | Scala | apache-2.0 | 5,705 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.java
import java.util.{Comparator, List => JList}
import scala.Tuple2
import scala.collection.JavaConversions._
import scala.reflect.ClassTag
import com.google.common.base.Optional
import org.apache.hadoop.io.compress.CompressionCodec
import org.apache.spark.{Partition, SparkContext, TaskContext}
import org.apache.spark.api.java.JavaPairRDD._
import org.apache.spark.api.java.JavaSparkContext.fakeClassTag
import org.apache.spark.api.java.function.{Function => JFunction, Function2 => JFunction2, _}
import org.apache.spark.partial.{BoundedDouble, PartialResult}
import org.apache.spark.rdd.RDD
import org.apache.spark.storage.StorageLevel
trait JavaRDDLike[T, This <: JavaRDDLike[T, This]] extends Serializable {
def wrapRDD(rdd: RDD[T]): This
implicit val classTag: ClassTag[T]
def rdd: RDD[T]
/** Set of partitions in this RDD. */
def splits: JList[Partition] = new java.util.ArrayList(rdd.partitions.toSeq)
/** The [[org.apache.spark.SparkContext]] that this RDD was created on. */
def context: SparkContext = rdd.context
/** A unique ID for this RDD (within its SparkContext). */
def id: Int = rdd.id
/** Get the RDD's current storage level, or StorageLevel.NONE if none is set. */
def getStorageLevel: StorageLevel = rdd.getStorageLevel
/**
* Internal method to this RDD; will read from cache if applicable, or otherwise compute it.
* This should ''not'' be called by users directly, but is available for implementors of custom
* subclasses of RDD.
*/
def iterator(split: Partition, taskContext: TaskContext): java.util.Iterator[T] =
asJavaIterator(rdd.iterator(split, taskContext))
// Transformations (return a new RDD)
/**
* Return a new RDD by applying a function to all elements of this RDD.
*/
def map[R](f: JFunction[T, R]): JavaRDD[R] =
new JavaRDD(rdd.map(f)(f.returnType()))(f.returnType())
/**
* Return a new RDD by applying a function to each partition of this RDD, while tracking the index
* of the original partition.
*/
def mapPartitionsWithIndex[R: ClassTag](
f: JFunction2[Int, java.util.Iterator[T], java.util.Iterator[R]],
preservesPartitioning: Boolean = false): JavaRDD[R] =
new JavaRDD(rdd.mapPartitionsWithIndex(((a,b) => f(a,asJavaIterator(b))),
preservesPartitioning))
/**
* Return a new RDD by applying a function to all elements of this RDD.
*/
def map[R](f: DoubleFunction[T]): JavaDoubleRDD =
new JavaDoubleRDD(rdd.map(x => f(x).doubleValue()))
/**
* Return a new RDD by applying a function to all elements of this RDD.
*/
def map[K2, V2](f: PairFunction[T, K2, V2]): JavaPairRDD[K2, V2] = {
val ctag = implicitly[ClassTag[Tuple2[K2, V2]]]
new JavaPairRDD(rdd.map(f)(ctag))(f.keyType(), f.valueType())
}
/**
* Return a new RDD by first applying a function to all elements of this
* RDD, and then flattening the results.
*/
def flatMap[U](f: FlatMapFunction[T, U]): JavaRDD[U] = {
import scala.collection.JavaConverters._
def fn = (x: T) => f.apply(x).asScala
JavaRDD.fromRDD(rdd.flatMap(fn)(f.elementType()))(f.elementType())
}
/**
* Return a new RDD by first applying a function to all elements of this
* RDD, and then flattening the results.
*/
def flatMap(f: DoubleFlatMapFunction[T]): JavaDoubleRDD = {
import scala.collection.JavaConverters._
def fn = (x: T) => f.apply(x).asScala
new JavaDoubleRDD(rdd.flatMap(fn).map((x: java.lang.Double) => x.doubleValue()))
}
/**
* Return a new RDD by first applying a function to all elements of this
* RDD, and then flattening the results.
*/
def flatMap[K2, V2](f: PairFlatMapFunction[T, K2, V2]): JavaPairRDD[K2, V2] = {
import scala.collection.JavaConverters._
def fn = (x: T) => f.apply(x).asScala
val ctag = implicitly[ClassTag[Tuple2[K2, V2]]]
JavaPairRDD.fromRDD(rdd.flatMap(fn)(ctag))(f.keyType(), f.valueType())
}
/**
* Return a new RDD by applying a function to each partition of this RDD.
*/
def mapPartitions[U](f: FlatMapFunction[java.util.Iterator[T], U]): JavaRDD[U] = {
def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator())
JavaRDD.fromRDD(rdd.mapPartitions(fn)(f.elementType()))(f.elementType())
}
/**
* Return a new RDD by applying a function to each partition of this RDD.
*/
def mapPartitions[U](f: FlatMapFunction[java.util.Iterator[T], U],
preservesPartitioning: Boolean): JavaRDD[U] = {
def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator())
JavaRDD.fromRDD(rdd.mapPartitions(fn, preservesPartitioning)(f.elementType()))(f.elementType())
}
/**
* Return a new RDD by applying a function to each partition of this RDD.
*/
def mapPartitions(f: DoubleFlatMapFunction[java.util.Iterator[T]]): JavaDoubleRDD = {
def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator())
new JavaDoubleRDD(rdd.mapPartitions(fn).map((x: java.lang.Double) => x.doubleValue()))
}
/**
* Return a new RDD by applying a function to each partition of this RDD.
*/
def mapPartitions[K2, V2](f: PairFlatMapFunction[java.util.Iterator[T], K2, V2]):
JavaPairRDD[K2, V2] = {
def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator())
JavaPairRDD.fromRDD(rdd.mapPartitions(fn))(f.keyType(), f.valueType())
}
/**
* Return a new RDD by applying a function to each partition of this RDD.
*/
def mapPartitions(f: DoubleFlatMapFunction[java.util.Iterator[T]],
preservesPartitioning: Boolean): JavaDoubleRDD = {
def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator())
new JavaDoubleRDD(rdd.mapPartitions(fn, preservesPartitioning)
.map((x: java.lang.Double) => x.doubleValue()))
}
/**
* Return a new RDD by applying a function to each partition of this RDD.
*/
def mapPartitions[K2, V2](f: PairFlatMapFunction[java.util.Iterator[T], K2, V2],
preservesPartitioning: Boolean): JavaPairRDD[K2, V2] = {
def fn = (x: Iterator[T]) => asScalaIterator(f.apply(asJavaIterator(x)).iterator())
JavaPairRDD.fromRDD(rdd.mapPartitions(fn, preservesPartitioning))(f.keyType(), f.valueType())
}
/**
* Applies a function f to each partition of this RDD.
*/
def foreachPartition(f: VoidFunction[java.util.Iterator[T]]) {
rdd.foreachPartition((x => f(asJavaIterator(x))))
}
/**
* Return an RDD created by coalescing all elements within each partition into an array.
*/
def glom(): JavaRDD[JList[T]] =
new JavaRDD(rdd.glom().map(x => new java.util.ArrayList[T](x.toSeq)))
/**
* Return the Cartesian product of this RDD and another one, that is, the RDD of all pairs of
* elements (a, b) where a is in `this` and b is in `other`.
*/
def cartesian[U](other: JavaRDDLike[U, _]): JavaPairRDD[T, U] =
JavaPairRDD.fromRDD(rdd.cartesian(other.rdd)(other.classTag))(classTag, other.classTag)
/**
* Return an RDD of grouped elements. Each group consists of a key and a sequence of elements
* mapping to that key.
*/
def groupBy[K](f: JFunction[T, K]): JavaPairRDD[K, JList[T]] = {
implicit val ctagK: ClassTag[K] = fakeClassTag
implicit val ctagV: ClassTag[JList[T]] = fakeClassTag
JavaPairRDD.fromRDD(groupByResultToJava(rdd.groupBy(f)(f.returnType)))
}
/**
* Return an RDD of grouped elements. Each group consists of a key and a sequence of elements
* mapping to that key.
*/
def groupBy[K](f: JFunction[T, K], numPartitions: Int): JavaPairRDD[K, JList[T]] = {
implicit val ctagK: ClassTag[K] = fakeClassTag
implicit val ctagV: ClassTag[JList[T]] = fakeClassTag
JavaPairRDD.fromRDD(groupByResultToJava(rdd.groupBy(f, numPartitions)(f.returnType)))
}
/**
* Return an RDD created by piping elements to a forked external process.
*/
def pipe(command: String): JavaRDD[String] = rdd.pipe(command)
/**
* Return an RDD created by piping elements to a forked external process.
*/
def pipe(command: JList[String]): JavaRDD[String] =
rdd.pipe(asScalaBuffer(command))
/**
* Return an RDD created by piping elements to a forked external process.
*/
def pipe(command: JList[String], env: java.util.Map[String, String]): JavaRDD[String] =
rdd.pipe(asScalaBuffer(command), mapAsScalaMap(env))
/**
* Zips this RDD with another one, returning key-value pairs with the first element in each RDD,
* second element in each RDD, etc. Assumes that the two RDDs have the *same number of
* partitions* and the *same number of elements in each partition* (e.g. one was made through
* a map on the other).
*/
def zip[U](other: JavaRDDLike[U, _]): JavaPairRDD[T, U] = {
JavaPairRDD.fromRDD(rdd.zip(other.rdd)(other.classTag))(classTag, other.classTag)
}
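  // Hedged illustration (editor's addition): zip pairs elements positionally, so an RDD of
  // [1, 2, 3] zipped with an RDD of ["a", "b", "c"] yields [(1, "a"), (2, "b"), (3, "c")];
  // the partition-count and per-partition-size constraints described above still apply.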
/**
* Zip this RDD's partitions with one (or more) RDD(s) and return a new RDD by
* applying a function to the zipped partitions. Assumes that all the RDDs have the
* *same number of partitions*, but does *not* require them to have the same number
* of elements in each partition.
*/
def zipPartitions[U, V](
other: JavaRDDLike[U, _],
f: FlatMapFunction2[java.util.Iterator[T], java.util.Iterator[U], V]): JavaRDD[V] = {
def fn = (x: Iterator[T], y: Iterator[U]) => asScalaIterator(
f.apply(asJavaIterator(x), asJavaIterator(y)).iterator())
JavaRDD.fromRDD(
rdd.zipPartitions(other.rdd)(fn)(other.classTag, f.elementType()))(f.elementType())
}
// Actions (launch a job to return a value to the user program)
/**
* Applies a function f to all elements of this RDD.
*/
def foreach(f: VoidFunction[T]) {
val cleanF = rdd.context.clean(f)
rdd.foreach(cleanF)
}
/**
* Return an array that contains all of the elements in this RDD.
*/
def collect(): JList[T] = {
import scala.collection.JavaConversions._
val arr: java.util.Collection[T] = rdd.collect().toSeq
new java.util.ArrayList(arr)
}
/**
* Return an array that contains all of the elements in this RDD.
*/
def toArray(): JList[T] = collect()
/**
* Return an array that contains all of the elements in a specific partition of this RDD.
*/
def collectPartitions(partitionIds: Array[Int]): Array[JList[T]] = {
// This is useful for implementing `take` from other language frontends
// like Python where the data is serialized.
import scala.collection.JavaConversions._
val res = context.runJob(rdd, (it: Iterator[T]) => it.toArray, partitionIds, true)
res.map(x => new java.util.ArrayList(x.toSeq)).toArray
}
/**
* Reduces the elements of this RDD using the specified commutative and associative binary
* operator.
*/
def reduce(f: JFunction2[T, T, T]): T = rdd.reduce(f)
/**
* Aggregate the elements of each partition, and then the results for all the partitions, using a
* given associative function and a neutral "zero value". The function op(t1, t2) is allowed to
* modify t1 and return it as its result value to avoid object allocation; however, it should not
* modify t2.
*/
def fold(zeroValue: T)(f: JFunction2[T, T, T]): T =
rdd.fold(zeroValue)(f)
/**
* Aggregate the elements of each partition, and then the results for all the partitions, using
* given combine functions and a neutral "zero value". This function can return a different result
   * type, U, than the type of this RDD, T. Thus, we need one operation for merging a T into a U
* and one operation for merging two U's, as in scala.TraversableOnce. Both of these functions are
* allowed to modify and return their first argument instead of creating a new U to avoid memory
* allocation.
*/
def aggregate[U](zeroValue: U)(seqOp: JFunction2[U, T, U],
combOp: JFunction2[U, U, U]): U =
rdd.aggregate(zeroValue)(seqOp, combOp)(seqOp.returnType)
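  // Hedged usage sketch (editor's addition): an illustrative Java caller of the fold/aggregate
  // wrappers above; the variable names and setup are assumptions, not part of this trait.
  //   JavaRDD<Integer> nums = ...;
  //   Integer sum = nums.fold(0, new Function2<Integer, Integer, Integer>() {
  //     public Integer call(Integer a, Integer b) { return a + b; }
  //   });
  //   // aggregate may fold into a different type, here a running (sum, count) pair:
  //   int[] sumCount = nums.aggregate(new int[]{0, 0},
  //     new Function2<int[], Integer, int[]>() {
  //       public int[] call(int[] acc, Integer x) { acc[0] += x; acc[1] += 1; return acc; }
  //     },
  //     new Function2<int[], int[], int[]>() {
  //       public int[] call(int[] a, int[] b) { a[0] += b[0]; a[1] += b[1]; return a; }
  //     });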
/**
* Return the number of elements in the RDD.
*/
def count(): Long = rdd.count()
/**
* (Experimental) Approximate version of count() that returns a potentially incomplete result
* within a timeout, even if not all tasks have finished.
*/
def countApprox(timeout: Long, confidence: Double): PartialResult[BoundedDouble] =
rdd.countApprox(timeout, confidence)
/**
* (Experimental) Approximate version of count() that returns a potentially incomplete result
* within a timeout, even if not all tasks have finished.
*/
def countApprox(timeout: Long): PartialResult[BoundedDouble] =
rdd.countApprox(timeout)
/**
* Return the count of each unique value in this RDD as a map of (value, count) pairs. The final
* combine step happens locally on the master, equivalent to running a single reduce task.
*/
def countByValue(): java.util.Map[T, java.lang.Long] =
mapAsJavaMap(rdd.countByValue().map((x => (x._1, new java.lang.Long(x._2)))))
/**
* (Experimental) Approximate version of countByValue().
*/
def countByValueApprox(
timeout: Long,
confidence: Double
): PartialResult[java.util.Map[T, BoundedDouble]] =
rdd.countByValueApprox(timeout, confidence).map(mapAsJavaMap)
/**
* (Experimental) Approximate version of countByValue().
*/
def countByValueApprox(timeout: Long): PartialResult[java.util.Map[T, BoundedDouble]] =
rdd.countByValueApprox(timeout).map(mapAsJavaMap)
/**
* Take the first num elements of the RDD. This currently scans the partitions *one by one*, so
* it will be slow if a lot of partitions are required. In that case, use collect() to get the
* whole RDD instead.
*/
def take(num: Int): JList[T] = {
import scala.collection.JavaConversions._
val arr: java.util.Collection[T] = rdd.take(num).toSeq
new java.util.ArrayList(arr)
}
def takeSample(withReplacement: Boolean, num: Int, seed: Int): JList[T] = {
import scala.collection.JavaConversions._
val arr: java.util.Collection[T] = rdd.takeSample(withReplacement, num, seed).toSeq
new java.util.ArrayList(arr)
}
/**
* Return the first element in this RDD.
*/
def first(): T = rdd.first()
/**
* Save this RDD as a text file, using string representations of elements.
*/
def saveAsTextFile(path: String) = rdd.saveAsTextFile(path)
/**
* Save this RDD as a compressed text file, using string representations of elements.
*/
def saveAsTextFile(path: String, codec: Class[_ <: CompressionCodec]) =
rdd.saveAsTextFile(path, codec)
/**
* Save this RDD as a SequenceFile of serialized objects.
*/
def saveAsObjectFile(path: String) = rdd.saveAsObjectFile(path)
/**
* Creates tuples of the elements in this RDD by applying `f`.
*/
def keyBy[K](f: JFunction[T, K]): JavaPairRDD[K, T] = {
implicit val ctag: ClassTag[K] = fakeClassTag
JavaPairRDD.fromRDD(rdd.keyBy(f))
}
/**
* Mark this RDD for checkpointing. It will be saved to a file inside the checkpoint
* directory set with SparkContext.setCheckpointDir() and all references to its parent
* RDDs will be removed. This function must be called before any job has been
* executed on this RDD. It is strongly recommended that this RDD is persisted in
* memory, otherwise saving it on a file will require recomputation.
*/
def checkpoint() = rdd.checkpoint()
/**
* Return whether this RDD has been checkpointed or not
*/
def isCheckpointed: Boolean = rdd.isCheckpointed
/**
* Gets the name of the file to which this RDD was checkpointed
*/
def getCheckpointFile(): Optional[String] = {
JavaUtils.optionToOptional(rdd.getCheckpointFile)
}
/** A description of this RDD and its recursive dependencies for debugging. */
def toDebugString(): String = {
rdd.toDebugString
}
/**
* Returns the top K elements from this RDD as defined by
* the specified Comparator[T].
* @param num the number of top elements to return
* @param comp the comparator that defines the order
* @return an array of top elements
*/
def top(num: Int, comp: Comparator[T]): JList[T] = {
import scala.collection.JavaConversions._
val topElems = rdd.top(num)(Ordering.comparatorToOrdering(comp))
val arr: java.util.Collection[T] = topElems.toSeq
new java.util.ArrayList(arr)
}
/**
* Returns the top K elements from this RDD using the
* natural ordering for T.
* @param num the number of top elements to return
* @return an array of top elements
*/
def top(num: Int): JList[T] = {
val comp = com.google.common.collect.Ordering.natural().asInstanceOf[Comparator[T]]
top(num, comp)
}
/**
* Returns the first K elements from this RDD as defined by
* the specified Comparator[T] and maintains the order.
* @param num the number of top elements to return
* @param comp the comparator that defines the order
* @return an array of top elements
*/
def takeOrdered(num: Int, comp: Comparator[T]): JList[T] = {
import scala.collection.JavaConversions._
val topElems = rdd.takeOrdered(num)(Ordering.comparatorToOrdering(comp))
val arr: java.util.Collection[T] = topElems.toSeq
new java.util.ArrayList(arr)
}
/**
* Returns the first K elements from this RDD using the
   * natural ordering for T while maintaining the order.
* @param num the number of top elements to return
* @return an array of top elements
*/
def takeOrdered(num: Int): JList[T] = {
val comp = com.google.common.collect.Ordering.natural().asInstanceOf[Comparator[T]]
takeOrdered(num, comp)
}
/**
* Return approximate number of distinct elements in the RDD.
*
* The accuracy of approximation can be controlled through the relative standard deviation
* (relativeSD) parameter, which also controls the amount of memory used. Lower values result in
   * more accurate counts but increase the memory footprint, and vice versa. The default value of
* relativeSD is 0.05.
*/
def countApproxDistinct(relativeSD: Double = 0.05): Long = rdd.countApproxDistinct(relativeSD)
def name(): String = rdd.name
/** Reset generator */
def setGenerator(_generator: String) = {
rdd.setGenerator(_generator)
}
}
| sryza/spark | core/src/main/scala/org/apache/spark/api/java/JavaRDDLike.scala | Scala | apache-2.0 | 19,051 |
package gitbucket.core.controller
import gitbucket.core.api._
import gitbucket.core.issues.html
import gitbucket.core.model.Issue
import gitbucket.core.service.IssuesService._
import gitbucket.core.service._
import gitbucket.core.util.ControlUtil._
import gitbucket.core.util.Implicits._
import gitbucket.core.util._
import gitbucket.core.view
import gitbucket.core.view.Markdown
import jp.sf.amateras.scalatra.forms._
import org.scalatra.Ok
class IssuesController extends IssuesControllerBase
with IssuesService with RepositoryService with AccountService with LabelsService with MilestonesService with ActivityService
with ReadableUsersAuthenticator with ReferrerAuthenticator with CollaboratorsAuthenticator with PullRequestService with WebHookIssueCommentService
trait IssuesControllerBase extends ControllerBase {
self: IssuesService with RepositoryService with AccountService with LabelsService with MilestonesService with ActivityService
with ReadableUsersAuthenticator with ReferrerAuthenticator with CollaboratorsAuthenticator with PullRequestService with WebHookIssueCommentService =>
case class IssueCreateForm(title: String, content: Option[String],
assignedUserName: Option[String], milestoneId: Option[Int], labelNames: Option[String])
case class CommentForm(issueId: Int, content: String)
case class IssueStateForm(issueId: Int, content: Option[String])
val issueCreateForm = mapping(
"title" -> trim(label("Title", text(required))),
"content" -> trim(optional(text())),
"assignedUserName" -> trim(optional(text())),
"milestoneId" -> trim(optional(number())),
"labelNames" -> trim(optional(text()))
)(IssueCreateForm.apply)
val issueTitleEditForm = mapping(
"title" -> trim(label("Title", text(required)))
)(x => x)
val issueEditForm = mapping(
"content" -> trim(optional(text()))
)(x => x)
val commentForm = mapping(
"issueId" -> label("Issue Id", number()),
"content" -> trim(label("Comment", text(required)))
)(CommentForm.apply)
val issueStateForm = mapping(
"issueId" -> label("Issue Id", number()),
"content" -> trim(optional(text()))
)(IssueStateForm.apply)
get("/:owner/:repository/issues")(referrersOnly { repository =>
val q = request.getParameter("q")
if(Option(q).exists(_.contains("is:pr"))){
redirect(s"/${repository.owner}/${repository.name}/pulls?q=" + StringUtil.urlEncode(q))
} else {
searchIssues(repository)
}
})
get("/:owner/:repository/issues/:id")(referrersOnly { repository =>
defining(repository.owner, repository.name, params("id")){ case (owner, name, issueId) =>
getIssue(owner, name, issueId) map {
html.issue(
_,
getComments(owner, name, issueId.toInt),
getIssueLabels(owner, name, issueId.toInt),
(getCollaborators(owner, name) ::: (if(getAccountByUserName(owner).get.isGroupAccount) Nil else List(owner))).sorted,
getMilestonesWithIssueCount(owner, name),
getLabels(owner, name),
hasWritePermission(owner, name, context.loginAccount),
repository)
} getOrElse NotFound
}
})
/**
* https://developer.github.com/v3/issues/comments/#list-comments-on-an-issue
*/
get("/api/v3/repos/:owner/:repository/issues/:id/comments")(referrersOnly { repository =>
(for{
issueId <- params("id").toIntOpt
comments = getCommentsForApi(repository.owner, repository.name, issueId.toInt)
} yield {
JsonFormat(comments.map{ case (issueComment, user) => ApiComment(issueComment, RepositoryName(repository), issueId, ApiUser(user)) })
}).getOrElse(NotFound)
})
get("/:owner/:repository/issues/new")(readableUsersOnly { repository =>
defining(repository.owner, repository.name){ case (owner, name) =>
html.create(
(getCollaborators(owner, name) ::: (if(getAccountByUserName(owner).get.isGroupAccount) Nil else List(owner))).sorted,
getMilestones(owner, name),
getLabels(owner, name),
hasWritePermission(owner, name, context.loginAccount),
repository)
}
})
post("/:owner/:repository/issues/new", issueCreateForm)(readableUsersOnly { (form, repository) =>
defining(repository.owner, repository.name){ case (owner, name) =>
val writable = hasWritePermission(owner, name, context.loginAccount)
val userName = context.loginAccount.get.userName
// insert issue
val issueId = createIssue(owner, name, userName, form.title, form.content,
if(writable) form.assignedUserName else None,
if(writable) form.milestoneId else None)
// insert labels
if(writable){
form.labelNames.map { value =>
val labels = getLabels(owner, name)
value.split(",").foreach { labelName =>
labels.find(_.labelName == labelName).map { label =>
registerIssueLabel(owner, name, issueId, label.labelId)
}
}
}
}
// record activity
recordCreateIssueActivity(owner, name, userName, issueId, form.title)
getIssue(owner, name, issueId.toString).foreach { issue =>
// extract references and create refer comment
createReferComment(owner, name, issue, form.title + " " + form.content.getOrElse(""))
// call web hooks
callIssuesWebHook("opened", repository, issue, context.baseUrl, context.loginAccount.get)
// notifications
Notifier().toNotify(repository, issue, form.content.getOrElse("")){
Notifier.msgIssue(s"${context.baseUrl}/${owner}/${name}/issues/${issueId}")
}
}
redirect(s"/${owner}/${name}/issues/${issueId}")
}
})
ajaxPost("/:owner/:repository/issues/edit_title/:id", issueTitleEditForm)(readableUsersOnly { (title, repository) =>
defining(repository.owner, repository.name){ case (owner, name) =>
getIssue(owner, name, params("id")).map { issue =>
if(isEditable(owner, name, issue.openedUserName)){
// update issue
updateIssue(owner, name, issue.issueId, title, issue.content)
// extract references and create refer comment
createReferComment(owner, name, issue.copy(title = title), title)
redirect(s"/${owner}/${name}/issues/_data/${issue.issueId}")
} else Unauthorized
} getOrElse NotFound
}
})
ajaxPost("/:owner/:repository/issues/edit/:id", issueEditForm)(readableUsersOnly { (content, repository) =>
defining(repository.owner, repository.name){ case (owner, name) =>
getIssue(owner, name, params("id")).map { issue =>
if(isEditable(owner, name, issue.openedUserName)){
// update issue
updateIssue(owner, name, issue.issueId, issue.title, content)
// extract references and create refer comment
createReferComment(owner, name, issue, content.getOrElse(""))
redirect(s"/${owner}/${name}/issues/_data/${issue.issueId}")
} else Unauthorized
} getOrElse NotFound
}
})
post("/:owner/:repository/issue_comments/new", commentForm)(readableUsersOnly { (form, repository) =>
handleComment(form.issueId, Some(form.content), repository)() map { case (issue, id) =>
redirect(s"/${repository.owner}/${repository.name}/${
if(issue.isPullRequest) "pull" else "issues"}/${form.issueId}#comment-${id}")
} getOrElse NotFound
})
/**
* https://developer.github.com/v3/issues/comments/#create-a-comment
*/
post("/api/v3/repos/:owner/:repository/issues/:id/comments")(readableUsersOnly { repository =>
(for{
issueId <- params("id").toIntOpt
body <- extractFromJsonBody[CreateAComment].map(_.body) if ! body.isEmpty
(issue, id) <- handleComment(issueId, Some(body), repository)()
issueComment <- getComment(repository.owner, repository.name, id.toString())
} yield {
JsonFormat(ApiComment(issueComment, RepositoryName(repository), issueId, ApiUser(context.loginAccount.get)))
}) getOrElse NotFound
})
post("/:owner/:repository/issue_comments/state", issueStateForm)(readableUsersOnly { (form, repository) =>
handleComment(form.issueId, form.content, repository)() map { case (issue, id) =>
redirect(s"/${repository.owner}/${repository.name}/${
if(issue.isPullRequest) "pull" else "issues"}/${form.issueId}#comment-${id}")
} getOrElse NotFound
})
ajaxPost("/:owner/:repository/issue_comments/edit/:id", commentForm)(readableUsersOnly { (form, repository) =>
defining(repository.owner, repository.name){ case (owner, name) =>
getComment(owner, name, params("id")).map { comment =>
if(isEditable(owner, name, comment.commentedUserName)){
updateComment(comment.commentId, form.content)
redirect(s"/${owner}/${name}/issue_comments/_data/${comment.commentId}")
} else Unauthorized
} getOrElse NotFound
}
})
ajaxPost("/:owner/:repository/issue_comments/delete/:id")(readableUsersOnly { repository =>
defining(repository.owner, repository.name){ case (owner, name) =>
getComment(owner, name, params("id")).map { comment =>
if(isEditable(owner, name, comment.commentedUserName)){
Ok(deleteComment(comment.commentId))
} else Unauthorized
} getOrElse NotFound
}
})
ajaxGet("/:owner/:repository/issues/_data/:id")(readableUsersOnly { repository =>
getIssue(repository.owner, repository.name, params("id")) map { x =>
if(isEditable(x.userName, x.repositoryName, x.openedUserName)){
params.get("dataType") collect {
case t if t == "html" => html.editissue(
x.content, x.issueId, x.userName, x.repositoryName)
} getOrElse {
contentType = formats("json")
org.json4s.jackson.Serialization.write(
Map("title" -> x.title,
"content" -> Markdown.toHtml(x.content getOrElse "No description given.",
repository, false, true, true, isEditable(x.userName, x.repositoryName, x.openedUserName))
))
}
} else Unauthorized
} getOrElse NotFound
})
ajaxGet("/:owner/:repository/issue_comments/_data/:id")(readableUsersOnly { repository =>
getComment(repository.owner, repository.name, params("id")) map { x =>
if(isEditable(x.userName, x.repositoryName, x.commentedUserName)){
params.get("dataType") collect {
case t if t == "html" => html.editcomment(
x.content, x.commentId, x.userName, x.repositoryName)
} getOrElse {
contentType = formats("json")
org.json4s.jackson.Serialization.write(
Map("content" -> view.Markdown.toHtml(x.content,
repository, false, true, true, isEditable(x.userName, x.repositoryName, x.commentedUserName))
))
}
} else Unauthorized
} getOrElse NotFound
})
ajaxPost("/:owner/:repository/issues/new/label")(collaboratorsOnly { repository =>
val labelNames = params("labelNames").split(",")
val labels = getLabels(repository.owner, repository.name).filter(x => labelNames.contains(x.labelName))
html.labellist(labels)
})
ajaxPost("/:owner/:repository/issues/:id/label/new")(collaboratorsOnly { repository =>
defining(params("id").toInt){ issueId =>
registerIssueLabel(repository.owner, repository.name, issueId, params("labelId").toInt)
html.labellist(getIssueLabels(repository.owner, repository.name, issueId))
}
})
ajaxPost("/:owner/:repository/issues/:id/label/delete")(collaboratorsOnly { repository =>
defining(params("id").toInt){ issueId =>
deleteIssueLabel(repository.owner, repository.name, issueId, params("labelId").toInt)
html.labellist(getIssueLabels(repository.owner, repository.name, issueId))
}
})
ajaxPost("/:owner/:repository/issues/:id/assign")(collaboratorsOnly { repository =>
updateAssignedUserName(repository.owner, repository.name, params("id").toInt, assignedUserName("assignedUserName"))
Ok("updated")
})
ajaxPost("/:owner/:repository/issues/:id/milestone")(collaboratorsOnly { repository =>
updateMilestoneId(repository.owner, repository.name, params("id").toInt, milestoneId("milestoneId"))
milestoneId("milestoneId").map { milestoneId =>
getMilestonesWithIssueCount(repository.owner, repository.name)
.find(_._1.milestoneId == milestoneId).map { case (_, openCount, closeCount) =>
gitbucket.core.issues.milestones.html.progress(openCount + closeCount, closeCount)
} getOrElse NotFound
} getOrElse Ok()
})
post("/:owner/:repository/issues/batchedit/state")(collaboratorsOnly { repository =>
defining(params.get("value")){ action =>
action match {
case Some("open") => executeBatch(repository) { handleComment(_, None, repository)( _ => Some("reopen")) }
case Some("close") => executeBatch(repository) { handleComment(_, None, repository)( _ => Some("close")) }
case _ => // TODO BadRequest
}
}
})
post("/:owner/:repository/issues/batchedit/label")(collaboratorsOnly { repository =>
params("value").toIntOpt.map{ labelId =>
executeBatch(repository) { issueId =>
getIssueLabel(repository.owner, repository.name, issueId, labelId) getOrElse {
registerIssueLabel(repository.owner, repository.name, issueId, labelId)
}
}
} getOrElse NotFound
})
post("/:owner/:repository/issues/batchedit/assign")(collaboratorsOnly { repository =>
defining(assignedUserName("value")){ value =>
executeBatch(repository) {
updateAssignedUserName(repository.owner, repository.name, _, value)
}
}
})
post("/:owner/:repository/issues/batchedit/milestone")(collaboratorsOnly { repository =>
defining(milestoneId("value")){ value =>
executeBatch(repository) {
updateMilestoneId(repository.owner, repository.name, _, value)
}
}
})
get("/:owner/:repository/_attached/:file")(referrersOnly { repository =>
(Directory.getAttachedDir(repository.owner, repository.name) match {
case dir if(dir.exists && dir.isDirectory) =>
dir.listFiles.find(_.getName.startsWith(params("file") + ".")).map { file =>
RawData(FileUtil.getMimeType(file.getName), file)
}
case _ => None
}) getOrElse NotFound
})
val assignedUserName = (key: String) => params.get(key) filter (_.trim != "")
val milestoneId: String => Option[Int] = (key: String) => params.get(key).flatMap(_.toIntOpt)
private def isEditable(owner: String, repository: String, author: String)(implicit context: Context): Boolean =
hasWritePermission(owner, repository, context.loginAccount) || author == context.loginAccount.get.userName
private def executeBatch(repository: RepositoryService.RepositoryInfo)(execute: Int => Unit) = {
params("checked").split(',') map(_.toInt) foreach execute
params("from") match {
case "issues" => redirect(s"/${repository.owner}/${repository.name}/issues")
case "pulls" => redirect(s"/${repository.owner}/${repository.name}/pulls")
}
}
private def createReferComment(owner: String, repository: String, fromIssue: Issue, message: String) = {
StringUtil.extractIssueId(message).foreach { issueId =>
val content = fromIssue.issueId + ":" + fromIssue.title
if(getIssue(owner, repository, issueId).isDefined){
        // Do not add a refer comment if one already exists.
if(!getComments(owner, repository, issueId.toInt).exists { x => x.action == "refer" && x.content == content }) {
createComment(owner, repository, context.loginAccount.get.userName, issueId.toInt, content, "refer")
}
}
}
}
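  // Hedged example (editor's addition): for a message such as "Fixes #12", the intent of the
  // method above is that StringUtil.extractIssueId yields "12" and, if issue 12 exists, it
  // receives a "refer" comment whose content is "<sourceIssueId>:<sourceIssueTitle>".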
/**
* @see [[https://github.com/takezoe/gitbucket/wiki/CommentAction]]
*/
private def handleComment(issueId: Int, content: Option[String], repository: RepositoryService.RepositoryInfo)
(getAction: Issue => Option[String] =
p1 => params.get("action").filter(_ => isEditable(p1.userName, p1.repositoryName, p1.openedUserName))) = {
defining(repository.owner, repository.name){ case (owner, name) =>
val userName = context.loginAccount.get.userName
getIssue(owner, name, issueId.toString) flatMap { issue =>
val (action, recordActivity) =
getAction(issue)
.collect {
case "close" if(!issue.closed) => true ->
(Some("close") -> Some(if(issue.isPullRequest) recordClosePullRequestActivity _ else recordCloseIssueActivity _))
case "reopen" if(issue.closed) => false ->
(Some("reopen") -> Some(recordReopenIssueActivity _))
}
.map { case (closed, t) =>
updateClosed(owner, name, issueId, closed)
t
}
.getOrElse(None -> None)
val commentId = (content, action) match {
case (None, None) => None
case (None, Some(action)) => Some(createComment(owner, name, userName, issueId, action.capitalize, action))
case (Some(content), _) => Some(createComment(owner, name, userName, issueId, content, action.map(_+ "_comment").getOrElse("comment")))
}
// record comment activity if comment is entered
content foreach {
(if(issue.isPullRequest) recordCommentPullRequestActivity _ else recordCommentIssueActivity _)
(owner, name, userName, issueId, _)
}
recordActivity foreach ( _ (owner, name, userName, issueId, issue.title) )
// extract references and create refer comment
content.map { content =>
createReferComment(owner, name, issue, content)
}
// call web hooks
action match {
case None => commentId.map{ commentIdSome => callIssueCommentWebHook(repository, issue, commentIdSome, context.loginAccount.get) }
case Some(act) => val webHookAction = act match {
case "open" => "opened"
case "reopen" => "reopened"
case "close" => "closed"
case _ => act
}
if(issue.isPullRequest){
callPullRequestWebHook(webHookAction, repository, issue.issueId, context.baseUrl, context.loginAccount.get)
} else {
callIssuesWebHook(webHookAction, repository, issue, context.baseUrl, context.loginAccount.get)
}
}
// notifications
Notifier() match {
case f =>
content foreach {
f.toNotify(repository, issue, _){
Notifier.msgComment(s"${context.baseUrl}/${owner}/${name}/${
if(issue.isPullRequest) "pull" else "issues"}/${issueId}#comment-${commentId.get}")
}
}
action foreach {
f.toNotify(repository, issue, _){
Notifier.msgStatus(s"${context.baseUrl}/${owner}/${name}/issues/${issueId}")
}
}
}
commentId.map( issue -> _ )
}
}
}
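  // Hedged summary (editor's addition) of how the match above turns its inputs into a stored
  // comment, with illustrative values:
  //   content = None,    action = Some("close") -> one "close" comment with body "Close"
  //   content = Some(c), action = Some("close") -> one "close_comment" comment with body c
  //   content = Some(c), action = None          -> one plain "comment" with body c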
private def searchIssues(repository: RepositoryService.RepositoryInfo) = {
defining(repository.owner, repository.name){ case (owner, repoName) =>
val page = IssueSearchCondition.page(request)
val sessionKey = Keys.Session.Issues(owner, repoName)
// retrieve search condition
val condition = session.putAndGet(sessionKey,
if(request.hasQueryString){
val q = request.getParameter("q")
if(q == null || q.trim.isEmpty){
IssueSearchCondition(request)
} else {
IssueSearchCondition(q, getMilestones(owner, repoName).map(x => (x.title, x.milestoneId)).toMap)
}
} else session.getAs[IssueSearchCondition](sessionKey).getOrElse(IssueSearchCondition())
)
html.list(
"issues",
searchIssue(condition, false, (page - 1) * IssueLimit, IssueLimit, owner -> repoName),
page,
if(!getAccountByUserName(owner).exists(_.isGroupAccount)){
(getCollaborators(owner, repoName) :+ owner).sorted
} else {
getCollaborators(owner, repoName)
},
getMilestones(owner, repoName),
getLabels(owner, repoName),
countIssue(condition.copy(state = "open" ), false, owner -> repoName),
countIssue(condition.copy(state = "closed"), false, owner -> repoName),
condition,
repository,
hasWritePermission(owner, repoName, context.loginAccount))
}
}
}
| intermezzo-fr/gitbucket | src/main/scala/gitbucket/core/controller/IssuesController.scala | Scala | apache-2.0 | 20,641 |
package com.twitter.finagle.netty4
import com.twitter.io.Buf
import io.netty.buffer.{ByteBuf, Unpooled}
import io.netty.util.CharsetUtil
import java.io.{ByteArrayInputStream, ByteArrayOutputStream}
import java.nio.{ByteBuffer, ReadOnlyBufferException}
import java.util._
import org.junit.Assert._
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
import org.scalatest.{BeforeAndAfter, FunSuite}
@RunWith(classOf[JUnitRunner])
class BufAsByteBufTest extends FunSuite with BeforeAndAfter {
private[this] val Capacity = 4096 // Must be even
private[this] val BlockSize = 128
private[this] var seed: Long = 0
private[this] var random: Random = null
private[this] var buffer: ByteBuf = null
before {
buffer =
BufAsByteBuf.Owned(Buf.ByteArray.Owned(Array.fill[Byte](Capacity)(0.toByte)))
seed = System.currentTimeMillis()
random = new Random(seed)
}
after {
buffer = null
}
test("initial state") {
assertEquals(Capacity, buffer.capacity)
assertEquals(0, buffer.readerIndex)
}
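  // Hedged sketch (editor's addition) of the wrapping pattern exercised throughout this suite;
  // the values are illustrative:
  //   val bytes = Array[Byte](1, 2, 3, 4)
  //   val bb: ByteBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
  //   bb.getByte(0) // == 1; reads are index-based
  //   bb.setZero(0, 1) // mutating calls throw ReadOnlyBufferException (see the "setZero" test)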
test("reader index boundary check 1") {
try {
buffer.writerIndex(0)
} catch { case e: IndexOutOfBoundsException =>
fail()
}
intercept[IndexOutOfBoundsException] {
buffer.readerIndex(-1)
}
}
test("reader index boundary check 2") {
try {
buffer.writerIndex(buffer.capacity)
} catch { case e: IndexOutOfBoundsException =>
fail()
}
intercept[IndexOutOfBoundsException] {
buffer.readerIndex(buffer.capacity + 1)
}
}
test("reader index boundary check 3") {
try {
buffer.writerIndex(Capacity / 2)
} catch { case e: IndexOutOfBoundsException =>
fail()
}
intercept[IndexOutOfBoundsException] {
buffer.readerIndex(Capacity * 3 / 2)
}
}
test("reader index boundary check 4") {
buffer.writerIndex(0)
buffer.readerIndex(0)
buffer.writerIndex(buffer.capacity)
buffer.readerIndex(buffer.capacity)
}
test("writer index boundary check 1") {
intercept[IndexOutOfBoundsException] {
buffer.writerIndex(-1)
}
}
test("writer index boundary check 2") {
try {
buffer.writerIndex(Capacity)
buffer.readerIndex(Capacity)
} catch { case e: IndexOutOfBoundsException =>
fail()
}
intercept[IndexOutOfBoundsException] {
buffer.writerIndex(buffer.capacity + 1)
}
}
test("writer index boundary check 3") {
try {
buffer.writerIndex(Capacity)
buffer.readerIndex(Capacity / 2)
} catch { case e: IndexOutOfBoundsException =>
fail()
}
intercept[IndexOutOfBoundsException] {
buffer.writerIndex(Capacity / 4)
}
}
test("writer index boundary check 4") {
buffer.writerIndex(0)
buffer.readerIndex(0)
buffer.writerIndex(Capacity)
}
test("getByte boundary check 1") {
intercept[IndexOutOfBoundsException] {
buffer.getByte(-1)
}
}
test("getByte boundary check 2") {
intercept[IndexOutOfBoundsException] {
buffer.getByte(buffer.capacity)
}
}
test("getShort boundary check 1") {
intercept[IndexOutOfBoundsException] {
buffer.getShort(-1)
}
}
test("getShort boundary check 2") {
intercept[IndexOutOfBoundsException] {
buffer.getShort(buffer.capacity - 1)
}
}
test("getMedium boundary check 1") {
intercept[IndexOutOfBoundsException] {
buffer.getMedium(-1)
}
}
test("getMedium boundary check 2") {
intercept[IndexOutOfBoundsException] {
buffer.getMedium(buffer.capacity - 2)
}
}
test("getInt boundary check 1") {
intercept[IndexOutOfBoundsException] {
buffer.getInt(-1)
}
}
test("getInt boundary check 2") {
intercept[IndexOutOfBoundsException] {
buffer.getInt(buffer.capacity - 3)
}
}
test("getLong boundary check 1") {
intercept[IndexOutOfBoundsException] {
buffer.getLong(-1)
}
}
test("getLong boundary check 2") {
intercept[IndexOutOfBoundsException] {
buffer.getLong(buffer.capacity - 7)
}
}
test("getByteArray boundary check 1") {
intercept[IndexOutOfBoundsException] {
buffer.getBytes(-1, new Array[Byte](0))
}
}
test("getByteArray boundary check 2") {
intercept[IndexOutOfBoundsException] {
buffer.getBytes(-1, new Array[Byte](0), 0, 0)
}
}
test("getByteArray boundary check 3") {
val dst = new Array[Byte](4)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(Array[Byte](1,2,3,4)))
intercept[IndexOutOfBoundsException] {
wrappedBuf.getBytes(0, dst, -1, 4)
}
// No partial copy is expected.
assert(0 == dst(0))
assert(0 == dst(1))
assert(0 == dst(2))
assert(0 == dst(3))
}
test("getByteArray boundary check 4") {
val dst = new Array[Byte](4)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(Array[Byte](1,2,3,4)))
intercept[IndexOutOfBoundsException] {
wrappedBuf.getBytes(0, dst, 1, 4)
}
// No partial copy is expected.
assert(0 == dst(0))
assert(0 == dst(1))
assert(0 == dst(2))
assert(0 == dst(3))
}
test("getBytes ByteBuffer boundary check") {
intercept[IndexOutOfBoundsException] {
buffer.getBytes(-1, ByteBuffer.allocate(0))
}
}
test("copy boundary check 1") {
intercept[IndexOutOfBoundsException] {
buffer.copy(-1, 0)
}
}
test("copy boundary check 2") {
intercept[IndexOutOfBoundsException] {
buffer.copy(0, buffer.capacity + 1)
}
}
test("copy boundary check 3") {
intercept[IndexOutOfBoundsException] {
buffer.copy(buffer.capacity + 1, 0)
}
}
test("copy boundary check 4") {
intercept[IndexOutOfBoundsException] {
buffer.copy(buffer.capacity, 1)
}
}
test("setIndex boundary check 1") {
intercept[IndexOutOfBoundsException] {
buffer.setIndex(-1, Capacity)
}
}
test("setIndex boundary check 2") {
intercept[IndexOutOfBoundsException] {
buffer.setIndex(Capacity / 2, Capacity / 4)
}
}
test("setIndex boundary check 3") {
intercept[IndexOutOfBoundsException] {
buffer.setIndex(0, Capacity + 1)
}
}
test("getBytes ByteBuffer state") {
val dst = ByteBuffer.allocate(4)
dst.position(1)
dst.limit(3)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(Array[Byte](1,2,3,4)))
wrappedBuf.getBytes(1, dst)
assert(3 == dst.position())
assert(3 == dst.limit())
dst.clear()
assert(0 == dst.get(0))
assert(2 == dst.get(1))
assert(3 == dst.get(2))
assert(0 == dst.get(3))
}
test("getBytes DirectByteBuffer boundary check") {
intercept[IndexOutOfBoundsException] {
buffer.getBytes(-1, ByteBuffer.allocateDirect(0))
}
}
test("getBytes DirectByteBuffer state") {
val dst = ByteBuffer.allocateDirect(4)
dst.position(1)
dst.limit(3)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(Array[Byte](1,2,3,4)))
wrappedBuf.getBytes(1, dst)
assert(3 == dst.position())
assert(3 == dst.limit())
dst.clear()
assert(0 == dst.get(0))
assert(2 == dst.get(1))
assert(3 == dst.get(2))
assert(0 == dst.get(3))
}
test("random byte access") {
val buf = Buf.ByteArray.Owned(Array.fill(Capacity)(random.nextInt().toByte))
val wrappedBuf = BufAsByteBuf.Owned(buf)
random.setSeed(seed)
0.until(Capacity).foreach { i =>
val value = random.nextInt().asInstanceOf[Byte]
assert(value == wrappedBuf.getByte(i))
}
}
test("test random unsigned byte access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
0.until(Capacity).foreach { i =>
val value = random.nextInt().asInstanceOf[Byte]
wrapped.setByte(i, value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
random.setSeed(seed)
0.until(Capacity).foreach { i =>
val value = random.nextInt() & 0xFF
assert(value == wrappedBuf.getUnsignedByte(i))
}
}
test("test random short access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
0.until(Capacity, 2).foreach { i =>
val value = random.nextInt().asInstanceOf[Short]
wrapped.setShort(i, value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
random.setSeed(seed)
0.until(Capacity, 2).foreach { i =>
val value = random.nextInt().asInstanceOf[Short]
assert(value == wrappedBuf.getShort(i))
}
}
test("test random unsigned short access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
0.until(Capacity, 2).foreach { i =>
val value = random.nextInt().asInstanceOf[Short]
wrapped.setShort(i, value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
random.setSeed(seed)
0.until(Capacity, 2).foreach { i =>
val value = random.nextInt() & 0xFFFF
assert(value == wrappedBuf.getUnsignedShort(i))
}
}
test("test random medium access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
0.until(Capacity - 2, 3).foreach { i =>
val value = random.nextInt()
wrapped.setMedium(i, value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
random.setSeed(seed)
0.until(Capacity - 2, 3).foreach { i =>
val value = random.nextInt() << 8 >> 8
assert(value == wrappedBuf.getMedium(i))
}
}
test("test random unsigned medium access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
0.until(Capacity - 2, 3).foreach { i =>
val value = random.nextInt()
wrapped.setMedium(i, value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
random.setSeed(seed)
0.until(Capacity - 2, 3).foreach { i =>
val value = random.nextInt() & 0x00FFFFFF
assert(value == wrappedBuf.getUnsignedMedium(i))
}
}
test("test random int access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
0.until(Capacity - 3, 4).foreach { i =>
val value = random.nextInt()
wrapped.setInt(i, value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
random.setSeed(seed)
0.until(Capacity - 3, 4).foreach { i =>
val value = random.nextInt()
assert(value == wrappedBuf.getInt(i))
}
}
test("test random unsigned int access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
0.until(Capacity - 3, 4).foreach { i =>
val value = random.nextInt()
wrapped.setInt(i, value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
random.setSeed(seed)
0.until(Capacity - 3, 4).foreach { i =>
val value = random.nextInt() & 0xFFFFFFFFL
assert(value == wrappedBuf.getUnsignedInt(i))
}
}
test("test random long access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
0.until(Capacity - 7, 8).foreach { i =>
val value = random.nextLong()
wrapped.setLong(i, value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
random.setSeed(seed)
0.until(Capacity - 7, 8).foreach { i =>
val value = random.nextLong()
assert(value == wrappedBuf.getLong(i))
}
}
test("setZero") {
intercept[ReadOnlyBufferException] {
buffer.setZero(0, 1)
}
}
test("sequential byte access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
wrapped.writerIndex(0)
0.until(Capacity).foreach { i =>
val value = random.nextInt().asInstanceOf[Byte]
wrapped.writeByte(value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
assert(0 == wrappedBuf.readerIndex)
random.setSeed(seed)
0.until(Capacity).foreach { i =>
val value = random.nextInt().asInstanceOf[Byte]
assert(i == wrappedBuf.readerIndex)
assert(wrappedBuf.isReadable)
assert(value == wrappedBuf.readByte())
}
assert(wrappedBuf.maxCapacity == wrappedBuf.readerIndex)
assert(!wrappedBuf.isReadable)
}
test("sequential unsigned byte access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
wrapped.writerIndex(0)
0.until(Capacity).foreach { i =>
val value = random.nextInt().asInstanceOf[Byte]
wrapped.writeByte(value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
assert(0 == wrappedBuf.readerIndex)
random.setSeed(seed)
0.until(Capacity).foreach { i =>
val value = random.nextInt() & 0xFF
assert(i == wrappedBuf.readerIndex)
assert(wrappedBuf.isReadable)
assert(value == wrappedBuf.readUnsignedByte())
}
assert(wrappedBuf.capacity == wrappedBuf.readerIndex)
assert(!wrappedBuf.isReadable)
}
test("sequential short access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
wrapped.writerIndex(0)
0.until(Capacity, 2).foreach { i =>
val value = random.nextInt().asInstanceOf[Short]
wrapped.writeShort(value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
assert(0 == wrappedBuf.readerIndex)
random.setSeed(seed)
0.until(Capacity, 2).foreach { i =>
val value = random.nextInt().asInstanceOf[Short]
assert(i == wrappedBuf.readerIndex)
assert(wrappedBuf.isReadable)
assert(value == wrappedBuf.readShort())
}
assert(wrappedBuf.capacity == wrappedBuf.readerIndex)
assert(!wrappedBuf.isReadable)
}
test("sequential unsigned short access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
wrapped.writerIndex(0)
0.until(Capacity, 2).foreach { i =>
val value = random.nextInt().asInstanceOf[Short]
wrapped.writeShort(value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
assert(0 == wrappedBuf.readerIndex)
random.setSeed(seed)
0.until(Capacity, 2).foreach { i =>
val value = random.nextInt() & 0xFFFF
assert(i == wrappedBuf.readerIndex)
assert(wrappedBuf.isReadable)
assert(value == wrappedBuf.readUnsignedShort())
}
assert(wrappedBuf.capacity == wrappedBuf.readerIndex)
assert(!wrappedBuf.isReadable)
}
test("sequential medium access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
wrapped.writerIndex(0)
val limit = Capacity / 3 * 3
0.until(limit, 3).foreach { i =>
val value = random.nextInt()
wrapped.writeMedium(value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
assert(0 == wrappedBuf.readerIndex)
random.setSeed(seed)
0.until(limit, 3).foreach { i =>
val value = random.nextInt() << 8 >> 8
assert(i == wrappedBuf.readerIndex)
assert(wrappedBuf.isReadable)
assert(value == wrappedBuf.readMedium())
}
assert(limit == wrappedBuf.readerIndex)
}
test("sequential unsigned medium access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
wrapped.writerIndex(0)
val limit = Capacity / 3 * 3
0.until(limit, 3).foreach { i =>
val value = random.nextInt()
wrapped.writeMedium(value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
assert(0 == wrappedBuf.readerIndex)
random.setSeed(seed)
0.until(limit, 3).foreach { i =>
val value = random.nextInt() & 0x00FFFFFF
assert(i == wrappedBuf.readerIndex)
assert(wrappedBuf.isReadable)
assert(value == wrappedBuf.readUnsignedMedium())
}
assert(limit == wrappedBuf.readerIndex)
}
test("sequential int access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
wrapped.writerIndex(0)
0.until(Capacity, 4).foreach { i =>
val value = random.nextInt()
wrapped.writeInt(value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
assert(0 == wrappedBuf.readerIndex)
random.setSeed(seed)
0.until(Capacity, 4).foreach { i =>
val value = random.nextInt()
assert(i == wrappedBuf.readerIndex)
assert(wrappedBuf.isReadable)
assert(value == wrappedBuf.readInt())
}
assert(Capacity == wrappedBuf.readerIndex)
}
test("sequential unsigned int access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
wrapped.writerIndex(0)
0.until(Capacity, 4).foreach { i =>
val value = random.nextInt()
wrapped.writeInt(value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
assert(0 == wrappedBuf.readerIndex)
random.setSeed(seed)
0.until(Capacity, 4).foreach { i =>
val value = random.nextInt() & 0xFFFFFFFFL
assert(i == wrappedBuf.readerIndex)
assert(wrappedBuf.isReadable)
assert(value == wrappedBuf.readUnsignedInt())
}
assert(Capacity == wrappedBuf.readerIndex)
}
test("sequential long access") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
wrapped.writerIndex(0)
0.until(Capacity, 8).foreach { i =>
val value = random.nextLong()
wrapped.writeLong(value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
assert(0 == wrappedBuf.readerIndex)
random.setSeed(seed)
0.until(Capacity, 8).foreach { i =>
val value = random.nextLong()
assert(i == wrappedBuf.readerIndex)
assert(wrappedBuf.isReadable)
assert(value == wrappedBuf.readLong())
}
assert(Capacity == wrappedBuf.readerIndex)
}
test("byte array transfer") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val value = new Array[Byte](BlockSize * 2)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(value)
wrapped.setBytes(i, value, random.nextInt(BlockSize), BlockSize)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
random.setSeed(seed)
val expectedValue = new Array[Byte](BlockSize * 2)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValue)
val valueOffset = random.nextInt(BlockSize)
wrappedBuf.getBytes(i, value, valueOffset, BlockSize)
valueOffset.until(valueOffset + BlockSize).foreach { j =>
assertEquals(expectedValue(j), value(j))
}
}
}
test("random byte array transfer 1") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val value = new Array[Byte](BlockSize)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(value)
wrapped.setBytes(i, value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
random.setSeed(seed)
val expectedValueContent = new Array[Byte](BlockSize)
val expectedValue = Unpooled.wrappedBuffer(expectedValueContent)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValueContent)
wrappedBuf.getBytes(i, value)
0.until(BlockSize).foreach { j =>
assertEquals(expectedValue.getByte(j), value(j))
}
}
}
test("random byte array transfer 2") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val value = new Array[Byte](BlockSize * 2)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(value)
wrapped.setBytes(i, value, random.nextInt(BlockSize), BlockSize)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
random.setSeed(seed)
val expectedValueContent = new Array[Byte](BlockSize * 2)
val expectedValue = Unpooled.wrappedBuffer(expectedValueContent)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValueContent)
val valueOffset = random.nextInt(BlockSize)
wrappedBuf.getBytes(i, value, valueOffset, BlockSize)
valueOffset.until(valueOffset + BlockSize).foreach { j =>
assertEquals(expectedValue.getByte(j), value(j))
}
}
}
test("random heap buffer transfer 1") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val valueContent = new Array[Byte](BlockSize)
val value = Unpooled.wrappedBuffer(valueContent)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(valueContent)
value.setIndex(0, BlockSize)
wrapped.setBytes(i, value)
assertEquals(BlockSize, value.readerIndex)
assertEquals(BlockSize, value.writerIndex())
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
random.setSeed(seed)
val expectedValueContent = new Array[Byte](BlockSize)
val expectedValue = Unpooled.wrappedBuffer(expectedValueContent)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValueContent)
value.clear()
wrappedBuf.getBytes(i, value)
assertEquals(0, value.readerIndex)
assertEquals(BlockSize, value.writerIndex())
0.until(BlockSize).foreach { j =>
assertEquals(expectedValue.getByte(j), value.getByte(j))
}
}
}
test("random heap buffer transfer 2") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val valueContent = new Array[Byte](BlockSize * 2)
val value = Unpooled.wrappedBuffer(valueContent)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(valueContent)
wrapped.setBytes(i, value, random.nextInt(BlockSize), BlockSize)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
random.setSeed(seed)
val expectedValueContent = new Array[Byte](BlockSize * 2)
val expectedValue = Unpooled.wrappedBuffer(expectedValueContent)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValueContent)
val valueOffset = random.nextInt(BlockSize)
wrappedBuf.getBytes(i, value, valueOffset, BlockSize)
valueOffset.until(valueOffset + BlockSize).foreach { j =>
assertEquals(expectedValue.getByte(j), value.getByte(j))
}
}
}
test("random direct buffer transfer") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val tmp = new Array[Byte](BlockSize * 2)
val value = Unpooled.directBuffer(BlockSize * 2)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(tmp)
value.setBytes(0, tmp, 0, value.capacity)
wrapped.setBytes(i, value, random.nextInt(BlockSize), BlockSize)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
random.setSeed(seed)
val expectedValue = Unpooled.directBuffer(BlockSize * 2)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(tmp)
expectedValue.setBytes(0, tmp, 0, expectedValue.capacity)
val valueOffset = random.nextInt(BlockSize)
wrappedBuf.getBytes(i, value, valueOffset, BlockSize)
valueOffset.until(valueOffset + BlockSize).foreach { j =>
assertEquals(expectedValue.getByte(j), value.getByte(j))
}
}
}
test("random ByteBuffer transfer") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val value = ByteBuffer.allocate(BlockSize * 2)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(value.array())
value.clear().position(random.nextInt(BlockSize))
value.limit(value.position() + BlockSize)
wrapped.setBytes(i, value)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
random.setSeed(seed)
val expectedValue = ByteBuffer.allocate(BlockSize * 2)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValue.array())
val valueOffset = random.nextInt(BlockSize)
value.clear().position(valueOffset).limit(valueOffset + BlockSize)
wrappedBuf.getBytes(i, value)
assertEquals(valueOffset + BlockSize, value.position())
valueOffset.until(valueOffset + BlockSize).foreach { j =>
assertEquals(expectedValue.get(j), value.get(j))
}
}
}
test("sequential Array[Byte] transfer 1") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
val value = new Array[Byte](BlockSize)
wrapped.writerIndex(0)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(value)
assertEquals(0, wrappedBuf.readerIndex)
wrapped.writeBytes(value)
}
random.setSeed(seed)
val expectedValue = new Array[Byte](BlockSize)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValue)
assert(i == wrappedBuf.readerIndex)
assertEquals(Capacity, wrappedBuf.writerIndex())
wrappedBuf.readBytes(value)
0.until(BlockSize).foreach { j =>
assertEquals(expectedValue(j), value(j))
}
}
}
test("sequential Array[Byte] transfer 2") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
val value = new Array[Byte](BlockSize * 2)
wrapped.writerIndex(0)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(value)
assertEquals(0, wrappedBuf.readerIndex)
val readerIndex = random.nextInt(BlockSize)
wrapped.writeBytes(value, readerIndex, BlockSize)
}
random.setSeed(seed)
val expectedValue = new Array[Byte](BlockSize * 2)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValue)
val valueOffset = random.nextInt(BlockSize)
assert(i == wrappedBuf.readerIndex)
assertEquals(Capacity, wrappedBuf.writerIndex())
wrappedBuf.readBytes(value, valueOffset, BlockSize)
valueOffset.until(valueOffset + BlockSize).foreach { j =>
assertEquals(expectedValue(j), value(j))
}
}
}
test("sequential heap buffer transfer 1") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
val valueContent = new Array[Byte](BlockSize * 2)
val value = Unpooled.wrappedBuffer(valueContent)
wrapped.writerIndex(0)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(valueContent)
assertEquals(0, wrappedBuf.readerIndex)
wrapped.writeBytes(value, random.nextInt(BlockSize), BlockSize)
assertEquals(0, value.readerIndex)
assertEquals(valueContent.length, value.writerIndex())
}
random.setSeed(seed)
val expectedValueContent = new Array[Byte](BlockSize * 2)
val expectedValue = Unpooled.wrappedBuffer(expectedValueContent)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValueContent)
val valueOffset = random.nextInt(BlockSize)
assert(i == wrappedBuf.readerIndex)
assertEquals(Capacity, wrappedBuf.writerIndex())
wrappedBuf.readBytes(value, valueOffset, BlockSize)
valueOffset.until(valueOffset + BlockSize).foreach { j =>
assertEquals(expectedValue.getByte(j), value.getByte(j))
}
assertEquals(0, value.readerIndex)
assertEquals(valueContent.length, value.writerIndex())
}
}
test("sequential heap buffer transfer 2") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
val valueContent = new Array[Byte](BlockSize * 2)
val value = Unpooled.wrappedBuffer(valueContent)
wrapped.writerIndex(0)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(valueContent)
assertEquals(0, wrappedBuf.readerIndex)
val readerIndex = random.nextInt(BlockSize)
value.readerIndex(readerIndex)
value.writerIndex(readerIndex + BlockSize)
wrapped.writeBytes(value)
assertEquals(readerIndex + BlockSize, value.writerIndex())
assertEquals(value.writerIndex(), value.readerIndex)
}
random.setSeed(seed)
val expectedValueContent = new Array[Byte](BlockSize * 2)
val expectedValue = Unpooled.wrappedBuffer(expectedValueContent)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValueContent)
val valueOffset = random.nextInt(BlockSize)
assert(i == wrappedBuf.readerIndex)
assertEquals(Capacity, wrappedBuf.writerIndex())
value.readerIndex(valueOffset)
value.writerIndex(valueOffset)
wrappedBuf.readBytes(value, BlockSize)
valueOffset.until(valueOffset + BlockSize).foreach { j =>
assertEquals(expectedValue.getByte(j), value.getByte(j))
}
assertEquals(valueOffset, value.readerIndex)
assertEquals(valueOffset + BlockSize, value.writerIndex())
}
}
test("sequential direct buffer transfer 1") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
val valueContent = new Array[Byte](BlockSize * 2)
val value = Unpooled.directBuffer(BlockSize * 2)
wrapped.writerIndex(0)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(valueContent)
value.setBytes(0, valueContent)
assertEquals(0, wrappedBuf.readerIndex)
wrapped.writeBytes(value, random.nextInt(BlockSize), BlockSize)
assertEquals(0, value.readerIndex)
assertEquals(0, value.writerIndex())
}
random.setSeed(seed)
val expectedValueContent = new Array[Byte](BlockSize * 2)
val expectedValue = Unpooled.wrappedBuffer(expectedValueContent)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValueContent)
val valueOffset = random.nextInt(BlockSize)
value.setBytes(0, valueContent)
assert(i == wrappedBuf.readerIndex)
assertEquals(Capacity, wrappedBuf.writerIndex())
wrappedBuf.readBytes(value, valueOffset, BlockSize)
valueOffset.until(valueOffset + BlockSize).foreach { j =>
assertEquals(expectedValue.getByte(j), value.getByte(j))
}
assertEquals(0, value.readerIndex)
assertEquals(0, value.writerIndex())
}
}
test("sequential direct buffer transfer 2") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
val valueContent = new Array[Byte](BlockSize * 2)
val value = Unpooled.directBuffer(BlockSize * 2)
wrapped.writerIndex(0)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(valueContent)
value.setBytes(0, valueContent)
assertEquals(0, wrappedBuf.readerIndex)
val readerIndex = random.nextInt(BlockSize)
value.readerIndex(0)
value.writerIndex(readerIndex + BlockSize)
value.readerIndex(readerIndex)
wrapped.writeBytes(value)
assertEquals(readerIndex + BlockSize, value.writerIndex())
assertEquals(value.writerIndex(), value.readerIndex)
}
random.setSeed(seed)
val expectedValueContent = new Array[Byte](BlockSize * 2)
val expectedValue = Unpooled.wrappedBuffer(expectedValueContent)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValueContent)
value.setBytes(0, valueContent)
val valueOffset = random.nextInt(BlockSize)
assert(i == wrappedBuf.readerIndex)
assertEquals(Capacity, wrappedBuf.writerIndex())
value.readerIndex(valueOffset)
value.writerIndex(valueOffset)
wrappedBuf.readBytes(value, BlockSize)
valueOffset.until(valueOffset + BlockSize).foreach { j =>
assertEquals(expectedValue.getByte(j), value.getByte(j))
}
assertEquals(valueOffset, value.readerIndex)
assertEquals(valueOffset + BlockSize, value.writerIndex())
}
}
test("sequential ByteBuffer-backed heap buffer transfer 1") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
val valueContent = new Array[Byte](BlockSize * 2)
val value = Unpooled.wrappedBuffer(ByteBuffer.allocate(BlockSize * 2))
value.writerIndex(0)
wrapped.writerIndex(0)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(valueContent)
value.setBytes(0, valueContent)
assertEquals(0, wrappedBuf.readerIndex)
wrapped.writeBytes(value, random.nextInt(BlockSize), BlockSize)
assertEquals(0, value.readerIndex)
assertEquals(0, value.writerIndex())
}
random.setSeed(seed)
val expectedValueContent = new Array[Byte](BlockSize * 2)
val expectedValue = Unpooled.wrappedBuffer(expectedValueContent)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValueContent)
val valueOffset = random.nextInt(BlockSize)
value.setBytes(0, valueContent)
assert(i == wrappedBuf.readerIndex)
assertEquals(Capacity, wrappedBuf.writerIndex())
wrappedBuf.readBytes(value, valueOffset, BlockSize)
valueOffset.until(valueOffset + BlockSize).foreach { j =>
assertEquals(expectedValue.getByte(j), value.getByte(j))
}
assertEquals(0, value.readerIndex)
assertEquals(0, value.writerIndex())
}
}
test("sequential ByteBuffer-backed heap buffer transfer 2") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
val valueContent = new Array[Byte](BlockSize * 2)
val value = Unpooled.wrappedBuffer(ByteBuffer.allocate(BlockSize * 2))
value.writerIndex(0)
wrapped.writerIndex(0)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(valueContent)
value.setBytes(0, valueContent)
assertEquals(0, wrappedBuf.readerIndex)
val readerIndex = random.nextInt(BlockSize)
value.readerIndex(0)
value.writerIndex(readerIndex + BlockSize)
value.readerIndex(readerIndex)
wrapped.writeBytes(value)
assertEquals(readerIndex + BlockSize, value.writerIndex())
assertEquals(value.writerIndex(), value.readerIndex)
}
random.setSeed(seed)
val expectedValueContent = new Array[Byte](BlockSize * 2)
val expectedValue = Unpooled.wrappedBuffer(expectedValueContent)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValueContent)
value.setBytes(0, valueContent)
val valueOffset = random.nextInt(BlockSize)
assert(i == wrappedBuf.readerIndex)
assertEquals(Capacity, wrappedBuf.writerIndex())
value.readerIndex(valueOffset)
value.writerIndex(valueOffset)
wrappedBuf.readBytes(value, BlockSize)
valueOffset.until(valueOffset + BlockSize).foreach { j =>
assertEquals(expectedValue.getByte(j), value.getByte(j))
}
assertEquals(valueOffset, value.readerIndex)
assertEquals(valueOffset + BlockSize, value.writerIndex())
}
}
test("sequential ByteBuffer transfer") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
wrapped.writerIndex(0)
val value = ByteBuffer.allocate(BlockSize * 2)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(value.array())
value.clear().position(random.nextInt(BlockSize))
value.limit(value.position() + BlockSize)
wrapped.writeBytes(value)
}
random.setSeed(seed)
val expectedValue = ByteBuffer.allocate(BlockSize * 2)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValue.array())
val valueOffset = random.nextInt(BlockSize)
value.clear().position(valueOffset).limit(valueOffset + BlockSize)
wrappedBuf.readBytes(value)
assertEquals(valueOffset + BlockSize, value.position())
valueOffset.until(valueOffset + BlockSize).foreach { j =>
assertEquals(expectedValue.get(j), value.get(j))
}
}
}
test("sequential copied buffer transfer") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
wrapped.writerIndex(0)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
val value = new Array[Byte](BlockSize)
random.nextBytes(value)
assertEquals(0, wrappedBuf.readerIndex)
wrapped.writeBytes(value)
}
random.setSeed(seed)
val expectedValue = new Array[Byte](BlockSize)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValue)
assert(i == wrappedBuf.readerIndex)
assertEquals(Capacity, wrappedBuf.writerIndex())
val actualValue = wrappedBuf.readBytes(BlockSize)
assertEquals(Unpooled.wrappedBuffer(expectedValue), actualValue)
}
}
test("sequential slice") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
wrapped.writerIndex(0)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
val value = new Array[Byte](BlockSize)
random.nextBytes(value)
assertEquals(0, wrappedBuf.readerIndex)
wrapped.writeBytes(value)
}
random.setSeed(seed)
val expectedValue = new Array[Byte](BlockSize)
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
random.nextBytes(expectedValue)
assert(i == wrappedBuf.readerIndex)
assertEquals(Capacity, wrappedBuf.writerIndex())
val actualValue = wrappedBuf.readSlice(BlockSize)
assert(Unpooled.wrappedBuffer(expectedValue) == actualValue)
      // Make sure it is a read-only buffer.
intercept[ReadOnlyBufferException] {
actualValue.setByte(0, 0)
}
}
}
test("write zero") {
intercept[IllegalArgumentException] {
buffer.writeZero(-1)
}
buffer.clear()
intercept[ReadOnlyBufferException] {
buffer.writeZero(Capacity)
}
}
test("discardReadBytes") {
val bytes = new Array[Byte](Capacity)
val wrapped = Unpooled.wrappedBuffer(bytes)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
wrapped.writerIndex(0)
0.until(Capacity, 4).foreach { i =>
wrapped.writeInt(i)
}
wrappedBuf.readByte()
intercept[ReadOnlyBufferException] {
wrappedBuf.discardReadBytes()
}
}
test("stream transfer 1") {
val bytes = new Array[Byte](Capacity)
random.nextBytes(bytes)
val in = new ByteArrayInputStream(bytes, 0, Capacity)
intercept[ReadOnlyBufferException] {
buffer.setBytes(0, in, Capacity)
}
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
val out = new ByteArrayOutputStream()
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
wrappedBuf.getBytes(i, out, BlockSize)
}
assert(Arrays.equals(bytes, out.toByteArray()))
}
test("stream transfer 2") {
val bytes = new Array[Byte](Capacity)
random.nextBytes(bytes)
val in = new ByteArrayInputStream(bytes, 0, Capacity)
buffer.clear()
intercept[ReadOnlyBufferException] {
buffer.writeBytes(in, Capacity)
}
assert(buffer.writerIndex() == 0)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
val out = new ByteArrayOutputStream()
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
assert(i == wrappedBuf.readerIndex)
wrappedBuf.readBytes(out, BlockSize)
assertEquals(i + BlockSize, wrappedBuf.readerIndex)
}
assert(Arrays.equals(bytes, out.toByteArray()))
}
test("copy") {
val bytes = new Array[Byte](Capacity)
random.nextBytes(bytes)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
val readerIndex = Capacity / 3
val writerIndex = Capacity * 2 / 3
wrappedBuf.setIndex(readerIndex, writerIndex)
// Make sure all properties are copied.
val copy = wrappedBuf.copy()
assertEquals(0, copy.readerIndex)
assertEquals(wrappedBuf.readableBytes, copy.writerIndex())
assertEquals(wrappedBuf.readableBytes, copy.capacity)
0.until(copy.capacity).foreach { i =>
assertEquals(wrappedBuf.getByte(i + readerIndex), copy.getByte(i))
}
}
test("duplicate") {
val bytes = new Array[Byte](Capacity)
random.nextBytes(bytes)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
val readerIndex = Capacity / 3
val writerIndex = Capacity * 2 / 3
wrappedBuf.setIndex(readerIndex, writerIndex)
// Make sure all properties are copied.
val duplicate = wrappedBuf.duplicate()
assertEquals(wrappedBuf.readerIndex, duplicate.readerIndex)
assertEquals(wrappedBuf.writerIndex(), duplicate.writerIndex())
assertEquals(wrappedBuf.capacity, duplicate.capacity)
0.until(duplicate.capacity).foreach { i =>
assertEquals(wrappedBuf.getByte(i), duplicate.getByte(i))
}
// Make sure the duplicate is read-only
intercept[ReadOnlyBufferException] {
duplicate.setByte(1, 1)
}
}
test("slice index") {
assertEquals(0, buffer.slice(0, buffer.capacity).readerIndex)
assertEquals(0, buffer.slice(0, buffer.capacity - 1).readerIndex)
assertEquals(0, buffer.slice(1, buffer.capacity - 1).readerIndex)
assertEquals(0, buffer.slice(1, buffer.capacity - 2).readerIndex)
assertEquals(buffer.capacity, buffer.slice(0, buffer.capacity).writerIndex())
assertEquals(buffer.capacity - 1, buffer.slice(0, buffer.capacity - 1).writerIndex())
assertEquals(buffer.capacity - 1, buffer.slice(1, buffer.capacity - 1).writerIndex())
assertEquals(buffer.capacity - 2, buffer.slice(1, buffer.capacity - 2).writerIndex())
}
test("equals") {
assert(!buffer.equals(null))
assert(!buffer.equals(new Object()))
val value = new Array[Byte](32)
random.nextBytes(value)
val bytes = Arrays.copyOf(value, Capacity)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
wrappedBuf.setIndex(0, value.length)
assertEquals(wrappedBuf, Unpooled.wrappedBuffer(value))
value(0) = (value(0) + 1).asInstanceOf[Byte]
assert(!wrappedBuf.equals(Unpooled.wrappedBuffer(value)))
}
test("compareTo") {
intercept[NullPointerException] {
buffer.compareTo(null)
}
    // Fill with random data
val value = new Array[Byte](32)
random.nextBytes(value)
// Prevent overflow / underflow
if (value(0) == 0) {
value(0) = (value(0) + 1).asInstanceOf[Byte]
} else if (value(0) == -1) {
value(0) = (value(0) - 1).asInstanceOf[Byte]
}
val bytes = Arrays.copyOf(value, Capacity)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(bytes))
wrappedBuf.setIndex(0, value.length)
assertEquals(0, wrappedBuf.compareTo(Unpooled.wrappedBuffer(value)))
value(0) = (value(0) + 1).asInstanceOf[Byte]
assert(wrappedBuf.compareTo(Unpooled.wrappedBuffer(value)) < 0)
value(0) = (value(0) - 2).asInstanceOf[Byte]
assert(wrappedBuf.compareTo(Unpooled.wrappedBuffer(value)) > 0)
value(0) = (value(0) + 1).asInstanceOf[Byte]
assert(wrappedBuf.compareTo(Unpooled.wrappedBuffer(value, 0, 31)) > 0)
assert(wrappedBuf.slice(0, 31).compareTo(Unpooled.wrappedBuffer(value)) < 0)
}
test("toString") {
val msg = "Hello, World!"
val wrappedBuf = BufAsByteBuf.Owned(Buf.Utf8(msg))
assert("Hello, World!" == wrappedBuf.toString(CharsetUtil.UTF_8))
}
test("indexOf") {
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(Array[Byte](1,2,3,2,1)))
assertEquals(-1, wrappedBuf.indexOf(1, 4, 1: Byte))
assertEquals(-1, wrappedBuf.indexOf(4, 1, 1: Byte))
assertEquals(1, wrappedBuf.indexOf(1, 4, 2: Byte))
assertEquals(3, wrappedBuf.indexOf(4, 1, 2: Byte))
}
test("nioBuffer 1") {
val value = new Array[Byte](Capacity)
random.nextBytes(value)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(value))
assertEquals(ByteBuffer.wrap(value), wrappedBuf.nioBuffer())
}
test("nioBuffer 2") {
val value = new Array[Byte](Capacity)
random.nextBytes(value)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(value))
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
assertEquals(ByteBuffer.wrap(value, i, BlockSize), wrappedBuf.nioBuffer(i, BlockSize))
}
}
test("nioBuffers 1") {
val value = new Array[Byte](Capacity)
random.nextBytes(value)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(value))
val nioBuffers = wrappedBuf.nioBuffers()
var length = 0
for (b <- nioBuffers) {
length = length + b.remaining
}
val nioBuffer = ByteBuffer.allocate(length)
for (b <- nioBuffers) {
nioBuffer.put(b)
}
nioBuffer.flip()
assertEquals(ByteBuffer.wrap(value), nioBuffer)
}
test("nioBuffers 2") {
val value = new Array[Byte](Capacity)
random.nextBytes(value)
val wrappedBuf = BufAsByteBuf.Owned(Buf.ByteArray.Owned(value))
0.until(Capacity - BlockSize + 1, BlockSize).foreach { i =>
val nioBuffers = wrappedBuf.nioBuffers(i, BlockSize)
val nioBuffer = ByteBuffer.allocate(BlockSize)
for (b <- nioBuffers) {
nioBuffer.put(b)
}
nioBuffer.flip()
assertEquals(ByteBuffer.wrap(value, i, BlockSize), nioBuffer)
}
}
test("skipBytes") {
buffer.setIndex(Capacity / 4, Capacity / 2)
buffer.skipBytes(Capacity / 4)
assertEquals(Capacity / 4 * 2, buffer.readerIndex)
intercept[IndexOutOfBoundsException] {
buffer.skipBytes(Capacity / 4 + 1)
}
// Should remain unchanged.
assertEquals(Capacity / 4 * 2, buffer.readerIndex)
}
test("hashCode") {
val a = Array[Byte](1, 2, 3, 4, 5, 6, 7, 8, 9, 0, 1, 2, 3, 4, 5)
val b = Array[Byte](6, 7, 8, 9, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 9)
val cbA = Unpooled.buffer(15)
val cbB = Unpooled.directBuffer(15)
cbA.writeBytes(a)
cbB.writeBytes(b)
val set = new HashSet[ByteBuf]()
set.add(cbA)
set.add(cbB)
assertEquals(2, set.size())
assert(set.contains(cbA.copy()))
assert(set.contains(cbB.copy()))
val wrappedBufA = BufAsByteBuf.Owned(Buf.ByteArray.Owned(a))
assert(set.remove(wrappedBufA))
assert(!set.contains(cbA))
assertEquals(1, set.size())
val wrappedBufB = BufAsByteBuf.Owned(Buf.ByteArray.Owned(b))
assert(set.remove(wrappedBufB))
assert(!set.contains(cbB))
assertEquals(0, set.size())
}
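  // Registers, for the given Buf, one test per BufAsByteBuf constructor (Owned and
  // Shared) verifying that the wrapped ByteBuf exposes exactly the extracted bytes.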
def testConstructAndExtract(name: String, buf: Buf): Unit = {
test(s"construct and extract BufAsByteBuf.Owned from $name") {
val bytes = Buf.ByteArray.Owned.extract(buf)
val bb = BufAsByteBuf.Owned(buf)
assert(bb.readableBytes == bytes.length)
val wrappedBytes = new Array[Byte](buf.length)
bb.getBytes(0, wrappedBytes)
assert(wrappedBytes.toSeq == bytes.toSeq)
}
test(s"construct and extract BufAsByteBuf.Shared from $name") {
val bytes = Buf.ByteArray.Shared.extract(buf)
val bb = BufAsByteBuf.Shared(buf)
assert(bb.readableBytes == bytes.length)
val wrappedBytes = new Array[Byte](buf.length)
bb.getBytes(0, wrappedBytes)
assert(wrappedBytes.toSeq == bytes.toSeq)
}
}
testConstructAndExtract("empty Buf", Buf.Empty)
testConstructAndExtract(
"Buf.ByteArray",
Buf.ByteArray.Owned(Array[Byte](0, 1, 2, 3, 4))
)
testConstructAndExtract(
"Buf.ByteArray with begin and end",
Buf.ByteArray.Owned(Array[Byte](0, 1, 2, 3, 4), 3, 4)
)
testConstructAndExtract(
"Buf.ByteBuffer",
Buf.ByteBuffer.Owned(
ByteBuffer.wrap(Array[Byte](0, 1, 2, 3, 4))
)
)
}
| spockz/finagle | finagle-netty4/src/test/scala/com/twitter/finagle/netty4/BufAsByteBufTest.scala | Scala | apache-2.0 | 48,733 |
import stainless.math.BitVectors._
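// Verification benchmarks for stainless.math.BitVectors: arithmetic, bitwise ops,
// comparisons, shifts, min/max bounds, and widening/narrowing between the
// fixed-width integer types (Int3, Int4, Int10, Int14, Int100, Int200).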
object BitVectors2 {
def test1(n1: Int3, n2: Int3, n3: Int3) = {
require(n2 >= (0: Int3) && n2 <= n3 && n1 <= n3 - n2 && n1 + n2 == n3)
assert(n3 - n1 == n2)
}
def test2(n: Int10) = {
assert((n ^ n) == (0: Int10))
}
def test3(n: Int100) = {
assert((n & n) == n)
assert((n & 0) == (0: Int100))
}
def test4(n: Int200) = {
assert((n | n) == n)
assert((n | 0) == n)
}
def test5(n1: Int200, n2: Int200) = {
require(n1 > n2)
assert(n2 < n1)
}
def test6(n1: Int200, n2: Int200) = {
require(n1 > n2)
assert(n1 >= n2 + 1)
}
def test8(n: Int100) = {
require(n < (0: Int100))
assert(n % 2 == -(n mod 2))
assert((n % 2 == (0: Int100)) || (n % 2 == (-1: Int100)))
}
def test10() = {
assert(min[Int3] == (-4: Int3))
assert(max[Int3] == (3: Int3))
}
def test11(n: Int100) = {
require((0: Int100) <= n && n <= max[Int100] / 2)
assert(n << 1 == n * 2)
}
def test12(n: Int100) = {
require((0: Int100) <= n && n < max[Int100] / 2)
assert(n >> 1 == n / 2)
}
def test13(n: Int10) = {
require(n == (42: Int10))
val m = n.widen[Int14]
assert(m == (42: Int14))
}
def test14(n: Int4) = {
require(n == (7: Int4))
val m = n.narrow[Int3]
assert(m == (-1: Int3))
}
}
| epfl-lara/stainless | frontends/benchmarks/verification/valid/MicroTests/dotty/BitVectors2.scala | Scala | apache-2.0 | 1,340 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.tools {
import org.scalatest.{FunSuite, Outcome, DispatchReporter, Resources, Retries}
import org.scalatools.testing.{Event, EventHandler, Result, Logger, Runner => TestingRunner}
import org.scalatest.SharedHelpers.{EventRecordingReporter, createTempDirectory}
import org.scalatest.exceptions.NotAllowedException
import org.scalatest.tagobjects.Retryable
import java.io.File
// testing runner.run:
// def run(testClassName: String, fingerprint: TestFingerprint, args: Array[String]): Array[Event]
class ScalaTestRunnerSuite extends FunSuite with Retries {
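    // Tests tagged as Retryable (the -W AlertProvided test below) are retried if they fail.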
override def withFixture(test: NoArgTest) = {
if (isRetryable(test))
withRetryOnFailure {
super.withFixture(test)
}
else super.withFixture(test)
}
test("call with simple class") {
val results = run("org.scalatest.tools.test.SimpleTest")
assert(results(0).testName === "hello, world")
assert(results(0).result === Result.Success)
}
test("three different results") {
val results = run("org.scalatest.tools.test.ThreeTestsTest")
assert(results(0).testName === "hello, world")
assert(results(0).result === Result.Success)
assert(results(1).testName === "throw")
assert(results(1).result === Result.Failure)
assert(results(1).error.getMessage === "baah")
assert(results(2).testName === "assert bad")
assert(results(2).result === Result.Failure)
assert(results(2).error.getMessage === "1 did not equal 3")
assert(results.size === 3)
}
test("one tag included") {
val results = run("org.scalatest.tools.test.TagsTest", "-n hello")
assert(results(0).testName === "hello, world")
assert(results(0).result === Result.Success)
assert(results.size === 1)
}
test("two tags included") {
val results = run("org.scalatest.tools.test.TagsTest", Array("-n", "hello helloAgain"))
assert(results(0).testName === "hello, world")
assert(results(0).result === Result.Success)
assert(results(1).testName === "hello, world again")
assert(results(1).result === Result.Success)
assert(results.size === 2)
}
test("one tag excluded") {
val results = run("org.scalatest.tools.test.TagsTest", Array("-l", "hello"))
assert(results(0).testName === "hello, world again")
assert(results(0).result === Result.Success)
assert(results(1).testName === "tag3")
assert(results(1).result === Result.Success)
assert(results(2).testName === "throw")
assert(results(2).result === Result.Failure)
assert(results(2).error.getMessage === "baah")
assert(results(3).testName === "assert bad")
assert(results(3).result === Result.Failure)
assert(results(3).error.getMessage === "1 did not equal 3")
assert(results.size === 4)
}
test("configs") {
val results = run("org.scalatest.tools.test.TestWithConfigMap", "-Djosh=cool")
assert(results(0).testName === "get config")
assert(results(0).result === Result.Success)
val resultsF = run("org.scalatest.tools.test.TestWithConfigMap", "-Djosh=bad")
assert(resultsF(0).testName === "get config")
assert(resultsF(0).result === Result.Failure)
      assert(resultsF(0).error.getMessage === "\"[bad]\" did not equal \"[cool]\"")
}
test("configs 2"){
val results = run("org.scalatest.tools.test.TestWithConfigMap2", "-Da=z -Db=y -Dc=x")
assert(results(0).testName === "get config")
assert(results(0).result === Result.Success)
}
test("illegal arg on private constructor, inaccessible suite"){
val results = run("org.scalatest.tools.test.PrivateConstructor")
assert(results.size === 0)
}
test("@DoNotDiscover suite"){
val results = run("org.scalatest.tools.test.DoNotDiscoverSuite")
assert(results.size === 0)
}
test("skipped test results in Result.Skipped") {
val results = run("org.scalatest.tools.test.SuiteWithSkippedTest")
assert(results.size === 2)
assert(results(0).testName === "dependeeThatFails")
assert(results(0).result === Result.Failure)
assert(results(0).error.getMessage === "fail")
assert(results(1).testName === "depender")
assert(results(1).result === Result.Skipped)
}
test("pending test results in Result.Skipped") {
val results = run("org.scalatest.tools.test.PendingTest")
assert(results.size === 1)
assert(results(0).testName === "i am pending")
assert(results(0).result === Result.Skipped)
}
test("throw IllegalArgumentException when -g is passed in as argument") {
intercept[IllegalArgumentException] {
run("org.scalatest.tools.test.PendingTest", Array("-g"))
}
}
test("-w should execute suites that match the specified package and its sub packages") {
val result1 = run("org.scalatest.tools.test.TagsTest", Array("-w", "org.scalatest.tools"))
assert(result1.size === 5)
val result2 = run("org.scalatest.tools.test.TagsTest", Array("-w", "org.scalatest.tools.test"))
assert(result2.size === 5)
val result3 = run("org.scalatest.SuiteSuite", Array("-w", "org.scalatest.tools.test"))
assert(result3.size === 0)
}
test("-m should execute suites that match the specified package and not its sub packages") {
val result1 = run("org.scalatest.tools.test.TagsTest", Array("-m", "org.scalatest.tools"))
assert(result1.size === 0)
val result2 = run("org.scalatest.tools.test.TagsTest", Array("-m", "org.scalatest.tools.test"))
assert(result2.size === 5)
val result3 = run("org.scalatest.SuiteSuite", Array("-m", "org.scalatest.tools.test"))
assert(result3.size === 0)
val result4 = run("org.scalatest.enablers.NoParamSpec", Array("-m", "org.scalatest.concurrent"))
assert(result4.size === 0)
}
test("ScalaTestRunner.run should throw IllegalArgumentException when -s is passed in") {
val iae = intercept[IllegalArgumentException] {
run("org.scalatest.tools.test.SimpleTest", Array("-s", "org.scalatest.tools.test.SimpleTest"))
}
assert(iae.getMessage === "-s (suite) is not supported when runs in SBT, please use SBT's test-only instead.")
}
test("ScalaTestRunner.run should throw IllegalArgumentException when -j is passed in") {
val iae = intercept[IllegalArgumentException] {
run("org.scalatest.tools.test.SimpleTest", Array("-j", "org.scalatest.tools.test.SimpleTest"))
}
assert(iae.getMessage === "-j (junit) is not supported when runs in SBT.")
}
test("ScalaTestRunner.run should throw IllegalArgumentException when -b is passed in") {
val iae = intercept[IllegalArgumentException] {
run("org.scalatest.tools.test.SimpleTest", Array("-b", "org.scalatest.tools.test.SimpleTest"))
}
assert(iae.getMessage === "-b (testng) is not supported when runs in SBT.")
}
test("ScalaTestRunner.run should throw IllegalArgumentException when -P is passed in") {
val iae = intercept[IllegalArgumentException] {
run("org.scalatest.tools.test.SimpleTest", Array("-P"))
}
assert(iae.getMessage === "-P (concurrent) is not supported when runs in SBT, please use SBT parallel configuration instead.")
}
test("ScalaTestRunner.run should throw IllegalArgumentException when -PS is passed in") {
val iae = intercept[IllegalArgumentException] {
run("org.scalatest.tools.test.SimpleTest", Array("-PS"))
}
assert(iae.getMessage === "-P (concurrent) is not supported when runs in SBT, please use SBT parallel configuration instead.")
}
test("ScalaTestRunner.run should throw IllegalArgumentException when -R is passed in") {
val iae = intercept[IllegalArgumentException] {
run("org.scalatest.tools.test.SimpleTest", Array("-R"))
}
assert(iae.getMessage === "-R (runpath) is not supported when runs in SBT.")
}
test("ScalaTestRunner.run should throw IllegalArgumentException when -A is passed in") {
val iae = intercept[IllegalArgumentException] {
run("org.scalatest.tools.test.SimpleTest", Array("-A", "again.txt"))
}
assert(iae.getMessage === "-A is not supported when runs in SBT, please use SBT's test-quick instead.")
}
test("ScalaTestRunner.run should throw IllegalArgumentException when -q is passed in") {
val iae = intercept[IllegalArgumentException] {
run("org.scalatest.tools.test.SimpleTest", Array("-q", "Spec"))
}
assert(iae.getMessage === "-q is not supported when runs in SBT, please use SBT's test-only or test filter instead.")
}
test("ScalaTestRunner.run should throw IllegalArgumentException when -T is passed in") {
val iae = intercept[IllegalArgumentException] {
run("org.scalatest.tools.test.SimpleTest", Array("-T", "100"))
}
assert(iae.getMessage === "-T is not supported when runs in SBT.")
}
test("ScalaTestRunner.run should be able to pass in custom reporter via -C") {
val framework = new ScalaTestFramework()
val runner: TestingRunner = framework.testRunner(Thread.currentThread.getContextClassLoader, Array(new TestLogger))
val listener = new EventHandler {
def handle(event: Event) {}
}
runner.run("org.scalatest.tools.scalasbt.SampleSuite", fingerprint, listener, Array("-C", classOf[EventRecordingReporter].getName))
framework.RunConfig.reporter.get match {
case Some(dispatchRep: DispatchReporter) =>
dispatchRep.doDispose()
dispatchRep.reporters.find(_.isInstanceOf[EventRecordingReporter]) match {
case Some(recordingRep : EventRecordingReporter) =>
assert(recordingRep.testSucceededEventsReceived.size === 3)
case _ => fail("Expected to find EventRecordingReporter, but not found.")
}
case _ => fail("Expected to find DispatchReporter, but not found.")
}
}
test("-y should do nothing when the task to execute is a chosen style") {
val framework = new ScalaTestFramework()
val runner: TestingRunner = framework.testRunner(Thread.currentThread.getContextClassLoader, Array(new TestLogger))
val listener = new EventHandler {
def handle(event: Event) {}
}
runner.run("org.scalatest.tools.scalasbt.SampleSuite", fingerprint, listener, Array("-y", "org.scalatest.FunSuite", "-C", classOf[EventRecordingReporter].getName))
framework.RunConfig.reporter.get match {
case Some(dispatchRep: DispatchReporter) =>
dispatchRep.doDispose()
dispatchRep.reporters.find(_.isInstanceOf[EventRecordingReporter]) match {
case Some(recordingRep : EventRecordingReporter) =>
assert(recordingRep.testSucceededEventsReceived.size === 3)
assert(recordingRep.suiteCompletedEventsReceived.size === 1)
case _ => fail("Expected to find EventRecordingReporter, but not found.")
}
case _ => fail("Expected to find DispatchReporter, but not found.")
}
}
test("-y should get SuiteAborted event with NotAllowedException when the task to execute is not a chosen style") {
val framework = new ScalaTestFramework()
val runner: TestingRunner = framework.testRunner(Thread.currentThread.getContextClassLoader, Array(new TestLogger))
val listener = new EventHandler {
def handle(event: Event) {}
}
runner.run("org.scalatest.tools.scalasbt.SampleSuite", fingerprint, listener, Array("-y", "org.scalatest.FunSpec", "-C", classOf[EventRecordingReporter].getName))
framework.RunConfig.reporter.get match {
case Some(dispatchRep: DispatchReporter) =>
dispatchRep.doDispose()
dispatchRep.reporters.find(_.isInstanceOf[EventRecordingReporter]) match {
case Some(recordingRep : EventRecordingReporter) =>
assert(recordingRep.testSucceededEventsReceived.size === 0)
val suiteAbortedEvents = recordingRep.suiteAbortedEventsReceived
assert(suiteAbortedEvents.size === 1)
suiteAbortedEvents(0).throwable match {
case Some(e: NotAllowedException) =>
assert(e.getMessage === Resources.notTheChosenStyle("org.scalatest.FunSuite", "org.scalatest.FunSpec"))
case _ => fail("Expected SuiteAborted to carry NotAllowedException, but it did not.")
}
case _ => fail("Expected to find EventRecordingReporter, but not found.")
}
case _ => fail("Expected to find DispatchReporter, but not found.")
}
}
test("-W should cause AlertProvided to be fired", Retryable) {
val framework = new ScalaTestFramework()
val runner: TestingRunner = framework.testRunner(Thread.currentThread.getContextClassLoader, Array(new TestLogger))
val listener = new EventHandler {
def handle(event: Event) {}
}
runner.run("org.scalatest.tools.scalasbt.SlowSampleSuite", fingerprint, listener, Array("-W", "1", "1", "-C", classOf[EventRecordingReporter].getName))
framework.RunConfig.reporter.get match {
case Some(dispatchRep: DispatchReporter) =>
dispatchRep.doDispose()
dispatchRep.reporters.find(_.isInstanceOf[EventRecordingReporter]) match {
case Some(recordingRep : EventRecordingReporter) =>
assert(recordingRep.testSucceededEventsReceived.size === 1)
assert(recordingRep.alertProvidedEventsReceived.size > 0)
case _ => fail("Expected to find EventRecordingReporter, but not found.")
}
case _ => fail("Expected to find DispatchReporter, but not found.")
}
}
test("Runner should support deprecated friendly argument dsl 'include'") {
run("org.scalatest.tools.test.SimpleTest", Array("include(org.scala.a, org.scala.b, org.scala.c)"))
run("org.scalatest.tools.test.SimpleTest", Array("include(\\"org.scala.a\\", \\"org.scala.b\\", \\"org.scala.c\\")"))
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("include")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("include (org.scala.a, org.scala.b, org.scala.c)")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("include (org.scala.a, org.scala.b, org.scala.c")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("includeorg.scala.a, org.scala.b, org.scala.c)")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("include org.scala.a, org.scala.b, org.scala.c")) }
}
test("Runner should support deprecated friendly argument dsl 'exclude'") {
run("org.scalatest.tools.test.SimpleTest", Array("exclude(org.scala.a, org.scala.b, org.scala.c)"))
run("org.scalatest.tools.test.SimpleTest", Array("exclude(\\"org.scala.a\\", \\"org.scala.b\\", \\"org.scala.c\\")"))
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("exclude")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("exclude (org.scala.a, org.scala.b, org.scala.c)")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("exclude (org.scala.a, org.scala.b, org.scala.c")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("excludeorg.scala.a, org.scala.b, org.scala.c)")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("exclude org.scala.a, org.scala.b, org.scala.c")) }
}
test("Runner should support deprecated friendly argument dsl 'stdout'") {
run("org.scalatest.tools.test.SimpleTest", Array("stdout"))
run("org.scalatest.tools.test.SimpleTest", Array("stdout(config=\\"nocolor fullstacks droptestsucceeded\\")"))
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("stdout (config=\\"nocolor fullstacks doptestsucceeded\\")")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("stdout config=\\"nocolor fullstacks doptestsucceeded\\"")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("stdout(config=\\"nocolor fullstacks doptestsucceeded\\"")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("stdoutconfig=\\"nocolor fullstacks doptestsucceeded\\")")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("stdout(confi=\\"nocolor fullstacks doptestsucceeded\\")")) }
}
test("Runner should support deprecated friendly argument dsl 'stderr'") {
run("org.scalatest.tools.test.SimpleTest", Array("stderr"))
run("org.scalatest.tools.test.SimpleTest", Array("stderr(config=\\"dropinfoprovided dropsuitestarting droptestignored\\")"))
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("stderr (config=\\"dopinfoprovided dropsuitestarting droptestignored\\")")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("stderr config=\\"dopinfoprovided dropsuitestarting droptestignored\\"")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("stderr(config=\\"dopinfoprovided dropsuitestarting droptestignored\\"")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("stderrconfig=\\"dopinfoprovided dropsuitestarting droptestignored\\")")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("stderr(confi=\\"dopinfoprovided dropsuitestarting droptestignored\\")")) }
}
val tempDir = createTempDirectory()
val cssFile = File.createTempFile("mystyles", "css", tempDir)
test("Runner should support deprecated friendly argument dsl 'file'") {
run("org.scalatest.tools.test.SimpleTest", Array("file(filename=\\"" + cssFile.getAbsolutePath + "\\")"))
run("org.scalatest.tools.test.SimpleTest", Array("file(filename=\\"" + cssFile.getAbsolutePath + "\\", config=\\"durations shortstacks dropteststarting\\")"))
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("file")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("file(config=\\"durations shortstacks dropteststarting\\")")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("file (config=\\"nocolor fullstacks doptestsucceeded\\")")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("file config=\\"nocolor fullstacks doptestsucceeded\\"")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("file(config=\\"nocolor fullstacks doptestsucceeded\\"")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("file=\\"nocolor fullstacks doptestsucceeded\\")")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("file(confi=\\"nocolor fullstacks doptestsucceeded\\")")) }
}
test("Runner should support deprecated friendly argument dsl 'junitxml'") {
run("org.scalatest.tools.test.SimpleTest", Array("junitxml(directory=\\"" + tempDir.getAbsolutePath + "\\")"))
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("junitxml")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("junitxml (directory=\\"" + tempDir.getAbsolutePath + "\\")")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("junitxml directory=\\"" + tempDir.getAbsolutePath + "\\"")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("junitxml(directory=\\"" + tempDir.getAbsolutePath + "\\"")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("junitxmldirectory=\\"" + tempDir.getAbsolutePath + "\\")")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("junitxml(director=\\"" + tempDir.getAbsolutePath + "\\")")) }
}
test("Runner should support deprecated friendly argument dsl 'html'") {
run("org.scalatest.tools.test.SimpleTest", Array("html(directory=\\"" + tempDir.getAbsolutePath + "\\")"))
run("org.scalatest.tools.test.SimpleTest", Array("html(directory=\\"" + tempDir.getAbsolutePath + "\\", css=\\"" + cssFile.getAbsolutePath + "\\")"))
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("html()")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("html(directory=\\"" + tempDir.getAbsolutePath + "\\", css=\\"\\")")) }
}
test("Runner should support deprecated friendly argument dsl 'reporterclass'") {
val repClassName = classOf[EventRecordingReporter].getName
run("org.scalatest.tools.test.SimpleTest", Array("reporterclass(classname=\\"" + repClassName + "\\")"))
run("org.scalatest.tools.test.SimpleTest", Array("reporterclass(classname=\\"" + repClassName + "\\", config=\\"dropsuitestarting dropinfoprovided dropteststarting\\")"))
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("reporterclass")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("reporterclass(classname=\\"a.b.c\\", config=\\"nocolor\\")")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("reporterclass(classname=\\"a.b.c\\", config=\\"shortstacks\\")")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("reporterclass(classname=\\"a.b.c\\", config=\\"fullstacks\\")")) }
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("reporterclass(classname=\\"a.b.c\\", config=\\"durations\\")")) }
}
test("Runner should support deprecated friendly argument dsl 'membersonly'") {
run("org.scalatest.tools.test.SimpleTest", Array("membersonly(a.b.c)"))
run("org.scalatest.tools.test.SimpleTest", Array("membersonly(a.b.c, a.b.d, a.b.e)"))
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("membersonly")) }
}
test("Runner should support deprecated friendly argument dsl 'wildcard'") {
run("org.scalatest.tools.test.SimpleTest", Array("wildcard(a.b.c)"))
run("org.scalatest.tools.test.SimpleTest", Array("wildcard(a.b.c, a.b.d, a.b.e)"))
intercept[IllegalArgumentException] { run("org.scalatest.tools.test.SimpleTest", Array("wildcard")) }
}
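    // The ScalaTest suite fingerprint exposed by the framework; sbt uses it to
    // recognise which classes are runnable test suites.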
val fingerprint = {
val fingerprints = new ScalaTestFramework().tests
fingerprints(0).
asInstanceOf[org.scalatools.testing.TestFingerprint]
}
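    // Runs the named suite through the framework's TestingRunner with the given
    // arguments and returns every event fired to the EventHandler.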
def run(classname: String): Array[Event] = run(classname, Array[String]())
def run(classname: String, args:String): Array[Event] = run(classname, args.split(" "))
def run(classname: String, args:Array[String]): Array[Event] = {
// val buf = scala.collection.mutable.ArrayBuffer[Event]() // Only worked under 2.8
val buf = new scala.collection.mutable.ArrayBuffer[Event]
val listener = new EventHandler {
def handle(event: Event) {
buf += event
}
}
val framework = new ScalaTestFramework()
val runner: TestingRunner = framework.testRunner(Thread.currentThread.getContextClassLoader, Array(new TestLogger))
runner.run(classname, fingerprint, listener, args)
dispose(framework)
buf.toArray
}
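    // Disposes the DispatchReporter (if any) that the framework created for the run.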
private def dispose(framework: ScalaTestFramework) {
framework.RunConfig.reporter.get match {
case Some(dispatchRep: DispatchReporter) =>
dispatchRep.doDispose()
case _ =>
}
}
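    // No-op Logger handed to the runner so these tests produce no console output.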
class TestLogger extends Logger {
def trace(t:Throwable) {}
def error(msg: String) {}
def warn(msg: String) {}
def info(msg: String) {}
def debug(msg: String) {}
def ansiCodesSupported = false
}
}
package test{
private class SimpleTest extends FunSuite {
test("hello, world") {"hello, world"}
}
private class ThreeTestsTest extends FunSuite {
test("hello, world") {"hello, world"}
test("throw") {throw new Exception("baah")}
test("assert bad") {assert(1 === 3)}
}
import org.scalatest.fixture
private class TestWithConfigMap extends fixture.FunSuite {
type FixtureParam = String
override def withFixture(test: OneArgTest): Outcome = {
test(test.configMap("josh").toString)
}
test("get config"){ conf => assert(conf === "cool") }
}
private class TestWithConfigMap2 extends fixture.FunSuite {
type FixtureParam = Map[String,Any]
override def withFixture(test: OneArgTest): Outcome = {
test(test.configMap)
}
test("get config"){ conf => assert(conf === Map("a" -> "z", "b" -> "y", "c" -> "x")) }
}
private class TagsTest extends FunSuite {
test("hello, world", org.scalatest.Tag("hello")) {"hello, world"}
test("hello, world again", org.scalatest.Tag("helloAgain")) {"hello, world again"}
test("tag3", org.scalatest.Tag("tag3")) {"tag3"}
test("throw") {throw new Exception("baah")}
test("assert bad") {assert(1 === 3)}
}
private class PrivateConstructor private() extends FunSuite
import org.scalatest.DoNotDiscover
@DoNotDiscover
private class DoNotDiscoverSuite extends FunSuite {
test("do not test me") {}
}
private class PendingTest extends FunSuite {
test("i am pending")(pending)
}
import org.scalatest.testng.TestNGSuite
private class SuiteWithSkippedTest extends TestNGSuite {
import org.testng.annotations.Test
@Test(groups = Array("run")) def dependeeThatFails() { throw new Exception("fail") }
@Test(dependsOnGroups = Array("run")) def depender() {}
}
}
}
| SRGOM/scalatest | scalatest-test/src/test/scala/org/scalatest/tools/ScalaTestRunnerSuite.scala | Scala | apache-2.0 | 27,192 |
/*
* Copyright 2015 Foundational Development
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package pro.foundev.commons.benchmarking
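/**
 * A named, tagged benchmark whose work is deferred in `callback` until it is run.
 */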
case class Benchmark(callback: () => Unit, name: String, tag: String)
| rssvihla/datastax_work | spark_commons/commons/src/main/scala/pro/foundev/commons/benchmarking/Benchmark.scala | Scala | apache-2.0 | 748 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.feature
import breeze.numerics.{cos, sin}
import breeze.numerics.constants.Pi
import org.apache.spark.ml.linalg.{Vector, Vectors}
import org.apache.spark.ml.param.ParamsSuite
import org.apache.spark.ml.util.{DefaultReadWriteTest, MLTest, MLTestingUtils}
import org.apache.spark.ml.util.TestingUtils._
import org.apache.spark.sql.{Dataset, Row}
class BucketedRandomProjectionLSHSuite extends MLTest with DefaultReadWriteTest {
import testImplicits._
@transient var dataset: Dataset[_] = _
override def beforeAll(): Unit = {
super.beforeAll()
val data = {
for (i <- -10 until 10; j <- -10 until 10) yield Vectors.dense(i.toDouble, j.toDouble)
}
dataset = spark.createDataFrame(data.map(Tuple1.apply)).toDF("keys")
}
test("params") {
ParamsSuite.checkParams(new BucketedRandomProjectionLSH)
val model = new BucketedRandomProjectionLSHModel(
"brp", randUnitVectors = Array(Vectors.dense(1.0, 0.0)))
ParamsSuite.checkParams(model)
}
test("setters") {
val model = new BucketedRandomProjectionLSHModel("brp", Array(Vectors.dense(0.0, 1.0)))
.setInputCol("testkeys")
.setOutputCol("testvalues")
assert(model.getInputCol === "testkeys")
assert(model.getOutputCol === "testvalues")
}
test("BucketedRandomProjectionLSH: default params") {
val brp = new BucketedRandomProjectionLSH
assert(brp.getNumHashTables === 1.0)
}
test("read/write") {
def checkModelData(
model: BucketedRandomProjectionLSHModel,
model2: BucketedRandomProjectionLSHModel): Unit = {
model.randUnitVectors.zip(model2.randUnitVectors)
.foreach(pair => assert(pair._1 === pair._2))
}
val mh = new BucketedRandomProjectionLSH()
val settings = Map("inputCol" -> "keys", "outputCol" -> "values", "bucketLength" -> 1.0)
testEstimatorAndModelReadWrite(mh, dataset, settings, settings, checkModelData)
}
test("hashFunction") {
val randUnitVectors = Array(Vectors.dense(0.0, 1.0), Vectors.dense(1.0, 0.0))
val model = new BucketedRandomProjectionLSHModel("brp", randUnitVectors)
model.set(model.bucketLength, 0.5)
val res = model.hashFunction(Vectors.dense(1.23, 4.56))
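    // Each hash value is floor(v dot randUnitVector / bucketLength):
    // floor(4.56 / 0.5) = 9 for (0, 1) and floor(1.23 / 0.5) = 2 for (1, 0).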
assert(res.length == 2)
assert(res(0).equals(Vectors.dense(9.0)))
assert(res(1).equals(Vectors.dense(2.0)))
}
test("keyDistance") {
val model = new BucketedRandomProjectionLSHModel("brp", Array(Vectors.dense(0.0, 1.0)))
val keyDist = model.keyDistance(Vectors.dense(1, 2), Vectors.dense(-2, -2))
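    // Euclidean distance between (1, 2) and (-2, -2): sqrt(3^2 + 4^2) = 5.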
assert(keyDist === 5)
}
test("BucketedRandomProjectionLSH: randUnitVectors") {
val brp = new BucketedRandomProjectionLSH()
.setNumHashTables(20)
.setInputCol("keys")
.setOutputCol("values")
.setBucketLength(1.0)
.setSeed(12345)
val brpModel = brp.fit(dataset)
val unitVectors = brpModel.randUnitVectors
unitVectors.foreach { v: Vector =>
assert(Vectors.norm(v, 2.0) ~== 1.0 absTol 1e-14)
}
MLTestingUtils.checkCopyAndUids(brp, brpModel)
}
test("BucketedRandomProjectionLSH: streaming transform") {
val brp = new BucketedRandomProjectionLSH()
.setNumHashTables(2)
.setInputCol("keys")
.setOutputCol("values")
.setBucketLength(1.0)
.setSeed(12345)
val brpModel = brp.fit(dataset)
testTransformer[Tuple1[Vector]](dataset.toDF(), brpModel, "values") {
case Row(values: scala.collection.Seq[_]) =>
assert(values.length === brp.getNumHashTables)
}
}
test("BucketedRandomProjectionLSH: test of LSH property") {
    // Project from a 2-dimensional Euclidean space to 1 dimension
val brp = new BucketedRandomProjectionLSH()
.setInputCol("keys")
.setOutputCol("values")
.setBucketLength(1.0)
.setSeed(12345)
val (falsePositive, falseNegative) = LSHTest.calculateLSHProperty(dataset, brp, 8.0, 2.0)
assert(falsePositive < 0.4)
assert(falseNegative < 0.4)
}
test("BucketedRandomProjectionLSH with high dimension data: test of LSH property") {
val numDim = 100
val data = {
for (i <- 0 until numDim; j <- Seq(-2, -1, 1, 2))
yield Vectors.sparse(numDim, Seq((i, j.toDouble)))
}
val df = spark.createDataFrame(data.map(Tuple1.apply)).toDF("keys")
    // Project from a 100-dimensional Euclidean space to 10 dimensions
val brp = new BucketedRandomProjectionLSH()
.setNumHashTables(10)
.setInputCol("keys")
.setOutputCol("values")
.setBucketLength(2.5)
.setSeed(12345)
val (falsePositive, falseNegative) = LSHTest.calculateLSHProperty(df, brp, 3.0, 2.0)
assert(falsePositive < 0.3)
assert(falseNegative < 0.3)
}
test("approxNearestNeighbors for bucketed random projection") {
val key = Vectors.dense(1.2, 3.4)
val brp = new BucketedRandomProjectionLSH()
.setNumHashTables(2)
.setInputCol("keys")
.setOutputCol("values")
.setBucketLength(4.0)
.setSeed(12345)
val (precision, recall) = LSHTest.calculateApproxNearestNeighbors(brp, dataset, key, 100,
singleProbe = true)
assert(precision >= 0.6)
assert(recall >= 0.6)
}
test("approxNearestNeighbors with multiple probing") {
val key = Vectors.dense(1.2, 3.4)
val brp = new BucketedRandomProjectionLSH()
.setNumHashTables(20)
.setInputCol("keys")
.setOutputCol("values")
.setBucketLength(1.0)
.setSeed(12345)
val (precision, recall) = LSHTest.calculateApproxNearestNeighbors(brp, dataset, key, 100,
singleProbe = false)
assert(precision >= 0.7)
assert(recall >= 0.7)
}
test("approxNearestNeighbors for numNeighbors <= 0") {
val key = Vectors.dense(1.2, 3.4)
val model = new BucketedRandomProjectionLSHModel(
"brp", randUnitVectors = Array(Vectors.dense(1.0, 0.0)))
intercept[IllegalArgumentException] {
model.approxNearestNeighbors(dataset, key, 0)
}
intercept[IllegalArgumentException] {
model.approxNearestNeighbors(dataset, key, -1)
}
}
test("approxSimilarityJoin for bucketed random projection on different dataset") {
val data2 = {
for (i <- 0 until 24) yield Vectors.dense(10 * sin(Pi / 12 * i), 10 * cos(Pi / 12 * i))
}
val dataset2 = spark.createDataFrame(data2.map(Tuple1.apply)).toDF("keys")
val brp = new BucketedRandomProjectionLSH()
.setNumHashTables(2)
.setInputCol("keys")
.setOutputCol("values")
.setBucketLength(4.0)
.setSeed(12345)
val (precision, recall) = LSHTest.calculateApproxSimilarityJoin(brp, dataset, dataset2, 1.0)
assert(precision == 1.0)
assert(recall >= 0.7)
}
test("approxSimilarityJoin for self join") {
val data = {
for (i <- 0 until 24) yield Vectors.dense(10 * sin(Pi / 12 * i), 10 * cos(Pi / 12 * i))
}
val df = spark.createDataFrame(data.map(Tuple1.apply)).toDF("keys")
val brp = new BucketedRandomProjectionLSH()
.setNumHashTables(2)
.setInputCol("keys")
.setOutputCol("values")
.setBucketLength(4.0)
.setSeed(12345)
val (precision, recall) = LSHTest.calculateApproxSimilarityJoin(brp, df, df, 3.0)
assert(precision == 1.0)
assert(recall >= 0.7)
}
}
| mahak/spark | mllib/src/test/scala/org/apache/spark/ml/feature/BucketedRandomProjectionLSHSuite.scala | Scala | apache-2.0 | 8,043 |
package bad.robot.radiate.ui
import java.beans.{PropertyChangeEvent, PropertyChangeListener}
class PropertyChangeListenerStub extends PropertyChangeListener {
private val results = new scala.collection.mutable.MutableList[String]
def propertyChange(event: PropertyChangeEvent) {
results += event.toString
}
def contains(result: String) = {
results.contains(result)
}
def size = results.size
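  // Illustrative usage sketch (not part of the original file; java.beans.PropertyChangeSupport
  // is only a stand-in event source): the stub records every event it receives, so tests can
  // assert on what was fired.
  //
  //   val stub = new PropertyChangeListenerStub
  //   val support = new java.beans.PropertyChangeSupport(new Object)
  //   support.addPropertyChangeListener(stub)
  //   support.firePropertyChange("progress", 0, 1)
  //   assert(stub.size == 1)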
} | tobyweston/radiate | src/test/scala/bad/robot/radiate/ui/PropertyChangeListenerStub.scala | Scala | apache-2.0 | 418 |
import sbt._
import Keys._
import xerial.sbt.Sonatype.sonatypeSettings
object Publish {
lazy val settings = sonatypeSettings ++ Seq(
scmInfo := Some(
      ScmInfo(url("https://github.com/pawelkaczor/schale"), "scm:git:[email protected]:pawelkaczor/schale.git")
),
pomExtra :=
<developers>
<developer>
<id>pawelkaczor</id>
<name>Pawel Kaczor</name>
<url>https://github.com/pawelkaczor</url>
</developer>
</developers>
)
} | pawelkaczor/schale | project/Publish.scala | Scala | bsd-2-clause | 495 |
/**
* Copyright (C) 2012 Inria, University Lille 1.
*
* This file is part of PowerAPI.
*
* PowerAPI is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* PowerAPI is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with PowerAPI. If not, see <http://www.gnu.org/licenses/>.
*
* Contact: [email protected].
*/
package fr.inria.powerapi.formula.powerspy
import fr.inria.powerapi.core.Formula
import fr.inria.powerapi.sensor.powerspy.PowerSpySensorMessage
import fr.inria.powerapi.core.FormulaMessage
import fr.inria.powerapi.core.Tick
import fr.inria.powerapi.core.Energy
case class PowerSpyFormulaMessage(energy: Energy, tick: Tick, device: String = "powerspy") extends FormulaMessage
class PowerSpyFormula extends Formula {
def messagesToListen = Array(classOf[PowerSpySensorMessage])
def process(powerSpySensorMessage: PowerSpySensorMessage) {
publish(PowerSpyFormulaMessage(Energy.fromPower(powerSpySensorMessage.currentRMS * powerSpySensorMessage.uScale * powerSpySensorMessage.iScale), powerSpySensorMessage.tick))
}
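  // Worked example (illustrative figures, not taken from the PowerSpy protocol): with
  // currentRMS = 2.0, uScale = 230.0 and iScale = 0.05, the published power is
  // 2.0 * 230.0 * 0.05 = 23.0 W, wrapped via Energy.fromPower in the formula message.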
def acquire = {
case powerSpySensorMessage: PowerSpySensorMessage => process(powerSpySensorMessage)
}
} | abourdon/powerapi-akka | formulae/formula-powerspy/src/main/scala/fr/inria/powerapi/formula/powerspy/PowerSpyFormula.scala | Scala | agpl-3.0 | 1,703 |
package com.github.diegopacheco.scalaplayground.tinylog
object TinyLogApp extends App {
import org.pmw.tinylog.Logger
Logger.info("Hello World!")
}
| diegopacheco/scala-playground | tinylog-fun/src/main/scala/com/github/diegopacheco/scalaplayground/tinylog/TinyLogApp.scala | Scala | unlicense | 155 |
package observatory
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
import org.scalatest.prop.Checkers
@RunWith(classOf[JUnitRunner])
class ManipulationTest extends FunSuite with Checkers with Config {
lazy val locateTemperatures = Extraction.locateTemperatures(year, stationsPath, temperaturesPath)
lazy val locateAverage = Extraction.locationYearlyAverageRecords(locateTemperatures)
test("tileLocation") {
val gridFetch = Manipulation.makeGrid(locateAverage)
val gridpoints = for {
lat <- -89 to 90
lon <- -180 to 179
} yield gridFetch(GridLocation(lat, lon))
assert(gridpoints.size === 360 * 180)
assert(gridpoints(360 * 180 - 1) === -4.630726890271194)
}
test("average") {
val temperatures = List(List((Location(0.0, 0.0), 10.0)), List((Location(0.2, 0.3), 20.0)), List((Location(-0.5, -0.8), 5.0)))
val avgs = Manipulation.average(temperatures)
assert(avgs(GridLocation(0, 0)) === 11.666666666666666)
}
} | yurii-khomenko/fpScalaSpec | c5w1observatory/src/test/scala/observatory/ManipulationTest.scala | Scala | gpl-3.0 | 1,028 |
package mesosphere.util.state.zk
import java.util.UUID
import com.fasterxml.uuid.impl.UUIDUtil
import com.google.protobuf.{ ByteString, InvalidProtocolBufferException }
import com.twitter.util.{ Future => TWFuture }
import com.twitter.zk.{ ZNode, ZkClient }
import mesosphere.marathon.{ Protos, StoreCommandFailedException }
import mesosphere.util.ThreadPoolContext
import mesosphere.util.state.zk.ZKStore._
import mesosphere.util.state.{ PersistentEntity, PersistentStore, PersistentStoreManagement }
import org.apache.zookeeper.KeeperException
import org.apache.zookeeper.KeeperException.{ NoNodeException, NodeExistsException }
import org.slf4j.LoggerFactory
import scala.concurrent.{ Future, Promise }
class ZKStore(val client: ZkClient, root: ZNode) extends PersistentStore with PersistentStoreManagement {
private[this] val log = LoggerFactory.getLogger(getClass)
private[this] implicit val ec = ThreadPoolContext.context
/**
   * Fetch data and return the entity for the given key.
   * Returns None if the node is not found in ZooKeeper.
*/
override def load(key: ID): Future[Option[ZKEntity]] = {
val node = root(key)
require(node.parent == root, s"Nested paths are not supported: $key!")
node.getData().asScala
.map { data => Some(ZKEntity(node, ZKData(data.bytes), Some(data.stat.getVersion))) }
.recover { case ex: NoNodeException => None }
.recover(exceptionTransform(s"Could not load key $key"))
}
override def create(key: ID, content: IndexedSeq[Byte]): Future[ZKEntity] = {
val node = root(key)
require(node.parent == root, s"Nested paths are not supported: $key")
val data = ZKData(key, UUID.randomUUID(), content)
node.create(data.toProto.toByteArray).asScala
.map { n => ZKEntity(n, data, Some(0)) } //first version after create is 0
.recover(exceptionTransform(s"Can not create entity $key"))
}
/**
* This will store a previously fetched entity.
   * The entity is written back using the version observed at read time.
   * @return The updated entity carrying the new ZooKeeper version; the returned future
   *         fails if the update could not be applied.
*/
override def update(entity: PersistentEntity): Future[ZKEntity] = {
val zk = zkEntity(entity)
val version = zk.version.getOrElse (
throw new StoreCommandFailedException(s"Can not store entity $entity, since there is no version!")
)
zk.node.setData(zk.data.toProto.toByteArray, version).asScala
.map { data => zk.copy(version = Some(data.stat.getVersion)) }
.recover(exceptionTransform(s"Can not update entity $entity"))
}
/**
* Delete an entry with given identifier.
*/
override def delete(key: ID): Future[Boolean] = {
val node = root(key)
require(node.parent == root, s"Nested paths are not supported: $key")
node.exists().asScala
.flatMap { d => node.delete(d.stat.getVersion).asScala.map(_ => true) }
.recover { case ex: NoNodeException => false }
.recover(exceptionTransform(s"Can not delete entity $key"))
}
override def allIds(): Future[Seq[ID]] = {
root.getChildren().asScala
.map(_.children.map(_.name))
.recover(exceptionTransform("Can not list all identifiers"))
}
private[this] def exceptionTransform[T](errorMessage: String): PartialFunction[Throwable, T] = {
case ex: KeeperException => throw new StoreCommandFailedException(errorMessage, ex)
}
private[this] def zkEntity(entity: PersistentEntity): ZKEntity = {
entity match {
case zk: ZKEntity => zk
case _ => throw new IllegalArgumentException(s"Can not handle this kind of entity: ${entity.getClass}")
}
}
private[this] def createPath(path: ZNode): Future[ZNode] = {
def nodeExists(node: ZNode): Future[Boolean] = node.exists().asScala
.map(_ => true)
.recover { case ex: NoNodeException => false }
.recover(exceptionTransform("Can not query for exists"))
def createNode(node: ZNode): Future[ZNode] = node.create().asScala
.recover { case ex: NodeExistsException => node }
.recover(exceptionTransform("Can not create"))
def createPath(node: ZNode): Future[ZNode] = {
nodeExists(node).flatMap {
case true => Future.successful(node)
case false => createPath(node.parent).flatMap(_ => createNode(node))
}
}
createPath(path)
}
override def initialize(): Future[Unit] = createPath(root).map(_ => ())
}
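// Usage sketch (illustrative only; obtaining the ZkClient/ZNode and the execution context is
// elided): entities carry the ZooKeeper version observed at read time, so update() becomes an
// optimistic, versioned setData and the returned future fails if the node changed concurrently.
//
//   val store: ZKStore = ???
//   val stored: Future[ZKEntity] =
//     store.create("app:myApp", "state".getBytes("UTF-8").toIndexedSeq).flatMap { created =>
//       store.update(created.withNewContent("new-state".getBytes("UTF-8").toIndexedSeq))
//     }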
case class ZKEntity(node: ZNode, data: ZKData, version: Option[Int] = None) extends PersistentEntity {
override def id: String = node.name
override def withNewContent(updated: IndexedSeq[Byte]): PersistentEntity = copy(data = data.copy(bytes = updated))
override def bytes: IndexedSeq[Byte] = data.bytes
}
case class ZKData(name: String, uuid: UUID, bytes: IndexedSeq[Byte] = Vector.empty) {
def toProto: Protos.ZKStoreEntry = Protos.ZKStoreEntry.newBuilder()
.setName(name)
.setUuid(ByteString.copyFromUtf8(uuid.toString))
.setValue(ByteString.copyFrom(bytes.toArray))
.build()
}
object ZKData {
def apply(bytes: Array[Byte]): ZKData = {
try {
val proto = Protos.ZKStoreEntry.parseFrom(bytes)
new ZKData(proto.getName, UUIDUtil.uuid(proto.getUuid.toByteArray), proto.getValue.toByteArray)
}
catch {
case ex: InvalidProtocolBufferException =>
        throw new StoreCommandFailedException(s"Can not deserialize Protobuf from ${bytes.length} bytes", ex)
}
}
}
object ZKStore {
implicit class Twitter2Scala[T](val twitterF: TWFuture[T]) extends AnyVal {
def asScala: Future[T] = {
val promise = Promise[T]()
twitterF.onSuccess(promise.success(_))
twitterF.onFailure(promise.failure(_))
promise.future
}
}
}
| sledigabel/marathon | src/main/scala/mesosphere/util/state/zk/ZKStore.scala | Scala | apache-2.0 | 5,750 |
package com.tribbloids.spookystuff.utils
import java.util.concurrent.LinkedBlockingDeque
import scala.language.implicitConversions
case class CircularDeque[T](size: Int = 10) {
import scala.collection.JavaConverters._
lazy val delegate = new LinkedBlockingDeque[T](size)
def forceAddFirst(v: T): Unit = delegate.synchronized {
var success: Boolean = false
while (!success) {
try {
delegate.addFirst(v)
success = true
} catch {
case e: IllegalStateException =>
delegate.removeLast()
}
}
}
def forceAddLast(v: T): Unit = delegate.synchronized {
var success: Boolean = false
while (!success) {
try {
delegate.addLast(v)
success = true
} catch {
case e: IllegalStateException =>
delegate.removeFirst()
}
}
}
def toList: List[T] = delegate.asScala.toList
}
object CircularDeque {
implicit def toDelegate[T](v: CircularDeque[T]): LinkedBlockingDeque[T] = v.delegate
}
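// Usage sketch (illustrative, not part of the original file): the deque keeps at most `size`
// elements, evicting from the opposite end whenever a forced add finds it full.
//
//   val recent = CircularDeque[Int](3)
//   (1 to 5).foreach(recent.forceAddLast)   // adding 4 and 5 evicts 1 and 2 from the front
//   assert(recent.toList == List(3, 4, 5))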
| tribbloid/spookystuff | mldsl/src/main/scala/com/tribbloids/spookystuff/utils/CircularDeque.scala | Scala | apache-2.0 | 1,023 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.cancelables
import monix.execution.cancelables.SingleAssignCancelable.State
import monix.execution.atomic.AtomicAny
import scala.annotation.tailrec
import monix.execution.Cancelable
/** Represents a [[monix.execution.Cancelable]] that can be assigned only
* once to another cancelable reference.
*
* Similar to [[monix.execution.cancelables.OrderedCancelable]],
* except that in case of multi-assignment, it throws a
* `java.lang.IllegalStateException`.
*
* If the assignment happens after this cancelable has been canceled, then on
* assignment the reference will get canceled too.
*
* Useful in case you need a forward reference.
*/
final class SingleAssignCancelable private (extra: Cancelable)
extends AssignableCancelable.Bool {
// For binary compatibility
private[SingleAssignCancelable] def this() = this(null)
import State._
override def isCanceled: Boolean =
state.get match {
case IsEmptyCanceled | IsCanceled =>
true
case _ =>
false
}
/** Sets the underlying cancelable reference with `s`.
*
   * In case this `SingleAssignCancelable` is already canceled,
* then the reference `value` will also be canceled on assignment.
*
* Throws `IllegalStateException` in case this cancelable has already
* been assigned.
*
* @return `this`
*/
@throws(classOf[IllegalStateException])
override def `:=`(value: Cancelable): this.type = {
// Optimistic CAS, no loop needed
if (state.compareAndSet(Empty, IsActive(value))) this else {
state.get match {
case IsEmptyCanceled =>
state.getAndSet(IsCanceled) match {
case IsEmptyCanceled =>
value.cancel()
this
case _ =>
value.cancel()
raiseError()
}
case IsCanceled | IsActive(_) =>
value.cancel()
raiseError()
case Empty =>
// $COVERAGE-OFF$
:=(value)
// $COVERAGE-ON$
}
}
}
@tailrec
override def cancel(): Unit = {
state.get match {
case IsCanceled | IsEmptyCanceled => ()
case IsActive(s) =>
state.set(IsCanceled)
if (extra != null) extra.cancel()
s.cancel()
case Empty =>
if (state.compareAndSet(Empty, IsEmptyCanceled)) {
if (extra != null) extra.cancel()
} else {
// $COVERAGE-OFF$
cancel() // retry
// $COVERAGE-ON$
}
}
}
private def raiseError(): Nothing = {
throw new IllegalStateException(
"Cannot assign to SingleAssignmentCancelable, " +
"as it was already assigned once")
}
private[this] val state = AtomicAny(Empty : State)
}
object SingleAssignCancelable {
/** Builder for [[SingleAssignCancelable]]. */
def apply(): SingleAssignCancelable =
new SingleAssignCancelable()
/** Builder for [[SingleAssignCancelable]] that takes an extra reference,
* to be canceled on [[SingleAssignCancelable.cancel cancel()]]
* along with whatever underlying reference we have.
*
* {{{
* val c = {
   * val extra = Cancelable(() => println("extra canceled"))
   * SingleAssignCancelable.plusOne(extra)
* }
*
* c := Cancelable(() => println("main canceled"))
*
* // ...
* c.cancel()
* //=> extra canceled
* //=> main canceled
* }}}
*/
def plusOne(guest: Cancelable): SingleAssignCancelable =
new SingleAssignCancelable(guest)
private sealed trait State
private object State {
case object Empty extends State
case class IsActive(s: Cancelable) extends State
case object IsCanceled extends State
case object IsEmptyCanceled extends State
}
}
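// Behavioural sketch (illustrative, not part of the original source):
//
//   val ref = SingleAssignCancelable()
//   ref := Cancelable(() => println("task canceled"))   // first assignment is stored
//   ref.cancel()                                        // prints "task canceled"
//   // a second `:=` now throws IllegalStateException
//
// Assigning after cancellation is also safe: the incoming cancelable is canceled immediately
// instead of being stored.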
| Wogan/monix | monix-execution/shared/src/main/scala/monix/execution/cancelables/SingleAssignCancelable.scala | Scala | apache-2.0 | 4,470 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.webmvc.view.i18n
import java.{util => ju}
import org.beangle.commons.bean.Initializing
import org.beangle.commons.lang.Strings
import org.beangle.commons.lang.annotation.description
import org.beangle.commons.text.i18n.{TextBundleRegistry, TextFormater, TextResource, TextResourceProvider}
import org.beangle.web.action.context.ActionContext
@description("εΊδΊActionηζζ¬θ΅ζΊζδΎθ
")
class ActionTextResourceProvider(registry: TextBundleRegistry, formater: TextFormater)
extends TextResourceProvider with Initializing {
var defaults: String = "beangle,application"
override def init(): Unit = {
registry.addDefaults(Strings.split(defaults, ",").toIndexedSeq: _*)
}
def getTextResource(locale: ju.Locale): TextResource = {
new ActionTextResource(ActionContext.current, locale, registry, formater)
}
}
| beangle/webmvc | core/src/main/scala/org/beangle/webmvc/view/i18n/ActionTextResourceProvider.scala | Scala | lgpl-3.0 | 1,578 |
package scala.swing
import java.awt.{Adjustable => JAdjustable}
object Adjustable {
trait Wrapper extends Oriented.Wrapper with Adjustable {
def peer: JAdjustable with OrientedMixin
def unitIncrement = peer.getUnitIncrement
def unitIncrement_=(i: Int) = peer.setUnitIncrement(i)
def blockIncrement = peer.getBlockIncrement
def blockIncrement_=(i: Int) = peer.setBlockIncrement(i)
def value = peer.getValue
def value_=(v: Int) = peer.setValue(v)
def visibleAmount = peer.getVisibleAmount
def visibleAmount_=(v: Int) = peer.setVisibleAmount(v)
def minimum = peer.getMinimum
def minimum_=(m: Int) = peer.setMinimum(m)
def maximum = peer.getMaximum
def maximum_=(m: Int) = peer.setMaximum(m)
}
}
trait Adjustable extends Oriented {
def unitIncrement: Int
def unitIncrement_=(i: Int)
def blockIncrement: Int
def blockIncrement_=(i: Int)
def value: Int
def value_=(v : Int)
def visibleAmount: Int
def visibleAmount_=(v: Int)
def minimum: Int
def minimum_=(m: Int)
def maximum: Int
def maximum_=(m: Int)
// Needs implementation of AdjustmentEvent
//
// val adjustments: Publisher = new Publisher {
// peer.addAdjustmentListener(new AdjustmentListener {
// def adjustmentValueChanged(e: java.awt.event.AdjustmentEvent) {
// publish(new AdjustmentEvent(e))
// }
// })
// }
}
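// Usage sketch (illustrative; scala.swing.ScrollBar is assumed to mix in Adjustable.Wrapper,
// delegating these properties to its java.awt peer):
//
//   val bar = new ScrollBar { minimum = 0; maximum = 100; unitIncrement = 5; value = 20 }
//   assert(bar.value == 20)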
| benhutchison/scala-swing | src/main/scala/scala/swing/Adjustable.scala | Scala | bsd-3-clause | 1,372 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600e.v2
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalBoolean, Input}
case class E26(value: Option[Boolean]) extends CtBoxIdentifier("Qualifying investments and loans") with CtOptionalBoolean with Input
| ahudspith-equalexperts/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600e/v2/E26.scala | Scala | apache-2.0 | 862 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.adaptive
import java.io.File
import java.net.URI
import org.apache.log4j.Level
import org.apache.spark.scheduler.{SparkListener, SparkListenerEvent, SparkListenerJobStart}
import org.apache.spark.sql.{Dataset, QueryTest, Row, SparkSession, Strategy}
import org.apache.spark.sql.catalyst.optimizer.{BuildLeft, BuildRight}
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, LogicalPlan}
import org.apache.spark.sql.execution.{PartialReducerPartitionSpec, QueryExecution, ReusedSubqueryExec, ShuffledRowRDD, SparkPlan, UnaryExecNode}
import org.apache.spark.sql.execution.command.DataWritingCommandExec
import org.apache.spark.sql.execution.datasources.noop.NoopDataSource
import org.apache.spark.sql.execution.datasources.v2.V2TableWriteExec
import org.apache.spark.sql.execution.exchange.{BroadcastExchangeExec, Exchange, REPARTITION, REPARTITION_WITH_NUM, ReusedExchangeExec, ShuffleExchangeExec, ShuffleExchangeLike}
import org.apache.spark.sql.execution.joins.{BaseJoinExec, BroadcastHashJoinExec, SortMergeJoinExec}
import org.apache.spark.sql.execution.metric.SQLShuffleReadMetricsReporter
import org.apache.spark.sql.execution.ui.SparkListenerSQLAdaptiveExecutionUpdate
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.PartitionOverwriteMode
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.{IntegerType, StructType}
import org.apache.spark.sql.util.QueryExecutionListener
import org.apache.spark.util.Utils
class AdaptiveQueryExecSuite
extends QueryTest
with SharedSparkSession
with AdaptiveSparkPlanHelper {
import testImplicits._
setupTestData()
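  // Runs `query` with AQE on, returning (the physical plan before adaptive re-optimization,
  // the final plan unwrapped from AdaptiveSparkPlanExec). The helper also re-runs the query
  // with AQE disabled and checks that both executions return the same rows, and that the
  // final plan contains no leftover Exchange nodes.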
private def runAdaptiveAndVerifyResult(query: String): (SparkPlan, SparkPlan) = {
var finalPlanCnt = 0
val listener = new SparkListener {
override def onOtherEvent(event: SparkListenerEvent): Unit = {
event match {
case SparkListenerSQLAdaptiveExecutionUpdate(_, _, sparkPlanInfo) =>
if (sparkPlanInfo.simpleString.startsWith(
"AdaptiveSparkPlan isFinalPlan=true")) {
finalPlanCnt += 1
}
case _ => // ignore other events
}
}
}
spark.sparkContext.addSparkListener(listener)
val dfAdaptive = sql(query)
val planBefore = dfAdaptive.queryExecution.executedPlan
assert(planBefore.toString.startsWith("AdaptiveSparkPlan isFinalPlan=false"))
val result = dfAdaptive.collect()
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") {
val df = sql(query)
checkAnswer(df, result)
}
val planAfter = dfAdaptive.queryExecution.executedPlan
assert(planAfter.toString.startsWith("AdaptiveSparkPlan isFinalPlan=true"))
val adaptivePlan = planAfter.asInstanceOf[AdaptiveSparkPlanExec].executedPlan
spark.sparkContext.listenerBus.waitUntilEmpty()
// AQE will post `SparkListenerSQLAdaptiveExecutionUpdate` twice in case of subqueries that
    // exist outside of query stages.
val expectedFinalPlanCnt = adaptivePlan.find(_.subqueries.nonEmpty).map(_ => 2).getOrElse(1)
assert(finalPlanCnt == expectedFinalPlanCnt)
spark.sparkContext.removeSparkListener(listener)
val exchanges = adaptivePlan.collect {
case e: Exchange => e
}
assert(exchanges.isEmpty, "The final plan should not contain any Exchange node.")
(dfAdaptive.queryExecution.sparkPlan, adaptivePlan)
}
private def findTopLevelBroadcastHashJoin(plan: SparkPlan): Seq[BroadcastHashJoinExec] = {
collect(plan) {
case j: BroadcastHashJoinExec => j
}
}
private def findTopLevelSortMergeJoin(plan: SparkPlan): Seq[SortMergeJoinExec] = {
collect(plan) {
case j: SortMergeJoinExec => j
}
}
private def findTopLevelBaseJoin(plan: SparkPlan): Seq[BaseJoinExec] = {
collect(plan) {
case j: BaseJoinExec => j
}
}
private def findReusedExchange(plan: SparkPlan): Seq[ReusedExchangeExec] = {
collectWithSubqueries(plan) {
case ShuffleQueryStageExec(_, e: ReusedExchangeExec) => e
case BroadcastQueryStageExec(_, e: ReusedExchangeExec) => e
}
}
private def findReusedSubquery(plan: SparkPlan): Seq[ReusedSubqueryExec] = {
collectWithSubqueries(plan) {
case e: ReusedSubqueryExec => e
}
}
private def checkNumLocalShuffleReaders(
plan: SparkPlan, numShufflesWithoutLocalReader: Int = 0): Unit = {
val numShuffles = collect(plan) {
case s: ShuffleQueryStageExec => s
}.length
val numLocalReaders = collect(plan) {
case reader: CustomShuffleReaderExec if reader.isLocalReader => reader
}
numLocalReaders.foreach { r =>
val rdd = r.execute()
val parts = rdd.partitions
assert(parts.forall(rdd.preferredLocations(_).nonEmpty))
}
assert(numShuffles === (numLocalReaders.length + numShufflesWithoutLocalReader))
}
private def checkInitialPartitionNum(df: Dataset[_], numPartition: Int): Unit = {
// repartition obeys initialPartitionNum when adaptiveExecutionEnabled
val plan = df.queryExecution.executedPlan
assert(plan.isInstanceOf[AdaptiveSparkPlanExec])
val shuffle = plan.asInstanceOf[AdaptiveSparkPlanExec].executedPlan.collect {
case s: ShuffleExchangeExec => s
}
assert(shuffle.size == 1)
assert(shuffle(0).outputPartitioning.numPartitions == numPartition)
}
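  // The tests below drive AQE purely through SQLConf. For reference, the string keys behind
  // the two constants used most often are assumed to be "spark.sql.adaptive.enabled"
  // (ADAPTIVE_EXECUTION_ENABLED) and "spark.sql.autoBroadcastJoinThreshold"
  // (AUTO_BROADCASTJOIN_THRESHOLD), e.g.:
  //
  //   withSQLConf(
  //       SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
  //       SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
  //     // run a query and inspect its queryExecution.executedPlan
  //   }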
test("Change merge join to broadcast join") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReaders(adaptivePlan)
}
}
test("Reuse the parallelism of CoalescedShuffleReaderExec in LocalShuffleReaderExec") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "10") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
val localReaders = collect(adaptivePlan) {
case reader: CustomShuffleReaderExec if reader.isLocalReader => reader
}
assert(localReaders.length == 2)
val localShuffleRDD0 = localReaders(0).execute().asInstanceOf[ShuffledRowRDD]
val localShuffleRDD1 = localReaders(1).execute().asInstanceOf[ShuffledRowRDD]
// The pre-shuffle partition size is [0, 0, 0, 72, 0]
// We exclude the 0-size partitions, so only one partition, advisoryParallelism = 1
// the final parallelism is
// math.max(1, advisoryParallelism / numMappers): math.max(1, 1/2) = 1
// and the partitions length is 1 * numMappers = 2
assert(localShuffleRDD0.getPartitions.length == 2)
// The pre-shuffle partition size is [0, 72, 0, 72, 126]
// We exclude the 0-size partitions, so only 3 partition, advisoryParallelism = 3
// the final parallelism is
// math.max(1, advisoryParallelism / numMappers): math.max(1, 3/2) = 1
// and the partitions length is 1 * numMappers = 2
assert(localShuffleRDD1.getPartitions.length == 2)
}
}
test("Reuse the default parallelism in LocalShuffleReaderExec") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "false") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
val localReaders = collect(adaptivePlan) {
case reader: CustomShuffleReaderExec if reader.isLocalReader => reader
}
assert(localReaders.length == 2)
val localShuffleRDD0 = localReaders(0).execute().asInstanceOf[ShuffledRowRDD]
val localShuffleRDD1 = localReaders(1).execute().asInstanceOf[ShuffledRowRDD]
// the final parallelism is math.max(1, numReduces / numMappers): math.max(1, 5/2) = 2
// and the partitions length is 2 * numMappers = 4
assert(localShuffleRDD0.getPartitions.length == 4)
// the final parallelism is math.max(1, numReduces / numMappers): math.max(1, 5/2) = 2
// and the partitions length is 2 * numMappers = 4
assert(localShuffleRDD1.getPartitions.length == 4)
}
}
test("Empty stage coalesced to 1-partition RDD") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true") {
val df1 = spark.range(10).withColumn("a", 'id)
val df2 = spark.range(10).withColumn("b", 'id)
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
val testDf = df1.where('a > 10).join(df2.where('b > 10), Seq("id"), "left_outer")
.groupBy('a).count()
checkAnswer(testDf, Seq())
val plan = testDf.queryExecution.executedPlan
assert(find(plan)(_.isInstanceOf[SortMergeJoinExec]).isDefined)
val coalescedReaders = collect(plan) {
case r: CustomShuffleReaderExec => r
}
assert(coalescedReaders.length == 3)
coalescedReaders.foreach(r => assert(r.partitionSpecs.length == 1))
}
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "1") {
val testDf = df1.where('a > 10).join(df2.where('b > 10), Seq("id"), "left_outer")
.groupBy('a).count()
checkAnswer(testDf, Seq())
val plan = testDf.queryExecution.executedPlan
assert(find(plan)(_.isInstanceOf[BroadcastHashJoinExec]).isDefined)
val coalescedReaders = collect(plan) {
case r: CustomShuffleReaderExec => r
}
assert(coalescedReaders.length == 3, s"$plan")
coalescedReaders.foreach(r => assert(r.isLocalReader || r.partitionSpecs.length == 1))
}
}
}
test("Scalar subquery") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a " +
"where value = (SELECT max(a) from testData3)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReaders(adaptivePlan)
}
}
test("Scalar subquery in later stages") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a " +
"where (value + a) = (SELECT max(a) from testData3)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReaders(adaptivePlan)
}
}
test("multiple joins") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"""
|WITH t4 AS (
| SELECT * FROM lowercaseData t2 JOIN testData3 t3 ON t2.n = t3.a where t2.n = '1'
|)
|SELECT * FROM testData
|JOIN testData2 t2 ON key = t2.a
|JOIN t4 ON t2.b = t4.a
|WHERE value = 1
""".stripMargin)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 3)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 3)
// A possible resulting query plan:
// BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastExchange
// +-LocalShuffleReader*
// +- ShuffleExchange
      // After applying the 'OptimizeLocalShuffleReader' rule, all four shuffle readers in
      // the bottom two 'BroadcastHashJoin's are converted to local shuffle readers.
      // For the top-level 'BroadcastHashJoin', the probe side is not a shuffle query stage,
      // and the build side shuffle query stage is also converted to a local shuffle reader.
checkNumLocalShuffleReaders(adaptivePlan)
}
}
test("multiple joins with aggregate") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"""
|WITH t4 AS (
| SELECT * FROM lowercaseData t2 JOIN (
| select a, sum(b) from testData3 group by a
| ) t3 ON t2.n = t3.a where t2.n = '1'
|)
|SELECT * FROM testData
|JOIN testData2 t2 ON key = t2.a
|JOIN t4 ON t2.b = t4.a
|WHERE value = 1
""".stripMargin)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 3)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 3)
// A possible resulting query plan:
// BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastExchange
// +-HashAggregate
// +- CoalescedShuffleReader
// +- ShuffleExchange
      // The shuffle added by Aggregate can't use the local shuffle reader.
checkNumLocalShuffleReaders(adaptivePlan, 1)
}
}
test("multiple joins with aggregate 2") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "500") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"""
|WITH t4 AS (
| SELECT * FROM lowercaseData t2 JOIN (
| select a, max(b) b from testData2 group by a
| ) t3 ON t2.n = t3.b
|)
|SELECT * FROM testData
|JOIN testData2 t2 ON key = t2.a
|JOIN t4 ON value = t4.a
|WHERE value = 1
""".stripMargin)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 3)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 3)
// A possible resulting query plan:
// BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- BroadcastExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- LocalShuffleReader*
// +- ShuffleExchange
// +- BroadcastHashJoin
// +- Filter
// +- HashAggregate
// +- CoalescedShuffleReader
// +- ShuffleExchange
// +- BroadcastExchange
// +-LocalShuffleReader*
// +- ShuffleExchange
      // The shuffle added by Aggregate can't use the local shuffle reader.
checkNumLocalShuffleReaders(adaptivePlan, 1)
}
}
test("Exchange reuse") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT value FROM testData join testData2 ON key = a " +
"join (SELECT value v from testData join testData3 ON key = a) on value = v")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 3)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 2)
      // There is still an SMJ, and its two shuffles can't use the local shuffle reader.
checkNumLocalShuffleReaders(adaptivePlan, 2)
// Even with local shuffle reader, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.size == 1)
}
}
test("Exchange reuse with subqueries") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT a FROM testData join testData2 ON key = a " +
"where value = (SELECT max(a) from testData join testData2 ON key = a)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReaders(adaptivePlan)
// Even with local shuffle reader, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.size == 1)
}
}
test("Exchange reuse across subqueries") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
SQLConf.SUBQUERY_REUSE_ENABLED.key -> "false") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT a FROM testData join testData2 ON key = a " +
"where value >= (SELECT max(a) from testData join testData2 ON key = a) " +
"and a <= (SELECT max(a) from testData join testData2 ON key = a)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReaders(adaptivePlan)
// Even with local shuffle reader, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.nonEmpty)
val sub = findReusedSubquery(adaptivePlan)
assert(sub.isEmpty)
}
}
test("Subquery reuse") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT a FROM testData join testData2 ON key = a " +
"where value >= (SELECT max(a) from testData join testData2 ON key = a) " +
"and a <= (SELECT max(a) from testData join testData2 ON key = a)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReaders(adaptivePlan)
// Even with local shuffle reader, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.isEmpty)
val sub = findReusedSubquery(adaptivePlan)
assert(sub.nonEmpty)
}
}
test("Broadcast exchange reuse across subqueries") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "20000000",
SQLConf.SUBQUERY_REUSE_ENABLED.key -> "false") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT a FROM testData join testData2 ON key = a " +
"where value >= (" +
"SELECT /*+ broadcast(testData2) */ max(key) from testData join testData2 ON key = a) " +
"and a <= (" +
"SELECT /*+ broadcast(testData2) */ max(value) from testData join testData2 ON key = a)")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
checkNumLocalShuffleReaders(adaptivePlan)
// Even with local shuffle reader, the query stage reuse can also work.
val ex = findReusedExchange(adaptivePlan)
assert(ex.nonEmpty)
assert(ex.head.child.isInstanceOf[BroadcastExchangeExec])
val sub = findReusedSubquery(adaptivePlan)
assert(sub.isEmpty)
}
}
test("Union/Except/Intersect queries") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
runAdaptiveAndVerifyResult(
"""
|SELECT * FROM testData
|EXCEPT
|SELECT * FROM testData2
|UNION ALL
|SELECT * FROM testData
|INTERSECT ALL
|SELECT * FROM testData2
""".stripMargin)
}
}
test("Subquery de-correlation in Union queries") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
withTempView("a", "b") {
Seq("a" -> 2, "b" -> 1).toDF("id", "num").createTempView("a")
Seq("a" -> 2, "b" -> 1).toDF("id", "num").createTempView("b")
runAdaptiveAndVerifyResult(
"""
|SELECT id,num,source FROM (
| SELECT id, num, 'a' as source FROM a
| UNION ALL
| SELECT id, num, 'b' as source FROM b
|) AS c WHERE c.id IN (SELECT id FROM b WHERE num = 2)
""".stripMargin)
}
}
}
test("Avoid plan change if cost is greater") {
val origPlan = sql("SELECT * FROM testData " +
"join testData2 t2 ON key = t2.a " +
"join testData2 t3 on t2.a = t3.a where t2.b = 1").queryExecution.executedPlan
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80",
SQLConf.BROADCAST_HASH_JOIN_OUTPUT_PARTITIONING_EXPAND_LIMIT.key -> "0") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData " +
"join testData2 t2 ON key = t2.a " +
"join testData2 t3 on t2.a = t3.a where t2.b = 1")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 2)
val smj2 = findTopLevelSortMergeJoin(adaptivePlan)
assert(smj2.size == 2, origPlan.toString)
}
}
test("Change merge join to broadcast join without local shuffle reader") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.LOCAL_SHUFFLE_READER_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "40") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"""
|SELECT * FROM testData t1 join testData2 t2
|ON t1.key = t2.a join testData3 t3 on t2.a = t3.a
|where t1.value = 1
""".stripMargin
)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 2)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
      // There is still an SMJ, and its two shuffles can't use the local shuffle reader.
checkNumLocalShuffleReaders(adaptivePlan, 2)
}
}
test("Avoid changing merge join to broadcast join if too many empty partitions on build plan") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.NON_EMPTY_PARTITION_RATIO_FOR_BROADCAST_JOIN.key -> "0.5") {
      // `testData` is small enough to be broadcast, but its ratio of empty partitions is above the configured threshold.
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.isEmpty)
}
// It is still possible to broadcast `testData2`.
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "2000") {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val bhj = findTopLevelBroadcastHashJoin(adaptivePlan)
assert(bhj.size == 1)
assert(bhj.head.buildSide == BuildRight)
}
}
}
test("SPARK-29906: AQE should not introduce extra shuffle for outermost limit") {
var numStages = 0
val listener = new SparkListener {
override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
numStages = jobStart.stageInfos.length
}
}
try {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
spark.sparkContext.addSparkListener(listener)
spark.range(0, 100, 1, numPartitions = 10).take(1)
spark.sparkContext.listenerBus.waitUntilEmpty()
// Should be only one stage since there is no shuffle.
assert(numStages == 1)
}
} finally {
spark.sparkContext.removeSparkListener(listener)
}
}
test("SPARK-30524: Do not optimize skew join if introduce additional shuffle") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "100",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "100") {
withTempView("skewData1", "skewData2") {
spark
.range(0, 1000, 1, 10)
.selectExpr("id % 3 as key1", "id as value1")
.createOrReplaceTempView("skewData1")
spark
.range(0, 1000, 1, 10)
.selectExpr("id % 1 as key2", "id as value2")
.createOrReplaceTempView("skewData2")
def checkSkewJoin(query: String, optimizeSkewJoin: Boolean): Unit = {
val (_, innerAdaptivePlan) = runAdaptiveAndVerifyResult(query)
val innerSmj = findTopLevelSortMergeJoin(innerAdaptivePlan)
assert(innerSmj.size == 1 && innerSmj.head.isSkewJoin == optimizeSkewJoin)
}
checkSkewJoin(
"SELECT key1 FROM skewData1 JOIN skewData2 ON key1 = key2", true)
// Additional shuffle introduced, so disable the "OptimizeSkewedJoin" optimization
checkSkewJoin(
"SELECT key1 FROM skewData1 JOIN skewData2 ON key1 = key2 GROUP BY key1", false)
}
}
}
test("SPARK-29544: adaptive skew join with different join types") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.COALESCE_PARTITIONS_MIN_PARTITION_NUM.key -> "1",
SQLConf.SHUFFLE_PARTITIONS.key -> "100",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "800",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "800") {
withTempView("skewData1", "skewData2") {
spark
.range(0, 1000, 1, 10)
.select(
when('id < 250, 249)
.when('id >= 750, 1000)
.otherwise('id).as("key1"),
'id as "value1")
.createOrReplaceTempView("skewData1")
spark
.range(0, 1000, 1, 10)
.select(
when('id < 250, 249)
.otherwise('id).as("key2"),
'id as "value2")
.createOrReplaceTempView("skewData2")
def checkSkewJoin(
joins: Seq[SortMergeJoinExec],
leftSkewNum: Int,
rightSkewNum: Int): Unit = {
assert(joins.size == 1 && joins.head.isSkewJoin)
assert(joins.head.left.collect {
case r: CustomShuffleReaderExec => r
}.head.partitionSpecs.collect {
case p: PartialReducerPartitionSpec => p.reducerIndex
}.distinct.length == leftSkewNum)
assert(joins.head.right.collect {
case r: CustomShuffleReaderExec => r
}.head.partitionSpecs.collect {
case p: PartialReducerPartitionSpec => p.reducerIndex
}.distinct.length == rightSkewNum)
}
// skewed inner join optimization
val (_, innerAdaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM skewData1 join skewData2 ON key1 = key2")
val innerSmj = findTopLevelSortMergeJoin(innerAdaptivePlan)
checkSkewJoin(innerSmj, 2, 1)
// skewed left outer join optimization
val (_, leftAdaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM skewData1 left outer join skewData2 ON key1 = key2")
val leftSmj = findTopLevelSortMergeJoin(leftAdaptivePlan)
checkSkewJoin(leftSmj, 2, 0)
// skewed right outer join optimization
val (_, rightAdaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM skewData1 right outer join skewData2 ON key1 = key2")
val rightSmj = findTopLevelSortMergeJoin(rightAdaptivePlan)
checkSkewJoin(rightSmj, 0, 1)
}
}
}
test("SPARK-30291: AQE should catch the exceptions when doing materialize") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
withTable("bucketed_table") {
val df1 =
(0 until 50).map(i => (i % 5, i % 13, i.toString)).toDF("i", "j", "k").as("df1")
df1.write.format("parquet").bucketBy(8, "i").saveAsTable("bucketed_table")
val warehouseFilePath = new URI(spark.sessionState.conf.warehousePath).getPath
val tableDir = new File(warehouseFilePath, "bucketed_table")
Utils.deleteRecursively(tableDir)
df1.write.parquet(tableDir.getAbsolutePath)
val aggregated = spark.table("bucketed_table").groupBy("i").count()
val error = intercept[Exception] {
aggregated.count()
}
assert(error.getCause().toString contains "Invalid bucket file")
assert(error.getSuppressed.size === 0)
}
}
}
test("SPARK-30403: AQE should handle InSubquery") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
runAdaptiveAndVerifyResult("SELECT * FROM testData LEFT OUTER join testData2" +
" ON key = a AND key NOT IN (select a from testData3) where value = '1'"
)
}
}
test("force apply AQE") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
val plan = sql("SELECT * FROM testData").queryExecution.executedPlan
assert(plan.isInstanceOf[AdaptiveSparkPlanExec])
}
}
test("SPARK-30719: do not log warning if intentionally skip AQE") {
val testAppender = new LogAppender("aqe logging warning test when skip")
withLogAppender(testAppender) {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val plan = sql("SELECT * FROM testData").queryExecution.executedPlan
assert(!plan.isInstanceOf[AdaptiveSparkPlanExec])
}
}
assert(!testAppender.loggingEvents
.exists(msg => msg.getRenderedMessage.contains(
s"${SQLConf.ADAPTIVE_EXECUTION_ENABLED.key} is" +
s" enabled but is not supported for")))
}
test("test log level") {
def verifyLog(expectedLevel: Level): Unit = {
val logAppender = new LogAppender("adaptive execution")
withLogAppender(
logAppender,
loggerName = Some(AdaptiveSparkPlanExec.getClass.getName.dropRight(1)),
level = Some(Level.TRACE)) {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
sql("SELECT * FROM testData join testData2 ON key = a where value = '1'").collect()
}
}
Seq("Plan changed", "Final plan").foreach { msg =>
assert(
logAppender.loggingEvents.exists { event =>
event.getRenderedMessage.contains(msg) && event.getLevel == expectedLevel
})
}
}
// Verify default log level
verifyLog(Level.DEBUG)
// Verify custom log level
val levels = Seq(
"TRACE" -> Level.TRACE,
"trace" -> Level.TRACE,
"DEBUG" -> Level.DEBUG,
"debug" -> Level.DEBUG,
"INFO" -> Level.INFO,
"info" -> Level.INFO,
"WARN" -> Level.WARN,
"warn" -> Level.WARN,
"ERROR" -> Level.ERROR,
"error" -> Level.ERROR,
"deBUG" -> Level.DEBUG)
levels.foreach { level =>
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_LOG_LEVEL.key -> level._1) {
verifyLog(level._2)
}
}
}
test("tree string output") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val df = sql("SELECT * FROM testData join testData2 ON key = a where value = '1'")
val planBefore = df.queryExecution.executedPlan
assert(!planBefore.toString.contains("== Current Plan =="))
assert(!planBefore.toString.contains("== Initial Plan =="))
df.collect()
val planAfter = df.queryExecution.executedPlan
assert(planAfter.toString.contains("== Final Plan =="))
assert(planAfter.toString.contains("== Initial Plan =="))
}
}
test("SPARK-31384: avoid NPE in OptimizeSkewedJoin when there's 0 partition plan") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1") {
withTempView("t2") {
// create DataFrame with 0 partition
spark.createDataFrame(sparkContext.emptyRDD[Row], new StructType().add("b", IntegerType))
.createOrReplaceTempView("t2")
// should run successfully without NPE
runAdaptiveAndVerifyResult("SELECT * FROM testData2 t1 left semi join t2 ON t1.a=t2.b")
}
}
}
test("metrics of the shuffle reader") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT key FROM testData GROUP BY key")
val readers = collect(adaptivePlan) {
case r: CustomShuffleReaderExec => r
}
assert(readers.length == 1)
val reader = readers.head
assert(!reader.isLocalReader)
assert(!reader.hasSkewedPartition)
assert(reader.hasCoalescedPartition)
assert(reader.metrics.keys.toSeq.sorted == Seq(
"numPartitions", "partitionDataSize"))
assert(reader.metrics("numPartitions").value == reader.partitionSpecs.length)
assert(reader.metrics("partitionDataSize").value > 0)
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData join testData2 ON key = a where value = '1'")
val join = collect(adaptivePlan) {
case j: BroadcastHashJoinExec => j
}.head
assert(join.buildSide == BuildLeft)
val readers = collect(join.right) {
case r: CustomShuffleReaderExec => r
}
assert(readers.length == 1)
val reader = readers.head
assert(reader.isLocalReader)
assert(reader.metrics.keys.toSeq == Seq("numPartitions"))
assert(reader.metrics("numPartitions").value == reader.partitionSpecs.length)
}
withSQLConf(
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.SHUFFLE_PARTITIONS.key -> "100",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "800",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "800") {
withTempView("skewData1", "skewData2") {
spark
.range(0, 1000, 1, 10)
.select(
when('id < 250, 249)
.when('id >= 750, 1000)
.otherwise('id).as("key1"),
'id as "value1")
.createOrReplaceTempView("skewData1")
spark
.range(0, 1000, 1, 10)
.select(
when('id < 250, 249)
.otherwise('id).as("key2"),
'id as "value2")
.createOrReplaceTempView("skewData2")
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM skewData1 join skewData2 ON key1 = key2")
val readers = collect(adaptivePlan) {
case r: CustomShuffleReaderExec => r
}
readers.foreach { reader =>
assert(!reader.isLocalReader)
assert(reader.hasCoalescedPartition)
assert(reader.hasSkewedPartition)
assert(reader.metrics.contains("numSkewedPartitions"))
}
assert(readers(0).metrics("numSkewedPartitions").value == 2)
assert(readers(0).metrics("numSkewedSplits").value == 15)
assert(readers(1).metrics("numSkewedPartitions").value == 1)
assert(readers(1).metrics("numSkewedSplits").value == 12)
}
}
}
}
test("control a plan explain mode in listeners via SQLConf") {
def checkPlanDescription(mode: String, expected: Seq[String]): Unit = {
var checkDone = false
val listener = new SparkListener {
override def onOtherEvent(event: SparkListenerEvent): Unit = {
event match {
case SparkListenerSQLAdaptiveExecutionUpdate(_, planDescription, _) =>
assert(expected.forall(planDescription.contains))
checkDone = true
case _ => // ignore other events
}
}
}
spark.sparkContext.addSparkListener(listener)
withSQLConf(SQLConf.UI_EXPLAIN_MODE.key -> mode,
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
val dfAdaptive = sql("SELECT * FROM testData JOIN testData2 ON key = a WHERE value = '1'")
try {
checkAnswer(dfAdaptive, Row(1, "1", 1, 1) :: Row(1, "1", 1, 2) :: Nil)
spark.sparkContext.listenerBus.waitUntilEmpty()
assert(checkDone)
} finally {
spark.sparkContext.removeSparkListener(listener)
}
}
}
Seq(("simple", Seq("== Physical Plan ==")),
("extended", Seq("== Parsed Logical Plan ==", "== Analyzed Logical Plan ==",
"== Optimized Logical Plan ==", "== Physical Plan ==")),
("codegen", Seq("WholeStageCodegen subtrees")),
("cost", Seq("== Optimized Logical Plan ==", "Statistics(sizeInBytes")),
("formatted", Seq("== Physical Plan ==", "Output", "Arguments"))).foreach {
case (mode, expected) =>
checkPlanDescription(mode, expected)
}
}
test("SPARK-30953: InsertAdaptiveSparkPlan should apply AQE on child plan of write commands") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
withTable("t1") {
val plan = sql("CREATE TABLE t1 USING parquet AS SELECT 1 col").queryExecution.executedPlan
assert(plan.isInstanceOf[DataWritingCommandExec])
assert(plan.asInstanceOf[DataWritingCommandExec].child.isInstanceOf[AdaptiveSparkPlanExec])
}
}
}
test("AQE should set active session during execution") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val df = spark.range(10).select(sum('id))
assert(df.queryExecution.executedPlan.isInstanceOf[AdaptiveSparkPlanExec])
SparkSession.setActiveSession(null)
checkAnswer(df, Seq(Row(45)))
SparkSession.setActiveSession(spark) // recover the active session.
}
}
test("No deadlock in UI update") {
object TestStrategy extends Strategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = plan match {
case _: Aggregate =>
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
spark.range(5).rdd
}
Nil
case _ => Nil
}
}
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
try {
spark.experimental.extraStrategies = TestStrategy :: Nil
val df = spark.range(10).groupBy('id).count()
df.collect()
} finally {
spark.experimental.extraStrategies = Nil
}
}
}
test("SPARK-31658: SQL UI should show write commands") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.ADAPTIVE_EXECUTION_FORCE_APPLY.key -> "true") {
withTable("t1") {
var checkDone = false
val listener = new SparkListener {
override def onOtherEvent(event: SparkListenerEvent): Unit = {
event match {
case SparkListenerSQLAdaptiveExecutionUpdate(_, _, planInfo) =>
assert(planInfo.nodeName == "Execute CreateDataSourceTableAsSelectCommand")
checkDone = true
case _ => // ignore other events
}
}
}
spark.sparkContext.addSparkListener(listener)
try {
sql("CREATE TABLE t1 USING parquet AS SELECT 1 col").collect()
spark.sparkContext.listenerBus.waitUntilEmpty()
assert(checkDone)
} finally {
spark.sparkContext.removeSparkListener(listener)
}
}
}
}
test("SPARK-31220, SPARK-32056: repartition by expression with AQE") {
Seq(true, false).foreach { enableAQE =>
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> enableAQE.toString,
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_INITIAL_PARTITION_NUM.key -> "10",
SQLConf.SHUFFLE_PARTITIONS.key -> "10") {
val df1 = spark.range(10).repartition($"id")
val df2 = spark.range(10).repartition($"id" + 1)
val partitionsNum1 = df1.rdd.collectPartitions().length
val partitionsNum2 = df2.rdd.collectPartitions().length
if (enableAQE) {
assert(partitionsNum1 < 10)
assert(partitionsNum2 < 10)
checkInitialPartitionNum(df1, 10)
checkInitialPartitionNum(df2, 10)
} else {
assert(partitionsNum1 === 10)
assert(partitionsNum2 === 10)
}
// Don't coalesce partitions if the number of partitions is specified.
val df3 = spark.range(10).repartition(10, $"id")
val df4 = spark.range(10).repartition(10)
assert(df3.rdd.collectPartitions().length == 10)
assert(df4.rdd.collectPartitions().length == 10)
}
}
}
test("SPARK-31220, SPARK-32056: repartition by range with AQE") {
Seq(true, false).foreach { enableAQE =>
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> enableAQE.toString,
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_INITIAL_PARTITION_NUM.key -> "10",
SQLConf.SHUFFLE_PARTITIONS.key -> "10") {
val df1 = spark.range(10).toDF.repartitionByRange($"id".asc)
val df2 = spark.range(10).toDF.repartitionByRange(($"id" + 1).asc)
val partitionsNum1 = df1.rdd.collectPartitions().length
val partitionsNum2 = df2.rdd.collectPartitions().length
if (enableAQE) {
assert(partitionsNum1 < 10)
assert(partitionsNum2 < 10)
checkInitialPartitionNum(df1, 10)
checkInitialPartitionNum(df2, 10)
} else {
assert(partitionsNum1 === 10)
assert(partitionsNum2 === 10)
}
// Don't coalesce partitions if the number of partitions is specified.
val df3 = spark.range(10).repartitionByRange(10, $"id".asc)
assert(df3.rdd.collectPartitions().length == 10)
}
}
}
test("SPARK-31220, SPARK-32056: repartition using sql and hint with AQE") {
Seq(true, false).foreach { enableAQE =>
withTempView("test") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> enableAQE.toString,
SQLConf.COALESCE_PARTITIONS_ENABLED.key -> "true",
SQLConf.COALESCE_PARTITIONS_INITIAL_PARTITION_NUM.key -> "10",
SQLConf.SHUFFLE_PARTITIONS.key -> "10") {
spark.range(10).toDF.createTempView("test")
val df1 = spark.sql("SELECT /*+ REPARTITION(id) */ * from test")
val df2 = spark.sql("SELECT /*+ REPARTITION_BY_RANGE(id) */ * from test")
val df3 = spark.sql("SELECT * from test DISTRIBUTE BY id")
val df4 = spark.sql("SELECT * from test CLUSTER BY id")
val partitionsNum1 = df1.rdd.collectPartitions().length
val partitionsNum2 = df2.rdd.collectPartitions().length
val partitionsNum3 = df3.rdd.collectPartitions().length
val partitionsNum4 = df4.rdd.collectPartitions().length
if (enableAQE) {
assert(partitionsNum1 < 10)
assert(partitionsNum2 < 10)
assert(partitionsNum3 < 10)
assert(partitionsNum4 < 10)
checkInitialPartitionNum(df1, 10)
checkInitialPartitionNum(df2, 10)
checkInitialPartitionNum(df3, 10)
checkInitialPartitionNum(df4, 10)
} else {
assert(partitionsNum1 === 10)
assert(partitionsNum2 === 10)
assert(partitionsNum3 === 10)
assert(partitionsNum4 === 10)
}
// Don't coalesce partitions if the number of partitions is specified.
val df5 = spark.sql("SELECT /*+ REPARTITION(10, id) */ * from test")
val df6 = spark.sql("SELECT /*+ REPARTITION_BY_RANGE(10, id) */ * from test")
assert(df5.rdd.collectPartitions().length == 10)
assert(df6.rdd.collectPartitions().length == 10)
}
}
}
}
test("SPARK-32573: Eliminate NAAJ when BuildSide is HashedRelationWithAllNullKeys") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> Long.MaxValue.toString) {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData2 t1 WHERE t1.b NOT IN (SELECT b FROM testData3)")
val bhj = findTopLevelBroadcastHashJoin(plan)
assert(bhj.size == 1)
val join = findTopLevelBaseJoin(adaptivePlan)
assert(join.isEmpty)
checkNumLocalShuffleReaders(adaptivePlan)
}
}
test("SPARK-32717: AQEOptimizer should respect excludedRules configuration") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> Long.MaxValue.toString,
      // This test is a copy of test(SPARK-32573), in order to verify that the configuration
      // `spark.sql.adaptive.optimizer.excludedRules` works as expected.
SQLConf.ADAPTIVE_OPTIMIZER_EXCLUDED_RULES.key -> EliminateJoinToEmptyRelation.ruleName) {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT * FROM testData2 t1 WHERE t1.b NOT IN (SELECT b FROM testData3)")
val bhj = findTopLevelBroadcastHashJoin(plan)
assert(bhj.size == 1)
val join = findTopLevelBaseJoin(adaptivePlan)
      // This is different compared to test(SPARK-32573) because the rule
      // `EliminateJoinToEmptyRelation` has been excluded.
assert(join.nonEmpty)
checkNumLocalShuffleReaders(adaptivePlan)
}
}
test("SPARK-32649: Eliminate inner and semi join to empty relation") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
Seq(
// inner join (small table at right side)
"SELECT * FROM testData t1 join testData3 t2 ON t1.key = t2.a WHERE t2.b = 1",
// inner join (small table at left side)
"SELECT * FROM testData3 t1 join testData t2 ON t1.a = t2.key WHERE t1.b = 1",
// left semi join
"SELECT * FROM testData t1 left semi join testData3 t2 ON t1.key = t2.a AND t2.b = 1"
).foreach(query => {
val (plan, adaptivePlan) = runAdaptiveAndVerifyResult(query)
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.size == 1)
val join = findTopLevelBaseJoin(adaptivePlan)
assert(join.isEmpty)
checkNumLocalShuffleReaders(adaptivePlan)
})
}
}
test("SPARK-32753: Only copy tags to node with no tags") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
withTempView("v1") {
spark.range(10).union(spark.range(10)).createOrReplaceTempView("v1")
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(
"SELECT id FROM v1 GROUP BY id DISTRIBUTE BY id")
assert(collect(adaptivePlan) {
case s: ShuffleExchangeExec => s
}.length == 1)
}
}
}
test("Logging plan changes for AQE") {
val testAppender = new LogAppender("plan changes")
withLogAppender(testAppender) {
withSQLConf(
SQLConf.PLAN_CHANGE_LOG_LEVEL.key -> "INFO",
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
sql("SELECT * FROM testData JOIN testData2 ON key = a " +
"WHERE value = (SELECT max(a) FROM testData3)").collect()
}
Seq("=== Result of Batch AQE Preparations ===",
"=== Result of Batch AQE Post Stage Creation ===",
"=== Result of Batch AQE Replanning ===",
"=== Result of Batch AQE Query Stage Optimization ===",
"=== Result of Batch AQE Final Query Stage Optimization ===").foreach { expectedMsg =>
assert(testAppender.loggingEvents.exists(_.getRenderedMessage.contains(expectedMsg)))
}
}
}
test("SPARK-32932: Do not use local shuffle reader at final stage on write command") {
withSQLConf(SQLConf.PARTITION_OVERWRITE_MODE.key -> PartitionOverwriteMode.DYNAMIC.toString,
SQLConf.SHUFFLE_PARTITIONS.key -> "5",
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val data = for (
i <- 1L to 10L;
j <- 1L to 3L
) yield (i, j)
val df = data.toDF("i", "j").repartition($"j")
var noLocalReader: Boolean = false
val listener = new QueryExecutionListener {
override def onSuccess(funcName: String, qe: QueryExecution, durationNs: Long): Unit = {
qe.executedPlan match {
case plan@(_: DataWritingCommandExec | _: V2TableWriteExec) =>
assert(plan.asInstanceOf[UnaryExecNode].child.isInstanceOf[AdaptiveSparkPlanExec])
noLocalReader = collect(plan) {
case exec: CustomShuffleReaderExec if exec.isLocalReader => exec
}.isEmpty
case _ => // ignore other events
}
}
override def onFailure(funcName: String, qe: QueryExecution,
exception: Exception): Unit = {}
}
spark.listenerManager.register(listener)
withTable("t") {
df.write.partitionBy("j").saveAsTable("t")
sparkContext.listenerBus.waitUntilEmpty()
assert(noLocalReader)
noLocalReader = false
}
// Test DataSource v2
val format = classOf[NoopDataSource].getName
df.write.format(format).mode("overwrite").save()
sparkContext.listenerBus.waitUntilEmpty()
assert(noLocalReader)
noLocalReader = false
spark.listenerManager.unregister(listener)
}
}
test("SPARK-33494: Do not use local shuffle reader for repartition") {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val df = spark.table("testData").repartition('key)
df.collect()
      // The local shuffle reader breaks partitioning and shouldn't be used for a repartition
      // operation that is explicitly specified by users.
checkNumLocalShuffleReaders(df.queryExecution.executedPlan, numShufflesWithoutLocalReader = 1)
}
}
test("SPARK-33551: Do not use custom shuffle reader for repartition") {
def hasRepartitionShuffle(plan: SparkPlan): Boolean = {
find(plan) {
case s: ShuffleExchangeLike =>
s.shuffleOrigin == REPARTITION || s.shuffleOrigin == REPARTITION_WITH_NUM
case _ => false
}.isDefined
}
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.SHUFFLE_PARTITIONS.key -> "5") {
val df = sql(
"""
|SELECT * FROM (
| SELECT * FROM testData WHERE key = 1
|)
|RIGHT OUTER JOIN testData2
|ON value = b
""".stripMargin)
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "80") {
// Repartition with no partition num specified.
val dfRepartition = df.repartition('b)
dfRepartition.collect()
val plan = dfRepartition.queryExecution.executedPlan
// The top shuffle from repartition is optimized out.
assert(!hasRepartitionShuffle(plan))
val bhj = findTopLevelBroadcastHashJoin(plan)
assert(bhj.length == 1)
checkNumLocalShuffleReaders(plan, 1)
// Probe side is coalesced.
val customReader = bhj.head.right.find(_.isInstanceOf[CustomShuffleReaderExec])
assert(customReader.isDefined)
assert(customReader.get.asInstanceOf[CustomShuffleReaderExec].hasCoalescedPartition)
        // Repartition with default partition num specified.
val dfRepartitionWithNum = df.repartition(5, 'b)
dfRepartitionWithNum.collect()
val planWithNum = dfRepartitionWithNum.queryExecution.executedPlan
// The top shuffle from repartition is optimized out.
assert(!hasRepartitionShuffle(planWithNum))
val bhjWithNum = findTopLevelBroadcastHashJoin(planWithNum)
assert(bhjWithNum.length == 1)
checkNumLocalShuffleReaders(planWithNum, 1)
// Probe side is not coalesced.
assert(bhjWithNum.head.right.find(_.isInstanceOf[CustomShuffleReaderExec]).isEmpty)
        // Repartition with non-default partition num specified.
val dfRepartitionWithNum2 = df.repartition(3, 'b)
dfRepartitionWithNum2.collect()
val planWithNum2 = dfRepartitionWithNum2.queryExecution.executedPlan
// The top shuffle from repartition is not optimized out, and this is the only shuffle that
// does not have local shuffle reader.
assert(hasRepartitionShuffle(planWithNum2))
val bhjWithNum2 = findTopLevelBroadcastHashJoin(planWithNum2)
assert(bhjWithNum2.length == 1)
checkNumLocalShuffleReaders(planWithNum2, 1)
val customReader2 = bhjWithNum2.head.right.find(_.isInstanceOf[CustomShuffleReaderExec])
assert(customReader2.isDefined)
assert(customReader2.get.asInstanceOf[CustomShuffleReaderExec].isLocalReader)
}
// Force skew join
withSQLConf(SQLConf.AUTO_BROADCASTJOIN_THRESHOLD.key -> "-1",
SQLConf.SKEW_JOIN_ENABLED.key -> "true",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_THRESHOLD.key -> "1",
SQLConf.SKEW_JOIN_SKEWED_PARTITION_FACTOR.key -> "0",
SQLConf.ADVISORY_PARTITION_SIZE_IN_BYTES.key -> "10") {
// Repartition with no partition num specified.
val dfRepartition = df.repartition('b)
dfRepartition.collect()
val plan = dfRepartition.queryExecution.executedPlan
// The top shuffle from repartition is optimized out.
assert(!hasRepartitionShuffle(plan))
val smj = findTopLevelSortMergeJoin(plan)
assert(smj.length == 1)
// No skew join due to the repartition.
assert(!smj.head.isSkewJoin)
// Both sides are coalesced.
val customReaders = collect(smj.head) {
case c: CustomShuffleReaderExec if c.hasCoalescedPartition => c
}
assert(customReaders.length == 2)
// Repartition with default partition num specified.
val dfRepartitionWithNum = df.repartition(5, 'b)
dfRepartitionWithNum.collect()
val planWithNum = dfRepartitionWithNum.queryExecution.executedPlan
// The top shuffle from repartition is optimized out.
assert(!hasRepartitionShuffle(planWithNum))
val smjWithNum = findTopLevelSortMergeJoin(planWithNum)
assert(smjWithNum.length == 1)
// No skew join due to the repartition.
assert(!smjWithNum.head.isSkewJoin)
// No coalesce due to the num in repartition.
val customReadersWithNum = collect(smjWithNum.head) {
case c: CustomShuffleReaderExec if c.hasCoalescedPartition => c
}
assert(customReadersWithNum.isEmpty)
        // Repartition with non-default partition num specified.
val dfRepartitionWithNum2 = df.repartition(3, 'b)
dfRepartitionWithNum2.collect()
val planWithNum2 = dfRepartitionWithNum2.queryExecution.executedPlan
// The top shuffle from repartition is not optimized out.
assert(hasRepartitionShuffle(planWithNum2))
val smjWithNum2 = findTopLevelSortMergeJoin(planWithNum2)
assert(smjWithNum2.length == 1)
// Skew join can apply as the repartition is not optimized out.
assert(smjWithNum2.head.isSkewJoin)
}
}
}
test("SPARK-34091: Batch shuffle fetch in AQE partition coalescing") {
withSQLConf(
SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true",
SQLConf.SHUFFLE_PARTITIONS.key -> "10000",
SQLConf.FETCH_SHUFFLE_BLOCKS_IN_BATCH.key -> "true") {
withTable("t1") {
spark.range(100).selectExpr("id + 1 as a").write.format("parquet").saveAsTable("t1")
val query = "SELECT SUM(a) FROM t1 GROUP BY a"
val (_, adaptivePlan) = runAdaptiveAndVerifyResult(query)
val metricName = SQLShuffleReadMetricsReporter.LOCAL_BLOCKS_FETCHED
val blocksFetchedMetric = collectFirst(adaptivePlan) {
case p if p.metrics.contains(metricName) => p.metrics(metricName)
}
assert(blocksFetchedMetric.isDefined)
val blocksFetched = blocksFetchedMetric.get.value
withSQLConf(SQLConf.FETCH_SHUFFLE_BLOCKS_IN_BATCH.key -> "false") {
val (_, adaptivePlan2) = runAdaptiveAndVerifyResult(query)
val blocksFetchedMetric2 = collectFirst(adaptivePlan2) {
case p if p.metrics.contains(metricName) => p.metrics(metricName)
}
assert(blocksFetchedMetric2.isDefined)
val blocksFetched2 = blocksFetchedMetric2.get.value
assert(blocksFetched < blocksFetched2)
}
}
}
}
test("SPARK-33933: Materialize BroadcastQueryStage first in AQE") {
val testAppender = new LogAppender("aqe query stage materialization order test")
val df = spark.range(1000).select($"id" % 26, $"id" % 10)
.toDF("index", "pv")
val dim = Range(0, 26).map(x => (x, ('a' + x).toChar.toString))
.toDF("index", "name")
val testDf = df.groupBy("index")
.agg(sum($"pv").alias("pv"))
.join(dim, Seq("index"))
withLogAppender(testAppender, level = Some(Level.DEBUG)) {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "true") {
val result = testDf.collect()
assert(result.length == 26)
}
}
val materializeLogs = testAppender.loggingEvents
.map(_.getRenderedMessage)
.filter(_.startsWith("Materialize query stage"))
.toArray
assert(materializeLogs(0).startsWith("Materialize query stage BroadcastQueryStageExec"))
assert(materializeLogs(1).startsWith("Materialize query stage ShuffleQueryStageExec"))
}
}
| witgo/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/adaptive/AdaptiveQueryExecSuite.scala | Scala | apache-2.0 | 60,845 |
package com.outr.stripe.card
import com.outr.stripe.TokenError
import scala.scalajs.js
@js.native
trait CardTokenInfo extends js.Object {
def id: String = js.native
def card: StripeCardResponse = js.native
def created: Long = js.native
def livemode: Boolean = js.native
def `type`: String = js.native
def `object`: String = js.native
def used: Boolean = js.native
def error: TokenError = js.native
}
| outr/scala-stripe | core/js/src/main/scala/com/outr/stripe/card/CardTokenInfo.scala | Scala | mit | 419 |
// /////////////////////////////////////////// //
// Fureteur - https://github.com/gip/fureteur //
// /////////////////////////////////////////// //
package fureteur.config
import scala.collection.mutable._
import com.rabbitmq.client._
import fureteur.data._
class Config(d:Data) {
val data= d
def apply(s:String):String = {
data get s
}
def ++(kvs:List[(String, String)]) = new Config( data ++ kvs )
def getInt(s:String) = {
(data get s).toInt
}
def getOption(s:String) = {
data getOption s
}
def getLongOption(s:String) = {
(data getOption s) match {
case Some(s) => Some(s.toLong)
case _ => None
}
}
def exists(s:String) = {
data exists s
}
def getObject(s:String):Config = { new Config(data.getObject(s)) }
def unwrapArray(s:String):List[Config] = {
(data unwrapArray s) map (new Config(_))
}
}
object Config {
val configs = new HashMap[String, (String, Config)]()
def registerConfig(s:String, s0:String):Unit = {
val c= Config.fromJson(s)
configs+= (c("conf") -> (s0,c))
}
def getConfig(s:String) = {
configs(s)._2
}
def dumpConfig(s:String) = {
configs(s)._1
}
def fromJson(s:String) = {
new Config( Data.fromJson(s) )
}
def showConfigs() ={
configs.toList.map ( kkv => (kkv._1,kkv._2._2("description") ) )
}
}
| gip/fureteur | src/main/scala/config.scala | Scala | mit | 1,360 |
package x7c1.linen.modern.init.inspector
import android.app.Dialog
import android.content.Context
import android.os.Bundle
import android.support.v4.app.{DialogFragment, FragmentActivity}
import android.support.v7.app.AlertDialog
import android.widget.Button
import x7c1.linen.glue.res.layout.SourceSearchStart
import x7c1.linen.glue.service.ServiceControl
import x7c1.linen.modern.init.inspector.StartSearchDialog.Arguments
import x7c1.linen.repository.inspector.ActionPageUrl
import x7c1.linen.repository.inspector.ActionPageUrlError.{EmptyUrl, InvalidFormat}
import x7c1.linen.scene.inspector.InspectorService
import x7c1.wheat.ancient.context.ContextualFactory
import x7c1.wheat.ancient.resource.ViewHolderProviderFactory
import x7c1.wheat.lore.dialog.DelayedDialog
import x7c1.wheat.macros.fragment.TypedFragment
import x7c1.wheat.macros.logger.Log
import x7c1.wheat.modern.decorator.Imports._
import x7c1.wheat.modern.dialog.tasks.KeyboardControl
object StartSearchDialog {
class Arguments(
val clientAccountId: Long,
val dialogFactory: ContextualFactory[AlertDialog.Builder],
val inputLayoutFactory: ViewHolderProviderFactory[SourceSearchStart]
)
}
class StartSearchDialog extends DialogFragment
with DelayedDialog
with TypedFragment[Arguments] {
private lazy val args = getTypedArguments
private lazy val keyboard = {
KeyboardControl[StartSearchError](this, layout.originUrl)
}
def showIn(activity: FragmentActivity): Unit = {
show(activity.getSupportFragmentManager, "start-search-dialog")
}
override def onCreateDialog(savedInstanceState: Bundle): Dialog = {
args.dialogFactory.createAlertDialog(
title = "Search sources",
positiveText = "Start",
negativeText = "Cancel",
layoutView = layout.itemView
)
}
override def onStart(): Unit = {
super.onStart()
initializeButtons(
positive = onClickPositive,
negative = onClickNegative
)
}
private def onClickPositive(button: Button) = {
val context = getActivity.asInstanceOf[Context with ServiceControl]
ActionPageUrl.create(
accountId = args.clientAccountId,
url = layout.originUrl.text.toString
) match {
case Right(pageUrl) =>
InspectorService(context) inspect pageUrl
keyboard.taskToHide().execute()
case Left(e: EmptyUrl) =>
layout.originUrlLayout setError "(required)"
case Left(e: InvalidFormat) =>
layout.originUrlLayout setError {
e.cause.map(_.getMessage) getOrElse "invalid format"
}
Log info e.detail
case Left(e) =>
layout.originUrlLayout setError {
e.cause.map(_.getMessage) getOrElse "unknown format"
}
Log error e.detail
}
}
private def onClickNegative(button: Button) = {
Log info s"[init]"
keyboard.taskToHide().execute()
}
private lazy val layout = {
args.inputLayoutFactory.create(getActivity).inflate()
}
}
| x7c1/Linen | linen-modern/src/main/scala/x7c1/linen/modern/init/inspector/StartSearchDialog.scala | Scala | mit | 2,971 |
package org.littlewings.javaee7.cdi
import javax.inject.Inject
import org.apache.deltaspike.core.api.projectstage.ProjectStage
import org.apache.deltaspike.testcontrol.api.junit.CdiTestRunner
import org.junit.Test
import org.junit.runner.RunWith
import org.scalatest.Matchers
import org.scalatest.junit.JUnitSuite
@RunWith(classOf[CdiTestRunner])
class SimpleProjectStageSpec extends JUnitSuite with Matchers {
@Inject
var messageService: MessageService = _
@Test
def messageTest(): Unit = {
messageService.get should be("UnitTestStage!!")
}
@Inject
var projectStage: ProjectStage = _
@Test
def projectStageTest(): Unit = {
projectStage should be(a[ProjectStage.UnitTest])
}
@Inject
var greetingService: GreetingService = _
@Test
def greetingTest(): Unit = {
greetingService.greet should be("Hello CDI!!")
}
}
| kazuhira-r/javaee7-scala-examples | cdi-deltaspike-project-stage/src/test/scala/org/littlewings/javaee7/cdi/SimpleProjectStageSpec.scala | Scala | mit | 860 |
package com.blinkbox.books.test
import org.mockito.stubbing.Answer
import org.mockito.invocation.InvocationOnMock
import scala.language.implicitConversions
/**
* A convenient pair of wrappers that lets you pass in closures to Mockito's doAnswer/thenAnswer methods.
* See http://henningpetersen.com/post/10/using-mockito-answers-with-scala-2-9 for details.
*/
trait AnswerSugar {
implicit def toAnswer[T](f: () => T): Answer[T] = new Answer[T] {
override def answer(invocation: InvocationOnMock): T = f()
}
implicit def toAnswerWithArguments[T](f: (InvocationOnMock) => T): Answer[T] = new Answer[T] {
override def answer(invocation: InvocationOnMock): T = f(invocation)
}
}
/**
* Companion object that facilitates the importing of <code>AnswerSugar</code> members as
* an alternative to mixing it in.
*/
object AnswerSugar extends AnswerSugar
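/*
 * Editorial usage sketch (not part of the original file; the Service trait and the
 * stubbed value below are invented purely for illustration): it shows how the implicit
 * conversions above let plain closures stand in for Mockito Answer instances.
 */
object AnswerSugarUsageExample extends AnswerSugar {
  import org.mockito.Mockito.{mock, when}

  trait Service { def compute(x: Int): Int }

  def demo(): Int = {
    val service = mock(classOf[Service])
    // The () => Int closure is adapted to an Answer[Int] via toAnswer.
    when(service.compute(1)).thenAnswer(() => 42)
    service.compute(1)
  }
}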
| blinkboxbooks/common-test.scala | src/main/scala/com/blinkbox/books/test/AnswerSugar.scala | Scala | mit | 871 |
import consumer.kafka.ReceiverLauncher
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkContext, SparkConf}
/**
* Created by akhld on 11/12/14.
*/
object LowLevelKafkaConsumer {
def main(arg: Array[String]): Unit = {
import org.apache.log4j.Logger
import org.apache.log4j.Level
Logger.getLogger("org").setLevel(Level.OFF)
Logger.getLogger("akka").setLevel(Level.OFF)
//Create SparkContext
val conf = new SparkConf()
.setMaster("spark://10.252.5.113:7077")
.setAppName("LowLevelKafkaConsumer")
.set("spark.executor.memory", "1g")
.set("spark.rdd.compress","true")
.set("spark.storage.memoryFraction", "1")
.set("spark.streaming.unpersist", "true")
.set("spark.streaming.blockInterval", "200")
val sc = new SparkContext(conf)
    // You might want to uncomment this and add the jars if you are running in standalone mode.
sc.addJar("/home/kafka-spark-consumer/target/kafka-spark-consumer-0.0.1-SNAPSHOT-jar-with-dependencies.jar")
val ssc = new StreamingContext(sc, Seconds(10))
val topic = "valid_subpub"
val zkhosts = "10.252.5.131"
val zkports = "2181"
val brokerPath = "/brokers"
    // Specify the number of receivers you need.
    // It should be less than or equal to the number of partitions of your topic.
val numberOfReceivers = 1
val kafkaProperties: Map[String, String] = Map("zookeeper.hosts" -> zkhosts,
"zookeeper.port" -> zkports,
"zookeeper.broker.path" -> brokerPath ,
"kafka.topic" -> topic,
"zookeeper.consumer.connection" -> "10.252.5.113:2182",
"zookeeper.consumer.path" -> "/spark-kafka",
"kafka.consumer.id" -> "12345")
val props = new java.util.Properties()
kafkaProperties foreach { case (key,value) => props.put(key, value)}
val tmp_stream = ReceiverLauncher.launch(ssc, props, numberOfReceivers)
tmp_stream.foreachRDD(rdd => println("\\n\\nNumber of records in this batch : " + rdd.count()))
ssc.start()
ssc.awaitTermination()
}
}
| jedisct1/kafka-spark-consumer | examples/scala/LowLevelKafkaConsumer.scala | Scala | apache-2.0 | 2,328 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.streaming
import java.util.Locale
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.streaming.OutputMode
class InternalOutputModesSuite extends SparkFunSuite {
test("supported strings") {
def testMode(outputMode: String, expected: OutputMode): Unit = {
assert(InternalOutputModes(outputMode) === expected)
}
testMode("append", OutputMode.Append)
testMode("Append", OutputMode.Append)
testMode("complete", OutputMode.Complete)
testMode("Complete", OutputMode.Complete)
testMode("update", OutputMode.Update)
testMode("Update", OutputMode.Update)
}
test("unsupported strings") {
def testMode(outputMode: String): Unit = {
val acceptedModes = Seq("append", "update", "complete")
val e = intercept[IllegalArgumentException](InternalOutputModes(outputMode))
(Seq("output mode", "unknown", outputMode) ++ acceptedModes).foreach { s =>
assert(e.getMessage.toLowerCase(Locale.ROOT).contains(s.toLowerCase(Locale.ROOT)))
}
}
testMode("Xyz")
}
}
| bravo-zhang/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/streaming/InternalOutputModesSuite.scala | Scala | apache-2.0 | 1,887 |
package collins.controllers.actions.asset
import scala.concurrent.Future
import play.api.data.Form
import play.api.data.Forms.of
import play.api.data.Forms.optional
import play.api.data.Forms.tuple
import play.api.libs.concurrent.Execution.Implicits.defaultContext
import collins.controllers.Api
import collins.controllers.SecureController
import collins.controllers.actions.AssetAction
import collins.controllers.actions.RequestDataHolder
import collins.controllers.actions.SecureAction
import collins.controllers.forms.stateFormat
import collins.controllers.forms.statusFormat
import collins.controllers.validators.ParamValidation
import collins.models.Asset
import collins.models.AssetLifecycle
import collins.models.State
import collins.models.{Status => AssetStatus}
import collins.util.security.SecuritySpecification
import collins.controllers.actions.asset.UpdateAction.Messages.fuck
import collins.controllers.actions.asset.UpdateAction.Messages.invalidState
import collins.controllers.actions.asset.UpdateAction.Messages.invalidStatus
import collins.controllers.actions.asset.UpdateAction.Messages.rootMessage
object UpdateStatusAction extends ParamValidation {
val UpdateForm = Form(tuple(
"status" -> optional(of[AssetStatus]),
"state" -> optional(of[State]),
"reason" -> validatedText(2)
))
}
/**
* Update the status or state of an asset
*
* @apigroup Asset
* @apimethod POST
* @apiurl /api/asset/:tag/status
* @apiparam :tag String asset tag
* @apiparam status Option[Status] new status of asset
* @apiparam state Option[State] new state of asset
* @apiparam reason String reason for maintenance
* @apirespond 200 success
* @apirespond 400 invalid status or state, missing reason, neither state nor status specified
* @apirespond 409 state conflicts with status
* @apirespond 500 error saving status
* @apiperm controllers.AssetApi.updateAssetStatus
* @collinsshell {{{
* collins-shell asset set_status [--status=STATUS --state=STATE --reason='REASON' --tag=TAG]
* }}}
* @curlexample {{{
* curl -v -u blake:admin:first --basic \\
* -d status=Unallocated \\
* -d state=Running \\
* -d reason='Ready for action' \\
* http://localhost:9000/api/asset/TAG/status
* }}}
*/
case class UpdateStatusAction(
assetTag: String,
spec: SecuritySpecification,
handler: SecureController
) extends SecureAction(spec, handler) with AssetAction {
import UpdateAction.Messages._
import UpdateStatusAction._
case class ActionDataHolder(
astatus: Option[AssetStatus], state: Option[State], reason: String
) extends RequestDataHolder
override def validate(): Validation = UpdateForm.bindFromRequest()(request).fold(
err => Left(RequestDataHolder.error400(fieldError(err))),
form => {
withValidAsset(assetTag) { asset =>
val (statusOpt, stateOpt, reason) = form
if (List(statusOpt,stateOpt).filter(_.isDefined).size == 0) {
Left(RequestDataHolder.error400(invalidInvocation))
} else {
checkStateConflict(asset, statusOpt, stateOpt) match {
case (Some(status), Some(state)) =>
Left(RequestDataHolder.error409(stateConflictError(status, state)))
case _ =>
Right(ActionDataHolder(statusOpt, stateOpt, reason))
}
}
}
}
)
override def execute(rd: RequestDataHolder) = Future {
rd match {
case ActionDataHolder(status, state, reason) =>
val lifeCycle = new AssetLifecycle(userOption, tattler)
lifeCycle.updateAssetStatus(definedAsset, status, state, reason).fold(
e => Api.errorResponse("Error updating status", Status.InternalServerError, Some(e)),
b => Api.statusResponse(b)
)
}
}
protected def checkStateConflict(
asset: Asset, statusOpt: Option[AssetStatus], stateOpt: Option[State]
): Tuple2[Option[AssetStatus],Option[State]] = {
val status = statusOpt.getOrElse(asset.getStatus())
val state = stateOpt.getOrElse(State.findById(asset.state).getOrElse(State.empty))
if (state.status == State.ANY_STATUS || state.status == status.id) {
(None, None)
} else {
(Some(status), Some(state))
}
}
protected def invalidInvocation =
rootMessage("controllers.AssetApi.updateStatus.invalidInvocation")
protected def stateConflictError(status: AssetStatus, state: State) =
rootMessage("controllers.AssetApi.updateStatus.stateConflict", status.name, state.name)
protected def fieldError(f: Form[_]) = f match {
case e if e.error("status").isDefined => invalidStatus
case e if e.error("state").isDefined => invalidState
case e if e.error("reason").isDefined =>
rootMessage("controllers.AssetApi.updateStatus.invalidReason")
case n => fuck
}
}
| funzoneq/collins | app/collins/controllers/actions/asset/UpdateStatusAction.scala | Scala | apache-2.0 | 4,784 |
package frameless
package object functions extends Udf {
object aggregate extends AggregateFunctions
}
| bamine/frameless | dataset/src/main/scala/frameless/functions/package.scala | Scala | apache-2.0 | 106 |
package io.github.mandar2812.PlasmaML.dynamics.diffusion
import breeze.linalg.DenseVector
import io.github.mandar2812.dynaml.pipes._
import io.github.mandar2812.dynaml.utils.combine
/**
* <h3>Phase Space Density: Basis Expansions</h3>
* A basis function expansion (φ) of the plasma
* Phase Space Density (ƒ) in the radial diffusion
* system.
*
* ∂ƒ/∂t =
* L<sup>2</sup>∂/∂L(D<sub>LL</sub> × L<sup>-2</sup> × ∂ƒ/∂L)
* - λ(L,t) × ƒ(L,t)
* + Q(L,t)
*
* */
abstract class PSDBasis extends Basis[(Double, Double)] {
self =>
/**
* Dimensionality of φ
* */
val dimension: Int
/**
* Calculates ∂φ/∂L
* */
val f_l: ((Double, Double)) => DenseVector[Double]
/**
* Calculates ∂<sup>2</sup>φ/∂L<sup>2</sup>
* */
val f_ll: ((Double, Double)) => DenseVector[Double]
/**
* Calculates ∂φ/∂t
* */
val f_t: ((Double, Double)) => DenseVector[Double]
/**
* Calculates the basis ψ resulting from applying
* the differential operator of plasma radial diffusion,
* on the basis set φ
*
* D = (d/dt - L<sup>2</sup>d/dL(D<sub>LL</sub> × L<sup>-2</sup> × d/dL) + λ(L,t))
*
* ψ(L,t) = D[φ(L,t)]
*
* @param diffusionField The diffusion field/coefficient.
* @param diffusionFieldGradL The first partial spatial derivative of
* the diffusion field ∂D<sub>LL</sub>/∂L
* @param lossTimeScale The loss rate λ(L,t)
*
* */
def operator_basis(
diffusionField: DataPipe[(Double, Double), Double],
diffusionFieldGradL: DataPipe[(Double, Double), Double],
lossTimeScale: DataPipe[(Double, Double), Double]): Basis[(Double, Double)] =
Basis((x: (Double, Double)) => {
val dll = diffusionField(x)
val alpha = diffusionFieldGradL(x) - 2d*diffusionField(x)/x._1
val lambda = lossTimeScale(x)
f_t(x) + lambda*f(x) - (dll*f_ll(x) + alpha*f_l(x))
})
/**
* Returns a [[PSDBasis]] that is the addition
* of the current basis and the one accepted as
* the method argument.
*
* φ(L,t) = φ<sub>1</sub>(L,t) + φ<sub>2</sub>(L,t)
* */
def +(other: PSDBasis): PSDBasis =
new PSDBasis {
override val dimension: Int = self.dimension
override protected val f = (x: (Double, Double)) => self(x) + other(x)
override val f_l: ((Double, Double)) => DenseVector[Double] =
(x: (Double, Double)) => self.f_l(x) + other.f_l(x)
override val f_ll: ((Double, Double)) => DenseVector[Double] =
(x: (Double, Double)) => self.f_ll(x) + other.f_ll(x)
override val f_t: ((Double, Double)) => DenseVector[Double] =
(x: (Double, Double)) => self.f_t(x) + other.f_t(x)
}
/**
* Returns a [[PSDBasis]] that is the concatenation
* of the current basis and the one accepted as
* the method argument.
*
* φ(L,t) = (φ<sub>1</sub>(L,t), φ<sub>2</sub>(L,t))
* */
def ::(other: PSDBasis): PSDBasis =
new PSDBasis {
override val dimension: Int = self.dimension + other.dimension
override protected val f = (x: (Double, Double)) => DenseVector.vertcat(self(x), other(x))
override val f_l: ((Double, Double)) => DenseVector[Double] =
(x: (Double, Double)) => DenseVector.vertcat(self.f_l(x), other.f_l(x))
override val f_ll: ((Double, Double)) => DenseVector[Double] =
(x: (Double, Double)) => DenseVector.vertcat(self.f_ll(x), other.f_ll(x))
override val f_t: ((Double, Double)) => DenseVector[Double] =
(x: (Double, Double)) => DenseVector.vertcat(self.f_t(x), other.f_t(x))
}
}
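/*
 * Editorial sketch (not part of the original library; the basis choice is invented for
 * illustration): a one-dimensional monomial basis phi(L, t) = L * t, spelled out to show
 * how f, f_l, f_ll and f_t must stay mutually consistent so that operator_basis above
 * assembles the radial-diffusion operator correctly.
 */
object MonomialPSDBasisExample {
  val basis: PSDBasis = new PSDBasis {
    override val dimension: Int = 1
    // phi(L, t) = L * t
    override protected val f = (x: (Double, Double)) => DenseVector(x._1 * x._2)
    // dphi/dL = t
    override val f_l: ((Double, Double)) => DenseVector[Double] =
      (x: (Double, Double)) => DenseVector(x._2)
    // d2phi/dL2 = 0
    override val f_ll: ((Double, Double)) => DenseVector[Double] =
      (x: (Double, Double)) => DenseVector(0d)
    // dphi/dt = L
    override val f_t: ((Double, Double)) => DenseVector[Double] =
      (x: (Double, Double)) => DenseVector(x._1)
  }
}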
/**
* <h3>Radial Basis Phase Space Density Expansions</h3>
*
* A top level class that can be extended to implement
* various radial basis function based PSD expansions.
*
* Nodes are placed on a regular or logarithmically
* spaced space-time grid.
*
* @param lShellLimits Spatial limits
* @param nL Number of spatial intervals
* @param timeLimits Temporal limits
* @param nT Number of temporal intervals
* @param logScaleFlags Set to true for generating
* logarithmically spaced grid,
* one flag each for space and
* time.
* */
abstract class PSDRadialBasis(
val lShellLimits: (Double, Double), val nL: Int,
val timeLimits: (Double, Double), val nT: Int,
val logScaleFlags: (Boolean, Boolean) = (false, false))
extends PSDBasis {
var mult = 1d
val (lSeq, tSeq) = RadialDiffusion.buildStencil(
lShellLimits, nL,
timeLimits, nT,
logScaleFlags)
val deltaL: Double =
if(logScaleFlags._1) math.log(lShellLimits._2 - lShellLimits._1)/nL
else (lShellLimits._2 - lShellLimits._1)/nL
val deltaT: Double =
if(logScaleFlags._2) math.log(timeLimits._2 - timeLimits._1)/nT
else (timeLimits._2 - timeLimits._1)/nT
def scalesL: Seq[Double] =
if(logScaleFlags._1) Seq.tabulate(lSeq.length)(i =>
if(i == 0) math.exp(deltaL)
else if(i < nL) math.exp((i+1)*deltaL) - math.exp(i*deltaL)
else math.exp((nL+1)*deltaL) - math.exp(nL*deltaL)).map(_*mult)
else Seq.fill(lSeq.length)(deltaL*mult)
def scalesT: Seq[Double] =
if(logScaleFlags._2) Seq.tabulate(tSeq.length)(i =>
if(i == 0) math.exp(deltaT)
      else if(i < nT) math.exp((i+1)*deltaT) - math.exp(i*deltaT)
      else math.exp((nT+1)*deltaT) - math.exp(nT*deltaT)).map(_*mult)
else Seq.fill(tSeq.length)(deltaT*mult)
val tupleListEnc = Encoder(
(t: (Int, Int)) => List(t._1, t._2),
(l: List[Int]) => (l.head, l.last)
)
protected val centers: Seq[(Double, Double)] = combine(Seq(lSeq, tSeq)).map(s => (s.head, s.last))
protected def scales: Seq[(Double, Double)] = combine(Seq(scalesL, scalesT)).map(s => (s.head, s.last))
def _centers: Seq[(Double, Double)] = centers
override val dimension: Int = lSeq.length*tSeq.length
val dimensionL: Int = lSeq.length
val dimensionT: Int = tSeq.length
val indexEncoder: Encoder[(Int, Int), Int] = tupleListEnc > TupleIntegerEncoder(List(lSeq.length, tSeq.length))
}
| mandar2812/PlasmaML | mag-core/src/main/scala/io/github/mandar2812/PlasmaML/dynamics/diffusion/PSDBasis.scala | Scala | lgpl-2.1 | 6,276 |
/*
* Copyright (c) 2014. Regents of the University of California
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package edu.berkeley.cs.amplab.avocado.postprocessing
import org.apache.commons.configuration.SubnodeConfiguration
import org.apache.spark.rdd.RDD
import edu.berkeley.cs.amplab.adam.avro.ADAMGenotype
import edu.berkeley.cs.amplab.adam.models.ADAMVariantContext
import edu.berkeley.cs.amplab.avocado.stats.AvocadoConfigAndStats
private[postprocessing] trait PostprocessingStage {
val stageName: String
def apply (rdd: RDD[ADAMVariantContext],
stats: AvocadoConfigAndStats,
config: SubnodeConfiguration): RDD[ADAMVariantContext]
}
private[postprocessing] trait GenotypeFilter extends Serializable {
/**
* Abstract method that must be implemented. Implements basic filtering on genotypes that
* are inside a single variant context.
*
* @param genotypes Genotypes to filter.
* @return Filtered genotypes.
*/
def filterGenotypes (genotypes: Seq[ADAMGenotype]): Seq[ADAMGenotype]
/**
* Applies filtering and creates a new variant context, if called genotypes still exist.
* If all genotypes have been filtered out, then an empty option (None) is returned.
*
* @param vc Variant context on which to filter.
* @return If not all genotypes have been filtered out, a new variant context, else none.
*/
def createNewVC(vc: ADAMVariantContext): Option[ADAMVariantContext] = {
val filteredGt = filterGenotypes(vc.genotypes)
if (filteredGt.length > 0) {
Some(ADAMVariantContext.buildFromGenotypes(filteredGt))
} else {
None
}
}
/**
* Applies the filtering described above across a full RDD.
*
* @param rdd RDD of variant contexts.
* @return An RDD containing variant contexts after filtering.
*/
def filter (rdd: RDD[ADAMVariantContext]): RDD[ADAMVariantContext] = {
rdd.flatMap(vc => createNewVC(vc))
}
}
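/*
 * Editorial sketch (not part of the original project; the cap-based rule is invented for
 * illustration and makes no assumptions about the ADAMGenotype schema): a minimal
 * GenotypeFilter showing how filterGenotypes plugs into createNewVC and filter above.
 */
private[postprocessing] class KeepAtMostNGenotypes(n: Int) extends GenotypeFilter {
  // Keep at most the first n genotypes of each variant context.
  override def filterGenotypes(genotypes: Seq[ADAMGenotype]): Seq[ADAMGenotype] =
    genotypes.take(n)
}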
| fnothaft/avocado | avocado-core/src/main/scala/edu/berkeley/cs/amplab/avocado/postprocessing/PostprocessingStage.scala | Scala | apache-2.0 | 2,465 |
package org.leialearns.logic.utilities
trait Bit {
def asInt: Int
def asBoolean: Boolean
}
case object ZERO extends Bit {
def asInt = 0
def asBoolean = false
}
case object ONE extends Bit {
def asInt = 1
def asBoolean = true
}
| jeroenvanmaanen/leia | prefixencoding/src/main/java/org/leialearns/logic/utilities/Bit.scala | Scala | lgpl-2.1 | 240 |
package uk.gov.gds.ier.transaction.overseas.parentName
import uk.gov.gds.ier.config.Config
import uk.gov.gds.ier.model.{OverseasParentName, Name, PreviousName}
import uk.gov.gds.ier.security.EncryptionService
import uk.gov.gds.ier.serialiser.JsonSerialiser
import uk.gov.gds.ier.test._
import uk.gov.gds.ier.assets.RemoteAssets
class ParentNameStepTests
extends MockingTestSuite
with WithMockOverseasControllers {
  it should "reset the previous name if hasPrevious is false when submitting the form successfully" in {
val mockedJsonSerialiser = mock[JsonSerialiser]
val mockedConfig = mock[Config]
val mockedEncryptionService = mock[EncryptionService]
val mockedRemoteAssets = mock[RemoteAssets]
val parentNameStep = new ParentNameStep(
mockedJsonSerialiser,
mockedConfig,
mockedEncryptionService,
mockedRemoteAssets,
overseas
)
val currentState = completeOverseasApplication.copy(overseasParentName = Some(OverseasParentName(
name = None, previousName =
Some(PreviousName(false, "false", Some(Name("john", None, "smith")))))))
val transferedState = parentNameStep.resetParentName.apply(currentState, parentNameStep)
transferedState._1.overseasParentName.get.previousName.isDefined should be (true)
transferedState._1.overseasParentName.get.previousName.get.previousName should be (None)
}
}
| michaeldfallen/ier-frontend | test/uk/gov/gds/ier/transaction/overseas/parentName/ParentNameStepTests.scala | Scala | mit | 1,395 |
package com.bryanjos.lovecouch
import scala.concurrent.duration._
import scala.concurrent.Await
import org.scalatest._
import play.api.libs.json.Json
import akka.actor.ActorSystem
class DocumentSpec extends FunSpec with BeforeAndAfterAll {
implicit val system = ActorSystem()
implicit val context = system.dispatcher
val couchDB = CouchDb()
val db = Database(name = "documentspec", couchDB.url)
case class Guy(_id: Option[String] = None,
_rev: Option[String] = None,
name: String, age: Long)
implicit val guyFmt = Json.format[Guy]
var id = ""
var revs = List[String]()
info("CouchDB Document Methods")
override def beforeAll() {
Await.result(couchDB.createDatabase("documentspec"), 5 seconds)
}
describe("Create a new document") {
it("should be created") {
val data = Guy(name = "Alf", age = 23)
val result = db.createDocument[Guy](data) map {
value =>
id = value.id
revs = revs ++ List[String](value.rev)
assert(value.ok)
}
Await.result(result, 5 seconds)
}
}
describe("Returns the latest revision of the document") {
    it("should return the wanted document") {
val result = db.getDocument[Guy](id) map {
value =>
assert(value.age == 23)
assert(value.name == "Alf")
assert(value._id.get == id)
}
Await.result(result, 5 seconds)
}
}
describe("Inserts a new document, or new version of an existing document") {
it("should be updated") {
val data = Guy(_id = Some(id), _rev = Some(revs.last), name = "Alf", age = 24)
val result = db.updateDocument[Guy](data, id) map {
value =>
revs = revs ++ List[String](value.rev)
assert(value.ok)
}
Await.result(result, 5 seconds)
}
}
describe("Adds an attachment of a document") {
it("should be added") {
val result = db.addAttachment(id,
revs.last,
"README.md",
new java.io.File("/Users/bryanjos/Projects/Personal/lovecouch/README.md"),
"text/plain") map {
value =>
revs = revs ++ List[String](value.rev)
assert(value.ok)
}
Await.result(result, 5 seconds)
}
}
describe("Gets the attachment of a document") {
it("should be a byte array with non zero bytes") {
val result = db.getAttachment(id, "README.md") map {
value =>
assert(!value.isEmpty)
}
Await.result(result, 5 seconds)
}
}
describe("Returns the a revision of the document") {
it("should be a previous revision of the document") {
val result = db.getDocument[Guy](id, Some(revs.head)) map {
value =>
assert(value.age == 23)
assert(value.name == "Alf")
assert(value._id.get == id)
}
Await.result(result, 5 seconds)
}
}
describe("Deletes the document") {
it("should be deleted") {
val result = db.deleteDocument(id, revs.last) map {
value =>
assert(value.ok)
}
Await.result(result, 5 seconds)
}
}
override def afterAll() {
Await.result(couchDB.deleteDatabase("documentspec"), 5 seconds)
}
}
| bryanjos/lovecouch | src/test/scala/com/bryanjos/lovecouch/DocumentSpec.scala | Scala | mit | 3,247 |
package scredis
import org.scalameter.api._
import akka.actor.ActorSystem
import scala.concurrent.{ Future, Await }
import scala.concurrent.duration._
/*
object ClientBenchmark extends PerformanceTest {
private var system: ActorSystem = _
private var client: Client = _
/* configuration */
lazy val executor = SeparateJvmsExecutor(
new Executor.Warmer.Default,
Aggregator.average,
new Measurer.Default
)
lazy val reporter = Reporter.Composite(
new RegressionReporter(
RegressionReporter.Tester.Accepter(),
RegressionReporter.Historian.Complete()
),
HtmlReporter(true)
)
lazy val persistor = Persistor.None
/* inputs */
val sizes = Gen.range("size")(1000000, 3000000, 1000000)
/* tests */
performance of "Client" in {
measure method "PING" in {
using(sizes) config {
exec.maxWarmupRuns -> 3
exec.benchRuns -> 3
exec.independentSamples -> 3
} setUp { _ =>
system = ActorSystem()
client = Client()(system)
} tearDown { _ =>
Await.result(client.quit(), 2 seconds)
system.shutdown()
client = null
system = null
} in { i =>
implicit val ec = system.dispatcher
val future = Future.traverse(1 to i) { i =>
client.ping()
}
Await.result(future, 30 seconds)
}
}
measure method "GET" in {
using(sizes) config {
exec.maxWarmupRuns -> 3
exec.benchRuns -> 3
exec.independentSamples -> 3
} setUp { _ =>
system = ActorSystem()
client = Client()(system)
Await.result(client.set("foo", "bar"), 2 seconds)
} tearDown { _ =>
Await.result(client.del("foo"), 2 seconds)
Await.result(client.quit(), 2 seconds)
system.shutdown()
client = null
system = null
} in { i =>
implicit val ec = system.dispatcher
val future = Future.traverse(1 to i) { i =>
client.get("foo")
}
Await.result(future, 30 seconds)
}
}
measure method "SET" in {
using(sizes) config {
exec.maxWarmupRuns -> 3
exec.benchRuns -> 3
exec.independentSamples -> 3
} setUp { _ =>
system = ActorSystem()
client = Client()(system)
} tearDown { _ =>
Await.result(client.del("foo"), 2 seconds)
Await.result(client.quit(), 2 seconds)
system.shutdown()
client = null
system = null
} in { i =>
implicit val ec = system.dispatcher
val future = Future.traverse(1 to i) { i =>
client.set("foo", "bar")
}
Await.result(future, 30 seconds)
}
}
}
}*/
| rileyberton/scredis | src/test/scala/scredis/ClientBenchmark.scala | Scala | apache-2.0 | 2,743 |
/*
,i::,
:;;;;;;;
;:,,::;.
1ft1;::;1tL
t1;::;1,
:;::; _____ __ ___ __
fCLff ;:: tfLLC / ___/ / |/ /____ _ _____ / /_
CLft11 :,, i1tffLi \\__ \\ ____ / /|_/ // __ `// ___// __ \\
1t1i .;; .1tf ___/ //___// / / // /_/ // /__ / / / /
CLt1i :,: .1tfL. /____/ /_/ /_/ \\__,_/ \\___//_/ /_/
Lft1,:;: , 1tfL:
;it1i ,,,:::;;;::1tti s_mach.explain_play_json
.t1i .,::;;; ;1tt Copyright (c) 2016 S-Mach, Inc.
Lft11ii;::;ii1tfL: Author: [email protected]
.L1 1tt1ttt,,Li
...1LLLL...
*/
package s_mach.explain_play_json.impl
import scala.reflect.macros.blackbox
import s_mach.codetools.macros.{BlackboxHelper, Result}
import s_mach.explain_play_json.ExplainPlayJson
class ExplainPlayJsonMacroBuilder(
val c: blackbox.Context,
override val showDebug : Boolean = false
) extends BlackboxHelper {
import c.universe._
def build[A:c.WeakTypeTag]() : c.Expr[ExplainPlayJson[A]] =
getOrAbort {
calcProductType(c.weakTypeOf[A]).flatMap(build2)
}
case class FieldEx(
field: ProductType.Field
) {
val index = field.index
val name = field.name
val termName = field.termName
val explainTermName = TermName("explain_" + termName)
val _type = field._type
}
def build2[A:c.WeakTypeTag](
aProductType: ProductType
) : Result[c.Expr[ExplainPlayJson[A]]] = {
val aType = c.weakTypeOf[A]
val fields = aProductType.oomField.map(FieldEx.apply)
val result = c.Expr[ExplainPlayJson[A]] {
q"""
s_mach.explain_play_json.ExplainPlayJson[$aType] {
import s_mach.metadata._
import s_mach.explain_json._
import s_mach.explain_play_json._
TypeMetadata.Rec(
JsonExplanationNode.JsonObject(),
Seq(..${
fields.map { field =>
import field._
q"""(
$name,
implicitly[ExplainPlayJson[${_type}]].explain
)""".asInstanceOf[c.Tree]
}
})
)
}
"""
}
Result(result,Result.Debug(result.toString()))
}
}
| S-Mach/s_mach.explain | explain_play_json/src/main/scala/s_mach/explain_play_json/impl/ExplainPlayJsonMacroBuilder.scala | Scala | mit | 2,221 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package java.util
abstract private[util] class AbstractRandomAccessListIterator[E](private var i: Int,
start: Int, protected var end: Int) extends ListIterator[E] with SizeChangeEvent {
private var last = -1
def hasNext(): Boolean =
i < end
def next(): E = {
last = i
i += 1
get(last)
}
def hasPrevious(): Boolean =
start < i
def previous(): E = {
i -= 1
last = i
get(last)
}
def nextIndex(): Int = i
def previousIndex(): Int = i - 1
override def remove(): Unit = {
checkThatHasLast()
remove(last)
if (last < i)
i -= 1
last = -1
changeSize(-1)
}
def set(e: E): Unit = {
checkThatHasLast()
set(last, e)
}
def add(e: E): Unit = {
add(i, e)
changeSize(1)
last = -1
i += 1
}
protected def get(index: Int): E
protected def remove(index: Int): Unit
protected def set(index: Int, e: E): Unit
protected def add(index: Int, e: E): Unit
private def checkThatHasLast(): Unit = {
if (last == -1)
throw new IllegalStateException
}
}
| scala-js/scala-js | javalib/src/main/scala/java/util/AbstractRandomAccessListIterator.scala | Scala | apache-2.0 | 1,348 |
package elevators.queue.sweep
import elevators._
import elevators.queue.RequestQueue
case class ScanQueue(position: Int, lowerBound: Int, upperBound: Int,
requests: List[Int], direction: SeekDirection) extends RequestQueue[Int] {
private lazy val compare = eligibleRequestFilter(position, direction)
private lazy val eligibleRequests = requests.filter(req => compare(req))
private lazy val service = if (eligibleRequests.nonEmpty) {
eligibleRequests.sortBy(difference(position)_).head
} else direction match {
case Up => upperBound
case Down => lowerBound
}
  override def enqueue(request: Int): ScanQueue = {
    new ScanQueue(position, lowerBound, upperBound, request :: requests,
      direction)
  }
  override def dequeue: (Int, RequestQueue[Int]) = {
    (this.service, new ScanQueue(this.service, lowerBound, upperBound,
      requests.filter(req => req != this.service), newDirection(this.service)))
  }
def newDirection(service: Int): SeekDirection = direction match {
case Up => if (service == upperBound) Down else Up
case Down => if (service == lowerBound) Up else Down
}
override def head: Int = this.service
override def contains(request: Int): Boolean = requests.contains(request)
override def isEmpty: Boolean = requests.isEmpty
}
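/*
 * Editorial usage sketch (not part of the original project; the request values are
 * invented for illustration): exercising the SCAN ("elevator") policy above. Starting at
 * position 3 and sweeping Up, the queue should serve a request that lies in the current
 * direction of travel first.
 */
object ScanQueueExample {
  def demo(): (Int, RequestQueue[Int]) = {
    val queue = ScanQueue(position = 3, lowerBound = 0, upperBound = 10,
      requests = Nil, direction = Up)
    queue.enqueue(7).enqueue(1).dequeue
  }
}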
| wohanley/elevators | src/main/scala/elevators/queue/sweep/ScanQueue.scala | Scala | agpl-3.0 | 1,322 |
package filodb.memory.data
import debox.Buffer
import filodb.memory.BinaryRegion.NativePointer
/**
* An unboxed iterator over SortedIDMap elements which are native 64-bit Long pointers.
* When constructed, the iterator holds a shared lock over the backing collection, to protect
* the contents of the native pointers. The close method must be called when the native pointers
* don't need to be accessed anymore, and then the lock is released.
*/
trait ElementIterator {
def close(): Unit
def hasNext: Boolean
def next: NativePointer
def toBuffer: Buffer[NativePointer] = {
val buf = Buffer.empty[NativePointer]
while (hasNext) buf += next
buf
}
def count: Int = {
var _count = 0
while (hasNext) {
_count += 1
next
}
_count
}
/**
* ElementIterators obtain a lock to protect access to native memory, and the lock is
* released when the iterator is closed. As a convenience (or not), the iterator is
   * automatically closed when the hasNext method returns false. To protect native memory access
   * even longer, call the lock method before performing any iteration. When done, call unlock.
   * The lock method can be called multiple times, but be sure to call unlock the same number of times.
*/
def lock(): Unit
def unlock(): Unit
}
/**
* Lazily instantiates a wrapped iterator until hasNext or next is called.
*/
//scalastyle:off
class LazyElementIterator(source: () => ElementIterator) extends ElementIterator {
private var it: ElementIterator = _
// Note: If close is called before the iterator is assigned, then there's seemingly no
// reason to go to the source and create an iterator just to close it. Doing so anyhow
// ensures that any side effects from constructing the iterator are observed, and it
// also ensures that a closed iterator stays closed.
override def close(): Unit = sourceIt().close()
override def hasNext: Boolean = sourceIt().hasNext
override def next: NativePointer = sourceIt().next
override def lock(): Unit = sourceIt().lock()
override def unlock(): Unit = sourceIt().unlock()
private def sourceIt(): ElementIterator = {
if (it == null) it = source()
it
}
}
//scalastyle:on
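/*
 * Editorial sketch (not part of FiloDB; invented for illustration): an ElementIterator
 * backed by a plain in-memory Buffer, with no native memory and hence no real lock to
 * manage, just to make the contract of the trait above concrete.
 */
class BufferElementIterator(elems: Buffer[NativePointer]) extends ElementIterator {
  private var pos = 0
  def close(): Unit = {}
  def hasNext: Boolean = pos < elems.length
  def next: NativePointer = {
    val elem = elems(pos)
    pos += 1
    elem
  }
  // Nothing to protect in this sketch, so the lock operations are no-ops.
  def lock(): Unit = {}
  def unlock(): Unit = {}
}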
| tuplejump/FiloDB | memory/src/main/scala/filodb.memory/data/ElementIterator.scala | Scala | apache-2.0 | 2,219 |
/*
* Copyright 2014, by Vladimir Kostyukov and Contributors.
*
* This file is a part of a Finch library that may be found at
*
* https://github.com/finagle/finch
*
* Licensed under the Apache License, Version 2.0 (the "License");
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributor(s):
* Ben Whitehead
* Ryan Plessner
* Pedro Viegas
* Jens Halm
*/
package io.finch.request
/**
* A reusable validation rule that can be applied to any [[io.finch.request.RequestReader RequestReader]] with a
* matching type.
*/
trait ValidationRule[A] { self =>
/**
* Text description of this validation rule.
*/
def description: String
/**
* Applies the rule to the specified value.
*
* @return true if the predicate of this rule holds for the specified value
*/
def apply(value: A): Boolean
/**
* Combines this rule with another rule such that the new rule only validates if both the combined rules validate.
*
* @param that the rule to combine with this rule
* @return a new rule that only validates if both the combined rules validate
*/
def and(that: ValidationRule[A]): ValidationRule[A] =
ValidationRule(s"${self.description} and ${that.description}") { value => self(value) && that(value) }
/**
* Combines this rule with another rule such that the new rule validates if any one of the combined rules validates.
*
* @param that the rule to combine with this rule
* @return a new rule that validates if any of the combined rules validates
*/
def or(that: ValidationRule[A]): ValidationRule[A] =
ValidationRule(s"${self.description} or ${that.description}") { value => self(value) || that(value) }
}
/**
* Allows the creation of reusable validation rules for [[io.finch.request.RequestReader RequestReader]]s.
*/
object ValidationRule {
/**
   * Implicit conversion that allows the same [[io.finch.request.ValidationRule ValidationRule]] to be used for required
* and optional values. If the optional value is non-empty, it gets validated (and validation may fail, producing an
* error), but if it is empty, it is always treated as valid.
*
* @param rule the validation rule to adapt for optional values
* @return a new validation rule that applies the specified rule to an optional value in case it is not empty
*/
implicit def toOptionalRule[A](rule: ValidationRule[A]): ValidationRule[Option[A]] = {
ValidationRule(rule.description) {
case Some(value) => rule(value)
case None => true
}
}
/**
* Creates a new reusable [[io.finch.request.ValidationRule ValidationRule]] based on the specified predicate.
*
* @param desc text describing the rule being validated
* @param p returns true if the data is valid
*
* @return a new reusable validation rule.
*/
def apply[A](desc: String)(p: A => Boolean): ValidationRule[A] = new ValidationRule[A] {
def description: String = desc
def apply(value: A): Boolean = p(value)
}
}
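/*
 * Editorial usage sketch (not part of the library; the rule names and bounds are invented
 * for illustration): building and combining reusable rules with the factory above.
 */
object ValidationRuleUsageExample {
  val beNonEmpty: ValidationRule[String] =
    ValidationRule[String]("be non-empty")(_.nonEmpty)
  val beShorterThan32: ValidationRule[String] =
    ValidationRule[String]("be shorter than 32 characters")(_.length < 32)
  // Only validates when both component rules validate.
  val beValidName: ValidationRule[String] = beNonEmpty and beShorterThan32
  // Via the implicit toOptionalRule, an empty Option is always treated as valid.
  val beValidOptionalName: ValidationRule[Option[String]] = beValidName
}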
| penland365/finch | core/src/main/scala/io/finch/request/ValidationRule.scala | Scala | apache-2.0 | 3,441 |
package org.openjdk.jmh.samples
import java.util.concurrent.TimeUnit
import org.openjdk.jmh.annotations.{Benchmark, BenchmarkMode, CompilerControl, Fork, Measurement, Mode, OutputTimeUnit, Param, Scope, Setup, State, Warmup}
import org.openjdk.jmh.infra.Blackhole
object JMHSample_34_SafeLooping {
/*
* JMHSample_11_Loops warns about the dangers of using loops in @Benchmark methods.
* Sometimes, however, one needs to traverse through several elements in a dataset.
* This is hard to do without loops, and therefore we need to devise a scheme for
* safe looping.
*/
/*
   * Suppose we want to measure how long it takes to execute work() with different
   * arguments. This mimics a frequent use case where multiple instances with the same
   * implementation, but different data, are measured.
*/
}
@State(Scope.Thread)
@Warmup(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Measurement(iterations = 5, time = 1, timeUnit = TimeUnit.SECONDS)
@Fork(3)
@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.NANOSECONDS)
class JMHSample_34_SafeLooping {
@Param(Array("1", "10", "100", "1000"))
var size: Int = _
var xs: Array[Int] = _
val BASE = 42
def work(x: Int): Int = BASE + x
@CompilerControl(CompilerControl.Mode.DONT_INLINE)
def sink(v: Int) {
}
@Setup
def setup() {
xs = Array.ofDim[Int](size)
for (c <- 0 until size) {
xs(c) = c
}
}
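  // WRONG: only the value from the last loop iteration is returned, so the JIT
  // is free to discard (or drastically simplify) the work done in earlier
  // iterations -- the measured time does not reflect N calls to work().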
@Benchmark
def measureWrong_1(): Int = {
var acc = 0
for (x <- xs) {
acc = work(x)
}
acc
}
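  // WRONG: accumulating into a local keeps the work alive, but the JIT can
  // unroll and pipeline the loop, so the reported per-invocation cost is
  // misleadingly low compared with a single call to work().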
@Benchmark
def measureWrong_2(): Int = {
var acc = 0
for (x <- xs) {
acc += work(x)
}
acc
}
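  // RIGHT: every result is handed to a Blackhole, which defeats dead-code
  // elimination without distorting the loop the way manual accumulation does.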
@Benchmark
def measureRight_1(bh: Blackhole) {
for (x <- xs) {
bh.consume(work(x))
}
}
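  // RIGHT: the same effect without a Blackhole -- each result is passed to a
  // method that the JIT is forbidden to inline (see @CompilerControl above).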
@Benchmark
def measureRight_2() {
for (x <- xs) {
sink(work(x))
}
}
}
| bantonsson/sbt-jmh | src/sbt-test/sbt-jmh/run/src/main/scala/org/openjdk/jmh/samples/JMHSample_34_SafeLooping.scala | Scala | apache-2.0 | 1,874 |
package play.core.j
import play.api.mvc._
import play.templates._
/** Defines a magic helper for Play templates in a Java context. */
object PlayMagicForJava {
import scala.collection.JavaConverters._
/** Transforms a Play Java `Option` to a proper Scala `Option`. */
implicit def javaOptionToScala[T](x: play.libs.F.Option[T]): Option[T] = x match {
case x: play.libs.F.Some[_] => Some(x.get)
case x: play.libs.F.None[_] => None
}
implicit def implicitJavaLang: play.api.i18n.Lang = {
try {
play.mvc.Http.Context.Implicit.lang.asInstanceOf[play.api.i18n.Lang]
} catch {
case _: Throwable => play.api.i18n.Lang.defaultLang
}
}
/**
* Implicit conversion of a Play Java form `Field` to a proper Scala form `Field`.
*/
implicit def javaFieldtoScalaField(jField: play.data.Form.Field): play.api.data.Field = {
new play.api.data.Field(
null,
jField.name,
jField.constraints.asScala.map { jT =>
jT._1 -> jT._2.asScala
},
Option(jField.format).map(f => f._1 -> f._2.asScala),
jField.errors.asScala.map { jE =>
play.api.data.FormError(
jE.key,
jE.message,
jE.arguments.asScala)
},
Option(jField.value)) {
override def apply(key: String) = {
javaFieldtoScalaField(jField.sub(key))
}
override lazy val indexes = jField.indexes.asScala.toSeq.map(_.toInt)
}
}
implicit def requestHeader: play.api.mvc.RequestHeader = {
play.mvc.Http.Context.Implicit.ctx._requestHeader
}
} | noel-yap/setter-for-catan | play-2.1.1/framework/src/play-java/src/main/scala/play/core/TemplateMagicForJava.scala | Scala | apache-2.0 | 1,564 |
/*
* Copyright (C) 2015 Stratio (http://stratio.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.crossdata.catalog.persistent
import com.stratio.crossdata.test.BaseXDTest
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.spark.sql.catalyst.{CatalystConf, SimpleCatalystConf}
import org.apache.spark.sql.crossdata.config.CoreConfig
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class MySQLCatalogSpec extends BaseXDTest {
private class MySQLCatalogPublicMetadata(override val catalystConf: CatalystConf) extends MySQLXDCatalog(catalystConf){
val tablesPrefixTest = tablesPrefix
val tableWithTableMetadataTest = tableWithTableMetadata
val tableWithViewMetadataTest = tableWithViewMetadata
val tableWithAppJarsTest = tableWithAppJars
val tableWithIndexMetadataTest = tableWithIndexMetadata
val configTest = config
}
private class MySQLCatalogWithMockedConfig(override val catalystConf: CatalystConf) extends MySQLCatalogPublicMetadata(catalystConf) {
override lazy val config: Config =
ConfigFactory.load("catalogspec/mysql-catalog-test-properties.conf").getConfig(Seq(CoreConfig.ParentConfigName, CoreConfig.CatalogConfigKey) mkString ".")
}
it should "get the cluster name from the config if specified" in {
val catalog = new MySQLCatalogWithMockedConfig(new SimpleCatalystConf(true))
catalog.configTest.getString("prefix") shouldBe "crossdataClusterTest"
catalog.tablesPrefixTest shouldBe "crossdataClusterTest_"
catalog.tableWithTableMetadataTest shouldBe "crossdataClusterTest_crossdataTables"
catalog.tableWithViewMetadataTest shouldBe "crossdataClusterTest_crossdataViews"
catalog.tableWithAppJarsTest shouldBe "crossdataClusterTest_crossdataJars"
catalog.tableWithIndexMetadataTest shouldBe "crossdataClusterTest_crossdataIndexes"
}
it should "work with the default values if cluster name is not specified" in {
val catalog = new MySQLCatalogPublicMetadata(new SimpleCatalystConf(true))
an[Exception] shouldBe thrownBy(catalog.configTest.getString("prefix"))
catalog.tablesPrefixTest shouldBe ""
catalog.tableWithTableMetadataTest shouldBe "crossdataTables"
catalog.tableWithViewMetadataTest shouldBe "crossdataViews"
catalog.tableWithAppJarsTest shouldBe "crossdataJars"
catalog.tableWithIndexMetadataTest shouldBe "crossdataIndexes"
}
}
| darroyocazorla/crossdata | core/src/test/scala/org/apache/spark/sql/crossdata/catalog/persistent/MySQLCatalogSpec.scala | Scala | apache-2.0 | 2,985 |
//Copyright 2014, Alex Khilko.
//This file is part of MoonGene which is released under MIT.
//See file LICENSE.TXT or go to www.alexkhilko.com for full license details.
package com.moongene.models.load
import play.api._
import scala.util.Random
case class PhoneDevice(vendor: String,
model: String,
platform: Byte,
version: String,
screen_w: Short,
screen_h: Short,
size: Float,
ram: Int)
object PhoneDevice {
val allDevices = all()
def random() : PhoneDevice = {
val rand = new Random(System.currentTimeMillis())
allDevices(rand.nextInt(allDevices.length))
}
def all():List[PhoneDevice] = {
val allDevices = List(
PhoneDevice("Acer", "beTouch E110", 1, "1.5",320,240,2.8f,256),
PhoneDevice("Acer", "Liquid A1Β (S100)", 1, "1.6",800,480,3.5f,256),
PhoneDevice("Acer", "Liquid E", 1, "2.1",800,480,3.5f,512),
PhoneDevice("Acer", "Liquid E Ferrari", 1, "2.1",800,480,3.5f,512),
PhoneDevice("Acer", "Acer Liquid Metal", 1, "2.2",800,480,3.6f,512),
PhoneDevice("Acer", "Acer Stream S110", 1, "2.1",800,480,3.7f,512),
PhoneDevice("Asus", "Padfone", 1, "4.0",960,540,4.3f,1024),
PhoneDevice("Asus", "Padfone 2", 1, "4.1",1280,720,4.7f,2048),
PhoneDevice("HTC", "Dream", 1, "1.6",480,320,3.2f,192),
PhoneDevice("HTC", "Legend", 1, "2.1",480,320,3.2f,384),
PhoneDevice("HTC", "Nexus One", 1, "2.2",800,480,3.7f,512),
PhoneDevice("HTC", "Desire", 1, "2.1",800,480,3.7f,576),
PhoneDevice("HTC", "Desire HD", 1, "2.2",800,480,4.3f,768),
PhoneDevice("HTC", "Desire Z", 1, "2.2",800,480,3.7f,512),
PhoneDevice("HTC", "Desire S", 1, "2.3",800,480,3.7f,768),
PhoneDevice("HTC", "Incredible S", 1, "2.2",800,480,4.0f,768),
PhoneDevice("HTC", "Sensation", 1, "2.3",960,540,4.3f,768),
PhoneDevice("HTC", "Evo 3D", 1, "2.3",960,540,4.3f,1024),
PhoneDevice("HTC", "Sensation XE", 1, "2.3",960,540,4.3f,768),
PhoneDevice("HTC", "Sensation XL", 1, "2.3",800,480,4.7f,768),
PhoneDevice("HTC", "Amaze 4G", 1, "2.3",960,540,4.3f,1024),
PhoneDevice("HTC", "Vivid", 1, "2.3",960,540,4.5f,1024),
PhoneDevice("HTC", "One V", 1, "4.0",800,480,3.7f,512),
PhoneDevice("HTC", "One S", 1, "4.0",960,540,4.3f,1024),
PhoneDevice("HTC", "One X", 1, "4.0",1280,720,4.7f,1024),
PhoneDevice("HTC", "Evo 4G LTE", 1, "4.0",1280,720,4.7f,1024),
PhoneDevice("Karbonn", "A15", 1, "4.0",800,480,4.0f,512),
PhoneDevice("LG", "Google Nexus 4", 1, "4.2",1280,768,4.7f,2048),
PhoneDevice("Motorola", "Droid RAZR MAXX", 1, "2.3",960,540,4.3f,1024),
PhoneDevice("Motorola", "Droid RAZR I", 1, "4.0",960,540,4.3f,1024),
PhoneDevice("Motorola", "Droid RAZR HD", 1, "4.1",1280,720,4.7f,1024),
PhoneDevice("Panasonic", "Eluga", 1, "2.3",960,540,4.3f,1024),
PhoneDevice("Panasonic", "Eluga Power", 1, "4.0",1280,720,5.0f,1024),
PhoneDevice("Samsung", "Moment", 1, "1.5",480,320,3.2f,256),
PhoneDevice("Samsung", "Galaxy Ace 2", 1, "2.3",800,480,3.8f,768),
PhoneDevice("Samsung", "i9000 Galaxy S", 1, "2.3",800,480,4.0f,512),
PhoneDevice("Sony", "Xperia Z", 1, "4.1",1920,1080,5.0f,2048),
PhoneDevice("Sony Ericsson", "Xperia X10", 1, "2.1",854,480,4.0f,384),
PhoneDevice("Cherry Mobile", "Flare", 1, "4.0",800,480,4.0f,512),
PhoneDevice("Huawei", "U8120 Joy", 1, "2.1",320,240,2.8f,256),
PhoneDevice("ZTE", "Warp", 1, "2.3.5",854,480,4.3f,512),
PhoneDevice("Apple", "iPhone 3GS", 2, "6.1.3",480,320,3.5f,256),
PhoneDevice("Apple", "iPhone 4S", 2, "5.0",960,640,3.5f,512),
PhoneDevice("Apple", "iPhone 5", 2, "6.0",1136,640,4.0f,1024),
PhoneDevice("Apple", "iPad 2", 2, "4.3",1024,768,9.7f,512),
PhoneDevice("Apple", "iPad 3", 2, "5.1",2048,1536,9.7f,1024)
)
allDevices
}
}
| InfiniteCode/MoonGene | src/gene/src/main/scala/com/moongene/models/load/PhoneDevice.scala | Scala | mit | 4,082 |
package reftree.util
object Reflection {
/** A utility for accessing private fields */
implicit class PrivateFields[A](val value: A) extends AnyVal {
def privateField[B](name: String) = {
val field = value.getClass.getDeclaredField(name)
field.setAccessible(true)
field.get(value).asInstanceOf[B]
}
}
}
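
// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original source): typical usage of the
// PrivateFields helper. The Counter class and its `count` field are
// illustrative assumptions; the field lookup is only checked at runtime.
// ---------------------------------------------------------------------------
private object ReflectionUsageExample {
  import Reflection.PrivateFields

  final class Counter {
    private val count: Int = 42
    override def toString: String = s"Counter(<hidden: $count>)"
  }

  // Reads the private `count` field reflectively; the caller supplies the
  // expected type, which is cast (not verified) by privateField.
  def readCount(c: Counter): Int = c.privateField[Int]("count")
}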
| stanch/reftree | core/jvm/src/main/scala/reftree/util/Reflection.scala | Scala | gpl-3.0 | 336 |
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala.internal
import java.util.concurrent.atomic.AtomicBoolean
import org.mongodb.scala.{Observable, Observer, Subscription}
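/**
 * Emits a single element when the source completes: the result of folding
 * every emitted value into `initialValue` with `accumulator`. Any downstream
 * demand is translated into a single unbounded request upstream.
 */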
private[scala] case class FoldLeftObservable[T, S](observable: Observable[T], initialValue: S, accumulator: (S, T) => S) extends Observable[S] {
override def subscribe(observer: Observer[_ >: S]): Unit = {
observable.subscribe(SubscriptionCheckingObserver(
new Observer[T] {
@volatile
private var currentValue: S = initialValue
private val requested = new AtomicBoolean(false)
override def onError(throwable: Throwable): Unit = observer.onError(throwable)
override def onSubscribe(subscription: Subscription): Unit = {
val masterSub = new Subscription() {
override def isUnsubscribed: Boolean = subscription.isUnsubscribed
override def request(n: Long): Unit = {
require(n > 0L, s"Number requested must be greater than zero: $n")
if (requested.compareAndSet(false, true)) subscription.request(Long.MaxValue)
}
override def unsubscribe(): Unit = subscription.unsubscribe()
}
observer.onSubscribe(masterSub)
}
override def onComplete(): Unit = {
observer.onNext(currentValue)
observer.onComplete()
}
override def onNext(tResult: T): Unit = {
currentValue = accumulator(currentValue, tResult)
}
}
))
}
}
| rozza/mongo-scala-driver | driver/src/main/scala/org/mongodb/scala/internal/FoldLeftObservable.scala | Scala | apache-2.0 | 2,103 |
package sample
import akka.actor._
import org.springframework.context.ApplicationContext
object SpringExtension {
/**
* The identifier used to access the SpringExtension.
*/
def apply() : SpringExtension= new SpringExtension
}
class SpringExtension extends AbstractExtensionId[SpringExtentionImpl] {
import SpringExtension._
/**
* Is used by Akka to instantiate the Extension identified by this
* ExtensionId, internal use only.
*/
override def createExtension(system: ExtendedActorSystem) = new SpringExtentionImpl
/**
* Java API: retrieve the SpringExt extension for the given system.
*/
override def get(system: ActorSystem): SpringExtentionImpl = super.get(system)
}
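
// Editor's sketch (not part of the original source): how the extension is
// typically looked up from an actor system. SpringExtentionImpl is defined
// elsewhere in this sample project.
private object SpringExtensionUsageExample {
  def lookup(system: ActorSystem): SpringExtentionImpl = SpringExtension().get(system)
}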
| alanktwong/typesafe_activators | akka-scala-spring/app/sample/SpringExtension.scala | Scala | mit | 703 |
package algorithms
import types.Types._
import scala.util.Random
object TabuSearch {
type TabuAction = (Int, Int) // (i x(i))
def apply(inputs: ProblemData, random: Random) = {
val (n, m1, m2) = inputs
val memory = new LongMemory(n)
val reInitIterations = 8 * n
val maxReinitializations = 4
val neightboursPerIteration = 40
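    // Restart strategy between tabu phases: 25% of the time start from a fresh
    // random permutation, 50% from a solution built out of the least frequently
    // used assignments (long-term memory), and 25% from the best solution so far.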
def reinitialize(best: Solution) = {
val prob = random.nextDouble()
if (prob < 0.25) generateRandomSol(random, n)
else if (prob < 0.75) memory.generateInfrecuenteSol
else best
}
def find(initialSol: Solution, best: Solution, bestCost: Int, tabuList: TabuList, iterations: Int): (Solution, Solution, Int) = {
def generateNeightbours(sol: Solution) = {
val n = sol.length
val indexs = 0 until n
val switchs = random.shuffle(indexs.combinations(2)) take neightboursPerIteration
switchs map {
case Vector(i, j) =>
val newS = sol updated(i, sol(j))
newS update (j, sol(i))
val tabuActions = List ( (i, newS(i)), (j, newS(j)) )
(newS, tabuActions)
} toList
}
def bestNeightbour(solution: Solution) = {
val all = generateNeightbours(solution)
val allSortedByCost = all map {
case (neig, tabuActions) => (neig, tabuActions, cost(inputs, neig))
} sortBy{ case (_,_,cost) => cost }
allSortedByCost find {
case(neig, tabuActions, cost) =>
cost < bestCost ||
tabuActions.forall(!tabuList.isTabu(_))
}
}
if (iterations == 0) (initialSol, best, bestCost)
else {
bestNeightbour(initialSol) match {
case Some(solutionInfo) => {
val (sol, tabuActions, cost) = solutionInfo
tabuList.addAll(tabuActions)
tabuActions foreach {case (i, xi) => memory increment(i, xi)}
if (cost < bestCost) find(sol, sol, cost, tabuList, iterations - 1)
else find(sol, best, bestCost, tabuList, iterations - 1)
}
case None =>
find(initialSol, best, bestCost, tabuList, iterations - 1)
}
}
}
def iterate(currentSol: Solution, best: Solution, bestCost: Int, reInitCount: Int, tabuList: TabuList): Solution = {
if (reInitCount == maxReinitializations) best
else {
val (_, newBest, newBestCost) = find(currentSol, best, bestCost, tabuList, reInitIterations)
val newStart = reinitialize(newBest)
val prob = random.nextDouble()
if (prob < 0.5) tabuList.grow() else tabuList.decrease()
tabuList.clear()
iterate(newStart, newBest, newBestCost, reInitCount + 1, tabuList)
}
}
val initialSol = Greedy(inputs)
iterate(initialSol, initialSol, cost(inputs, initialSol), 0, new TabuList(n/2))
}
def generateRandomSol(random: Random, size: Int) = random.shuffle(1 to size).toArray
}
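
// Short-term memory for the tabu search: a ring buffer of recently applied
// moves. A move is tabu while it is still inside the active window of `size`
// entries; the window can grow or shrink between restarts.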
class TabuList(InitialSize: Int) {
import TabuSearch.TabuAction
var elements = Array.ofDim[TabuAction](16*InitialSize)
var size = 2*InitialSize
var count = 0
def addAll(actions: List[TabuAction]) = {
actions foreach add
}
private def add(action: TabuAction) = {
elements.update(count % size, action)
count = count + 1
}
def isTabu(action: TabuAction) = elements.take(size).contains(action)
def grow() = size = size + size / 2
def decrease() = size = size / 2
def clear() = {
elements = Array.ofDim[TabuAction](16*size)
count = 0
}
}
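
// Long-term (frequency) memory: counts how often each assignment (position i
// receiving value j) has appeared, so that generateInfrecuenteSol can build a
// diversified solution out of the least frequently used assignments.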
class LongMemory(size: Int) {
val frecc: Matrix = Array.ofDim[Int](size, size)
def increment(i: Int, j: Int) = frecc(i)(j-1) += 1
def generateInfrecuenteSol: Solution = {
var posibilities = ( for {
i <- 0 until size
j <- 0 until size
} yield (i, j, frecc(i)(j)) ) sortBy(t => t._3)
val solution = 1 to size map( _ => -1) toArray
while (solution.contains(-1)) {
posibilities = posibilities dropWhile {
case (i, j, f) => solution.contains(j) || solution(i) != -1
}
val (i, j, _) = posibilities(0)
solution update(i, j)
}
solution map (_ + 1)
}
} | Truji92/MH-algoritmos-basados-en-trayectorias | src/main/scala/algorithms/TabuSearch.scala | Scala | mit | 4,188 |
package com.scalaAsm.x86
package Instructions
package General
// Description: Set Byte on Condition - not below or equal/above (CF=0 AND ZF=0)
// Category: general/datamov
trait SETA extends InstructionDefinition {
val mnemonic = "SETA"
}
object SETA extends OneOperand[SETA] with SETAImpl
trait SETAImpl extends SETA {
implicit object _0 extends OneOp[rm8] {
val opcode: TwoOpcodes = (0x0F, 0x97) /+ 0
val format = RmFormat
}
}
| bdwashbu/scala-x86-inst | src/main/scala/com/scalaAsm/x86/Instructions/General/SETA.scala | Scala | apache-2.0 | 447 |
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3
package com.google.protobuf.struct
object StructProto extends _root_.scalapb.GeneratedFileObject {
lazy val dependencies: Seq[_root_.scalapb.GeneratedFileObject] = Seq.empty
lazy val messagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] =
Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]](
com.google.protobuf.struct.Struct,
com.google.protobuf.struct.Value,
com.google.protobuf.struct.ListValue
)
private lazy val ProtoBytes: _root_.scala.Array[Byte] =
scalapb.Encoding.fromBase64(scala.collection.immutable.Seq(
"""Chxnb29nbGUvcHJvdG9idWYvc3RydWN0LnByb3RvEg9nb29nbGUucHJvdG9idWYiuwEKBlN0cnVjdBJICgZmaWVsZHMYASADK
AsyIy5nb29nbGUucHJvdG9idWYuU3RydWN0LkZpZWxkc0VudHJ5QgviPwgSBmZpZWxkc1IGZmllbGRzGmcKC0ZpZWxkc0VudHJ5E
hoKA2tleRgBIAEoCUII4j8FEgNrZXlSA2tleRI4CgV2YWx1ZRgCIAEoCzIWLmdvb2dsZS5wcm90b2J1Zi5WYWx1ZUIK4j8HEgV2Y
Wx1ZVIFdmFsdWU6AjgBIpgDCgVWYWx1ZRJLCgpudWxsX3ZhbHVlGAEgASgOMhouZ29vZ2xlLnByb3RvYnVmLk51bGxWYWx1ZUIO4
j8LEgludWxsVmFsdWVIAFIJbnVsbFZhbHVlEjUKDG51bWJlcl92YWx1ZRgCIAEoAUIQ4j8NEgtudW1iZXJWYWx1ZUgAUgtudW1iZ
XJWYWx1ZRI1CgxzdHJpbmdfdmFsdWUYAyABKAlCEOI/DRILc3RyaW5nVmFsdWVIAFILc3RyaW5nVmFsdWUSLwoKYm9vbF92YWx1Z
RgEIAEoCEIO4j8LEglib29sVmFsdWVIAFIJYm9vbFZhbHVlEk4KDHN0cnVjdF92YWx1ZRgFIAEoCzIXLmdvb2dsZS5wcm90b2J1Z
i5TdHJ1Y3RCEOI/DRILc3RydWN0VmFsdWVIAFILc3RydWN0VmFsdWUSSwoKbGlzdF92YWx1ZRgGIAEoCzIaLmdvb2dsZS5wcm90b
2J1Zi5MaXN0VmFsdWVCDuI/CxIJbGlzdFZhbHVlSABSCWxpc3RWYWx1ZUIGCgRraW5kIkgKCUxpc3RWYWx1ZRI7CgZ2YWx1ZXMYA
SADKAsyFi5nb29nbGUucHJvdG9idWYuVmFsdWVCC+I/CBIGdmFsdWVzUgZ2YWx1ZXMqGwoJTnVsbFZhbHVlEg4KCk5VTExfVkFMV
UUQAEKBAQoTY29tLmdvb2dsZS5wcm90b2J1ZkILU3RydWN0UHJvdG9QAVoxZ2l0aHViLmNvbS9nb2xhbmcvcHJvdG9idWYvcHR5c
GVzL3N0cnVjdDtzdHJ1Y3RwYvgBAaICA0dQQqoCHkdvb2dsZS5Qcm90b2J1Zi5XZWxsS25vd25UeXBlc2IGcHJvdG8z"""
).mkString)
lazy val scalaDescriptor: _root_.scalapb.descriptors.FileDescriptor = {
val scalaProto = com.google.protobuf.descriptor.FileDescriptorProto.parseFrom(ProtoBytes)
_root_.scalapb.descriptors.FileDescriptor.buildFrom(scalaProto, dependencies.map(_.scalaDescriptor))
}
lazy val javaDescriptor: com.google.protobuf.Descriptors.FileDescriptor =
com.google.protobuf.StructProto.getDescriptor()
@deprecated("Use javaDescriptor instead. In a future version this will refer to scalaDescriptor.", "ScalaPB 0.5.47")
def descriptor: com.google.protobuf.Descriptors.FileDescriptor = javaDescriptor
} | trueaccord/ScalaPB | scalapb-runtime/src/main/scalajvm/com/google/protobuf/struct/StructProto.scala | Scala | apache-2.0 | 2,607 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.cmd
import scala.annotation.tailrec
/** A simple enough command line parser.
*/
object CommandLineParser {
private final val DQ = '"'
private final val SQ = '\\''
/** Split the line into tokens separated by whitespace or quotes.
*
* @return either an error message or reverse list of tokens
*/
private def tokens(in: String) = {
import Character.isWhitespace
import java.lang.{StringBuilder => Builder}
import collection.mutable.ArrayBuffer
var accum: List[String] = Nil
var pos = 0
var start = 0
val qpos = new ArrayBuffer[Int](16) // positions of paired quotes
def cur: Int = if (done) -1 else in.charAt(pos)
def bump() = pos += 1
def done = pos >= in.length
def skipToQuote(q: Int) = {
var escaped = false
def terminal = in.charAt(pos) match {
case _ if escaped => escaped = false ; false
case '\\\\' => escaped = true ; false
case `q` => true
case _ => false
}
while (!done && !terminal) pos += 1
!done
}
@tailrec
def skipToDelim(): Boolean =
cur match {
case q @ (DQ | SQ) => { qpos += pos; bump(); skipToQuote(q) } && { qpos += pos; bump(); skipToDelim() }
case -1 => true
case c if isWhitespace(c) => true
case _ => bump(); skipToDelim()
}
def skipWhitespace() = while (isWhitespace(cur)) pos += 1
def copyText() = {
val buf = new Builder
var p = start
var i = 0
while (p < pos) {
if (i >= qpos.size) {
buf.append(in, p, pos)
p = pos
} else if (p == qpos(i)) {
buf.append(in, qpos(i)+1, qpos(i+1))
p = qpos(i+1)+1
i += 2
} else {
buf.append(in, p, qpos(i))
p = qpos(i)
}
}
buf.toString
}
def text() = {
val res =
if (qpos.isEmpty) in.substring(start, pos)
else if (qpos(0) == start && qpos(1) == pos) in.substring(start+1, pos-1)
else copyText()
qpos.clear()
res
}
def badquote = Left("Unmatched quote")
@tailrec def loop(): Either[String, List[String]] = {
skipWhitespace()
start = pos
if (done) Right(accum)
else if (!skipToDelim()) badquote
else {
accum = text() :: accum
loop()
}
}
loop()
}
class ParseException(msg: String) extends RuntimeException(msg)
def tokenize(line: String, errorFn: String => Unit): List[String] =
tokens(line) match {
case Right(args) => args.reverse
case Left(msg) => errorFn(msg) ; Nil
}
def tokenize(line: String): List[String] = tokenize(line, x => throw new ParseException(x))
}
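
// ---------------------------------------------------------------------------
// Editor's sketch (not part of the original source): how the tokenizer treats
// whitespace and quotes. The sample inputs are illustrative; the expected
// results follow from the rules implemented above.
// ---------------------------------------------------------------------------
private object CommandLineParserExample {
  def demo(): Unit = {
    // Quoted segments keep their embedded spaces: List(-opt, some value, plain)
    println(CommandLineParser.tokenize("""-opt "some value" plain"""))
    // An unmatched quote is reported through the caller-supplied error function.
    CommandLineParser.tokenize("""broken "quote""", msg => println(s"error: $msg"))
  }
}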
| scala/scala | src/compiler/scala/tools/cmd/CommandLineParser.scala | Scala | apache-2.0 | 3,095 |
// Copyright 2014 Foursquare Labs Inc. All Rights Reserved.
package io.fsq.spindle.rogue
import io.fsq.field.Field
import io.fsq.rogue.{BSONType, Rogue}
import io.fsq.spindle.runtime.{Enum, EnumIntField, EnumStringField, MetaRecord, Record}
trait SpindleRogue {
// EnumIntField: Query (base, list, & set)
implicit def enumFieldToSpindleEnumIntQueryField[M <: MetaRecord[_, _], F <: Enum[F]](
f: Field[F, M] with EnumIntField
): SpindleEnumIntQueryField[M, F] =
new SpindleEnumIntQueryField(f)
implicit def enumListFieldToSpindleEnumIntListQueryField[M <: MetaRecord[_, _], F <: Enum[F]](
f: Field[Seq[F], M] with EnumIntField
): SpindleEnumIntListQueryField[M, F] =
new SpindleEnumIntListQueryField(f)
implicit def enumSetFieldToSpindleEnumIntSetQueryField[M <: MetaRecord[_, _], F <: Enum[F]](
f: Field[Set[F], M] with EnumIntField
): SpindleEnumIntSetQueryField[M, F] =
new SpindleEnumIntSetQueryField(f)
// EnumIntField: Modify (base, list, & set)
implicit def enumFieldToSpindleEnumIntModifyField[M <: MetaRecord[_, _], F <: Enum[F]](
f: Field[F, M] with EnumIntField
): SpindleEnumIntModifyField[M, F] =
new SpindleEnumIntModifyField(f)
implicit def enumFieldToSpindleEnumIntListModifyField[M <: MetaRecord[_, _], F <: Enum[F]](
f: Field[Seq[F], M] with EnumIntField
): SpindleEnumIntListModifyField[M, F] =
new SpindleEnumIntListModifyField(f)
implicit def enumFieldToSpindleEnumIntSetModifyField[M <: MetaRecord[_, _], F <: Enum[F]](
f: Field[Set[F], M] with EnumIntField
): SpindleEnumIntSetModifyField[M, F] =
new SpindleEnumIntSetModifyField(f)
// EnumStringField: Query (base, list, & set)
implicit def enumFieldToSpindleEnumStringQueryField[M <: MetaRecord[_, _], F <: Enum[F]](
f: Field[F, M] with EnumStringField
): SpindleEnumStringQueryField[M, F] =
new SpindleEnumStringQueryField(f)
implicit def enumListFieldToSpindleEnumStringListQueryField[M <: MetaRecord[_, _], F <: Enum[F]](
f: Field[Seq[F], M] with EnumStringField
): SpindleEnumStringListQueryField[M, F] =
new SpindleEnumStringListQueryField(f)
implicit def enumSetFieldToSpindleEnumStringSetQueryField[M <: MetaRecord[_, _], F <: Enum[F]](
f: Field[Set[F], M] with EnumStringField
): SpindleEnumStringSetQueryField[M, F] =
new SpindleEnumStringSetQueryField(f)
// EnumStringField: Modify (base, list, & set)
implicit def enumFieldToSpindleEnumStringModifyField[M <: MetaRecord[_, _], F <: Enum[F]](
f: Field[F, M] with EnumStringField
): SpindleEnumStringModifyField[M, F] =
new SpindleEnumStringModifyField(f)
implicit def enumFieldToSpindleEnumStringListModifyField[M <: MetaRecord[_, _], F <: Enum[F]](
f: Field[Seq[F], M] with EnumStringField
): SpindleEnumStringListModifyField[M, F] =
new SpindleEnumStringListModifyField(f)
implicit def enumFieldToSpindleEnumStringSetModifyField[M <: MetaRecord[_, _], F <: Enum[F]](
f: Field[Set[F], M] with EnumStringField
): SpindleEnumStringSetModifyField[M, F] =
new SpindleEnumStringSetModifyField(f)
implicit def embeddedFieldToSpindleEmbeddedRecordQueryField[
R <: Record[_],
MM <: MetaRecord[_, _]
](
f: Field[R, MM]
): SpindleEmbeddedRecordQueryField[R, MM] = new SpindleEmbeddedRecordQueryField(f)
implicit def embeddedFieldToSpindleEmbeddedRecordModifyField[
R <: Record[_],
MM <: MetaRecord[_, _]
](
f: Field[R, MM]
): SpindleEmbeddedRecordModifyField[R, MM] = new SpindleEmbeddedRecordModifyField(f)
implicit def embeddedListFieldToSpindleEmbeddedRecordListQueryField[
R <: Record[_],
MM <: MetaRecord[_, _]
](
f: Field[Seq[R], MM]
): SpindleEmbeddedRecordListQueryField[R, MM] = new SpindleEmbeddedRecordListQueryField(f)
implicit def embeddedListFieldToSpindleEmbeddedRecordListModifyField[
R <: Record[_],
MM <: MetaRecord[_, _]
](
f: Field[Seq[R], MM]
): SpindleEmbeddedRecordListModifyField[R, MM] = new SpindleEmbeddedRecordListModifyField(f)
class SpindleRecordIsBSONType[R <: Record[R]] extends BSONType[R] {
private val serializer = new SpindleRogueWriteSerializer
override def asBSONObject(v: R): AnyRef = serializer.toDBObject(v)
}
object _SpindleRecordIsBSONType extends SpindleRecordIsBSONType[Nothing]
implicit def SpindleRecordIsBSONType[R <: Record[R]]: BSONType[R] = _SpindleRecordIsBSONType.asInstanceOf[BSONType[R]]
}
object SpindleRogue extends Rogue with SpindleRogue
| foursquare/fsqio | src/jvm/io/fsq/spindle/rogue/SpindleRogue.scala | Scala | apache-2.0 | 4,484 |
package org.eoin.akkaneural
import java.util.{Random => JRandom}
import akka.actor.Props
import akka.testkit.{TestActorRef, TestKit}
import org.eoin._
import org.junit.{Ignore, Test}
import org.scalacheck.Gen
import org.scalatest.Matchers._
import org.scalatest.junit.JUnitSuite
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import spire.math.Real
import scala.concurrent.duration._
trait TestConfig extends InjectableConfig {
override def getRealNumberRNG: () => Real = () => 0.5
override def getActivationFunction: (Real) => Real = identity[Real]
}
class ProjectUnitTestSuite1 extends JUnitSuite with GeneratorDrivenPropertyChecks with TestConfig{
@Test def testHelloWorld : Unit = {
val gen = Gen.oneOf("Hello", "World!")
forAll (gen, minSuccessful(5)) { (s: String) =>
logger.info(s)
}
}
//manual re running
@Ignore @Test def testProblemCases : Unit = {
    performRun(List(7, 1, 7, 8, 8, 9, 7, 3, 6, 4, 7), 20)
}
@Test def testCorrectOutputs : Unit = {
val layerSizesGenerator: Gen[List[Int]] = for {
inputLayerSize <- Gen.choose[Int](1,50)
hiddenLayerSizes <- Gen.containerOf[List, Int](Gen.choose(1,100))
outputLayerSize <- Gen.choose[Int](1,50)
} yield (inputLayerSize :: hiddenLayerSizes) :+ outputLayerSize
forAll (layerSizesGenerator,minSuccessful(10)) { (layerSizes : List[Int]) =>
whenever (layerSizes.length > 1 && ! layerSizes.exists( _ <= 0)) {
performRun(layerSizes, 200)
}
}
}
private def performRun(layerSizes: List[Int], numRowsOfData : Int) : Unit = {
logger.info(s" *** performRun layerSizes : ${layerSizes} numRowsOfData : ${numRowsOfData}")
val neuralNet = new NeuralNet(layerSizes,
numRowsOfData, "TestNeuralNetwork") with TestConfig
val testActor = TestActorRef(Props(new NetworkExitPoint(numRowsOfData))) (neuralNet.actorSystem)
neuralNet.outputRouter ! NeuronAdded(testActor, true)
testActor ! NeuronAdded(neuralNet.outputRouter, false)
val rng = new JRandom
val dummyData = List.fill(neuralNet.dataRowSize * numRowsOfData){ Real(rng.nextDouble()) }
.grouped(neuralNet.dataRowSize)
dummyData foreach { neuralNet.entryPoint ! FeedForwardInput(_) }
val outputData = testActor.underlyingActor.asInstanceOf[NetworkExitPoint].feedForwardInputsReceived
TestKit.awaitCond (outputData.size == numRowsOfData, 60 seconds)
val outputLayerSize = neuralNet.outputLayer.length
    outputData foreach { case (_, ffi: FeedForwardInput) =>
      ffi.values.length should equal(outputLayerSize + 1) // +1 because the bias term is included
    }
logger.info(s"\\n\\n *** outputData : ${outputData}\\n\\n")
neuralNet.actorSystem.terminate()
}
}
| eoinparker/SimpleAkkaNeuralNetwork | src/test/scala/org/eoin/akkaneural/ProjectUnitTestSuite1.scala | Scala | mit | 2,772 |
package com.bot4s.telegram.api.declarative
/**
* Declarative interface.
*/
trait Declarative[F[_]]
extends Updates[F]
with Messages[F]
with ChannelPosts[F]
with Callbacks[F]
with InlineQueries[F]
with Payments[F]
| mukel/telegrambot4s | core/src/com/bot4s/telegram/api/declarative/Declarative.scala | Scala | apache-2.0 | 240 |
//Example from https://github.com/spray/spray/blob/master/examples/spray-can/simple-http-client/src/main/scala/spray/examples/ConnectionLevelApiDemo.scala
//2014-06-20 Christoph Knabe
package demo
import scala.concurrent.Future
import scala.concurrent.duration._
import akka.io.IO
import akka.util.Timeout
import akka.pattern.ask
import akka.actor._
import spray.can.Http
import spray.http._
import HttpMethods._
trait ConnectionLevelApiDemo {
private implicit val timeout: Timeout = 5.seconds
def demoConnectionLevelApi(host: String)(implicit system: ActorSystem): Future[ProductVersion] = {
val actor = system.actorOf(Props(new MyRequestActor(host)), name = "my-request-actor")
val future = actor ? HttpRequest(GET, "/")
future.mapTo[ProductVersion]
}
// The connection-level API is the lowest-level way to access the spray-can client-side infrastructure.
// With it you are in charge of establishing, using, and tearing down the HTTP connections yourself.
// The benefit is that you have complete control over when connections are being established and torn down
// as well as how requests are scheduled onto them.
// Actor that manages the lifecycle of a single HTTP connection for a single request
class MyRequestActor(host: String) extends Actor with ActorLogging {
import context.system
def receive: Receive = {
case request: HttpRequest =>
// start by establishing a new HTTP connection
IO(Http) ! Http.Connect(host, port = 80)
context.become(connecting(sender(), request))
}
def connecting(commander: ActorRef, request: HttpRequest): Receive = {
case _: Http.Connected =>
// once connected, we can send the request across the connection
sender() ! request
context.become(waitingForResponse(commander))
case Http.CommandFailed(Http.Connect(address, _, _, _, _)) =>
log.warning("Could not connect to {}", address)
commander ! Status.Failure(new RuntimeException("Connection error"))
context.stop(self)
}
def waitingForResponse(commander: ActorRef): Receive = {
case response@ HttpResponse(status, entity, _, _) =>
log.info("Connection-Level API: received {} response with {} bytes", status, entity.data.length)
sender() ! Http.Close
context.become(waitingForClose(commander, response))
case ev@(Http.SendFailed(_) | Timedout(_))=>
log.warning("Received {}", ev)
commander ! Status.Failure(new RuntimeException("Request error"))
context.stop(self)
}
def waitingForClose(commander: ActorRef, response: HttpResponse): Receive = {
case ev: Http.ConnectionClosed =>
log.debug("Connection closed ({})", ev)
commander ! Status.Success(response.header[HttpHeaders.Server].get.products.head)
context.stop(self)
case Http.CommandFailed(Http.Close) =>
log.warning("Could not close connection")
commander ! Status.Failure(new RuntimeException("Connection close error"))
context.stop(self)
}
}
}
| ChristophKnabe/sprayreactivedemo | src/main/scala/demo/ConnectionLevelApiDemo.scala | Scala | lgpl-3.0 | 3,081 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui.jobs
import scala.collection.mutable.HashMap
import scala.xml.Node
import org.apache.spark.scheduler.{Schedulable, StageInfo}
import org.apache.spark.ui.UIUtils
/** Table showing list of pools */
private[ui] class PoolTable(pools: Seq[Schedulable], parent: JobProgressTab) {
private val basePath = parent.basePath
private val listener = parent.listener
def toNodeSeq: Seq[Node] = {
listener.synchronized {
poolTable(poolRow, pools)
}
}
private def poolTable(
makeRow: (Schedulable, HashMap[String, HashMap[Int, StageInfo]]) => Seq[Node],
rows: Seq[Schedulable]): Seq[Node] = {
<table class="table table-bordered table-striped table-condensed sortable table-fixed">
<thead>
<th>Pool Name</th>
<th>Minimum Share</th>
<th>Pool Weight</th>
<th>Active Stages</th>
<th>Running Tasks</th>
<th>SchedulingMode</th>
</thead>
<tbody>
{rows.map(r => makeRow(r, listener.poolToActiveStages))}
</tbody>
</table>
}
private def poolRow(
p: Schedulable,
poolToActiveStages: HashMap[String, HashMap[Int, StageInfo]]): Seq[Node] = {
val activeStages = poolToActiveStages.get(p.name) match {
case Some(stages) => stages.size
case None => 0
}
<tr>
<td>
<a href={"%s/stages/pool?poolname=%s".format(UIUtils.prependBaseUri(basePath), p.name)}>
{p.name}
</a>
</td>
<td>{p.minShare}</td>
<td>{p.weight}</td>
<td>{activeStages}</td>
<td>{p.runningTasks}</td>
<td>{p.schedulingMode}</td>
</tr>
}
}
| zhangjunfang/eclipse-dir | spark/core/src/main/scala/org/apache/spark/ui/jobs/PoolTable.scala | Scala | bsd-2-clause | 2,446 |
package org.veripacks
case class ClassUsage(cls: ClassName, usedIn: ClassName, detail: ClassUsageDetail)
sealed trait ClassUsageDetail {
val sourceFileName: String
}
case class MethodSignatureUsageDetail(sourceFileName: String, methodName: String) extends ClassUsageDetail {
override def toString = s"$methodName signature in $sourceFileName"
}
case class MethodBodyUsageDetail(sourceFileName: String, methodName: String, lineNumber: Int) extends ClassUsageDetail {
override def toString = s"line $lineNumber of $methodName method in $sourceFileName"
}
case class FieldUsageDetail(sourceFileName: String, fieldName: String) extends ClassUsageDetail {
override def toString = s"$fieldName field in $sourceFileName"
}
case class ClassSignatureUsageDetail(sourceFileName: String) extends ClassUsageDetail {
override def toString = s"class signature in $sourceFileName"
}
case class MultipleUsageDetail(usages: Set[ClassUsageDetail]) extends ClassUsageDetail {
require(usages.size > 0)
lazy val sourceFileName: String = usages.iterator.next().sourceFileName // All source file names should be the same
override def toString = usages.mkString("; ")
} | adamw/veripacks | verifier/src/main/scala/org/veripacks/ClassUsage.scala | Scala | apache-2.0 | 1,169 |
package mesosphere.marathon.state
import java.util.concurrent.atomic.AtomicInteger
import javax.validation.ConstraintViolationException
import akka.actor.ActorSystem
import akka.event.EventStream
import akka.testkit.TestKit
import mesosphere.marathon.io.storage.StorageProvider
import mesosphere.marathon.state.PathId._
import mesosphere.marathon.tasks.TaskTracker
import mesosphere.marathon.{ MarathonConf, MarathonSchedulerService, MarathonSpec, PortRangeExhaustedException }
import mesosphere.util.SerializeExecution
import org.mockito.Matchers.any
import org.mockito.Mockito.{ times, verify, when }
import org.rogach.scallop.ScallopConf
import org.scalatest.Matchers
import org.scalatest.mock.MockitoSugar
import scala.collection.immutable.Seq
import scala.concurrent.duration._
import scala.concurrent.{ Await, Future }
class GroupManagerTest extends TestKit(ActorSystem("System")) with MockitoSugar with Matchers with MarathonSpec {
val actorId = new AtomicInteger(0)
def serializeExecutions() = SerializeExecution(system, s"serializeGroupUpdates${actorId.incrementAndGet()}")
test("Assign dynamic app ports") {
val group = Group(PathId.empty, Set(
AppDefinition("/app1".toPath, ports = Seq(0, 0, 0)),
AppDefinition("/app2".toPath, ports = Seq(1, 2, 3)),
AppDefinition("/app2".toPath, ports = Seq(0, 2, 0))
))
val update = manager(10, 20).assignDynamicServicePorts(Group.empty, group)
update.transitiveApps.filter(_.hasDynamicPort) should be('empty)
update.transitiveApps.flatMap(_.ports.filter(x => x >= 10 && x <= 20)) should have size 5
}
test("Assign dynamic service ports specified in the container") {
import Container.Docker
import Docker.PortMapping
import org.apache.mesos.Protos.ContainerInfo.DockerInfo.Network
val container = Container(
docker = Some(Docker(
image = "busybox",
network = Some(Network.BRIDGE),
portMappings = Some(Seq(
PortMapping(containerPort = 8080, hostPort = 0, servicePort = 0, protocol = "tcp"),
PortMapping (containerPort = 9000, hostPort = 10555, servicePort = 10555, protocol = "udp"),
PortMapping(containerPort = 9001, hostPort = 0, servicePort = 0, protocol = "tcp")
))
))
)
val group = Group(PathId.empty, Set(
AppDefinition("/app1".toPath, ports = Seq(), container = Some(container))
))
val update = manager(minServicePort = 10, maxServicePort = 20).assignDynamicServicePorts(Group.empty, group)
update.transitiveApps.filter(_.hasDynamicPort) should be ('empty)
update.transitiveApps.flatMap(_.ports.filter(x => x >= 10 && x <= 20)) should have size 2
}
// Regression test for #1365
test("Export non-dynamic service ports specified in the container to the ports field") {
import Container.Docker
import Docker.PortMapping
import org.apache.mesos.Protos.ContainerInfo.DockerInfo.Network
val container = Container(
docker = Some(Docker(
image = "busybox",
network = Some(Network.BRIDGE),
portMappings = Some(Seq(
PortMapping(containerPort = 8080, hostPort = 0, servicePort = 80, protocol = "tcp"),
PortMapping (containerPort = 9000, hostPort = 10555, servicePort = 81, protocol = "udp")
))
))
)
val group = Group(PathId.empty, Set(
AppDefinition("/app1".toPath, container = Some(container))
))
val update = manager(minServicePort = 90, maxServicePort = 900).assignDynamicServicePorts(Group.empty, group)
update.transitiveApps.filter(_.hasDynamicPort) should be ('empty)
update.transitiveApps.flatMap(_.ports) should equal (Set(80, 81).map(Integer.valueOf))
}
test("Already taken ports will not be used") {
val group = Group(PathId.empty, Set(
AppDefinition("/app1".toPath, ports = Seq(0, 0, 0)),
AppDefinition("/app2".toPath, ports = Seq(0, 2, 0))
))
val update = manager(10, 20).assignDynamicServicePorts(Group.empty, group)
update.transitiveApps.filter(_.hasDynamicPort) should be('empty)
update.transitiveApps.flatMap(_.ports.filter(x => x >= 10 && x <= 20)) should have size 5
}
test("If there are not enough ports, a PortExhausted exception is thrown") {
val group = Group(PathId.empty, Set(
AppDefinition("/app1".toPath, ports = Seq(0, 0, 0)),
AppDefinition("/app2".toPath, ports = Seq(0, 0, 0))
))
val ex = intercept[PortRangeExhaustedException] {
manager(10, 15).assignDynamicServicePorts(Group.empty, group)
}
ex.minPort should be(10)
ex.maxPort should be(15)
}
test("Retain the original container definition if port mappings are missing") {
import Container.Docker
val container = Container(
docker = Some(Docker(
image = "busybox"
))
)
val group = Group(PathId.empty, Set(
AppDefinition(
id = "/app1".toPath,
container = Some(container)
)
))
val result = manager(10, 15).assignDynamicServicePorts(Group.empty, group)
result.apps.size should be(1)
val app = result.apps.head
app.container should be (Some(container))
}
test("Don't store invalid groups") {
val scheduler = mock[MarathonSchedulerService]
val taskTracker = mock[TaskTracker]
val appRepo = mock[AppRepository]
val groupRepo = mock[GroupRepository]
val eventBus = mock[EventStream]
val provider = mock[StorageProvider]
val config = new ScallopConf(Seq("--master", "foo")) with MarathonConf
config.afterInit()
val manager = new GroupManager(
serializeUpdates = serializeExecutions(), scheduler = scheduler,
taskTracker = taskTracker, groupRepo = groupRepo, appRepo = appRepo,
storage = provider, config = config, eventBus = eventBus)
val group = Group(PathId.empty, Set(AppDefinition("/app1".toPath)), Set(Group("/group1".toPath)))
when(groupRepo.group(groupRepo.zkRootName)).thenReturn(Future.successful(None))
intercept[ConstraintViolationException] {
Await.result(manager.update(group.id, _ => group), 3.seconds)
}.printStackTrace()
verify(groupRepo, times(0)).store(any(), any())
}
def manager(minServicePort: Int, maxServicePort: Int) = {
val config = new ScallopConf(Seq(
"--master", "foo",
"--local_port_min", minServicePort.toString, "--local_port_max", maxServicePort.toString)) with MarathonConf
config.afterInit()
val scheduler = mock[MarathonSchedulerService]
val taskTracker = mock[TaskTracker]
val appRepo = mock[AppRepository]
val groupRepo = mock[GroupRepository]
val eventBus = mock[EventStream]
val provider = mock[StorageProvider]
new GroupManager(
serializeUpdates = serializeExecutions(), scheduler = scheduler, taskTracker = taskTracker,
groupRepo = groupRepo, appRepo = appRepo,
storage = provider, config = config, eventBus = eventBus)
}
}
| lelezi/marathon | src/test/scala/mesosphere/marathon/state/GroupManagerTest.scala | Scala | apache-2.0 | 6,911 |
package eu.radusw
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import api._
import eu.radusw.services._
import monix.eval.Task
import monix.execution.Scheduler
import monix.execution.schedulers.SchedulerService
class Components(config: AppConfig.Config) {
implicit val clientSystem: ActorSystem = ActorSystem("client")
implicit val clientMat: ActorMaterializer = ActorMaterializer()
implicit val blockingOpsScheduler: SchedulerService = Scheduler.io()
val mailService: MailService[Task] = new MailServiceInterpreter(config)
val santaService: SantaService[Task] = new SantaServiceInterpreter()
val versionApi = new VersionApi()
val mainApi = new MainApi(santaService, mailService)
val frontendResource = new FrontendResource()
}
| radusw/SantaEMail | src/main/scala/eu/radusw/Components.scala | Scala | apache-2.0 | 772 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.utils.caffe
import scala.collection.JavaConverters._
import caffe.Caffe
import caffe.Caffe.EltwiseParameter.EltwiseOp
import caffe.Caffe.{BlobProto, PoolingParameter, _}
import com.google.protobuf.GeneratedMessage
import com.intel.analytics.bigdl.nn.Graph._
import com.intel.analytics.bigdl.nn._
import com.intel.analytics.bigdl.nn.abstractnn.{AbstractModule, Activity}
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.tensor.TensorNumericMath.TensorNumeric
import com.intel.analytics.bigdl.utils.Table
import scala.collection.mutable.ArrayBuffer
import scala.reflect.ClassTag
/**
* [[Converter]] implementation for caffe recommended LayerParameter conversion
*/
class LayerConverter[T: ClassTag](implicit ev: TensorNumeric[T]) extends Converter[T]{
override protected def fromCaffeConvolution(layer : GeneratedMessage) : Seq[ModuleNode[T]] = {
val name = getLayerName(layer)
val param = getConvolutionParam(layer).get
val group = if (param.getGroup == 0) 1 else param.getGroup
val weightBlob = getBlob(layer, 0).get
val biasBlob = getBlob(layer, 1)
if (!biasBlob.isDefined) {
throw new RuntimeException(s"${getLayerName(layer)} without bias is not supported now")
}
val nInputPlane = if (weightBlob.hasShape) weightBlob.getShape.getDim(1)
else weightBlob.getChannels * group
val nOutPlane = if (weightBlob.hasShape) weightBlob.getShape.getDim(0)
else weightBlob.getNum
var kw = param.getKernelW
var kh = param.getKernelH
var dw = param.getStrideW
var dh = param.getStrideH
if (kw ==0 || kh == 0) {
kw = param.getKernelSize(0)
kh = kw
}
if (dw == 0 || dh == 0) {
if (param.getStrideList.size() != 0) {
dw = param.getStride(0)
dh = dw
} else {
// use default values if not found
dw = 1
dh = 1
}
}
var pw = param.getPadW
var ph = param.getPadH
if (pw == 0 || ph == 0) {
if (param.getPadList.size() != 0) {
pw = param.getPad(0)
ph = pw
}
}
Seq(SpatialConvolution[T](nInputPlane.toInt, nOutPlane.toInt,
kw, kh, dw, dh, pw, ph, group).setName(getLayerName(layer)).inputs())
}
override protected def fromCaffeInnerProduct(layer : GeneratedMessage) : Seq[ModuleNode[T]] = {
val param = getInnerProductParam(layer).get
val withBias = param.getBiasTerm
val layerName = getLayerName(layer)
val weightBlob = getBlob(layer.asInstanceOf[LayerParameter], 0).get
var nInputPlane = 0
if (weightBlob.hasShape) {
nInputPlane = weightBlob.getShape.getDim(1).toInt
}
else {
nInputPlane = weightBlob.getWidth
}
val nOutputPlane = param.getNumOutput
val linear = Linear[T](nInputPlane, nOutputPlane, withBias = withBias).setName(layerName)
val node = linear.inputs()
if(nInputPlane != nOutputPlane) {
// Construct a view layer in between
val view = View[T](nInputPlane).inputs()
view -> node
Seq(view, node)
} else {
Seq(node)
}
}
override protected def fromCaffeBatchNormalization(layer : GeneratedMessage) :
Seq[ModuleNode[T]] = {
val param = layer.asInstanceOf[LayerParameter].getBatchNormParam
val eps = param.getEps
Seq(BatchNormalization[T](3, eps).inputs())
}
override protected def fromCaffeELU(layer : GeneratedMessage) : Seq[ModuleNode[T]] = {
val param = layer.asInstanceOf[LayerParameter].getEluParam
var alpha = 1.0
if (param.hasAlpha) alpha = param.getAlpha
Seq(ELU[T](alpha).inputs())
}
override protected def fromCaffeReshape(layer : GeneratedMessage) : Seq[ModuleNode[T]] = {
val param = layer.asInstanceOf[LayerParameter].getReshapeParam
val shapeSize = param.getShape.getDimList.toArray.asInstanceOf[Array[Int]]
Seq(Reshape[T](shapeSize).setName(getLayerName(layer)).inputs())
}
override protected def fromCaffeScale(layer : GeneratedMessage) : Seq[ModuleNode[T]] = {
val param = layer.asInstanceOf[LayerParameter].getScaleParam
val layerName = getLayerName(layer)
// second blob as weight for scale
val weightBlob = getBlob(layer, 1)
if (weightBlob.isDefined) {
val blob = weightBlob.get
val size = blob.getShape.getDimList.toArray.asInstanceOf[Array[Int]]
Seq(Scale[T](size).setName(layerName).inputs())
} else {
val inputBlob = getBlob(layer, 0).get
val shape = inputBlob.getShape
val axis = param.getAxis
var numOfAxis = param.getNumAxes
if (numOfAxis == -1) {
numOfAxis = shape.getDimList.size() - 1
} else {
numOfAxis = numOfAxis + axis
}
val size = shape.getDimList.subList(axis, numOfAxis).asInstanceOf[Array[Int]]
Seq(Scale[T](size).setName(layerName).inputs())
}
}
override protected def fromCaffeBias(layer : GeneratedMessage) : Seq[ModuleNode[T]] = {
val param = layer.asInstanceOf[LayerParameter].getBiasParam
// input blob
val weightBlob = getBlob(layer, 0)
val size = weightBlob.get.getShape.getDimList.toArray().asInstanceOf[Array[Int]].product
Seq(Add[T](size).setName(getLayerName(layer)).inputs())
}
override protected def fromCaffeTile(layer : GeneratedMessage) : Seq[ModuleNode[T]] = {
val param = layer.asInstanceOf[LayerParameter].getTileParam
val axis = param.getAxis
val tiles = param.getTiles
Seq(Replicate[T](tiles, axis).setName(getLayerName(layer)).inputs())
}
override protected def getLayerName(layer : GeneratedMessage) : String = {
layer.asInstanceOf[LayerParameter].getName
}
override protected def getLayerType(layer : GeneratedMessage) : String = {
layer.asInstanceOf[LayerParameter].getType
}
protected def getConvolutionParam(layer : GeneratedMessage): Option[ConvolutionParameter] = {
Some(layer.asInstanceOf[LayerParameter].getConvolutionParam)
}
override protected def getLRNParam(layer: GeneratedMessage): Option[LRNParameter] = {
Some(layer.asInstanceOf[LayerParameter].getLrnParam)
}
override protected def getPoolingParam(layer : GeneratedMessage): Option[PoolingParameter] = {
Some(layer.asInstanceOf[LayerParameter].getPoolingParam)
}
protected def getInnerProductParam(layer : GeneratedMessage): Option[InnerProductParameter] = {
Some(layer.asInstanceOf[LayerParameter].getInnerProductParam)
}
protected def getDropoutParam(layer : GeneratedMessage): Option[DropoutParameter] = {
Some(layer.asInstanceOf[LayerParameter].getDropoutParam)
}
protected def getConcatParam(layer : GeneratedMessage): Option[ConcatParameter] = {
Some(layer.asInstanceOf[LayerParameter].getConcatParam)
}
override protected def getPowerParam(layer : GeneratedMessage) : Option[PowerParameter] = {
Some(layer.asInstanceOf[LayerParameter].getPowerParam)
}
override protected def getThresholdParam(layer : GeneratedMessage): Option[ThresholdParameter] = {
Some(layer.asInstanceOf[LayerParameter].getThresholdParam)
}
override protected def getSliceParam(layer : GeneratedMessage): Option[SliceParameter] = {
Some(layer.asInstanceOf[LayerParameter].getSliceParam)
}
override protected def getEltWiseParam(layer : GeneratedMessage): Option[EltwiseParameter] = {
Some(layer.asInstanceOf[LayerParameter].getEltwiseParam)
}
private def getBlob(layer : GeneratedMessage, ind: Int): Option[Caffe.BlobProto] = {
if (layer.asInstanceOf[LayerParameter].getBlobsCount > ind) {
Some(layer.asInstanceOf[LayerParameter].getBlobs(ind))
} else {
None
}
}
}
| 122689305/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/utils/caffe/LayerConverter.scala | Scala | apache-2.0 | 8,213 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.scalding.filecache
import cascading.tuple.Tuple
import com.twitter.scalding._
import java.net.URI
import org.scalatest.{ Matchers, WordSpec }
import scala.collection.mutable
// TODO: fix? is it worth having the dep on mockito just for this?
class DistributedCacheFileSpec extends WordSpec with Matchers {
case class UnknownMode(buffers: Map[Source, mutable.Buffer[Tuple]]) extends TestMode with CascadingLocal
/*
val conf = smartMock[Configuration]
lazy val hdfsMode = {
val mode = smartMock[Hdfs]
mode.conf returns conf
mode.strict returns true
mode
}
lazy val hadoopTestMode = {
val mode = smartMock[HadoopTest]
mode.conf returns conf
mode
}
lazy val testMode = smartMock[Test]
lazy val localMode = smartMock[Local]
*/
val uriString = "hdfs://foo.example:1234/path/to/the/stuff/thefilename.blah"
val uri = new URI(uriString)
val hashHex = URIHasher(uri)
val hashedFilename = hashHex + "-thefilename.blah"
"DistributedCacheFile" should {
"symlinkNameFor must return a hashed name" in {
DistributedCacheFile.symlinkNameFor(uri) shouldBe hashedFilename
}
}
/*
"UncachedFile.add" should {
val dcf = new UncachedFile(Right(uri))
def sharedLocalBehavior(implicit mode: Mode) = {
"use the local file path" in {
val cf = dcf.add()(mode)
cf.path shouldBe (uri.getPath)
cf.file shouldBe (new File(uri.getPath).getCanonicalFile)
}
}
"with a Test mode" in {
sharedLocalBehavior(testMode)
}
"with a Local mode" in {
sharedLocalBehavior(localMode)
}
"throw RuntimeException when the current mode isn't recognized" in {
val mode = smartMock[UnknownMode]
an[RuntimeException] should be thrownBy (dcf.add()(mode))
}
}
*/
}
| jzmq/scalding | scalding-core/src/test/scala/com/twitter/scalding/filecache/DistributedCacheFileSpec.scala | Scala | apache-2.0 | 2,372 |
/*
Copyright 2017-2018 EconomicSL
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package org.economicsl.mechanisms
package object voting
| EconomicSL/mechanisms | src/main/scala/org/economicsl/mechanisms/voting/package.scala | Scala | apache-2.0 | 619 |
// vim: expandtab shiftwidth=2 softtabstop=2
/*
 * Copyright 2011 杨博 (Yang Bo)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.dongxiguo.zeroLog.test.formatting
package compileTimeConfiguration.privateAccess.typed
import com.dongxiguo.zeroLog.Logger
import org.junit._
import com.dongxiguo.fastring.Fastring
import Fastring.Implicits._
import language.postfixOps
import Assert._
private object FinalValLoggerTest {
implicit private final val (logger: Logger, formatter, appender) = ZeroLoggerFactory.newLogger(this)
import formatter._
private def doTest(p1: Double, p2: Int) = {
val e = new Exception("For test only.") with scala.util.control.NoStackTrace
var i = 0
val start = System.nanoTime()
while(i < 1000) {
logger.finest(
fast"Hello, World!${start}blahblah$i${p1}\\n${p2}blahblah" ->
e)
logger.finest(
fast"Hello, World!${start}blahblah$i${p1}\\n${p2}blahblah" ->
e)
logger.finest(
fast"Hello, World!${start}blahblah$i${p1}\\n${p2}blahblah" ->
e)
logger.finest(
fast"Hello, World!${start}blahblah$i${p1}\\n${p2}blahblah" ->
e)
logger.finest(
fast"Hello, World!${start}blahblah$i${p1}\\n${p2}blahblah" ->
e)
logger.finest(
fast"Hello, World!${start}blahblah$i${p1}\\n${p2}blahblah" ->
e)
logger.finest(
fast"Hello, World!${start}blahblah$i${p1}\\n${p2}blahblah" ->
e)
logger.finest(
fast"Hello, World!${start}blahblah$i${p1}\\n${p2}blahblah" ->
e)
logger.finest(
fast"Hello, World!${start}blahblah$i${p1}\\n${p2}blahblah" ->
e)
logger.finest(
fast"Hello, World!${start}blahblah$i${p1}\\n${p2}blahblah" ->
e)
i += 1
}
val time = (System.nanoTime() - start)
time.toDouble / 10000000.0
}
}
import FinalValLoggerTest._
class FinalValLoggerTest {
import formatter._
@Before
def setUp {
}
@After
def tearDown {
}
@Test
def test = {
print("zero-log spends "); print(0 until 5 map { FinalValLoggerTest.doTest(math.random, _) } min); println(" nanoseconds per log.")
}
}
| Atry/zero-log | src/test/scala/com/dongxiguo/zeroLog/test/formatting/compileTimeConfiguration/privateAccess/typed/FinalValLoggerTest.scala | Scala | apache-2.0 | 2,690 |
package arena.foundation
/**
* @author Bhupendra Bhudia <[email protected]>
* 18/11/2015 13:35
*/
object Direction extends Enum[Direction]
sealed trait Direction extends Direction.Value
case object TOP extends Direction
case object RIGHT extends Direction
case object BOTTOM extends Direction
case object LEFT extends Direction
| quedexco/arena-scala | src/main/scala/arena/foundation/Direction.scala | Scala | apache-2.0 | 352 |
/*
* MIT License
*
* Copyright (c) 2016 Ramjet Anvil
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package com.ramjetanvil.padrone.util.scheduling
import akka.actor.{Actor, ActorSystem, Cancellable, Props}
import scala.concurrent.duration.FiniteDuration
class ActorRescheduler[Key](implicit actorSystem: ActorSystem) extends Rescheduler[Key] with AutoCloseable {
sealed trait ScheduleAction
case class ScheduleOnce(key: Key, delay: FiniteDuration, work: () => Unit) extends ScheduleAction
  case class Schedule(key: Key, initialDelay: FiniteDuration, interval: FiniteDuration, work: () => Unit) extends ScheduleAction
case class RemoveSchedule(key: Key) extends ScheduleAction
val scheduler = actorSystem.actorOf(Props(new Actor {
import scala.collection.mutable
import actorSystem.dispatcher
val schedules = mutable.Map[Key, Cancellable]()
override def receive = {
case ScheduleOnce(key, delay, work) =>
storeSchedule(key, actorSystem.scheduler.scheduleOnce(delay)(work()))
case Schedule(key, initialDelay, interval, work) =>
storeSchedule(key, actorSystem.scheduler.schedule(initialDelay, interval)(work()))
case RemoveSchedule(key) => removeSchedule(key)
}
private def storeSchedule(key: Key, cancellable: Cancellable) = {
removeSchedule(key)
schedules += (key -> cancellable)
}
private def removeSchedule(key: Key) = schedules.remove(key).foreach(_.cancel())
}))
override def scheduleOnce(key: Key, delay: FiniteDuration)(work: => Unit): Unit = {
scheduler ! ScheduleOnce(key, delay, () => work)
}
override def schedule(key: Key, initialDelay: FiniteDuration, interval: FiniteDuration)(work: => Unit): Unit = {
scheduler ! Schedule(key, initialDelay, interval, () => work)
}
override def cancelSchedule(key: Key): Unit = {
scheduler ! RemoveSchedule(key)
}
override def close(): Unit = {
actorSystem.stop(scheduler)
}
}
| RamjetAnvil/padrone | server/src/main/scala/com/ramjetanvil/padrone/util/scheduling/ActorRescheduler.scala | Scala | mit | 2,971 |
/**
* Created by Romain Reuillon on 04/11/16.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*/
package fr.iscpif.doors.server
import freedsl.dsl._
import freedsl.io._
import cats._
import cats.data._
import cats.implicits._
import slick.driver.H2Driver.api._
import db.dbIOActionIsMonad
import fr.iscpif.doors.ext.Data.{ApiRep, DSLError}
import fr.iscpif.doors.server.db.DB
object DSL {
//
// implicit def pureIsFunctor = new Functor[Pure] {
// override def map[A, B](fa: Pure[A])(f: (A) => B): Pure[B] = Pure(() => f(fa.f()))
// }
// implicit def DBAndSideIsFunctor[S, T] = new Functor[DBAndSide[S, T, ?]] {
// override def map[A, B](fa: DBAndSide[S, T, A])(f: (A) => B): DBAndSide[T, B] =
// DBAndSide[S, T, B](fa.dbEffect, (t: T) => f(fa.sideEffect(t)))
// }
//
// implicit def DBIsMonad = new Monad[DB] {
// override def pure[A](x: A): DB[A] = DB[A]((scheme: fr.iscpif.doors.server.db.DBScheme) => DBIOAction.successful(x))
// override def flatMap[A, B](fa: DB[A])(f: (A) => DB[B]): DB[B] = {
// def newDBEffect =
// (scheme: fr.iscpif.doors.server.db.DBScheme) =>
// for {
// a <- fa.dbEffect(scheme)
// b <- f(a)
// } yield b
//
// DB(newDBEffect)
// }
// //TODO make this monad tailRec
// override def tailRecM[A, B](a: A)(f: (A) => DB[Either[A, B]]): DB[B] =
// flatMap(f(a)) {
// case Right(b) => pure(b)
// case Left(nextA) => tailRecM(nextA)(f)
// }
// }
implicit class DBDecorator[T](db: DB[T]) {
def effect[M[_], U](side: SideEffect[M, T, U]) = DBAndSide(db, side)
def effect[M[_], U](f: T => M[U]) = DBAndSide(db, SideEffect(f))
def chain[M[_], S, U](dbAndSide: DBAndSide[S, U, M]) = compose(db, dbAndSide)
def chain[M[_], S, U](dbAndSide: T => DBAndSide[S, U, M]) = compose(db, dbAndSide)
}
object SideEffect {
def apply[M[_], T, U](f: T => M[U]) = Kleisli(f)
}
type SideEffect[M[_], T, U] = Kleisli[M, T, U]
def compose[T, S, U, M[_]](db: fr.iscpif.doors.server.db.DB[T], dBAndSide: DBAndSide[S, U, M]) =
dBAndSide.copy(
db =
for {
_ <- db
r <- dBAndSide.db
} yield r
)
def compose[T, S, U, M[_]](db: fr.iscpif.doors.server.db.DB[T], dBAndSide: T => DBAndSide[S, U, M]) = {
def newDB =
for {
t <- db
s <- dBAndSide(t).db
} yield (t, s)
def newSide = Kleisli[M, (T, S), U] { ts => dBAndSide(ts._1).sideEffect.run(ts._2) }
DBAndSide(newDB, newSide)
}
case class DBAndSide[T, U, M[_]](db: fr.iscpif.doors.server.db.DB[T], sideEffect: SideEffect[M, T, U]) {
def chain[V](effect: SideEffect[M, U, V])(implicit monad: Monad[M]) = DBAndSide(db, sideEffect andThen effect)
def map[V](f: U => V)(implicit functor: Functor[M]) = DBAndSide(db, sideEffect.map(f))
}
object Executable {
import dsl._
import dsl.implicits._
implicit def dbIsExecutable[U] = new Executable[db.DB[U], U, M] {
override def execute(t: DB[U], settings: Settings, database: Database): Either[freedsl.dsl.Error, U] =
interpreter(settings).run(db.runTransaction[U, M](t, database))
}
implicit def dbAndSide[T, U] = new Executable[DBAndSide[T, U, M], U, M] {
override def execute(t: DBAndSide[T, U, M], settings: Settings, database: Database) = {
def prg =
for {
dbRes <- db.runTransaction[T, M](t.db, database)
effect <- t.sideEffect.run(dbRes)
} yield effect
interpreter(settings).run(prg)
}
}
}
trait Executable[T, U, M[_]] {
def execute(t: T, settings: Settings, database: Database): Either[freedsl.dsl.Error, U]
}
implicit class ExecuteDecorator[T, U, M[_]](t: T)(implicit executable: Executable[T, U, M]) {
def execute(settings: Settings, database: Database) = executable.execute(t, settings, database)
}
implicit def eitherToOption[T](either: Either[_, Seq[T]]): Option[T] = eitherToSeq(either).headOption
implicit def apiRepToOption[T](apiRep: ApiRep[T]): Option[T] = apiRep match {
case Right(t)=> Some(t)
case _=> None
}
implicit def eitherToSeq[T](either: Either[_, Seq[T]]): Seq[T] = either.right.toSeq.flatten
implicit def eitherToBoolean(either: Either[_, Boolean]): Boolean = either match {
case Right(r) => r
case _ => false
}
implicit def eitherToApiRep[T](either: Either[freedsl.dsl.Error, T]): ApiRep[T] = either match {
case Right(t) => Right(t)
case Left(l) => Left(DSLError)
}
implicit def eitherOptionToApiRep[T](either: Either[freedsl.dsl.Error, Option[T]]): ApiRep[T] = either match {
case Right(t) => t match {
case Some(t)=> Right(t)
case _=> Left(DSLError)
}
case Left(l) => Left(DSLError)
}
object Email {
def interpreter(smtp: SMTPSettings) = new Interpreter {
def send(address: String, subject: String, content: String)(implicit context: Context) =
DoorsMailer.send(smtp, subject, content, address) match {
case util.Failure(e) => Left(SendMailError(e))
case util.Success(_) => Right(())
}
}
case class SendMailError(e: Throwable) extends Error
}
@dsl trait Email[M[_]] {
def send(address: String, subject: String, content: String): M[Unit]
}
object Date {
def interpreter = new Interpreter {
def now(implicit context: Context) = result(System.currentTimeMillis())
}
}
@dsl trait Date[M[_]] {
def now: M[Long]
}
val dsl = merge(Email, Date, IO)
def interpreter(settings: Settings) =
merge(
Email.interpreter(settings.smtp),
Date.interpreter,
IO.interpreter)
}
| ISCPIF/doors | server/src/main/scala/fr/iscpif/doors/server/DSL.scala | Scala | agpl-3.0 | 6,408 |
package rexstream.events
import rexstream.collections._
import scala.collection.mutable
/**
* Created by GregRos on 06/02/2016.
*/
trait AbsEvent[TParam] extends AutoCloseable {
final def +=(f: (TParam) => Unit): AutoCloseable = {
this.subscribe(f)
}
final def -=(f: (TParam) => Unit) = {
this.unsubscribe(f)
}
private val onSubEventRaised = (param : TParam) => {
raise(param)
}
final def ++=(otherEvent : AbsEvent[ _ >: TParam]) = {
this += otherEvent.onSubEventRaised
}
final def --=(otherEvent : AbsEvent[_ >: TParam]) = {
this -= otherEvent.onSubEventRaised
}
def subscribe(f: (TParam) => Unit): AutoCloseable
def unsubscribe(f: (TParam) => Unit): Unit
protected def raise(param : TParam) : Unit
}
| GregRos/Rexstream | ScalaFBL/src/rexstream/events/AbsEvent.scala | Scala | mit | 812 |
/*******************************************************************************
* Copyright (c) Nikolai Koudelia
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Eclipse Public License v1.0
* which accompanies this distribution, and is available at
* http://www.eclipse.org/legal/epl-v10.html
*
* Contributors:
* Nikolai Koudelia - initial API and implementation
*******************************************************************************/
package easyfit.test
import org.scalatest.junit.JUnitRunner
import org.junit.runner.RunWith
@RunWith(classOf[JUnitRunner])
class VariableInitialization extends EasyTest
{
"A Query table" should "initialize and redefine a variable" in
{
val (in, out, expected) = createTestData()
runQueryTest(in, out, expected)
}
"A Row table" should "initialize and redefine a variable" in
{
val (in, out, expected) = createTestData()
runRowTest(in, out.iterator, expected)
}
def createTestData(): (Seq[Seq[String]], Seq[Seq[String]], Seq[Seq[String]]) =
{
val header = Seq("C1")
val (in1, out1) = (Seq("$v1="), Seq("123"))
val (in2, out2) = (Seq("$v1"), Seq("X"))
val (in3, out3) = (Seq("$v1="), Seq("456"))
val (in4, out4) = (Seq("$v1"), Seq("123"))
val (in5, out5) = (Seq("$v1"), Seq("456"))
val exp1 = Seq("pass") //header
val exp2 = Seq("pass: $v1 <- [123]")
val exp3 = Seq("fail: $v1 [123] != [X]")
val exp4 = Seq("pass: $v1 <- [456]")
val exp5 = Seq("fail: $v1 [456] != [123]")
val exp6 = Seq("pass: $v1 [456]")
(
Seq(header, in1, in2, in3, in4, in5),
Seq(out1, out2, out3, out4, out5),
Seq(exp1, exp2, exp3, exp4, exp5, exp6)
)
}
} | nikoudel/easyfit | easyFit/src/test/scala/easyfit.test/VariableInitialization.scala | Scala | epl-1.0 | 1,834 |
/**
* Copyright (C) 2009-2015 Typesafe Inc. <http://www.typesafe.com>
*/
package akka
import sbt._
import sbt.Keys._
import java.io.File
import sbtrelease.ReleasePlugin.autoImport.releasePublishArtifactsAction
import com.typesafe.sbt.pgp.PgpKeys
object Publish extends AutoPlugin {
val defaultPublishTo = settingKey[File]("Default publish directory")
override def trigger = allRequirements
override def requires = sbtrelease.ReleasePlugin
override lazy val projectSettings = Seq(
crossPaths := false,
pomExtra := akkaPomExtra,
publishTo := akkaPublishTo.value,
credentials ++= akkaCredentials,
organizationName := "Lightbend Inc.",
organizationHomepage := Some(url("https://www.lightbend.com")),
homepage := Some(url("https://github.com/akka/akka-persistence-cassandra")),
publishMavenStyle := true,
pomIncludeRepository := { x => false },
defaultPublishTo := crossTarget.value / "repository",
releasePublishArtifactsAction := PgpKeys.publishSigned.value
)
def akkaPomExtra = {
/* The scm info is automatic from the sbt-git plugin
<scm>
<url>[email protected]:akka/akka-persistence-cassandra.git</url>
<connection>scm:git:[email protected]:akka/akka-persistence-cassandra.git</connection>
</scm>
*/
<developers>
<developer>
<id>contributors</id>
<name>Contributors</name>
<email>[email protected]</email>
<url>https://github.com/akka/akka-persistence-cassandra/graphs/contributors</url>
</developer>
</developers>
}
private def akkaPublishTo = Def.setting {
sonatypeRepo(version.value) orElse localRepo(defaultPublishTo.value)
}
private def sonatypeRepo(version: String): Option[Resolver] =
Option(sys.props("publish.maven.central")) filter (_.toLowerCase == "true") map { _ =>
val nexus = "https://oss.sonatype.org/"
if (version endsWith "-SNAPSHOT") "snapshots" at nexus + "content/repositories/snapshots"
else "releases" at nexus + "service/local/staging/deploy/maven2"
}
private def localRepo(repository: File) =
Some(Resolver.file("Default Local Repository", repository))
private def akkaCredentials: Seq[Credentials] =
Option(System.getProperty("akka.publish.credentials", null)).map(f => Credentials(new File(f))).toSeq
}
| ktoso/akka-persistence-cassandra | project/Publish.scala | Scala | apache-2.0 | 2,330 |
/*
* Copyright (c) <2015-2016>, see CONTRIBUTORS
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the <organization> nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package ch.usi.inf.l3.sana.primj.phases
import ch.usi.inf.l3.sana
import sana.tiny.dsl._
import sana.tiny.core._
import sana.tiny.core.Implicits._
import sana.tiny.ast.{Tree, NoTree}
import sana.primj.PrimjNodes
import sana.primj.namers._
trait PrimjNamerFamilyApi extends TransformationFamily[Tree, Tree] {
self =>
override def default = { case s: Tree => s }
def components: List[PartialFunction[Tree, Tree]] =
generateComponents[Tree, Tree](
"Program,MethodDef,ValDef,TypeUse,Ident",
"NamerComponent", "name", "")
// "Ident,TypeUse,Assign,Ternary,Apply,Return,Binary,Literal")
def name: Tree => Tree = family
}
case class PrimjNamerFamily(compiler: CompilerInterface)
extends PrimjNamerFamilyApi
| amanjpro/languages-a-la-carte | primj/src/main/scala/phases/NamerFamily.scala | Scala | bsd-3-clause | 2,311 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtInteger, Linked}
import uk.gov.hmrc.ct.computations.CP264
case class B30(value: Int) extends CtBoxIdentifier("Trading losses of this or later accounting periods") with CtInteger
object B30 extends Linked[CP264, B30] {
override def apply(source: CP264): B30 = B30(source.value)
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600/v2/B30.scala | Scala | apache-2.0 | 968 |
package com.getjenny.starchat.entities.io
import com.getjenny.starchat.entities.persistents.QADocument
/**
* Created by Angelo Leto <[email protected]> on 18/02/19.
*/
case class Conversation (
count: Long = 0,
docs: List[QADocument] = List.empty[QADocument]
)
case class Conversations (
total: Long = 0,
conversations: List[Conversation] = List.empty[Conversation]
)
| GetJenny/starchat | src/main/scala/com/getjenny/starchat/entities/io/Conversations.scala | Scala | gpl-2.0 | 531 |
package com.lightbend
/**
* Copyright Β© 2016 Lightbend, Inc
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* NO COMMERCIAL SUPPORT OR ANY OTHER FORM OF SUPPORT IS OFFERED ON
* THIS SOFTWARE BY LIGHTBEND, Inc.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.File
import java.nio.charset.StandardCharsets
import java.nio.file.{ Files, Paths }
import scala.util.matching.Regex
package object coursegentools {
def toConsoleRed(msg: String): String = Console.RED + msg + Console.RESET
def toConsoleGreen(msg: String): String = Console.GREEN + msg + Console.RESET
def toConsoleCyan(msg: String): String = Console.CYAN + msg + Console.RESET
def printError(msg: String)(implicit eofe: ExitOnFirstError): Unit = {
println(toConsoleRed(msg))
if (eofe.exitOnFirstError) System.exit(-1)
}
def printNotification(msg: String): Unit =
println(toConsoleGreen(msg))
type Seq[+A] = scala.collection.immutable.Seq[A]
val Seq = scala.collection.immutable.Seq
val ExerciseNumberSpec: Regex = """.*_(\\d{3})_.*""".r
def extractExerciseNr(exercise: String): Int = {
val ExerciseNumberSpec(d) = exercise
d.toInt
}
def renumberExercise(exercise: String, newNumber: Int)(implicit config: MainSettings): String = {
    val newNumberLZ = f"${config.exerciseProjectPrefix}_$newNumber%03d_"
    val oldNumberPrefix = f"${config.exerciseProjectPrefix}_${extractExerciseNr(exercise)}%03d_"
    exercise.replaceFirst(oldNumberPrefix, newNumberLZ)
}
def getExerciseName(exercises: Vector[String], exerciseNumber: Int): Option[String] =
exercises.find(exercise => extractExerciseNr(exercise) == exerciseNumber)
def folderExists(folder: File): Boolean =
folder.exists() && folder.isDirectory
def dumpStringToFile(string: String, filePath: String): Unit =
Files.write(Paths.get(filePath), string.getBytes(StandardCharsets.UTF_8))
}
| lightbend-training/course-management-tools | core/src/main/scala/com/lightbend/coursegentools/package.scala | Scala | apache-2.0 | 2,376 |
import Macros._
object Test {
/** Object with a docstring */
object Obj
/** Val with a docstring */
val x: Null = null
val y: Null = null // val without a docstring
def main(args: Array[String]): Unit = {
printComment(Obj)
printComment(x)
printComment(y)
}
}
| som-snytt/dotty | tests/pending/run/tasty-comments/quoted_2.scala | Scala | apache-2.0 | 290 |
package funsets
import scala.annotation.tailrec
/**
* 2. Purely Functional Sets.
*/
object FunSets {
/**
* We represent a set by its characteristic function, i.e.
* its `contains` predicate.
*/
type FunSet = Int => Boolean
/**
* Indicates whether a set contains a given element.
*/
def contains(s: FunSet, elem: Int): Boolean =
s(elem)
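  /*
   * Illustrative example (not part of the original assignment code): with this encoding the
   * set {1, 3, 5} is just the predicate `x => x == 1 || x == 3 || x == 5`, and `contains`
   * simply applies it:
   *
   *   val smallOdds: FunSet = x => x == 1 || x == 3 || x == 5
   *   contains(smallOdds, 3) // true
   *   contains(smallOdds, 4) // false
   *
   * `smallOdds` is a hypothetical name used only for this sketch.
   */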
/**
* Returns the set of the one given element.
*/
def singleElementSet(elem: Int): FunSet = {
def f(x: Int): Boolean = (x == elem)
f
}
/*
   * Alternatively:
* def singleElementSet(elem: Int): FunSet = (x => x == elem)
*/
/**
* Returns the union of the two given sets,
* the sets of all elements that are in either `s` or `t`.
*/
def union(s: FunSet, t: FunSet): FunSet = {
def f(x: Int): Boolean = (s(x) || t(x))
f
}
/*
   * Alternatively:
   * def union(d: FunSet, a: FunSet): FunSet = (x => contains(d,x) || contains(a,x))
*/
/**
* Returns the intersection of the two given sets,
* the set of all elements that are both in `s` or `t`.
*/
def intersect(s: FunSet, t: FunSet): FunSet =
{
def f(x: Int): Boolean = (s(x) && t(x))
f
}
/*
   * Alternatively:
* def intersect(v: FunSet, i: FunSet): FunSet = (x => contains(v,x) && contains(i,x))
*/
/**
* Returns the difference of the two given sets,
* the set of all elements of `s` that are not in `t`.
*/
def diff(s: FunSet, t: FunSet): FunSet =
{
def f(x: Int): Boolean = (s(x) && !t(x))
f
}
/*
   * Alternatively:
* def diff(a: FunSet, z: FunSet): FunSet = (x => contains(a,x) && !contains(z,x))
*/
/**
* Returns the subset of `s` for which `p` holds.
*/
def filter(s: FunSet, p: Int => Boolean): FunSet =
{
def f(x: Int): Boolean = (s(x) && p(x))
f
}
/*
   * Alternatively:
* def filter(f: FunSet, i: Int => Boolean): FunSet = (x => contains(f,x) && i(x))
*/
/**
* The bounds for `forall` and `exists` are +/- 1000.
*/
val bound = 1000
/**
* Returns whether all bounded integers within `s` satisfy `p`.
*/
def forall(s: FunSet, p: Int => Boolean): Boolean = {
def iter(a: Int): Boolean = {
if (a > bound) true
else if (contains(s, a) && !p(a)) false
else iter(a + 1)
}
iter(-bound)
}
/**
* Returns whether there exists a bounded integer within `s`
* that satisfies `p`.
   * forall => there is no 'bounded' integer such that p(x) is false
   * !forall => there exists a 'bounded' integer such that p(x) is false
*/
def exists(s: FunSet, p: Int => Boolean): Boolean =
!forall(s, x => !p(x))
/**
* Returns a set transformed by applying `f` to each element of `s`.
*/
def map(s: FunSet, f: Int => Int): FunSet =
(y => exists(s, x => y == f(x)))
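  /*
   * Illustrative note (not part of the original assignment code): for instance
   * `map(toSet(List(1, 2)), x => 10 * x)` is the predicate `y => exists(toSet(List(1, 2)), x => y == 10 * x)`,
   * i.e. exactly the set {10, 20}.
   */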
def toSet(ints: List[Int]): FunSet =
if (ints.isEmpty) x => false
else union(singleElementSet(ints.head), toSet(ints.tail))
/*def toList(set: FunSet): List[Int] = {
}*/
/**
* Displays the contents of a set
*/
def toString(s: FunSet): String = {
val xs = for (i <- -bound to bound if contains(s, i)) yield i
xs.mkString("{", ",", "}")
}
/**
* Prints the contents of a set on the console.
*/
def printSet(s: FunSet) {
println(toString(s))
}
}
| PiSurQuatre/fp-scala-public | funsets/src/main/scala/funsets/FunSets.scala | Scala | mit | 3,268 |
package sql.migration
import java.sql.SQLException
import akka.actor.ActorSystem
import akka.stream.Materializer
import akka.stream.scaladsl._
import com.google.protobuf.ByteString
import com.typesafe.scalalogging.Logger
import im.actor.server.db.DbExtension
import im.actor.server.model.{ SerializedUpdate, UpdateMapping }
import im.actor.server.persist.{ AuthIdRepo, UserRepo }
import org.slf4j.LoggerFactory
import slick.driver.PostgresDriver.api._
import slick.jdbc.{ GetResult, ResultSetConcurrency, ResultSetType, SetParameter }
import scala.concurrent.duration._
import scala.concurrent.{ Await, ExecutionContext, Future }
import scala.language.postfixOps
object V20151108011300__FillUserSequence {
final case class Obsolete(authId: Long, timestamp: Long, seq: Int, header: Int, data: Array[Byte], userIds: String, groupIds: String)
final case class New(userId: Int, seq: Int, timestamp: Long, mapping: Array[Byte])
final class UserSequenceTable(tag: Tag) extends Table[New](tag, "user_sequence") {
def userId = column[Int]("user_id", O.PrimaryKey)
def seq = column[Int]("seq", O.PrimaryKey)
def timestamp = column[Long]("timestamp")
def mapping = column[Array[Byte]]("mapping")
def * = (userId, seq, timestamp, mapping) <> (New.tupled, New.unapply)
}
val newTable = TableQuery[UserSequenceTable]
  implicit val getByteArray = GetResult(r => r.nextBytes())
  implicit val setByteArray = SetParameter[Array[Byte]] { (bs, pp) => pp.setBytes(bs) }
  implicit val getObsolete = GetResult(r =>
Obsolete(
authId = r.nextLong(),
timestamp = r.nextLong(),
seq = r.nextInt(),
header = r.nextInt(),
data = r.nextBytes(),
userIds = r.nextString,
groupIds = r.nextString()
))
val BulkSize = 300
val Parallelism = 3
}
//nohup bin/actor-cli migrate-user-sequence > log.log &
final class V20151108011300__FillUserSequence(implicit system: ActorSystem, materializer: Materializer) {
import V20151108011300__FillUserSequence._
private val executor = system.dispatcher
private implicit val ec = ExecutionContext.fromExecutor(executor)
private val log = Logger(LoggerFactory.getLogger(getClass))
private val db = DbExtension(system).db
def migrate(): Unit = {
try {
log.warn("Starting filling user sequence")
val count =
Await.result({
Source.fromPublisher(db.stream(UserRepo.allIds))
          .mapAsync(Parallelism) { userId =>
            db.run(for {
              authIds <- AuthIdRepo.findIdByUserId(userId)
              _ = log.warn(s"Found ${authIds.length} authIds for ${userId}")
              oldestOpt <- maxSeq(authIds)
} yield (userId, oldestOpt))
}
.map {
            case pair @ (userId, authIdOpt) =>
              if (authIdOpt.isEmpty)
                log.warn(s"User ${userId} has no authIds, ignoring")
pair
}
.collect {
            case (userId, Some(authId)) => (userId, authId)
}
.mapAsyncUnordered(Parallelism) {
            case (userId, authId) =>
move(userId, authId)
}
.runFold(0)(_ + _)
}, 48.hours)
log.warn(s"Migration complete! Moved ${count} updates")
} catch {
      case e: SQLException =>
log.error("Failed to migrate", e.getNextException())
throw e.getNextException()
}
}
private def move(userId: Int, authId: Long): Future[Int] = {
log.warn(s"Moving user $userId")
    db.run(sql"""SELECT seq FROM user_sequence WHERE user_id = $userId ORDER BY seq DESC LIMIT 1""".as[Int]).map(_.headOption.getOrElse(0)) flatMap { startFrom =>
log.warn(s"Starting userId ${userId} from seq: ${startFrom}")
Source.fromPublisher(
db.stream(
sql"""SELECT auth_id, timestamp, seq, header, serialized_data, user_ids_str, group_ids_str FROM seq_updates_ngen WHERE auth_id = $authId and seq > $startFrom ORDER BY timestamp ASC"""
.as[Obsolete].withStatementParameters(
rsType = ResultSetType.ForwardOnly,
rsConcurrency = ResultSetConcurrency.ReadOnly,
fetchSize = BulkSize
).transactionally
)
)
.map {
          case Obsolete(_, timestamp, seq, header, data, userIds, groupIds) =>
log.debug(s"Parsing userId: ${userId}, seq: ${seq}")
New(
userId = userId,
seq = seq,
timestamp = timestamp,
mapping = UpdateMapping(
default = Some(SerializedUpdate(
header = header,
body = ByteString.copyFrom(data),
userIds = userIds.split(",").view.filter(_.nonEmpty).map(_.toInt).toSeq,
groupIds = groupIds.split(",").view.filter(_.nonEmpty).map(_.toInt).toSeq
))
).toByteArray
)
}
.grouped(BulkSize)
        .mapAsync(1) { bulk =>
val action = newTable ++= bulk
db.run(action) map (_.getOrElse(0))
}
.runFold(0)(_ + _)
        .map { count =>
log.warn(s"Moved ${count} updates for user ${userId}")
count
}
}
}
private def maxSeq(authIds: Seq[Long]): DBIO[Option[Long]] = {
if (authIds.isEmpty) DBIO.successful(None)
else
for {
        seqs <- DBIO.sequence(authIds map (a => getSeq(a) map (a -> _)))
      } yield Some(seqs.maxBy(_._2.getOrElse(0))._1)
}
private def getSeq(authId: Long) = sql"""SELECT seq FROM seq_updates_ngen WHERE auth_id = $authId ORDER BY timestamp DESC LIMIT 1""".as[Int].headOption
}
| EaglesoftZJ/actor-platform | actor-server/actor-core/src/main/scala/sql/migration/V20151108011300__FillUserSequence.scala | Scala | agpl-3.0 | 5,686 |
package models
import play.api.libs.json._
case class User(
appId: Int, // The client app identifier
username: String, // The user identifier, expecting a custom "name" claim in JWT (could use "sub" instead?)
group: Option[String] = None,
isActive: Boolean = false,
isAdmin: Boolean = false
)
object User {
implicit val userFormat = Json.format[User]
}
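// Illustrative sketch (not part of the original file): with the implicit `userFormat` above,
//   Json.toJson(User(appId = 1, username = "alice"))
// yields {"appId":1,"username":"alice","isActive":false,"isAdmin":false}; `group` is omitted
// because the play-json macro format drops None-valued Option fields when writing.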
| chuv-ssrc/bam-server-scala | app/models/User.scala | Scala | gpl-3.0 | 381 |
/*
* Copyright 2014 Treode, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package movies
import com.treode.store.alt.Transaction
import org.joda.time.DateTime
import movies.{PhysicalModel => PM, SearchResult => SR}
case class SearchResult (movies: Seq [SR.Movie], actors: Seq [SR.Actor])
/** See README.md. */
object SearchResult {
case class Movie (id: String, title: String, released: DateTime)
object Movie {
def lookup (tx: Transaction, movieId: String): Option [Movie] =
for (movie <- tx.get (PM.MovieTable) (movieId))
yield Movie (movieId, movie.title, movie.released)
def lookup (tx: Transaction, movieIds: Set [String]): Seq [Movie] =
movieIds.toSeq.map (lookup (tx, _)) .flatten
}
case class Actor (id: String, name: String, born: DateTime)
object Actor {
def lookup (tx: Transaction, actorId: String): Option [Actor] =
for (actor <- tx.get (PM.ActorTable) (actorId))
yield Actor (actorId, actor.name, actor.born)
def lookup (tx: Transaction, actorIds: Set [String]): Seq [Actor] =
actorIds.toSeq.map (lookup (tx, _)) .flatten
}
def lookup (tx: Transaction, entry: PM.IndexEntry): SearchResult =
SearchResult (Movie.lookup (tx, entry.movies), Actor.lookup (tx, entry.actors))
}
| Treode/store | demos/movies/server/src/movies/SearchResult.scala | Scala | apache-2.0 | 1,797 |
// Databricks notebook source
// MAGIC %md
// MAGIC
// MAGIC # [SDS-2.2, Scalable Data Science](https://lamastex.github.io/scalable-data-science/sds/2/2/)
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC # Introduction to Spark Streaming
// MAGIC
// MAGIC Spark Streaming is an extension of the core Spark API that enables scalable, high-throughput, fault-tolerant stream processing of live data streams.
// MAGIC
// MAGIC This is a walk-through of excerpts from the following resources:
// MAGIC
// MAGIC * the Databricks Guide:
// MAGIC * [Spark Streaming - RDD-based](https://docs.databricks.com/spark/latest/rdd-streaming/index.html) and
// MAGIC   * [Structured Streaming - DataFrame/Dataset-Based](https://docs.databricks.com/spark/latest/structured-streaming/index.html#structured-streaming)
// MAGIC * Spark programming guide:
// MAGIC * [http://spark.apache.org/docs/latest/streaming-programming-guide.html](http://spark.apache.org/docs/latest/streaming-programming-guide.html)
// MAGIC * [http://spark.apache.org/docs/latest/structured-streaming-programming-guide.html](http://spark.apache.org/docs/latest/structured-streaming-programming-guide.html)
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC Overview
// MAGIC ========
// MAGIC
// MAGIC Spark Streaming is an extension of the core Spark API that enables
// MAGIC scalable, high-throughput, fault-tolerant stream processing of live data
// MAGIC streams.
// MAGIC
// MAGIC Data can be ingested from many sources like
// MAGIC
// MAGIC * [Kafka](http://kafka.apache.org/documentation.html#introduction),
// MAGIC * [Flume](https://flume.apache.org/),
// MAGIC * [Twitter](https://twitter.com/) [Streaming](https://dev.twitter.com/streaming/overview) and [REST](https://dev.twitter.com/rest/public) APIs,
// MAGIC * [ZeroMQ](http://zeromq.org/),
// MAGIC * [Amazon Kinesis](https://aws.amazon.com/kinesis/streams/), or
// MAGIC * [TCP sockets](http://www.gnu.org/software/mit-scheme/documentation/mit-scheme-ref/TCP-Sockets.html),
// MAGIC * etc
// MAGIC
// MAGIC and can be processed using
// MAGIC complex algorithms expressed with high-level functions like `map`,
// MAGIC `reduce`, `join` and `window`.
// MAGIC
// MAGIC Finally, processed data can be pushed out
// MAGIC to filesystems, databases, and live dashboards. In fact, you can apply Spark's
// MAGIC
// MAGIC * [machine learning](http://spark.apache.org/docs/latest/mllib-guide.html) and
// MAGIC * [graph processing](http://spark.apache.org/docs/latest/graphx-programming-guide.html) algorithms
// MAGIC on data streams.
// MAGIC
// MAGIC 
// MAGIC
// MAGIC #### Internally, it works as follows:
// MAGIC
// MAGIC * Spark Streaming receives live input data streams and
// MAGIC * divides the data into batches,
// MAGIC * which are then processed by the Spark engine
// MAGIC * to generate the final stream of results in batches.
// MAGIC
// MAGIC 
// MAGIC
// MAGIC Spark Streaming provides a high-level abstraction called **discretized
// MAGIC stream** or **DStream**, which represents a continuous stream of data.
// MAGIC DStreams can be created either from input data streams from sources such
// MAGIC as Kafka, Flume, and Kinesis, or by applying high-level operations on
// MAGIC other DStreams. Internally, a **DStream is represented as a sequence of
// MAGIC [RDDs](http://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.rdd.RDD)**.
// MAGIC
// MAGIC This guide shows you how to start writing Spark Streaming programs with
// MAGIC DStreams. You can write Spark Streaming programs in Scala, Java or
// MAGIC Python (introduced in Spark 1.2), all of which are presented in this
// MAGIC [guide](http://spark.apache.org/docs/latest/streaming-programming-guide.html).
// MAGIC
// MAGIC Here, we will focus on Streaming in Scala.
// MAGIC
// MAGIC * * * * *
// MAGIC
// MAGIC **Spark Streaming** is a near-real-time micro-batch stream processing engine as opposed to other real-time stream processing frameworks like [Apache Storm](http://storm.apache.org/). Typically 'near-real-time' in Spark Streaming can be in the order of seconds as opposed to milliseconds, for example.
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC Three Quick Examples
// MAGIC ===============
// MAGIC
// MAGIC Before we go into the details of how to write your own Spark Streaming program, let us take a quick look at what a simple Spark Streaming program looks like.
// MAGIC
// MAGIC We will choose the first two examples in Databricks notebooks below.
// COMMAND ----------
// MAGIC %md
// MAGIC #### Spark Streaming Hello World Examples
// MAGIC
// MAGIC These are adapted from several publicly available Databricks Notebooks
// MAGIC
// MAGIC 1. Streaming Word Count (Scala)
// MAGIC * Tweet Collector for Capturing Live Tweets
// MAGIC * Twitter Hashtag Count (Scala)
// MAGIC
// MAGIC Other examples we won't try here:
// MAGIC
// MAGIC * Kinesis Word Count (Scala)
// MAGIC * Kafka Word Count (Scala)
// MAGIC * FileStream Word Count (Python)
// MAGIC * etc.
// COMMAND ----------
// MAGIC %md
// MAGIC ## 1. Streaming Word Count
// MAGIC
// MAGIC This is a *hello world* example of Spark Streaming which counts words on 1 second batches of streaming data.
// MAGIC
// MAGIC It uses an in-memory string generator as a dummy source for streaming data.
// COMMAND ----------
// MAGIC %md
// MAGIC ## Configurations
// MAGIC
// MAGIC Configurations that control the streaming app in the notebook
// COMMAND ----------
// === Configuration to control the flow of the application ===
val stopActiveContext = true
// "true" = stop if any existing StreamingContext is running;
// "false" = don't stop, and let it run undisturbed, but your latest code may not be used
// === Configurations for Spark Streaming ===
val batchIntervalSeconds = 1
val eventsPerSecond = 1000 // For the dummy source
// Verify that the attached Spark cluster is 1.4.0+
require(sc.version.replace(".", "").toInt >= 140, "Spark 1.4.0+ is required to run this notebook. Please attach it to a Spark 1.4.0+ cluster.")
// COMMAND ----------
// MAGIC %md
// MAGIC ### Imports
// MAGIC
// MAGIC Import all the necessary libraries. If you see any error here, you have to make sure that you have attached the necessary libraries to the attached cluster.
// COMMAND ----------
import org.apache.spark._
import org.apache.spark.storage._
import org.apache.spark.streaming._
// COMMAND ----------
// MAGIC %md
// MAGIC Discretized Streams (DStreams)
// MAGIC ------------------------------
// MAGIC
// MAGIC **Discretized Stream** or **DStream** is the basic abstraction provided
// MAGIC by Spark Streaming. It represents a continuous stream of data, either
// MAGIC the input data stream received from source, or the processed data stream
// MAGIC generated by transforming the input stream. Internally, a DStream is
// MAGIC represented by a continuous series of RDDs, which is Spark's abstraction
// MAGIC of an immutable, distributed dataset (see [Spark Programming
// MAGIC Guide](http://spark.apache.org/docs/latest/programming-guide.html#resilient-distributed-datasets-rdds)
// MAGIC for more details). Each RDD in a DStream contains data from a certain
// MAGIC interval, as shown in the following figure.
// MAGIC
// MAGIC 
// COMMAND ----------
// MAGIC %md
// MAGIC ### Setup: Define the function that sets up the StreamingContext
// MAGIC
// MAGIC In this we will do two things.
// MAGIC
// MAGIC * Define a custom receiver as the dummy source (no need to understand this)
// MAGIC * this custom receiver will have lines that end with a random number between 0 and 9 and read:
// MAGIC ```
// MAGIC I am a dummy source 2
// MAGIC I am a dummy source 8
// MAGIC ...
// MAGIC ```
// MAGIC * Define the function `creatingFunc` that creates and sets up the StreamingContext (in the cells that follow)
// COMMAND ----------
// MAGIC %md
// MAGIC This is the dummy source implemented as a custom receiver. **No need to understand this now.**
// COMMAND ----------
// This is the dummy source implemented as a custom receiver. No need to fully understand this.
import scala.util.Random
import org.apache.spark.streaming.receiver._
class DummySource(ratePerSec: Int) extends Receiver[String](StorageLevel.MEMORY_AND_DISK_2) {
def onStart() {
// Start the thread that receives data over a connection
new Thread("Dummy Source") {
override def run() { receive() }
}.start()
}
def onStop() {
// There is nothing much to do as the thread calling receive()
    // is designed to stop by itself if isStopped() returns false
}
/** Create a socket connection and receive data until receiver is stopped */
private def receive() {
while(!isStopped()) {
store("I am a dummy source " + Random.nextInt(10))
Thread.sleep((1000.toDouble / ratePerSec).toInt)
}
}
}
// COMMAND ----------
// MAGIC %md
// MAGIC ## Transforming and Acting on the DStream of lines
// MAGIC
// MAGIC Any operation applied on a DStream translates to operations on the
// MAGIC underlying RDDs. For converting
// MAGIC a stream of lines to words, the `flatMap` operation is applied on each
// MAGIC RDD in the `lines` DStream to generate the RDDs of the `wordStream` DStream.
// MAGIC This is shown in the following figure.
// MAGIC
// MAGIC 
// MAGIC
// MAGIC These underlying RDD transformations are computed by the Spark engine.
// MAGIC The DStream operations hide most of these details and provide the
// MAGIC developer with a higher-level API for convenience.
// MAGIC
// MAGIC Next `reduceByKey` is used to get `wordCountStream` that counts the words in `wordStream`.
// MAGIC
// MAGIC Finally, this is registered as a temporary table for each RDD in the DStream.
// COMMAND ----------
// MAGIC %md
// MAGIC Let's try to understand the following `creatingFunc` to create a new StreamingContext and setting it up for word count and registering it as temp table for each batch of 1000 lines per second in the stream.
// COMMAND ----------
var newContextCreated = false // Flag to detect whether new context was created or not
// Function to create a new StreamingContext and set it up
def creatingFunc(): StreamingContext = {
// Create a StreamingContext - starting point for a Spark Streaming job
val ssc = new StreamingContext(sc, Seconds(batchIntervalSeconds))
// Create a stream that generates 1000 lines per second
val stream = ssc.receiverStream(new DummySource(eventsPerSecond))
// Split the lines into words, and then do word count
val wordStream = stream.flatMap { _.split(" ") }
val wordCountStream = wordStream.map(word => (word, 1)).reduceByKey(_ + _)
// Create temp table at every batch interval
wordCountStream.foreachRDD { rdd =>
rdd.toDF("word", "count").createOrReplaceTempView("batch_word_count")
}
stream.foreachRDD { rdd =>
System.out.println("# events = " + rdd.count())
System.out.println("\t " + rdd.take(10).mkString(", ") + ", ...")
}
ssc.remember(Minutes(1)) // To make sure data is not deleted by the time we query it interactively
println("Creating function called to create new StreamingContext")
newContextCreated = true
ssc
}
// COMMAND ----------
// MAGIC %md
// MAGIC ## Start Streaming Job: Stop existing StreamingContext if any and start/restart the new one
// MAGIC
// MAGIC Here we are going to use the configurations at the top of the notebook to decide whether to stop any existing StreamingContext, and start a new one, or recover one from existing checkpoints.
// COMMAND ----------
// Stop any existing StreamingContext
// The getActive function is provided by Databricks to access active Streaming Contexts
if (stopActiveContext) {
StreamingContext.getActive.foreach { _.stop(stopSparkContext = false) }
}
// Get or create a streaming context
val ssc = StreamingContext.getActiveOrCreate(creatingFunc)
if (newContextCreated) {
println("New context created from currently defined creating function")
} else {
println("Existing context running or recovered from checkpoint, may not be running currently defined creating function")
}
// Start the streaming context in the background.
ssc.start()
// This is to ensure that we wait for some time before the background streaming job starts. This will put this cell on hold for 5 times the batchIntervalSeconds.
ssc.awaitTerminationOrTimeout(batchIntervalSeconds * 5 * 1000)
// COMMAND ----------
// MAGIC %md
// MAGIC ### Interactive Querying
// MAGIC
// MAGIC Now let's try querying the table. You can run this command again and again, you will find the numbers changing.
// COMMAND ----------
// MAGIC %sql
// MAGIC select * from batch_word_count
// COMMAND ----------
// MAGIC %md
// MAGIC Try again for current table.
// COMMAND ----------
// MAGIC %sql select * from batch_word_count
// COMMAND ----------
// MAGIC %md
// MAGIC ### Go to Spark UI now and see Streaming job running
// COMMAND ----------
// MAGIC %md ### Finally, if you want to stop the StreamingContext, you can uncomment and execute the following
// MAGIC
// MAGIC `StreamingContext.getActive.foreach { _.stop(stopSparkContext = false) }`
// COMMAND ----------
StreamingContext.getActive.foreach { _.stop(stopSparkContext = false) } // please do this if you are done!
// COMMAND ----------
// MAGIC %md
// MAGIC # Next - Spark Streaming of live tweets.
// MAGIC Let's do two more example applications of streaming involving live tweets. | lamastex/scalable-data-science | db/2/2/024_SparkStreamingIntro.scala | Scala | unlicense | 13,901 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.chill
import scala.collection.immutable.{
BitSet,
HashSet,
ListSet,
NumericRange,
Range,
SortedSet,
SortedMap,
ListMap,
HashMap,
Queue
}
import scala.collection.mutable.{
WrappedArray,
BitSet => MBitSet,
Map => MMap,
HashMap => MHashMap,
Set => MSet,
HashSet => MHashSet,
ListBuffer,
Queue => MQueue,
Buffer
}
import scala.util.matching.Regex
import com.twitter.chill.java.PackageRegistrar
import _root_.java.io.Serializable
import scala.collection.JavaConverters._
/**
* This class has a no-arg constructor, suitable for use with reflection instantiation
* It has no registered serializers, just the standard Kryo configured for Kryo.
*/
class EmptyScalaKryoInstantiator extends KryoInstantiator {
override def newKryo = {
val k = new KryoBase
k.setRegistrationRequired(false)
k.setInstantiatorStrategy(new org.objenesis.strategy.StdInstantiatorStrategy)
k
}
}
object ScalaKryoInstantiator extends Serializable {
private val mutex = new AnyRef with Serializable // some serializable object
@transient private var kpool: KryoPool = null
/**
* Return a KryoPool that uses the ScalaKryoInstantiator
*/
def defaultPool: KryoPool = mutex.synchronized {
if (null == kpool) {
kpool = KryoPool.withByteArrayOutputStream(guessThreads, new ScalaKryoInstantiator)
}
kpool
}
private def guessThreads: Int = {
val cores = Runtime.getRuntime.availableProcessors
val GUESS_THREADS_PER_CORE = 4
GUESS_THREADS_PER_CORE * cores
}
}
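/*
 * Illustrative usage sketch (not part of the original file): round-trip a value through the
 * default pool; `toBytesWithClass` and `fromBytes` are the chill KryoPool helpers.
 *
 *   val pool = ScalaKryoInstantiator.defaultPool
 *   val bytes = pool.toBytesWithClass(Map("a" -> 1, "b" -> 2))
 *   val back = pool.fromBytes(bytes) // deserializes back to the original Map
 */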
/** Makes an empty instantiator then registers everything */
class ScalaKryoInstantiator extends EmptyScalaKryoInstantiator {
override def newKryo = {
val k = super.newKryo
val reg = new AllScalaRegistrar
reg(k)
k
}
}
class ScalaCollectionsRegistrar extends IKryoRegistrar {
def apply(newK: Kryo) {
// for binary compat this is here, but could be moved to RichKryo
def useField[T](cls: Class[T]) {
val fs = new com.esotericsoftware.kryo.serializers.FieldSerializer(newK, cls)
fs.setIgnoreSyntheticFields(false) // scala generates a lot of these attributes
newK.register(cls, fs)
}
// The wrappers are private classes:
useField(List(1, 2, 3).asJava.getClass)
useField(List(1, 2, 3).iterator.asJava.getClass)
useField(Map(1 -> 2, 4 -> 3).asJava.getClass)
useField(new _root_.java.util.ArrayList().asScala.getClass)
useField(new _root_.java.util.HashMap().asScala.getClass)
/*
* Note that subclass-based use: addDefaultSerializers, else: register
* You should go from MOST specific, to least to specific when using
* default serializers. The FIRST one found is the one used
*/
newK
// wrapper array is abstract
.forSubclass[WrappedArray[Any]](new WrappedArraySerializer[Any])
.forSubclass[BitSet](new BitSetSerializer)
.forSubclass[SortedSet[Any]](new SortedSetSerializer)
.forClass[Some[Any]](new SomeSerializer[Any])
.forClass[Left[Any, Any]](new LeftSerializer[Any, Any])
.forClass[Right[Any, Any]](new RightSerializer[Any, Any])
.forTraversableSubclass(Queue.empty[Any])
// List is a sealed class, so there are only two subclasses:
.forTraversableSubclass(List.empty[Any])
// Add ListBuffer subclass before Buffer to prevent the more general case taking precedence
.forTraversableSubclass(ListBuffer.empty[Any], isImmutable = false)
// add mutable Buffer before Vector, otherwise Vector is used
.forTraversableSubclass(Buffer.empty[Any], isImmutable = false)
// Vector is a final class
.forTraversableClass(Vector.empty[Any])
.forTraversableSubclass(ListSet.empty[Any])
// specifically register small sets since Scala represents them differently
.forConcreteTraversableClass(Set[Any]('a))
.forConcreteTraversableClass(Set[Any]('a, 'b))
.forConcreteTraversableClass(Set[Any]('a, 'b, 'c))
.forConcreteTraversableClass(Set[Any]('a, 'b, 'c, 'd))
// default set implementation
.forConcreteTraversableClass(HashSet[Any]('a, 'b, 'c, 'd, 'e))
// specifically register small maps since Scala represents them differently
.forConcreteTraversableClass(Map[Any, Any]('a -> 'a))
.forConcreteTraversableClass(Map[Any, Any]('a -> 'a, 'b -> 'b))
.forConcreteTraversableClass(Map[Any, Any]('a -> 'a, 'b -> 'b, 'c -> 'c))
.forConcreteTraversableClass(Map[Any, Any]('a -> 'a, 'b -> 'b, 'c -> 'c, 'd -> 'd))
// default map implementation
.forConcreteTraversableClass(HashMap[Any, Any]('a -> 'a, 'b -> 'b, 'c -> 'c, 'd -> 'd, 'e -> 'e))
// The normal fields serializer works for ranges
.registerClasses(Seq(classOf[Range.Inclusive],
classOf[NumericRange.Inclusive[_]],
classOf[NumericRange.Exclusive[_]]))
// Add some maps
.forSubclass[SortedMap[Any, Any]](new SortedMapSerializer)
.forTraversableSubclass(ListMap.empty[Any, Any])
.forTraversableSubclass(HashMap.empty[Any, Any])
// The above ListMap/HashMap must appear before this:
.forTraversableSubclass(Map.empty[Any, Any])
// here are the mutable ones:
.forTraversableClass(MBitSet.empty, isImmutable = false)
.forTraversableClass(MHashMap.empty[Any, Any], isImmutable = false)
.forTraversableClass(MHashSet.empty[Any], isImmutable = false)
.forTraversableSubclass(MQueue.empty[Any], isImmutable = false)
.forTraversableSubclass(MMap.empty[Any, Any], isImmutable = false)
.forTraversableSubclass(MSet.empty[Any], isImmutable = false)
}
}
class JavaWrapperCollectionRegistrar extends IKryoRegistrar {
def apply(newK: Kryo) {
newK.register(JavaIterableWrapperSerializer.wrapperClass, new JavaIterableWrapperSerializer)
}
}
/** Registers all the scala (and java) serializers we have */
class AllScalaRegistrar extends IKryoRegistrar {
def apply(k: Kryo) {
val col = new ScalaCollectionsRegistrar
col(k)
val jcol = new JavaWrapperCollectionRegistrar
jcol(k)
// Register all 22 tuple serializers and specialized serializers
ScalaTupleSerialization.register(k)
k.forClass[Symbol](new KSerializer[Symbol] {
override def isImmutable = true
def write(k: Kryo, out: Output, obj: Symbol) { out.writeString(obj.name) }
def read(k: Kryo, in: Input, cls: Class[Symbol]) = Symbol(in.readString)
})
.forSubclass[Regex](new RegexSerializer)
.forClass[ClassManifest[Any]](new ClassManifestSerializer[Any])
.forSubclass[Manifest[Any]](new ManifestSerializer[Any])
.forSubclass[scala.Enumeration#Value](new EnumerationSerializer)
// use the singleton serializer for boxed Unit
val boxedUnit = scala.Unit.box(())
k.register(boxedUnit.getClass, new SingletonSerializer(boxedUnit))
PackageRegistrar.all()(k)
}
}
| steveloughran/chill | chill-scala/src/main/scala/com/twitter/chill/ScalaKryoInstantiator.scala | Scala | apache-2.0 | 7,444 |
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO2
package com.google.protobuf.descriptor
import _root_.scalapb.internal.compat.JavaConverters._
/** @param ctype
* The ctype option instructs the C++ code generator to use a different
* representation of the field than it normally would. See the specific
* options below. This option is not yet implemented in the open source
* release -- sorry, we'll try to include it in a future version!
* @param packed
* The packed option can be enabled for repeated primitive fields to enable
* a more efficient representation on the wire. Rather than repeatedly
* writing the tag and type for each element, the entire array is encoded as
* a single length-delimited blob. In proto3, only explicit setting it to
* false will avoid using packed encoding.
* @param jstype
* The jstype option determines the JavaScript type used for values of the
* field. The option is permitted only for 64 bit integral and fixed types
* (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING
* is represented as JavaScript string, which avoids loss of precision that
* can happen when a large value is converted to a floating point JavaScript.
* Specifying JS_NUMBER for the jstype causes the generated JavaScript code to
* use the JavaScript "number" type. The behavior of the default option
* JS_NORMAL is implementation dependent.
*
* This option is an enum to permit additional types to be added, e.g.
* goog.math.Integer.
* @param lazy
* Should this field be parsed lazily? Lazy applies only to message-type
* fields. It means that when the outer message is initially parsed, the
* inner message's contents will not be parsed but instead stored in encoded
* form. The inner message will actually be parsed when it is first accessed.
*
* This is only a hint. Implementations are free to choose whether to use
* eager or lazy parsing regardless of the value of this option. However,
* setting this option true suggests that the protocol author believes that
* using lazy parsing on this field is worth the additional bookkeeping
* overhead typically needed to implement it.
*
* This option does not affect the public interface of any generated code;
* all method signatures remain the same. Furthermore, thread-safety of the
* interface is not affected by this option; const methods remain safe to
* call from multiple threads concurrently, while non-const methods continue
* to require exclusive access.
*
*
* Note that implementations may choose not to check required fields within
* a lazy sub-message. That is, calling IsInitialized() on the outer message
* may return true even if the inner message has missing required fields.
* This is necessary because otherwise the inner message would have to be
* parsed in order to perform the check, defeating the purpose of lazy
* parsing. An implementation which chooses not to check required fields
* must be consistent about it. That is, for any particular sub-message, the
* implementation must either *always* check its required fields, or *never*
* check its required fields, regardless of whether or not the message has
* been parsed.
* @param deprecated
* Is this field deprecated?
* Depending on the target platform, this can emit Deprecated annotations
* for accessors, or it will be completely ignored; in the very least, this
* is a formalization for deprecating fields.
* @param weak
* For Google-internal migration only. Do not use.
* @param uninterpretedOption
* The parser stores options it doesn't recognize here. See above.
*/
@SerialVersionUID(0L)
final case class FieldOptions(
ctype: _root_.scala.Option[com.google.protobuf.descriptor.FieldOptions.CType] = _root_.scala.None,
packed: _root_.scala.Option[_root_.scala.Boolean] = _root_.scala.None,
jstype: _root_.scala.Option[com.google.protobuf.descriptor.FieldOptions.JSType] = _root_.scala.None,
`lazy`: _root_.scala.Option[_root_.scala.Boolean] = _root_.scala.None,
deprecated: _root_.scala.Option[_root_.scala.Boolean] = _root_.scala.None,
weak: _root_.scala.Option[_root_.scala.Boolean] = _root_.scala.None,
uninterpretedOption: _root_.scala.Seq[com.google.protobuf.descriptor.UninterpretedOption] = _root_.scala.Seq.empty,
unknownFields: _root_.scalapb.UnknownFieldSet = _root_.scalapb.UnknownFieldSet()
) extends scalapb.GeneratedMessage with scalapb.Message[FieldOptions] with scalapb.lenses.Updatable[FieldOptions] with _root_.scalapb.ExtendableMessage[FieldOptions] {
@transient
private[this] var __serializedSizeCachedValue: _root_.scala.Int = 0
private[this] def __computeSerializedValue(): _root_.scala.Int = {
var __size = 0
if (ctype.isDefined) {
val __value = ctype.get
__size += _root_.com.google.protobuf.CodedOutputStream.computeEnumSize(1, __value.value)
};
if (packed.isDefined) {
val __value = packed.get
__size += _root_.com.google.protobuf.CodedOutputStream.computeBoolSize(2, __value)
};
if (jstype.isDefined) {
val __value = jstype.get
__size += _root_.com.google.protobuf.CodedOutputStream.computeEnumSize(6, __value.value)
};
if (`lazy`.isDefined) {
val __value = `lazy`.get
__size += _root_.com.google.protobuf.CodedOutputStream.computeBoolSize(5, __value)
};
if (deprecated.isDefined) {
val __value = deprecated.get
__size += _root_.com.google.protobuf.CodedOutputStream.computeBoolSize(3, __value)
};
if (weak.isDefined) {
val __value = weak.get
__size += _root_.com.google.protobuf.CodedOutputStream.computeBoolSize(10, __value)
};
uninterpretedOption.foreach { __item =>
val __value = __item
__size += 2 + _root_.com.google.protobuf.CodedOutputStream.computeUInt32SizeNoTag(__value.serializedSize) + __value.serializedSize
}
__size += unknownFields.serializedSize
__size
}
final override def serializedSize: _root_.scala.Int = {
var read = __serializedSizeCachedValue
if (read == 0) {
read = __computeSerializedValue()
__serializedSizeCachedValue = read
}
read
}
def writeTo(`_output__`: _root_.com.google.protobuf.CodedOutputStream): _root_.scala.Unit = {
ctype.foreach { __v =>
val __m = __v
_output__.writeEnum(1, __m.value)
};
packed.foreach { __v =>
val __m = __v
_output__.writeBool(2, __m)
};
deprecated.foreach { __v =>
val __m = __v
_output__.writeBool(3, __m)
};
`lazy`.foreach { __v =>
val __m = __v
_output__.writeBool(5, __m)
};
jstype.foreach { __v =>
val __m = __v
_output__.writeEnum(6, __m.value)
};
weak.foreach { __v =>
val __m = __v
_output__.writeBool(10, __m)
};
uninterpretedOption.foreach { __v =>
val __m = __v
_output__.writeTag(999, 2)
_output__.writeUInt32NoTag(__m.serializedSize)
__m.writeTo(_output__)
};
unknownFields.writeTo(_output__)
}
def mergeFrom(`_input__`: _root_.com.google.protobuf.CodedInputStream): com.google.protobuf.descriptor.FieldOptions = {
var __ctype = this.ctype
var __packed = this.packed
var __jstype = this.jstype
var __lazy = this.`lazy`
var __deprecated = this.deprecated
var __weak = this.weak
val __uninterpretedOption = (_root_.scala.collection.immutable.Vector.newBuilder[com.google.protobuf.descriptor.UninterpretedOption] ++= this.uninterpretedOption)
val _unknownFields__ = new _root_.scalapb.UnknownFieldSet.Builder(this.unknownFields)
var _done__ = false
while (!_done__) {
val _tag__ = _input__.readTag()
_tag__ match {
case 0 => _done__ = true
case 8 =>
__ctype = Option(com.google.protobuf.descriptor.FieldOptions.CType.fromValue(_input__.readEnum()))
case 16 =>
__packed = Option(_input__.readBool())
case 48 =>
__jstype = Option(com.google.protobuf.descriptor.FieldOptions.JSType.fromValue(_input__.readEnum()))
case 40 =>
__lazy = Option(_input__.readBool())
case 24 =>
__deprecated = Option(_input__.readBool())
case 80 =>
__weak = Option(_input__.readBool())
case 7994 =>
__uninterpretedOption += _root_.scalapb.LiteParser.readMessage(_input__, com.google.protobuf.descriptor.UninterpretedOption.defaultInstance)
case tag => _unknownFields__.parseField(tag, _input__)
}
}
com.google.protobuf.descriptor.FieldOptions(
ctype = __ctype,
packed = __packed,
jstype = __jstype,
`lazy` = __lazy,
deprecated = __deprecated,
weak = __weak,
uninterpretedOption = __uninterpretedOption.result(),
unknownFields = _unknownFields__.result()
)
}
def getCtype: com.google.protobuf.descriptor.FieldOptions.CType = ctype.getOrElse(com.google.protobuf.descriptor.FieldOptions.CType.STRING)
def clearCtype: FieldOptions = copy(ctype = _root_.scala.None)
def withCtype(__v: com.google.protobuf.descriptor.FieldOptions.CType): FieldOptions = copy(ctype = Option(__v))
def getPacked: _root_.scala.Boolean = packed.getOrElse(false)
def clearPacked: FieldOptions = copy(packed = _root_.scala.None)
def withPacked(__v: _root_.scala.Boolean): FieldOptions = copy(packed = Option(__v))
def getJstype: com.google.protobuf.descriptor.FieldOptions.JSType = jstype.getOrElse(com.google.protobuf.descriptor.FieldOptions.JSType.JS_NORMAL)
def clearJstype: FieldOptions = copy(jstype = _root_.scala.None)
def withJstype(__v: com.google.protobuf.descriptor.FieldOptions.JSType): FieldOptions = copy(jstype = Option(__v))
def getLazy: _root_.scala.Boolean = `lazy`.getOrElse(false)
def clearLazy: FieldOptions = copy(`lazy` = _root_.scala.None)
def withLazy(__v: _root_.scala.Boolean): FieldOptions = copy(`lazy` = Option(__v))
def getDeprecated: _root_.scala.Boolean = deprecated.getOrElse(false)
def clearDeprecated: FieldOptions = copy(deprecated = _root_.scala.None)
def withDeprecated(__v: _root_.scala.Boolean): FieldOptions = copy(deprecated = Option(__v))
def getWeak: _root_.scala.Boolean = weak.getOrElse(false)
def clearWeak: FieldOptions = copy(weak = _root_.scala.None)
def withWeak(__v: _root_.scala.Boolean): FieldOptions = copy(weak = Option(__v))
def clearUninterpretedOption = copy(uninterpretedOption = _root_.scala.Seq.empty)
def addUninterpretedOption(__vs: com.google.protobuf.descriptor.UninterpretedOption*): FieldOptions = addAllUninterpretedOption(__vs)
def addAllUninterpretedOption(__vs: Iterable[com.google.protobuf.descriptor.UninterpretedOption]): FieldOptions = copy(uninterpretedOption = uninterpretedOption ++ __vs)
def withUninterpretedOption(__v: _root_.scala.Seq[com.google.protobuf.descriptor.UninterpretedOption]): FieldOptions = copy(uninterpretedOption = __v)
def withUnknownFields(__v: _root_.scalapb.UnknownFieldSet) = copy(unknownFields = __v)
def discardUnknownFields = copy(unknownFields = _root_.scalapb.UnknownFieldSet.empty)
def getFieldByNumber(__fieldNumber: _root_.scala.Int): _root_.scala.Any = {
(__fieldNumber: @_root_.scala.unchecked) match {
case 1 => ctype.map(_.javaValueDescriptor).orNull
case 2 => packed.orNull
case 6 => jstype.map(_.javaValueDescriptor).orNull
case 5 => `lazy`.orNull
case 3 => deprecated.orNull
case 10 => weak.orNull
case 999 => uninterpretedOption
}
}
def getField(__field: _root_.scalapb.descriptors.FieldDescriptor): _root_.scalapb.descriptors.PValue = {
_root_.scala.Predef.require(__field.containingMessage eq companion.scalaDescriptor)
(__field.number: @_root_.scala.unchecked) match {
case 1 => ctype.map(__e => _root_.scalapb.descriptors.PEnum(__e.scalaValueDescriptor)).getOrElse(_root_.scalapb.descriptors.PEmpty)
case 2 => packed.map(_root_.scalapb.descriptors.PBoolean).getOrElse(_root_.scalapb.descriptors.PEmpty)
case 6 => jstype.map(__e => _root_.scalapb.descriptors.PEnum(__e.scalaValueDescriptor)).getOrElse(_root_.scalapb.descriptors.PEmpty)
case 5 => `lazy`.map(_root_.scalapb.descriptors.PBoolean).getOrElse(_root_.scalapb.descriptors.PEmpty)
case 3 => deprecated.map(_root_.scalapb.descriptors.PBoolean).getOrElse(_root_.scalapb.descriptors.PEmpty)
case 10 => weak.map(_root_.scalapb.descriptors.PBoolean).getOrElse(_root_.scalapb.descriptors.PEmpty)
case 999 => _root_.scalapb.descriptors.PRepeated(uninterpretedOption.iterator.map(_.toPMessage).toVector)
}
}
def toProtoString: _root_.scala.Predef.String = _root_.scalapb.TextFormat.printToUnicodeString(this)
def companion = com.google.protobuf.descriptor.FieldOptions
}
object FieldOptions extends scalapb.GeneratedMessageCompanion[com.google.protobuf.descriptor.FieldOptions] with scalapb.JavaProtoSupport[com.google.protobuf.descriptor.FieldOptions, com.google.protobuf.DescriptorProtos.FieldOptions] {
implicit def messageCompanion: scalapb.GeneratedMessageCompanion[com.google.protobuf.descriptor.FieldOptions] with scalapb.JavaProtoSupport[com.google.protobuf.descriptor.FieldOptions, com.google.protobuf.DescriptorProtos.FieldOptions] = this
def toJavaProto(scalaPbSource: com.google.protobuf.descriptor.FieldOptions): com.google.protobuf.DescriptorProtos.FieldOptions = {
val javaPbOut = com.google.protobuf.DescriptorProtos.FieldOptions.newBuilder
scalaPbSource.ctype.map(com.google.protobuf.descriptor.FieldOptions.CType.toJavaValue).foreach(javaPbOut.setCtype)
scalaPbSource.packed.foreach(javaPbOut.setPacked)
scalaPbSource.jstype.map(com.google.protobuf.descriptor.FieldOptions.JSType.toJavaValue).foreach(javaPbOut.setJstype)
scalaPbSource.`lazy`.foreach(javaPbOut.setLazy)
scalaPbSource.deprecated.foreach(javaPbOut.setDeprecated)
scalaPbSource.weak.foreach(javaPbOut.setWeak)
javaPbOut.addAllUninterpretedOption(scalaPbSource.uninterpretedOption.iterator.map(com.google.protobuf.descriptor.UninterpretedOption.toJavaProto).toIterable.asJava)
javaPbOut.build
}
def fromJavaProto(javaPbSource: com.google.protobuf.DescriptorProtos.FieldOptions): com.google.protobuf.descriptor.FieldOptions = com.google.protobuf.descriptor.FieldOptions(
ctype = if (javaPbSource.hasCtype) Some(com.google.protobuf.descriptor.FieldOptions.CType.fromJavaValue(javaPbSource.getCtype)) else _root_.scala.None,
packed = if (javaPbSource.hasPacked) Some(javaPbSource.getPacked.booleanValue) else _root_.scala.None,
jstype = if (javaPbSource.hasJstype) Some(com.google.protobuf.descriptor.FieldOptions.JSType.fromJavaValue(javaPbSource.getJstype)) else _root_.scala.None,
`lazy` = if (javaPbSource.hasLazy) Some(javaPbSource.getLazy.booleanValue) else _root_.scala.None,
deprecated = if (javaPbSource.hasDeprecated) Some(javaPbSource.getDeprecated.booleanValue) else _root_.scala.None,
weak = if (javaPbSource.hasWeak) Some(javaPbSource.getWeak.booleanValue) else _root_.scala.None,
uninterpretedOption = javaPbSource.getUninterpretedOptionList.asScala.iterator.map(com.google.protobuf.descriptor.UninterpretedOption.fromJavaProto).toSeq
)
def fromFieldsMap(__fieldsMap: scala.collection.immutable.Map[_root_.com.google.protobuf.Descriptors.FieldDescriptor, _root_.scala.Any]): com.google.protobuf.descriptor.FieldOptions = {
_root_.scala.Predef.require(__fieldsMap.keys.forall(_.getContainingType() == javaDescriptor), "FieldDescriptor does not match message type.")
val __fields = javaDescriptor.getFields
com.google.protobuf.descriptor.FieldOptions(
__fieldsMap.get(__fields.get(0)).asInstanceOf[_root_.scala.Option[_root_.com.google.protobuf.Descriptors.EnumValueDescriptor]].map(__e => com.google.protobuf.descriptor.FieldOptions.CType.fromValue(__e.getNumber)),
__fieldsMap.get(__fields.get(1)).asInstanceOf[_root_.scala.Option[_root_.scala.Boolean]],
__fieldsMap.get(__fields.get(2)).asInstanceOf[_root_.scala.Option[_root_.com.google.protobuf.Descriptors.EnumValueDescriptor]].map(__e => com.google.protobuf.descriptor.FieldOptions.JSType.fromValue(__e.getNumber)),
__fieldsMap.get(__fields.get(3)).asInstanceOf[_root_.scala.Option[_root_.scala.Boolean]],
__fieldsMap.get(__fields.get(4)).asInstanceOf[_root_.scala.Option[_root_.scala.Boolean]],
__fieldsMap.get(__fields.get(5)).asInstanceOf[_root_.scala.Option[_root_.scala.Boolean]],
__fieldsMap.getOrElse(__fields.get(6), Nil).asInstanceOf[_root_.scala.Seq[com.google.protobuf.descriptor.UninterpretedOption]]
)
}
implicit def messageReads: _root_.scalapb.descriptors.Reads[com.google.protobuf.descriptor.FieldOptions] = _root_.scalapb.descriptors.Reads{
case _root_.scalapb.descriptors.PMessage(__fieldsMap) =>
_root_.scala.Predef.require(__fieldsMap.keys.forall(_.containingMessage == scalaDescriptor), "FieldDescriptor does not match message type.")
com.google.protobuf.descriptor.FieldOptions(
__fieldsMap.get(scalaDescriptor.findFieldByNumber(1).get).flatMap(_.as[_root_.scala.Option[_root_.scalapb.descriptors.EnumValueDescriptor]]).map(__e => com.google.protobuf.descriptor.FieldOptions.CType.fromValue(__e.number)),
__fieldsMap.get(scalaDescriptor.findFieldByNumber(2).get).flatMap(_.as[_root_.scala.Option[_root_.scala.Boolean]]),
__fieldsMap.get(scalaDescriptor.findFieldByNumber(6).get).flatMap(_.as[_root_.scala.Option[_root_.scalapb.descriptors.EnumValueDescriptor]]).map(__e => com.google.protobuf.descriptor.FieldOptions.JSType.fromValue(__e.number)),
__fieldsMap.get(scalaDescriptor.findFieldByNumber(5).get).flatMap(_.as[_root_.scala.Option[_root_.scala.Boolean]]),
__fieldsMap.get(scalaDescriptor.findFieldByNumber(3).get).flatMap(_.as[_root_.scala.Option[_root_.scala.Boolean]]),
__fieldsMap.get(scalaDescriptor.findFieldByNumber(10).get).flatMap(_.as[_root_.scala.Option[_root_.scala.Boolean]]),
__fieldsMap.get(scalaDescriptor.findFieldByNumber(999).get).map(_.as[_root_.scala.Seq[com.google.protobuf.descriptor.UninterpretedOption]]).getOrElse(_root_.scala.Seq.empty)
)
case _ => throw new RuntimeException("Expected PMessage")
}
def javaDescriptor: _root_.com.google.protobuf.Descriptors.Descriptor = DescriptorProtoCompanion.javaDescriptor.getMessageTypes.get(12)
def scalaDescriptor: _root_.scalapb.descriptors.Descriptor = DescriptorProtoCompanion.scalaDescriptor.messages(12)
def messageCompanionForFieldNumber(__number: _root_.scala.Int): _root_.scalapb.GeneratedMessageCompanion[_] = {
var __out: _root_.scalapb.GeneratedMessageCompanion[_] = null
(__number: @_root_.scala.unchecked) match {
case 999 => __out = com.google.protobuf.descriptor.UninterpretedOption
}
__out
}
lazy val nestedMessagesCompanions: Seq[_root_.scalapb.GeneratedMessageCompanion[_ <: _root_.scalapb.GeneratedMessage]] = Seq.empty
def enumCompanionForFieldNumber(__fieldNumber: _root_.scala.Int): _root_.scalapb.GeneratedEnumCompanion[_] = {
(__fieldNumber: @_root_.scala.unchecked) match {
case 1 => com.google.protobuf.descriptor.FieldOptions.CType
case 6 => com.google.protobuf.descriptor.FieldOptions.JSType
}
}
lazy val defaultInstance = com.google.protobuf.descriptor.FieldOptions(
)
sealed trait CType extends _root_.scalapb.GeneratedEnum {
type EnumType = CType
def isString: _root_.scala.Boolean = false
def isCord: _root_.scala.Boolean = false
def isStringPiece: _root_.scala.Boolean = false
def companion: _root_.scalapb.GeneratedEnumCompanion[CType] = com.google.protobuf.descriptor.FieldOptions.CType
}
object CType extends _root_.scalapb.GeneratedEnumCompanion[CType] {
implicit def enumCompanion: _root_.scalapb.GeneratedEnumCompanion[CType] = this
/** Default mode.
*/
@SerialVersionUID(0L)
case object STRING extends CType {
val value = 0
val index = 0
val name = "STRING"
override def isString: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object CORD extends CType {
val value = 1
val index = 1
val name = "CORD"
override def isCord: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
case object STRING_PIECE extends CType {
val value = 2
val index = 2
val name = "STRING_PIECE"
override def isStringPiece: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
final case class Unrecognized(value: _root_.scala.Int) extends CType with _root_.scalapb.UnrecognizedEnum
lazy val values = scala.collection.immutable.Seq(STRING, CORD, STRING_PIECE)
def fromValue(value: _root_.scala.Int): CType = value match {
case 0 => STRING
case 1 => CORD
case 2 => STRING_PIECE
case __other => Unrecognized(__other)
}
def javaDescriptor: _root_.com.google.protobuf.Descriptors.EnumDescriptor = com.google.protobuf.descriptor.FieldOptions.javaDescriptor.getEnumTypes.get(0)
def scalaDescriptor: _root_.scalapb.descriptors.EnumDescriptor = com.google.protobuf.descriptor.FieldOptions.scalaDescriptor.enums(0)
def fromJavaValue(pbJavaSource: com.google.protobuf.DescriptorProtos.FieldOptions.CType): CType = fromValue(pbJavaSource.getNumber)
def toJavaValue(pbScalaSource: CType): com.google.protobuf.DescriptorProtos.FieldOptions.CType = {
_root_.scala.Predef.require(!pbScalaSource.isUnrecognized, "Unrecognized enum values can not be converted to Java")
com.google.protobuf.DescriptorProtos.FieldOptions.CType.forNumber(pbScalaSource.value)
}
}
sealed trait JSType extends _root_.scalapb.GeneratedEnum {
type EnumType = JSType
def isJsNormal: _root_.scala.Boolean = false
def isJsString: _root_.scala.Boolean = false
def isJsNumber: _root_.scala.Boolean = false
def companion: _root_.scalapb.GeneratedEnumCompanion[JSType] = com.google.protobuf.descriptor.FieldOptions.JSType
}
object JSType extends _root_.scalapb.GeneratedEnumCompanion[JSType] {
implicit def enumCompanion: _root_.scalapb.GeneratedEnumCompanion[JSType] = this
/** Use the default type.
*/
@SerialVersionUID(0L)
case object JS_NORMAL extends JSType {
val value = 0
val index = 0
val name = "JS_NORMAL"
override def isJsNormal: _root_.scala.Boolean = true
}
/** Use JavaScript strings.
*/
@SerialVersionUID(0L)
case object JS_STRING extends JSType {
val value = 1
val index = 1
val name = "JS_STRING"
override def isJsString: _root_.scala.Boolean = true
}
/** Use JavaScript numbers.
*/
@SerialVersionUID(0L)
case object JS_NUMBER extends JSType {
val value = 2
val index = 2
val name = "JS_NUMBER"
override def isJsNumber: _root_.scala.Boolean = true
}
@SerialVersionUID(0L)
final case class Unrecognized(value: _root_.scala.Int) extends JSType with _root_.scalapb.UnrecognizedEnum
lazy val values = scala.collection.immutable.Seq(JS_NORMAL, JS_STRING, JS_NUMBER)
def fromValue(value: _root_.scala.Int): JSType = value match {
case 0 => JS_NORMAL
case 1 => JS_STRING
case 2 => JS_NUMBER
case __other => Unrecognized(__other)
}
def javaDescriptor: _root_.com.google.protobuf.Descriptors.EnumDescriptor = com.google.protobuf.descriptor.FieldOptions.javaDescriptor.getEnumTypes.get(1)
def scalaDescriptor: _root_.scalapb.descriptors.EnumDescriptor = com.google.protobuf.descriptor.FieldOptions.scalaDescriptor.enums(1)
def fromJavaValue(pbJavaSource: com.google.protobuf.DescriptorProtos.FieldOptions.JSType): JSType = fromValue(pbJavaSource.getNumber)
def toJavaValue(pbScalaSource: JSType): com.google.protobuf.DescriptorProtos.FieldOptions.JSType = {
_root_.scala.Predef.require(!pbScalaSource.isUnrecognized, "Unrecognized enum values can not be converted to Java")
com.google.protobuf.DescriptorProtos.FieldOptions.JSType.forNumber(pbScalaSource.value)
}
}
implicit class FieldOptionsLens[UpperPB](_l: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.descriptor.FieldOptions]) extends _root_.scalapb.lenses.ObjectLens[UpperPB, com.google.protobuf.descriptor.FieldOptions](_l) {
def ctype: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.descriptor.FieldOptions.CType] = field(_.getCtype)((c_, f_) => c_.copy(ctype = Option(f_)))
def optionalCtype: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[com.google.protobuf.descriptor.FieldOptions.CType]] = field(_.ctype)((c_, f_) => c_.copy(ctype = f_))
def packed: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Boolean] = field(_.getPacked)((c_, f_) => c_.copy(packed = Option(f_)))
def optionalPacked: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[_root_.scala.Boolean]] = field(_.packed)((c_, f_) => c_.copy(packed = f_))
def jstype: _root_.scalapb.lenses.Lens[UpperPB, com.google.protobuf.descriptor.FieldOptions.JSType] = field(_.getJstype)((c_, f_) => c_.copy(jstype = Option(f_)))
def optionalJstype: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[com.google.protobuf.descriptor.FieldOptions.JSType]] = field(_.jstype)((c_, f_) => c_.copy(jstype = f_))
def `lazy`: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Boolean] = field(_.getLazy)((c_, f_) => c_.copy(`lazy` = Option(f_)))
def optionalLazy: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[_root_.scala.Boolean]] = field(_.`lazy`)((c_, f_) => c_.copy(`lazy` = f_))
def deprecated: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Boolean] = field(_.getDeprecated)((c_, f_) => c_.copy(deprecated = Option(f_)))
def optionalDeprecated: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[_root_.scala.Boolean]] = field(_.deprecated)((c_, f_) => c_.copy(deprecated = f_))
def weak: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Boolean] = field(_.getWeak)((c_, f_) => c_.copy(weak = Option(f_)))
def optionalWeak: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Option[_root_.scala.Boolean]] = field(_.weak)((c_, f_) => c_.copy(weak = f_))
def uninterpretedOption: _root_.scalapb.lenses.Lens[UpperPB, _root_.scala.Seq[com.google.protobuf.descriptor.UninterpretedOption]] = field(_.uninterpretedOption)((c_, f_) => c_.copy(uninterpretedOption = f_))
}
final val CTYPE_FIELD_NUMBER = 1
final val PACKED_FIELD_NUMBER = 2
final val JSTYPE_FIELD_NUMBER = 6
final val LAZY_FIELD_NUMBER = 5
final val DEPRECATED_FIELD_NUMBER = 3
final val WEAK_FIELD_NUMBER = 10
final val UNINTERPRETED_OPTION_FIELD_NUMBER = 999
def of(
ctype: _root_.scala.Option[com.google.protobuf.descriptor.FieldOptions.CType],
packed: _root_.scala.Option[_root_.scala.Boolean],
jstype: _root_.scala.Option[com.google.protobuf.descriptor.FieldOptions.JSType],
`lazy`: _root_.scala.Option[_root_.scala.Boolean],
deprecated: _root_.scala.Option[_root_.scala.Boolean],
weak: _root_.scala.Option[_root_.scala.Boolean],
uninterpretedOption: _root_.scala.Seq[com.google.protobuf.descriptor.UninterpretedOption],
unknownFields: _root_.scalapb.UnknownFieldSet
): _root_.com.google.protobuf.descriptor.FieldOptions = _root_.com.google.protobuf.descriptor.FieldOptions(
ctype,
packed,
jstype,
`lazy`,
deprecated,
weak,
uninterpretedOption,
unknownFields
)
}
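// Hedged usage sketch (added for illustration; not part of the generated ScalaPB file).
// It only exercises members defined above: the case-class constructor with named
// optional fields, the defaulted getters, and the with* copy methods. The object
// name itself is made up.
private object FieldOptionsUsageSketch {
  def demo(): com.google.protobuf.descriptor.FieldOptions = {
    val opts = com.google.protobuf.descriptor.FieldOptions(
      packed = _root_.scala.Some(true),
      deprecated = _root_.scala.Some(true)
    )
    // Unset optional fields fall back to their proto defaults through the getters.
    assert(opts.getCtype == com.google.protobuf.descriptor.FieldOptions.CType.STRING)
    assert(opts.getJstype == com.google.protobuf.descriptor.FieldOptions.JSType.JS_NORMAL)
    assert(!opts.getLazy)
    // with* methods return updated copies; the original instance is unchanged.
    opts.withLazy(true).withWeak(false)
  }
}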
| dotty-staging/ScalaPB | scalapb-runtime/jvm/src/main/scala/com/google/protobuf/descriptor/FieldOptions.scala | Scala | apache-2.0 | 28,022 |
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.collection
package mutable
import scala.annotation.{implicitNotFound, tailrec}
import scala.annotation.unchecked.uncheckedVariance
import scala.collection.generic.DefaultSerializationProxy
import scala.runtime.Statics
/** This class implements mutable maps using a hashtable with red-black trees in the buckets for good
 * worst-case performance on hash collisions. An `Ordering` is required for the key type. Equality
* as determined by the `Ordering` has to be consistent with `equals` and `hashCode`. Universal equality
* of numeric types is not supported (similar to `AnyRefMap`).
*
* @see [[http://docs.scala-lang.org/overviews/collections/concrete-mutable-collection-classes.html#hash-tables "Scala's Collection Library overview"]]
* section on `Hash Tables` for more information.
*
* @define Coll `mutable.CollisionProofHashMap`
* @define coll mutable collision-proof hash map
* @define mayNotTerminateInf
* @define willNotTerminateInf
*/
final class CollisionProofHashMap[K, V](initialCapacity: Int, loadFactor: Double)(implicit ordering: Ordering[K])
extends AbstractMap[K, V]
with MapOps[K, V, Map, CollisionProofHashMap[K, V]] //--
with StrictOptimizedIterableOps[(K, V), Iterable, CollisionProofHashMap[K, V]]
with StrictOptimizedMapOps[K, V, Map, CollisionProofHashMap[K, V]] { //--
private[this] final def sortedMapFactory: SortedMapFactory[CollisionProofHashMap] = CollisionProofHashMap
def this()(implicit ordering: Ordering[K]) = this(CollisionProofHashMap.defaultInitialCapacity, CollisionProofHashMap.defaultLoadFactor)(ordering)
import CollisionProofHashMap.Node
private[this] type RBNode = CollisionProofHashMap.RBNode[K, V]
private[this] type LLNode = CollisionProofHashMap.LLNode[K, V]
/** The actual hash table. */
private[this] var table: Array[Node] = new Array[Node](tableSizeFor(initialCapacity))
/** The next size value at which to resize (capacity * load factor). */
private[this] var threshold: Int = newThreshold(table.length)
private[this] var contentSize = 0
override def size: Int = contentSize
@`inline` private[this] final def computeHash(o: K): Int = {
val h = if(o.asInstanceOf[AnyRef] eq null) 0 else o.hashCode
h ^ (h >>> 16)
}
@`inline` private[this] final def index(hash: Int) = hash & (table.length - 1)
override protected def fromSpecific(coll: IterableOnce[(K, V)] @uncheckedVariance): CollisionProofHashMap[K, V] @uncheckedVariance = CollisionProofHashMap.from(coll)
override protected def newSpecificBuilder: Builder[(K, V), CollisionProofHashMap[K, V]] @uncheckedVariance = CollisionProofHashMap.newBuilder[K, V]
override def empty: CollisionProofHashMap[K, V] = new CollisionProofHashMap[K, V]
override def contains(key: K): Boolean = findNode(key) ne null
def get(key: K): Option[V] = findNode(key) match {
case null => None
case nd => Some(nd match {
case nd: LLNode => nd.value
case nd: RBNode => nd.value
})
}
@throws[NoSuchElementException]
override def apply(key: K): V = findNode(key) match {
case null => default(key)
case nd => nd match {
case nd: LLNode => nd.value
case nd: RBNode => nd.value
}
}
override def getOrElse[V1 >: V](key: K, default: => V1): V1 = {
val nd = findNode(key)
if (nd eq null) default else nd match {
case nd: LLNode => nd.value
case n => n.asInstanceOf[RBNode].value
}
}
@`inline` private[this] def findNode(elem: K): Node = {
val hash = computeHash(elem)
table(index(hash)) match {
case null => null
case n: LLNode => n.getNode(elem, hash)
case n => n.asInstanceOf[RBNode].getNode(elem, hash)
}
}
override def sizeHint(size: Int): Unit = {
val target = tableSizeFor(((size + 1).toDouble / loadFactor).toInt)
if(target > table.length) {
if(size == 0) reallocTable(target)
else growTable(target)
}
}
override def update(key: K, value: V): Unit = put0(key, value, false)
override def put(key: K, value: V): Option[V] = put0(key, value, true) match {
case null => None
case sm => sm
}
def addOne(elem: (K, V)): this.type = { put0(elem._1, elem._2, false); this }
@`inline` private[this] def put0(key: K, value: V, getOld: Boolean): Some[V] = {
if(contentSize + 1 >= threshold) growTable(table.length * 2)
val hash = computeHash(key)
val idx = index(hash)
put0(key, value, getOld, hash, idx)
}
private[this] def put0(key: K, value: V, getOld: Boolean, hash: Int, idx: Int): Some[V] = {
val res = table(idx) match {
case n: RBNode =>
insert(n, idx, key, hash, value)
case _old =>
val old: LLNode = _old.asInstanceOf[LLNode]
if(old eq null) {
table(idx) = new LLNode(key, hash, value, null)
} else {
var remaining = CollisionProofHashMap.treeifyThreshold
var prev: LLNode = null
var n = old
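          // Walk the hash-ordered collision chain looking for the key; if
          // treeifyThreshold entries are traversed without finding it, the bucket
          // is converted to a red-black tree and the insert is retried there.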
while((n ne null) && n.hash <= hash && remaining > 0) {
if(n.hash == hash && key == n.key) {
val old = n.value
n.value = value
return (if(getOld) Some(old) else null)
}
prev = n
n = n.next
remaining -= 1
}
if(remaining == 0) {
treeify(old, idx)
return put0(key, value, getOld, hash, idx)
}
if(prev eq null) table(idx) = new LLNode(key, hash, value, old)
else prev.next = new LLNode(key, hash, value, prev.next)
}
true
}
if(res) contentSize += 1
if(res) Some(null.asInstanceOf[V]) else null //TODO
}
private[this] def treeify(old: LLNode, idx: Int): Unit = {
table(idx) = CollisionProofHashMap.leaf(old.key, old.hash, old.value, red = false, null)
var n: LLNode = old.next
while(n ne null) {
val root = table(idx).asInstanceOf[RBNode]
insertIntoExisting(root, idx, n.key, n.hash, n.value, root)
n = n.next
}
}
override def addAll(xs: IterableOnce[(K, V)]): this.type = {
val k = xs.knownSize
if(k > 0) sizeHint(contentSize + k)
super.addAll(xs)
}
// returns the old value or Statics.pfMarker if not found
private[this] def remove0(elem: K) : Any = {
val hash = computeHash(elem)
var idx = index(hash)
table(idx) match {
case null => Statics.pfMarker
case t: RBNode =>
val v = delete(t, idx, elem, hash)
if(v.asInstanceOf[AnyRef] ne Statics.pfMarker) contentSize -= 1
v
case nd: LLNode if nd.hash == hash && nd.key == elem =>
// first element matches
table(idx) = nd.next
contentSize -= 1
nd.value
case nd: LLNode =>
// find an element that matches
var prev = nd
var next = nd.next
while((next ne null) && next.hash <= hash) {
if(next.hash == hash && next.key == elem) {
prev.next = next.next
contentSize -= 1
return next.value
}
prev = next
next = next.next
}
Statics.pfMarker
}
}
private[this] abstract class MapIterator[R] extends AbstractIterator[R] {
protected[this] def extract(node: LLNode): R
protected[this] def extract(node: RBNode): R
private[this] var i = 0
private[this] var node: Node = null
private[this] val len = table.length
def hasNext: Boolean = {
if(node ne null) true
else {
while(i < len) {
val n = table(i)
i += 1
n match {
case null =>
case n: RBNode =>
node = CollisionProofHashMap.minNodeNonNull(n)
return true
case n: LLNode =>
node = n
return true
}
}
false
}
}
def next(): R =
if(!hasNext) Iterator.empty.next()
else node match {
case n: RBNode =>
val r = extract(n)
          node = CollisionProofHashMap.successor(n)
r
case n: LLNode =>
val r = extract(n)
node = n.next
r
}
}
override def keysIterator: Iterator[K] = {
if (isEmpty) Iterator.empty
else new MapIterator[K] {
protected[this] def extract(node: LLNode) = node.key
protected[this] def extract(node: RBNode) = node.key
}
}
override def iterator: Iterator[(K, V)] = {
if (isEmpty) Iterator.empty
else new MapIterator[(K, V)] {
protected[this] def extract(node: LLNode) = (node.key, node.value)
protected[this] def extract(node: RBNode) = (node.key, node.value)
}
}
private[this] def growTable(newlen: Int) = {
var oldlen = table.length
table = java.util.Arrays.copyOf(table, newlen)
threshold = newThreshold(table.length)
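    // The table length always doubles; each doubling pass re-splits every old
    // bucket i into a low bucket (index i) and a high bucket (index i + oldlen).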
while(oldlen < newlen) {
var i = 0
while (i < oldlen) {
val old = table(i)
if(old ne null) splitBucket(old, i, i + oldlen, oldlen)
i += 1
}
oldlen *= 2
}
}
@`inline` private[this] def reallocTable(newlen: Int) = {
table = new Array(newlen)
threshold = newThreshold(table.length)
}
@`inline` private[this] def splitBucket(tree: Node, lowBucket: Int, highBucket: Int, mask: Int): Unit = tree match {
case t: LLNode => splitBucket(t, lowBucket, highBucket, mask)
case t: RBNode => splitBucket(t, lowBucket, highBucket, mask)
}
private[this] def splitBucket(list: LLNode, lowBucket: Int, highBucket: Int, mask: Int): Unit = {
val preLow: LLNode = new LLNode(null.asInstanceOf[K], 0, null.asInstanceOf[V], null)
val preHigh: LLNode = new LLNode(null.asInstanceOf[K], 0, null.asInstanceOf[V], null)
//preLow.next = null
//preHigh.next = null
var lastLow: LLNode = preLow
var lastHigh: LLNode = preHigh
var n = list
while(n ne null) {
val next = n.next
if((n.hash & mask) == 0) { // keep low
lastLow.next = n
lastLow = n
} else { // move to high
lastHigh.next = n
lastHigh = n
}
n = next
}
lastLow.next = null
if(list ne preLow.next) table(lowBucket) = preLow.next
if(preHigh.next ne null) {
table(highBucket) = preHigh.next
lastHigh.next = null
}
}
private[this] def splitBucket(tree: RBNode, lowBucket: Int, highBucket: Int, mask: Int): Unit = {
var lowCount, highCount = 0
tree.foreachNode((n: RBNode) => if((n.hash & mask) != 0) highCount += 1 else lowCount += 1)
if(highCount != 0) {
if(lowCount == 0) {
table(lowBucket) = null
table(highBucket) = tree
} else {
table(lowBucket) = fromNodes(new CollisionProofHashMap.RBNodesIterator(tree).filter(n => (n.hash & mask) == 0), lowCount)
table(highBucket) = fromNodes(new CollisionProofHashMap.RBNodesIterator(tree).filter(n => (n.hash & mask) != 0), highCount)
}
}
}
private[this] def tableSizeFor(capacity: Int) =
(Integer.highestOneBit((capacity-1).max(4))*2).min(1 << 30)
private[this] def newThreshold(size: Int) = (size.toDouble * loadFactor).toInt
override def clear(): Unit = {
java.util.Arrays.fill(table.asInstanceOf[Array[AnyRef]], null)
contentSize = 0
}
override def remove(key: K): Option[V] = {
val v = remove0(key)
if(v.asInstanceOf[AnyRef] eq Statics.pfMarker) None else Some(v.asInstanceOf[V])
}
def subtractOne(elem: K): this.type = { remove0(elem); this }
override def knownSize: Int = size
override def isEmpty: Boolean = size == 0
override def foreach[U](f: ((K, V)) => U): Unit = {
val len = table.length
var i = 0
while(i < len) {
val n = table(i)
if(n ne null) n match {
case n: LLNode => n.foreach(f)
case n: RBNode => n.foreach(f)
}
i += 1
}
}
override def foreachEntry[U](f: (K, V) => U): Unit = {
val len = table.length
var i = 0
while(i < len) {
val n = table(i)
if(n ne null) n match {
case n: LLNode => n.foreachEntry(f)
case n: RBNode => n.foreachEntry(f)
}
i += 1
}
}
protected[this] def writeReplace(): AnyRef = new DefaultSerializationProxy(new CollisionProofHashMap.DeserializationFactory[K, V](table.length, loadFactor, ordering), this)
override protected[this] def className = "CollisionProofHashMap"
override def getOrElseUpdate(key: K, defaultValue: => V): V = {
val hash = computeHash(key)
val idx = index(hash)
table(idx) match {
case null => ()
case n: LLNode =>
val nd = n.getNode(key, hash)
if(nd != null) return nd.value
case n =>
val nd = n.asInstanceOf[RBNode].getNode(key, hash)
if(nd != null) return nd.value
}
val table0 = table
val default = defaultValue
if(contentSize + 1 >= threshold) growTable(table.length * 2)
// Avoid recomputing index if the `defaultValue()` or new element hasn't triggered a table resize.
val newIdx = if (table0 eq table) idx else index(hash)
put0(key, default, false, hash, newIdx)
default
}
///////////////////// Overrides code from SortedMapOps
/** Builds a new `CollisionProofHashMap` by applying a function to all elements of this $coll.
*
* @param f the function to apply to each element.
* @return a new $coll resulting from applying the given function
* `f` to each element of this $coll and collecting the results.
*/
def map[K2, V2](f: ((K, V)) => (K2, V2))
(implicit @implicitNotFound(CollisionProofHashMap.ordMsg) ordering: Ordering[K2]): CollisionProofHashMap[K2, V2] =
sortedMapFactory.from(new View.Map[(K, V), (K2, V2)](toIterable, f))
/** Builds a new `CollisionProofHashMap` by applying a function to all elements of this $coll
* and using the elements of the resulting collections.
*
* @param f the function to apply to each element.
* @return a new $coll resulting from applying the given collection-valued function
* `f` to each element of this $coll and concatenating the results.
*/
def flatMap[K2, V2](f: ((K, V)) => IterableOnce[(K2, V2)])
(implicit @implicitNotFound(CollisionProofHashMap.ordMsg) ordering: Ordering[K2]): CollisionProofHashMap[K2, V2] =
sortedMapFactory.from(new View.FlatMap(toIterable, f))
/** Builds a new sorted map by applying a partial function to all elements of this $coll
* on which the function is defined.
*
* @param pf the partial function which filters and maps the $coll.
* @return a new $coll resulting from applying the given partial function
* `pf` to each element on which it is defined and collecting the results.
* The order of the elements is preserved.
*/
def collect[K2, V2](pf: PartialFunction[(K, V), (K2, V2)])
(implicit @implicitNotFound(CollisionProofHashMap.ordMsg) ordering: Ordering[K2]): CollisionProofHashMap[K2, V2] =
sortedMapFactory.from(new View.Collect(toIterable, pf))
override def concat[V2 >: V](suffix: IterableOnce[(K, V2)]): CollisionProofHashMap[K, V2] = sortedMapFactory.from(suffix match {
case it: Iterable[(K, V2)] => new View.Concat(toIterable, it)
case _ => iterator.concat(suffix.iterator)
})
/** Alias for `concat` */
@`inline` override final def ++ [V2 >: V](xs: IterableOnce[(K, V2)]): CollisionProofHashMap[K, V2] = concat(xs)
@deprecated("Consider requiring an immutable Map or fall back to Map.concat", "2.13.0")
override def + [V1 >: V](kv: (K, V1)): CollisionProofHashMap[K, V1] =
sortedMapFactory.from(new View.Appended(toIterable, kv))
@deprecated("Use ++ with an explicit collection argument instead of + with varargs", "2.13.0")
override def + [V1 >: V](elem1: (K, V1), elem2: (K, V1), elems: (K, V1)*): CollisionProofHashMap[K, V1] =
sortedMapFactory.from(new View.Concat(new View.Appended(new View.Appended(toIterable, elem1), elem2), elems))
///////////////////// RedBlackTree code derived from mutable.RedBlackTree:
@`inline` private[this] def isRed(node: RBNode) = (node ne null) && node.red
@`inline` private[this] def isBlack(node: RBNode) = (node eq null) || !node.red
@`inline` private[this] def compare(key: K, hash: Int, node: LLNode): Int = {
val i = hash - node.hash
if(i != 0) i else ordering.compare(key, node.key)
}
@`inline` private[this] def compare(key: K, hash: Int, node: RBNode): Int = {
/*val i = hash - node.hash
if(i != 0) i else*/ ordering.compare(key, node.key)
}
// ---- insertion ----
@tailrec private[this] final def insertIntoExisting(_root: RBNode, bucket: Int, key: K, hash: Int, value: V, x: RBNode): Boolean = {
val cmp = compare(key, hash, x)
if(cmp == 0) {
x.value = value
false
} else {
val next = if(cmp < 0) x.left else x.right
if(next eq null) {
val z = CollisionProofHashMap.leaf(key, hash, value, red = true, x)
if (cmp < 0) x.left = z else x.right = z
table(bucket) = fixAfterInsert(_root, z)
return true
}
else insertIntoExisting(_root, bucket, key, hash, value, next)
}
}
private[this] final def insert(tree: RBNode, bucket: Int, key: K, hash: Int, value: V): Boolean = {
if(tree eq null) {
table(bucket) = CollisionProofHashMap.leaf(key, hash, value, red = false, null)
true
} else insertIntoExisting(tree, bucket, key, hash, value, tree)
}
private[this] def fixAfterInsert(_root: RBNode, node: RBNode): RBNode = {
var root = _root
var z = node
while (isRed(z.parent)) {
if (z.parent eq z.parent.parent.left) {
val y = z.parent.parent.right
if (isRed(y)) {
z.parent.red = false
y.red = false
z.parent.parent.red = true
z = z.parent.parent
} else {
if (z eq z.parent.right) {
z = z.parent
root = rotateLeft(root, z)
}
z.parent.red = false
z.parent.parent.red = true
root = rotateRight(root, z.parent.parent)
}
} else { // symmetric cases
val y = z.parent.parent.left
if (isRed(y)) {
z.parent.red = false
y.red = false
z.parent.parent.red = true
z = z.parent.parent
} else {
if (z eq z.parent.left) {
z = z.parent
root = rotateRight(root, z)
}
z.parent.red = false
z.parent.parent.red = true
root = rotateLeft(root, z.parent.parent)
}
}
}
root.red = false
root
}
// ---- deletion ----
// returns the old value or Statics.pfMarker if not found
private[this] def delete(_root: RBNode, bucket: Int, key: K, hash: Int): Any = {
var root = _root
val z = root.getNode(key, hash: Int)
if (z ne null) {
val oldValue = z.value
var y = z
var yIsRed = y.red
var x: RBNode = null
var xParent: RBNode = null
if (z.left eq null) {
x = z.right
root = transplant(root, z, z.right)
xParent = z.parent
}
else if (z.right eq null) {
x = z.left
root = transplant(root, z, z.left)
xParent = z.parent
}
else {
y = CollisionProofHashMap.minNodeNonNull(z.right)
yIsRed = y.red
x = y.right
if (y.parent eq z) xParent = y
else {
xParent = y.parent
root = transplant(root, y, y.right)
y.right = z.right
y.right.parent = y
}
root = transplant(root, z, y)
y.left = z.left
y.left.parent = y
y.red = z.red
}
if (!yIsRed) root = fixAfterDelete(root, x, xParent)
if(root ne _root) table(bucket) = root
oldValue
} else Statics.pfMarker
}
private[this] def fixAfterDelete(_root: RBNode, node: RBNode, parent: RBNode): RBNode = {
var root = _root
var x = node
var xParent = parent
while ((x ne root) && isBlack(x)) {
if (x eq xParent.left) {
var w = xParent.right
// assert(w ne null)
if (w.red) {
w.red = false
xParent.red = true
root = rotateLeft(root, xParent)
w = xParent.right
}
if (isBlack(w.left) && isBlack(w.right)) {
w.red = true
x = xParent
} else {
if (isBlack(w.right)) {
w.left.red = false
w.red = true
root = rotateRight(root, w)
w = xParent.right
}
w.red = xParent.red
xParent.red = false
w.right.red = false
root = rotateLeft(root, xParent)
x = root
}
} else { // symmetric cases
var w = xParent.left
// assert(w ne null)
if (w.red) {
w.red = false
xParent.red = true
root = rotateRight(root, xParent)
w = xParent.left
}
if (isBlack(w.right) && isBlack(w.left)) {
w.red = true
x = xParent
} else {
if (isBlack(w.left)) {
w.right.red = false
w.red = true
root = rotateLeft(root, w)
w = xParent.left
}
w.red = xParent.red
xParent.red = false
w.left.red = false
root = rotateRight(root, xParent)
x = root
}
}
xParent = x.parent
}
if (x ne null) x.red = false
root
}
// ---- helpers ----
@`inline` private[this] def rotateLeft(_root: RBNode, x: RBNode): RBNode = {
var root = _root
val y = x.right
x.right = y.left
val xp = x.parent
if (y.left ne null) y.left.parent = x
y.parent = xp
if (xp eq null) root = y
else if (x eq xp.left) xp.left = y
else xp.right = y
y.left = x
x.parent = y
root
}
@`inline` private[this] def rotateRight(_root: RBNode, x: RBNode): RBNode = {
var root = _root
val y = x.left
x.left = y.right
val xp = x.parent
if (y.right ne null) y.right.parent = x
y.parent = xp
if (xp eq null) root = y
else if (x eq xp.right) xp.right = y
else xp.left = y
y.right = x
x.parent = y
root
}
/**
* Transplant the node `from` to the place of node `to`. This is done by setting `from` as a child of `to`'s previous
   * parent and setting `from`'s parent to `to`'s previous parent. The children of `from` are left unchanged.
*/
private[this] def transplant(_root: RBNode, to: RBNode, from: RBNode): RBNode = {
var root = _root
if (to.parent eq null) root = from
else if (to eq to.parent.left) to.parent.left = from
else to.parent.right = from
if (from ne null) from.parent = to.parent
root
}
// building
def fromNodes(xs: Iterator[Node], size: Int): RBNode = {
val maxUsedDepth = 32 - Integer.numberOfLeadingZeros(size) // maximum depth of non-leaf nodes
def f(level: Int, size: Int): RBNode = size match {
case 0 => null
case 1 =>
val nn = xs.next()
val (key, hash, value) = nn match {
case nn: LLNode => (nn.key, nn.hash, nn.value)
case nn: RBNode => (nn.key, nn.hash, nn.value)
}
new RBNode(key, hash, value, level == maxUsedDepth && level != 1, null, null, null)
case n =>
val leftSize = (size-1)/2
val left = f(level+1, leftSize)
val nn = xs.next()
val right = f(level+1, size-1-leftSize)
val (key, hash, value) = nn match {
case nn: LLNode => (nn.key, nn.hash, nn.value)
case nn: RBNode => (nn.key, nn.hash, nn.value)
}
val n = new RBNode(key, hash, value, false, left, right, null)
if(left ne null) left.parent = n
right.parent = n
n
}
f(1, size)
}
}
/**
* $factoryInfo
* @define Coll `mutable.CollisionProofHashMap`
* @define coll mutable collision-proof hash map
*/
@SerialVersionUID(3L)
object CollisionProofHashMap extends SortedMapFactory[CollisionProofHashMap] {
private[collection] final val ordMsg = "No implicit Ordering[${K2}] found to build a CollisionProofHashMap[${K2}, ${V2}]. You may want to upcast to a Map[${K}, ${V}] first by calling `unsorted`."
def from[K : Ordering, V](it: scala.collection.IterableOnce[(K, V)]): CollisionProofHashMap[K, V] = {
val k = it.knownSize
val cap = if(k > 0) ((k + 1).toDouble / defaultLoadFactor).toInt else defaultInitialCapacity
new CollisionProofHashMap[K, V](cap, defaultLoadFactor) ++= it
}
def empty[K : Ordering, V]: CollisionProofHashMap[K, V] = new CollisionProofHashMap[K, V]
def newBuilder[K : Ordering, V]: Builder[(K, V), CollisionProofHashMap[K, V]] = newBuilder(defaultInitialCapacity, defaultLoadFactor)
def newBuilder[K : Ordering, V](initialCapacity: Int, loadFactor: Double): Builder[(K, V), CollisionProofHashMap[K, V]] =
new GrowableBuilder[(K, V), CollisionProofHashMap[K, V]](new CollisionProofHashMap[K, V](initialCapacity, loadFactor)) {
override def sizeHint(size: Int) = elems.sizeHint(size)
}
/** The default load factor for the hash table */
final def defaultLoadFactor: Double = 0.75
/** The default initial capacity for the hash table */
final def defaultInitialCapacity: Int = 16
@SerialVersionUID(3L)
private final class DeserializationFactory[K, V](val tableLength: Int, val loadFactor: Double, val ordering: Ordering[K]) extends Factory[(K, V), CollisionProofHashMap[K, V]] with Serializable {
def fromSpecific(it: IterableOnce[(K, V)]): CollisionProofHashMap[K, V] = new CollisionProofHashMap[K, V](tableLength, loadFactor)(ordering) ++= it
def newBuilder: Builder[(K, V), CollisionProofHashMap[K, V]] = CollisionProofHashMap.newBuilder(tableLength, loadFactor)(ordering)
}
@`inline` private def compare[K, V](key: K, hash: Int, node: LLNode[K, V])(implicit ord: Ordering[K]): Int = {
val i = hash - node.hash
if(i != 0) i else ord.compare(key, node.key)
}
@`inline` private def compare[K, V](key: K, hash: Int, node: RBNode[K, V])(implicit ord: Ordering[K]): Int = {
/*val i = hash - node.hash
if(i != 0) i else*/ ord.compare(key, node.key)
}
private final val treeifyThreshold = 8
// Superclass for RBNode and LLNode to help the JIT with optimizing instance checks, but no shared common fields.
// Keeping calls monomorphic where possible and dispatching manually where needed is faster.
sealed abstract class Node
/////////////////////////// Red-Black Tree Node
final class RBNode[K, V](var key: K, var hash: Int, var value: V, var red: Boolean, var left: RBNode[K, V], var right: RBNode[K, V], var parent: RBNode[K, V]) extends Node {
override def toString: String = "RBNode(" + key + ", " + hash + ", " + value + ", " + red + ", " + left + ", " + right + ")"
@tailrec def getNode(k: K, h: Int)(implicit ord: Ordering[K]): RBNode[K, V] = {
val cmp = compare(k, h, this)
if (cmp < 0) {
if(left ne null) left.getNode(k, h) else null
} else if (cmp > 0) {
if(right ne null) right.getNode(k, h) else null
} else this
}
def foreach[U](f: ((K, V)) => U): Unit = {
if(left ne null) left.foreach(f)
f((key, value))
if(right ne null) right.foreach(f)
}
def foreachEntry[U](f: (K, V) => U): Unit = {
if(left ne null) left.foreachEntry(f)
f(key, value)
if(right ne null) right.foreachEntry(f)
}
def foreachNode[U](f: RBNode[K, V] => U): Unit = {
if(left ne null) left.foreachNode(f)
f(this)
if(right ne null) right.foreachNode(f)
}
}
@`inline` private def leaf[A, B](key: A, hash: Int, value: B, red: Boolean, parent: RBNode[A, B]): RBNode[A, B] =
new RBNode(key, hash, value, red, null, null, parent)
@tailrec private def minNodeNonNull[A, B](node: RBNode[A, B]): RBNode[A, B] =
if (node.left eq null) node else minNodeNonNull(node.left)
/**
* Returns the node that follows `node` in an in-order tree traversal. If `node` has the maximum key (and is,
* therefore, the last node), this method returns `null`.
*/
private def successor[A, B](node: RBNode[A, B]): RBNode[A, B] = {
if (node.right ne null) minNodeNonNull(node.right)
else {
var x = node
var y = x.parent
while ((y ne null) && (x eq y.right)) {
x = y
y = y.parent
}
y
}
}
private final class RBNodesIterator[A, B](tree: RBNode[A, B])(implicit ord: Ordering[A]) extends AbstractIterator[RBNode[A, B]] {
private[this] var nextNode: RBNode[A, B] = if(tree eq null) null else minNodeNonNull(tree)
def hasNext: Boolean = nextNode ne null
@throws[NoSuchElementException]
def next(): RBNode[A, B] = nextNode match {
case null => Iterator.empty.next()
case node =>
nextNode = successor(node)
node
}
}
/////////////////////////// Linked List Node
private final class LLNode[K, V](var key: K, var hash: Int, var value: V, var next: LLNode[K, V]) extends Node {
override def toString = s"LLNode($key, $value, $hash) -> $next"
private[this] def eq(a: Any, b: Any): Boolean =
if(a.asInstanceOf[AnyRef] eq null) b.asInstanceOf[AnyRef] eq null else a.asInstanceOf[AnyRef].equals(b)
@tailrec def getNode(k: K, h: Int)(implicit ord: Ordering[K]): LLNode[K, V] = {
if(h == hash && eq(k, key) /*ord.compare(k, key) == 0*/) this
else if((next eq null) || (hash > h)) null
else next.getNode(k, h)
}
@tailrec def foreach[U](f: ((K, V)) => U): Unit = {
f((key, value))
if(next ne null) next.foreach(f)
}
@tailrec def foreachEntry[U](f: (K, V) => U): Unit = {
f(key, value)
if(next ne null) next.foreachEntry(f)
}
@tailrec def foreachNode[U](f: LLNode[K, V] => U): Unit = {
f(this)
if(next ne null) next.foreachNode(f)
}
}
}
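// Hedged usage sketch (added for illustration; not part of the original file).
// CollisionProofHashMap is used like any other mutable Map, but its key type
// needs an Ordering so colliding buckets can be kept as red-black trees.
private[mutable] object CollisionProofHashMapUsageSketch {
  def demo(): Unit = {
    val m = CollisionProofHashMap.empty[String, Int]
    m("one") = 1                         // update via apply-style assignment
    m += ("two" -> 2)                    // Growable API
    assert(m.get("one").contains(1))
    assert(m.getOrElseUpdate("three", 3) == 3)
    m -= "two"                           // Shrinkable API
    assert(!m.contains("two") && m.size == 2)
  }
}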
| martijnhoekstra/scala | src/library/scala/collection/mutable/CollisionProofHashMap.scala | Scala | apache-2.0 | 30,330 |
package monocle.bench
import java.util.concurrent.TimeUnit
import monocle.Lens
import monocle.bench.BenchModel._
import monocle.bench.input.Nested0Input
import org.openjdk.jmh.annotations._
import cats.instances.option._
@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.MICROSECONDS)
@State(Scope.Benchmark)
class MonocleLensBench extends LensBench {
val _n1 = Lens[Nested0, Nested1](_.n)(n2 => n1 => n1.copy(n = n2))
val _n2 = Lens[Nested1, Nested2](_.n)(n3 => n2 => n2.copy(n = n3))
val _n3 = Lens[Nested2, Nested3](_.n)(n4 => n3 => n3.copy(n = n4))
val _n4 = Lens[Nested3, Nested4](_.n)(n5 => n4 => n4.copy(n = n5))
val _n5 = Lens[Nested4, Nested5](_.n)(n6 => n5 => n5.copy(n = n6))
val _n6 = Lens[Nested5, Nested6](_.n)(n7 => n6 => n6.copy(n = n7))
val _n0_i = Lens[Nested0, Int](_.i)(i => n => n.copy(i = i))
val _n3_i = Lens[Nested3, Int](_.i)(i => n => n.copy(i = i))
val _n6_i = Lens[Nested6, Int](_.i)(i => n => n.copy(i = i))
val _n0Ton3I = _n1 andThen _n2 andThen _n3 andThen _n3_i
val _n0Ton6I = _n1 andThen _n2 andThen _n3 andThen _n4 andThen _n5 andThen _n6 andThen _n6_i
@Benchmark def lensGet0(in: Nested0Input) = _n0_i.get(in.n0)
@Benchmark def lensGet3(in: Nested0Input) = _n0Ton3I.get(in.n0)
@Benchmark def lensGet6(in: Nested0Input) = _n0Ton6I.get(in.n0)
@Benchmark def lensSet0(in: Nested0Input) = _n0_i.replace(43)(in.n0)
@Benchmark def lensSet3(in: Nested0Input) = _n0Ton3I.replace(43)(in.n0)
@Benchmark def lensSet6(in: Nested0Input) = _n0Ton6I.replace(43)(in.n0)
@Benchmark def lensModify0(in: Nested0Input) = _n0_i.modify(_ + 1)(in.n0)
@Benchmark def lensModify3(in: Nested0Input) = _n0Ton3I.modify(_ + 1)(in.n0)
@Benchmark def lensModify6(in: Nested0Input) = _n0Ton6I.modify(_ + 1)(in.n0)
@Benchmark def lensModifyF0(in: Nested0Input) = _n0_i.modifyF(safeDivide(_, 2))(in.n0)
@Benchmark def lensModifyF3(in: Nested0Input) = _n0Ton3I.modifyF(safeDivide(_, 2))(in.n0)
@Benchmark def lensModifyF6(in: Nested0Input) = _n0Ton6I.modifyF(safeDivide(_, 2))(in.n0)
}
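// Hedged comparison sketch (illustrative only, not benchmarked): the composed
// lens _n0Ton3I above performs the same deep update as manually chaining
// case-class copy calls; the object name below is made up.
private object ManualCopySketch {
  def setN3I(n0: Nested0, value: Int): Nested0 =
    n0.copy(n = n0.n.copy(n = n0.n.n.copy(n = n0.n.n.n.copy(i = value))))
}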
| julien-truffaut/Monocle | bench/src/main/scala/monocle/bench/MonocleLensBench.scala | Scala | mit | 2,080 |
/*
# Licensed Materials - Property of IBM
# Copyright IBM Corp. 2015
*/
package com.ibm.streamsx.topology.test.scala
import com.ibm.streamsx.topology.Topology
import com.ibm.streamsx.topology.TStream
import com.ibm.streamsx.topology.streams.StringStreams
import com.ibm.streamsx.topology.test.TestTopology
import org.junit.Test;
import scala.collection.JavaConversions._
import com.ibm.streamsx.topology.functions.FunctionConversions._
class ScalaWindowAPITest extends TestTopology {
/**
* Test that an aggregate can be written.
*/
@Test
def testScalaObjectsAggregate() {
val topology = new Topology("EmmaCharactersAge")
val emma = List(new Person("Emma", 20), new Person("George", 37), new Person("Harriet", 17), new Person("Jane", 20))
var peopleStream = topology.constants(emma).asType(classOf[Person])
var peopleWindow = peopleStream.last(3);
// Have to use explicit Java list!
var oldestPerson = peopleWindow.aggregate((people: java.util.List[Person]) => {
var oldest: Person = new Person("", -1)
people.foreach { person =>
if (person.age > oldest.age)
oldest = person
}
oldest
})
var strings = StringStreams.toString(oldestPerson)
completeAndValidate(strings, 10, "Emma is 20", "George is 37", "George is 37", "George is 37");
}
}
| IBMStreams/streamsx.topology | test/scala/src/com/ibm/streamsx/topology/test/scala/ScalaWindowTest.scala | Scala | apache-2.0 | 1,345 |
/*
* Copyright 2014 JHC Systems Limited
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package sqlest.ast
import org.scalatest._
import org.scalatest.matchers._
import sqlest._
class SelectSpec extends FlatSpec with Matchers {
class MyTable(alias: Option[String]) extends Table("mytable", alias) {
val col1 = column[Int]("col1")
val col2 = column[Int]("col2")
}
object MyTable extends MyTable(None)
"simplest select possible" should "produce the right sql" in {
val query = select.from(MyTable)
query.columns should equal(List(MyTable.col1, MyTable.col2))
query.from should equal(MyTable)
query.where should equal(None)
query.order should equal(Nil)
query.limit should equal(None)
query.offset should equal(None)
}
"select with explicit column list" should "produce the right sql" in {
val query = select(MyTable.col1).from(MyTable)
query.columns should equal(List(MyTable.col1))
query.from should equal(MyTable)
query.where should equal(None)
query.order should equal(Nil)
query.limit should equal(None)
query.offset should equal(None)
}
"repeated calls to select.where()" should "append new filters" in {
val query = select.from(MyTable).where(MyTable.col1 > 1).where(MyTable.col1 < 2)
query.where should equal(Some(MyTable.col1 > 1 && MyTable.col1 < 2))
}
"repeated calls to select.order()" should "append new orders" in {
val query = select.from(MyTable)
query.columns should equal(List(MyTable.col1, MyTable.col2))
}
"repeated calls to select.page()" should "override the old values" in {
val query = select.from(MyTable).page(1, 10).page(2, 20)
query.limit should equal(Some(20))
query.offset should equal(Some(40))
}
}
| andrewjskatz/sqlest | src/test/scala/sqlest/ast/SelectSpec.scala | Scala | apache-2.0 | 2,269 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.process.analytic
import com.typesafe.scalalogging.LazyLogging
import org.geotools.data.Query
import org.geotools.data.collection.ListFeatureCollection
import org.geotools.data.simple.{SimpleFeatureCollection, SimpleFeatureSource}
import org.geotools.feature.simple.{SimpleFeatureBuilder, SimpleFeatureTypeBuilder}
import org.geotools.feature.visitor.{AbstractCalcResult, CalcResult}
import org.geotools.process.factory.{DescribeParameter, DescribeProcess, DescribeResult}
import org.locationtech.geomesa.filter.factory.FastFilterFactory
import org.locationtech.geomesa.index.conf.QueryHints
import org.locationtech.geomesa.index.geotools.GeoMesaFeatureCollection
import org.locationtech.geomesa.index.index.attribute.AttributeIndex
import org.locationtech.geomesa.index.iterators.StatsScan
import org.locationtech.geomesa.index.process.GeoMesaProcessVisitor
import org.locationtech.geomesa.process.GeoMesaProcess
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.locationtech.geomesa.utils.geotools.RichAttributeDescriptors.RichAttributeDescriptor
import org.locationtech.geomesa.utils.stats.{EnumerationStat, Stat}
import org.opengis.feature.Feature
import org.opengis.feature.`type`.AttributeDescriptor
import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType}
import org.opengis.filter.Filter
import org.opengis.util.ProgressListener
import scala.collection.JavaConverters._
import scala.collection.mutable
@DescribeProcess(title = "Geomesa Unique",
    description = "Finds unique attribute values, optimized for GeoMesa")
class UniqueProcess extends GeoMesaProcess with LazyLogging {
@DescribeResult(name = "result",
description = "Feature collection with an attribute containing the unique values")
def execute(
@DescribeParameter(name = "features", description = "Input feature collection")
features: SimpleFeatureCollection,
@DescribeParameter(name = "attribute", description = "Attribute whose unique values are extracted")
attribute: String,
@DescribeParameter(name = "filter", min = 0, description = "The filter to apply to the feature collection")
filter: Filter,
@DescribeParameter(name = "histogram", min = 0, description = "Create a histogram of attribute values")
histogram: java.lang.Boolean,
@DescribeParameter(name = "sort", min = 0, description = "Sort results - allowed to be ASC or DESC")
sort: String,
@DescribeParameter(name = "sortByCount", min = 0, description = "Sort by histogram counts instead of attribute values")
sortByCount: java.lang.Boolean,
progressListener: ProgressListener): SimpleFeatureCollection = {
val attributeDescriptor = features
.getSchema
.getAttributeDescriptors
.asScala
.find(_.getLocalName == attribute)
.getOrElse(throw new IllegalArgumentException(s"Attribute $attribute does not exist in feature schema."))
val hist = Option(histogram).exists(_.booleanValue)
val sortBy = Option(sortByCount).exists(_.booleanValue)
val visitor = new AttributeVisitor(features, attributeDescriptor, Option(filter).filter(_ != Filter.INCLUDE), hist)
GeoMesaFeatureCollection.visit(features, visitor, progressListener)
val uniqueValues = visitor.getResult.attributes
val binding = attributeDescriptor.getType.getBinding
UniqueProcess.createReturnCollection(uniqueValues, binding, hist, Option(sort), sortBy)
}
}
object UniqueProcess {
val SftName = "UniqueValue"
val AttributeValue = "value"
val AttributeCount = "count"
/**
* Duplicates output format from geotools UniqueProcess
*
* @param uniqueValues values
* @param binding value binding
* @param histogram include counts or just values
* @param sort sort
* @param sortByCount sort by count or by value
* @return
*/
def createReturnCollection(uniqueValues: Map[Any, Long],
binding: Class[_],
histogram: Boolean,
sort: Option[String],
sortByCount: Boolean): SimpleFeatureCollection = {
val ft = createUniqueSft(binding, histogram)
val sfb = new SimpleFeatureBuilder(ft)
val result = new ListFeatureCollection(ft)
// if sorting was requested do it here, otherwise return results in iterator order
val sorted = sort.map { s =>
if (sortByCount) {
val ordering = if (s.equalsIgnoreCase("desc")) Ordering[Long].reverse else Ordering[Long]
uniqueValues.iterator.toList.sortBy(_._2)(ordering)
} else {
val ordering = if (s.equalsIgnoreCase("desc")) Ordering[String].reverse else Ordering[String]
uniqueValues.iterator.toList.sortBy(_._1.toString)(ordering)
}
}.getOrElse(uniqueValues.iterator)
// histogram includes extra 'count' attribute
val addFn = if (histogram) (key: Any, value: Long) => {
sfb.add(key)
sfb.add(value)
result.add(sfb.buildFeature(null))
} else (key: Any, _: Long) => {
sfb.add(key)
result.add(sfb.buildFeature(null))
}
sorted.foreach { case (key, value) => addFn(key, value) }
result
}
/**
* Based on geotools UniqueProcess simple feature type
*
* @param binding class of attribute
* @param histogram return counts or not
* @return
*/
def createUniqueSft(binding: Class[_], histogram: Boolean): SimpleFeatureType = {
val sftb = new SimpleFeatureTypeBuilder
sftb.add(AttributeValue, binding)
if (histogram) {
// histogram includes extra 'count' attribute
sftb.add(AttributeCount, classOf[java.lang.Long])
}
sftb.setName(SftName)
sftb.buildFeatureType
}
}
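// A minimal usage sketch of the helpers above, assuming a plain map of values to counts;
// the values, binding and sort options are illustrative only:
//
//   val counts: Map[Any, Long] = Map("a" -> 2L, "b" -> 1L)
//   val fc = UniqueProcess.createReturnCollection(
//     counts, classOf[String], histogram = true, sort = Some("DESC"), sortByCount = true)
//   // fc is a SimpleFeatureCollection of 'UniqueValue' features with 'value' and 'count' attributes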
/**
* Visitor that tracks unique attribute values and counts
*
* @param features features to evaluate
* @param attributeDescriptor attribute to evaluate
* @param filter optional filter to apply to features before evaluating
* @param histogram return counts or not
*/
class AttributeVisitor(val features: SimpleFeatureCollection,
val attributeDescriptor: AttributeDescriptor,
val filter: Option[Filter],
histogram: Boolean) extends GeoMesaProcessVisitor with LazyLogging {
private val attribute = attributeDescriptor.getLocalName
private val uniqueValues = mutable.Map.empty[Any, Long].withDefaultValue(0)
private var attributeIdx: Int = -1
// normally handled in our query planner, but we are going to use the filter directly here
private lazy val manualFilter = filter.map(FastFilterFactory.optimize(features.getSchema, _))
private def getAttribute[T](f: SimpleFeature): T = {
if (attributeIdx == -1) {
attributeIdx = f.getType.indexOf(attribute)
}
f.getAttribute(attributeIdx).asInstanceOf[T]
}
private def addSingularValue(f: SimpleFeature): Unit = {
val value = getAttribute[AnyRef](f)
if (value != null) {
uniqueValues(value) += 1
}
}
private def addMultiValue(f: SimpleFeature): Unit = {
val values = getAttribute[java.util.Collection[_]](f)
if (values != null) {
      values.asScala.foreach(uniqueValues(_) += 1)
}
}
private val addValue: SimpleFeature => Unit =
if (attributeDescriptor.isList) { addMultiValue } else { addSingularValue }
// non-optimized visit
override def visit(feature: Feature): Unit = {
val f = feature.asInstanceOf[SimpleFeature]
if (manualFilter.forall(_.evaluate(f))) {
addValue(f)
}
}
override def getResult: AttributeResult = new AttributeResult(uniqueValues.toMap)
override def execute(source: SimpleFeatureSource, query: Query): Unit = {
import org.locationtech.geomesa.filter.mergeFilters
logger.debug(s"Running Geomesa histogram process on source type ${source.getClass.getName}")
// combine filters from this process and any input collection
filter.foreach(f => query.setFilter(mergeFilters(query.getFilter, f)))
val sft = source.getSchema
val enumerated = if (attributeDescriptor.isMultiValued) {
// stats don't support list types
uniqueV5(source, query)
} else {
// TODO if !histogram, we could write a new unique skipping iterator
query.getHints.put(QueryHints.STATS_STRING, Stat.Enumeration(attribute))
query.getHints.put(QueryHints.ENCODE_STATS, java.lang.Boolean.TRUE)
// execute the query
val reader = source.getFeatures(query).features()
val enumeration = try {
// stats should always return exactly one result, even if there are no features in the table
val encoded = reader.next.getAttribute(0).asInstanceOf[String]
StatsScan.decodeStat(sft)(encoded).asInstanceOf[EnumerationStat[Any]]
} finally {
reader.close()
}
enumeration.frequencies
}
uniqueValues.clear()
enumerated.foreach { case (k, v) => uniqueValues.put(k, v) }
}
private def uniqueV5(source: SimpleFeatureSource, query: Query): Iterable[(Any, Long)] = {
// only return the attribute we are interested in to reduce bandwidth
query.setPropertyNames(Seq(attribute).asJava)
// if there is no filter, try to force an attribute scan - should be fastest query
if (query.getFilter == Filter.INCLUDE && AttributeIndex.indexed(features.getSchema, attribute)) {
query.setFilter(AttributeVisitor.getIncludeAttributeFilter(attribute))
}
// execute the query
SelfClosingIterator(source.getFeatures(query).features()).foreach(addValue)
uniqueValues.toMap
}
}
object AttributeVisitor {
import org.locationtech.geomesa.filter.ff
/**
* Returns a filter that is equivalent to Filter.INCLUDE, but against the attribute index.
*
* @param attribute attribute to query
   * @return a filter equivalent to Filter.INCLUDE that can be satisfied by the attribute index
*/
def getIncludeAttributeFilter(attribute: String): Filter =
ff.greaterOrEqual(ff.property(attribute), ff.literal(""))
}
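// For string-typed attributes the filter above amounts to the CQL predicate "attribute >= ''",
// which matches any non-null value but, unlike Filter.INCLUDE, can be answered from the
// attribute index. A hypothetical call:
//   val f = AttributeVisitor.getIncludeAttributeFilter("name") // roughly: name >= ''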
/**
* Result class to hold the attribute histogram
*
* @param attributes result
*/
class AttributeResult(val attributes: Map[Any, Long]) extends AbstractCalcResult {
override def getValue: java.util.Map[Any, Long] = attributes.asJava
override def isCompatible(targetResults: CalcResult): Boolean =
targetResults.isInstanceOf[AttributeResult] || targetResults == CalcResult.NULL_RESULT
override def merge(resultsToAdd: CalcResult): CalcResult = {
if (!isCompatible(resultsToAdd)) {
throw new IllegalArgumentException("Parameter is not a compatible type")
} else if (resultsToAdd == CalcResult.NULL_RESULT) {
this
} else if (resultsToAdd.isInstanceOf[AttributeResult]) {
val toAdd = resultsToAdd.getValue.asInstanceOf[Map[Any, Long]]
      // note: ++ on maps keeps the second map's value when a key exists in both maps, otherwise the first map's value
val merged = attributes ++ toAdd.map {
case (attr, count) => attr -> (count + attributes.getOrElse(attr, 0L))
}
new AttributeResult(merged)
} else {
throw new IllegalArgumentException(
"The CalcResults claim to be compatible, but the appropriate merge method has not been implemented.")
}
}
} | locationtech/geomesa | geomesa-process/geomesa-process-vector/src/main/scala/org/locationtech/geomesa/process/analytic/UniqueProcess.scala | Scala | apache-2.0 | 11,675 |
package protocol.body
import java.nio.ByteBuffer
import com.github.romangrebennikov.columnize.protocol._
import com.github.romangrebennikov.columnize.protocol.Frame._
import com.github.romangrebennikov.columnize.protocol.Opcode._
import com.github.romangrebennikov.columnize.protocol.body._
import org.apache.commons.codec.binary.Hex
import org.scalatest.{WordSpecLike, MustMatchers}
/**
* Created by shutty on 10/13/15.
*/
class SupportedBodySpec extends WordSpecLike with MustMatchers {
"SupportedBody" must {
"deser" in {
val body = ByteBuffer.wrap(Hex.decodeHex("8400000006000000340002000b434f4d5052455353494f4e00020006736e6170707900036c7a34000b43514c5f56455253494f4e00010005332e332e30".toCharArray))
assert(Frame(body) == Frame(Response,4,Flags(false,false,false,false),0,SUPPORTED,52,SupportedBody(Map("COMPRESSION" -> List("snappy", "lz4"), "CQL_VERSION" -> List("3.3.0")))))
}
}
}
| shuttie/columnize | src/test/scala/protocol/body/SupportedBodySpec.scala | Scala | bsd-2-clause | 918 |
import sbt._
import Keys._
object Commons {
val appVersion = "2.1.0-SNAPSHOT"
val appOrganization = "space.spacelift"
val settings: Seq[Def.Setting[_]] = Seq(
version := appVersion,
organization := appOrganization,
scalaVersion := "2.12.1",
scalacOptions ++= Seq("-feature", "-language:postfixOps", "-unchecked", "-deprecation"),
resolvers += "Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/",
publishMavenStyle := true,
publishTo := {
val nexus = "https://oss.sonatype.org/"
if (isSnapshot.value)
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
},
publishArtifact in Test := false,
pomIncludeRepository := { _ => false },
pomExtra := (
<url>http://spacelift.space</url>
<licenses>
<license>
<name>MIT License</name>
<url>http://www.opensource.org/licenses/mit-license.php</url>
<distribution>repo</distribution>
</license>
</licenses>
<scm>
<url>https://github.com/spacelift/akka-mq-proxies</url>
<connection>scm:git:[email protected]:spacelift/akka-mq-proxies.git</connection>
</scm>
<developers>
<developer>
<id>drheart</id>
<name>Dustin R. Heart</name>
<url>http://spacelift.space</url>
</developer>
</developers>)
)
}
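// A hypothetical subproject definition reusing the shared settings above; the project name and
// directory are illustrative only:
//
//   lazy val proxies = (project in file("akka-mq-proxies"))
//     .settings(Commons.settings: _*)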
| Spacelift/akka-mq-proxies | project/commons.scala | Scala | mit | 1,467 |
/*
* Copyright (C) 2015 Cotiviti Labs ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.signalcollect.triplerush
import scala.concurrent.Future
import scala.concurrent.duration.Duration
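/**
 * Minimal interface to a TripleRush query engine: triple patterns can be added in bulk or as
 * encoded s/p/o identifiers, and a triple-pattern query returns an iterator of encoded result
 * bindings.
 */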
trait QueryEngine {
def addTriplePatterns(i: Iterator[TriplePattern], timeout: Duration): Unit
def addEncodedTriple(s: Int, p: Int, o: Int, timeout: Duration): Unit
def resultIteratorForQuery(query: Seq[TriplePattern]): Iterator[Array[Int]]
def close(): Unit
} | uzh/triplerush | src/main/scala/com/signalcollect/triplerush/QueryEngine.scala | Scala | apache-2.0 | 1,013 |
/*
* Copyright 2009-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.json4s
import org.specs2.mutable.Specification
import text.Document
object NativeXmlExamples extends XmlExamples[Document]("Native") with native.JsonMethods
object JacksonXmlExamples extends XmlExamples[JValue]("Jackson") with jackson.JsonMethods
abstract class XmlExamples[T](mod: String) extends Specification with JsonMethods[T] {
import JsonDSL._
import Xml._
import scala.xml.{Group, Text}
(mod+" XML Examples") should {
"Basic conversion example" in {
val json = toJson(users1)
compact(render(json)) must_== """{"users":{"count":"2","user":[{"disabled":"true","id":"1","name":"Harry"},{"id":"2","name":"David","nickname":"Dave"}]}}"""
}
"Conversion transformation example 1" in {
val json = toJson(users1).transformField {
case JField("id", JString(s)) => JField("id", JInt(s.toInt))
}
compact(render(json)) must_== """{"users":{"count":"2","user":[{"disabled":"true","id":1,"name":"Harry"},{"id":2,"name":"David","nickname":"Dave"}]}}"""
}
"Conversion transformation example 2" in {
val json = toJson(users2).transformField {
case JField("id", JString(s)) => JField("id", JInt(s.toInt))
case JField("user", x: JObject) => JField("user", JArray(x :: Nil))
}
compact(render(json)) must_== """{"users":{"user":[{"id":1,"name":"Harry"}]}}"""
}
"Primitive array example" in {
val xml = <chars><char>a</char><char>b</char><char>c</char></chars>
compact(render(toJson(xml))) must_== """{"chars":{"char":["a","b","c"]}}"""
}
"Lotto example which flattens number arrays into encoded string arrays" in {
def flattenArray(nums: List[JValue]) = JString(nums.map(_.values).mkString(","))
val printer = new scala.xml.PrettyPrinter(100,2)
val lotto: JObject = LottoExample.json
val xml = toXml(lotto.transformField {
case JField("winning-numbers", JArray(nums)) => JField("winning-numbers", flattenArray(nums))
case JField("numbers", JArray(nums)) => JField("numbers", flattenArray(nums))
})
printer.format(xml(0)) must_== printer.format(
<lotto>
<id>5</id>
<winning-numbers>2,45,34,23,7,5,3</winning-numbers>
<winners>
<winner-id>23</winner-id>
<numbers>2,45,34,23,3,5</numbers>
</winners>
<winners>
<winner-id>54</winner-id>
<numbers>52,3,12,11,18,22</numbers>
</winners>
</lotto>)
}
"Band example with namespaces" in {
val json = toJson(band)
json mustEqual parse("""{
"b:band":{
"name":"The Fall",
"genre":"rock",
"influence":"",
"playlists":{
"playlist":[{
"name":"hits",
"song":["Hit the north","Victoria"]
},{
"name":"mid 80s",
"song":["Eat your self fitter","My new house"]
}]
}
}
}""")
}
"Grouped text example" in {
val json = toJson(groupedText)
compact(render(json)) mustEqual """{"g":{"group":"foobar","url":"http://example.com/test"}}"""
}
"Example with multiple attributes, multiple nested elements " in {
val a1 = attrToObject("stats", "count", s => JInt(s.s.toInt)) _
val a2 = attrToObject("messages", "href", identity) _
val json = a1(a2(toJson(messageXml1)))
(json diff parse(expected1)) mustEqual Diff(JNothing, JNothing, JNothing)
}
"Example with one attribute, one nested element " in {
val a = attrToObject("stats", "count", s => JInt(s.s.toInt)) _
compact(render(a(toJson(messageXml2)))) mustEqual expected2
compact(render(a(toJson(messageXml3)))) mustEqual expected2
}
}
val messageXml1 =
<message expiry_date="20091126" text="text" word="ant" self="me">
<stats count="0"></stats>
<messages href="https://domain.com/message/ant"></messages>
</message>
val expected1 = """{"message":{"expiry_date":"20091126","word":"ant","text":"text","self":"me","stats":{"count":0},"messages":{"href":"https://domain.com/message/ant"}}}"""
val messageXml2 =
<message expiry_date="20091126">
<stats count="0"></stats>
</message>
val messageXml3 = <message expiry_date="20091126"><stats count="0"></stats></message>
val expected2 = """{"message":{"expiry_date":"20091126","stats":{"count":0}}}"""
val band =
<b:band>
<name>The Fall</name>
<genre>rock</genre>
<influence/>
<playlists>
<playlist name="hits">
<song>Hit the north</song>
<song>Victoria</song>
</playlist>
<playlist name="mid 80s">
<song>Eat your self fitter</song>
<song>My new house</song>
</playlist>
</playlists>
</b:band>
val users1 =
<users count="2">
<user disabled="true">
<id>1</id>
<name>Harry</name>
</user>
<user>
<id>2</id>
<name nickname="Dave">David</name>
</user>
</users>
val users2 =
<users>
<user>
<id>1</id>
<name>Harry</name>
</user>
</users>
val url = "test"
val groupedText =
<g>
<group>{ Group(List(Text("foo"), Text("bar"))) }</group>
<url>http://example.com/{ url }</url>
</g>
// Examples by Jonathan Ferguson. See http://groups.google.com/group/liftweb/browse_thread/thread/f3bdfcaf1c21c615/c311a91e44f9c178?show_docid=c311a91e44f9c178
// This example shows how to use a transformation function to correct JSON generated by
  // default conversion rules. The transformation function 'attrToObject' makes the following conversion:
// { ..., "fieldName": "", "attrName":"someValue", ...} ->
// { ..., "fieldName": { "attrName": f("someValue") }, ... }
def attrToObject(fieldName: String, attrName: String, f: JString => JValue)(json: JValue) = json.transformField {
case (n, v: JString) if n == attrName => JField(fieldName, JObject(JField(n, f(v)) :: Nil))
case (n, JString("")) if n == fieldName => JField(n, JNothing)
} transformField {
case (n, x: JObject) if n == attrName => JField(fieldName, x)
}
}
| geggo98/json4s | tests/src/test/scala/org/json4s/XmlExamples.scala | Scala | apache-2.0 | 6,859 |