code | repo_name | path | language | license | size |
---|---|---|---|---|---|
package org.broadinstitute.dsde.test.api.orch
import java.util.UUID
import org.broadinstitute.dsde.workbench.auth.AuthToken
import org.broadinstitute.dsde.workbench.config.{Credentials, UserPool}
import org.broadinstitute.dsde.workbench.fixture.{BillingFixtures, WorkspaceFixtures}
import org.broadinstitute.dsde.workbench.service.{AclEntry, Orchestration, RestException, WorkspaceAccessLevel}
import org.broadinstitute.dsde.workbench.service.OrchestrationModel._
import org.scalatest.{FreeSpec, Matchers}
import spray.json._
import DefaultJsonProtocol._
import org.scalatest.time.{Minutes, Seconds, Span}
import org.scalatest.concurrent.Eventually
class WorkspaceApiSpec extends FreeSpec with Matchers with Eventually
with BillingFixtures with WorkspaceFixtures {
val owner: Credentials = UserPool.chooseProjectOwner
val ownerAuthToken: AuthToken = owner.makeAuthToken()
"Orchestration" - {
"should return a storage cost estimate" - {
"for the owner of a workspace" in {
implicit val token: AuthToken = ownerAuthToken
withCleanBillingProject(owner) { projectName =>
withWorkspace(projectName, prependUUID("owner-storage-cost")) { workspaceName =>
Orchestration.workspaces.waitForBucketReadAccess(projectName, workspaceName)
val storageCostEstimate = Orchestration.workspaces.getStorageCostEstimate(projectName, workspaceName).parseJson.convertTo[StorageCostEstimate]
storageCostEstimate.estimate should be ("$0.00")
}
}
}
"for writers of a workspace" in {
val writer = UserPool.chooseStudent
withCleanBillingProject(owner) { projectName =>
withWorkspace(projectName, prependUUID("writer-storage-cost"), aclEntries = List(AclEntry(writer.email, WorkspaceAccessLevel.Writer))) { workspaceName =>
implicit val writerAuthToken: AuthToken = writer.makeAuthToken
Orchestration.workspaces.waitForBucketReadAccess(projectName, workspaceName)
Orchestration.workspaces.getStorageCostEstimate(projectName, workspaceName)
.parseJson.convertTo[StorageCostEstimate]
.estimate should be("$0.00")
} (ownerAuthToken)
}
}
}
"should not return a storage cost estimate" - {
"for readers of a workspace" in {
val reader = UserPool.chooseStudent
withCleanBillingProject(owner) { projectName =>
withWorkspace(projectName, prependUUID("reader-storage-cost"), aclEntries = List(AclEntry(reader.email, WorkspaceAccessLevel.Reader))) { workspaceName =>
implicit val readerAuthToken: AuthToken = reader.makeAuthToken
Orchestration.workspaces.waitForBucketReadAccess(projectName, workspaceName)
val exception = intercept[RestException] {
Orchestration.workspaces.getStorageCostEstimate(projectName, workspaceName)
}
val exceptionMessage = exception.message.parseJson.asJsObject.fields("message").convertTo[String]
exceptionMessage should include(s"insufficient permissions to perform operation on $projectName/$workspaceName")
} (ownerAuthToken)
}
}
}
}
private def prependUUID(suffix: String): String = s"${UUID.randomUUID().toString}-$suffix"
}
| broadinstitute/firecloud-orchestration | automation/src/test/scala/org/broadinstitute/dsde/test/api/orch/WorkspaceApiSpec.scala | Scala | bsd-3-clause | 3,315 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.codegen
import java.io.ObjectInputStream
import com.esotericsoftware.kryo.{Kryo, KryoSerializable}
import com.esotericsoftware.kryo.io.{Input, Output}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.Utils
/**
* Inherits some default implementation for Java from `Ordering[Row]`
*/
class BaseOrdering extends Ordering[InternalRow] {
def compare(a: InternalRow, b: InternalRow): Int = {
throw new UnsupportedOperationException
}
}
/**
* Generates bytecode for an [[Ordering]] of rows for a given set of expressions.
*/
object GenerateOrdering extends CodeGenerator[Seq[SortOrder], Ordering[InternalRow]] with Logging {
protected def canonicalize(in: Seq[SortOrder]): Seq[SortOrder] =
in.map(ExpressionCanonicalizer.execute(_).asInstanceOf[SortOrder])
protected def bind(in: Seq[SortOrder], inputSchema: Seq[Attribute]): Seq[SortOrder] =
in.map(BindReferences.bindReference(_, inputSchema))
/**
* Creates a code gen ordering for sorting this schema, in ascending order.
*/
def create(schema: StructType): BaseOrdering = {
create(schema.zipWithIndex.map { case (field, ordinal) =>
SortOrder(BoundReference(ordinal, field.dataType, nullable = true), Ascending)
})
}
/**
* Generates the code for comparing a struct type according to its natural ordering
* (i.e. ascending order by field 1, then field 2, ..., then field n).
*/
def genComparisons(ctx: CodegenContext, schema: StructType): String = {
val ordering = schema.fields.map(_.dataType).zipWithIndex.map {
case(dt, index) => SortOrder(BoundReference(index, dt, nullable = true), Ascending)
}
genComparisons(ctx, ordering)
}
/**
* Creates the variables for ordering based on the given order.
*/
private def createOrderKeys(
ctx: CodegenContext,
row: String,
ordering: Seq[SortOrder]): Seq[ExprCode] = {
ctx.INPUT_ROW = row
// to use INPUT_ROW we must make sure currentVars is null
ctx.currentVars = null
ordering.map(_.child.genCode(ctx))
}
/**
* Generates the code for ordering based on the given order.
*/
def genComparisons(ctx: CodegenContext, ordering: Seq[SortOrder]): String = {
val oldInputRow = ctx.INPUT_ROW
val oldCurrentVars = ctx.currentVars
val rowAKeys = createOrderKeys(ctx, "a", ordering)
val rowBKeys = createOrderKeys(ctx, "b", ordering)
val comparisons = rowAKeys.zip(rowBKeys).zipWithIndex.map { case ((l, r), i) =>
val dt = ordering(i).child.dataType
val asc = ordering(i).isAscending
val nullOrdering = ordering(i).nullOrdering
val lRetValue = nullOrdering match {
case NullsFirst => "-1"
case NullsLast => "1"
}
val rRetValue = nullOrdering match {
case NullsFirst => "1"
case NullsLast => "-1"
}
s"""
|${l.code}
|${r.code}
|if (${l.isNull} && ${r.isNull}) {
| // Nothing
|} else if (${l.isNull}) {
| return $lRetValue;
|} else if (${r.isNull}) {
| return $rRetValue;
|} else {
| int comp = ${ctx.genComp(dt, l.value, r.value)};
| if (comp != 0) {
| return ${if (asc) "comp" else "-comp"};
| }
|}
""".stripMargin
}
val code = ctx.splitExpressions(
expressions = comparisons,
funcName = "compare",
arguments = Seq(("InternalRow", "a"), ("InternalRow", "b")),
returnType = "int",
makeSplitFunction = { body =>
s"""
|$body
|return 0;
""".stripMargin
},
foldFunctions = { funCalls =>
funCalls.zipWithIndex.map { case (funCall, i) =>
val comp = ctx.freshName("comp")
s"""
|int $comp = $funCall;
|if ($comp != 0) {
| return $comp;
|}
""".stripMargin
}.mkString
})
ctx.currentVars = oldCurrentVars
ctx.INPUT_ROW = oldInputRow
code
}
protected def create(ordering: Seq[SortOrder]): BaseOrdering = {
val ctx = newCodeGenContext()
val comparisons = genComparisons(ctx, ordering)
val codeBody = s"""
public SpecificOrdering generate(Object[] references) {
return new SpecificOrdering(references);
}
class SpecificOrdering extends ${classOf[BaseOrdering].getName} {
private Object[] references;
${ctx.declareMutableStates()}
public SpecificOrdering(Object[] references) {
this.references = references;
${ctx.initMutableStates()}
}
public int compare(InternalRow a, InternalRow b) {
$comparisons
return 0;
}
${ctx.declareAddedFunctions()}
}"""
val code = CodeFormatter.stripOverlappingComments(
new CodeAndComment(codeBody, ctx.getPlaceHolderToComments()))
logDebug(s"Generated Ordering by ${ordering.mkString(",")}:\n${CodeFormatter.format(code)}")
val (clazz, _) = CodeGenerator.compile(code)
clazz.generate(ctx.references.toArray).asInstanceOf[BaseOrdering]
}
}
/**
* A lazily generated row ordering comparator.
*/
class LazilyGeneratedOrdering(val ordering: Seq[SortOrder])
extends Ordering[InternalRow] with KryoSerializable {
def this(ordering: Seq[SortOrder], inputSchema: Seq[Attribute]) =
this(ordering.map(BindReferences.bindReference(_, inputSchema)))
@transient
private[this] var generatedOrdering = GenerateOrdering.generate(ordering)
def compare(a: InternalRow, b: InternalRow): Int = {
generatedOrdering.compare(a, b)
}
private def readObject(in: ObjectInputStream): Unit = Utils.tryOrIOException {
in.defaultReadObject()
generatedOrdering = GenerateOrdering.generate(ordering)
}
override def write(kryo: Kryo, out: Output): Unit = Utils.tryOrIOException {
kryo.writeObject(out, ordering.toArray)
}
override def read(kryo: Kryo, in: Input): Unit = Utils.tryOrIOException {
generatedOrdering = GenerateOrdering.generate(kryo.readObject(in, classOf[Array[SortOrder]]))
}
}
object LazilyGeneratedOrdering {
/**
* Creates a [[LazilyGeneratedOrdering]] for the given schema, in natural ascending order.
*/
def forSchema(schema: StructType): LazilyGeneratedOrdering = {
new LazilyGeneratedOrdering(schema.zipWithIndex.map {
case (field, ordinal) =>
SortOrder(BoundReference(ordinal, field.dataType, nullable = true), Ascending)
})
}
}
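/**
 * A minimal usage sketch, not part of the original Spark source: it only illustrates the
 * call shape of the public entry points above. The two-field schema is an assumption made
 * up for the example; `GenerateOrdering.create(schema)` compiles a [[BaseOrdering]] for the
 * schema's natural ascending order, while `LazilyGeneratedOrdering.forSchema(schema)` wraps
 * the same comparator in a serializable form.
 */
private[codegen] object GenerateOrderingUsageSketch {
  import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}

  // Hypothetical schema: (id INT, name STRING).
  val exampleSchema: StructType = StructType(Seq(
    StructField("id", IntegerType),
    StructField("name", StringType)))

  // Code-generated comparator usable directly on InternalRow instances.
  def eagerOrdering: BaseOrdering = GenerateOrdering.create(exampleSchema)

  // Serializable variant that regenerates the comparator after deserialization.
  def lazyOrdering: LazilyGeneratedOrdering = LazilyGeneratedOrdering.forSchema(exampleSchema)
}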
| guoxiaolongzte/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/codegen/GenerateOrdering.scala | Scala | apache-2.0 | 7,497 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.database.test
import java.util.concurrent.TimeoutException
import scala.collection.mutable.ListBuffer
import scala.concurrent.Await
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.duration.Duration
import scala.concurrent.duration.DurationInt
import scala.language.postfixOps
import scala.util.Failure
import scala.util.Success
import scala.util.Try
import spray.json._
import spray.json.DefaultJsonProtocol._
import whisk.common.TransactionCounter
import whisk.common.TransactionId
import whisk.core.database.ArtifactStore
import whisk.core.database.CouchDbRestClient
import whisk.core.database.DocumentFactory
import whisk.core.database.NoDocumentException
import whisk.core.database.StaleParameter
import whisk.core.entity._
import whisk.core.entity.types.AuthStore
import whisk.core.entity.types.EntityStore
/**
* WARNING: the put/get/del operations in this trait operate directly on the datastore,
* and in the presence of a cache, there will be inconsistencies if one mixes these
* operations with those that flow through the cache. To mitigate this, use unique asset
* names in tests, and defer all cleanup to the end of a test suite.
*/
trait DbUtils extends TransactionCounter {
implicit val dbOpTimeout = 15 seconds
override val numberOfInstances = 1
override val instanceOrdinal = 0
val instance = InstanceId(instanceOrdinal)
val docsToDelete = ListBuffer[(ArtifactStore[_], DocInfo)]()
case class RetryOp() extends Throwable
/**
* Retry an operation 'step()' awaiting its result up to 'timeout'.
* Attempt the operation up to 'count' times. The future from the
* step is not aborted --- TODO fix this.
*/
def retry[T](step: () => Future[T], timeout: Duration, count: Int = 5): Try[T] = {
val future = step()
if (count > 0) try {
val result = Await.result(future, timeout)
Success(result)
} catch {
case n: NoDocumentException =>
println("no document exception, retrying")
retry(step, timeout, count - 1)
case RetryOp() =>
println("condition not met, retrying")
retry(step, timeout, count - 1)
case t: TimeoutException =>
println("timed out, retrying")
retry(step, timeout, count - 1)
case t: Throwable =>
println(s"unexpected failure $t")
Failure(t)
} else Failure(new NoDocumentException("timed out"))
}
/**
* Wait on a view to update with documents added to namespace. This uses retry above,
* where the step performs a direct db query to retrieve the view and check the count
* matches the given value.
*/
def waitOnView[Au](db: ArtifactStore[Au], namespace: EntityName, count: Int, view: View)(
implicit context: ExecutionContext,
transid: TransactionId,
timeout: Duration) = {
val success = retry(
() => {
val startKey = List(namespace.asString)
val endKey = List(namespace.asString, WhiskEntityQueries.TOP)
db.query(view.name, startKey, endKey, 0, 0, false, true, false, StaleParameter.No) map { l =>
if (l.length != count) {
throw RetryOp()
} else true
}
},
timeout)
assert(success.isSuccess, "wait aborted")
}
/**
* Wait on a view specific to a collection to update with documents added to that collection in namespace.
* This uses retry above, where the step performs a collection-specific view query using the collection
* factory. The result count from the view is checked against the given value.
*/
def waitOnView(db: EntityStore, factory: WhiskEntityQueries[_], namespace: EntityPath, count: Int)(
implicit context: ExecutionContext,
transid: TransactionId,
timeout: Duration) = {
val success = retry(() => {
factory.listCollectionInNamespace(db, namespace, 0, 0) map { l =>
if (l.left.get.length < count) {
throw RetryOp()
} else true
}
}, timeout)
assert(success.isSuccess, "wait aborted")
}
/**
* Wait on view for the authentication table. This is like the other waitOnViews but
* specific to the WhiskAuth records.
*/
def waitOnView(db: AuthStore, authkey: AuthKey, count: Int)(implicit context: ExecutionContext,
transid: TransactionId,
timeout: Duration) = {
val success = retry(() => {
Identity.list(db, List(authkey.uuid.asString, authkey.key.asString)) map { l =>
if (l.length != count) {
throw RetryOp()
} else true
}
}, timeout)
assert(success.isSuccess, "wait aborted after: " + timeout + ": " + success)
}
/**
* Wait on view using the CouchDbRestClient. This is like the other waitOnViews.
*/
def waitOnView(db: CouchDbRestClient, designDocName: String, viewName: String, count: Int)(
implicit context: ExecutionContext,
timeout: Duration) = {
val success = retry(
() => {
db.executeView(designDocName, viewName)().map {
case Right(doc) =>
val length = doc.fields("rows").convertTo[List[JsObject]].length
if (length != count) {
throw RetryOp()
} else true
case Left(_) =>
throw RetryOp()
}
},
timeout)
assert(success.isSuccess, "wait aborted after: " + timeout + ": " + success)
}
/**
* Puts document 'w' in the datastore and adds it to the gc queue so it is deleted after the test completes.
*/
def put[A, Au >: A](db: ArtifactStore[Au], w: A, garbageCollect: Boolean = true)(
implicit transid: TransactionId,
timeout: Duration = 10 seconds): DocInfo = {
val docFuture = db.put(w)
val doc = Await.result(docFuture, timeout)
assert(doc != null)
if (garbageCollect) docsToDelete += ((db, doc))
doc
}
/**
* Gets a document by id from the datastore and adds it to the gc queue so it is deleted after the test completes.
*/
def get[A, Au >: A](db: ArtifactStore[Au], docid: DocId, factory: DocumentFactory[A], garbageCollect: Boolean = true)(
implicit transid: TransactionId,
timeout: Duration = 10 seconds,
ma: Manifest[A]): A = {
val docFuture = factory.get(db, docid)
val doc = Await.result(docFuture, timeout)
assert(doc != null)
if (garbageCollect) docsToDelete += ((db, docid.asDocInfo))
doc
}
/**
* Deletes document by id from datastore.
*/
def del[A <: WhiskDocument, Au >: A](db: ArtifactStore[Au], docid: DocId, factory: DocumentFactory[A])(
implicit transid: TransactionId,
timeout: Duration = 10 seconds,
ma: Manifest[A]) = {
val docFuture = factory.get(db, docid)
val doc = Await.result(docFuture, timeout)
assert(doc != null)
Await.result(db.del(doc.docinfo), timeout)
}
/**
* Deletes document by id and revision from datastore.
*/
def delete(db: ArtifactStore[_], docinfo: DocInfo)(implicit transid: TransactionId,
timeout: Duration = 10 seconds) = {
Await.result(db.del(docinfo), timeout)
}
/**
* Puts a document 'entity' into the datastore, then does a get to retrieve it and confirms the two are identical.
*/
def putGetCheck[A, Au >: A](db: ArtifactStore[Au], entity: A, factory: DocumentFactory[A], gc: Boolean = true)(
implicit transid: TransactionId,
timeout: Duration = 10 seconds,
ma: Manifest[A]): (DocInfo, A) = {
val doc = put(db, entity, gc)
assert(doc != null && doc.id.asString != null && doc.rev.asString != null)
val future = factory.get(db, doc.id, doc.rev)
val dbEntity = Await.result(future, timeout)
assert(dbEntity != null)
assert(dbEntity == entity)
(doc, dbEntity)
}
/**
* Deletes all documents added to gc queue.
*/
def cleanup()(implicit timeout: Duration = 10 seconds) = {
docsToDelete.map { e =>
Try(Await.result(e._1.del(e._2)(TransactionId.testing), timeout))
}
docsToDelete.clear()
}
}
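/**
 * A minimal usage sketch, not part of the original test sources: it shows the intended call
 * pattern for `retry` above, i.e. a step that throws `RetryOp` until an eventually consistent
 * read catches up. The `countViewRows` function is hypothetical.
 */
object DbUtilsRetrySketch {
  import scala.concurrent.ExecutionContext.Implicits.global

  def waitForRows(utils: DbUtils, countViewRows: () => Future[Int], expected: Int): Unit = {
    val outcome = utils.retry(() =>
      countViewRows().map { n =>
        // keep retrying until the view reports the expected number of rows
        if (n != expected) throw utils.RetryOp() else n
      }, 15.seconds)
    assert(outcome.isSuccess, s"view did not reach $expected rows in time")
  }
}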
| duynguyen/incubator-openwhisk | tests/src/test/scala/whisk/core/database/test/DbUtils.scala | Scala | apache-2.0 | 8,867 |
package services.core
import akka.actor.{Actor, Props}
import services.core.UtilityConversion._
/**
* Created by yabumoto on 2014/09/21.
*/
class ServiceActor(implicit val setting: ServiceSetting) extends Actor {
def receive = {
case action: ServiceAction => {
val result = action.execute.block
sender() ! result
}
}
}
object ServiceActor {
def props(implicit setting: ServiceSetting): Props = {
Props(new ServiceActor)
}
}
| czyabumoto/OpenAdsAPI_ | src/main/scala/services/core/ServiceActor.scala | Scala | cc0-1.0 | 463 |
/*
* Copyright (c) 2021, salesforce.com, inc.
* All rights reserved.
* SPDX-License-Identifier: BSD-3-Clause
* For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause
*/
package com.krux.hyperion.common
import scala.language.implicitConversions
import com.krux.hyperion.aws.{ AdpDataPipelineAbstractObject, AdpRef }
/**
* The base trait of krux data pipeline objects.
*/
trait PipelineObject extends Ordered[PipelineObject] {
type Self <: PipelineObject
implicit def uniquePipelineId2String(id: PipelineObjectId): String = id.toString
implicit def seq2Option[A](anySeq: Seq[A]): Option[Seq[A]] = seqToOption(anySeq)(x => x)
def id: PipelineObjectId
def objects: Iterable[PipelineObject]
def serialize: AdpDataPipelineAbstractObject
def ref: AdpRef[AdpDataPipelineAbstractObject]
def seqToOption[A, B](anySeq: Seq[A])(transform: A => B) = {
anySeq match {
case Seq() => None
case other => Option(anySeq.map(transform))
}
}
def compare(that: PipelineObject): Int = id.compare(that.id)
}
| realstraw/hyperion | core/src/main/scala/com/krux/hyperion/common/PipelineObject.scala | Scala | bsd-3-clause | 1,097 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import kafka.utils._
import kafka.message._
import org.scalatest.junit.JUnitSuite
import org.junit._
import org.junit.Assert._
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import org.junit.runners.Parameterized.Parameters
import org.apache.kafka.common.record.{CompressionType, MemoryRecords, SimpleRecord}
import org.apache.kafka.common.utils.Utils
import java.util.{Collection, Properties}
import kafka.server.{BrokerTopicStats, LogDirFailureChannel}
import scala.collection.JavaConverters._
@RunWith(value = classOf[Parameterized])
class BrokerCompressionTest(messageCompression: String, brokerCompression: String) extends JUnitSuite {
val tmpDir = TestUtils.tempDir()
val logDir = TestUtils.randomPartitionLogDir(tmpDir)
val time = new MockTime(0, 0)
val logConfig = LogConfig()
@After
def tearDown() {
Utils.delete(tmpDir)
}
/**
* Test broker-side compression configuration
*/
@Test
def testBrokerSideCompression() {
val messageCompressionCode = CompressionCodec.getCompressionCodec(messageCompression)
val logProps = new Properties()
logProps.put(LogConfig.CompressionTypeProp, brokerCompression)
/* configure broker-side compression */
val log = Log(logDir, LogConfig(logProps), logStartOffset = 0L, recoveryPoint = 0L, scheduler = time.scheduler,
time = time, brokerTopicStats = new BrokerTopicStats, maxProducerIdExpirationMs = 60 * 60 * 1000,
producerIdExpirationCheckIntervalMs = LogManager.ProducerIdExpirationCheckIntervalMs,
logDirFailureChannel = new LogDirFailureChannel(10))
/* append two messages */
log.appendAsLeader(MemoryRecords.withRecords(CompressionType.forId(messageCompressionCode.codec), 0,
new SimpleRecord("hello".getBytes), new SimpleRecord("there".getBytes)), leaderEpoch = 0)
def readBatch(offset: Int) = log.readUncommitted(offset, 4096).records.batches.iterator.next()
if (!brokerCompression.equals("producer")) {
val brokerCompressionCode = BrokerCompressionCodec.getCompressionCodec(brokerCompression)
assertEquals("Compression at offset 0 should produce " + brokerCompressionCode.name, brokerCompressionCode.codec, readBatch(0).compressionType.id)
}
else
assertEquals("Compression at offset 0 should produce " + messageCompressionCode.name, messageCompressionCode.codec, readBatch(0).compressionType.id)
}
}
object BrokerCompressionTest {
@Parameters
def parameters: Collection[Array[String]] = {
(for (brokerCompression <- BrokerCompressionCodec.brokerCompressionOptions;
messageCompression <- CompressionType.values
) yield Array(messageCompression.name, brokerCompression)).asJava
}
}
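/**
 * A small illustration, not part of the original test: the JUnit Parameterized runner
 * instantiates the test once per (messageCompression, brokerCompression) pair returned by
 * `parameters` above. This helper only prints that matrix to make the cross-product explicit.
 */
object BrokerCompressionMatrixSketch {
  def main(args: Array[String]): Unit =
    BrokerCompressionTest.parameters.asScala.foreach(pair => println(pair.mkString(" / ")))
}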
| themarkypantz/kafka | core/src/test/scala/unit/kafka/log/BrokerCompressionTest.scala | Scala | apache-2.0 | 3,530 |
/*
* Copyright 2016 University of Basel, Graphics and Vision Research Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalismo.faces.color
import scalismo.color.{ColorSpaceOperations, RGB, RGBA}
import scalismo.faces.FacesTestSuite
class ColorSpaceOperationsTest extends FacesTestSuite {
describe("Implicit ColorSpaceOperation operators are mapped to proper ColorSpaceOperations methods") {
import scalismo.color.ColorSpaceOperations.implicits._
case class V(v: Double)
implicit object TestSpace extends ColorSpaceOperations[V] {
override def add(pix1: V, pix2: V): V = V(pix1.v + pix2.v)
override def multiply(pix1: V, pix2: V): V = V(pix1.v * pix2.v)
override def dot(pix1: V, pix2: V): Double = pix1.v * pix2.v
override def dimensionality: Int = 1
override def scale(pix: V, l: Double): V = V(pix.v * l)
override def zero: V = V(0.0)
}
val v1 = randomDouble
val v2 = randomDouble
val i1 = V(v1)
val i2 = V(v2)
val f = randomDouble
it("+") { i1 + i2 shouldBe V(v1 + v2) }
it("-") { i1 - i2 shouldBe V(v1 - v2) }
it("x") { i1 x i2 shouldBe V(v1 * v2) }
it("*") { i1 * f shouldBe V(v1 * f) }
it("*:") { f *: i1 shouldBe V(v1 * f) }
it("/") { (i1 / f).v shouldBe v1 / f +- 1e-6 }
it("dot") { i1 dot i2 shouldBe v1 * v2 }
it("normSq") { i1.normSq shouldBe v1 * v1 }
it("unary-") { -i1 shouldBe V(-v1) }
}
describe("RGB has proper ColorSpaceOperations") {
val i1 = randomRGB
val i2 = randomRGB
val f = randomDouble
val ops = RGB.RGBOperations
it("supports add") {
i1 + i2 shouldBe ops.add(i1, i2)
}
it("supports multiply") {
i1 x i2 shouldBe ops.multiply(i1, i2)
}
it("supports scale") {
i1 * f shouldBe ops.scale(i1, f)
f *: i1 shouldBe ops.scale(i1, f)
i1 / f shouldBe ops.scale(i1, 1.0 / f)
}
it("supports normSq") {
i1.dot(i1) shouldBe ops.normSq(i1)
}
it("supports dot") {
i1 dot i2 shouldBe ops.dot(i1, i2)
}
it("has zero as black") {
RGB.Black shouldBe ops.zero
}
}
describe("RGBA has proper ColorSpaceOperations (involves A as 4th dimension)") {
val i1 = randomRGBA
val i2 = randomRGBA
val f = randomDouble
val ops = RGBA.RGBAOperations
it("supports add") {
i1 + i2 shouldBe ops.add(i1, i2)
}
it("supports multiply") {
i1 x i2 shouldBe ops.multiply(i1, i2)
}
it("supports scale") {
i1 * f shouldBe ops.scale(i1, f)
f *: i1 shouldBe ops.scale(i1, f)
i1 / f shouldBe ops.scale(i1, 1.0 / f)
}
it("supports normSq") {
i1.dot(i1) shouldBe ops.normSq(i1)
}
it("supports dot") {
i1 dot i2 shouldBe ops.dot(i1, i2)
}
it("has zero as black") {
RGBA.BlackTransparent shouldBe ops.zero
}
}
}
| unibas-gravis/scalismo-faces | src/test/scala/scalismo/faces/color/ColorSpaceOperationsTest.scala | Scala | apache-2.0 | 3,378 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.batch.table
import org.apache.flink.table.api._
import org.apache.flink.table.planner.factories.TestValuesTableFactory
import org.apache.flink.table.planner.runtime.utils.BatchTestBase
import org.apache.flink.table.planner.runtime.utils.TestData._
import org.apache.flink.util.ExceptionUtils
import org.junit.Assert.{assertEquals, assertTrue, fail}
import org.junit.Test
import scala.collection.JavaConversions._
class TableSinkITCase extends BatchTestBase {
@Test
def testDecimalOnOutputFormatTableSink(): Unit = {
tEnv.executeSql(
s"""
|CREATE TABLE sink (
| `c` VARCHAR(5),
| `b` DECIMAL(10, 0),
| `d` CHAR(5)
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'true',
| 'runtime-sink' = 'OutputFormat'
|)
|""".stripMargin)
registerCollection("MyTable", data3, type3, "a, b, c", nullablesOfData3)
val table = tEnv.from("MyTable")
.where('a > 20)
.select("12345", 55.cast(DataTypes.DECIMAL(10, 0)), "12345".cast(DataTypes.CHAR(5)))
table.executeInsert("sink").await()
val result = TestValuesTableFactory.getResults("sink")
val expected = Seq("12345,55,12345")
assertEquals(expected.sorted, result.sorted)
}
@Test
def testDecimalOnSinkFunctionTableSink(): Unit = {
tEnv.executeSql(
s"""
|CREATE TABLE sink (
| `c` VARCHAR(5),
| `b` DECIMAL(10, 0),
| `d` CHAR(5)
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'true'
|)
|""".stripMargin)
registerCollection("MyTable", data3, type3, "a, b, c", nullablesOfData3)
val table = tEnv.from("MyTable")
.where('a > 20)
.select("12345", 55.cast(DataTypes.DECIMAL(10, 0)), "12345".cast(DataTypes.CHAR(5)))
table.executeInsert("sink").await()
val result = TestValuesTableFactory.getResults("sink")
val expected = Seq("12345,55,12345")
assertEquals(expected.sorted, result.sorted)
}
@Test
def testSinkWithKey(): Unit = {
tEnv.executeSql(
s"""
|CREATE TABLE testSink (
| `a` INT,
| `b` DOUBLE,
| PRIMARY KEY (a) NOT ENFORCED
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'true'
|)
|""".stripMargin)
registerCollection("MyTable", simpleData2, simpleType2, "a, b", nullableOfSimpleData2)
val table = tEnv.from("MyTable")
.groupBy('a)
.select('a, 'b.sum())
table.executeInsert("testSink").await()
val result = TestValuesTableFactory.getResults("testSink")
val expected = List(
"1,0.1",
"2,0.4",
"3,1.0",
"4,2.2",
"5,3.9")
assertEquals(expected.sorted, result.sorted)
}
@Test
def testSinkWithoutKey(): Unit = {
tEnv.executeSql(
s"""
|CREATE TABLE testSink (
| `a` INT,
| `b` DOUBLE
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'true'
|)
|""".stripMargin)
registerCollection("MyTable", simpleData2, simpleType2, "a, b", nullableOfSimpleData2)
val table = tEnv.from("MyTable")
.groupBy('a)
.select('a, 'b.sum())
table.executeInsert("testSink").await()
val result = TestValuesTableFactory.getResults("testSink")
val expected = List(
"1,0.1",
"2,0.4",
"3,1.0",
"4,2.2",
"5,3.9")
assertEquals(expected.sorted, result.sorted)
}
@Test
def testNotNullEnforcer(): Unit = {
innerTestNotNullEnforcer("SinkFunction")
}
@Test
def testDataStreamNotNullEnforcer(): Unit = {
innerTestNotNullEnforcer("DataStream")
}
def innerTestNotNullEnforcer(provider: String): Unit = {
val dataId = TestValuesTableFactory.registerData(nullData4)
tEnv.executeSql(
s"""
|CREATE TABLE nullable_src (
| category STRING,
| shopId INT,
| num INT
|) WITH (
| 'connector' = 'values',
| 'data-id' = '$dataId',
| 'bounded' = 'true'
|)
|""".stripMargin)
tEnv.executeSql(
s"""
|CREATE TABLE not_null_sink (
| category STRING,
| shopId INT,
| num INT NOT NULL
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'true',
| 'runtime-sink' = '$provider'
|)
|""".stripMargin)
// default should fail, because there are null values in the source
try {
tEnv.executeSql("INSERT INTO not_null_sink SELECT * FROM nullable_src").await()
fail("Execution should fail.")
} catch {
case t: Throwable =>
val exception = ExceptionUtils.findThrowableWithMessage(
t,
"Column 'num' is NOT NULL, however, a null value is being written into it. " +
"You can set job configuration 'table.exec.sink.not-null-enforcer'='drop' " +
"to suppress this exception and drop such records silently.")
assertTrue(exception.isPresent)
}
// enable the drop enforcer so that the query can run
tEnv.getConfig.getConfiguration.setString("table.exec.sink.not-null-enforcer", "drop")
tEnv.executeSql("INSERT INTO not_null_sink SELECT * FROM nullable_src").await()
val result = TestValuesTableFactory.getResults("not_null_sink")
val expected = List("book,1,12", "book,4,11", "fruit,3,44")
assertEquals(expected.sorted, result.sorted)
}
}
| greghogan/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/batch/table/TableSinkITCase.scala | Scala | apache-2.0 | 6,383 |
package SparqlSpark
import scala.collection.mutable._
import SparqlSpark._
import scala.collection.concurrent._
import Array._
import scala.io.Source
object SparqlPlan {
class Triple(val tp: String) extends java.io.Serializable {
var spo = tp.split("\\s+")
val s: String = spo(0)
val p: String = spo(1)
val o: String = spo(2)
val str: String = tp
var finish: Boolean = false
override def toString(): String = {
return str
}
}
/**
* PlanResult, which contains the plan, the root node, the number of variables, the data properties of the query and the non-tree edges.
*/
class PlanResult(_plan: ArrayBuffer[ListBuffer[PlanItem]], _rootNode: String, _varNum: Int, _dataProperties: LinkedHashMap[String, MutableList[VertexProp]], _nonTreeEdge: Array[Triple]) extends java.io.Serializable {
val plan: ArrayBuffer[ListBuffer[PlanItem]] = _plan.clone()
val rootNode: String = _rootNode
val numVar: Int = _varNum
val dataProperties = _dataProperties.clone()
val nonTreeEdge = _nonTreeEdge.clone()
}
/**
* Candidate, a possible plan together with its BFS depth (level) and per-level weights, used to choose the final plan.
*/
class Candidate(_candiPlan: PlanResult, _level: Int, _weightArray: ArrayBuffer[Double]) extends java.io.Serializable {
val candiPlan: PlanResult = _candiPlan
val level: Int = _level
val weightArray: ArrayBuffer[Double] = _weightArray.clone
}
/**
* PlanItem, which contains the triple, the src and the headPattern, just like
* PLAN-- (?Z <http://spark.elte.hu#subOrganizationOf> ?Y, ?Y, Set(?X)), where ?Y is src and Set(?X) is the set which send message to ?Y
*/
class PlanItem(_tp: Triple, _src: String, _headPattern: Set[String]) extends java.io.Serializable {
val tp: Triple = _tp
val src: String = _src
var headPattern: Set[String] = _headPattern
}
/**
* Split the query into data properties and object properties: if the object starts with '"' or the predicate is rdf:type, the triple is a data property; otherwise it is an object property.
*/
def getDataProperties(SparqlQuery: String): (Array[Triple], LinkedHashMap[String, MutableList[VertexProp]]) = {
var query = SparqlQuery.split(" . ")
var TPs: Array[Triple] = query.map(tp => new Triple(tp))
var queryArray: Array[String] = Array[String]()
var dataProperties: LinkedHashMap[String, MutableList[VertexProp]] = LinkedHashMap[String, MutableList[VertexProp]]()
TPs = TPs.flatMap(tp => {
if ((tp.p == "rdf:type") || (tp.p == "<http://www.w3.org/1999/02/22-rdf-syntax-ns#type>") || tp.o(0) == '"') {
var propList = dataProperties.getOrElseUpdate(tp.s, MutableList[VertexProp]())
propList += new VertexProp(tp.p, tp.o)
dataProperties(tp.s) = propList
Array[Triple]()
} else {
Array[Triple](tp)
}
})
return (TPs, dataProperties)
}
/**
* Calculate the variance of a candidate plan's per-level weights; a lower variance means the work is spread more evenly across levels.
*/
def calcVariable(level: Int, weightArray: ArrayBuffer[Double]): Double = {
var variable = 0.0
val mean = weightArray.reduce(_ + _) / level
variable = 1.0 / level * weightArray.map(x => {
Math.pow(x - mean, 2)
}).reduce(_ + _)
variable
}
/**
* Compute the degree of every node and return the node with the maximum degree as the root node (used after the minimum spanning tree has been found).
*/
def getRootNode(tps: Array[Triple]): String = {
var array = ArrayBuffer[String]()
tps.foreach(x => {
array += x.s
array += x.o
})
var map = new HashMap[String, Int]
for (i <- 0 until array.length) {
if (map.contains(array(i))) {
var tmp = map.get(array(i))
map.put(array(i), tmp.getOrElse(0) + 1)
} else {
map.put(array(i), 1)
}
}
var maxStr: String = ""
val max = map.values.max
for (v <- map) {
if (max == v._2)
maxStr = v._1
}
return maxStr
}
/**
* Find the minimum spanning tree of the query graph (Prim's algorithm over predicate weights); returns the tree edges and the remaining non-tree edges.
*/
def findMinSpanningTree(tps: Array[Triple], scoreMap: scala.collection.Map[String, Double]): (Array[Triple], Array[Triple]) = {
var minTriple = ArrayBuffer[Triple]()
val node = tps.flatMap(x => List(x.s, x.o)).distinct.zipWithIndex.toMap
val newTriple = tps.map(tp => {
(node.getOrElse(tp.s, 0), scoreMap.getOrElse(tp.p, 100.0), node.getOrElse(tp.o, 0))
})
var tree = ofDim[Double](node.size, node.size)
//init
for (i <- 0 until node.size) {
for (j <- 0 until node.size) {
tree(i)(j) = Double.MaxValue
}
}
for (i <- 0 until node.size) {
for (j <- 0 until node.size) {
newTriple.map(x => {
if ((i == x._1 && j == x._3) || (i == x._3 && j == x._1)) {
tree(i)(j) = x._2
tree(j)(i) = x._2
}
})
}
}
var lowcost = new Array[Double](node.size)
var mst = new Array[Int](node.size)
var min: Double = 0
var minid: Int = 0
var sum: Double = 0
for (i <- 1 until node.size) {
lowcost(i) = tree(0)(i)
mst(i) = 0
}
mst(0) = 0
for (i <- 1 until node.size) {
min = Double.MaxValue
minid = 0
for (j <- 1 until node.size) {
if (lowcost(j) < min && lowcost(j) != 0) {
min = lowcost(j)
minid = j
}
}
//println("v" + mst(minid) + "-v" + minid + "=" + min)
var src = ""
var dst = ""
for (key <- node.keySet) {
if (node.getOrElse(key, -1) == mst(minid)) {
src = key
}
if (node.getOrElse(key, -1) == minid) {
dst = key
}
}
tps.map(x => {
if ((x.s == src && x.o == dst) || (x.s == dst && x.o == src)) {
minTriple += x
}
})
sum += min
lowcost(minid) = 0
for (j <- 1 until node.size) {
if (tree(minid)(j) < lowcost(j)) {
lowcost(j) = tree(minid)(j)
mst(j) = minid
}
}
}
var nonTreeEdge = ArrayBuffer[Triple]()
tps.map(tp => {
if (!minTriple.contains(tp))
nonTreeEdge += tp
})
return (minTriple.toArray, nonTreeEdge.toArray)
}
/**
* Create the query plan using a BFS traversal:
* choose a variable, make all of its unvisited neighbours its children, and loop until every variable has been processed.
* The tree is built top-down while the query is processed bottom-up.
*/
def createPlan(SparqlQuery: String, scoreMap: scala.collection.Map[String, Double]): PlanResult = {
var (tps, dataProperties): (Array[Triple], LinkedHashMap[String, MutableList[VertexProp]]) = getDataProperties(SparqlQuery)
var rootNode = ""
var vars: Set[String] = HashSet()
var nonTreeEdge: Array[Triple] = Array[Triple]()
if (tps.size > 0) {
var (newtps, newnonTreeEdge) = findMinSpanningTree(tps, scoreMap)
tps = newtps.clone
nonTreeEdge = newnonTreeEdge.clone
} else {
rootNode = dataProperties.keySet.head
dataProperties.map(v => {
println("PLAN-- " + v._1)
v._2.map(p => {
println("PLAN-- " + p.prop + " " + p.obj)
})
})
return new PlanResult(ArrayBuffer[ListBuffer[PlanItem]](), rootNode, dataProperties.size, dataProperties, nonTreeEdge)
}
tps.foreach(tp => {
vars.add(tp.s)
vars.add(tp.o)
})
var candidateArray = ArrayBuffer[Candidate]()
for (root <- vars) {
rootNode = root
var v = root
tps.map(tp => {
tp.finish = false
})
var plan = ArrayBuffer[ListBuffer[PlanItem]]()
var aliveTP = ArrayBuffer[ListBuffer[PlanItem]]()
var q2 = new Queue[String]()
q2.enqueue("^")
var iteration = ListBuffer[PlanItem]()
var aliveIter = ListBuffer[PlanItem]()
var level: Int = 0
var MsgPattern: LinkedHashMap[String, Set[String]] = LinkedHashMap[String, Set[String]]()
var levelWeight: Double = 0
var weightArray: ArrayBuffer[Double] = ArrayBuffer[Double]()
while (v != "^") {
if (!MsgPattern.contains(v)) {
MsgPattern(v) = HashSet()
}
tps.map(tp => {
if (!tp.finish && tp.s == v) {
if (!q2.contains(tp.o)) q2.enqueue(tp.o)
aliveIter += new PlanItem(new Triple("?alive " + tp.p + " ?alive"), " ", Set[String]())
iteration += new PlanItem(new Triple(tp.str), tp.o, Set[String]())
levelWeight += scoreMap.getOrElse(tp.p, 1.0)
tp.finish = true
} else if (!tp.finish && tp.o == v) {
if (!q2.contains(tp.s)) q2.enqueue(tp.s)
aliveIter += new PlanItem(new Triple("?alive " + tp.p + " ?alive"), " ", Set[String]())
iteration += new PlanItem(new Triple(tp.str), tp.s, Set[String]())
levelWeight += scoreMap.getOrElse(tp.p, 1.0)
tp.finish = true
} else {
//nop
}
})
if (q2.size > 1 && q2.front.equals("^")) {
if (!iteration.isEmpty) {
aliveTP += aliveIter
if (aliveTP.length > 1) {
for (i <- 0 to level - 1) {
aliveTP(i).map(alive => iteration += alive)
}
}
}
plan += iteration
q2.dequeue
v = q2.dequeue
q2.enqueue("^")
level += 1
weightArray += levelWeight
aliveIter = ListBuffer[PlanItem]()
iteration = ListBuffer[PlanItem]()
levelWeight = 0.0
} else if (q2.size > 1 && !q2.front.equals("^")) {
v = q2.dequeue
} else {
v = "^"
}
}
/**
* The code below adds AcceptHeaders to the query, so that we know how many messages need to be sent to the src of each triple.
*/
plan = plan.reverse //bottom-up
plan = plan.map(iter => {
iter.map(planitem => {
if (planitem.tp.s != "?alive") {
var o = planitem.tp.o
var src = planitem.src
var des = o
if (src == o) {
des = planitem.tp.s
}
MsgPattern(des) = MsgPattern(des).union(MsgPattern(src))
MsgPattern(des) += src
var mp: Set[String] = Set[String]()
MsgPattern(src).foreach(i => mp += i)
planitem.headPattern = mp
planitem
} else {
planitem.headPattern = Set[String]()
planitem
}
})
})
candidateArray += new Candidate(new PlanResult(plan, rootNode, vars.size, dataProperties, nonTreeEdge), level, weightArray)
}
val minLevel = candidateArray.map(candidate => {
candidate.level
}).min
val candidateResult = candidateArray.filter(candidate => {
candidate.level == minLevel
})
val minVariance = candidateResult.map(candidate => {
calcVariable(candidate.level, candidate.weightArray)
}).min
val finalSparqlPlan = candidateResult.filter(candidate => {
calcVariable(candidate.level, candidate.weightArray) == minVariance
})(0)
finalSparqlPlan.candiPlan.plan.map(list => {
println("PLAN-- [")
list.map(tp => println("PLAN-- (" + tp.tp.toString() + ", " + tp.src + ", " + tp.headPattern + ")"))
println("PLAN-- ],")
})
println("PLAN-- DATAPROPERTIES")
dataProperties.map(v => {
println("PLAN-- " + v._1)
v._2.map(p => {
println("PLAN-- " + p.prop + " " + p.obj)
})
})
return finalSparqlPlan.candiPlan
//return new PlanResult(plan, rootNode, vars.size, dataProperties, nonTreeEdge)
}
}
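/**
 * A minimal usage sketch, not part of the original file: it shows the call shape `createPlan`
 * expects, i.e. a basic graph pattern whose triples are joined by " . " plus a
 * predicate-selectivity map used as edge weights. The property IRIs and weights below are
 * made up for illustration.
 */
object SparqlPlanUsageSketch {
  def example(): SparqlPlan.PlanResult = {
    val query =
      "?x <http://example.org#worksFor> ?y . " +
        "?y <http://example.org#name> \"Acme\""
    val predicateWeights = scala.collection.immutable.Map(
      "<http://example.org#worksFor>" -> 2.0,
      "<http://example.org#name>" -> 1.0)
    // Returns the per-level plan, the chosen root node, the variable count,
    // the data properties and any non-tree edges.
    SparqlPlan.createPlan(query, predicateWeights)
  }
}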
| qiuhuiGithub/SQX | src/SparqlSpark/SparqlPlan.scala | Scala | apache-2.0 | 11,466 |
package doodle
package image
package examples
import doodle.core._
import doodle.syntax._
object ColorPaletteAgain {
val circleMinimum = 50.0
val circleIncrement = 10.0
def complement(c: Color): Color =
c.spin(180.degrees)
def nearComplement(c: Color): Color =
c.spin(170.degrees)
def analogous(c: Color): Color =
c.spin(15.degrees)
def singleCircle(n: Int, color: Color): Image =
Image.circle(circleMinimum + circleIncrement * n) strokeColor color strokeWidth circleIncrement
def complementCircles(n: Int, c: Color): Image = {
val color = complement(c)
if (n == 1) {
singleCircle(n, color)
} else {
complementCircles(n - 1, color) on singleCircle(n, color)
}
}
def nearComplementCircles(n: Int, c: Color): Image = {
val color = nearComplement(c)
if (n == 1) {
singleCircle(n, color)
} else {
nearComplementCircles(n - 1, color) on singleCircle(n, color)
}
}
def coloredCircles(n: Int, c: Color, palette: Color => Color): Image = {
val color = palette(c)
if (n == 1) {
singleCircle(n, color)
} else {
coloredCircles(n - 1, color, palette) on singleCircle(n, color)
}
}
def lcg1(input: Int): Int = {
// These values of a, c, and m come from the Wikipedia page
// on linear congruential generators and are reported to
// be from Knuth. This gives a rather short period, but for our
// purposes any period greater than 256 (2^8) will
// generate sufficient results.
val a = 8121
val c = 28411
val m = 134456
(a * input + c) % m
}
def lcg2(input: Int): Int = {
// These values of a and m come from Park and Miller's Minimal Standard
// Generator, and the method used to calculate the result is called
// Schrage's method. Schrage's method avoids numeric overflow in the
// computation. Both of these concepts I first read about in a post on the
// Eternally Confuzzled blog.
//
// Learning about this was a fun excursion into Computer Science for me. I recommend it.
val a = 48271
val m = Int.MaxValue
val q = m / a
val r = m % a
val result = a * (input % q) - r * (input / q)
if (result <= 0)
result + m
else
result
}
def normalize(value: Int, max: Int): Normalized =
(value.toDouble / max.toDouble).normalized
def rescale(value: Normalized, min: Double, range: Double): Normalized =
((value.get * range) + min).normalized
def lcgColor(c: Color): Color = {
val spun = c.spin(169.degrees)
val saturation =
rescale(normalize(lcg1(spun.hue.toDegrees.toInt), 134456), 0.25, 0.75)
val lightness = rescale(
normalize(lcg2(spun.hue.toDegrees.toInt), Int.MaxValue),
0.25,
0.5)
println(s"saturation ${saturation} lightness ${lightness}")
spun.saturation(saturation).lightness(lightness)
}
// def murmurColor(c: Color): Color = {
// val murmur = scala.util.hashing.MurmurHash3
// val spun = c.spin(169.degrees)
// val saturation = murmur.mix(murmur.symmetricSeed, spun.h.toDegrees.toInt)
// val lightness = murmur.mix(murmur.symmetricSeed, saturation)
// println(s"saturation ${normalize(saturation)} lightness ${normalize(lightness)}")
// spun.copy(s = calm(normalize(saturation)), l = calm(normalize(lightness)))
// }
def image =
complementCircles(10, Color.seaGreen) beside
nearComplementCircles(10, Color.seaGreen) beside
coloredCircles(10, Color.seaGreen, lcgColor)
}
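/**
 * A minimal sketch, not part of the original example: any `Color => Color` function can be
 * passed as the palette argument of `coloredCircles`, so the analogous scheme above can be
 * reused directly. The object name is made up for illustration.
 */
object ColorPaletteAgainSketch {
  def analogousCircles: Image =
    ColorPaletteAgain.coloredCircles(10, Color.seaGreen, ColorPaletteAgain.analogous)
}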
| underscoreio/doodle | image/shared/src/main/scala/doodle/image/examples/ColorPaletteAgain.scala | Scala | apache-2.0 | 3,516 |
/**
* Copyright (c) 2013 Bernard Leach
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package org.leachbj.hsmsim.commands
import org.leachbj.hsmsim.util.HexConverter
import org.leachbj.hsmsim.crypto.DES
import org.leachbj.hsmsim.crypto.LMK
import akka.util.ByteString
case class GenerateMacRequest(blockNumber: Int, keyType: Int, keyLength: Int, macKey: Array[Byte], iv: Option[Array[Byte]], message: Array[Byte]) extends HsmRequest
case class GenerateMacResponse(errorCode: String, mac: Array[Byte]) extends HsmResponse {
val responseCode = "MT"
}
object GenerateMacResponse {
private val (onlyBlock, firstBlock, middleBlock, lastBlock) = (0, 1, 2, 3)
private val (takKeyType, zakKeyType) = (0, 1)
private val (singleKeyLen, doubleKeyLen) = (0, 1)
private val (binaryMessage, hexMessage) = (0, 1)
def createResponse(req: GenerateMacRequest): HsmResponse = {
if (req.blockNumber != onlyBlock) return ErrorResponse("MT", "05")
if (req.keyType != takKeyType && req.keyType != zakKeyType) return ErrorResponse("MT", "04")
if (req.keyLength != doubleKeyLen) return ErrorResponse("MT", "06")
val macKey = req.keyType match {
case `takKeyType` =>
DES.tripleDesDecryptVariant(LMK.lmkVariant("16-17", 0), req.macKey)
case `zakKeyType` =>
DES.tripleDesDecryptVariant(LMK.lmkVariant("26-27", 0), req.macKey)
}
println("mac key: " + HexConverter.toHex(ByteString(macKey)))
if (!DES.isParityAdjusted(macKey)) return ErrorResponse("MT", "10")
GenerateMacResponse("00", DES.mac(macKey, req.message))
}
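  /**
   * A purely illustrative request, not part of the original file: it shows the field values
   * `createResponse` expects for an "only block" (0), TAK (0), double-length (1) request.
   * The key and message bytes are placeholders, not real HSM test vectors.
   */
  private def exampleRequest: GenerateMacRequest =
    GenerateMacRequest(
      blockNumber = 0,
      keyType = 0,
      keyLength = 1,
      macKey = Array.fill[Byte](16)(0x11),
      iv = None,
      message = "0000000000000000".getBytes("UTF-8"))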
}
| leachbj/hsm-emulator | hsmsim-akka/src/main/scala/org/leachbj/hsmsim/commands/GenerateMac.scala | Scala | mit | 2,604 |
/*
*
* * Copyright 2014 Commonwealth Computer Research, Inc.
* *
* * Licensed under the Apache License, Version 2.0 (the License);
* * you may not use this file except in compliance with the License.
* * You may obtain a copy of the License at
* *
* * http://www.apache.org/licenses/LICENSE-2.0
* *
* * Unless required by applicable law or agreed to in writing, software
* * distributed under the License is distributed on an AS IS BASIS,
* * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* * See the License for the specific language governing permissions and
* * limitations under the License.
*
*/
package org.locationtech.geomesa.plugin.ui
import java.io.File
import com.google.common.io.Files
import org.apache.accumulo.core.client.security.tokens.PasswordToken
import org.apache.accumulo.core.client.{BatchWriterConfig, Connector, ZooKeeperInstance}
import org.apache.accumulo.core.data.Mutation
import org.apache.accumulo.minicluster.MiniAccumuloCluster
import org.apache.hadoop.io.Text
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.collection.JavaConverters._
@RunWith(classOf[JUnitRunner])
class GeoMesaDataStoresPageTest extends Specification {
sequential
val table = "test"
var tempDirectory: File = null
var miniCluster: MiniAccumuloCluster = null
var connector: Connector = null
"GeoMesaDataStoresPage" should {
step(startCluster())
"scan metadata table accurately" in {
connector.tableOperations.create(table)
val splits = (0 to 99).map {
s => "%02d".format(s)
}.map(new Text(_))
connector.tableOperations().addSplits(table, new java.util.TreeSet[Text](splits.asJava))
val mutations = (0 to 149).map {
i =>
val mutation = new Mutation("%02d-row-%d".format(i%100, i))
mutation.put("cf1", "cq1", s"value1-$i")
mutation.put("cf1", "cq2", s"value2-$i")
mutation.put("cf2", "cq3", s"value3-$i")
mutation
}
val writer = connector.createBatchWriter(table, new BatchWriterConfig)
writer.addMutations(mutations.asJava)
writer.flush()
writer.close()
// have to flush table in order for it to write to metadata table
connector.tableOperations().flush(table, null, null, true)
val metadata = GeoMesaDataStoresPage.getTableMetadata(connector,
"feature",
"test",
connector.tableOperations().tableIdMap().get("test"),
"test table")
metadata.table must be equalTo "test"
metadata.displayName must be equalTo "test table"
metadata.numTablets should be equalTo 100
metadata.numEntries should be equalTo 450
metadata.numSplits should be equalTo 100
// exact file size varies slightly between runs... not sure why
Math.abs(metadata.fileSize - 0.026) should be lessThan 0.001
}
step(stopCluster())
}
def startCluster() = {
tempDirectory = Files.createTempDir()
miniCluster = new MiniAccumuloCluster(tempDirectory, "password")
miniCluster.start()
val instance = new ZooKeeperInstance(miniCluster.getInstanceName(), miniCluster.getZooKeepers())
connector = instance.getConnector("root", new PasswordToken("password"))
}
def stopCluster() = {
miniCluster.stop()
if (!tempDirectory.delete()) {
tempDirectory.deleteOnExit()
}
}
}
| jwkessi/geomesa | geomesa-plugin/src/test/scala/org/locationtech/geomesa/plugin/ui/GeoMesaDataStoresPageTest.scala | Scala | apache-2.0 | 3,674 |
/**
* Copyright (C) 2011 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.processor.handlers.xhtml
import org.orbeon.oxf.common.OrbeonLocationException
import org.orbeon.oxf.xforms.XFormsConstants
import org.orbeon.oxf.xforms.XFormsUtils
import org.orbeon.oxf.xforms.control.XFormsControl
import org.orbeon.oxf.xforms.control.controls.XFormsRepeatControl
import org.orbeon.oxf.xml._
import org.orbeon.oxf.xml.dom4j.ExtendedLocationData
import org.xml.sax.Attributes
import java.lang.{StringBuilder ⇒ JStringBuilder}
import org.orbeon.oxf.xforms.processor.handlers.OutputInterceptor
import org.orbeon.oxf.xforms.analysis.controls.RepeatControl
import scala.util.control.NonFatal
/**
* Handle xf:repeat.
*/
class XFormsRepeatHandler extends XFormsControlLifecyleHandler(true, true) { // This is a repeating element
override def isMustOutputContainerElement = handlerContext.isFullUpdateTopLevelControl(getEffectiveId)
def handleControlStart(uri: String, localname: String, qName: String, attributes: Attributes, effectiveId: String, control: XFormsControl) {
val isTopLevelRepeat = handlerContext.countParentRepeats == 0
val isRepeatSelected = handlerContext.isRepeatSelected || isTopLevelRepeat
val isMustGenerateTemplate = (handlerContext.isTemplate || isTopLevelRepeat) && !handlerContext.isNoScript // don't generate templates in noscript mode as they won't be used
val isMustGenerateDelimiters = !handlerContext.isNoScript
val isMustGenerateBeginEndDelimiters = isMustGenerateDelimiters && !handlerContext.isFullUpdateTopLevelControl(effectiveId)
val namespacedId = XFormsUtils.namespaceId(containingDocument, effectiveId)
val repeatControl = if (handlerContext.isTemplate) null else containingDocument.getObjectByEffectiveId(effectiveId).asInstanceOf[XFormsRepeatControl]
val isConcreteControl = repeatControl != null
val xhtmlPrefix = handlerContext.findXHTMLPrefix
val spanQName = XMLUtils.buildQName(xhtmlPrefix, "span")
// Compute user classes only once for all iterations
val userClasses = appendControlUserClasses(attributes, control, new JStringBuilder).toString
// Place interceptor on output
val savedOutput = handlerContext.getController.getOutput
var mustOutputFirstDelimiter = isMustGenerateDelimiters
var outputDelimiter: (String, String) ⇒ Unit = null // initialized further below
val outputInterceptor = if (!isMustGenerateDelimiters) null else new OutputInterceptor(savedOutput, spanQName, new OutputInterceptor.Listener {
def generateFirstDelimiter(outputInterceptor: OutputInterceptor): Unit = {
if (isMustGenerateBeginEndDelimiters) {
def firstDelimiterClasses = "xforms-repeat-begin-end" + (if (userClasses.nonEmpty) (" " + userClasses) else "")
// Delimiter: begin repeat
outputDelimiter(firstDelimiterClasses, "repeat-begin-" + namespacedId)
// Delimiter: before repeat entries, unless disabled (disabled in case the repeat is completely empty)
if (mustOutputFirstDelimiter)
outputDelimiter("xforms-repeat-delimiter", null)
}
}
}, elementAnalysis.asInstanceOf[RepeatControl].isAroundTableOrListElement)
// Shortcut function to output the delimiter
outputDelimiter = outputInterceptor.outputDelimiter(savedOutput, _, _)
def appendClasses(sb: StringBuilder, classes: String) {
if (classes.nonEmpty) {
if (sb.nonEmpty)
sb += ' '
sb append classes // use append until Scala ++= is optimized
}
}
def addDnDClasses(sb: StringBuilder) {
val dndAttribute = attributes.getValue(XFormsConstants.XXFORMS_NAMESPACE_URI, "dnd")
if (Set("vertical", "horizontal")(dndAttribute)) {
appendClasses(sb, "xforms-dnd xforms-dnd-" + dndAttribute)
if (attributes.getValue(XFormsConstants.XXFORMS_NAMESPACE_URI, "dnd-over") != null)
appendClasses(sb, "xforms-dnd-over")
}
}
var bodyRepeated = false
def repeatBody(iteration: Int, classes: StringBuilder, generateTemplate: Boolean, repeatSelected: Boolean) {
if (isMustGenerateDelimiters) {
// User and DnD classes
appendClasses(classes, userClasses)
addDnDClasses(classes)
outputInterceptor.setAddedClasses(classes.toString)
}
handlerContext.pushRepeatContext(generateTemplate, iteration, repeatSelected)
try {
handlerContext.getController.repeatBody()
if (isMustGenerateDelimiters)
outputInterceptor.flushCharacters(true, true)
} catch {
case NonFatal(t) ⇒
throw OrbeonLocationException.wrapException(t, new ExtendedLocationData(repeatControl.getLocationData, "unrolling xf:repeat control", repeatControl.element))
}
handlerContext.popRepeatContext()
bodyRepeated = true
}
if (isMustGenerateDelimiters)
handlerContext.getController.setOutput(new DeferredXMLReceiverImpl(outputInterceptor))
// 1. Unroll repeat if needed
if (isConcreteControl) {
val repeatIndex = repeatControl.getIndex
val selectedClass = "xforms-repeat-selected-item-" + ((handlerContext.countParentRepeats % 4) + 1)
val isStaticReadonly = super.isStaticReadonly(repeatControl)
val addedClasses = new StringBuilder(200)
for (i ← 1 to repeatControl.getSize) {
// Delimiter: before repeat entries, except the first one which is output by generateFirstDelimiter()
if (isMustGenerateDelimiters && i > 1)
outputDelimiter("xforms-repeat-delimiter", null)
// Determine classes to add on root elements and around root characters
addedClasses.setLength(0)
// Selected iteration
val selected = isRepeatSelected && i == repeatIndex && !isStaticReadonly
if (selected)
addedClasses append selectedClass
// MIP classes
// Q: Could use handleMIPClasses()?
val relevant = repeatControl.children(i - 1).isRelevant
if (!relevant)
appendClasses(addedClasses, "xforms-disabled")
// Apply the content of the body for this iteration
repeatBody(i, addedClasses, generateTemplate = false, repeatSelected = selected)
}
}
// 2. Generate template if needed
if (isMustGenerateTemplate) {
// Delimiter: before repeat template
if (isMustGenerateDelimiters && !outputInterceptor.isMustGenerateFirstDelimiters)
outputDelimiter("xforms-repeat-delimiter", null)
// Determine classes to add on root elements and around root characters
val addedClasses = new StringBuilder(if (isTopLevelRepeat) "xforms-repeat-template" else "")
// Apply the content of the body for this iteration
repeatBody(0, addedClasses, generateTemplate = true, repeatSelected = false)
}
// 3. Handle case where no delimiter was output by repeat iterations or template
if (isMustGenerateDelimiters && ! bodyRepeated) {
// What we do here is replay the body to /dev/null in order to find and output the begin delimiter (but not
// the other delimiters)
outputInterceptor.setForward(false)
mustOutputFirstDelimiter = false
repeatBody(0, new StringBuilder, generateTemplate = true, repeatSelected = false)
}
// Restore output
handlerContext.getController.setOutput(savedOutput)
// 4. Delimiter: end repeat
if (isMustGenerateBeginEndDelimiters)
outputDelimiter("xforms-repeat-begin-end", "repeat-end-" + namespacedId)
}
// Don't output any LHHA
override def handleLabel() = ()
override def handleHint() = ()
override def handleHelp() = ()
override def handleAlert() = ()
}
| evlist/orbeon-forms | src/main/scala/org/orbeon/oxf/xforms/processor/handlers/xhtml/XFormsRepeatHandler.scala | Scala | lgpl-2.1 | 9,113 |
/*
Collector is a tool for obtaining bioactivity data from the Open PHACTS platform.
Copyright (C) 2013 UPF
Contributed by Manuel Pastor([email protected]) and Oriol López-Massaguer([email protected]).
This file is part of Collector.
Collector is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
Collector is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Collector. If not, see <http://www.gnu.org/licenses/>.
*/
package es.imim.phi.collector.compounds
trait CompoundFilter {
def filterPass(compound:Compound):Boolean
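  // A minimal illustrative implementation sketch (the predicate chosen here is an
  // assumption, not part of this trait's contract):
  //
  //   class NonNullFilter extends CompoundFilter {
  //     override def filterPass(compound: Compound): Boolean = compound != null
  //   }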
} | OriolLopezMassaguer/Collector | app/es/imim/phi/collector/compounds/CompoundFilter.scala | Scala | gpl-3.0 | 1,032 |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.akka
import javax.inject.Singleton
import akka.actor.ActorRefFactory
import akka.actor.ActorSystem
import akka.stream.Materializer
import com.google.inject.AbstractModule
import com.google.inject.Provides
import com.google.inject.multibindings.Multibinder
import com.netflix.iep.guice.LifecycleModule
import com.netflix.iep.service.Service
import com.typesafe.config.Config
import com.typesafe.scalalogging.StrictLogging
import javax.inject.Inject
import javax.inject.Provider
import scala.concurrent.Await
import scala.concurrent.duration.Duration
/**
* Configures the actor system and web server. This module expects that bindings
* are available for `com.typesafe.config.Config` and `com.netflix.spectator.api.Registry`.
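 *
 * A hypothetical wiring sketch (`ConfigModule` and `RegistryModule`, which would provide the
 * required `Config` and `Registry` bindings, are assumptions and not part of this file):
 *
 * {{{
 *   val injector = Guice.createInjector(new ConfigModule, new RegistryModule, new AkkaModule)
 *   val system = injector.getInstance(classOf[ActorSystem])
 * }}}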
*/
final class AkkaModule extends AbstractModule {
override def configure(): Unit = {
install(new LifecycleModule)
bind(classOf[ActorSystem]).toProvider(classOf[AkkaModule.ActorSystemProvider])
bind(classOf[Materializer]).toProvider(classOf[AkkaModule.MaterializerProvider])
// Mark as eager to ensure they will be created
bind(classOf[ActorService]).asEagerSingleton()
bind(classOf[WebServer]).asEagerSingleton()
// Hookup to service manager for health tracking
val serviceBinder = Multibinder.newSetBinder(binder, classOf[Service])
serviceBinder.addBinding().to(classOf[ActorService])
serviceBinder.addBinding().to(classOf[WebServer])
}
@Provides
@Singleton
protected def providesActorRefFactory(system: ActorSystem): ActorRefFactory = system
override def equals(obj: Any): Boolean = {
obj != null && getClass.equals(obj.getClass)
}
override def hashCode(): Int = getClass.hashCode()
}
object AkkaModule extends StrictLogging {
@Singleton
private class ActorSystemProvider @Inject() (config: Config)
extends Provider[ActorSystem]
with AutoCloseable {
private val system = ActorSystem(config.getString("atlas.akka.name"), config)
override def get(): ActorSystem = system
override def close(): Unit = {
Await.ready(system.terminate(), Duration.Inf)
}
}
@Singleton
private class MaterializerProvider @Inject() (system: ActorSystem)
extends Provider[Materializer]
with AutoCloseable {
private val materializer = Materializer(system)
override def get(): Materializer = materializer
override def close(): Unit = {
materializer.shutdown()
}
}
}
| brharrington/atlas | atlas-module-akka/src/main/scala/com/netflix/atlas/akka/AkkaModule.scala | Scala | apache-2.0 | 3,056 |
/*
*
*/
package see.nodes
import see.EvalError
import see.ParamError
import see.Scope
import see.StableScope
import see.values.UserFunc
import see.values.Val
import see.values.Vector
private[see] object Fnode {
private var instanceCount = 0
private def next = {
instanceCount += 1; instanceCount
}
def newName = "<afun_" + next + ">"
def apply(params: Seq[Variable], code: Node) =
new Fnode(newName, params, code)
}
private[see] class FnodeP(code: Node) extends Proto {
override def precedence = PREC.Anondef
	// We need a variable or a vector of variables as the insertion point.
// It will be replaced by the resulting Fnode.
// Note that Fnodes created from plain blocks won't use this.
override def finish(n: Node): Option[Fnode] = {
val args = n match {
case v: Variable => Some(List(v))
case vn: Vnode if vn.isParamList => vn.asArgs
case _ => return None // No suitable parameter list
}
Some(Fnode(args.get, code))
}
}
// A node that contains a function definition for an anonymous function.
private[see] class Fnode(
val name: String, val params: Seq[Variable], val code: Node)
extends Leaf {
override def dump = toString + "\nDefinition: " + code.dump
override def evalIn(s: Scope) = new UserFunc(s, this)
override def simplifyIn(s: Scope) = try {
// If this works, we have a free function that can be used right away
// Note that it still may refer to global consts, if they are already defined.
val cs = s.createStableCheck
for (p <- params) cs.setLocal(p.name, Vector.Empty)
val simpleCode = code match {
case block: Block => block.content.simplifyIn(cs)
case _ => code.simplifyIn(cs)
}
new StableFnode(name, params, simpleCode)
} catch {
// Otherwise try full closure (we don't need to define parameters in advance)
case _: Exception =>
val sc = code match {
case block: Block => block.content.simplifyIn(s)
case _ => code.simplifyIn(s)
}
new Fnode(name, params, sc)
}
override def isDefinedIn(s: Scope) = {
val cs = s.createInner
for (p <- params) cs.setLocal(p.name, Vector.Empty)
code.isDefinedIn(cs)
}
override def toString = "Function " + name +
params.map(_.name).mkString("(", ",", ")") + " := " + code
private def paramFail(got: Int) = new ParamError(
"Function " + name + " requires " + params.size +
" parameters, got " + got + ".")
// Call during evaluation
def call(defScope: Scope, args: Val): Val = {
val inner = defScope.createInner
args match {
case Vector(vs) =>
// if just one parameter, use the whole vector for that
if (params.size == 1) inner.setLocal(params.head.name, args)
else if (vs.size != params.size) throw paramFail(vs.size)
else for ((p, v) <- params zip vs) inner.setLocal(p.name, v)
case x => if (params.size != 1) throw paramFail(1)
else inner.setLocal(params.head.name, args)
}
code evalIn inner
}
val stable = false
}
// A node that contains a function definition.
private[see] class StableFnode(n: String, p: Seq[Variable], c: Node)
extends Fnode(n, p, c) {
override def evalIn(s: Scope) = new UserFunc(
new Scope(s.parser, StableScope), this)
override def toString = "Stable " + super.toString
override def isDefinedIn(s: Scope) = true
// or else we would not have this
override val stable = true
}
private[see] class FundefP(code: Node) extends Proto {
override def precedence = PREC.Fundef
	// We need an Fcall as the insertion point.
// It will be replaced by the Fundef.
override def finish(n: Node): Option[Fundef] = {
		// check whether the insertion point is suitable:
if (!n.isInstanceOf[Fcall]) return None // No valid function header
val fc = n.asInstanceOf[Fcall]
val argList = fc.argList
		if (argList.isParamList) // otherwise the argument list is invalid
Some(new Fundef(fc.fname, argList.asArgs.get, code))
else None
}
}
// A node that contains a function definition (will be defined when evaluated).
private[see] class Fundef(n: String, ps: Seq[Variable], c: Node)
extends Fnode(n, ps, c) {
def this(f: Fnode) = this(f.name, f.params, f.code)
// evaluating this node means to set a variable with the function's name.
override def evalIn(s: Scope) = {
val f = new UserFunc(s, this)
s.iset(name, f)
f
}
override def simplifyIn(s: Scope) = {
super.simplifyIn(s) match {
case f: StableFnode => new StableDef(f)
case f: Fnode => new Fundef(f)
case _ => throw new EvalError("Strange simplify result.")
}
}
}
// A node that contains a function definition.
private[see] class StableDef(n: String, p: Seq[Variable], c: Node)
extends Fundef(n, p, c) {
def this(f: StableFnode) = this(f.name, f.params, f.code)
override def evalIn(s: Scope) = {
val f = new UserFunc(new Scope(s.parser, StableScope), this)
s.iset(name, f)
f
}
override def isDefinedIn(s: Scope) = true
// or else we would not have this
override def toString = "Stable " + super.toString
override val stable = true
}
| acruise/see | src/main/scala/see/nodes/Funcs.scala | Scala | bsd-3-clause | 5,198 |
package akka.dumbdi
class FakeModule extends ActorModuleRuntime {
bind[Service](new FakeService)
}
| KadekM/akka-dumbdi | src/test/scala/akka/dumbdi/FakeModule.scala | Scala | apache-2.0 | 102 |
package arbiterJs
import com.highcharts.CleanJsObject
import com.highcharts.HighchartsUtils._
import com.highstock.HighstockAliases._
import com.highstock.config._
import scala.scalajs.js
import scala.scalajs.js.UndefOr
import scala.scalajs.js.annotation.ScalaJSDefined
import js.JSConverters._
/**
* Displays line movement across different betting exchanges.
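 *
 * A hypothetical construction sketch (the `Series` values are assumed to be built elsewhere
 * from per-bookmaker exchange data):
 *
 * {{{
 *   val chart = new BookLineChart("Team A", List(bookASeries, bookBSeries))
 * }}}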
*/
@ScalaJSDefined
class BookLineChart(participant: String, data: List[Series]) extends HighstockConfig {
// remove the highcharts credits
override val credits: Cfg[Credits] = Credits(enabled = false)
// disable exporting
override val exporting: Cfg[Exporting] = Exporting(enabled = false)
  // TODO this should also be removed, see below
override val title: Cfg[Title] = Title(
text = participant
)
  // TODO this subtitle should be removed because it doesn't make sense
  // to include the event time on both charts for odds a and b;
  // there will be two charts now
//override val subtitle: Cfg[Subtitle] = Subtitle(
// text = data.time
//)
override val chart: Cfg[Chart] = Chart(
zoomType = "x"
)
override val xAxis: Cfg[XAxis] = XAxis(
gridLineWidth = 0
)
override val scrollbar: Cfg[Scrollbar] = Scrollbar(
enabled = false
)
override val navigator: Cfg[Navigator] = Navigator(
enabled = false
)
override val yAxis: Cfg[YAxis] = YAxis(
gridLineWidth = 0,
minorGridLineWidth = 0,
opposite = false
)
override val plotOptions: Cfg[PlotOptions] = PlotOptions(
area = PlotOptionsArea(
lineWidth = 1
)
)
  // the range selector is disabled
override val rangeSelector: Cfg[RangeSelector] = new RangeSelector {
override val buttons: UndefOr[js.Array[js.Any]] = js.Array(
js.Dynamic.literal(`type` = "hour", count = 1, text = "1H"),
js.Dynamic.literal(`type` = "day", count = 1, text = "1D"),
js.Dynamic.literal(`type` = "all", count = 1, text = "All")
)
override val selected: UndefOr[Double] = 1
override val inputEnabled: UndefOr[Boolean] = false
override val enabled: UndefOr[Boolean] = false
}
val sdata = data.map{ s =>
SeriesLine (
data = s.series,
name = s.bookname,
tooltip = new SeriesLineTooltip {
override val valueDecimals: UndefOr[Double] = 2
},
step = "left",
lineWidth = 2
): CleanJsObject[SeriesLine]
}.toArray.toJSArray.asInstanceOf[js.Array[AnySeries]]
override val series: SeriesCfg = sdata
}
| asciiu/halo | arbiterJs/src/main/scala/arbiterJs/BookLineChart.scala | Scala | mit | 2,471 |
package org.jetbrains.plugins.scala
package debugger.evaluation
import java.io.File
import com.intellij.debugger.DebuggerManagerEx
import com.intellij.debugger.impl.{DebuggerManagerAdapter, DebuggerManagerListener, DebuggerSession}
import com.intellij.openapi.application.ApplicationManager
import com.intellij.openapi.components.ProjectComponent
import com.intellij.openapi.module.Module
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.io.FileUtil
import org.jetbrains.jps.incremental.messages.BuildMessage.Kind
import org.jetbrains.jps.incremental.scala.Client
import org.jetbrains.plugins.scala.compiler.{CompileServerLauncher, RemoteServerConnectorBase, RemoteServerRunner, ScalaCompileServerSettings}
import org.jetbrains.plugins.scala.project.ProjectExt
import scala.annotation.tailrec
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
/**
* Nikolay.Tropin
* 2014-10-07
*/
class ScalaEvaluatorCompileHelper(project: Project) extends ProjectComponent with EvaluatorCompileHelper {
private val tempFiles = mutable.Set[File]()
private val listener = new DebuggerManagerListener {
override def sessionAttached(session: DebuggerSession): Unit = {
if (EvaluatorCompileHelper.needCompileServer && project.hasScala) {
CompileServerLauncher.ensureServerRunning(project)
}
}
override def sessionDetached(session: DebuggerSession): Unit = {
clearTempFiles()
if (!ScalaCompileServerSettings.getInstance().COMPILE_SERVER_ENABLED && EvaluatorCompileHelper.needCompileServer) {
CompileServerLauncher.ensureNotRunning(project)
}
}
}
override def projectOpened(): Unit = {
if (!ApplicationManager.getApplication.isUnitTestMode) {
DebuggerManagerEx.getInstanceEx(project).addDebuggerManagerListener(listener)
}
}
override def projectClosed(): Unit = {
DebuggerManagerEx.getInstanceEx(project).removeDebuggerManagerListener(listener)
}
private def clearTempFiles(): Unit = {
tempFiles.foreach(FileUtil.delete)
tempFiles.clear()
}
def tempDir(): File = {
val dir = FileUtil.createTempDirectory("classfilesForDebugger", null, true)
tempFiles += dir
dir
}
def tempFile(): File = {
val file = FileUtil.createTempFile("FileToCompile", ".scala", true)
tempFiles += file
file
}
def compile(fileText: String, module: Module): Array[(File, String)] = {
compile(fileText, module, tempDir())
}
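  // A minimal usage sketch for the single-snippet overload above (the source text and the
  // `module` value are assumptions): it returns (classfile, fully qualified class name) pairs.
  //
  //   val compiled: Array[(File, String)] =
  //     ScalaEvaluatorCompileHelper.instance(project).compile("object O { def f = 1 }", module)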
def compile(files: Seq[File], module: Module, outputDir: File): Array[(File, String)] = {
assert(CompileServerLauncher.ensureServerRunning(project))
val connector = new ServerConnector(module, files, outputDir)
try {
connector.compile() match {
case Left(output) => output
        case Right(errors) => throw EvaluationException(errors.mkString("\n"))
}
}
catch {
      case e: Exception => throw EvaluationException("Could not compile:\n" + e.getMessage)
}
}
def compile(fileText: String, module: Module, outputDir: File): Array[(File, String)] = {
compile(Seq(writeToTempFile(fileText)), module, outputDir)
}
def writeToTempFile(text: String): File = {
val file = tempFile()
FileUtil.writeToFile(file, text)
file
}
}
object ScalaEvaluatorCompileHelper {
def instance(project: Project): ScalaEvaluatorCompileHelper = project.getComponent(classOf[ScalaEvaluatorCompileHelper])
}
private class ServerConnector(module: Module, filesToCompile: Seq[File], outputDir: File)
extends RemoteServerConnectorBase(module, filesToCompile, outputDir) {
private val errors: ListBuffer[String] = ListBuffer[String]()
private val client: Client = new Client {
override def message(kind: Kind, text: String, source: Option[File], line: Option[Long], column: Option[Long]): Unit = {
if (kind == Kind.ERROR) errors += text
}
override def deleted(module: File): Unit = {}
override def progress(text: String, done: Option[Float]): Unit = {}
override def isCanceled: Boolean = false
override def debug(text: String): Unit = {}
override def processed(source: File): Unit = {}
override def trace(exception: Throwable): Unit = {}
override def generated(source: File, module: File, name: String): Unit = {}
}
@tailrec
private def classfiles(dir: File, namePrefix: String = ""): Array[(File, String)] = dir.listFiles() match {
case Array(d) if d.isDirectory => classfiles(d, s"$namePrefix${d.getName}.")
case files => files.map(f => (f, s"$namePrefix${f.getName}".stripSuffix(".class")))
}
def compile(): Either[Array[(File, String)], Seq[String]] = {
val project = module.getProject
val compilationProcess = new RemoteServerRunner(project).buildProcess(arguments, client)
var result: Either[Array[(File, String)], Seq[String]] = Right(Seq("Compilation failed"))
compilationProcess.addTerminationCallback {
result = if (errors.nonEmpty) Right(errors) else Left(classfiles(outputDir))
}
compilationProcess.run()
result
}
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/debugger/evaluation/ScalaEvaluatorCompileHelper.scala | Scala | apache-2.0 | 5,069 |
/**
* This code is generated using [[https://www.scala-sbt.org/contraband/ sbt-contraband]].
*/
// DO NOT EDIT MANUALLY
package sbt.internal.bsp
/**
* @param targets A sequence of build targets affected by the debugging action.
* @param dataKind The kind of data to expect in the `data` field.
* @param data A language-agnostic JSON object interpreted by the server.
*/
final class DebugSessionParams private (
val targets: Vector[sbt.internal.bsp.BuildTargetIdentifier],
val dataKind: Option[String],
val data: Option[sjsonnew.shaded.scalajson.ast.unsafe.JValue]) extends Serializable {
override def equals(o: Any): Boolean = this.eq(o.asInstanceOf[AnyRef]) || (o match {
case x: DebugSessionParams => (this.targets == x.targets) && (this.dataKind == x.dataKind) && (this.data == x.data)
case _ => false
})
override def hashCode: Int = {
37 * (37 * (37 * (37 * (17 + "sbt.internal.bsp.DebugSessionParams".##) + targets.##) + dataKind.##) + data.##)
}
override def toString: String = {
"DebugSessionParams(" + targets + ", " + dataKind + ", " + data + ")"
}
private[this] def copy(targets: Vector[sbt.internal.bsp.BuildTargetIdentifier] = targets, dataKind: Option[String] = dataKind, data: Option[sjsonnew.shaded.scalajson.ast.unsafe.JValue] = data): DebugSessionParams = {
new DebugSessionParams(targets, dataKind, data)
}
def withTargets(targets: Vector[sbt.internal.bsp.BuildTargetIdentifier]): DebugSessionParams = {
copy(targets = targets)
}
def withDataKind(dataKind: Option[String]): DebugSessionParams = {
copy(dataKind = dataKind)
}
def withDataKind(dataKind: String): DebugSessionParams = {
copy(dataKind = Option(dataKind))
}
def withData(data: Option[sjsonnew.shaded.scalajson.ast.unsafe.JValue]): DebugSessionParams = {
copy(data = data)
}
def withData(data: sjsonnew.shaded.scalajson.ast.unsafe.JValue): DebugSessionParams = {
copy(data = Option(data))
}
}
object DebugSessionParams {
def apply(targets: Vector[sbt.internal.bsp.BuildTargetIdentifier], dataKind: Option[String], data: Option[sjsonnew.shaded.scalajson.ast.unsafe.JValue]): DebugSessionParams = new DebugSessionParams(targets, dataKind, data)
def apply(targets: Vector[sbt.internal.bsp.BuildTargetIdentifier], dataKind: String, data: sjsonnew.shaded.scalajson.ast.unsafe.JValue): DebugSessionParams = new DebugSessionParams(targets, Option(dataKind), Option(data))
}
| sbt/sbt | protocol/src/main/contraband-scala/sbt/internal/bsp/DebugSessionParams.scala | Scala | apache-2.0 | 2,451 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.lock
import java.util.concurrent.TimeUnit
import org.joda.time.{DateTime, Duration}
import org.scalatest.{BeforeAndAfterEach, OptionValues}
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import org.scalatest.enablers.Emptiness
import reactivemongo.api.commands.LastError
import uk.gov.hmrc.lock.LockFormats.Lock
import uk.gov.hmrc.mongo.{Awaiting, MongoSpecSupport, ReactiveRepository}
import uk.gov.hmrc.time.DateTimeUtils
import scala.concurrent.Future
import scala.concurrent.duration.FiniteDuration
class LockRepositorySpec
extends AnyWordSpecLike
with Matchers
with MongoSpecSupport
with Awaiting
with BeforeAndAfterEach
with OptionValues
with ScalaFutures
with IntegrationPatience {
private implicit val now = DateTimeUtils.now
val lockId = "testLock"
val owner = "repoSpec"
val testContext = this
implicit def liftFuture[A](v: A) = Future.successful(v)
implicit val reactiveRepositoryEmptiness = new Emptiness[ReactiveRepository[_, _]] {
override def isEmpty(thing: ReactiveRepository[_, _]) = await(thing.count) == 0
}
val repo = new LockRepository {
val retryIntervalMillis = FiniteDuration(5L, TimeUnit.SECONDS).toMillis
override def withCurrentTime[A](f: (DateTime) => A) = f(now)
}
override protected def beforeEach(): Unit = {
await(repo.collection.db.connection.active)
await(repo.removeAll())
}
def manuallyInsertLock(lock: Lock) = {
await(repo.insert(lock))
}
"The lock method" should {
"successfully create a lock if one does not already exist" in {
await(repo.lock(lockId, owner, new Duration(1000L))) shouldBe true
val lock = await(repo.findAll())
lock.head shouldBe Lock(lockId, owner, now, now.plusSeconds(1))
}
"successfully create a lock if a different one already exists" in {
manuallyInsertLock(Lock("nonMatchingLock", owner, now, now.plusSeconds(1)))
await(repo.lock(lockId, owner, new Duration(1000L))) shouldBe true
await(repo.count) shouldBe 2
await(repo.findById(lockId)) shouldBe Some(Lock(lockId, owner, now, now.plusSeconds(1)))
}
"do not change a non-expired lock with a different owner" in {
val alternativeOwner = "owner2"
manuallyInsertLock(Lock(lockId, alternativeOwner, now, now.plusSeconds(100)))
await(repo.lock(lockId, owner, new Duration(1000L))) shouldBe false
await(repo.findById(lockId)).map(_.owner) shouldBe Some(alternativeOwner)
}
"do not change a non-expired lock with the same owner" in {
await(repo.removeAll())
val existingLock = Lock(lockId, owner, now.minusDays(1), now.plusDays(1))
manuallyInsertLock(existingLock)
await(repo.lock(lockId, owner, new Duration(1000L))) shouldBe false
await(repo.findAll()).head shouldBe existingLock
}
"change an expired lock" in {
val expiredLock = Lock(lockId, owner, now.minusDays(2), now.minusDays(1))
manuallyInsertLock(expiredLock)
val gotLock = await(repo.lock(lockId, owner, new Duration(1000L)))
gotLock shouldBe true
await(repo.findAll()).head shouldBe Lock(lockId, owner, now, now.plusSeconds(1))
}
}
"The renew method" should {
"not renew a lock if one does not already exist" in {
await(repo.renew(lockId, owner, new Duration(1000L))) shouldBe false
await(repo.findAll()) shouldBe empty
}
"not renew a different lock if one exists" in {
manuallyInsertLock(Lock("nonMatchingLock", owner, now, now.plusSeconds(1)))
await(repo.renew(lockId, owner, new Duration(1000L))) shouldBe false
await(repo.findAll()).head shouldBe Lock("nonMatchingLock", owner, now, now.plusSeconds(1))
}
"not change a non-expired lock with a different owner" in {
val alternativeOwner = "owner2"
manuallyInsertLock(Lock(lockId, alternativeOwner, now, now.plusSeconds(100)))
await(repo.renew(lockId, owner, new Duration(1000L))) shouldBe false
await(repo.findById(lockId)).map(_.owner) shouldBe Some(alternativeOwner)
}
"change a non-expired lock with the same owner" in {
val existingLock = Lock(lockId, owner, now.minusDays(1), now.plusDays(1))
manuallyInsertLock(existingLock)
await(repo.renew(lockId, owner, new Duration(1000L))) shouldBe true
await(repo.findAll()).head shouldBe Lock(lockId, owner, existingLock.timeCreated, now.plus(new Duration(1000L)))
}
"not renew an expired lock" in {
val expiredLock = Lock(lockId, owner, now.minusDays(2), now.minusDays(1))
manuallyInsertLock(expiredLock)
val gotLock = await(repo.renew(lockId, owner, new Duration(1000L)))
gotLock shouldBe false
await(repo.findAll()).head shouldBe expiredLock
}
}
"The releaseLock method" should {
"remove an owned and expired lock" in {
val lock = Lock(lockId, owner, now.minusDays(2), now.minusDays(1))
manuallyInsertLock(lock)
await(repo.releaseLock(lockId, owner))
await(repo.count) shouldBe 0
}
"remove an owned and unexpired lock" in {
val lock = Lock(lockId, owner, now.minusDays(1), now.plusDays(1))
manuallyInsertLock(lock)
await(repo.releaseLock(lockId, owner))
await(repo.count) shouldBe 0
}
"do nothing if the lock doesn't exist" in {
await(repo.releaseLock(lockId, owner))
await(repo.count) shouldBe 0
}
"leave an expired lock owned by someone else" in {
val someoneElsesExpiredLock = Lock(lockId, "someoneElse", now.minusDays(2), now.minusDays(1))
manuallyInsertLock(someoneElsesExpiredLock)
await(repo.releaseLock(lockId, owner))
await(repo.findAll()).head shouldBe someoneElsesExpiredLock
}
"leave an unexpired lock owned by someone else" in {
val someoneElsesLock = Lock(lockId, "someoneElse", now.minusDays(2), now.plusDays(1))
manuallyInsertLock(someoneElsesLock)
await(repo.releaseLock(lockId, owner))
await(repo.findAll()).head shouldBe someoneElsesLock
}
"leave a different owned lock" in {
val someOtherLock = Lock("someOtherLock", owner, now.minusDays(1), now.plusDays(1))
manuallyInsertLock(someOtherLock)
await(repo.releaseLock(lockId, owner))
await(repo.findAll()).head shouldBe someOtherLock
}
}
"The isLocked method" should {
"return false if no lock obtained" in {
await(repo.isLocked(lockId, owner)) should be (false)
}
"return true if lock held" in {
manuallyInsertLock(Lock(lockId, owner, now, now.plusSeconds(100)))
await(repo.isLocked(lockId, owner)) should be (true)
}
"return false if the lock is held but expired" in {
manuallyInsertLock(Lock(lockId, owner, now.minusDays(2), now.minusDays(1)))
await(repo.isLocked(lockId, owner)) should be (false)
}
}
"The lock keeper" should {
val lockKeeper = new LockKeeper {
val forceLockReleaseAfter = Duration.standardSeconds(1)
override lazy val serverId: String = testContext.owner
val lockId: String = testContext.lockId
val repo: LockRepository = testContext.repo
}
"run the block supplied if the lock can be obtained, and return an option on the result and release the lock" in {
def hasLock = {
await(repo.findById(lockId)) shouldBe Some(Lock(lockId, owner, now, now.plusSeconds(1)))
Future.successful("testString")
}
await(lockKeeper.tryLock[String](hasLock)) shouldBe Some("testString")
repo shouldBe empty
}
"run the block supplied and release the lock even if the block returns a failed future" in {
a [RuntimeException] should be thrownBy{
await(lockKeeper.tryLock(Future.failed(new RuntimeException)))
}
repo shouldBe empty
}
"run the block supplied and release the lock even if the block throws an exception" in {
a [RuntimeException] should be thrownBy{
await(lockKeeper.tryLock(throw new RuntimeException ))
}
repo shouldBe empty
}
"not run the block supplied if the lock is owned by someone else, and return None" in {
      val manuallyInsertedLock = Lock(lockId, "owner2", now, now.plusSeconds(100))
      manuallyInsertLock(manuallyInsertedLock)
      await(lockKeeper.tryLock {
        fail("Should not be run!")
      }) shouldBe None
      await(repo.findAll()).head shouldBe manuallyInsertedLock
}
"not run the block supplied if the lock is already owned by the caller, and return None" in {
      val manuallyInsertedLock = Lock(lockId, owner, now, now.plusSeconds(100))
      manuallyInsertLock(manuallyInsertedLock)
      await(lockKeeper.tryLock {
        fail("Should not be run!")
      }) shouldBe None
      await(repo.findAll()).head shouldBe manuallyInsertedLock
}
"return false from isLocked if no lock obtained" in {
await(lockKeeper.isLocked) shouldBe false
}
"return true from isLocked if lock held" in {
manuallyInsertLock(Lock(lockId, owner, now, now.plusSeconds(100)))
await(lockKeeper.isLocked) shouldBe true
}
}
"Mongo should" should {
val DuplicateKey = 11000
"throw an exception if a lock object is inserted that is not unique" in {
val lock1 = Lock("lockName", "owner1", now.plusDays(1), now.plusDays(2))
val lock2 = Lock("lockName", "owner2", now.plusDays(3), now.plusDays(4))
manuallyInsertLock(lock1)
val error = the[LastError] thrownBy manuallyInsertLock(lock2)
error.code should contain(DuplicateKey)
await(repo.findAll()).head shouldBe lock1
}
}
}
| hmrc/mongo-lock | src/test/scala/uk/gov/hmrc/lock/LockRepositorySpec.scala | Scala | apache-2.0 | 10,355 |
package uconfig.test
import uconfig.UConfigObject.MapConfigObject
import uconfig.{PathSeq, UConfigObject, UConfigValue}
import utest._
object MapConfigObjectTest extends UConfigObjectTests {
override def createEUT(pairs: Seq[Tuple2[PathSeq, UConfigValue]]) = UConfigObject(pairs)
}
| jokade/sconfig | shared/src/test/scala/uconfig/test/MapConfigObjectTest.scala | Scala | mit | 286 |
package endpoints.xhr
import endpoints.algebra.Codec
import org.scalajs.dom.XMLHttpRequest
/**
* Interpreter for [[endpoints.algebra.JsonEntitiesFromCodec]] that encodes JSON requests
* and decodes JSON responses.
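 *
 * A hypothetical usage sketch (`CounterEndpoints` is an assumed endpoint-description trait,
 * not defined in this project): the interpreter is mixed into a client object alongside it.
 *
 * {{{
 *   object CounterClient extends CounterEndpoints with JsonEntitiesFromCodec
 * }}}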
*/
trait JsonEntitiesFromCodec extends Endpoints with endpoints.algebra.JsonEntitiesFromCodec {
def jsonRequest[A](implicit codec: Codec[String, A]) = (a: A, xhr: XMLHttpRequest) => {
xhr.setRequestHeader("Content-Type", "application/json")
codec.encode(a)
}
def jsonResponse[A](implicit codec: Codec[String, A]) =
xhr => codec.decode(xhr.responseText)
}
| Krever/endpoints | xhr/client/src/main/scala/endpoints/xhr/JsonEntitiesFromCodec.scala | Scala | mit | 598 |
package controllers
import play.api.libs.json._
import play.api.mvc._
import models.{City, Passenger, Taxi}
object Application extends Controller {
def addTaxi = Action(BodyParsers.parse.json) { implicit request =>
val p = request.body.validate[Taxi]
p.fold(
errors => {
BadRequest(Json.obj("status" -> "BadRequest", "message" -> JsError.toFlatJson(errors)))
},
taxi => {
City.addTaxi(taxi) match {
case Left(msg) => BadRequest(Json.obj("status" -> "BadRequest", "message" -> msg))
case Right(msg) => Ok(Json.obj("status" -> "OK"))
}
}
)
}
def addPassenger = Action(BodyParsers.parse.json) { implicit request =>
val p = request.body.validate[Passenger]
p.fold(
errors => {
BadRequest(Json.obj("status" -> "BadRequest", "message" -> JsError.toFlatJson(errors)))
},
passenger => {
City.addPassenger(passenger) match {
case Left(msg) => BadRequest(Json.obj("status" -> "BadRequest", "message" -> msg))
case Right(msg) => Ok(Json.obj("status" -> "OK"))
}
}
)
}
def doStep() = Action { request =>
City.moveStep()
Ok(Json.obj("status" -> "OK"))
}
def restart = Action { request =>
City.restart
Ok(Json.obj("status" -> "OK"))
}
def state = Action { request =>
request match {
case Accepts.Json() => Ok(views.html.city(City.renderHtml)).as(HTML)
case Accepts.Html() => Ok(views.html.city(City.renderHtml)).as(HTML)
case _ => Ok(views.html.city(City.renderHtml)).as(HTML)
}
}
}
| riosgabriel/66Taxis | app/controllers/Application.scala | Scala | mit | 1,600 |
package ru.tolsi.matcher.util
import ru.tolsi.matcher.UnitSpec
class EitherUtilsSpec extends UnitSpec {
describe("splitEitherIterator method") {
it("should split either iterator to left and right iterators") {
      val (lefts, rights) = EitherUtils.splitEitherIterator(Iterator(Left(0), Right(3), Left(1), Left(2), Right(4),
        Right(5)))
      lefts.toSeq should contain inOrderOnly (0, 1, 2)
      rights.toSeq should contain inOrderOnly (3, 4, 5)
}
it("should split empty either iterator") {
      val (lefts, rights) = EitherUtils.splitEitherIterator(Iterator.empty)
      lefts.toSeq should have size (0)
      rights.toSeq should have size (0)
}
}
}
| Tolsi/matcher | src/test/scala/ru/tolsi/matcher/util/EitherUtilsSpec.scala | Scala | mit | 670 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.objects
import java.lang.reflect.{Method, Modifier}
import scala.collection.JavaConverters._
import scala.collection.mutable.Builder
import scala.language.existentials
import scala.reflect.ClassTag
import scala.util.Try
import org.apache.spark.{SparkConf, SparkEnv}
import org.apache.spark.serializer._
import org.apache.spark.sql.Row
import org.apache.spark.sql.catalyst.{CatalystTypeConverters, InternalRow, ScalaReflection}
import org.apache.spark.sql.catalyst.ScalaReflection.universe.TermName
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.catalyst.expressions.codegen.Block._
import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, ArrayData, GenericArrayData, MapData}
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
import org.apache.spark.util.Utils
/**
* Common base class for [[StaticInvoke]], [[Invoke]], and [[NewInstance]].
*/
trait InvokeLike extends Expression with NonSQLExpression {
def arguments: Seq[Expression]
def propagateNull: Boolean
protected lazy val needNullCheck: Boolean = propagateNull && arguments.exists(_.nullable)
/**
* Prepares codes for arguments.
*
* - generate codes for argument.
* - use ctx.splitExpressions() to not exceed 64kb JVM limit while preparing arguments.
   * - avoid some of the nullability checks that are not needed because the expression is not
   *   nullable.
   * - when needNullCheck == true, short-circuit if one of the arguments is null because
   *   preparing the rest of the arguments can be skipped in that case.
*
* @param ctx a [[CodegenContext]]
* @return (code to prepare arguments, argument string, result of argument null check)
*/
def prepareArguments(ctx: CodegenContext): (String, String, ExprValue) = {
val resultIsNull = if (needNullCheck) {
val resultIsNull = ctx.addMutableState(CodeGenerator.JAVA_BOOLEAN, "resultIsNull")
JavaCode.isNullGlobal(resultIsNull)
} else {
FalseLiteral
}
val argValues = arguments.map { e =>
val argValue = ctx.addMutableState(CodeGenerator.javaType(e.dataType), "argValue")
argValue
}
val argCodes = if (needNullCheck) {
val reset = s"$resultIsNull = false;"
val argCodes = arguments.zipWithIndex.map { case (e, i) =>
val expr = e.genCode(ctx)
val updateResultIsNull = if (e.nullable) {
s"$resultIsNull = ${expr.isNull};"
} else {
""
}
s"""
if (!$resultIsNull) {
${expr.code}
$updateResultIsNull
${argValues(i)} = ${expr.value};
}
"""
}
reset +: argCodes
} else {
arguments.zipWithIndex.map { case (e, i) =>
val expr = e.genCode(ctx)
s"""
${expr.code}
${argValues(i)} = ${expr.value};
"""
}
}
val argCode = ctx.splitExpressionsWithCurrentInputs(argCodes)
(argCode, argValues.mkString(", "), resultIsNull)
}
/**
* Evaluate each argument with a given row, invoke a method with a given object and arguments,
   * and cast the return value if the return type can be mapped to a Java boxed type.
   *
   * @param obj the object for the method to be called. If null, performs a static method call
* @param method the method object to be called
* @param arguments the arguments used for the method call
* @param input the row used for evaluating arguments
* @param dataType the data type of the return object
* @return the return object of a method call
*/
def invoke(
obj: Any,
method: Method,
arguments: Seq[Expression],
input: InternalRow,
dataType: DataType): Any = {
val args = arguments.map(e => e.eval(input).asInstanceOf[Object])
if (needNullCheck && args.exists(_ == null)) {
// return null if one of arguments is null
null
} else {
val ret = method.invoke(obj, args: _*)
val boxedClass = ScalaReflection.typeBoxedJavaMapping.get(dataType)
if (boxedClass.isDefined) {
boxedClass.get.cast(ret)
} else {
ret
}
}
}
}
/**
* Common trait for [[DecodeUsingSerializer]] and [[EncodeUsingSerializer]]
*/
trait SerializerSupport {
/**
* If true, Kryo serialization is used, otherwise the Java one is used
*/
val kryo: Boolean
/**
* The serializer instance to be used for serialization/deserialization in interpreted execution
*/
lazy val serializerInstance: SerializerInstance = SerializerSupport.newSerializer(kryo)
/**
   * Adds an immutable state to the generated class containing a reference to the serializer.
* @return a string containing the name of the variable referencing the serializer
*/
def addImmutableSerializerIfNeeded(ctx: CodegenContext): String = {
val (serializerInstance, serializerInstanceClass) = {
if (kryo) {
("kryoSerializer",
classOf[KryoSerializerInstance].getName)
} else {
("javaSerializer",
classOf[JavaSerializerInstance].getName)
}
}
val newSerializerMethod = s"${classOf[SerializerSupport].getName}$$.MODULE$$.newSerializer"
// Code to initialize the serializer
ctx.addImmutableStateIfNotExists(serializerInstanceClass, serializerInstance, v =>
s"""
|$v = ($serializerInstanceClass) $newSerializerMethod($kryo);
""".stripMargin)
serializerInstance
}
}
object SerializerSupport {
/**
   * It creates a new `SerializerInstance` which is either a `KryoSerializerInstance` (if
   * `useKryo` is set to `true`) or a `JavaSerializerInstance`.
*/
def newSerializer(useKryo: Boolean): SerializerInstance = {
// try conf from env, otherwise create a new one
val conf = Option(SparkEnv.get).map(_.conf).getOrElse(new SparkConf)
val s = if (useKryo) {
new KryoSerializer(conf)
} else {
new JavaSerializer(conf)
}
s.newInstance()
}
}
/**
* Invokes a static function, returning the result. By default, any of the arguments being null
* will result in returning null instead of calling the function.
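 *
 * For example, an illustrative sketch (not taken from this file) that calls the static
 * `UTF8String.fromString` method, where `inputObject` is an assumed child expression of
 * `ObjectType(classOf[String])`:
 *
 * {{{
 *   StaticInvoke(classOf[UTF8String], StringType, "fromString", inputObject :: Nil)
 * }}}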
*
* @param staticObject The target of the static call. This can either be the object itself
* (methods defined on scala objects), or the class object
* (static methods defined in java).
* @param dataType The expected return type of the function call
* @param functionName The name of the method to call.
* @param arguments An optional list of expressions to pass as arguments to the function.
* @param propagateNull When true, and any of the arguments is null, null will be returned instead
* of calling the function.
 * @param returnNullable When false, indicates that the invoked method will always return
 *                       a non-null value.
*/
case class StaticInvoke(
staticObject: Class[_],
dataType: DataType,
functionName: String,
arguments: Seq[Expression] = Nil,
propagateNull: Boolean = true,
returnNullable: Boolean = true) extends InvokeLike {
val objectName = staticObject.getName.stripSuffix("$")
val cls = if (staticObject.getName == objectName) {
staticObject
} else {
Utils.classForName(objectName)
}
override def nullable: Boolean = needNullCheck || returnNullable
override def children: Seq[Expression] = arguments
lazy val argClasses = ScalaReflection.expressionJavaClasses(arguments)
@transient lazy val method = cls.getDeclaredMethod(functionName, argClasses : _*)
override def eval(input: InternalRow): Any = {
invoke(null, method, arguments, input, dataType)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val javaType = CodeGenerator.javaType(dataType)
val (argCode, argString, resultIsNull) = prepareArguments(ctx)
val callFunc = s"$objectName.$functionName($argString)"
val prepareIsNull = if (nullable) {
s"boolean ${ev.isNull} = $resultIsNull;"
} else {
ev.isNull = FalseLiteral
""
}
val evaluate = if (returnNullable) {
if (CodeGenerator.defaultValue(dataType) == "null") {
s"""
${ev.value} = $callFunc;
${ev.isNull} = ${ev.value} == null;
"""
} else {
val boxedResult = ctx.freshName("boxedResult")
s"""
${CodeGenerator.boxedType(dataType)} $boxedResult = $callFunc;
${ev.isNull} = $boxedResult == null;
if (!${ev.isNull}) {
${ev.value} = $boxedResult;
}
"""
}
} else {
s"${ev.value} = $callFunc;"
}
val code = code"""
$argCode
$prepareIsNull
$javaType ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
if (!$resultIsNull) {
$evaluate
}
"""
ev.copy(code = code)
}
}
/**
* Calls the specified function on an object, optionally passing arguments. If the `targetObject`
* expression evaluates to null then null will be returned.
*
* In some cases, due to erasure, the schema may expect a primitive type when in fact the method
* is returning java.lang.Object. In this case, we will generate code that attempts to unbox the
* value automatically.
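 *
 * For example, an illustrative sketch (not taken from this file) that calls `intValue()` on a
 * boxed integer, where `inputObject` is an assumed expression of
 * `ObjectType(classOf[java.lang.Integer])`:
 *
 * {{{
 *   Invoke(inputObject, "intValue", IntegerType)
 * }}}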
*
* @param targetObject An expression that will return the object to call the method on.
* @param functionName The name of the method to call.
* @param dataType The expected return type of the function.
* @param arguments An optional list of expressions, whose evaluation will be passed to the
* function.
* @param propagateNull When true, and any of the arguments is null, null will be returned instead
* of calling the function.
 * @param returnNullable When false, indicates that the invoked method will always return
 *                       a non-null value.
*/
case class Invoke(
targetObject: Expression,
functionName: String,
dataType: DataType,
arguments: Seq[Expression] = Nil,
propagateNull: Boolean = true,
returnNullable : Boolean = true) extends InvokeLike {
lazy val argClasses = ScalaReflection.expressionJavaClasses(arguments)
override def nullable: Boolean = targetObject.nullable || needNullCheck || returnNullable
override def children: Seq[Expression] = targetObject +: arguments
private lazy val encodedFunctionName = TermName(functionName).encodedName.toString
@transient lazy val method = targetObject.dataType match {
case ObjectType(cls) =>
val m = cls.getMethods.find(_.getName == encodedFunctionName)
if (m.isEmpty) {
sys.error(s"Couldn't find $encodedFunctionName on $cls")
} else {
m
}
case _ => None
}
override def eval(input: InternalRow): Any = {
val obj = targetObject.eval(input)
if (obj == null) {
// return null if obj is null
null
} else {
val invokeMethod = if (method.isDefined) {
method.get
} else {
obj.getClass.getDeclaredMethod(functionName, argClasses: _*)
}
invoke(obj, invokeMethod, arguments, input, dataType)
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val javaType = CodeGenerator.javaType(dataType)
val obj = targetObject.genCode(ctx)
val (argCode, argString, resultIsNull) = prepareArguments(ctx)
val returnPrimitive = method.isDefined && method.get.getReturnType.isPrimitive
val needTryCatch = method.isDefined && method.get.getExceptionTypes.nonEmpty
def getFuncResult(resultVal: String, funcCall: String): String = if (needTryCatch) {
s"""
try {
$resultVal = $funcCall;
} catch (Exception e) {
org.apache.spark.unsafe.Platform.throwException(e);
}
"""
} else {
s"$resultVal = $funcCall;"
}
val evaluate = if (returnPrimitive) {
getFuncResult(ev.value, s"${obj.value}.$encodedFunctionName($argString)")
} else {
val funcResult = ctx.freshName("funcResult")
// If the function can return null, we do an extra check to make sure our null bit is still
// set correctly.
val assignResult = if (!returnNullable) {
s"${ev.value} = (${CodeGenerator.boxedType(javaType)}) $funcResult;"
} else {
s"""
if ($funcResult != null) {
${ev.value} = (${CodeGenerator.boxedType(javaType)}) $funcResult;
} else {
${ev.isNull} = true;
}
"""
}
s"""
Object $funcResult = null;
${getFuncResult(funcResult, s"${obj.value}.$encodedFunctionName($argString)")}
$assignResult
"""
}
val code = obj.code + code"""
boolean ${ev.isNull} = true;
$javaType ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
if (!${obj.isNull}) {
$argCode
${ev.isNull} = $resultIsNull;
if (!${ev.isNull}) {
$evaluate
}
}
"""
ev.copy(code = code)
}
override def toString: String = s"$targetObject.$functionName"
}
object NewInstance {
def apply(
cls: Class[_],
arguments: Seq[Expression],
dataType: DataType,
propagateNull: Boolean = true): NewInstance =
new NewInstance(cls, arguments, propagateNull, dataType, None)
}
/**
* Constructs a new instance of the given class, using the result of evaluating the specified
* expressions as arguments.
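 *
 * For example, an illustrative sketch (not taken from this file) that constructs an assumed
 * case class `Point(x: Double, y: Double)` from two resolved expressions `xExpr` and `yExpr`:
 *
 * {{{
 *   NewInstance(classOf[Point], xExpr :: yExpr :: Nil, ObjectType(classOf[Point]))
 * }}}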
*
* @param cls The class to construct.
* @param arguments A list of expression to use as arguments to the constructor.
* @param propagateNull When true, if any of the arguments is null, then null will be returned
* instead of trying to construct the object.
* @param dataType The type of object being constructed, as a Spark SQL datatype. This allows you
* to manually specify the type when the object in question is a valid internal
* representation (i.e. ArrayData) instead of an object.
* @param outerPointer If the object being constructed is an inner class, the outerPointer for the
* containing class must be specified. This parameter is defined as an optional
 *                     function, which allows us to get the outer pointer lazily, and it's useful if
* the inner class is defined in REPL.
*/
case class NewInstance(
cls: Class[_],
arguments: Seq[Expression],
propagateNull: Boolean,
dataType: DataType,
outerPointer: Option[() => AnyRef]) extends InvokeLike {
private val className = cls.getName
override def nullable: Boolean = needNullCheck
override def children: Seq[Expression] = arguments
override lazy val resolved: Boolean = {
// If the class to construct is an inner class, we need to get its outer pointer, or this
// expression should be regarded as unresolved.
// Note that static inner classes (e.g., inner classes within Scala objects) don't need
// outer pointer registration.
val needOuterPointer =
outerPointer.isEmpty && cls.isMemberClass && !Modifier.isStatic(cls.getModifiers)
childrenResolved && !needOuterPointer
}
@transient private lazy val constructor: (Seq[AnyRef]) => Any = {
val paramTypes = ScalaReflection.expressionJavaClasses(arguments)
val getConstructor = (paramClazz: Seq[Class[_]]) => {
ScalaReflection.findConstructor(cls, paramClazz).getOrElse {
sys.error(s"Couldn't find a valid constructor on $cls")
}
}
outerPointer.map { p =>
val outerObj = p()
val d = outerObj.getClass +: paramTypes
val c = getConstructor(outerObj.getClass +: paramTypes)
(args: Seq[AnyRef]) => {
c.newInstance(outerObj +: args: _*)
}
}.getOrElse {
val c = getConstructor(paramTypes)
(args: Seq[AnyRef]) => {
c.newInstance(args: _*)
}
}
}
override def eval(input: InternalRow): Any = {
val argValues = arguments.map(_.eval(input))
constructor(argValues.map(_.asInstanceOf[AnyRef]))
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val javaType = CodeGenerator.javaType(dataType)
val (argCode, argString, resultIsNull) = prepareArguments(ctx)
val outer = outerPointer.map(func => Literal.fromObject(func()).genCode(ctx))
ev.isNull = resultIsNull
val constructorCall = outer.map { gen =>
s"${gen.value}.new ${cls.getSimpleName}($argString)"
}.getOrElse {
s"new $className($argString)"
}
val code = code"""
$argCode
${outer.map(_.code).getOrElse("")}
final $javaType ${ev.value} = ${ev.isNull} ?
${CodeGenerator.defaultValue(dataType)} : $constructorCall;
"""
ev.copy(code = code)
}
override def toString: String = s"newInstance($cls)"
}
/**
 * Given an expression that returns an object of type `Option[_]`, this expression unwraps the
 * option into the specified Spark SQL datatype. In the case of `None`, the null bit is set instead.
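 *
 * For example, an illustrative sketch (not taken from this file), where `optionExpr` is an
 * assumed expression of `ObjectType(classOf[Option[Int]])`:
 *
 * {{{
 *   UnwrapOption(IntegerType, optionExpr)
 * }}}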
*
* @param dataType The expected unwrapped option type.
* @param child An expression that returns an `Option`
*/
case class UnwrapOption(
dataType: DataType,
child: Expression) extends UnaryExpression with NonSQLExpression with ExpectsInputTypes {
override def nullable: Boolean = true
override def inputTypes: Seq[AbstractDataType] = ObjectType :: Nil
override def eval(input: InternalRow): Any = {
val inputObject = child.eval(input)
if (inputObject == null) {
null
} else {
inputObject.asInstanceOf[Option[_]].orNull
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val javaType = CodeGenerator.javaType(dataType)
val inputObject = child.genCode(ctx)
val code = inputObject.code + code"""
final boolean ${ev.isNull} = ${inputObject.isNull} || ${inputObject.value}.isEmpty();
$javaType ${ev.value} = ${ev.isNull} ? ${CodeGenerator.defaultValue(dataType)} :
(${CodeGenerator.boxedType(javaType)}) ${inputObject.value}.get();
"""
ev.copy(code = code)
}
}
/**
* Converts the result of evaluating `child` into an option, checking both the isNull bit and
* (in the case of reference types) equality with null.
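 *
 * For example, an illustrative sketch (not taken from this file), where `intExpr` is an
 * assumed (possibly nullable) expression of `IntegerType`:
 *
 * {{{
 *   WrapOption(intExpr, IntegerType)
 * }}}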
*
* @param child The expression to evaluate and wrap.
* @param optType The type of this option.
*/
case class WrapOption(child: Expression, optType: DataType)
extends UnaryExpression with NonSQLExpression with ExpectsInputTypes {
override def dataType: DataType = ObjectType(classOf[Option[_]])
override def nullable: Boolean = false
override def inputTypes: Seq[AbstractDataType] = optType :: Nil
override def eval(input: InternalRow): Any = Option(child.eval(input))
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val inputObject = child.genCode(ctx)
val code = inputObject.code + code"""
scala.Option ${ev.value} =
${inputObject.isNull} ?
scala.Option$$.MODULE$$.apply(null) : new scala.Some(${inputObject.value});
"""
ev.copy(code = code, isNull = FalseLiteral)
}
}
/**
* A placeholder for the loop variable used in [[MapObjects]]. This should never be constructed
* manually, but will instead be passed into the provided lambda function.
*/
case class LambdaVariable(
value: String,
isNull: String,
dataType: DataType,
nullable: Boolean = true) extends LeafExpression with NonSQLExpression {
private val accessor: (InternalRow, Int) => Any = InternalRow.getAccessor(dataType)
  // Interpreted execution of `LambdaVariable` always gets the 0-index element from the input row.
override def eval(input: InternalRow): Any = {
assert(input.numFields == 1,
"The input row of interpreted LambdaVariable should have only 1 field.")
if (nullable && input.isNullAt(0)) {
null
} else {
accessor(input, 0)
}
}
override def genCode(ctx: CodegenContext): ExprCode = {
val isNullValue = if (nullable) {
JavaCode.isNullVariable(isNull)
} else {
FalseLiteral
}
ExprCode(value = JavaCode.variable(value, dataType), isNull = isNullValue)
}
  // This won't be called as `genCode` is overridden; it is only defined here to make
  // `LambdaVariable` non-abstract.
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = ev
}
/**
* When constructing [[MapObjects]], the element type must be given, which may not be available
* before analysis. This class acts like a placeholder for [[MapObjects]], and will be replaced by
* [[MapObjects]] during analysis after the input data is resolved.
* Note that, ideally we should not serialize and send unresolved expressions to executors, but
 * users may accidentally do this (e.g. mistakenly reference an encoder instance when implementing
 * Aggregator). Here we mark `function` as transient because it may reference scala Type, which is
 * not serializable. Then even if users mistakenly reference an unresolved expression and serialize it,
 * it's just a performance issue (more network traffic), and will not fail.
*/
case class UnresolvedMapObjects(
@transient function: Expression => Expression,
child: Expression,
customCollectionCls: Option[Class[_]] = None) extends UnaryExpression with Unevaluable {
override lazy val resolved = false
override def dataType: DataType = customCollectionCls.map(ObjectType.apply).getOrElse {
throw new UnsupportedOperationException("not resolved")
}
}
object MapObjects {
private val curId = new java.util.concurrent.atomic.AtomicInteger()
/**
* Construct an instance of MapObjects case class.
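   *
   * For example, an illustrative sketch (not taken from this file) that adds one to every
   * element of an assumed `ArrayType(IntegerType)` input expression `inputArray`:
   *
   * {{{
   *   MapObjects(elem => Add(elem, Literal(1)), inputArray, IntegerType)
   * }}}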
*
* @param function The function applied on the collection elements.
* @param inputData An expression that when evaluated returns a collection object.
* @param elementType The data type of elements in the collection.
   * @param elementNullable When false, indicates that elements in the collection are always
   *                        non-null values.
* @param customCollectionCls Class of the resulting collection (returning ObjectType)
* or None (returning ArrayType)
*/
def apply(
function: Expression => Expression,
inputData: Expression,
elementType: DataType,
elementNullable: Boolean = true,
customCollectionCls: Option[Class[_]] = None): MapObjects = {
val id = curId.getAndIncrement()
val loopValue = s"MapObjects_loopValue$id"
val loopIsNull = if (elementNullable) {
s"MapObjects_loopIsNull$id"
} else {
"false"
}
val loopVar = LambdaVariable(loopValue, loopIsNull, elementType, elementNullable)
MapObjects(
loopValue, loopIsNull, elementType, function(loopVar), inputData, customCollectionCls)
}
}
/**
* Applies the given expression to every element of a collection of items, returning the result
* as an ArrayType or ObjectType. This is similar to a typical map operation, but where the lambda
* function is expressed using catalyst expressions.
*
* The type of the result is determined as follows:
* - ArrayType - when customCollectionCls is None
* - ObjectType(collection) - when customCollectionCls contains a collection class
*
* The following collection ObjectTypes are currently supported on input:
* Seq, Array, ArrayData, java.util.List
*
 * @param loopValue the name of the loop variable that is used when iterating over the collection,
 *                  and used as input for the `lambdaFunction`
 * @param loopIsNull the nullity of the loop variable that is used when iterating over the collection,
 *                   and used as input for the `lambdaFunction`
 * @param loopVarDataType the data type of the loop variable that is used when iterating over the
 *                        collection, and used as input for the `lambdaFunction`
* @param lambdaFunction A function that take the `loopVar` as input, and used as lambda function
* to handle collection elements.
* @param inputData An expression that when evaluated returns a collection object.
* @param customCollectionCls Class of the resulting collection (returning ObjectType)
* or None (returning ArrayType)
*/
case class MapObjects private(
loopValue: String,
loopIsNull: String,
loopVarDataType: DataType,
lambdaFunction: Expression,
inputData: Expression,
customCollectionCls: Option[Class[_]]) extends Expression with NonSQLExpression {
override def nullable: Boolean = inputData.nullable
override def children: Seq[Expression] = lambdaFunction :: inputData :: Nil
  // Data with a UserDefinedType is actually stored using the data type of its sqlType.
  // When we want to apply MapObjects to it, we have to use that sqlType.
lazy private val inputDataType = inputData.dataType match {
case u: UserDefinedType[_] => u.sqlType
case _ => inputData.dataType
}
private def executeFuncOnCollection(inputCollection: Seq[_]): Iterator[_] = {
val row = new GenericInternalRow(1)
inputCollection.toIterator.map { element =>
row.update(0, element)
lambdaFunction.eval(row)
}
}
private lazy val convertToSeq: Any => Seq[_] = inputDataType match {
case ObjectType(cls) if classOf[Seq[_]].isAssignableFrom(cls) =>
_.asInstanceOf[Seq[_]]
case ObjectType(cls) if cls.isArray =>
_.asInstanceOf[Array[_]].toSeq
case ObjectType(cls) if classOf[java.util.List[_]].isAssignableFrom(cls) =>
_.asInstanceOf[java.util.List[_]].asScala
case ObjectType(cls) if cls == classOf[Object] =>
(inputCollection) => {
if (inputCollection.getClass.isArray) {
inputCollection.asInstanceOf[Array[_]].toSeq
} else {
inputCollection.asInstanceOf[Seq[_]]
}
}
case ArrayType(et, _) =>
_.asInstanceOf[ArrayData].toSeq[Any](et)
}
private lazy val mapElements: Seq[_] => Any = customCollectionCls match {
case Some(cls) if classOf[Seq[_]].isAssignableFrom(cls) =>
// Scala sequence
executeFuncOnCollection(_).toSeq
case Some(cls) if classOf[scala.collection.Set[_]].isAssignableFrom(cls) =>
// Scala set
executeFuncOnCollection(_).toSet
case Some(cls) if classOf[java.util.List[_]].isAssignableFrom(cls) =>
// Java list
if (cls == classOf[java.util.List[_]] || cls == classOf[java.util.AbstractList[_]] ||
cls == classOf[java.util.AbstractSequentialList[_]]) {
        // Specifying non-concrete implementations of `java.util.List`
executeFuncOnCollection(_).toSeq.asJava
} else {
val constructors = cls.getConstructors()
val intParamConstructor = constructors.find { constructor =>
constructor.getParameterCount == 1 && constructor.getParameterTypes()(0) == classOf[Int]
}
val noParamConstructor = constructors.find { constructor =>
constructor.getParameterCount == 0
}
val constructor = intParamConstructor.map { intConstructor =>
(len: Int) => intConstructor.newInstance(len.asInstanceOf[Object])
}.getOrElse {
(_: Int) => noParamConstructor.get.newInstance()
}
// Specifying concrete implementations of `java.util.List`
(inputs) => {
val results = executeFuncOnCollection(inputs)
val builder = constructor(inputs.length).asInstanceOf[java.util.List[Any]]
results.foreach(builder.add(_))
builder
}
}
case None =>
// array
x => new GenericArrayData(executeFuncOnCollection(x).toArray)
case Some(cls) =>
throw new RuntimeException(s"class `${cls.getName}` is not supported by `MapObjects` as " +
"resulting collection.")
}
override def eval(input: InternalRow): Any = {
val inputCollection = inputData.eval(input)
if (inputCollection == null) {
return null
}
mapElements(convertToSeq(inputCollection))
}
override def dataType: DataType =
customCollectionCls.map(ObjectType.apply).getOrElse(
ArrayType(lambdaFunction.dataType, containsNull = lambdaFunction.nullable))
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val elementJavaType = CodeGenerator.javaType(loopVarDataType)
ctx.addMutableState(elementJavaType, loopValue, forceInline = true, useFreshName = false)
val genInputData = inputData.genCode(ctx)
val genFunction = lambdaFunction.genCode(ctx)
val dataLength = ctx.freshName("dataLength")
val convertedArray = ctx.freshName("convertedArray")
val loopIndex = ctx.freshName("loopIndex")
val convertedType = CodeGenerator.boxedType(lambdaFunction.dataType)
    // Because of the way Java defines nested arrays, we have to handle the syntax specially.
    // Specifically, we have to insert the [$dataLength] in between the raw type and any extra
    // nested array declarations (e.g. `new String[$dataLength][]` for a nested `String[][]`).
val arrayConstructor = if (convertedType contains "[]") {
val rawType = convertedType.takeWhile(_ != '[')
val arrayPart = convertedType.reverse.takeWhile(c => c == '[' || c == ']').reverse
s"new $rawType[$dataLength]$arrayPart"
} else {
s"new $convertedType[$dataLength]"
}
// In RowEncoder, we use `Object` to represent Array or Seq, so we need to determine the type
// of input collection at runtime for this case.
val seq = ctx.freshName("seq")
val array = ctx.freshName("array")
val determineCollectionType = inputData.dataType match {
case ObjectType(cls) if cls == classOf[Object] =>
val seqClass = classOf[Seq[_]].getName
s"""
$seqClass $seq = null;
$elementJavaType[] $array = null;
if (${genInputData.value}.getClass().isArray()) {
$array = ($elementJavaType[]) ${genInputData.value};
} else {
$seq = ($seqClass) ${genInputData.value};
}
"""
case _ => ""
}
    // `MapObjects` generates a while loop to traverse the elements of the input collection. We
    // need to take care with Seq and List because they may have O(n) complexity for indexed access
    // like `list.get(1)`. Here we use an Iterator to traverse Seq and List.
val (getLength, prepareLoop, getLoopVar) = inputDataType match {
case ObjectType(cls) if classOf[Seq[_]].isAssignableFrom(cls) =>
val it = ctx.freshName("it")
(
s"${genInputData.value}.size()",
s"scala.collection.Iterator $it = ${genInputData.value}.toIterator();",
s"$it.next()"
)
case ObjectType(cls) if cls.isArray =>
(
s"${genInputData.value}.length",
"",
s"${genInputData.value}[$loopIndex]"
)
case ObjectType(cls) if classOf[java.util.List[_]].isAssignableFrom(cls) =>
val it = ctx.freshName("it")
(
s"${genInputData.value}.size()",
s"java.util.Iterator $it = ${genInputData.value}.iterator();",
s"$it.next()"
)
case ArrayType(et, _) =>
(
s"${genInputData.value}.numElements()",
"",
CodeGenerator.getValue(genInputData.value, et, loopIndex)
)
case ObjectType(cls) if cls == classOf[Object] =>
val it = ctx.freshName("it")
(
s"$seq == null ? $array.length : $seq.size()",
s"scala.collection.Iterator $it = $seq == null ? null : $seq.toIterator();",
s"$it == null ? $array[$loopIndex] : $it.next()"
)
}
// Make a copy of the data if it's unsafe-backed
def makeCopyIfInstanceOf(clazz: Class[_ <: Any], value: String) =
s"$value instanceof ${clazz.getSimpleName}? ${value}.copy() : $value"
val genFunctionValue: String = lambdaFunction.dataType match {
case StructType(_) => makeCopyIfInstanceOf(classOf[UnsafeRow], genFunction.value)
case ArrayType(_, _) => makeCopyIfInstanceOf(classOf[UnsafeArrayData], genFunction.value)
case MapType(_, _, _) => makeCopyIfInstanceOf(classOf[UnsafeMapData], genFunction.value)
case _ => genFunction.value
}
val loopNullCheck = if (loopIsNull != "false") {
ctx.addMutableState(
CodeGenerator.JAVA_BOOLEAN, loopIsNull, forceInline = true, useFreshName = false)
inputDataType match {
case _: ArrayType => s"$loopIsNull = ${genInputData.value}.isNullAt($loopIndex);"
case _ => s"$loopIsNull = $loopValue == null;"
}
} else {
""
}
val (initCollection, addElement, getResult): (String, String => String, String) =
customCollectionCls match {
case Some(cls) if classOf[Seq[_]].isAssignableFrom(cls) ||
classOf[scala.collection.Set[_]].isAssignableFrom(cls) =>
// Scala sequence or set
val getBuilder = s"${cls.getName}$$.MODULE$$.newBuilder()"
val builder = ctx.freshName("collectionBuilder")
(
s"""
${classOf[Builder[_, _]].getName} $builder = $getBuilder;
$builder.sizeHint($dataLength);
""",
genValue => s"$builder.$$plus$$eq($genValue);",
s"(${cls.getName}) $builder.result();"
)
case Some(cls) if classOf[java.util.List[_]].isAssignableFrom(cls) =>
// Java list
val builder = ctx.freshName("collectionBuilder")
(
if (cls == classOf[java.util.List[_]] || cls == classOf[java.util.AbstractList[_]] ||
cls == classOf[java.util.AbstractSequentialList[_]]) {
s"${cls.getName} $builder = new java.util.ArrayList($dataLength);"
} else {
val param = Try(cls.getConstructor(Integer.TYPE)).map(_ => dataLength).getOrElse("")
s"${cls.getName} $builder = new ${cls.getName}($param);"
},
genValue => s"$builder.add($genValue);",
s"$builder;"
)
case None =>
// array
(
s"""
$convertedType[] $convertedArray = null;
$convertedArray = $arrayConstructor;
""",
genValue => s"$convertedArray[$loopIndex] = $genValue;",
s"new ${classOf[GenericArrayData].getName}($convertedArray);"
)
}
val code = genInputData.code + code"""
${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
if (!${genInputData.isNull}) {
$determineCollectionType
int $dataLength = $getLength;
$initCollection
int $loopIndex = 0;
$prepareLoop
while ($loopIndex < $dataLength) {
$loopValue = ($elementJavaType) ($getLoopVar);
$loopNullCheck
${genFunction.code}
if (${genFunction.isNull}) {
${addElement("null")}
} else {
${addElement(genFunctionValue)}
}
$loopIndex += 1;
}
${ev.value} = $getResult
}
"""
ev.copy(code = code, isNull = genInputData.isNull)
}
}
object CatalystToExternalMap {
private val curId = new java.util.concurrent.atomic.AtomicInteger()
/**
* Construct an instance of CatalystToExternalMap case class.
*
* @param keyFunction The function applied on the key collection elements.
* @param valueFunction The function applied on the value collection elements.
* @param inputData An expression that when evaluated returns a map object.
* @param collClass The type of the resulting collection.
*/
def apply(
keyFunction: Expression => Expression,
valueFunction: Expression => Expression,
inputData: Expression,
collClass: Class[_]): CatalystToExternalMap = {
val id = curId.getAndIncrement()
val keyLoopValue = s"CatalystToExternalMap_keyLoopValue$id"
val mapType = inputData.dataType.asInstanceOf[MapType]
val keyLoopVar = LambdaVariable(keyLoopValue, "", mapType.keyType, nullable = false)
val valueLoopValue = s"CatalystToExternalMap_valueLoopValue$id"
val valueLoopIsNull = if (mapType.valueContainsNull) {
s"CatalystToExternalMap_valueLoopIsNull$id"
} else {
"false"
}
val valueLoopVar = LambdaVariable(valueLoopValue, valueLoopIsNull, mapType.valueType)
CatalystToExternalMap(
keyLoopValue, keyFunction(keyLoopVar),
valueLoopValue, valueLoopIsNull, valueFunction(valueLoopVar),
inputData, collClass)
}
}
/**
* Expression used to convert a Catalyst Map to an external Scala Map.
* The collection is constructed using the associated builder, obtained by calling `newBuilder`
* on the collection's companion object.
*
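 * For illustration only (the input expression and target class below are hypothetical), the
 * companion `apply` is typically wired up along these lines:
 * {{{
 *   CatalystToExternalMap(
 *     keyFunction = (key: Expression) => key,        // leave keys unchanged
 *     valueFunction = (value: Expression) => value,  // leave values unchanged
 *     inputData = mapColumn,                         // an expression of MapType
 *     collClass = classOf[scala.collection.immutable.Map[_, _]])
 * }}}
 *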
* @param keyLoopValue the name of the loop variable that is used when iterating over the key
* collection, and which is used as input for the `keyLambdaFunction`
* @param keyLambdaFunction A function that takes the `keyLoopVar` as input, and is used as
* a lambda function to handle collection elements.
* @param valueLoopValue the name of the loop variable that is used when iterating over the value
* collection, and which is used as input for the `valueLambdaFunction`
* @param valueLoopIsNull the nullability of the loop variable that is used when iterating over
* the value collection, and which is used as input for the
* `valueLambdaFunction`
* @param valueLambdaFunction A function that takes the `valueLoopVar` as input, and is used as
* a lambda function to handle collection elements.
* @param inputData An expression that when evaluated returns a map object.
* @param collClass The type of the resulting collection.
*/
case class CatalystToExternalMap private(
keyLoopValue: String,
keyLambdaFunction: Expression,
valueLoopValue: String,
valueLoopIsNull: String,
valueLambdaFunction: Expression,
inputData: Expression,
collClass: Class[_]) extends Expression with NonSQLExpression {
override def nullable: Boolean = inputData.nullable
override def children: Seq[Expression] =
keyLambdaFunction :: valueLambdaFunction :: inputData :: Nil
private lazy val inputMapType = inputData.dataType.asInstanceOf[MapType]
private lazy val keyConverter =
CatalystTypeConverters.createToScalaConverter(inputMapType.keyType)
private lazy val valueConverter =
CatalystTypeConverters.createToScalaConverter(inputMapType.valueType)
private lazy val (newMapBuilderMethod, moduleField) = {
val clazz = Utils.classForName(collClass.getCanonicalName + "$")
(clazz.getMethod("newBuilder"), clazz.getField("MODULE$").get(null))
}
private def newMapBuilder(): Builder[AnyRef, AnyRef] = {
newMapBuilderMethod.invoke(moduleField).asInstanceOf[Builder[AnyRef, AnyRef]]
}
override def eval(input: InternalRow): Any = {
val result = inputData.eval(input).asInstanceOf[MapData]
if (result != null) {
val builder = newMapBuilder()
builder.sizeHint(result.numElements())
val keyArray = result.keyArray()
val valueArray = result.valueArray()
var i = 0
while (i < result.numElements()) {
val key = keyConverter(keyArray.get(i, inputMapType.keyType))
val value = valueConverter(valueArray.get(i, inputMapType.valueType))
builder += Tuple2(key, value)
i += 1
}
builder.result()
} else {
null
}
}
override def dataType: DataType = ObjectType(collClass)
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
    // Data with a PythonUserDefinedType is actually stored with the data type of its sqlType.
    // When we want to apply MapObjects to it, we have to use that sqlType.
def inputDataType(dataType: DataType) = dataType match {
case p: PythonUserDefinedType => p.sqlType
case _ => dataType
}
val mapType = inputDataType(inputData.dataType).asInstanceOf[MapType]
val keyElementJavaType = CodeGenerator.javaType(mapType.keyType)
ctx.addMutableState(keyElementJavaType, keyLoopValue, forceInline = true, useFreshName = false)
val genKeyFunction = keyLambdaFunction.genCode(ctx)
val valueElementJavaType = CodeGenerator.javaType(mapType.valueType)
ctx.addMutableState(valueElementJavaType, valueLoopValue, forceInline = true,
useFreshName = false)
val genValueFunction = valueLambdaFunction.genCode(ctx)
val genInputData = inputData.genCode(ctx)
val dataLength = ctx.freshName("dataLength")
val loopIndex = ctx.freshName("loopIndex")
val tupleLoopValue = ctx.freshName("tupleLoopValue")
val builderValue = ctx.freshName("builderValue")
val getLength = s"${genInputData.value}.numElements()"
val keyArray = ctx.freshName("keyArray")
val valueArray = ctx.freshName("valueArray")
val getKeyArray =
s"${classOf[ArrayData].getName} $keyArray = ${genInputData.value}.keyArray();"
val getKeyLoopVar = CodeGenerator.getValue(keyArray, inputDataType(mapType.keyType), loopIndex)
val getValueArray =
s"${classOf[ArrayData].getName} $valueArray = ${genInputData.value}.valueArray();"
val getValueLoopVar = CodeGenerator.getValue(
valueArray, inputDataType(mapType.valueType), loopIndex)
// Make a copy of the data if it's unsafe-backed
def makeCopyIfInstanceOf(clazz: Class[_ <: Any], value: String) =
s"$value instanceof ${clazz.getSimpleName}? $value.copy() : $value"
def genFunctionValue(lambdaFunction: Expression, genFunction: ExprCode) =
lambdaFunction.dataType match {
case StructType(_) => makeCopyIfInstanceOf(classOf[UnsafeRow], genFunction.value)
case ArrayType(_, _) => makeCopyIfInstanceOf(classOf[UnsafeArrayData], genFunction.value)
case MapType(_, _, _) => makeCopyIfInstanceOf(classOf[UnsafeMapData], genFunction.value)
case _ => genFunction.value
}
val genKeyFunctionValue = genFunctionValue(keyLambdaFunction, genKeyFunction)
val genValueFunctionValue = genFunctionValue(valueLambdaFunction, genValueFunction)
val valueLoopNullCheck = if (valueLoopIsNull != "false") {
ctx.addMutableState(CodeGenerator.JAVA_BOOLEAN, valueLoopIsNull, forceInline = true,
useFreshName = false)
s"$valueLoopIsNull = $valueArray.isNullAt($loopIndex);"
} else {
""
}
val builderClass = classOf[Builder[_, _]].getName
val constructBuilder = s"""
$builderClass $builderValue = ${collClass.getName}$$.MODULE$$.newBuilder();
$builderValue.sizeHint($dataLength);
"""
val tupleClass = classOf[(_, _)].getName
val appendToBuilder = s"""
$tupleClass $tupleLoopValue;
if (${genValueFunction.isNull}) {
$tupleLoopValue = new $tupleClass($genKeyFunctionValue, null);
} else {
$tupleLoopValue = new $tupleClass($genKeyFunctionValue, $genValueFunctionValue);
}
$builderValue.$$plus$$eq($tupleLoopValue);
"""
val getBuilderResult = s"${ev.value} = (${collClass.getName}) $builderValue.result();"
val code = genInputData.code + code"""
${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
if (!${genInputData.isNull}) {
int $dataLength = $getLength;
$constructBuilder
$getKeyArray
$getValueArray
int $loopIndex = 0;
while ($loopIndex < $dataLength) {
$keyLoopValue = ($keyElementJavaType) ($getKeyLoopVar);
$valueLoopValue = ($valueElementJavaType) ($getValueLoopVar);
$valueLoopNullCheck
${genKeyFunction.code}
${genValueFunction.code}
$appendToBuilder
$loopIndex += 1;
}
$getBuilderResult
}
"""
ev.copy(code = code, isNull = genInputData.isNull)
}
}
object ExternalMapToCatalyst {
private val curId = new java.util.concurrent.atomic.AtomicInteger()
def apply(
inputMap: Expression,
keyType: DataType,
keyConverter: Expression => Expression,
keyNullable: Boolean,
valueType: DataType,
valueConverter: Expression => Expression,
valueNullable: Boolean): ExternalMapToCatalyst = {
val id = curId.getAndIncrement()
val keyName = "ExternalMapToCatalyst_key" + id
val keyIsNull = if (keyNullable) {
"ExternalMapToCatalyst_key_isNull" + id
} else {
"false"
}
val valueName = "ExternalMapToCatalyst_value" + id
val valueIsNull = if (valueNullable) {
"ExternalMapToCatalyst_value_isNull" + id
} else {
"false"
}
ExternalMapToCatalyst(
keyName,
keyIsNull,
keyType,
keyConverter(LambdaVariable(keyName, keyIsNull, keyType, keyNullable)),
valueName,
valueIsNull,
valueType,
valueConverter(LambdaVariable(valueName, valueIsNull, valueType, valueNullable)),
inputMap
)
}
}
/**
 * Converts a Scala/Java map object into catalyst format, by applying the key/value converters
 * while iterating over the map.
*
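 * For illustration only (the input expression below is hypothetical, and the identity converters
 * are a sketch; real callers convert keys/values into catalyst format), the companion `apply`
 * is typically invoked along these lines:
 * {{{
 *   ExternalMapToCatalyst(
 *     inputMap = externalMapExpr,              // e.g. an ObjectType(classOf[Map[_, _]]) expression
 *     keyType = ObjectType(classOf[String]),
 *     keyConverter = (key: Expression) => key,
 *     keyNullable = false,
 *     valueType = ObjectType(classOf[java.lang.Integer]),
 *     valueConverter = (value: Expression) => value,
 *     valueNullable = true)
 * }}}
 *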
 * @param key the name of the map key variable that is used when iterating over the map, and that
 *            is used as input for the `keyConverter`
 * @param keyIsNull the nullability of the map key variable that is used when iterating over the
 *                  map, and that is used as input for the `keyConverter`
 * @param keyType the data type of the map key variable that is used when iterating over the map,
 *                and that is used as input for the `keyConverter`
 * @param keyConverter A function that takes the `key` as input, and converts it to catalyst format.
 * @param value the name of the map value variable that is used when iterating over the map, and
 *              that is used as input for the `valueConverter`
 * @param valueIsNull the nullability of the map value variable that is used when iterating over
 *                    the map, and that is used as input for the `valueConverter`
 * @param valueType the data type of the map value variable that is used when iterating over the
 *                  map, and that is used as input for the `valueConverter`
 * @param valueConverter A function that takes the `value` as input, and converts it to catalyst
 *                       format.
* @param child An expression that when evaluated returns the input map object.
*/
case class ExternalMapToCatalyst private(
key: String,
keyIsNull: String,
keyType: DataType,
keyConverter: Expression,
value: String,
valueIsNull: String,
valueType: DataType,
valueConverter: Expression,
child: Expression)
extends UnaryExpression with NonSQLExpression {
override def foldable: Boolean = false
override def dataType: MapType = MapType(
keyConverter.dataType, valueConverter.dataType, valueContainsNull = valueConverter.nullable)
private lazy val mapCatalystConverter: Any => (Array[Any], Array[Any]) = {
val rowBuffer = InternalRow.fromSeq(Array[Any](1))
def rowWrapper(data: Any): InternalRow = {
rowBuffer.update(0, data)
rowBuffer
}
child.dataType match {
case ObjectType(cls) if classOf[java.util.Map[_, _]].isAssignableFrom(cls) =>
(input: Any) => {
val data = input.asInstanceOf[java.util.Map[Any, Any]]
val keys = new Array[Any](data.size)
val values = new Array[Any](data.size)
val iter = data.entrySet().iterator()
var i = 0
while (iter.hasNext) {
val entry = iter.next()
val (key, value) = (entry.getKey, entry.getValue)
keys(i) = if (key != null) {
keyConverter.eval(rowWrapper(key))
} else {
throw new RuntimeException("Cannot use null as map key!")
}
values(i) = if (value != null) {
valueConverter.eval(rowWrapper(value))
} else {
null
}
i += 1
}
(keys, values)
}
case ObjectType(cls) if classOf[scala.collection.Map[_, _]].isAssignableFrom(cls) =>
(input: Any) => {
val data = input.asInstanceOf[scala.collection.Map[Any, Any]]
val keys = new Array[Any](data.size)
val values = new Array[Any](data.size)
var i = 0
for ((key, value) <- data) {
keys(i) = if (key != null) {
keyConverter.eval(rowWrapper(key))
} else {
throw new RuntimeException("Cannot use null as map key!")
}
values(i) = if (value != null) {
valueConverter.eval(rowWrapper(value))
} else {
null
}
i += 1
}
(keys, values)
}
}
}
override def eval(input: InternalRow): Any = {
val result = child.eval(input)
if (result != null) {
val (keys, values) = mapCatalystConverter(result)
new ArrayBasedMapData(new GenericArrayData(keys), new GenericArrayData(values))
} else {
null
}
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val inputMap = child.genCode(ctx)
val genKeyConverter = keyConverter.genCode(ctx)
val genValueConverter = valueConverter.genCode(ctx)
val length = ctx.freshName("length")
val index = ctx.freshName("index")
val convertedKeys = ctx.freshName("convertedKeys")
val convertedValues = ctx.freshName("convertedValues")
val entry = ctx.freshName("entry")
val entries = ctx.freshName("entries")
val keyElementJavaType = CodeGenerator.javaType(keyType)
val valueElementJavaType = CodeGenerator.javaType(valueType)
ctx.addMutableState(keyElementJavaType, key, forceInline = true, useFreshName = false)
ctx.addMutableState(valueElementJavaType, value, forceInline = true, useFreshName = false)
val (defineEntries, defineKeyValue) = child.dataType match {
case ObjectType(cls) if classOf[java.util.Map[_, _]].isAssignableFrom(cls) =>
val javaIteratorCls = classOf[java.util.Iterator[_]].getName
val javaMapEntryCls = classOf[java.util.Map.Entry[_, _]].getName
val defineEntries =
s"final $javaIteratorCls $entries = ${inputMap.value}.entrySet().iterator();"
val defineKeyValue =
s"""
final $javaMapEntryCls $entry = ($javaMapEntryCls) $entries.next();
$key = (${CodeGenerator.boxedType(keyType)}) $entry.getKey();
$value = (${CodeGenerator.boxedType(valueType)}) $entry.getValue();
"""
defineEntries -> defineKeyValue
case ObjectType(cls) if classOf[scala.collection.Map[_, _]].isAssignableFrom(cls) =>
val scalaIteratorCls = classOf[Iterator[_]].getName
val scalaMapEntryCls = classOf[Tuple2[_, _]].getName
val defineEntries = s"final $scalaIteratorCls $entries = ${inputMap.value}.iterator();"
val defineKeyValue =
s"""
final $scalaMapEntryCls $entry = ($scalaMapEntryCls) $entries.next();
$key = (${CodeGenerator.boxedType(keyType)}) $entry._1();
$value = (${CodeGenerator.boxedType(valueType)}) $entry._2();
"""
defineEntries -> defineKeyValue
}
val keyNullCheck = if (keyIsNull != "false") {
ctx.addMutableState(
CodeGenerator.JAVA_BOOLEAN, keyIsNull, forceInline = true, useFreshName = false)
s"$keyIsNull = $key == null;"
} else {
""
}
val valueNullCheck = if (valueIsNull != "false") {
ctx.addMutableState(
CodeGenerator.JAVA_BOOLEAN, valueIsNull, forceInline = true, useFreshName = false)
s"$valueIsNull = $value == null;"
} else {
""
}
val arrayCls = classOf[GenericArrayData].getName
val mapCls = classOf[ArrayBasedMapData].getName
val convertedKeyType = CodeGenerator.boxedType(keyConverter.dataType)
val convertedValueType = CodeGenerator.boxedType(valueConverter.dataType)
val code = inputMap.code +
code"""
${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
if (!${inputMap.isNull}) {
final int $length = ${inputMap.value}.size();
final Object[] $convertedKeys = new Object[$length];
final Object[] $convertedValues = new Object[$length];
int $index = 0;
$defineEntries
while($entries.hasNext()) {
$defineKeyValue
$keyNullCheck
$valueNullCheck
${genKeyConverter.code}
if (${genKeyConverter.isNull}) {
throw new RuntimeException("Cannot use null as map key!");
} else {
$convertedKeys[$index] = ($convertedKeyType) ${genKeyConverter.value};
}
${genValueConverter.code}
if (${genValueConverter.isNull}) {
$convertedValues[$index] = null;
} else {
$convertedValues[$index] = ($convertedValueType) ${genValueConverter.value};
}
$index++;
}
${ev.value} = new $mapCls(new $arrayCls($convertedKeys), new $arrayCls($convertedValues));
}
"""
ev.copy(code = code, isNull = inputMap.isNull)
}
}
/**
* Constructs a new external row, using the result of evaluating the specified expressions
* as content.
*
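 * For illustration only (the schema and children below are hypothetical):
 * {{{
 *   val schema = StructType(Seq(
 *     StructField("a", IntegerType),
 *     StructField("b", LongType)))
 *   // Evaluating this yields a GenericRowWithSchema(Array(1, 2L), schema)
 *   CreateExternalRow(Seq(Literal(1), Literal(2L)), schema)
 * }}}
 *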
 * @param children A list of expressions to use as the content of the external row.
*/
case class CreateExternalRow(children: Seq[Expression], schema: StructType)
extends Expression with NonSQLExpression {
override def dataType: DataType = ObjectType(classOf[Row])
override def nullable: Boolean = false
override def eval(input: InternalRow): Any = {
val values = children.map(_.eval(input)).toArray
new GenericRowWithSchema(values, schema)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val rowClass = classOf[GenericRowWithSchema].getName
val values = ctx.freshName("values")
val childrenCodes = children.zipWithIndex.map { case (e, i) =>
val eval = e.genCode(ctx)
s"""
|${eval.code}
|if (${eval.isNull}) {
| $values[$i] = null;
|} else {
| $values[$i] = ${eval.value};
|}
""".stripMargin
}
val childrenCode = ctx.splitExpressionsWithCurrentInputs(
expressions = childrenCodes,
funcName = "createExternalRow",
extraArguments = "Object[]" -> values :: Nil)
val schemaField = ctx.addReferenceObj("schema", schema)
val code =
code"""
|Object[] $values = new Object[${children.size}];
|$childrenCode
|final ${classOf[Row].getName} ${ev.value} = new $rowClass($values, $schemaField);
""".stripMargin
ev.copy(code = code, isNull = FalseLiteral)
}
}
/**
* Serializes an input object using a generic serializer (Kryo or Java).
*
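 * For illustration only (the child expression is hypothetical):
 * {{{
 *   // Serializes the object produced by `objExpr` into a BinaryType byte array using Kryo
 *   EncodeUsingSerializer(objExpr, kryo = true)
 * }}}
 *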
* @param kryo if true, use Kryo. Otherwise, use Java.
*/
case class EncodeUsingSerializer(child: Expression, kryo: Boolean)
extends UnaryExpression with NonSQLExpression with SerializerSupport {
override def nullSafeEval(input: Any): Any = {
serializerInstance.serialize(input).array()
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val serializer = addImmutableSerializerIfNeeded(ctx)
// Code to serialize.
val input = child.genCode(ctx)
val javaType = CodeGenerator.javaType(dataType)
val serialize = s"$serializer.serialize(${input.value}, null).array()"
val code = input.code + code"""
final $javaType ${ev.value} =
${input.isNull} ? ${CodeGenerator.defaultValue(dataType)} : $serialize;
"""
ev.copy(code = code, isNull = input.isNull)
}
override def dataType: DataType = BinaryType
}
/**
 * Deserializes an input object using a generic serializer (Kryo or Java). Note that the ClassTag
* is not an implicit parameter because TreeNode cannot copy implicit parameters.
*
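 * For illustration only (the child expression and target class are hypothetical):
 * {{{
 *   // Deserializes the BinaryType bytes produced by `bytesExpr` back into a Person instance
 *   DecodeUsingSerializer[Person](bytesExpr, ClassTag(classOf[Person]), kryo = true)
 * }}}
 *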
* @param kryo if true, use Kryo. Otherwise, use Java.
*/
case class DecodeUsingSerializer[T](child: Expression, tag: ClassTag[T], kryo: Boolean)
extends UnaryExpression with NonSQLExpression with SerializerSupport {
override def nullSafeEval(input: Any): Any = {
val inputBytes = java.nio.ByteBuffer.wrap(input.asInstanceOf[Array[Byte]])
serializerInstance.deserialize(inputBytes)
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val serializer = addImmutableSerializerIfNeeded(ctx)
// Code to deserialize.
val input = child.genCode(ctx)
val javaType = CodeGenerator.javaType(dataType)
val deserialize =
s"($javaType) $serializer.deserialize(java.nio.ByteBuffer.wrap(${input.value}), null)"
val code = input.code + code"""
final $javaType ${ev.value} =
${input.isNull} ? ${CodeGenerator.defaultValue(dataType)} : $deserialize;
"""
ev.copy(code = code, isNull = input.isNull)
}
override def dataType: DataType = ObjectType(tag.runtimeClass)
}
/**
* Initialize a Java Bean instance by setting its field values via setters.
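 *
 * For illustration only (the bean class and value expressions are hypothetical):
 * {{{
 *   // Creates a new Person bean and calls setName(...) / setAge(...) on it
 *   InitializeJavaBean(
 *     NewInstance(classOf[Person], Nil, ObjectType(classOf[Person])),
 *     Map("setName" -> nameExpr, "setAge" -> ageExpr))
 * }}}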
*/
case class InitializeJavaBean(beanInstance: Expression, setters: Map[String, Expression])
extends Expression with NonSQLExpression {
override def nullable: Boolean = beanInstance.nullable
override def children: Seq[Expression] = beanInstance +: setters.values.toSeq
override def dataType: DataType = beanInstance.dataType
private lazy val resolvedSetters = {
assert(beanInstance.dataType.isInstanceOf[ObjectType])
val ObjectType(beanClass) = beanInstance.dataType
setters.map {
case (name, expr) =>
// Looking for known type mapping.
// But also looking for general `Object`-type parameter for generic methods.
val paramTypes = ScalaReflection.expressionJavaClasses(Seq(expr)) ++ Seq(classOf[Object])
val methods = paramTypes.flatMap { fieldClass =>
try {
Some(beanClass.getDeclaredMethod(name, fieldClass))
} catch {
case e: NoSuchMethodException => None
}
}
if (methods.isEmpty) {
throw new NoSuchMethodException(s"""A method named "$name" is not declared """ +
"in any enclosing class nor any supertype")
}
methods.head -> expr
}
}
override def eval(input: InternalRow): Any = {
val instance = beanInstance.eval(input)
if (instance != null) {
val bean = instance.asInstanceOf[Object]
resolvedSetters.foreach {
case (setter, expr) =>
val paramVal = expr.eval(input)
// We don't call setter if input value is null.
if (paramVal != null) {
setter.invoke(bean, paramVal.asInstanceOf[AnyRef])
}
}
}
instance
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val instanceGen = beanInstance.genCode(ctx)
val javaBeanInstance = ctx.freshName("javaBean")
val beanInstanceJavaType = CodeGenerator.javaType(beanInstance.dataType)
val initialize = setters.map {
case (setterMethod, fieldValue) =>
val fieldGen = fieldValue.genCode(ctx)
s"""
|${fieldGen.code}
|if (!${fieldGen.isNull}) {
| $javaBeanInstance.$setterMethod(${fieldGen.value});
|}
""".stripMargin
}
val initializeCode = ctx.splitExpressionsWithCurrentInputs(
expressions = initialize.toSeq,
funcName = "initializeJavaBean",
extraArguments = beanInstanceJavaType -> javaBeanInstance :: Nil)
val code = instanceGen.code +
code"""
|$beanInstanceJavaType $javaBeanInstance = ${instanceGen.value};
|if (!${instanceGen.isNull}) {
| $initializeCode
|}
""".stripMargin
ev.copy(code = code, isNull = instanceGen.isNull, value = instanceGen.value)
}
}
/**
* Asserts that input values of a non-nullable child expression are not null.
*
* Note that there are cases where `child.nullable == true`, while we still need to add this
* assertion. Consider a nullable column `s` whose data type is a struct containing a non-nullable
* `Int` field named `i`. Expression `s.i` is nullable because `s` can be null. However, for all
* non-null `s`, `s.i` can't be null.
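 *
 * For illustration only (the child expression is hypothetical):
 * {{{
 *   // Throws a NullPointerException with a descriptive message if `childExpr` evaluates to null;
 *   // passing a walkedTypePath makes the message point at the offending field.
 *   AssertNotNull(childExpr)
 * }}}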
*/
case class AssertNotNull(child: Expression, walkedTypePath: Seq[String] = Nil)
extends UnaryExpression with NonSQLExpression {
override def dataType: DataType = child.dataType
override def foldable: Boolean = false
override def nullable: Boolean = false
override def flatArguments: Iterator[Any] = Iterator(child)
private val errMsg = "Null value appeared in non-nullable field:" +
    walkedTypePath.mkString("\n", "\n", "\n") +
"If the schema is inferred from a Scala tuple/case class, or a Java bean, " +
"please try to use scala.Option[_] or other nullable types " +
"(e.g. java.lang.Integer instead of int/scala.Int)."
override def eval(input: InternalRow): Any = {
val result = child.eval(input)
if (result == null) {
throw new NullPointerException(errMsg)
}
result
}
override protected def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val childGen = child.genCode(ctx)
// Use unnamed reference that doesn't create a local field here to reduce the number of fields
// because errMsgField is used only when the value is null.
val errMsgField = ctx.addReferenceObj("errMsg", errMsg)
val code = childGen.code + code"""
if (${childGen.isNull}) {
throw new NullPointerException($errMsgField);
}
"""
ev.copy(code = code, isNull = FalseLiteral, value = childGen.value)
}
}
/**
* Returns the value of field at index `index` from the external row `child`.
* This class can be viewed as [[GetStructField]] for [[Row]]s instead of [[InternalRow]]s.
*
 * Note that the input row and the field we try to get are both guaranteed to be non-null; if
 * either is null, a runtime exception will be thrown.
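 *
 * For illustration only (the child expression is hypothetical):
 * {{{
 *   // Extracts field 0 ("name") from an external Row produced by `externalRowExpr`
 *   GetExternalRowField(externalRowExpr, index = 0, fieldName = "name")
 * }}}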
*/
case class GetExternalRowField(
child: Expression,
index: Int,
fieldName: String) extends UnaryExpression with NonSQLExpression {
override def nullable: Boolean = false
override def dataType: DataType = ObjectType(classOf[Object])
private val errMsg = s"The ${index}th field '$fieldName' of input row cannot be null."
override def eval(input: InternalRow): Any = {
val inputRow = child.eval(input).asInstanceOf[Row]
if (inputRow == null) {
throw new RuntimeException("The input external row cannot be null.")
}
if (inputRow.isNullAt(index)) {
throw new RuntimeException(errMsg)
}
inputRow.get(index)
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
// Use unnamed reference that doesn't create a local field here to reduce the number of fields
// because errMsgField is used only when the field is null.
val errMsgField = ctx.addReferenceObj("errMsg", errMsg)
val row = child.genCode(ctx)
val code = code"""
${row.code}
if (${row.isNull}) {
throw new RuntimeException("The input external row cannot be null.");
}
if (${row.value}.isNullAt($index)) {
throw new RuntimeException($errMsgField);
}
final Object ${ev.value} = ${row.value}.get($index);
"""
ev.copy(code = code, isNull = FalseLiteral)
}
}
/**
 * Validates the actual data type of the input expression at runtime. If it doesn't match the
 * expectation, an exception is thrown.
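 *
 * For illustration only (the child expression is hypothetical):
 * {{{
 *   // Fails at runtime unless the extracted value is a valid external representation of an Int
 *   ValidateExternalType(GetExternalRowField(externalRowExpr, 0, "age"), IntegerType)
 * }}}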
*/
case class ValidateExternalType(child: Expression, expected: DataType)
extends UnaryExpression with NonSQLExpression with ExpectsInputTypes {
override def inputTypes: Seq[AbstractDataType] = Seq(ObjectType(classOf[Object]))
override def nullable: Boolean = child.nullable
override val dataType: DataType = RowEncoder.externalDataTypeForInput(expected)
private val errMsg = s" is not a valid external type for schema of ${expected.simpleString}"
private lazy val checkType: (Any) => Boolean = expected match {
case _: DecimalType =>
(value: Any) => {
value.isInstanceOf[java.math.BigDecimal] || value.isInstanceOf[scala.math.BigDecimal] ||
value.isInstanceOf[Decimal]
}
case _: ArrayType =>
(value: Any) => {
value.getClass.isArray || value.isInstanceOf[Seq[_]]
}
case _ =>
val dataTypeClazz = ScalaReflection.javaBoxedType(dataType)
(value: Any) => {
dataTypeClazz.isInstance(value)
}
}
override def eval(input: InternalRow): Any = {
val result = child.eval(input)
if (checkType(result)) {
result
} else {
throw new RuntimeException(s"${result.getClass.getName}$errMsg")
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
// Use unnamed reference that doesn't create a local field here to reduce the number of fields
// because errMsgField is used only when the type doesn't match.
val errMsgField = ctx.addReferenceObj("errMsg", errMsg)
val input = child.genCode(ctx)
val obj = input.value
val typeCheck = expected match {
case _: DecimalType =>
Seq(classOf[java.math.BigDecimal], classOf[scala.math.BigDecimal], classOf[Decimal])
.map(cls => s"$obj instanceof ${cls.getName}").mkString(" || ")
case _: ArrayType =>
s"$obj.getClass().isArray() || $obj instanceof ${classOf[Seq[_]].getName}"
case _ =>
s"$obj instanceof ${CodeGenerator.boxedType(dataType)}"
}
val code = code"""
${input.code}
${CodeGenerator.javaType(dataType)} ${ev.value} = ${CodeGenerator.defaultValue(dataType)};
if (!${input.isNull}) {
if ($typeCheck) {
${ev.value} = (${CodeGenerator.boxedType(dataType)}) $obj;
} else {
throw new RuntimeException($obj.getClass().getName() + $errMsgField);
}
}
"""
ev.copy(code = code, isNull = input.isNull)
}
}
| bravo-zhang/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/objects/objects.scala | Scala | apache-2.0 | 66,899 |
package org.scalacoin.marshallers
import org.scalacoin.protocol.{AssetAddress, Address, BitcoinAddress}
import spray.json._
/**
* Created by chris on 12/19/15.
*/
object BitcoinAddressProtocol extends DefaultJsonProtocol {
implicit val bitcoinAddressFormat = jsonFormat1(BitcoinAddress.apply _)
}
object AddressProtocol extends DefaultJsonProtocol {
implicit object AddressFormat extends RootJsonFormat[Address] {
override def read(jsValue: JsValue) = {
jsValue match {
case JsString(string) => string match {
case s if s(0) == 'a' => AssetAddress(s)
case s if s(0) == '1' || s(0) == '3' => BitcoinAddress(s)
case _ => throw new RuntimeException("Addresses should always start with 'a' '1' or '3'")
}
      case _ => throw new RuntimeException("Addresses should always be represented by a JsString")
}
}
override def write(address: Address) = {
val m: Map[String, JsValue] = Map("address" -> JsString(address.value))
JsObject(m)
}
}
}
| scalacoin/scalacoin | src/main/scala/org/scalacoin/marshallers/BitcoinAddressProtocol.scala | Scala | mit | 1,036 |
package tests.rescala.fullmv.mirrors
import java.util.concurrent.atomic.{AtomicInteger, AtomicReference}
import org.scalatest.funsuite.AnyFunSuite
import rescala.fullmv.DistributedFullMVApi.{FullMVEngine, Var, Evt, Signal, Event, ReactiveLocalClone}
import tests.rescala.testtools.Spawn
import scala.annotation.tailrec
import scala.concurrent.duration._
import scala.concurrent.TimeoutException
import scala.util.{Failure, Success}
class XShapeMirrorTest extends AnyFunSuite {
case class Data[+T](name: String, data: T)
case class Merge[+T](left: T, right: T)
def isGlitched(v: Merge[Data[Merge[_]]]): Boolean = v.left.data != v.right.data
val ports = new AtomicInteger(1099)
class SideHost(name: String) extends FullMVEngine(Duration.Zero, name) {
val counter = new AtomicInteger(0)
def nextValue() = Data(name, counter.getAndIncrement)
val source = Var(nextValue)
def step() = source.set(nextValue)
}
test("X-shaped serializability") {
val leftHost = new SideHost("left")
val rightHost = new SideHost("right")
val leftMerge = {
import leftHost._
val sourceFromRight = ReactiveLocalClone(rightHost.source, leftHost)
Signal { Data("lmerge", Merge(source(), sourceFromRight())) }
}
val rightMerge = {
import rightHost._
val sourceFromLeft = ReactiveLocalClone(leftHost.source, rightHost)
Signal { Data("rmerge", Merge(sourceFromLeft(), source())) }
}
object topHost extends FullMVEngine(Duration.Zero, "top")
import topHost._
val mergeFromLeft = ReactiveLocalClone(leftMerge, topHost)
val mergeFromRight = ReactiveLocalClone(rightMerge, topHost)
val merge = Signal { Merge(mergeFromLeft(), mergeFromRight()) }
val violations = new AtomicReference[List[Merge[Data[Merge[Data[Int]]]]]](Nil)
merge.observe { v =>
if (isGlitched(v)) {
@tailrec @inline def retryAdd(): Unit = {
val before = violations.get
if (!violations.compareAndSet(before, v :: before)) retryAdd()
}
retryAdd()
}
}
assert(violations.get.isEmpty)
assert(merge.readValueOnce === Merge(
Data("lmerge", Merge(Data("left", 0), Data("right", 0))),
Data("rmerge", Merge(Data("left", 0), Data("right", 0)))
))
leftHost.step()
assert(violations.get.isEmpty)
assert(merge.readValueOnce === Merge(
Data("lmerge", Merge(Data("left", 1), Data("right", 0))),
Data("rmerge", Merge(Data("left", 1), Data("right", 0)))
))
rightHost.step()
assert(violations.get.isEmpty)
assert(merge.readValueOnce === Merge(
Data("lmerge", Merge(Data("left", 1), Data("right", 1))),
Data("rmerge", Merge(Data("left", 1), Data("right", 1)))
))
val duration = 10000
println(s"starting X-Shape mirror stress test " + (if (duration == 0) "until key press"
else s"for ${duration / 1000} seconds..."))
var running: Boolean = true
def worker(host: SideHost) =
Spawn {
try {
var iterations = 1
while (running) {
host.step()
iterations += 1
if (violations.get.nonEmpty) throw new RuntimeException(s"Found Violations after iteration $iterations")
}
iterations
} catch {
case t: Throwable =>
running = false
throw t
}
}
val workerLeft = worker(leftHost)
val workerRight = worker(rightHost)
val workerTimeout = System.currentTimeMillis() + duration
while (running && (if (duration == 0) System.in.available() == 0 else System.currentTimeMillis() < workerTimeout)) {
Thread.sleep(50)
}
if (!running)
println(
s"Premature termination after ${(duration - (workerTimeout - System.currentTimeMillis())) / 1000} seconds"
)
running = false
val scoreLeft = workerLeft.awaitTry(500)
val scoreRight = workerRight.awaitTry(500)
val scores = Array(scoreLeft, scoreRight)
println("X-Shape mirror stress test thread results:")
println("\\t" + scores.zipWithIndex.map { case (count, idx) => idx + ": " + count }.mkString("\\n\\t"))
scores.find {
case Failure(ex: TimeoutException) => false
case Failure(_) => true
case Success(_) => false
}.asInstanceOf[Option[Failure[_]]].foreach {
case Failure(ex) =>
ex.printStackTrace()
}
scores.foldLeft(Option(0L)) {
case (None, _) => None
case (Some(score), Failure(_)) => None
case (Some(score), Success(moreScore)) => Some(score + moreScore)
} match {
case None =>
println("no total and stats due to failures. state snapshot:")
println(s"sources: ${leftHost.source.readValueOnce} and ${leftHost.source.readValueOnce}")
println(s"side merges: ${leftMerge.readValueOnce} and ${rightMerge.readValueOnce}")
println(s"top merge: ${merge.readValueOnce}")
fail("there were errors")
case Some(sum) =>
println(s"X-Shape mirror stress test totaled $sum iterations (individual scores: ${scores.mkString(", ")}")
assert(violations.get.isEmpty)
assert(merge.readValueOnce === Merge(
Data("lmerge", Merge(Data("left", scoreLeft.get), Data("right", scoreRight.get))),
Data("rmerge", Merge(Data("left", scoreLeft.get), Data("right", scoreRight.get)))
))
val hosts = Array(leftHost, rightHost)
hosts.foreach(host => println(s"$host orphan stats: ${host.cacheStatus}"))
println(" == Orphan listing == ")
hosts.foreach { host =>
if (!host.instances.isEmpty || !host.lockHost.instances.isEmpty) {
println(s"orphans on $host:")
val it1 = host.instances.values().iterator()
          while (it1.hasNext) println("\t" + it1.next())
val it2 = host.lockHost.instances.values().iterator()
          while (it2.hasNext) println("\t" + it2.next())
}
}
assert(hosts.map(host => host.instances.size() + host.lockHost.instances.size()).sum === 0)
}
}
}
| guidosalva/REScala | Code/Extensions/MultiversionDistributed/multiversion/src/test/scala/tests/rescala/fullmv/mirrors/XShapeMirrorTest.scala | Scala | apache-2.0 | 6,220 |
def f() = print("Hello World")
| clemus90/competitive-programming | hackerRank/FunctionalProgramming/HelloWorld.scala | Scala | mit | 31 |
package org.juitar.query.api
/**
* @author sha1n
* @since 6/12/14
*/
class ParserException(message: String, e: Exception)
extends RuntimeException(message, e) {
def this(e: Exception) = this(null, e)
def this(message: String) = this(message, null)
}
| sha1n/scala-rest | query-parser/src/main/scala/org/juitar/query/api/ParserException.scala | Scala | apache-2.0 | 263 |
package com.twitter.conversions.common
import com.twitter.common.quantity.Amount
import com.twitter.common.quantity.{Time => CommonTime}
import com.twitter.conversions.time._
import com.twitter.util.Duration
import java.util.concurrent.TimeUnit
object quantity {
val COMMON_FOREVER: Duration = 0.millis
class CommonDurationAdapter(d: Duration) {
def toIntAmount = Amount.of(d.inMillis.toInt, CommonTime.MILLISECONDS)
def toLongAmount = Amount.of(d.inMillis.toLong, CommonTime.MILLISECONDS)
}
/** Implicit conversion of Duration to CommonDuration */
implicit def commonDuration(d: Duration) = new CommonDurationAdapter(d)
class DurationAmountAdapter(a: Amount[java.lang.Long, CommonTime]) {
def toDuration: Duration = Duration(a.getValue.longValue, translateUnit(a.getUnit))
def translateUnit(unit: CommonTime) = unit match {
case CommonTime.DAYS => TimeUnit.DAYS
case CommonTime.HOURS => TimeUnit.HOURS
case CommonTime.MINUTES => TimeUnit.MINUTES
case CommonTime.MICROSECONDS => TimeUnit.MICROSECONDS
case CommonTime.MILLISECONDS => TimeUnit.MILLISECONDS
case CommonTime.NANOSECONDS => TimeUnit.NANOSECONDS
case CommonTime.SECONDS => TimeUnit.SECONDS
}
}
/** Implicit conversion of Amount to DurationAmountAdapter */
implicit def commonDuration(a: Amount[java.lang.Long, CommonTime]) = new DurationAmountAdapter(a)
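  /* Illustrative round-trip between a Twitter Duration and a Twitter Commons Amount
   * (values are hypothetical):
   *
   *   import com.twitter.conversions.time._
   *   import com.twitter.conversions.common.quantity._
   *   val amount = 5.seconds.toLongAmount   // Amount[java.lang.Long, Time] of 5000 ms
   *   val back   = amount.toDuration        // Duration of 5 seconds
   */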
}
| mosesn/util | util-zk-common/src/main/scala/com/twitter/conversions/common/quantity.scala | Scala | apache-2.0 | 1,430 |
package com.twitter.finagle.httpx
import com.google.common.base.Charsets
import com.twitter.finagle.httpx.netty.{HttpResponseProxy, Bijections}
import com.twitter.io.Reader
import org.jboss.netty.buffer.{ChannelBuffer, ChannelBuffers}
import org.jboss.netty.handler.codec.embedder.{DecoderEmbedder, EncoderEmbedder}
import org.jboss.netty.handler.codec.http.{
DefaultHttpResponse, HttpResponse, HttpResponseDecoder, HttpResponseEncoder,
HttpResponseStatus, HttpVersion
}
import Bijections._
/**
* Rich HttpResponse
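 *
 * Illustrative usage (the status change below is hypothetical):
 * {{{
 *   val rep = Response(Version.Http11, Status.Ok)
 *   rep.statusCode = 404
 *   rep.encodeString()   // serializes the response head, e.g. "HTTP/1.1 404 Not Found ..."
 * }}}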
*/
abstract class Response extends Message with HttpResponseProxy {
def isRequest = false
def status: Status = from(getStatus)
def status_=(value: Status) { setStatus(from(value)) }
def statusCode: Int = getStatus.getCode
def statusCode_=(value: Int) { setStatus(HttpResponseStatus.valueOf(value)) }
def getStatusCode(): Int = statusCode
def setStatusCode(value: Int) { statusCode = value }
/** Encode as an HTTP message */
def encodeString(): String = {
val encoder = new EncoderEmbedder[ChannelBuffer](new HttpResponseEncoder)
encoder.offer(httpResponse)
val buffer = encoder.poll()
buffer.toString(Charsets.UTF_8)
}
override def toString =
"Response(\\"" + version + " " + status + "\\")"
}
object Response {
/** Decode a Response from a String */
def decodeString(s: String): Response = {
val decoder = new DecoderEmbedder(
new HttpResponseDecoder(Int.MaxValue, Int.MaxValue, Int.MaxValue))
decoder.offer(ChannelBuffers.wrappedBuffer(s.getBytes(Charsets.UTF_8)))
val res = decoder.poll().asInstanceOf[HttpResponse]
assert(res ne null)
Response(res)
}
/** Create Response. */
def apply(): Response =
apply(Version.Http11, Status.Ok)
/** Create Response from version and status. */
def apply(version: Version, status: Status): Response =
apply(new DefaultHttpResponse(from(version), from(status)))
/**
* Create a Response from version, status, and Reader.
*/
def apply(version: Version, status: Status, reader: Reader): Response = {
val res = new DefaultHttpResponse(from(version), from(status))
res.setChunked(true)
apply(res, reader)
}
private[httpx] def apply(response: HttpResponse): Response =
new Response {
val httpResponse = response
}
private[httpx] def apply(response: HttpResponse, readerIn: Reader): Response =
new Response {
val httpResponse = response
override val reader = readerIn
}
/** Create Response from status. */
def apply(status: Status): Response =
apply(Version.Http11, status)
/** Create Response from Request. */
private[httpx] def apply(httpRequest: Request): Response =
new Response {
final val httpResponse =
new DefaultHttpResponse(httpRequest.getProtocolVersion, HttpResponseStatus.OK)
}
}
| Krasnyanskiy/finagle | finagle-httpx/src/main/scala/com/twitter/finagle/httpx/Response.scala | Scala | apache-2.0 | 2,891 |
package org.scaladebugger.api.profiles.java.requests.classes
import com.sun.jdi.event.ClassPrepareEvent
import org.scaladebugger.api.lowlevel.JDIArgument
import org.scaladebugger.api.lowlevel.classes._
import org.scaladebugger.api.lowlevel.events.EventManager
import org.scaladebugger.api.lowlevel.events.EventType._
import org.scaladebugger.api.lowlevel.requests.JDIRequestArgument
import org.scaladebugger.api.lowlevel.requests.properties.UniqueIdProperty
import org.scaladebugger.api.lowlevel.utils.JDIArgumentGroup
import org.scaladebugger.api.pipelines.Pipeline.IdentityPipeline
import org.scaladebugger.api.profiles.RequestHelper
import org.scaladebugger.api.profiles.traits.info.InfoProducer
import org.scaladebugger.api.profiles.traits.info.events.ClassPrepareEventInfo
import org.scaladebugger.api.profiles.traits.requests.classes.ClassPrepareRequest
import org.scaladebugger.api.virtualmachines.ScalaVirtualMachine
import scala.util.Try
/**
* Represents a java profile for class prepare events that adds no
* extra logic on top of the standard JDI.
*/
trait JavaClassPrepareRequest extends ClassPrepareRequest {
protected val classPrepareManager: ClassPrepareManager
protected val eventManager: EventManager
protected val scalaVirtualMachine: ScalaVirtualMachine
protected val infoProducer: InfoProducer
private lazy val eventProducer = infoProducer.eventProducer
/** Represents helper utility to create/manage requests. */
private lazy val requestHelper = newClassPrepareRequestHelper()
/**
* Constructs a new request helper for class prepare.
*
* @return The new request helper
*/
protected def newClassPrepareRequestHelper() = {
type E = ClassPrepareEvent
type EI = ClassPrepareEventInfo
type RequestArgs = Seq[JDIRequestArgument]
type CounterKey = Seq[JDIRequestArgument]
new RequestHelper[E, EI, RequestArgs, CounterKey](
scalaVirtualMachine = scalaVirtualMachine,
eventManager = eventManager,
etInstance = ClassPrepareEventType,
_newRequestId = () => java.util.UUID.randomUUID().toString,
_newRequest = (requestId, _, jdiRequestArgs) => {
classPrepareManager.createClassPrepareRequestWithId(
requestId,
jdiRequestArgs: _*
)
},
_hasRequest = (requestArgs) => {
classPrepareManager.classPrepareRequestList
.flatMap(classPrepareManager.getClassPrepareRequestInfo)
.map(_.extraArguments)
.map(_.filterNot(_.isInstanceOf[UniqueIdProperty]))
.contains(requestArgs)
},
_removeRequestById = (requestId) => {
classPrepareManager.removeClassPrepareRequest(requestId)
},
_newEventInfo = (s, event, jdiArgs) => {
eventProducer.newDefaultClassPrepareEventInfo(s, event, jdiArgs: _*)
},
_retrieveRequestInfo = classPrepareManager.getClassPrepareRequestInfo
)
}
/**
* Retrieves the collection of active and pending class prepare requests.
*
* @return The collection of information on class prepare requests
*/
override def classPrepareRequests: Seq[ClassPrepareRequestInfo] = {
val activeRequests = classPrepareManager.classPrepareRequestList.flatMap(
classPrepareManager.getClassPrepareRequestInfo
)
activeRequests ++ (classPrepareManager match {
case p: PendingClassPrepareSupportLike => p.pendingClassPrepareRequests
case _ => Nil
})
}
/**
* Constructs a stream of class prepare events.
*
* @param extraArguments The additional JDI arguments to provide
* @return The stream of class prepare events and any retrieved data based on
* requests from extra arguments
*/
override def tryGetOrCreateClassPrepareRequestWithData(
extraArguments: JDIArgument*
): Try[IdentityPipeline[ClassPrepareEventAndData]] = {
val JDIArgumentGroup(rArgs, eArgs, _) = JDIArgumentGroup(extraArguments: _*)
val requestArgs = rArgs
requestHelper.newRequest(requestArgs, rArgs)
.flatMap(id => requestHelper.newEventPipeline(id, eArgs, requestArgs))
}
/**
* Determines if the class prepare request with the specified arguments
* is pending.
*
* @param extraArguments The additional arguments provided to the specific
* class prepare request
* @return True if there is at least one class prepare request with the
* provided extra arguments that is pending, otherwise false
*/
override def isClassPrepareRequestWithArgsPending(
extraArguments: JDIArgument*
): Boolean = {
classPrepareRequests
.filter(_.extraArguments == extraArguments)
.exists(_.isPending)
}
/**
* Removes all class prepare requests with the specified extra arguments.
*
* @param extraArguments the additional arguments provided to the specific
* class prepare request
* @return Some information about the removed request if it existed,
* otherwise None
*/
override def removeClassPrepareRequestWithArgs(
extraArguments: JDIArgument*
): Option[ClassPrepareRequestInfo] = {
classPrepareRequests.find(_.extraArguments == extraArguments).filter(c =>
classPrepareManager.removeClassPrepareRequest(c.requestId)
)
}
/**
* Removes all class prepare requests.
*
* @return The collection of information about removed class prepare requests
*/
override def removeAllClassPrepareRequests(): Seq[ClassPrepareRequestInfo] = {
classPrepareRequests.filter(c =>
classPrepareManager.removeClassPrepareRequest(c.requestId)
)
}
}
| ensime/scala-debugger | scala-debugger-api/src/main/scala/org/scaladebugger/api/profiles/java/requests/classes/JavaClassPrepareRequest.scala | Scala | apache-2.0 | 5,637 |
package com.karasiq.bootstrap.context
import scala.language.postfixOps
import rx._
import com.karasiq.bootstrap.context.ReactiveBinds._
trait ReactiveRead[Element, Property] {
def bindRead(element: Element, property: Property): Unit
}
trait ReactiveWrite[Element, Property] {
def bindWrite(element: Element, property: Property): Unit
}
trait ReactiveRW[Element, Property] extends ReactiveRead[Element, Property] with ReactiveWrite[Element, Property]
object ReactiveBinds {
case class EventListener[EL, EV](`type`: String, callback: (EL, EV) ⇒ Unit)
case class Modify[E, V](value: Rx[V], func: (E, V) ⇒ Unit)
case class BindNode[N](value: Rx[N])
case class FormValue[T](value: Var[T])
case class Visibility(visible: Rx[Boolean])
}
/**
* Predefined binds
*/
trait ReactiveBinds { self: RenderingContext ⇒
protected type Event
protected type Renderable[T] = T ⇒ scalaTags.Frag
implicit val scalaRxContext: Ctx.Owner
implicit def rxEventListener[EL <: Element, EV <: Event]: ReactiveRead[EL, EventListener[EL, EV]]
implicit def rxModify[E <: Element, T]: ReactiveWrite[E, Modify[E, T]]
implicit def rxBindNode[E <: Element, N: Renderable]: ReactiveWrite[E, BindNode[N]]
implicit def rxFormValue[E <: Element]: ReactiveRW[E, FormValue[String]]
implicit def rxFormValueInt[E <: Element]: ReactiveRW[E, FormValue[Int]]
implicit def rxFormValueDouble[E <: Element]: ReactiveRW[E, FormValue[Double]]
implicit def rxFormValueBoolean[E <: Element]: ReactiveRW[E, FormValue[Boolean]]
implicit def rxFormValueStrings[E <: Element]: ReactiveRW[E, FormValue[Seq[String]]]
implicit def rxVisibility[E <: Element]: ReactiveWrite[E, Visibility]
}
| Karasiq/scalajs-bootstrap | context/shared/src/main/scala/com/karasiq/bootstrap/context/ReactiveBinds.scala | Scala | mit | 1,688 |
package se.lu.nateko.cp.meta.instanceserver
import scala.util.Try
import org.eclipse.rdf4j.model.IRI
import org.eclipse.rdf4j.model.Statement
import org.eclipse.rdf4j.model.Value
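/**
 * Wraps an [[InstanceServer]] and invokes a subscriber callback whenever a non-empty batch of
 * updates is applied.
 *
 * Illustrative usage (the inner server and updates are hypothetical):
 * {{{
 *   val notifying = new WriteNotifyingInstanceServer(innerServer)
 *   notifying.setSubscriber(() => println("triple store changed"))
 *   notifying.applyAll(updates)   // runs the callback when `updates` is non-empty
 * }}}
 */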
class WriteNotifyingInstanceServer(val inner: InstanceServer) extends InstanceServer {
private[this] var cb: Function0[Unit] = () => ()
def setSubscriber(sub: () => Unit): Unit = cb = sub
def unsetSubscriber(): Unit = cb = () => ()
def applyAll(updates: Seq[RdfUpdate]): Try[Unit] = {
val res = inner.applyAll(updates)
if(!updates.isEmpty) cb()
res
}
def factory = inner.factory
def filterNotContainedStatements(stats: IterableOnce[Statement]) = inner.filterNotContainedStatements(stats)
def getStatements(subj: Option[IRI], pred: Option[IRI], obj: Option[Value]) = inner.getStatements(subj, pred, obj)
def hasStatement(subj: Option[IRI], pred: Option[IRI], obj: Option[Value]) = inner.hasStatement(subj, pred, obj)
def makeNewInstance(prefix: IRI) = inner.makeNewInstance(prefix)
def readContexts = inner.readContexts
def writeContexts = inner.writeContexts
def withContexts(read: Seq[IRI], write: Seq[IRI]) = inner.withContexts(read, write)
}
| ICOS-Carbon-Portal/meta | src/main/scala/se/lu/nateko/cp/meta/instanceserver/WriteNotifyingInstanceServer.scala | Scala | gpl-3.0 | 1,150 |
/* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
package scaps.nucleus.querying
import scaps.nucleus.Type
import scaps.nucleus.TypeRef
import scaps.nucleus.TypeParam
import scaps.nucleus.Covariant
import scaps.nucleus.indexing.TypeView
import scaps.nucleus.indexing.TypeViewIndex
import scaps.nucleus.IndexAccess
import scaps.nucleus.indexing.TypeNormalization
private[nucleus] object QueryExpansion {
import scaps.nucleus.indexing.{ InternalTypes => I }
def expandQuery(tpe: Type, viewsFrom: Type => Seq[TypeView]): ExpandedQuery = {
import ExpandedQuery._
def parts(tr: TypeRef, depth: Int, dist: Float, outerTpes: Set[TypeRef], fraction: Double): Alternative = {
tr match {
case I.Ignored(v, args) =>
Sum(args.map { arg =>
val partF = fraction / args.length
alternatives(arg, depth, outerTpes, partF)
})
case _ =>
val partF = fraction / (1 + tr.args.length)
val parts = tr.args.map { arg =>
if (outerTpes.contains(arg)) Leaf(arg.variance, arg.name, partF, depth + 1, 1)
else alternatives(arg, depth + 1, outerTpes, partF)
}
Sum(Leaf(tr.variance, tr.name, partF, depth, dist) :: parts)
}
}
def alternatives(tr: TypeRef, depth: Int, outerTpes: Set[TypeRef], fraction: Double): Part = {
val alternativesWithDistance =
viewsFrom(Type(tpe.params, tr))
.flatMap { v =>
v(tr).map { alt =>
(TypeNormalization.substituteTypeParams(Type(tpe.params, alt)), v.distance, v.retainedInformation)
}
}
.distinct
.toList
val outerTpesAndAlts = outerTpes ++ alternativesWithDistance.map(_._1)
val alternativesParts =
alternativesWithDistance.map {
case (alt, dist, retainedInfo) =>
parts(alt, depth, dist, outerTpesAndAlts, fraction * retainedInfo)
}
Max(alternativesParts)
}
val expanded = ((TypeNormalization.substituteTypeParams _) andThen (TypeNormalization.normalize _))(tpe) match {
case f @ I.Fn(v, args, res) =>
val itpe = I.Ignored(v, args :+ res)
parts(itpe, 0, 0, Set(), 1)
case tr @ I.Ignored(_, _) =>
parts(tr, 0, 0, Set(), 1)
case tr =>
val itpe = I.Ignored(Covariant, tr :: Nil)
parts(itpe, 0, 0, Set(), 1)
}
expanded.minimize()
}
}
| scala-search/scaps | nucleus/src/main/scala/scaps/nucleus/querying/QueryExpansion.scala | Scala | mpl-2.0 | 2,582 |
//
// ExecutionExceptions.scala -- Scala subclasses of ExecutionException
// Project OrcScala
//
// $Id: ExecutionExceptions.scala 2933 2011-12-15 16:26:02Z jthywissen $
//
// Created by jthywiss on Aug 11, 2010.
//
// Copyright (c) 2011 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.error.runtime
import scala.util.parsing.input.NoPosition
////////
// Token exceptions
////////
class RuntimeSupportException(name: String) extends TokenException("This runtime does not support '" + name + "'.")
/** Access denied to an operation because the engine does not have a required right
*/
class RightException(val rightName: String) extends TokenException("This execution does not have the right '" + rightName + "'")
/** Raised when a token exceeds the configured stack depth limit.
 */
class StackLimitReachedError(val limit: Int) extends TokenException("Stack limit (limit=" + limit + ") reached")
/** Raised when the engine's token limit is reached.
 */
class TokenLimitReachedError(val limit: Int) extends TokenError("Token limit (limit=" + limit + ") reached")
/** Wraps an exception that was not caught during execution.
 */
class UncaughtException(message: String, cause: Throwable) extends TokenException(message, cause)
////////
// Runtime type exceptions
////////
/** Superclass of all runtime type exceptions, including arity mismatches,
* argument type mismatches, and attempts to call uncallable values.
*/
abstract class RuntimeTypeException(message: String) extends TokenException(message)
/** per JLS section 15.12.2.5
*/
class AmbiguousInvocationException(val methodNames: Array[String]) extends RuntimeTypeException("Ambiguous method invocation: " + methodNames.mkString(" -OR- "))
/** Raised when an argument's runtime type does not match the expected type.
 */
class ArgumentTypeMismatchException(val argPosition: Int, val expectedType: String, val providedType: String) extends RuntimeTypeException("Expected type " + expectedType + " for argument " + argPosition + ", got " + providedType + " instead")
/** Raised when a call provides the wrong number of arguments.
 */
class ArityMismatchException(val arityExpected: Int, val arityProvided: Int) extends RuntimeTypeException("Arity mismatch, expected " + arityExpected + " arguments, got " + arityProvided + " arguments.")
/** Raised when a call does not provide enough arguments.
 */
class InsufficientArgsException(val missingArg: Int, val arityProvided: Int) extends RuntimeTypeException("Arity mismatch, could not find argument " + missingArg + ", only got " + arityProvided + " arguments.")
/** Raised when an array access is not given a single integer index.
 */
class MalformedArrayAccessException(val args: List[AnyRef]) extends RuntimeTypeException("Array access requires a single Integer as an argument")
class BadArrayElementTypeException(val badType: String) extends TokenException("Unrecognized array element type: " + badType)
/** Raised when a tuple is accessed with an out-of-range index.
 */
class TupleIndexOutOfBoundsException(val index: Int) extends RuntimeTypeException("Tuple index out of range: " + index)
/** Raised when no implementation of a method matches the provided argument types.
 */
class MethodTypeMismatchException(val methodName: String, val clazz: Class[_]) extends RuntimeTypeException("Argument types did not match any implementation for method '" + methodName + "' in " + clazz.getName() + ".")
/** Exception raised when an uncallable value occurs in call argPosition.
*/
class UncallableValueException(val uncallable: Any) extends RuntimeTypeException("Value not callable: \"" + uncallable.toString() + "\"")
/** Attempted dot access at an unknown member.
*/
class NoSuchMemberException(val v: AnyRef, val unknownMember: String) extends RuntimeTypeException("Value " + v + " does not have a '" + unknownMember + "' member")
////////
// Site exceptions
////////
/** A container for Java-level exceptions raised by code
* implementing sites. These are wrapped as Orc exceptions
* to localize the failure to the calling token.
*/
class JavaException(cause: Throwable) extends SiteException(cause.toString(), cause) {
/** @return "position: ClassName: detailMessage (newline) position.longString"
*/
override def getMessageAndPositon(): String = {
if (getPosition() != null && getPosition() != NoPosition) {
getPosition().toString() + ": " + getCause().toString() + "\n" + getPosition().longString
} else {
getCause().toString()
}
}
/** @return "position: ClassName: detailMessage (newline) position.longString (newline) Orc stack trace... (newline) Java stack trace..."
*/
override def getMessageAndDiagnostics(): String = {
getMessageAndPositon() + "\n" + getOrcStacktraceAsString() + getJavaStacktraceAsString(getCause());
}
}
class ProgramSignalledError(message: String) extends SiteException(message)
| laurenyew/cOrcS | src/orc/error/runtime/ExecutionExceptions.scala | Scala | bsd-3-clause | 4,588 |
/** The System Under Test.
* We bail on the earlier round that generates the first error.
*/
class SUT extends J
| folone/dotty | tests/untried/neg/t6289/SUT_5.scala | Scala | bsd-3-clause | 117 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.rdd
import java.util.{HashMap => JHashMap}
import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer
import spark.RDD
import spark.Partitioner
import spark.Dependency
import spark.TaskContext
import spark.Partition
import spark.SparkEnv
import spark.ShuffleDependency
import spark.OneToOneDependency
/**
* An optimized version of cogroup for set difference/subtraction.
*
* It is possible to implement this operation with just `cogroup`, but
* that is less efficient because all of the entries from `rdd2`, for
* both matching and non-matching values in `rdd1`, are kept in the
* JHashMap until the end.
*
* With this implementation, only the entries from `rdd1` are kept in-memory,
* and the entries from `rdd2` are essentially streamed, as we only need to
* touch each once to decide if the value needs to be removed.
*
* This is particularly helpful when `rdd1` is much smaller than `rdd2`, as
* you can use `rdd1`'s partitioner/partition size and not worry about running
* out of memory because of the size of `rdd2`.
*/
private[spark] class SubtractedRDD[K: ClassManifest, V: ClassManifest, W: ClassManifest](
@transient var rdd1: RDD[(K, V)],
@transient var rdd2: RDD[(K, W)],
part: Partitioner,
val serializerClass: String = null)
extends RDD[(K, V)](rdd1.context, Nil) {
override def getDependencies: Seq[Dependency[_]] = {
Seq(rdd1, rdd2).map { rdd =>
if (rdd.partitioner == Some(part)) {
logInfo("Adding one-to-one dependency with " + rdd)
new OneToOneDependency(rdd)
} else {
logInfo("Adding shuffle dependency with " + rdd)
new ShuffleDependency(rdd.asInstanceOf[RDD[(K, Any)]], part, serializerClass)
}
}
}
override def getPartitions: Array[Partition] = {
val array = new Array[Partition](part.numPartitions)
for (i <- 0 until array.size) {
// Each CoGroupPartition will depend on rdd1 and rdd2
array(i) = new CoGroupPartition(i, Seq(rdd1, rdd2).zipWithIndex.map { case (rdd, j) =>
dependencies(j) match {
case s: ShuffleDependency[_, _] =>
new ShuffleCoGroupSplitDep(s.shuffleId)
case _ =>
new NarrowCoGroupSplitDep(rdd, i, rdd.partitions(i))
}
}.toArray)
}
array
}
override val partitioner = Some(part)
override def compute(p: Partition, context: TaskContext): Iterator[(K, V)] = {
val partition = p.asInstanceOf[CoGroupPartition]
val serializer = SparkEnv.get.serializerManager.get(serializerClass)
val map = new JHashMap[K, ArrayBuffer[V]]
def getSeq(k: K): ArrayBuffer[V] = {
val seq = map.get(k)
if (seq != null) {
seq
} else {
val seq = new ArrayBuffer[V]()
map.put(k, seq)
seq
}
}
def integrate(dep: CoGroupSplitDep, op: ((K, V)) => Unit) = dep match {
case NarrowCoGroupSplitDep(rdd, _, itsSplit) => {
for (t <- rdd.iterator(itsSplit, context))
op(t.asInstanceOf[(K, V)])
}
case ShuffleCoGroupSplitDep(shuffleId) => {
val iter = SparkEnv.get.shuffleFetcher.fetch(shuffleId, partition.index,
context.taskMetrics, serializer)
for (t <- iter)
op(t.asInstanceOf[(K, V)])
}
}
// the first dep is rdd1; add all values to the map
integrate(partition.deps(0), t => getSeq(t._1) += t._2)
// the second dep is rdd2; remove all of its keys
integrate(partition.deps(1), t => map.remove(t._1))
map.iterator.map { t => t._2.iterator.map { (t._1, _) } }.flatten
}
override def clearDependencies() {
super.clearDependencies()
rdd1 = null
rdd2 = null
}
}
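
// Minimal, hypothetical sketch (added for illustration, not used by Spark itself) of the
// subtract-by-key semantics described above, using plain Scala collections: the keys of
// the first dataset are kept in a map, while the second dataset is streamed and each of
// its entries is touched exactly once, only to remove matching keys.
private[spark] object SubtractedSemanticsSketch {
  def subtractByKey[K, V, W](left: Seq[(K, V)], right: Seq[(K, W)]): Seq[(K, V)] = {
    val keep = scala.collection.mutable.Map.empty[K, ArrayBuffer[V]]
    for ((k, v) <- left) keep.getOrElseUpdate(k, new ArrayBuffer[V]) += v
    for ((k, _) <- right) keep.remove(k) // streamed: each entry inspected once
    keep.iterator.flatMap { case (k, vs) => vs.iterator.map(k -> _) }.toSeq
  }
}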
| vax11780/spark | core/src/main/scala/spark/rdd/SubtractedRDD.scala | Scala | apache-2.0 | 4,511 |
package processes.freeMonads.single
import play.api.mvc.Request
import scala.concurrent.Future
import play.api.mvc.AnyContent
import processes.Services
import processes.PatchAssignment
import processes.freeMonads.HttpResultImplementation
import scala.Right
trait HappyFlowOnlyProgramRunner { _:HttpResultImplementation with HappyFlowOnlyProgramParts with PatchAssignment =>
protected def services: Services
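  // Added commentary: this is the interpreter (a natural transformation) for the
  // free-monad program. Each Method[A] instruction is mapped to an HttpResult[A] by
  // delegating to `services` and turning domain failures into the corresponding HTTP
  // error results.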
protected def patchProgramRunner[A]: Method[A] => HttpResult[A] = {
case ParseJson(request: Request[AnyContent]) =>
val result =
services
.parseJson(request)
.toRight(left = results.badRequest)
Future successful result
case JsonToProfile(json) =>
val result =
services
.jsonToProfile(json)
.asEither
.left.map(results.unprocessableEntity)
Future successful result
case GetProfileById(id) =>
services
.getProfileById(id)
.map(_.toRight(left = results.notFound(id)))
case MergeProfile(oldProfile, newProfile) =>
val result = services.mergeProfile(oldProfile, newProfile)
Future successful Right(result)
case UpdateProfile(id, profile) =>
services
.updateProfile(id, profile)
.map(Right.apply)
}
} | EECOLOR/scala-clean-code-patterns | src/main/scala/processes/freeMonads/single/HappyFlowOnlyProgramRunner.scala | Scala | mit | 1,284 |
import org.scalatest._
import org.scalatest.junit._
import org.junit.runner.RunWith
import org.junit._
import com.tngtech.jgiven.junit._
class SimpleHelloTest extends SimpleScenarioTest[Steps] {
@Test
def my_first_JGiven_scenario_in_Scala = {
given.some_state
when.some_action
then.some_outcome
}
}
| ahus1/JGiven | examples/scala/src/test/scala/SimpleHelloTest.scala | Scala | apache-2.0 | 356 |
package calculator.parser
import calculator.Main.memory
import calculator.Calculator
import calculator.lexer._
trait Parser extends Lexer {
self: Calculator =>
import Trees._
import calculator.lexer.Tokens._
def exprSource: ExprTree = { readToken; parseExpr }
def computeSource: Double = { readToken; parseExpr.compute }
/** Store the current token, as read from the lexer. */
private var currentToken: Token = Token(BAD)
/** update currentToken using nextToken in the Lexer. */
def readToken: Unit = { currentToken = nextToken }
/** ""Eats"" the expected token, or terminates with an error. */
private def eat(tokenClass: TokenClass): Unit = if (tokenClass == currentToken.info.tokenClass) readToken else expected(tokenClass)
/** Complains that what was found was not expected. The method accepts arbitrarily many arguments of type TokenClass */
private def expected(tokenClass: TokenClass, more: TokenClass*): Nothing = fatalError("expected: " + (tokenClass :: more.toList).mkString(" or ") + ", found: " + currentToken)
private def parseExpr: ExprTree = {
val ret = parsePrime(null)
    // ensure the end of file is reached when parsing ends (doesn't allow trailing tokens)
eat(EOF)
ret
}
/**
* A valid expression is either:
   * - PRIME(term) => parsePrime (priority -2)
   * - id, id, ... = term => parseComma (priority -1)
   * - id = term => parseEquals (priority 0)
   * - term => parseTerm (priority 1)
*/
/* Priority -2 : PRIME (special meaning) */
/* added by Valentin Minder */
/** If we find a PRIME keyword, there are special conditions
* - PRIME(ex) must be first item (no recursive call or recall to this method, not allowing "2 * PRIME(ex)" nor "id = PRIME(ex)")
   * - PRIME(ex) must be last item (must finish expression, not allowing: "PRIME(ex) * 2")
* - ex is a term (direct call to parseTerm, skipping parseEquals, not allowing "PRIME(id = ex)")
* - Following calls with term should never call this method again (not allowing "PRIME(PRIME(2))"),
* These conditions are verified in the code, and any transgression will throw an error.
*/
private def parsePrime(ex: ExprTree): ExprTree = {
if (currentToken.info == PRIME) {
eat(PRIME)
eat(LPAREN)
      val e = Prime(parseTerm(ex)) // direct call to parseTerm, skipping parseEquals, not allowing "PRIME(id = ex)"
eat(RPAREN)
e
// PRIME(ex) must be last item (must finish expression, not allowing: "PRIME(ex) * 2"
// ensured by the EAT(EOF) in parseExpr
} else {
parseComma(ex)
}
}
/**
* /* Priority -1: , */
* Comma allows BY-VALUE Assignment !
   * a,b,c = 3 => c is assigned to 3, a and b are assigned to 3 too (by copy)
*
* a,b,c = d is not allowed (Identifier in RHS of = is reference)
*/
private def parseComma(ex: ExprTree): ExprTree = {
val e = parseEquals(ex)
if (currentToken.info == COMMA) {
eat(COMMA)
e match {
case id@Identifier(_) => {
val rhs = parseComma(ex)
memory += (id -> rhs)
rhs match {
// copy is only allowed left to another copy or an Assignment
case Assign(_, _) => Copy(id, rhs)
case Copy(_,_) => Copy(id, rhs)
case _ => fatalError("Invalid variable declaration! Right-hand side of , must be identifier with Assignment, not Reference")
}
}
case _ => fatalError("Invalid variable declaration! Left-hand side of , must be Identifier")
}
} else {
e
}
}
/* Priority 0: equal (LOWER priority) */
/* modified by Valentin Minder */
/** If we find an EQSIGN keyword, there are special conditions
* - id = ex must be first item (no recursive call to this method, not allowing "2 * id = ex")
* - ex is a term (direct call to parseTerm, not allowing "id = PRIME(ex)")
*
* Equal allows BY-REFERENCE Assignment.
* a = b = c = 3 => c is assigned to 3, b references c, a references b.
* a = b = c => b references c, a references b.
* a = b = c+1 => b is assigned to the value of c+1, a references b.
*/
private def parseEquals(ex: ExprTree): ExprTree = {
// - ex is a term (direct call to parseTerm, not allowing "id = PRIME(ex)")
var e = parseTerm(ex)
if (currentToken.info == EQSIGN) {
eat(EQSIGN)
e match {
case id @ Identifier(_) => {
val rhs = parseEquals(ex)
memory += (id -> rhs)
rhs match {
// RHS of = sign is Identifier => Reference
case Identifier(_) => References(id, rhs)
// Second = sign (previous Assign or Reference) => Reference
case Assign(_,_) => References(id, rhs)
case References(_, _) => References(id, rhs)
// Single = sign => Assign
case _ => {
Assign(id, rhs)
}
}
}
case _ => fatalError("Invalid variable declaration! Left-hand side of = must be identifier.")
}
} else {
e
}
}
/* Priority 1: plus/minus (LOWER priority) */
/* modified by Valentin Minder */
  /* Plus and minus have the same priority; the loop below builds a left-associative tree */
private def parseTerm(ex: ExprTree): ExprTree = {
var e = parseTimesDivision(ex)
while (currentToken.info == PLUS || currentToken.info == MINUS) {
if (currentToken.info == PLUS) {
eat(PLUS)
e = Plus(e, parseTimesDivision(e))
}
if (currentToken.info == MINUS) {
eat(MINUS)
e = Minus(e, parseTimesDivision(e))
}
}
e
}
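  /* Hypothetical worked example (added, not from the original author) of the priority
   * chain, assuming '^' lexes as POWER and '!' as FACTORIAL: the input "1 + 2 * 3 ^ 2!"
   * yields the tree Plus(1, Times(2, Power(3, Factorial(2)))), because each level
   * delegates to the next higher-priority one:
   * parseTerm -> parseTimesDivision -> parseModulo -> parsePower -> parseFactorial -> parseSimpleExpr. */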
/* Priority 2: times/division */
/* Added by Valentin Minder */
private def parseTimesDivision(ex: ExprTree): ExprTree = {
var e = parseModulo(ex)
    /* Times and division have the same priority; the loop below builds a left-associative tree */
while (currentToken.info == TIMES || currentToken.info == DIVISION) {
if (currentToken.info == TIMES) {
eat(TIMES)
e = Times(e, parseModulo(e))
}
if (currentToken.info == DIVISION) {
eat(DIVISION)
e = Division(e, parseModulo(e))
}
}
e
}
/* Priority 3: modulo */
/* Added by Valentin Minder */
private def parseModulo(ex: ExprTree): ExprTree = {
var e = parsePower(ex)
if (currentToken.info == MODULO) {
eat(MODULO)
e = Modulo(e, parsePower(e))
}
e
}
/* Priority 4: power */
/* Added by Valentin Minder */
private def parsePower(ex: ExprTree): ExprTree = {
var e = parseFactorial(ex)
if (currentToken.info == POWER) {
eat(POWER)
e = Power(e, parseFactorial(e))
}
e
}
/* Priority 5: factorial */
/* Added by Valentin Minder */
private def parseFactorial(ex: ExprTree): ExprTree = {
var e = parseSimpleExpr(ex)
if (currentToken.info == FACTORIAL) {
eat(FACTORIAL)
e = Factorial(e)
}
e
}
/* Priority 6: simple expression (HIGHEST) */
/* modified by Valentin Minder */
private def parseSimpleExpr(ex: ExprTree): ExprTree = {
currentToken.info match {
// Numeric value
case NUM(value) => parseExprTreeToken(NumLit(value))
// Identifier
case ID(value) => parseExprTreeToken(Identifier(value))
// Parenthesis
case LPAREN => parseParenthesis
/* modified by Valentin Minder */
/* Keywords : GCD SQRT PRIME */
case GCD => parseGCD
case SQRT => parseSQRT
// PRIME removed because not a basic operation but terminal!
//case EOF => null
case _ => {
expected(NUM(""), ID(""), LPAREN, GCD, SQRT);
}
}
}
/* Added by Valentin Minder */
/**
* Eat the gcd keyword, eat the left parenthesis, parse left expr,
* eat the comma, parse right expr, eat right parenthesis.
* Return Gcd(ParsedExprLeft, ParsedExprRight)
*/
private def parseGCD: ExprTree = {
eat(GCD)
val ret = parseParenthesisTuple
Gcd(ret._1, ret._2)
}
/* Added by Valentin Minder */
/**
* Eat the SQRT keyword, eat the left parenthesis, parse expr, eat right parenthesis.
* Return Sqrt(ParsedExpr)
*/
private def parseSQRT: ExprTree = {
eat(SQRT)
Sqrt(parseParenthesis)
}
private def parseExprTreeToken[T <: ExprTree](retTree: T): ExprTree = {
val ret = retTree
readToken
ret
}
private def parseParenthesis: ExprTree = {
eat(LPAREN)
val ret = parseTerm(null)
eat(RPAREN)
ret
}
/* Added by Valentin Minder */
/**
* Parse a Parenthesis Tuple (expr1, expr2)
* Eat the left parenthesis, parse left expr,
* eat the comma, parse right expr, eat right parenthesis.
*
* @return A Tuple2(EvalExpr1, EvalExpr2)
*/
private def parseParenthesisTuple = { // Tuple2[ExprTree]
eat(LPAREN)
val e1 = parseTerm(null)
eat(COMMA)
val e2 = parseTerm(null)
eat(RPAREN)
Tuple2(e1, e2)
}
private def parseIdentifier: Identifier = currentToken.info match {
case ID(value) => { val ret = Identifier(value); readToken; ret }
case _ => expected(ID(""))
}
} | ValentinMinder/scala-labs-HEIG | lab02/src/calculator/parser/Parser.scala | Scala | mit | 9,075 |
package com.tpersson.client.common.utils
import javafx.application.Platform
import scala.concurrent.ExecutionContext
class UiExecutionContext extends ExecutionContext {
override def execute(runnable: Runnable): Unit = {
Platform.runLater(runnable)
}
override def reportFailure(cause: Throwable): Unit = {
}
}
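// Illustrative usage (an assumption, not part of the original file): pass this context to
// Future combinators whose callbacks must run on the JavaFX application thread, e.g.
//   someFuture.map(result => label.setText(result.toString))(new UiExecutionContext)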
| TommyPersson/scala-mvvm-example | src/main/scala/com/tpersson/client/common/utils/UiExecutionContext.scala | Scala | apache-2.0 | 325 |
/**
* Track the trackers
* Copyright (C) 2014 Sebastian Schelter, Felix Neutatz
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package io.ssc.trackthetrackers.analysis.algorithms
import io.ssc.trackthetrackers.analysis.{GraphUtils, Edge}
import org.apache.flink.api.scala.ExecutionEnvironment
import org.apache.flink.api.scala._
import org.apache.flink.core.fs.FileSystem.WriteMode
/**
* http://konect.uni-koblenz.de/statistics/power
*
* Be aware that this code only considers the outdegree distribution currently
*
*/
@deprecated
object PowerLawExponentEstimation extends App {
estimatePowerLawExponent("/home/ssc/Entwicklung/projects/trackthetrackers/analysis/src/main/resources/cfindergoogle/links.tsv",
"/tmp/flink-scala/estimatedExponent")
def estimatePowerLawExponent(linksFile: String, outputPath: String) = {
implicit val env = ExecutionEnvironment.getExecutionEnvironment
val edges = GraphUtils.readEdges(linksFile)
val verticesWithDegree =
edges.map { edge => edge.src -> 1 }
.groupBy(0)
.reduce { (vertexWithCount1, vertexWithCount2) =>
vertexWithCount1._1 -> (vertexWithCount1._2 + vertexWithCount2._2)
}
val minDegree = verticesWithDegree.min(1).map { _._2 }
val numVertices =
verticesWithDegree.map { vertexWithDegree => Tuple1(vertexWithDegree._1) }
.distinct
.reduceGroup{ _.size }
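    // Added note: the expression below appears to implement the maximum-likelihood
    // estimator of the power-law exponent, gamma = 1 + n / sum_i ln(d_i / d_min),
    // with standard error sigma = sqrt(n) / sum_i ln(d_i / d_min) = (gamma - 1) / sqrt(n).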
val estimatedExponent =
verticesWithDegree.cross(minDegree) { (vertexWithDegree, minDegree) =>
math.log(vertexWithDegree._2.toDouble / minDegree)
}
.reduce { _ + _ }
.cross(numVertices) { (v, n) =>
val gamma = 1.0 + (n / v)
val sigma = math.sqrt(n) / v
(gamma, sigma)
}
estimatedExponent.writeAsText(outputPath, writeMode = WriteMode.OVERWRITE)
env.execute()
}
}
| HungUnicorn/trackthetrackers | analysis/src/main/scala/io/ssc/trackthetrackers/analysis/algorithms/PowerLawExponentEstimation.scala | Scala | gpl-3.0 | 2,488 |
/*
* Copyright (C) Lightbend Inc. <https://www.lightbend.com>
*/
package scalaguide.logging
import javax.inject.Inject
import org.junit.runner.RunWith
import org.specs2.mock.Mockito
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import org.slf4j._
import play.api._
import play.api.mvc._
import play.api.test._
import play.api.test.Helpers._
@RunWith(classOf[JUnitRunner])
class ScalaLoggingSpec extends Specification with Mockito {
private def riskyCalculation: Int = {
10 / scala.util.Random.nextInt(2)
}
"The logger" should {
"properly log" in {
val logger = new play.api.LoggerLike {
// Mock underlying logger implementation
val logger = mock[org.slf4j.Logger].smart
logger.isDebugEnabled().returns(true)
logger.isErrorEnabled().returns(true)
}
//#logging-example
// Log some debug info
logger.debug("Attempting risky calculation.")
try {
val result = riskyCalculation
// Log result if successful
logger.debug(s"Result=$result")
} catch {
case t: Throwable => {
// Log error with message and Throwable.
logger.error("Exception with riskyCalculation", t)
}
}
//#logging-example
there.was(atLeastOne(logger.logger).isDebugEnabled())
there.was(atLeastOne(logger.logger).debug(anyString))
there.was(atMostOne(logger.logger).isErrorEnabled())
there.was(atMostOne(logger.logger).error(anyString, any[Throwable]))
}
}
"Creating a Logger" should {
"return a new Logger with specified name" in {
//#logging-import
import play.api.Logger
//#logging-import
//#logging-create-logger-name
val accessLogger: Logger = Logger("access")
//#logging-create-logger-name
accessLogger.underlyingLogger.getName must equalTo("access")
}
"return a new Logger with class name" in {
import play.api.Logger
//#logging-create-logger-class
val logger: Logger = Logger(this.getClass())
//#logging-create-logger-class
logger.underlyingLogger.getName must equalTo("scalaguide.logging.ScalaLoggingSpec")
}
"use Logging trait" in {
//#logging-trait
import play.api.Logging
class MyClassWithLogging extends Logging {
logger.info("Using the trait")
}
//#logging-trait
new MyClassWithLogging()
success
}
"allow for using multiple loggers" in {
// object Logger extends LoggerLike {
// // Mock underlying logger implementation
// val logger = mock[org.slf4j.Logger].smart
//
// def apply[T](clazz: Class[T]): play.api.Logger = new play.api.Logger(mock[org.slf4j.Logger].smart)
// def apply[T](name: String): play.api.Logger = new play.api.Logger(mock[org.slf4j.Logger].smart)
// }
//#logging-pattern-mix
import scala.concurrent.Future
import play.api.Logger
import play.api.mvc._
import javax.inject.Inject
class AccessLoggingAction @Inject() (parser: BodyParsers.Default)(implicit ec: ExecutionContext)
extends ActionBuilderImpl(parser) {
val accessLogger = Logger("access")
override def invokeBlock[A](request: Request[A], block: (Request[A]) => Future[Result]) = {
accessLogger.info(s"method=${request.method} uri=${request.uri} remote-address=${request.remoteAddress}")
block(request)
}
}
class Application @Inject() (val accessLoggingAction: AccessLoggingAction, cc: ControllerComponents)
extends AbstractController(cc) {
val logger = Logger(this.getClass())
def index = accessLoggingAction {
try {
val result = riskyCalculation
Ok(s"Result=$result")
} catch {
case t: Throwable => {
logger.error("Exception with riskyCalculation", t)
InternalServerError("Error in calculation: " + t.getMessage())
}
}
}
}
//#logging-pattern-mix
import akka.actor._
import akka.stream.Materializer
implicit val system = ActorSystem()
implicit val mat = Materializer.matFromSystem
implicit val ec: ExecutionContext = system.dispatcher
val controller =
new Application(new AccessLoggingAction(new BodyParsers.Default()), Helpers.stubControllerComponents())
controller.accessLoggingAction.accessLogger.underlyingLogger.getName must equalTo("access")
controller.logger.underlyingLogger.getName must contain("Application")
}
"allow for use in filters" in {
//#logging-pattern-filter
import javax.inject.Inject
import akka.stream.Materializer
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import play.api.Logger
import play.api.mvc._
import play.api._
class AccessLoggingFilter @Inject() (implicit val mat: Materializer) extends Filter {
val accessLogger = Logger("access")
def apply(next: (RequestHeader) => Future[Result])(request: RequestHeader): Future[Result] = {
val resultFuture = next(request)
resultFuture.foreach(result => {
val msg = s"method=${request.method} uri=${request.uri} remote-address=${request.remoteAddress}" +
s" status=${result.header.status}";
accessLogger.info(msg)
})
resultFuture
}
}
//#logging-pattern-filter
ok
}
}
"Underlying logger" should {
"return logger name" in {
import play.api.Logger
val logger: Logger = Logger("access")
//#logging-underlying
val underlyingLogger: org.slf4j.Logger = logger.underlyingLogger
val loggerName = underlyingLogger.getName()
//#logging-underlying
loggerName must equalTo("access")
}
}
//#logging-default-marker-context
val someMarker: org.slf4j.Marker = MarkerFactory.getMarker("SOMEMARKER")
case object SomeMarkerContext extends play.api.DefaultMarkerContext(someMarker)
//#logging-default-marker-context
"MarkerContext" should {
"return some marker" in {
import play.api.Logger
val logger: Logger = Logger("access")
//#logging-marker-context
val marker: org.slf4j.Marker = MarkerFactory.getMarker("SOMEMARKER")
val mc: MarkerContext = MarkerContext(marker)
//#logging-marker-context
mc.marker must beSome.which(_ must be_==(marker))
}
"logger.info with explicit marker context" in {
import play.api.Logger
val logger: Logger = Logger("access")
//#logging-log-info-with-explicit-markercontext
// use a typed marker as input
logger.info("log message with explicit marker context with case object")(SomeMarkerContext)
// Use a specified marker.
val otherMarker: Marker = MarkerFactory.getMarker("OTHER")
val otherMarkerContext: MarkerContext = MarkerContext(otherMarker)
logger.info("log message with explicit marker context")(otherMarkerContext)
//#logging-log-info-with-explicit-markercontext
success
}
"logger.info with implicit marker context" in {
import play.api.Logger
val logger: Logger = Logger("access")
//#logging-log-info-with-implicit-markercontext
val marker: Marker = MarkerFactory.getMarker("SOMEMARKER")
implicit val mc: MarkerContext = MarkerContext(marker)
// Use the implicit MarkerContext in logger.info...
logger.info("log message with implicit marker context")
//#logging-log-info-with-implicit-markercontext
mc.marker must beSome.which(_ must be_==(marker))
}
"implicitly convert a Marker to a MarkerContext" in {
import play.api.Logger
val logger: Logger = Logger("access")
//#logging-log-info-with-implicit-conversion
val mc: MarkerContext = MarkerFactory.getMarker("SOMEMARKER")
// Use the marker that has been implicitly converted to MarkerContext
logger.info("log message with implicit marker context")(mc)
//#logging-log-info-with-implicit-conversion
success
}
"implicitly pass marker context in controller" in new WithApplication() with Injecting {
val controller = inject[ImplicitRequestController]
val result = controller.asyncIndex()(FakeRequest())
contentAsString(result) must be_==("testing")
}
}
}
//#logging-request-context-trait
trait RequestMarkerContext {
// Adding 'implicit request' enables implicit conversion chaining
// See http://docs.scala-lang.org/tutorials/FAQ/chaining-implicits.html
implicit def requestHeaderToMarkerContext(implicit request: RequestHeader): MarkerContext = {
import net.logstash.logback.marker.LogstashMarker
import net.logstash.logback.marker.Markers._
val requestMarkers: LogstashMarker = append("host", request.host)
.and(append("path", request.path))
MarkerContext(requestMarkers)
}
}
//#logging-request-context-trait
class ImplicitRequestController @Inject() (cc: ControllerComponents)(implicit otherExecutionContext: ExecutionContext)
extends AbstractController(cc)
with RequestMarkerContext {
private val logger = play.api.Logger(getClass)
//#logging-log-info-with-request-context
def asyncIndex = Action.async { implicit request =>
Future {
methodInOtherExecutionContext() // implicit conversion here
}(otherExecutionContext)
}
def methodInOtherExecutionContext()(implicit mc: MarkerContext): Result = {
logger.debug("index: ") // same as above
Ok("testing")
}
//#logging-log-info-with-request-context
}
//#logging-log-trace-with-tracer-controller
trait TracerMarker {
import TracerMarker._
implicit def requestHeaderToMarkerContext(implicit request: RequestHeader): MarkerContext = {
val marker = org.slf4j.MarkerFactory.getDetachedMarker("dynamic") // base do-nothing marker...
if (request.getQueryString("trace").nonEmpty) {
marker.add(tracerMarker)
}
marker
}
}
object TracerMarker {
private val tracerMarker = org.slf4j.MarkerFactory.getMarker("TRACER")
}
class TracerBulletController @Inject() (cc: ControllerComponents) extends AbstractController(cc) with TracerMarker {
private val logger = play.api.Logger("application")
def index = Action { implicit request: Request[AnyContent] =>
logger.trace("Only logged if queryString contains trace=true")
Ok("hello world")
}
}
//#logging-log-trace-with-tracer-controller
| wegtam/playframework | documentation/manual/working/scalaGuide/main/logging/code/ScalaLoggingSpec.scala | Scala | apache-2.0 | 10,744 |
package audit.collector
import java.util.Date
import akka.actor.{Actor, Props}
import akka.http.scaladsl.model.StatusCodes.OK
import akka.http.scaladsl.testkit.ScalatestRouteTest
import audit.Api
import audit.collector.Collector.Event
import audit.collector.CollectorActor.Stored
import org.scalatest.WordSpec
class CollectorApiSpec extends WordSpec with ScalatestRouteTest with CollectorApi with Api {
override implicit val executionContext = executor
val event = Event("system-01", "eid-01", "user-01", new Date(), "msg-01", "desc-01", "state-01")
override val collector = system.actorOf(Props(new Actor {
override def receive = {
case _ => sender() ! Stored
}
}))
"CollectionApi" should {
"store event" in {
Post("/collect", event) ~> collectorRoute ~> check {
        assert(status === OK)
}
}
}
}
| grzesiekw/audit | src/test/scala/audit/collector/CollectorApiSpec.scala | Scala | mit | 847 |
/*
* Copyright (c) 2015-2017 Toby Weston
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package s4j.scala.chapter18
import s4j.scala.chapter18.Extractors.{DiscountExtractor, UrlExtractor, YearsOfCustom}
object ExtractorExample extends App {
val today = ""
val customer = new Customer("Bob", "1 Church street")
customer match {
case Customer(name, address) => println(name + " " + address)
}
customer.yearsACustomer = 3
val discount = customer match {
case YearsOfCustom(years) if years >= 5 => Discount(0.5)
case YearsOfCustom(years) if years >= 2 => Discount(0.2)
case YearsOfCustom(years) if years >= 1 => Discount(0.1)
case _ if blackFriday(today) => Discount(0)
case _ => Discount(0)
}
println(discount)
val discount2 = customer match {
case DiscountExtractor(discount) => discount
}
println(discount2)
val url = "http://baddotrobot.com" match {
case UrlExtractor(protocol, host) => println(protocol + " " + host)
}
def blackFriday(x: String): Boolean = true
}
| tobyweston/learn-scala-java-devs | src/main/scala/s4j/scala/chapter18/ExtractorExample.scala | Scala | apache-2.0 | 1,593 |
package de.tu_berlin.dima.bdapro.cli.command
import java.lang.{System => Sys}
import java.nio.file.Paths
import net.sourceforge.argparse4j.inf.{Namespace, Subparser}
import org.peelframework.core.cli.command.Command
import org.peelframework.core.util.console.ConsoleColorise
import org.peelframework.core.util.shell
import org.scalactic.{Bad, Good}
import org.springframework.context.ApplicationContext
abstract class MergeTask extends Command {
type Valid = Unit
type Invalid = Error
case class Error(msg: String)
def pass = Good(())
def fail(msg: String) = Bad(Error(msg))
val Commit = """([0-9a-f]+) (.+)""".r
val Additions = """(\d+) file changed, (\d+) insertions\(\+\)""".r
val Path = """(\S+)\W+\|\W+(\d+)\W+\++""".r
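  // Illustrative (hypothetical) lines these patterns are meant to match:
  //   Commit:    "a1b2c3d solve the warmup task"        <- `git log --oneline` entry
  //   Additions: "1 file changed, 42 insertions(+)"     <- `git log --stat` summary line
  //   Path:      "src/main/scala/Task.scala | 42 +++++" <- `git log --stat` per-file line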
def commitFle(user: String): String = Paths.get(
"bdapro-ws1617-flink-jobs",
"src", "main",
"scala", "de", "tu_berlin", "dima", "bdapro",
"flink", taskBranch, toLowerCaseAndUnderscore(user), s"${taskName.replaceAll("\\W", "")}.scala"
).toString
def commitMsg(user: String): String
val taskName: String
val taskBranch: String
override val help = s"Merge solutions for the $taskName task."
override def register(parser: Subparser) = {
// arguments
parser.addArgument("base-commit")
.`type`(classOf[String])
.dest("app.merge.commit.base")
.metavar("HASH")
.help("base commit")
parser.addArgument("username")
.`type`(classOf[String])
.dest("app.merge.remote.user")
.metavar("USER")
.help("GitHub username")
}
override def configure(ns: Namespace) = {
// set ns options and arguments to system properties
Sys.setProperty("app.merge.local.src", Paths.get(Sys.getenv("BUNDLE_SRC"), "bdapro-ws1617").toAbsolutePath.toString)
Sys.setProperty("app.merge.remote.user", ns.getString("app.merge.remote.user"))
Sys.setProperty("app.merge.commit.base", ns.getString("app.merge.commit.base"))
}
override def run(context: ApplicationContext) = {
val locSrc = Sys.getProperty("app.merge.local.src")
val remUsr = Sys.getProperty("app.merge.remote.user")
val basCmt = Sys.getProperty("app.merge.commit.base")
logger.info(s"Attempting to merge implementation for '$taskName' task for user '$remUsr' ")
logger.info(s"Bundle source is '$locSrc'")
val status = for {
_ <- {
logger.info(s"Creating branch warmup-solutions (or rebasing it to master if it exists)")
val ret1 = shell !
s"""
|cd $locSrc;
|git checkout -q master
|git checkout -q -b warmup-solutions;
|git checkout -q warmup-solutions;
|git rebase master
""".stripMargin
        if (ret1 == 0) pass
        else fail(s"Problem when creating or rebasing branch warmup-solutions")
      }
      _ <- {
        logger.info(s"Preparing local branch '$remUsr-$taskBranch' based on $basCmt")
val ret2 = shell !
s"""
|cd $locSrc;
|git branch -D $remUsr-$taskBranch;
|git checkout -b $remUsr-$taskBranch;
|git reset --hard $basCmt;
""".stripMargin
if (ret2 == 0) pass
else fail(s"Cannot prepare local branch '$remUsr-$taskBranch'")
}
_ <- {
logger.info(s"Pulling '$remUsr/$taskBranch'")
val ret = shell !
s"""
|cd $locSrc;
|git pull --ff-only [email protected]:$remUsr/BDAPRO.WS1617.git $taskBranch;
""".stripMargin
if (ret == 0) pass
else fail(s"Cannot pull '$remUsr/$taskBranch'")
}
_ <- {
logger.info(s"Validating commits on '$remUsr-$taskBranch'")
val ret = shell !!
s"""
|cd $locSrc;
|git checkout -q warmup-solutions;
|git log -n2 --oneline $remUsr-$taskBranch
""".stripMargin
ret.split('\n').map(_.trim).toList match {
case Commit(solCmt, solMsg) :: Commit(basID, _) :: Nil =>
def expMsg = commitMsg(remUsr)
val expFle = commitFle(remUsr)
if (!(basCmt startsWith basID)) {
fail(s"Solution is not within a single commit based on $basCmt")
} else if (!(solMsg.trim.toLowerCase == expMsg.trim.toLowerCase)) {
fail(s"Malformed commit message for solution:\nexp: $expMsg\ngot: $solMsg")
} else {
val logStat = shell !!
s"""
|cd $locSrc;
|git log -n1 --stat=300 $remUsr-$taskBranch
""".stripMargin
logStat.split('\n').map(_.trim).reverse.toList match {
case Additions("1", _) :: Path(`expFle`, _) :: _ =>
pass
case Additions("1", _) :: y :: rest =>
fail(s"Commit does not consist of a single file located at '$expFle'")
}
}
case _ =>
fail(s"Cannot validate commits on '$remUsr-$taskBranch'")
}
}
_ <- {
logger.info(s"Cherry-picking last commit of '$remUsr-$taskBranch'")
val ret = shell !
s"""
|cd $locSrc;
|git checkout -q warmup-solutions;
|git cherry-pick $remUsr-$taskBranch
""".stripMargin
if (ret == 0) pass
else fail(s"Cannot pull '$remUsr/$taskBranch'")
}
} yield pass
status match {
case Good(_) =>
logger.info("Everything is fine".green)
case Bad(Error(msg)) =>
logger.error(s"Error while merging: $msg".red)
}
}
def toLowerCaseAndUnderscore(s: String): String = {
s.toLowerCase.replace('-','_')
}
}
| cristiprg/BDAPRO.GlobalStateML | bdapro-ws1617-peelextensions/src/main/scala/de/tu_berlin/dima/bdapro/cli/command/MergeTask.scala | Scala | apache-2.0 | 5,708 |
/*
* Copyright (c) 2018. Fengguo Wei and others.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License v2.0
* which accompanies this distribution, and is available at
* https://www.apache.org/licenses/LICENSE-2.0
*
* Detailed contributors are listed in the CONTRIBUTOR.md
*/
package org.argus.jnsaf.analysis
import org.argus.amandroid.alir.taintAnalysis.{DataLeakageAndroidSourceAndSinkManager, IntentSinkKind}
import org.argus.amandroid.core.ApkGlobal
import org.argus.jawa.core.elements.Signature
import org.argus.jawa.core.util._
import org.argus.jawa.flow.cfg.ICFGCallNode
import org.argus.jawa.flow.pta.PTAResult
import org.argus.jawa.flow.taintAnalysis.{SSPosition, SourceAndSinkCategory}
/**
* Created by fgwei on 4/27/17.
*/
class JNISourceAndSinkManager(sasFilePath: String) extends DataLeakageAndroidSourceAndSinkManager(sasFilePath) {
override def isSinkMethod(global: ApkGlobal, sig: Signature): Option[(String, ISet[SSPosition])] = {
val poss = this.customSinks.getOrElse("ICC", mmapEmpty).filter(sink => matches(global, sig, sink._1)).map(_._2._1).fold(isetEmpty)(iunion)
if(poss.nonEmpty) {
Some((SourceAndSinkCategory.ICC_SINK, poss))
} else {
super.isSinkMethod(global, sig)
}
}
override def intentSink: IntentSinkKind.Value = IntentSinkKind.ALL
override def isIntentSink(apk: ApkGlobal, invNode: ICFGCallNode, pos: Option[Int], s: PTAResult): Boolean = {
getCustomSinks("ICC").contains(invNode.getCalleeSig) || super.isIntentSink(apk, invNode, pos, s)
}
override def isEntryPointSource(apk: ApkGlobal, signature: Signature): Boolean = {
apk.model.getComponentInfos foreach { info =>
if(info.compType == signature.classTyp) {
return apk.getEntryPoints(info).contains(signature)
}
}
false
}
} | arguslab/Argus-SAF | jnsaf/src/main/scala/org/argus/jnsaf/analysis/JNISourceAndSinkManager.scala | Scala | apache-2.0 | 1,877 |
/*
* Copyright 2016 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600.v2.calculations
import org.joda.time.LocalDate
import org.scalatest.prop.TableDrivenPropertyChecks._
import org.scalatest.prop.Tables.Table
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.CATO04
import uk.gov.hmrc.ct.computations.{CP1, CP2, CP295, HmrcAccountingPeriod}
import uk.gov.hmrc.ct.ct600.calculations.{CorporationTaxCalculatorParameters, InvalidAccountingPeriodException}
import uk.gov.hmrc.ct.ct600.v2.{B39, B38, B37}
class MarginalRateReliefCalculatorSpec extends WordSpec with Matchers {
"MarginalRateReliefCalculator input validation" should {
"Fail validation for an AP where the end date is before the start date" in new Calculator {
an[InvalidAccountingPeriodException] should be thrownBy
calculateMRR(B37(300001), B38(None), B39(None), HmrcAccountingPeriod(CP1(new LocalDate(2014, 9, 1)), CP2(new LocalDate(2014, 5, 31))))
an[InvalidAccountingPeriodException] should be thrownBy
calculateMRR(B37(300001), B38(None), B39(None), HmrcAccountingPeriod(CP1(new LocalDate(2014, 9, 1)), CP2(new LocalDate(2013, 1, 31))))
}
"Not fail validation for an AP of 365 days across multiple non-leap years" in new Calculator {
calculateMRR(B37(300001), B38(None), B39(None), HmrcAccountingPeriod(CP1(new LocalDate(2013, 3, 1)), CP2(new LocalDate(2014, 2, 28))))
calculateMRR(B37(300001), B38(None), B39(None), HmrcAccountingPeriod(CP1(new LocalDate(2013, 4, 1)), CP2(new LocalDate(2014, 3, 31))))
calculateMRR(B37(300001), B38(None), B39(None), HmrcAccountingPeriod(CP1(new LocalDate(2013, 10, 1)), CP2(new LocalDate(2014, 9, 30))))
}
"Fail validation for an AP of 365 days across multiple non-leap years" in new Calculator {
an[InvalidAccountingPeriodException] should be thrownBy
calculateMRR(B37(300001), B38(None), B39(None), HmrcAccountingPeriod(CP1(new LocalDate(2013, 3, 1)), CP2(new LocalDate(2014, 3, 1))))
an[InvalidAccountingPeriodException] should be thrownBy
calculateMRR(B37(300001), B38(None), B39(None), HmrcAccountingPeriod(CP1(new LocalDate(2013, 4, 1)), CP2(new LocalDate(2014, 4, 1))))
an[InvalidAccountingPeriodException] should be thrownBy
calculateMRR(B37(300001), B38(None), B39(None), HmrcAccountingPeriod(CP1(new LocalDate(2013, 10, 1)), CP2(new LocalDate(2014, 10, 1))))
}
"Not fail validation for an AP of 366 days starting beginning of February on a leap year and ending at the end of January the following year" in new Calculator {
calculateMRR(B37(300001), B38(None), B39(None), HmrcAccountingPeriod(CP1(new LocalDate(2012, 2, 1)), CP2(new LocalDate(2013, 1, 31))))
}
"Not fail validation for short APs in a single financial year which is not a leap year" in new Calculator {
calculateMRR(B37(300001), B38(None), B39(None), HmrcAccountingPeriod(CP1(new LocalDate(2014, 9, 1)), CP2(new LocalDate(2014, 12, 31))))
calculateMRR(B37(300001), B38(None), B39(None), HmrcAccountingPeriod(CP1(new LocalDate(2013, 12, 1)), CP2(new LocalDate(2014, 3, 1))))
calculateMRR(B37(300001), B38(None), B39(None), HmrcAccountingPeriod(CP1(new LocalDate(2014, 3, 1)), CP2(new LocalDate(2014, 3, 31))))
calculateMRR(B37(300001), B38(None), B39(None), HmrcAccountingPeriod(CP1(new LocalDate(2014, 1, 1)), CP2(new LocalDate(2014, 3, 31))))
calculateMRR(B37(300001), B38(None), B39(None), HmrcAccountingPeriod(CP1(new LocalDate(2014, 9, 1)), CP2(new LocalDate(2014, 11, 30))))
}
"Fail validation for an AP which starts before 1st April 2012" in new Calculator {
an[InvalidAccountingPeriodException] should be thrownBy
calculateMRR(B37(300001), B38(None), B39(None), HmrcAccountingPeriod(CP1(new LocalDate(2006, 10, 1)), CP2(new LocalDate(2007, 9, 30))))
}
}
val jiraCalculationCATO1581Examples = Table(
("b37BasicProfitChargeable", "cp1AccountingPeriodStartDate", "cp2AccountingPeriodEndDate", "b38FrankedInvestment", "cato04MRR"),
(300000, "2013-04-01", "2014-03-31", 0, 0.00),
(300001, "2013-04-01", "2014-03-31", 0, 8999.99),
(400000, "2013-04-01", "2014-03-31", 0, 8250.00),
(1000000, "2013-04-01", "2014-03-31", 0, 3750.00),
(1400000, "2013-04-01", "2014-03-31", 0, 750.00),
(1499999, "2013-04-01", "2014-03-31", 0, 0.01),
(1500000, "2013-11-01", "2014-10-1", 0, 0.00),
(300000, "2014-1-01", "2014-3-31", 0, 523.97),
(300001, "2014-1-01", "2014-3-31", 0, 523.97),
(400000, "2014-1-01", "2014-3-31", 0, 0.00),
(1000000, "2014-1-01", "2014-3-31", 0, 0.00),
(1400000, "2014-1-01", "2014-3-31", 0, 0.00),
(1499999, "2014-1-01", "2014-3-31", 0, 0.00),
(1500000, "2014-1-01", "2014-3-31", 0, 0.00),
(200000, "2013-11-01", "2014-10-1", 0, 0.00),
(300000, "2013-11-01", "2014-10-1", 0, 5118.40),
(400000, "2013-11-01", "2014-10-1", 0, 4643.03),
(1000000, "2013-11-01", "2014-10-1", 0, 1790.79),
(1400000, "2013-11-01", "2014-10-1", 0, 0.00),
(1499999, "2013-11-01", "2014-10-1", 0, 0.00),
(1500000, "2013-11-01", "2014-10-1", 0, 0.00),
(300000, "2013-04-01", "2014-03-31", 500, 8981.28),
(300001, "2013-04-01", "2014-03-31", 500, 8981.27),
(299500, "2013-04-01", "2014-03-31", 500, 0.00),
(299501, "2013-04-01", "2014-03-31", 500, 8984.99),
(400000, "2013-04-01", "2014-03-31", 500, 8235.96),
(1000000, "2013-04-01", "2014-03-31", 500, 3744.38),
(1400000, "2013-04-01", "2014-03-31", 500, 745.98),
(1499999, "2013-04-01", "2014-03-31", 500, 0.00),
(1500000, "2013-04-01", "2014-03-31", 500, 0.00),
(300000, "2014-01-01", "2014-03-31", 500, 519.36),
(300001, "2014-01-01", "2014-03-31", 500, 519.35),
(400000, "2014-01-01", "2014-03-31", 500, 0.00),
(1000000, "2014-01-01", "2014-03-31", 500, 0.00),
(1400000, "2014-01-01", "2014-03-31", 500, 0.00),
(1499999, "2014-01-01", "2014-03-31", 500, 0.00),
(1500000, "2014-01-01", "2014-03-31", 500, 0.00),
(200000, "2013-11-01", "2014-10-1", 500, 0.00),
(300000, "2013-11-01", "2014-10-1", 500, 5107.51),
(400000, "2013-11-01", "2014-10-1", 500, 4634.86),
(1000000, "2013-11-01", "2014-10-1", 500, 1787.52),
(1400000, "2013-11-01", "2014-10-1", 500, 0.00),
(1499999, "2013-11-01", "2014-10-1", 500, 0.00),
(1500000, "2013-11-01", "2014-10-1", 500, 0.00)
)
val jiraCalculationCATO1672Examples = Table(
("b37BasicProfitChargeable", "cp1AccountingPeriodStartDate", "cp2AccountingPeriodEndDate", "b38FrankedInvestment", "cato04MRR"),
(300000, "2006-10-02", "2007-10-01", 0, 0.00),
(300001, "2006-10-02", "2007-10-01", 0, 31487.65),
(400000, "2006-10-02", "2007-10-01", 0, 28863.70),
(1000000, "2006-10-02", "2007-10-01", 0, 13119.86),
(1400000, "2006-10-02", "2007-10-01", 0, 2623.97),
(1499999, "2006-10-02", "2007-10-01", 0, 0.03),
(1500000, "2006-10-02", "2007-10-01", 0, 0.00),
(300000, "2007-07-01", "2008-06-30", 0, 0.00),
(300001, "2007-07-01", "2008-06-30", 0, 27762.28),
(400000, "2007-07-01", "2008-06-30", 0, 25448.76),
(1000000, "2007-07-01", "2008-06-30", 0, 11567.62),
(1400000, "2007-07-01", "2008-06-30", 0, 2313.52),
(1499999, "2007-07-01", "2008-06-30", 0, 0.02),
(1500000, "2007-07-01", "2008-06-30", 0, 0.00),
(300000, "2009-07-01", "2010-06-30", 0, 0.00),
(300001, "2009-07-01", "2010-06-30", 0, 20999.98),
(400000, "2009-07-01", "2010-06-30", 0, 19250.00),
(1000000, "2009-07-01", "2010-06-30", 0, 8750.00),
(1400000, "2009-07-01", "2010-06-30", 0, 1750.00),
(1499999, "2009-07-01", "2010-06-30", 0, 0.02),
(1500000, "2009-07-01", "2010-06-30", 0, 0.00),
(300000, "2011-07-01", "2012-06-30", 0, 0.00),
(300001, "2011-07-01", "2012-06-30", 0, 16508.19),
(400000, "2011-07-01", "2012-06-30", 0, 15132.51),
(1000000, "2011-07-01", "2012-06-30", 0, 6878.42),
(1400000, "2011-07-01", "2012-06-30", 0, 1375.68),
(1499999, "2011-07-01", "2012-06-30", 0, 0.01),
(1500000, "2011-07-01", "2012-06-30", 0, 0.00),
(300000, "2013-07-01", "2014-06-30", 0, 0.00),
(300001, "2013-07-01", "2014-06-30", 0, 7504.10),
(400000, "2013-07-01", "2014-06-30", 0, 6878.77),
(1000000, "2013-07-01", "2014-06-30", 0, 3126.71),
(1400000, "2013-07-01", "2014-06-30", 0, 625.34),
(1499999, "2013-07-01", "2014-06-30", 0, 0.01),
(1500000, "2013-07-01", "2014-06-30", 0, 0.00)
)
val jiraCalculationCATO1670Examples = Table(
("b37BasicProfitChargeable", "cp1AccountingPeriodStartDate", "cp2AccountingPeriodEndDate", "b38FrankedInvestment", "b39AssociatedCompanies", "cato04MRR"),
(82475, "2013-01-01", "2013-12-31", 0, 3, 2374.26),
(82475, "2013-01-01", "2013-12-31", 0, 0, 0.00),
(82475, "2013-01-01", "2013-12-31", 0, 2, 0.00),
(282475, "2013-01-01", "2013-12-31", 0, 3, 750.97),
(282475, "2013-01-01", "2013-12-31", 0, 2, 1765.53),
(282475, "2013-01-01", "2013-12-31", 20000, 2, 1497.19),
(410000, "2014-04-01", "2015-03-31", 0, 0, 2725.00),
(221000, "2014-04-01", "2015-03-31", 0, 0, 0.00),
(30000, "2014-04-01", "2015-03-31", 50000, 2, 0.00),
(80000, "2014-04-01", "2015-03-31", 50000, 2, 569.23),
(100000, "2014-04-01", "2015-03-31", 410000, 2, 0.00),
(90000, "2014-04-01", "2015-03-31", 0, 3, 712.50),
(90000, "2014-04-01", "2015-03-31", 0, 2, 0.00),
(280000, "2014-04-01", "2015-03-31", 35000, 0, 2633.33),
(240000, "2014-04-01", "2015-03-31", 49999, 0, 0.00),
(221000, "2014-06-01", "2015-05-31", 0, 0, 0.00),
(30000, "2014-07-01", "2015-06-30", 50000, 2, 0.00),
(100000, "2014-09-01", "2015-08-31", 410000, 2, 0.00),
(90000, "2014-12-01", "2015-11-30", 0, 2, 0.00),
(240000, "2014-03-01", "2015-02-28", 49999, 0, 0.00),
(1650, "2014-04-01", "2014-04-02", 0, 0, 16.42),
(50135, "2015-03-01", "2015-04-30", 0, 0, 0.00),
(10000, "2014-05-01", "2014-06-30", 6250, 2, 0.00),
(294000, "2015-03-01", "2015-09-30", 0, 2, 0.00),
(44125, "2014-05-01", "2014-11-30", 0, 3, 439.35),
(225343, "2014-05-01", "2015-03-31", 49999, 0, 0.00)
)
val jiraCalculationCATO2238Examples = Table(
("b37BasicProfitChargeable", "cp1AccountingPeriodStartDate", "cp2AccountingPeriodEndDate", "b38FrankedInvestment", "b39AssociatedCompanies", "cato04MRR"),
(410000, "2014-04-01", "2015-03-31", 0, 0, 2725.00),
(1000000, "2014-09-01", "2015-08-31", 0, 0, 726.03),
(410000, "2014-05-01", "2015-04-30", 0, 0, 2501.02),
(80000, "2014-08-01", "2015-07-31", 50000, 2, 378.97),
(100000, "2015-04-01", "2016-03-31", 0, 0, 0.00)
)
"MarginalRateReliefCalculator" should {
"satisfy calculations example provided in jira CATO-1581" in new Calculator {
forAll(jiraCalculationCATO1581Examples) {
(b37Value: Int,
cp1Value: String,
cp2Value: String,
b38Value: Int,
cato04Value: Double) =>
calculateMRR(
b37 = B37(b37Value),
b38 = B38(Some(b38Value)),
b39 = B39(None),
accountingPeriod = HmrcAccountingPeriod(CP1(new LocalDate(cp1Value)), CP2(new LocalDate(cp2Value)))
) should be(CATO04(cato04Value))
}
}
"satisfy calculations example provided in jira CATO-1672 for years 2006 to 2014" in new Calculator {
forAll(jiraCalculationCATO1672Examples) {
(b37Value: Int,
cp1Value: String,
cp2Value: String,
b38Value: Int,
cato04Value: Double) =>
calculateMRR(
b37 = B37(b37Value),
b38 = B38(Some(b38Value)),
b39 = B39(None),
accountingPeriod = HmrcAccountingPeriod(CP1(new LocalDate(cp1Value)), CP2(new LocalDate(cp2Value)))
) should be(CATO04(cato04Value))
}
}
"satisfy calculations example provided in jira CATO-1670" in new Calculator {
forAll(jiraCalculationCATO1670Examples) {
(b37Value: Int,
cp1Value: String,
cp2Value: String,
b38Value: Int,
b39Value: Int,
cato04Value: Double) =>
calculateMRR(
b37 = B37(b37Value),
b38 = B38(Some(b38Value)),
b39 = B39(Option(b39Value)),
accountingPeriod = HmrcAccountingPeriod(CP1(new LocalDate(cp1Value)), CP2(new LocalDate(cp2Value)))
) should be(CATO04(cato04Value))
}
}
}
"satisfy calculations example provided in jira CATO-2238" in new Calculator {
forAll(jiraCalculationCATO2238Examples) {
(b37Value: Int,
cp1Value: String,
cp2Value: String,
b38Value: Int,
b39Value: Int,
cato04Value: Double) =>
calculateMRR(
b37 = B37(b37Value),
b38 = B38(Some(b38Value)),
b39 = B39(Option(b39Value)),
accountingPeriod = HmrcAccountingPeriod(CP1(new LocalDate(cp1Value)), CP2(new LocalDate(cp2Value)))
) should be(CATO04(cato04Value))
}
}
}
trait Calculator extends MarginalRateReliefCalculator with CorporationTaxCalculator {
def calculateMRR(b37: B37, b38: B38, b39: B39, accountingPeriod: HmrcAccountingPeriod): CATO04 = {
val b44 = calculateApportionedProfitsChargeableFy1(CorporationTaxCalculatorParameters(CP295(b37.value), accountingPeriod))
val b54 = calculateApportionedProfitsChargeableFy2(CorporationTaxCalculatorParameters(CP295(b37.value), accountingPeriod))
computeMarginalRateRelief(b37, b44 = b44, b54 = b54, b38 = b38, b39 = b39, accountingPeriod = accountingPeriod)
}
}
| ahudspith-equalexperts/ct-calculations | src/test/scala/uk/gov/hmrc/ct/ct600/v2/calculations/MarginalRateReliefCalculatorSpec.scala | Scala | apache-2.0 | 14,089 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.analysis.UnresolvedRelation
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.execution.{SparkPlan, SparkStrategy}
import org.apache.spark.sql.hive.execution.command.{CarbonDropDatabaseCommand, CarbonResetCommand, CarbonSetCommand}
import org.apache.carbondata.core.util.CarbonUtil
import org.apache.carbondata.spark.exception.MalformedCarbonCommandException
/**
* Carbon strategies for ddl commands
*/
class DDLStrategy(sparkSession: SparkSession) extends SparkStrategy {
def apply(plan: LogicalPlan): Seq[SparkPlan] = {
plan match {
case LoadDataCommand(identifier, path, isLocal, isOverwrite, partition)
if CarbonEnv.getInstance(sparkSession).carbonMetastore
.tableExists(identifier)(sparkSession) =>
ExecutedCommandExec(LoadTable(identifier.database, identifier.table.toLowerCase, path,
Seq(), Map(), isOverwrite)) :: Nil
case alter@AlterTableRenameCommand(oldTableIdentifier, newTableIdentifier, _) =>
val dbOption = oldTableIdentifier.database.map(_.toLowerCase)
val tableIdentifier = TableIdentifier(oldTableIdentifier.table.toLowerCase(), dbOption)
val isCarbonTable = CarbonEnv.getInstance(sparkSession).carbonMetastore
.tableExists(tableIdentifier)(
sparkSession)
if (isCarbonTable) {
val renameModel = AlterTableRenameModel(tableIdentifier, newTableIdentifier)
ExecutedCommandExec(AlterTableRenameTable(renameModel)) :: Nil
} else {
ExecutedCommandExec(alter) :: Nil
}
case DropTableCommand(identifier, ifNotExists, isView, _)
if CarbonEnv.getInstance(sparkSession).carbonMetastore
.isTablePathExists(identifier)(sparkSession) =>
ExecutedCommandExec(
CarbonDropTableCommand(ifNotExists, identifier.database,
identifier.table.toLowerCase)) :: Nil
case ShowLoadsCommand(databaseName, table, limit) =>
ExecutedCommandExec(ShowLoads(databaseName, table.toLowerCase, limit, plan.output)) :: Nil
case InsertIntoCarbonTable(relation: CarbonDatasourceHadoopRelation,
_, child: LogicalPlan, overwrite, _) =>
ExecutedCommandExec(LoadTableByInsert(relation, child, overwrite.enabled)) :: Nil
case createDb@CreateDatabaseCommand(dbName, ifNotExists, _, _, _) =>
CarbonUtil.createDatabaseDirectory(dbName, CarbonEnv.getInstance(sparkSession).storePath)
ExecutedCommandExec(createDb) :: Nil
case drop@DropDatabaseCommand(dbName, ifExists, isCascade) =>
ExecutedCommandExec(CarbonDropDatabaseCommand(drop)) :: Nil
case alterTable@AlterTableCompaction(altertablemodel) =>
val isCarbonTable = CarbonEnv.getInstance(sparkSession).carbonMetastore
.tableExists(TableIdentifier(altertablemodel.tableName,
altertablemodel.dbName))(sparkSession)
if (isCarbonTable) {
if (altertablemodel.compactionType.equalsIgnoreCase("minor") ||
altertablemodel.compactionType.equalsIgnoreCase("major")) {
ExecutedCommandExec(alterTable) :: Nil
} else {
throw new MalformedCarbonCommandException(
"Unsupported alter operation on carbon table")
}
} else {
throw new MalformedCarbonCommandException(
"Operation not allowed : " + altertablemodel.alterSql)
}
case dataTypeChange@AlterTableDataTypeChange(alterTableChangeDataTypeModel) =>
val isCarbonTable = CarbonEnv.getInstance(sparkSession).carbonMetastore
.tableExists(TableIdentifier(alterTableChangeDataTypeModel.tableName,
alterTableChangeDataTypeModel.databaseName))(sparkSession)
if (isCarbonTable) {
ExecutedCommandExec(dataTypeChange) :: Nil
} else {
throw new MalformedCarbonCommandException("Unsupported alter operation on hive table")
}
case addColumn@AlterTableAddColumns(alterTableAddColumnsModel) =>
val isCarbonTable = CarbonEnv.getInstance(sparkSession).carbonMetastore
.tableExists(TableIdentifier(alterTableAddColumnsModel.tableName,
alterTableAddColumnsModel.databaseName))(sparkSession)
if (isCarbonTable) {
ExecutedCommandExec(addColumn) :: Nil
} else {
throw new MalformedCarbonCommandException("Unsupported alter operation on hive table")
}
case dropColumn@AlterTableDropColumns(alterTableDropColumnModel) =>
val isCarbonTable = CarbonEnv.getInstance(sparkSession).carbonMetastore
.tableExists(TableIdentifier(alterTableDropColumnModel.tableName,
alterTableDropColumnModel.databaseName))(sparkSession)
if (isCarbonTable) {
ExecutedCommandExec(dropColumn) :: Nil
} else {
throw new MalformedCarbonCommandException("Unsupported alter operation on hive table")
}
case desc@DescribeTableCommand(identifier, partitionSpec, isExtended, isFormatted)
if CarbonEnv.getInstance(sparkSession).carbonMetastore
.tableExists(identifier)(sparkSession) && isFormatted =>
val resolvedTable =
sparkSession.sessionState.executePlan(UnresolvedRelation(identifier, None)).analyzed
val resultPlan = sparkSession.sessionState.executePlan(resolvedTable).executedPlan
ExecutedCommandExec(DescribeCommandFormatted(resultPlan, plan.output, identifier)) :: Nil
case ShowPartitionsCommand(t, cols) =>
val isCarbonTable = CarbonEnv.getInstance(sparkSession).carbonMetastore
.tableExists(t)(sparkSession)
if (isCarbonTable) {
ExecutedCommandExec(ShowCarbonPartitionsCommand(t)) :: Nil
} else {
ExecutedCommandExec(ShowPartitionsCommand(t, cols)) :: Nil
}
case set@SetCommand(kv) =>
ExecutedCommandExec(CarbonSetCommand(set)) :: Nil
case reset@ResetCommand =>
ExecutedCommandExec(CarbonResetCommand()) :: Nil
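      // CREATE TABLE ... USING org.apache.spark.sql.CarbonSource: rewrite the catalog entry
      // with the Carbon schema before handing off to Spark's data source table creation.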
case org.apache.spark.sql.execution.datasources.CreateTable(tableDesc, mode, None)
if tableDesc.provider.get != DDLUtils.HIVE_PROVIDER
&& tableDesc.provider.get.equals("org.apache.spark.sql.CarbonSource") =>
val updatedCatalog =
CarbonSource.updateCatalogTableWithCarbonSchema(tableDesc, sparkSession)
val cmd =
CreateDataSourceTableCommand(updatedCatalog, ignoreIfExists = mode == SaveMode.Ignore)
ExecutedCommandExec(cmd) :: Nil
case _ => Nil
}
}
}
| aniketadnaik/carbondataStreamIngest | integration/spark2/src/main/scala/org/apache/spark/sql/execution/command/DDLStrategy.scala | Scala | apache-2.0 | 7,506 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.api
import play.api.libs.json.{Format, JsString}
sealed trait BankAccountDetailsStatus
case object ValidStatus extends BankAccountDetailsStatus
case object InvalidStatus extends BankAccountDetailsStatus
case object IndeterminateStatus extends BankAccountDetailsStatus
object BankAccountDetailsStatus {
val map: Map[BankAccountDetailsStatus, String] = Map(
ValidStatus -> "yes",
InvalidStatus -> "no",
IndeterminateStatus -> "indeterminate"
)
val inverseMap: Map[String, BankAccountDetailsStatus] = map.map(_.swap)
def fromString(value: String): BankAccountDetailsStatus = inverseMap(value)
def toJsString(value: BankAccountDetailsStatus): JsString = JsString(map(value))
  implicit val format: Format[BankAccountDetailsStatus] = Format[BankAccountDetailsStatus](_.validate[String] map fromString, toJsString)
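  // A minimal round-trip sketch (illustrative only, assuming play.api.libs.json.Json is in scope):
  //   Json.toJson[BankAccountDetailsStatus](ValidStatus)     // JsString("yes")
  //   JsString("indeterminate").as[BankAccountDetailsStatus] // IndeterminateStatus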
}
| hmrc/vat-registration-frontend | app/models/api/BankAccountDetailsStatus.scala | Scala | apache-2.0 | 1,424 |
/***
* Copyright 2018 Rackspace US, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.rackspace.com.papi.components.checker.step
import javax.xml.namespace.QName
import javax.servlet.FilterChain
import com.rackspace.com.papi.components.checker.step.base.Step
import com.rackspace.com.papi.components.checker.step.base.StepContext
import com.rackspace.com.papi.components.checker.step.results.Result
import com.rackspace.com.papi.components.checker.servlet.CheckerServletResponse
import com.rackspace.com.papi.components.checker.servlet.CheckerServletRequest
import com.rackspace.com.papi.components.checker.servlet.CheckerServletRequest.MAP_ROLES_HEADER
import com.rackspace.com.papi.components.checker.servlet.CheckerServletRequest.ROLES_HEADER
import com.rackspace.com.papi.components.checker.util.ImmutableNamespaceContext
import com.rackspace.com.papi.components.checker.util.HeaderMap
import com.rackspace.com.papi.components.checker.LogAssertions
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import com.fasterxml.jackson.databind.ObjectMapper
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TenantRoleStepSuite extends BaseStepSuite with LogAssertions {
val mapHeaderValue = b64Encode("""
{
"tenant1" : ["admin","foo","bar"],
"tenant2" : ["admin", "foo"],
"tenant3" : ["foo", "bar", "biz", "booz"],
"tenant4" : ["booga"]
}
""")
val CAPTURE_HEADER = "X-TENANT-HEADER"
val XSD_STRING = new QName("http://www.w3.org/2001/XMLSchema", "string", "xsd")
val initContext = new StepContext(0, (new HeaderMap).addHeaders(ROLES_HEADER, List("foo")))
val privateMapper = {
val om = new ObjectMapper
om.registerModule(DefaultScalaModule)
om
}
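  // Jackson mapper (with the Scala module registered) used to render the JSON request
  // bodies for the JSON-based steps below.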
type ProcessStepType = (String /* Tenant Param Name */,
Boolean /* enable tenant */,
Option[List[String]] /* matchTenants */,
Option[Set[String]] /* matchRoles */,
Option[String] /* Capture Header */,
StepContext /* existing context */) => StepContext
type TenantRoleSteps = Map[String /*step name*/, ProcessStepType]
//
  // These are functions of type ProcessStepType that create a step and
// do a check based on parameters.
//
def xpathProcessStep(tenantName : String, enableTenant : Boolean,
matchTenants : Option[List[String]], matchRoles : Option[Set[String]],
captureHeader : Option[String],
context : StepContext) : StepContext = {
val nsContext = ImmutableNamespaceContext(Map("tst"->"http://test.org/test"))
val xpath = new XPath("XPath", "XPath", Some(tenantName), "/tst:tenants/tst:tenant[1]", None, None, nsContext, 20, captureHeader, enableTenant, 10, Array[Step]())
val xml = <tenants xmlns="http://test.org/test">
{
matchTenants match {
case Some(matches) => matches.map { t => <tenant>{t}</tenant> }
case None => <tenant/>
}
}
</tenants>
val req = request("PUT", "/a/b", "application/xml", xml, true, Map(MAP_ROLES_HEADER->List(mapHeaderValue)))
xpath.checkStep (req, response, chain, context).get
}
def jsonXPathProcessStep(tenantName : String, enableTenant : Boolean,
matchTenants : Option[List[String]], matchRoles : Option[Set[String]],
captureHeader : Option[String],
context : StepContext) : StepContext = {
val nsContext = ImmutableNamespaceContext(Map[String,String]())
val xpath = new JSONXPath("JSONXPath", "JSONXPath", Some(tenantName), "$_?tenants(1)", None, None, nsContext, 31, captureHeader, enableTenant, 10, Array[Step]())
val json : Map[String, List[String]] = Map[String, List[String]]( "tenants" -> { matchTenants match {
case Some(mts : List[String]) => mts
case None => List[String]("")
}})
val req = request("PUT", "/a/b", "application/json",privateMapper.writeValueAsString(json), true, Map(MAP_ROLES_HEADER->List(mapHeaderValue)))
xpath.checkStep (req, response, chain, context).get
}
def uriProcessStep(tenantName : String, enableTenant : Boolean,
matchTenants : Option[List[String]], matchRoles : Option[Set[String]],
captureHeader : Option[String],
context : StepContext) : StepContext = {
val uri = new URI("URI", "URI", Some(tenantName), ".*".r, captureHeader, enableTenant, Array[Step]())
//
    // This step requires a single match tenant; no-match and
// multi-match don't make sense in a URI param.
//
// So if you're here 'cus of a None.get or NoSuchElementException,
// you're using this function in the wrong test!
//
val uriMatch = matchTenants.get.head
uri.checkStep (request("GET", s"/$uriMatch/b","","", false, Map(MAP_ROLES_HEADER->List(mapHeaderValue))), response, chain, context).get
}
def uriXSDProcessStep(tenantName : String, enableTenant : Boolean,
matchTenants : Option[List[String]],
matchRoles : Option[Set[String]], captureHeader : Option[String],
context : StepContext) : StepContext = {
//
// Because of the way error-messaging works with URIXSD, we can't
// just call checkStep directly. We must call check and intercept
// the context from there. Kinda nasty, this got somewhat fixed
    // with content error types, but not in URIXSD.
//
var retContext : Option[StepContext] = None
val capture = new Step("capture", "CaptureContext") {
override def check(req : CheckerServletRequest,
resp : CheckerServletResponse,
chain : FilterChain,
captureContext : StepContext) : Option[Result] = {
retContext = Some(captureContext)
None
}
}
val urixsd = new URIXSD("URIXSD", "URIXSD", Some(tenantName), XSD_STRING, xsdSchema, captureHeader, enableTenant, Array[Step](capture))
//
    // This step requires a single match tenant; no-match and
// multi-match don't make sense in a URI param.
//
// So if you're here 'cus of a None.get or NoSuchElementException,
// you're using this function in the wrong test!
//
val uriMatch = matchTenants.get.head
urixsd.check (request("GET", s"/$uriMatch/b","","", false, Map(MAP_ROLES_HEADER->List(mapHeaderValue))), response, chain, context)
retContext.get
}
def headerSingleProcessStep(tenantName : String, enableTenant : Boolean,
matchTenants : Option[List[String]],
matchRoles : Option[Set[String]], captureHeader : Option[String],
context : StepContext) : StepContext = {
val header = new HeaderSingle("HEADER_SINGLE", "Header Single", tenantName, ".*".r, None, None, captureHeader, enableTenant, 12345, Array[Step]())
//
    // This step requires a single match tenant; multi-match does not
    // make sense in a HeaderSingle step.
    //
val headerMatch = matchTenants.get.head
header.checkStep (request("GET", s"/a/b","","", false, Map(MAP_ROLES_HEADER->List(mapHeaderValue), tenantName->List(headerMatch))),
response, chain, context).get
}
def headerXSDSingleProcessStep(tenantName : String, enableTenant : Boolean,
matchTenants : Option[List[String]], matchRoles : Option[Set[String]],
captureHeader : Option[String],
context : StepContext) : StepContext = {
val header = new HeaderXSDSingle("HEADER_SINGLE", "Header Single", tenantName, XSD_STRING, xsdSchema, None, None, captureHeader, enableTenant, 12345, Array[Step]())
//
    // This step requires a single match tenant; multi-match does not
    // make sense in a HeaderXSDSingle step.
    //
val headerMatch = matchTenants.get.head
header.checkStep (request("GET", s"/a/b","","", false, Map(MAP_ROLES_HEADER->List(mapHeaderValue), tenantName->List(headerMatch))),
response, chain, context).get
}
def headerProcessStep(tenantName : String, enableTenant : Boolean,
matchTenants : Option[List[String]], matchRoles : Option[Set[String]],
captureHeader : Option[String],
context : StepContext) : StepContext = {
val header = new Header("HEADER", "Header", tenantName, ".*".r, None, None, captureHeader, matchRoles, enableTenant, 12345, Array[Step]())
header.checkStep (request("GET", s"/a/b","","", false, Map(MAP_ROLES_HEADER->List(mapHeaderValue), tenantName->matchTenants.get)),
response, chain, context).get
}
def headerXSDProcessStep(tenantName : String, enableTenant : Boolean,
matchTenants : Option[List[String]], matchRoles : Option[Set[String]],
captureHeader : Option[String],
context : StepContext) : StepContext = {
val header = new HeaderXSD("HEADERXSD", "Header XSD", tenantName, XSD_STRING, xsdSchema, None, None, captureHeader, matchRoles, enableTenant, 12345, Array[Step]())
header.checkStep (request("GET", s"/a/b","","", false, Map(MAP_ROLES_HEADER->List(mapHeaderValue), tenantName->matchTenants.get)),
response, chain, context).get
}
def headerAnyProcessStep(tenantName : String, enableTenant : Boolean,
matchTenants : Option[List[String]], matchRoles : Option[Set[String]],
captureHeader : Option[String],
context : StepContext) : StepContext = {
val header = new HeaderAny("HEADER_ANY", "Header Any", tenantName, ".*".r, None, None, captureHeader, matchRoles, enableTenant, 12345, Array[Step]())
header.checkStep (request("GET", s"/a/b","","", false, Map(MAP_ROLES_HEADER->List(mapHeaderValue), tenantName->matchTenants.get)),
response, chain, context).get
}
def headerXSDAnyProcessStep(tenantName : String, enableTenant : Boolean,
matchTenants : Option[List[String]],
matchRoles : Option[Set[String]], captureHeader : Option[String],
context : StepContext) : StepContext = {
val header = new HeaderXSDAny("HEADERXSD_Any", "HeaderXSD Any", tenantName, XSD_STRING, xsdSchema, None, None, captureHeader, matchRoles, enableTenant, 12345, Array[Step]())
header.checkStep (request("GET", s"/a/b","","", false, Map(MAP_ROLES_HEADER->List(mapHeaderValue), tenantName->matchTenants.get)),
response, chain, context).get
}
def headerAllProcessStep(tenantName : String, enableTenant : Boolean,
matchTenants : Option[List[String]], matchRoles : Option[Set[String]],
captureHeader : Option[String],
context : StepContext) : StepContext = {
val header = new HeaderAll("HEADER_ALL", "Header All", tenantName, None, None, Some(".*".r), None, None, captureHeader, matchRoles, enableTenant, 12345, Array[Step]())
header.checkStep (request("GET", s"/a/b","","", false, Map(MAP_ROLES_HEADER->List(mapHeaderValue), tenantName->matchTenants.get)),
response, chain, context).get
}
def captureHeaderProcessStep(tenantName : String, enableTenant : Boolean,
matchTenants : Option[List[String]], matchRoles : Option[Set[String]],
captureHeader : Option[String],
context : StepContext) : StepContext = {
val nsContext = ImmutableNamespaceContext(Map[String,String]())
val captureHeaderStep = new CaptureHeader("CaptureHeader", "Capture Header", tenantName, "$_?tenants?*", nsContext, 31, matchRoles, enableTenant, Array[Step]())
val json : Map[String, List[String]] = Map[String, List[String]]( "tenants" -> { matchTenants match {
case Some(mts : List[String]) => mts
case None => List[String]("")
}})
val req = request("PUT", "/a/b", "application/json",privateMapper.writeValueAsString(json), true, Map(MAP_ROLES_HEADER->List(mapHeaderValue)))
//
// Capture header is a weird case because the parameter name and
// the capture header name are always the same. We split these up
    // to play nice with the test framework.
//
val contextWithRoles = captureHeaderStep.checkStep (req, response, chain, context).get
captureHeader match {
case Some(header) =>
contextWithRoles.copy(requestHeaders = contextWithRoles.requestHeaders.addHeaders(header, contextWithRoles.requestHeaders(tenantName)))
case None => contextWithRoles
}
}
//
  // Steps that can process only a single tenant value.
//
val tenantRoleStepsSingle : TenantRoleSteps = Map(
"XPATH" -> xpathProcessStep,
"JSON_XPATH" -> jsonXPathProcessStep,
"URI" -> uriProcessStep,
"URIXSD" -> uriXSDProcessStep,
"HEADER_SINGLE" -> headerSingleProcessStep,
"HEADERXSD_SINGLE" -> headerXSDSingleProcessStep
)
//
// Steps that can process multiple tenant values
//
val tenantRoleStepsMulti : TenantRoleSteps = Map(
"HEADER" -> headerProcessStep,
"HEADERXSD" -> headerXSDProcessStep,
"HEADER_ANY" -> headerAnyProcessStep,
"HEADERXSD_ANY" -> headerXSDAnyProcessStep,
"HEADER_ALL" -> headerAllProcessStep,
"CAPTURE_HEADER" -> captureHeaderProcessStep
)
//
  // These tests cover a single tenant value; note that we also run
  // them on steps that support multiple tenant values.
//
for ((stepName, processStep) <- tenantRoleStepsSingle ++ tenantRoleStepsMulti) {
test(s"If isTenant is enabled in a(n) $stepName step should set correct roles on a match") {
val tstContext = processStep("happyTenant",true, Some(List("tenant1")), None, None, initContext)
val tstContext2 = processStep("happyTenant",true, Some(List("tenant2")), None, None, initContext)
val tstContext3 = processStep("happyTenant",true, Some(List("tenant4")), None, None, initContext)
assert (tstContext.requestHeaders(ROLES_HEADER) == List("foo","admin/{happyTenant}", "foo/{happyTenant}", "bar/{happyTenant}"))
assert (!tstContext.requestHeaders.contains(CAPTURE_HEADER))
assert (tstContext2.requestHeaders(ROLES_HEADER) == List("foo","admin/{happyTenant}", "foo/{happyTenant}"))
assert (!tstContext2.requestHeaders.contains(CAPTURE_HEADER))
assert (tstContext3.requestHeaders(ROLES_HEADER) == List("foo","booga/{happyTenant}"))
assert (!tstContext3.requestHeaders.contains(CAPTURE_HEADER))
}
test(s"If isTenant is enabled in a(n) $stepName step should set correct roles on a match (capture header)") {
val tstContext = processStep("happyTenant",true, Some(List("tenant1")), None, Some(CAPTURE_HEADER), initContext)
val tstContext2 = processStep("happyTenant",true, Some(List("tenant2")), None, Some(CAPTURE_HEADER), initContext)
val tstContext3 = processStep("happyTenant",true, Some(List("tenant4")), None, Some(CAPTURE_HEADER), initContext)
assert (tstContext.requestHeaders(ROLES_HEADER) == List("foo","admin/{happyTenant}", "foo/{happyTenant}", "bar/{happyTenant}"))
assert (tstContext.requestHeaders(CAPTURE_HEADER) == List("tenant1"))
assert (tstContext2.requestHeaders(ROLES_HEADER) == List("foo","admin/{happyTenant}", "foo/{happyTenant}"))
assert (tstContext2.requestHeaders(CAPTURE_HEADER) == List("tenant2"))
assert (tstContext3.requestHeaders(ROLES_HEADER) == List("foo","booga/{happyTenant}"))
assert (tstContext3.requestHeaders(CAPTURE_HEADER) == List("tenant4"))
}
test(s"If isTenant is enabled in a(n) $stepName, but there is no tenant match there should be no change in the content") {
val tstContext = processStep("happyTenant", true, Some(List("t1")), None, None, initContext)
val tstContext2 = processStep("happyTenant", true, Some(List("t2")), None, None, initContext)
val tstContext3 = processStep("happyTenant", true, Some(List("t4")), None, None, initContext)
assert (tstContext.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext.requestHeaders.contains(CAPTURE_HEADER))
assert (tstContext2.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext2.requestHeaders.contains(CAPTURE_HEADER))
assert (tstContext3.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext3.requestHeaders.contains(CAPTURE_HEADER))
}
test(s"If isTenant is enabled in a(n) $stepName, but there is no tenant match there should be no change in the content (capture header)") {
val tstContext = processStep("happyTenant", true, Some(List("t1")), None, Some(CAPTURE_HEADER), initContext)
val tstContext2 = processStep("happyTenant", true, Some(List("t2")), None, Some(CAPTURE_HEADER), initContext)
val tstContext3 = processStep("happyTenant", true, Some(List("t4")), None, Some(CAPTURE_HEADER), initContext)
assert (tstContext.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext.requestHeaders(CAPTURE_HEADER) == List("t1"))
assert (tstContext2.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext2.requestHeaders(CAPTURE_HEADER) == List("t2"))
assert (tstContext3.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext3.requestHeaders(CAPTURE_HEADER) == List("t4"))
}
test(s"If isTenant is disabled in a(n) $stepName there should be no change in the content") {
val tstContext = processStep("happyTenant",false, Some(List("tenant1")), None, None, initContext)
val tstContext2 = processStep("happyTenant",false, Some(List("tenant2")), None, None, initContext)
val tstContext3 = processStep("happyTenant",false, Some(List("tenant4")), None, None, initContext)
assert (tstContext.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext.requestHeaders.contains(CAPTURE_HEADER))
assert (tstContext2.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext2.requestHeaders.contains(CAPTURE_HEADER))
assert (tstContext3.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext3.requestHeaders.contains(CAPTURE_HEADER))
}
test(s"If isTenant is disabled in a(n) $stepName there should be no change in the content (capture header)") {
val tstContext = processStep("happyTenant",false, Some(List("tenant1")), None, Some(CAPTURE_HEADER), initContext)
val tstContext2 = processStep("happyTenant",false, Some(List("tenant2")), None, Some(CAPTURE_HEADER), initContext)
val tstContext3 = processStep("happyTenant",false, Some(List("tenant4")), None, Some(CAPTURE_HEADER), initContext)
assert (tstContext.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext.requestHeaders(CAPTURE_HEADER) == List("tenant1"))
assert (tstContext2.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext2.requestHeaders(CAPTURE_HEADER) == List("tenant2"))
assert (tstContext3.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext3.requestHeaders(CAPTURE_HEADER) == List("tenant4"))
}
}
//
  // These tests cover multi-tenant value checks
//
for ((stepName, processStep) <- tenantRoleStepsMulti) {
test(s"If isTenant is enabled in a(n) $stepName step should set correct roles on a match (multi-tenant)") {
val tstContext = processStep("happyTenant",true, Some(List("tenant1", "tenant2", "tenant3")), Some(Set("foo/{happyTenant}")), None, initContext)
val tstContext2 = processStep("happyTenant",true, Some(List("tenant1", "tenant3")), Some(Set("bar/{happyTenant}","foo/{happyTenant}")), None, initContext)
val tstContext3 = processStep("happyTenant",true, Some(List("tenant1", "tenant2")), Some(Set("admin/{happyTenant}")), None, initContext)
assert ((Set[String]() ++ tstContext.requestHeaders(ROLES_HEADER)) == Set("foo", "foo/{happyTenant}"))
assert (!tstContext.requestHeaders.contains(CAPTURE_HEADER))
assert ((Set[String]() ++ tstContext2.requestHeaders(ROLES_HEADER)) == Set("foo","bar/{happyTenant}", "foo/{happyTenant}"))
assert (!tstContext2.requestHeaders.contains(CAPTURE_HEADER))
assert ((Set[String]() ++ tstContext3.requestHeaders(ROLES_HEADER)) == Set("foo","admin/{happyTenant}"))
assert (!tstContext3.requestHeaders.contains(CAPTURE_HEADER))
}
test(s"If isTenant is enabled in a(n) $stepName step should set correct roles on a match (multi-tenant, capture header)") {
val tstContext = processStep("happyTenant",true, Some(List("tenant1", "tenant2", "tenant3")), Some(Set("foo/{happyTenant}")), Some(CAPTURE_HEADER), initContext)
val tstContext2 = processStep("happyTenant",true, Some(List("tenant1", "tenant3")), Some(Set("bar/{happyTenant}","foo/{happyTenant}")), Some(CAPTURE_HEADER), initContext)
val tstContext3 = processStep("happyTenant",true, Some(List("tenant1", "tenant2")), Some(Set("admin/{happyTenant}")), Some(CAPTURE_HEADER), initContext)
assert ((Set[String]() ++ tstContext.requestHeaders(ROLES_HEADER)) == Set("foo", "foo/{happyTenant}"))
assert (tstContext.requestHeaders(CAPTURE_HEADER) == List("tenant1", "tenant2", "tenant3"))
assert ((Set[String]() ++ tstContext2.requestHeaders(ROLES_HEADER)) == Set("foo","bar/{happyTenant}", "foo/{happyTenant}"))
assert (tstContext2.requestHeaders(CAPTURE_HEADER) == List("tenant1", "tenant3"))
assert ((Set[String]() ++ tstContext3.requestHeaders(ROLES_HEADER)) == Set("foo","admin/{happyTenant}"))
assert (tstContext3.requestHeaders(CAPTURE_HEADER) == List("tenant1", "tenant2"))
}
test(s"If isTenant is enabled in a(n) $stepName, but there is no tenant match there should be no change in the content (multi-tenant)") {
val tstContext = processStep("happyTenant", true, Some(List("t1", "tenant1")), Some(Set("foo/{happyTenant}")), None, initContext)
val tstContext2 = processStep("happyTenant", true, Some(List("tenant1", "t2")), Some(Set("foo/{happyTenant}")), None, initContext)
val tstContext3 = processStep("happyTenant", true, Some(List("tenant3", "tenant1", "tenant4")), Some(Set("foo/{happyTenant}")), None, initContext)
val tstContext4 = processStep("happyTenant", true, Some(List("tenant4", "tenant2")), Some(Set("foo/{happyTenant}")), None, initContext)
assert (tstContext.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext.requestHeaders.contains(CAPTURE_HEADER))
assert (tstContext2.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext2.requestHeaders.contains(CAPTURE_HEADER))
assert (tstContext3.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext3.requestHeaders.contains(CAPTURE_HEADER))
assert (tstContext4.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext4.requestHeaders.contains(CAPTURE_HEADER))
}
test(s"If isTenant is enabled in a(n) $stepName, but there is no tenant match there should be no change in the content (multi-tenant, capture header)") {
val tstContext = processStep("happyTenant", true, Some(List("t1", "tenant1")), Some(Set("foo/{happyTenant}")), Some(CAPTURE_HEADER), initContext)
val tstContext2 = processStep("happyTenant", true, Some(List("tenant1", "t2")), Some(Set("foo/{happyTenant}")), Some(CAPTURE_HEADER), initContext)
val tstContext3 = processStep("happyTenant", true, Some(List("tenant3", "tenant1", "tenant4")), Some(Set("foo/{happyTenant}")), Some(CAPTURE_HEADER), initContext)
val tstContext4 = processStep("happyTenant", true, Some(List("tenant4", "tenant2")), Some(Set("foo/{happyTenant}")), Some(CAPTURE_HEADER), initContext)
assert (tstContext.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext.requestHeaders(CAPTURE_HEADER) == List("t1", "tenant1"))
assert (tstContext2.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext2.requestHeaders(CAPTURE_HEADER) == List("tenant1", "t2"))
assert (tstContext3.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext3.requestHeaders(CAPTURE_HEADER) == List("tenant3", "tenant1", "tenant4"))
assert (tstContext4.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext4.requestHeaders(CAPTURE_HEADER) == List("tenant4", "tenant2"))
}
test(s"If isTenant is disabled in a(n) $stepName there should be no change in the content (multi-tenant)") {
val tstContext = processStep("happyTenant",false, Some(List("tenant1", "tenant2", "tenant3")), Some(Set("foo/{happyTenant}")), None, initContext)
val tstContext2 = processStep("happyTenant",false, Some(List("tenant1", "tenant3")), Some(Set("foo/{happyTenant}")), None, initContext)
val tstContext3 = processStep("happyTenant",false, Some(List("tenant1", "tenant2")), Some(Set("foo/{happyTenant}")), None, initContext)
assert (tstContext.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext.requestHeaders.contains(CAPTURE_HEADER))
assert (tstContext2.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext2.requestHeaders.contains(CAPTURE_HEADER))
assert (tstContext3.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext3.requestHeaders.contains(CAPTURE_HEADER))
}
test(s"If isTenant is disabled in a(n) $stepName there should be no change in the content (multi-tenant, capture header)") {
val tstContext = processStep("happyTenant",false, Some(List("tenant1", "tenant2", "tenant3")), Some(Set("foo/{happyTenant}")), Some(CAPTURE_HEADER), initContext)
val tstContext2 = processStep("happyTenant",false, Some(List("tenant1", "tenant3")), Some(Set("foo/{happyTenant}")), Some(CAPTURE_HEADER), initContext)
val tstContext3 = processStep("happyTenant",false, Some(List("tenant1", "tenant2")), Some(Set("foo/{happyTenant}")), Some(CAPTURE_HEADER), initContext)
assert (tstContext.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext.requestHeaders(CAPTURE_HEADER) == List("tenant1", "tenant2", "tenant3"))
assert (tstContext2.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext2.requestHeaders(CAPTURE_HEADER) == List("tenant1", "tenant3"))
assert (tstContext3.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext3.requestHeaders(CAPTURE_HEADER) == List("tenant1", "tenant2"))
}
}
//
  // XPath on XML has a weird property where you can select a node
// that won't actually resolve to a string. This is specific to
// XML nodes, and doesn't affect JSON. We test the weird case here.
//
val tenantRoleXPathStep : TenantRoleSteps = Map(
"XPATH" -> xpathProcessStep
)
for ((stepName, processStep) <- tenantRoleXPathStep) {
test(s"If isTenant is enabled in a(n) $stepName, but no tenant is selected there should be no change in the content") {
val tstContext = processStep("happyTenant", true, None, None, None, initContext)
val tstContext2 = processStep("happyTenant", true, None, None, None, initContext)
val tstContext3 = processStep("happyTenant", true, None, None, None, initContext)
assert (tstContext.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext.requestHeaders.contains(CAPTURE_HEADER))
assert (tstContext2.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext2.requestHeaders.contains(CAPTURE_HEADER))
assert (tstContext3.requestHeaders(ROLES_HEADER) == List("foo"))
assert (!tstContext3.requestHeaders.contains(CAPTURE_HEADER))
}
test(s"If isTenant is enabled in a(n) $stepName, but no tenant is selected there should be no change in the content (capture header)") {
val tstContext = processStep("happyTenant", true, None, None, Some(CAPTURE_HEADER), initContext)
val tstContext2 = processStep("happyTenant", true, None, None, Some(CAPTURE_HEADER), initContext)
val tstContext3 = processStep("happyTenant", true, None, None, Some(CAPTURE_HEADER), initContext)
assert (tstContext.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext.requestHeaders(CAPTURE_HEADER) == List(""))
assert (tstContext2.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext2.requestHeaders(CAPTURE_HEADER) == List(""))
assert (tstContext3.requestHeaders(ROLES_HEADER) == List("foo"))
assert (tstContext3.requestHeaders(CAPTURE_HEADER) == List(""))
}
}
}
| wdschei/api-checker | core/src/test/scala/com/rackspace/com/papi/components/checker/step/TenantRoleStepSuite.scala | Scala | apache-2.0 | 29,548 |
import sbt._
import com.twitter.sbt._
import sbt.Keys._
import sbtassembly.Plugin._
import AssemblyKeys._
import java.io.File
object Zipkin extends Build {
val CASSIE_VERSION = "0.23.0"
val FINAGLE_VERSION = "5.3.20"
val OSTRICH_VERSION = "8.2.9"
val UTIL_VERSION = "5.3.13"
val proxyRepo = Option(System.getenv("SBT_PROXY_REPO"))
val travisCi = Option(System.getenv("SBT_TRAVIS_CI")) // for adding travis ci maven repos before others
lazy val testDependencies = Seq(
"org.scala-tools.testing" % "specs_2.9.1" % "1.6.9" % "test",
"org.jmock" % "jmock" % "2.4.0" % "test",
"org.hamcrest" % "hamcrest-all" % "1.1" % "test",
"cglib" % "cglib" % "2.2.2" % "test",
"asm" % "asm" % "1.5.3" % "test",
"org.objenesis" % "objenesis" % "1.1" % "test"
)
def zipkinSettings = Seq(
organization := "com.twitter",
version := "1.0.1-SNAPSHOT",
crossPaths := false /* Removes Scala version from artifact name */
)
def defaultSettings = Project.defaultSettings ++ StandardProject.newSettings ++ TravisCiRepos.newSettings ++ zipkinSettings
lazy val zipkin =
Project(
id = "zipkin",
base = file(".")
) settings(
crossPaths := false
) aggregate(hadoop, hadoopjobrunner, test, thrift, queryCore, queryService, common, scrooge, collectorScribe, web, cassandra, collectorCore, collectorService, kafka)
lazy val hadoop = Project(
id = "zipkin-hadoop",
base = file("zipkin-hadoop"),
settings = defaultSettings ++ assemblySettings
).settings(
name := "zipkin-hadoop",
parallelExecution in Test := false,
libraryDependencies ++= Seq(
"com.twitter" % "scalding_2.9.1" % "0.5.3",
/*
FIXME ElephantBird 3.0.0 picks up libthrift 0.7.0, which is currently
incompatible with sbt-thrift so made these intransitive
*/
"com.twitter.elephantbird" % "elephant-bird-cascading2" % "3.0.0" intransitive(),
"com.twitter.elephantbird" % "elephant-bird-core" % "3.0.0" intransitive(),
"org.slf4j" % "slf4j-log4j12" % "1.6.4" % "runtime",
"com.google.protobuf" % "protobuf-java" % "2.3.0",
"org.apache.thrift" % "libthrift" % "0.5.0",
"cascading" % "cascading-hadoop" % "2.0.0-wip-288",
/* Test dependencies */
"org.scala-tools.testing" % "specs_2.9.1" % "1.6.9" % "test"
),
resolvers ++= (proxyRepo match {
case None => Seq(
"elephant-bird repo" at "http://oss.sonatype.org/content/repositories/comtwitter-286",
"Concurrent Maven Repo" at "http://conjars.org/repo")
case Some(pr) => Seq() // if proxy is set we assume that it has the artifacts we would get from the above repo
}),
mainClass in assembly := Some("com.twitter.scalding.Tool"),
ivyXML := // slim down the jar
<dependencies>
<exclude module="jms"/>
<exclude module="jmxri"/>
<exclude module="jmxtools"/>
<exclude org="com.sun.jdmk"/>
<exclude org="com.sun.jmx"/>
<exclude org="javax.jms"/>
<exclude org="org.mortbay.jetty"/>
</dependencies>,
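    // Fat-jar merge policy for sbt-assembly: discard META-INF/* and project.clj entries
    // (frequent sources of conflicts); everything else must deduplicate exactly across
    // the dependency jars.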
mergeStrategy in assembly := {
case inf if inf.startsWith("META-INF/") || inf.startsWith("project.clj") => MergeStrategy.discard
case _ => MergeStrategy.deduplicate
}
).dependsOn(thrift)
lazy val hadoopjobrunner = Project(
id = "zipkin-hadoop-job-runner",
base = file("zipkin-hadoop-job-runner"),
settings = defaultSettings ++ assemblySettings
).settings(
name := "zipkin-hadoop-job-runner",
parallelExecution in Test := false,
libraryDependencies ++= Seq(
"org.slf4j" % "slf4j-log4j12" % "1.6.4" % "runtime",
"javax.mail" % "mail" % "1.4.3",
"com.github.spullara.mustache.java" % "compiler" % "0.8.2",
"com.twitter" % "util-core" % UTIL_VERSION,
"com.twitter" % "util-logging" % UTIL_VERSION,
/* Test dependencies */
"org.scala-tools.testing" % "specs_2.9.1" % "1.6.9" % "test"
),
mergeStrategy in assembly := {
case inf if inf.startsWith("META-INF/") || inf.startsWith("project.clj") => MergeStrategy.discard
case _ => MergeStrategy.deduplicate
}
).dependsOn(thrift)
lazy val test = Project(
id = "zipkin-test",
base = file("zipkin-test"),
settings = defaultSettings ++ CompileThrift.newSettings
).settings(
name := "zipkin-test",
libraryDependencies ++= testDependencies
) dependsOn(queryService, collectorService)
lazy val thrift =
Project(
id = "zipkin-thrift",
base = file("zipkin-thrift"),
settings = defaultSettings ++ SubversionPublisher.newSettings ++ CompileThrift.newSettings
).settings(
name := "zipkin-thrift",
libraryDependencies ++= Seq(
"org.apache.thrift" % "libthrift" % "0.5.0",
"org.slf4j" % "slf4j-api" % "1.5.8"
),
sources in (Compile, doc) ~= (_ filter (_.getName contains "src_managed"))
)
lazy val common =
Project(
id = "zipkin-common",
base = file("zipkin-common"),
settings = defaultSettings ++ SubversionPublisher.newSettings
).settings(
libraryDependencies ++= Seq(
"com.twitter" % "finagle-ostrich4" % FINAGLE_VERSION,
"com.twitter" % "finagle-thrift" % FINAGLE_VERSION,
"com.twitter" % "finagle-zipkin" % FINAGLE_VERSION,
"com.twitter" % "ostrich" % OSTRICH_VERSION,
"com.twitter" % "util-core" % UTIL_VERSION,
"com.twitter.common.zookeeper" % "client" % "0.0.6"
) ++ testDependencies
)
lazy val scrooge =
Project(
id = "zipkin-scrooge",
base = file("zipkin-scrooge"),
settings = defaultSettings ++ SubversionPublisher.newSettings ++ CompileThriftScrooge.newSettings
).settings(
libraryDependencies ++= Seq(
"com.twitter" % "finagle-ostrich4" % FINAGLE_VERSION,
"com.twitter" % "finagle-thrift" % FINAGLE_VERSION,
"com.twitter" % "finagle-zipkin" % FINAGLE_VERSION,
"com.twitter" % "ostrich" % OSTRICH_VERSION,
"com.twitter" % "util-core" % UTIL_VERSION,
/*
FIXME Scrooge 3.0.0 picks up libthrift 0.8.0, which is currently
incompatible with cassie 0.21.5 so made these intransitive
*/
"com.twitter" % "scrooge" % "3.0.1" intransitive(),
"com.twitter" % "scrooge-runtime_2.9.2" % "3.0.1" intransitive()
) ++ testDependencies,
CompileThriftScrooge.scroogeVersion := "3.0.1"
).dependsOn(common)
lazy val collectorCore = Project(
id = "zipkin-collector-core",
base = file("zipkin-collector-core"),
settings = defaultSettings ++ SubversionPublisher.newSettings
).settings(
libraryDependencies ++= Seq(
"com.twitter" % "finagle-ostrich4" % FINAGLE_VERSION,
"com.twitter" % "finagle-serversets"% FINAGLE_VERSION,
"com.twitter" % "finagle-thrift" % FINAGLE_VERSION,
"com.twitter" % "finagle-zipkin" % FINAGLE_VERSION,
"com.twitter" % "ostrich" % OSTRICH_VERSION,
"com.twitter" % "util-core" % UTIL_VERSION,
"com.twitter" % "util-zk" % UTIL_VERSION,
"com.twitter" % "util-zk-common" % UTIL_VERSION,
"com.twitter.common.zookeeper" % "candidate" % "0.0.9",
"com.twitter.common.zookeeper" % "group" % "0.0.9"
) ++ testDependencies
).dependsOn(common, scrooge)
lazy val cassandra = Project(
id = "zipkin-cassandra",
base = file("zipkin-cassandra"),
settings = defaultSettings ++ SubversionPublisher.newSettings
).settings(
libraryDependencies ++= Seq(
"com.twitter" % "cassie-core" % CASSIE_VERSION,
"com.twitter" % "cassie-serversets" % CASSIE_VERSION,
"com.twitter" % "util-logging" % UTIL_VERSION,
"org.iq80.snappy" % "snappy" % "0.1"
) ++ testDependencies,
/* Add configs to resource path for ConfigSpec */
unmanagedResourceDirectories in Test <<= baseDirectory {
base =>
(base / "config" +++ base / "src" / "test" / "resources").get
}
).dependsOn(scrooge)
lazy val queryCore =
Project(
id = "zipkin-query-core",
base = file("zipkin-query-core"),
settings = defaultSettings ++ SubversionPublisher.newSettings
).settings(
libraryDependencies ++= Seq(
"com.twitter" % "finagle-ostrich4" % FINAGLE_VERSION,
"com.twitter" % "finagle-serversets"% FINAGLE_VERSION,
"com.twitter" % "finagle-thrift" % FINAGLE_VERSION,
"com.twitter" % "finagle-zipkin" % FINAGLE_VERSION,
"com.twitter" % "ostrich" % OSTRICH_VERSION,
"com.twitter" % "util-core" % UTIL_VERSION,
"com.twitter" % "util-zk" % UTIL_VERSION,
"com.twitter" % "util-zk-common" % UTIL_VERSION,
"com.twitter.common.zookeeper" % "candidate" % "0.0.9",
"com.twitter.common.zookeeper" % "group" % "0.0.9"
) ++ testDependencies
).dependsOn(common, scrooge)
lazy val queryService = Project(
id = "zipkin-query-service",
base = file("zipkin-query-service"),
settings = defaultSettings ++ SubversionPublisher.newSettings
).settings(
libraryDependencies ++= testDependencies,
PackageDist.packageDistZipName := "zipkin-query-service.zip",
BuildProperties.buildPropertiesPackage := "com.twitter.zipkin",
/* Add configs to resource path for ConfigSpec */
unmanagedResourceDirectories in Test <<= baseDirectory {
base =>
(base / "config" +++ base / "src" / "test" / "resources").get
}
).dependsOn(queryCore, cassandra)
lazy val collectorScribe =
Project(
id = "zipkin-collector-scribe",
base = file("zipkin-collector-scribe"),
settings = defaultSettings ++ SubversionPublisher.newSettings
).settings(
libraryDependencies ++= testDependencies
).dependsOn(collectorCore, scrooge)
lazy val kafka =
Project(
id = "zipkin-kafka",
base = file("zipkin-kafka"),
settings = defaultSettings ++ SubversionPublisher.newSettings
).settings(
libraryDependencies ++= Seq(
"org.clojars.jasonjckn" % "kafka_2.9.1" % "0.7.0"
) ++ testDependencies,
resolvers ++= (proxyRepo match {
case None => Seq(
"clojars" at "http://clojars.org/repo")
case Some(pr) => Seq() // if proxy is set we assume that it has the artifacts we would get from the above repo
})
).dependsOn(collectorCore, scrooge)
lazy val collectorService = Project(
id = "zipkin-collector-service",
base = file("zipkin-collector-service"),
settings = defaultSettings ++ SubversionPublisher.newSettings
).settings(
libraryDependencies ++= testDependencies,
PackageDist.packageDistZipName := "zipkin-collector-service.zip",
BuildProperties.buildPropertiesPackage := "com.twitter.zipkin",
/* Add configs to resource path for ConfigSpec */
unmanagedResourceDirectories in Test <<= baseDirectory {
base =>
(base / "config" +++ base / "src" / "test" / "resources").get
}
).dependsOn(collectorCore, collectorScribe, cassandra, kafka)
lazy val web =
Project(
id = "zipkin-web",
base = file("zipkin-web"),
settings = defaultSettings
).settings(
resolvers += "finatra" at "http://repo.juliocapote.com",
resolvers += "codahale" at "http://repo.codahale.com",
libraryDependencies ++= Seq(
"com.twitter" % "finatra" % "0.2.4",
"com.twitter.common.zookeeper" % "server-set" % "1.0.7",
"com.twitter" % "finagle-serversets" % FINAGLE_VERSION,
"com.twitter" % "finagle-zipkin" % FINAGLE_VERSION
) ++ testDependencies,
PackageDist.packageDistZipName := "zipkin-web.zip",
BuildProperties.buildPropertiesPackage := "com.twitter.zipkin",
/* Add configs to resource path for ConfigSpec */
unmanagedResourceDirectories in Test <<= baseDirectory {
base =>
(base / "config" +++ base / "src" / "test" / "resources").get
}
).dependsOn(common, scrooge)
}
/*
* We build our project using Travis CI. In order for it to finish in the max run time,
* we need to use their local maven mirrors.
*/
object TravisCiRepos extends Plugin with Environmentalist {
val travisCiResolvers = SettingKey[Seq[Resolver]](
"travisci-central",
"Use these resolvers when building on travis-ci"
)
val localRepo = SettingKey[File](
"local-repo",
"local folder to use as a repo (and where publish-local publishes to)"
)
val newSettings = Seq(
travisCiResolvers := Seq(
"travisci-central" at "http://maven.travis-ci.org/nexus/content/repositories/central/",
"travisci-sonatype" at "http://maven.travis-ci.org/nexus/content/repositories/sonatype/"
),
// configure resolvers for the build
resolvers <<= (resolvers, travisCiResolvers) { (resolvers, travisCiResolvers) =>
if("true".equalsIgnoreCase(System.getenv("SBT_TRAVIS_CI"))) {
travisCiResolvers ++ resolvers
} else {
resolvers
}
},
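    // e.g. running `SBT_TRAVIS_CI=true sbt test` puts the Travis mirrors ahead of the
    // default resolvers; without the variable the resolver list is left untouched.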
// don't add any special resolvers.
externalResolvers <<= (resolvers) map identity
)
}
| julio/zipkin | project/Project.scala | Scala | apache-2.0 | 13,502 |
/*
* Copyright (c) 2016 SnappyData, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package org.apache.spark.sql.sources
/**
 * A trait for tracking the statistics of a set of numbers (count, mean and
* variance) in a numerically robust way. Includes support for merging two
* StatVarianceCounters.
*
* Taken from Spark's StatCounter implementation removing max and min.
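 *
 * A usage sketch (values illustrative, using the concrete StatCounter below):
 * {{{
 *   val c = new StatCounter
 *   c.merge(Seq(1.0, 2.0, 3.0))
 *   c.mean           // 2.0
 *   c.variance       // 2.0 / 3.0
 *   c.sampleStdev    // 1.0
 * }}}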
*/
trait StatVarianceCounter extends Serializable {
// Running count of our values
final var count: Long = 0
// Running mean of our values
final var mean: Double = 0
// Running variance times count of our values
final private[sql] var nvariance: Double = 0
private[sql] final def initStats(count: Long, mean: Double,
nvariance: Double): Unit = {
this.count = count
this.mean = mean
this.nvariance = nvariance
}
/**
* Add a value into this StatVarianceCounter,
* updating the internal statistics.
*/
final def merge(value: Double) {
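    // Single-pass (Welford-style) update: nvariance accumulates the running sum of
    // squared deviations from the mean, which keeps the variance numerically stable.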
val delta = value - mean
count += 1
mean += delta / count
nvariance += delta * (value - mean)
}
/**
* Add multiple values into this StatVarianceCounter,
* updating the internal statistics.
*/
final def merge(values: TraversableOnce[Double]): Unit = values.foreach(merge)
/**
* Merge another StatVarianceCounter into this one,
* adding up the internal statistics.
*/
final def merge(other: StatVarianceCounter) {
if (other != this) {
mergeDistinctCounter(other)
}
else {
merge(other.copy()) // Avoid overwriting fields in a weird order
}
}
/**
* Merge another StatVarianceCounter into this one,
* adding up the internal statistics when other != this.
*/
protected def mergeDistinctCounter(other: StatVarianceCounter) {
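    // Pairwise combination of two partial results, as in parallel variance algorithms:
    // the mean update branches on the relative counts to limit floating-point error when
    // one side dominates, and nvariance picks up a delta^2 * n1 * n2 / (n1 + n2) term.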
if (count == 0) {
mean = other.mean
nvariance = other.nvariance
count = other.count
} else if (other.count != 0) {
val delta = other.mean - mean
if (other.count * 10 < count) {
mean = mean + (delta * other.count) / (count + other.count)
} else if (count * 10 < other.count) {
mean = other.mean - (delta * count) / (count + other.count)
} else {
mean = (mean * count + other.mean * other.count) /
(count + other.count)
}
nvariance += other.nvariance + (delta * delta * count * other.count) /
(count + other.count)
count += other.count
}
}
/** Clone this StatVarianceCounter */
def copy(): StatVarianceCounter
final def sum: Double = mean * count
/** Return the variance of the values. */
final def variance: Double = {
if (count != 0) {
nvariance / count
} else {
Double.NaN
}
}
/**
* Return the sample variance, which corrects for bias in estimating the variance by dividing
* by N-1 instead of N.
*/
final def sampleVariance: Double = {
if (count > 1) {
nvariance / (count - 1)
} else {
Double.NaN
}
}
/** Return the standard deviation of the values. */
final def stdev: Double = math.sqrt(variance)
/**
* Return the sample standard deviation of the values, which corrects for bias in estimating the
* variance by dividing by N-1 instead of N.
*/
final def sampleStdev: Double = math.sqrt(sampleVariance)
override def toString: String = {
"(count: %d, mean: %f, stdev: %f)".format(count, mean, stdev)
}
}
final class StatCounter extends StatVarianceCounter with Serializable {
/** Clone this StatCounter */
override def copy(): StatCounter = {
val other = new StatCounter
other.count = count
other.mean = mean
other.nvariance = nvariance
other
}
}
| vjr/snappydata | core/src/main/scala/org/apache/spark/sql/sources/StatVarianceCounter.scala | Scala | apache-2.0 | 4,242 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.io.{File, IOException}
import java.nio.file.Files
import java.util.Locale
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import org.apache.hadoop.fs.{FileStatus, Path, RawLocalFileSystem}
import org.apache.hadoop.mapreduce.JobContext
import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkHadoopUtil
import org.apache.spark.internal.io.FileCommitProtocol
import org.apache.spark.scheduler.{SparkListener, SparkListenerTaskEnd}
import org.apache.spark.sql.{AnalysisException, DataFrame}
import org.apache.spark.sql.execution.DataSourceScanExec
import org.apache.spark.sql.execution.datasources._
import org.apache.spark.sql.execution.datasources.v2.{BatchScanExec, DataSourceV2Relation, FileScan, FileTable}
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{IntegerType, StructField, StructType}
import org.apache.spark.util.Utils
abstract class FileStreamSinkSuite extends StreamTest {
import testImplicits._
override def beforeAll(): Unit = {
super.beforeAll()
spark.sessionState.conf.setConf(SQLConf.ORC_IMPLEMENTATION, "native")
}
override def afterAll(): Unit = {
try {
spark.sessionState.conf.unsetConf(SQLConf.ORC_IMPLEMENTATION)
} finally {
super.afterAll()
}
}
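  // Implemented by the format-specific (V1/V2) subclasses to verify how a batch read of the
  // sink output is planned; the exact assertions are assumed to live in those subclasses.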
protected def checkQueryExecution(df: DataFrame): Unit
test("unpartitioned writing and batch reading") {
val inputData = MemoryStream[Int]
val df = inputData.toDF()
val outputDir = Utils.createTempDir(namePrefix = "stream.output").getCanonicalPath
val checkpointDir = Utils.createTempDir(namePrefix = "stream.checkpoint").getCanonicalPath
var query: StreamingQuery = null
try {
query =
df.writeStream
.option("checkpointLocation", checkpointDir)
.format("parquet")
.start(outputDir)
inputData.addData(1, 2, 3)
failAfter(streamingTimeout) {
query.processAllAvailable()
}
val outputDf = spark.read.parquet(outputDir).as[Int]
checkDatasetUnorderly(outputDf, 1, 2, 3)
} finally {
if (query != null) {
query.stop()
}
}
}
test("SPARK-21167: encode and decode path correctly") {
val inputData = MemoryStream[String]
val ds = inputData.toDS()
val outputDir = Utils.createTempDir(namePrefix = "stream.output").getCanonicalPath
val checkpointDir = Utils.createTempDir(namePrefix = "stream.checkpoint").getCanonicalPath
val query = ds.map(s => (s, s.length))
.toDF("value", "len")
.writeStream
.partitionBy("value")
.option("checkpointLocation", checkpointDir)
.format("parquet")
.start(outputDir)
try {
// The output is partitioned by "value", so the value will appear in the file path.
// This is to test if we handle spaces in the path correctly.
inputData.addData("hello world")
failAfter(streamingTimeout) {
query.processAllAvailable()
}
val outputDf = spark.read.parquet(outputDir)
checkDatasetUnorderly(outputDf.as[(Int, String)], ("hello world".length, "hello world"))
} finally {
query.stop()
}
}
test("partitioned writing and batch reading") {
val inputData = MemoryStream[Int]
val ds = inputData.toDS()
val outputDir = Utils.createTempDir(namePrefix = "stream.output").getCanonicalPath
val checkpointDir = Utils.createTempDir(namePrefix = "stream.checkpoint").getCanonicalPath
var query: StreamingQuery = null
try {
query =
ds.map(i => (i, i * 1000))
.toDF("id", "value")
.writeStream
.partitionBy("id")
.option("checkpointLocation", checkpointDir)
.format("parquet")
.start(outputDir)
inputData.addData(1, 2, 3)
failAfter(streamingTimeout) {
query.processAllAvailable()
}
val outputDf = spark.read.parquet(outputDir)
val expectedSchema = new StructType()
.add(StructField("value", IntegerType, nullable = false))
.add(StructField("id", IntegerType))
assert(outputDf.schema === expectedSchema)
// Verify the data is correctly read
checkDatasetUnorderly(
outputDf.as[(Int, Int)],
(1000, 1), (2000, 2), (3000, 3))
checkQueryExecution(outputDf)
} finally {
if (query != null) {
query.stop()
}
}
}
test("partitioned writing and batch reading with 'basePath'") {
withTempDir { outputDir =>
withTempDir { checkpointDir =>
val outputPath = outputDir.getAbsolutePath
val inputData = MemoryStream[Int]
val ds = inputData.toDS()
var query: StreamingQuery = null
try {
query =
ds.map(i => (i, -i, i * 1000))
.toDF("id1", "id2", "value")
.writeStream
.partitionBy("id1", "id2")
.option("checkpointLocation", checkpointDir.getAbsolutePath)
.format("parquet")
.start(outputPath)
inputData.addData(1, 2, 3)
failAfter(streamingTimeout) {
query.processAllAvailable()
}
val readIn = spark.read.option("basePath", outputPath).parquet(s"$outputDir/*/*")
checkDatasetUnorderly(
readIn.as[(Int, Int, Int)],
(1000, 1, -1), (2000, 2, -2), (3000, 3, -3))
} finally {
if (query != null) {
query.stop()
}
}
}
}
}
// This tests whether FileStreamSink works with aggregations. Specifically, it tests
// whether the correct streaming QueryExecution (i.e. IncrementalExecution) is used to
  // execute the trigger for writing data to the file sink. See SPARK-18440 for more details.
test("writing with aggregation") {
// Since FileStreamSink currently only supports append mode, we will test FileStreamSink
// with aggregations using event time windows and watermark, which allows
// aggregation + append mode.
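    // In append mode a window is emitted at most once, only after the watermark passes the
    // window's end; the inline comments on addTimestamp/check below trace that timeline.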
val inputData = MemoryStream[Long]
val inputDF = inputData.toDF.toDF("time")
val outputDf = inputDF
.selectExpr("timestamp_seconds(time) AS timestamp")
.withWatermark("timestamp", "10 seconds")
.groupBy(window($"timestamp", "5 seconds"))
.count()
.select("window.start", "window.end", "count")
val outputDir = Utils.createTempDir(namePrefix = "stream.output").getCanonicalPath
val checkpointDir = Utils.createTempDir(namePrefix = "stream.checkpoint").getCanonicalPath
var query: StreamingQuery = null
try {
query =
outputDf.writeStream
.option("checkpointLocation", checkpointDir)
.format("parquet")
.start(outputDir)
def addTimestamp(timestampInSecs: Int*): Unit = {
inputData.addData(timestampInSecs.map(_ * 1L): _*)
failAfter(streamingTimeout) {
query.processAllAvailable()
}
}
def check(expectedResult: ((Long, Long), Long)*): Unit = {
val outputDf = spark.read.parquet(outputDir)
.selectExpr(
"CAST(start as BIGINT) AS start",
"CAST(end as BIGINT) AS end",
"count")
.orderBy("start") // sort the DataFrame in order to compare with the expected one.
checkDataset(
outputDf.as[(Long, Long, Long)],
expectedResult.map(x => (x._1._1, x._1._2, x._2)): _*)
}
addTimestamp(100) // watermark = None before this, watermark = 100 - 10 = 90 after this
check() // nothing emitted yet
addTimestamp(104, 123) // watermark = 90 before this, watermark = 123 - 10 = 113 after this
check((100L, 105L) -> 2L) // no-data-batch emits results on 100-105,
addTimestamp(140) // wm = 113 before this, emit results on 100-105, wm = 130 after this
check((100L, 105L) -> 2L, (120L, 125L) -> 1L) // no-data-batch emits results on 120-125
} finally {
if (query != null) {
query.stop()
}
}
}
test("Update and Complete output mode not supported") {
val df = MemoryStream[Int].toDF().groupBy().count()
val outputDir = Utils.createTempDir(namePrefix = "stream.output").getCanonicalPath
withTempDir { dir =>
def testOutputMode(mode: String): Unit = {
val e = intercept[AnalysisException] {
df.writeStream.format("parquet").outputMode(mode).start(dir.getCanonicalPath)
}
Seq(mode, "not support").foreach { w =>
assert(e.getMessage.toLowerCase(Locale.ROOT).contains(w))
}
}
testOutputMode("update")
testOutputMode("complete")
}
}
test("parquet") {
    testFormat(None) // should not throw an error; parquet is the default format when none is specified
testFormat(Some("parquet"))
}
test("orc") {
testFormat(Some("orc"))
}
test("text") {
testFormat(Some("text"))
}
test("json") {
testFormat(Some("json"))
}
def testFormat(format: Option[String]): Unit = {
val inputData = MemoryStream[Int]
val ds = inputData.toDS()
val outputDir = Utils.createTempDir(namePrefix = "stream.output").getCanonicalPath
val checkpointDir = Utils.createTempDir(namePrefix = "stream.checkpoint").getCanonicalPath
var query: StreamingQuery = null
try {
val writer = ds.map(i => (i, i * 1000)).toDF("id", "value").writeStream
if (format.nonEmpty) {
writer.format(format.get)
}
query = writer.option("checkpointLocation", checkpointDir).start(outputDir)
} finally {
if (query != null) {
query.stop()
}
}
}
test("FileStreamSink.ancestorIsMetadataDirectory()") {
val hadoopConf = spark.sessionState.newHadoopConf()
def assertAncestorIsMetadataDirectory(path: String): Unit =
assert(FileStreamSink.ancestorIsMetadataDirectory(new Path(path), hadoopConf))
def assertAncestorIsNotMetadataDirectory(path: String): Unit =
assert(!FileStreamSink.ancestorIsMetadataDirectory(new Path(path), hadoopConf))
assertAncestorIsMetadataDirectory(s"/${FileStreamSink.metadataDir}")
assertAncestorIsMetadataDirectory(s"/${FileStreamSink.metadataDir}/")
assertAncestorIsMetadataDirectory(s"/a/${FileStreamSink.metadataDir}")
assertAncestorIsMetadataDirectory(s"/a/${FileStreamSink.metadataDir}/")
assertAncestorIsMetadataDirectory(s"/a/b/${FileStreamSink.metadataDir}/c")
assertAncestorIsMetadataDirectory(s"/a/b/${FileStreamSink.metadataDir}/c/")
assertAncestorIsNotMetadataDirectory(s"/a/b/c")
assertAncestorIsNotMetadataDirectory(s"/a/b/c/${FileStreamSink.metadataDir}extra")
}
test("SPARK-20460 Check name duplication in schema") {
Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
val inputData = MemoryStream[(Int, Int)]
val df = inputData.toDF()
val outputDir = Utils.createTempDir(namePrefix = "stream.output").getCanonicalPath
val checkpointDir = Utils.createTempDir(namePrefix = "stream.checkpoint").getCanonicalPath
var query: StreamingQuery = null
try {
query =
df.writeStream
.option("checkpointLocation", checkpointDir)
.format("json")
.start(outputDir)
inputData.addData((1, 1))
failAfter(streamingTimeout) {
query.processAllAvailable()
}
} finally {
if (query != null) {
query.stop()
}
}
val errorMsg = intercept[AnalysisException] {
spark.read.schema(s"$c0 INT, $c1 INT").json(outputDir).as[(Int, Int)]
}.getMessage
assert(errorMsg.contains("Found duplicate column(s) in the data schema: "))
}
}
}
test("SPARK-23288 writing and checking output metrics") {
Seq("parquet", "orc", "text", "json").foreach { format =>
val inputData = MemoryStream[String]
val df = inputData.toDF()
withTempDir { outputDir =>
withTempDir { checkpointDir =>
var query: StreamingQuery = null
var numTasks = 0
var recordsWritten: Long = 0L
var bytesWritten: Long = 0L
try {
spark.sparkContext.addSparkListener(new SparkListener() {
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
val outputMetrics = taskEnd.taskMetrics.outputMetrics
recordsWritten += outputMetrics.recordsWritten
bytesWritten += outputMetrics.bytesWritten
numTasks += 1
}
})
query =
df.writeStream
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format(format)
.start(outputDir.getCanonicalPath)
inputData.addData("1", "2", "3")
inputData.addData("4", "5")
failAfter(streamingTimeout) {
query.processAllAvailable()
}
spark.sparkContext.listenerBus.waitUntilEmpty(streamingTimeout.toMillis)
assert(numTasks > 0)
assert(recordsWritten === 5)
// This is heavily file type/version specific but should be filled
assert(bytesWritten > 0)
} finally {
if (query != null) {
query.stop()
}
}
}
}
}
}
test("special characters in output path") {
withTempDir { tempDir =>
val checkpointDir = new File(tempDir, "chk")
val outputDir = new File(tempDir, "output @#output")
val inputData = MemoryStream[Int]
inputData.addData(1, 2, 3)
val q = inputData.toDF()
.writeStream
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("parquet")
.start(outputDir.getCanonicalPath)
try {
q.processAllAvailable()
} finally {
q.stop()
}
// The "_spark_metadata" directory should be in "outputDir"
assert(outputDir.listFiles.map(_.getName).contains(FileStreamSink.metadataDir))
val outputDf = spark.read.parquet(outputDir.getCanonicalPath).as[Int]
checkDatasetUnorderly(outputDf, 1, 2, 3)
}
}
testQuietly("cleanup incomplete output for aborted task") {
withTempDir { tempDir =>
val checkpointDir = new File(tempDir, "chk")
val outputDir = new File(tempDir, "output")
val inputData = MemoryStream[Int]
inputData.addData(1, 2, 3)
val q = inputData.toDS().map(_ / 0)
.writeStream
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("parquet")
.start(outputDir.getCanonicalPath)
intercept[StreamingQueryException] {
try {
q.processAllAvailable()
} finally {
q.stop()
}
}
val outputFiles = Files.walk(outputDir.toPath).iterator().asScala
.filter(_.toString.endsWith(".parquet"))
assert(outputFiles.toList.isEmpty, "Incomplete files should be cleaned up.")
}
}
testQuietly("cleanup complete but invalid output for aborted job") {
withSQLConf(("spark.sql.streaming.commitProtocolClass",
classOf[PendingCommitFilesTrackingManifestFileCommitProtocol].getCanonicalName)) {
withTempDir { tempDir =>
val checkpointDir = new File(tempDir, "chk")
val outputDir = new File(tempDir, "output @#output")
val inputData = MemoryStream[Int]
inputData.addData(1, 2, 3, 4, 5, 6, 7, 8, 9, 10)
val q = inputData.toDS()
.repartition(10)
.map { value =>
            // we intend a task failure after some tasks succeed
            if (value == 5) {
              // add some delay to let other tasks commit before this task fails
Thread.sleep(100)
value / 0
} else {
value
}
}
.writeStream
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format("parquet")
.start(outputDir.getCanonicalPath)
intercept[StreamingQueryException] {
try {
q.processAllAvailable()
} finally {
q.stop()
}
}
import PendingCommitFilesTrackingManifestFileCommitProtocol._
val outputFileNames = Files.walk(outputDir.toPath).iterator().asScala
.filter(_.toString.endsWith(".parquet"))
.map(_.getFileName.toString)
.toSet
val trackingFileNames = tracking.map(new Path(_).getName).toSet
        // a race condition is possible here:
        // - some tasks may complete while abortJob is being called
        // we can't delete the completed files for these tasks (that's OK since this is best effort)
assert(outputFileNames.intersect(trackingFileNames).isEmpty,
"abortJob should clean up files reported as successful.")
}
}
}
test("Handle FileStreamSink metadata correctly for empty partition") {
Seq("parquet", "orc", "text", "json").foreach { format =>
val inputData = MemoryStream[String]
val df = inputData.toDF()
withTempDir { outputDir =>
withTempDir { checkpointDir =>
var query: StreamingQuery = null
try {
// repartition to more than the input to leave empty partitions
query =
df.repartition(10)
.writeStream
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.format(format)
.start(outputDir.getCanonicalPath)
inputData.addData("1", "2", "3")
inputData.addData("4", "5")
failAfter(streamingTimeout) {
query.processAllAvailable()
}
} finally {
if (query != null) {
query.stop()
}
}
val outputDirPath = new Path(outputDir.getCanonicalPath)
val hadoopConf = spark.sessionState.newHadoopConf()
val fs = outputDirPath.getFileSystem(hadoopConf)
val logPath = FileStreamSink.getMetadataLogPath(fs, outputDirPath, conf)
val sinkLog = new FileStreamSinkLog(FileStreamSinkLog.VERSION, spark, logPath.toString)
val allFiles = sinkLog.allFiles()
// only files from non-empty partition should be logged
assert(allFiles.length < 10)
assert(allFiles.forall(file => fs.exists(new Path(file.path))))
// the query should be able to read all rows correctly with metadata log
val outputDf = spark.read.format(format).load(outputDir.getCanonicalPath)
.selectExpr("CAST(value AS INT)").as[Int]
checkDatasetUnorderly(outputDf, 1, 2, 3, 4, 5)
}
}
}
}
test("formatCheck fail should not fail the query") {
withSQLConf(
"fs.file.impl" -> classOf[FailFormatCheckFileSystem].getName,
"fs.file.impl.disable.cache" -> "true") {
withTempDir { tempDir =>
val path = new File(tempDir, "text").getCanonicalPath
Seq("foo").toDF.write.format("text").save(path)
spark.read.format("text").load(path)
}
}
}
test("fail to check glob path should not fail the query") {
withSQLConf(
"fs.file.impl" -> classOf[FailFormatCheckFileSystem].getName,
"fs.file.impl.disable.cache" -> "true") {
withTempDir { tempDir =>
val path = new File(tempDir, "text").getCanonicalPath
Seq("foo").toDF.write.format("text").save(path)
spark.read.format("text").load(path + "/*")
}
}
}
}
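// Added descriptive comment: the commit protocol below is a test-only helper. setupJob
// clears the shared `tracking` buffer and onTaskCommit records the output file paths
// reported by each committed task, so the "cleanup complete but invalid output for
// aborted job" test above can assert that abortJob deleted those files.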
object PendingCommitFilesTrackingManifestFileCommitProtocol {
val tracking: ArrayBuffer[String] = new ArrayBuffer[String]()
def cleanPendingCommitFiles(): Unit = tracking.clear()
def addPendingCommitFiles(paths: Seq[String]): Unit = tracking ++= paths
}
class PendingCommitFilesTrackingManifestFileCommitProtocol(jobId: String, path: String)
extends ManifestFileCommitProtocol(jobId, path) {
import PendingCommitFilesTrackingManifestFileCommitProtocol._
override def setupJob(jobContext: JobContext): Unit = {
super.setupJob(jobContext)
cleanPendingCommitFiles()
}
override def onTaskCommit(taskCommit: FileCommitProtocol.TaskCommitMessage): Unit = {
super.onTaskCommit(taskCommit)
addPendingCommitFiles(taskCommit.obj.asInstanceOf[Seq[SinkFileStatus]].map(_.path))
}
}
class FileStreamSinkV1Suite extends FileStreamSinkSuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "csv,json,orc,text,parquet")
override def checkQueryExecution(df: DataFrame): Unit = {
// Verify that MetadataLogFileIndex is being used and the correct partitioning schema has
// been inferred
val hadoopdFsRelations = df.queryExecution.analyzed.collect {
case LogicalRelation(baseRelation: HadoopFsRelation, _, _, _) => baseRelation
}
assert(hadoopdFsRelations.size === 1)
assert(hadoopdFsRelations.head.location.isInstanceOf[MetadataLogFileIndex])
assert(hadoopdFsRelations.head.partitionSchema.exists(_.name == "id"))
assert(hadoopdFsRelations.head.dataSchema.exists(_.name == "value"))
/** Check some condition on the partitions of the FileScanRDD generated by a DF */
def checkFileScanPartitions(df: DataFrame)(func: Seq[FilePartition] => Unit): Unit = {
val getFileScanRDD = df.queryExecution.executedPlan.collect {
case scan: DataSourceScanExec if scan.inputRDDs().head.isInstanceOf[FileScanRDD] =>
scan.inputRDDs().head.asInstanceOf[FileScanRDD]
}.headOption.getOrElse {
        fail(s"No FileScan in query\n${df.queryExecution}")
}
func(getFileScanRDD.filePartitions)
}
// Read without pruning
checkFileScanPartitions(df) { partitions =>
// There should be as many distinct partition values as there are distinct ids
assert(partitions.flatMap(_.files.map(_.partitionValues)).distinct.size === 3)
}
// Read with pruning, should read only files in partition dir id=1
checkFileScanPartitions(df.filter("id = 1")) { partitions =>
val filesToBeRead = partitions.flatMap(_.files)
assert(filesToBeRead.map(_.filePath).forall(_.contains("/id=1/")))
assert(filesToBeRead.map(_.partitionValues).distinct.size === 1)
}
// Read with pruning, should read only files in partition dir id=1 and id=2
checkFileScanPartitions(df.filter("id in (1,2)")) { partitions =>
val filesToBeRead = partitions.flatMap(_.files)
assert(!filesToBeRead.map(_.filePath).exists(_.contains("/id=3/")))
assert(filesToBeRead.map(_.partitionValues).distinct.size === 2)
}
}
}
class FileStreamSinkV2Suite extends FileStreamSinkSuite {
override protected def sparkConf: SparkConf =
super
.sparkConf
.set(SQLConf.USE_V1_SOURCE_LIST, "")
override def checkQueryExecution(df: DataFrame): Unit = {
// Verify that MetadataLogFileIndex is being used and the correct partitioning schema has
// been inferred
val table = df.queryExecution.analyzed.collect {
case DataSourceV2Relation(table: FileTable, _, _, _, _) => table
}
assert(table.size === 1)
assert(table.head.fileIndex.isInstanceOf[MetadataLogFileIndex])
assert(table.head.fileIndex.partitionSchema.exists(_.name == "id"))
assert(table.head.dataSchema.exists(_.name == "value"))
/** Check some condition on the partitions of the FileScanRDD generated by a DF */
def checkFileScanPartitions(df: DataFrame)(func: Seq[FilePartition] => Unit): Unit = {
val fileScan = df.queryExecution.executedPlan.collect {
case batch: BatchScanExec if batch.scan.isInstanceOf[FileScan] =>
batch.scan.asInstanceOf[FileScan]
}.headOption.getOrElse {
        fail(s"No FileScan in query\n${df.queryExecution}")
}
func(fileScan.planInputPartitions().map(_.asInstanceOf[FilePartition]))
}
// Read without pruning
checkFileScanPartitions(df) { partitions =>
// There should be as many distinct partition values as there are distinct ids
assert(partitions.flatMap(_.files.map(_.partitionValues)).distinct.size === 3)
}
// TODO: test partition pruning when file source V2 supports it.
}
}
/**
 * A special file system that fails when accessing the metadata log directory or when a glob
 * path is used for access.
*/
class FailFormatCheckFileSystem extends RawLocalFileSystem {
override def getFileStatus(f: Path): FileStatus = {
if (f.getName == FileStreamSink.metadataDir) {
throw new IOException("cannot access metadata log")
}
if (SparkHadoopUtil.get.isGlobPath(f)) {
throw new IOException("fail to access a glob path")
}
super.getFileStatus(f)
}
}
| cloud-fan/spark | sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSinkSuite.scala | Scala | apache-2.0 | 25,831 |
/**
 * Defines which nodes are considered neighbours
*/
package uk.ac.cdrc.mintsearch.graph
import org.neo4j.graphdb.RelationshipType
import org.neo4j.graphdb.traversal.{Evaluators, TraversalDescription, Uniqueness}
import org.neo4j.kernel.impl.traversal.MonoDirectionalTraversalDescription
trait TraversalStrategy {
val traversalDescription: TraversalDescription
}
trait NeighbourhoodByRadiusAndRelationships extends TraversalStrategy {
val radius: Int
val relTypes: Seq[String]
override lazy val traversalDescription: TraversalDescription =
relTypes
.foldLeft(
new MonoDirectionalTraversalDescription(): TraversalDescription
)(
(td, rType) => td.relationships(RelationshipType.withName(rType))
)
.uniqueness(Uniqueness.NODE_GLOBAL)
.evaluator(Evaluators.toDepth(radius))
override def toString: String = s"td$radius"
}
trait NeighbourhoodByRadius extends TraversalStrategy {
val radius: Int
override lazy val traversalDescription: TraversalDescription = new MonoDirectionalTraversalDescription()
.uniqueness(Uniqueness.NODE_GLOBAL)
.evaluator(Evaluators.toDepth(radius))
override def toString: String = s"td$radius"
}
object TraversalStrategy {
}
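// Illustrative usage sketch (added as a comment; the strategy name and relationship
// types below are hypothetical and not part of this project):
//
//   object TwoHopColleagues extends NeighbourhoodByRadiusAndRelationships {
//     override val radius: Int = 2
//     override val relTypes: Seq[String] = Seq("COLLEAGUE_OF", "WORKS_AT")
//   }
//
// The resulting traversalDescription restricts traversal to those relationship types,
// enforces global node uniqueness, and stops expanding at depth 2.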
| spacelis/mint-search | neo4j-plugin/src/main/scala/uk/ac/cdrc/mintsearch/graph/TraversalStrategy.scala | Scala | apache-2.0 | 1,236 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.eval
package internal
import monix.execution.Callback
import monix.eval.Task.Context
import monix.execution.Scheduler
import scala.util.control.NonFatal
private[eval] object TaskDeferAction {
/** Implementation for `Task.deferAction`. */
def apply[A](f: Scheduler => Task[A]): Task[A] = {
val start = (context: Context, callback: Callback[Throwable, A]) => {
implicit val ec = context.scheduler
var streamErrors = true
try {
val fa = f(ec)
streamErrors = false
Task.unsafeStartNow(fa, context, callback)
} catch {
case ex if NonFatal(ex) =>
if (streamErrors)
callback.onError(ex)
else {
// $COVERAGE-OFF$
ec.reportFailure(ex)
// $COVERAGE-ON$
}
}
}
Task.Async(
start,
trampolineBefore = true,
trampolineAfter = true,
restoreLocals = false
)
}
}
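// Illustrative usage sketch (added as a comment, not part of the original file): the
// public `Task.deferAction` builder, which this object implements, lets a task inspect
// the Scheduler it will run on before constructing the actual Task, for example:
//
//   import monix.eval.Task
//   import java.util.concurrent.TimeUnit.MILLISECONDS
//
//   val wallClock: Task[Long] =
//     Task.deferAction(scheduler => Task.now(scheduler.clockRealTime(MILLISECONDS)))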
| alexandru/monifu | monix-eval/shared/src/main/scala/monix/eval/internal/TaskDeferAction.scala | Scala | apache-2.0 | 1,624 |
package monocle.function
import monocle.function.fields._
import monocle.{Iso, Lens}
import scala.annotation.implicitNotFound
/**
* Typeclass that defines an [[Iso]] between an `S` and its head `H` and tail `T`
 * [[Cons1]] is like [[Cons]] but for types that *always* have a head and a tail, e.g. a non-empty list
 * @tparam S source of [[Iso]]
 * @tparam H head of [[Iso]] target, `H` is supposed to be unique for a given `S`
* @tparam T tail of [[Iso]] target, `T` is supposed to be unique for a given `S`
*/
@implicitNotFound("Could not find an instance of Cons1[${S}, ${H}, ${T}], please check Monocle instance location policy to " +
"find out which import is necessary")
abstract class Cons1[S, H, T] extends Serializable {
def cons1: Iso[S, (H, T)]
def head: Lens[S, H] = cons1 composeLens first
def tail: Lens[S, T] = cons1 composeLens second
}
trait Cons1Functions {
final def cons1[S, H, T](implicit ev: Cons1[S, H, T]): Iso[S, (H, T)] = ev.cons1
final def head[S, H, T](implicit ev: Cons1[S, H, T]): Lens[S, H] = ev.head
final def tail[S, H, T](implicit ev: Cons1[S, H, T]): Lens[S, T] = ev.tail
/** append an element to the head */
final def _cons1[S, H, T](head: H, tail: T)(implicit ev: Cons1[S, H, T]): S =
ev.cons1.reverseGet((head, tail))
/** deconstruct an S between its head and tail */
final def _uncons1[S, H, T](s: S)(implicit ev: Cons1[S, H, T]): (H, T) =
ev.cons1.get(s)
}
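// Illustrative usage sketch (added as a comment; it relies on the scalaz NonEmptyList
// instance defined further down in this file):
//
//   import monocle.function.Cons1._
//   import scalaz.{IList, NonEmptyList}
//
//   val nel = NonEmptyList(1, 2, 3)
//   head[NonEmptyList[Int], Int, IList[Int]].get(nel)          // 1
//   _uncons1(nel)                                              // (1, IList(2, 3))
//   _cons1[NonEmptyList[Int], Int, IList[Int]](0, IList(1))    // NonEmptyList(0, 1)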
object Cons1 extends Cons1Functions {
/** lift an instance of [[Cons1]] using an [[Iso]] */
def fromIso[S, A, H, T](iso: Iso[S, A])(implicit ev: Cons1[A, H, T]): Cons1[S, H, T] = new Cons1[S, H, T] {
val cons1: Iso[S, (H, T)] =
iso composeIso ev.cons1
}
/************************************************************************************************/
/** Std instances */
/************************************************************************************************/
implicit def tuple2Cons1[A1, A2]: Cons1[(A1, A2), A1, A2] = new Cons1[(A1, A2), A1, A2] {
val cons1 = Iso[(A1, A2), (A1, A2)](identity)(identity)
}
implicit def tuple3Cons1[A1, A2, A3]: Cons1[(A1, A2, A3), A1, (A2, A3)] = new Cons1[(A1, A2, A3), A1, (A2, A3)] {
val cons1 = Iso[(A1, A2, A3), (A1, (A2, A3))](t => (t._1, (t._2, t._3))){ case (h, t) => (h, t._1, t._2) }
}
implicit def tuple4Cons1[A1, A2, A3, A4]: Cons1[(A1, A2, A3, A4), A1, (A2, A3, A4)] = new Cons1[(A1, A2, A3, A4), A1, (A2, A3, A4)]{
val cons1 = Iso[(A1, A2, A3, A4), (A1, (A2, A3, A4))](t => (t._1, (t._2, t._3, t._4))){ case (h, t) => (h, t._1, t._2, t._3) }
}
implicit def tuple5Cons1[A1, A2, A3, A4, A5]: Cons1[(A1, A2, A3, A4, A5), A1, (A2, A3, A4, A5)] = new Cons1[(A1, A2, A3, A4, A5), A1, (A2, A3, A4, A5)]{
val cons1 = Iso[(A1, A2, A3, A4, A5), (A1, (A2, A3, A4, A5))](t => (t._1, (t._2, t._3, t._4, t._5))){ case (h, t) => (h, t._1, t._2, t._3, t._4) }
}
implicit def tuple5Snoc1[A1, A2, A3, A4, A5]: Snoc1[(A1, A2, A3, A4, A5), (A1, A2, A3, A4), A5] = new Snoc1[(A1, A2, A3, A4, A5), (A1, A2, A3, A4), A5]{
def snoc1 = Iso[(A1, A2, A3, A4, A5), ((A1, A2, A3, A4), A5)](t => ((t._1, t._2, t._3, t._4), t._5)){ case (i, l) => (i._1, i._2, i._3, i._4, l) }
}
implicit def tuple6Cons1[A1, A2, A3, A4, A5, A6]: Cons1[(A1, A2, A3, A4, A5, A6), A1, (A2, A3, A4, A5, A6)] = new Cons1[(A1, A2, A3, A4, A5, A6), A1, (A2, A3, A4, A5, A6)]{
val cons1 = Iso[(A1, A2, A3, A4, A5, A6), (A1, (A2, A3, A4, A5, A6))](t => (t._1, (t._2, t._3, t._4, t._5, t._6))){ case (h, t) => (h, t._1, t._2, t._3, t._4, t._5) }
}
/************************************************************************************************/
/** Scalaz instances */
/************************************************************************************************/
import scalaz.{Cofree, IList, NonEmptyList, OneAnd}
implicit def cofreeCons1[S[_], A]: Cons1[Cofree[S, A], A, S[Cofree[S, A]]] =
new Cons1[Cofree[S, A], A, S[Cofree[S, A]]] {
val cons1: Iso[Cofree[S, A], (A, S[Cofree[S, A]])] =
Iso((c: Cofree[S, A]) => (c.head, c.tail)){ case (h, t) => Cofree(h, t) }
/** Overridden to prevent forcing evaluation of the `tail` when we're only
* interested in using the `head` */
override def head: Lens[Cofree[S, A], A] =
Lens((c: Cofree[S, A]) => c.head)(h => c => Cofree.delay(h, c.tail))
}
implicit def nelCons1[A]: Cons1[NonEmptyList[A], A, IList[A]] =
new Cons1[NonEmptyList[A],A,IList[A]]{
val cons1: Iso[NonEmptyList[A], (A, IList[A])] =
Iso((nel: NonEmptyList[A]) => (nel.head,nel.tail)){case (h,t) => NonEmptyList.nel(h, t)}
}
implicit def oneAndCons1[T[_], A]: Cons1[OneAnd[T, A], A, T[A]] = new Cons1[OneAnd[T, A], A, T[A]] {
val cons1 = Iso[OneAnd[T, A], (A, T[A])](o => (o.head, o.tail)){ case (h, t) => OneAnd(h, t)}
}
} | rperry/Monocle | core/shared/src/main/scala/monocle/function/Cons1.scala | Scala | mit | 5,003 |
package io.iohk.ethereum.jsonrpc
import akka.actor.ActorSystem
import akka.testkit.{TestKit, TestProbe}
import akka.util.ByteString
import io.iohk.ethereum.Mocks.MockValidatorsAlwaysSucceed
import io.iohk.ethereum.blockchain.sync.SyncProtocol.Status.Progress
import io.iohk.ethereum.blockchain.sync.{EphemBlockchainTestSetup, SyncProtocol}
import io.iohk.ethereum.consensus._
import io.iohk.ethereum.consensus.blocks.{PendingBlock, PendingBlockAndState}
import io.iohk.ethereum.consensus.ethash.blocks.{EthashBlockGenerator, RestrictedEthashBlockGeneratorImpl}
import io.iohk.ethereum.consensus.ethash.difficulty.EthashDifficultyCalculator
import io.iohk.ethereum.crypto.{ECDSASignature, kec256}
import io.iohk.ethereum.db.storage.AppStateStorage
import io.iohk.ethereum.domain.BlockHeader.getEncodedWithoutNonce
import io.iohk.ethereum.domain.{Address, Block, BlockHeader, BlockchainImpl, UInt256, _}
import io.iohk.ethereum.jsonrpc.EthService.{ProtocolVersionRequest, _}
import io.iohk.ethereum.jsonrpc.FilterManager.TxLog
import io.iohk.ethereum.jsonrpc.server.controllers.JsonRpcBaseController.JsonRpcConfig
import io.iohk.ethereum.keystore.KeyStore
import io.iohk.ethereum.ledger.Ledger.TxResult
import io.iohk.ethereum.ledger.{Ledger, StxLedger}
import io.iohk.ethereum.mpt.{ByteArrayEncoder, ByteArraySerializable, MerklePatriciaTrie}
import io.iohk.ethereum.nodebuilder.ApisBuilder
import io.iohk.ethereum.ommers.OmmersPool
import io.iohk.ethereum.testing.ActorsTesting.simpleAutoPilot
import io.iohk.ethereum.transactions.PendingTransactionsManager
import io.iohk.ethereum.transactions.PendingTransactionsManager.{
GetPendingTransactions,
PendingTransaction,
PendingTransactionsResponse
}
import io.iohk.ethereum.utils._
import io.iohk.ethereum._
import monix.execution.Scheduler.Implicits.global
import org.bouncycastle.util.encoders.Hex
import org.scalactic.TypeCheckedTripleEquals
import org.scalamock.scalatest.MockFactory
import org.scalatest.OptionValues
import org.scalatest.concurrent.ScalaFutures
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers
import scala.concurrent.duration.{Duration, DurationInt, FiniteDuration}
// scalastyle:off file.size.limit
class EthServiceSpec
extends TestKit(ActorSystem("EthServiceSpec_ActorSystem"))
with AnyFlatSpecLike
with WithActorSystemShutDown
with Matchers
with ScalaFutures
with OptionValues
with MockFactory
with NormalPatience
with TypeCheckedTripleEquals {
"EthService" should "answer eth_blockNumber with the latest block number" in new TestSetup {
val bestBlockNumber = 10
blockchain.saveBestKnownBlocks(bestBlockNumber)
val response = ethService.bestBlockNumber(BestBlockNumberRequest()).runSyncUnsafe(Duration.Inf).right.get
response.bestBlockNumber shouldEqual bestBlockNumber
}
it should "return ethereum protocol version" in new TestSetup {
val response = ethService.protocolVersion(ProtocolVersionRequest()).runSyncUnsafe()
val protocolVersion = response.right.get.value
Integer.parseInt(protocolVersion.drop(2), 16) shouldEqual currentProtocolVersion
}
it should "return configured chain id" in new TestSetup {
val response = ethService.chainId(ChainIdRequest()).runSyncUnsafe().right.get
assert(response === ChainIdResponse(blockchainConfig.chainId))
}
it should "answer eth_getBlockTransactionCountByHash with None when the requested block isn't in the blockchain" in new TestSetup {
val request = TxCountByBlockHashRequest(blockToRequestHash)
val response = ethService.getBlockTransactionCountByHash(request).runSyncUnsafe(Duration.Inf).right.get
response.txsQuantity shouldBe None
}
  it should "answer eth_getBlockTransactionCountByHash with zero when the requested block is in the blockchain and has no tx" in new TestSetup {
blockchain.storeBlock(blockToRequest.copy(body = BlockBody(Nil, Nil))).commit()
val request = TxCountByBlockHashRequest(blockToRequestHash)
val response = ethService.getBlockTransactionCountByHash(request).runSyncUnsafe(Duration.Inf).right.get
response.txsQuantity shouldBe Some(0)
}
it should "answer eth_getBlockTransactionCountByHash correctly when the requested block is in the blockchain and has some tx" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
val request = TxCountByBlockHashRequest(blockToRequestHash)
val response = ethService.getBlockTransactionCountByHash(request).runSyncUnsafe(Duration.Inf).right.get
response.txsQuantity shouldBe Some(blockToRequest.body.transactionList.size)
}
it should "answer eth_getTransactionByBlockHashAndIndex with None when there is no block with the requested hash" in new TestSetup {
val txIndexToRequest = blockToRequest.body.transactionList.size / 2
val request = GetTransactionByBlockHashAndIndexRequest(blockToRequest.header.hash, txIndexToRequest)
val response = ethService.getTransactionByBlockHashAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
response.transactionResponse shouldBe None
}
it should "answer eth_getTransactionByBlockHashAndIndex with None when there is no tx in requested index" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
val invalidTxIndex = blockToRequest.body.transactionList.size
val requestWithInvalidIndex = GetTransactionByBlockHashAndIndexRequest(blockToRequest.header.hash, invalidTxIndex)
val response = ethService
.getTransactionByBlockHashAndIndex(requestWithInvalidIndex)
.runSyncUnsafe(Duration.Inf)
.right
.get
response.transactionResponse shouldBe None
}
it should "answer eth_getTransactionByBlockHashAndIndex with the transaction response correctly when the requested index has one" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
val txIndexToRequest = blockToRequest.body.transactionList.size / 2
val request = GetTransactionByBlockHashAndIndexRequest(blockToRequest.header.hash, txIndexToRequest)
val response = ethService.getTransactionByBlockHashAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
val requestedStx = blockToRequest.body.transactionList.apply(txIndexToRequest)
val expectedTxResponse = TransactionResponse(requestedStx, Some(blockToRequest.header), Some(txIndexToRequest))
response.transactionResponse shouldBe Some(expectedTxResponse)
}
it should "answer eth_getRawTransactionByBlockHashAndIndex with None when there is no block with the requested hash" in new TestSetup {
// given
val txIndexToRequest = blockToRequest.body.transactionList.size / 2
val request = GetTransactionByBlockHashAndIndexRequest(blockToRequest.header.hash, txIndexToRequest)
// when
val response = ethService.getRawTransactionByBlockHashAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
// then
response.transactionResponse shouldBe None
}
it should "answer eth_getRawTransactionByBlockHashAndIndex with None when there is no tx in requested index" in new TestSetup {
// given
blockchain.storeBlock(blockToRequest).commit()
val invalidTxIndex = blockToRequest.body.transactionList.size
val requestWithInvalidIndex = GetTransactionByBlockHashAndIndexRequest(blockToRequest.header.hash, invalidTxIndex)
// when
val response = ethService
.getRawTransactionByBlockHashAndIndex(requestWithInvalidIndex)
.runSyncUnsafe(Duration.Inf)
.toOption
.value
// then
response.transactionResponse shouldBe None
}
it should "answer eth_getRawTransactionByBlockHashAndIndex with the transaction response correctly when the requested index has one" in new TestSetup {
// given
blockchain.storeBlock(blockToRequest).commit()
val txIndexToRequest = blockToRequest.body.transactionList.size / 2
val request = GetTransactionByBlockHashAndIndexRequest(blockToRequest.header.hash, txIndexToRequest)
// when
val response = ethService.getRawTransactionByBlockHashAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
// then
val expectedTxResponse = blockToRequest.body.transactionList.lift(txIndexToRequest)
response.transactionResponse shouldBe expectedTxResponse
}
it should "handle eth_getRawTransactionByHash if the tx is not on the blockchain and not in the tx pool" in new TestSetup {
// given
(ledger.consensus _: (() => Consensus)).expects().returns(consensus)
val request = GetTransactionByHashRequest(txToRequestHash)
// when
val response = ethService.getRawTransactionByHash(request).runSyncUnsafe()
// then
pendingTransactionsManager.expectMsg(PendingTransactionsManager.GetPendingTransactions)
pendingTransactionsManager.reply(PendingTransactionsResponse(Nil))
response shouldEqual Right(RawTransactionResponse(None))
}
it should "handle eth_getRawTransactionByHash if the tx is still pending" in new TestSetup {
// given
(ledger.consensus _: (() => Consensus)).expects().returns(consensus)
val request = GetTransactionByHashRequest(txToRequestHash)
// when
val response = ethService.getRawTransactionByHash(request).runToFuture
// then
pendingTransactionsManager.expectMsg(PendingTransactionsManager.GetPendingTransactions)
pendingTransactionsManager.reply(
PendingTransactionsResponse(Seq(PendingTransaction(txToRequestWithSender, System.currentTimeMillis)))
)
response.futureValue shouldEqual Right(RawTransactionResponse(Some(txToRequest)))
}
it should "handle eth_getRawTransactionByHash if the tx was already executed" in new TestSetup {
// given
(ledger.consensus _: (() => Consensus)).expects().returns(consensus)
val blockWithTx = Block(Fixtures.Blocks.Block3125369.header, Fixtures.Blocks.Block3125369.body)
blockchain.storeBlock(blockWithTx).commit()
val request = GetTransactionByHashRequest(txToRequestHash)
// when
val response = ethService.getRawTransactionByHash(request).runSyncUnsafe()
// then
pendingTransactionsManager.expectMsg(PendingTransactionsManager.GetPendingTransactions)
pendingTransactionsManager.reply(PendingTransactionsResponse(Nil))
response shouldEqual Right(RawTransactionResponse(Some(txToRequest)))
}
it should "answer eth_getBlockByNumber with the correct block when the pending block is requested" in new TestSetup {
(ledger.consensus _: (() => Consensus)).expects().returns(consensus)
(appStateStorage.getBestBlockNumber _: () => BigInt).expects().returns(blockToRequest.header.number)
(blockGenerator.getPendingBlockAndState _)
.expects()
.returns(Some(PendingBlockAndState(PendingBlock(blockToRequest, Nil), fakeWorld)))
val request = BlockByNumberRequest(BlockParam.Pending, fullTxs = true)
val response = ethService.getBlockByNumber(request).runSyncUnsafe().right.get
response.blockResponse.isDefined should be(true)
val blockResponse = response.blockResponse.get
blockResponse.hash shouldBe None
blockResponse.nonce shouldBe None
blockResponse.miner shouldBe None
blockResponse.number shouldBe blockToRequest.header.number
}
  it should "answer eth_getBlockByNumber with the latest block when the pending block is requested and there are no pending ones" in new TestSetup {
(ledger.consensus _: (() => Consensus)).expects().returns(consensus)
blockchain
.storeBlock(blockToRequest)
.and(blockchain.storeChainWeight(blockToRequestHash, blockWeight))
.commit()
blockchain.saveBestKnownBlocks(blockToRequest.header.number)
(blockGenerator.getPendingBlockAndState _).expects().returns(None)
val request = BlockByNumberRequest(BlockParam.Pending, fullTxs = true)
val response = ethService.getBlockByNumber(request).runSyncUnsafe().right.get
response.blockResponse.get.hash.get shouldEqual blockToRequest.header.hash
}
it should "answer eth_getBlockByNumber with None when the requested block isn't in the blockchain" in new TestSetup {
val request = BlockByNumberRequest(BlockParam.WithNumber(blockToRequestNumber), fullTxs = true)
val response = ethService.getBlockByNumber(request).runSyncUnsafe(Duration.Inf).right.get
response.blockResponse shouldBe None
}
  it should "answer eth_getBlockByNumber with the block response correctly when its chain weight is in the blockchain" in new TestSetup {
blockchain
.storeBlock(blockToRequest)
.and(blockchain.storeChainWeight(blockToRequestHash, blockWeight))
.commit()
val request = BlockByNumberRequest(BlockParam.WithNumber(blockToRequestNumber), fullTxs = true)
val response = ethService.getBlockByNumber(request).runSyncUnsafe(Duration.Inf).right.get
val stxResponses = blockToRequest.body.transactionList.zipWithIndex.map { case (stx, txIndex) =>
TransactionResponse(stx, Some(blockToRequest.header), Some(txIndex))
}
response.blockResponse shouldBe Some(
BlockResponse(blockToRequest, fullTxs = true, weight = Some(blockWeight))
)
response.blockResponse.get.chainWeight shouldBe Some(blockWeight)
response.blockResponse.get.transactions.right.toOption shouldBe Some(stxResponses)
}
  it should "answer eth_getBlockByNumber with the block response correctly when its chain weight is not in the blockchain" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
val request = BlockByNumberRequest(BlockParam.WithNumber(blockToRequestNumber), fullTxs = true)
val response = ethService.getBlockByNumber(request).runSyncUnsafe(Duration.Inf).right.get
val stxResponses = blockToRequest.body.transactionList.zipWithIndex.map { case (stx, txIndex) =>
TransactionResponse(stx, Some(blockToRequest.header), Some(txIndex))
}
response.blockResponse shouldBe Some(BlockResponse(blockToRequest, fullTxs = true))
response.blockResponse.get.chainWeight shouldBe None
response.blockResponse.get.transactions.right.toOption shouldBe Some(stxResponses)
}
it should "answer eth_getBlockByNumber with the block response correctly when the txs should be hashed" in new TestSetup {
blockchain
.storeBlock(blockToRequest)
.and(blockchain.storeChainWeight(blockToRequestHash, blockWeight))
.commit()
val request = BlockByNumberRequest(BlockParam.WithNumber(blockToRequestNumber), fullTxs = true)
val response = ethService.getBlockByNumber(request.copy(fullTxs = false)).runSyncUnsafe(Duration.Inf).right.get
response.blockResponse shouldBe Some(
BlockResponse(blockToRequest, fullTxs = false, weight = Some(blockWeight))
)
response.blockResponse.get.chainWeight shouldBe Some(blockWeight)
response.blockResponse.get.transactions.left.toOption shouldBe Some(blockToRequest.body.transactionList.map(_.hash))
}
it should "answer eth_getBlockByHash with None when the requested block isn't in the blockchain" in new TestSetup {
val request = BlockByBlockHashRequest(blockToRequestHash, fullTxs = true)
val response = ethService.getByBlockHash(request).runSyncUnsafe(Duration.Inf).right.get
response.blockResponse shouldBe None
}
  it should "answer eth_getBlockByHash with the block response correctly when its chain weight is in the blockchain" in new TestSetup {
blockchain
.storeBlock(blockToRequest)
.and(blockchain.storeChainWeight(blockToRequestHash, blockWeight))
.commit()
val request = BlockByBlockHashRequest(blockToRequestHash, fullTxs = true)
val response = ethService.getByBlockHash(request).runSyncUnsafe(Duration.Inf).right.get
val stxResponses = blockToRequest.body.transactionList.zipWithIndex.map { case (stx, txIndex) =>
TransactionResponse(stx, Some(blockToRequest.header), Some(txIndex))
}
response.blockResponse shouldBe Some(
BlockResponse(blockToRequest, fullTxs = true, weight = Some(blockWeight))
)
response.blockResponse.get.chainWeight shouldBe Some(blockWeight)
response.blockResponse.get.transactions.right.toOption shouldBe Some(stxResponses)
}
  it should "answer eth_getBlockByHash with the block response correctly when its chain weight is not in the blockchain" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
val request = BlockByBlockHashRequest(blockToRequestHash, fullTxs = true)
val response = ethService.getByBlockHash(request).runSyncUnsafe(Duration.Inf).right.get
val stxResponses = blockToRequest.body.transactionList.zipWithIndex.map { case (stx, txIndex) =>
TransactionResponse(stx, Some(blockToRequest.header), Some(txIndex))
}
response.blockResponse shouldBe Some(BlockResponse(blockToRequest, fullTxs = true))
response.blockResponse.get.chainWeight shouldBe None
response.blockResponse.get.transactions.right.toOption shouldBe Some(stxResponses)
}
it should "answer eth_getBlockByHash with the block response correctly when the txs should be hashed" in new TestSetup {
blockchain
.storeBlock(blockToRequest)
.and(blockchain.storeChainWeight(blockToRequestHash, blockWeight))
.commit()
val request = BlockByBlockHashRequest(blockToRequestHash, fullTxs = true)
val response = ethService.getByBlockHash(request.copy(fullTxs = false)).runSyncUnsafe(Duration.Inf).right.get
response.blockResponse shouldBe Some(
BlockResponse(blockToRequest, fullTxs = false, weight = Some(blockWeight))
)
response.blockResponse.get.chainWeight shouldBe Some(blockWeight)
response.blockResponse.get.transactions.left.toOption shouldBe Some(blockToRequest.body.transactionList.map(_.hash))
}
it should "answer eth_getUncleByBlockHashAndIndex with None when the requested block isn't in the blockchain" in new TestSetup {
val uncleIndexToRequest = 0
val request = UncleByBlockHashAndIndexRequest(blockToRequestHash, uncleIndexToRequest)
val response = ethService.getUncleByBlockHashAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
response.uncleBlockResponse shouldBe None
}
it should "answer eth_getUncleByBlockHashAndIndex with None when there's no uncle" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
val uncleIndexToRequest = 0
val request = UncleByBlockHashAndIndexRequest(blockToRequestHash, uncleIndexToRequest)
val response = ethService.getUncleByBlockHashAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
response.uncleBlockResponse shouldBe None
}
it should "answer eth_getUncleByBlockHashAndIndex with None when there's no uncle in the requested index" in new TestSetup {
blockchain.storeBlock(blockToRequestWithUncles).commit()
val uncleIndexToRequest = 0
val request = UncleByBlockHashAndIndexRequest(blockToRequestHash, uncleIndexToRequest)
val response1 =
ethService.getUncleByBlockHashAndIndex(request.copy(uncleIndex = 1)).runSyncUnsafe(Duration.Inf).right.get
val response2 =
ethService.getUncleByBlockHashAndIndex(request.copy(uncleIndex = -1)).runSyncUnsafe(Duration.Inf).right.get
response1.uncleBlockResponse shouldBe None
response2.uncleBlockResponse shouldBe None
}
it should "answer eth_getUncleByBlockHashAndIndex correctly when the requested index has one but there's no chain weight for it" in new TestSetup {
blockchain.storeBlock(blockToRequestWithUncles).commit()
val uncleIndexToRequest = 0
val request = UncleByBlockHashAndIndexRequest(blockToRequestHash, uncleIndexToRequest)
val response = ethService.getUncleByBlockHashAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
response.uncleBlockResponse shouldBe Some(BlockResponse(uncle, None, pendingBlock = false))
response.uncleBlockResponse.get.chainWeight shouldBe None
response.uncleBlockResponse.get.transactions shouldBe Left(Nil)
response.uncleBlockResponse.get.uncles shouldBe Nil
}
  it should "answer eth_getUncleByBlockHashAndIndex correctly when the requested index has one and there's chain weight for it" in new TestSetup {
blockchain
.storeBlock(blockToRequestWithUncles)
.and(blockchain.storeChainWeight(uncle.hash, uncleWeight))
.commit()
val uncleIndexToRequest = 0
val request = UncleByBlockHashAndIndexRequest(blockToRequestHash, uncleIndexToRequest)
val response = ethService.getUncleByBlockHashAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
response.uncleBlockResponse shouldBe Some(BlockResponse(uncle, Some(uncleWeight), pendingBlock = false))
response.uncleBlockResponse.get.chainWeight shouldBe Some(uncleWeight)
response.uncleBlockResponse.get.transactions shouldBe Left(Nil)
response.uncleBlockResponse.get.uncles shouldBe Nil
}
it should "answer eth_getUncleByBlockNumberAndIndex with None when the requested block isn't in the blockchain" in new TestSetup {
val uncleIndexToRequest = 0
val request = UncleByBlockNumberAndIndexRequest(BlockParam.WithNumber(blockToRequestNumber), uncleIndexToRequest)
val response = ethService.getUncleByBlockNumberAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
response.uncleBlockResponse shouldBe None
}
it should "answer eth_getUncleByBlockNumberAndIndex with None when there's no uncle" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
val uncleIndexToRequest = 0
val request = UncleByBlockNumberAndIndexRequest(BlockParam.WithNumber(blockToRequestNumber), uncleIndexToRequest)
val response = ethService.getUncleByBlockNumberAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
response.uncleBlockResponse shouldBe None
}
it should "answer eth_getUncleByBlockNumberAndIndex with None when there's no uncle in the requested index" in new TestSetup {
blockchain.storeBlock(blockToRequestWithUncles).commit()
val uncleIndexToRequest = 0
val request = UncleByBlockNumberAndIndexRequest(BlockParam.WithNumber(blockToRequestNumber), uncleIndexToRequest)
val response1 =
ethService.getUncleByBlockNumberAndIndex(request.copy(uncleIndex = 1)).runSyncUnsafe(Duration.Inf).right.get
val response2 =
ethService.getUncleByBlockNumberAndIndex(request.copy(uncleIndex = -1)).runSyncUnsafe(Duration.Inf).right.get
response1.uncleBlockResponse shouldBe None
response2.uncleBlockResponse shouldBe None
}
it should "answer eth_getUncleByBlockNumberAndIndex correctly when the requested index has one but there's no chain weight for it" in new TestSetup {
blockchain.storeBlock(blockToRequestWithUncles).commit()
val uncleIndexToRequest = 0
val request = UncleByBlockNumberAndIndexRequest(BlockParam.WithNumber(blockToRequestNumber), uncleIndexToRequest)
val response = ethService.getUncleByBlockNumberAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
response.uncleBlockResponse shouldBe Some(BlockResponse(uncle, None, pendingBlock = false))
response.uncleBlockResponse.get.chainWeight shouldBe None
response.uncleBlockResponse.get.transactions shouldBe Left(Nil)
response.uncleBlockResponse.get.uncles shouldBe Nil
}
  it should "answer eth_getUncleByBlockNumberAndIndex correctly when the requested index has one and there's chain weight for it" in new TestSetup {
blockchain
.storeBlock(blockToRequestWithUncles)
.and(blockchain.storeChainWeight(uncle.hash, uncleWeight))
.commit()
val uncleIndexToRequest = 0
val request = UncleByBlockNumberAndIndexRequest(BlockParam.WithNumber(blockToRequestNumber), uncleIndexToRequest)
val response = ethService.getUncleByBlockNumberAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
response.uncleBlockResponse shouldBe Some(BlockResponse(uncle, Some(uncleWeight), pendingBlock = false))
response.uncleBlockResponse.get.chainWeight shouldBe Some(uncleWeight)
response.uncleBlockResponse.get.transactions shouldBe Left(Nil)
response.uncleBlockResponse.get.uncles shouldBe Nil
}
it should "return syncing info if the peer is syncing" in new TestSetup {
syncingController.setAutoPilot(simpleAutoPilot { case SyncProtocol.GetStatus =>
SyncProtocol.Status.Syncing(999, Progress(200, 10000), Some(Progress(100, 144)))
})
val response = ethService.syncing(SyncingRequest()).runSyncUnsafe().right.get
response shouldEqual SyncingResponse(
Some(
EthService.SyncingStatus(
startingBlock = 999,
currentBlock = 200,
highestBlock = 10000,
knownStates = 144,
pulledStates = 100
)
)
)
}
// scalastyle:off magic.number
it should "return no syncing info if the peer is not syncing" in new TestSetup {
syncingController.setAutoPilot(simpleAutoPilot { case SyncProtocol.GetStatus =>
SyncProtocol.Status.NotSyncing
})
val response = ethService.syncing(SyncingRequest()).runSyncUnsafe()
response shouldEqual Right(SyncingResponse(None))
}
it should "return no syncing info if sync is done" in new TestSetup {
syncingController.setAutoPilot(simpleAutoPilot { case SyncProtocol.GetStatus =>
SyncProtocol.Status.SyncDone
})
val response = ethService.syncing(SyncingRequest()).runSyncUnsafe()
response shouldEqual Right(SyncingResponse(None))
}
it should "return requested work" in new TestSetup {
(ledger.consensus _: (() => Consensus)).expects().returns(consensus).anyNumberOfTimes()
(blockGenerator.generateBlock _)
.expects(parentBlock, Nil, *, *, *)
.returning(PendingBlockAndState(PendingBlock(block, Nil), fakeWorld))
blockchain.save(parentBlock, Nil, ChainWeight.totalDifficultyOnly(parentBlock.header.difficulty), true)
val response = ethService.getWork(GetWorkRequest()).runSyncUnsafe()
pendingTransactionsManager.expectMsg(PendingTransactionsManager.GetPendingTransactions)
pendingTransactionsManager.reply(PendingTransactionsManager.PendingTransactionsResponse(Nil))
ommersPool.expectMsg(OmmersPool.GetOmmers(parentBlock.hash))
ommersPool.reply(OmmersPool.Ommers(Nil))
response shouldEqual Right(GetWorkResponse(powHash, seedHash, target))
}
it should "generate and submit work when generating block for mining with restricted ethash generator" in new TestSetup {
lazy val cons = buildTestConsensus().withBlockGenerator(restrictedGenerator)
(ledger.consensus _: (() => Consensus)).expects().returns(cons).anyNumberOfTimes()
blockchain.save(parentBlock, Nil, ChainWeight.totalDifficultyOnly(parentBlock.header.difficulty), true)
val response = ethService.getWork(GetWorkRequest()).runSyncUnsafe()
pendingTransactionsManager.expectMsg(PendingTransactionsManager.GetPendingTransactions)
pendingTransactionsManager.reply(PendingTransactionsManager.PendingTransactionsResponse(Nil))
ommersPool.expectMsg(OmmersPool.GetOmmers(parentBlock.hash))
ommersPool.reply(OmmersPool.Ommers(Nil))
assert(response.isRight)
val responseData = response.right.get
val submitRequest =
SubmitWorkRequest(ByteString("nonce"), responseData.powHeaderHash, ByteString(Hex.decode("01" * 32)))
val response1 = ethService.submitWork(submitRequest).runSyncUnsafe()
response1 shouldEqual Right(SubmitWorkResponse(true))
}
it should "accept submitted correct PoW" in new TestSetup {
(ledger.consensus _: (() => Consensus)).expects().returns(consensus)
val headerHash = ByteString(Hex.decode("01" * 32))
(blockGenerator.getPrepared _).expects(headerHash).returning(Some(PendingBlock(block, Nil)))
(appStateStorage.getBestBlockNumber _).expects().returning(0)
val req = SubmitWorkRequest(ByteString("nonce"), headerHash, ByteString(Hex.decode("01" * 32)))
val response = ethService.submitWork(req)
response.runSyncUnsafe() shouldEqual Right(SubmitWorkResponse(true))
}
it should "reject submitted correct PoW when header is no longer in cache" in new TestSetup {
(ledger.consensus _: (() => Consensus)).expects().returns(consensus)
val headerHash = ByteString(Hex.decode("01" * 32))
(blockGenerator.getPrepared _).expects(headerHash).returning(None)
(appStateStorage.getBestBlockNumber _).expects().returning(0)
val req = SubmitWorkRequest(ByteString("nonce"), headerHash, ByteString(Hex.decode("01" * 32)))
val response = ethService.submitWork(req)
response.runSyncUnsafe() shouldEqual Right(SubmitWorkResponse(false))
}
it should "execute call and return a value" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
blockchain.saveBestKnownBlocks(blockToRequest.header.number)
val txResult = TxResult(
BlockchainImpl(storagesInstance.storages)
.getWorldStateProxy(-1, UInt256.Zero, ByteString.empty, noEmptyAccounts = false, ethCompatibleStorage = true),
123,
Nil,
ByteString("return_value"),
None
)
(stxLedger.simulateTransaction _).expects(*, *, *).returning(txResult)
val tx = CallTx(
Some(ByteString(Hex.decode("da714fe079751fa7a1ad80b76571ea6ec52a446c"))),
Some(ByteString(Hex.decode("abbb6bebfa05aa13e908eaa492bd7a8343760477"))),
Some(1),
2,
3,
ByteString("")
)
val response = ethService.call(CallRequest(tx, BlockParam.Latest))
response.runSyncUnsafe() shouldEqual Right(CallResponse(ByteString("return_value")))
}
it should "execute estimateGas and return a value" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
blockchain.saveBestKnownBlocks(blockToRequest.header.number)
val estimatedGas = BigInt(123)
(stxLedger.binarySearchGasEstimation _).expects(*, *, *).returning(estimatedGas)
val tx = CallTx(
Some(ByteString(Hex.decode("da714fe079751fa7a1ad80b76571ea6ec52a446c"))),
Some(ByteString(Hex.decode("abbb6bebfa05aa13e908eaa492bd7a8343760477"))),
Some(1),
2,
3,
ByteString("")
)
val response = ethService.estimateGas(CallRequest(tx, BlockParam.Latest))
response.runSyncUnsafe() shouldEqual Right(EstimateGasResponse(123))
}
it should "get uncle count by block number" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
blockchain.saveBestKnownBlocks(blockToRequest.header.number)
val response = ethService.getUncleCountByBlockNumber(GetUncleCountByBlockNumberRequest(BlockParam.Latest))
response.runSyncUnsafe() shouldEqual Right(
GetUncleCountByBlockNumberResponse(blockToRequest.body.uncleNodesList.size)
)
}
it should "get uncle count by block hash" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
val response = ethService.getUncleCountByBlockHash(GetUncleCountByBlockHashRequest(blockToRequest.header.hash))
response.runSyncUnsafe() shouldEqual Right(
GetUncleCountByBlockHashResponse(blockToRequest.body.uncleNodesList.size)
)
}
it should "get transaction count by block number" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
val response = ethService.getBlockTransactionCountByNumber(
GetBlockTransactionCountByNumberRequest(BlockParam.WithNumber(blockToRequest.header.number))
)
response.runSyncUnsafe() shouldEqual Right(
GetBlockTransactionCountByNumberResponse(blockToRequest.body.transactionList.size)
)
}
it should "get transaction count by latest block number" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
blockchain.saveBestKnownBlocks(blockToRequest.header.number)
val response =
ethService.getBlockTransactionCountByNumber(GetBlockTransactionCountByNumberRequest(BlockParam.Latest))
response.runSyncUnsafe() shouldEqual Right(
GetBlockTransactionCountByNumberResponse(blockToRequest.body.transactionList.size)
)
}
it should "handle getCode request" in new TestSetup {
val address = Address(ByteString(Hex.decode("abbb6bebfa05aa13e908eaa492bd7a8343760477")))
storagesInstance.storages.evmCodeStorage.put(ByteString("code hash"), ByteString("code code code")).commit()
import MerklePatriciaTrie.defaultByteArraySerializable
val mpt =
MerklePatriciaTrie[Array[Byte], Account](storagesInstance.storages.stateStorage.getBackingStorage(0))
.put(
crypto.kec256(address.bytes.toArray[Byte]),
Account(0, UInt256(0), ByteString(""), ByteString("code hash"))
)
val newBlockHeader = blockToRequest.header.copy(stateRoot = ByteString(mpt.getRootHash))
val newblock = blockToRequest.copy(header = newBlockHeader)
blockchain.storeBlock(newblock).commit()
blockchain.saveBestKnownBlocks(newblock.header.number)
val response = ethService.getCode(GetCodeRequest(address, BlockParam.Latest))
response.runSyncUnsafe() shouldEqual Right(GetCodeResponse(ByteString("code code code")))
}
it should "accept and report hashrate" in new TestSetup {
(ledger.consensus _: (() => Consensus)).expects().returns(consensus).anyNumberOfTimes()
val rate: BigInt = 42
val id = ByteString("id")
ethService.submitHashRate(SubmitHashRateRequest(12, id)).runSyncUnsafe() shouldEqual Right(
SubmitHashRateResponse(true)
)
ethService.submitHashRate(SubmitHashRateRequest(rate, id)).runSyncUnsafe() shouldEqual Right(
SubmitHashRateResponse(true)
)
val response = ethService.getHashRate(GetHashRateRequest())
response.runSyncUnsafe() shouldEqual Right(GetHashRateResponse(rate))
}
it should "combine hashrates from many miners and remove timed out rates" in new TestSetup {
(ledger.consensus _: (() => Consensus)).expects().returns(consensus).anyNumberOfTimes()
val rate: BigInt = 42
val id1 = ByteString("id1")
val id2 = ByteString("id2")
ethService.submitHashRate(SubmitHashRateRequest(rate, id1)).runSyncUnsafe() shouldEqual Right(
SubmitHashRateResponse(true)
)
Thread.sleep(minerActiveTimeout.toMillis / 2)
ethService.submitHashRate(SubmitHashRateRequest(rate, id2)).runSyncUnsafe() shouldEqual Right(
SubmitHashRateResponse(true)
)
val response1 = ethService.getHashRate(GetHashRateRequest())
response1.runSyncUnsafe() shouldEqual Right(GetHashRateResponse(rate * 2))
Thread.sleep(minerActiveTimeout.toMillis / 2)
val response2 = ethService.getHashRate(GetHashRateRequest())
response2.runSyncUnsafe() shouldEqual Right(GetHashRateResponse(rate))
}
  it should "return if node is mining based on getWork" in new TestSetup {
(ledger.consensus _: (() => Consensus)).expects().returns(consensus).anyNumberOfTimes()
ethService.getMining(GetMiningRequest()).runSyncUnsafe() shouldEqual Right(GetMiningResponse(false))
(blockGenerator.generateBlock _)
.expects(parentBlock, *, *, *, *)
.returning(PendingBlockAndState(PendingBlock(block, Nil), fakeWorld))
blockchain.storeBlock(parentBlock).commit()
ethService.getWork(GetWorkRequest())
val response = ethService.getMining(GetMiningRequest())
response.runSyncUnsafe() shouldEqual Right(GetMiningResponse(true))
}
  it should "return if node is mining based on submitWork" in new TestSetup {
(ledger.consensus _: (() => Consensus)).expects().returns(consensus).anyNumberOfTimes()
ethService.getMining(GetMiningRequest()).runSyncUnsafe() shouldEqual Right(GetMiningResponse(false))
(blockGenerator.getPrepared _).expects(*).returning(Some(PendingBlock(block, Nil)))
(appStateStorage.getBestBlockNumber _).expects().returning(0)
ethService.submitWork(
SubmitWorkRequest(ByteString("nonce"), ByteString(Hex.decode("01" * 32)), ByteString(Hex.decode("01" * 32)))
)
val response = ethService.getMining(GetMiningRequest())
response.runSyncUnsafe() shouldEqual Right(GetMiningResponse(true))
}
  it should "return if node is mining based on submitHashRate" in new TestSetup {
(ledger.consensus _: (() => Consensus)).expects().returns(consensus).anyNumberOfTimes()
ethService.getMining(GetMiningRequest()).runSyncUnsafe() shouldEqual Right(GetMiningResponse(false))
ethService.submitHashRate(SubmitHashRateRequest(42, ByteString("id")))
val response = ethService.getMining(GetMiningRequest())
response.runSyncUnsafe() shouldEqual Right(GetMiningResponse(true))
}
  it should "return if node is mining after timeout" in new TestSetup {
(ledger.consensus _: (() => Consensus)).expects().returns(consensus).anyNumberOfTimes()
(blockGenerator.generateBlock _)
.expects(parentBlock, *, *, *, *)
.returning(PendingBlockAndState(PendingBlock(block, Nil), fakeWorld))
blockchain.storeBlock(parentBlock).commit()
ethService.getWork(GetWorkRequest())
Thread.sleep(minerActiveTimeout.toMillis)
val response = ethService.getMining(GetMiningRequest())
response.runSyncUnsafe() shouldEqual Right(GetMiningResponse(false))
}
it should "return correct coinbase" in new TestSetup {
(ledger.consensus _: (() => Consensus)).expects().returns(consensus)
val response = ethService.getCoinbase(GetCoinbaseRequest())
response.runSyncUnsafe() shouldEqual Right(GetCoinbaseResponse(consensusConfig.coinbase))
}
it should "return 0 gas price if there are no transactions" in new TestSetup {
(appStateStorage.getBestBlockNumber _).expects().returning(42)
val response = ethService.getGetGasPrice(GetGasPriceRequest())
response.runSyncUnsafe() shouldEqual Right(GetGasPriceResponse(0))
}
it should "return average gas price" in new TestSetup {
blockchain.saveBestKnownBlocks(42)
blockchain
.storeBlock(Block(Fixtures.Blocks.Block3125369.header.copy(number = 42), Fixtures.Blocks.Block3125369.body))
.commit()
val response = ethService.getGetGasPrice(GetGasPriceRequest())
response.runSyncUnsafe() shouldEqual Right(GetGasPriceResponse(BigInt("20000000000")))
}
it should "getTransactionByBlockNumberAndIndexRequest return transaction by index" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
blockchain.saveBestKnownBlocks(blockToRequest.header.number)
val txIndex: Int = 1
val request = GetTransactionByBlockNumberAndIndexRequest(BlockParam.Latest, txIndex)
val response = ethService.getTransactionByBlockNumberAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
val expectedTxResponse =
TransactionResponse(blockToRequest.body.transactionList(txIndex), Some(blockToRequest.header), Some(txIndex))
response.transactionResponse shouldBe Some(expectedTxResponse)
}
  it should "getTransactionByBlockNumberAndIndexRequest return empty response if transaction does not exist when getting by index" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
val txIndex: Int = blockToRequest.body.transactionList.length + 42
val request =
GetTransactionByBlockNumberAndIndexRequest(BlockParam.WithNumber(blockToRequest.header.number), txIndex)
val response = ethService.getTransactionByBlockNumberAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
response.transactionResponse shouldBe None
}
  it should "getTransactionByBlockNumberAndIndex return empty response if block does not exist when getting by index" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
val txIndex: Int = 1
val request =
GetTransactionByBlockNumberAndIndexRequest(BlockParam.WithNumber(blockToRequest.header.number - 42), txIndex)
val response = ethService.getTransactionByBlockNumberAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
response.transactionResponse shouldBe None
}
it should "getRawTransactionByBlockNumberAndIndex return transaction by index" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
blockchain.saveBestKnownBlocks(blockToRequest.header.number)
val txIndex: Int = 1
val request = GetTransactionByBlockNumberAndIndexRequest(BlockParam.Latest, txIndex)
val response = ethService.getRawTransactionByBlockNumberAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
val expectedTxResponse = blockToRequest.body.transactionList.lift(txIndex)
response.transactionResponse shouldBe expectedTxResponse
}
  it should "getRawTransactionByBlockNumberAndIndex return empty response if transaction does not exist when getting by index" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
val txIndex: Int = blockToRequest.body.transactionList.length + 42
val request =
GetTransactionByBlockNumberAndIndexRequest(BlockParam.WithNumber(blockToRequest.header.number), txIndex)
val response = ethService.getRawTransactionByBlockNumberAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
response.transactionResponse shouldBe None
}
  it should "getRawTransactionByBlockNumberAndIndex return empty response if block does not exist when getting by index" in new TestSetup {
blockchain.storeBlock(blockToRequest).commit()
val txIndex: Int = 1
val request =
GetTransactionByBlockNumberAndIndexRequest(BlockParam.WithNumber(blockToRequest.header.number - 42), txIndex)
val response = ethService.getRawTransactionByBlockNumberAndIndex(request).runSyncUnsafe(Duration.Inf).right.get
response.transactionResponse shouldBe None
}
it should "handle getBalance request" in new TestSetup {
val address = Address(ByteString(Hex.decode("abbb6bebfa05aa13e908eaa492bd7a8343760477")))
import MerklePatriciaTrie.defaultByteArraySerializable
val mpt =
MerklePatriciaTrie[Array[Byte], Account](storagesInstance.storages.stateStorage.getBackingStorage(0))
.put(
crypto.kec256(address.bytes.toArray[Byte]),
Account(0, UInt256(123), ByteString(""), ByteString("code hash"))
)
val newBlockHeader = blockToRequest.header.copy(stateRoot = ByteString(mpt.getRootHash))
val newblock = blockToRequest.copy(header = newBlockHeader)
blockchain.storeBlock(newblock).commit()
blockchain.saveBestKnownBlocks(newblock.header.number)
val response = ethService.getBalance(GetBalanceRequest(address, BlockParam.Latest))
response.runSyncUnsafe() shouldEqual Right(GetBalanceResponse(123))
}
it should "handle MissingNodeException when getting balance" in new TestSetup {
val address = Address(ByteString(Hex.decode("abbb6bebfa05aa13e908eaa492bd7a8343760477")))
val newBlockHeader = blockToRequest.header
val newblock = blockToRequest.copy(header = newBlockHeader)
blockchain.storeBlock(newblock).commit()
blockchain.saveBestKnownBlocks(newblock.header.number)
val response = ethService.getBalance(GetBalanceRequest(address, BlockParam.Latest))
response.runSyncUnsafe() shouldEqual Left(JsonRpcError.NodeNotFound)
}
it should "handle getStorageAt request" in new TestSetup {
import io.iohk.ethereum.rlp.UInt256RLPImplicits._
val address = Address(ByteString(Hex.decode("abbb6bebfa05aa13e908eaa492bd7a8343760477")))
import MerklePatriciaTrie.defaultByteArraySerializable
val byteArrayUInt256Serializer = new ByteArrayEncoder[UInt256] {
override def toBytes(input: UInt256): Array[Byte] = input.bytes.toArray[Byte]
}
val rlpUInt256Serializer = new ByteArraySerializable[UInt256] {
override def fromBytes(bytes: Array[Byte]): UInt256 = ByteString(bytes).toUInt256
override def toBytes(input: UInt256): Array[Byte] = input.toBytes
}
val storageMpt =
io.iohk.ethereum.domain.EthereumUInt256Mpt
.storageMpt(
ByteString(MerklePatriciaTrie.EmptyRootHash),
storagesInstance.storages.stateStorage.getBackingStorage(0)
)
.put(UInt256(333), UInt256(123))
val mpt =
MerklePatriciaTrie[Array[Byte], Account](storagesInstance.storages.stateStorage.getBackingStorage(0))
.put(
crypto.kec256(address.bytes.toArray[Byte]),
Account(0, UInt256(0), ByteString(storageMpt.getRootHash), ByteString(""))
)
val newBlockHeader = blockToRequest.header.copy(stateRoot = ByteString(mpt.getRootHash))
val newblock = blockToRequest.copy(header = newBlockHeader)
blockchain.storeBlock(newblock).commit()
blockchain.saveBestKnownBlocks(newblock.header.number)
val response = ethService.getStorageAt(GetStorageAtRequest(address, 333, BlockParam.Latest))
response.runSyncUnsafe().map(v => UInt256(v.value)) shouldEqual Right(UInt256(123))
}
it should "handle get transaction count request" in new TestSetup {
val address = Address(ByteString(Hex.decode("abbb6bebfa05aa13e908eaa492bd7a8343760477")))
import MerklePatriciaTrie.defaultByteArraySerializable
val mpt =
MerklePatriciaTrie[Array[Byte], Account](storagesInstance.storages.stateStorage.getBackingStorage(0))
.put(crypto.kec256(address.bytes.toArray[Byte]), Account(999, UInt256(0), ByteString(""), ByteString("")))
val newBlockHeader = blockToRequest.header.copy(stateRoot = ByteString(mpt.getRootHash))
val newblock = blockToRequest.copy(header = newBlockHeader)
blockchain.storeBlock(newblock).commit()
blockchain.saveBestKnownBlocks(newblock.header.number)
val response = ethService.getTransactionCount(GetTransactionCountRequest(address, BlockParam.Latest))
response.runSyncUnsafe() shouldEqual Right(GetTransactionCountResponse(BigInt(999)))
}
it should "handle get transaction by hash if the tx is not on the blockchain and not in the tx pool" in new TestSetup {
(ledger.consensus _: (() => Consensus)).expects().returns(consensus)
val request = GetTransactionByHashRequest(txToRequestHash)
val response = ethService.getTransactionByHash(request).runSyncUnsafe()
pendingTransactionsManager.expectMsg(PendingTransactionsManager.GetPendingTransactions)
pendingTransactionsManager.reply(PendingTransactionsResponse(Nil))
response shouldEqual Right(GetTransactionByHashResponse(None))
}
it should "handle get transaction by hash if the tx is still pending" in new TestSetup {
(ledger.consensus _: (() => Consensus)).expects().returns(consensus)
val request = GetTransactionByHashRequest(txToRequestHash)
val response = ethService.getTransactionByHash(request).runToFuture
pendingTransactionsManager.expectMsg(PendingTransactionsManager.GetPendingTransactions)
pendingTransactionsManager.reply(
PendingTransactionsResponse(Seq(PendingTransaction(txToRequestWithSender, System.currentTimeMillis)))
)
response.futureValue shouldEqual Right(GetTransactionByHashResponse(Some(TransactionResponse(txToRequest))))
}
it should "handle get transaction by hash if the tx was already executed" in new TestSetup {
(ledger.consensus _: (() => Consensus)).expects().returns(consensus)
val blockWithTx = Block(Fixtures.Blocks.Block3125369.header, Fixtures.Blocks.Block3125369.body)
blockchain.storeBlock(blockWithTx).commit()
val request = GetTransactionByHashRequest(txToRequestHash)
val response = ethService.getTransactionByHash(request).runSyncUnsafe()
pendingTransactionsManager.expectMsg(PendingTransactionsManager.GetPendingTransactions)
pendingTransactionsManager.reply(PendingTransactionsResponse(Nil))
response shouldEqual Right(
GetTransactionByHashResponse(Some(TransactionResponse(txToRequest, Some(blockWithTx.header), Some(0))))
)
}
it should "calculate correct contract address for contract creating by transaction" in new TestSetup {
val body = BlockBody(Seq(Fixtures.Blocks.Block3125369.body.transactionList.head, contractCreatingTransaction), Nil)
val blockWithTx = Block(Fixtures.Blocks.Block3125369.header, body)
val gasUsedByTx = 4242
blockchain
.storeBlock(blockWithTx)
.and(
blockchain.storeReceipts(
Fixtures.Blocks.Block3125369.header.hash,
Seq(fakeReceipt, fakeReceipt.copy(cumulativeGasUsed = fakeReceipt.cumulativeGasUsed + gasUsedByTx))
)
)
.commit()
val request = GetTransactionReceiptRequest(contractCreatingTransaction.hash)
val response = ethService.getTransactionReceipt(request)
response.runSyncUnsafe() shouldEqual Right(
GetTransactionReceiptResponse(
Some(
TransactionReceiptResponse(
transactionHash = contractCreatingTransaction.hash,
transactionIndex = 1,
blockNumber = Fixtures.Blocks.Block3125369.header.number,
blockHash = Fixtures.Blocks.Block3125369.header.hash,
cumulativeGasUsed = fakeReceipt.cumulativeGasUsed + gasUsedByTx,
gasUsed = gasUsedByTx,
contractAddress = Some(createdContractAddress),
logs = Seq(
TxLog(
logIndex = 0,
transactionIndex = 1,
transactionHash = contractCreatingTransaction.hash,
blockHash = Fixtures.Blocks.Block3125369.header.hash,
blockNumber = Fixtures.Blocks.Block3125369.header.number,
address = fakeReceipt.logs.head.loggerAddress,
data = fakeReceipt.logs.head.data,
topics = fakeReceipt.logs.head.logTopics
)
)
)
)
)
)
}
it should "send message to pendingTransactionsManager and return an empty GetPendingTransactionsResponse" in new TestSetup {
val res = ethService.getTransactionsFromPool.runSyncUnsafe()
pendingTransactionsManager.expectMsg(GetPendingTransactions)
pendingTransactionsManager.reply(PendingTransactionsResponse(Nil))
res shouldBe PendingTransactionsResponse(Nil)
}
it should "send message to pendingTransactionsManager and return GetPendingTransactionsResponse with two transactions" in new TestSetup {
val transactions = (0 to 1)
.map(_ => {
val fakeTransaction = SignedTransactionWithSender(
Transaction(
nonce = 0,
gasPrice = 123,
gasLimit = 123,
receivingAddress = Address("0x1234"),
value = 0,
payload = ByteString()
),
signature = ECDSASignature(0, 0, 0.toByte),
sender = Address("0x1234")
)
PendingTransaction(fakeTransaction, System.currentTimeMillis)
})
.toList
val res = ethService.getTransactionsFromPool.runToFuture
pendingTransactionsManager.expectMsg(GetPendingTransactions)
pendingTransactionsManager.reply(PendingTransactionsResponse(transactions))
res.futureValue shouldBe PendingTransactionsResponse(transactions)
}
it should "send message to pendingTransactionsManager and return an empty GetPendingTransactionsResponse in case of error" in new TestSetup {
val res = ethService.getTransactionsFromPool.runSyncUnsafe()
pendingTransactionsManager.expectMsg(GetPendingTransactions)
pendingTransactionsManager.reply(new ClassCastException("error"))
res shouldBe PendingTransactionsResponse(Nil)
}
  // NOTE: TestSetup uses Ethash consensus; check `consensusConfig`.
class TestSetup(implicit system: ActorSystem) extends MockFactory with EphemBlockchainTestSetup with ApisBuilder {
val blockGenerator = mock[EthashBlockGenerator]
val appStateStorage = mock[AppStateStorage]
val keyStore = mock[KeyStore]
override lazy val ledger = mock[Ledger]
override lazy val stxLedger = mock[StxLedger]
override lazy val consensus: TestConsensus = buildTestConsensus().withBlockGenerator(blockGenerator)
val syncingController = TestProbe()
val pendingTransactionsManager = TestProbe()
val ommersPool = TestProbe()
val filterManager = TestProbe()
override lazy val consensusConfig = ConsensusConfigs.consensusConfig
val miningConfig = ConsensusConfigs.ethashConfig
val fullConsensusConfig = ConsensusConfigs.fullConsensusConfig
val minerActiveTimeout: FiniteDuration = 5.seconds
val getTransactionFromPoolTimeout: FiniteDuration = 5.seconds
val filterConfig = new FilterConfig {
override val filterTimeout: FiniteDuration = Timeouts.normalTimeout
override val filterManagerQueryTimeout: FiniteDuration = Timeouts.normalTimeout
}
lazy val minerKey = crypto.keyPairFromPrvKey(
ByteStringUtils.string2hash("00f7500a7178548b8a4488f78477660b548c9363e16b584c21e0208b3f1e0dc61f")
)
lazy val difficultyCalc = new EthashDifficultyCalculator(blockchainConfig)
lazy val restrictedGenerator = new RestrictedEthashBlockGeneratorImpl(
validators = MockValidatorsAlwaysSucceed,
blockchain = blockchain,
blockchainConfig = blockchainConfig,
consensusConfig = consensusConfig,
blockPreparator = consensus.blockPreparator,
difficultyCalc,
minerKey
)
val currentProtocolVersion = 11
val jsonRpcConfig = JsonRpcConfig(Config.config, available)
lazy val ethService = new EthService(
blockchain,
ledger,
stxLedger,
keyStore,
pendingTransactionsManager.ref,
syncingController.ref,
ommersPool.ref,
filterManager.ref,
filterConfig,
blockchainConfig,
currentProtocolVersion,
jsonRpcConfig,
getTransactionFromPoolTimeout,
Timeouts.shortTimeout
)
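    // Shared fixtures: `blockToRequest` reuses fixture block 3125369 and backs the getTransaction*/
    // getRawTransaction* cases above, while `parentBlock`/`block` form a minimal two-block chain with
    // empty bodies.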
val blockToRequest = Block(Fixtures.Blocks.Block3125369.header, Fixtures.Blocks.Block3125369.body)
val blockToRequestNumber = blockToRequest.header.number
val blockToRequestHash = blockToRequest.header.hash
val blockWeight = ChainWeight.totalDifficultyOnly(blockToRequest.header.difficulty)
val uncle = Fixtures.Blocks.DaoForkBlock.header
val uncleWeight = ChainWeight.totalDifficultyOnly(uncle.difficulty)
val blockToRequestWithUncles = blockToRequest.copy(body = BlockBody(Nil, Seq(uncle)))
val difficulty = 131072
val parentBlock = Block(
header = BlockHeader(
parentHash = ByteString.empty,
ommersHash = ByteString.empty,
beneficiary = ByteString.empty,
stateRoot = ByteString(MerklePatriciaTrie.EmptyRootHash),
transactionsRoot = ByteString.empty,
receiptsRoot = ByteString.empty,
logsBloom = ByteString.empty,
difficulty = difficulty,
number = 0,
gasLimit = 16733003,
gasUsed = 0,
unixTimestamp = 1494604900,
extraData = ByteString.empty,
mixHash = ByteString.empty,
nonce = ByteString.empty
),
body = BlockBody.empty
)
val block = Block(
header = BlockHeader(
parentHash = parentBlock.header.hash,
ommersHash = ByteString(Hex.decode("1dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347")),
beneficiary = ByteString(Hex.decode("000000000000000000000000000000000000002a")),
stateRoot = ByteString(Hex.decode("2627314387b135a548040d3ca99dbf308265a3f9bd9246bee3e34d12ea9ff0dc")),
transactionsRoot = ByteString(Hex.decode("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")),
receiptsRoot = ByteString(Hex.decode("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")),
logsBloom = ByteString(Hex.decode("00" * 256)),
difficulty = difficulty,
number = 1,
gasLimit = 16733003,
gasUsed = 0,
unixTimestamp = 1494604913,
extraData = ByteString(Hex.decode("6d696e6564207769746820657463207363616c61")),
mixHash = ByteString.empty,
nonce = ByteString.empty
),
body = BlockBody.empty
)
val mixHash = ByteString(Hex.decode("40d9bd2064406d7f22390766d6fe5eccd2a67aa89bf218e99df35b2dbb425fb1"))
val nonce = ByteString(Hex.decode("ce1b500070aeec4f"))
val seedHash = ByteString(Hex.decode("00" * 32))
val powHash = ByteString(kec256(getEncodedWithoutNonce(block.header)))
val target = ByteString((BigInt(2).pow(256) / difficulty).toByteArray)
val v: Byte = 0x1c
val r = ByteString(Hex.decode("b3493e863e48a8d67572910933114a4c0e49dac0cb199e01df1575f35141a881"))
val s = ByteString(Hex.decode("5ba423ae55087e013686f89ad71a449093745f7edb4eb39f30acd30a8964522d"))
val payload = ByteString(
Hex.decode(
"60606040526040516101e43803806101e483398101604052808051820191906020018051906020019091908051" +
"9060200190919050505b805b83835b600060018351016001600050819055503373ffffffffffffffffffffffff" +
"ffffffffffffffff16600260005060016101008110156100025790900160005b50819055506001610102600050" +
"60003373ffffffffffffffffffffffffffffffffffffffff168152602001908152602001600020600050819055" +
"50600090505b82518110156101655782818151811015610002579060200190602002015173ffffffffffffffff" +
"ffffffffffffffffffffffff166002600050826002016101008110156100025790900160005b50819055508060" +
"0201610102600050600085848151811015610002579060200190602002015173ffffffffffffffffffffffffff" +
"ffffffffffffff168152602001908152602001600020600050819055505b80600101905080506100b9565b8160" +
"00600050819055505b50505080610105600050819055506101866101a3565b610107600050819055505b505b50" +
"5050602f806101b56000396000f35b600062015180420490506101b2565b905636600080376020600036600073" +
"6ab9dd83108698b9ca8d03af3c7eb91c0e54c3fc60325a03f41560015760206000f30000000000000000000000" +
"000000000000000000000000000000000000000060000000000000000000000000000000000000000000000000" +
"000000000000000200000000000000000000000000000000000000000000000000000000000000000000000000" +
"0000000000000000000000000000000000000000000000000000020000000000000000000000006c9fbd9a7f06" +
"d62ce37db2ab1e1b0c288edc797a000000000000000000000000c482d695f42b07e0d6a22925d7e49b46fd9a3f80"
)
)
//tx 0xb7b8cc9154896b25839ede4cd0c2ad193adf06489fdd9c0a9dfce05620c04ec1
val contractCreatingTransaction: SignedTransaction = SignedTransaction(
Transaction(
nonce = 2550,
gasPrice = BigInt("20000000000"),
gasLimit = 3000000,
receivingAddress = None,
value = 0,
payload
),
v,
r,
s,
0x3d.toByte
)
val fakeReceipt = Receipt.withHashOutcome(
postTransactionStateHash = ByteString(Hex.decode("01" * 32)),
cumulativeGasUsed = 43,
logsBloomFilter = ByteString(Hex.decode("00" * 256)),
logs = Seq(TxLogEntry(Address(42), Seq(ByteString(Hex.decode("01" * 32))), ByteString(Hex.decode("03" * 32))))
)
val createdContractAddress = Address(Hex.decode("c1d93b46be245617e20e75978f5283c889ae048d"))
val txToRequest = Fixtures.Blocks.Block3125369.body.transactionList.head
val txSender = SignedTransaction.getSender(txToRequest).get
val txToRequestWithSender = SignedTransactionWithSender(txToRequest, txSender)
val txToRequestHash = txToRequest.hash
val fakeWorld = blockchain.getReadOnlyWorldStateProxy(
None,
UInt256.Zero,
ByteString.empty,
noEmptyAccounts = false,
ethCompatibleStorage = true
)
}
}
| input-output-hk/etc-client | src/test/scala/io/iohk/ethereum/jsonrpc/EthServiceSpec.scala | Scala | mit | 59,050 |
object SyntheticNonSynthetic2 {
def foo[A >: Exception] (a : A) {}
}
| felixmulder/scala | test/files/positions/SyntheticNonSynthetic2.scala | Scala | bsd-3-clause | 71 |
package concrete.generator.cspompatterns
import cspom.compiler.ConstraintCompiler._
import cspom.compiler.{ConstraintCompilerNoData, Delta, Functions}
import cspom.extension.MDDRelation
import cspom.util.{Infinitable, IntInterval, RangeSet}
import cspom.variable.IntExpression
import cspom.variable.IntExpression.implicits.iterable
import cspom.{CSPOM, CSPOMConstraint}
import mdd.MDD
object Pow extends ConstraintCompilerNoData {
def functions = Functions("pow")
override def matchBool(constraint: CSPOMConstraint[_], problem: CSPOM): Boolean =
true
def compile(constraint: CSPOMConstraint[_], problem: CSPOM): Delta = {
val args = constraint.arguments.map { case IntExpression(e) => e }
val IntExpression(r) = constraint.result
val Seq(x, y) = args
val mdd = try {
pow(
iterable(x).toSeq.map(cspom.util.Math.toIntExact),
iterable(y).toSeq.map(cspom.util.Math.toIntExact),
iterable(r).r)
.reduce()
} catch {
case e: ArithmeticException =>
throw new IllegalStateException(s"Could not handle $r = $x ^ $y", e)
}
// println(constraint)
// println(mdd)
val rDom = RangeSet(mdd.projectOne(2).map { i => IntInterval.singleton(i) })
val newArgs = CSPOM.IntSeqOperations(Seq(x, y, r))
replaceCtr(constraint, newArgs in new MDDRelation(mdd), problem) ++ replace(r, reduceDomain(r, rDom), problem)
}
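  // Builds the ternary relation {(x, y, x^y)} as an MDD: for every x in xs and every non-negative y
  // in ys, the tuple is kept only if x^y falls within the span [lb, ub] of the result domain.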
private def pow(xs: Seq[Int], ys: Seq[Int], rSpan: RangeSet[Infinitable]): MDD = {
val lb = rSpan.span.lb
val ub = rSpan.span.ub
val trie = xs.map { x =>
val xb = BigInt(x)
val trie = for {
y <- ys
if y >= 0
rb = xb.pow(y)
if lb <= rb && ub >= rb
} yield {
y -> MDD(Array(cspom.util.Math.toIntExact(rb)))
}
x -> MDD.fromTrie(trie)
}
MDD.fromTrie(trie)
}
}
| concrete-cp/concrete | src/main/scala/concrete/generator/cspompatterns/Pow.scala | Scala | lgpl-2.1 | 1,873 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.helpers
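/**
 * Factory for `Iterable`s whose `iterator` re-evaluates the given by-name expression on every
 * traversal.
 *
 * Illustrative use (names are hypothetical):
 * {{{
 * val rows = LazyIterable(fetchRows()) // fetchRows(): Iterator[Row], re-run on each traversal
 * rows.size                            // forces one evaluation of fetchRows()
 * }}}
 */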
object LazyIterable {
def apply[T](f: => Iterator[T]) = new LazyIterable[T] {
override def iterator = f
}
}
sealed abstract class LazyIterable[T] extends Iterable[T]
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/helpers/LazyIterable.scala | Scala | apache-2.0 | 1,028 |
package to.gplus.rayracine.pack
import org.scalatest.Suite
class SimplePackTest extends Suite {
import java.io.{DataOutputStream, DataInputStream,
ByteArrayOutputStream, ByteArrayInputStream}
import PackKit._
implicit val optIntPack = optionPack[Int]
implicit val optStringPack = optionPack[String]
implicit val optBigDecimalPack = optionPack[BigDecimal]
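  // Option packers derived from the element packers above; Pack.pack/Pack.unpack resolve these
  // implicitly for Option-typed values (assuming Pack's API takes an implicit packer parameter).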
def packNone (os: Output, opt: Option[String]) = {
Pack.pack (os, opt)
}
def testOptionPack () = {
val ba = new ByteArrayOutputStream (512)
val os = new DataOutputStream (ba) with Output
val str = "Ray"
val optStr = Some ("ray"): Option[String]
val optInt = Some (3): Option[Int]
val optBD = Some (BigDecimal (3.025)): Option[BigDecimal]
Pack.pack (os, str)
Pack.pack (os, 3)
Pack.pack (os, optInt)
Pack.pack (os, optStr)
Pack.pack (os, optBD)
os.flush
val is = new DataInputStream (new ByteArrayInputStream (ba.toByteArray)) with Input
assert (Pack.unpack[String] (is) == str)
assert (Pack.unpack[Int] (is) == 3)
assert (Pack.unpack[Option[Int]] (is) == optInt)
assert (Pack.unpack[Option[String]] (is) == optStr)
assert (Pack.unpack[Option[BigDecimal]] (is) == optBD)
}
}
| RayRacine/Pack | src/test/scala/to/gplus/rayracine/pack/SimpleTest.scala | Scala | mit | 1,264 |
/*
* Copyright (c) 2012-2018 Broad Institute, Inc.
*
* This file is part of Pilon.
*
* Pilon is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2
* as published by the Free Software Foundation.
*
* Pilon is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Pilon. If not, see <http://www.gnu.org/licenses/>.
*
*/
package org.broadinstitute.pilon
import math.pow
// Calculates the moments of a sample distribution.
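// Illustrative use (values are hypothetical):
//   val depths = Array(10.0, 12.0, 11.0, 13.0)
//   val dist = new NormalDistribution(depths, 2)
//   dist.toSigma(14.0)   // how many standard deviations 14.0 lies from the mean
//   dist.fromSigma(-1.0) // the value one standard deviation below the mean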
class NormalDistribution(values: Array[Double], nMoments: Int) {
require(values.size > 0, "can't compute moments of empty distribution")
require(nMoments > 1, "assumes at least two moments (mean, stddev)")
val mean = values.sum / values.size
val median = {
val sorted = values.clone.sorted
val n = sorted.size
if (n % 2 == 0)
      (sorted(n/2) + sorted(n/2 - 1)) / 2.0
    else
      sorted(n/2)
}
val moments = new Array[Double](nMoments)
moments(0) = mean
for (n <- 1 until nMoments) {
for (v <- values) moments(n) += pow(v - mean, n + 1)
moments(n) = pow(moments(n) / values.size, 1.0 / (n + 1))
}
def toSigma(value: Double, maxSigma: Double = 5.0) = {
val sigma = (value - moments(0)) / moments(1)
sigma
}
def toSigma10x(value: Double) = (toSigma(value) * 10.0).round.toInt
def fromSigma(sigma: Double) = moments(0) + sigma * moments(1)
def this(ivalues: Array[Int], nMoments: Int) =
this(for { v<-ivalues } yield v.toDouble, nMoments)
def this(ivalues: Array[Short], nMoments: Int) =
this(for { v<-ivalues } yield v.toDouble, nMoments)
def this(ivalues: Array[Byte], nMoments: Int) =
this(for { v<-ivalues } yield v.toDouble, nMoments)
override def toString = "<moments: n=" + values.size + ",moments=" + (moments mkString ",") + ">"
}
| B-UMMI/INNUca | src/pilon_v1.23/pilon/src/main/scala/org/broadinstitute/pilon/NormalDistribution.scala | Scala | gpl-3.0 | 2,099 |
/*
* Copyright (C) 2015 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.core.expansion
import java.io.File
import org.openmole.core.compiler.ScalaREPL.CompilationError
import org.openmole.core.compiler._
import org.openmole.core.context._
import org.openmole.core.exception._
import org.openmole.core.fileservice.FileService
import org.openmole.core.pluginmanager._
import org.openmole.tool.types.TypeTool._
import org.openmole.core.workspace.TmpDirectory
import org.openmole.tool.cache._
import org.openmole.tool.random._
import scala.util._
trait CompilationClosure[+T] extends ScalaCompilation.ContextClosure[T] {
def apply(context: Context, rng: RandomProvider, newFile: TmpDirectory): T
}
/**
* Methods for compiling scala code
*/
object ScalaCompilation {
/**
* OpenMOLE namespace to import
* @return
*/
def openMOLEImports = Seq(s"${org.openmole.core.code.CodePackage.namespace}._")
/**
* Prepend OpenMOLE imports to a script
* @param code
* @return
*/
def addImports(code: String) =
s"""
|${openMOLEImports.map("import " + _).mkString("\\n")}
|
|$code""".stripMargin
/**
* Get osgi bundles given a sequence of plugins
* @param plugins
* @return
*/
def priorityBundles(plugins: Seq[File]) = {
val pluginBundles = plugins.flatMap(PluginManager.bundle)
pluginBundles ++ pluginBundles.flatMap(PluginManager.allPluginDependencies) ++ PluginManager.bundleForClass(this.getClass)
}
/**
* Compile scala code using a [[org.openmole.core.compiler.Interpreter]]
*
* @param script
* @param plugins
* @param libraries
* @param newFile
* @param fileService
* @tparam RETURN
* @return
*/
private def compile[RETURN](script: Script, plugins: Seq[File] = Seq.empty, libraries: Seq[File] = Seq.empty)(implicit newFile: TmpDirectory, fileService: FileService) = {
val osgiMode = org.openmole.core.compiler.Activator.osgi
val interpreter =
if (osgiMode) Interpreter(priorityBundles(plugins), libraries)
else Interpreter(jars = libraries)
def errorMsg =
if (osgiMode) s"""in osgi mode with priority bundles [${priorityBundles(plugins).map(b ⇒ s"${b.getSymbolicName}").mkString(", ")}], libraries [${libraries.mkString(", ")}], classpath [${OSGiScalaCompiler.classPath(priorityBundles(plugins), libraries).mkString(", ")}]."""
else s"""in non osgi mode with libraries ${libraries.mkString(", ")}"""
Try[RETURN] {
val evaluated = interpreter.eval(addImports(script.code))
if (evaluated == null) throw new InternalProcessingError(
s"""The return value of the script was null:
|${script.code}""".stripMargin
)
evaluated.asInstanceOf[RETURN]
} match {
case util.Success(s) ⇒ Success(s)
case util.Failure(e: CompilationError) ⇒
val errors = ScalaREPL.compilationMessage(e.errorMessages.filter(_.error), script.originalCode, lineOffset = script.headerLines + 2)
val userBadDataError =
new UserBadDataError(
s"""${errors}
|With interpreter $errorMsg"
|""".stripMargin
)
util.Failure(userBadDataError)
case util.Failure(e) ⇒ util.Failure(new InternalProcessingError(s"Error while compiling with interpreter $errorMsg", e))
}
}
def function[RETURN](inputs: Seq[Val[_]], source: String, plugins: Seq[File], libraries: Seq[File], wrapping: OutputWrapping[RETURN], returnType: ValType[_ <: RETURN])(implicit newFile: TmpDirectory, fileService: FileService) = {
val s = script(inputs, source, wrapping, returnType)
compile[CompilationClosure[RETURN]](s, plugins, libraries)
}
def closure[RETURN](inputs: Seq[Val[_]], source: String, plugins: Seq[File], libraries: Seq[File], wrapping: OutputWrapping[RETURN], returnType: ValType[_ <: RETURN])(implicit newFile: TmpDirectory, fileService: FileService) =
function[RETURN](inputs, source, plugins, libraries, wrapping, returnType)
/**
* prefix used for input values in [[script]] construction
* @return
*/
def prefix = "_input_value_"
/**
* name of the input object in [[script]] construction
* @return
*/
def inputObject = "input"
/**
* Embed script elements in a compilable String.
   * - an anonymous instance of [[CompilationClosure]], parametrized with the requested return type (rendered as a native Scala type string), is created by this code; inputs are exposed through the `input` object.
* @param inputs input prototypes
* @param source the source code in itself
* @param wrapping how outputs are wrapped as code string
* @param returnType the return type of the script
* @tparam RETURN
* @return
*/
def script[RETURN](inputs: Seq[Val[_]], source: String, wrapping: OutputWrapping[RETURN], returnType: ValType[_ <: RETURN]) = {
val header =
s"""new ${classOf[CompilationClosure[_]].getName}[${ValType.toTypeString(returnType)}] {
| def apply(${prefix}context: ${manifest[Context].toString}, ${prefix}RNG: ${manifest[RandomProvider].toString}, ${prefix}NewFile: ${manifest[TmpDirectory].toString}) = {
| object $inputObject {
| ${inputs.toSeq.map(i ⇒ s"""var ${i.name} = ${prefix}context("${i.name}").asInstanceOf[${ValType.toTypeString(i.`type`)}]""").mkString("; ")}
| }
| import ${inputObject}._
| implicit def ${Val.name(Variable.openMOLENameSpace, "RNGProvider")} = ${prefix}RNG
| implicit def ${Val.name(Variable.openMOLENameSpace, "NewFile")} = ${prefix}NewFile
|"""
val code =
s"""$header
| $source
| ${wrapping.wrapOutput}
| }: ${ValType.toTypeString(returnType)}
|}""".stripMargin
Script(code, source, header.split("\\n").size + 1)
}
case class Script(code: String, originalCode: String, headerLines: Int)
def static[R](
code: String,
inputs: Seq[Val[_]],
wrapping: OutputWrapping[R] = RawOutput(),
libraries: Seq[File] = Seq.empty,
plugins: Seq[File] = Seq.empty
)(implicit m: Manifest[_ <: R], newFile: TmpDirectory, fileService: FileService) =
closure[R](inputs, code, plugins, libraries, wrapping, ValType(m)).get
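  // `dynamic` defers compilation until the actual inputs are known: a closure is compiled lazily per
  // distinct, sorted set of input prototypes and memoised in `cache`, so repeated evaluations with the
  // same inputs reuse the compiled closure.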
def dynamic[R: Manifest](code: String, wrapping: OutputWrapping[R] = RawOutput[R]()) = {
class ScalaWrappedCompilation {
def returnType = ValType.apply[R]
val cache = Cache(collection.mutable.HashMap[Seq[Val[_]], Try[ContextClosure[R]]]())
def compiled(context: Context)(implicit newFile: TmpDirectory, fileService: FileService): Try[ContextClosure[R]] = {
val contextPrototypes = context.values.map { _.prototype }.toSeq
compiled(contextPrototypes)
}
def compiled(inputs: Seq[Val[_]])(implicit newFile: TmpDirectory, fileService: FileService): Try[ContextClosure[R]] =
cache().synchronized {
val allInputMap = inputs.groupBy(_.name)
val duplicatedInputs = allInputMap.filter { _._2.size > 1 }.map(_._2)
duplicatedInputs match {
case Nil ⇒
def sortedInputNames = inputs.map(_.name).distinct.sorted
val scriptInputs = sortedInputNames.map(n ⇒ allInputMap(n).head)
cache().getOrElseUpdate(
scriptInputs,
closure[R](scriptInputs, code, Seq.empty, Seq.empty, wrapping, returnType)
)
case duplicated ⇒ throw new UserBadDataError("Duplicated inputs: " + duplicated.mkString(", "))
}
}
def validate = Validate { p ⇒
import p._
compiled(inputs) match {
case Success(_) ⇒ Seq()
case Failure(e) ⇒ Seq(e)
}
}
def apply()(implicit newFile: TmpDirectory, fileService: FileService): FromContext[R] = FromContext { p ⇒
val closure = compiled(p.context).get
try closure.apply(p.context, p.random, p.newFile)
catch {
case t: Throwable ⇒ throw new UserBadDataError(t, s"Error in execution of compiled closure in context: ${p.context}")
}
}
}
new ScalaWrappedCompilation()
}
type ContextClosure[+R] = (Context, RandomProvider, TmpDirectory) ⇒ R
trait OutputWrapping[+R] {
def wrapOutput: String
}
/**
* Wraps a prototype set as compilable code (used to build the [[script]])
* @param outputs
*/
case class WrappedOutput(outputs: PrototypeSet) extends OutputWrapping[java.util.Map[String, Any]] {
def wrapOutput =
s"""
|scala.jdk.CollectionConverters.MapHasAsJava(Map[String, Any]( ${outputs.toSeq.map(p ⇒ s""" "${p.name}" -> (${p.name}: ${ValType.toTypeString(p.`type`)})""").mkString(",")} )).asJava
|""".stripMargin
}
case class RawOutput[T]() extends OutputWrapping[T] { compilation ⇒
def wrapOutput = ""
}
}
| openmole/openmole | openmole/core/org.openmole.core.expansion/src/main/scala/org/openmole/core/expansion/ScalaCompilation.scala | Scala | agpl-3.0 | 9,492 |
package controllers.s_eligibility
import org.specs2.mutable._
import play.api.test.FakeRequest
import play.api.test.Helpers._
import models.domain._
import controllers.s_eligibility
import models.domain.Claim
import models.view.CachedClaim
import utils.WithApplication
class GApproveSpec extends Specification {
section("unit", models.domain.CarersAllowance.id)
"""Can you get Carer's Allowance""" should {
"acknowledge that the carer is eligible for allowance" in new WithApplication with Claiming {
val request = FakeRequest().withSession(CachedClaim.key -> claimKey)
val claim = Claim(CachedClaim.key).update(Benefits(benefitsAnswer = "yes"))
.update(Eligibility(hours = "yes", over16 = "yes", origin = "GB"))
cache.set("default"+claimKey, claim)
val result = s_eligibility.CarersAllowance.approve(request)
contentAsString(result) must contain("section class=\"prompt e-prompt\"")
}
"note that the carer is not eligible for allowance" in new WithApplication with Claiming {
val request = FakeRequest().withSession(CachedClaim.key -> claimKey)
val claim = Claim(CachedClaim.key).update(Benefits(benefitsAnswer = "yes"))
.update(Eligibility(hours = "yes", over16 = "no", origin = "GB"))
cache.set("default"+claimKey, claim)
val result = s_eligibility.CarersAllowance.approve(request)
contentAsString(result) must contain("section class=\"prompt e-prompt\"")
}
}
section("unit", models.domain.CarersAllowance.id)
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/test/controllers/s_eligibility/GApproveSpec.scala | Scala | mit | 1,525 |
package com.yourtion.Pattern16
/**
* Created by Yourtion on 05/04/2017.
*/
object PartialExamples {
def addTwoInts(intOne: Int, intTwo: Int) = intOne + intTwo
def taxForState(amount: Double, state: Symbol) = state match {
// Simple tax logic, for example only!
    case 'NY => amount * 0.0645
    case 'PA => amount * 0.045
// Rest of states...
}
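  // Partial application below fixes one argument and leaves the other open (`_` annotated with its
  // type), yielding single-argument functions such as `addFortyTwo` and `nyTax`.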
def run(): Unit = {
println("PartialExamples :")
val addFortyTwo = addTwoInts(42, _: Int)
println("100 addFortyTwo: " + addFortyTwo(100))
val nyTax = taxForState(_: Double, 'NY)
println("nyTax 100 : " + nyTax(100))
val paTax = taxForState(_: Double, 'PA)
println("paTax 100 : " + paTax(100))
}
}
| yourtion/LearningFunctionalProgramming | Scala/src/com/yourtion/Pattern16/PartialExamples.scala | Scala | mit | 705 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import org.scalatest.{Matchers, WordSpec}
import uk.gov.hmrc.ct.accounts.frs102.BoxesFixture
import uk.gov.hmrc.ct.box.CtValidation
class ACQ5032Spec extends WordSpec with Matchers with BoxesFixture {
"ACQ5032" should {
"for Full Accounts pass validation" when {
"all no value" in {
ac44noValue
ac45noValue
acq5031noValue
acq5033noValue
acq5034noValue
acq5035noValue
ACQ5032(None).validate(boxRetriever) shouldBe empty
}
}
"for Full Accounts fail validation" when {
val cannotExistError = Set(CtValidation(Some("ACQ5032"),"error.ACQ5032.cannot.exist",None))
"ac44,ac45 have no value and acq5032 has value" in {
ac44noValue
ac45noValue
acq5031noValue
acq5033noValue
acq5034noValue
acq5035noValue
ACQ5032(Some(false)).validate(boxRetriever) shouldBe cannotExistError
}
}
}
}
| hmrc/ct-calculations | src/test/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/ACQ5032Spec.scala | Scala | apache-2.0 | 1,596 |
/*
* _ _
* _ __ ___ | |__ | | ___
* | '_ \\ / _ \\| '_ \\| |/ _ \\ noble :: norcane blog engine
* | | | | (_) | |_) | | __/ Copyright (c) 2016-2018 norcane
* |_| |_|\\___/|_.__/|_|\\___|
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
* in compliance with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License
* is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
* or implied. See the License for the specific language governing permissions and limitations under
* the License.
*/
package com.norcane.noble.api
import java.io.InputStream
import com.norcane.noble.api.models.{BlogInfo, BlogPostMeta, StaticPageMeta, StorageConfig}
/**
* Factory class for the concrete implementation of [[BlogStorage]]. This factory class is used to
 * register a custom storage implementation into Noble. The ''storage type'' is the unique string
 * name of the concrete storage implementation, used to refer to it in the blog configuration file
 * (e.g. `git` for a storage implementation using Git as the physical storage). For further details
 * about creating and registering a custom blog storage, please refer to the official project documentation.
*
* @author Vaclav Svejcar ([email protected])
*/
trait BlogStorageFactory {
/**
* ''Storage type'' represents the unique string name of the blog storage implementation (for
   * example `git` or `mongodb`). The storage type is used to refer to the specific implementation in
   * the blog configuration file.
*
* @return storage type
*/
def storageType: String
/**
   * Creates a new instance of the specific [[BlogStorage]] implementation. It receives the blog
   * storage configuration and the map of all registered [[FormatSupport]] instances as parameters.
*
* @param config blog storage configuration
* @param formatSupports map of all registered [[FormatSupport]] instances
* @return [[BlogStorage]] instance or error details in case of failure
*/
def create(config: StorageConfig,
formatSupports: Map[String, FormatSupport]): Either[BlogStorageError, BlogStorage]
}
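/* A minimal factory sketch (illustrative only; `InMemoryStorage` is hypothetical):
 *
 *   class InMemoryStorageFactory extends BlogStorageFactory {
 *     override def storageType: String = "in-memory"
 *     override def create(config: StorageConfig,
 *                         formatSupports: Map[String, FormatSupport]): Either[BlogStorageError, BlogStorage] =
 *       Right(new InMemoryStorage(config, formatSupports))
 *   }
 */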
/**
* Represents the blog storage, from which the blog info, blog posts and blog assets are loaded.
* Please note that every implementation '''must''' satisfy the following rules:
*
 * - Noble's blog storage is expected to be versioned. Each version can be described by its
 *   ''version ID'' (e.g. sequential numbers starting from 0, or a Git commit ID). The current (i.e.
 *   last known) version ID must be provided by the [[BlogStorage#currentVersionId]] method.
 * - Any method accepting `versionId` as its parameter '''must''' always return the same result
* for the same version ID.
*
* @author Vaclav Svejcar ([email protected])
*/
trait BlogStorage {
/**
* Returns the current (last known) version ID.
*
* @return current version ID
*/
def currentVersionId: String
/**
* Loads the blog info (blog title, theme name, ...) from the storage, using the `versionId`
* parameter as the ID of the content version. Blog info is loaded only during application
* startup and when blog reloading is triggered.
*
* @param versionId version ID
* @return loaded blog info or error details in case of failure
*/
def loadInfo(versionId: String): Either[BlogStorageError, BlogInfo]
/**
   * Loads the content for the blog post specified by its metadata, using the `versionId` parameter as
   * the ID of the content version. Blog post content must be formatted either as plain text or
   * as HTML.
*
* @param versionId version ID
* @param post blog post metadata
* @param placeholders text placeholders (as map of their names and actual values to
* replace placeholders with)
* @return blog post content or error details in case of failure
*/
def loadPostContent(versionId: String, post: BlogPostMeta,
placeholders: Map[String, Any]): Either[BlogStorageError, String]
def loadPageContent(versionId: String, page: StaticPageMeta,
placeholders: Map[String, Any]): Either[BlogStorageError, String]
/**
* Loads the collection of all blog posts (metadata only), using the `versionId` parameter as
* the ID of the content version.
*
* @param versionId version ID
* @return collection of all blog posts (metadata only)
*/
def loadBlogPosts(versionId: String): Either[BlogStorageError, Seq[BlogPostMeta]]
def loadPages(versionId: String): Either[BlogStorageError, Seq[StaticPageMeta]]
/**
* Loads the blog asset, specified by its path, using the `versionId` parameter as
* the ID of the content version.
*
* @param versionId version ID
* @param path blog asset path
* @return blog asset stream or error details in case of failure
*/
def loadAsset(versionId: String, path: String): Either[BlogStorageError, ContentStream]
}
/**
* Simple wrapper class around the standard Java's `InputStream`, adding info about stream length
* in bytes.
*
* @param stream `InputStream` instance
* @param length length of the input stream in bytes
*/
case class ContentStream(stream: InputStream, length: Long)
/**
* Error indicating failure occurred during some of the [[BlogStorage]] operations.
*
* @param message error message
* @param cause error cause (optional)
*/
case class BlogStorageError(message: String, cause: Option[Throwable] = None)
| norcane/noble | sdk/noble-api/src/main/scala/com/norcane/noble/api/BlogStorage.scala | Scala | apache-2.0 | 5,725 |
package sttp.client3.okhttp
import java.io.{BufferedInputStream, ByteArrayInputStream, FileInputStream, InputStream}
import sttp.capabilities.Streams
import sttp.client3.internal.{BodyFromResponseAs, FileHelpers, SttpFile, toByteArray}
import sttp.client3.ws.{GotAWebSocketException, NotAWebSocketException}
import sttp.client3.{
ResponseAs,
ResponseAsWebSocket,
ResponseAsWebSocketStream,
ResponseAsWebSocketUnsafe,
WebSocketResponseAs
}
import sttp.model.ResponseMetadata
import sttp.monad.MonadError
import sttp.monad.syntax._
import sttp.ws.{WebSocket, WebSocketFrame}
import scala.util.Try
private[okhttp] trait BodyFromOkHttp[F[_], S] {
val streams: Streams[S]
implicit val monad: MonadError[F]
def responseBodyToStream(inputStream: InputStream): streams.BinaryStream
private def fromWs[TT](r: WebSocketResponseAs[TT, _], ws: WebSocket[F], meta: ResponseMetadata): F[TT] =
r match {
case ResponseAsWebSocket(f) =>
f.asInstanceOf[(WebSocket[F], ResponseMetadata) => F[TT]](ws, meta).ensure(ws.close())
case ResponseAsWebSocketUnsafe() => ws.unit.asInstanceOf[F[TT]]
case ResponseAsWebSocketStream(_, p) =>
compileWebSocketPipe(ws, p.asInstanceOf[streams.Pipe[WebSocketFrame.Data[_], WebSocketFrame]])
}
def compileWebSocketPipe(ws: WebSocket[F], pipe: streams.Pipe[WebSocketFrame.Data[_], WebSocketFrame]): F[Unit]
def apply[T](
responseBody: InputStream,
responseAs: ResponseAs[T, _],
responseMetadata: ResponseMetadata,
ws: Option[WebSocket[F]]
): F[T] = bodyFromResponseAs(responseAs, responseMetadata, ws.toRight(responseBody))
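  // Adapts sttp's generic response-body handling to OkHttp's blocking InputStream bodies: byte-array
  // and file reads drain and close the stream eagerly, while streaming responses hand back a
  // finalizer that closes the stream once the caller is done with it.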
private lazy val bodyFromResponseAs =
new BodyFromResponseAs[F, InputStream, WebSocket[F], streams.BinaryStream] {
override protected def withReplayableBody(
response: InputStream,
replayableBody: Either[Array[Byte], SttpFile]
): F[InputStream] = {
(replayableBody match {
case Left(byteArray) => new ByteArrayInputStream(byteArray)
case Right(file) => new BufferedInputStream(new FileInputStream(file.toFile))
}).unit
}
override protected def regularIgnore(response: InputStream): F[Unit] = monad.eval(response.close())
override protected def regularAsByteArray(response: InputStream): F[Array[Byte]] =
monad.fromTry {
val body = Try(toByteArray(response))
response.close()
body
}
override protected def regularAsFile(response: InputStream, file: SttpFile): F[SttpFile] =
monad
.fromTry {
val body = Try(FileHelpers.saveFile(file.toFile, response))
response.close()
body.map(_ => file)
}
override protected def regularAsStream(response: InputStream): F[(streams.BinaryStream, () => F[Unit])] =
monad.eval((responseBodyToStream(response), () => monad.eval(response.close())))
override protected def handleWS[T](
responseAs: WebSocketResponseAs[T, _],
meta: ResponseMetadata,
ws: WebSocket[F]
): F[T] = fromWs(responseAs, ws, meta)
override protected def cleanupWhenNotAWebSocket(response: InputStream, e: NotAWebSocketException): F[Unit] =
monad.eval(response.close())
override protected def cleanupWhenGotWebSocket(response: WebSocket[F], e: GotAWebSocketException): F[Unit] =
response.close()
}
}
| softwaremill/sttp | okhttp-backend/src/main/scala/sttp/client3/okhttp/BodyFromOkHttp.scala | Scala | apache-2.0 | 3,441 |
package ldbc
import ingraph.bulkloader.csv.loader.LdbcUpdateStreamCsvLoader
import scala.collection.JavaConverters._
import scala.io.Source
class LdbcUpdateLoader(val csvDir: String,
val queryPrefix: String,
val queryPostfix: String) {
def load(): Iterable[LdbcUpdate] = {
implicit def longs(x: Any): Long = x.asInstanceOf[Long]
implicit def string(x: Any): String = x.asInstanceOf[String]
implicit def int(x: Any): Int = x.asInstanceOf[Int]
implicit def longList(x: Any): List[Long] = x.asInstanceOf[java.lang.Iterable[Object]].asScala.map(_.asInstanceOf[Long]).toList
implicit def stringList(x: Any): List[String] = x.asInstanceOf[java.lang.Iterable[Object]].asScala.map(_.asInstanceOf[String]).toList
implicit def orgList(x: Any): List[Organization] = { if (x == null) return List(); Organization.parse(x.asInstanceOf[String]).toList }
val loader = new LdbcUpdateStreamCsvLoader(csvDir)
for (update <- loader.getUpdates.asScala) yield {
val u = update.asScala
val updateType = u(2).asInstanceOf[Int]
updateType match {
case 1 => Update1AddPerson(u(3), u(4), u(5), u(6), u(7), u(8), u(9), u(10), u(11), u(12), u(13), u(14), u(15), u(16))
case 2 => Update2AddPostLike(u(3), u(4), u(5))
case 3 => Update3AddCommentLike(u(3), u(4), u(5))
case 4 => Update4AddForum(u(3), u(4), u(5), u(6), u(7))
case 5 => Update5AddForumMembership(u(3), u(4), u(5))
case 6 => Update6AddPost(u(3), u(4), u(5), u(6), u(7), u(8), u(9), u(10), u(11), u(12), u(13), u(14))
case 7 => Update7AddComment(u(3), u(4), u(5), u(6), u(7), u(8), u(9), u(10), u(11), u(12), u(13))
case 8 => Update8AddFriendship(u(3), u(4), u(5))
case 9 => Update9RemovePost(u(3))
case 10 => Update10RemovePerson(u(3))
case 11 => Update11RemoveHasInterest(u(3), u(4))
}
}
}
def generateQuerySpecifications(): Iterable[String] = {
val updates: Iterable[LdbcUpdate] = load()
for (up <- updates) yield {
up match {
case up: Update1AddPerson => update(up)
case up: Update2AddPostLike => update(up)
case up: Update3AddCommentLike => update(up)
case up: Update4AddForum => update(up)
case up: Update5AddForumMembership => update(up)
case up: Update6AddPost => update(up)
case up: Update7AddComment => update(up)
case up: Update8AddFriendship => update(up)
case up: Update9RemovePost => update(up)
case up: Update10RemovePerson => update(up)
case up: Update11RemoveHasInterest => update(up)
}
}
}
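  // Rendering helpers: convert parameter values into the literal syntax expected by the query
  // templates (single-quoted strings with escaped quotes, bracketed lists, and [id, year] pairs).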
def convertLongList(longs: List[Long]): String = {
"[" + longs.mkString(", ") + "]"
}
def convertStringList(strings: List[String]): String = {
"[" + strings.map(convertString).mkString(", ") + "]"
}
def convertOrgList(organizations: List[Organization]): String = {
"[" +
organizations.map(
it => s"[${it.organizationId}, ${it.year}]"
).mkString(", ") +
"]"
}
def convertString(str: String): String = s"'${str.replace("'", "\\\\'")}'"
def update(u: Update1AddPerson): String = {
val parameters = Map(
"personId" -> u.personId,
"personFirstName" -> convertString(u.personFirstName),
"personLastName" -> convertString(u.personLastName),
"gender" -> convertString(u.gender),
"birthday" -> u.birthday,
"creationDate" -> u.creationDate,
"cityId" -> u.cityId,
"locationIP" -> convertString(u.locationIP),
"browserUsed" -> convertString(u.browserUsed),
"languages" -> convertStringList(u.languages),
"emails" -> convertStringList(u.emails),
"tagIds" -> convertLongList(u.tagIds),
"studyAt" -> convertOrgList(u.studyAt),
"workAt" -> convertOrgList(u.workAt)
)
substituteParameters(1, parameters)
}
def update(u: Update2AddPostLike): String = {
val parameters = Map(
"personId" -> u.personId,
"postId" -> u.postId,
"creationDate" -> u.creationDate
)
substituteParameters(2, parameters)
}
def update(u: Update3AddCommentLike): String = {
val parameters = Map(
"personId" -> u.personId,
"commentId" -> u.commentId,
"creationDate" -> u.creationDate
)
substituteParameters(3, parameters)
}
def update(u: Update4AddForum): String = {
val parameters = Map(
"forumId" -> u.forumId,
"forumTitle" -> convertString(u.forumTitle),
"creationDate" -> u.creationDate,
"moderatorPersonId" -> u.moderatorPersonId,
"tagIds" -> convertLongList(u.tagIds)
)
substituteParameters(4, parameters)
}
def update(u: Update5AddForumMembership): String = {
val parameters = Map(
"personId" -> u.personId,
"forumId" -> u.forumId,
"joinDate" -> u.joinDate
)
substituteParameters(5, parameters)
}
def update(u: Update6AddPost): String = {
val parameters = Map(
"postId" -> u.postId,
"imageFile" -> convertString(u.imageFile),
"creationDate" -> u.creationDate,
"locationIP" -> convertString(u.locationIP),
"browserUsed" -> convertString(u.browserUsed),
"language" -> convertString(u.language),
"content" -> convertString(u.content),
"length" -> u.length,
"authorPersonId" -> u.authorPersonId,
"forumId" -> u.forumId,
"countryId" -> u.countryId,
"tagIds" -> convertLongList(u.tagIds)
)
substituteParameters(6, parameters)
}
def update(u: Update7AddComment): String = {
val parameters = Map(
"commentId" -> u.commentId,
"creationDate" -> u.creationDate,
"locationIP" -> convertString(u.locationIP),
"browserUsed" -> convertString(u.browserUsed),
"content" -> convertString(u.content),
"length" -> u.length,
"authorPersonId" -> u.authorPersonId,
"countryId" -> u.countryId,
"replyToPostId" -> u.replyToPostId,
"replyToCommentId" -> u.replyToCommentId,
"tagIds" -> convertLongList(u.tagIds)
)
substituteParameters(7, parameters)
}
def update(u: Update8AddFriendship): String = {
val parameters = Map(
"person1Id" -> u.person1Id,
"person2Id" -> u.person2Id,
"creationDate" -> u.creationDate
)
substituteParameters(8, parameters)
}
def update(u: Update9RemovePost): String = {
val parameters = Map(
"postId" -> u.postId
)
substituteParameters(9, parameters)
}
def update(u: Update10RemovePerson): String = {
val parameters = Map(
"personId" -> u.personId
)
substituteParameters(10, parameters)
}
def update(u: Update11RemoveHasInterest): String = {
val parameters = Map(
"personId" -> u.personId,
"tagId" -> u.tagId
)
substituteParameters(11, parameters)
}
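  // Loads the query template `<queryPrefix><query><queryPostfix>` and replaces every `$name`
  // placeholder with the rendered parameter value (plain literal replacement, no escaping).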
def substituteParameters(query: Int, parameters: Map[String, Any]): String = {
val queryFile = queryPrefix + query + queryPostfix
val baseQuerySpecification = Source.fromFile(queryFile).getLines().mkString("\\n")
parameters.foldLeft(baseQuerySpecification)(
(querySpecification, parameter) => querySpecification.replaceAllLiterally("$" + parameter._1.toString, parameter._2.toString)
)
}
}
| FTSRG/ingraph | tests/src/main/scala/ldbc/LdbcUpdateLoader.scala | Scala | epl-1.0 | 7,277 |
package lore.compiler.utils
import scala.concurrent.ExecutionContext
case object ExecutionContexts {
implicit val default: ExecutionContext = scala.concurrent.ExecutionContext.global
}
| marcopennekamp/lore | compiler/src/lore/compiler/utils/ExecutionContexts.scala | Scala | mit | 189 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.client
import java.io.File
import java.lang.reflect.InvocationTargetException
import java.net.{URL, URLClassLoader}
import java.util
import scala.util.Try
import org.apache.commons.io.{FileUtils, IOUtils}
import org.apache.commons.lang3.{JavaVersion, SystemUtils}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hadoop.hive.shims.ShimLoader
import org.apache.spark.SparkConf
import org.apache.spark.deploy.SparkSubmitUtils
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.util.quietly
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.internal.NonClosableMutableURLClassLoader
import org.apache.spark.util.{MutableURLClassLoader, Utils}
/** Factory for `IsolatedClientLoader` with specific versions of hive. */
private[hive] object IsolatedClientLoader extends Logging {
/**
* Creates isolated Hive client loaders by downloading the requested version from maven.
*/
def forVersion(
hiveMetastoreVersion: String,
hadoopVersion: String,
sparkConf: SparkConf,
hadoopConf: Configuration,
config: Map[String, String] = Map.empty,
ivyPath: Option[String] = None,
sharedPrefixes: Seq[String] = Seq.empty,
barrierPrefixes: Seq[String] = Seq.empty,
sharesHadoopClasses: Boolean = true): IsolatedClientLoader = synchronized {
val resolvedVersion = hiveVersion(hiveMetastoreVersion)
// We will first try to share Hadoop classes. If we cannot resolve the Hadoop artifact
// with the given version, we will use Hadoop 2.7 and then will not share Hadoop classes.
var _sharesHadoopClasses = sharesHadoopClasses
val files = if (resolvedVersions.contains((resolvedVersion, hadoopVersion))) {
resolvedVersions((resolvedVersion, hadoopVersion))
} else {
val (downloadedFiles, actualHadoopVersion) =
try {
(downloadVersion(resolvedVersion, hadoopVersion, ivyPath), hadoopVersion)
} catch {
case e: RuntimeException if e.getMessage.contains("hadoop") =>
// If the error message contains hadoop, it is probably because the hadoop
// version cannot be resolved.
val fallbackVersion = "2.7.4"
logWarning(s"Failed to resolve Hadoop artifacts for the version $hadoopVersion. We " +
s"will change the hadoop version from $hadoopVersion to $fallbackVersion and try " +
"again. Hadoop classes will not be shared between Spark and Hive metastore client. " +
"It is recommended to set jars used by Hive metastore client through " +
"spark.sql.hive.metastore.jars in the production environment.")
_sharesHadoopClasses = false
(downloadVersion(resolvedVersion, fallbackVersion, ivyPath), fallbackVersion)
}
resolvedVersions.put((resolvedVersion, actualHadoopVersion), downloadedFiles)
resolvedVersions((resolvedVersion, actualHadoopVersion))
}
new IsolatedClientLoader(
hiveVersion(hiveMetastoreVersion),
sparkConf,
execJars = files,
hadoopConf = hadoopConf,
config = config,
sharesHadoopClasses = _sharesHadoopClasses,
sharedPrefixes = sharedPrefixes,
barrierPrefixes = barrierPrefixes)
}
def hiveVersion(version: String): HiveVersion = version match {
case "12" | "0.12" | "0.12.0" => hive.v12
case "13" | "0.13" | "0.13.0" | "0.13.1" => hive.v13
case "14" | "0.14" | "0.14.0" => hive.v14
case "1.0" | "1.0.0" | "1.0.1" => hive.v1_0
case "1.1" | "1.1.0" | "1.1.1" => hive.v1_1
case "1.2" | "1.2.0" | "1.2.1" | "1.2.2" => hive.v1_2
case "2.0" | "2.0.0" | "2.0.1" => hive.v2_0
case "2.1" | "2.1.0" | "2.1.1" => hive.v2_1
case "2.2" | "2.2.0" => hive.v2_2
case "2.3" | "2.3.0" | "2.3.1" | "2.3.2" | "2.3.3" | "2.3.4" | "2.3.5" => hive.v2_3
case "3.1" | "3.1.0" | "3.1.1" => hive.v3_1
case version =>
throw new UnsupportedOperationException(s"Unsupported Hive Metastore version ($version). " +
s"Please set ${HiveUtils.HIVE_METASTORE_VERSION.key} with a valid version.")
}
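  // Resolves the requested Hive and Hadoop client artifacts (plus Guava) through Ivy/Maven, copies
  // the resulting jars into a temporary directory and returns their URLs for the isolated classloader.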
private def downloadVersion(
version: HiveVersion,
hadoopVersion: String,
ivyPath: Option[String]): Seq[URL] = {
val hiveArtifacts = version.extraDeps ++
Seq("hive-metastore", "hive-exec", "hive-common", "hive-serde")
.map(a => s"org.apache.hive:$a:${version.fullVersion}") ++
Seq("com.google.guava:guava:14.0.1",
s"org.apache.hadoop:hadoop-client:$hadoopVersion")
val classpath = quietly {
SparkSubmitUtils.resolveMavenCoordinates(
hiveArtifacts.mkString(","),
SparkSubmitUtils.buildIvySettings(
Some("http://www.datanucleus.org/downloads/maven2"),
ivyPath),
exclusions = version.exclusions)
}
val allFiles = classpath.split(",").map(new File(_)).toSet
// TODO: Remove copy logic.
val tempDir = Utils.createTempDir(namePrefix = s"hive-${version}")
allFiles.foreach(f => FileUtils.copyFileToDirectory(f, tempDir))
logInfo(s"Downloaded metastore jars to ${tempDir.getCanonicalPath}")
tempDir.listFiles().map(_.toURI.toURL)
}
// A map from a given pair of HiveVersion and Hadoop version to jar files.
// It is only used by forVersion.
private val resolvedVersions =
new scala.collection.mutable.HashMap[(HiveVersion, String), Seq[URL]]
}
/**
* Creates a [[HiveClient]] using a classloader that works according to the following rules:
* - Shared classes: Java, Scala, logging, and Spark classes are delegated to `baseClassLoader`
* allowing the results of calls to the [[HiveClient]] to be visible externally.
* - Hive classes: new instances are loaded from `execJars`. These classes are not
* accessible externally due to their custom loading.
* - [[HiveClientImpl]]: a new copy is created for each instance of `IsolatedClassLoader`.
* This new instance is able to see a specific version of hive without using reflection. Since
* this is a unique instance, it is not visible externally other than as a generic
* [[HiveClient]], unless `isolationOn` is set to `false`.
*
 * @param version The version of hive on the classpath, used to pick specific function signatures
* that are not compatible across versions.
* @param execJars A collection of jar files that must include hive and hadoop.
* @param config A set of options that will be added to the HiveConf of the constructed client.
 * @param isolationOn When true, custom versions of barrier classes will be constructed. Must be
 *                    true unless loading the version of hive that is on Spark's classloader.
 * @param sharesHadoopClasses When true, we will share Hadoop classes between Spark and the Hive metastore client.
* @param baseClassLoader The spark classloader that is used to load shared classes.
*/
private[hive] class IsolatedClientLoader(
val version: HiveVersion,
val sparkConf: SparkConf,
val hadoopConf: Configuration,
val execJars: Seq[URL] = Seq.empty,
val config: Map[String, String] = Map.empty,
val isolationOn: Boolean = true,
val sharesHadoopClasses: Boolean = true,
val baseClassLoader: ClassLoader = Thread.currentThread().getContextClassLoader,
val sharedPrefixes: Seq[String] = Seq.empty,
val barrierPrefixes: Seq[String] = Seq.empty)
extends Logging {
/** All jars used by the hive specific classloader. */
protected def allJars = execJars.toArray
protected def isSharedClass(name: String): Boolean = {
val isHadoopClass =
name.startsWith("org.apache.hadoop.") && !name.startsWith("org.apache.hadoop.hive.")
name.startsWith("org.slf4j") ||
name.startsWith("org.apache.log4j") || // log4j1.x
name.startsWith("org.apache.logging.log4j") || // log4j2
name.startsWith("org.apache.spark.") ||
(sharesHadoopClasses && isHadoopClass) ||
name.startsWith("scala.") ||
(name.startsWith("com.google") && !name.startsWith("com.google.cloud")) ||
name.startsWith("java.") ||
name.startsWith("javax.sql.") ||
sharedPrefixes.exists(name.startsWith)
}
  /** True if `name` refers to a spark class that must see a specific version of Hive. */
protected def isBarrierClass(name: String): Boolean =
name.startsWith(classOf[HiveClientImpl].getName) ||
name.startsWith(classOf[Shim].getName) ||
name.startsWith(classOf[ShimLoader].getName) ||
barrierPrefixes.exists(name.startsWith)
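  // Illustrative classification examples for the two predicates above, assuming the default
  // prefixes and sharesHadoopClasses = true (class names are examples only):
  //
  //   isSharedClass("org.apache.spark.sql.Row")                          // true  -> base classloader
  //   isSharedClass("org.apache.hadoop.fs.Path")                         // true  -> Hadoop is shared
  //   isSharedClass("org.apache.hadoop.hive.conf.HiveConf")              // false -> isolated Hive class
  //   isBarrierClass("org.apache.spark.sql.hive.client.HiveClientImpl")  // true  -> redefined per loader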
protected def classToPath(name: String): String =
name.replaceAll("\\\\.", "/") + ".class"
/**
* The classloader that is used to load an isolated version of Hive.
* This classloader is a special URLClassLoader that exposes the addURL method.
 * So, when we add a jar, we can add it directly through the addURL method
* instead of stacking a new URLClassLoader on top of it.
*/
private[hive] val classLoader: MutableURLClassLoader = {
val isolatedClassLoader =
if (isolationOn) {
if (allJars.isEmpty) {
          // See HiveUtils; this is the Java 9+ plus "builtin" Hive jars scenario
baseClassLoader
} else {
val rootClassLoader: ClassLoader =
if (SystemUtils.isJavaVersionAtLeast(JavaVersion.JAVA_9)) {
// In Java 9, the boot classloader can see few JDK classes. The intended parent
// classloader for delegation is now the platform classloader.
// See http://java9.wtf/class-loading/
val platformCL =
classOf[ClassLoader].getMethod("getPlatformClassLoader").
invoke(null).asInstanceOf[ClassLoader]
// Check to make sure that the root classloader does not know about Hive.
assert(Try(platformCL.loadClass("org.apache.hadoop.hive.conf.HiveConf")).isFailure)
platformCL
} else {
// The boot classloader is represented by null (the instance itself isn't accessible)
// and before Java 9 can see all JDK classes
null
}
new URLClassLoader(allJars, rootClassLoader) {
override def loadClass(name: String, resolve: Boolean): Class[_] = {
val loaded = findLoadedClass(name)
if (loaded == null) doLoadClass(name, resolve) else loaded
}
def doLoadClass(name: String, resolve: Boolean): Class[_] = {
val classFileName = name.replaceAll("\\\\.", "/") + ".class"
if (isBarrierClass(name)) {
// For barrier classes, we construct a new copy of the class.
val bytes = IOUtils.toByteArray(baseClassLoader.getResourceAsStream(classFileName))
logDebug(s"custom defining: $name - ${util.Arrays.hashCode(bytes)}")
defineClass(name, bytes, 0, bytes.length)
} else if (!isSharedClass(name)) {
logDebug(s"hive class: $name - ${getResource(classToPath(name))}")
super.loadClass(name, resolve)
} else {
// For shared classes, we delegate to baseClassLoader, but fall back in case the
// class is not found.
logDebug(s"shared class: $name")
try {
baseClassLoader.loadClass(name)
} catch {
case _: ClassNotFoundException =>
super.loadClass(name, resolve)
}
}
}
}
}
} else {
baseClassLoader
}
// Right now, we create a URLClassLoader that gives preference to isolatedClassLoader
// over its own URLs when it loads classes and resources.
// We may want to use ChildFirstURLClassLoader based on
// the configuration of spark.executor.userClassPathFirst, which gives preference
// to its own URLs over the parent class loader (see Executor's createClassLoader method).
new NonClosableMutableURLClassLoader(isolatedClassLoader)
}
private[hive] def addJar(path: URL): Unit = synchronized {
classLoader.addURL(path)
}
/** The isolated client interface to Hive. */
private[hive] def createClient(): HiveClient = synchronized {
val warehouseDir = Option(hadoopConf.get(ConfVars.METASTOREWAREHOUSE.varname))
if (!isolationOn) {
return new HiveClientImpl(version, warehouseDir, sparkConf, hadoopConf, config,
baseClassLoader, this)
}
// Pre-reflective instantiation setup.
logDebug("Initializing the logger to avoid disaster...")
val origLoader = Thread.currentThread().getContextClassLoader
Thread.currentThread.setContextClassLoader(classLoader)
try {
classLoader
.loadClass(classOf[HiveClientImpl].getName)
.getConstructors.head
.newInstance(version, warehouseDir, sparkConf, hadoopConf, config, classLoader, this)
.asInstanceOf[HiveClient]
} catch {
case e: InvocationTargetException =>
if (e.getCause().isInstanceOf[NoClassDefFoundError]) {
val cnf = e.getCause().asInstanceOf[NoClassDefFoundError]
throw new ClassNotFoundException(
s"$cnf when creating Hive client using classpath: ${execJars.mkString(", ")}\\n" +
"Please make sure that jars for your version of hive and hadoop are included in the " +
s"paths passed to ${HiveUtils.HIVE_METASTORE_JARS.key}.", e)
} else {
throw e
}
} finally {
Thread.currentThread.setContextClassLoader(origLoader)
}
}
/**
* The place holder for shared Hive client for all the HiveContext sessions (they share an
* IsolatedClientLoader).
*/
private[hive] var cachedHive: Any = null
}
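// Minimal usage sketch (hypothetical values; the real call sites construct this from HiveUtils):
//
//   val loader = new IsolatedClientLoader(
//     version = hive.v2_3,
//     sparkConf = sparkConf,
//     hadoopConf = hadoopConf,
//     execJars = downloadedJars,                 // hive + hadoop jars
//     config = Map("hive.metastore.uris" -> "thrift://metastore-host:9083"))
//   val client: HiveClient = loader.createClient()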
| LantaoJin/spark | sql/hive/src/main/scala/org/apache/spark/sql/hive/client/IsolatedClientLoader.scala | Scala | apache-2.0 | 14,613 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.exchange
import java.util.Random
import java.util.function.Supplier
import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.serializer.Serializer
import org.apache.spark.shuffle.sort.SortShuffleManager
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.errors._
import org.apache.spark.sql.catalyst.expressions.{Attribute, UnsafeProjection, UnsafeRow}
import org.apache.spark.sql.catalyst.expressions.codegen.LazilyGeneratedOrdering
import org.apache.spark.sql.catalyst.plans.physical._
import org.apache.spark.sql.execution._
import org.apache.spark.sql.execution.metric.SQLMetrics
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.StructType
import org.apache.spark.util.MutablePair
import org.apache.spark.util.collection.unsafe.sort.{PrefixComparators, RecordComparator}
/**
* Performs a shuffle that will result in the desired `newPartitioning`.
*/
case class ShuffleExchangeExec(
var newPartitioning: Partitioning,
child: SparkPlan,
@transient coordinator: Option[ExchangeCoordinator]) extends Exchange {
// NOTE: coordinator can be null after serialization/deserialization,
// e.g. it can be null on the Executor side
override lazy val metrics = Map(
"dataSize" -> SQLMetrics.createSizeMetric(sparkContext, "data size"))
override def nodeName: String = {
val extraInfo = coordinator match {
case Some(exchangeCoordinator) =>
s"(coordinator id: ${System.identityHashCode(exchangeCoordinator)})"
case _ => ""
}
val simpleNodeName = "Exchange"
s"$simpleNodeName$extraInfo"
}
override def outputPartitioning: Partitioning = newPartitioning
private val serializer: Serializer =
new UnsafeRowSerializer(child.output.size, longMetric("dataSize"))
override protected def doPrepare(): Unit = {
// If an ExchangeCoordinator is needed, we register this Exchange operator
// to the coordinator when we do prepare. It is important to make sure
// we register this operator right before the execution instead of register it
// in the constructor because it is possible that we create new instances of
// Exchange operators when we transform the physical plan
    // (then the ExchangeCoordinator will hold references to unneeded Exchanges).
// So, we should only call registerExchange just before we start to execute
// the plan.
coordinator match {
case Some(exchangeCoordinator) => exchangeCoordinator.registerExchange(this)
case _ =>
}
}
/**
* Returns a [[ShuffleDependency]] that will partition rows of its child based on
* the partitioning scheme defined in `newPartitioning`. Those partitions of
* the returned ShuffleDependency will be the input of shuffle.
*/
private[exchange] def prepareShuffleDependency()
: ShuffleDependency[Int, InternalRow, InternalRow] = {
ShuffleExchangeExec.prepareShuffleDependency(
child.execute(), child.output, newPartitioning, serializer)
}
/**
* Returns a [[ShuffledRowRDD]] that represents the post-shuffle dataset.
* This [[ShuffledRowRDD]] is created based on a given [[ShuffleDependency]] and an optional
* partition start indices array. If this optional array is defined, the returned
* [[ShuffledRowRDD]] will fetch pre-shuffle partitions based on indices of this array.
*/
private[exchange] def preparePostShuffleRDD(
shuffleDependency: ShuffleDependency[Int, InternalRow, InternalRow],
specifiedPartitionStartIndices: Option[Array[Int]] = None): ShuffledRowRDD = {
// If an array of partition start indices is provided, we need to use this array
// to create the ShuffledRowRDD. Also, we need to update newPartitioning to
// update the number of post-shuffle partitions.
specifiedPartitionStartIndices.foreach { indices =>
assert(newPartitioning.isInstanceOf[HashPartitioning])
newPartitioning = UnknownPartitioning(indices.length)
}
new ShuffledRowRDD(shuffleDependency, specifiedPartitionStartIndices)
}
/**
   * Caches the created ShuffledRowRDD so we can reuse it.
*/
private var cachedShuffleRDD: ShuffledRowRDD = null
protected override def doExecute(): RDD[InternalRow] = attachTree(this, "execute") {
    // Returns the same ShuffledRowRDD if this plan is used by multiple plans.
if (cachedShuffleRDD == null) {
cachedShuffleRDD = coordinator match {
case Some(exchangeCoordinator) =>
val shuffleRDD = exchangeCoordinator.postShuffleRDD(this)
assert(shuffleRDD.partitions.length == newPartitioning.numPartitions)
shuffleRDD
case _ =>
val shuffleDependency = prepareShuffleDependency()
preparePostShuffleRDD(shuffleDependency)
}
}
cachedShuffleRDD
}
}
object ShuffleExchangeExec {
def apply(newPartitioning: Partitioning, child: SparkPlan): ShuffleExchangeExec = {
ShuffleExchangeExec(newPartitioning, child, coordinator = Option.empty[ExchangeCoordinator])
}
/**
* Determines whether records must be defensively copied before being sent to the shuffle.
* Several of Spark's shuffle components will buffer deserialized Java objects in memory. The
* shuffle code assumes that objects are immutable and hence does not perform its own defensive
* copying. In Spark SQL, however, operators' iterators return the same mutable `Row` object. In
* order to properly shuffle the output of these operators, we need to perform our own copying
* prior to sending records to the shuffle. This copying is expensive, so we try to avoid it
* whenever possible. This method encapsulates the logic for choosing when to copy.
*
* In the long run, we might want to push this logic into core's shuffle APIs so that we don't
* have to rely on knowledge of core internals here in SQL.
*
* See SPARK-2967, SPARK-4479, and SPARK-7375 for more discussion of this issue.
*
* @param partitioner the partitioner for the shuffle
* @return true if rows should be copied before being shuffled, false otherwise
*/
private def needToCopyObjectsBeforeShuffle(partitioner: Partitioner): Boolean = {
// Note: even though we only use the partitioner's `numPartitions` field, we require it to be
// passed instead of directly passing the number of partitions in order to guard against
// corner-cases where a partitioner constructed with `numPartitions` partitions may output
// fewer partitions (like RangePartitioner, for example).
val conf = SparkEnv.get.conf
val shuffleManager = SparkEnv.get.shuffleManager
val sortBasedShuffleOn = shuffleManager.isInstanceOf[SortShuffleManager]
val bypassMergeThreshold = conf.getInt("spark.shuffle.sort.bypassMergeThreshold", 200)
val numParts = partitioner.numPartitions
if (sortBasedShuffleOn) {
if (numParts <= bypassMergeThreshold) {
// If we're using the original SortShuffleManager and the number of output partitions is
// sufficiently small, then Spark will fall back to the hash-based shuffle write path, which
// doesn't buffer deserialized records.
// Note that we'll have to remove this case if we fix SPARK-6026 and remove this bypass.
false
} else if (numParts <= SortShuffleManager.MAX_SHUFFLE_OUTPUT_PARTITIONS_FOR_SERIALIZED_MODE) {
// SPARK-4550 and SPARK-7081 extended sort-based shuffle to serialize individual records
// prior to sorting them. This optimization is only applied in cases where shuffle
// dependency does not specify an aggregator or ordering and the record serializer has
// certain properties and the number of partitions doesn't exceed the limitation. If this
// optimization is enabled, we can safely avoid the copy.
//
// Exchange never configures its ShuffledRDDs with aggregators or key orderings, and the
        // serializer in Spark SQL always satisfies the properties, so we only need to check whether
// the number of partitions exceeds the limitation.
false
} else {
// Spark's SortShuffleManager uses `ExternalSorter` to buffer records in memory, so we must
// copy.
true
}
} else {
// Catch-all case to safely handle any future ShuffleManager implementations.
true
}
}
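  // Summary of the decision above for the sort-based shuffle path (a sketch; the thresholds come
  // from the configuration and SortShuffleManager):
  //
  //   numPartitions <= spark.shuffle.sort.bypassMergeThreshold           -> no copy needed
  //   numPartitions <= MAX_SHUFFLE_OUTPUT_PARTITIONS_FOR_SERIALIZED_MODE -> no copy (serialized shuffle)
  //   otherwise (ExternalSorter buffers deserialized rows)               -> copy each row
  //   any non-sort-based ShuffleManager                                  -> copy each row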
/**
* Returns a [[ShuffleDependency]] that will partition rows of its child based on
* the partitioning scheme defined in `newPartitioning`. Those partitions of
* the returned ShuffleDependency will be the input of shuffle.
*/
def prepareShuffleDependency(
rdd: RDD[InternalRow],
outputAttributes: Seq[Attribute],
newPartitioning: Partitioning,
serializer: Serializer): ShuffleDependency[Int, InternalRow, InternalRow] = {
val part: Partitioner = newPartitioning match {
case RoundRobinPartitioning(numPartitions) => new HashPartitioner(numPartitions)
case HashPartitioning(_, n) =>
new Partitioner {
override def numPartitions: Int = n
// For HashPartitioning, the partitioning key is already a valid partition ID, as we use
// `HashPartitioning.partitionIdExpression` to produce partitioning key.
override def getPartition(key: Any): Int = key.asInstanceOf[Int]
}
case RangePartitioning(sortingExpressions, numPartitions) =>
// Internally, RangePartitioner runs a job on the RDD that samples keys to compute
// partition bounds. To get accurate samples, we need to copy the mutable keys.
val rddForSampling = rdd.mapPartitionsInternal { iter =>
val mutablePair = new MutablePair[InternalRow, Null]()
iter.map(row => mutablePair.update(row.copy(), null))
}
implicit val ordering = new LazilyGeneratedOrdering(sortingExpressions, outputAttributes)
new RangePartitioner(
numPartitions,
rddForSampling,
ascending = true,
samplePointsPerPartitionHint = SQLConf.get.rangeExchangeSampleSizePerPartition)
case SinglePartition =>
new Partitioner {
override def numPartitions: Int = 1
override def getPartition(key: Any): Int = 0
}
case l: LocalPartitioning =>
new Partitioner {
override def numPartitions: Int = l.numPartitions
override def getPartition(key: Any): Int = key.asInstanceOf[Int]
}
case _ => sys.error(s"Exchange not implemented for $newPartitioning")
// TODO: Handle BroadcastPartitioning.
}
def getPartitionKeyExtractor(): InternalRow => Any = newPartitioning match {
case RoundRobinPartitioning(numPartitions) =>
// Distributes elements evenly across output partitions, starting from a random partition.
var position = new Random(TaskContext.get().partitionId()).nextInt(numPartitions)
(row: InternalRow) => {
// The HashPartitioner will handle the `mod` by the number of partitions
position += 1
position
}
case h: HashPartitioning =>
val projection = UnsafeProjection.create(h.partitionIdExpression :: Nil, outputAttributes)
row => projection(row).getInt(0)
case RangePartitioning(_, _) | SinglePartition => identity
case _: LocalPartitioning =>
val partitionId = TaskContext.get().partitionId()
_ => partitionId
case _ => sys.error(s"Exchange not implemented for $newPartitioning")
}
val isRoundRobin = newPartitioning.isInstanceOf[RoundRobinPartitioning] &&
newPartitioning.numPartitions > 1
val rddWithPartitionIds: RDD[Product2[Int, InternalRow]] = {
// [SPARK-23207] Have to make sure the generated RoundRobinPartitioning is deterministic,
// otherwise a retry task may output different rows and thus lead to data loss.
//
      // Currently we follow the most straightforward approach and perform a local sort before
      // partitioning.
      //
      // Note that we don't perform the local sort if the new partitioning has only 1 partition,
      // since in that case all output rows go to the same partition.
val newRdd = if (isRoundRobin && SQLConf.get.sortBeforeRepartition) {
rdd.mapPartitionsInternal { iter =>
val recordComparatorSupplier = new Supplier[RecordComparator] {
override def get: RecordComparator = new RecordBinaryComparator()
}
// The comparator for comparing row hashcode, which should always be Integer.
val prefixComparator = PrefixComparators.LONG
val canUseRadixSort = SparkEnv.get.conf.get(SQLConf.RADIX_SORT_ENABLED)
        // The prefix computer generates the row hashcode as the prefix, which decreases the
        // probability that prefixes are equal when input rows draw column values from a
        // limited range.
val prefixComputer = new UnsafeExternalRowSorter.PrefixComputer {
private val result = new UnsafeExternalRowSorter.PrefixComputer.Prefix
override def computePrefix(row: InternalRow):
UnsafeExternalRowSorter.PrefixComputer.Prefix = {
// The hashcode generated from the binary form of a [[UnsafeRow]] should not be null.
result.isNull = false
result.value = row.hashCode()
result
}
}
val pageSize = SparkEnv.get.memoryManager.pageSizeBytes
val sorter = UnsafeExternalRowSorter.createWithRecordComparator(
StructType.fromAttributes(outputAttributes),
recordComparatorSupplier,
prefixComparator,
prefixComputer,
pageSize,
canUseRadixSort)
sorter.sort(iter.asInstanceOf[Iterator[UnsafeRow]])
}
} else {
rdd
}
// round-robin function is order sensitive if we don't sort the input.
val isOrderSensitive = isRoundRobin && !SQLConf.get.sortBeforeRepartition
if (needToCopyObjectsBeforeShuffle(part)) {
newRdd.mapPartitionsWithIndexInternal((_, iter) => {
val getPartitionKey = getPartitionKeyExtractor()
iter.map { row => (part.getPartition(getPartitionKey(row)), row.copy()) }
}, isOrderSensitive = isOrderSensitive)
} else {
newRdd.mapPartitionsWithIndexInternal((_, iter) => {
val getPartitionKey = getPartitionKeyExtractor()
val mutablePair = new MutablePair[Int, InternalRow]()
iter.map { row => mutablePair.update(part.getPartition(getPartitionKey(row)), row) }
}, isOrderSensitive = isOrderSensitive)
}
}
    // Now, we manually create a ShuffleDependency. Because the pairs in rddWithPartitionIds
    // are already of the form (partitionId, row), with every partitionId in the expected range
    // [0, part.numPartitions - 1], the partitioner used here is a simple PartitionIdPassthrough.
val dependency =
new ShuffleDependency[Int, InternalRow, InternalRow](
rddWithPartitionIds,
new PartitionIdPassthrough(part.numPartitions),
serializer)
dependency
}
}
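// Rough data flow from child RDD to post-shuffle RDD, as an illustrative sketch (names are
// hypothetical):
//
//   val dep = ShuffleExchangeExec.prepareShuffleDependency(
//     childRdd, childOutput, HashPartitioning(exprs, 200), serializer)
//   val shuffled = new ShuffledRowRDD(dep, None)   // optionally pass partition start indices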
| rekhajoshm/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/exchange/ShuffleExchangeExec.scala | Scala | apache-2.0 | 16,033 |
/*
* The MIT License (MIT)
* <p>
* Copyright (c) 2017-2019
* <p>
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
* <p>
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
* <p>
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
* THE SOFTWARE.
*/
package io.techcode.streamy.util.json
import akka.util.ByteString
import com.typesafe.config.ConfigFactory
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpecLike
import pureconfig._
import pureconfig.generic.auto._
/**
* JsonImplicit spec.
*/
class JsonImplicitSpec extends AnyWordSpecLike with Matchers {
"JsonImplicit" should {
"support pureconfig integration" in {
case class Test(
doc: Json,
pointer: JsonPointer
)
val test = ConfigSource.fromConfig(ConfigFactory.parseString("""{"doc":"{\\"test\\":\\"test\\"}", "pointer":"/key"}""")).loadOrThrow[Test]
test.doc should equal(Json.obj("test" -> "test"))
test.pointer should equal(Root / "key")
}
"provide a shortcut to convert json in string" in {
jsonToString(Json.obj("test" -> "test")) should equal("""{"test":"test"}""")
}
"provide a shortcut to convert string in json" in {
stringToJson("""{"test":"test"}""") should equal(JsString.fromLiteral("""{"test":"test"}"""))
}
"provide a shortcut to convert float in json" in {
floatToJson(2.0F) should equal(JsFloat.fromLiteral(2.0F))
}
"provide a shortcut to convert double in json" in {
doubleToJson(2.0D) should equal(JsDouble.fromLiteral(2.0D))
}
"provide a shortcut to convert byte in json" in {
byteToJson(2) should equal(JsInt.fromLiteral(2))
}
"provide a shortcut to convert short in json" in {
shortToJson(2) should equal(JsInt.fromLiteral(2))
}
"provide a shortcut to convert int in json" in {
intToJson(2) should equal(JsInt.fromLiteral(2))
}
"provide a shortcut to convert long in json" in {
longToJson(2L) should equal(JsLong.fromLiteral(2L))
}
"provide a shortcut to convert boolean in json" in {
booleanToJson(true) should equal(JsTrue)
}
"provide a shortcut to convert byte string in json" in {
byteStringToJson(ByteString.empty) should equal(JsBytes.fromLiteral(ByteString.empty))
}
"provide a shortcut to convert big decimal in json" in {
bigDecimalToJson(BigDecimal.valueOf(0)) should equal(JsBigDecimal.fromLiteral(BigDecimal.valueOf(0)))
}
}
}
| amannocci/streamy | core/src/test/scala/io/techcode/streamy/util/json/JsonImplicitSpec.scala | Scala | mit | 3,385 |
package mesosphere.marathon.tasks
import scala.collection._
import scala.collection.JavaConverters._
import org.apache.mesos.Protos.{TaskID, TaskStatus}
import javax.inject.Inject
import org.apache.mesos.state.State
import java.util.logging.{Level, Logger}
import mesosphere.marathon.Protos._
import mesosphere.marathon.Main
import java.io._
import scala.Some
import scala.concurrent.{ExecutionContext, Future}
/**
* @author Tobi Knaup
*/
class TaskTracker @Inject()(state: State) {
import TaskTracker.App
import ExecutionContext.Implicits.global
import mesosphere.util.BackToTheFuture.futureToFuture
private[this] val log = Logger.getLogger(getClass.getName)
val prefix = "tasks:"
private val apps = new mutable.HashMap[String, App] with
mutable.SynchronizedMap[String, App]
def get(appName: String) = {
apps.getOrElseUpdate(appName, fetchApp(appName)).tasks
}
def list = {
apps
}
def count(appName: String) = {
get(appName).size
}
def contains(appName: String) = {
apps.contains(appName)
}
def take(appName: String, n: Int) = {
get(appName).take(n)
}
def starting(appName: String, task: MarathonTask) {
// Keep this here so running() can pick it up
get(appName) += task
}
def running(appName: String, status: TaskStatus): Future[MarathonTask] = {
val taskId = status.getTaskId.getValue
val task = get(appName).find(_.getId == taskId) match {
case Some(stagedTask) => {
get(appName).remove(stagedTask)
stagedTask.toBuilder
.setStartedAt(System.currentTimeMillis)
.addStatuses(status)
.build
}
case _ => {
log.warning(s"No staged task for ID ${taskId}")
// We lost track of the host and port of this task, but still need to keep track of it
MarathonTask.newBuilder
.setId(taskId)
.setStagedAt(System.currentTimeMillis)
.setStartedAt(System.currentTimeMillis)
.addStatuses(status)
.build
}
}
get(appName) += task
store(appName).map(_ => task)
}
def terminated(appName: String,
status: TaskStatus): Future[Option[MarathonTask]] = {
val now = System.currentTimeMillis
val appTasks = get(appName)
val taskId = status.getTaskId.getValue
appTasks.find(_.getId == taskId) match {
case Some(task) => {
apps(appName).tasks = appTasks - task
val ret = store(appName).map(_ => Some(task))
log.info(s"Task ${taskId} removed from TaskTracker")
if (apps(appName).shutdown && apps(appName).tasks.isEmpty) {
// Are we shutting down this app? If so, expunge
expunge(appName)
}
ret
}
case None =>
if (apps(appName).shutdown && apps(appName).tasks.isEmpty) {
// Are we shutting down this app? If so, expunge
expunge(appName)
}
Future.successful(None)
}
}
def statusUpdate(appName: String,
status: TaskStatus): Future[Option[MarathonTask]] = {
val taskId = status.getTaskId.getValue
get(appName).find(_.getId == taskId) match {
case Some(task) => {
get(appName).remove(task)
val updatedTask = task.toBuilder
.addStatuses(status)
.build
get(appName) += updatedTask
store(appName).map(_ => Some(updatedTask))
}
case _ => {
log.warning(s"No task for ID ${taskId}")
Future.successful(None)
}
}
}
def expunge(appName: String) {
val variable = fetchFromState(appName)
state.expunge(variable)
apps.remove(appName)
log.warning(s"Expunged app ${appName}")
}
def shutDown(appName: String) {
apps.getOrElseUpdate(appName, fetchApp(appName)).shutdown = true
}
def newTaskId(appName: String) = {
val taskCount = count(appName)
TaskID.newBuilder()
.setValue(TaskIDUtil.taskId(appName, taskCount))
.build
}
def fetchApp(appName: String): App = {
val bytes = fetchFromState(appName).value
if (bytes.length > 0) {
val source = new ObjectInputStream(new ByteArrayInputStream(bytes))
val fetchedTasks = deserialize(appName, source)
if (fetchedTasks.size > 0) {
        apps(appName) = new App(appName, fetchedTasks, false)
}
}
if (apps.contains(appName)) {
apps(appName)
//set.map(map => MarathonTask(map("id"), map("host"), map("port").asInstanceOf[Int], map("attributes"))
} else {
      new App(appName, new mutable.HashSet[MarathonTask](), false)
}
}
def deserialize(appName: String, source: ObjectInputStream)
: mutable.HashSet[MarathonTask] = {
var results = mutable.HashSet[MarathonTask]()
try {
if (source.available > 0) {
val size = source.readInt
val bytes = new Array[Byte](size)
source.readFully(bytes)
val app = MarathonApp.parseFrom(bytes)
if (app.getName != appName) {
          log.warning(s"App name from task state for ${appName} is wrong! Got '${app.getName}'. Continuing anyway...")
}
results ++= app.getTasksList.asScala.toSet
} else {
log.warning(s"Unable to deserialize task state for ${appName}")
}
} catch {
case e: com.google.protobuf.InvalidProtocolBufferException =>
        log.log(Level.WARNING, s"Unable to deserialize task state for $appName", e)
}
results
}
def getProto(appName: String, tasks: Set[MarathonTask]): MarathonApp = {
MarathonApp.newBuilder
.setName(appName)
.addAllTasks(tasks.toList.asJava)
.build
}
def serialize(appName: String, tasks: Set[MarathonTask], sink: ObjectOutputStream) {
val app = getProto(appName, tasks)
val size = app.getSerializedSize
sink.writeInt(size)
sink.write(app.toByteArray)
sink.flush
}
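  // Wire format used by serialize/deserialize above, shown schematically:
  //
  //   [ size: Int (4 bytes) ][ MarathonApp protobuf payload (size bytes) ]
  //
  // deserialize reads the Int back, calls readFully(size) into a buffer and then MarathonApp.parseFrom.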
def fetchFromState(appName: String) = {
state.fetch(prefix + appName).get()
}
def store(appName: String) = {
val oldVar = fetchFromState(appName)
val bytes = new ByteArrayOutputStream()
val output = new ObjectOutputStream(bytes)
serialize(appName, get(appName), output)
val newVar = oldVar.mutate(bytes.toByteArray)
state.store(newVar)
}
def checkStagedTasks: Iterable[MarathonTask] = {
val now = System.currentTimeMillis
val expires = now - Main.conf.taskLaunchTimeout()
val toKill = apps.values.map { app =>
app.tasks.filter(t => Option(t.getStartedAt).isEmpty && t.getStagedAt < expires)
}.flatten
toKill.foreach(t => {
log.warning(s"Task '${t.getId}' was staged ${(now - t.getStagedAt)/1000}s ago and has not yet started")
})
toKill
}
}
object TaskTracker {
class App(
val appName: String,
var tasks: mutable.Set[MarathonTask],
var shutdown: Boolean
)
}
| MiLk/marathon | src/main/scala/mesosphere/marathon/tasks/TaskTracker.scala | Scala | apache-2.0 | 6,861 |
package edu.gemini.phase2.skeleton.factory
import edu.gemini.spModel.gemini.obscomp.SPProgram.ProgramMode._
import edu.gemini.spModel.too.TooType
import edu.gemini.model.p1.immutable._
import edu.gemini.model.p1.immutable.ExchangePartner._
import edu.gemini.model.p1.immutable.NgoPartner._
import edu.gemini.model.p1.immutable.SpecialProposalType._
import edu.gemini.model.p1.mutable.InvestigatorStatus.GRAD_THESIS
import edu.gemini.model.p1.mutable.TooOption
import edu.gemini.spModel.gemini.obscomp.SPProgram
import edu.gemini.spModel.gemini.obscomp.SPProgram.{PIInfo, ProgramMode}
import edu.gemini.shared.util.TimeValue
import edu.gemini.spModel.timeacct.{TimeAcctAllocation, TimeAcctCategory}
import edu.gemini.spModel.gemini.phase1.{GsaPhase1Data => Gsa}
import scala.collection.JavaConverters._
import scalaz._
import Scalaz._
import edu.gemini.spModel.core.Affiliate
/**
* Factory for creating an SPProgram from a Phase 1 Proposal document.
*/
object SpProgramFactory {
private val AFFILIATES = Map(
AR -> "Argentina",
AU -> "Australia",
BR -> "Brazil",
CA -> "Canada",
CL -> "Chile",
KR -> "Korea",
UH -> "University of Hawaii",
US -> "United States"
)
private val NGO_TIME_ACCT = Map(
AR -> TimeAcctCategory.AR,
AU -> TimeAcctCategory.AU,
BR -> TimeAcctCategory.BR,
CA -> TimeAcctCategory.CA,
CL -> TimeAcctCategory.CL,
KR -> TimeAcctCategory.KR,
UH -> TimeAcctCategory.UH,
US -> TimeAcctCategory.US
)
private val EXC_TIME_ACCT = Map(
SUBARU -> TimeAcctCategory.JP,
KECK -> TimeAcctCategory.XCHK
)
private val SPC_TIME_ACCT = Map(
DEMO_SCIENCE -> TimeAcctCategory.DS,
DIRECTORS_TIME -> TimeAcctCategory.DD,
SYSTEM_VERIFICATION -> TimeAcctCategory.SV
// NOTE: no category for poor weather
)
def create(proposal: Proposal): SPProgram = {
val prog = new SPProgram()
prog.setTitle(proposal.title)
val pmode = mode(proposal)
prog.setProgramMode(pmode)
if (pmode == QUEUE) {
// TODO: use TooTypeSetter
prog.setTooType(too(proposal))
// REL-2079 Set default to 1
val b = band(proposal).getOrElse(1)
prog.setQueueBand(b.toString)
}
prog.setRolloverStatus(isRollover(proposal))
prog.setThesis(isThesis(proposal))
prog.setPIInfo(piInfo(proposal))
hostNgoEmail(proposal) foreach { e => prog.setNGOContactEmail(e) }
// Note: not a typo -- "contact person" is an email
gemEmail(proposal) foreach { e => prog.setContactPerson(e) }
minBand3Time(proposal) foreach { tv => prog.setMinimumTime(tv) }
timeAcctAllocation(proposal) foreach { alloc => prog.setTimeAcctAllocation(alloc) }
prog.setGsaPhase1Data(gsaPhase1Data(proposal))
prog
}
private def piInfo(proposal: Proposal): PIInfo = {
val pi = proposal.investigators.pi
val first = pi.firstName
val last = pi.lastName
val email = pi.email
val phone = pi.phone.mkString(",")
val aff = affiliate(proposal)
new PIInfo(first, last, email, phone, aff.orNull)
}
def mode(proposal: Proposal): ProgramMode =
proposal.proposalClass match {
case c: ClassicalProposalClass => CLASSICAL
case _ => QUEUE
}
def band(proposal: Proposal): Option[Int] =
for {
i <- proposal.proposalClass.itac
b <- band(i)
} yield b
private def band(itac: Itac): Option[Int] = itac.decision.right.map(_.band).right.toOption
def too(proposal: Proposal): TooType =
proposal.proposalClass match {
case q: QueueProposalClass => too(q.tooOption)
case _ => TooType.none
}
private def too(tooType: TooOption): TooType =
tooType match {
case TooOption.STANDARD => TooType.standard
case TooOption.RAPID => TooType.rapid
case _ => TooType.none
}
/**
* The host submission is the proposal's NgoSubmission (if any) that was
* accepted and had the largest time award.
*/
def hostSubmission(proposal: Proposal): Option[NgoSubmission] =
proposal.proposalClass match {
case n: GeminiNormalProposalClass => hostSubmission(n)
case e: ExchangeProposalClass => hostSubmission(e.subs)
case _ => None
}
private def hostSubmission(n: GeminiNormalProposalClass): Option[NgoSubmission] =
for {
l <- n.subs.left.toOption
h <- hostSubmission(l)
} yield h
private def hostSubmission(l: List[NgoSubmission]): Option[NgoSubmission] =
if (l.size == 0) {
None
} else {
Some(l.maxBy(s => timeAward(s).getOrElse(TimeAmount.empty).hours))
}
private def acceptance(sub: Submission): Option[SubmissionAccept] =
for {
r <- sub.response
d <- r.decision
a <- d.decision.right.toOption
} yield a
private def timeAward(ngo: NgoSubmission): Option[TimeAmount] =
acceptance(ngo) map { a => a.recommended }
def affiliate(proposal: Proposal): Option[Affiliate] =
proposal.proposalClass match {
case _: LargeProgramClass => Some(Affiliate.GEMINI_STAFF)
case _ => hostSubmission(proposal) flatMap { s => affiliate(s.partner) }
}
def hostNgoEmail(p: Proposal): Option[String] =
for {
h <- hostSubmission(p)
a <- acceptance(h)
} yield a.email
private def affiliate(ngoPartner: NgoPartner): Option[Affiliate] =
Option(Affiliate.fromString(ngoPartner.value()))
private def itacAcceptance(proposal: Proposal): Option[ItacAccept] =
for {
i <- proposal.proposalClass.itac
a <- i.decision.right.toOption
} yield a
def isRollover(proposal: Proposal): Boolean =
itacAcceptance(proposal).exists(_.rollover)
def isThesis(proposal: Proposal): Boolean =
proposal.investigators.all exists { i => i.status == GRAD_THESIS }
def gemEmail(proposal: Proposal): Option[String] =
itacAcceptance(proposal).flatMap(_.contact)
def minBand3Time(proposal: Proposal): Option[TimeValue] =
proposal.proposalClass match {
case q: QueueProposalClass => q.band3request map { r => toTimeValue(r.minTime) }
case _ => None
}
private def toTimeValue(ta: TimeAmount): TimeValue =
ta.units match {
case TimeUnit.NIGHT => new TimeValue(ta.value, TimeValue.Units.nights)
case _ => new TimeValue(ta.toHours.value, TimeValue.Units.hours)
}
def timeAcctAllocation(proposal: Proposal): Option[TimeAcctAllocation] =
awardedHours(proposal).filter(_ > 0.0) flatMap { hrs =>
timeAccountingRatios(proposal) match {
case Nil => None
case ratios =>
val jmap = ratios.toMap.mapValues(d => new java.lang.Double(d)).asJava
Some(new TimeAcctAllocation(hrs, jmap))
}
}
def awardedHours(proposal: Proposal): Option[Double] =
itacAcceptance(proposal) map { a => a.award.toHours.value }
def timeAccountingRatios(proposal: Proposal): List[(TimeAcctCategory, Double)] =
proposal.proposalClass match {
case n: GeminiNormalProposalClass =>
n.subs match {
case Left(ngos) => ngoRatios(ngos)
case Right(exc) => excRatio(exc).toList
}
case _: LargeProgramClass => List((TimeAcctCategory.LP, 1.0))
case e: ExchangeProposalClass => ngoRatios(e.subs)
// TBD This part of the code won't be triggered because FT doesn't go through itac (See awardedHours function)
// But we'll leave it here for the future
case f: FastTurnaroundProgramClass =>
// In principle there are no proposals submitted without a PA but check just in case
~f.partnerAffiliation.map { partnerAffiliation =>
val s = NgoSubmission(f.sub.request, f.sub.response, partnerAffiliation, InvestigatorRef(proposal.investigators.pi))
ngoRatios(List(s))
}
case s: SpecialProposalClass => spcRatio(s.sub).toList
}
private def ngoRatios(subs: List[NgoSubmission]): List[(TimeAcctCategory, Double)] = {
val catHrs = categorizedHours(subs)
val total = catHrs.unzip._2.sum
if (total == 0.0) {
catHrs map { case (cat, _) => (cat, 1.0 / catHrs.length)}
} else {
catHrs map { case (cat, hrs) => (cat, hrs / total)}
}
}
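  // Worked example for the ratio computation above (hypothetical time awards): two accepted NGO
  // submissions with 6.0 and 4.0 recommended hours yield ratios 0.6 and 0.4; if the total is
  // 0.0 hours the time is split evenly, e.g. two categories get 0.5 each.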
private def categorizedHours(subs: List[NgoSubmission]): List[(TimeAcctCategory, Double)] =
for {
ngo <- subs
cat <- NGO_TIME_ACCT.get(ngo.partner)
acc <- acceptance(ngo) if acc.recommended.hours >= 0.0
} yield (cat, acc.recommended.hours)
private def excRatio(exc: ExchangeSubmission): Option[(TimeAcctCategory, Double)] =
EXC_TIME_ACCT.get(exc.partner) map { cat => (cat, 1.0) }
private def spcRatio(spc: SpecialSubmission): Option[(TimeAcctCategory, Double)] =
SPC_TIME_ACCT.get(spc.specialType) map { cat => (cat, 1.0) }
def gsaPhase1Data(proposal: Proposal): Gsa = {
val abstrakt = new Gsa.Abstract(proposal.abstrakt)
val category = new Gsa.Category(~proposal.tacCategory.map(_.value()))
val keywords = proposal.keywords.map(k => new Gsa.Keyword(k.value())).asJava
val pi = gsaPhase1DataInvestigator(proposal.investigators.pi)
val cois = proposal.investigators.cois.map(gsaPhase1DataInvestigator).asJava
new Gsa(abstrakt, category, keywords, pi, cois)
}
private def gsaPhase1DataInvestigator(inv: Investigator): Gsa.Investigator =
new Gsa.Investigator(inv.firstName, inv.lastName, inv.email)
}
| arturog8m/ocs | bundle/edu.gemini.phase2.skeleton.servlet/src/main/scala/edu/gemini/phase2/skeleton/factory/SpProgramFactory.scala | Scala | bsd-3-clause | 9,508 |
/* Copyright 2009-2021 EPFL, Lausanne */
object NestedFunState1 {
def simpleSideEffect(n: BigInt): BigInt = {
require(n > 0)
var a = BigInt(0)
def incA(prevA: BigInt): Unit = {
require(prevA == a)
a += 1
} ensuring(_ => a == prevA + 1)
incA(a)
incA(a)
incA(a)
incA(a)
a
} ensuring(_ == 5)
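  // After four calls to incA the final value of `a` is 4, so the postcondition `_ == 5`
  // cannot hold; this benchmark is expected to fail verification (hence its place among the
  // invalid benchmarks).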
}
| epfl-lara/stainless | frontends/benchmarks/imperative/invalid/NestedFunState1.scala | Scala | apache-2.0 | 349 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.testsuite.jsinterop
import scala.language.implicitConversions
import scala.scalajs.js
import js.JSConverters._
import js.annotation.JSExport
import org.junit.Assert._
import org.junit.Test
import org.scalajs.testsuite.utils.JSAssert._
class DynamicTest {
implicit def dyn2Bool(dyn: js.Dynamic): Boolean =
dyn.asInstanceOf[Boolean]
implicit def dyn2Int(dyn: js.Dynamic): Int =
dyn.asInstanceOf[Int]
implicit def dyn2AnyRef(dyn: js.Dynamic): AnyRef =
dyn.asInstanceOf[AnyRef]
// scala.scalajs.js.Dynamic
@Test def should_workaround_Scala_2_10_issue_with_implicit_conversion_for_dynamic_fields_named_x_issue_8(): Unit = {
class Point(val x: Int, val y: Int)
def jsonToPoint(json: js.Dynamic): Point = {
new Point(json.x.toString.toInt, json.y.toString.toInt)
}
val json = js.eval("var dynamicTestPoint = { x: 1, y: 2 }; dynamicTestPoint;")
val point = jsonToPoint(json.asInstanceOf[js.Dynamic])
assertEquals(1, point.x)
assertEquals(2, point.y)
}
@Test def should_allow_to_call_functions_with_arguments_named_x(): Unit = {
class A {
def a: Int = 1
}
class B extends A {
@JSExport
def x(par: Int): Int = a + par // make sure `this` is bound correctly in JS
}
val b = (new B).asInstanceOf[js.Dynamic]
assertEquals(11, b.x(10))
}
@Test def should_allow_instanciating_JS_classes_dynamically_issue_10(): Unit = {
val DynamicTestClass = js.eval("""
var DynamicTestClass = function(x) {
this.x = x;
};
DynamicTestClass;
""").asInstanceOf[js.Dynamic]
val obj = js.Dynamic.newInstance(DynamicTestClass)("Scala.js")
assertEquals("Scala.js", obj.x)
}
@Test def should_allow_instantiating_JS_classes_dynamically_with_varargs_issue_708(): Unit = {
val DynamicTestClassVarArgs = js.eval("""
var DynamicTestClassVarArgs = function() {
this.count = arguments.length;
for (var i = 0; i < arguments.length; i++)
this['elem'+i] = arguments[i];
};
DynamicTestClassVarArgs;
""").asInstanceOf[js.Dynamic]
val obj1 = js.Dynamic.newInstance(DynamicTestClassVarArgs)("Scala.js")
val obj1_count = obj1.count
assertEquals(1, obj1_count)
val obj1_elem0 = obj1.elem0
assertEquals("Scala.js", obj1_elem0)
val obj2 = js.Dynamic.newInstance(DynamicTestClassVarArgs)(
"Scala.js", 42, true)
val obj2_count = obj2.count
assertEquals(3, obj2_count)
val obj2_elem0 = obj2.elem0
assertEquals("Scala.js", obj2_elem0)
val obj2_elem1 = obj2.elem1
assertEquals(42, obj2_elem1)
val obj2_elem2 = obj2.elem2
assertTrue(obj2_elem2)
def obj3Args: Seq[js.Any] = Seq("Scala.js", 42, true)
val obj3 = js.Dynamic.newInstance(DynamicTestClassVarArgs)(obj3Args: _*)
val obj3_count = obj3.count
assertEquals(3, obj3_count)
val obj3_elem0 = obj3.elem0
assertEquals("Scala.js", obj3_elem0)
val obj3_elem1 = obj3.elem1
assertEquals(42, obj3_elem1)
val obj3_elem2 = obj3.elem2
assertTrue(obj3_elem2)
// Check backward binary compatibility with the 0.6.{0,1,2} codegen output
val obj4 = scala.scalajs.runtime.newJSObjectWithVarargs(
DynamicTestClassVarArgs, obj3Args.toJSArray).asInstanceOf[js.Dynamic]
val obj4_count = obj4.count
assertEquals(3, obj4_count)
val obj4_elem0 = obj4.elem0
assertEquals("Scala.js", obj4_elem0)
val obj4_elem1 = obj4.elem1
assertEquals(42, obj4_elem1)
val obj4_elem2 = obj4.elem2
assertTrue(obj4_elem2)
}
@Test def should_provide_an_object_literal_construction(): Unit = {
import js.Dynamic.{ literal => obj }
val x = obj(foo = 3, bar = "foobar")
val x_foo = x.foo
assertEquals(3, x_foo.asInstanceOf[Int])
val x_bar = x.bar
assertEquals("foobar", x_bar)
val x_unknown = x.unknown
assertJSUndefined(x_unknown)
val y = obj(
inner = obj(name = "inner obj"),
fun = { () => 42 }
)
val y_inner_name = y.inner.name
assertEquals("inner obj", y_inner_name)
assertEquals(42, y.fun())
val obj_anything = obj().anything
assertJSUndefined(obj_anything)
}
@Test def object_literal_in_statement_position_issue_1627(): Unit = {
// Just make sure it does not cause a SyntaxError
js.Dynamic.literal(foo = "bar")
// and also test the case without param (different code path in Printers)
js.Dynamic.literal()
}
@Test def should_provide_object_literal_construction_with_dynamic_naming(): Unit = {
import js.Dynamic.{ literal => obj }
val x = obj("foo" -> 3, "bar" -> "foobar")
val x_foo = x.foo
assertEquals(3, x_foo)
val x_bar = x.bar
assertEquals("foobar", x_bar)
val x_unknown = x.unknown
assertJSUndefined(x_unknown)
val tup1 = ("hello1", 3: js.Any)
val tup2 = ("hello2", 10: js.Any)
val y = obj(tup1, tup2)
val y_hello1 = y.hello1
assertEquals(3, y_hello1)
val y_hello2 = y.hello2
assertEquals(10, y_hello2)
var count = 0
val z = obj({ count += 1; ("foo", "bar")})
val z_foo = z.foo
assertEquals("bar", z_foo)
assertEquals(1, count)
}
@Test def should_preserve_evaluation_order_of_keys_and_values(): Unit = {
import js.Dynamic.{ literal => obj }
val orderCheck = Array.newBuilder[Int]
val x = obj(
{ orderCheck += 1; "foo" } -> { orderCheck += 2; 3 },
{ orderCheck += 3; "bar" } -> { orderCheck += 4; "foobar" })
val x_foo = x.foo
assertEquals(3, x_foo)
val x_bar = x.bar
assertEquals("foobar", x_bar)
val x_unknown = x.unknown
assertJSUndefined(x_unknown)
assertArrayEquals(Array(1, 2, 3, 4), orderCheck.result())
val orderCheck2 = Array.newBuilder[Int]
def tup1 = ({ orderCheck2 += 1; "hello1" }, { orderCheck2 += 2; 3: js.Any })
def tup2 = ({ orderCheck2 += 3; "hello2" }, { orderCheck2 += 4; 10: js.Any })
val y = obj(tup1, tup2)
val y_hello1 = y.hello1
assertEquals(3, y_hello1)
val y_hello2 = y.hello2
assertEquals(10, y_hello2)
assertArrayEquals(Array(1, 2, 3, 4), orderCheck2.result())
@noinline def block[A](a: A): A = a
val orderCheck3 = Array.newBuilder[Int]
val z = obj(
{ val a = block("foo"); orderCheck3 += 1; a } ->
{ val a = block(3); orderCheck3 += 2; a },
{ val a = block("bar"); orderCheck3 += 3; a } ->
{ val a = block("foobar"); orderCheck3 += 4; a })
val z_foo = z.foo
assertEquals(3, z_foo)
val z_bar = z.bar
assertEquals("foobar", z_bar)
val z_unknown = z.unknown
assertJSUndefined(z_unknown)
assertArrayEquals(Array(1, 2, 3, 4), orderCheck3.result())
}
@Test def should_allow_to_create_an_empty_object_with_the_literal_syntax(): Unit = {
import js.Dynamic.{ literal => obj }
val x = obj()
assertTrue(x.isInstanceOf[js.Object])
}
@Test def should_properly_encode_object_literal_property_names(): Unit = {
import js.Dynamic.{ literal => obj }
val obj0 = obj("3-" -> 42)
val `obj0_3-` = obj0.`3-`
assertEquals(42, `obj0_3-`)
val obj0Dict = obj0.asInstanceOf[js.Dictionary[js.Any]]
assertEquals(42, obj0Dict("3-"))
val checkEvilProperties = js.eval("""
function dynamicLiteralNameEncoding_checkEvilProperties(x) {
return x['.o[3√!|-pr()per7:3$];'] === ' such eval ';
}
dynamicLiteralNameEncoding_checkEvilProperties
""").asInstanceOf[js.Function1[js.Any, Boolean]]
val obj1 = obj(
".o[3√!|-pr()per7:3$];" -> " such eval ").asInstanceOf[js.Dictionary[js.Any]]
assertEquals(" such eval ", obj1(".o[3√!|-pr()per7:3$];"))
assertTrue(checkEvilProperties(obj1))
val checkQuotesProperty = js.eval("""
function dynamicLiteralNameEncoding_quote(x) {
return x["'" + '"'] === 7357;
}
dynamicLiteralNameEncoding_quote
""").asInstanceOf[js.Function1[js.Any, Boolean]]
val quote = '"'
Seq(
obj("'" + quote -> 7357),
obj(s"'$quote" -> 7357),
obj("'\\"" -> 7357),
obj("'" + quote -> 7357)
).foreach { o =>
val dict = o.asInstanceOf[js.Dictionary[js.Any]]
assertEquals(7357, dict("'\\""))
assertEquals(7357, dict("'" + quote))
assertEquals(7357, dict(s"'$quote"))
assertTrue(checkQuotesProperty(o))
}
}
@Test def `should_accept_:__*_arguments_for_literal_construction_issue_1743`(): Unit = {
import js.Dynamic.literal
val fields = Seq[(String, js.Any)]("foo" -> 42, "bar" -> "foobar")
/* Note: we cannot write
* literal(fields: _*)
* because scalac does not like it. But we still have to support the
* expanded notation.
*/
val x = literal.applyDynamic("apply")(fields: _*)
val x_foo = x.foo
assertEquals(42, x_foo)
val x_bar = x.bar
assertEquals("foobar", x_bar)
val y = literal.applyDynamicNamed("apply")(fields: _*)
val y_foo = y.foo
assertEquals(42, y_foo)
val y_bar = y.bar
assertEquals("foobar", y_bar)
}
@Test def should_allow_object_literals_to_have_duplicate_keys_issue_1595(): Unit = {
import js.Dynamic.{literal => obj}
// Basic functionality
val a = obj(foo = 4, bar = 5, foo = 6)
val a_foo = a.foo
assertEquals(6, a_foo) // last wins
val a_bar = a.bar
assertEquals(5, a_bar)
// Side-effects of overwritten properties are kept
var counter = 0
val b = obj(foo = { counter += 1; "foo" }, bar = "bar", foo = "foobar")
assertEquals(1, counter)
val b_foo = b.foo
assertEquals("foobar", b_foo)
val b_bar = b.bar
assertEquals("bar", b_bar)
// In a position where unnesting is required - #1628
@noinline
def test(x: js.Dynamic): Unit = {
assertEquals(6, x.foo) // last wins
assertEquals(5, x.bar)
}
test(obj(foo = 4, bar = 5, foo = 6))
}
@Test def should_return_subclasses_of_js_Object_in_literal_construction_issue_783(): Unit = {
import js.Dynamic.{ literal => obj }
val a: js.Object = obj(theValue = 1)
assertTrue(a.hasOwnProperty("theValue"))
assertFalse(a.hasOwnProperty("noValue"))
val b: js.Object = obj("theValue" -> 2)
assertTrue(b.hasOwnProperty("theValue"))
assertFalse(b.hasOwnProperty("noValue"))
}
}
| xuwei-k/scala-js | test-suite/js/src/test/scala/org/scalajs/testsuite/jsinterop/DynamicTest.scala | Scala | bsd-3-clause | 10,830 |
package org.jetbrains.plugins.scala
package lang.rearranger
import java.util
import com.intellij.internal.statistic.UsageTrigger
import com.intellij.openapi.editor.Document
import com.intellij.openapi.util.{Pair, TextRange}
import com.intellij.psi.PsiElement
import com.intellij.psi.codeStyle.CodeStyleSettings
import com.intellij.psi.codeStyle.arrangement.`match`.{ArrangementSectionRule, StdArrangementEntryMatcher, StdArrangementMatchRule}
import com.intellij.psi.codeStyle.arrangement.group.ArrangementGroupingRule
import com.intellij.psi.codeStyle.arrangement.model.{ArrangementAtomMatchCondition, ArrangementCompositeMatchCondition, ArrangementMatchCondition}
import com.intellij.psi.codeStyle.arrangement.std.StdArrangementTokens.EntryType._
import com.intellij.psi.codeStyle.arrangement.std.StdArrangementTokens.Grouping._
import com.intellij.psi.codeStyle.arrangement.std.StdArrangementTokens.Modifier._
import com.intellij.psi.codeStyle.arrangement.std.StdArrangementTokens.Order._
import com.intellij.psi.codeStyle.arrangement.std.StdArrangementTokens._
import com.intellij.psi.codeStyle.arrangement.std._
import com.intellij.psi.codeStyle.arrangement.{ArrangementSettings, _}
import scala.collection.JavaConversions._
import scala.collection.{immutable, mutable}
/**
* @author Roman.Shein
* Date: 08.07.13
*/
class ScalaRearranger extends Rearranger[ScalaArrangementEntry] with ArrangementStandardSettingsAware {
override def parseWithNew(root: PsiElement, document: Document, ranges: java.util.Collection[TextRange],
element: PsiElement, settings: ArrangementSettings): Pair[ScalaArrangementEntry, java.util.List[ScalaArrangementEntry]] = {
val groupingRules = getGroupingRules(settings)
val existingInfo = new ScalaArrangementParseInfo
root.accept(new ScalaArrangementVisitor(existingInfo, document, collectionAsScalaIterable(ranges), groupingRules))
val newInfo = new ScalaArrangementParseInfo
element.accept(new ScalaArrangementVisitor(newInfo, document, Iterable(element.getTextRange), groupingRules))
if (newInfo.entries.size != 1) {
null
} else {
Pair.create(newInfo.entries.head, existingInfo.entries)
}
}
override def parse(root: PsiElement, document: Document,
ranges: java.util.Collection[TextRange], settings: ArrangementSettings): util.List[ScalaArrangementEntry] = {
UsageTrigger.trigger(ScalaRearranger.featureId)
val info = new ScalaArrangementParseInfo
root.accept(new ScalaArrangementVisitor(info, document, ranges, getGroupingRules(settings)))
if (settings != null) {
for (rule <- settings.getGroupings) {
if (DEPENDENT_METHODS == rule.getGroupingType) {
setupUtilityMethods(info, rule.getOrderType)
} else if (JAVA_GETTERS_AND_SETTERS == rule.getGroupingType) {
setupJavaGettersAndSetters(info)
} else if (SCALA_GETTERS_AND_SETTERS == rule.getGroupingType) {
setupScalaGettersAndSetters(info)
}
}
}
info.entries
}
  override def getBlankLines(settings: CodeStyleSettings, parent: ScalaArrangementEntry,
                             previous: ScalaArrangementEntry, target: ScalaArrangementEntry): Int = {
if (previous == null) {
-1
} else {
val codeStyleSettings = settings.getCommonSettings(ScalaLanguage.INSTANCE) //probably this will not work
def getBlankLines(typeAround: ArrangementSettingsToken) =
if (typeAround == VAL || typeAround == VAR || typeAround == TYPE) codeStyleSettings.BLANK_LINES_AROUND_FIELD
else if (typeAround == FUNCTION || typeAround == MACRO) codeStyleSettings.BLANK_LINES_AROUND_METHOD
else if (typeAround == CLASS || typeAround == TRAIT || typeAround == OBJECT) {
codeStyleSettings.BLANK_LINES_AROUND_CLASS
} else -1
val targetType = target.innerEntryType.getOrElse(target.getType)
val previousType = previous.innerEntryType.getOrElse(previous.getType)
Math.max(getBlankLines(targetType), getBlankLines(previousType))
}
}
private def getGroupingRules(settings: ArrangementSettings) = {
var result = immutable.HashSet[ArrangementSettingsToken]()
if (settings != null) {
for (rule <- settings.getGroupings) {
result = result + rule.getGroupingType
}
}
result
}
override def getDefaultSettings: StdArrangementSettings = ScalaRearranger.defaultSettings
override def getSerializer = ScalaRearranger.SETTINGS_SERIALIZER
override def getSupportedGroupingTokens: util.List[CompositeArrangementSettingsToken] =
seqAsJavaList(immutable.List(new CompositeArrangementSettingsToken(DEPENDENT_METHODS, BREADTH_FIRST, DEPTH_FIRST),
new CompositeArrangementSettingsToken(JAVA_GETTERS_AND_SETTERS),
new CompositeArrangementSettingsToken(SCALA_GETTERS_AND_SETTERS),
new CompositeArrangementSettingsToken(SPLIT_INTO_UNARRANGEABLE_BLOCKS_BY_EXPRESSIONS),
new CompositeArrangementSettingsToken(SPLIT_INTO_UNARRANGEABLE_BLOCKS_BY_IMPLICITS)
))
override def getSupportedMatchingTokens: util.List[CompositeArrangementSettingsToken] =
seqAsJavaList(immutable.List(new CompositeArrangementSettingsToken(General.TYPE,
scalaTypesValues.toList), new CompositeArrangementSettingsToken(General.MODIFIER, scalaModifiers.toList),
new CompositeArrangementSettingsToken(General.ORDER, Order.KEEP, Order.BY_NAME)))
override def isEnabled(token: ArrangementSettingsToken, current: ArrangementMatchCondition): Boolean =
(scalaTypesValues.contains(token) || supportedOrders.contains(token)) ||
(if (current != null) {
val tokenType = ArrangementUtil.parseType(current)
if (tokenType != null) {
tokensForType(tokenType).contains(token)
} else {
commonModifiers.contains(token)
}
} else {
commonModifiers.contains(token)
})
override def buildMatcher(condition: ArrangementMatchCondition) = throw new IllegalArgumentException("Can't build a matcher for condition " + condition)
override def getMutexes: util.List[util.Set[ArrangementSettingsToken]] = seqAsJavaList(immutable.List(scalaAccessModifiersValues, scalaTypesValues))
private def setupUtilityMethods(info: ScalaArrangementParseInfo, orderType: ArrangementSettingsToken) {
if (DEPTH_FIRST == orderType) {
for (root <- info.getMethodDependencyRoots) {
setupDepthFirstDependency(root)
}
}
else if (BREADTH_FIRST == orderType) {
for (root <- info.getMethodDependencyRoots) {
setupBreadthFirstDependency(root)
}
}
else {
assert(assertion = false, orderType)
}
}
private def setupDepthFirstDependency(info: ScalaArrangementDependency) {
for (dependency <- info.getDependentMethodInfos) {
setupDepthFirstDependency(dependency)
val dependentEntry = dependency.getAnchorMethod
if (dependentEntry.getDependencies == null) {
dependentEntry.addDependency(info.getAnchorMethod)
}
}
}
private def setupBreadthFirstDependency(info: ScalaArrangementDependency) {
val toProcess = mutable.Queue[ScalaArrangementDependency]()
toProcess += info
while (toProcess.nonEmpty) {
val current = toProcess.dequeue()
for (dependencyInfo <- current.getDependentMethodInfos) {
val dependencyMethod = dependencyInfo.getAnchorMethod
if (dependencyMethod.getDependencies == null) {
dependencyMethod.addDependency(current.getAnchorMethod)
}
toProcess += dependencyInfo
}
}
}
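  // Both traversals above record, for every dependent method, a dependency on its anchor method;
  // they differ only in visit order. That order matters when a method is reachable from several
  // anchors: the first traversal step to reach it wins (the getDependencies == null check).
  // DEPTH_FIRST exhausts one branch before moving on, BREADTH_FIRST walks the tree level by level.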
private def setupJavaGettersAndSetters(info: ScalaArrangementParseInfo) = setupGettersAndSetters(info.javaProperties)
private def setupScalaGettersAndSetters(info: ScalaArrangementParseInfo) = setupGettersAndSetters(info.scalaProperties)
private def setupGettersAndSetters(properties: Iterable[ScalaPropertyInfo]) {
for (propertyInfo <- properties) {
if (propertyInfo.isComplete && propertyInfo.setter.getDependencies == null) {
propertyInfo.setter.addDependency(propertyInfo.getter)
}
}
}
}
object ScalaRearranger {
private val featureId = "scala.rearrange"
private def addCondition(matchRules: immutable.List[ArrangementSectionRule], conditions: ArrangementSettingsToken*) = {
if (conditions.length == 1) {
ArrangementSectionRule.create(
new StdArrangementMatchRule(
new StdArrangementEntryMatcher(new ArrangementAtomMatchCondition(conditions(0), conditions(0)))
)
) :: matchRules
} else {
val composite = new ArrangementCompositeMatchCondition
for (condition <- conditions) {
composite.addOperand(new ArrangementAtomMatchCondition(condition, condition))
}
ArrangementSectionRule.create(new StdArrangementMatchRule(new StdArrangementEntryMatcher(composite))) :: matchRules
}
}
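  // For example, addCondition(Nil, FUNCTION, PUBLIC, FINAL) yields one section rule whose matcher
  // is the composite condition (FUNCTION && PUBLIC && FINAL), while addCondition(Nil, CONSTRUCTOR)
  // yields a rule with a single atom condition.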
private def getDefaultSettings = {
val groupingRules = immutable.List[ArrangementGroupingRule](new ArrangementGroupingRule(DEPENDENT_METHODS, DEPTH_FIRST), new ArrangementGroupingRule(JAVA_GETTERS_AND_SETTERS),
new ArrangementGroupingRule(SCALA_GETTERS_AND_SETTERS), new ArrangementGroupingRule(SPLIT_INTO_UNARRANGEABLE_BLOCKS_BY_IMPLICITS))
var matchRules = immutable.List[ArrangementSectionRule]()
matchRules = addCondition(matchRules, OBJECT, IMPLICIT)
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, FUNCTION, access, IMPLICIT)
}
matchRules = addCondition(matchRules, CLASS, IMPLICIT)
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, TYPE, access, FINAL)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, TYPE, access)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, VAL, access, FINAL, LAZY)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, VAL, access, FINAL)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, VAL, access, LAZY)
}
matchRules = addCondition(matchRules, VAL, ABSTRACT)
matchRules = addCondition(matchRules, VAL, OVERRIDE)
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, VAL, access)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, VAR, access, OVERRIDE)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, VAR, access)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, CONSTRUCTOR, access)
}
matchRules = addCondition(matchRules, CONSTRUCTOR)
matchRules = addCondition(matchRules, FUNCTION, PUBLIC, FINAL, OVERRIDE)
matchRules = addCondition(matchRules, FUNCTION, PROTECTED, FINAL, OVERRIDE)
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, FUNCTION, access)
}
matchRules = addCondition(matchRules, MACRO, PUBLIC, OVERRIDE)
matchRules = addCondition(matchRules, MACRO, PROTECTED, OVERRIDE)
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, MACRO, access)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, TRAIT, access, ABSTRACT, SEALED)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, TRAIT, access, ABSTRACT)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, TRAIT, access, SEALED)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, TRAIT, access)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, CLASS, access, ABSTRACT, SEALED)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, CLASS, access, ABSTRACT)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, CLASS, access, SEALED)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, CLASS, access)
}
for (access <- scalaAccessModifiersValues) {
matchRules = addCondition(matchRules, OBJECT, access)
}
//TODO: Is 'override' ok for macros?
new StdArrangementSettings(groupingRules, matchRules.reverse)
}
private val defaultSettings = getDefaultSettings
private val SETTINGS_SERIALIZER = new DefaultArrangementSettingsSerializer(new ScalaSettingsSerializerMixin(), defaultSettings)
} | ilinum/intellij-scala | src/org/jetbrains/plugins/scala/lang/rearranger/ScalaRearranger.scala | Scala | apache-2.0 | 12,797 |
package scutil.lang.tc
object MonoidSyntax extends MonoidSyntax
trait MonoidSyntax {
implicit final class MonoidSyntaxExt[T](peer:T)(implicit T:Monoid[T]) {
def times(count:Int):T = T.times(peer, count)
}
implicit final class IterableMonoidSyntaxExt[T](peer:Iterable[T])(implicit T:Monoid[T]) {
def combineAll:T = T.combineAll(peer)
}
implicit final class IterableMonoidSyntax2Ext[T](peer:Iterable[T]) {
def foldMap[U](func:T=>U)(implicit U:Monoid[U]):U = U.foldMap(peer)(func)
}
}
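// Usage sketch (editorial addition, not part of the original file): how the extension methods read
// at a call site. It assumes MonoidSyntax is imported and that a Monoid[String] instance (string
// concatenation with "" as empty) is in implicit scope; the values shown are hypothetical.
//
//   import scutil.lang.tc.MonoidSyntax._
//
//   "ab" times 3                          // "ababab"
//   List("a", "b", "c").combineAll        // "abc"
//   List(1, 2, 3).foldMap(_.toString)     // "123"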
| ritschwumm/scutil | modules/core/src/main/scala/scutil/lang/tc/MonoidSyntax.scala | Scala | bsd-2-clause | 498 |
package stormlantern.consul.client.loadbalancers
import org.scalatest.{ Matchers, FlatSpecLike }
class RoundRobinLoadBalancerSpec extends FlatSpecLike with Matchers {
"The RoundRobinLoadBalancer" should "select a connection" in {
val sut = new RoundRobinLoadBalancer
sut.selectConnection shouldBe empty
sut.connectionProviderAdded("one")
sut.selectConnection should contain("one")
sut.selectConnection should contain("one")
sut.connectionProviderAdded("two")
sut.connectionProviderAdded("three")
sut.selectConnection should contain("one")
sut.selectConnection should contain("two")
sut.selectConnection should contain("three")
sut.selectConnection should contain("one")
sut.connectionProviderRemoved("two")
sut.selectConnection should contain("one")
sut.selectConnection should contain("three")
}
}
| derjust/reactive-consul | client/src/test/scala/stormlantern/consul/client/loadbalancers/RoundRobinLoadBalancerSpec.scala | Scala | mit | 863 |
package com.mnenmenth.camclient.core
import java.awt.event.{ActionEvent, ActionListener, WindowAdapter, WindowEvent}
import java.awt.image.BufferedImage
import java.awt.{Dimension, Graphics, GraphicsEnvironment}
import java.io._
import java.net.{Socket, SocketAddress}
import javax.imageio.ImageIO
import javax.swing.{JButton, JFrame, JPanel}
import scala.collection.mutable
/**
* Created by Mnenmenth Alkaborin
* Please refer to LICENSE file if included
* for licensing information
* https://github.com/Mnenmenth
*/
object CamClient {
private val winSize = new Dimension(640, 480)
var imgQueue: mutable.Queue[BufferedImage] = mutable.Queue[BufferedImage]()
val gfx = GraphicsEnvironment.getLocalGraphicsEnvironment.getDefaultScreenDevice.getDefaultConfiguration
def queueImg(data: Array[Byte]): Unit = {
//val img = new BufferedImage(winSize.width, winSize.height, BufferedImage.TYPE_3BYTE_BGR)
//img.setData(Raster.createRaster(img.getSampleModel, new DataBufferByte(buff, buff.length), new Point()))
val img = new BufferedImage(winSize.width, winSize.height, BufferedImage.TYPE_3BYTE_BGR)
img.getRaster.setDataElements(0, 0, winSize.width, winSize.height, data)
//println(img == null)
//ImageIO.write(img, "png", new File(s"./ey$data"))
imgQueue.enqueue(img)
//SwingUtilities.invokeLater(() => imgQueue += img)
}
def main(args: Array[String]): Unit = {
val queueThread = new Thread(() => {
var socket = new Socket("192.168.1.125", 2223)
val stream = socket.getInputStream
val reader = new BufferedReader(new InputStreamReader(stream))
val datastream = new DataInputStream(stream)
while (socket.isConnected) {
try {
val len = reader.readLine()
val buff = new Array[Byte](len.toInt)
datastream.readFully(buff)
//val data = new ByteArrayInputStream(buff)
queueImg(buff)
} catch {
case _: java.lang.NumberFormatException =>
socket.close()
socket = new Socket("192.168.1.125", 2223)
}
}
}
)
val frame = new JFrame("CamClient")
val panel = new JPanel {
override def paintComponent(g: Graphics): Unit = {
super.paintComponent(g)
        if (imgQueue.length >= 30) {
          // drop returns a new collection and leaves the mutable queue untouched;
          // clear() actually discards the stale backlog once it grows too large
          imgQueue.clear()
        }
if (imgQueue.nonEmpty) {
g.drawImage(imgQueue.dequeue(), 0, 0, winSize.getWidth.toInt, winSize.getHeight.toInt, null)
println("pls")
}
repaint()
}
}
frame.add(panel)
frame.setDefaultCloseOperation(JFrame.EXIT_ON_CLOSE)
frame.setSize(winSize)
frame.setMinimumSize(winSize)
frame.setVisible(true)
frame.addWindowListener(new WindowAdapter {
override def windowClosing(e: WindowEvent): Unit = {
super.windowClosing(e)
queueThread.interrupt()
}
})
queueThread.start()
}
}
| Mnenmenth/RobotCode | Pi/CamClient/src/main/scala/com/mnenmenth/camclient/core/CamClient.scala | Scala | apache-2.0 | 2,941 |
package actors
import akka.actor.{Props, ActorRef, Actor}
import utils.{StockQuote, FakeStockQuote}
import java.util.Random
import scala.collection.immutable.{HashSet, Queue}
import scala.collection.JavaConverters._
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext.Implicits.global
import play.libs.Akka
/**
 * There is one BeaconActor per beacon symbol. The BeaconActor maintains a list of users watching the beacon and a
 * rolling history of its RSSI values, updated as new readings arrive.
*/
class BeaconActor(symbol: String) extends Actor {
lazy val stockQuote: StockQuote = new FakeStockQuote
protected[this] var watchers: HashSet[ActorRef] = HashSet.empty[ActorRef]
var beaconHistory: Queue[java.lang.Double] = {
Queue.fill(50)(-74)
}
def receive = {
case updateRSSI @ UpdateRSSI(symbol: String, rssi: Int) =>
assert(this.symbol == symbol, s"${this.symbol} != $symbol")
println(s"Received $updateRSSI")
beaconHistory = beaconHistory.drop(1) :+ new java.lang.Double(rssi)
// notify watchers
watchers.foreach(_ ! RSSIUpdate(symbol, rssi))
case msg@WatchBeacon(_) =>
println(msg)
// send the stock history to the user
sender ! BeaconHistory(symbol, beaconHistory.asJava)
// add the watcher to the list
watchers = watchers + sender
case msg@UnwatchStock(_) =>
println(msg)
watchers = watchers - sender
}
}
class StocksActor extends Actor {
def receive = {
case updateRSSI @ UpdateRSSI(symbol, rssi) =>
// get or create the StockActor for the symbol and forward this message
context.child(symbol).getOrElse {
println(s"Creating a new actor for $symbol, self = $self")
context.actorOf(Props(new BeaconActor(symbol)), symbol)
} forward updateRSSI
case watchStock @ WatchBeacon(symbol) =>
// get or create the StockActor for the symbol and forward this message
context.child(symbol).getOrElse {
context.actorOf(Props(new BeaconActor(symbol)), symbol)
} forward watchStock
case unwatchStock @ UnwatchStock(Some(symbol)) =>
// if there is a StockActor for the symbol forward this message
context.child(symbol).foreach(_.forward(unwatchStock))
case unwatchStock @ UnwatchStock(None) =>
// if no symbol is specified, forward to everyone
context.children.foreach(_.forward(unwatchStock))
case FetchAllBeaconSymbols =>
println(s"61955_13474: ${context.child("61955_13474").isDefined}, self = $self")
println(s"Children size = ${context.children.size}")
val symbols = context.children.toList.map(_.path.name).toList
println(s"All children are $symbols")
sender ! AllBeaconSymbols(symbols.asJava)
}
}
object StocksActor {
lazy val stocksActor: ActorRef = Akka.system.actorOf(Props(classOf[StocksActor]))
}
case object FetchLatest
case object FetchAllBeaconSymbols
case class AllBeaconSymbols(symbols: java.util.List[String])
case class RSSIUpdate(symbol: String, rssi: Int)
case class BeaconHistory(symbol: String, history: java.util.List[java.lang.Double])
case class WatchBeacon(symbol: String)
case class UpdateRSSI(symbol: String, rssi: Int)
case class UnwatchStock(symbol: Option[String])
| gkossakowski/vernier | ibeacon-server/app/actors/BeaconActor.scala | Scala | mit | 3,296 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.raster.iterators
import java.util.{Map => JMap}
import com.typesafe.scalalogging.LazyLogging
import org.apache.accumulo.core.data.{Key, Value}
import org.apache.accumulo.core.iterators.{IteratorEnvironment, SortedKeyValueIterator}
class IndexedSpatioTemporalFilter
extends GeomesaFilteringIterator
with HasFeatureType
with HasIndexValueDecoder
with HasSpatioTemporalFilter
with LazyLogging {
override def init(source: SortedKeyValueIterator[Key, Value],
options: JMap[String, String],
env: IteratorEnvironment) = {
super.init(source, options, env)
initFeatureType(options)
init(featureType, options)
this.source = source.deepCopy(env)
}
override def setTopConditionally() = {
val sourceValue = source.getTopValue
val meetsFilter = stFilter == null || {
val sf = indexEncoder.deserialize(sourceValue.get)
stFilter.evaluate(sf)
}
if (meetsFilter) {
topKey = source.getTopKey
topValue = sourceValue
}
}
}
| ddseapy/geomesa | geomesa-accumulo/geomesa-accumulo-raster/src/main/scala/org/locationtech/geomesa/raster/iterators/IndexedSpatioTemporalFilter.scala | Scala | apache-2.0 | 1,533 |
package com.karasiq.mailrucloud.api
import scala.concurrent.{ExecutionContext, Future}
import scala.language.postfixOps
import akka.actor.ActorSystem
import akka.http.scaladsl.model.{HttpRequest, HttpResponse}
import akka.stream.Materializer
import com.karasiq.mailrucloud.api.impl.DefaultMailCloudContext
trait MailCloudContext {
implicit val actorSystem: ActorSystem
implicit val materializer: Materializer
implicit val executionContext: ExecutionContext
def doHttpRequest(request: HttpRequest, handleRedirects: Boolean = false): Future[HttpResponse]
}
trait MailCloudContextProvider {
val context: MailCloudContext
}
object MailCloudContext {
def apply()(implicit as: ActorSystem): MailCloudContext = new DefaultMailCloudContext
}
| Karasiq/mailrucloud-api | library/src/main/scala/com/karasiq/mailrucloud/api/MailCloudContext.scala | Scala | apache-2.0 | 752 |
package ctlmc
import ctlmc.bddgraph._
class Model(
val parameters: Model.Parameters,
val states: Array[State],
val transitions: Graph
)
object Model {
type ParameterName = String
type Domain = Map[String, Int]
type Parameters = Map[ParameterName, (Domain, Int)]
}
| fpoli/ctlmc | src/main/scala/Model.scala | Scala | gpl-3.0 | 272 |
/*
* Copyright 2014 James Shade
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.shade.common.collection
object MapDecorators {
implicit class MapJoin[K, V1](map1: Map[K, V1]) {
def outerJoin[V2, R](map2: Map[K, V2])(join: (Option[V1], Option[V2]) => R): Map[K, R] = {
(map1.keys ++ map2.keys).map { key =>
key -> join(map1.get(key), map2.get(key))
}.toMap
}
def leftJoin[V2, R](map2: Map[K, V2])(join: (V1, Option[V2]) => R): Map[K, R] = {
map1.keys.map { key =>
key -> join(map1(key), map2.get(key))
}.toMap
}
def rightJoin[V2, R](map2: Map[K, V2])(join: (Option[V1], V2) => R): Map[K, R] = {
map2.keys.map { key =>
key -> join(map1.get(key), map2(key))
}.toMap
}
def innerJoin[V2, R](map2: Map[K, V2])(join: (V1, V2) => R): Map[K, R] = {
map1.keys.collect {
case key if map2.contains(key) => key -> join(map1(key), map2(key))
}.toMap
}
}
}
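// Usage sketch (editorial addition, not part of the original file; maps and values are
// hypothetical): how the four joins behave on two small maps, assuming MapDecorators._ is imported.
//
//   val left  = Map("a" -> 1, "b" -> 2)
//   val right = Map("b" -> 20, "c" -> 30)
//
//   left.innerJoin(right)(_ + _)            // Map("b" -> 22)
//   left.leftJoin(right)((l, r) => (l, r))  // Map("a" -> (1, None), "b" -> (2, Some(20)))
//   left.rightJoin(right)((l, r) => (l, r)) // Map("b" -> (Some(2), 20), "c" -> (None, 30))
//   left.outerJoin(right)((l, r) => (l, r)) // all of "a", "b", "c", with Option values on both sides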
| jamesshade/common | src/main/scala/org/shade/common/collection/MapDecorators.scala | Scala | apache-2.0 | 1,501 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package iht.controllers.registration.executor
import iht.config.AppConfig
import iht.connector.CachingConnector
import iht.controllers.registration.{RegistrationController, routes => registrationRoutes}
import iht.forms.registration.CoExecutorForms
import iht.metrics.IhtMetrics
import iht.models.RegistrationDetails
import iht.views.html.registration.executor.executor_overview
import javax.inject.Inject
import play.api.data.Form
import play.api.mvc.{AnyContent, Call, MessagesControllerComponents, Request}
import uk.gov.hmrc.auth.core.AuthConnector
import uk.gov.hmrc.play.bootstrap.frontend.controller.FrontendController
import scala.concurrent.Future
class ExecutorOverviewControllerImpl @Inject()(val metrics: IhtMetrics,
val cachingConnector: CachingConnector,
val authConnector: AuthConnector,
val executorOverviewView: executor_overview,
implicit val appConfig: AppConfig,
val cc: MessagesControllerComponents) extends FrontendController(cc) with ExecutorOverviewController
trait ExecutorOverviewController extends RegistrationController with CoExecutorForms {
def cachingConnector: CachingConnector
override def guardConditions: Set[Predicate] = Set((rd, _) => rd.areOthersApplyingForProbate.getOrElse(false))
def metrics: IhtMetrics
def submitRoute = routes.ExecutorOverviewController.onSubmit()
def editSubmitRoute = routes.ExecutorOverviewController.onEditSubmit()
val executorOverviewView: executor_overview
private def badRequest(rd: RegistrationDetails, submitRoute: Call, showCancelRoute: Boolean,
formWithErrors: Form[Option[Boolean]], request: Request[AnyContent]) =
{
implicit val req = request
Future.successful(
BadRequest(executorOverviewView(
formWithErrors,
rd.areOthersApplyingForProbate.get,
rd.coExecutors,
submitRoute,
if (showCancelRoute) cancelToRegSummary else None)))
}
private def goodRequest(rd: RegistrationDetails, submitRoute: Call, showCancelRoute: Boolean, request: Request[AnyContent]) =
{
implicit val req = request
Future.successful(
Ok(executorOverviewView(executorOverviewForm,
rd.areOthersApplyingForProbate.getOrElse(false),
rd.coExecutors,
submitRoute,
if (showCancelRoute) cancelToRegSummary else None)))
}
def onPageLoad = pageLoad(showCancelRoute = false, submitRoute)
def onEditPageLoad = pageLoad(showCancelRoute = true, editSubmitRoute)
private def pageLoad(showCancelRoute: Boolean, route: Call) = authorisedForIht {
implicit request =>
withRegistrationDetailsRedirectOnGuardCondition { goodRequest(_, route, showCancelRoute, request) }
}
def onSubmit = submit(showCancelRoute = false, submitRoute)
def onEditSubmit = submit(showCancelRoute = true, editSubmitRoute)
private def submit(showCancelRoute: Boolean, route: Call) = authorisedForIht {
implicit request =>
withRegistrationDetailsRedirectOnGuardCondition { rd =>
val boundForm = executorOverviewForm.bindFromRequest
boundForm.fold(formWithErrors => badRequest(rd, route, showCancelRoute, formWithErrors, request), {addMore =>
(addMore, rd.areOthersApplyingForProbate, rd.coExecutors.isEmpty) match {
case (Some(true), Some(true), _) =>
Future.successful(Redirect(routes.CoExecutorPersonalDetailsController.onPageLoad(None)))
case (Some(false), Some(true), true) =>
badRequest(rd, route, showCancelRoute,
boundForm.withError("addMoreCoExecutors",
"error.applicant.insufficientCoExecutors"), request)
case _ => Future.successful(Redirect(registrationRoutes.RegistrationSummaryController.onPageLoad()))
}
})
}
}
} | hmrc/iht-frontend | app/iht/controllers/registration/executor/ExecutorOverviewController.scala | Scala | apache-2.0 | 4,649 |
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.2
* @date Sun Sep 16 22:35:14 EDT 2012
* @see LICENSE (MIT style license file).
* @see http://en.wikipedia.org/wiki/Gillespie_algorithm
*/
// U N D E R D E V E L O P M E N T
package scalation.dynamics
import scalation.linalgebra.{MatrixD, MatrixI, VectorD}
import scalation.util.Error
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `SSA` class implements the Gillespie Stochastic Simulation Algorithm 'SSA'.
* @param c the matrix giving sub-volume connectivity
* @param r the matrix indicating which the reactions that are active in each sub-volume
* @param z the matrix giving stoichiometry for all reactions
* @param x the matrix giving species population per volume
* @param t0 the start time for the simulation
*/
class SSA (c: MatrixI, r: MatrixI, z: MatrixI, x: MatrixD, t0: Double = 0.0)
extends Error
{
val L = c.dim1 // the number of sub-volumes
val R = r.dim2 // the number of possible reactions
val S = z.dim2 // the number of species (e.g., types of molecules)
if (c.dim2 != L) flaw ("constructor", "wrong dimensions for c matrix")
    if (r.dim1 != L) flaw ("constructor", "wrong dimensions for r matrix")
    if (z.dim1 != R) flaw ("constructor", "wrong dimensions for z matrix")
if (x.dim1 != L || x.dim2 != S) flaw ("constructor", "wrong dimensions for x matrix")
val cut = (.003, 3.0, 100.0) // cut-off values
val e = for (l <- 0 until L) yield r(l).sum + c(l).sum // reaction + diffusion events
var t = t0 // the simulation clock (current time)
println ("e = " + e)
val a = Array.ofDim [VectorD] (L)
for (l <- 0 until L) {
val a_l = new VectorD (e(l))
for (j <- 0 until e(l)) a_l(j) = .1 * x(l, j) // formula is application dependent
a(l) = a_l
} // for
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/**
*/
def simulate (tf: Double)
{
} // simulate
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/**
*/
override def toString = "a = " + a.deep
} // SSA class
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `SSATest` object tests the `SSA` class.
*/
object SSATest extends App
{
// Connectivity of (3) sub-volumes (L by L)
val c = new MatrixI ((3, 3), 0, 1, 0, // connectivity: 0 <-> 1 <-> 2
1, 0, 1,
0, 1, 0)
// Reactions that can occur in each sub-volume out of a total of 4 possible (L by R)
val r = new MatrixI ((3, 4), 1, 1, 0, 0, // sub-vol 0: reactions 0, 1
0, 1, 1, 0, // sub-vol 1: reactions 1, 2
0, 0, 1, 1) // sub-vol 2: reactions 2, 3
// Stoichiometry for each of 4 possible reactions (R by S)
val z = new MatrixI ((4, 4), -1, -1, 1, 0, // reaction 0: S0 + S1 -> S2
-1, 0, -1, 1, // reaction 1: S0 + S2 -> S3
0, -1, -1, 1, // reaction 2: S1 + S2 -> S3
                                 0,  1, -1, -1)        // reaction 3: S2 + S3 -> S1
// initial population for each species (L by S)
val x = new MatrixD ((3, 4), 50.0, 50.0, 0.0, 0.0, // initial pop. in sub-vol. 0
0.0, 0.0, 0.0, 0.0, // initial pop. in sub-vol. 1
                                  0.0,  0.0, 0.0, 0.0)    // initial pop. in sub-vol. 2
val pathway = new SSA (c, r, z, x)
println ("pathway = " + pathway)
} // SSATest object
| scalation/fda | scalation_1.2/src/main/scala/scalation/dynamics/SSA.scala | Scala | mit | 3,935 |
package org.openmole.site
import scalatags.Text.all._
import tools._
/*
* Copyright (C) 01/04/16 // [email protected]
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY, without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package object stylesheet {
lazy val GREEN = "#a6bf26"
lazy val DARK_GREY = "#555"
lazy val LIGHT_GREY = "#e7e7e7"
//
// lazy val VERY_LIGHT_GREY = "#e7e7e7"
//
// lazy val BS_GREY = "#808080"
//
// lazy val FUN_GREY = "#cccccc"
private def center(percentage: Int) = Seq(
width := s"$percentage%",
margin := "0 auto",
display := "block"
)
private def centerW(w: Int) = Seq(
width := s"${w}px",
margin := "0 auto",
display := "block"
)
/* lazy val mainDiv = Seq(
paddingTop := 100,
paddingBottom := 50,
minHeight := 800
) ++ center(50)*/
def rightDetailButtons(topValue: Int) = Seq(
top := topValue,
minWidth := 230,
lineHeight := "1em"
)
def leftDetailButtons(topValue: Int) = Seq(
top := topValue,
paddingRight := 50,
minWidth := 230,
lineHeight := "1em"
)
lazy val navigateDoc = Seq(
fixedPosition,
top := 200,
fontSize := "32px",
color := "black",
textDecoration := "none",
width := 30
)
lazy val stepHeader = Seq(
// width := "22%",
// fontSize := "22px",
// fontWeight := "bold",
// margin := "0 auto",
// minHeight := 85,
// width := "95%"
) ++ center(90)
val headerImg = Seq(
paddingRight := 20,
marginBottom := 30,
height := 80
)
lazy val previousDoc = Seq(
float := "left",
left := 230
) ++ navigateDoc
lazy val nextDoc = Seq(
right := 300,
float := "right"
) ++ navigateDoc
lazy val mainTitle = Seq(
color := DARK_GREY,
fontWeight := "bold",
fontSize := "35px",
padding := 10
)
lazy val mainText = Seq(
color := DARK_GREY
)
lazy val centerBox = Seq(
textAlign := "center"
) ++ center(70)
lazy val centerBox100 = Seq(
textAlign := "center"
) ++ center(100)
lazy val footer = Seq(
position := "relative",
clear := "both",
backgroundColor := "#f3d56",
color := "#ffffff"
)
val leftMole = Seq(
float := "left",
marginLeft := 20,
textAlign := "right",
maxHeight := 100
)
val memberStyle = Seq(
color := DARK_GREY,
fontSize := "25px",
paddingTop := 5
)
val partners = Seq(
width := 270,
padding := 50
)
val smallPartners = Seq(
width := 120,
padding := 30
)
val h1Like = Seq(
// color := "#444",
fontSize := "5.2rem",
// fontWeight := "bold",
textTransform := "uppercase",
// lineHeight := "50px"
)
def svgRunButton(top: Int) = Seq(
position := "absolute",
marginTop := top
)
def centerJustify(ratio: Int) = Seq(
width := s"$ratio%",
paddingTop := 10,
textAlign := "justify",
marginLeft := "auto",
marginRight := "auto"
)
val suggest = Seq(
padding := 25,
float := "right"
)
}
| openmole/openmole | openmole/bin/org.openmole.site/jvm/src/main/scala/org/openmole/site/stylesheet.scala | Scala | agpl-3.0 | 3,596 |
package com.olvind
package sui
import ammonite.ops._
import scala.language.implicitConversions
case class SuiLibrary(base: Path) extends Library {
/* todo: make requiresjs clever enough to figure this out by itself */
override val locations =
Seq(
base
)
override val prefixOpt = Some("Sui")
override val name = "semanticui"
override val typeMapper = SuiTypeMapper
override val memberMapper = SuiTypeMemberMethodMapper
override val indexNames = Set("index.js")
override val packageName = "chandu0101.scalajs.react.components.semanticui"
val icon = ComponentDef(CompName("Icon"))
val components: Seq[ComponentDef] =
Seq(
// ComponentDef(CompName("AutoControlledComponent")),
// ComponentDef(CompName("Embed")), //has weird enum for screen res
// ComponentDef(CompName("keyboardKey")),
// ComponentDef(CompName("leven")),
// ComponentDef(CompName("Select")),
// ComponentDef(CompName("Accordion")),
// ComponentDef(CompName("AccordionContent")),
// ComponentDef(CompName("AccordionTitle")),
// ComponentDef(CompName("Advertisement")),
// ComponentDef(CompName("Breadcrumb")),
// ComponentDef(CompName("BreadcrumbDivider")),
// ComponentDef(CompName("BreadcrumbSection")),
ComponentDef(CompName("Button"), domeTypeOpt = Some(DomInput)),
// ComponentDef(CompName("ButtonContent")),
// ComponentDef(CompName("ButtonGroup")),
// ComponentDef(CompName("ButtonOr")),
// ComponentDef(CompName("Card")),
// ComponentDef(CompName("CardContent")),
// ComponentDef(CompName("CardDescription")),
// ComponentDef(CompName("CardGroup")),
// ComponentDef(CompName("CardHeader")),
// ComponentDef(CompName("CardMeta")),
// ComponentDef(CompName("Checkbox")),
// ComponentDef(CompName("Comment")),
// ComponentDef(CompName("CommentAction")),
// ComponentDef(CompName("CommentActions")),
// ComponentDef(CompName("CommentAuthor")),
// ComponentDef(CompName("CommentAvatar")),
// ComponentDef(CompName("CommentContent")),
// ComponentDef(CompName("CommentGroup")),
// ComponentDef(CompName("CommentMetadata")),
// ComponentDef(CompName("CommentText")),
// ComponentDef(CompName("Confirm")),
ComponentDef(CompName("Container")),
// ComponentDef(CompName("Dimmer")),
// ComponentDef(CompName("DimmerDimmable")),
ComponentDef(CompName("Divider")),
// ComponentDef(CompName("Dropdown")),
// ComponentDef(CompName("DropdownDivider")),
// ComponentDef(CompName("DropdownHeader")),
// ComponentDef(CompName("DropdownItem")),
// ComponentDef(CompName("DropdownMenu")),
// ComponentDef(CompName("Feed")),
// ComponentDef(CompName("FeedContent")),
// ComponentDef(CompName("FeedDate")),
// ComponentDef(CompName("FeedEvent")),
// ComponentDef(CompName("FeedExtra")),
// ComponentDef(CompName("FeedLabel")),
// ComponentDef(CompName("FeedLike")),
// ComponentDef(CompName("FeedMeta")),
// ComponentDef(CompName("FeedSummary")),
// ComponentDef(CompName("FeedUser")),
ComponentDef(CompName("Flag")),
// ComponentDef(CompName("Form")),
// ComponentDef(CompName("FormButton")),
// ComponentDef(CompName("FormCheckbox")),
// ComponentDef(CompName("FormDropdown")),
// ComponentDef(CompName("FormField")),
// ComponentDef(CompName("FormGroup")),
// ComponentDef(CompName("FormInput")),
// ComponentDef(CompName("FormRadio")),
// ComponentDef(CompName("FormSelect")),
// ComponentDef(CompName("FormTextArea")),
ComponentDef(CompName("Grid")),
ComponentDef(CompName("GridColumn")),
// ComponentDef(CompName("GridRow")),
ComponentDef(CompName("Header")),
// ComponentDef(CompName("HeaderContent")),
// ComponentDef(CompName("HeaderSubheader")),
icon,
ComponentDef(CompName("IconGroup")),
ComponentDef(CompName("Image")),
// ComponentDef(CompName("ImageGroup")),
ComponentDef(CompName("Input"), domeTypeOpt = Some(DomInput)),
// ComponentDef(CompName("Item")),
// ComponentDef(CompName("ItemContent")),
// ComponentDef(CompName("ItemDescription")),
// ComponentDef(CompName("ItemExtra")),
// ComponentDef(CompName("ItemGroup")),
// ComponentDef(CompName("ItemHeader")),
// ComponentDef(CompName("ItemImage")),
// ComponentDef(CompName("ItemMeta")),
// ComponentDef(CompName("Label")),
// ComponentDef(CompName("LabelDetail")),
// ComponentDef(CompName("LabelGroup")),
ComponentDef(CompName("List")),
ComponentDef(CompName("ListContent")),
// ComponentDef(CompName("ListDescription")),
// ComponentDef(CompName("ListHeader")),
ComponentDef(CompName("ListIcon"), shared = Some(icon)),
ComponentDef(CompName("ListItem")),
// ComponentDef(CompName("ListList")),
// ComponentDef(CompName("Loader")),
ComponentDef(CompName("Menu")),
ComponentDef(CompName("MenuHeader")),
ComponentDef(CompName("MenuItem")),
ComponentDef(CompName("MenuMenu")),
// ComponentDef(CompName("Message")),
// ComponentDef(CompName("MessageContent")),
// ComponentDef(CompName("MessageHeader")),
// ComponentDef(CompName("MessageItem")),
// ComponentDef(CompName("MessageList")),
// ComponentDef(CompName("Modal")),
// ComponentDef(CompName("ModalActions")),
// ComponentDef(CompName("ModalContent")),
// ComponentDef(CompName("ModalDescription")),
// ComponentDef(CompName("ModalHeader")),
// ComponentDef(CompName("Popup")),
// ComponentDef(CompName("PopupContent")),
// ComponentDef(CompName("PopupHeader")),
// ComponentDef(CompName("Portal")),
// ComponentDef(CompName("Progress")),
// ComponentDef(CompName("Radio")),
// ComponentDef(CompName("Rail")),
// ComponentDef(CompName("Rating")),
// ComponentDef(CompName("RatingIcon")),
// ComponentDef(CompName("Reveal")),
// ComponentDef(CompName("RevealContent")),
// ComponentDef(CompName("Search")),
// ComponentDef(CompName("SearchCategory")),
// ComponentDef(CompName("SearchResult")),
// ComponentDef(CompName("SearchResults")),
ComponentDef(CompName("Segment")),
// ComponentDef(CompName("SegmentGroup")),
// ComponentDef(CompName("Sidebar")),
// ComponentDef(CompName("SidebarPushable")),
// ComponentDef(CompName("SidebarPusher")),
// ComponentDef(CompName("Statistic")),
// ComponentDef(CompName("StatisticGroup")),
// ComponentDef(CompName("StatisticLabel")),
// ComponentDef(CompName("StatisticValue")),
// ComponentDef(CompName("Step")),
// ComponentDef(CompName("StepContent")),
// ComponentDef(CompName("StepDescription")),
// ComponentDef(CompName("StepGroup")),
// ComponentDef(CompName("StepTitle")),
// ComponentDef(CompName("Table")),
// ComponentDef(CompName("TableBody")),
// ComponentDef(CompName("TableCell")),
// ComponentDef(CompName("TableFooter")),
// ComponentDef(CompName("TableHeader")),
// ComponentDef(CompName("TableHeaderCell")),
// ComponentDef(CompName("TableRow")),
ComponentDef(CompName("TextArea")) // ComponentDef(CompName("Visibility"))
)
}
| chandu0101/scalajs-react-components | gen/src/main/scala/com/olvind/sui/SuiLibrary.scala | Scala | apache-2.0 | 8,135 |
package biology
import physical.GeoCoordinate
class Birthplace(val name: String, val reef : Int, val location: GeoCoordinate) {}
| shawes/zissou | src/main/scala/biology/Birthplace.scala | Scala | mit | 131 |
import sbt._
import sbt.Keys._
import play.Play.autoImport._
import com.typesafe.sbt.SbtNativePackager._
object PublicOnFileSystem {
val settings = Seq(
mappings in Universal <++= (baseDirectory in Compile) { _ / "public" } map { dir: File =>
val directoryLen = dir.getCanonicalPath.length
val pathFinder = dir ** "*"
pathFinder.get map {
publicFile: File =>
publicFile -> ("public/" + publicFile.getCanonicalPath.substring(directoryLen))
}
}
)
} | pahomovda/scissis-prototype1 | project/PublicOnFileSystem.scala | Scala | mit | 502 |
package com.rklaehn.interval
import com.rklaehn.interval.IntervalTrieSampleCheck._
import org.scalacheck.Properties
import org.scalacheck.Prop._
import spire.math.Rational
import spire.syntax.all._
import spire.std.any._
object IntervalSeqSampleCheck extends Properties("IntervalSeq.Sample") {
// this will resolve to the Arbitrary instance for Boolean from scalacheck
implicit def arb = IntervalSeqArbitrary.arbIntervalSeq
// a test that works by sampling the result at all relevant places and checks consistency with the boolean operation
def unarySampleTest(a: IntervalSeq[Int], r: IntervalSeq[Int], op: Boolean => Boolean) = {
val support = a.edges.toArray.sorted.distinct
support.forall { value =>
val sameBefore = r.below(value) === op(a.below(value))
val sameAt = r.at(value) === op(a.at(value))
val sameAfter = r.above(value) === op(a.above(value))
sameBefore & sameAt & sameAfter
}
}
// a test that works by sampling the result at all relevant places and checks consistency with the boolean operation
def binarySampleTest(a: IntervalSeq[Int], b: IntervalSeq[Int], r: IntervalSeq[Int], op: (Boolean, Boolean) => Boolean) = {
val support = (a.edges ++ b.edges).toArray.sorted.distinct
support.forall { value =>
val sameBefore = r.below(value) === op(a.below(value), b.below(value))
val sameAt = r.at(value) === op(a.at(value), b.at(value))
val sameAfter = r.above(value) === op(a.above(value), b.above(value))
sameBefore & sameAt & sameAfter
}
}
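  // Worked illustration (editorial addition, not part of the original source): for r = a | b it is
  // enough to sample r at every edge of a and b, because membership is constant on the open
  // intervals between edges. With edges {3, 7}, checking below/at/above for 3 and 7 covers every
  // region where a | b could disagree with the pointwise OR of a and b.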
// a test that works by sampling the result at all relevant places and checks consistency with the boolean operation
  def trinarySampleTest(a: IntervalSeq[Int], b: IntervalSeq[Int], c: IntervalSeq[Int], r: IntervalSeq[Int], op: (Boolean, Boolean, Boolean) => Boolean) = {
val support = (a.edges ++ b.edges ++ c.edges).toArray.sorted.distinct
support.forall { value =>
val sameBefore = r.below(value) === op(a.below(value), b.below(value), c.below(value))
val sameAt = r.at(value) === op(a.at(value), b.at(value), c.at(value))
val sameAfter = r.above(value) === op(a.above(value), b.above(value), c.above(value))
sameBefore & sameAt & sameAfter
}
}
property("sample_not") = forAll { a: IntervalSeq[Int] =>
unarySampleTest(a, ~a, ~_)
}
property("sample_and") = forAll { (a: IntervalSeq[Int], b: IntervalSeq[Int]) =>
binarySampleTest(a, b, a & b, _ & _)
}
property("sample_or") = forAll { (a: IntervalSeq[Int], b: IntervalSeq[Int]) =>
binarySampleTest(a, b, a | b, _ | _)
}
property("sample_xor") = forAll { (a: IntervalSeq[Int], b: IntervalSeq[Int]) =>
binarySampleTest(a, b, a ^ b, _ ^ _)
}
property("toStringParse") = forAll { a0: IntervalSeq[Int] =>
    // first convert the intervals over Int to intervals over Rational, since that is what parse returns
val rationalIntervals = a0.intervals.map(_.mapBounds(Rational.apply))
val a: IntervalSeq[Rational] = (IntervalSeq.empty[Rational] /: rationalIntervals)(_ | IntervalSeq(_))
// then do the roundtrip test like with IntervalSet
val aText = a.toString
val b = IntervalSeq(aText)
a == b
}
property("isContiguous") = forAll { a: IntervalSeq[Int] =>
a.isContiguous == (a.intervals.size <= 1)
}
property("hull") = forAll { a: IntervalSeq[Int] =>
val hullSet = IntervalSeq(a.hull)
val outside = ~hullSet
val nothingOutside = (a & outside) == IntervalSeq.empty[Int]
val allInside = a.intervals.forall(i => hullSet.isSupersetOf(IntervalSeq(i)))
nothingOutside & allInside
}
/**
* Check optimized intersects method against naive implementation using &
*/
property("intersects/intersection") = forAll { (a: IntervalSeq[Int], b: IntervalSeq[Int]) =>
val r1 = a intersects b
val r2 = !(a & b).isEmpty
r1 == r2
}
/**
* Check optimized isSupersetOf method against naive implementation using &
*/
property("isSupersetOf/intersection") = forAll { (a: IntervalSeq[Int], b: IntervalSeq[Int]) =>
val r1 = a isSupersetOf b
val r2 = (a & b) == b
r1 == r2
}
property("isSupersetOf") = forAll { (a: IntervalSeq[Int], x: Int) =>
val b = a & IntervalSeq.atOrAbove(x)
a isSupersetOf b
}
property("disjoint") = forAll { (s: IntervalSeq[Int], x: Int) =>
val a = s & IntervalSeq.below(x)
val b = s & IntervalSeq.atOrAbove(x)
!(a intersects b)
}
property("equals/hashCode") = forAll { (a: IntervalSeq[Int], b: IntervalSeq[Int]) =>
if (a == b) a.hashCode == b.hashCode else true
}
property("iterator") = forAll { a: IntervalSeq[Int] =>
a.intervalIterator.toIndexedSeq == a.intervals.toIndexedSeq
}
} | rklaehn/intervalset | src/test/scala/com/rklaehn/interval/IntervalSeqSampleCheck.scala | Scala | apache-2.0 | 4,705 |
package io.scalding.approximations.BloomFilter
import com.twitter.algebird._
import com.twitter.scalding._
import io.scalding.approximations.model.Wikipedia
/**
* Generate a BF per month - containing all the unique authors of Wikipedia that were active during that month
* There are roughly 5 Million authors in dataset
*
* @author Antonios Chalkiopoulos - http://scalding.io
*/
class WikipediaBF(args:Args) extends Job(args) {
  val input = args.getOrElse("input", "datasets/wikipedia/wikipedia-revisions-sample.tsv")
val serialized = args.getOrElse("serialized","results/wikipedia-per-month-BF-serialized")
// We don't know a priori how big the filters need to be
// So let's use HLL to get an approximate count
val hllAggregator = HyperLogLogAggregator
.sizeAggregator(12)
.composePrepare[Wikipedia](_.ContributorID.toString.getBytes("UTF-8"))
val wikiHLL = TypedPipe.from(TypedTsv[Wikipedia.WikipediaType](input))
.map { Wikipedia.fromTuple }
.map { wiki => wiki.copy(DateTime = wiki.DateTime.substring(0,7)) } // extract YYYY-MM
.groupBy { wiki => wiki.DateTime }
.aggregate(hllAggregator)
.mapped
.map { case (key:String,value:HLL) => (key,value.approximateSize.estimate) }
.sumByKey
.toTypedPipe
.groupAll
.values
// Also let's store the HLL results
.write(TypedTsv("results/wikipedia-per-month-HLL.tsv") )
// Example output is => Key = 2011-02 , Value = 149804
// Now that we know how large each group is, we will instantiate one BloomFilterMonoid per group
// Also as HLL is an approximate count we will add 10 % to the size of the filters (* 1.1)
val BFilters =
wikiHLL.map {
case (key,value) => (key, BloomFilter(numEntries = (value*1.1).toInt , fpProb = 0.02D) )
}
// Example output is => Key = 2011-02 , Value = BloomFilterMonoid(164784,0.02)
// All the above calculations have been done JUST for creating optimal sized BF
// So now, we will read in all the data, group by month and JOIN them with the initialized BloomFilters
val wikiData: TypedPipe[(String, (List[Long], BloomFilterMonoid))] = TypedPipe.from(TypedTsv[Wikipedia.WikipediaType](input))
.map { Wikipedia.fromTuple }
.map { wiki => ( wiki.DateTime.substring(0,7), wiki.ContributorID ) }
// extract YYYY-MM
.group
.toList
.join { BFilters }
.toTypedPipe
.write(TypedTsv("joined"))
// All that is left to happen is to create a BF for every item in the group and then UNION them together
val result = wikiData
.map { case(month,(contributorIDlist,bf)) => (month, bf.create(contributorIDlist.map(_+""):_*))}
.write(source.TypedSequenceFile("results/wikipedia-per-month-BF2"))
// And if you want to write ONE file per month - use either TemplatedSequenceFile or PartitionedSequenceFile
// Note that those taps work only on --hdfs mode (!)
result
.toPipe('month, 'bf)
.write(TemplatedSequenceFile("results/wikipedia-per-month-BF/","month-%s",'month))
// .write(PartitionedSequenceFile("results/wikipedia-per-month-BF/",pathFields = 'month))
}
object WikipediaBFRunner extends App {
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.util.ToolRunner
val timer = io.scalding.approximations.Utils.withTimeCalc("WikipediaBF time") {
ToolRunner.run(new Configuration, new Tool, (classOf[WikipediaBF].getName :: "--local" :: args.toList).toArray)
}
println(s"Execution time: $timer msec")
}
/** Results of HLL - Execution time: 1,372,025 msec ~ 24 minutes on a MacBookPro --local
2007-10,166996
2003-01,981
2012-05,141356
2008-11,152595
2009-05,157920
2010-01,157696
2004-05,6350
2001-09,211
2003-06,1418
2001-02,21
2008-05,165681
2012-09,125894
2009-01,157483
2002-01,192
2009-11,159021
2005-06,25026
2010-04,154992
2004-01,3133
2006-02,77046
2011-02,149804
2004-12,14129
2002-12,666
2012-02,144421
2005-02,15250
2007-04,187151
2010-08,133454
2013-01,137526
2006-06,110596
2003-05,1242
2009-09,148815
2002-09,528
2007-08,159038
2001-11,216
2013-06,118390
2011-12,136025
2012-10,133134
2001-05,36
2010-12,127857
2011-08,147036
2001-03,45
2013-04,126339
2009-02,156397
2003-12,2600
2001-08,117
2012-04,136469
2009-04,152883
2004-11,13623
2005-05,23861
2010-03,161559
2003-07,1800
2008-04,174184
2002-06,279
2004-04,5746
2009-12,147700
2012-08,130004
2002-04,237
2006-10,145966
2003-04,897
2007-07,162248
2002-11,594
2011-04,143488
2013-07,127391
2007-11,161548
2012-03,144520
2001-12,249
2004-07,7806
2011-07,145639
2007-03,198309
2001-04,32
2005-12,62300
2008-08,148193
2009-08,145785
2005-01,14633
2010-07,133337
2008-01,169771
2005-10,40307
2006-05,108635
2010-02,147269
2006-04,97928
2005-08,35970
2008-03,181257
2011-06,144215
2004-10,11853
2006-11,159335
2006-09,134422
2002-07,313
2009-03,168878
2005-04,21652
2003-08,1992
2007-01,174574
2003-11,2514
2008-12,146775
2012-07,134653
2009-07,145757
2002-03,275
2004-03,5602
2006-01,69737
2004-08,8816
2010-10,135862
2007-12,151433
2011-03,161782
2002-10,610
2007-06,159144
2007-02,176912
2008-02,165678
2010-06,141161
2011-10,143617
2013-08,35616
2001-07,114
2006-12,156938
2013-03,135337
2006-08,137256
2008-07,149341
2003-03,930
2005-07,31325
2012-11,133406
2008-06,151126
2002-02,291
2008-10,157479
2005-11,43552
2005-09,35132
2006-03,93270
2002-08,417
2011-05,146364
2010-11,133634
2001-10,200
2004-06,7361
2009-06,153742
2012-06,127486
2011-01,147708
2010-05,152890
2008-09,152471
2004-02,4230
2003-10,2358
2003-09,2007
2002-05,227
2010-09,131116
2013-02,131526
2001-01,21
2007-05,180177
2009-10,154551
2006-07,119011
2005-03,18591
2012-01,145807
2003-02,989
2012-12,124838
2007-09,161844
2004-09,10315
2011-09,145536
2013-05,130017
2011-11,143233
2001-06,40
*/
| scalding-io/social-media-analytics | src/main/scala/io/scalding/approximations/BloomFilter/WikipediaBF.scala | Scala | apache-2.0 | 5,709 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.tools.utils
import com.beust.jcommander.{JCommander, Parameter}
import org.junit.runner.RunWith
import org.locationtech.geomesa.tools.utils.ParameterConverters.DurationConverter
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import scala.concurrent.duration.Duration
@RunWith(classOf[JUnitRunner])
class ParameterConvertersTest extends Specification {
"ParameterConverters" should {
"parse durations" in {
val params = new AnyRef {
@Parameter(names = Array("-d"), description = "duration", converter = classOf[DurationConverter])
var duration: Duration = _
}
val jc = new JCommander()
jc.setProgramName("test")
jc.addCommand("foo", params)
jc.parse("foo", "-d", "5 SECONDS")
jc.getParsedCommand mustEqual "foo"
params.duration mustEqual Duration("5 seconds")
}
}
}
| elahrvivaz/geomesa | geomesa-tools/src/test/scala/org/locationtech/geomesa/tools/utils/ParameterConvertersTest.scala | Scala | apache-2.0 | 1,375 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.twitter.zipkin.query
import com.twitter.conversions.time._
import com.twitter.finagle.stats.{StatsReceiver, NullStatsReceiver}
import com.twitter.finagle.tracing.{Trace => FTrace}
import com.twitter.logging.Logger
import com.twitter.ostrich.admin.Service
import com.twitter.util.{Time, Future}
import com.twitter.zipkin.conversions.thrift._
import com.twitter.zipkin.gen
import com.twitter.zipkin.query.adjusters.Adjuster
import com.twitter.zipkin.storage._
import java.nio.ByteBuffer
import java.util.concurrent.atomic.AtomicBoolean
import org.apache.thrift.TException
import scala.collection.Set
/**
* Able to respond to users queries regarding the traces. Usually does so
* by lookup the information in the index and then fetch the required trace data
* from the storage.
*/
class QueryService(storage: Storage, index: Index, aggregates: Aggregates, adjusterMap: Map[gen.Adjust, Adjuster],
statsReceiver: StatsReceiver = NullStatsReceiver) extends gen.ZipkinQuery.FutureIface with Service {
private val log = Logger.get
private val running = new AtomicBoolean(false)
private val stats = statsReceiver.scope("QueryService")
private val methodStats = stats.scope("methods")
private val errorStats = stats.scope("errors")
private val timingStats = stats.scope("timing")
// how to sort the trace summaries
private val OrderByDurationDesc = {
(a: TraceIdDuration, b: TraceIdDuration) => a.duration > b.duration
}
private val OrderByDurationAsc = {
(a: TraceIdDuration, b: TraceIdDuration) => a.duration < b.duration
}
private val OrderByTimestampDesc = {
(a: TraceIdDuration, b: TraceIdDuration) => a.startTimestamp > b.startTimestamp
}
private val OrderByTimestampAsc = {
(a: TraceIdDuration, b: TraceIdDuration) => a.startTimestamp < b.startTimestamp
}
// this is how many trace durations we fetch in one request
// TODO config
var traceDurationFetchBatchSize = 500
def start() {
running.set(true)
}
def shutdown() {
running.set(false)
storage.close
index.close
aggregates.close
}
private def constructQueryResponse(indexedIds: Seq[IndexedTraceId], limit: Int, order: gen.Order, defaultEndTs: Long = -1): Future[gen.QueryResponse] = {
val ids = indexedIds.map { _.traceId }
val ts = indexedIds.map { _.timestamp }
sortTraceIds(Future(ids), limit, order).map { sortedIds =>
val (min, max) = sortedIds match {
case Nil => (-1L, defaultEndTs)
case _ => (ts.min, ts.max)
}
gen.QueryResponse(sortedIds, min, max)
}
}
def getTraceIds(queryRequest: gen.QueryRequest): Future[gen.QueryResponse] = {
val method = "getTraceIds"
log.debug("%s: %s".format(method, queryRequest.toString))
call(method) {
val serviceName = queryRequest.`serviceName`
val spanName = queryRequest.`spanName`
val endTs = queryRequest.`endTs`
val limit = queryRequest.`limit`
val order = queryRequest.`order`
val sliceQueries = Seq(
spanName.map { name =>
Seq(SpanSliceQuery(serviceName, name, endTs, 1))
},
queryRequest.`annotations`.map {
_.map { a =>
AnnotationSliceQuery(serviceName, a, None, endTs, 1)
}
},
queryRequest.`binaryAnnotations`.map {
_.map { b =>
AnnotationSliceQuery(serviceName, b.`key`, Some(b.`value`), endTs, 1)
}
}
).collect {
case Some(q: Seq[SliceQuery]) => q
}.flatten
log.debug(sliceQueries.toString())
sliceQueries match {
case Nil => {
/* No queries: get service level traces */
index.getTraceIdsByName(serviceName, None, endTs, limit).map {
constructQueryResponse(_, limit, order)
}.flatten
}
case head :: Nil => {
/* One query: just run it */
(head match {
case s: SpanSliceQuery => s.copy(limit = limit)
case a: AnnotationSliceQuery => a.copy(limit = limit)
}).execute(index).map {
constructQueryResponse(_, limit, order)
}.flatten
}
case queries => {
/* Multiple: Fetch a single column from each to reconcile non-overlapping portions
then fetch the entire slice */
Future.collect {
queries.map {
_.execute(index)
}
}.map {
_.flatten.map {
_.timestamp
}.min
}.map { alignedTimestamp =>
/* Pad the aligned timestamp by a minute */
val ts = padTimestamp(alignedTimestamp)
Future.collect {
queries.map {
case s: SpanSliceQuery => s.copy(endTs = ts, limit = limit).execute(index)
case a: AnnotationSliceQuery => a.copy(endTs = ts, limit = limit).execute(index)
}
}.map { ids =>
traceIdsIntersect(ids) match {
case Nil => {
val endTimestamp = ids.map {
_.map { _.timestamp }.min
}.max
constructQueryResponse(Nil, limit, order, endTimestamp)
}
case seq => {
constructQueryResponse(seq, limit, order)
}
}
}
}.flatten.flatten
}
}
}
}
private[query] def padTimestamp(timestamp: Long): Long = timestamp + Constants.TraceTimestampPadding.inMicroseconds
private[query] def traceIdsIntersect(idSeqs: Seq[Seq[IndexedTraceId]]): Seq[IndexedTraceId] = {
/* Find the trace IDs present in all the Seqs */
val idMaps = idSeqs.map {
_.groupBy {
_.traceId
}
}
val traceIds = idMaps.map {
_.keys.toSeq
}
val commonTraceIds = traceIds.tail.fold(traceIds(0)) { _.intersect(_) }
/*
* Find the timestamps associated with each trace ID and construct a new IndexedTraceId
* that has the trace ID's maximum timestamp (ending) as the timestamp
*/
commonTraceIds.map { id =>
val maxTime = idMaps.map { m =>
m(id).map { _.timestamp }
}.flatten.max
IndexedTraceId(id, maxTime)
}
}
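  /*
   * Worked example (editorial addition, not part of the original source): given
   *   idSeqs = Seq(Seq(IndexedTraceId(1, 10), IndexedTraceId(2, 15)),
   *                Seq(IndexedTraceId(2, 40), IndexedTraceId(3, 5)))
   * only trace ID 2 occurs in both slices, so the result is Seq(IndexedTraceId(2, 40)) - the
   * common ID tagged with the maximum (ending) timestamp observed across the slices.
   */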
def getTraceIdsBySpanName(serviceName: String, spanName: String, endTs: Long,
limit: Int, order: gen.Order): Future[Seq[Long]] = {
val method = "getTraceIdsBySpanName"
log.debug("%s. serviceName: %s spanName: %s endTs: %s limit: %s order: %s".format(method, serviceName, spanName,
endTs, limit, order))
call(method) {
if (serviceName == null || "".equals(serviceName)) {
errorStats.counter("%s_no_service".format(method)).incr()
return Future.exception(gen.QueryException("No service name provided"))
}
// do we have a valid span name to query indexes by?
val span = convertToOption(spanName)
FTrace.recordBinary("serviceName", serviceName)
FTrace.recordBinary("spanName", spanName)
FTrace.recordBinary("endTs", endTs)
FTrace.recordBinary("limit", limit)
FTrace.recordBinary("order", order)
val traceIds = index.getTraceIdsByName(serviceName, span, endTs, limit).map {
_.map { _.traceId }
}
sortTraceIds(traceIds, limit, order)
}
}
def getTraceIdsByServiceName(serviceName: String, endTs: Long,
limit: Int, order: gen.Order): Future[Seq[Long]] = {
val method = "getTraceIdsByServiceName"
log.debug("%s. serviceName: %s endTs: %s limit: %s order: %s".format(method, serviceName, endTs, limit, order))
call(method) {
if (serviceName == null || "".equals(serviceName)) {
errorStats.counter("%s_no_service".format(method)).incr()
return Future.exception(gen.QueryException("No service name provided"))
}
FTrace.recordBinary("serviceName", serviceName)
FTrace.recordBinary("endTs", endTs)
FTrace.recordBinary("limit", limit)
FTrace.recordBinary("order", order)
val traceIds = index.getTraceIdsByName(serviceName, None, endTs, limit).map {
_.map { _.traceId }
}
sortTraceIds(traceIds, limit, order)
}
}
def getTraceIdsByAnnotation(serviceName: String, annotation: String, value: ByteBuffer, endTs: Long,
limit: Int, order: gen.Order): Future[Seq[Long]] = {
val method = "getTraceIdsByAnnotation"
log.debug("%s. serviceName: %s annotation: %s value: %s endTs: %s limit: %s order: %s".format(method, serviceName,
annotation, value, endTs, limit, order))
call(method) {
if (annotation == null || "".equals(annotation)) {
errorStats.counter("%s_no_annotation".format(method)).incr()
return Future.exception(gen.QueryException("No annotation provided"))
}
// do we have a valid annotation value to query indexes by?
val valueOption = convertToOption(value)
FTrace.recordBinary("serviceName", serviceName)
FTrace.recordBinary("annotation", annotation)
FTrace.recordBinary("endTs", endTs)
FTrace.recordBinary("limit", limit)
FTrace.recordBinary("order", order)
val traceIds = index.getTraceIdsByAnnotation(serviceName, annotation, valueOption, endTs, limit).map {
_.map { _.traceId }
}
sortTraceIds(traceIds, limit, order)
}
}
def tracesExist(traceIds: Seq[Long]): Future[Set[Long]] = {
log.debug("tracesExist. " + traceIds)
call("tracesExist") {
FTrace.recordBinary("numIds", traceIds.length)
storage.tracesExist(traceIds)
}
}
def getTracesByIds(traceIds: Seq[Long], adjust: Seq[gen.Adjust]): Future[Seq[gen.Trace]] = {
log.debug("getTracesByIds. " + traceIds + " adjust " + adjust)
call("getTracesByIds") {
val adjusters = getAdjusters(adjust)
FTrace.recordBinary("numIds", traceIds.length)
storage.getSpansByTraceIds(traceIds).map { traces =>
traces.map { spans =>
val trace = Trace(spans)
adjusters.foldLeft(trace)((t, adjuster) => adjuster.adjust(t)).toThrift
}
}
}
}
def getTraceTimelinesByIds(traceIds: Seq[Long],
adjust: Seq[gen.Adjust]): Future[Seq[gen.TraceTimeline]] = {
log.debug("getTraceTimelinesByIds. " + traceIds + " adjust " + adjust)
call("getTraceTimelinesByIds") {
val adjusters = getAdjusters(adjust)
FTrace.recordBinary("numIds", traceIds.length)
storage.getSpansByTraceIds(traceIds).map { traces =>
traces.flatMap { spans =>
val trace = Trace(spans)
TraceTimeline(adjusters.foldLeft(trace)((t, adjuster) => adjuster.adjust(t))).map(_.toThrift)
}
}
}
}
def getTraceSummariesByIds(traceIds: Seq[Long],
adjust: Seq[gen.Adjust]): Future[Seq[gen.TraceSummary]] = {
log.debug("getTraceSummariesByIds. traceIds: " + traceIds + " adjust " + adjust)
call("getTraceSummariesByIds") {
val adjusters = getAdjusters(adjust)
FTrace.recordBinary("numIds", traceIds.length)
storage.getSpansByTraceIds(traceIds.toList).map { traces =>
traces.flatMap { spans =>
val trace = Trace(spans)
TraceSummary(adjusters.foldLeft(trace)((t, adjuster) => adjuster.adjust(t))).map(_.toThrift)
}
}
}
}
def getTraceCombosByIds(traceIds: Seq[Long], adjust: Seq[gen.Adjust]): Future[Seq[gen.TraceCombo]] = {
log.debug("getTraceComboByIds. traceIds: " + traceIds + " adjust " + adjust)
call("getTraceComboByIds") {
val adjusters = getAdjusters(adjust)
FTrace.recordBinary("numIds", traceIds.length)
storage.getSpansByTraceIds(traceIds).map { traces =>
traces.map { spans =>
val trace = Trace(spans)
TraceCombo(adjusters.foldLeft(trace)((t, adjuster) => adjuster.adjust(t))).toThrift
}
}
}
}
def getDataTimeToLive: Future[Int] = {
log.debug("getDataTimeToLive")
call("getDataTimeToLive") {
Future(storage.getDataTimeToLive)
}
}
def getServiceNames: Future[Set[String]] = {
log.debug("getServiceNames")
call("getServiceNames") {
index.getServiceNames
}
}
def getSpanNames(service: String): Future[Set[String]] = {
log.debug("getSpanNames")
call("getSpanNames") {
index.getSpanNames(service)
}
}
def setTraceTimeToLive(traceId: Long, ttlSeconds: Int): Future[Unit] = {
log.debug("setTimeToLive: " + traceId + " " + ttlSeconds)
call("setTraceTimeToLive") {
storage.setTimeToLive(traceId, ttlSeconds.seconds)
}
}
def getTraceTimeToLive(traceId: Long): Future[Int] = {
log.debug("getTimeToLive: " + traceId)
call("getTraceTimeToLive") {
storage.getTimeToLive(traceId).map(_.inSeconds)
}
}
/** Aggregates related */
def getDependencies(startTime: Long, endTime: Option[Long]) : Future[gen.Dependencies] = {
log.debug("getDependencies: " + startTime + " - " + endTime)
call("getDependencies") {
val start = Time.fromNanoseconds(startTime*1000)
val end = endTime.map { t => Time.fromNanoseconds(t*1000) }
aggregates.getDependencies(start, end) map {_.toThrift}
}
}
def getTopAnnotations(serviceName: String): Future[Seq[String]] = {
log.debug("getTopAnnotations: " + serviceName)
call("getTopAnnotations") {
aggregates.getTopAnnotations(serviceName)
}
}
def getTopKeyValueAnnotations(serviceName: String): Future[Seq[String]] = {
log.debug("getTopKeyValueAnnotations: " + serviceName)
call("getTopKeyValueAnnotations") {
aggregates.getTopKeyValueAnnotations(serviceName)
}
}
private def checkIfRunning() = {
if (!running.get) {
log.warning("Server not running, throwing exception")
throw new TException("Server not running")
}
}
private[this] def call[T](name: String)(f: => Future[T]): Future[T] = {
checkIfRunning()
methodStats.counter(name).incr()
timingStats.timeFuture(name) {
f rescue {
case e: Exception => {
log.error(e, "%s failed".format(name))
errorStats.counter(name).incr()
Future.exception(gen.QueryException(e.toString))
}
}
}
}
/**
* Convert incoming Thrift order by enum into sort function.
*/
private def getOrderBy(order: gen.Order) = {
order match {
case gen.Order.None => OrderByDurationDesc
case gen.Order.DurationDesc => OrderByDurationDesc
case gen.Order.DurationAsc => OrderByDurationAsc
case gen.Order.TimestampDesc => OrderByTimestampDesc
case gen.Order.TimestampAsc => OrderByTimestampAsc
}
}
private def getAdjusters(adjusters: Seq[gen.Adjust]): Seq[Adjuster] = {
adjusters.flatMap { adjusterMap.get(_) }
}
/**
* Do we have a valid object to query indexes by?
*/
private def convertToOption[O](param: O): Option[O] = {
param match {
case null => None
case "" => None
case s => Some(s)
}
}
/**
   * Given a sequence of traceIds, get their durations.
*/
private def getTraceIdDurations(
traceIds: Future[Seq[Long]]
): Future[Seq[TraceIdDuration]] = {
traceIds.map { t =>
Future.collect {
t.grouped(traceDurationFetchBatchSize)
.toSeq
.map {index.getTracesDuration(_)}
}
}.flatten.map {_.flatten}
}
private def sortTraceIds(
traceIds: Future[Seq[Long]],
limit: Int,
order: gen.Order
): Future[Seq[Long]] = {
// No sorting wanted
if (order == gen.Order.None) {
traceIds
} else {
val durations = getTraceIdDurations(traceIds)
durations map { d =>
d.sortWith(getOrderBy(order)).slice(0, limit).map(_.traceId)
}
}
}
}
| AnSavvides/zipkin | zipkin-query-core/src/main/scala/com/twitter/zipkin/query/QueryService.scala | Scala | apache-2.0 | 16,447 |
/*
* Copyright 2008-present MongoDB, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.mongodb.scala
import com.mongodb.{MongoCredential => JMongoCredential}
/**
* Represents credentials to authenticate to a MongoDB server, as well as the source of the credentials and the authentication mechanism
* to use.
*
* @since 1.0
*/
object MongoCredential {
/**
* Creates a MongoCredential instance with an unspecified mechanism. The client will negotiate the best mechanism based on the
* version of the server that the client is authenticating to. If the server version is 3.0 or higher,
* the driver will authenticate using the SCRAM-SHA-1 mechanism. Otherwise, the driver will authenticate using the MONGODB_CR
* mechanism.
*
*
* @param userName the user name
* @param database the database where the user is defined
* @param password the user's password
* @return the credential
*
* @see [[http://docs.mongodb.org/manual/core/authentication/#mongodb-cr-authentication MONGODB-CR]]
* @see [[http://docs.mongodb.org/manual/core/authentication/#authentication-scram-sha-1 SCRAM-SHA-1]]
*/
def createCredential(userName: String, database: String, password: Array[Char]): JMongoCredential =
JMongoCredential.createCredential(userName, database, password)
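  // Minimal usage sketch: lets the driver negotiate the strongest mechanism the server
  // supports. The user name, database name and password below are placeholder values.
  private[this] def negotiatedCredentialExample(): JMongoCredential =
    createCredential("alice", "admin", "s3cr3t".toCharArray)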
/**
* Creates a MongoCredential instance for the SCRAM-SHA-1 SASL mechanism. Use this method only if you want to ensure that
   * the driver uses the SCRAM-SHA-1 mechanism regardless of whether the server you are connecting to supports a more secure
* authentication mechanism. Otherwise use the [[createCredential]] method to allow the driver to
* negotiate the best mechanism based on the server version.
*
*
* @param userName the non-null user name
* @param source the source where the user is defined.
* @param password the non-null user password
* @return the credential
* @see [[createCredential]]
*
* @see [[http://docs.mongodb.org/manual/core/authentication/#authentication-scram-sha-1 SCRAM-SHA-1]]
*/
def createScramSha1Credential(userName: String, source: String, password: Array[Char]): JMongoCredential =
JMongoCredential.createScramSha1Credential(userName, source, password)
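  // Minimal usage sketch: pins the mechanism to SCRAM-SHA-1 instead of negotiating.
  // Placeholder user name, source database and password.
  private[this] def scramSha1Example(): JMongoCredential =
    createScramSha1Credential("alice", "admin", "s3cr3t".toCharArray)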
/**
* Creates a MongoCredential instance for the SCRAM-SHA-256 SASL mechanism.
*
*
* @param userName the non-null user name
* @param source the source where the user is defined.
* @param password the non-null user password
* @return the credential
* @note Requires MongoDB 4.0 or greater
* @see [[http://docs.mongodb.org/manual/core/authentication/#authentication-scram-sha-256 SCRAM-SHA-256]]
*/
def createScramSha256Credential(userName: String, source: String, password: Array[Char]): JMongoCredential =
JMongoCredential.createScramSha256Credential(userName, source, password)
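  // Minimal usage sketch: an explicit SCRAM-SHA-256 credential; note the MongoDB 4.0+
  // requirement called out above. Placeholder values throughout.
  private[this] def scramSha256Example(): JMongoCredential =
    createScramSha256Credential("alice", "admin", "s3cr3t".toCharArray)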
/**
* Creates a MongoCredential instance for the MongoDB Challenge Response protocol. Use this method only if you want to ensure that
* the driver uses the MONGODB_CR mechanism regardless of whether the server you are connecting to supports a more secure
* authentication mechanism. Otherwise use the [[createCredential]] method to allow the driver to
* negotiate the best mechanism based on the server version.
*
* @param userName the user name
* @param database the database where the user is defined
* @param password the user's password
* @return the credential
* @see [[createCredential]]
* @see [[http://docs.mongodb.org/manual/core/authentication/#mongodb-cr-authentication MONGODB-CR]]
*/
@deprecated("MONGODB-CR was replaced by SCRAM-SHA-1 in MongoDB 3.0, and is now deprecated.", "2.4")
def createMongoCRCredential(userName: String, database: String, password: Array[Char]): JMongoCredential =
JMongoCredential.createMongoCRCredential(userName, database, password)
/**
* Creates a MongoCredential instance for the MongoDB X.509 protocol.
*
* @param userName the user name
* @return the credential
* @see [[http://docs.mongodb.org/manual/core/authentication/#x-509-certificate-authentication X-509]]
*/
def createMongoX509Credential(userName: String): JMongoCredential = JMongoCredential.createMongoX509Credential(userName)
/**
* Creates a MongoCredential instance for the MongoDB X.509 protocol where the distinguished subject name of the client certificate
* acts as the userName.
*
* @return the credential
* @see [[http://docs.mongodb.org/manual/core/authentication/#x-509-certificate-authentication X-509]]
* @since 1.2
* @note Requires MongoDB 3.4 or greater
*/
def createMongoX509Credential(): JMongoCredential = JMongoCredential.createMongoX509Credential()
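  // Minimal usage sketch of the two X.509 variants: an explicit subject name, and letting
  // the driver derive the user from the client certificate (MongoDB 3.4+). The
  // distinguished name below is a placeholder.
  private[this] def x509Examples(): (JMongoCredential, JMongoCredential) =
    (createMongoX509Credential("CN=client,OU=drivers,O=MongoDB"), createMongoX509Credential())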
/**
* Creates a MongoCredential instance for the PLAIN SASL mechanism.
*
* @param userName the non-null user name
* @param source the source where the user is defined. This can be either `\\$external` or the name of a database.
* @param password the non-null user password
* @return the credential
* @see [[http://docs.mongodb.org/manual/core/authentication/#ldap-proxy-authority-authentication PLAIN]]
*/
def createPlainCredential(userName: String, source: String, password: Array[Char]): JMongoCredential =
JMongoCredential.createPlainCredential(userName, source, password)
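  // Minimal usage sketch: a PLAIN (LDAP proxy) credential defined in the $external
  // virtual database. User name and password are placeholders.
  private[this] def plainExample(): JMongoCredential =
    createPlainCredential("alice", "$external", "s3cr3t".toCharArray)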
/**
* Creates a MongoCredential instance for the GSSAPI SASL mechanism. To override the default service name of `mongodb`, add a
* mechanism property with the name `SERVICE_NAME`. To force canonicalization of the host name prior to authentication, add a
* mechanism property with the name `CANONICALIZE_HOST_NAME` with the value `true`.
*
* @param userName the non-null user name
* @return the credential
* @see [[http://docs.mongodb.org/manual/core/authentication/#kerberos-authentication GSSAPI]]
*/
def createGSSAPICredential(userName: String): JMongoCredential = JMongoCredential.createGSSAPICredential(userName)
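  // Minimal usage sketch of the mechanism properties described above; withMechanismProperty
  // comes from the underlying Java driver credential. The principal and alternate service
  // name are placeholders.
  private[this] def gssapiExample(): JMongoCredential =
    createGSSAPICredential("alice@EXAMPLE.COM")
      .withMechanismProperty("SERVICE_NAME", "mongodb-alt")
      .withMechanismProperty("CANONICALIZE_HOST_NAME", true)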
}
| rozza/mongo-scala-driver | driver/src/main/scala/org/mongodb/scala/MongoCredential.scala | Scala | apache-2.0 | 6,528 |