code | repo_name | path | language | license | size
---|---|---|---|---|---|
import io.gatling.core.Predef._
import io.gatling.core.scenario.Simulation
import io.gatling.http.Predef._
import scala.concurrent.duration._
class GetDocumentExt extends Simulation {
val httpConf = http
.baseURL("http://localhost:7474")
.acceptHeader("application/json")
/* Uncomment to see the response of each request.
.extraInfoExtractor(extraInfo => {
println(extraInfo.response.body.string)
Nil
})
.disableResponseChunksDiscarding
*/
// Use a data file for our requests and repeat values if we get to the end.
val feeder = csv("ids100000.csv").circular
val scn = scenario("Get Document Ext")
.repeat(3125) {
feed(feeder)
.exec(
http("get document ext")
.get("/v1/service/getDocument/${key}")
.basicAuth("neo4j", "swordfish")
.check(status.is(200))
)
}
setUp(
scn.inject(rampUsers(32) over(0 seconds)).protocols(httpConf)
)
} | maxdemarzi/neo_bench | performance/src/test/scala/GetDocumentExt.scala | Scala | mit | 946 |
/*
* Copyright 2012 The SIRIS Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* The SIRIS Project is a cooperation between Beuth University, Berlin and the
* HCI Group at the University of Würzburg. The project is funded by the German
* Federal Ministry of Education and Research (grant no. 17N4409).
*/
package simx.core.svaractor
import java.util.UUID
import simx.core.entity.description.SVal.SValType
import simx.core.svaractor.TimedRingBuffer.{Time, BufferMode}
import simx.core.svaractor.handlersupport.Types.CPSRet
import scala.reflect.ClassTag
import scala.util.continuations
import simx.core.entity.typeconversion.{ConvertedSVar, ConversionInfo}
import unifiedaccess.{Observability, Mutability, Immutability, Accessibility}
import simx.core.svaractor.SVarActor.Ref
// Static methods for SVars.
/**
* This is the companion object of the SVar trait.
*
 * It provides methods to create new State Variables.
*/
trait SVarObjectInterface {
/**
* This method creates a new state variable on the calling actor. This
* method must be called within a SVarActor, otherwise a NotSVarActorException is
 * thrown. The method blocks until the creation of the State Variable is completely
* finished.
*
*
* @param value The initial value of the State Variable
* @tparam T The data type of the state variable
* @return The created state variable. Never returns null.
*/
def apply[T](value: SValType[T], timestamp : Time, bufferLength : BufferMode)(implicit actorContext : SVarActor) : SVar[T]
}
trait SVBase[+T, B <: T] extends Observability[T] with Accessibility[T] with Mutability[B] with Serializable{
def observe(handler: (T, Time) => Unit, ignoredWriters: Set[Ref] = Set())(implicit actorContext: SVarActor) : java.util.UUID
def containedValueManifest : ClassTag[_ <: T]
def ignore()(implicit actorContext : SVarActor)
def as[T2](cInfo : ConversionInfo[T2, B]) : StateParticle[T2]
def isMutable : Boolean
def getValue : Option[B] = None
def read(implicit actorContext : SVarActor) : T@CPSRet =
continuations.shift{ k : (T => Any) => get{ x => k(x) } }
}
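// Illustrative sketch (not from the original sources): a StateParticle is typically
// consumed either by registering an observer or via the CPS-based read defined above.
// With an implicit SVarActor in scope this looks roughly like
//   particle.observe { (value, time) => println(s"$value at $time") }
//   val current = particle.read   // shifts into the continuation and resumes with the value
// `particle` stands for an assumed StateParticle[T]; only observe/read come from this trait.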
private object StateParticleId {
val defaultId = UUID.randomUUID()
}
trait StateParticle[T] extends SVBase[T, T] with Serializable{
/**
 * The unique id through which an SVar is identified
*/
def id: UUID = StateParticleId.defaultId
}
/**
* This trait represents a state variable. State Variables
 * are always created through the mechanism of the companion object.
*
* @tparam T The datatype of the represented value.
*/
trait SVar[T] extends StateParticle[T] with Serializable{
def initialOwner : SVarActor.Ref
/**
* Overrides equals to only compare _id
*/
final override def equals(other: Any) : Boolean = other match {
case that: SVar[_] => id.equals(that.id)
case _ => false
}
/**
* Overrides hashCode to use the _id's hashCode method
*/
final override def hashCode =
id.hashCode
def as[T2](cInfo: ConversionInfo[T2, T]) : SVar[T2] =
new ConvertedSVar(this, cInfo)
override def toString: String =
"SVar with id " + id
}
trait ImmutableSVar[+T, B <: T] extends SVBase[T, B] with Immutability[B] with Serializable{
def ignore()(implicit actorContext: SVarActor){}
}
| simulator-x/core | src/simx/core/svaractor/SVar.scala | Scala | apache-2.0 | 3,813 |
package org.bitcoins.spvnode
import org.bitcoins.core.config.NetworkParameters
import org.bitcoins.core.protocol.NetworkElement
import org.bitcoins.core.util.Factory
import org.bitcoins.spvnode.headers.NetworkHeader
import org.bitcoins.spvnode.messages.NetworkPayload
import org.bitcoins.spvnode.serializers.RawNetworkMessageSerializer
/**
* Created by chris on 6/10/16.
* Represents an entire p2p network message in bitcoins
*/
sealed trait NetworkMessage extends NetworkElement {
def header : NetworkHeader
def payload : NetworkPayload
override def hex = RawNetworkMessageSerializer.write(this)
}
object NetworkMessage extends Factory[NetworkMessage] {
private case class NetworkMessageImpl(header : NetworkHeader, payload : NetworkPayload) extends NetworkMessage
def fromBytes(bytes : Seq[Byte]) : NetworkMessage = RawNetworkMessageSerializer.read(bytes)
/**
 * Creates a network message from its [[NetworkHeader]] and [[NetworkPayload]]
* @param header the [[NetworkHeader]] which is being sent across the network
* @param payload the [[NetworkPayload]] which contains the information being sent across the network
* @return
*/
def apply(header : NetworkHeader, payload : NetworkPayload) : NetworkMessage = {
NetworkMessageImpl(header,payload)
}
/**
 * Creates a [[NetworkMessage]] out of its [[NetworkPayload]]
* @param network the [[NetworkParameters]] indicating the network which the message is going to be sent on
* @param payload the payload that needs to be sent across the network
* @return
*/
def apply(network : NetworkParameters, payload : NetworkPayload) : NetworkMessage = {
val header = NetworkHeader(network, payload)
NetworkMessage(header,payload)
}
}
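// Illustrative sketch (not part of the original file): with concrete NetworkParameters
// and NetworkPayload values in hand, a message is built and serialized roughly as
//   val msg  = NetworkMessage(network, payload)        // header derived via NetworkHeader(network, payload)
//   val same = NetworkMessage(msg.header, msg.payload) // direct header + payload constructor
//   msg.hex                                            // wire serialization from RawNetworkMessageSerializer
// `network` and `payload` are assumed values used only for illustration.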
| bitcoin-s/bitcoin-s-spv-node | src/main/scala/org/bitcoins/spvnode/NetworkMessage.scala | Scala | mit | 1,759 |
/**
* (C) Copyright IBM Corp. 2015, 2016
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.ibm.spark.netezza
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.BooleanType
/**
 * Test converting from Netezza string data to Spark SQL Row. The Netezza external table
 * mechanism writes the data in the particular format specified in the external
* table definition.
*/
class DataConversionSuite extends NetezzaBaseSuite {
test("Test varchar data type.") {
val dbCols = Array(Column("col1", java.sql.Types.VARCHAR))
val schema = buildSchema(dbCols)
val nzrow: NetezzaRow = new NetezzaRow(schema)
val row: Row = nzrow
nzrow.setValue(0, "mars")
assert(row.get(0) == "mars")
// test null
nzrow.setValue(0, null)
assert(row.get(0) == null)
}
test("Test date and timestamp datatype.") {
val dbCols = Array(
Column("col1", java.sql.Types.DATE), Column("col2", java.sql.Types.TIMESTAMP),
Column("col3", java.sql.Types.TIMESTAMP), Column("col4", java.sql.Types.TIMESTAMP),
Column("col5", java.sql.Types.TIMESTAMP), Column("col6", java.sql.Types.TIMESTAMP),
Column("col7", java.sql.Types.TIMESTAMP))
val schema = buildSchema(dbCols)
val nzrow: NetezzaRow = new NetezzaRow(schema)
var i = 0
for (value <- Array("1947-08-15", "2000-12-24 01:02", "1901-12-24 01:02:03",
"1850-01-24 01:02:03.1", "2020-11-24 01:02:03.12", "2015-11-24 01:02:03.123", null)) {
nzrow.setValue(i, value)
i = i + 1
}
// cast it regular row, and call only spark sql row method for verification.
val row: Row = nzrow
assert(row.length == 7)
assert(row.get(0) == java.sql.Date.valueOf("1947-08-15"))
assert(row.get(1) == java.sql.Timestamp.valueOf("2000-12-24 01:02:00"))
assert(row.get(2) == java.sql.Timestamp.valueOf("1901-12-24 01:02:03"))
assert(row.get(3) == java.sql.Timestamp.valueOf("1850-01-24 01:02:03.001"))
assert(row.get(4) == java.sql.Timestamp.valueOf("2020-11-24 01:02:03.012"))
assert(row.get(5) == java.sql.Timestamp.valueOf("2015-11-24 01:02:03.123"))
assert(row.get(6) == null.asInstanceOf[java.sql.Timestamp])
}
test("Test Boolean datatypes") {
val dbCols = Array(Column("col1", java.sql.Types.BOOLEAN))
val schema = buildSchema(dbCols)
val nzrow: NetezzaRow = new NetezzaRow(schema)
// cast it regular row, and call only spark sql row method for verification.
val row: Row = nzrow
assert(row.length == 1)
for (value <- List("T", "F", null)) {
nzrow.setValue(0, value)
val expValue = value match {
case "T" => true
case "F" => false
case null => null.asInstanceOf[BooleanType]
}
assert(row.get(0) == expValue)
}
}
test("Test integer datatypes") {
val dbCols = Array(Column("col1", java.sql.Types.TINYINT),
Column("col2", java.sql.Types.SMALLINT),
Column("col3", java.sql.Types.INTEGER),
Column("col4", java.sql.Types.BIGINT),
Column("col4", java.sql.Types.BIGINT, 0, 0, true))
val schema = buildSchema(dbCols)
val nzrow: NetezzaRow = new NetezzaRow(schema)
// cast it regular row, and call only spark sql row method for verification.
val row: Row = nzrow
var i = 0
for (value <-
Array("10", "32767", "2147483647", "9223372036854775807", "-9223372036854775808")) {
nzrow.setValue(i, value)
i = i + 1
}
assert(row.length == 5)
assert(row.get(0) == 10)
assert(row.get(1) == 32767)
assert(row.get(2) == 2147483647)
assert(row.get(3) == 9223372036854775807L)
assert(row.get(4) == -9223372036854775808L)
}
test("Test decimal data types") {
val dbCols = Array(Column("col1", java.sql.Types.FLOAT),
Column("col2", java.sql.Types.DOUBLE),
Column("col3", java.sql.Types.NUMERIC),
Column("col4", java.sql.Types.NUMERIC, 5, 3),
Column("col5", java.sql.Types.DECIMAL),
Column("col6", java.sql.Types.DECIMAL, 4, 2))
val schema = buildSchema(dbCols)
val expSchema = Array("StructField(col1,FloatType,true)",
"StructField(col2,DoubleType,true)",
"StructField(col3,DecimalType(38,18),true)",
"StructField(col4,DecimalType(5,3),true)",
"StructField(col5,DecimalType(38,18),true)",
"StructField(col6,DecimalType(4,2),true)"
)
for ((colSchema, expColSchema) <- (schema zip expSchema)) {
assert(colSchema.toString() == expColSchema)
}
val nzrow: NetezzaRow = new NetezzaRow(schema)
// cast it regular row, and call only spark sql row method for verification.
val row: Row = nzrow
var i = 0
for (value <- Array("1.2", "3.3", "3434.443", "99.1234", "5.3256789", "3456.22")){
nzrow.setValue(i , value)
i = i + 1
}
assert(row.length == 6)
assert(row.get(0) == 1.2f)
assert(row.get(1) == 3.3d)
assert(row.get(2) == BigDecimal("3434.443"))
assert(row.get(3) == BigDecimal("99.1234"))
assert(row.get(4) == BigDecimal("5.3256789"))
assert(row.get(5) == BigDecimal("3456.22"))
}
}
| SparkTC/spark-netezza | src/test/scala/com/ibm/spark/netezza/DataConversionSuite.scala | Scala | apache-2.0 | 5,602 |
package io.dylemma.spac
/** Marker trait used by `SpacTraceElement.InInput` to extract location information from inputs that cause parsing exceptions.
*
* @group errors
*/
trait HasLocation {
def location: ContextLocation
}
| dylemma/xml-spac | core/src/main/scala/io/dylemma/spac/HasLocation.scala | Scala | mit | 232 |
package auction
import akka.actor._
import akka.util.Timeout
import auction.Notifier.Notify
import scala.concurrent.Await
import scala.concurrent.duration._
import scala.language.postfixOps
import akka.pattern.ask
class NotifierRequest(publisher: ActorRef, notify: Notify) extends Actor {
implicit val timeout = Timeout(5 seconds)
val future = publisher ? notify
Await.result(future, timeout.duration)
override def receive: Receive = {
case _ =>
}
}
| mjoniak/scala-reactive | src/main/scala/auction/NotifierRequest.scala | Scala | mit | 467 |
package spark.scheduler.cluster
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap
import spark.Logging
import spark.scheduler.cluster.SchedulingMode.SchedulingMode
/**
 * A Schedulable entity that represents a collection of Pools or TaskSetManagers
*/
private[spark] class Pool(
val poolName: String,
val schedulingMode: SchedulingMode,
initMinShare: Int,
initWeight: Int)
extends Schedulable
with Logging {
var schedulableQueue = new ArrayBuffer[Schedulable]
var schedulableNameToSchedulable = new HashMap[String, Schedulable]
var weight = initWeight
var minShare = initMinShare
var runningTasks = 0
var priority = 0
var stageId = 0
var name = poolName
var parent:Schedulable = null
var taskSetSchedulingAlgorithm: SchedulingAlgorithm = {
schedulingMode match {
case SchedulingMode.FAIR =>
new FairSchedulingAlgorithm()
case SchedulingMode.FIFO =>
new FIFOSchedulingAlgorithm()
}
}
override def addSchedulable(schedulable: Schedulable) {
schedulableQueue += schedulable
schedulableNameToSchedulable(schedulable.name) = schedulable
schedulable.parent= this
}
override def removeSchedulable(schedulable: Schedulable) {
schedulableQueue -= schedulable
schedulableNameToSchedulable -= schedulable.name
}
override def getSchedulableByName(schedulableName: String): Schedulable = {
if (schedulableNameToSchedulable.contains(schedulableName)) {
return schedulableNameToSchedulable(schedulableName)
}
for (schedulable <- schedulableQueue) {
var sched = schedulable.getSchedulableByName(schedulableName)
if (sched != null) {
return sched
}
}
return null
}
override def executorLost(executorId: String, host: String) {
schedulableQueue.foreach(_.executorLost(executorId, host))
}
override def checkSpeculatableTasks(): Boolean = {
var shouldRevive = false
for (schedulable <- schedulableQueue) {
shouldRevive |= schedulable.checkSpeculatableTasks()
}
return shouldRevive
}
override def getSortedTaskSetQueue(): ArrayBuffer[TaskSetManager] = {
var sortedTaskSetQueue = new ArrayBuffer[TaskSetManager]
val sortedSchedulableQueue = schedulableQueue.sortWith(taskSetSchedulingAlgorithm.comparator)
for (schedulable <- sortedSchedulableQueue) {
sortedTaskSetQueue ++= schedulable.getSortedTaskSetQueue()
}
return sortedTaskSetQueue
}
override def increaseRunningTasks(taskNum: Int) {
runningTasks += taskNum
if (parent != null) {
parent.increaseRunningTasks(taskNum)
}
}
override def decreaseRunningTasks(taskNum: Int) {
runningTasks -= taskNum
if (parent != null) {
parent.decreaseRunningTasks(taskNum)
}
}
override def hasPendingTasks(): Boolean = {
schedulableQueue.exists(_.hasPendingTasks())
}
}
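// Illustrative sketch (not part of the original file): a scheduling tree is assembled by
// nesting pools, e.g.
//   val rootPool  = new Pool("root", SchedulingMode.FAIR, initMinShare = 0, initWeight = 1)
//   val childPool = new Pool("child", SchedulingMode.FIFO, 0, 1)
//   rootPool.addSchedulable(childPool)   // registers the child and sets childPool.parent = rootPool
// TaskSetManagers, which are also Schedulable, are added at the leaves the same way.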
| baeeq/incubator-spark | core/src/main/scala/spark/scheduler/cluster/Pool.scala | Scala | bsd-3-clause | 2,915 |
package io.github.loustler.traitt.mixin
/**
* @author loustler
* @since 09/04/2018
*/
trait Queue[T] {
def get(): T
def put(x: T): Unit
def size: Int
}
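// Illustrative implementation sketch (not part of the original file): a minimal concrete
// Queue backed by a mutable ArrayBuffer, just to show how the three abstract members
// above are typically filled in.
class BasicIntQueue extends Queue[Int] {
  private val buf = new scala.collection.mutable.ArrayBuffer[Int]
  def get(): Int = buf.remove(0)      // removes and returns the oldest element
  def put(x: Int): Unit = buf += x    // appends the element at the end
  def size: Int = buf.size
}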
| loustler/sKaLa | src/main/scala/io/github/loustler/traitt/mixin/Queue.scala | Scala | apache-2.0 | 165 |
package io.scalajs.npm.mongodb
import scala.scalajs.js
/**
* Delete Write Operation Result
* @author [email protected]
*/
@js.native
trait DeleteWriteOpResult extends js.Object {
/** The raw result returned from MongoDB, field will vary depending on server version. */
var result: DeleteWriteOpResult.Outcome = js.native
/** The connection object used for the operation. */
var connection: Connection = js.native
/** The number of documents deleted. */
var deletedCount: Int = js.native
}
/**
* Delete Write Operation Result Companion
* @author [email protected]
*/
object DeleteWriteOpResult {
/**
* Delete Outcome
* @author [email protected]
*/
@js.native
trait Outcome extends js.Object with Okayable {
/** The total count of documents deleted. */
var n: Int = js.native
}
}
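// Hypothetical usage sketch (not part of the original file): a delete operation on the
// driver's Collection resolves to a DeleteWriteOpResult, which would be inspected like
//   collection.deleteMany(filter).toFuture.foreach { res =>
//     println(s"deleted ${res.deletedCount}, raw n = ${res.result.n}")
//   }
// `collection`, `filter` and the promise-to-Future conversion are assumptions here; only
// the `deletedCount` and `result` members come from the facade above.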
| scalajs-io/mongodb | src/main/scala/io/scalajs/npm/mongodb/DeleteWriteOpResult.scala | Scala | apache-2.0 | 867 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import cats.laws._
import cats.laws.discipline._
import monix.eval.Task
import monix.reactive.Observable
import scala.concurrent.duration.Duration.Zero
import scala.concurrent.duration._
object ReduceSuite extends BaseOperatorSuite {
def createObservable(sourceCount: Int) = Some {
if (sourceCount > 1) {
val o = Observable.range(1, sourceCount.toLong + 1).reduce(_ + _)
Sample(o, 1, sum(sourceCount), Zero, Zero)
} else {
val o = Observable.range(1, 3).reduce(_ + _)
Sample(o, 1, 3, Zero, Zero)
}
}
def sum(sourceCount: Int): Int =
sourceCount * (sourceCount + 1) / 2
def observableInError(sourceCount: Int, ex: Throwable) = Some {
val o = Observable.range(1, sourceCount.toLong + 1).endWithError(ex).reduce(_ + _)
Sample(o, 0, 0, Zero, Zero)
}
def brokenUserCodeObservable(sourceCount: Int, ex: Throwable) = Some {
val o = Observable.range(0, sourceCount.toLong + 1).reduce { (acc, elem) =>
if (elem == sourceCount)
throw ex
else
acc + elem
}
Sample(o, 0, 0, Zero, Zero)
}
override def cancelableObservables(): Seq[Sample] = {
val o = Observable.range(0, 100).delayOnNext(1.second).reduce(_ + _)
Seq(Sample(o, 0, 0, 0.seconds, 0.seconds))
}
test("Observable.reduce is equivalent with List.reduce") { implicit s =>
check1 { list: List[Int] =>
val obs = Observable.fromIterable(list)
val result = obs.reduce(_ + _).lastL
result <-> Task.eval(list.reduce(_ + _))
}
}
}
| monifu/monix | monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/ReduceSuite.scala | Scala | apache-2.0 | 2,239 |
package hashedcomputation
import hashedcomputation.Fingerprint.Entry
import org.jetbrains.annotations.{NotNull, Nullable}
import scala.collection.mutable
import scala.collection.mutable.ListBuffer
import scala.util.control.Breaks
import scala.util.control.Breaks.{break, tryBreakable}
object Fingerprint {
final case class Entry[A, B](element: Element[A, B], fingerprint: Fingerprint[B]) {
type OutT = B
type InT = A
def matches(value: A): Boolean = {
val extracted = element.extract(value)
fingerprint.matches(extracted)
}
}
object Entry {
def apply[A, B: Hashable](element: Element[A, B], value: A): Entry[A, B] =
Entry(element, Fingerprint(Hashable.hash(element.extract(value))))
}
def apply[A: Hashable](hash: Hash[A]) = new Fingerprint[A](hash, None)
def apply[A: Hashable](value: A) = new Fingerprint[A](Hashable.hash(value), None)
}
/** fingerprints==None means no fingerprinting; in that case we can only rely on the overall hash */
case class Fingerprint[A: Hashable](@NotNull hash: Hash[A], @NotNull fingerprints: Option[Seq[Entry[A, _]]]) {
def project[B: Hashable](targetHash: Hash[B], projectEntry: Entry[A,_] => Option[Seq[Entry[B, _]]]): Fingerprint[B] =
fingerprints match {
case None => new Fingerprint[B](targetHash, None)
case Some(fingerprints) =>
tryBreakable {
val merged = for (entry <- fingerprints;
projFp = projectEntry(entry).getOrElse { break() };
fp2 <- projFp)
yield fp2
new Fingerprint[B](targetHash, Some(merged))
} catchBreak {
new Fingerprint[B](targetHash, None)
}
}
/** Must be fingerprints for the same value */
def join(other: Fingerprint[A]): Fingerprint[A] = {
type SE = Seq[Entry[A, _]]
assert(hash==other.hash)
val fp : Option[SE] = (fingerprints, other.fingerprints) match {
case (None, None) => None
case (f : Some[SE], None) => None
case (None, f: Some[SE]) => None
case (Some(f1), Some(f2)) => Some(f1 ++ f2)
}
new Fingerprint(hash, fp)
}
def matches(value: A): Boolean = {
if (hash == Hashable.hash(value)) true
else {
fingerprints match {
case None => false
case Some(fingerprints) =>
fingerprints.forall(_.matches(value))
}
}
}
type U[B] = (Element[A, B], Hash[B])
def unfold: Seq[U[_]] = {
val result = new ListBuffer[U[_]]
def unfold[B](fp: Fingerprint[B], element: Element[A, B]): Unit = fp.fingerprints match {
case None => result.append((element, fp.hash): U[B])
case Some(fingerprints) =>
def doEntry[C](entry: Entry[B, C]): Unit = {
val subElement = NestedElement(element, entry.element)
unfold(entry.fingerprint, subElement)
}
for (entry <- fingerprints) doEntry(entry)
}
unfold(this, IDElement())
result.toList
}
}
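// Illustrative note (not part of the original file): the companion's apply overloads build
// hash-only fingerprints, so for some Hashable value v
//   val fp = Fingerprint(v)   // fingerprints = None, hash = Hashable.hash(v)
//   fp.matches(v)             // true: with no per-element entries only the overall hash is compared
// Per-element entries are added through Entry / FingerprintBuilderImpl further below.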
object DummyMap extends mutable.Map[Any, Any] {
override def get(key: Any): Option[Nothing] =
throw new UnsupportedOperationException
override def subtractOne(elem: Any): DummyMap.this.type = this
override def addOne(elem: (Any, Any)): DummyMap.this.type = this
override def iterator: Iterator[(Any, Any)] = new Iterator[(Any, Any)] {
override def hasNext: Boolean = throw new UnsupportedOperationException
override def next(): Nothing = throw new UnsupportedOperationException
}
override def updateWith(key: Any)(remappingFunction: Option[Any] => Option[Any]): Option[Any] =
throw new UnsupportedOperationException
}
trait FingerprintBuilder[A] {
def access[B](element: Element[A,B], fingerprint: Fingerprint[B]): Unit
def accessAll(): Unit
def access[B : Hashable](element: Element[A,B], value: B): Unit =
access(element, Hashable.hash(value))
def access[B: Hashable](element: Element[A,B], hash: Hash[B]) : Unit =
access(element, Fingerprint(hash))
def access(fingerprint: Fingerprint[A]): Unit = fingerprint.fingerprints match {
case None => accessAll()
case Some(entries) =>
for (entry <- entries)
entry match {
case entry : Entry[A,b] =>
access(entry.element, entry.fingerprint)
}
}
// TODO: rename to something like buildFingerprint
def fingerprint : Fingerprint[A]
def unsafeUnderlyingValue : A
}
/** Not thread safe */
final class FingerprintBuilderImpl[A : Hashable](value: A) extends FingerprintBuilder[A] {
private type MapType = mutable.Map[Element[A, _], Fingerprint[_]]
private var entries : MapType =
new mutable.LinkedHashMap[Element[A, _], Fingerprint[_]]
override def unsafeUnderlyingValue: A = value
def access[B](element: Element[A,B], fingerprint: Fingerprint[B]): Unit =
if (entries eq DummyMap)
{}
else
entries.updateWith(element) {
case None => Some(fingerprint)
case Some(fingerprint1) =>
Some(fingerprint1.asInstanceOf[Fingerprint[B]].join(fingerprint))
}
def accessAll(): Unit = entries = DummyMap.asInstanceOf[MapType]
def fingerprint : Fingerprint[A] = {
if (entries eq DummyMap)
Fingerprint(Hashable.hash(value), None)
else
Fingerprint(Hashable.hash(value),
Some(entries.toSeq.map { case (elem, fp) =>
new Entry(elem, fp.asInstanceOf[Fingerprint[Any]]) }))
}
}
/*
trait FingerprintingView[A, B] { this: B =>
/** This function returns the underlying value without registering the access in the fingerprint */
def unsafePeekUnderlyingValue: A
/** Will cause the whole object to be marked as accessed */
def everything : A
}
trait HasFingerprintingView[A, B] extends Hashable[A] {
@NotNull def newView(@NotNull value: A, @NotNull fingerprintBuilder: FingerprintBuilder[A]) : FingerprintingView[A,B]
}
trait WithFingerprintingView[B] extends HashedValue {
@NotNull def newView(@NotNull fingerprintBuilder: FingerprintBuilder[this.type]) : FingerprintingView[this.type,B]
}
object WithFingerprintingView {
implicit def withFingerprintingViewHasFingerprintingView[B]: HasFingerprintingView[WithFingerprintingView[B], B] =
new HasFingerprintingView[WithFingerprintingView[B], B] {
override def hash[A1 <: HashedValue](value: A1): Hash[A1] = value.hash
override def newView(value: WithFingerprintingView[B], fingerprintBuilder: FingerprintBuilder[WithFingerprintingView[B]]): FingerprintingView[WithFingerprintingView[B], B] =
value.newView(fingerprintBuilder)
}
}
*/
| dominique-unruh/qrhl-tool | hashedcomputation/src/main/scala/hashedcomputation/Fingerprint.scala | Scala | mit | 6,512 |
/*
* This file is part of the "silex" library of helpers for Apache Spark.
*
* Copyright (c) 2015 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.redhat.et.silex.frame
import com.redhat.et.silex.testing.PerTestSparkContext
import org.scalatest._
private[frame] case class LPExample1(label: Double, v1: Double, v2: Double) {}
private[frame] case class LPExample2(a: Int, b: Int, c: Int) {}
class LabeledPointSpec extends FlatSpec with Matchers with PerTestSparkContext {
import org.apache.spark.sql.Row
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.mllib.regression.LabeledPoint
it should "construct vectors from RDDs with Double-valued vector columns" in {
val sqlc = sqlContext
import sqlc.implicits._
val df = context.parallelize((1 to 100).map { i => LPExample1(i * 1.0d, i * 2.0d, i * 4.0d)}).toDF()
val lps = FrameToVector.toDenseVectors(df, "v1", "v2")
assert(lps.count() == 100)
lps.collect.foreach { vec => {
assert(vec(1) == vec(0) * 2.0d)
}
}
}
it should "construct vectors from RDDs with Int-valued vector columns" in {
val sqlc = sqlContext
import sqlc.implicits._
val df = context.parallelize((1 to 100).map { i => LPExample2(i, i * 2, i * 4)}).toDF()
val lps = FrameToVector.toDenseVectors(df, "a", "b")
assert(lps.count() == 100)
lps.collect.foreach { vec => {
assert(vec(1) == vec(0) * 2.0d)
}
}
}
it should "construct labeled points from RDDs with Double-valued label and vector columns" in {
val sqlc = sqlContext
import sqlc.implicits._
val df = context.parallelize((1 to 100).map { i => LPExample1(i * 1.0d, i * 2.0d, i * 4.0d)}).toDF()
val lps = FrameToVector.toLabeledPoints(df, "label", "v1", "v2")
assert(lps.count() == 100)
lps.collect.foreach {
case LabeledPoint(l, vec) => {
assert(vec(0) == l * 2.0d)
assert(vec(1) == l * 4.0d)
}
}
}
}
| erikerlandson/silex | src/test/scala/com/redhat/et/silex/frame/labeledPointSuite.scala | Scala | apache-2.0 | 2,590 |
package org.bitcoins.core.protocol.ln.channel
import org.bitcoins.core.number.UInt64
import org.bitcoins.crypto.{Factory, NetworkElement}
import scodec.bits.ByteVector
case class ShortChannelId(u64: UInt64) extends NetworkElement {
override def bytes: ByteVector = u64.bytes
val blockHeight: UInt64 = (u64 >> 40) & UInt64(0xffffff)
val txIndex: UInt64 = (u64 >> 16) & UInt64(0xffffff)
val outputIndex: UInt64 = u64 & UInt64(0xffff)
/** Output example:
* {{{
* > ShortChannelId.fromHex("db0000010000")
* 219x1x0
* }}}
*/
override def toString: String = toHumanReadableString
/** Converts the short channel id into the human readable form defined in BOLT.
* @see [[https://github.com/lightningnetwork/lightning-rfc/blob/master/07-routing-gossip.md BOLT7]]
*
* Output example:
* {{{
* > ShortChannelId.fromHex("db0000010000")
* 219x1x0
* }}}
*/
def toHumanReadableString: String = {
s"${blockHeight.toInt}x${txIndex.toInt}x${outputIndex.toInt}"
}
}
object ShortChannelId extends Factory[ShortChannelId] {
override def fromBytes(byteVector: ByteVector): ShortChannelId = {
new ShortChannelId(UInt64.fromBytes(byteVector))
}
def fromHumanReadableString(str: String): ShortChannelId =
str.split("x") match {
case Array(_blockHeight, _txIndex, _outputIndex) =>
val blockHeight = BigInt(_blockHeight)
val txIndex = _txIndex.toInt
val outputIndex = _outputIndex.toInt
apply(blockHeight, txIndex, outputIndex)
case _: Array[String] => fromHex(str)
}
def apply(
blockHeight: BigInt,
txIndex: Int,
outputIndex: Int): ShortChannelId = {
require(blockHeight >= 0 && blockHeight <= 0xffffff,
s"ShortChannelId: invalid block height $blockHeight")
require(txIndex >= 0 && txIndex <= 0xffffff,
s"ShortChannelId:invalid tx index $txIndex")
require(outputIndex >= 0 && outputIndex <= 0xffff,
s"ShortChannelId: invalid output index $outputIndex")
val u64 = UInt64(
((blockHeight & 0xffffffL) << 40) | ((txIndex & 0xffffffL) << 16) | (outputIndex & 0xffffL))
ShortChannelId(u64)
}
}
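// Illustrative note (not part of the original file), restating the scaladoc example above:
//   ShortChannelId.fromHex("db0000010000").toString                      // "219x1x0"
//   ShortChannelId.fromHumanReadableString("219x1x0").blockHeight.toInt  // 219
// i.e. the human readable form is "<blockHeight>x<txIndex>x<outputIndex>", packed into
// 24 + 24 + 16 bits of the underlying UInt64.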
| bitcoin-s/bitcoin-s | core/src/main/scala/org/bitcoins/core/protocol/ln/channel/ShortChannelId.scala | Scala | mit | 2,196 |
/*
* Copyright 2014 websudos ltd.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.websudos.reactiveneo.client
import com.websudos.reactiveneo.RequiresNeo4jServer
import com.websudos.reactiveneo.dsl._
import org.scalatest.concurrent.{IntegrationPatience, ScalaFutures}
import org.scalatest.{FeatureSpec, GivenWhenThen, Matchers}
import play.api.libs.functional.syntax._
import play.api.libs.json.Reads._
import play.api.libs.json._
case class InsertResult(id: Int)
case class Person(name: String, age: Int)
class RestClientSpec extends FeatureSpec with GivenWhenThen with Matchers
with ScalaFutures with IntegrationPatience {
info("As a user")
info("I want to be able to make a call to Neo4j server")
info("So I can get the data")
info("And expect the the result to be parsed for me")
feature("REST client") {
scenario("send a simple MATCH query", RequiresNeo4jServer) {
Given("started Neo4j server")
implicit val service = RestConnection("localhost", 7474)
val query: MatchQuery[_, _, _, _, _, TestNodeRecord] = TestNode().returns { case go ~~ _ => go }
When("REST call is executed")
val result = query.execute
Then("The result should be delivered")
whenReady(result) { res =>
res should not be empty
}
}
scenario("send a query and use a custom parser to get the result", RequiresNeo4jServer) {
Given("started Neo4j server")
val service = RestConnection("localhost", 7474)
val query = "CREATE (n) RETURN id(n)"
implicit val parsRester: Reads[InsertResult] = __.read[Int].map { arr =>
InsertResult(arr)
}
When("REST call is executed")
val result = service.makeRequest[InsertResult](query).execute
Then("The result should be delivered")
whenReady(result) { res =>
res should not be empty
res.head.id shouldBe >(0)
}
}
scenario("create a Person node and load it", RequiresNeo4jServer) {
Given("started Neo4j server")
val service = RestConnection("localhost", 7474)
val query = "CREATE (p: Person { name: 'Mike', age: 10 }) RETURN p"
implicit val parser: Reads[Person] = ((__ \ "name").read[String] and (__ \ "age").read[Int])(Person)
When("REST call is executed")
val result = service.makeRequest[Person](query).execute
Then("The result should be delivered")
whenReady(result) { res =>
res should not be empty
res.head.name shouldBe "Mike"
}
}
}
}
| websudos/reactiveneo | reactiveneo-dsl/src/test/scala/com/websudos/reactiveneo/client/RestClientSpec.scala | Scala | gpl-2.0 | 3,039 |
def ConfigOut(in: GE) = Out.ar(0, Pan2.ar(Limiter.ar(LeakDC.ar(in))))
def EnvGen_Triangle(dur: GE = 1.0f, level: GE = 1.0f, gate: GE = 1,
levelScale: GE = 1.0f, levelBias: GE = 0.0f, timeScale: GE = 1.0f): GE = {
val mkEnv: Env = Env.triangle(dur = dur, level = level)
EnvGen.ar(mkEnv, gate = gate, levelScale = levelScale,
levelBias = levelBias, timeScale = timeScale)
}
play {
// RandSeed.ir(trig = 1, seed = 56789.0)
val xi = GbmanL.ar(freq = 686.5739, xi = -2526.418, yi = 419.73846)
val bRF_0 = BRF.ar(636.937, freq = -0.0029116, rq = 419.73846)
val in_0 = Blip.ar(freq = 4.2980185E-4, numHarm = 0.007095831)
val peakFollower = PeakFollower.ar(in_0, decay = 0.34497613)
val gate_0 = GbmanN.ar(freq = 0.34497613, xi = -0.0058356896, yi = 636.937)
val gate_1 = Gate.ar(-676.2965, gate = gate_0)
val c = GbmanL.ar(freq = 83.65495, xi = 936.9255, yi = 4.2980185E-4)
val plus = bRF_0 + c
val bRF_1 = BRF.ar(636.937, freq = 419.73846, rq = -0.0029116)
val linCongL = LinCongL.ar(freq = 636.937, a = -676.2965, c = c, m = bRF_1, xi = xi)
val g = LFTri.ar(freq = 1.0334905E-4, iphase = 8.225018E-4)
val yi_0 = TBall.ar(83.65495, g = g, damp = 0.006726554, friction = -0.0054616947)
val gbmanL = GbmanL.ar(freq = -0.0054616947, xi = bRF_1, yi = yi_0)
val gbmanN = GbmanN.ar(freq = -4334.8867, xi = -0.0058356896, yi = 8.225018E-4)
val grayNoise = GrayNoise.ar(18.71311)
val lag3 = Lag3.ar(419.73846, time = 636.937)
val saw = Saw.ar(0.022197612)
val mix = Mix(Seq[GE](saw, lag3, grayNoise, gbmanN, gbmanL, linCongL, plus, gate_1, peakFollower))
val mono = Mix.Mono(mix)
ConfigOut(mono)
}
| Sciss/Grenzwerte | individual_sounds/1820_133.scala | Scala | gpl-3.0 | 1,798 |
package com.iheart.playSwagger
object Domain {
type Path = String
type Method = String
case class Definition( name: String,
properties: Seq[SwaggerParameter],
description: Option[String] = None)
case class SwaggerParameter( name: String,
`type`: Option[String] = None,
format: Option[String] = None,
required: Boolean = true,
referenceType: Option[String] = None,
items: Option[String] = None)
}
| Product-Foundry/play-swagger | src/main/scala/com/iheart/playSwagger/Domain.scala | Scala | apache-2.0 | 614 |
/*
* Copyright 2017 Datamountaineer.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.datamountaineer.streamreactor.connect.hbase.writers
import com.datamountaineer.streamreactor.connect.hbase.BytesHelper._
import com.datamountaineer.streamreactor.connect.hbase.HbaseHelper
import org.apache.hadoop.hbase.client.{Connection, ConnectionFactory, Scan}
import org.apache.hadoop.hbase.util.Bytes
import org.apache.hadoop.hbase.{CellUtil, HBaseConfiguration, TableName}
import scala.collection.JavaConverters._
object HbaseReaderHelper {
def createConnection: Connection = {
ConnectionFactory.createConnection(HBaseConfiguration.create())
}
def getAllRecords(tableName: String, columnFamily: String)(implicit connection: Connection): List[HbaseRowData] = {
HbaseHelper.withTable(TableName.valueOf(tableName)) { tbl =>
val scan = new Scan()
scan.addFamily(columnFamily.fromString())
val scanner = tbl.getScanner(scan)
scanner.asScala.map { rs =>
val cells = rs.rawCells().map { cell =>
Bytes.toString(CellUtil.cloneQualifier(cell)) -> CellUtil.cloneValue(cell)
}.toMap
HbaseRowData(rs.getRow, cells)
}.toList
}
}
}
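// Hypothetical usage sketch (not part of the original file); the table and column family
// names are placeholders.
//   implicit val connection = HbaseReaderHelper.createConnection
//   val rows = HbaseReaderHelper.getAllRecords("my_table", "cf")
//   rows.foreach(row => println(Bytes.toString(row.key) + " -> " + row.cells.keys.mkString(",")))
//   connection.close()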
case class HbaseRowData(key: Array[Byte], cells: Map[String, Array[Byte]]) | datamountaineer/stream-reactor | kafka-connect-hbase/src/test/scala/com/datamountaineer/streamreactor/connect/hbase/writers/HbaseReaderHelper.scala | Scala | apache-2.0 | 1,796 |
package com.rklaehn.interval
import java.util.Arrays
import org.scalacheck.{Arbitrary, Properties}
import org.scalacheck.Prop._
import spire.implicits._
import scala.reflect.ClassTag
object QuickArrayMergeCheck extends Properties("QuickArrayMerge") {
implicit val arbitraryArray = implicitly[Arbitrary[Array[Int]]]
property("merge") = forAll { (a:Array[Int], b:Array[Int]) =>
val r = (a ++ b)
Arrays.sort(a)
Arrays.sort(b)
Arrays.sort(r)
val order = new CountingOrder[Int]
val r1 = QuickArrayMerge.merge(a,b)(order, ClassTag.Int)
// val sa = a.mkString(",")
// val sb = b.mkString(",")
// println(s"$sa\\n$sb\\n")
// true
// val worstCase = math.max(a.length + b.length - 1, 0)
// if(order.count > worstCase) {
// println(s"$worstCase ${order.count}")
// }
r1.corresponds(r)(_ == _)
}
property("merge order") = forAll { (a:Array[Int], b:Array[Int]) =>
val r = (a ++ b)
Arrays.sort(a)
Arrays.sort(b)
Arrays.sort(r)
val o1 = new CountingOrder[Int]
val r1 = QuickArrayMerge.merge(a,b)(o1, ClassTag.Int)
val o2 = new CountingOrder[Int]
val r2 = QuickArrayMerge.merge(b,a)(o2, ClassTag.Int)
val worstCase = math.max(a.length + b.length - 1, 0)
// println(s"${o1.count} ${o2.count} $worstCase")
r1.corresponds(r2)(_ == _)
}
}
| non/intervalset | src/test/scala/com/rklaehn/interval/QuickArrayMergeCheck.scala | Scala | apache-2.0 | 1,349 |
package zalgo
import math._
// Code courtesy of Textgrounder: https://bitbucket.org/utcompling/textgrounder
object DistanceUtil {
/** *** Fixed values *****/
val minimum_latitude = -90.0
val maximum_latitude = 90.0
val minimum_longitude = -180.0
val maximum_longitude = 180.0 - 1e-10
// Radius of the earth in miles. Used to compute spherical distance in miles,
// and miles per degree of latitude/longitude.
val earth_radius_in_miles = 3963.191
val meter_per_mile = 1609.34
// Number of kilometers per mile.
val km_per_mile = 1.609
// Number of miles per degree, at the equator. For longitude, this is the
// same everywhere, but for latitude it is proportional to the degrees away
// from the equator.
val miles_per_degree = Pi * 2 * earth_radius_in_miles / 360.0
/** *** Computed values based on command-line params *****/
// Size of each region in degrees. Determined by the --degrees-per-region
// option, unless --miles-per-region is set, in which case it takes
// priority.
var degrees_per_region = 0.0
// Size of each region (vertical dimension; horizontal dimension only near
// the equator) in miles. Determined from degrees_per_region.
var miles_per_region = 0.0
var width_of_stat_region = 1
// A 2-dimensional coordinate.
//
// The following fields are defined:
//
// lat, long: Latitude and longitude of coordinate.
case class Coord(lat: Double, long: Double,
validate: Boolean = true) {
if (validate) {
// Not sure why this code was implemented with coerce_within_bounds,
// but either always coerce, or check the bounds ...
require(lat >= minimum_latitude)
require(lat <= maximum_latitude)
require(long >= minimum_longitude)
require(long <= maximum_longitude)
}
override def toString() = "(%.2f,%.2f)".format(lat, long)
}
object Coord {
// Create a coord, with METHOD defining how to handle coordinates
// out of bounds. If METHOD = "accept", just accept them; if
// "validate", check within bounds, and abort if not. If "coerce",
// coerce within bounds (latitudes are cropped, longitudes are taken
// mod 360).
def apply(lat: Double, long: Double, method: String) = {
var validate = false
val (newlat, newlong) =
method match {
case "coerce-warn" => {
coerce(lat, long)
}
case "coerce" => coerce(lat, long)
case "validate" => {
validate = true; (lat, long)
}
case "accept" => {
(lat, long)
}
case _ => {
require(false,
"Invalid method to Coord(): %s" format method)
(0.0, 0.0)
}
}
new Coord(newlat, newlong, validate = validate)
}
def valid(lat: Double, long: Double) =
lat >= minimum_latitude &&
lat <= maximum_latitude &&
long >= minimum_longitude &&
long <= maximum_longitude
def coerce(lat: Double, long: Double) = {
var newlat = lat
var newlong = long
if (newlat > maximum_latitude) newlat = maximum_latitude
while (newlong > maximum_longitude) newlong -= 360.0
if (newlat < minimum_latitude) newlat = minimum_latitude
while (newlong < minimum_longitude) newlong += 360.0
(newlat, newlong)
}
}
// Compute spherical distance in METERS (along a great circle) between two
// coordinates.
def spheredist(p1: Coord, p2: Coord): Double = {
if (p1 == null || p2 == null) return 1000000.0
val thisRadLat = (p1.lat / 180.0) * Pi
val thisRadLong = (p1.long / 180.0) * Pi
val otherRadLat = (p2.lat / 180.0) * Pi
val otherRadLong = (p2.long / 180.0) * Pi
val angle_cos = (sin(thisRadLat) * sin(otherRadLat)
+ cos(thisRadLat) * cos(otherRadLat) *
cos(otherRadLong - thisRadLong))
// If the values are extremely close to each other, the resulting cosine
// value will be extremely close to 1. In reality, however, if the values
// are too close (e.g. the same), the computed cosine will be slightly
// above 1, and acos() will complain. So special-case this.
if (abs(angle_cos) > 1.0) {
if (abs(angle_cos) > 1.000001) {
return 1000000.0
} else
return 0.0
}
earth_radius_in_miles * meter_per_mile * acos(angle_cos)
}
}
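// Illustrative usage sketch (not part of the original file); the coordinates are arbitrary
// sample values.
//   val berlin    = DistanceUtil.Coord(52.52, 13.405)
//   val wuerzburg = DistanceUtil.Coord(49.79, 9.95)
//   val meters    = DistanceUtil.spheredist(berlin, wuerzburg)  // great-circle distance in meters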
| itszero/neo4j-astar-example | src/main/scala/zalgo/DistanceUtil.scala | Scala | mit | 4,394 |
/*
Copyright (c) 2009, 2010 Hanno Braun <[email protected]>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.hannobraun.sd.dynamics
import com.hannobraun.sd.core.StepPhase
import com.hannobraun.sd.math.Vector2
import scala.math._
class PositionConstraintSolver extends StepPhase[ PositionConstraint, Nothing ] {
def execute( dt: Double, constraints: Iterable[ PositionConstraint ], c: Iterable[ Nothing ] ) = {
for ( constraint <- constraints ) {
val initialX = constraint.position.x
val initialY = constraint.position.y
val xAfterMin = constraint.minX match {
case Some( minX ) => max( initialX, minX )
case None => initialX
}
val yAfterMin = constraint.minY match {
case Some( minY ) => max( initialY, minY )
case None => initialY
}
val xAfterMax = constraint.maxX match {
case Some( maxX ) => min( xAfterMin, maxX )
case None => xAfterMin
}
val yAfterMax = constraint.maxY match {
case Some( maxY ) => min( yAfterMin, maxY )
case None => yAfterMin
}
constraint.position = Vector2( xAfterMax, yAfterMax )
}
( constraints, c )
}
}
| hannobraun/ScalableDynamics | src/main/scala/com/hannobraun/sd/dynamics/PositionConstraintSolver.scala | Scala | apache-2.0 | 1,622 |
package io.github.mandar2812.dynaml.optimization
import breeze.linalg.{Tensor}
import com.tinkerpop.blueprints.Edge
import com.tinkerpop.frames.EdgeFrame
/**
* Trait for optimization problem solvers.
*
* @tparam K The type indexing the Parameter vector, should be Int in
* most cases.
* @tparam P The type of the parameters of the model to be optimized.
* @tparam Q The type of the predictor variable
* @tparam R The type of the target variable
* @tparam S The type of the edge containing the
* features and label.
*/
trait Optimizer[K, P, Q, R, S] extends Serializable {
/**
* Solve the convex optimization problem.
*/
def optimize(nPoints: Long, ParamOutEdges: S, initialP: P): P
}
abstract class RegularizedOptimizer[K, P, Q, R, S]
extends Optimizer[K, P, Q, R, S] with Serializable {
protected var regParam: Double = 1.0
protected var numIterations: Int = 10
protected var miniBatchFraction: Double = 1.0
protected var stepSize: Double = 1.0
/**
* Set the regularization parameter. Default 0.0.
*/
def setRegParam(regParam: Double): this.type = {
this.regParam = regParam
this
}
/**
* Set fraction of data to be used for each SGD iteration.
* Default 1.0 (corresponding to deterministic/classical gradient descent)
*/
def setMiniBatchFraction(fraction: Double): this.type = {
this.miniBatchFraction = fraction
this
}
/**
* Set the number of iterations for SGD. Default 100.
*/
def setNumIterations(iters: Int): this.type = {
this.numIterations = iters
this
}
/**
* Set the initial step size of SGD for the first step. Default 1.0.
* In subsequent steps, the step size will decrease with stepSize/sqrt(t)
*/
def setStepSize(step: Double): this.type = {
this.stepSize = step
this
}
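  // Illustrative note (not part of the original file): since every setter returns this.type,
  // a concrete optimizer is usually configured fluently before optimize() is called, e.g.
  //   optimizer
  //     .setRegParam(0.01)
  //     .setStepSize(0.5)
  //     .setMiniBatchFraction(1.0)
  //     .setNumIterations(100)
  //     .optimize(nPoints, edges, initialParams)
  // `optimizer`, `nPoints`, `edges` and `initialParams` are assumed values for illustration.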
} | mandar2812/bayeslearn | src/main/scala/io/github/mandar2812/dynaml/optimization/Optimizer.scala | Scala | apache-2.0 | 1,833 |
package blended.updater.config
import com.typesafe.config.ConfigFactory
import org.scalatest.{FreeSpecLike, Matchers}
import scala.util.Success
class ResolvedRuntimeConfigSpec
extends FreeSpecLike
with Matchers {
"A Config with features references" - {
val config = """
|name = name
|version = 1
|bundles = [{url = "mvn:base:bundle1:1"}]
|startLevel = 10
|defaultStartLevel = 10
|features = [
| { name = feature1, version = 1 }
| { name = feature2, version = 1 }
|]
|""".stripMargin
val feature1 = """
|name = feature1
|version = 1
|bundles = [{url = "mvn:feature1:bundle1:1"}]
|""".stripMargin
val feature2 = """
|name = feature2
|version = 1
|bundles = [{url = "mvn:feature2:bundle1:1"}]
|features = [{name = feature3, version = 1}]
|""".stripMargin
val feature3 = """
|name = feature3
|version = 1
|bundles = [{url = "mvn:feature3:bundle1:1", startLevel = 0}]
|""".stripMargin
val features = List(feature1, feature2, feature3).map(f => {
val fc = FeatureConfigCompanion.read(ConfigFactory.parseString(f))
fc shouldBe a[Success[_]]
fc.get
})
val runtimeConfig : RuntimeConfig = RuntimeConfigCompanion.read(ConfigFactory.parseString(config)).get
"should be constructable with extra features" in {
ResolvedRuntimeConfig(runtimeConfig, features)
}
"should be constructable with optional resolved features" in {
ResolvedRuntimeConfig(runtimeConfig.copy(resolvedFeatures = features))
}
"should not be constructable when some feature refs are not resolved" in {
val ex = intercept[IllegalArgumentException] {
ResolvedRuntimeConfig(runtimeConfig)
}
ex.getMessage should startWith("requirement failed: Contains resolved feature: feature1-1")
}
"should not be constructable when no bundle with startlevel 0 is present" in {
val f3 = features.find(_.name == "feature3").get
val fs = features.filter { _ != f3 } ++ Seq(f3.copy(bundles = f3.bundles.map(_.copy(startLevel = None))))
val ex = intercept[IllegalArgumentException] {
ResolvedRuntimeConfig(runtimeConfig, fs)
}
ex.getMessage should startWith("requirement failed: A ResolvedRuntimeConfig needs exactly one bundle with startLevel '0'")
}
"should not be constructable when cycles between feature refs exist" in {
pending
}
"should migrate all known features into RuntimeConfig.resolvedFeatures" in {
runtimeConfig.resolvedFeatures shouldBe empty
features should have size (3)
val rrc1 = ResolvedRuntimeConfig(runtimeConfig, features)
rrc1.runtimeConfig.resolvedFeatures should have size (3)
val rrc2 = ResolvedRuntimeConfig(rrc1.runtimeConfig)
rrc1.allReferencedFeatures should contain theSameElementsAs (rrc2.allReferencedFeatures)
rrc1 should equal(rrc2)
}
}
} | lefou/blended | blended.updater.config/jvm/src/test/scala/blended/updater/config/ResolvedRuntimeConfigSpec.scala | Scala | apache-2.0 | 2,999 |
package unfiltered.request
import scala.util.control.Exception.{ allCatch, catching }
trait DateParser extends (String => java.util.Date)
object DateFormatting {
import java.text.SimpleDateFormat
import java.util.{ Date, Locale, TimeZone }
def format(date: Date) =
new SimpleDateFormat("E, dd MMM yyyy HH:mm:ss z", Locale.ENGLISH) {
setTimeZone(TimeZone.getTimeZone("GMT"))
}.format(date)
def parseAs(fmt: String)(value: String): Option[Date] =
allCatch.opt(new SimpleDateFormat(fmt, Locale.US).parse(value))
/** Preferred HTTP date format Sun, 06 Nov 1994 08:49:37 GMT */
def RFC1123 = parseAs("EEE, dd MMM yyyy HH:mm:ss z")_
/** Sunday, 06-Nov-94 08:49:37 GMT */
def RFC1036 = parseAs("EEEEEE, dd-MMM-yy HH:mm:ss z")_
/** Sun Nov 6 08:49:37 1994 */
def ANSICTime = parseAs("EEE MMM d HH:mm:ss yyyy")_
/** @return various date coercion formats falling back on None value */
def parseDate(raw: String) = RFC1123(raw) orElse RFC1036(raw) orElse ANSICTime(raw)
}
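// Illustrative note (not part of the original file): parseDate tries the three formats above
// in order, e.g.
//   DateFormatting.parseDate("Sun, 06 Nov 1994 08:49:37 GMT")   // Some(...) via RFC1123
//   DateFormatting.parseDate("Sunday, 06-Nov-94 08:49:37 GMT")  // Some(...) via RFC1036
//   DateFormatting.parseDate("not a date")                      // None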
/** A header with values mapped to keys in a Map. */
private [request] class MappedRequestHeader[A, B](val name: String)(parser: Iterator[String] => Map[A, B]) extends RequestExtractor[Map[A, B]] {
def unapply[T](req: HttpRequest[T]) = Some(parser(req.headers(name)))
def apply[T](req: HttpRequest[T]) = parser(req.headers(name))
}
/** A header with comma delimited values. Implementations of this extractor
* will not match requests for which the header `name` is not present.*/
private [request] class SeqRequestHeader[T](val name: String)(parser: Iterator[String] => List[T]) extends RequestExtractor[List[T]] {
def unapply[A](req: HttpRequest[A]) =
Some(parser(req.headers(name))).filter { !_.isEmpty }
def apply[T](req: HttpRequest[T]) = parser(req.headers(name))
}
/** A header with a single value. Implementations of this extractor
* will not match requests for which the header `name` is not present.*/
private [request] class RequestHeader[A](val name: String)(parser: Iterator[String] => List[A]) extends RequestExtractor[A] {
def unapply[T](req: HttpRequest[T]) = parser(req.headers(name)).headOption
def apply[T](req: HttpRequest[T]) = parser(req.headers(name)).headOption
}
private [request] object DateValueParser extends (Iterator[String] => List[java.util.Date]) {
import DateFormatting._
def apply(values: Iterator[String]) =
values.toList.flatMap(parseDate)
}
private [request] object IntValueParser extends (Iterator[String] => List[Int]) {
def tryInt(raw: String) = catching(classOf[NumberFormatException]).opt(raw.toInt)
def apply(values: Iterator[String]) =
values.toList.flatMap(tryInt)
}
private [request] object StringValueParser extends (Iterator[String] => List[String]) {
def apply(values: Iterator[String]) =
values.toList
}
private [request] object UriValueParser extends (Iterator[String] => List[java.net.URI]) {
import java.net.{ URI, URISyntaxException }
def toUri(raw: String) =
catching(classOf[URISyntaxException], classOf[NullPointerException]).opt(new URI(raw))
def apply(values: Iterator[String]) =
values.toList.flatMap(toUri)
}
private [request] object SeqValueParser extends (Iterator[String] => List[String]) {
def apply(values: Iterator[String]) = {
def split(raw: String): List[String] =
(raw.split(",") map {
_.trim.takeWhile { _ != ';' }.mkString
}).toList
values.toList.flatMap(split)
}
}
private [request] case class Conneg(value: String, qualifier: Double = 1.0)
private [request] object Conneg {
val EqualsMatcher = """(\w*)="?([a-zA-Z\.0-9]*)"?""".r
def apply(input: String): Conneg = {
val split = input.trim().split(";").toList
val params = split.tail.foldLeft(Map[String, Option[String]]()) {
case (map, s) => {
val item = s.trim match {
case EqualsMatcher(a, b) => (a.trim, Some(b.trim))
case _ => (s, None)
}
map + item
}
}.collect{case (a, Some(b)) => (a, b)}
new Conneg(split.head, params.get("q").map(_.toDouble).getOrElse(1.0))
}
}
private [request] object ConnegValueParser extends (Iterator[String] => List[String]) {
def apply(values: Iterator[String]) = {
def parse: (String) => scala.List[Conneg] = {
raw => raw.split(",").map(Conneg(_)).toList
}
values.toList.flatMap(parse).sortBy(_.qualifier)(Ordering.Double.reverse).map(_.value)
}
}
/** Header whose value should be a date and time. Parsing is attempted
* for formats defined in the DateFormatting object, in this order:
* RFC1123, RFC1036, ANSICTime. */
class DateHeader(name: String) extends RequestHeader(name)(DateValueParser)
/** A repeatable header may be specified in more than one header k-v pair,
 * and its values are a comma-delimited list
* see also [[http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2]] */
class RepeatableHeader(name: String) extends SeqRequestHeader(name)(SeqValueParser)
/** Header whose value should be a valid URI. */
class UriHeader(name: String) extends RequestHeader(name)(UriValueParser)
/** Header whose value can be any string. */
class StringHeader(name: String) extends RequestHeader(name)(StringValueParser)
/** Header whose value should be an integer. (Is stored in an Int.) */
class IntHeader(name: String) extends RequestHeader(name)(IntValueParser)
/* Header where the value needs to be sorted by the qualifier attribute. */
class ConnegHeader(name: String) extends SeqRequestHeader(name)(ConnegValueParser)
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html#sec14.10
object Accept extends ConnegHeader("Accept")
object AcceptCharset extends ConnegHeader("Accept-Charset")
object AcceptEncoding extends ConnegHeader("Accept-Encoding")
object AcceptLanguage extends ConnegHeader("Accept-Language")
/** To handle request body content encodings */
object RequestContentEncoding extends ConnegHeader("Content-Encoding") {
private def matching(t: String) =
RequestExtractor.predicate(RequestContentEncoding) { encs =>
encs.exists { _.equalsIgnoreCase(t) }
}
val GZip = matching("gzip")
val Deflate = matching("deflate")
val Compress = matching("compress")
val SDCH = matching("sdch")
val Identity = matching("identity")
}
object Authorization extends StringHeader("Authorization")
object Connection extends StringHeader("Connection")
object RequestContentType extends StringHeader("Content-Type")
object Expect extends StringHeader("Expect")
object From extends StringHeader("From")
object Host extends StringHeader("Host")
object IfMatch extends RepeatableHeader("If-Match")
object IfModifiedSince extends DateHeader("If-Modified-Since")
object IfNoneMatch extends RepeatableHeader("If-None-Match")
object IfRange extends StringHeader("If-Range") // can also be an http date
object IfUnmodifiedSince extends DateHeader("If-Unmodified-Since")
object MaxForwards extends IntHeader("Max-Forwards")
object ProxyAuthorization extends StringHeader("Proxy-Authorization")
object Range extends RepeatableHeader("Range")// there's more structure here
object Referer extends UriHeader("Referer")
object TE extends RepeatableHeader("TE")
object Upgrade extends RepeatableHeader("Upgrade")
object UserAgent extends StringHeader("User-Agent")// maybe a bit more structure here
object Via extends RepeatableHeader("Via")
object XForwardedFor extends RepeatableHeader("X-Forwarded-For")
object XForwardedPort extends IntHeader("X-Forwarded-Port")
object XForwardedProto extends StringHeader("X-Forwarded-Proto")
/** Extracts the charset value from the Content-Type header, if present */
object Charset {
import unfiltered.util.MIMEType
def unapply[T](req: HttpRequest[T]) = {
for {
MIMEType(mimeType) <- RequestContentType(req)
charset <- mimeType.params.get("charset")
} yield charset
}
def apply[T](req: HttpRequest[T]) = unapply(req)
}
/** Extracts hostname and port separately from the Host header, setting
* a default port of 80 or 443 when none is specified */
object HostPort {
import unfiltered.util.Of
def unapply[T](req: HttpRequest[T]): Option[(String, Int)] =
req match {
case Host(hostname) => hostname.split(':') match {
case Array(host, Of.Int(port)) => Some((host, port))
case _ => Some((hostname, if(req.isSecure) 443 else 80))
}
case _ => None
}
}
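// Hypothetical usage sketch (not part of this file): the extractors above are used as
// patterns in an intent, along the lines of
//   unfiltered.filter.Planify {
//     case req @ UserAgent(agent) & HostPort(host, port) =>
//       unfiltered.response.ResponseString(s"$agent hit $host:$port")
//   }
// Planify, the & combinator and ResponseString live in other unfiltered modules and are
// assumptions here; only UserAgent and HostPort are defined in this file.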
| jarin/unfiltered | library/src/main/scala/request/headers.scala | Scala | mit | 8,378 |
/*
* Copyright 2015 Textocat
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
*
*/
package com.textocat.textokit.phrrecog.parsing
import com.textocat.textokit.morph.fs.{Word, Wordform}
import com.textocat.textokit.morph.model.{MorphConstants => M}
import com.textocat.textokit.phrrecog.parsing.NPParsers._
import com.textocat.textokit.phrrecog.parsing.WordUtils._
import com.textocat.textokit.tokenizer.fstype.NUM
import org.apache.uima.cas.text.AnnotationFS
import scala.collection.immutable.Queue
import scala.util.parsing.combinator.Parsers
/**
* @author Rinat Gareev
*
*/
trait NPParsers extends Parsers {
type Elem = Word
// atomic
def adjf(grs: GrammemeMatcher*) = posParser(M.ADJF, grs: _*)
def prtf(grs: GrammemeMatcher*) = posParser(M.PRTF, grs: _*)
def noun(grs: GrammemeMatcher*) = posParser(M.NOUN, grs: _*)
def pronoun(grs: GrammemeMatcher*) = posParser(M.NPRO, grs: _*)
// adjective or perfective
def aNom = adjf(M.nomn) | prtf(M.nomn)
def aGen = adjf(M.gent) | prtf(M.gent)
def aDat = adjf(M.datv) | prtf(M.datv)
def aAcc = adjf(M.accs) | prtf(M.accs)
def aAbl = adjf(M.ablt) | prtf(M.ablt)
def aLoc = adjf(M.loct) | prtf(M.loct)
// Noun base
def nounBase(grs: GrammemeMatcher*) = noun(grs: _*) | pronoun(grs: _*)
// Coordinated Adjective + Noun
def cANNom(grs: GrammemeMatcher*) =
rep(aNom) ~ nounBase(has(M.nomn) +: grs: _*) ^^ { case deps ~ n => new NP(n, deps) }
def cANGen(grs: GrammemeMatcher*) =
rep(aGen) ~ nounBase(has(M.gent) +: grs: _*) ^^ { case deps ~ n => new NP(n, deps) }
def cANDat(grs: GrammemeMatcher*) =
rep(aDat) ~ nounBase(has(M.datv) +: grs: _*) ^^ { case deps ~ n => new NP(n, deps) }
def cANAcc(grs: GrammemeMatcher*) =
rep(aAcc) ~ nounBase(has(M.accs) +: grs: _*) ^^ { case deps ~ n => new NP(n, deps) }
def cANAbl(grs: GrammemeMatcher*) =
rep(aAbl) ~ nounBase(has(M.ablt) +: grs: _*) ^^ { case deps ~ n => new NP(n, deps) }
def cANLoc(grs: GrammemeMatcher*) =
rep(aLoc) ~ nounBase(has(M.loct) +: grs: _*) ^^ { case deps ~ n => new NP(n, deps) }
// NU = Numeral + Unit
def nUNom = (numNot1 ~ cANGen() ^^ { case n ~ can => new NP(n, can) }
| num1 ~ cANNom() ^^ { case n ~ can => new NP(n, can) })
def nUGen = num ~ cANGen() ^^ { case n ~ can => new NP(n, can) }
def nUDat = num ~ cANDat() ^^ { case n ~ can => new NP(n, can) }
def nUAcc = (num24 ~ cANAcc(M.anim) ^^ { case n ~ can => new NP(n, can) }
| num24 ~ cANGen(hasNot(M.anim)) ^^ { case n ~ can => new NP(n, can) }
| num059 ~ cANGen() ^^ { case n ~ can => new NP(n, can) }
| num1 ~ cANAcc() ^^ { case n ~ can => new NP(n, can) })
def nUAbl = num ~ cANAbl() ^^ { case n ~ can => new NP(n, can) }
def nULoc = num ~ cANLoc() ^^ { case n ~ can => new NP(n, can) }
// prepositions
def gentPrep = textParser(gentPrepositions, M.PREP)
def datPrep = textParser(datPrepositions, M.PREP)
def accPrep = textParser(accPrepositions, M.PREP)
def ablPrep = textParser(ablPrepositions, M.PREP)
def locPrep = textParser(locPrepositions, M.PREP)
// Prepositional CAN
def pCANNom = nUNom | cANNom()
def pCANGen = opt(gentPrep) ~ (nUGen | cANGen()) ^^ {
case Some(prep) ~ np => np.setPreposition(prep) //new NP(noun = np.noun, prepOpt = Some(prep), depWords = np.depWords)
case None ~ np => np
}
def pCANDat = opt(datPrep) ~ (nUDat | cANDat()) ^^ {
case Some(prep) ~ np => np.setPreposition(prep) // new NP(np.noun, Some(prep), np.deps)
case None ~ np => np
}
def pCANAcc = opt(accPrep) ~ (nUAcc | cANAcc()) ^^ {
case Some(prep) ~ np => np.setPreposition(prep) // new NP(np.noun, Some(prep), np.deps)
case None ~ np => np
}
def pCANAbl = opt(ablPrep) ~ (nUAbl | cANAbl()) ^^ {
case Some(prep) ~ np => np.setPreposition(prep) // new NP(np.noun, Some(prep), np.deps)
case None ~ np => np
}
def pCANLoc = opt(locPrep) ~ (nULoc | cANLoc()) ^^ {
case Some(prep) ~ np => np.setPreposition(prep) // new NP(np.noun, Some(prep), np.deps)
case None ~ np => np
}
// NP = pCAN + genitives
def np = (pCANNom | pCANGen | pCANDat | pCANAcc | pCANAbl | pCANLoc) ~ rep(cANGen()) ^^ {
case headNP ~ depNPList => {
val genHeadOpt = toDependentNPChain(depNPList)
genHeadOpt match {
case None => headNP
case Some(genHead) =>
if (headNP.depNPs.isEmpty)
headNP.addDependentNP(genHeadOpt)
else new NP(headNP.noun, headNP.prepOpt, headNP.particleOpt, headNP.depWords,
// add genitive NP chain head to last
headNP.depNPs.init.enqueue(headNP.depNPs.last.addDependentNP(genHeadOpt)))
}
}
}
def posParser(pos: String, grs: GrammemeMatcher*) = new Parser[Wordform] {
override def apply(in: Input) =
if (in.atEnd) Failure("end of sequence detected", in)
else findWordform(in.first, pos, grs: _*) match {
case Some(wf) => Success(wf, in.rest)
        case None => Failure("%s with grammemes {%s} expected".format(pos, grs), in)
}
}
def textParser(variants: Set[String], requiredPos: String) = new Parser[Wordform] {
def apply(in: Input) =
if (in.atEnd) Failure("end of sequence detected", in)
else if (variants.contains(in.first.getCoveredText))
findWordform(in.first, requiredPos) match {
case Some(wf) => Success(wf, in.rest)
case None => Failure(
"Found word '%s' does not have expected pos '%s'".format(in.first.getCoveredText, requiredPos),
in)
}
else Failure("One of %s was expected".format(variants), in)
}
// num ends on 1
def num1 = num(endsOn(Set('1'))(_), M.NUMR)
// num ends on 2,3,4
def num24 = num(endsOn(Set('2', '3', '4'))(_), M.NUMR)
// num ends on 0,5-9
def num059 = num(endsOn(Set('0', '5', '6', '7', '8', '9'))(_), M.NUMR)
// num ends on 0,2-9
def numNot1 = num(n => !(endsOn(Set('1'))(n)), M.NUMR)
def num: Parser[Wordform] = num(n => true, M.NUMR)
def num(matcher: NUM => Boolean, requiredPos: String) = new Parser[Wordform] {
def apply(in: Input) =
if (in.atEnd) Failure("end of sequence detected", in)
else in.first.getToken() match {
case n: NUM => if (matcher(n))
findWordform(in.first, requiredPos) match {
case Some(wf) => Success(wf, in.rest)
case None => Failure(
"NUM word '%s' does not have required pos '%s'"
.format(in.first.getCoveredText, requiredPos),
in)
}
else Failure("num does not match condition", in)
case _ => Failure("NUM was expected", in)
}
}
private def endsOn(requiredEnds: Set[Char])(anno: AnnotationFS): Boolean = {
val annoTxt = anno.getCoveredText()
requiredEnds.contains(annoTxt.last)
}
private implicit def stringToReqGramemme(grString: String): GrammemeRequired =
has(grString)
}
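/*
 * Illustrative usage sketch (not part of the original source): the top-level
 * `np` parser is applied to an Input (a Reader[Word]) over the tokens of a
 * sentence. `WordSequenceReader` is a hypothetical Reader[Word] implementation
 * and is not shown here.
 *
 *   val input: Input = new WordSequenceReader(wordsOfSentence)
 *   np(input) match {
 *     case Success(phrase, rest) => // phrase: NP covering the matched words
 *     case _                     => // no noun phrase recognized at this position
 *   }
 */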
class NP(val noun: Wordform,
val prepOpt: Option[Wordform] = None, val particleOpt: Option[Wordform] = None,
val depWords: List[Wordform] = Nil, val depNPs: Queue[NP] = Queue()) {
// aux constructor
def this(noun: Wordform, nps: NP*) = this(noun, None, None, Nil, Queue() ++ nps)
// aux constructor
def this(noun: Wordform, deps: List[Wordform]) = this(noun, None, None, deps, Queue())
// clone and change
def setPreposition(newPrep: Wordform): NP =
if (prepOpt.isDefined) throw new IllegalStateException(
"Can't add preposition '%s' because NP already has one: '%s'".format(
newPrep.getWord.getCoveredText, prepOpt.get.getWord.getCoveredText))
else new NP(noun, Some(newPrep), particleOpt, depWords, depNPs)
// clone and change
def addDependentNP(newDepNPOpt: Option[NP]): NP = newDepNPOpt match {
case None => this
case Some(newDepNP) => new NP(noun, prepOpt, particleOpt, depWords, depNPs.enqueue(newDepNP))
}
}
object NPParsers {
private val gentPrepositions = generateCommonWordsSet("без", "до", "из", "от", "у", "для", "ради", "между", "с")
private val datPrepositions = generateCommonWordsSet("к", "по")
private val accPrepositions = generateCommonWordsSet("про", "через", "сквозь", "в", "на", "о", "за", "под", "по", "с")
private val ablPrepositions = generateCommonWordsSet("над", "перед", "между", "за", "под", "с")
private val locPrepositions = generateCommonWordsSet("при", "в", "на", "о", "по")
/*
private[parsing] def flatten(nps: TraversableOnce[NP]): List[Word] = {
val result = new ListBuffer[Word]
for (np <- nps) {
result += np.noun
result ++= np.deps
}
result.toList
}
*/
private def toDependentNPChain(nps: List[NP]): Option[NP] =
if (nps == null || nps.isEmpty) None
else Some(nps.head.addDependentNP(toDependentNPChain(nps.tail)))
private def generateCommonWordsSet(words: String*): Set[String] =
Set() ++ words ++ words.map(_.capitalize)
} | Denis220795/Textokit | Textokit.PhraseRecognizer/src/main/scala/com/textocat/textokit/phrrecog/parsing/NPParsers.scala | Scala | apache-2.0 | 9,533 |
object exercise2_1 {
def fib(n: Int): Int = {
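    // Tail-recursive accumulator: after k steps (prev, current) == (F(k), F(k+1)),
    // so after n steps `prev` holds F(n).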
@annotation.tailrec
def loop(n: Int, prev: Int, current: Int): Int =
if(n == 0) prev
else loop(n-1, current, prev + current)
loop(n, 0, 1)
}
def main(args: Array[String]): Unit = {
for(i <- 0 to 10) println(fib(i))
}
}
| joonjeong/shadow-boxing | fp-in-scala/exercise2/fib.scala | Scala | mit | 302 |
package coursier.core
object Orders {
@deprecated("Will likely be removed at some point in future versions", "2.0.0-RC3")
trait PartialOrdering[T] extends scala.math.PartialOrdering[T] {
def lteq(x: T, y: T): Boolean =
tryCompare(x, y)
.exists(_ <= 0)
}
/** All configurations that each configuration extends, including the ones it extends transitively
*/
@deprecated("Will likely be removed at some point in future versions", "2.0.0-RC3")
def allConfigurations(
configurations: Map[Configuration, Seq[Configuration]]
): Map[Configuration, Set[Configuration]] =
allConfigurations0(configurations)
private[core] def allConfigurations0(
configurations: Map[Configuration, Seq[Configuration]]
): Map[Configuration, Set[Configuration]] = {
def allParents(config: Configuration): Set[Configuration] = {
def helper(configs: Set[Configuration], acc: Set[Configuration]): Set[Configuration] =
if (configs.isEmpty)
acc
else if (configs.exists(acc))
helper(configs -- acc, acc)
else if (configs.exists(!configurations.contains(_))) {
val (remaining, notFound) = configs.partition(configurations.contains)
helper(remaining, acc ++ notFound)
}
else {
val extraConfigs = configs.flatMap(configurations)
helper(extraConfigs, acc ++ configs)
}
helper(Set(config), Set.empty)
}
configurations
.keys
.toList
.map(config => config -> (allParents(config) - config))
.toMap
}
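  /* Example (illustrative, not in the original source): with
   *   Map(compile -> Nil, runtime -> Seq(compile), test -> Seq(runtime))
   * allConfigurations0 returns
   *   Map(compile -> Set(), runtime -> Set(compile), test -> Set(runtime, compile))
   * i.e. each configuration maps to everything it extends transitively,
   * excluding itself.
   */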
/** Configurations partial order based on configuration mapping `configurations`.
*
* @param configurations:
* for each configuration, the configurations it directly extends.
*/
@deprecated("Will likely be removed at some point in future versions", "2.0.0-RC3")
def configurationPartialOrder(
configurations: Map[Configuration, Seq[Configuration]]
): PartialOrdering[Configuration] =
configurationPartialOrder0(configurations)
private def configurationPartialOrder0(
configurations: Map[Configuration, Seq[Configuration]]
): PartialOrdering[Configuration] =
new PartialOrdering[Configuration] {
val allParentsMap = allConfigurations0(configurations)
def tryCompare(x: Configuration, y: Configuration) =
if (x == y)
Some(0)
else if (allParentsMap.get(x).exists(_(y)))
Some(-1)
else if (allParentsMap.get(y).exists(_(x)))
Some(1)
else
None
}
/** Non-optional < optional */
@deprecated("Will likely be removed at some point in future versions", "2.0.0-RC3")
val optionalPartialOrder: PartialOrdering[Boolean] =
new PartialOrdering[Boolean] {
def tryCompare(x: Boolean, y: Boolean) =
Some(
if (x == y) 0
else if (x) 1
else -1
)
}
/** Exclusions partial order.
*
* x <= y iff all that x excludes is also excluded by y. x and y not related iff x excludes some
* elements not excluded by y AND y excludes some elements not excluded by x.
*
* In particular, no exclusions <= anything <= Set(("*", "*"))
*/
@deprecated(
"Can give incorrect results - will likely be removed at some point in future versions",
"2.0.0-RC3"
)
val exclusionsPartialOrder: PartialOrdering[Set[(Organization, ModuleName)]] =
new PartialOrdering[Set[(Organization, ModuleName)]] {
def boolCmp(a: Boolean, b: Boolean) = (a, b) match {
case (true, true) => Some(0)
case (true, false) => Some(1)
case (false, true) => Some(-1)
case (false, false) => None
}
def tryCompare(x: Set[(Organization, ModuleName)], y: Set[(Organization, ModuleName)]) = {
val (xAll, xExcludeByOrg1, xExcludeByName1, xRemaining0) = Exclusions.partition(x)
val (yAll, yExcludeByOrg1, yExcludeByName1, yRemaining0) = Exclusions.partition(y)
boolCmp(xAll, yAll).orElse {
def filtered(e: Set[(Organization, ModuleName)]) =
e.filter { case (org, name) =>
!xExcludeByOrg1(org) && !yExcludeByOrg1(org) &&
!xExcludeByName1(name) && !yExcludeByName1(name)
}
def removeIntersection[T](a: Set[T], b: Set[T]) =
(a -- b, b -- a)
def allEmpty(set: Set[_]*) = set.forall(_.isEmpty)
val (xRemaining1, yRemaining1) =
(filtered(xRemaining0), filtered(yRemaining0))
val (xProperRemaining, yProperRemaining) =
removeIntersection(xRemaining1, yRemaining1)
val (onlyXExcludeByOrg, onlyYExcludeByOrg) =
removeIntersection(xExcludeByOrg1, yExcludeByOrg1)
val (onlyXExcludeByName, onlyYExcludeByName) =
removeIntersection(xExcludeByName1, yExcludeByName1)
val (noXProper, noYProper) = (
allEmpty(xProperRemaining, onlyXExcludeByOrg, onlyXExcludeByName),
allEmpty(yProperRemaining, onlyYExcludeByOrg, onlyYExcludeByName)
)
boolCmp(noYProper, noXProper) // order matters
}
}
}
private def fallbackConfigIfNecessary(dep: Dependency, configs: Set[Configuration]): Dependency =
Parse.withFallbackConfig(dep.configuration) match {
case Some((main, fallback)) =>
val config0 =
if (configs(main))
main
else if (configs(fallback))
fallback
else
dep.configuration
dep.withConfiguration(config0)
case _ =>
dep
}
/** Assume all dependencies have same `module`, `version`, and `artifact`; see `minDependencies`
* if they don't.
*/
@deprecated(
"Can give incorrect results - will likely be removed at some point in future versions",
"2.0.0-RC3"
)
def minDependenciesUnsafe(
dependencies: Set[Dependency],
configs: Map[Configuration, Seq[Configuration]]
): Set[Dependency] = {
val availableConfigs = configs.keySet
val groupedDependencies = dependencies
.map(fallbackConfigIfNecessary(_, availableConfigs))
.groupBy(dep => (dep.optional, dep.configuration))
.mapValues { deps =>
deps.head.withExclusions(deps.foldLeft(Exclusions.one)((acc, dep) =>
Exclusions.meet(acc, dep.exclusions)
))
}
.toList
val remove =
for {
List(((xOpt, xScope), xDep), ((yOpt, yScope), yDep)) <- groupedDependencies.combinations(2)
optCmp <- optionalPartialOrder.tryCompare(xOpt, yOpt).iterator
scopeCmp <- configurationPartialOrder0(configs).tryCompare(xScope, yScope).iterator
if optCmp * scopeCmp >= 0
exclCmp <- exclusionsPartialOrder.tryCompare(xDep.exclusions, yDep.exclusions).iterator
if optCmp * exclCmp >= 0
if scopeCmp * exclCmp >= 0
xIsMin = optCmp < 0 || scopeCmp < 0 || exclCmp < 0
yIsMin = optCmp > 0 || scopeCmp > 0 || exclCmp > 0
        if xIsMin || yIsMin // should always be true, unless xDep == yDep, which shouldn't happen
} yield if (xIsMin) yDep else xDep
groupedDependencies.map(_._2).toSet -- remove
}
/** Minified representation of `dependencies`.
*
* The returned set brings exactly the same things as `dependencies`, with no redundancy.
*/
@deprecated(
"Can give incorrect results - will likely be removed at some point in future versions, use DependencySet.minimizedSet instead to minimize a dependency set",
"2.0.0-RC3"
)
def minDependencies(
dependencies: Set[Dependency],
configs: ((Module, String)) => Map[Configuration, Seq[Configuration]]
): Set[Dependency] = {
dependencies
.groupBy(
_.withConfiguration(Configuration.empty).withExclusions(Set.empty).withOptional(false)
)
.mapValues(deps => minDependenciesUnsafe(deps, configs(deps.head.moduleVersion)))
.valuesIterator
.fold(Set.empty)(_ ++ _)
}
}
| alexarchambault/coursier | modules/core/shared/src/main/scala/coursier/core/Orders.scala | Scala | apache-2.0 | 7,972 |
/**
* Copyright (c) 2011, Andrew Shewring
* Licensed under the new BSD License (see the LICENSE.txt file for details).
*/
package com.github.ashewring.sbttycho
import java.io.File
object SbtTychoConstants {
val TychoBuildUsage = "\\nUsage:\\ntycho-build <maven arguments>"
val BundleVersionKey = "Bundle-Version"
val WorkingDirectory = new File(System.getProperty("user.dir"))
} | ashewring/sbt-tycho | src/main/scala/com/github/ashewring/sbttycho/SbtTychoConstants.scala | Scala | bsd-3-clause | 386 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.avocado.algorithms.hmm
trait Aligner {
/**
* Aligns sequences.
*
* @param refSequence Reference sequence over the active region.
* @param testSequence Sequence being scored.
* @param testQualities String of qualities. Not currently used.
* @return Alignment which stores the aligned sequences and likelihoods
*/
def alignSequences(refSequence: String, testSequence: String, testQualities: String): Alignment
}
| FusionWorks/avocado | avocado-core/src/main/scala/org/bdgenomics/avocado/algorithms/hmm/Aligner.scala | Scala | apache-2.0 | 1,254 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.coordinator
import org.apache.samza.config.StorageConfig
import org.apache.samza.job.model.{JobModel, TaskModel}
import org.apache.samza.config.Config
import org.apache.samza.SamzaException
import org.apache.samza.container.grouper.task.TaskNameGrouperFactory
import org.apache.samza.container.grouper.stream.SystemStreamPartitionGrouperFactory
import java.util
import org.apache.samza.container.{LocalityManager, TaskName}
import org.apache.samza.storage.ChangelogPartitionManager
import org.apache.samza.util.Logging
import org.apache.samza.metrics.MetricsRegistryMap
import org.apache.samza.util.Util
import scala.collection.JavaConversions._
import org.apache.samza.config.JobConfig.Config2Job
import org.apache.samza.config.TaskConfig.Config2Task
import org.apache.samza.Partition
import org.apache.samza.system.StreamMetadataCache
import org.apache.samza.system.SystemStreamPartition
import org.apache.samza.system.SystemFactory
import org.apache.samza.coordinator.server.HttpServer
import org.apache.samza.checkpoint.{Checkpoint, CheckpointManager}
import org.apache.samza.coordinator.server.JobServlet
import org.apache.samza.config.SystemConfig.Config2System
import org.apache.samza.coordinator.stream.CoordinatorStreamSystemFactory
/**
* Helper companion object that is responsible for wiring up a JobCoordinator
* given a Config object.
*/
object JobCoordinator extends Logging {
/**
   * a volatile value to store the currently instantiated <code>JobCoordinator</code>
*/
@volatile var currentJobCoordinator: JobCoordinator = null
/**
* @param coordinatorSystemConfig A config object that contains job.name,
* job.id, and all system.<job-coordinator-system-name>.*
* configuration. The method will use this config to read all configuration
* from the coordinator stream, and instantiate a JobCoordinator.
*/
def apply(coordinatorSystemConfig: Config, metricsRegistryMap: MetricsRegistryMap): JobCoordinator = {
val coordinatorStreamSystemFactory: CoordinatorStreamSystemFactory = new CoordinatorStreamSystemFactory()
val coordinatorSystemConsumer = coordinatorStreamSystemFactory.getCoordinatorStreamSystemConsumer(coordinatorSystemConfig, metricsRegistryMap)
val coordinatorSystemProducer = coordinatorStreamSystemFactory.getCoordinatorStreamSystemProducer(coordinatorSystemConfig, metricsRegistryMap)
info("Registering coordinator system stream.")
coordinatorSystemConsumer.register
debug("Starting coordinator system stream.")
coordinatorSystemConsumer.start
debug("Bootstrapping coordinator system stream.")
coordinatorSystemConsumer.bootstrap
val config = coordinatorSystemConsumer.getConfig
info("Got config: %s" format config)
val changelogManager = new ChangelogPartitionManager(coordinatorSystemProducer, coordinatorSystemConsumer, "Job-coordinator")
val localityManager = new LocalityManager(coordinatorSystemProducer, coordinatorSystemConsumer)
val systemNames = getSystemNames(config)
// Map the name of each system to the corresponding SystemAdmin
val systemAdmins = systemNames.map(systemName => {
val systemFactoryClassName = config
.getSystemFactory(systemName)
.getOrElse(throw new SamzaException("A stream uses system %s, which is missing from the configuration." format systemName))
val systemFactory = Util.getObj[SystemFactory](systemFactoryClassName)
systemName -> systemFactory.getAdmin(systemName, config)
}).toMap
val streamMetadataCache = new StreamMetadataCache(systemAdmins)
val jobCoordinator = getJobCoordinator(config, changelogManager, localityManager, streamMetadataCache)
createChangeLogStreams(config, jobCoordinator.jobModel.maxChangeLogStreamPartitions, streamMetadataCache)
jobCoordinator
}
def apply(coordinatorSystemConfig: Config): JobCoordinator = apply(coordinatorSystemConfig, new MetricsRegistryMap())
/**
* Build a JobCoordinator using a Samza job's configuration.
*/
def getJobCoordinator(config: Config,
changelogManager: ChangelogPartitionManager,
localityManager: LocalityManager,
streamMetadataCache: StreamMetadataCache) = {
val jobModelGenerator = initializeJobModel(config, changelogManager, localityManager, streamMetadataCache)
val server = new HttpServer
server.addServlet("/*", new JobServlet(jobModelGenerator))
currentJobCoordinator = new JobCoordinator(jobModelGenerator(), server)
currentJobCoordinator
}
/**
* For each input stream specified in config, exactly determine its
* partitions, returning a set of SystemStreamPartitions containing them all.
*/
def getInputStreamPartitions(config: Config, streamMetadataCache: StreamMetadataCache) = {
val inputSystemStreams = config.getInputStreams
// Get the set of partitions for each SystemStream from the stream metadata
streamMetadataCache
.getStreamMetadata(inputSystemStreams)
.flatMap {
case (systemStream, metadata) =>
metadata
.getSystemStreamPartitionMetadata
.keys
.map(new SystemStreamPartition(systemStream, _))
}.toSet
}
/**
* Gets a SystemStreamPartitionGrouper object from the configuration.
*/
def getSystemStreamPartitionGrouper(config: Config) = {
val factoryString = config.getSystemStreamPartitionGrouperFactory
val factory = Util.getObj[SystemStreamPartitionGrouperFactory](factoryString)
factory.getSystemStreamPartitionGrouper(config)
}
/**
   * The method initializes the jobModel and creates a JobModel generator which can be used to generate new JobModels
   * that catch up with the latest content from the coordinator stream.
*/
private def initializeJobModel(config: Config,
changelogManager: ChangelogPartitionManager,
localityManager: LocalityManager,
streamMetadataCache: StreamMetadataCache): () => JobModel = {
// Do grouping to fetch TaskName to SSP mapping
val allSystemStreamPartitions = getInputStreamPartitions(config, streamMetadataCache)
val grouper = getSystemStreamPartitionGrouper(config)
info("SystemStreamPartitionGrouper " + grouper + " has grouped the SystemStreamPartitions into the following taskNames:")
val groups = grouper.group(allSystemStreamPartitions)
// Initialize the ChangelogPartitionManager and the CheckpointManager
val previousChangelogMapping = if (changelogManager != null)
{
changelogManager.start()
changelogManager.readChangeLogPartitionMapping()
}
else
{
new util.HashMap[TaskName, Integer]()
}
    // We don't need to start() localityManager as it shares the same instances with the checkpoint and changelog managers.
// TODO: This code will go away with refactoring - SAMZA-678
localityManager.start()
// Generate the jobModel
def jobModelGenerator(): JobModel = refreshJobModel(config,
allSystemStreamPartitions,
groups,
previousChangelogMapping,
localityManager)
val jobModel = jobModelGenerator()
    // Save the changelog mapping back to the ChangelogPartitionManager
if (changelogManager != null)
{
// newChangelogMapping is the merging of all current task:changelog
// assignments with whatever we had before (previousChangelogMapping).
// We must persist legacy changelog assignments so that
// maxChangelogPartitionId always has the absolute max, not the current
// max (in case the task with the highest changelog partition mapping
      // disappears).
val newChangelogMapping = jobModel.getContainers.flatMap(_._2.getTasks).map{case (taskName,taskModel) => {
taskName -> Integer.valueOf(taskModel.getChangelogPartition.getPartitionId)
}}.toMap ++ previousChangelogMapping
info("Saving task-to-changelog partition mapping: %s" format newChangelogMapping)
changelogManager.writeChangeLogPartitionMapping(newChangelogMapping)
}
// Return a jobModelGenerator lambda that can be used to refresh the job model
jobModelGenerator
}
/**
* Build a full Samza job model. The function reads the latest checkpoint from the underlying coordinator stream and
* builds a new JobModel.
   * This method needs to be thread safe because it is called for every HTTP request from a container, and each call
   * uses the same underlying coordinator stream producer and consumer instances.
*/
private def refreshJobModel(config: Config,
allSystemStreamPartitions: util.Set[SystemStreamPartition],
groups: util.Map[TaskName, util.Set[SystemStreamPartition]],
previousChangelogMapping: util.Map[TaskName, Integer],
localityManager: LocalityManager): JobModel = {
this.synchronized
{
      // If no mappings are present (first time the job is running) we return -1; this allows 0 to be the first
      // changelog partition mapping.
var maxChangelogPartitionId = previousChangelogMapping.values.map(_.toInt).toList.sorted.lastOption.getOrElse(-1)
// Assign all SystemStreamPartitions to TaskNames.
val taskModels =
{
groups.map
{ case (taskName, systemStreamPartitions) =>
val changelogPartition = Option(previousChangelogMapping.get(taskName)) match
{
case Some(changelogPartitionId) => new Partition(changelogPartitionId)
case _ =>
// If we've never seen this TaskName before, then assign it a
// new changelog.
maxChangelogPartitionId += 1
info("New task %s is being assigned changelog partition %s." format(taskName, maxChangelogPartitionId))
new Partition(maxChangelogPartitionId)
}
new TaskModel(taskName, systemStreamPartitions, changelogPartition)
}.toSet
}
// Here is where we should put in a pluggable option for the
// SSPTaskNameGrouper for locality, load-balancing, etc.
val containerGrouperFactory = Util.getObj[TaskNameGrouperFactory](config.getTaskNameGrouperFactory)
val containerGrouper = containerGrouperFactory.build(config)
val containerModels = asScalaSet(containerGrouper.group(setAsJavaSet(taskModels))).map
{ case (containerModel) => Integer.valueOf(containerModel.getContainerId) -> containerModel }.toMap
new JobModel(config, containerModels, localityManager)
}
}
private def createChangeLogStreams(config: StorageConfig, changeLogPartitions: Int, streamMetadataCache: StreamMetadataCache) {
val changeLogSystemStreams = config
.getStoreNames
.filter(config.getChangelogStream(_).isDefined)
.map(name => (name, config.getChangelogStream(name).get)).toMap
.mapValues(Util.getSystemStreamFromNames(_))
for ((storeName, systemStream) <- changeLogSystemStreams) {
val systemAdmin = Util.getObj[SystemFactory](config
.getSystemFactory(systemStream.getSystem)
.getOrElse(throw new SamzaException("A stream uses system %s, which is missing from the configuration." format systemStream.getSystem))
).getAdmin(systemStream.getSystem, config)
systemAdmin.createChangelogStream(systemStream.getStream, changeLogPartitions)
}
val changeLogMetadata = streamMetadataCache.getStreamMetadata(changeLogSystemStreams.values.toSet)
info("Got change log stream metadata: %s" format changeLogMetadata)
}
private def getSystemNames(config: Config) = config.getSystemNames.toSet
}
/**
* <p>JobCoordinator is responsible for managing the lifecycle of a Samza job
* once it's been started. This includes starting and stopping containers,
* managing configuration, etc.</p>
*
* <p>Any new cluster manager that's integrated with Samza (YARN, Mesos, etc)
* must integrate with the job coordinator.</p>
*
* <p>This class' API is currently unstable, and likely to change. The
* coordinator's responsibility is simply to propagate the job model, and HTTP
* server right now.</p>
*/
class JobCoordinator(
/**
* The data model that describes the Samza job's containers and tasks.
*/
val jobModel: JobModel,
/**
* HTTP server used to serve a Samza job's container model to SamzaContainers when they start up.
*/
val server: HttpServer = null) extends Logging {
debug("Got job model: %s." format jobModel)
def start {
if (server != null) {
debug("Starting HTTP server.")
server.start
info("Startd HTTP server: %s" format server.getUrl)
}
}
def stop {
if (server != null) {
debug("Stopping HTTP server.")
server.stop
info("Stopped HTTP server.")
}
}
}
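/*
 * Illustrative lifecycle sketch (not part of the original file): a cluster
 * manager integration would typically build and drive the coordinator like
 * this, where `coordinatorSystemConfig` is the bootstrap config described in
 * the companion object above.
 *
 *   val jobCoordinator = JobCoordinator(coordinatorSystemConfig)
 *   jobCoordinator.start   // serves the JobModel over HTTP to containers
 *   // ... containers run ...
 *   jobCoordinator.stop
 */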
| gustavoanatoly/samza | samza-core/src/main/scala/org/apache/samza/coordinator/JobCoordinator.scala | Scala | apache-2.0 | 14,149 |
import scala.language.implicitConversions
trait TripleEqualsSupport:
class Equalizer[L](val leftSide: L)
def convertToEqualizer[T](left: T): Equalizer[T]
trait TripleEquals extends TripleEqualsSupport:
implicit override def convertToEqualizer[T](left: T): Equalizer[T] = new Equalizer(left)
class GraphDB[Id]:
class Node private[GraphDB](val id: Id)
object GraphDBSpec extends TripleEquals:
object graph extends GraphDB[String]
import graph.Node
val m = new Node("Alice") // error
| dotty-staging/dotty | tests/neg/i11466.scala | Scala | apache-2.0 | 499 |
package com.rasterfoundry.api.user
import com.dropbox.core.DbxSessionStore
import io.circe.generic.JsonCodec
import scala.beans.BeanProperty
import scala.util.Random
import java.util.Base64
@JsonCodec
final case class DropboxAuthRequest(
authorizationCode: String,
redirectURI: String
)
/** Mock a DbxSessionStore.
*
* To implement the DbxSessionStore interface, we need
* get, set, and clear. The Dropbox SDK is really happy
* if we're using the servlet API, but since we're not,
* we have this dumb class instead.
*
* get returns a random 16 byte string that we're using
* to pretend we set state in the /authorize request.
* set and clear are around just to make the interface
* happy
*/
class DummySessionStore extends DbxSessionStore {
@BeanProperty
var token: String = ""
def get: String = {
val s = this.getToken()
s match {
case "" =>
val bytes: Array[Byte] =
Array.fill(16)((Random.nextInt(255) - 128).toByte)
val encoder = Base64.getEncoder()
this.setToken(encoder.encodeToString(bytes))
this.getToken()
case _ => s
}
}
def set(s: String): Unit = this.setToken(s)
/** This method exists just for class compatibility but doesn't do anything */
@SuppressWarnings(Array("EmptyMethod"))
def clear(): Unit = ()
}
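/*
 * Illustrative sketch (not part of the original file): the first call to
 * `get` lazily generates and stores a random base64 token, so later calls
 * return the same value until `set` replaces it (`clear` is a no-op here).
 *
 *   val store = new DummySessionStore()
 *   val csrf = store.get            // generates and remembers a random token
 *   assert(csrf == store.get)
 *   store.set("fixed-token")
 *   assert(store.get == "fixed-token")
 */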
| raster-foundry/raster-foundry | app-backend/api/src/main/scala/user/DropboxUser.scala | Scala | apache-2.0 | 1,338 |
/*
* -╥⌐⌐⌐⌐ -⌐⌐⌐⌐-
* ≡╢░░░░⌐\\░░░φ ╓╝░░░░⌐░░░░╪╕
* ╣╬░░` `░░░╢┘ φ▒╣╬╝╜ ░░╢╣Q
* ║╣╬░⌐ ` ╤▒▒▒Å` ║╢╬╣
* ╚╣╬░⌐ ╔▒▒▒▒`«╕ ╢╢╣▒
* ╫╬░░╖ .░ ╙╨╨ ╣╣╬░φ ╓φ░╢╢Å
* ╙╢░░░░⌐"░░░╜ ╙Å░░░░⌐░░░░╝`
* ``˚¬ ⌐ ˚˚⌐´
*
* Copyright © 2016 Flipkart.com
*/
package com.flipkart.connekt.busybees.tests.streams.topologies
import java.util.concurrent.atomic.AtomicLong
import akka.stream.scaladsl.{Sink, Source}
import com.flipkart.connekt.busybees.streams.sources.KafkaSource
import com.flipkart.connekt.busybees.tests.streams.TopologyUTSpec
import com.flipkart.connekt.commons.factories.{ConnektLogger, LogFile}
import com.flipkart.connekt.commons.iomodels.ConnektRequest
import com.flipkart.connekt.commons.metrics.Instrumented
import com.flipkart.connekt.commons.utils.StringUtils._
import org.scalatest.Ignore
//import com.softwaremill.react.kafka.{ConsumerProperties, ReactiveKafka}
import scala.concurrent.duration._
import scala.concurrent.{Await, Promise}
//@Ignore
class KafkaBenchmarkTest extends TopologyUTSpec with Instrumented {
val counter: AtomicLong = new AtomicLong(0)
val counterTS: AtomicLong = new AtomicLong(System.currentTimeMillis)
"KafkaBenchmarkTest" should "bench" in {
val kSource = Source.fromGraph(new KafkaSource[ConnektRequest](getKafkaConsumerConf, "email_8e494f43126ade96ce7320ad8ccfc709", "ckt_email"))
val qps = meter("kafa.read")
/*val rKafka = new ReactiveKafka()
val publisher = rKafka.consume(ConsumerProperties(
brokerList = "127.0.0.1:9092",
zooKeeperHost = "127.0.0.1:2181/bro/kafka-nm-qa",
topic = "push_connekt_insomnia_d346b56a260f1a",
groupId = "ckt",
decoder = new MessageDecoder[ConnektRequest]()
))
val reactiveSource = Source.fromPublisher(publisher).map(_.message())
*/
//Run the benchmark topology
val rF = kSource.runWith(Sink.foreach( r => {
qps.mark()
if(0 == (counter.incrementAndGet() % 50)) {
ConnektLogger(LogFile.SERVICE).info(s">>> MR[${qps.getMeanRate}], 1MR[${qps.getOneMinuteRate}], 5MR[${qps.getFiveMinuteRate}]")
}
}))
Await.result(rF, 500.seconds)
}
}
| Flipkart/connekt | busybees/src/test/scala/com/flipkart/connekt/busybees/tests/streams/topologies/KafkaBenchmarkTest.scala | Scala | mit | 2,491 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package osgi.embedded.scala
import java.io.{PrintWriter, InputStreamReader}
import javax.jcr.{Session, Repository, Node, SimpleCredentials}
import junit.framework.TestCase
import junit.framework.Assert.{assertEquals, assertTrue, assertFalse}
import org.apache.jackrabbit.core.{TransientRepository}
import osgi.embedded.scala.JcrFS.{JcrNode, JcrFile, JcrFolder}
class JcrFSTest extends TestCase {
var session: Session = null
var repository: Repository = null
var testRoot: Node = null
override def setUp() {
super.setUp()
repository = new TransientRepository
session = repository.login(new SimpleCredentials("admin", "admin".toCharArray))
testRoot = session.getRootNode.addNode("testRoot", "nt:folder")
session.save()
}
override def tearDown() {
testRoot.remove()
testRoot = null
session.save()
session.logout()
session = null
repository = null
super.tearDown()
}
def testTraverse: Unit = {
def traverse(entry: JcrNode): String = {
//(for (entry <- entry.elements) yield {entry match {
(for (entry <- entry.iterator) yield {entry match {
case file: JcrFile => file.path
case folder: JcrFolder => folder.path + traverse(folder)
}})
.mkString("(", ",", ")")
}
var _id = 0
def id() = {
_id += 1
_id
}
def addChildren(folder: Node) = {
folder.addNode("file" + id(), "nt:file")
folder.addNode("file" + id(), "nt:file")
(folder.addNode("folder" + id(), "nt:folder"), folder.addNode("folder" + id(), "nt:folder"))
}
val (f1, f2) = addChildren(testRoot)
val (f3, f4) = addChildren(f1)
addChildren(f2)
addChildren(f3)
addChildren(f4)
val actual = traverse(JcrFS.create(testRoot))
val expected =
"(/testRoot/file1,/testRoot/file2,/testRoot/folder3(/testRoot/folder3/file5,/testRoot/folder3/file6," +
"/testRoot/folder3/folder7(/testRoot/folder3/folder7/file13,/testRoot/folder3/folder7/file14," +
"/testRoot/folder3/folder7/folder15(),/testRoot/folder3/folder7/folder16())," +
"/testRoot/folder3/folder8(/testRoot/folder3/folder8/file17,/testRoot/folder3/folder8/file18," +
"/testRoot/folder3/folder8/folder19(),/testRoot/folder3/folder8/folder20()))," +
"/testRoot/folder4(/testRoot/folder4/file9,/testRoot/folder4/file10,/testRoot/folder4/folder11()," +
"/testRoot/folder4/folder12()))"
assertEquals(expected, actual)
}
def testCreateFile {
val root = JcrFS.create(testRoot)
val file = root.fileNamed("file")
val fileNode = testRoot.getNode("file")
assertFalse(file.isDirectory)
assertEquals("nt:file", fileNode.getPrimaryNodeType.getName)
assertEquals("file", file.name)
assertEquals("/testRoot/file", file.path)
assertEquals(fileNode.getProperty("jcr:content/jcr:lastModified").getLong, file.lastModified)
assertEquals(fileNode.getProperty("jcr:content/jcr:data").getLength, file.sizeOption.get.toLong)
val contentNode = fileNode.getNode("jcr:content")
assertEquals("nt:resource", contentNode.getPrimaryNodeType.getName)
val input = file.input
assertEquals(0, input.available)
assertEquals(-1, input.read)
}
def testCreateFolder {
val root = JcrFS.create(testRoot)
val folder = root.subdirectoryNamed("folder")
val folderNode = testRoot.getNode("folder")
assertTrue(folder.isDirectory)
assertEquals(0L, folder.lastModified)
assertEquals("nt:folder", folderNode.getPrimaryNodeType.getName)
assertEquals("folder", folder.name)
assertEquals("/testRoot/folder", folder.path)
}
def testParent {
val root = JcrFS.create(testRoot)
val folder = root.subdirectoryNamed("folder")
val file = folder.fileNamed("file")
assertEquals(folder, file.container)
}
def testReadWriteContent {
val root = JcrFS.create(testRoot)
val file = root.fileNamed("file")
val contentNode = testRoot.getNode("file/jcr:content")
val writer = new PrintWriter(file.output)
writer.print("Hello world")
writer.close
assertEquals("Hello world", contentNode.getProperty("jcr:data").getString)
assertEquals(11, file.sizeOption.get)
val reader = new InputStreamReader(file.input)
val c = new Array[Char](32)
reader.read(c)
assertEquals("Hello world", new String(c, 0, 11))
}
} | scrawford/osgi-embedded-scala | script-engine-tests/src/test/scala/osgi/embedded/scala/JcrFSTest.scala | Scala | apache-2.0 | 5,155 |
package org.scalajs.testinterface.internal
import scala.scalajs.js
import js.annotation.JSName
@JSName("scalajsCom")
object Com extends js.Object {
def init(onReceive: js.Function1[String, Unit]): Unit = js.native
def send(msg: String): Unit = js.native
def close(): Unit = js.native
}
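/*
 * Illustrative usage sketch (not part of the original file): the facade is
 * backed by the global `scalajsCom` object provided by the JS test bridge.
 *
 *   Com.init(msg => println(s"received: $msg"))
 *   Com.send("ready")
 *   // ... later ...
 *   Com.close()
 */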
| jmnarloch/scala-js | test-interface/src/main/scala/org/scalajs/testinterface/internal/Com.scala | Scala | bsd-3-clause | 294 |
/*
* Copyright 2015 LG CNS.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package scouter.server.db.xlog;
import java.io.File;
import java.io.IOException;
import java.util.Hashtable;
import java.util.Properties;
import scouter.server.Configure;
import scouter.server.db.io.RealDataFile;
import scouter.server.db.io.zip.GZipStore;
import scouter.util.FileUtil;
import scouter.util.IClose;
object XLogDataWriter {
val table = new Hashtable[String, XLogDataWriter]();
def open(date: String, file: String): XLogDataWriter = {
table.synchronized {
var writer = table.get(file);
if (writer != null) {
writer.refrence += 1;
} else {
writer = new XLogDataWriter(date, file);
table.put(file, writer);
}
return writer;
}
}
}
class XLogDataWriter(date: String, file: String) extends IClose {
var refrence = 0;
val conf = Configure.getInstance()
var gzip = conf.gzip_xlog
var f = new File(file + ".service.conf");
if (f.exists()) {
val properties = FileUtil.readProperties(f);
gzip = "true".equalsIgnoreCase(properties.getProperty("gzip_xlog", ""+conf.gzip_xlog).trim());
} else {
gzip = conf.gzip_xlog;
val properties = new Properties();
properties.put("gzip_xlog", "" + conf.gzip_xlog);
FileUtil.writeProperties(f, properties);
}
var out:RealDataFile = null
if(gzip==false){
out=new RealDataFile(file + ".service");
}
def write(bytes: Array[Byte]): Long = {
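    // Returns the position of the stored record: when gzip is enabled, the value
    // returned by the shared GZipStore; otherwise the file offset of a
    // length-prefixed record (2-byte length followed by the payload).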
if (gzip) {
return GZipStore.getInstance().write(date, bytes);
}
this.synchronized {
val point = out.getOffset();
out.writeShort(bytes.length.toShort);
out.write(bytes);
out.flush();
return point;
}
}
override def close() {
XLogDataWriter.table.synchronized {
if (this.refrence == 0) {
XLogDataWriter.table.remove(this.file);
FileUtil.close(out);
} else {
this.refrence -= 1
}
}
}
} | jw0201/scouter | scouter.server/src/scouter/server/db/xlog/XLogDataWriter.scala | Scala | apache-2.0 | 2,808 |
package mdtags
import org.specs2.mutable.Specification
class ListSpec extends Specification {
"A List" should {
"render correctly" in {
list(
"item 1",
"item 2",
"item 3"
).toMarkdown() must equalTo(
"""* item 1
|* item 2
|* item 3""".stripMargin
)
}
"render lists in lists correctly" in {
list(
"item 1",
list(
"sub-item 1",
"sub-item 2"
),
"item 2"
).toMarkdown() must equalTo(
"""* item 1
| * sub-item 1
| * sub-item 2
|* item 2""".stripMargin
)
}
"be able to contain inline elements" in {
list(
b("a bold text"),
i("an italic text"),
s("a strikethrough text"),
"a text and" & b("a bold text")
).toMarkdown() must equalTo(
"""* **a bold text**
|* *an italic text*
|* ~~a strikethrough text~~
|* a text and **a bold text**""".stripMargin
)
}
"render scala syntax correctly" in {
list(
"first item",
"second item" & b("with bold text"),
"third item"
).convertToMarkup() must equalTo(
"""list(
| "first item",
| "second item" & b("with bold text"),
| "third item"
|)""".stripMargin
)
}
}
}
| timo-schmid/mdtags | src/test/scala/mdtags/ListSpec.scala | Scala | apache-2.0 | 1,397 |
/*
* @author Philip Stutz
*
* Copyright 2014 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.deployment
import akka.actor.ActorRef
trait DeployableAlgorithm {
def execute(parameters: Map[String, String], nodeActors: Array[ActorRef])
}
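/*
 * Minimal implementation sketch (not part of the original source): a concrete
 * algorithm only has to implement `execute`; the deployment infrastructure
 * supplies the parsed parameters and the node actors.
 *
 *   class MyAlgorithm extends DeployableAlgorithm {
 *     def execute(parameters: Map[String, String], nodeActors: Array[ActorRef]) {
 *       val steps = parameters.getOrElse("steps", "10").toInt
 *       // build a graph on the given node actors and run it for `steps` iterations
 *     }
 *   }
 */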
| danihegglin/DynDCO | src/main/scala/com/signalcollect/deployment/DeployableAlgorithm.scala | Scala | apache-2.0 | 821 |
/*
# Copyright 2016 Georges Lipka
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
*/
package com.glipka.easyReactJS.react
import scala.scalajs.js
import scala.scalajs.js._
import org.scalajs.dom.html
import js.{ UndefOr, Any, Function => JFn }
import js.annotation.{ JSBracketAccess, JSName }
import js.{ Any => jAny }
// https://github.com/DefinitelyTyped/DefinitelyTyped/blob/master/react/react.d.ts
@js.native
class ShallowRenderer extends js.Any {
def getRenderOutput(): ReactElement[_] = js.native
def render(element: ReactElement[_], context: Any): Unit = js.native
def unmount(): Unit = js.native
  def render(element: ReactElement[_]): Unit = js.native
}
| glipka/Easy-React-With-ScalaJS | src/main/scala/com/glipka/easyReactJS/react/ShallowRenderer.scala | Scala | apache-2.0 | 1,173 |
/*
* package.scala
* Definitions of solvers.
*
* Created By: Avi Pfeffer ([email protected])
* Creation Date: March 1, 2015
*
* Copyright 2015 Avrom J. Pfeffer and Charles River Analytics, Inc.
* See http://www.cra.com or email [email protected] for information.
*
* See http://www.github.com/p2t2/figaro for a copy of the software license.
*/
package com.cra.figaro.algorithm.structured
import com.cra.figaro.algorithm.factored.factors.Factor
import com.cra.figaro.algorithm.factored.factors.Variable
import com.cra.figaro.algorithm.factored.gibbs.Gibbs
import com.cra.figaro.algorithm.structured.solver.BPSolver
import com.cra.figaro.algorithm.structured.solver.GibbsSolver
import com.cra.figaro.algorithm.structured.solver.VESolver
import com.cra.figaro.algorithm.factored.factors.SumProductSemiring
import com.cra.figaro.algorithm.factored.factors.MaxProductSemiring
package object solver {
/**
* A Solver takes a set of variables to eliminate, a set of variables to preserve, and a list of factors.
* It returns a list of factors that mention only the preserved variables.
*/
type Solver = (Problem, Set[Variable[_]], Set[Variable[_]], List[Factor[Double]]) => (List[Factor[Double]], Map[Variable[_], Factor[_]])
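  /* The solver functions below are curried so that fixing their tuning
   * parameters yields a value of type `Solver`, e.g. (illustrative):
   *   val bpSolver: Solver = marginalBeliefPropagation(50) _
   *   val veSolver: Solver = mpeVariableElimination _
   */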
/**
* Creates a Gibbs sampling solver.
* @param numSamples number of samples to take
* @param burnIn number of burn-in samples to throw away
* @param interval number of samples to throw away between recorded samples
* @param blockToSampler function for creating Gibbs block samplers
* @param problem the problem to solve
* @param toEliminate the variables to be eliminated
* @param toPreserve the variables to be preserved (not eliminated)
* @param factors all the factors in the problem
*/
def marginalGibbs(numSamples: Int, burnIn: Int, interval: Int, blockToSampler: Gibbs.BlockSamplerCreator)(problem: Problem, toEliminate: Set[Variable[_]], toPreserve: Set[Variable[_]], factors: List[Factor[Double]]): (List[Factor[Double]], Map[Variable[_], Factor[_]]) = {
val gibbs = new GibbsSolver(problem, toEliminate, toPreserve, factors, numSamples, burnIn, interval, blockToSampler)
(gibbs.go(), Map())
}
/**
* Creates a variable elimination solver.
* @param problem the problem to solve
* @param toEliminate the variables to be eliminated
* @param toPreserve the variables to be preserved (not eliminated)
* @param factors all the factors in the problem
*/
def marginalVariableElimination(problem: Problem, toEliminate: Set[Variable[_]], toPreserve: Set[Variable[_]], factors: List[Factor[Double]]): (List[Factor[Double]], Map[Variable[_], Factor[_]]) = {
val ve = new VESolver(problem, toEliminate, toPreserve, factors, SumProductSemiring())
ve.go()
}
/**
* Creates an MPE variable elimination solver.
* @param problem the problem to solve
* @param toEliminate the variables to be eliminated
* @param toPreserve the variables to be preserved (not eliminated)
* @param factors all the factors in the problem
*/
def mpeVariableElimination(problem: Problem, toEliminate: Set[Variable[_]], toPreserve: Set[Variable[_]], factors: List[Factor[Double]]): (List[Factor[Double]], Map[Variable[_], Factor[_]]) = {
val ve = new VESolver(problem, toEliminate, toPreserve, factors, MaxProductSemiring())
ve.go()
}
/**
* Creates a belief propagation solver.
* @param iterations number of iterations of BP to run
* @param problem the problem to solve
* @param toEliminate the variables to be eliminated
* @param toPreserve the variables to be preserved (not eliminated)
* @param factors all the factors in the problem
*/
def marginalBeliefPropagation(iterations: Int = 100)(problem: Problem, toEliminate: Set[Variable[_]],
toPreserve: Set[Variable[_]], factors: List[Factor[Double]]): (List[Factor[Double]], Map[Variable[_], Factor[_]]) = {
val bp = new BPSolver(problem, toEliminate, toPreserve, factors, iterations, SumProductSemiring())
bp.go()
}
/**
* Creates an MPE belief propagation solver.
* @param iterations number of iterations of BP to run
* @param problem the problem to solve
* @param toEliminate the variables to be eliminated
* @param toPreserve the variables to be preserved (not eliminated)
* @param factors all the factors in the problem
*/
def mpeBeliefPropagation(iterations: Int = 100)(problem: Problem, toEliminate: Set[Variable[_]],
toPreserve: Set[Variable[_]], factors: List[Factor[Double]]): (List[Factor[Double]], Map[Variable[_], Factor[_]]) = {
val bp = new BPSolver(problem, toEliminate, toPreserve, factors, iterations, MaxProductSemiring())
bp.go()
}
}
| scottcb/figaro | Figaro/src/main/scala/com/cra/figaro/algorithm/structured/solver/package.scala | Scala | bsd-3-clause | 4,815 |
package circumflex
package orm
import collection.mutable.ListBuffer
/*!# Predicates
`Predicate` is essentially a parameterized expression which yields boolean
value when executed by database.
Predicates are designed to participate in `WHERE` clauses of SQL queries.
*/
trait Predicate extends Expression
object Predicate {
implicit def toAggregateHelper(predicate: Predicate) =
new AggregatePredicateHelper(predicate)
}
object EmptyPredicate extends Predicate {
def parameters: scala.Seq[Any] = Nil
def toSql: String = ormConf.dialect.emptyPredicate
}
class SimpleExpression(val expression: String, val parameters: Seq[Any])
extends Predicate {
def toSql = expression
}
class AggregatePredicate(val operator: String,
protected val _predicates: Seq[Predicate])
extends Predicate {
def parameters = predicates.flatMap(_.parameters)
def add(predicate: Predicate*): AggregatePredicate =
new AggregatePredicate(operator, _predicates ++ predicate)
def predicates: Seq[Predicate] = _predicates.flatMap {
case EmptyPredicate => None
case p: AggregatePredicate if (p.predicates.size == 0) => None
case p: AggregatePredicate if (p.predicates.size == 1) =>
Some(p.predicates(0))
case p => Some(p)
}
def toSql: String = {
val p = predicates
if (p.size == 0) EmptyPredicate.toSql
else "(" + p.map(_.toSql).mkString(" " + operator + " ") + ")"
}
}
class SubqueryExpression[T](expression: String,
val subquery: SQLQuery[T])
extends SimpleExpression(
ormConf.dialect.subquery(expression, subquery),
subquery.parameters)
/*! `SimpleExpressionHelper` is used to compose predicates in a DSL-style.
`String` expressions are converted to `SimpleExpressionHelper` implicitly.
*/
class SimpleExpressionHelper(val expr: String) {
// Simple expressions
def EQ(value: Any) = new SimpleExpression(ormConf.dialect.EQ(expr), List(value))
def NE(value: Any) = new SimpleExpression(ormConf.dialect.NE(expr), List(value))
def GT(value: Any) = new SimpleExpression(ormConf.dialect.GT(expr), List(value))
def GE(value: Any) = new SimpleExpression(ormConf.dialect.GE(expr), List(value))
def LT(value: Any) = new SimpleExpression(ormConf.dialect.LT(expr), List(value))
def LE(value: Any) = new SimpleExpression(ormConf.dialect.LE(expr), List(value))
def IS_NULL = new SimpleExpression(ormConf.dialect.IS_NULL(expr), Nil)
def IS_NOT_NULL = new SimpleExpression(ormConf.dialect.IS_NOT_NULL(expr), Nil)
def LIKE(value: Any) = new SimpleExpression(ormConf.dialect.LIKE(expr), List(value))
def ILIKE(value: Any) = new SimpleExpression(ormConf.dialect.ILIKE(expr), List(value))
def IN(params: Any*) = new SimpleExpression(
ormConf.dialect.parameterizedIn(expr, params.map(p => "?")), params.toList)
def BETWEEN(lowerValue: Any, upperValue: Any) = new SimpleExpression(
ormConf.dialect.BETWEEN(expr), List(lowerValue, upperValue))
// Simple subqueries
def IN(query: SQLQuery[_]) =
new SubqueryExpression(ormConf.dialect.IN(expr), query)
def NOT_IN(query: SQLQuery[_]) =
new SubqueryExpression(ormConf.dialect.NOT_IN(expr), query)
def EQ_ALL(query: SQLQuery[_]) =
new SubqueryExpression(ormConf.dialect.EQ(expr, ormConf.dialect.ALL), query)
def NE_ALL(query: SQLQuery[_]) =
new SubqueryExpression(ormConf.dialect.NE(expr, ormConf.dialect.ALL), query)
def GT_ALL(query: SQLQuery[_]) =
new SubqueryExpression(ormConf.dialect.GT(expr, ormConf.dialect.ALL), query)
def GE_ALL(query: SQLQuery[_]) =
new SubqueryExpression(ormConf.dialect.GE(expr, ormConf.dialect.ALL), query)
def LT_ALL(query: SQLQuery[_]) =
new SubqueryExpression(ormConf.dialect.LT(expr, ormConf.dialect.ALL), query)
def LE_ALL(query: SQLQuery[_]) =
new SubqueryExpression(ormConf.dialect.LE(expr, ormConf.dialect.ALL), query)
def EQ_SOME(query: SQLQuery[_]) =
new SubqueryExpression(ormConf.dialect.EQ(expr, ormConf.dialect.SOME), query)
def NE_SOME(query: SQLQuery[_]) =
new SubqueryExpression(ormConf.dialect.NE(expr, ormConf.dialect.SOME), query)
def GT_SOME(query: SQLQuery[_]) =
new SubqueryExpression(ormConf.dialect.GT(expr, ormConf.dialect.SOME), query)
def GE_SOME(query: SQLQuery[_]) =
new SubqueryExpression(ormConf.dialect.GE(expr, ormConf.dialect.SOME), query)
def LT_SOME(query: SQLQuery[_]) =
new SubqueryExpression(ormConf.dialect.LT(expr, ormConf.dialect.SOME), query)
def LE_SOME(query: SQLQuery[_]) =
new SubqueryExpression(ormConf.dialect.LE(expr, ormConf.dialect.SOME), query)
}
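/*! Illustrative sketch (not part of the original file): with the implicit
String-to-`SimpleExpressionHelper` conversion mentioned above in scope,
predicates compose in infix style, e.g.
    ("u.name" LIKE "a%") AND ("u.age" GE 18) OR ("u.admin" IS_NULL)
which yields a parameterized `AggregatePredicate`.
*/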
/*! `AggregatePredicateHelper` is used to compose predicates using infix notation. */
class AggregatePredicateHelper(predicate: Predicate) {
def AND(predicates: Predicate*) = orm.AND((Seq(predicate) ++ predicates): _*)
def OR(predicates: Predicate*) = orm.OR((Seq(predicate) ++ predicates): _*)
}
/*! `PredicateBuffer` is a mutable helper which accumulates predicates
via the `add` method and emits an immutable predicate via `toPredicate`.
It is useful when multiple criteria are prepared and accumulated
while a query is being built.
*/
trait PredicateBuffer {
protected val _buffer = new ListBuffer[Predicate]
def add(predicates: Predicate*): this.type = {
_buffer ++= predicates
this
}
def toPredicate: Predicate
}
object PredicateBuffer {
implicit def toPredicate(buff: PredicateBuffer) =
buff.toPredicate
}
class AggregatePredicateBuffer(val op: String)
extends PredicateBuffer {
def toPredicate = new AggregatePredicate(op, _buffer.toSeq)
} | inca/circumflex | orm/src/main/scala/predicate.scala | Scala | bsd-2-clause | 5,588 |
/*
* This file is part of the "consigliere" toolkit for sosreport
* and SAR data analytics and visualization.
*
* Copyright (c) 2014 Red Hat, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.redhat.et.c9e.common;
import com.redhat.et.silex.app.AppCommon
import org.json4s._
import org.json4s.jackson.JsonMethods._
trait CleaningHelpers {
def splitValue(sls: String, delim: String = " ") = {
JArray(sls.split(delim).map(JString(_)).toList)
}
def splitRpmList(sls: String) = {
JArray(
sls.split("\\n").map {
case RpmSplit(pkg, date) =>
JObject(List(("rpm", JString(pkg)), ("date", JString(date))))
case x => JString(x)
}.toList
)
}
val BogusName = "(.*[\\\\W].*)".r
val dashesToCamel = "(-+([a-z]))".r
val badChars = "(\\\\W)".r
val RpmSplit = "([^\\\\s]+)\\\\s+([^\\\\s].*)".r
val Cmd = "(COMMAND|cmdline)".r
def removeBadChars(name: String) = {
badChars.replaceAllIn(dashesToCamel.replaceAllIn(name, m => s"${(m group 2).toUpperCase}"), "")
}
}
trait CleaningTransformations extends CleaningHelpers {
type FieldX = PartialFunction[JField, JField]
type ValueX = PartialFunction[JValue, JValue]
val sanitizeNames: FieldX = {
case JField(BogusName(name), v) => JField(removeBadChars(name), v)
}
val normalizeBooleans: FieldX = {
case JField(name, JString("yes")) => JField(name, JBool(true))
case JField(name, JString("no")) => JField(name, JBool(false))
}
val splitFlags: FieldX = {
case JField("flags", JString(s)) => JField("flags", splitValue(s))
}
// needs to run after sanitizeNames, obvs
val splitRpms: FieldX = {
case JField("installedRpms", JString(list)) => {
JField("installedRpms", splitRpmList(list))
}
}
// splits COMMAND and cmdline fields
val splitCmdline: FieldX = {
case JField(Cmd(cmd), JString(cmdline)) => JField(cmd, splitValue(cmdline))
}
val splitLsblk: FieldX = {
// TODO: implement a sensible way to split up this data
case JField("lsblk", x) => JField("lsblk", x)
}
val splitLspci: FieldX = {
// TODO: implement a sensible way to split up this data
case JField("lspci", x) => JField("lspci", x)
}
val timestampTidy: FieldX = {
case JField("timestamp", ts: JObject) =>
val date = ts \\ "date" match { case JString(str) => str }
val time = ts \\ "time" match { case JString(str) => str }
JField("timestamp", JString(date + "T" + time + "Z"))
}
def fieldTransforms: List[FieldX] = Nil
def valueTransforms: List[ValueX] = Nil
def apply(o: JValue): JValue = {
val withFields = (o /: fieldTransforms)({(o, x) => o.transformField(x)})
(withFields /: valueTransforms)({(o, x) => o.transform(x) })
}
}
object SosDefaultTransformations extends CleaningTransformations {
override def fieldTransforms = List(sanitizeNames,
normalizeBooleans,
splitFlags,
splitRpms,
splitCmdline,
splitLsblk,
splitLspci
)
}
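// A minimal, self-contained sketch of how the transformations above rewrite a
// parsed record; the field names ("flags", "ONBOOT") are made up for
// illustration, not taken from real sosreport data.
object SosDefaultTransformationsExample {
  def main(args: Array[String]): Unit = {
    val raw = parse("""{"flags": "fpu vme de", "ONBOOT": "yes"}""")
    // splitFlags turns the space-separated string into a JSON array and
    // normalizeBooleans maps "yes"/"no" strings onto JSON booleans, so this is
    // expected to print something like {"flags":["fpu","vme","de"],"ONBOOT":true}
    println(compact(render(SosDefaultTransformations(raw))))
  }
}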
object SarDefaultTransformations extends CleaningTransformations {
override def fieldTransforms = List(sanitizeNames,
normalizeBooleans,
timestampTidy
)
}
trait JsonProcessing {
import java.io.{File, FileReader, FileWriter}
import scala.util.{Try, Success, Failure}
// TODO: it would make sense to have custom case classes for sosreport kinds
def loadObjects(fn: String): Try[List[JValue]] = {
val f = new File(fn)
val parsedFile = Try(parse(new FileReader(f)))
parsedFile.map(_ match {
case JArray(jls) => jls.collect { case j:JObject => j }
case o: JObject => List(o)
case _ => List[JObject]()
})
}
def lazyLoadObjects(fn: String): Try[Iterator[JValue]] = {
val f = new File(fn)
val parsedFile = Try(parse(new FileReader(f)))
parsedFile.map(_ match {
case JArray(jls) => jls.iterator.collect { case j:JObject => j }
case o: JObject => List(o).iterator
case _ => List[JObject]().iterator
})
}
def partitionByKinds(jls: Iterable[JValue], xform: JValue => JValue = {_ \\ "_source"}): Map[String, Vector[JValue]] = {
implicit val formats = new org.json4s.DefaultFormats {}
def partitionOne(m: Map[String, Vector[JValue]], jv: JValue) = {
val kind = (jv \\ "_type") match {
case JString(s) => s
case _ => "UNKNOWN"
}
m + ((kind, m.getOrElse(kind, Vector()) :+ xform(jv)))
}
(Map[String, Vector[JValue]]() /: jls)(partitionOne _)
}
}
trait PathOperations {
import java.io.File
import scala.util.{Try, Success, Failure}
def listFilesInDir(dirname: String): List[String] = {
val dir = new java.io.File(dirname)
if (dir.exists && dir.isDirectory) {
dir.listFiles.filter(_.isFile).toList.map(dirname + PATHSEP + _.getName.toString).filter(fn => fn.endsWith(".json"))
} else {
println(s"warning: $dirname either does not exist or is not a directory")
Nil
}
}
def ensureDir(dirname: String): Try[String] = {
val dir = new File(dirname)
(dir.exists, dir.isDirectory) match {
case (true, true) => Success(dirname)
case (true, false) => Failure(
new RuntimeException(s"$dirname already exists but is not a directory")
)
case (false, _) => Try(Pair(dir.mkdirs(), dirname)._2)
}
}
lazy val PATHSEP = java.lang.System.getProperty("file.separator").toString
}
trait Preprocessing extends PathOperations {
import java.io.{File, FileReader, FileWriter}
import scala.util.{Try, Success, Failure}
case class AppOptions(inputFiles: Vector[String], outputDir: String) {
def withFile(f: String) = this.copy(inputFiles=inputFiles:+f)
def withFiles(fs: Seq[String]) = this.copy(inputFiles=inputFiles++fs)
def withOutputDir(d: String) = this.copy(outputDir=d)
}
object AppOptions {
def default = AppOptions(Vector[String](), ".")
}
def parseArgs(args: Array[String]) = {
def phelper(params: List[String], options: AppOptions): AppOptions = {
params match {
case Nil => options
case "--output-dir" :: dir :: rest => phelper(rest, options.withOutputDir(dir))
case "--input-dir" :: dir :: rest => phelper(rest, options.withFiles(listFilesInDir(dir)))
case "--" :: rest => options.withFiles(rest)
        case bogusOpt if bogusOpt(0).startsWith("-") => throw new RuntimeException(s"unrecognized option ${bogusOpt(0)}")
case file :: rest => phelper(rest, options.withFile(file))
}
}
phelper(args.toList, AppOptions.default)
}
}
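// A small, hypothetical driver showing the intended command-line shape; the
// file and directory names below are placeholders, not real inputs.
object PreprocessingExample extends Preprocessing {
  def main(args: Array[String]): Unit = {
    val opts = parseArgs(Array("--output-dir", "out", "records-1.json", "records-2.json"))
    println(s"output dir: ${opts.outputDir}")
    println(s"input files: ${opts.inputFiles.mkString(", ")}")
  }
}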
trait GenericTransformer[Result] extends JsonProcessing with Preprocessing {
type KOPair = Pair[String, Vector[JValue]]
type KOMap = Map[String, Vector[JValue]]
def objectTransform(jv: JValue): JValue = jv
// XXX: make options, f implicit?
def transform(options: AppOptions, f: String)(ko: KOPair): KOPair = ko match {
case (kind, objects) => (kind, objects.map(objectTransform(_)))
}
def postprocess(options: AppOptions, fn: String, kom: KOMap): Result
def run(args: Array[String]): TraversableOnce[Result] = {
val options = parseArgs(args)
options.inputFiles.map { f =>
Console.println(s"processing $f...")
val kindMap = loadObjects(f).map(objList => partitionByKinds(objList)).get
val kom = kindMap.map(transform(options, f))
postprocess(options, f, kom)
}
}
def main(args: Array[String]) {
run(args)
}
}
trait InPlaceRecordPartitioner extends GenericTransformer[Unit] {
def postprocess(options: AppOptions, fn: String, kom: KOMap) = {
kom.foreach {
case (kind, objects) => {
val basename = new java.io.File(fn).getName()
val outputDir = ensureDir(options.outputDir + PATHSEP + kind).get
val outputWriter = new java.io.PrintWriter(new java.io.File(s"$outputDir/$kind-$basename"))
Console.println(s" - writing $kind records from $basename...")
objects foreach { obj =>
outputWriter.println(compact(render(obj)))
}
outputWriter.close()
}
}
()
}
}
object SosReportPreprocessor extends InPlaceRecordPartitioner {
override def objectTransform(jv: JValue) = SosDefaultTransformations(jv)
}
object SarPreprocessor extends InPlaceRecordPartitioner {
override def objectTransform(jv: JValue) = SarDefaultTransformations(jv)
}
object SarConverter extends GenericTransformer[Map[String, Vector[JValue]]] {
implicit val formats = new org.json4s.DefaultFormats {}
import com.redhat.et.c9e.sar.SarRecord
def join[K,V](combOp: (V, V) => V, dfl: V)(left: Map[K,V], right: Map[K,V]) = {
val keys = left.keySet ++ right.keySet
(keys map {k => Pair(k, combOp(left.getOrElse(k, dfl), right.getOrElse(k, dfl)))}).toMap
}
override def objectTransform(jv: JValue) = SarDefaultTransformations(jv)
def postprocess(options: AppOptions, fn: String, kom: KOMap) = kom
def convert(args: Array[String]): Iterable[SarRecord] = {
implicit val formats = new org.json4s.DefaultFormats {}
val all = (Map[String,Vector[JValue]]() /: run(args))(join(_ ++ _, Vector()))
(all.iterator flatMap { case (k, vs) => vs map (_.extract[SarRecord]) }).toIterable
}
}
object LazySarConverter extends JsonProcessing with Preprocessing {
import com.redhat.et.c9e.sar.SarRecord
type KOPair = Pair[String, Vector[JValue]]
type KOMap = Map[String, Vector[JValue]]
implicit val formats = new org.json4s.DefaultFormats {}
def run(args: Array[String]): TraversableOnce[SarRecord] = {
val options = parseArgs(args)
options.inputFiles.flatMap { f =>
Console.println(s"processing $f...")
loadObjects(f).get.map {jv => SarDefaultTransformations(jv \\ "_source").extract[SarRecord]}
}
}
def main(args: Array[String]) {
run(args)
}
}
| willb/c9e | common/src/main/scala/com/redhat/et/c9e/common/preprocess.scala | Scala | apache-2.0 | 10,358 |
package org.denigma.graphs.ui.views
import org.denigma.binding.extensions._
import org.denigma.binding.messages._
import org.denigma.binding.picklers._
import org.denigma.semantic.storages.Storage
import org.scalajs.spickling.PicklerRegistry
import org.scalax.semweb.rdf.{IRI, Quad, Res}
import org.scalax.semweb.sparql.Pat
import scala.concurrent.Future
class GraphStorage(path:String)(implicit registry:PicklerRegistry = rp) extends Storage {
def channel:String = path
/**
*
* @param resource resource to be explored
* @param props if empty then all props are ok
   * @param patterns if empty then all patterns are ok
* @param depth is 1 by default
* @return
*/
def explore(resource:Res,props:List[IRI] = List.empty,patterns:List[Pat] = List.empty, depth:Int = 1): Future[List[Quad]] = {
sq.post(path,GraphMessages.NodeExplore(resource,props,patterns,depth, id = genId())):Future[List[Quad]]
}
}
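// A hypothetical call site; the IRI constructor shown is assumed, not verified
// against the scalax-semweb API:
//
//   storage.explore(IRI("http://example.org/people/alice"), depth = 2)
//     .foreach(quads => quads.foreach(println))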
| antonkulaga/semantic-graph | ui/src/main/scala/org/denigma/graphs/ui/views/GraphStorage.scala | Scala | mpl-2.0 | 935 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions.aggregate
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util.TypeUtils
import org.apache.spark.sql.types._
@ExpressionDescription(
usage = "_FUNC_(expr) - Returns the sum calculated from values of a group.",
examples = """
Examples:
> SELECT _FUNC_(col) FROM VALUES (5), (10), (15) AS tab(col);
30
> SELECT _FUNC_(col) FROM VALUES (NULL), (10), (15) AS tab(col);
25
> SELECT _FUNC_(col) FROM VALUES (NULL), (NULL) AS tab(col);
NULL
""",
since = "1.0.0")
case class Sum(child: Expression) extends DeclarativeAggregate with ImplicitCastInputTypes {
override def children: Seq[Expression] = child :: Nil
override def nullable: Boolean = true
// Return data type.
override def dataType: DataType = resultType
override def inputTypes: Seq[AbstractDataType] = Seq(NumericType)
override def checkInputDataTypes(): TypeCheckResult =
TypeUtils.checkForNumericExpr(child.dataType, "function sum")
private lazy val resultType = child.dataType match {
case DecimalType.Fixed(precision, scale) =>
DecimalType.bounded(precision + 10, scale)
case _: IntegralType => LongType
case _ => DoubleType
}
private lazy val sumDataType = resultType
private lazy val sum = AttributeReference("sum", sumDataType)()
private lazy val zero = Cast(Literal(0), sumDataType)
override lazy val aggBufferAttributes = sum :: Nil
override lazy val initialValues: Seq[Expression] = Seq(
/* sum = */ Literal.create(null, sumDataType)
)
override lazy val updateExpressions: Seq[Expression] = {
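    // In both branches the inner coalesce(sum, zero) treats the not-yet-initialized
    // buffer (sum = null) as zero. When the child is nullable, the outer coalesce
    // additionally falls back to the previous sum, so a null input value does not
    // wipe out the partial result.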
if (child.nullable) {
Seq(
/* sum = */
coalesce(coalesce(sum, zero) + child.cast(sumDataType), sum)
)
} else {
Seq(
/* sum = */
coalesce(sum, zero) + child.cast(sumDataType)
)
}
}
override lazy val mergeExpressions: Seq[Expression] = {
Seq(
/* sum = */
coalesce(coalesce(sum.left, zero) + sum.right, sum.left)
)
}
override lazy val evaluateExpression: Expression = sum
}
| WindCanDie/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/aggregate/Sum.scala | Scala | apache-2.0 | 3,056 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.kernel.interpreter.scala
import java.io.ByteArrayOutputStream
import java.net.{URL, URLClassLoader}
import java.nio.charset.Charset
import java.util.concurrent.ExecutionException
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.spark.SparkContext
import org.apache.spark.sql.SparkSession
import org.apache.spark.repl.Main
import org.apache.toree.interpreter._
import org.apache.toree.kernel.api.{KernelLike, KernelOptions}
import org.apache.toree.utils.{MultiOutputStream, TaskManager}
import org.slf4j.LoggerFactory
import scala.annotation.tailrec
import scala.concurrent.{Await, Future}
import scala.language.reflectiveCalls
import scala.tools.nsc.Settings
import scala.tools.nsc.interpreter.{IR, OutputStream}
import scala.tools.nsc.util.ClassPath
import scala.util.{Try => UtilTry}
class ScalaInterpreter(private val config:Config = ConfigFactory.load) extends Interpreter with ScalaInterpreterSpecific {
protected val logger = LoggerFactory.getLogger(this.getClass.getName)
protected val _thisClassloader = this.getClass.getClassLoader
protected val lastResultOut = new ByteArrayOutputStream()
protected val multiOutputStream = MultiOutputStream(List(Console.out, lastResultOut))
private[scala] var taskManager: TaskManager = _
/** Since the ScalaInterpreter can be started without a kernel, we need to ensure that we can compile things.
Adding in the default classpaths as needed.
*/
def appendClassPath(settings: Settings): Settings = {
settings.classpath.value = buildClasspath(_thisClassloader)
settings.embeddedDefaults(_runtimeClassloader)
settings
}
protected var settings: Settings = newSettings(List())
settings = appendClassPath(settings)
private val maxInterpreterThreads: Int = {
if(config.hasPath("max_interpreter_threads"))
config.getInt("max_interpreter_threads")
else
TaskManager.DefaultMaximumWorkers
}
protected def newTaskManager(): TaskManager =
new TaskManager(maximumWorkers = maxInterpreterThreads)
/**
* This has to be called first to initialize all the settings.
*
* @return The newly initialized interpreter
*/
override def init(kernel: KernelLike): Interpreter = {
val args = interpreterArgs(kernel)
settings = newSettings(args)
settings = appendClassPath(settings)
start()
bindKernelVariable(kernel)
bindSparkSession()
bindSparkContext()
this
}
protected[scala] def buildClasspath(classLoader: ClassLoader): String = {
def toClassLoaderList( classLoader: ClassLoader ): Seq[ClassLoader] = {
@tailrec
def toClassLoaderListHelper( aClassLoader: ClassLoader, theList: Seq[ClassLoader]):Seq[ClassLoader] = {
if( aClassLoader == null )
return theList
toClassLoaderListHelper( aClassLoader.getParent, aClassLoader +: theList )
}
toClassLoaderListHelper(classLoader, Seq())
}
val urls = toClassLoaderList(classLoader).flatMap{
case cl: java.net.URLClassLoader => cl.getURLs.toList
case a => List()
}
urls.foldLeft("")((l, r) => ClassPath.join(l, r.toString))
}
protected def interpreterArgs(kernel: KernelLike): List[String] = {
import scala.collection.JavaConverters._
if (kernel == null || kernel.config == null) {
List()
}
else {
kernel.config.getStringList("interpreter_args").asScala.toList
}
}
protected def maxInterpreterThreads(kernel: KernelLike): Int = {
kernel.config.getInt("max_interpreter_threads")
}
protected def bindKernelVariable(kernel: KernelLike): Unit = {
logger.warn(s"kernel variable: ${kernel}")
// InterpreterHelper.kernelLike = kernel
// interpret("import org.apache.toree.kernel.interpreter.scala.InterpreterHelper")
// interpret("import org.apache.toree.kernel.api.Kernel")
//
// interpret(s"val kernel = InterpreterHelper.kernelLike.asInstanceOf[org.apache.toree.kernel.api.Kernel]")
doQuietly {
bind(
"kernel", "org.apache.toree.kernel.api.Kernel",
kernel, List( """@transient implicit""")
)
}
}
override def interrupt(): Interpreter = {
require(taskManager != null)
// Force dumping of current task (begin processing new tasks)
taskManager.restart()
this
}
override def interpret(code: String, silent: Boolean = false, output: Option[OutputStream]):
(Results.Result, Either[ExecuteOutput, ExecuteFailure]) = {
val starting = (Results.Success, Left(""))
interpretRec(code.trim.split("\\n").toList, false, starting)
}
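  // Given REPL output of the form "res3: Int = 42", this strips the synthetic
  // result name and returns e.g. "42" (or "Int = 42" when showType is set);
  // with noTruncate it re-reads the full value of the named result via read().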
def truncateResult(result:String, showType:Boolean =false, noTruncate: Boolean = false): String = {
    val resultRX="""(?s)(res\d+):\s+(.+)\s+=\s+(.*)""".r
result match {
case resultRX(varName,varType,resString) => {
var returnStr=resString
if (noTruncate)
{
val r=read(varName)
returnStr=r.getOrElse("").toString
}
if (showType)
returnStr=varType+" = "+returnStr
returnStr
}
case _ => ""
}
}
protected def interpretRec(lines: List[String], silent: Boolean = false, results: (Results.Result, Either[ExecuteOutput, ExecuteFailure])): (Results.Result, Either[ExecuteOutput, ExecuteFailure]) = {
lines match {
case Nil => results
case x :: xs =>
val output = interpretLine(x)
output._1 match {
// if success, keep interpreting and aggregate ExecuteOutputs
case Results.Success =>
val result = for {
originalResult <- output._2.left
} yield(truncateResult(originalResult, KernelOptions.showTypes,KernelOptions.noTruncation))
interpretRec(xs, silent, (output._1, result))
// if incomplete, keep combining incomplete statements
case Results.Incomplete =>
xs match {
case Nil => interpretRec(Nil, silent, (Results.Incomplete, results._2))
            case _ => interpretRec(x + "\n" + xs.head :: xs.tail, silent, results)
}
//
case Results.Aborted =>
output
//interpretRec(Nil, silent, output)
// if failure, stop interpreting and return the error
case Results.Error =>
val result = for {
curr <- output._2.right
} yield curr
interpretRec(Nil, silent, (output._1, result))
}
}
}
protected def interpretLine(line: String, silent: Boolean = false):
(Results.Result, Either[ExecuteOutput, ExecuteFailure]) =
{
logger.trace(s"Interpreting line: $line")
val futureResult = interpretAddTask(line, silent)
// Map the old result types to our new types
val mappedFutureResult = interpretMapToCustomResult(futureResult)
// Determine whether to provide an error or output
val futureResultAndOutput = interpretMapToResultAndOutput(mappedFutureResult)
val futureResultAndExecuteInfo =
interpretMapToResultAndExecuteInfo(futureResultAndOutput)
// Block indefinitely until our result has arrived
import scala.concurrent.duration._
Await.result(futureResultAndExecuteInfo, Duration.Inf)
}
protected def interpretMapToCustomResult(future: Future[IR.Result]) = {
import scala.concurrent.ExecutionContext.Implicits.global
future map {
case IR.Success => Results.Success
case IR.Error => Results.Error
case IR.Incomplete => Results.Incomplete
} recover {
case ex: ExecutionException => Results.Aborted
}
}
protected def interpretMapToResultAndOutput(future: Future[Results.Result]) = {
import scala.concurrent.ExecutionContext.Implicits.global
future map {
result =>
val output =
lastResultOut.toString(Charset.forName("UTF-8").name()).trim
lastResultOut.reset()
(result, output)
}
}
def bindSparkContext() = {
val bindName = "sc"
doQuietly {
logger.info(s"Binding SparkContext into interpreter as $bindName")
interpret(s"""def ${bindName}: ${classOf[SparkContext].getName} = kernel.sparkContext""")
// NOTE: This is needed because interpreter blows up after adding
// dependencies to SparkContext and Interpreter before the
// cluster has been used... not exactly sure why this is the case
// TODO: Investigate why the cluster has to be initialized in the kernel
// to avoid the kernel's interpreter blowing up (must be done
// inside the interpreter)
logger.debug("Initializing Spark cluster in interpreter")
// doQuietly {
// interpret(Seq(
// "val $toBeNulled = {",
// " var $toBeNulled = sc.emptyRDD.collect()",
// " $toBeNulled = null",
// "}"
// ).mkString("\\n").trim())
// }
}
}
def bindSparkSession(): Unit = {
val bindName = "spark"
doQuietly {
// TODO: This only adds the context to the main interpreter AND
// is limited to the Scala interpreter interface
logger.debug(s"Binding SQLContext into interpreter as $bindName")
interpret(s"""def ${bindName}: ${classOf[SparkSession].getName} = kernel.sparkSession""")
// interpret(
// s"""
// |def $bindName: ${classOf[SparkSession].getName} = {
// | if (org.apache.toree.kernel.interpreter.scala.InterpreterHelper.sparkSession != null) {
// | org.apache.toree.kernel.interpreter.scala.InterpreterHelper.sparkSession
// | } else {
// | val s = org.apache.spark.repl.Main.createSparkSession()
// | org.apache.toree.kernel.interpreter.scala.InterpreterHelper.sparkSession = s
// | s
// | }
// |}
// """.stripMargin)
}
}
override def classLoader: ClassLoader = _runtimeClassloader
}
object ScalaInterpreter {
/**
* Utility method to ensure that a temporary directory for the REPL exists for testing purposes.
*/
def ensureTemporaryFolder(): String = {
val outputDir = Option(System.getProperty("spark.repl.class.outputDir")).getOrElse({
val execUri = System.getenv("SPARK_EXECUTOR_URI")
val outputDir: String = Main.outputDir.getAbsolutePath
System.setProperty("spark.repl.class.outputDir", outputDir)
if (execUri != null) {
System.setProperty("spark.executor.uri", execUri)
}
outputDir
})
outputDir
}
}
| poplav/incubator-toree | scala-interpreter/src/main/scala/org/apache/toree/kernel/interpreter/scala/ScalaInterpreter.scala | Scala | apache-2.0 | 11,582 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.common
import java.io.PrintStream
import java.time.Clock
import java.time.Instant
import java.time.ZoneId
import java.time.format.DateTimeFormatter
import akka.event.Logging.{DebugLevel, ErrorLevel, InfoLevel, WarningLevel}
import akka.event.Logging.LogLevel
import akka.event.LoggingAdapter
import kamon.Kamon
trait Logging {
/**
* Prints a message on DEBUG level
*
* @param from Reference, where the method was called from.
* @param message Message to write to the log
*/
def debug(from: AnyRef, message: String)(implicit id: TransactionId = TransactionId.unknown) = {
emit(DebugLevel, id, from, message)
}
/**
* Prints a message on INFO level
*
* @param from Reference, where the method was called from.
* @param message Message to write to the log
*/
def info(from: AnyRef, message: String)(implicit id: TransactionId = TransactionId.unknown) = {
emit(InfoLevel, id, from, message)
}
/**
* Prints a message on WARN level
*
* @param from Reference, where the method was called from.
* @param message Message to write to the log
*/
def warn(from: AnyRef, message: String)(implicit id: TransactionId = TransactionId.unknown) = {
emit(WarningLevel, id, from, message)
}
/**
* Prints a message on ERROR level
*
* @param from Reference, where the method was called from.
* @param message Message to write to the log
*/
def error(from: AnyRef, message: String)(implicit id: TransactionId = TransactionId.unknown) = {
emit(ErrorLevel, id, from, message)
}
/**
* Prints a message to the output.
*
* @param loglevel The level to log on
* @param id <code>TransactionId</code> to include in the log
* @param from Reference, where the method was called from.
* @param message Message to write to the log
*/
def emit(loglevel: LogLevel, id: TransactionId, from: AnyRef, message: String)
}
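/*
 * Typical call sites look like
 *
 *   logger.info(this, "request accepted")            // falls back to TransactionId.unknown
 *   logger.warn(this, s"retrying request")(transid)  // explicit transaction id in scope
 *
 * where `logger` is any of the Logging implementations below and `transid` is
 * an implicit TransactionId provided by the caller.
 */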
/**
 * Implementation of Logging that uses Akka logging.
*/
class AkkaLogging(loggingAdapter: LoggingAdapter) extends Logging {
def emit(loglevel: LogLevel, id: TransactionId, from: AnyRef, message: String) = {
if (loggingAdapter.isEnabled(loglevel)) {
val name = if (from.isInstanceOf[String]) from else Logging.getCleanSimpleClassName(from.getClass)
loggingAdapter.log(loglevel, s"[$id] [$name] $message")
}
}
}
/**
 * Implementation of Logging that uses the output stream.
*/
class PrintStreamLogging(outputStream: PrintStream = Console.out) extends Logging {
def emit(loglevel: LogLevel, id: TransactionId, from: AnyRef, message: String) = {
val now = Instant.now(Clock.systemUTC)
val time = Emitter.timeFormat.format(now)
val name = if (from.isInstanceOf[String]) from else Logging.getCleanSimpleClassName(from.getClass)
val level = loglevel match {
case DebugLevel => "DEBUG"
case InfoLevel => "INFO"
case WarningLevel => "WARN"
case ErrorLevel => "ERROR"
}
val logMessage = Seq(message).collect {
case msg if msg.nonEmpty =>
        msg.split('\n').map(_.trim).mkString(" ")
}
val parts = Seq(s"[$time]", s"[$level]", s"[$id]") ++ Seq(s"[$name]") ++ logMessage
outputStream.println(parts.mkString(" "))
}
}
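/*
 * With the formatting above, an emitted line looks roughly like
 *
 *   [2017-01-01T12:00:00.000Z] [INFO] [<transaction id>] [Controller] some message
 *
 * where the transaction id field is whatever TransactionId.toString yields and
 * "Controller" stands for the (cleaned) simple class name of the caller.
 */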
/**
 * A triple representing a log marker: the marker token, the time elapsed in
 * milliseconds since the start of the transaction, and, for end markers, the
 * time elapsed since the corresponding start marker.
*
* @param token the LogMarkerToken that should be defined in LoggingMarkers
* @param deltaToTransactionStart the time difference between now and the start of the Transaction
* @param deltaToMarkerStart if this is an end marker, this is the time difference to the start marker
*/
case class LogMarker(token: LogMarkerToken, deltaToTransactionStart: Long, deltaToMarkerStart: Option[Long] = None) {
override def toString() = {
val parts = Seq(LogMarker.keyword, token.toString, deltaToTransactionStart) ++ deltaToMarkerStart
"[" + parts.mkString(":") + "]"
}
}
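/*
 * For example, LogMarker(LogMarkerToken("controller", "loadbalancer", "start"), 42)
 * renders as "[marker:controller_loadbalancer_start:42]", which is exactly the
 * shape that LogMarker.parse below reads back.
 */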
object LogMarker {
val keyword = "marker"
/** Convenience method for parsing log markers in unit tests. */
def parse(s: String) = {
    val logmarker = raw"\[${keyword}:([^\s:]+):(\d+)(?::(\d+))?\]".r.unanchored
val logmarker(token, deltaToTransactionStart, deltaToMarkerStart) = s
LogMarker(LogMarkerToken.parse(token), deltaToTransactionStart.toLong, Option(deltaToMarkerStart).map(_.toLong))
}
}
private object Logging {
/**
* Given a class object, return its simple name less the trailing dollar sign.
*/
def getCleanSimpleClassName(clz: Class[_]) = {
val simpleName = clz.getSimpleName
if (simpleName.endsWith("$")) simpleName.dropRight(1)
else simpleName
}
}
private object Emitter {
val timeFormat = DateTimeFormatter.ofPattern("yyyy-MM-dd'T'HH:mm:ss.SSS'Z'").withZone(ZoneId.of("UTC"))
}
case class LogMarkerToken(component: String, action: String, state: String) {
override def toString() = component + "_" + action + "_" + state
def asFinish = copy(state = LoggingMarkers.finish)
def asError = copy(state = LoggingMarkers.error)
}
object LogMarkerToken {
def parse(s: String) = {
// Per convention the components are guaranteed to not contain '_'
// thus it's safe to split at '_' to get the components
val Array(component, action, state) = s.split("_")
LogMarkerToken(component, action, state)
}
}
object MetricEmitter {
val metrics = Kamon.metrics
def emitCounterMetric(token: LogMarkerToken) = {
metrics
.counter(token.toString)
.increment(1)
}
def emitHistogramMetric(token: LogMarkerToken, value: Long) = {
metrics
.histogram(token.toString)
.record(value)
}
}
object LoggingMarkers {
val start = "start"
val finish = "finish"
val error = "error"
val count = "count"
private val controller = "controller"
private val invoker = "invoker"
private val database = "database"
private val activation = "activation"
private val kafka = "kafka"
private val loadbalancer = "loadbalancer"
/*
* Controller related markers
*/
def CONTROLLER_STARTUP(i: Int) = LogMarkerToken(controller, s"startup$i", count)
// Time of the activation in controller until it is delivered to Kafka
val CONTROLLER_ACTIVATION = LogMarkerToken(controller, activation, start)
val CONTROLLER_ACTIVATION_BLOCKING = LogMarkerToken(controller, "blockingActivation", start)
// Time that is needed load balance the activation
val CONTROLLER_LOADBALANCER = LogMarkerToken(controller, loadbalancer, start)
// Time that is needed to produce message in kafka
val CONTROLLER_KAFKA = LogMarkerToken(controller, kafka, start)
/*
* Invoker related markers
*/
def INVOKER_STARTUP(i: Int) = LogMarkerToken(invoker, s"startup$i", count)
// Check invoker healthy state from loadbalancer
val LOADBALANCER_INVOKER_OFFLINE = LogMarkerToken(loadbalancer, "invokerOffline", count)
val LOADBALANCER_INVOKER_UNHEALTHY = LogMarkerToken(loadbalancer, "invokerUnhealthy", count)
// Time that is needed to execute the action
val INVOKER_ACTIVATION_RUN = LogMarkerToken(invoker, "activationRun", start)
// Time that is needed to init the action
val INVOKER_ACTIVATION_INIT = LogMarkerToken(invoker, "activationInit", start)
// Time in invoker
val INVOKER_ACTIVATION = LogMarkerToken(invoker, activation, start)
def INVOKER_DOCKER_CMD(cmd: String) = LogMarkerToken(invoker, s"docker.$cmd", start)
def INVOKER_RUNC_CMD(cmd: String) = LogMarkerToken(invoker, s"runc.$cmd", start)
/*
* General markers
*/
val DATABASE_CACHE_HIT = LogMarkerToken(database, "cacheHit", count)
val DATABASE_CACHE_MISS = LogMarkerToken(database, "cacheMiss", count)
val DATABASE_SAVE = LogMarkerToken(database, "saveDocument", start)
val DATABASE_BULK_SAVE = LogMarkerToken(database, "saveDocumentBulk", start)
val DATABASE_DELETE = LogMarkerToken(database, "deleteDocument", start)
val DATABASE_GET = LogMarkerToken(database, "getDocument", start)
val DATABASE_QUERY = LogMarkerToken(database, "queryView", start)
val DATABASE_ATT_GET = LogMarkerToken(database, "getDocumentAttachment", start)
val DATABASE_ATT_SAVE = LogMarkerToken(database, "saveDocumentAttachment", start)
}
| duynguyen/incubator-openwhisk | common/scala/src/main/scala/whisk/common/Logging.scala | Scala | apache-2.0 | 9,082 |
package breeze.collection.immutable
/*
Copyright 2009 David Hall, Daniel Ramage
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.util.Iterators
import scala.annotation.tailrec
import scala.collection.IterableLike
import scala.collection.generic.CanBuildFrom
import scala.collection.mutable.Builder
/**
* From Okasaki's Functional Data Structures. Represents a functional heap
*
* @author dlwh
*/
class BinomialHeap[T]()(implicit ord: Ordering[T]) extends Iterable[T] with IterableLike[T, BinomialHeap[T]] with Serializable {
import BinomialHeap._
import ord.mkOrderingOps
protected val trees: List[Node[T]] = Nil
override val size = 0
def +(x: T) = mkHeap(insertTree(Node(0, x, Nil), trees), size + 1)
@tailrec
private def insertTree(n: Node[T], t: List[Node[T]]): List[Node[T]] = {
if (t.isEmpty) List(n)
else if (n.rank < t.head.rank) n :: t
else insertTree(n.link(t.head), t.tail)
}
def ++(other: BinomialHeap[T]) = mkHeap(merge(trees, other.trees, Nil), size + other.size)
// TODO: make somewhat tail recursive
private def merge(l1: List[Node[T]], l2: List[Node[T]], acc: List[Node[T]]): List[Node[T]] = (l1, l2) match {
case (Nil, l2) => acc.reverse ++ l2
case (l1, Nil) => acc.reverse ++ l1
case (n1 :: r1, n2 :: r2) =>
if (n1.rank < n2.rank) merge(r1, l2, n1 :: acc)
else if (n2.rank < n1.rank) merge(l1, r2, n2 :: acc)
else insertTree(n1.link(n2), merge(r1, r2, acc))
}
def min = get.get
protected override def newBuilder = new Builder[T, BinomialHeap[T]] {
var heap = BinomialHeap.empty[T]
def result() = heap
def clear() = heap = BinomialHeap.empty[T]
def +=(elem: T) = { heap += elem; this }
}
lazy val get = if (trees.isEmpty) None else Some(findMin(trees))
private def findMin(trees: List[Node[T]]): T = {
trees match {
case (t :: Nil) => t.x
case (t :: ts) =>
val x = t.x
val y = findMin(ts)
if (x < y) x else y
case _ => throw new IllegalArgumentException("Shouldn't get Nil!")
}
}
def delMin() = {
if (trees.isEmpty) this
else {
def getMin(t: List[Node[T]]): (Node[T], List[Node[T]]) = t match {
case (n :: Nil) => (n, Nil)
case (n :: ts) => {
val (n2, ts2) = getMin(ts)
if (n.x <= n2.x) (n, ts) else (n2, n :: ts2)
}
case _ => throw new IllegalArgumentException("Shouldn't get Nil!")
}
val (Node(_, x, t1), t2) = getMin(trees)
merge(t1.reverse, t2, Nil)
mkHeap(merge(t1.reverse, t2, Nil), size - 1)
}
}
def iterator: Iterator[T] = Iterators.merge(trees.map(treeIterator): _*)(ord.compare)
private def treeIterator(n: Node[T]): Iterator[T] = {
Iterators.merge((Iterator.single(n.x) :: (n.children.map(treeIterator))): _*)(ord.compare)
}
override def toString() = iterator.mkString("Heap(", ",", ")")
}
object BinomialHeap {
protected case class Node[T](rank: Int, x: T, children: List[Node[T]])(implicit ord: Ordering[T]) {
import ord.mkOrderingOps
def link(n: Node[T]) = {
if (x <= n.x) Node(rank + 1, x, n :: children) else Node(rank + 1, n.x, this :: n.children)
}
}
def empty[T: Ordering]: BinomialHeap[T] = new BinomialHeap[T] {
override val trees = Nil
}
private def mkHeap[T: Ordering](ns: List[Node[T]], sz: Int) = new BinomialHeap[T] {
override val trees = ns
override val size = sz
}
def apply[T: Ordering](t: T*): BinomialHeap[T] = empty[T] ++ t
implicit def cbfForBinomialHeap[T <: B, B: Ordering]: CanBuildFrom[BinomialHeap[T], B, BinomialHeap[B]] =
new CanBuildFrom[BinomialHeap[T], B, BinomialHeap[B]] {
def apply(): Builder[B, BinomialHeap[B]] = {
empty[B].newBuilder
}
def apply(from: BinomialHeap[T]) = apply()
}
}
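// A minimal usage sketch of the heap defined above (purely illustrative).
object BinomialHeapExample {
  def main(args: Array[String]): Unit = {
    val heap = BinomialHeap(5, 1, 4) + 2
    println(heap.get)                              // Some(1): the minimum element
    println(heap.delMin().iterator.mkString(", ")) // 2, 4, 5 -- iteration is in sorted order
  }
}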
| scalanlp/breeze | math/src/main/scala_2.11_2.12/breeze/collection/immutable/BinomialHeap.scala | Scala | apache-2.0 | 4,309 |
/*
* Copyright (c) 2016 SnappyData, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you
* may not use this file except in compliance with the License. You
* may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the License for the specific language governing
* permissions and limitations under the License. See accompanying
* LICENSE file.
*/
package org.apache.spark.sql
import com.pivotal.gemfirexd.internal.engine.db.FabricDatabase
import io.snappydata.benchmark.TPCHColumnPartitionedTable
import io.snappydata.{PlanTest, SnappyFunSuite}
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.rdd.ZippedPartitionsPartition
import org.apache.spark.sql.collection.MultiBucketExecutorPartition
import org.apache.spark.sql.execution.columnar.ColumnTableScan
class SingleNodeTest extends SnappyFunSuite with PlanTest with BeforeAndAfterEach {
var existingSkipSPSCompile = false
override def beforeAll(): Unit = {
System.setProperty("org.codehaus.janino.source_debugging.enable", "true")
System.setProperty("spark.sql.codegen.comments", "true")
System.setProperty("spark.testing", "true")
existingSkipSPSCompile = FabricDatabase.SKIP_SPS_PRECOMPILE
FabricDatabase.SKIP_SPS_PRECOMPILE = true
super.beforeAll()
}
override def afterAll(): Unit = {
System.clearProperty("org.codehaus.janino.source_debugging.enable")
System.clearProperty("spark.sql.codegen.comments")
System.clearProperty("spark.testing")
FabricDatabase.SKIP_SPS_PRECOMPILE = existingSkipSPSCompile
super.afterAll()
}
test("Nodes Pruning") {
SingleNodeTest.testNodesPruning(snc)
}
}
object SingleNodeTest {
def testNodesPruning(snc: SnappyContext): Unit = {
// scalastyle:off println
val tpchDataPath = TPCHColumnPartitionedTable.getClass.getResource("/TPCH").getPath
val buckets_Order_Lineitem = "5"
TPCHColumnPartitionedTable.createAndPopulateOrderTable(snc, tpchDataPath,
true, buckets_Order_Lineitem, null)
def validateSinglePartition(df: DataFrame, bucketId: Int): Unit = {
val scanRDD = df.queryExecution.executedPlan.collectFirst {
case c: ColumnTableScan => c.dataRDD
}
val partitions = scanRDD.map(_.partitions).getOrElse(
throw new AssertionError("Expecting ColumnTable Scan"))
assert(partitions.length == 1, {
val sb = new StringBuilder()
partitions.foreach(p => sb.append(p.index).append(","))
sb.toString
})
val bstr = partitions(0) match {
case zp: ZippedPartitionsPartition => zp.partitionValues.map {
case mb: MultiBucketExecutorPartition => mb.bucketsString
}
case _ => Seq.empty
}
// each BucketExecutor must have only one bucket.
      // there are 2 BucketExecutor entries due to the ZippedPartitions with the RowBuffer.
assert(bstr.forall(_.toInt == bucketId), s"Expected $bucketId, found $bstr")
}
var df = snc.sql("select * from orders where o_orderkey = 1 ")
validateSinglePartition(df, 3)
assert(df.collect()(0).getInt(0) == 1)
df = snc.sql("select * from orders where o_orderkey = 32 ")
validateSinglePartition(df, 0)
assert(df.collect()(0).getInt(0) == 32)
df = snc.sql("select * from orders where o_orderkey = 801 ")
validateSinglePartition(df, 4)
assert(df.collect()(0).getInt(0) == 801)
df = snc.sql("select * from orders where o_orderkey = 801 ")
validateSinglePartition(df, 4)
assert(df.collect()(0).getInt(0) == 801)
df = snc.sql("select * from orders where o_orderkey = 1408 ")
validateSinglePartition(df, 3)
assert(df.collect()(0).getInt(0) == 1408)
df = snc.sql("select * from orders where o_orderkey = 1409 ")
validateSinglePartition(df, 3)
assert(df.collect()(0).getInt(0) == 1409)
df = snc.sql("select * from orders where o_orderkey = 1410 ")
validateSinglePartition(df, 2)
assert(df.collect()(0).getInt(0) == 1410)
df = snc.sql("select * from orders where o_orderkey = 1796 ")
validateSinglePartition(df, 0)
assert(df.collect()(0).getInt(0) == 1796)
df = snc.sql("select * from orders where o_orderkey = 801 ")
validateSinglePartition(df, 4)
assert(df.collect()(0).getInt(0) == 801)
df = snc.sql("select * from orders where o_orderkey = '1' ")
// validateSinglePartition(df, 3) // complex operator doesn't support pruning.
assert(df.collect()(0).getInt(0) == 1)
df = snc.sql("select * from orders where o_orderkey = '32' ")
// validateSinglePartition(df, 0) // complex operator doesn't support pruning.
assert(df.collect()(0).getInt(0) == 32)
df = snc.sql("select * from orders where o_orderkey = {fn substring('d1xxd2', 2, 1)} ")
assert(df.collect()(0).getInt(0) == 1)
df = snc.sql("select * from orders where o_orderkey = substring('acbc801xx', 5, 3) ")
assert(df.collect()(0).getInt(0) == 801)
df = snc.sql("select * from orders where o_orderkey = {fn trim(" +
"substring(' acbc801xx', length(' 12345'), length('801'))) }")
assert(df.collect()(0).getInt(0) == 801)
df = snc.sql("select * from orders where o_orderkey = trim(" +
"substring(' acbc1410xx', length(' 12345'), length('1410'))) ")
assert(df.collect()(0).getInt(0) == 1410)
df = snc.sql("select O_ORDERDATE, {fn TIMESTAMPADD(SQL_TSI_DAY," +
" {fn FLOOR((-1 * {fn DAYOFYEAR(O_ORDERDATE)} - 1))}, O_ORDERDATE)}" +
" from orders where O_ORDERKEY = 32")
val r = df.collect()(0)
assert(r.getDate(0).toString.equals("1995-07-16"))
assert(r.getDate(1).toString.equals("1994-12-30"))
// scalastyle:on println
}
} | vjr/snappydata | cluster/src/test/scala/org/apache/spark/sql/SingleNodeTest.scala | Scala | apache-2.0 | 5,945 |
/*
* Copyright 2015-2016 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.entity
import scala.util.Try
import spray.json._
import spray.json.DefaultJsonProtocol
import whisk.common.Logging
import whisk.http.Messages._
protected[core] case class ActivationResponse private (
val statusCode: Int, val result: Option[JsValue]) {
def toJsonObject = ActivationResponse.serdes.write(this).asJsObject
// Used when presenting to end-users, to hide the statusCode (which is an implementation detail),
// and to provide a convenience boolean "success" field.
def toExtendedJson: JsObject = {
val baseFields = this.toJsonObject.fields
JsObject((baseFields - "statusCode") ++ Seq(
"success" -> JsBoolean(this.isSuccess),
"status" -> JsString(ActivationResponse.messageForCode(statusCode))))
}
def isSuccess = statusCode == ActivationResponse.Success
def isApplicationError = statusCode == ActivationResponse.ApplicationError
def isContainerError = statusCode == ActivationResponse.ContainerError
def isWhiskError = statusCode == ActivationResponse.WhiskError
override def toString = toJsonObject.compactPrint
}
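/*
 * For example, ActivationResponse.success(Some(JsObject("msg" -> JsString("hi"))))
 * serializes via toExtendedJson to (field order aside)
 * {"result":{"msg":"hi"},"success":true,"status":"success"}.
 */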
protected[core] object ActivationResponse extends DefaultJsonProtocol {
/* The field name that is universally recognized as the marker of an error, from the application or otherwise. */
val ERROR_FIELD: String = "error"
val Success = 0 // action ran successfully and produced a result
val ApplicationError = 1 // action ran but there was an error and it was handled
val ContainerError = 2 // action ran but failed to handle an error, or action did not run and failed to initialize
val WhiskError = 3 // internal system error
protected[core] def messageForCode(code: Int) = {
require(code >= 0 && code <= 3)
code match {
case 0 => "success"
case 1 => "application error"
case 2 => "action developer error"
case 3 => "whisk internal error"
}
}
private def error(code: Int, errorValue: JsValue) = {
require(code == ApplicationError || code == ContainerError || code == WhiskError)
ActivationResponse(code, Some(JsObject(ERROR_FIELD -> errorValue)))
}
protected[core] def success(result: Option[JsValue] = None) = ActivationResponse(Success, result)
protected[core] def applicationError(errorValue: JsValue) = error(ApplicationError, errorValue)
protected[core] def applicationError(errorMsg: String) = error(ApplicationError, JsString(errorMsg))
protected[core] def containerError(errorValue: JsValue) = error(ContainerError, errorValue)
protected[core] def containerError(errorMsg: String) = error(ContainerError, JsString(errorMsg))
protected[core] def whiskError(errorValue: JsValue) = error(WhiskError, errorValue)
protected[core] def whiskError(errorMsg: String) = error(WhiskError, JsString(errorMsg))
/**
* Returns an ActivationResponse that is used as a placeholder for payload
* Used as a feed for starting a sequence.
* NOTE: the code is application error (since this response could be used as a response for the sequence
* if the payload contains an error)
*/
protected[core] def payloadPlaceholder(payload: Option[JsObject]) = ActivationResponse(ApplicationError, payload)
/**
* Class of errors for invoker-container communication.
*/
protected[core] sealed abstract class ContainerConnectionError
protected[core] case class NoHost() extends ContainerConnectionError
protected[core] case class ConnectionError(t: Throwable) extends ContainerConnectionError
protected[core] case class NoResponseReceived() extends ContainerConnectionError
protected[core] case class Timeout() extends ContainerConnectionError
/**
* @param okStatus the container response was OK (HTTP 200 status code), anything else is considered an error
* @param entity the entity response as string
* @param truncated either None to indicate complete entity or Some(actual length, max allowed)
*/
protected[core] case class ContainerResponse(okStatus: Boolean, entity: String, truncated: Option[(ByteSize, ByteSize)] = None) {
val ok = okStatus && truncated.isEmpty
override def toString = {
val base = if (okStatus) "ok" else "not ok"
val rest = truncated.map(e => s", truncated ${e.toString}").getOrElse("")
base + rest
}
}
/**
* Interprets response from container after initialization. This method is only called when the initialization failed.
*
   * @param response either a container error or a container response (HTTP status code, HTTP response body as String)
* @return appropriate ActivationResponse representing initialization error
*/
protected[core] def processInitResponseContent(response: Either[ContainerConnectionError, ContainerResponse], logger: Logging): ActivationResponse = {
require(response.isLeft || !response.right.exists(_.ok), s"should not interpret init response when status code is OK")
response match {
case Right(ContainerResponse(code, str, truncated)) => truncated match {
case None =>
Try { str.parseJson.asJsObject } match {
case scala.util.Success(result @ JsObject(fields)) =>
            // If the response is a JSON object containing an error field, accept it as the response error.
val errorOpt = fields.get(ERROR_FIELD)
val errorContent = errorOpt getOrElse invalidInitResponse(str).toJson
containerError(errorContent)
case _ =>
containerError(invalidInitResponse(str))
}
case Some((length, maxlength)) =>
containerError(truncatedResponse(str, length, maxlength))
}
case Left(e) =>
// This indicates a terminal failure in the container (it exited prematurely).
containerError(abnormalInitialization)
}
}
/**
* Interprets response from container after running the action. This method is only called when the initialization succeeded.
*
   * @param response either a container error or a container response (HTTP status code, HTTP response body as String)
* @return appropriate ActivationResponse representing run result
*/
protected[core] def processRunResponseContent(response: Either[ContainerConnectionError, ContainerResponse], logger: Logging): ActivationResponse = {
response match {
case Right(ContainerResponse(okStatus, str, truncated)) => truncated match {
case None =>
Try { str.parseJson.asJsObject } match {
case scala.util.Success(result @ JsObject(fields)) =>
            // If the response is a JSON object containing an error field, accept it as the response error.
val errorOpt = fields.get(ERROR_FIELD)
if (okStatus) {
errorOpt map { error =>
applicationError(error)
} getOrElse {
// The happy path.
success(Some(result))
}
} else {
// Any non-200 code is treated as a container failure. We still need to check whether
// there was a useful error message in there.
val errorContent = errorOpt getOrElse invalidRunResponse(str).toJson
containerError(errorContent)
}
case scala.util.Success(notAnObj) =>
// This should affect only blackbox containers, since our own containers should already test for that.
containerError(invalidRunResponse(str))
case scala.util.Failure(t) =>
// This should affect only blackbox containers, since our own containers should already test for that.
logger.warn(this, s"response did not json parse: '$str' led to $t")
containerError(invalidRunResponse(str))
}
case Some((length, maxlength)) =>
containerError(truncatedResponse(str, length, maxlength))
}
case Left(e) =>
// This indicates a terminal failure in the container (it exited prematurely).
containerError(abnormalRun)
}
}
protected[core] implicit val serdes = jsonFormat2(ActivationResponse.apply)
}
| xin-cai/openwhisk | common/scala/src/main/scala/whisk/core/entity/ActivationResult.scala | Scala | apache-2.0 | 9,578 |
package app
import javax.ws.rs.{DefaultValue, GET, Path, Produces, QueryParam}
import javax.ws.rs.core.MediaType
@Path("hello")
class Hello {
@GET
@Produces(Array(MediaType.TEXT_PLAIN))
def say(@QueryParam("name") @DefaultValue("world") name: String): String =
s"Hello, $name"
}
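// Once this resource is registered with a JAX-RS runtime, GET /hello?name=Scala
// is expected to return the plain-text body "Hello, Scala"; without the query
// parameter the @DefaultValue applies and the body is "Hello, world".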
| kazuhira-r/javaee7-scala-examples | jersey-standalone-example/src/main/scala/app/Hello.scala | Scala | mit | 291 |
package com.twitter.hello.heroku
import com.codahale.metrics.MetricFilter
import com.google.inject.Stage
import com.twitter.finagle.metrics.MetricsStatsReceiver
import com.twitter.finatra.http.EmbeddedHttpServer
import com.twitter.inject.server.FeatureTest
class HelloWorldStartupTest extends FeatureTest {
override val server = new EmbeddedHttpServer(
twitterServer = new HelloWorldServer,
stage = Stage.PRODUCTION,
verbose = false)
override def afterEach() {
MetricsStatsReceiver.metrics.removeMatching(MetricFilter.ALL)
}
"Server" should {
"startup" in {
// Because we disabled the adminHttpServer we instead check the started flag.
server.assertStarted()
}
}
}
| syamantm/finatra | examples/hello-world-heroku/src/test/scala/com/twitter/hello/heroku/HelloWorldStartupTest.scala | Scala | apache-2.0 | 716 |
package com.minalien.mffs
import net.minecraftforge.common.config.Configuration
/**
* Stores all configuration data for MFFS.
*/
object ModConfig {
object WorldGen {
var enableMonazitOre = true
var monazitOrePerVein = 8
var monazitOreVeinsPerChunk = 5
var monazitOreMinHeight = 10
var monazitOreMaxHeight = 64
}
object ForceEnergy {
var forceEnergyPerForcicium = 100f
var forciciumConsumptionCycle = 200
}
def load(configFile: Configuration) {
//////////////////////////////////////////////////
// World Gen
//////////////////////////////////////////////////
WorldGen.enableMonazitOre = configFile.get("WorldGen", "Enable Monazit Ore", WorldGen.enableMonazitOre,
"If this is disabled, the Force Energy Extractor recipe will be disabled as well!").getBoolean(WorldGen
.enableMonazitOre)
WorldGen.monazitOrePerVein = configFile.get("WorldGen", "Avg. Monazit Ore Blocks per Vein",
WorldGen.monazitOrePerVein, "Amount of Monazit ore, on average, in each Monazit Vein.").getInt(WorldGen
.monazitOrePerVein)
WorldGen.monazitOreVeinsPerChunk = configFile.get("WorldGen", "Monazit Ore Veins per Chunk",
WorldGen.monazitOreVeinsPerChunk, "Number of Monazit Ore veins to generate per chunk.").getInt(WorldGen
.monazitOreVeinsPerChunk)
WorldGen.monazitOreMinHeight = configFile.get("WorldGen", "Monazit Ore Minimum Height",
WorldGen.monazitOreMinHeight, "Minimum height in the world for Monazit Ore veins to spawn.")
.getInt(WorldGen.monazitOreMinHeight)
WorldGen.monazitOreMaxHeight = configFile.get("WorldGen", "Monazit Ore Maximum Height",
      WorldGen.monazitOreMaxHeight, "Maximum height in the world for Monazit Ore veins to spawn.")
.getInt(WorldGen.monazitOreMaxHeight)
//////////////////////////////////////////////////
// Force Energy
//////////////////////////////////////////////////
ForceEnergy.forceEnergyPerForcicium = configFile.get("ForceEnergy", "Force Energy Per Forcicium",
ForceEnergy.forceEnergyPerForcicium, "Amount of Force Energy generated per Forcicium in the FE " +
"Extractor").getDouble(ForceEnergy.forceEnergyPerForcicium).asInstanceOf[Float]
ForceEnergy.forciciumConsumptionCycle = configFile.get("ForceEnergy", "Forcicium Consumption Cycle",
ForceEnergy.forciciumConsumptionCycle, "Number of ticks it takes to consume one Forcicium")
.getInt(ForceEnergy.forciciumConsumptionCycle)
if(configFile.hasChanged)
configFile.save()
}
}
| Vexatos/MFFS | src/main/scala/com/minalien/mffs/ModConfig.scala | Scala | gpl-3.0 | 2,454 |
package com.joshcough.minecraft.examples
import com.joshcough.minecraft.{CommandsPlugin, ListenersPlugin}
import org.bukkit.{Location, Material}
import org.bukkit.block.Block
import org.bukkit.entity.Player
import Material.WOOD_AXE
class WorldEditDemo extends ListenersPlugin with CommandsPlugin {
val corners = collection.mutable.Map[Player, List[Location]]().withDefaultValue(Nil)
val listeners = List(
OnLeftClickBlock ((p, e) => if (p isHoldingA WOOD_AXE) { setFirstPos (p, e.loc); e.cancel }),
OnRightClickBlock((p, e) => if (p isHoldingA WOOD_AXE) { setSecondPos(p, e.loc) })
)
val commands = List(
Command(
name = "set" ,
desc = "Set all the selected blocks to the given material type.",
args = material)(
body = { case (p, m) => for(b <- cube(p)) b changeTo m }
),
Command(
name = "replace",
desc = "Replace all the selected blocks of the first material type to the second material type.",
args = material ~ material)(
body = { case (p, oldM ~ newM) => for(b <- cube(p); if(b is oldM)) b changeTo newM }
)
)
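  // In-game flow: mark the first corner with a left click and the second with a
  // right click while holding a wooden axe, then run e.g. "/set stone" or
  // "/replace dirt glass" (material names in whatever form the `material`
  // argument parser accepts).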
def cube(p:Player):Stream[Block] = corners(p).filter(_.length == 2) match {
case List(loc1, loc2) => loc1.cubeTo(loc2).blocks
case _ => p ! "Both corners must be set!"; Stream[Block]()
}
def setFirstPos(p:Player,loc: Location): Unit = {
corners += (p -> List(loc))
p ! s"first corner set to: ${loc.xyz}"
}
def setSecondPos(p:Player,loc2: Location): Unit = corners(p) match {
case loc1 :: _ =>
corners += (p -> List(loc1, loc2))
p ! s"second corner set to: ${loc2.xyz}"
case Nil =>
p ! "set corner one first! (with a left click)"
}
} | joshcough/MinecraftPlugins | examples/src/main/scala/com/joshcough/minecraft/examples/WorldEditDemo.scala | Scala | mit | 1,707 |
class PersonOneLiner(firstName: String, lastName: String);
new PersonOneLiner("Johnny", "Cash");
class Compass {
val directions = List("north" , "east" , "south" , "west" )
var bearing = 0
println("Initial bearing: " + direction)
// uses bearing and prints element in directions list
def direction() = directions(bearing)
def inform(turnDirection: String) {
println("Turning " + turnDirection + ". Now bearing " + direction)
}
def turnRight() {
bearing = (bearing + 1) % directions.size
inform("right" )
}
def turnLeft() {
bearing = (bearing + (directions.size - 1)) % directions.size
inform("left")
}
}
val myCompass = new Compass
myCompass.turnRight
myCompass.turnRight
myCompass.turnLeft
myCompass.turnLeft
myCompass.turnLeft
class Person(first_name: String) {
println("Outer constructor" )
def this(first_name: String, last_name: String) {
this(first_name) // this is mandatory.
println("Inner constructor" )
}
def talk() = println("Hi" )
}
val johnny = new Person("Johnny" )
val johnnyCash = new Person("Johnny" , "Cash" )
| Mastermindzh/Seven-Languages-in-Seven-Weeks | Scala/Day 1/classes.scala | Scala | mit | 1,132 |
package expr
sealed trait Expr
case class Var(name: String) extends Expr
case class Number(num: Double) extends Expr
case class UnOp(operator: String, arg: Expr) extends Expr
case class BinOp(operator: String, left: Expr, right: Expr) extends Expr
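// An illustrative pattern match over the ADT above, in the spirit of the
// book's simplification examples; the rewrite rules shown are only a small,
// hypothetical subset.
object ExprDemo {
  def simplifyTop(expr: Expr): Expr = expr match {
    case UnOp("-", UnOp("-", e))  => e   // double negation
    case BinOp("+", e, Number(0)) => e   // adding zero
    case BinOp("*", e, Number(1)) => e   // multiplying by one
    case _                        => expr
  }
  def main(args: Array[String]): Unit =
    println(simplifyTop(UnOp("-", UnOp("-", Var("x")))))  // prints Var(x)
}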
| mhotchen/programming-in-scala | src/expr/Expr.scala | Scala | apache-2.0 | 249 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.math.{BigDecimal, BigInteger}
import java.nio.ByteOrder
import java.time.{ZoneId, ZoneOffset}
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import org.apache.parquet.column.Dictionary
import org.apache.parquet.io.ColumnIOFactory
import org.apache.parquet.io.api.{Binary, Converter, GroupConverter, PrimitiveConverter}
import org.apache.parquet.schema.{GroupType, Type, Types}
import org.apache.parquet.schema.LogicalTypeAnnotation._
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.{BINARY, FIXED_LEN_BYTE_ARRAY, INT32, INT64, INT96}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.util.{ArrayBasedMapData, CaseInsensitiveMap, DateTimeUtils, GenericArrayData}
import org.apache.spark.sql.errors.QueryExecutionErrors
import org.apache.spark.sql.execution.datasources.DataSourceUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.LegacyBehaviorPolicy
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.UTF8String
/**
* A [[ParentContainerUpdater]] is used by a Parquet converter to set converted values to some
* corresponding parent container. For example, a converter for a `StructType` field may set
* converted values to a [[InternalRow]]; or a converter for array elements may append converted
* values to an [[ArrayBuffer]].
*/
private[parquet] trait ParentContainerUpdater {
/** Called before a record field is being converted */
def start(): Unit = ()
/** Called after a record field is being converted */
def end(): Unit = ()
def set(value: Any): Unit = ()
def setBoolean(value: Boolean): Unit = set(value)
def setByte(value: Byte): Unit = set(value)
def setShort(value: Short): Unit = set(value)
def setInt(value: Int): Unit = set(value)
def setLong(value: Long): Unit = set(value)
def setFloat(value: Float): Unit = set(value)
def setDouble(value: Double): Unit = set(value)
}
/** A no-op updater used for root converter (who doesn't have a parent). */
private[parquet] object NoopUpdater extends ParentContainerUpdater
private[parquet] trait HasParentContainerUpdater {
def updater: ParentContainerUpdater
}
/**
* A convenient converter class for Parquet group types with a [[HasParentContainerUpdater]].
*/
private[parquet] abstract class ParquetGroupConverter(val updater: ParentContainerUpdater)
extends GroupConverter with HasParentContainerUpdater
/**
* Parquet converter for Parquet primitive types. Note that not all Spark SQL atomic types
* are handled by this converter. Parquet primitive types are only a subset of those of Spark
* SQL. For example, BYTE, SHORT, and INT in Spark SQL are all covered by INT32 in Parquet.
*/
private[parquet] class ParquetPrimitiveConverter(val updater: ParentContainerUpdater)
extends PrimitiveConverter with HasParentContainerUpdater {
override def addBoolean(value: Boolean): Unit = updater.setBoolean(value)
override def addInt(value: Int): Unit = updater.setInt(value)
override def addLong(value: Long): Unit = updater.setLong(value)
override def addFloat(value: Float): Unit = updater.setFloat(value)
override def addDouble(value: Double): Unit = updater.setDouble(value)
override def addBinary(value: Binary): Unit = updater.set(value.getBytes)
}
/**
* A [[ParquetRowConverter]] is used to convert Parquet records into Catalyst [[InternalRow]]s.
* Since Catalyst `StructType` is also a Parquet record, this converter can be used as root
* converter. Take the following Parquet type as an example:
* {{{
* message root {
* required int32 f1;
* optional group f2 {
* required double f21;
* optional binary f22 (utf8);
* }
* }
* }}}
* 5 converters will be created:
*
* - a root [[ParquetRowConverter]] for [[org.apache.parquet.schema.MessageType]] `root`,
* which contains:
* - a [[ParquetPrimitiveConverter]] for required
* [[org.apache.parquet.schema.LogicalTypeAnnotation.intType(32, true)]] field `f1`, and
* - a nested [[ParquetRowConverter]] for optional [[GroupType]] `f2`, which contains:
* - a [[ParquetPrimitiveConverter]] for required
* [[org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName.DOUBLE]] field `f21`, and
* - a [[ParquetStringConverter]] for optional
* [[org.apache.parquet.schema.LogicalTypeAnnotation.stringType()]] string field `f22`
*
* When used as a root converter, [[NoopUpdater]] should be used since root converters don't have
* any "parent" container.
*
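 * For reference, the Catalyst schema corresponding to the example above would look roughly like
 * the following (an illustrative sketch, not something this class emits):
 * {{{
 *   StructType(
 *     StructField("f1", IntegerType, nullable = false) ::
 *     StructField("f2", StructType(
 *       StructField("f21", DoubleType, nullable = false) ::
 *       StructField("f22", StringType, nullable = true) :: Nil), nullable = true) :: Nil)
 * }}}
 *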
* @param schemaConverter A utility converter used to convert Parquet types to Catalyst types.
* @param parquetType Parquet schema of Parquet records
* @param catalystType Spark SQL schema that corresponds to the Parquet record type. User-defined
* types should have been expanded.
* @param convertTz the optional time zone to convert to int96 data
* @param datetimeRebaseMode the mode of rebasing date/timestamp from Julian to Proleptic Gregorian
* calendar
* @param int96RebaseMode the mode of rebasing INT96 timestamp from Julian to Proleptic Gregorian
* calendar
* @param updater An updater which propagates converted field values to the parent container
*/
private[parquet] class ParquetRowConverter(
schemaConverter: ParquetToSparkSchemaConverter,
parquetType: GroupType,
catalystType: StructType,
convertTz: Option[ZoneId],
datetimeRebaseMode: LegacyBehaviorPolicy.Value,
int96RebaseMode: LegacyBehaviorPolicy.Value,
updater: ParentContainerUpdater)
extends ParquetGroupConverter(updater) with Logging {
assert(
parquetType.getFieldCount <= catalystType.length,
s"""Field count of the Parquet schema is greater than the field count of the Catalyst schema:
|
|Parquet schema:
|$parquetType
|Catalyst schema:
|${catalystType.prettyJson}
""".stripMargin)
assert(
!catalystType.existsRecursively(_.isInstanceOf[UserDefinedType[_]]),
s"""User-defined types in Catalyst schema should have already been expanded:
|${catalystType.prettyJson}
""".stripMargin)
logDebug(
s"""Building row converter for the following schema:
|
|Parquet form:
|$parquetType
|Catalyst form:
|${catalystType.prettyJson}
""".stripMargin)
/**
* Updater used together with field converters within a [[ParquetRowConverter]]. It propagates
   * converted field values to the `ordinal`-th cell in `currentRow`.
*/
private final class RowUpdater(row: InternalRow, ordinal: Int) extends ParentContainerUpdater {
override def set(value: Any): Unit = row(ordinal) = value
override def setBoolean(value: Boolean): Unit = row.setBoolean(ordinal, value)
override def setByte(value: Byte): Unit = row.setByte(ordinal, value)
override def setShort(value: Short): Unit = row.setShort(ordinal, value)
override def setInt(value: Int): Unit = row.setInt(ordinal, value)
override def setLong(value: Long): Unit = row.setLong(ordinal, value)
override def setDouble(value: Double): Unit = row.setDouble(ordinal, value)
override def setFloat(value: Float): Unit = row.setFloat(ordinal, value)
}
private[this] val currentRow = new SpecificInternalRow(catalystType.map(_.dataType))
/**
* The [[InternalRow]] converted from an entire Parquet record.
*/
def currentRecord: InternalRow = currentRow
private val dateRebaseFunc = DataSourceUtils.creteDateRebaseFuncInRead(
datetimeRebaseMode, "Parquet")
private val timestampRebaseFunc = DataSourceUtils.creteTimestampRebaseFuncInRead(
datetimeRebaseMode, "Parquet")
private val int96RebaseFunc = DataSourceUtils.creteTimestampRebaseFuncInRead(
int96RebaseMode, "Parquet INT96")
// Converters for each field.
private[this] val fieldConverters: Array[Converter with HasParentContainerUpdater] = {
// (SPARK-31116) Use case insensitive map if spark.sql.caseSensitive is false
// to prevent throwing IllegalArgumentException when searching catalyst type's field index
val catalystFieldNameToIndex = if (SQLConf.get.caseSensitiveAnalysis) {
catalystType.fieldNames.zipWithIndex.toMap
} else {
CaseInsensitiveMap(catalystType.fieldNames.zipWithIndex.toMap)
}
parquetType.getFields.asScala.map { parquetField =>
val fieldIndex = catalystFieldNameToIndex(parquetField.getName)
val catalystField = catalystType(fieldIndex)
// Converted field value should be set to the `fieldIndex`-th cell of `currentRow`
newConverter(parquetField, catalystField.dataType, new RowUpdater(currentRow, fieldIndex))
}.toArray
}
// Updaters for each field.
private[this] val fieldUpdaters: Array[ParentContainerUpdater] = fieldConverters.map(_.updater)
override def getConverter(fieldIndex: Int): Converter = fieldConverters(fieldIndex)
override def end(): Unit = {
var i = 0
while (i < fieldUpdaters.length) {
fieldUpdaters(i).end()
i += 1
}
updater.set(currentRow)
}
override def start(): Unit = {
var i = 0
val numFields = currentRow.numFields
while (i < numFields) {
currentRow.setNullAt(i)
i += 1
}
i = 0
while (i < fieldUpdaters.length) {
fieldUpdaters(i).start()
i += 1
}
}
/**
* Creates a converter for the given Parquet type `parquetType` and Spark SQL data type
* `catalystType`. Converted values are handled by `updater`.
*/
private def newConverter(
parquetType: Type,
catalystType: DataType,
updater: ParentContainerUpdater): Converter with HasParentContainerUpdater = {
def isUnsignedIntTypeMatched(bitWidth: Int): Boolean = {
parquetType.getLogicalTypeAnnotation match {
case i: IntLogicalTypeAnnotation if !i.isSigned => i.getBitWidth == bitWidth
case _ => false
}
}
catalystType match {
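      // Unsigned INT32 values do not fit into Spark's IntegerType, so they are widened to
      // LongType here.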
case LongType if isUnsignedIntTypeMatched(32) =>
new ParquetPrimitiveConverter(updater) {
override def addInt(value: Int): Unit =
updater.setLong(Integer.toUnsignedLong(value))
}
case BooleanType | IntegerType | LongType | FloatType | DoubleType | BinaryType |
_: AnsiIntervalType =>
new ParquetPrimitiveConverter(updater)
case ByteType =>
new ParquetPrimitiveConverter(updater) {
override def addInt(value: Int): Unit =
updater.setByte(value.asInstanceOf[ByteType#InternalType])
}
case ShortType =>
new ParquetPrimitiveConverter(updater) {
override def addInt(value: Int): Unit =
updater.setShort(value.asInstanceOf[ShortType#InternalType])
}
// For INT32 backed decimals
case _: DecimalType if parquetType.asPrimitiveType().getPrimitiveTypeName == INT32 =>
parquetType.asPrimitiveType().getLogicalTypeAnnotation match {
case decimalType: DecimalLogicalTypeAnnotation =>
new ParquetIntDictionaryAwareDecimalConverter(
decimalType.getPrecision, decimalType.getScale, updater)
case _ =>
// If the column is a plain INT32, we should pick the precision that can host the
// largest INT32 value.
new ParquetIntDictionaryAwareDecimalConverter(
DecimalType.IntDecimal.precision, 0, updater)
}
// For unsigned int64
case _: DecimalType if isUnsignedIntTypeMatched(64) =>
new ParquetPrimitiveConverter(updater) {
override def addLong(value: Long): Unit = {
updater.set(Decimal(java.lang.Long.toUnsignedString(value)))
}
}
// For INT64 backed decimals
case t: DecimalType if parquetType.asPrimitiveType().getPrimitiveTypeName == INT64 =>
parquetType.asPrimitiveType().getLogicalTypeAnnotation match {
case decimalType: DecimalLogicalTypeAnnotation =>
new ParquetLongDictionaryAwareDecimalConverter(
decimalType.getPrecision, decimalType.getScale, updater)
case _ =>
// If the column is a plain INT64, we should pick the precision that can host the
// largest INT64 value.
new ParquetLongDictionaryAwareDecimalConverter(
DecimalType.LongDecimal.precision, 0, updater)
}
// For BINARY and FIXED_LEN_BYTE_ARRAY backed decimals
case t: DecimalType
if parquetType.asPrimitiveType().getPrimitiveTypeName == FIXED_LEN_BYTE_ARRAY ||
parquetType.asPrimitiveType().getPrimitiveTypeName == BINARY =>
parquetType.asPrimitiveType().getLogicalTypeAnnotation match {
case decimalType: DecimalLogicalTypeAnnotation =>
new ParquetBinaryDictionaryAwareDecimalConverter(
decimalType.getPrecision, decimalType.getScale, updater)
case _ =>
throw QueryExecutionErrors.cannotCreateParquetConverterForTypeError(
t, parquetType.toString)
}
case t: DecimalType =>
throw QueryExecutionErrors.cannotCreateParquetConverterForDecimalTypeError(
t, parquetType.toString)
case StringType =>
new ParquetStringConverter(updater)
case TimestampType
if parquetType.getLogicalTypeAnnotation.isInstanceOf[TimestampLogicalTypeAnnotation] &&
parquetType.getLogicalTypeAnnotation
.asInstanceOf[TimestampLogicalTypeAnnotation].getUnit == TimeUnit.MICROS =>
new ParquetPrimitiveConverter(updater) {
override def addLong(value: Long): Unit = {
updater.setLong(timestampRebaseFunc(value))
}
}
case TimestampType
if parquetType.getLogicalTypeAnnotation.isInstanceOf[TimestampLogicalTypeAnnotation] &&
parquetType.getLogicalTypeAnnotation
.asInstanceOf[TimestampLogicalTypeAnnotation].getUnit == TimeUnit.MILLIS =>
new ParquetPrimitiveConverter(updater) {
override def addLong(value: Long): Unit = {
val micros = DateTimeUtils.millisToMicros(value)
updater.setLong(timestampRebaseFunc(micros))
}
}
// INT96 timestamp doesn't have a logical type, here we check the physical type instead.
case TimestampType if parquetType.asPrimitiveType().getPrimitiveTypeName == INT96 =>
new ParquetPrimitiveConverter(updater) {
// Converts nanosecond timestamps stored as INT96
override def addBinary(value: Binary): Unit = {
val julianMicros = ParquetRowConverter.binaryToSQLTimestamp(value)
val gregorianMicros = int96RebaseFunc(julianMicros)
val adjTime = convertTz.map(DateTimeUtils.convertTz(gregorianMicros, _, ZoneOffset.UTC))
.getOrElse(gregorianMicros)
updater.setLong(adjTime)
}
}
case TimestampNTZType
if canReadAsTimestampNTZ(parquetType) &&
parquetType.getLogicalTypeAnnotation
.asInstanceOf[TimestampLogicalTypeAnnotation].getUnit == TimeUnit.MICROS =>
new ParquetPrimitiveConverter(updater)
case TimestampNTZType
if canReadAsTimestampNTZ(parquetType) &&
parquetType.getLogicalTypeAnnotation
.asInstanceOf[TimestampLogicalTypeAnnotation].getUnit == TimeUnit.MILLIS =>
new ParquetPrimitiveConverter(updater) {
override def addLong(value: Long): Unit = {
val micros = DateTimeUtils.millisToMicros(value)
updater.setLong(micros)
}
}
case DateType =>
new ParquetPrimitiveConverter(updater) {
override def addInt(value: Int): Unit = {
updater.set(dateRebaseFunc(value))
}
}
// A repeated field that is neither contained by a `LIST`- or `MAP`-annotated group nor
// annotated by `LIST` or `MAP` should be interpreted as a required list of required
// elements where the element type is the type of the field.
case t: ArrayType
if !parquetType.getLogicalTypeAnnotation.isInstanceOf[ListLogicalTypeAnnotation] =>
if (parquetType.isPrimitive) {
new RepeatedPrimitiveConverter(parquetType, t.elementType, updater)
} else {
new RepeatedGroupConverter(parquetType, t.elementType, updater)
}
case t: ArrayType =>
new ParquetArrayConverter(parquetType.asGroupType(), t, updater)
case t: MapType =>
new ParquetMapConverter(parquetType.asGroupType(), t, updater)
case t: StructType =>
val wrappedUpdater = {
// SPARK-30338: avoid unnecessary InternalRow copying for nested structs:
// There are two cases to handle here:
//
// 1. Parent container is a map or array: we must make a deep copy of the mutable row
// because this converter may be invoked multiple times per Parquet input record
// (if the map or array contains multiple elements).
//
// 2. Parent container is a struct: we don't need to copy the row here because either:
//
// (a) all ancestors are structs and therefore no copying is required because this
// converter will only be invoked once per Parquet input record, or
// (b) some ancestor is struct that is nested in a map or array and that ancestor's
// converter will perform deep-copying (which will recursively copy this row).
if (updater.isInstanceOf[RowUpdater]) {
// `updater` is a RowUpdater, implying that the parent container is a struct.
updater
} else {
          // `updater` is NOT a RowUpdater, implying that the parent container is a map or array.
new ParentContainerUpdater {
override def set(value: Any): Unit = {
updater.set(value.asInstanceOf[SpecificInternalRow].copy()) // deep copy
}
}
}
}
new ParquetRowConverter(
schemaConverter,
parquetType.asGroupType(),
t,
convertTz,
datetimeRebaseMode,
int96RebaseMode,
wrappedUpdater)
case t =>
throw QueryExecutionErrors.cannotCreateParquetConverterForDataTypeError(
t, parquetType.toString)
}
}
// Only INT64 column with Timestamp logical annotation `isAdjustedToUTC=false`
// can be read as Spark's TimestampNTZ type. This is to avoid mistakes in reading the timestamp
// values.
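  // For example, `optional int64 ts (TIMESTAMP(MICROS, isAdjustedToUTC = false))` qualifies,
  // while the same annotation with `isAdjustedToUTC = true` does not.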
private def canReadAsTimestampNTZ(parquetType: Type): Boolean =
parquetType.asPrimitiveType().getPrimitiveTypeName == INT64 &&
parquetType.getLogicalTypeAnnotation.isInstanceOf[TimestampLogicalTypeAnnotation] &&
!parquetType.getLogicalTypeAnnotation
.asInstanceOf[TimestampLogicalTypeAnnotation].isAdjustedToUTC
/**
* Parquet converter for strings. A dictionary is used to minimize string decoding cost.
*/
private final class ParquetStringConverter(updater: ParentContainerUpdater)
extends ParquetPrimitiveConverter(updater) {
private var expandedDictionary: Array[UTF8String] = null
override def hasDictionarySupport: Boolean = true
override def setDictionary(dictionary: Dictionary): Unit = {
this.expandedDictionary = Array.tabulate(dictionary.getMaxId + 1) { i =>
UTF8String.fromBytes(dictionary.decodeToBinary(i).getBytes)
}
}
override def addValueFromDictionary(dictionaryId: Int): Unit = {
updater.set(expandedDictionary(dictionaryId))
}
override def addBinary(value: Binary): Unit = {
// The underlying `ByteBuffer` implementation is guaranteed to be `HeapByteBuffer`, so here we
// are using `Binary.toByteBuffer.array()` to steal the underlying byte array without copying
// it.
val buffer = value.toByteBuffer
val offset = buffer.arrayOffset() + buffer.position()
val numBytes = buffer.remaining()
updater.set(UTF8String.fromBytes(buffer.array(), offset, numBytes))
}
}
/**
* Parquet converter for fixed-precision decimals.
*/
private abstract class ParquetDecimalConverter(
precision: Int, scale: Int, updater: ParentContainerUpdater)
extends ParquetPrimitiveConverter(updater) {
protected var expandedDictionary: Array[Decimal] = _
override def hasDictionarySupport: Boolean = true
override def addValueFromDictionary(dictionaryId: Int): Unit = {
updater.set(expandedDictionary(dictionaryId))
}
// Converts decimals stored as INT32
override def addInt(value: Int): Unit = {
addLong(value: Long)
}
// Converts decimals stored as INT64
override def addLong(value: Long): Unit = {
updater.set(decimalFromLong(value))
}
    // Converts decimals stored as either FIXED_LEN_BYTE_ARRAY or BINARY
override def addBinary(value: Binary): Unit = {
updater.set(decimalFromBinary(value))
}
protected def decimalFromLong(value: Long): Decimal = {
Decimal(value, precision, scale)
}
protected def decimalFromBinary(value: Binary): Decimal = {
if (precision <= Decimal.MAX_LONG_DIGITS) {
// Constructs a `Decimal` with an unscaled `Long` value if possible.
val unscaled = ParquetRowConverter.binaryToUnscaledLong(value)
Decimal(unscaled, precision, scale)
} else {
// Otherwise, resorts to an unscaled `BigInteger` instead.
Decimal(new BigDecimal(new BigInteger(value.getBytes), scale), precision, scale)
}
}
}
private class ParquetIntDictionaryAwareDecimalConverter(
precision: Int, scale: Int, updater: ParentContainerUpdater)
extends ParquetDecimalConverter(precision, scale, updater) {
override def setDictionary(dictionary: Dictionary): Unit = {
this.expandedDictionary = Array.tabulate(dictionary.getMaxId + 1) { id =>
decimalFromLong(dictionary.decodeToInt(id).toLong)
}
}
}
private class ParquetLongDictionaryAwareDecimalConverter(
precision: Int, scale: Int, updater: ParentContainerUpdater)
extends ParquetDecimalConverter(precision, scale, updater) {
override def setDictionary(dictionary: Dictionary): Unit = {
this.expandedDictionary = Array.tabulate(dictionary.getMaxId + 1) { id =>
decimalFromLong(dictionary.decodeToLong(id))
}
}
}
private class ParquetBinaryDictionaryAwareDecimalConverter(
precision: Int, scale: Int, updater: ParentContainerUpdater)
extends ParquetDecimalConverter(precision, scale, updater) {
override def setDictionary(dictionary: Dictionary): Unit = {
this.expandedDictionary = Array.tabulate(dictionary.getMaxId + 1) { id =>
decimalFromBinary(dictionary.decodeToBinary(id))
}
}
}
/**
* Parquet converter for arrays. Spark SQL arrays are represented as Parquet lists. Standard
* Parquet lists are represented as a 3-level group annotated by `LIST`:
* {{{
* <list-repetition> group <name> (LIST) { <-- parquetSchema points here
* repeated group list {
* <element-repetition> <element-type> element;
* }
* }
* }}}
* The `parquetSchema` constructor argument points to the outermost group.
*
* However, before this representation is standardized, some Parquet libraries/tools also use some
* non-standard formats to represent list-like structures. Backwards-compatibility rules for
* handling these cases are described in Parquet format spec.
*
* @see https://github.com/apache/parquet-format/blob/master/LogicalTypes.md#lists
*/
private final class ParquetArrayConverter(
parquetSchema: GroupType,
catalystSchema: ArrayType,
updater: ParentContainerUpdater)
extends ParquetGroupConverter(updater) {
private[this] val currentArray = ArrayBuffer.empty[Any]
private[this] val elementConverter: Converter = {
val repeatedType = parquetSchema.getType(0)
val elementType = catalystSchema.elementType
// At this stage, we need to figure out if the repeated field maps to the element type or is
// just the syntactic repeated group of the 3-level standard LIST layout. Take the following
// Parquet LIST-annotated group type as an example:
//
// optional group f (LIST) {
// repeated group list {
// optional group element {
// optional int32 element;
// }
// }
// }
//
// This type is ambiguous:
//
// 1. When interpreted as a standard 3-level layout, the `list` field is just the syntactic
// group, and the entire type should be translated to:
//
// ARRAY<STRUCT<element: INT>>
//
// 2. On the other hand, when interpreted as a non-standard 2-level layout, the `list` field
// represents the element type, and the entire type should be translated to:
//
// ARRAY<STRUCT<element: STRUCT<element: INT>>>
//
//
// Here we try to convert field `list` into a Catalyst type to see whether the converted type
// matches the Catalyst array element type.
//
// If the guessed element type from the above does not match the Catalyst type (for example,
// in case of schema evolution), we need to check if the repeated type matches one of the
// backward-compatibility rules for legacy LIST types (see the link above).
//
// If the element type does not match the Catalyst type and the underlying repeated type
// does not belong to the legacy LIST type, then it is case 1; otherwise, it is case 2.
//
// Since `convertField` method requires a Parquet `ColumnIO` as input, here we first create
// a dummy message type which wraps the given repeated type, and then convert it to the
// `ColumnIO` using Parquet API.
val messageType = Types.buildMessage().addField(repeatedType).named("foo")
val column = new ColumnIOFactory().getColumnIO(messageType)
val guessedElementType = schemaConverter.convertField(column.getChild(0)).sparkType
val isLegacy = schemaConverter.isElementType(repeatedType, parquetSchema.getName)
if (DataType.equalsIgnoreCompatibleNullability(guessedElementType, elementType) || isLegacy) {
// If the repeated field corresponds to the element type, creates a new converter using the
// type of the repeated field.
newConverter(repeatedType, elementType, new ParentContainerUpdater {
override def set(value: Any): Unit = currentArray += value
})
} else {
// If the repeated field corresponds to the syntactic group in the standard 3-level Parquet
// LIST layout, creates a new converter using the only child field of the repeated field.
assert(!repeatedType.isPrimitive && repeatedType.asGroupType().getFieldCount == 1)
new ElementConverter(repeatedType.asGroupType().getType(0), elementType)
}
}
override def getConverter(fieldIndex: Int): Converter = elementConverter
override def end(): Unit = updater.set(new GenericArrayData(currentArray.toArray))
override def start(): Unit = currentArray.clear()
/** Array element converter */
private final class ElementConverter(parquetType: Type, catalystType: DataType)
extends GroupConverter {
private var currentElement: Any = _
private[this] val converter =
newConverter(parquetType, catalystType, new ParentContainerUpdater {
override def set(value: Any): Unit = currentElement = value
})
override def getConverter(fieldIndex: Int): Converter = converter
override def end(): Unit = currentArray += currentElement
override def start(): Unit = currentElement = null
}
}
/** Parquet converter for maps */
private final class ParquetMapConverter(
parquetType: GroupType,
catalystType: MapType,
updater: ParentContainerUpdater)
extends ParquetGroupConverter(updater) {
private[this] val currentKeys = ArrayBuffer.empty[Any]
private[this] val currentValues = ArrayBuffer.empty[Any]
private[this] val keyValueConverter = {
val repeatedType = parquetType.getType(0).asGroupType()
new KeyValueConverter(
repeatedType.getType(0),
repeatedType.getType(1),
catalystType.keyType,
catalystType.valueType)
}
override def getConverter(fieldIndex: Int): Converter = keyValueConverter
override def end(): Unit = {
    // The Parquet map may contain null or duplicated map keys. When that happens, the behavior is
    // undefined.
// TODO (SPARK-26174): disallow it with a config.
updater.set(
new ArrayBasedMapData(
new GenericArrayData(currentKeys.toArray),
new GenericArrayData(currentValues.toArray)))
}
override def start(): Unit = {
currentKeys.clear()
currentValues.clear()
}
/** Parquet converter for key-value pairs within the map. */
private final class KeyValueConverter(
parquetKeyType: Type,
parquetValueType: Type,
catalystKeyType: DataType,
catalystValueType: DataType)
extends GroupConverter {
private var currentKey: Any = _
private var currentValue: Any = _
private[this] val converters = Array(
// Converter for keys
newConverter(parquetKeyType, catalystKeyType, new ParentContainerUpdater {
override def set(value: Any): Unit = currentKey = value
}),
// Converter for values
newConverter(parquetValueType, catalystValueType, new ParentContainerUpdater {
override def set(value: Any): Unit = currentValue = value
}))
override def getConverter(fieldIndex: Int): Converter = converters(fieldIndex)
override def end(): Unit = {
currentKeys += currentKey
currentValues += currentValue
}
override def start(): Unit = {
currentKey = null
currentValue = null
}
}
}
private trait RepeatedConverter {
private[this] val currentArray = ArrayBuffer.empty[Any]
protected def newArrayUpdater(updater: ParentContainerUpdater) = new ParentContainerUpdater {
override def start(): Unit = currentArray.clear()
override def end(): Unit = updater.set(new GenericArrayData(currentArray.toArray))
override def set(value: Any): Unit = currentArray += value
}
}
/**
* A primitive converter for converting unannotated repeated primitive values to required arrays
 * of required primitive values.
*/
private final class RepeatedPrimitiveConverter(
parquetType: Type,
catalystType: DataType,
parentUpdater: ParentContainerUpdater)
extends PrimitiveConverter with RepeatedConverter with HasParentContainerUpdater {
val updater: ParentContainerUpdater = newArrayUpdater(parentUpdater)
private[this] val elementConverter: PrimitiveConverter =
newConverter(parquetType, catalystType, updater).asPrimitiveConverter()
override def addBoolean(value: Boolean): Unit = elementConverter.addBoolean(value)
override def addInt(value: Int): Unit = elementConverter.addInt(value)
override def addLong(value: Long): Unit = elementConverter.addLong(value)
override def addFloat(value: Float): Unit = elementConverter.addFloat(value)
override def addDouble(value: Double): Unit = elementConverter.addDouble(value)
override def addBinary(value: Binary): Unit = elementConverter.addBinary(value)
override def setDictionary(dict: Dictionary): Unit = elementConverter.setDictionary(dict)
override def hasDictionarySupport: Boolean = elementConverter.hasDictionarySupport
override def addValueFromDictionary(id: Int): Unit = elementConverter.addValueFromDictionary(id)
}
/**
* A group converter for converting unannotated repeated group values to required arrays of
* required struct values.
*/
private final class RepeatedGroupConverter(
parquetType: Type,
catalystType: DataType,
parentUpdater: ParentContainerUpdater)
extends GroupConverter with HasParentContainerUpdater with RepeatedConverter {
val updater: ParentContainerUpdater = newArrayUpdater(parentUpdater)
private[this] val elementConverter: GroupConverter =
newConverter(parquetType, catalystType, updater).asGroupConverter()
override def getConverter(field: Int): Converter = elementConverter.getConverter(field)
override def end(): Unit = elementConverter.end()
override def start(): Unit = elementConverter.start()
}
}
private[parquet] object ParquetRowConverter {
def binaryToUnscaledLong(binary: Binary): Long = {
// The underlying `ByteBuffer` implementation is guaranteed to be `HeapByteBuffer`, so here
// we are using `Binary.toByteBuffer.array()` to steal the underlying byte array without
// copying it.
val buffer = binary.toByteBuffer
val bytes = buffer.array()
val start = buffer.arrayOffset() + buffer.position()
val end = buffer.arrayOffset() + buffer.limit()
var unscaled = 0L
var i = start
while (i < end) {
unscaled = (unscaled << 8) | (bytes(i) & 0xff)
i += 1
}
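    // Sign-extend the accumulated big-endian value below. For example, a 2-byte input 0xFF 0x85
    // gives unscaled = 0xFF85 and bits = 16, and the shift pair turns it into -123.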
val bits = 8 * (end - start)
unscaled = (unscaled << (64 - bits)) >> (64 - bits)
unscaled
}
def binaryToSQLTimestamp(binary: Binary): Long = {
assert(binary.length() == 12, s"Timestamps (with nanoseconds) are expected to be stored in" +
s" 12-byte long binaries. Found a ${binary.length()}-byte binary instead.")
val buffer = binary.toByteBuffer.order(ByteOrder.LITTLE_ENDIAN)
val timeOfDayNanos = buffer.getLong
val julianDay = buffer.getInt
DateTimeUtils.fromJulianDay(julianDay, timeOfDayNanos)
}
}
| nchammas/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetRowConverter.scala | Scala | apache-2.0 | 34,953 |
package org.jetbrains.plugins.scala.components.libextensions.ui
import java.awt.BorderLayout
import java.awt.event.ActionEvent
import java.util
import java.util.Collections
import com.intellij.openapi.project.Project
import com.intellij.openapi.ui.{DialogBuilder, InputValidatorEx, Messages}
import com.intellij.ui._
import com.intellij.ui.components.{JBLabel, JBList}
import com.intellij.util.ui.{JBUI, UIUtil}
import javax.swing._
import org.jetbrains.plugins.scala.components.libextensions.LibraryExtensionsManager._
import org.jetbrains.plugins.scala.components.libextensions.{ExtensionDescriptor, LibraryDescriptor, LibraryExtensionsManager}
import org.jetbrains.plugins.scala.settings.ScalaProjectSettings
class LibExtensionsSettingsPanelWrapper(private val rootPanel: JPanel,
private val project: Project) {
private val libraryExtensionsManager = LibraryExtensionsManager.getInstance(project)
// Exported components
val enabledCB: JCheckBox = new JCheckBox("Enable loading external extensions", true)
class LibraryListModel(val extensionsModel: LibraryDetailsModel) extends AbstractListModel[LibraryDescriptor] {
private val extensionsManager: LibraryExtensionsManager = libraryExtensionsManager
override def getSize: Int = extensionsManager.getAvailableLibraries.length
override def getElementAt(i: Int) = extensionsManager.getAvailableLibraries(i)
}
class LibraryDetailsModel(selectedDescriptor: Option[LibraryDescriptor]) extends AbstractListModel[ExtensionDescriptor] {
private val myExtensions = selectedDescriptor.flatMap(_.getCurrentPluginDescriptor.map(_.extensions)).getOrElse(Nil).filter(_.isAvailable)
override def getSize: Int = myExtensions.length
override def getElementAt(i: Int): ExtensionDescriptor = myExtensions(i)
}
class EditPatternsPanel(data: util.List[String]) extends AddEditDeleteListPanel[String]("Library extension search patterns", data) {
override protected def editSelectedItem(item: String): String = showEditDialog(item)
override def findItemToAdd(): String = showEditDialog("")
private def showEditDialog(initialValue: String) =
Messages.showInputDialog(this, "", "Enter search pattern", Messages.getQuestionIcon, initialValue, new InputValidatorEx {
override def getErrorText(inputString: String): String = if (!checkInput(inputString)) "org, module and version must be separated with %" else null
override def checkInput(inputString: String): Boolean = inputString.matches("^.*%.*%.*$")
override def canClose(inputString: String) = true
})
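    // The validator above accepts SBT-style dependency patterns of the form org % module % version,
    // e.g. "org.example % some-module % 1.0" (example values are illustrative).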
def getPatterns: util.Enumeration[String] = myListModel.elements()
}
def build(): Unit = {
import com.intellij.util.ui.UI
def showPatternManageDialog(e: ActionEvent): Unit = {
val builder = new DialogBuilder(project)
val settings = ScalaProjectSettings.getInstance(project)
val listView = new EditPatternsPanel(settings.getLextSearchPatterns)
listView.setPreferredSize(JBUI.size(350, 135))
builder.setCenterPanel(UI.PanelFactory.panel(listView)
.withComment("Use SBT dependency format to define the pattern")
.createPanel())
builder.show()
settings.setLextSearchPatterns(Collections.list(listView.getPatterns))
}
rootPanel.setLayout(new BorderLayout())
UIUtil.addBorder(rootPanel, JBUI.Borders.empty(10))
val checkBoxes = new JPanel()
checkBoxes.setLayout(new BoxLayout(checkBoxes, BoxLayout.Y_AXIS))
checkBoxes.add(UI.PanelFactory.panel(enabledCB)
.withTooltip("IDEA will try to search for extra support for particular libraries in your project")
.createPanel())
val settingsPanel = new JPanel(new BorderLayout())
settingsPanel.add(checkBoxes, BorderLayout.CENTER)
val button = new JButton("Manage Search Patterns")
button.addActionListener(showPatternManageDialog)
settingsPanel.add(button, BorderLayout.LINE_END)
rootPanel.add(settingsPanel, BorderLayout.PAGE_START)
val detailsModel = new LibraryDetailsModel(None)
val extensionsList = new JBList[ExtensionDescriptor](detailsModel)
val extensionsPane = new JPanel(new BorderLayout())
extensionsPane.add(ScrollPaneFactory.createScrollPane(extensionsList))
extensionsList.setEmptyText("Select library from the list above")
extensionsList.installCellRenderer { ext: ExtensionDescriptor =>
val ExtensionDescriptor(_, impl, name, description, _) = ext
val builder = new StringBuilder
if (name.nonEmpty) builder.append(name) else builder.append(impl)
if (description.nonEmpty) builder.append(s" - $description")
new JBLabel(builder.mkString)
}
val libraryListModel = new LibraryListModel(detailsModel)
val librariesList = new JBList[LibraryDescriptor](libraryListModel)
librariesList.setEmptyText("No known extension libraries")
librariesList.addListSelectionListener { event =>
val libraries = libraryExtensionsManager.getAvailableLibraries
val index = event.getFirstIndex
val newData = if (index != -1 && index < libraries.size) {
Some(libraries(index))
} else None
extensionsList.setModel(new LibraryDetailsModel(newData))
}
librariesList.installCellRenderer{ ld: LibraryDescriptor =>
val LibraryDescriptor(name, _, description, vendor, version, _) = ld
val builder = new StringBuilder
if (vendor.nonEmpty) builder.append(s"($vendor) ")
builder.append(s"$name $version")
if (description.nonEmpty) builder.append(s" - $description")
new JBLabel(builder.mkString)
}
val librariesPane = new JPanel(new BorderLayout())
librariesPane.add(ScrollPaneFactory.createScrollPane(librariesList))
val listsPane = new JBSplitter(true, 0.6f)
listsPane.setFirstComponent(librariesPane)
listsPane.setSecondComponent(extensionsPane)
UIUtil.addBorder(librariesPane,IdeBorderFactory.createTitledBorder("Known extension libraries", false))
UIUtil.addBorder(extensionsPane, IdeBorderFactory.createTitledBorder("Extensions in selected library", false))
enabledCB.addActionListener { _ =>
libraryExtensionsManager.setEnabled(enabledCB.isSelected)
val detailsModel = new LibraryDetailsModel(None)
val libraryListModel = new LibraryListModel(detailsModel)
extensionsList.setModel(detailsModel)
librariesList.setModel(libraryListModel)
UIUtil.setEnabled(listsPane, enabledCB.isSelected, true)
}
rootPanel.add(listsPane, BorderLayout.CENTER)
}
}
| jastice/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/components/libextensions/ui/LibExtensionsSettingsPanelWrapper.scala | Scala | apache-2.0 | 6,575 |
package org.jetbrains.plugins.scala
package lang
package psi
package api
package base
package patterns
import com.intellij.psi._
import com.intellij.psi.util.PsiModificationTracker
import com.intellij.psi.util.PsiModificationTracker._
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeVariableTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.expr.xml.ScXmlPattern
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScClassParameter, ScParameter, ScTypeParam}
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScFunction, ScValue, ScVariable}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.{ScClass, ScTemplateDefinition}
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiManager
import org.jetbrains.plugins.scala.lang.psi.impl.base.ScStableCodeReferenceElementImpl
import org.jetbrains.plugins.scala.lang.psi.types
import org.jetbrains.plugins.scala.lang.psi.types._
import org.jetbrains.plugins.scala.lang.psi.types.result.{Failure, Success, TypeResult, TypingContext}
import org.jetbrains.plugins.scala.lang.resolve._
import org.jetbrains.plugins.scala.lang.resolve.processor.{CompletionProcessor, ExpandedExtractorResolveProcessor}
import org.jetbrains.plugins.scala.macroAnnotations.{ModCount, CachedInsidePsiElement}
import org.jetbrains.plugins.scala.project.ScalaLanguageLevel.Scala_2_11
import org.jetbrains.plugins.scala.project._
import scala.annotation.tailrec
import scala.collection.immutable.Set
import scala.collection.mutable.ArrayBuffer
/**
* @author Alexander Podkhalyuzin
*/
trait ScPattern extends ScalaPsiElement {
def isIrrefutableFor(t: Option[ScType]): Boolean = false
def getType(ctx: TypingContext): TypeResult[ScType] = Failure("Cannot type pattern", Some(this))
def bindings: Seq[ScBindingPattern] = {
val b = new ArrayBuffer[ScBindingPattern]
def inner(p: ScPattern) {
p match {
case binding: ScBindingPattern => b += binding
case _ =>
}
for (sub <- p.subpatterns) {
inner(sub)
}
}
inner(this)
b
}
def typeVariables: Seq[ScTypeVariableTypeElement] = {
val b = new ArrayBuffer[ScTypeVariableTypeElement]
def inner(p: ScPattern) {
p match {
case ScTypedPattern(te) =>
te.accept(new ScalaRecursiveElementVisitor {
override def visitTypeVariableTypeElement(tvar: ScTypeVariableTypeElement): Unit = {
b += tvar
}
})
case _ =>
}
for (sub <- p.subpatterns) {
inner(sub)
}
}
inner(this)
b
}
override def accept(visitor: ScalaElementVisitor) {
visitor.visitPattern(this)
}
def subpatterns: Seq[ScPattern] = this match {
case _: ScReferencePattern => Seq.empty
case _ => findChildrenByClassScala[ScPattern](classOf[ScPattern])
}
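  // For a constructor pattern such as `case Some(x) =>`, the expected type of the sub-pattern `x`
  // is derived from the result type of the extractor's `unapply`/`unapplySeq`; that derivation is
  // what expectedTypeForExtractorArg below computes.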
private def expectedTypeForExtractorArg(ref: ScStableCodeReferenceElement,
argIndex: Int,
expected: Option[ScType],
totalNumberOfPatterns: Int): Option[ScType] = {
val bind: Option[ScalaResolveResult] = ref.bind() match {
case Some(ScalaResolveResult(_: ScBindingPattern | _: ScParameter, _)) =>
val resolve = ref match {
case refImpl: ScStableCodeReferenceElementImpl =>
refImpl.doResolve(refImpl, new ExpandedExtractorResolveProcessor(ref, ref.refName, ref.getKinds(incomplete = false), ref.getContext match {
case inf: ScInfixPattern => inf.expectedType
case constr: ScConstructorPattern => constr.expectedType
case _ => None
}))
}
if (resolve.length != 1) None
else {
resolve(0) match {
case s: ScalaResolveResult => Some(s)
case _ => None
}
}
case m => m
}
def calculateSubstitutor(_tp: ScType, funType: ScType, substitutor: ScSubstitutor): ScSubstitutor = {
val tp = _tp match {
case ex: ScExistentialType => ex.skolem
case _ => _tp
}
def rightWay: ScSubstitutor = {
val t = Conformance.conformsInner(tp, substitutor.subst(funType), Set.empty, new ScUndefinedSubstitutor)
if (t._1) {
val undefSubst = t._2
undefSubst.getSubstitutor match {
case Some(newSubst) => newSubst.followed(substitutor)
case _ => substitutor
}
} else substitutor
}
//todo: looks quite hacky to try another direction first, do you know better? see SCL-6543
val t = Conformance.conformsInner(substitutor.subst(funType), tp, Set.empty, new ScUndefinedSubstitutor)
if (t._1) {
val undefSubst = t._2
undefSubst.getSubstitutor match {
case Some(newSubst) => newSubst.followed(substitutor)
case _ => rightWay
}
} else rightWay
}
bind match {
case Some(ScalaResolveResult(fun: ScFunction, substitutor: ScSubstitutor)) if fun.name == "unapply" &&
fun.parameters.length == 1 =>
val subst = if (fun.typeParameters.isEmpty) substitutor else {
var undefSubst = fun.typeParameters.foldLeft(ScSubstitutor.empty) { (s, p) =>
s.bindT((p.name, ScalaPsiUtil.getPsiElementId(p)), ScUndefinedType(new ScTypeParameterType(p, substitutor)))
}
val clazz = ScalaPsiUtil.getContextOfType(this, true, classOf[ScTemplateDefinition])
clazz match {
case clazz: ScTemplateDefinition =>
undefSubst = undefSubst.followed(new ScSubstitutor(ScThisType(clazz)))
case _ =>
}
val firstParameterType = fun.parameters.head.getType(TypingContext.empty) match {
case Success(tp, _) => tp
case _ => return None
}
val funType = undefSubst.subst(firstParameterType)
expected match {
case Some(tp) => calculateSubstitutor(tp, funType, substitutor)
case _ => substitutor
}
}
fun.returnType match {
case Success(rt, _) =>
def updateRes(tp: ScType): ScType = {
val parameters: Seq[ScTypeParam] = fun.typeParameters
tp.recursiveVarianceUpdate {
case (tp: ScTypeParameterType, variance) if parameters.contains(tp.param) =>
(true, if (variance == -1) substitutor.subst(tp.lower.v)
else substitutor.subst(tp.upper.v))
case (typez, _) => (false, typez)
}
}
val subbedRetTp: ScType = subst.subst(rt)
if (subbedRetTp.equiv(lang.psi.types.Boolean)) None
else {
val args = ScPattern.extractorParameters(subbedRetTp, this, ScPattern.isOneArgCaseClassMethod(fun))
if (totalNumberOfPatterns == 1 && args.length > 1) Some(ScTupleType(args)(getProject, getResolveScope))
else if (argIndex < args.length) Some(updateRes(subst.subst(args(argIndex)).unpackedType))
else None
}
case _ => None
}
case Some(ScalaResolveResult(fun: ScFunction, substitutor: ScSubstitutor)) if fun.name == "unapplySeq" &&
fun.parameters.length == 1 =>
val subst = if (fun.typeParameters.isEmpty) substitutor else {
val undefSubst = substitutor followed fun.typeParameters.foldLeft(ScSubstitutor.empty) { (s, p) =>
s.bindT((p.name, ScalaPsiUtil.getPsiElementId(p)), ScUndefinedType(new ScTypeParameterType(p, substitutor)))
}
val firstParameterRetTp = fun.parameters.head.getType(TypingContext.empty) match {
case Success(tp, _) => tp
case _ => return None
}
val funType = undefSubst.subst(firstParameterRetTp)
expected match {
case Some(tp) => calculateSubstitutor(tp, funType, substitutor)
case _ => substitutor
}
}
fun.returnType match {
case Success(rt, _) =>
val args = ScPattern.extractorParameters(subst.subst(rt), this, ScPattern.isOneArgCaseClassMethod(fun))
if (args.isEmpty) return None
if (argIndex < args.length - 1) return Some(subst.subst(args(argIndex)))
val lastArg = args.last
(lastArg +: BaseTypes.get(lastArg)).find {
case ScParameterizedType(des, seqArgs) => seqArgs.length == 1 && ScType.extractClass(des).exists { clazz =>
clazz.qualifiedName == "scala.collection.Seq"
}
case _ => false
} match {
case Some(seq@ScParameterizedType(des, seqArgs)) =>
this match {
case n: ScNamingPattern if n.getLastChild.isInstanceOf[ScSeqWildcard] => Some(subst.subst(seq))
case _ => Some(subst.subst(seqArgs.head))
}
case _ => None
}
case _ => None
}
case _ => None
}
}
@CachedInsidePsiElement(this, ModCount.getBlockModificationCount)
def expectedType: Option[ScType] = getContext match {
case list : ScPatternList => list.getContext match {
case _var : ScVariable => _var.getType(TypingContext.empty).toOption
case _val : ScValue => _val.getType(TypingContext.empty).toOption
}
case argList : ScPatternArgumentList =>
argList.getContext match {
case constr : ScConstructorPattern =>
val thisIndex: Int = constr.args.patterns.indexWhere(_ == this)
expectedTypeForExtractorArg(constr.ref, thisIndex, constr.expectedType, argList.patterns.length)
case _ => None
}
case composite: ScCompositePattern => composite.expectedType
case infix: ScInfixPattern =>
val i =
if (infix.leftPattern == this) 0
else if (this.isInstanceOf[ScTuplePattern]) return None //this is handled elsewhere in this function
else 1
expectedTypeForExtractorArg(infix.reference, i, infix.expectedType, 2)
case par: ScParenthesisedPattern => par.expectedType
case patternList : ScPatterns => patternList.getContext match {
case tuple : ScTuplePattern =>
tuple.getContext match {
case infix: ScInfixPattern =>
if (infix.leftPattern != tuple) {
              // so this is the right-hand pattern of the infix pattern
val i = tuple.patternList match {
case Some(patterns: ScPatterns) => patterns.patterns.indexWhere(_ == this)
case _ => return None
}
val patternLength: Int = tuple.patternList match {
case Some(pat) => pat.patterns.length
case _ => -1 //is it possible to get here?
}
return expectedTypeForExtractorArg(infix.reference, i + 1, infix.expectedType, patternLength)
}
case _ =>
}
tuple.expectedType.flatMap {
case ScTupleType(comps) =>
for ((t, p) <- comps.iterator.zip(patternList.patterns.iterator)) {
if (p == this) return Some(t)
}
None
case et0 if et0 == types.AnyRef || et0 == types.Any => Some(types.Any)
case _ => None
}
case _: ScXmlPattern =>
val nodeClass: Option[PsiClass] = ScalaPsiManager.instance(getProject).getCachedClass(getResolveScope, "scala.xml.Node")
nodeClass.flatMap { nodeClass =>
this match {
case n: ScNamingPattern if n.getLastChild.isInstanceOf[ScSeqWildcard] =>
val seqClass: Option[PsiClass] =
ScalaPsiManager.instance(getProject).getCachedClass(getResolveScope, "scala.collection.Seq")
seqClass.map { seqClass =>
ScParameterizedType(ScDesignatorType(seqClass), Seq(ScDesignatorType(nodeClass)))
}
case _ => Some(ScDesignatorType(nodeClass))
}
}
case _ => None
}
case clause: ScCaseClause => clause.getContext/*clauses*/.getContext match {
case matchStat : ScMatchStmt => matchStat.expr match {
case Some(e) => Some(e.getType(TypingContext.empty).getOrAny)
case _ => None
}
case b: ScBlockExpr if b.getContext.isInstanceOf[ScCatchBlock] =>
val thr = ScalaPsiManager.instance(getProject).getCachedClass(getResolveScope, "java.lang.Throwable")
thr.map(ScType.designator(_))
case b : ScBlockExpr =>
b.expectedType(fromUnderscore = false) match {
case Some(et) =>
et.removeAbstracts match {
case ScFunctionType(_, Seq()) => Some(types.Unit)
case ScFunctionType(_, Seq(p0)) => Some(p0)
case ScFunctionType(_, params) =>
val tt = ScTupleType(params)(getProject, getResolveScope)
Some(tt)
case ScPartialFunctionType(_, param) => Some(param)
case _ => None
}
case None => None
}
case _ => None
}
case named: ScNamingPattern => named.expectedType
case gen: ScGenerator =>
val analog = getAnalog
if (analog != this) analog.expectedType
else None
case enum: ScEnumerator =>
Option(enum.rvalue).flatMap { rvalue =>
rvalue.getType(TypingContext.empty).toOption
}
case _ => None
}
def getAnalog: ScPattern = {
getContext match {
case gen: ScGenerator =>
val f: ScForStatement = gen.getContext.getContext match {
case fr: ScForStatement => fr
case _ => return this
}
f.getDesugarizedExpr match {
case Some(expr) =>
if (analog != null) return analog
case _ =>
}
this
case _ => this
}
}
var desugarizedPatternIndex = -1
var analog: ScPattern = null
}
object ScPattern {
def isOneArgCaseClassMethod(fun: ScFunction): Boolean = {
fun.syntheticCaseClass match {
case Some(c: ScClass) => c.constructor.exists(_.effectiveFirstParameterSection.length == 1)
case _ => false
}
}
private def findMember(name: String, tp: ScType, place: PsiElement): Option[ScType] = {
val cp = new CompletionProcessor(StdKinds.methodRef, place, forName = Some(name))
cp.processType(tp, place)
cp.candidatesS.flatMap {
case ScalaResolveResult(fun: ScFunction, subst) if fun.parameters.isEmpty && fun.name == name =>
Seq(subst.subst(fun.returnType.getOrAny))
case ScalaResolveResult(b: ScBindingPattern, subst) if b.name == name =>
Seq(subst.subst(b.getType(TypingContext.empty).getOrAny))
case ScalaResolveResult(param: ScClassParameter, subst) if param.name == name =>
Seq(subst.subst(param.getType(TypingContext.empty).getOrAny))
case _ => Seq.empty
}.headOption
}
private def extractPossibleProductParts(receiverType: ScType, place: PsiElement, isOneArgCaseClass: Boolean): Seq[ScType] = {
val res: ArrayBuffer[ScType] = new ArrayBuffer[ScType]()
@tailrec
def collect(i: Int) {
findMember(s"_$i", receiverType, place) match {
case Some(tp) if !isOneArgCaseClass =>
res += tp
collect(i + 1)
case _ =>
if (i == 1) res += receiverType
}
}
collect(1)
res.toSeq
}
def extractProductParts(tp: ScType, place: PsiElement): Seq[ScType] = {
extractPossibleProductParts(tp, place, isOneArgCaseClass = false)
}
def expectedNumberOfExtractorArguments(returnType: ScType, place: PsiElement, isOneArgCaseClass: Boolean): Int =
extractorParameters(returnType, place, isOneArgCaseClass).size
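  // Illustrative summary: for an extractor `def unapply(x: T): Option[(A, B)]` this yields
  // Seq(A, B); on Scala 2.11+ it also handles name-based extractors by probing `isEmpty`, `get`
  // and the `_1`, `_2`, ... members of the type returned by `get`.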
def extractorParameters(returnType: ScType, place: PsiElement, isOneArgCaseClass: Boolean): Seq[ScType] = {
def collectFor2_11: Seq[ScType] = {
findMember("isEmpty", returnType, place) match {
case Some(tp) if types.Boolean.equiv(tp) =>
case _ => return Seq.empty
}
val receiverType = findMember("get", returnType, place).getOrElse(return Seq.empty)
extractPossibleProductParts(receiverType, place, isOneArgCaseClass)
}
val level = place.languageLevel
if (level >= Scala_2_11) collectFor2_11
else {
returnType match {
case ScParameterizedType(des, args) =>
ScType.extractClass(des) match {
case Some(clazz) if clazz.qualifiedName == "scala.Option" ||
clazz.qualifiedName == "scala.Some" =>
if (args.length == 1) {
def checkProduct(tp: ScType): Seq[ScType] = {
val productChance = collectFor2_11
if (productChance.length <= 1) Seq(tp)
else {
val productFqn = "scala.Product" + productChance.length
(for {
productClass <- ScalaPsiManager.instance(place.getProject).getCachedClass(place.getResolveScope, productFqn)
clazz <- ScType.extractClass(tp, Some(place.getProject))
} yield clazz == productClass || clazz.isInheritor(productClass, true)).
filter(identity).fold(Seq(tp))(_ => productChance)
}
}
args.head match {
case tp if isOneArgCaseClass => Seq(tp)
case ScTupleType(comps) => comps
case tp => checkProduct(tp)
}
} else Seq.empty
case _ => Seq.empty
}
case _ => Seq.empty
}
}
}
} | JetBrains/intellij-scala-historical | src/org/jetbrains/plugins/scala/lang/psi/api/base/patterns/ScPattern.scala | Scala | apache-2.0 | 17,824 |
/* Copyright (C) 2008-2014 University of Massachusetts Amherst.
This file is part of "FACTORIE" (Factor graphs, Imperative, Extensible)
http://factorie.cs.umass.edu, http://github.com/factorie
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
package cc.factorie.util
/** New functionality on String instances, available by implicit conversion in the cc.factorie.factorie package object. */
class StringExtras(val s: String) extends AnyVal {
def toIntSafe: Option[Int] = try { Some(s.toInt) } catch { case _: Throwable => None }
def toDoubleSafe: Option[Double] = try { Some(s.toDouble) } catch { case _: Throwable => None }
def skipUntil(r:scala.util.matching.Regex): String = {
r.findFirstMatchIn(s) match {
case Some(m:scala.util.matching.Regex.Match) => s.substring(m.start)
case None => s
}
}
/** Return a new string that removes everything before a double newline.
Useful for skipping newsgroup headers or email headers in plain text documents. */
def skipHeader = skipUntil("\\n\\n".r)
/** Implements Levenshtein Distance, with specific operation costs to go from this String to String s2. */
def editDistance(s2: String, substCost: Int = 1, deleteCost: Int = 1, insertCost: Int = 1): Int = {
if (s.length == 0) s2.length
else if (s2.length == 0) s.length
else {
val d = Array.ofDim[Int](s.length + 1, s2.length + 1)
for (i <- 0 to s.length)
d(i)(0) = i * deleteCost
for (i <- 0 to s2.length)
d(0)(i) = i * insertCost
for (i <- 1 to s.length; j <- 1 to s2.length) {
val cost = if (s(i - 1) == s2(j - 1)) 0 else substCost
d(i)(j) = math.min(d(i - 1)(j) + deleteCost, math.min(d(i)(j - 1) + insertCost, d(i - 1)(j - 1) + cost))
}
d(s.length)(s2.length)
}
}
}
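/** Illustrative usage sketch (added for exposition, not part of the original FACTORIE source).
  * It calls the value class directly, since the implicit conversion that normally provides these
  * methods lives in the cc.factorie package object. */
private object StringExtrasUsageSketch {
  def demo(): Unit = {
    // "kitten" -> "sitting": substitute 'k'->'s', substitute 'e'->'i', insert 'g' => distance 3
    assert(new StringExtras("kitten").editDistance("sitting") == 3)
    // Safe numeric parsing returns an Option instead of throwing
    assert(new StringExtras("42").toIntSafe == Some(42))
    assert(new StringExtras("4x").toIntSafe.isEmpty)
  }
}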
| hlin117/factorie | src/main/scala/cc/factorie/util/StringExtras.scala | Scala | apache-2.0 | 2,303 |
/**
* Copyright (c) 2012 Petr Kozelek <[email protected]>
*
* The full copyright and license information is presented
* in the file LICENSE that was distributed with this source code.
*/
package mql.gui.comparer
object Utilities {
def resource(name: String) = ClassLoader.getSystemClassLoader.getResource(name)
} | footcha/MQL | src/main/scala/mql/gui/comparer/Utilities.scala | Scala | bsd-3-clause | 325 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark
import org.apache.spark.api.r.RBackend
object SparkRBackend {
val backend : RBackend = new RBackend()
  private var started = false
  private var portNumber = 0
val backendThread : Thread = new Thread("SparkRBackend") {
override def run() {
backend.run()
}
}
def init() : Int = {
portNumber = backend.init()
portNumber
}
def start() : Unit = {
backendThread.start()
started = true
}
def close() : Unit = {
backend.close()
backendThread.join()
}
def isStarted() : Boolean = {
started
}
  def port(): Int = {
    portNumber
  }
}
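// Usage sketch (illustrative, not part of the original Zeppelin source). The expected lifecycle is:
//
//   val port = SparkRBackend.init()   // pick a port for the R backend
//   SparkRBackend.start()             // serve R connections on the background thread
//   // ... launch the R process with `port` so it can connect back ...
//   SparkRBackend.close()             // shut the backend down and join the thread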
| 1ambda/zeppelin | spark/src/main/scala/org/apache/spark/SparkRBackend.scala | Scala | apache-2.0 | 1,435 |
package models.submission
import
scala.concurrent.duration._
import
models.filemanager.FileManager
/**
* Created with IntelliJ IDEA.
* User: Jason
* Date: 11/1/12
* Time: 1:52 PM
*/
object SubmissionFileManager extends FileManager {
override def MyFolderName = "uploads"
override protected def LifeSpanOpt = None
override protected def SystemName = "SubmissionFiles"
def formatFilePath(fileNameBasis: String, bundle: TypeBundle): String =
s"$MyFolderName/${bundle.name}/$fileNameBasis.${bundle.fileExtension}"
def registerFile(contents: Array[Byte], fileNameBasis: String, bundle: TypeBundle): String = {
val filename = formatFilePath(fileNameBasis, bundle)
saveFile(contents, filename) dropWhile (_ != '/') drop 1 // Toss out the "assets/"
}
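  // For example, with a hypothetical TypeBundle whose name is "nlogo" and fileExtension is "nlogo"
  // (field names taken from the calls above), formatFilePath("abc123", bundle) evaluates to
  // "uploads/nlogo/abc123.nlogo"; registerFile saves the bytes there and returns the path reported
  // by saveFile with its leading "assets/" segment removed.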
}
| NetLogo/SimServer | app/models/submission/SubmissionFileManager.scala | Scala | gpl-2.0 | 799 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, DataOutput, DataOutputStream, File,
FileOutputStream, PrintStream}
import java.lang.{Double => JDouble, Float => JFloat}
import java.net.{BindException, ServerSocket, URI}
import java.nio.{ByteBuffer, ByteOrder}
import java.nio.charset.StandardCharsets
import java.text.DecimalFormatSymbols
import java.util.Locale
import java.util.concurrent.TimeUnit
import java.util.zip.GZIPOutputStream
import scala.collection.mutable.ListBuffer
import scala.util.Random
import com.google.common.io.Files
import org.apache.commons.io.IOUtils
import org.apache.commons.lang3.SystemUtils
import org.apache.commons.math3.stat.inference.ChiSquareTest
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.spark.{SparkConf, SparkException, SparkFunSuite, TaskContext}
import org.apache.spark.internal.Logging
import org.apache.spark.network.util.ByteUnit
import org.apache.spark.scheduler.SparkListener
class UtilsSuite extends SparkFunSuite with ResetSystemProperties with Logging {
test("truncatedString") {
assert(Utils.truncatedString(Nil, "[", ", ", "]", 2) == "[]")
assert(Utils.truncatedString(Seq(1, 2), "[", ", ", "]", 2) == "[1, 2]")
assert(Utils.truncatedString(Seq(1, 2, 3), "[", ", ", "]", 2) == "[1, ... 2 more fields]")
assert(Utils.truncatedString(Seq(1, 2, 3), "[", ", ", "]", -5) == "[, ... 3 more fields]")
assert(Utils.truncatedString(Seq(1, 2, 3), ", ") == "1, 2, 3")
}
test("timeConversion") {
// Test -1
assert(Utils.timeStringAsSeconds("-1") === -1)
// Test zero
assert(Utils.timeStringAsSeconds("0") === 0)
assert(Utils.timeStringAsSeconds("1") === 1)
assert(Utils.timeStringAsSeconds("1s") === 1)
assert(Utils.timeStringAsSeconds("1000ms") === 1)
assert(Utils.timeStringAsSeconds("1000000us") === 1)
assert(Utils.timeStringAsSeconds("1m") === TimeUnit.MINUTES.toSeconds(1))
assert(Utils.timeStringAsSeconds("1min") === TimeUnit.MINUTES.toSeconds(1))
assert(Utils.timeStringAsSeconds("1h") === TimeUnit.HOURS.toSeconds(1))
assert(Utils.timeStringAsSeconds("1d") === TimeUnit.DAYS.toSeconds(1))
assert(Utils.timeStringAsMs("1") === 1)
assert(Utils.timeStringAsMs("1ms") === 1)
assert(Utils.timeStringAsMs("1000us") === 1)
assert(Utils.timeStringAsMs("1s") === TimeUnit.SECONDS.toMillis(1))
assert(Utils.timeStringAsMs("1m") === TimeUnit.MINUTES.toMillis(1))
assert(Utils.timeStringAsMs("1min") === TimeUnit.MINUTES.toMillis(1))
assert(Utils.timeStringAsMs("1h") === TimeUnit.HOURS.toMillis(1))
assert(Utils.timeStringAsMs("1d") === TimeUnit.DAYS.toMillis(1))
// Test invalid strings
intercept[NumberFormatException] {
Utils.timeStringAsMs("600l")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("This breaks 600s")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("This breaks 600ds")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("600s This breaks")
}
intercept[NumberFormatException] {
Utils.timeStringAsMs("This 123s breaks")
}
}
test("Test byteString conversion") {
// Test zero
assert(Utils.byteStringAsBytes("0") === 0)
assert(Utils.byteStringAsGb("1") === 1)
assert(Utils.byteStringAsGb("1g") === 1)
assert(Utils.byteStringAsGb("1023m") === 0)
assert(Utils.byteStringAsGb("1024m") === 1)
assert(Utils.byteStringAsGb("1048575k") === 0)
assert(Utils.byteStringAsGb("1048576k") === 1)
assert(Utils.byteStringAsGb("1k") === 0)
assert(Utils.byteStringAsGb("1t") === ByteUnit.TiB.toGiB(1))
assert(Utils.byteStringAsGb("1p") === ByteUnit.PiB.toGiB(1))
assert(Utils.byteStringAsMb("1") === 1)
assert(Utils.byteStringAsMb("1m") === 1)
assert(Utils.byteStringAsMb("1048575b") === 0)
assert(Utils.byteStringAsMb("1048576b") === 1)
assert(Utils.byteStringAsMb("1023k") === 0)
assert(Utils.byteStringAsMb("1024k") === 1)
assert(Utils.byteStringAsMb("3645k") === 3)
assert(Utils.byteStringAsMb("1024gb") === 1048576)
assert(Utils.byteStringAsMb("1g") === ByteUnit.GiB.toMiB(1))
assert(Utils.byteStringAsMb("1t") === ByteUnit.TiB.toMiB(1))
assert(Utils.byteStringAsMb("1p") === ByteUnit.PiB.toMiB(1))
assert(Utils.byteStringAsKb("1") === 1)
assert(Utils.byteStringAsKb("1k") === 1)
assert(Utils.byteStringAsKb("1m") === ByteUnit.MiB.toKiB(1))
assert(Utils.byteStringAsKb("1g") === ByteUnit.GiB.toKiB(1))
assert(Utils.byteStringAsKb("1t") === ByteUnit.TiB.toKiB(1))
assert(Utils.byteStringAsKb("1p") === ByteUnit.PiB.toKiB(1))
assert(Utils.byteStringAsBytes("1") === 1)
assert(Utils.byteStringAsBytes("1k") === ByteUnit.KiB.toBytes(1))
assert(Utils.byteStringAsBytes("1m") === ByteUnit.MiB.toBytes(1))
assert(Utils.byteStringAsBytes("1g") === ByteUnit.GiB.toBytes(1))
assert(Utils.byteStringAsBytes("1t") === ByteUnit.TiB.toBytes(1))
assert(Utils.byteStringAsBytes("1p") === ByteUnit.PiB.toBytes(1))
// Overflow handling, 1073741824p exceeds Long.MAX_VALUE if converted straight to Bytes
// This demonstrates that we can have e.g 1024^3 PB without overflowing.
assert(Utils.byteStringAsGb("1073741824p") === ByteUnit.PiB.toGiB(1073741824))
assert(Utils.byteStringAsMb("1073741824p") === ByteUnit.PiB.toMiB(1073741824))
// Run this to confirm it doesn't throw an exception
assert(Utils.byteStringAsBytes("9223372036854775807") === 9223372036854775807L)
assert(ByteUnit.PiB.toPiB(9223372036854775807L) === 9223372036854775807L)
// Test overflow exception
intercept[IllegalArgumentException] {
// This value exceeds Long.MAX when converted to bytes
Utils.byteStringAsBytes("9223372036854775808")
}
// Test overflow exception
intercept[IllegalArgumentException] {
// This value exceeds Long.MAX when converted to TB
ByteUnit.PiB.toTiB(9223372036854775807L)
}
// Test fractional string
intercept[NumberFormatException] {
Utils.byteStringAsMb("0.064")
}
// Test fractional string
intercept[NumberFormatException] {
Utils.byteStringAsMb("0.064m")
}
// Test invalid strings
intercept[NumberFormatException] {
Utils.byteStringAsBytes("500ub")
}
// Test invalid strings
intercept[NumberFormatException] {
Utils.byteStringAsBytes("This breaks 600b")
}
intercept[NumberFormatException] {
Utils.byteStringAsBytes("This breaks 600")
}
intercept[NumberFormatException] {
Utils.byteStringAsBytes("600gb This breaks")
}
intercept[NumberFormatException] {
Utils.byteStringAsBytes("This 123mb breaks")
}
}
test("bytesToString") {
assert(Utils.bytesToString(10) === "10.0 B")
assert(Utils.bytesToString(1500) === "1500.0 B")
assert(Utils.bytesToString(2000000) === "1953.1 KB")
assert(Utils.bytesToString(2097152) === "2.0 MB")
assert(Utils.bytesToString(2306867) === "2.2 MB")
assert(Utils.bytesToString(5368709120L) === "5.0 GB")
assert(Utils.bytesToString(5L * (1L << 40)) === "5.0 TB")
assert(Utils.bytesToString(5L * (1L << 50)) === "5.0 PB")
assert(Utils.bytesToString(5L * (1L << 60)) === "5.0 EB")
assert(Utils.bytesToString(BigInt(1L << 11) * (1L << 60)) === "2.36E+21 B")
}
test("copyStream") {
// input array initialization
val bytes = Array.ofDim[Byte](9000)
Random.nextBytes(bytes)
val os = new ByteArrayOutputStream()
Utils.copyStream(new ByteArrayInputStream(bytes), os)
assert(os.toByteArray.toList.equals(bytes.toList))
}
test("memoryStringToMb") {
assert(Utils.memoryStringToMb("1") === 0)
assert(Utils.memoryStringToMb("1048575") === 0)
assert(Utils.memoryStringToMb("3145728") === 3)
assert(Utils.memoryStringToMb("1024k") === 1)
assert(Utils.memoryStringToMb("5000k") === 4)
assert(Utils.memoryStringToMb("4024k") === Utils.memoryStringToMb("4024K"))
assert(Utils.memoryStringToMb("1024m") === 1024)
assert(Utils.memoryStringToMb("5000m") === 5000)
assert(Utils.memoryStringToMb("4024m") === Utils.memoryStringToMb("4024M"))
assert(Utils.memoryStringToMb("2g") === 2048)
assert(Utils.memoryStringToMb("3g") === Utils.memoryStringToMb("3G"))
assert(Utils.memoryStringToMb("2t") === 2097152)
assert(Utils.memoryStringToMb("3t") === Utils.memoryStringToMb("3T"))
}
test("splitCommandString") {
assert(Utils.splitCommandString("") === Seq())
assert(Utils.splitCommandString("a") === Seq("a"))
assert(Utils.splitCommandString("aaa") === Seq("aaa"))
assert(Utils.splitCommandString("a b c") === Seq("a", "b", "c"))
    assert(Utils.splitCommandString(" a b\t c ") === Seq("a", "b", "c"))
assert(Utils.splitCommandString("a 'b c'") === Seq("a", "b c"))
assert(Utils.splitCommandString("a 'b c' d") === Seq("a", "b c", "d"))
assert(Utils.splitCommandString("'b c'") === Seq("b c"))
    assert(Utils.splitCommandString("a \"b c\"") === Seq("a", "b c"))
    assert(Utils.splitCommandString("a \"b c\" d") === Seq("a", "b c", "d"))
    assert(Utils.splitCommandString("\"b c\"") === Seq("b c"))
    assert(Utils.splitCommandString("a 'b\" c' \"d' e\"") === Seq("a", "b\" c", "d' e"))
    assert(Utils.splitCommandString("a\t'b\nc'\nd") === Seq("a", "b\nc", "d"))
    assert(Utils.splitCommandString("a \"b\\\\c\"") === Seq("a", "b\\c"))
    assert(Utils.splitCommandString("a \"b\\\"c\"") === Seq("a", "b\"c"))
    assert(Utils.splitCommandString("a 'b\\\"c'") === Seq("a", "b\\\"c"))
assert(Utils.splitCommandString("'a'b") === Seq("ab"))
assert(Utils.splitCommandString("'a''b'") === Seq("ab"))
    assert(Utils.splitCommandString("\"a\"b") === Seq("ab"))
    assert(Utils.splitCommandString("\"a\"\"b\"") === Seq("ab"))
assert(Utils.splitCommandString("''") === Seq(""))
    assert(Utils.splitCommandString("\"\"") === Seq(""))
}
test("string formatting of time durations") {
val second = 1000
val minute = second * 60
val hour = minute * 60
def str: (Long) => String = Utils.msDurationToString(_)
val sep = new DecimalFormatSymbols(Locale.US).getDecimalSeparator
assert(str(123) === "123 ms")
assert(str(second) === "1" + sep + "0 s")
assert(str(second + 462) === "1" + sep + "5 s")
assert(str(hour) === "1" + sep + "00 h")
assert(str(minute) === "1" + sep + "0 m")
assert(str(minute + 4 * second + 34) === "1" + sep + "1 m")
assert(str(10 * hour + minute + 4 * second) === "10" + sep + "02 h")
assert(str(10 * hour + 59 * minute + 59 * second + 999) === "11" + sep + "00 h")
}
def getSuffix(isCompressed: Boolean): String = {
if (isCompressed) {
".gz"
} else {
""
}
}
def writeLogFile(path: String, content: Array[Byte]): Unit = {
val outputStream = if (path.endsWith(".gz")) {
new GZIPOutputStream(new FileOutputStream(path))
} else {
new FileOutputStream(path)
}
IOUtils.write(content, outputStream)
outputStream.close()
content.size
}
private val workerConf = new SparkConf()
def testOffsetBytes(isCompressed: Boolean): Unit = {
val tmpDir2 = Utils.createTempDir()
val suffix = getSuffix(isCompressed)
val f1Path = tmpDir2 + "/f1" + suffix
    writeLogFile(f1Path, "1\n2\n3\n4\n5\n6\n7\n8\n9\n".getBytes(StandardCharsets.UTF_8))
val f1Length = Utils.getFileLength(new File(f1Path), workerConf)
// Read first few bytes
    assert(Utils.offsetBytes(f1Path, f1Length, 0, 5) === "1\n2\n3")
    // Read some middle bytes
    assert(Utils.offsetBytes(f1Path, f1Length, 4, 11) === "3\n4\n5\n6")
    // Read last few bytes
    assert(Utils.offsetBytes(f1Path, f1Length, 12, 18) === "7\n8\n9\n")
    // Read some nonexistent bytes in the beginning
    assert(Utils.offsetBytes(f1Path, f1Length, -5, 5) === "1\n2\n3")
    // Read some nonexistent bytes at the end
    assert(Utils.offsetBytes(f1Path, f1Length, 12, 22) === "7\n8\n9\n")
    // Read some nonexistent bytes on both ends
    assert(Utils.offsetBytes(f1Path, f1Length, -3, 25) === "1\n2\n3\n4\n5\n6\n7\n8\n9\n")
Utils.deleteRecursively(tmpDir2)
}
test("reading offset bytes of a file") {
testOffsetBytes(isCompressed = false)
}
test("reading offset bytes of a file (compressed)") {
testOffsetBytes(isCompressed = true)
}
def testOffsetBytesMultipleFiles(isCompressed: Boolean): Unit = {
val tmpDir = Utils.createTempDir()
val suffix = getSuffix(isCompressed)
val files = (1 to 3).map(i => new File(tmpDir, i.toString + suffix)) :+ new File(tmpDir, "4")
writeLogFile(files(0).getAbsolutePath, "0123456789".getBytes(StandardCharsets.UTF_8))
writeLogFile(files(1).getAbsolutePath, "abcdefghij".getBytes(StandardCharsets.UTF_8))
writeLogFile(files(2).getAbsolutePath, "ABCDEFGHIJ".getBytes(StandardCharsets.UTF_8))
writeLogFile(files(3).getAbsolutePath, "9876543210".getBytes(StandardCharsets.UTF_8))
val fileLengths = files.map(Utils.getFileLength(_, workerConf))
// Read first few bytes in the 1st file
assert(Utils.offsetBytes(files, fileLengths, 0, 5) === "01234")
// Read bytes within the 1st file
assert(Utils.offsetBytes(files, fileLengths, 5, 8) === "567")
// Read bytes across 1st and 2nd file
assert(Utils.offsetBytes(files, fileLengths, 8, 18) === "89abcdefgh")
// Read bytes across 1st, 2nd and 3rd file
assert(Utils.offsetBytes(files, fileLengths, 5, 24) === "56789abcdefghijABCD")
// Read bytes across 3rd and 4th file
assert(Utils.offsetBytes(files, fileLengths, 25, 35) === "FGHIJ98765")
// Read some nonexistent bytes in the beginning
assert(Utils.offsetBytes(files, fileLengths, -5, 18) === "0123456789abcdefgh")
// Read some nonexistent bytes at the end
assert(Utils.offsetBytes(files, fileLengths, 18, 45) === "ijABCDEFGHIJ9876543210")
// Read some nonexistent bytes on both ends
assert(Utils.offsetBytes(files, fileLengths, -5, 45) ===
"0123456789abcdefghijABCDEFGHIJ9876543210")
Utils.deleteRecursively(tmpDir)
}
test("reading offset bytes across multiple files") {
testOffsetBytesMultipleFiles(isCompressed = false)
}
test("reading offset bytes across multiple files (compressed)") {
testOffsetBytesMultipleFiles(isCompressed = true)
}
test("deserialize long value") {
val testval : Long = 9730889947L
val bbuf = ByteBuffer.allocate(8)
assert(bbuf.hasArray)
bbuf.order(ByteOrder.BIG_ENDIAN)
bbuf.putLong(testval)
assert(bbuf.array.length === 8)
assert(Utils.deserializeLongValue(bbuf.array) === testval)
}
test("writeByteBuffer should not change ByteBuffer position") {
// Test a buffer with an underlying array, for both writeByteBuffer methods.
val testBuffer = ByteBuffer.wrap(Array[Byte](1, 2, 3, 4))
assert(testBuffer.hasArray)
val bytesOut = new ByteBufferOutputStream(4096)
Utils.writeByteBuffer(testBuffer, bytesOut)
assert(testBuffer.position() === 0)
val dataOut = new DataOutputStream(bytesOut)
Utils.writeByteBuffer(testBuffer, dataOut: DataOutput)
assert(testBuffer.position() === 0)
// Test a buffer without an underlying array, for both writeByteBuffer methods.
val testDirectBuffer = ByteBuffer.allocateDirect(8)
assert(!testDirectBuffer.hasArray())
Utils.writeByteBuffer(testDirectBuffer, bytesOut)
assert(testDirectBuffer.position() === 0)
Utils.writeByteBuffer(testDirectBuffer, dataOut: DataOutput)
assert(testDirectBuffer.position() === 0)
}
test("get iterator size") {
val empty = Seq[Int]()
assert(Utils.getIteratorSize(empty.toIterator) === 0L)
val iterator = Iterator.range(0, 5)
assert(Utils.getIteratorSize(iterator) === 5L)
}
test("getIteratorZipWithIndex") {
val iterator = Utils.getIteratorZipWithIndex(Iterator(0, 1, 2), -1L + Int.MaxValue)
assert(iterator.toArray === Array(
(0, -1L + Int.MaxValue), (1, 0L + Int.MaxValue), (2, 1L + Int.MaxValue)
))
intercept[IllegalArgumentException] {
Utils.getIteratorZipWithIndex(Iterator(0, 1, 2), -1L)
}
}
test("doesDirectoryContainFilesNewerThan") {
// create some temporary directories and files
val parent: File = Utils.createTempDir()
// The parent directory has two child directories
val child1: File = Utils.createTempDir(parent.getCanonicalPath)
val child2: File = Utils.createTempDir(parent.getCanonicalPath)
val child3: File = Utils.createTempDir(child1.getCanonicalPath)
// set the last modified time of child1 to 30 secs old
child1.setLastModified(System.currentTimeMillis() - (1000 * 30))
// although child1 is old, child2 is still new so return true
assert(Utils.doesDirectoryContainAnyNewFiles(parent, 5))
child2.setLastModified(System.currentTimeMillis - (1000 * 30))
assert(Utils.doesDirectoryContainAnyNewFiles(parent, 5))
parent.setLastModified(System.currentTimeMillis - (1000 * 30))
// although parent and its immediate children are new, child3 is still old
// we expect a full recursive search for new files.
assert(Utils.doesDirectoryContainAnyNewFiles(parent, 5))
child3.setLastModified(System.currentTimeMillis - (1000 * 30))
assert(!Utils.doesDirectoryContainAnyNewFiles(parent, 5))
}
test("resolveURI") {
def assertResolves(before: String, after: String): Unit = {
// This should test only single paths
assert(before.split(",").length === 1)
def resolve(uri: String): String = Utils.resolveURI(uri).toString
assert(resolve(before) === after)
assert(resolve(after) === after)
// Repeated invocations of resolveURI should yield the same result
assert(resolve(resolve(after)) === after)
assert(resolve(resolve(resolve(after))) === after)
}
val rawCwd = System.getProperty("user.dir")
    val cwd = if (Utils.isWindows) s"/$rawCwd".replace("\\", "/") else rawCwd
assertResolves("hdfs:/root/spark.jar", "hdfs:/root/spark.jar")
assertResolves("hdfs:///root/spark.jar#app.jar", "hdfs:///root/spark.jar#app.jar")
assertResolves("spark.jar", s"file:$cwd/spark.jar")
assertResolves("spark.jar#app.jar", s"file:$cwd/spark.jar#app.jar")
assertResolves("path to/file.txt", s"file:$cwd/path%20to/file.txt")
if (Utils.isWindows) {
      assertResolves("C:\\path\\to\\file.txt", "file:/C:/path/to/file.txt")
      assertResolves("C:\\path to\\file.txt", "file:/C:/path%20to/file.txt")
}
assertResolves("file:/C:/path/to/file.txt", "file:/C:/path/to/file.txt")
assertResolves("file:///C:/path/to/file.txt", "file:///C:/path/to/file.txt")
assertResolves("file:/C:/file.txt#alias.txt", "file:/C:/file.txt#alias.txt")
assertResolves("file:foo", "file:foo")
assertResolves("file:foo:baby", "file:foo:baby")
}
test("resolveURIs with multiple paths") {
def assertResolves(before: String, after: String): Unit = {
def resolve(uri: String): String = Utils.resolveURIs(uri)
assert(resolve(before) === after)
assert(resolve(after) === after)
// Repeated invocations of resolveURIs should yield the same result
assert(resolve(resolve(after)) === after)
assert(resolve(resolve(resolve(after))) === after)
}
val rawCwd = System.getProperty("user.dir")
    val cwd = if (Utils.isWindows) s"/$rawCwd".replace("\\", "/") else rawCwd
assertResolves("jar1,jar2", s"file:$cwd/jar1,file:$cwd/jar2")
assertResolves("file:/jar1,file:/jar2", "file:/jar1,file:/jar2")
assertResolves("hdfs:/jar1,file:/jar2,jar3", s"hdfs:/jar1,file:/jar2,file:$cwd/jar3")
assertResolves("hdfs:/jar1,file:/jar2,jar3,jar4#jar5,path to/jar6",
s"hdfs:/jar1,file:/jar2,file:$cwd/jar3,file:$cwd/jar4#jar5,file:$cwd/path%20to/jar6")
if (Utils.isWindows) {
      assertResolves("""hdfs:/jar1,file:/jar2,jar3,C:\pi.py#py.pi,C:\path to\jar4""",
s"hdfs:/jar1,file:/jar2,file:$cwd/jar3,file:/C:/pi.py%23py.pi,file:/C:/path%20to/jar4")
}
assertResolves(",jar1,jar2", s"file:$cwd/jar1,file:$cwd/jar2")
// Also test resolveURIs with single paths
assertResolves("hdfs:/root/spark.jar", "hdfs:/root/spark.jar")
}
test("nonLocalPaths") {
assert(Utils.nonLocalPaths("spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("file:/spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("file:///spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("local:/spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("local:///spark.jar") === Array.empty)
assert(Utils.nonLocalPaths("hdfs:/spark.jar") === Array("hdfs:/spark.jar"))
assert(Utils.nonLocalPaths("hdfs:///spark.jar") === Array("hdfs:///spark.jar"))
assert(Utils.nonLocalPaths("file:/spark.jar,local:/smart.jar,family.py") === Array.empty)
assert(Utils.nonLocalPaths("local:/spark.jar,file:/smart.jar,family.py") === Array.empty)
assert(Utils.nonLocalPaths("hdfs:/spark.jar,s3:/smart.jar") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
assert(Utils.nonLocalPaths("hdfs:/spark.jar,path to/a.jar,s3:/smart.jar") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
assert(Utils.nonLocalPaths("hdfs:/spark.jar,s3:/smart.jar,local.py,file:/hello/pi.py") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
assert(Utils.nonLocalPaths("local.py,hdfs:/spark.jar,file:/hello/pi.py,s3:/smart.jar") ===
Array("hdfs:/spark.jar", "s3:/smart.jar"))
// Test Windows paths
assert(Utils.nonLocalPaths("C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("file:/C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("file:///C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("local:/C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("local:///C:/some/path.jar", testWindows = true) === Array.empty)
assert(Utils.nonLocalPaths("hdfs:/a.jar,C:/my.jar,s3:/another.jar", testWindows = true) ===
Array("hdfs:/a.jar", "s3:/another.jar"))
assert(Utils.nonLocalPaths("D:/your.jar,hdfs:/a.jar,s3:/another.jar", testWindows = true) ===
Array("hdfs:/a.jar", "s3:/another.jar"))
assert(Utils.nonLocalPaths("hdfs:/a.jar,s3:/another.jar,e:/our.jar", testWindows = true) ===
Array("hdfs:/a.jar", "s3:/another.jar"))
}
test("isBindCollision") {
// Negatives
assert(!Utils.isBindCollision(null))
assert(!Utils.isBindCollision(new Exception))
assert(!Utils.isBindCollision(new Exception(new Exception)))
assert(!Utils.isBindCollision(new Exception(new BindException)))
// Positives
val be = new BindException("Random Message")
val be1 = new Exception(new BindException("Random Message"))
val be2 = new Exception(new Exception(new BindException("Random Message")))
assert(Utils.isBindCollision(be))
assert(Utils.isBindCollision(be1))
assert(Utils.isBindCollision(be2))
// Actual bind exception
var server1: ServerSocket = null
var server2: ServerSocket = null
try {
server1 = new java.net.ServerSocket(0)
server2 = new java.net.ServerSocket(server1.getLocalPort)
} catch {
case e: Exception =>
assert(e.isInstanceOf[java.net.BindException])
assert(Utils.isBindCollision(e))
} finally {
Option(server1).foreach(_.close())
Option(server2).foreach(_.close())
}
}
// Test for using the util function to change our log levels.
test("log4j log level change") {
val current = org.apache.log4j.Logger.getRootLogger().getLevel()
try {
Utils.setLogLevel(org.apache.log4j.Level.ALL)
assert(log.isInfoEnabled())
Utils.setLogLevel(org.apache.log4j.Level.ERROR)
assert(!log.isInfoEnabled())
assert(log.isErrorEnabled())
} finally {
// Best effort at undoing changes this test made.
Utils.setLogLevel(current)
}
}
test("deleteRecursively") {
val tempDir1 = Utils.createTempDir()
assert(tempDir1.exists())
Utils.deleteRecursively(tempDir1)
assert(!tempDir1.exists())
val tempDir2 = Utils.createTempDir()
val sourceFile1 = new File(tempDir2, "foo.txt")
Files.touch(sourceFile1)
assert(sourceFile1.exists())
Utils.deleteRecursively(sourceFile1)
assert(!sourceFile1.exists())
val tempDir3 = new File(tempDir2, "subdir")
assert(tempDir3.mkdir())
val sourceFile2 = new File(tempDir3, "bar.txt")
Files.touch(sourceFile2)
assert(sourceFile2.exists())
Utils.deleteRecursively(tempDir2)
assert(!tempDir2.exists())
assert(!tempDir3.exists())
assert(!sourceFile2.exists())
}
test("loading properties from file") {
val tmpDir = Utils.createTempDir()
val outFile = File.createTempFile("test-load-spark-properties", "test", tmpDir)
try {
System.setProperty("spark.test.fileNameLoadB", "2")
      Files.write("spark.test.fileNameLoadA true\n" +
        "spark.test.fileNameLoadB 1\n", outFile, StandardCharsets.UTF_8)
val properties = Utils.getPropertiesFromFile(outFile.getAbsolutePath)
properties
.filter { case (k, v) => k.startsWith("spark.")}
.foreach { case (k, v) => sys.props.getOrElseUpdate(k, v)}
val sparkConf = new SparkConf
assert(sparkConf.getBoolean("spark.test.fileNameLoadA", false) === true)
assert(sparkConf.getInt("spark.test.fileNameLoadB", 1) === 2)
} finally {
Utils.deleteRecursively(tmpDir)
}
}
test("timeIt with prepare") {
var cnt = 0
val prepare = () => {
cnt += 1
Thread.sleep(1000)
}
val time = Utils.timeIt(2)({}, Some(prepare))
require(cnt === 2, "prepare should be called twice")
require(time < 500, "preparation time should not count")
}
test("fetch hcfs dir") {
val tempDir = Utils.createTempDir()
val sourceDir = new File(tempDir, "source-dir")
sourceDir.mkdir()
val innerSourceDir = Utils.createTempDir(root = sourceDir.getPath)
val sourceFile = File.createTempFile("someprefix", "somesuffix", innerSourceDir)
val targetDir = new File(tempDir, "target-dir")
Files.write("some text", sourceFile, StandardCharsets.UTF_8)
val path =
if (Utils.isWindows) {
        new Path("file:/" + sourceDir.getAbsolutePath.replace("\\", "/"))
} else {
new Path("file://" + sourceDir.getAbsolutePath)
}
val conf = new Configuration()
val fs = Utils.getHadoopFileSystem(path.toString, conf)
assert(!targetDir.isDirectory())
Utils.fetchHcfsFile(path, targetDir, fs, new SparkConf(), conf, false)
assert(targetDir.isDirectory())
// Copy again to make sure it doesn't error if the dir already exists.
Utils.fetchHcfsFile(path, targetDir, fs, new SparkConf(), conf, false)
val destDir = new File(targetDir, sourceDir.getName())
assert(destDir.isDirectory())
val destInnerDir = new File(destDir, innerSourceDir.getName)
assert(destInnerDir.isDirectory())
val destInnerFile = new File(destInnerDir, sourceFile.getName)
assert(destInnerFile.isFile())
val filePath =
if (Utils.isWindows) {
        new Path("file:/" + sourceFile.getAbsolutePath.replace("\\", "/"))
} else {
new Path("file://" + sourceFile.getAbsolutePath)
}
val testFileDir = new File(tempDir, "test-filename")
val testFileName = "testFName"
val testFilefs = Utils.getHadoopFileSystem(filePath.toString, conf)
Utils.fetchHcfsFile(filePath, testFileDir, testFilefs, new SparkConf(),
conf, false, Some(testFileName))
val newFileName = new File(testFileDir, testFileName)
assert(newFileName.isFile())
}
test("shutdown hook manager") {
val manager = new SparkShutdownHookManager()
val output = new ListBuffer[Int]()
val hook1 = manager.add(1, () => output += 1)
manager.add(3, () => output += 3)
manager.add(2, () => output += 2)
manager.add(4, () => output += 4)
manager.remove(hook1)
manager.runAll()
assert(output.toList === List(4, 3, 2))
}
test("isInDirectory") {
val tmpDir = new File(sys.props("java.io.tmpdir"))
val parentDir = new File(tmpDir, "parent-dir")
val childDir1 = new File(parentDir, "child-dir-1")
val childDir1b = new File(parentDir, "child-dir-1b")
val childFile1 = new File(parentDir, "child-file-1.txt")
val childDir2 = new File(childDir1, "child-dir-2")
val childDir2b = new File(childDir1, "child-dir-2b")
val childFile2 = new File(childDir1, "child-file-2.txt")
val childFile3 = new File(childDir2, "child-file-3.txt")
val nullFile: File = null
parentDir.mkdir()
childDir1.mkdir()
childDir1b.mkdir()
childDir2.mkdir()
childDir2b.mkdir()
childFile1.createNewFile()
childFile2.createNewFile()
childFile3.createNewFile()
// Identity
assert(Utils.isInDirectory(parentDir, parentDir))
assert(Utils.isInDirectory(childDir1, childDir1))
assert(Utils.isInDirectory(childDir2, childDir2))
// Valid ancestor-descendant pairs
assert(Utils.isInDirectory(parentDir, childDir1))
assert(Utils.isInDirectory(parentDir, childFile1))
assert(Utils.isInDirectory(parentDir, childDir2))
assert(Utils.isInDirectory(parentDir, childFile2))
assert(Utils.isInDirectory(parentDir, childFile3))
assert(Utils.isInDirectory(childDir1, childDir2))
assert(Utils.isInDirectory(childDir1, childFile2))
assert(Utils.isInDirectory(childDir1, childFile3))
assert(Utils.isInDirectory(childDir2, childFile3))
// Inverted ancestor-descendant pairs should fail
assert(!Utils.isInDirectory(childDir1, parentDir))
assert(!Utils.isInDirectory(childDir2, parentDir))
assert(!Utils.isInDirectory(childDir2, childDir1))
assert(!Utils.isInDirectory(childFile1, parentDir))
assert(!Utils.isInDirectory(childFile2, parentDir))
assert(!Utils.isInDirectory(childFile3, parentDir))
assert(!Utils.isInDirectory(childFile2, childDir1))
assert(!Utils.isInDirectory(childFile3, childDir1))
assert(!Utils.isInDirectory(childFile3, childDir2))
// Non-existent files or directories should fail
assert(!Utils.isInDirectory(parentDir, new File(parentDir, "one.txt")))
assert(!Utils.isInDirectory(parentDir, new File(parentDir, "one/two.txt")))
assert(!Utils.isInDirectory(parentDir, new File(parentDir, "one/two/three.txt")))
// Siblings should fail
assert(!Utils.isInDirectory(childDir1, childDir1b))
assert(!Utils.isInDirectory(childDir1, childFile1))
assert(!Utils.isInDirectory(childDir2, childDir2b))
assert(!Utils.isInDirectory(childDir2, childFile2))
// Null files should fail without throwing NPE
assert(!Utils.isInDirectory(parentDir, nullFile))
assert(!Utils.isInDirectory(childFile3, nullFile))
assert(!Utils.isInDirectory(nullFile, parentDir))
assert(!Utils.isInDirectory(nullFile, childFile3))
}
test("circular buffer: if nothing was written to the buffer, display nothing") {
val buffer = new CircularBuffer(4)
assert(buffer.toString === "")
}
test("circular buffer: if the buffer isn't full, print only the contents written") {
val buffer = new CircularBuffer(10)
val stream = new PrintStream(buffer, true, "UTF-8")
stream.print("test")
assert(buffer.toString === "test")
}
test("circular buffer: data written == size of the buffer") {
val buffer = new CircularBuffer(4)
val stream = new PrintStream(buffer, true, "UTF-8")
// fill the buffer to its exact size so that it just hits overflow
stream.print("test")
assert(buffer.toString === "test")
// add more data to the buffer
stream.print("12")
assert(buffer.toString === "st12")
}
test("circular buffer: multiple overflow") {
val buffer = new CircularBuffer(25)
val stream = new PrintStream(buffer, true, "UTF-8")
stream.print("test circular test circular test circular test circular test circular")
assert(buffer.toString === "st circular test circular")
}
test("nanSafeCompareDoubles") {
def shouldMatchDefaultOrder(a: Double, b: Double): Unit = {
assert(Utils.nanSafeCompareDoubles(a, b) === JDouble.compare(a, b))
assert(Utils.nanSafeCompareDoubles(b, a) === JDouble.compare(b, a))
}
shouldMatchDefaultOrder(0d, 0d)
shouldMatchDefaultOrder(0d, 1d)
shouldMatchDefaultOrder(Double.MinValue, Double.MaxValue)
assert(Utils.nanSafeCompareDoubles(Double.NaN, Double.NaN) === 0)
assert(Utils.nanSafeCompareDoubles(Double.NaN, Double.PositiveInfinity) === 1)
assert(Utils.nanSafeCompareDoubles(Double.NaN, Double.NegativeInfinity) === 1)
assert(Utils.nanSafeCompareDoubles(Double.PositiveInfinity, Double.NaN) === -1)
assert(Utils.nanSafeCompareDoubles(Double.NegativeInfinity, Double.NaN) === -1)
}
test("nanSafeCompareFloats") {
def shouldMatchDefaultOrder(a: Float, b: Float): Unit = {
assert(Utils.nanSafeCompareFloats(a, b) === JFloat.compare(a, b))
assert(Utils.nanSafeCompareFloats(b, a) === JFloat.compare(b, a))
}
shouldMatchDefaultOrder(0f, 0f)
shouldMatchDefaultOrder(1f, 1f)
shouldMatchDefaultOrder(Float.MinValue, Float.MaxValue)
assert(Utils.nanSafeCompareFloats(Float.NaN, Float.NaN) === 0)
assert(Utils.nanSafeCompareFloats(Float.NaN, Float.PositiveInfinity) === 1)
assert(Utils.nanSafeCompareFloats(Float.NaN, Float.NegativeInfinity) === 1)
assert(Utils.nanSafeCompareFloats(Float.PositiveInfinity, Float.NaN) === -1)
assert(Utils.nanSafeCompareFloats(Float.NegativeInfinity, Float.NaN) === -1)
}
test("isDynamicAllocationEnabled") {
val conf = new SparkConf()
conf.set("spark.master", "yarn")
conf.set("spark.submit.deployMode", "client")
assert(Utils.isDynamicAllocationEnabled(conf) === false)
assert(Utils.isDynamicAllocationEnabled(
conf.set("spark.dynamicAllocation.enabled", "false")) === false)
assert(Utils.isDynamicAllocationEnabled(
conf.set("spark.dynamicAllocation.enabled", "true")) === true)
assert(Utils.isDynamicAllocationEnabled(
conf.set("spark.executor.instances", "1")) === true)
assert(Utils.isDynamicAllocationEnabled(
conf.set("spark.executor.instances", "0")) === true)
assert(Utils.isDynamicAllocationEnabled(conf.set("spark.master", "local")) === false)
assert(Utils.isDynamicAllocationEnabled(conf.set("spark.dynamicAllocation.testing", "true")))
}
test("getDynamicAllocationInitialExecutors") {
val conf = new SparkConf()
assert(Utils.getDynamicAllocationInitialExecutors(conf) === 0)
assert(Utils.getDynamicAllocationInitialExecutors(
conf.set("spark.dynamicAllocation.minExecutors", "3")) === 3)
assert(Utils.getDynamicAllocationInitialExecutors( // should use minExecutors
conf.set("spark.executor.instances", "2")) === 3)
assert(Utils.getDynamicAllocationInitialExecutors( // should use executor.instances
conf.set("spark.executor.instances", "4")) === 4)
assert(Utils.getDynamicAllocationInitialExecutors( // should use executor.instances
conf.set("spark.dynamicAllocation.initialExecutors", "3")) === 4)
assert(Utils.getDynamicAllocationInitialExecutors( // should use initialExecutors
conf.set("spark.dynamicAllocation.initialExecutors", "5")) === 5)
assert(Utils.getDynamicAllocationInitialExecutors( // should use minExecutors
conf.set("spark.dynamicAllocation.initialExecutors", "2")
.set("spark.executor.instances", "1")) === 3)
}
test("Set Spark CallerContext") {
val context = "test"
new CallerContext(context).setCurrentContext()
if (CallerContext.callerContextSupported) {
val callerContext = Utils.classForName("org.apache.hadoop.ipc.CallerContext")
assert(s"SPARK_$context" ===
callerContext.getMethod("getCurrent").invoke(null).toString)
}
}
test("encodeFileNameToURIRawPath") {
assert(Utils.encodeFileNameToURIRawPath("abc") === "abc")
assert(Utils.encodeFileNameToURIRawPath("abc xyz") === "abc%20xyz")
assert(Utils.encodeFileNameToURIRawPath("abc:xyz") === "abc:xyz")
}
test("decodeFileNameInURI") {
assert(Utils.decodeFileNameInURI(new URI("files:///abc/xyz")) === "xyz")
assert(Utils.decodeFileNameInURI(new URI("files:///abc")) === "abc")
assert(Utils.decodeFileNameInURI(new URI("files:///abc%20xyz")) === "abc xyz")
}
test("Kill process") {
// Verify that we can terminate a process even if it is in a bad state. This is only run
// on UNIX since it does some OS specific things to verify the correct behavior.
if (SystemUtils.IS_OS_UNIX) {
def getPid(p: Process): Int = {
val f = p.getClass().getDeclaredField("pid")
f.setAccessible(true)
f.get(p).asInstanceOf[Int]
}
def pidExists(pid: Int): Boolean = {
val p = Runtime.getRuntime.exec(s"kill -0 $pid")
p.waitFor()
p.exitValue() == 0
}
def signal(pid: Int, s: String): Unit = {
val p = Runtime.getRuntime.exec(s"kill -$s $pid")
p.waitFor()
}
// Start up a process that runs 'sleep 10'. Terminate the process and assert it takes
// less time and the process is no longer there.
val startTimeMs = System.currentTimeMillis()
val process = new ProcessBuilder("sleep", "10").start()
val pid = getPid(process)
try {
assert(pidExists(pid))
val terminated = Utils.terminateProcess(process, 5000)
assert(terminated.isDefined)
process.waitFor(5, TimeUnit.SECONDS)
val durationMs = System.currentTimeMillis() - startTimeMs
assert(durationMs < 5000)
assert(!pidExists(pid))
} finally {
// Forcibly kill the test process just in case.
signal(pid, "SIGKILL")
}
      val versionParts = System.getProperty("java.version").split("[+.\\-]+", 3)
var majorVersion = versionParts(0).toInt
if (majorVersion == 1) majorVersion = versionParts(1).toInt
if (majorVersion >= 8) {
// We'll make sure that forcibly terminating a process works by
// creating a very misbehaving process. It ignores SIGTERM and has been SIGSTOPed. On
// older versions of java, this will *not* terminate.
val file = File.createTempFile("temp-file-name", ".tmp")
file.deleteOnExit()
val cmd =
s"""
|#!/bin/bash
|trap "" SIGTERM
|sleep 10
""".stripMargin
Files.write(cmd.getBytes(StandardCharsets.UTF_8), file)
file.getAbsoluteFile.setExecutable(true)
val process = new ProcessBuilder(file.getAbsolutePath).start()
val pid = getPid(process)
assert(pidExists(pid))
try {
signal(pid, "SIGSTOP")
val start = System.currentTimeMillis()
val terminated = Utils.terminateProcess(process, 5000)
assert(terminated.isDefined)
process.waitFor(5, TimeUnit.SECONDS)
val duration = System.currentTimeMillis() - start
assert(duration < 6000) // add a little extra time to allow a force kill to finish
assert(!pidExists(pid))
} finally {
signal(pid, "SIGKILL")
}
}
}
}
test("chi square test of randomizeInPlace") {
// Parameters
val arraySize = 10
val numTrials = 1000
val threshold = 0.05
val seed = 1L
// results(i)(j): how many times Utils.randomize moves an element from position j to position i
val results = Array.ofDim[Long](arraySize, arraySize)
// This must be seeded because even a fair random process will fail this test with
// probability equal to the value of `threshold`, which is inconvenient for a unit test.
val rand = new java.util.Random(seed)
val range = 0 until arraySize
for {
_ <- 0 until numTrials
trial = Utils.randomizeInPlace(range.toArray, rand)
i <- range
} results(i)(trial(i)) += 1L
val chi = new ChiSquareTest()
// We expect an even distribution; this array will be rescaled by `chiSquareTest`
val expected = Array.fill(arraySize * arraySize)(1.0)
val observed = results.flatten
    // Performs Pearson's chi-squared test. Using the sum-of-squares as the test statistic gives
    // the probability of a uniform distribution producing results as extreme as `observed`.
val pValue = chi.chiSquareTest(expected, observed)
assert(pValue > threshold)
}
test("redact sensitive information") {
val sparkConf = new SparkConf
// Set some secret keys
val secretKeys = Seq(
"spark.executorEnv.HADOOP_CREDSTORE_PASSWORD",
"spark.my.password",
"spark.my.sECreT")
secretKeys.foreach { key => sparkConf.set(key, "sensitive_value") }
// Set a non-secret key
sparkConf.set("spark.regular.property", "regular_value")
// Set a property with a regular key but secret in the value
sparkConf.set("spark.sensitive.property", "has_secret_in_value")
// Redact sensitive information
val redactedConf = Utils.redact(sparkConf, sparkConf.getAll).toMap
// Assert that secret information got redacted while the regular property remained the same
secretKeys.foreach { key => assert(redactedConf(key) === Utils.REDACTION_REPLACEMENT_TEXT) }
assert(redactedConf("spark.regular.property") === "regular_value")
assert(redactedConf("spark.sensitive.property") === Utils.REDACTION_REPLACEMENT_TEXT)
}
test("tryWithSafeFinally") {
var e = new Error("Block0")
val finallyBlockError = new Error("Finally Block")
var isErrorOccurred = false
// if the try and finally blocks throw different exception instances
try {
Utils.tryWithSafeFinally { throw e }(finallyBlock = { throw finallyBlockError })
} catch {
case t: Error =>
assert(t.getSuppressed.head == finallyBlockError)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try and finally blocks throw the same exception instance then it should not
// try to add to suppressed and get IllegalArgumentException
e = new Error("Block1")
isErrorOccurred = false
try {
Utils.tryWithSafeFinally { throw e }(finallyBlock = { throw e })
} catch {
case t: Error =>
assert(t.getSuppressed.length == 0)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try throws the exception and finally doesn't throw exception
e = new Error("Block2")
isErrorOccurred = false
try {
Utils.tryWithSafeFinally { throw e }(finallyBlock = {})
} catch {
case t: Error =>
assert(t.getSuppressed.length == 0)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try and finally block don't throw exception
Utils.tryWithSafeFinally {}(finallyBlock = {})
}
test("tryWithSafeFinallyAndFailureCallbacks") {
var e = new Error("Block0")
val catchBlockError = new Error("Catch Block")
val finallyBlockError = new Error("Finally Block")
var isErrorOccurred = false
TaskContext.setTaskContext(TaskContext.empty())
// if the try, catch and finally blocks throw different exception instances
try {
Utils.tryWithSafeFinallyAndFailureCallbacks { throw e }(
catchBlock = { throw catchBlockError }, finallyBlock = { throw finallyBlockError })
} catch {
case t: Error =>
assert(t.getSuppressed.head == catchBlockError)
assert(t.getSuppressed.last == finallyBlockError)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try, catch and finally blocks throw the same exception instance then it should not
// try to add to suppressed and get IllegalArgumentException
e = new Error("Block1")
isErrorOccurred = false
try {
Utils.tryWithSafeFinallyAndFailureCallbacks { throw e }(catchBlock = { throw e },
finallyBlock = { throw e })
} catch {
case t: Error =>
assert(t.getSuppressed.length == 0)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try throws the exception, catch and finally don't throw exceptions
e = new Error("Block2")
isErrorOccurred = false
try {
Utils.tryWithSafeFinallyAndFailureCallbacks { throw e }(catchBlock = {}, finallyBlock = {})
} catch {
case t: Error =>
assert(t.getSuppressed.length == 0)
isErrorOccurred = true
}
assert(isErrorOccurred)
// if the try, catch and finally blocks don't throw exceptions
Utils.tryWithSafeFinallyAndFailureCallbacks {}(catchBlock = {}, finallyBlock = {})
TaskContext.unset
}
test("load extensions") {
val extensions = Seq(
classOf[SimpleExtension],
classOf[ExtensionWithConf],
classOf[UnregisterableExtension]).map(_.getName())
val conf = new SparkConf(false)
val instances = Utils.loadExtensions(classOf[Object], extensions, conf)
assert(instances.size === 2)
assert(instances.count(_.isInstanceOf[SimpleExtension]) === 1)
val extWithConf = instances.find(_.isInstanceOf[ExtensionWithConf])
.map(_.asInstanceOf[ExtensionWithConf])
.get
assert(extWithConf.conf eq conf)
class NestedExtension { }
val invalid = Seq(classOf[NestedExtension].getName())
intercept[SparkException] {
Utils.loadExtensions(classOf[Object], invalid, conf)
}
val error = Seq(classOf[ExtensionWithError].getName())
intercept[IllegalArgumentException] {
Utils.loadExtensions(classOf[Object], error, conf)
}
val wrongType = Seq(classOf[ListenerImpl].getName())
intercept[IllegalArgumentException] {
Utils.loadExtensions(classOf[Seq[_]], wrongType, conf)
}
}
test("check Kubernetes master URL") {
val k8sMasterURLHttps = Utils.checkAndGetK8sMasterUrl("k8s://https://host:port")
assert(k8sMasterURLHttps === "k8s://https://host:port")
val k8sMasterURLHttp = Utils.checkAndGetK8sMasterUrl("k8s://http://host:port")
assert(k8sMasterURLHttp === "k8s://http://host:port")
val k8sMasterURLWithoutScheme = Utils.checkAndGetK8sMasterUrl("k8s://127.0.0.1:8443")
assert(k8sMasterURLWithoutScheme === "k8s://https://127.0.0.1:8443")
val k8sMasterURLWithoutScheme2 = Utils.checkAndGetK8sMasterUrl("k8s://127.0.0.1")
assert(k8sMasterURLWithoutScheme2 === "k8s://https://127.0.0.1")
intercept[IllegalArgumentException] {
Utils.checkAndGetK8sMasterUrl("k8s:https://host:port")
}
intercept[IllegalArgumentException] {
Utils.checkAndGetK8sMasterUrl("k8s://foo://host:port")
}
}
}
private class SimpleExtension
private class ExtensionWithConf(val conf: SparkConf)
private class UnregisterableExtension {
throw new UnsupportedOperationException()
}
private class ExtensionWithError {
throw new IllegalArgumentException()
}
private class ListenerImpl extends SparkListener
| brad-kaiser/spark | core/src/test/scala/org/apache/spark/util/UtilsSuite.scala | Scala | apache-2.0 | 47,846 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ui
import java.util.EnumSet
import javax.servlet.DispatcherType
import javax.servlet.http.{HttpServlet, HttpServletRequest}
import scala.collection.mutable.ArrayBuffer
import scala.collection.mutable.HashMap
import scala.xml.Node
import org.eclipse.jetty.servlet.{FilterHolder, FilterMapping, ServletContextHandler, ServletHolder}
import org.json4s.JsonAST.{JNothing, JValue}
import org.apache.spark.{SecurityManager, SparkConf, SSLOptions}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config._
import org.apache.spark.ui.JettyUtils._
import org.apache.spark.util.Utils
/**
* The top level component of the UI hierarchy that contains the server.
*
* Each WebUI represents a collection of tabs, each of which in turn represents a collection of
* pages. The use of tabs is optional, however; a WebUI may choose to include pages directly.
*/
private[spark] abstract class WebUI(
val securityManager: SecurityManager,
val sslOptions: SSLOptions,
port: Int,
conf: SparkConf,
basePath: String = "",
name: String = "",
poolSize: Int = 200)
extends Logging {
protected val tabs = ArrayBuffer[WebUITab]()
protected val handlers = ArrayBuffer[ServletContextHandler]()
protected val pageToHandlers = new HashMap[WebUIPage, ArrayBuffer[ServletContextHandler]]
protected var serverInfo: Option[ServerInfo] = None
protected val publicHostName = Option(conf.getenv("SPARK_PUBLIC_DNS")).getOrElse(
conf.get(DRIVER_HOST_ADDRESS))
protected val className = Utils.getFormattedClassName(this)
def getBasePath: String = basePath
def getTabs: Seq[WebUITab] = tabs.toSeq
def getHandlers: Seq[ServletContextHandler] = handlers.toSeq
def getDelegatingHandlers: Seq[DelegatingServletContextHandler] = {
handlers.map(new DelegatingServletContextHandler(_)).toSeq
}
/** Attaches a tab to this UI, along with all of its attached pages. */
def attachTab(tab: WebUITab): Unit = {
tab.pages.foreach(attachPage)
tabs += tab
}
/** Detaches a tab from this UI, along with all of its attached pages. */
def detachTab(tab: WebUITab): Unit = {
tab.pages.foreach(detachPage)
tabs -= tab
}
/** Detaches a page from this UI, along with all of its attached handlers. */
def detachPage(page: WebUIPage): Unit = {
pageToHandlers.remove(page).foreach(_.foreach(detachHandler))
}
/** Attaches a page to this UI. */
def attachPage(page: WebUIPage): Unit = {
val pagePath = "/" + page.prefix
val renderHandler = createServletHandler(pagePath,
(request: HttpServletRequest) => page.render(request), conf, basePath)
val renderJsonHandler = createServletHandler(pagePath.stripSuffix("/") + "/json",
(request: HttpServletRequest) => page.renderJson(request), conf, basePath)
attachHandler(renderHandler)
attachHandler(renderJsonHandler)
val handlers = pageToHandlers.getOrElseUpdate(page, ArrayBuffer[ServletContextHandler]())
handlers += renderHandler
handlers += renderJsonHandler
}
/** Attaches a handler to this UI. */
def attachHandler(handler: ServletContextHandler): Unit = synchronized {
handlers += handler
serverInfo.foreach(_.addHandler(handler, securityManager))
}
/** Attaches a handler to this UI. */
def attachHandler(contextPath: String, httpServlet: HttpServlet, pathSpec: String): Unit = {
val ctx = new ServletContextHandler()
ctx.setContextPath(contextPath)
ctx.addServlet(new ServletHolder(httpServlet), pathSpec)
attachHandler(ctx)
}
/** Detaches a handler from this UI. */
def detachHandler(handler: ServletContextHandler): Unit = synchronized {
handlers -= handler
serverInfo.foreach(_.removeHandler(handler))
}
/**
* Detaches the content handler at `path` URI.
*
* @param path Path in UI to unmount.
*/
def detachHandler(path: String): Unit = {
handlers.find(_.getContextPath() == path).foreach(detachHandler)
}
/**
* Adds a handler for static content.
*
* @param resourceBase Root of where to find resources to serve.
* @param path Path in UI where to mount the resources.
*/
def addStaticHandler(resourceBase: String, path: String = "/static"): Unit = {
attachHandler(JettyUtils.createStaticHandler(resourceBase, path))
}
/** A hook to initialize components of the UI */
def initialize(): Unit
def initServer(): ServerInfo = {
val host = Option(conf.getenv("SPARK_LOCAL_IP")).getOrElse("0.0.0.0")
val server = startJettyServer(host, port, sslOptions, conf, name, poolSize)
logInfo(s"Bound $className to $host, and started at $webUrl")
server
}
/** Binds to the HTTP server behind this web interface. */
def bind(): Unit = {
assert(serverInfo.isEmpty, s"Attempted to bind $className more than once!")
try {
val server = initServer()
handlers.foreach(server.addHandler(_, securityManager))
serverInfo = Some(server)
} catch {
case e: Exception =>
logError(s"Failed to bind $className", e)
System.exit(1)
}
}
/** @return Whether SSL enabled. Only valid after [[bind]]. */
def isSecure: Boolean = serverInfo.map(_.securePort.isDefined).getOrElse(false)
/** @return The scheme of web interface. Only valid after [[bind]]. */
def scheme: String = if (isSecure) "https://" else "http://"
/** @return The url of web interface. Only valid after [[bind]]. */
def webUrl: String = s"${scheme}$publicHostName:${boundPort}"
/** @return The actual port to which this server is bound. Only valid after [[bind]]. */
def boundPort: Int = serverInfo.map(si => si.securePort.getOrElse(si.boundPort)).getOrElse(-1)
/** Stops the server behind this web interface. Only valid after [[bind]]. */
def stop(): Unit = {
assert(serverInfo.isDefined,
s"Attempted to stop $className before binding to a server!")
serverInfo.foreach(_.stop())
}
}
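// A minimal usage sketch of the lifecycle described above (illustrative only, not part of
// Spark): a hypothetical concrete subclass attaches its content in initialize(), bind()
// starts the Jetty server, and webUrl/boundPort are only meaningful afterwards.
//
//   class ExampleUI(sm: SecurityManager, ssl: SSLOptions, port: Int, conf: SparkConf)
//     extends WebUI(sm, ssl, port, conf, name = "ExampleUI") {
//     def initialize(): Unit = {
//       attachTab(new ExampleTab(this))           // ExampleTab is a hypothetical WebUITab
//       addStaticHandler("org/example/ui/static") // hypothetical classpath resource root
//     }
//   }
//
//   // val ui = new ExampleUI(...); ui.initialize(); ui.bind(); ui.webUrl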
/**
* A tab that represents a collection of pages.
* The prefix is appended to the parent address to form a full path, and must not contain slashes.
*/
private[spark] abstract class WebUITab(parent: WebUI, val prefix: String) {
val pages = ArrayBuffer[WebUIPage]()
val name = prefix.capitalize
/** Attach a page to this tab. This prepends the page's prefix with the tab's own prefix. */
def attachPage(page: WebUIPage): Unit = {
page.prefix = (prefix + "/" + page.prefix).stripSuffix("/")
pages += page
}
/** Get a list of header tabs from the parent UI. */
def headerTabs: Seq[WebUITab] = parent.getTabs
def basePath: String = parent.getBasePath
}
/**
* A page that represents the leaf node in the UI hierarchy.
*
* The direct parent of a WebUIPage is not specified as it can be either a WebUI or a WebUITab.
* If the parent is a WebUI, the prefix is appended to the parent's address to form a full path.
* Else, if the parent is a WebUITab, the prefix is appended to the super prefix of the parent
* to form a relative path. The prefix must not contain slashes.
*/
private[spark] abstract class WebUIPage(var prefix: String) {
def render(request: HttpServletRequest): Seq[Node]
def renderJson(request: HttpServletRequest): JValue = JNothing
}
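// Illustrative page sketch (not part of this file), assuming it is attached to a hypothetical
// tab whose prefix is "example": WebUITab.attachPage rewrites the page prefix to "example/hello",
// so the servlet handlers created in WebUI.attachPage serve it under <basePath>/example/hello
// and <basePath>/example/hello/json.
//
//   class HelloPage extends WebUIPage("hello") {
//     def render(request: HttpServletRequest): Seq[Node] = <p>Hello from a sample page</p>
//   }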
private[spark] class DelegatingServletContextHandler(handler: ServletContextHandler) {
def prependFilterMapping(
filterName: String,
spec: String,
types: EnumSet[DispatcherType]): Unit = {
val mapping = new FilterMapping()
mapping.setFilterName(filterName)
mapping.setPathSpec(spec)
mapping.setDispatcherTypes(types)
handler.getServletHandler.prependFilterMapping(mapping)
}
def addFilter(
filterName: String,
className: String,
filterParams: Map[String, String]): Unit = {
val filterHolder = new FilterHolder()
filterHolder.setName(filterName)
filterHolder.setClassName(className)
filterParams.foreach { case (k, v) => filterHolder.setInitParameter(k, v) }
handler.getServletHandler.addFilter(filterHolder)
}
def filterCount(): Int = {
handler.getServletHandler.getFilters.length
}
def getContextPath(): String = {
handler.getContextPath
}
}
| chuckchen/spark | core/src/main/scala/org/apache/spark/ui/WebUI.scala | Scala | apache-2.0 | 8,983 |
// See LICENSE.txt for license details.
package templates
import chisel3._
import chisel3.iotesters.{PeekPokeTester, Driver, ChiselFlatSpec}
import chisel3.testers.BasicTester
import org.scalatest._
import org.scalatest.prop._
/**
* Mem1D test harness
*/
class Mem1DTests(c: Mem1D) extends PeekPokeTester(c) {
step(1)
reset(1)
for (i <- 0 until c.size ) {
poke(c.io.w.addr, i)
poke(c.io.w.data, i*2)
poke(c.io.w.en, 1)
step(1)
poke(c.io.w.en, 0)
step(1)
}
for (i <- 0 until c.size ) {
poke(c.io.r.addr, i)
poke(c.io.r.en, 1)
step(1)
expect(c.io.output.data, i*2)
poke(c.io.r.en, 0)
step(1)
}
}
/**
* MemND test harness
*/
class MemNDTests(c: MemND) extends PeekPokeTester(c) {
val depth = c.dims.reduce{_*_}
val N = c.dims.length
step(1)
reset(1)
// poke(c.io.wMask, 1) // Do not mask at all when testing this template directly
// poke(c.io.rMask, 1) // Do not mask at all when testing this template directly
// Assume only 2D
for (i <- 0 until c.dims(0) ) {
for (j <- 0 until c.dims(1) ) {
c.io.w.addr.zip(List(i,j)).foreach { case (port, addr) => poke(port, addr) }
poke(c.io.w.data, (i*c.dims(0) + j)*2)
poke(c.io.w.en, 1)
poke(c.io.wMask, 1)
step(1)
poke(c.io.w.en, 0)
poke(c.io.wMask, 0)
step(1)
}
}
for (i <- 0 until c.dims(0) ) {
for (j <- 0 until c.dims(1) ) {
c.io.r.addr.zip(List(i,j)).foreach { case (port, addr) => poke(port, addr) }
poke(c.io.r.en, 1)
poke(c.io.rMask, 1)
step(1)
// Console.println(s"Expect ${2*(i*c.dims(0) + j)} but got ${peek(c.io.output.data)}")
expect(c.io.output.data, 2*(i*c.dims(0) + j))
poke(c.io.r.en, 0)
poke(c.io.rMask, 0)
step(1)
}
}
}
/**
* SRAM test harness
*/
class SRAMTests(c: SRAM) extends PeekPokeTester(c) {
val depth = c.logicalDims.reduce{_*_}
val N = c.logicalDims.length
reset(1)
// Write to each address
for (i <- 0 until c.logicalDims(0)) { // Each row
for (j <- 0 until c.logicalDims(1) by c.wPar(0)) {
// Set addrs
var idx = 0
(0 until c.wPar.length).foreach{ writer =>
(0 until c.wPar(writer)).foreach { kdim =>
poke(c.io.w(idx).addr(0), i)
poke(c.io.w(idx).addr(1), j+kdim)
poke(c.io.w(idx).data, (i*c.logicalDims(0) + j + kdim)*2)
if (writer == 0) {
poke(c.io.w(idx).en, true)
} else {
poke(c.io.w(idx).en, false)
}
idx = idx + 1
}
}
step(1)
}
}
// Turn off wEn
(0 until c.wPar.reduce{_+_}).foreach{ wbundle =>
poke(c.io.w(wbundle).en, false)
}
step(30)
// Check each address
for (i <- 0 until c.logicalDims(0)) { // Each row
for (j <- 0 until c.logicalDims(1) by c.rPar(0)) {
// Set addrs
var idx = 0
(0 until c.rPar.length).foreach{ reader =>
(0 until c.rPar(reader)).foreach { kdim =>
poke(c.io.r(idx).addr(0), i)
poke(c.io.r(idx).addr(1), j+kdim)
if (reader == 0) {
poke(c.io.r(idx).en, true)
} else {
poke(c.io.r(idx).en, false)
}
idx = idx + 1
}
}
step(1)
(0 until c.rPar(0)).foreach { kdim =>
expect(c.io.output.data(kdim), (i*c.logicalDims(0) + j + kdim)*2)
}
}
}
// Turn off rEn
(0 until c.rPar.reduce{_+_}).foreach{ reader =>
poke(c.io.r(reader).en, false)
}
step(1)
}
/**
* SRAM test harness
*/
class NBufSRAMTests(c: NBufSRAM) extends PeekPokeTester(c) {
val timeout = 400
val initvals = (0 until c.numBufs).map { i => i+1}
var stageActives = Array.tabulate(c.numBufs) { i => 0 }
val latencies = (0 until c.numBufs).map { i => math.abs(rnd.nextInt(15)) + 5 }
var stageCounts = Array.tabulate(c.numBufs) { i => 0 }
var stagesDone = 0
reset(1)
def fillSRAM(wPort: Int, dat: Int) {
// Write to each address
for (i <- 0 until c.logicalDims(0)) { // Each row
for (j <- 0 until c.logicalDims(1) by c.wPar(0)) {
// Set addrs
var idx = 0
(0 until c.wPar.length).foreach{ writer =>
(0 until c.wPar(writer)).foreach { kdim =>
poke(c.io.w(idx).addr(0), i)
poke(c.io.w(idx).addr(1), j+kdim)
poke(c.io.w(idx).data, 1000*dat + i*c.logicalDims(0) + j + kdim)
if (writer == 0) {
poke(c.io.w(idx).en, true)
} else {
poke(c.io.w(idx).en, false)
}
idx = idx + 1
}
}
step(1)
}
}
// Turn off wEn
(0 until c.wPar.reduce{_+_}).foreach{ writer =>
poke(c.io.w(writer).en, false)
}
step(30)
}
def broadcastFillSRAM(dat: Int) {
// Write to each address
for (i <- 0 until c.logicalDims(0)) { // Each row
for (j <- 0 until c.logicalDims(1) by c.bPar.head) {
// Set addrs
(0 until c.bPar.head).foreach { kdim =>
poke(c.io.broadcast(kdim).addr(0), i)
poke(c.io.broadcast(kdim).addr(1), j+kdim)
poke(c.io.broadcast(kdim).data, dat + i*c.logicalDims(0) + j + kdim)
poke(c.io.broadcast(kdim).en, true)
}
step(1)
}
}
// Turn off wEn
(0 until c.bPar.head).foreach {kdim =>
poke(c.io.broadcast(kdim).en, false)
}
step(30)
}
def readSRAM(rPort: Int, dat: Int, base: Int = 1000) {
// Read at each address
for (i <- 0 until c.logicalDims(0)) { // Each row
for (j <- 0 until c.logicalDims(1) by c.rPar(0)) {
// Set addrs
var idx = 0
(0 until c.rPar.length).foreach{ readers =>
(0 until c.rPar(readers)).foreach { kdim =>
poke(c.io.r(idx).addr(0), i)
poke(c.io.r(idx).addr(1), j+kdim)
if (readers == 0) {
poke(c.io.r(idx).en, true)
} else {
poke(c.io.r(idx).en, false)
}
idx = idx + 1
}
}
step(1)
(0 until c.rPar.max).foreach {kdim =>
val gold = base*dat + i*c.logicalDims(0) + j + kdim
// val a = peek(c.io.output.data(rPort*c.rPar.max + kdim))
// println(s"Expecting $gold but got $a (${a == gold}) on port $rPort")
expect(c.io.output.data(rPort*c.rPar.max + kdim), gold)
}
}
}
// Turn off wEn
(0 until c.rPar.reduce{_+_}).foreach{ reader =>
poke(c.io.r(reader).en, false)
}
step(30)
}
def executeStage(s: Int) {
// println(s" Stage $s active count ${stageCounts(s)}, numcicles $numCycles")
if (stageActives(s) == 1) stageCounts(s) += 1 else stageCounts(s) = 0
if (stageCounts(s) == latencies(s)) {
poke(c.io.sDone(s), 1)
} else if (stageCounts(s) == latencies(s) + 1) {
poke(c.io.sEn(s), 0)
poke(c.io.sDone(s), 0)
stageCounts(s) = 0
stagesDone = stagesDone + 1
stageActives(s) = 0
} else {
poke(c.io.sDone(s), 0)
}
}
def handleStageEnables = {
(0 until c.numBufs).foreach { i =>
executeStage(i)
}
}
var numCycles = 0
var iter = 1
var writingPort = 0
var readingPort = c.numBufs-1
for (k <- 0 until c.numBufs*5) {
numCycles = 0
stagesDone = 0
(0 until c.numBufs).foreach{ i =>
poke(c.io.sEn(i), 1)
stageActives(i) = 1
}
fillSRAM(writingPort, iter)
if (iter >= c.numBufs) readSRAM(readingPort, iter-c.numBufs+1)
while (!(stagesDone == c.numBufs) & numCycles < timeout) {
handleStageEnables
step(1)
numCycles = numCycles+1
}
iter += 1
step(5)
}
// test broadcast
broadcastFillSRAM(20)
for (k <- 0 until c.numBufs) {
numCycles = 0
stagesDone = 0
(0 until c.numBufs).foreach{ i =>
poke(c.io.sEn(i), 1)
stageActives(i) = 1
}
readSRAM(readingPort, 20, 1)
while (!(stagesDone == c.numBufs) & numCycles < timeout) {
handleStageEnables
step(1)
numCycles = numCycles+1
}
iter += 1
step(5)
}
step(5)
}
// class Mem1DTester extends ChiselFlatSpec {
// behavior of "Mem1D"
// backends foreach {backend =>
// it should s"correctly do $backend" in {
// Driver(() => new Mem1D(1024))(c => new Mem1DTests(c)) should be (true)
// }
// }
// }
// class MemNDTester extends ChiselFlatSpec {
// behavior of "MemND"
// backends foreach {backend =>
// it should s"correctly do $backend" in {
// Driver(() => new MemND(List(4,8)))(c => new MemNDTests(c)) should be (true)
// }
// }
// }
// class SRAMTester extends ChiselFlatSpec {
// behavior of "SRAM"
// backends foreach {backend =>
// it should s"correctly do $backend" in {
// Driver(() => new SRAM(List(16,16), 32,
// List(1,2), List(1,1), 1, 1,
// 2, 2, "strided"))(c => new SRAMTests(c)) should be (true)
// }
// }
// }
| stanford-ppl/spatial-lang | spatial/core/resources/chiselgen/template-level/tests/templates/SRAM.scala | Scala | mit | 9,019 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.eagle.security.userprofile.daemon
import org.apache.eagle.common.DateTimeUtil
import org.joda.time.Period
import org.scalatest._
/**
* @since 9/2/15
*/
class UtilsSpec extends FlatSpec with Matchers{
import org.apache.eagle.security.userprofile.daemon.Utils._
it should "formatPathWithMilliseconds " in {
formatPathWithMilliseconds("/tmp/path/to/log.${yyyy-MM-dd-hh}.gz")(DateTimeUtil.humanDateToMilliseconds("2015-09-02 10:32:17,000")) should be ("/tmp/path/to/log.2015-09-02-10.gz")
formatPathWithMilliseconds("/tmp/path/to/${yyyy-MM-dd}/*.tar.gz")(DateTimeUtil.humanDateToMilliseconds("2015-09-02 10:32:17,000")) should be ("/tmp/path/to/2015-09-02/*.tar.gz")
formatPathWithMilliseconds("/tmp/path/to/${yyyy-MM-dd}/${yyyy-MM-dd-hh}.tar.gz")(DateTimeUtil.humanDateToMilliseconds("2015-09-02 10:32:17,000")) should be ("/tmp/path/to/2015-09-02/2015-09-02-10.tar.gz")
}
it should "formatPathsInDuration" in {
    val paths: Seq[String] = formatPathsInDuration("/tmp/path/to/log.${yyyy-MM-dd-hh}.gz", DateTimeUtil.humanDateToMilliseconds("2015-09-02 10:32:17,000"), Period.parse("P30D"))
    //paths.length should be (30 * 24)
}
} | eBay/Eagle | eagle-security/eagle-security-userprofile/training/src/test/scala/org/apache/eagle/security/userprofile/daemon/UtilsSpec.scala | Scala | apache-2.0 | 1,977 |
package com.github.cmanou.scrimage.utils.dithering.filters
import org.scalatest.{ OneInstancePerTest, BeforeAndAfter, FunSuite }
import com.sksamuel.scrimage.Image
class BurkesFilterTest extends FunSuite with BeforeAndAfter with OneInstancePerTest {
val original = Image.fromStream(getClass.getResourceAsStream("/macosx-desktop.png"))
test("filter output matches expected") {
val expected = Image.fromStream(getClass.getResourceAsStream("/com/github/cmanou/scrimage/utils/dithering/filters/macosx-desktop-burkes.png"))
assert(original.filter(BurkesFilter()) === expected)
}
} | cmanou/scrimage-utils | src/test/scala/com/github/cmanou/scrimage/utils/dithering/filters/BurkesFilterTest.scala | Scala | mit | 593 |
package com.bot4s.telegram.methods
import ParseMode.ParseMode
import com.bot4s.telegram.models.{ Message, ReplyMarkup }
import com.bot4s.telegram.models.ChatId
/**
* Use this method to edit text messages sent by the bot or via the bot (for inline bots).
 * On success, if the edited message was sent by the bot, the edited Message is returned; otherwise True is returned.
*
* @param chatId Integer or String Required if inline_message_id is not specified. Unique identifier for the target chat or username of the target channel (in the format @channelusername)
* @param messageId Integer Required if inline_message_id is not specified. Unique identifier of the sent message
* @param inlineMessageId String Required if chat_id and message_id are not specified. Identifier of the inline message
* @param text String New text of the message
* @param parseMode String Optional Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in your bot's message.
* @param disableWebPagePreview Boolean Optional Disables link previews for links in this message
* @param replyMarkup InlineKeyboardMarkup Optional A JSON-serialized object for an inline keyboard.
*/
case class EditMessageText(
chatId: Option[ChatId] = None,
messageId: Option[Int] = None,
inlineMessageId: Option[String] = None,
text: String,
parseMode: Option[ParseMode] = None,
disableWebPagePreview: Option[Boolean] = None,
replyMarkup: Option[ReplyMarkup] = None
) extends JsonRequest[Either[Boolean, Message]] {
if (inlineMessageId.isEmpty) {
require(chatId.isDefined, "Required if inlineMessageId is not specified")
require(messageId.isDefined, "Required if inlineMessageId is not specified")
}
if (chatId.isEmpty && messageId.isEmpty)
require(inlineMessageId.isDefined, "Required if chatId and messageId are not specified")
}
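
// Illustrative usage sketch (not part of the original file): how the request
// defined above can be built for the inline-message variant, which requires
// neither chatId nor messageId. The identifier value below is hypothetical.
object EditMessageTextExample {
  val editInline: EditMessageText = EditMessageText(
    inlineMessageId = Some("4afe3c9d"), // hypothetical inline message id
    text = "updated text",
    disableWebPagePreview = Some(true)
  )
}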
| mukel/telegrambot4s | core/src/com/bot4s/telegram/methods/EditMessageText.scala | Scala | apache-2.0 | 1,953 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.model.feats
import io.truthencode.ddo.model.classes.HeroicCharacterClass
import io.truthencode.ddo.support.requisite.{FeatRequisiteImpl, RequiresAnyOfClass}
/**
* Created by adarr on 2/21/2017.
*/
trait MaximizeSpell
extends FeatRequisiteImpl with MetaMagic with RequiresAnyOfClass with ArtificerBonusFeat
with AlchemistBonusFeat {
self: MetaMagicFeat =>
override def anyOfClass: Seq[(HeroicCharacterClass, Int)] =
MetaMagicFeat.minimumSpellCastingClass
}
| adarro/ddo-calc | subprojects/common/ddo-core/src/main/scala/io/truthencode/ddo/model/feats/MaximizeSpell.scala | Scala | apache-2.0 | 1,177 |
package com.wavesplatform.state.diffs
import cats._
import com.wavesplatform.BlocksTransactionsHelpers
import com.wavesplatform.block.Block
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.EitherExt2
import com.wavesplatform.db.WithDomain
import com.wavesplatform.features.BlockchainFeatures
import com.wavesplatform.features.BlockchainFeatures.BlockV5
import com.wavesplatform.lagonaki.mocks.TestBlock
import com.wavesplatform.lang.directives.values._
import com.wavesplatform.lang.script.Script
import com.wavesplatform.lang.script.v1.ExprScript
import com.wavesplatform.lang.utils._
import com.wavesplatform.lang.v1.compiler.Terms.CONST_BOOLEAN
import com.wavesplatform.lang.v1.compiler.{ExpressionCompiler, TestCompiler}
import com.wavesplatform.lang.v1.estimator.ScriptEstimatorV1
import com.wavesplatform.lang.v1.parser.Parser
import com.wavesplatform.settings.{FunctionalitySettings, TestFunctionalitySettings}
import com.wavesplatform.state._
import com.wavesplatform.state.diffs.smart.smartEnabledFS
import com.wavesplatform.test.PropSpec
import com.wavesplatform.transaction.Asset.IssuedAsset
import com.wavesplatform.transaction.assets._
import com.wavesplatform.transaction.transfer._
import com.wavesplatform.transaction.{GenesisTransaction, Transaction, TxHelpers, TxVersion}
import fastparse.Parsed
class AssetTransactionsDiffTest extends PropSpec with BlocksTransactionsHelpers with WithDomain {
def issueReissueBurnTxs(isReissuable: Boolean): ((GenesisTransaction, IssueTransaction), (ReissueTransaction, BurnTransaction)) = {
val master = TxHelpers.signer(1)
val genesis = TxHelpers.genesis(master.toAddress)
val issue = TxHelpers.issue(master, 100, reissuable = isReissuable, version = TxVersion.V1)
val asset = IssuedAsset(issue.id())
val reissue = TxHelpers.reissue(asset, master, 50, version = TxVersion.V1)
val burn = TxHelpers.burn(asset, 10, master, version = TxVersion.V1)
((genesis, issue), (reissue, burn))
}
property("Issue+Reissue+Burn do not break waves invariant and updates state") {
val ((gen, issue), (reissue, burn)) = issueReissueBurnTxs(isReissuable = true)
assertDiffAndState(Seq(TestBlock.create(Seq(gen, issue))), TestBlock.create(Seq(reissue, burn))) {
case (blockDiff, newState) =>
val totalPortfolioDiff = Monoid.combineAll(blockDiff.portfolios.values)
totalPortfolioDiff.balance shouldBe 0
totalPortfolioDiff.effectiveBalance shouldBe 0
totalPortfolioDiff.assets shouldBe Map(reissue.asset -> (reissue.quantity - burn.quantity))
val totalAssetVolume = issue.quantity + reissue.quantity - burn.quantity
newState.balance(issue.sender.toAddress, reissue.asset) shouldEqual totalAssetVolume
}
}
property("Cannot reissue/burn non-existing alias") {
val ((gen, _), (reissue, burn)) = issueReissueBurnTxs(true)
assertDiffEi(Seq(TestBlock.create(Seq(gen))), TestBlock.create(Seq(reissue))) { blockDiffEi =>
blockDiffEi should produce("Referenced assetId not found")
}
assertDiffEi(Seq(TestBlock.create(Seq(gen))), TestBlock.create(Seq(burn))) { blockDiffEi =>
blockDiffEi should produce("Referenced assetId not found")
}
}
property("Cannot reissue/burn non-owned alias") {
val setup = {
val issuer = TxHelpers.signer(1)
val nonIssuer = TxHelpers.signer(2)
val genesis = TxHelpers.genesis(issuer.toAddress)
val issue = TxHelpers.issue(issuer, 100, version = TxVersion.V1)
val asset = IssuedAsset(issue.id())
val reissue = TxHelpers.reissue(asset, nonIssuer, 50, version = TxVersion.V1)
val burn = TxHelpers.burn(asset, 10, nonIssuer, version = TxVersion.V1)
((genesis, issue), reissue, burn)
}
val ((gen, issue), reissue, burn) = setup
assertDiffEi(Seq(TestBlock.create(Seq(gen, issue))), TestBlock.create(Seq(reissue))) { blockDiffEi =>
blockDiffEi should produce("Asset was issued by other address")
}
assertDiffEi(Seq(TestBlock.create(Seq(gen, issue))), TestBlock.create(Seq(burn))) { blockDiffEi =>
blockDiffEi should produce("Asset was issued by other address")
}
}
property("Can burn non-owned alias if feature 'BurnAnyTokens' activated") {
val setup = {
val issuer = TxHelpers.signer(1)
val burner = TxHelpers.signer(2)
val genesis = TxHelpers.genesis(issuer.toAddress)
val issue = TxHelpers.issue(issuer, ENOUGH_AMT, version = TxVersion.V1)
val asset = IssuedAsset(issue.id())
val assetTransfer = TxHelpers.transfer(issuer, burner.toAddress, 1, asset, version = TxVersion.V1)
val wavesTransfer = TxHelpers.transfer(issuer, burner.toAddress, version = TxVersion.V1)
val burn = TxHelpers.burn(asset, assetTransfer.amount, burner, fee = wavesTransfer.amount, version = TxVersion.V1)
(genesis, issue, assetTransfer, wavesTransfer, burn)
}
val fs =
TestFunctionalitySettings.Enabled
.copy(
preActivatedFeatures = Map(BlockchainFeatures.SmartAccounts.id -> 0, BlockchainFeatures.BurnAnyTokens.id -> 0)
)
val (genesis, issue, assetTransfer, wavesTransfer, burn) = setup
assertDiffAndState(Seq(TestBlock.create(Seq(genesis, issue, assetTransfer, wavesTransfer))), TestBlock.create(Seq(burn)), fs) {
case (_, newState) =>
newState.balance(burn.sender.toAddress, burn.asset) shouldEqual 0
}
}
property("Can not reissue > long.max") {
val setup = {
val issuer = TxHelpers.signer(1)
val genesis = TxHelpers.genesis(issuer.toAddress)
val issue = TxHelpers.issue(issuer, version = TxVersion.V1)
val asset = IssuedAsset(issue.id())
val reissue = TxHelpers.reissue(asset, issuer, Long.MaxValue, version = TxVersion.V1)
(issuer, asset, genesis, issue, reissue)
}
val fs =
TestFunctionalitySettings.Enabled
.copy(
preActivatedFeatures = Map(BlockchainFeatures.SmartAccounts.id -> 0, BlockchainFeatures.DataTransaction.id -> 0)
)
val (_, _, genesis, issue, reissue) = setup
assertDiffEi(Seq(TestBlock.create(Seq(genesis, issue))), TestBlock.create(Seq(reissue)), fs) { ei =>
ei should produce("Asset total value overflow")
}
}
property("Can request reissue > long.max before BurnAnyTokens activated") {
val setup = {
val issuer = TxHelpers.signer(1)
val genesis = TxHelpers.genesis(issuer.toAddress)
val issue = TxHelpers.issue(issuer, version = TxVersion.V1)
val asset = IssuedAsset(issue.id())
val reissue = TxHelpers.reissue(asset, issuer, Long.MaxValue, version = TxVersion.V1)
(issuer, asset, genesis, issue, reissue)
}
val fs =
TestFunctionalitySettings.Enabled
val (_, _, genesis, issue, reissue) = setup
assertDiffEi(Seq(TestBlock.create(Seq(genesis, issue))), TestBlock.create(Seq(reissue)), fs) { ei =>
ei should produce("negative asset balance")
}
}
property("Can not total issue > long.max") {
val setup = {
val issuer = TxHelpers.signer(1)
val holder = TxHelpers.signer(2)
val genesis = TxHelpers.genesis(issuer.toAddress)
val amount = 100
val issue = TxHelpers.issue(issuer, amount, version = TxVersion.V1)
val asset = issue.asset
val transfer = TxHelpers.transfer(issuer, holder.toAddress, amount - 1, asset, attachment = ByteStr.fill(TransferTransaction.MaxAttachmentSize)(1), version = TxVersion.V1)
val reissue = TxHelpers.reissue(asset, issuer, (Long.MaxValue - amount) + 1, version = TxVersion.V1)
(issuer, asset, genesis, issue, reissue, transfer)
}
val fs =
TestFunctionalitySettings.Enabled
.copy(
preActivatedFeatures = Map(BlockchainFeatures.SmartAccounts.id -> 0, BlockchainFeatures.DataTransaction.id -> 0)
)
val (_, _, genesis, issue, reissue, transfer) = setup
assertDiffEi(Seq(TestBlock.create(Seq(genesis, issue, transfer))), TestBlock.create(Seq(reissue)), fs) { ei =>
ei should produce("Asset total value overflow")
}
}
property("Cannot reissue non-reissuable alias") {
val ((gen, issue), (reissue, _)) = issueReissueBurnTxs(isReissuable = false)
assertDiffEi(Seq(TestBlock.create(Seq(gen, issue))), TestBlock.create(Seq(reissue))) { blockDiffEi =>
blockDiffEi should produce("Asset is not reissuable")
}
}
private def createScript(code: String, version: StdLibVersion) = {
val Parsed.Success(expr, _) = Parser.parseExpr(code).get
ExprScript(version, ExpressionCompiler(compilerContext(version, Expression, isAssetScript = false), expr).explicitGet()._1).explicitGet()
}
def genesisIssueTransferReissue(
code: String,
version: StdLibVersion = V1
): (Seq[GenesisTransaction], IssueTransaction, TransferTransaction, ReissueTransaction, ReissueTransaction) = {
val accountA = TxHelpers.signer(1)
val accountB = TxHelpers.signer(2)
val genesis = Seq(accountA, accountB).map(acc => TxHelpers.genesis(acc.toAddress, Long.MaxValue / 100))
val issue = TxHelpers.issue(accountA, 100, script = Some(createScript(code, version)))
val asset = issue.asset
val transfer = TxHelpers.transfer(accountA, accountB.toAddress, issue.quantity, asset, version = TxVersion.V1)
val reissue = TxHelpers.reissue(asset, accountA, issue.quantity, version = TxVersion.V1)
val illegalReissue = TxHelpers.reissue(asset, accountB, issue.quantity, version = TxVersion.V1)
(genesis, issue, transfer, reissue, illegalReissue)
}
property("Can issue smart asset with script") {
val acc = TxHelpers.signer(1)
val genesis = TxHelpers.genesis(acc.toAddress)
val issue = TxHelpers.issue(acc, 100, script = Some(ExprScript(CONST_BOOLEAN(true)).explicitGet()))
assertDiffAndState(Seq(TestBlock.create(Seq(genesis))), TestBlock.create(Seq(issue)), smartEnabledFS) {
case (blockDiff, newState) =>
newState.assetDescription(IssuedAsset(issue.id())) shouldBe Some(
AssetDescription(
issue.assetId,
issue.sender,
issue.name,
issue.description,
issue.decimals,
issue.reissuable,
BigInt(issue.quantity),
Height @@ 2,
issue.script.map(
s =>
AssetScriptInfo(
s,
Script
.estimate(s, ScriptEstimatorV1, useContractVerifierLimit = false)
.explicitGet()
)
),
0L,
issue.decimals == 0 && issue.quantity == 1 && !issue.reissuable
)
)
blockDiff.transactions.contains(issue.id()) shouldBe true
        newState.transactionInfo(issue.id()).isDefined shouldBe true
}
}
property("Can transfer when script evaluates to TRUE") {
val (gen, issue, transfer, _, _) = genesisIssueTransferReissue("true")
assertDiffAndState(Seq(TestBlock.create(gen)), TestBlock.create(Seq(issue, transfer)), smartEnabledFS) {
case (blockDiff, newState) =>
val totalPortfolioDiff = Monoid.combineAll(blockDiff.portfolios.values)
totalPortfolioDiff.assets(IssuedAsset(issue.id())) shouldEqual issue.quantity
newState.balance(newState.resolveAlias(transfer.recipient).explicitGet(), IssuedAsset(issue.id())) shouldEqual transfer.amount
}
}
property("Cannot transfer when script evaluates to FALSE") {
val (gen, issue, transfer, _, _) = genesisIssueTransferReissue("false")
assertDiffEi(Seq(TestBlock.create(gen)), TestBlock.create(Seq(issue, transfer)), smartEnabledFS)(
ei => ei should produce("TransactionNotAllowedByScript")
)
}
property("Cannot reissue when script evaluates to FALSE") {
val (gen, issue, _, reissue, _) = genesisIssueTransferReissue("false")
assertDiffEi(Seq(TestBlock.create(gen)), TestBlock.create(Seq(issue, reissue)), smartEnabledFS)(
ei => ei should produce("TransactionNotAllowedByScript")
)
}
property("Only issuer can reissue") {
val (gen, issue, _, _, illegalReissue) = genesisIssueTransferReissue("true")
assertDiffEi(Seq(TestBlock.create(gen)), TestBlock.create(Seq(issue, illegalReissue)), smartEnabledFS) { ei =>
ei should produce("Asset was issued by other address")
}
}
val assetInfoUpdateEnabled: FunctionalitySettings = TestFunctionalitySettings.Enabled
.copy(
preActivatedFeatures = TestFunctionalitySettings.Enabled.preActivatedFeatures + (BlockchainFeatures.BlockV5.id -> 0) + (BlockchainFeatures.NG.id -> 0),
minAssetInfoUpdateInterval = 100
)
property("Can't update before activation") {
val (gen, issue, update) = genesisIssueUpdate
assertDiffEi(Seq(TestBlock.create(gen)), TestBlock.create(Seq(issue, update))) { ei =>
ei should produce("Ride V4, VRF, Protobuf, Failed transactions feature has not been activated yet")
}
}
property(s"Can't update right before ${assetInfoUpdateEnabled.minAssetInfoUpdateInterval} blocks") {
val blocksCount = assetInfoUpdateEnabled.minAssetInfoUpdateInterval - 2
val (gen, issue, update) = genesisIssueUpdate
val blocks = Seq.fill(blocksCount)(TestBlock.create(Seq.empty, Block.ProtoBlockVersion))
assertDiffEi(TestBlock.create(gen :+ issue) +: blocks, TestBlock.create(Seq(update), Block.ProtoBlockVersion), assetInfoUpdateEnabled) { ei =>
ei should produce(
s"Can't update info of asset with id=${issue.id()} " +
s"before ${assetInfoUpdateEnabled.minAssetInfoUpdateInterval + 1} block, " +
s"current height=${blocks.size + 2}, minUpdateInfoInterval=${assetInfoUpdateEnabled.minAssetInfoUpdateInterval}"
)
}
}
property(s"Can update after ${assetInfoUpdateEnabled.minAssetInfoUpdateInterval} blocks") {
val (gen, issue, update) = genesisIssueUpdate
val blocks =
TestBlock.create(gen :+ issue) +: Seq.fill(assetInfoUpdateEnabled.minAssetInfoUpdateInterval)(
TestBlock.create(Seq.empty, Block.ProtoBlockVersion)
)
assertDiffEi(blocks, TestBlock.create(Seq(update), Block.ProtoBlockVersion), assetInfoUpdateEnabled) { ei =>
val info = ei
.explicitGet()
.updatedAssets(update.assetId)
.left
.get
info.name.toStringUtf8 shouldEqual update.name
info.description.toStringUtf8 shouldEqual update.description
}
}
property(s"Can update with CompositeBlockchain") {
val (gen, issues, signer, update1) = genesisIssueUpdateWithSecondAsset
withDomain(domainSettingsWithFS(assetInfoUpdateEnabled.copy(minAssetInfoUpdateInterval = 0))) { d =>
val blockchain = d.blockchainUpdater
val genesisBlock = TestBlock.create(gen ++ issues)
d.appendBlock(genesisBlock)
val (keyBlock, mbs) =
UnsafeBlocks.unsafeChainBaseAndMicro(
genesisBlock.id(),
Nil,
Seq(Seq(update1)),
signer,
Block.ProtoBlockVersion,
genesisBlock.header.timestamp + 100
)
d.appendBlock(keyBlock)
val microBlockId = d.appendMicroBlock(mbs.head)
val issue = issues(0)
val issue1 = issues(1)
{ // Check liquid block
val desc = blockchain.assetDescription(issue.asset).get
desc.name shouldBe issue.name
desc.description shouldBe issue.description
val desc1 = blockchain.assetDescription(issue1.asset).get
desc1.name.toStringUtf8 shouldBe update1.name
desc1.description.toStringUtf8 shouldBe update1.description
desc.lastUpdatedAt shouldBe 1
desc1.lastUpdatedAt shouldBe blockchain.height
}
val (keyBlock1, _) =
UnsafeBlocks.unsafeChainBaseAndMicro(microBlockId, Nil, Nil, signer, Block.ProtoBlockVersion, keyBlock.header.timestamp + 100)
d.appendBlock(keyBlock1)
{ // Check after new key block
val desc = blockchain.assetDescription(issue.asset).get
desc.name shouldBe issue.name
desc.description shouldBe issue.description
val desc1 = blockchain.assetDescription(issue1.asset).get
desc1.name.toStringUtf8 shouldBe update1.name
desc1.description.toStringUtf8 shouldBe update1.description
desc.lastUpdatedAt shouldBe 1
desc1.lastUpdatedAt shouldBe (blockchain.height - 1)
}
}
}
property("Asset V4 complexity limit is 4000") {
val exprV4WithComplexityBetween3000And4000 =
"""
| {-#STDLIB_VERSION 4 #-}
| {-#SCRIPT_TYPE ASSET #-}
| {-#CONTENT_TYPE EXPRESSION #-}
|
| groth16Verify_15inputs(base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK')
""".stripMargin
val exprV4WithComplexityAbove4000 =
"""
| {-#STDLIB_VERSION 4 #-}
| {-#SCRIPT_TYPE ASSET #-}
| {-#CONTENT_TYPE EXPRESSION #-}
|
| groth16Verify_15inputs(base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK') &&
| groth16Verify_15inputs(base64'ZGdnZHMK',base64'ZGdnZHMK',base64'ZGdnZHMK')
""".stripMargin
val rideV4Activated = TestFunctionalitySettings.Enabled.copy(
preActivatedFeatures = Map(
BlockchainFeatures.Ride4DApps.id -> 0,
BlockchainFeatures.BlockV5.id -> 0
)
)
val (genesis1, issue1, _, _, _) = genesisIssueTransferReissue(exprV4WithComplexityBetween3000And4000, V4)
assertDiffAndState(Seq(TestBlock.create(genesis1)), TestBlock.create(Seq(issue1)), rideV4Activated) {
case (blockDiff, _) =>
val totalPortfolioDiff = Monoid.combineAll(blockDiff.portfolios.values)
totalPortfolioDiff.assets(IssuedAsset(issue1.id())) shouldEqual issue1.quantity
}
val (genesis2, issue2, _, _, _) = genesisIssueTransferReissue(exprV4WithComplexityAbove4000, V4)
assertDiffEi(Seq(TestBlock.create(genesis2)), TestBlock.create(Seq(issue2)), rideV4Activated) {
_ should produce("Script is too complex: 5207 > 4000")
}
}
private def genesisIssueUpdate = {
val accountA = TxHelpers.signer(1)
val accountB = TxHelpers.signer(2)
val genesis = Seq(accountA, accountB).map(acc => TxHelpers.genesis(acc.toAddress, Long.MaxValue / 100))
val issue = TxHelpers.issue(accountA, 100, reissuable = false)
val asset = issue.asset
val updateAsset = TxHelpers.updateAssetInfo(asset.id, sender = accountA)
(genesis, issue, updateAsset)
}
private def genesisIssueUpdateWithSecondAsset = {
val (genesis1, issue1, _) = genesisIssueUpdate
val accountC = TxHelpers.signer(1)
val genesis2 = TxHelpers.genesis(accountC.toAddress, Long.MaxValue / 100)
val issue2 = TxHelpers.issue(accountC, issue1.quantity, reissuable = false)
val update2 = TxHelpers.updateAssetInfo(issue2.asset.id, "Invalid", "Invalid", accountC)
(genesis1 :+ genesis2, Seq(issue1, issue2), accountC, update2)
}
property("estimation overflow") {
val testScript = TestCompiler(V3).compileExpression {
val n = 65
s"""
| func f0() = true
         | ${(0 until n).map(i => s"func f${i + 1}() = if (f$i()) then f$i() else f$i()").mkString("\n")}
| f$n()
""".stripMargin
}
def t = System.currentTimeMillis()
val sender = accountGen.sample.get
val genesis = GenesisTransaction.create(sender.toAddress, ENOUGH_AMT, t).explicitGet()
def issue(script: Script) =
IssueTransaction.selfSigned(2.toByte, sender, "name", "", ENOUGH_AMT, 0, true, Some(script), 100000000, t).explicitGet()
def setAssetScript(asset: IssuedAsset) =
SetAssetScriptTransaction.selfSigned(2.toByte, sender, asset, Some(testScript), 100000000, t).explicitGet()
def settings(checkNegative: Boolean = false, checkSumOverflow: Boolean = false): FunctionalitySettings = {
TestFunctionalitySettings
.withFeatures(BlockV5)
.copy(
estimationOverflowFixHeight = if (checkNegative) 0 else 999,
estimatorSumOverflowFixHeight = if (checkSumOverflow) 0 else 999
)
}
def assert(preparingTxs: Seq[Transaction], scriptedTx: () => Transaction) = {
withDomain(domainSettingsWithFS(settings())) { db =>
db.appendBlock(preparingTxs: _*)
val tx = scriptedTx()
db.appendBlock(tx)
db.liquidDiff.errorMessage(tx.id()) shouldBe None
}
withDomain(domainSettingsWithFS(settings(checkNegative = true))) { db =>
db.appendBlock(preparingTxs: _*)
(the[Exception] thrownBy db.appendBlock(scriptedTx())).getMessage should include("Unexpected negative complexity")
}
withDomain(domainSettingsWithFS(settings(checkSumOverflow = true))) { db =>
db.appendBlock(preparingTxs: _*)
(the[Exception] thrownBy db.appendBlock(scriptedTx())).getMessage should include("Illegal script")
}
}
val emptyIssue = issue(TestCompiler(V3).compileExpression("true"))
assert(Seq(genesis, emptyIssue), () => setAssetScript(IssuedAsset(emptyIssue.id())))
assert(Seq(genesis), () => issue(testScript))
}
}
| wavesplatform/Waves | node/src/test/scala/com/wavesplatform/state/diffs/AssetTransactionsDiffTest.scala | Scala | mit | 21,120 |
package com.sageserpent.plutonium
import java.lang.reflect.{InvocationTargetException, Method}
import com.sageserpent.plutonium.ItemExtensionApi.UniqueItemSpecification
import com.sageserpent.plutonium.Patch.MethodPieces
import scala.reflect.runtime.universe
import scala.reflect.runtime.universe._
import scalaz.{-\/, \/, \/-}
object Patch {
type WrappedArgument =
    \/[AnyRef, UniqueItemSpecification]
def wrap(argument: AnyRef): WrappedArgument = argument match {
case argumentRecorder: Recorder =>
      \/-(argumentRecorder.uniqueItemSpecification)
    case _ => -\/(argument)
}
def apply(targetRecorder: Recorder,
method: Method,
arguments: Seq[AnyRef]) = {
val methodPieces = MethodPieces(method.getDeclaringClass,
method.getName,
method.getParameterTypes)
new Patch(methodPieces,
targetRecorder.uniqueItemSpecification,
arguments map wrap)
}
case class MethodPieces(declaringClassOfMethod: Class[_],
methodName: String,
methodParameterTypes: Seq[Class[_]]) {
def method =
declaringClassOfMethod.getMethod(methodName, methodParameterTypes: _*)
}
}
case class Patch(methodPieces: MethodPieces,
override val targetItemSpecification: UniqueItemSpecification,
wrappedArguments: Seq[Patch.WrappedArgument])
extends AbstractPatch {
import Patch._
override def toString: String =
s"Patch for: '$targetItemSpecification', method: '${method.getName}', arguments: '${wrappedArguments.toList}''"
override def rewriteItemTypeTags(
uniqueItemSpecificationToTypeTagMap: collection.Map[
UniqueItemSpecification,
TypeTag[_]]): AbstractPatch = {
val rewrittenTargetItemSpecification: UniqueItemSpecification =
UniqueItemSpecification(
targetItemSpecification.id,
uniqueItemSpecificationToTypeTagMap(targetItemSpecification))
val rewrittenArguments: Seq[WrappedArgument] = wrappedArguments map (_.map(
argumentUniqueItemSpecification =>
UniqueItemSpecification(argumentUniqueItemSpecification.id,
uniqueItemSpecificationToTypeTagMap(
argumentUniqueItemSpecification))))
new Patch(methodPieces,
rewrittenTargetItemSpecification,
rewrittenArguments)
}
@transient
override lazy val method = methodPieces.method
override val argumentItemSpecifications: Seq[UniqueItemSpecification] =
wrappedArguments collect {
      case \/-(uniqueItemSpecification) => uniqueItemSpecification
}
def unwrap(identifiedItemAccess: IdentifiedItemAccess)(
wrappedArgument: WrappedArgument) =
wrappedArgument.fold(
identity,
identifiedItemAccess.reconstitute(_).asInstanceOf[AnyRef])
def apply(identifiedItemAccess: IdentifiedItemAccess): Unit = {
val targetBeingPatched =
identifiedItemAccess.reconstitute(targetItemSpecification)
try {
method.invoke(targetBeingPatched,
wrappedArguments map unwrap(identifiedItemAccess): _*)
} catch {
case exception: InvocationTargetException =>
throw exception.getTargetException
}
}
def checkInvariants(identifiedItemAccess: IdentifiedItemAccess): Unit = {
identifiedItemAccess
.reconstitute(targetItemSpecification)
.asInstanceOf[ItemExtensionApi]
.checkInvariant()
for (argument <- argumentItemSpecifications map identifiedItemAccess.reconstitute) {
argument.asInstanceOf[ItemExtensionApi].checkInvariant()
}
}
}
| sageserpent-open/open-plutonium | src/main/scala/com/sageserpent/plutonium/Patch.scala | Scala | mit | 3,706 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import java.io.IOException
import java.util.{List => JList}
import javax.security.auth.login.LoginException
import scala.collection.JavaConverters._
import org.apache.commons.logging.Log
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.conf.HiveConf.ConfVars
import org.apache.hadoop.hive.shims.Utils
import org.apache.hadoop.security.{SecurityUtil, UserGroupInformation}
import org.apache.hive.service.{AbstractService, Service, ServiceException}
import org.apache.hive.service.Service.STATE
import org.apache.hive.service.auth.HiveAuthFactory
import org.apache.hive.service.cli._
import org.apache.hive.service.server.HiveServer2
import org.slf4j.Logger
import org.apache.spark.sql.SQLContext
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.hive.thriftserver.ReflectionUtils._
private[hive] class SparkSQLCLIService(hiveServer: HiveServer2, sqlContext: SQLContext)
extends CLIService(hiveServer)
with ReflectedCompositeService {
override def init(hiveConf: HiveConf): Unit = {
setSuperField(this, "hiveConf", hiveConf)
val sparkSqlSessionManager = new SparkSQLSessionManager(hiveServer, sqlContext)
setSuperField(this, "sessionManager", sparkSqlSessionManager)
addService(sparkSqlSessionManager)
var sparkServiceUGI: UserGroupInformation = null
var httpUGI: UserGroupInformation = null
if (UserGroupInformation.isSecurityEnabled) {
try {
val principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL)
val keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB)
if (principal.isEmpty || keyTabFile.isEmpty) {
throw new IOException(
"HiveServer2 Kerberos principal or keytab is not correctly configured")
}
val originalUgi = UserGroupInformation.getCurrentUser
sparkServiceUGI = if (HiveAuthFactory.needUgiLogin(originalUgi,
SecurityUtil.getServerPrincipal(principal, "0.0.0.0"), keyTabFile)) {
HiveAuthFactory.loginFromKeytab(hiveConf)
Utils.getUGI()
} else {
originalUgi
}
setSuperField(this, "serviceUGI", sparkServiceUGI)
} catch {
case e @ (_: IOException | _: LoginException) =>
throw new ServiceException("Unable to login to kerberos with given principal/keytab", e)
}
// Try creating spnego UGI if it is configured.
val principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_PRINCIPAL).trim
val keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_KEYTAB).trim
if (principal.nonEmpty && keyTabFile.nonEmpty) {
try {
httpUGI = HiveAuthFactory.loginFromSpnegoKeytabAndReturnUGI(hiveConf)
setSuperField(this, "httpUGI", httpUGI)
} catch {
case e: IOException =>
throw new ServiceException("Unable to login to spnego with given principal " +
s"$principal and keytab $keyTabFile: $e", e)
}
}
}
initCompositeService(hiveConf)
}
override def getInfo(sessionHandle: SessionHandle, getInfoType: GetInfoType): GetInfoValue = {
getInfoType match {
case GetInfoType.CLI_SERVER_NAME => new GetInfoValue("Spark SQL")
case GetInfoType.CLI_DBMS_NAME => new GetInfoValue("Spark SQL")
case GetInfoType.CLI_DBMS_VER => new GetInfoValue(sqlContext.sparkContext.version)
case _ => super.getInfo(sessionHandle, getInfoType)
}
}
}
private[thriftserver] trait ReflectedCompositeService { this: AbstractService =>
def initCompositeService(hiveConf: HiveConf): Unit = {
// Emulating `CompositeService.init(hiveConf)`
val serviceList = getAncestorField[JList[Service]](this, 2, "serviceList")
serviceList.asScala.foreach(_.init(hiveConf))
// Emulating `AbstractService.init(hiveConf)`
invoke(classOf[AbstractService], this, "ensureCurrentState", classOf[STATE] -> STATE.NOTINITED)
setAncestorField(this, 3, "hiveConf", hiveConf)
invoke(classOf[AbstractService], this, "changeState", classOf[STATE] -> STATE.INITED)
if (HiveUtils.isHive23) {
getAncestorField[Logger](this, 3, "LOG").info(s"Service: $getName is inited.")
} else {
getAncestorField[Log](this, 3, "LOG").info(s"Service: $getName is inited.")
}
}
}
| goldmedal/spark | sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIService.scala | Scala | apache-2.0 | 5,150 |
package deburnat.transade.gui.north
import swing.{Publisher, Point, event}
import event._
import javax.swing.{JFileChooser, filechooser}
import filechooser.FileNameExtensionFilter
import deburnat.transade.gui.admins.GuiAdmin.{xml, _xml, view, tRead}
import java.io.File
import deburnat.transade.gui.components.MonoTextField
/**
* Project name: transade
* @author Patrick Meppe ([email protected])
* Description:
* An algorithm for the transfer of selected/adapted data
* from one repository to another.
*
* Date: 1/1/14
* Time: 12:00 AM
*/
/**
 * This component extends the MonoTextField class.
 * By additionally extending the scala.swing.Publisher trait, it gains the ability to be listened to
 * by other components such as the TabbedPane component below.
* @param templates see TemplatesComboBox.scala
*/
protected[gui] class TransFileChooser(templates: TemplatesComboBox)
extends MonoTextField with Publisher{
/**
* This method is used to open the file chooser.
* @param path The current path
* @param point The location where the file chooser should appear.
* @return The newly chosen path otherwise the path prior to the method invocation.
*/
private def openFileChooserAt(path: String, point: Point): String = {
val file = new File(path)
/* An extension of the javax.swing.JFileChooser was chosen over the scala.swing.FileChooser
     * because it was the only way to obtain a file chooser whose location can be changed at will.
* The original JFileChooser automatically sets the chooser component in the middle of its
* parent component.
*/
object MoveableFileChooser extends JFileChooser(if(file.exists) file else new File("")){
override protected def createDialog(parent: java.awt.Component) = {
val dialog = super.createDialog(parent)
dialog.setLocation(point) //dialog.setResizable(false)
dialog
}
setFileSelectionMode(JFileChooser.FILES_ONLY)
setFileFilter(new FileNameExtensionFilter(_xml+" "+ view.read("files"), xml)) //only .xml files can be chosen
}
if(MoveableFileChooser.showOpenDialog(null) == JFileChooser.APPROVE_OPTION){
//Set the TemplatesComboBox to its default value because the TransFileChooser is currently being used
templates.reset
val path = MoveableFileChooser.getSelectedFile.getAbsolutePath
publish(TransFileChosenEvent(path))
foreground = on
path
}else path
/* Here is the scala version of the FileChooser
* val fileChooser = new FileChooser(new File("./")){
* fileSelectionMode = FileChooser.SelectionMode.FilesOnly
* fileFilter = new FileNameExtensionFilter(".xml files", "xml")
* //peer.setLocation(point) //This statement doesn't work.
* }
* if(fileChooser.showOpenDialog(null) == FileChooser.Result.Approve)
* ...
*/
}
//the component itself
tooltip = tRead("filechooser")
val vText = view.read("textfilechooser")
text = vText //default settings
foreground = off
def empty = text.trim == vText && foreground == off
listenTo(keys, mouse.clicks, templates)
reactions += {
case e: MouseClicked =>
val mButton = e.peer.getButton
if(mButton == 1) text = openFileChooserAt(text, e.peer.getLocationOnScreen) //left click
else{ //right click (and wheel click)
if(empty) text = ""
requestFocus
}
foreground = on
case KeyPressed(_, Key.Enter,_,_) =>
if(text.endsWith(_xml) && new File(text).exists) publish(TransFileChosenEvent(text))
else text = openFileChooserAt(text, locationOnScreen) //src.bounds.getLocation
case e: TemplateSelectedEvent =>
text = vText
foreground = off
      //empty the TransFileChooser because the TemplatesComboBox is currently being used
case e: FocusLost => if(text.trim.isEmpty){
text = vText
foreground = off
}
case e: FocusGained => if(empty){
text = ""
foreground = on
}
}
}
/**
* This event is used to pass the chosen file using the file chooser to the tabbed pane.
* @param xmlFilePath The chosen file xmlFilePath.
*/
protected[gui] case class TransFileChosenEvent(xmlFilePath: String) extends Event
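
/* Hedged sketch (not part of the original sources): one possible way another
 * gui component could react to the TransFileChosenEvent published above, using
 * the standard scala.swing Reactor API. The callback parameter is made up for
 * illustration.
 */
protected[gui] object TransFileChosenListenerSketch {
  import scala.swing.Reactor

  /** Wires a reactor that invokes onChosen with each chosen xml file path. */
  def wire(chooser: TransFileChooser)(onChosen: String => Unit): Reactor = new Reactor {
    listenTo(chooser) // TransFileChooser is a Publisher, so it can be listened to directly
    reactions += {
      case TransFileChosenEvent(path) => onChosen(path)
    }
  }
}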
| deburnatshazem/transade | gui/src/main/scala/deburnat/transade/gui/north/TransFileChooser.scala | Scala | apache-2.0 | 4,241 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.jdbc
import java.math.BigDecimal
import java.sql.{Connection, Date, Timestamp}
import java.util.Properties
import org.apache.spark.tags.DockerTest
@DockerTest
class MySQLIntegrationSuite extends DockerJDBCIntegrationSuite {
override val db = new DatabaseOnDocker {
override val imageName = "mysql:5.7.9"
override val env = Map(
"MYSQL_ROOT_PASSWORD" -> "rootpass"
)
override val jdbcPort: Int = 3306
override def getJdbcUrl(ip: String, port: Int): String =
s"jdbc:mysql://$ip:$port/mysql?user=root&password=rootpass"
}
override def dataPreparation(conn: Connection): Unit = {
conn.prepareStatement("CREATE DATABASE foo").executeUpdate()
conn.prepareStatement("CREATE TABLE tbl (x INTEGER, y TEXT(8))").executeUpdate()
conn.prepareStatement("INSERT INTO tbl VALUES (42,'fred')").executeUpdate()
conn.prepareStatement("INSERT INTO tbl VALUES (17,'dave')").executeUpdate()
conn.prepareStatement("CREATE TABLE numbers (onebit BIT(1), tenbits BIT(10), "
+ "small SMALLINT, med MEDIUMINT, nor INT, big BIGINT, deci DECIMAL(40,20), flt FLOAT, "
+ "dbl DOUBLE)").executeUpdate()
conn.prepareStatement("INSERT INTO numbers VALUES (b'0', b'1000100101', "
+ "17, 77777, 123456789, 123456789012345, 123456789012345.123456789012345, "
+ "42.75, 1.0000000000000002)").executeUpdate()
conn.prepareStatement("CREATE TABLE dates (d DATE, t TIME, dt DATETIME, ts TIMESTAMP, "
+ "yr YEAR)").executeUpdate()
conn.prepareStatement("INSERT INTO dates VALUES ('1991-11-09', '13:31:24', "
+ "'1996-01-01 01:23:45', '2009-02-13 23:31:30', '2001')").executeUpdate()
// TODO: Test locale conversion for strings.
conn.prepareStatement("CREATE TABLE strings (a CHAR(10), b VARCHAR(10), c TINYTEXT, "
+ "d TEXT, e MEDIUMTEXT, f LONGTEXT, g BINARY(4), h VARBINARY(10), i BLOB)"
).executeUpdate()
conn.prepareStatement("INSERT INTO strings VALUES ('the', 'quick', 'brown', 'fox', " +
"'jumps', 'over', 'the', 'lazy', 'dog')").executeUpdate()
}
test("Basic test") {
val df = sqlContext.read.jdbc(jdbcUrl, "tbl", new Properties)
val rows = df.collect()
assert(rows.length == 2)
val types = rows(0).toSeq.map(x => x.getClass.toString)
assert(types.length == 2)
assert(types(0).equals("class java.lang.Integer"))
assert(types(1).equals("class java.lang.String"))
}
test("Numeric types") {
val df = sqlContext.read.jdbc(jdbcUrl, "numbers", new Properties)
val rows = df.collect()
assert(rows.length == 1)
val types = rows(0).toSeq.map(x => x.getClass.toString)
assert(types.length == 9)
assert(types(0).equals("class java.lang.Boolean"))
assert(types(1).equals("class java.lang.Long"))
assert(types(2).equals("class java.lang.Integer"))
assert(types(3).equals("class java.lang.Integer"))
assert(types(4).equals("class java.lang.Integer"))
assert(types(5).equals("class java.lang.Long"))
assert(types(6).equals("class java.math.BigDecimal"))
assert(types(7).equals("class java.lang.Double"))
assert(types(8).equals("class java.lang.Double"))
assert(rows(0).getBoolean(0) == false)
assert(rows(0).getLong(1) == 0x225)
assert(rows(0).getInt(2) == 17)
assert(rows(0).getInt(3) == 77777)
assert(rows(0).getInt(4) == 123456789)
assert(rows(0).getLong(5) == 123456789012345L)
val bd = new BigDecimal("123456789012345.12345678901234500000")
assert(rows(0).getAs[BigDecimal](6).equals(bd))
assert(rows(0).getDouble(7) == 42.75)
assert(rows(0).getDouble(8) == 1.0000000000000002)
}
test("Date types") {
val df = sqlContext.read.jdbc(jdbcUrl, "dates", new Properties)
val rows = df.collect()
assert(rows.length == 1)
val types = rows(0).toSeq.map(x => x.getClass.toString)
assert(types.length == 5)
assert(types(0).equals("class java.sql.Date"))
assert(types(1).equals("class java.sql.Timestamp"))
assert(types(2).equals("class java.sql.Timestamp"))
assert(types(3).equals("class java.sql.Timestamp"))
assert(types(4).equals("class java.sql.Date"))
assert(rows(0).getAs[Date](0).equals(Date.valueOf("1991-11-09")))
assert(rows(0).getAs[Timestamp](1).equals(Timestamp.valueOf("1970-01-01 13:31:24")))
assert(rows(0).getAs[Timestamp](2).equals(Timestamp.valueOf("1996-01-01 01:23:45")))
assert(rows(0).getAs[Timestamp](3).equals(Timestamp.valueOf("2009-02-13 23:31:30")))
assert(rows(0).getAs[Date](4).equals(Date.valueOf("2001-01-01")))
}
test("String types") {
val df = sqlContext.read.jdbc(jdbcUrl, "strings", new Properties)
val rows = df.collect()
assert(rows.length == 1)
val types = rows(0).toSeq.map(x => x.getClass.toString)
assert(types.length == 9)
assert(types(0).equals("class java.lang.String"))
assert(types(1).equals("class java.lang.String"))
assert(types(2).equals("class java.lang.String"))
assert(types(3).equals("class java.lang.String"))
assert(types(4).equals("class java.lang.String"))
assert(types(5).equals("class java.lang.String"))
assert(types(6).equals("class [B"))
assert(types(7).equals("class [B"))
assert(types(8).equals("class [B"))
assert(rows(0).getString(0).equals("the"))
assert(rows(0).getString(1).equals("quick"))
assert(rows(0).getString(2).equals("brown"))
assert(rows(0).getString(3).equals("fox"))
assert(rows(0).getString(4).equals("jumps"))
assert(rows(0).getString(5).equals("over"))
assert(java.util.Arrays.equals(rows(0).getAs[Array[Byte]](6), Array[Byte](116, 104, 101, 0)))
assert(java.util.Arrays.equals(rows(0).getAs[Array[Byte]](7), Array[Byte](108, 97, 122, 121)))
assert(java.util.Arrays.equals(rows(0).getAs[Array[Byte]](8), Array[Byte](100, 111, 103)))
}
test("Basic write test") {
val df1 = sqlContext.read.jdbc(jdbcUrl, "numbers", new Properties)
val df2 = sqlContext.read.jdbc(jdbcUrl, "dates", new Properties)
val df3 = sqlContext.read.jdbc(jdbcUrl, "strings", new Properties)
df1.write.jdbc(jdbcUrl, "numberscopy", new Properties)
df2.write.jdbc(jdbcUrl, "datescopy", new Properties)
df3.write.jdbc(jdbcUrl, "stringscopy", new Properties)
}
}
| chenc10/Spark-PAF | docker-integration-tests/src/test/scala/org/apache/spark/sql/jdbc/MySQLIntegrationSuite.scala | Scala | apache-2.0 | 7,057 |
/*
* Copyright 2010 LinkedIn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.integration
import kafka.consumer.SimpleConsumer
import org.scalatest.junit.JUnit3Suite
import java.util.Properties
import kafka.producer.{SyncProducerConfig, SyncProducer}
trait ProducerConsumerTestHarness extends JUnit3Suite {
val port: Int
val host = "localhost"
var producer: SyncProducer = null
var consumer: SimpleConsumer = null
override def setUp() {
val props = new Properties()
props.put("host", host)
props.put("port", port.toString)
props.put("buffer.size", "65536")
props.put("connect.timeout.ms", "100000")
props.put("reconnect.interval", "10000")
producer = new SyncProducer(new SyncProducerConfig(props))
consumer = new SimpleConsumer(host,
port,
1000000,
64*1024)
super.setUp
}
override def tearDown() {
super.tearDown
producer.close()
consumer.close()
}
}
| tcrayford/hafka | kafka/core/src/test/scala/unit/kafka/integration/ProducerConsumerTestHarness.scala | Scala | bsd-3-clause | 1,593 |
package io.fintrospect.parameters
import com.twitter.finagle.http._
import com.twitter.finagle.http.exp.{Multipart, MultipartDecoder}
import io.fintrospect.ContentTypes.MULTIPART_FORM
import io.fintrospect.util.{Extraction, ExtractionError, ExtractionFailed, Extractor}
import scala.collection.MapView
import scala.util.{Failure, Success, Try}
case class MultiPartFormBody(formContents: Seq[FormField[_] with Extractor[Form, _]],
validator: FormValidator, extractor: FormFieldExtractor)
extends Body[Form] {
override val contentType = MULTIPART_FORM
override def iterator = formContents.iterator
override def -->(value: Form): Seq[RequestBinding] =
Seq(new RequestBinding(null, req => {
val fields = value.fields.flatMap(f => f._2.map(g => SimpleElement(f._1, g))).toSeq
val files = value.files.flatMap(f => f._2.map(_.toFileElement(f._1))).toSeq
val next = RequestBuilder()
.url("http://notreallyaserver")
.addHeaders(Map(req.headerMap.toSeq: _*))
.add(fields ++ files)
.buildFormPost(multipart = true)
next.uri = req.uri
next
}))
override def <--?(message: Message): Extraction[Form] = message match {
case r: Request =>
Try {
val multipart = MultipartDecoder.decode(r).get
validator(formContents, Form(multipart.attributes, filterOutFilesWithNoFilename(multipart)))
} match {
case Success(form) => extractor(formContents, form)
case Failure(_) => ExtractionFailed(formContents.filter(_.required).map(param => ExtractionError(param, "Could not parse")))
}
case _ => ExtractionFailed(formContents.map(f => ExtractionError(f, "Could not parse")))
}
private def filterOutFilesWithNoFilename(multipart: Multipart): Map[String, Seq[MultiPartFile]] = multipart.files
.view
.mapValues(_.filterNot(_.fileName.isEmpty)
.map(MultiPartFile(_)))
.filterNot(_._2.isEmpty).toMap
} | daviddenton/fintrospect | core/src/main/scala/io/fintrospect/parameters/MultiPartFormBody.scala | Scala | apache-2.0 | 1,961 |
package pimpathon.java.util
import java.util.{Calendar, Date}
import pimpathon.PSpec
import pimpathon.any._
import pimpathon.java.util.date._
class DateSpec extends PSpec {
"addDay" in
on(-1, 1, 7).calling(date(2015, 3, 24).addDay).produces(date(2015, 3, 23), date(2015, 3, 25), date(2015, 4, 1))
private def date(year: Int, month: Int, day: Int): Date =
Calendar.getInstance().tap(_.set(year, month, day, 0, 0, 0), _.set(Calendar.MILLISECOND, 0)).getTime
} | stacycurl/pimpathon | src/test/scala/pimpathon/java/util/DateTest.scala | Scala | apache-2.0 | 473 |
import sbt._
import Keys._
import scala.annotation.tailrec
import bintray.Plugin.bintrayPublishSettings
import bintray.Keys.{repository, bintrayOrganization, bintray}
import com.typesafe.tools.mima.plugin.MimaPlugin.mimaDefaultSettings
import com.typesafe.tools.mima.plugin.MimaKeys.{previousArtifact, binaryIssueFilters}
import java.io.{
BufferedOutputStream,
FileOutputStream,
BufferedWriter,
FileWriter
}
import scala.collection.mutable
import scala.util.Properties
import org.scalajs.core.ir
import org.scalajs.core.ir.Utils.escapeJS
import org.scalajs.sbtplugin._
import org.scalajs.jsenv.{JSEnv, RetryingComJSEnv}
import org.scalajs.jsenv.rhino.RhinoJSEnv
import org.scalajs.jsenv.nodejs.NodeJSEnv
import org.scalajs.jsenv.phantomjs.PhantomJSEnv
import ScalaJSPlugin.autoImport._
import ExternalCompile.scalaJSExternalCompileSettings
import Implicits._
import org.scalajs.core.tools.sourcemap._
import org.scalajs.core.tools.io.MemVirtualJSFile
import org.scalajs.core.tools.sem.CheckedBehavior
import sbtassembly.AssemblyPlugin.autoImport._
object Build extends sbt.Build {
val isGeneratingEclipse =
Properties.envOrElse("GENERATING_ECLIPSE", "false").toBoolean
val fetchScalaSource = taskKey[File](
"Fetches the scala source for the current scala version")
val shouldPartest = settingKey[Boolean](
"Whether we should partest the current scala version (and fail if we can't)")
val previousVersion = "0.6.5"
val previousSJSBinaryVersion =
ScalaJSCrossVersion.binaryScalaJSVersion(previousVersion)
val previousBinaryCrossVersion =
CrossVersion.binaryMapped(v => s"sjs${previousSJSBinaryVersion}_$v")
val scalaVersionsUsedForPublishing: Set[String] =
Set("2.10.6", "2.11.7", "2.12.0-M3")
val newScalaBinaryVersionsInThisRelease: Set[String] =
Set()
val javaVersion = settingKey[Int](
"The major Java SDK version that should be assumed for compatibility. " +
"Defaults to what sbt is running with.")
val javaDocBaseURL: String = "http://docs.oracle.com/javase/8/docs/api/"
val previousArtifactSetting: Setting[_] = {
previousArtifact := {
val scalaV = scalaVersion.value
val scalaBinaryV = scalaBinaryVersion.value
if (!scalaVersionsUsedForPublishing.contains(scalaV)) {
// This artifact will not be published. Binary compatibility is irrelevant.
None
} else if (newScalaBinaryVersionsInThisRelease.contains(scalaBinaryV)) {
// New in this release, no binary compatibility to comply to
None
} else if (scalaBinaryV == "2.12.0-M3") {
// See #1865: MiMa is much too noisy with 2.12.0-M3 to be useful
None
} else {
val thisProjectID = projectID.value
val previousCrossVersion = thisProjectID.crossVersion match {
case ScalaJSCrossVersion.binary => previousBinaryCrossVersion
case crossVersion => crossVersion
}
val prevProjectID =
(thisProjectID.organization % thisProjectID.name % previousVersion)
.cross(previousCrossVersion)
.extra(thisProjectID.extraAttributes.toSeq: _*)
Some(CrossVersion(scalaV, scalaBinaryV)(prevProjectID).cross(CrossVersion.Disabled))
}
}
}
val commonSettings = Seq(
scalaVersion := "2.11.7",
organization := "org.scala-js",
version := scalaJSVersion,
normalizedName ~= {
_.replace("scala.js", "scalajs").replace("scala-js", "scalajs")
},
homepage := Some(url("http://scala-js.org/")),
licenses += ("BSD New",
url("https://github.com/scala-js/scala-js/blob/master/LICENSE")),
scmInfo := Some(ScmInfo(
url("https://github.com/scala-js/scala-js"),
"scm:git:[email protected]:scala-js/scala-js.git",
Some("scm:git:[email protected]:scala-js/scala-js.git"))),
shouldPartest := {
val testListDir = (
(resourceDirectory in (partestSuite, Test)).value / "scala"
/ "tools" / "partest" / "scalajs" / scalaVersion.value
)
testListDir.exists
},
scalacOptions ++= Seq(
"-deprecation",
"-unchecked",
"-feature",
"-encoding", "utf8"
),
// Scaladoc linking
apiURL := {
val name = normalizedName.value
Some(url(s"http://www.scala-js.org/api/$name/$scalaJSVersion/"))
},
autoAPIMappings := true,
// Add Java Scaladoc mapping
apiMappings += {
val rtJar = {
System.getProperty("sun.boot.class.path")
.split(java.io.File.pathSeparator)
.find(_.endsWith(java.io.File.separator + "rt.jar")).get
}
file(rtJar) -> url(javaDocBaseURL)
},
/* Patch the ScalaDoc we generate.
*
* After executing the normal doc command, copy everything to the
* `patched-api` directory (same internal directory structure) while
* patching the following:
*
* - Append `additional-doc-styles.css` to `lib/template.css`
* - Fix external links to the JavaDoc, i.e. change
* `${javaDocBaseURL}index.html#java.lang.String` to
* `${javaDocBaseURL}index.html?java/lang/String.html`
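     *   (see the illustrative standalone sketch after `commonSettings` below)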
*/
doc in Compile := {
// Where to store the patched docs
val outDir = crossTarget.value / "patched-api"
// Find all files in the current docs
val docPaths = {
val docDir = (doc in Compile).value
Path.selectSubpaths(docDir, new SimpleFileFilter(_.isFile)).toMap
}
/* File with our CSS styles (needs to be canonical so that the
* comparison below works)
*/
val additionalStylesFile =
(root.base / "assets/additional-doc-styles.css").getCanonicalFile
// Regex and replacement function for JavaDoc linking
val javadocAPIRe =
s"""\\"(\\\\Q${javaDocBaseURL}index.html\\\\E)#([^"]*)\\"""".r
val logger = streams.value.log
val errorsSeen = mutable.Set.empty[String]
val fixJavaDocLink = { (m: scala.util.matching.Regex.Match) =>
val frag = m.group(2)
// Fail when encountering links to class members
if (frag.contains("@") && !errorsSeen.contains(frag)) {
errorsSeen += frag
logger.error(s"Cannot fix JavaDoc link to member: $frag")
}
m.group(1) + "?" + frag.replace('.', '/') + ".html"
}
FileFunction.cached(streams.value.cacheDirectory,
FilesInfo.lastModified, FilesInfo.exists) { files =>
for {
file <- files
if file != additionalStylesFile
} yield {
val relPath = docPaths(file)
val outFile = outDir / relPath
if (relPath == "lib/template.css") {
val styles = IO.read(additionalStylesFile)
IO.copyFile(file, outFile)
IO.append(outFile, styles)
} else if (relPath.endsWith(".html")) {
val content = IO.read(file)
val patched = javadocAPIRe.replaceAllIn(content, fixJavaDocLink)
IO.write(outFile, patched)
} else {
IO.copyFile(file, outFile)
}
outFile
}
} (docPaths.keySet + additionalStylesFile)
if (errorsSeen.size > 0) sys.error("ScalaDoc patching had errors")
else outDir
}
) ++ mimaDefaultSettings
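
  /* Hedged illustration (not part of the original build): the JavaDoc-link fix
   * inside `commonSettings` above rewrites fragment-style links into path-style
   * ones, e.g. the fragment "java.lang.String" becomes "java/lang/String.html".
   * A minimal standalone sketch of just that fragment conversion:
   */
  private lazy val javaDocFragmentToPathSketch: String => String =
    frag => javaDocBaseURL + "index.html?" + frag.replace('.', '/') + ".html"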
val noClassFilesSettings: Setting[_] = (
scalacOptions in (Compile, compile) ++= {
if (isGeneratingEclipse) Seq()
else Seq("-Yskip:cleanup,icode,jvm")
}
)
val publishSettings = Seq(
publishMavenStyle := true,
publishTo := {
val nexus = "https://oss.sonatype.org/"
if (isSnapshot.value)
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
},
pomExtra := (
<developers>
<developer>
<id>sjrd</id>
<name>Sébastien Doeraene</name>
<url>https://github.com/sjrd/</url>
</developer>
<developer>
<id>gzm0</id>
<name>Tobias Schlatter</name>
<url>https://github.com/gzm0/</url>
</developer>
<developer>
<id>nicolasstucki</id>
<name>Nicolas Stucki</name>
<url>https://github.com/nicolasstucki/</url>
</developer>
</developers>
),
pomIncludeRepository := { _ => false }
)
val fatalWarningsSettings = Seq(
// The pattern matcher used to exceed its analysis budget before 2.11.5
scalacOptions ++= {
scalaVersion.value.split('.') match {
case Array("2", "10", _) => Nil
case Array("2", "11", x)
if x.takeWhile(_.isDigit).toInt <= 4 => Nil
case _ => Seq("-Xfatal-warnings")
}
},
scalacOptions in (Compile, doc) := {
val baseOptions = (scalacOptions in (Compile, doc)).value
/* - need JDK7 to link the doc to java.nio.charset.StandardCharsets
* - in Scala 2.10, some ScalaDoc links fail
*/
val fatalInDoc =
javaVersion.value >= 7 && scalaBinaryVersion.value != "2.10"
if (fatalInDoc) baseOptions
else baseOptions.filterNot(_ == "-Xfatal-warnings")
}
)
private def publishToScalaJSRepoSettings = Seq(
publishTo := {
Seq("PUBLISH_USER", "PUBLISH_PASS").map(Properties.envOrNone) match {
case Seq(Some(user), Some(pass)) =>
val snapshotsOrReleases =
if (scalaJSIsSnapshotVersion) "snapshots" else "releases"
Some(Resolver.sftp(
s"scala-js-$snapshotsOrReleases",
"repo.scala-js.org",
s"/home/scalajsrepo/www/repo/$snapshotsOrReleases")(
Resolver.ivyStylePatterns) as (user, pass))
case _ =>
None
}
}
)
private def publishToBintraySettings = (
bintrayPublishSettings
) ++ Seq(
repository in bintray := "scala-js-releases",
bintrayOrganization in bintray := Some("scala-js")
)
val publishIvySettings = (
if (Properties.envOrNone("PUBLISH_TO_BINTRAY") == Some("true"))
publishToBintraySettings
else
publishToScalaJSRepoSettings
) ++ Seq(
publishMavenStyle := false
)
val myScalaJSSettings = ScalaJSPluginInternal.scalaJSAbstractSettings ++ Seq(
autoCompilerPlugins := true,
scalaJSOptimizerOptions ~= (_.withCheckScalaJSIR(true)),
testFrameworks +=
TestFramework("org.scalajs.jasminetest.JasmineFramework"),
// Link source maps
scalacOptions ++= {
if (isGeneratingEclipse) Seq()
else if (scalaJSIsSnapshotVersion) Seq()
else Seq(
// Link source maps to github sources
"-P:scalajs:mapSourceURI:" + root.base.toURI +
"->https://raw.githubusercontent.com/scala-js/scala-js/v" +
scalaJSVersion + "/"
)
}
)
implicit class ProjectOps(val project: Project) extends AnyVal {
/** Uses the Scala.js compiler plugin. */
def withScalaJSCompiler: Project =
if (isGeneratingEclipse) project
else project.dependsOn(compiler % "plugin")
/** Depends on library as if (exportJars in library) was set to false. */
def dependsOnLibraryNoJar: Project = {
if (isGeneratingEclipse) {
project.dependsOn(library)
} else {
project.settings(
internalDependencyClasspath in Compile ++= {
val prods = (products in (library, Compile)).value
val analysis = (compile in (library, Compile)).value
prods.map(p => Classpaths.analyzed(p, analysis))
}
)
}
}
/** Depends on the sources of another project. */
def dependsOnSource(dependency: Project): Project = {
if (isGeneratingEclipse) {
project.dependsOn(dependency)
} else {
project.settings(
unmanagedSourceDirectories in Compile +=
(scalaSource in (dependency, Compile)).value
)
}
}
}
override lazy val settings = super.settings ++ Seq(
// Most of the projects cross-compile
crossScalaVersions := Seq(
"2.10.2",
"2.10.3",
"2.10.4",
"2.10.5",
"2.10.6",
"2.11.0",
"2.11.1",
"2.11.2",
"2.11.4",
"2.11.5",
"2.11.6",
"2.11.7",
"2.12.0-M3"
),
// Default stage
scalaJSStage in Global := PreLinkStage,
// JDK version we are running with
javaVersion in Global := {
val v = System.getProperty("java.version")
v.substring(0, 3) match {
case "1.8" => 8
case "1.7" => 7
case "1.6" => 6
case _ =>
sLog.value.warn(s"Unknown JDK version $v. Assuming max compat.")
Int.MaxValue
}
}
)
lazy val root: Project = Project(
id = "scalajs",
base = file("."),
settings = commonSettings ++ Seq(
name := "Scala.js",
publishArtifact in Compile := false,
clean := clean.dependsOn(
clean in compiler,
clean in irProject, clean in irProjectJS,
clean in tools, clean in toolsJS, clean in jsEnvs,
clean in testAdapter, clean in plugin,
clean in javalanglib, clean in javalib, clean in scalalib,
clean in libraryAux, clean in library, clean in javalibEx,
clean in stubs, clean in cli,
clean in testInterface, clean in jasmineTestFramework,
clean in jUnitRuntime, clean in jUnitPlugin,
clean in examples, clean in helloworld,
clean in reversi, clean in testingExample,
clean in testSuite, clean in testSuiteJVM, clean in noIrCheckTest,
clean in javalibExTestSuite,
clean in partest, clean in partestSuite).value,
publish := {},
publishLocal := {}
)
)
val commonIrProjectSettings = (
commonSettings ++ publishSettings ++ fatalWarningsSettings
) ++ Seq(
name := "Scala.js IR",
/* Scala.js 0.6.6 will break binary compatibility of the IR
*/
// previousArtifactSetting,
binaryIssueFilters ++= BinaryIncompatibilities.IR,
exportJars := true // required so ScalaDoc linking works
)
lazy val irProject: Project = Project(
id = "ir",
base = file("ir"),
settings = commonIrProjectSettings
)
lazy val irProjectJS: Project = Project(
id = "irJS",
base = file("ir/.js"),
settings = commonIrProjectSettings ++ myScalaJSSettings ++ Seq(
crossVersion := ScalaJSCrossVersion.binary,
unmanagedSourceDirectories in Compile +=
(scalaSource in Compile in irProject).value
)
).withScalaJSCompiler.dependsOn(javalibEx)
lazy val compiler: Project = Project(
id = "compiler",
base = file("compiler"),
settings = commonSettings ++ publishSettings ++ Seq(
name := "Scala.js compiler",
crossVersion := CrossVersion.full, // because compiler api is not binary compatible
libraryDependencies ++= Seq(
"org.scala-lang" % "scala-compiler" % scalaVersion.value,
"org.scala-lang" % "scala-reflect" % scalaVersion.value,
"com.novocode" % "junit-interface" % "0.9" % "test"
),
testOptions += Tests.Setup { () =>
val testOutDir = (streams.value.cacheDirectory / "scalajs-compiler-test")
IO.createDirectory(testOutDir)
sys.props("scala.scalajs.compiler.test.output") =
testOutDir.getAbsolutePath
sys.props("scala.scalajs.compiler.test.scalajslib") =
(packageBin in (library, Compile)).value.getAbsolutePath
sys.props("scala.scalajs.compiler.test.scalalib") = {
def isScalaLib(att: Attributed[File]) = {
att.metadata.get(moduleID.key).exists { mId =>
mId.organization == "org.scala-lang" &&
mId.name == "scala-library" &&
mId.revision == scalaVersion.value
}
}
val lib = (managedClasspath in Test).value.find(isScalaLib)
lib.map(_.data.getAbsolutePath).getOrElse {
streams.value.log.error("Couldn't find Scala library on the classpath. CP: " + (managedClasspath in Test).value); ""
}
}
},
exportJars := true
)
).dependsOnSource(irProject)
val commonToolsSettings = (
commonSettings ++ publishSettings ++ fatalWarningsSettings
) ++ Seq(
name := "Scala.js tools",
unmanagedSourceDirectories in Compile +=
baseDirectory.value.getParentFile / "shared/src/main/scala",
sourceGenerators in Compile <+= Def.task {
ScalaJSEnvGenerator.generateEnvHolder(
baseDirectory.value.getParentFile,
(sourceManaged in Compile).value)
},
/* Scala.js 0.6.6 will break binary compatibility of the tools
*/
// previousArtifactSetting,
binaryIssueFilters ++= BinaryIncompatibilities.Tools
)
lazy val tools: Project = Project(
id = "tools",
base = file("tools/jvm"),
settings = commonToolsSettings ++ Seq(
libraryDependencies ++= Seq(
"com.google.javascript" % "closure-compiler" % "v20130603",
"com.googlecode.json-simple" % "json-simple" % "1.1.1",
"com.novocode" % "junit-interface" % "0.9" % "test"
)
)
).dependsOn(irProject)
lazy val toolsJS: Project = Project(
id = "toolsJS",
base = file("tools/js"),
settings = myScalaJSSettings ++ commonToolsSettings ++ Seq(
crossVersion := ScalaJSCrossVersion.binary
) ++ inConfig(Test) {
// Redefine test to run Node.js and link HelloWorld
test := {
if (scalaJSStage.value == Stage.PreLink)
error("Can't run toolsJS/test in preLink stage")
val cp = {
for (e <- (fullClasspath in Test).value)
yield s""""${escapeJS(e.data.getAbsolutePath)}""""
}
val code = {
s"""
var lib = scalajs.QuickLinker().linkTestSuiteNode(${cp.mkString(", ")});
var __ScalaJSEnv = null;
eval("(function() { 'use strict'; " +
lib + ";" +
"scalajs.TestRunner().runTests();" +
"}).call(this);");
"""
}
val launcher = new MemVirtualJSFile("Generated launcher file")
.withContent(code)
val runner = jsEnv.value.jsRunner(scalaJSExecClasspath.value,
launcher, streams.value.log, scalaJSConsole.value)
runner.run()
}
}
).withScalaJSCompiler.dependsOn(javalibEx, testSuite % "test->test", irProjectJS)
lazy val jsEnvs: Project = Project(
id = "jsEnvs",
base = file("js-envs"),
settings = (
commonSettings ++ publishSettings ++ fatalWarningsSettings
) ++ Seq(
name := "Scala.js JS Envs",
libraryDependencies ++= Seq(
"io.apigee" % "rhino" % "1.7R5pre4",
"org.webjars" % "envjs" % "1.2",
"com.novocode" % "junit-interface" % "0.9" % "test"
) ++ ScalaJSPluginInternal.phantomJSJettyModules.map(_ % "provided"),
previousArtifactSetting,
binaryIssueFilters ++= BinaryIncompatibilities.JSEnvs
)
).dependsOn(tools)
lazy val testAdapter = Project(
id = "testAdapter",
base = file("test-adapter"),
settings = (
commonSettings ++ publishSettings ++ fatalWarningsSettings
) ++ Seq(
name := "Scala.js sbt test adapter",
libraryDependencies += "org.scala-sbt" % "test-interface" % "1.0",
previousArtifactSetting,
binaryIssueFilters ++= BinaryIncompatibilities.TestAdapter
)
).dependsOn(jsEnvs)
lazy val plugin: Project = Project(
id = "sbtPlugin",
base = file("sbt-plugin"),
settings = (
commonSettings ++ publishIvySettings ++ fatalWarningsSettings
) ++ Seq(
name := "Scala.js sbt plugin",
normalizedName := "sbt-scalajs",
name in bintray := "sbt-scalajs-plugin", // "sbt-scalajs" was taken
sbtPlugin := true,
scalaBinaryVersion :=
CrossVersion.binaryScalaVersion(scalaVersion.value),
previousArtifactSetting,
binaryIssueFilters ++= BinaryIncompatibilities.SbtPlugin,
// Add API mappings for sbt (seems they don't export their API URL)
apiMappings ++= {
val deps = (externalDependencyClasspath in Compile).value
val sbtJars = deps filter { attributed =>
val p = attributed.data.getPath
p.contains("/org.scala-sbt/") && p.endsWith(".jar")
}
val docUrl =
url(s"http://www.scala-sbt.org/${sbtVersion.value}/api/")
sbtJars.map(_.data -> docUrl).toMap
}
)
).dependsOn(tools, jsEnvs, testAdapter)
lazy val delambdafySetting = {
scalacOptions ++= (
if (isGeneratingEclipse) Seq()
else if (scalaBinaryVersion.value == "2.10") Seq()
else Seq("-Ydelambdafy:method"))
}
private def serializeHardcodedIR(base: File,
infoAndTree: (ir.Infos.ClassInfo, ir.Trees.ClassDef)): File = {
// We assume that there are no weird characters in the full name
val fullName = ir.Definitions.decodeClassName(infoAndTree._1.encodedName)
val output = base / (fullName.replace('.', '/') + ".sjsir")
if (!output.exists()) {
IO.createDirectory(output.getParentFile)
val stream = new BufferedOutputStream(new FileOutputStream(output))
try {
ir.InfoSerializers.serialize(stream, infoAndTree._1)
ir.Serializers.serialize(stream, infoAndTree._2)
} finally {
stream.close()
}
}
output
}
lazy val javalanglib: Project = Project(
id = "javalanglib",
base = file("javalanglib"),
settings = (
commonSettings ++ myScalaJSSettings ++ fatalWarningsSettings
) ++ Seq(
name := "java.lang library for Scala.js",
publishArtifact in Compile := false,
delambdafySetting,
noClassFilesSettings,
resourceGenerators in Compile <+= Def.task {
val base = (resourceManaged in Compile).value
Seq(
serializeHardcodedIR(base, JavaLangObject.InfoAndTree),
serializeHardcodedIR(base, JavaLangString.InfoAndTree)
)
}
) ++ (
scalaJSExternalCompileSettings
)
).withScalaJSCompiler.dependsOnLibraryNoJar
lazy val javalib: Project = Project(
id = "javalib",
base = file("javalib"),
settings = (
commonSettings ++ myScalaJSSettings ++ fatalWarningsSettings
) ++ Seq(
name := "Java library for Scala.js",
publishArtifact in Compile := false,
delambdafySetting,
noClassFilesSettings
) ++ (
scalaJSExternalCompileSettings
)
).withScalaJSCompiler.dependsOnLibraryNoJar
lazy val scalalib: Project = Project(
id = "scalalib",
base = file("scalalib"),
settings = commonSettings ++ myScalaJSSettings ++ Seq(
name := "Scala library for Scala.js",
publishArtifact in Compile := false,
delambdafySetting,
noClassFilesSettings,
// The Scala lib is full of warnings we don't want to see
scalacOptions ~= (_.filterNot(
Set("-deprecation", "-unchecked", "-feature") contains _)),
scalacOptions ++= List(
// Tell plugin to hack fix bad classOf trees
"-P:scalajs:fixClassOf",
// Link source maps to github sources of original Scalalib
"-P:scalajs:mapSourceURI:" +
(artifactPath in fetchScalaSource).value.toURI +
"->https://raw.githubusercontent.com/scala/scala/v" +
scalaVersion.value + "/src/library/"
),
artifactPath in fetchScalaSource :=
target.value / "scalaSources" / scalaVersion.value,
fetchScalaSource := {
val s = streams.value
val cacheDir = s.cacheDirectory
val ver = scalaVersion.value
val trgDir = (artifactPath in fetchScalaSource).value
val report = updateClassifiers.value
val scalaLibSourcesJar = report.select(
configuration = Set("compile"),
module = moduleFilter(name = "scala-library"),
artifact = artifactFilter(`type` = "src")).headOption.getOrElse {
sys.error(s"Could not fetch scala-library sources for version $ver")
}
FileFunction.cached(cacheDir / s"fetchScalaSource-$ver",
FilesInfo.lastModified, FilesInfo.exists) { dependencies =>
s.log.info(s"Unpacking Scala library sources to $trgDir...")
if (trgDir.exists)
IO.delete(trgDir)
IO.createDirectory(trgDir)
IO.unzip(scalaLibSourcesJar, trgDir)
} (Set(scalaLibSourcesJar))
trgDir
},
unmanagedSourceDirectories in Compile := {
// Calculates all prefixes of the current Scala version
// (including the empty prefix) to construct override
// directories like the following:
// - override-2.10.2-RC1
// - override-2.10.2
// - override-2.10
// - override-2
// - override
val ver = scalaVersion.value
val base = baseDirectory.value
val parts = ver.split(Array('.','-'))
val verList = parts.inits.map { ps =>
val len = ps.mkString(".").length
// re-read version, since we lost '.' and '-'
ver.substring(0, len)
}
def dirStr(v: String) =
if (v.isEmpty) "overrides" else s"overrides-$v"
val dirs = verList.map(base / dirStr(_)).filter(_.exists)
      dirs.toSeq // most specific shadows less specific
},
// Compute sources
// Files in earlier src dirs shadow files in later dirs
sources in Compile := {
// Sources coming from the sources of Scala
val scalaSrcDir = fetchScalaSource.value
// All source directories (overrides shadow scalaSrcDir)
val sourceDirectories =
(unmanagedSourceDirectories in Compile).value :+ scalaSrcDir
// Filter sources with overrides
def normPath(f: File): String =
f.getPath.replace(java.io.File.separator, "/")
val sources = mutable.ListBuffer.empty[File]
val paths = mutable.Set.empty[String]
for {
srcDir <- sourceDirectories
normSrcDir = normPath(srcDir)
src <- (srcDir ** "*.scala").get
} {
val normSrc = normPath(src)
val path = normSrc.substring(normSrcDir.length)
val useless =
path.contains("/scala/collection/parallel/") ||
path.contains("/scala/util/parsing/")
if (!useless) {
if (paths.add(path))
sources += src
else
streams.value.log.debug(s"not including $src")
}
}
sources.result()
},
// Continuation plugin (when using 2.10.x)
autoCompilerPlugins := true,
libraryDependencies ++= {
val ver = scalaVersion.value
if (ver.startsWith("2.10."))
Seq(compilerPlugin("org.scala-lang.plugins" % "continuations" % ver))
else
Nil
},
scalacOptions ++= {
if (scalaVersion.value.startsWith("2.10."))
Seq("-P:continuations:enable")
else
Nil
}
) ++ (
scalaJSExternalCompileSettings
)
).withScalaJSCompiler.dependsOnLibraryNoJar
lazy val libraryAux: Project = Project(
id = "libraryAux",
base = file("library-aux"),
settings = (
commonSettings ++ myScalaJSSettings ++ fatalWarningsSettings
) ++ Seq(
name := "Scala.js aux library",
publishArtifact in Compile := false,
delambdafySetting,
noClassFilesSettings
) ++ (
scalaJSExternalCompileSettings
)
).withScalaJSCompiler.dependsOnLibraryNoJar
lazy val library: Project = Project(
id = "library",
base = file("library"),
settings = (
commonSettings ++ publishSettings ++ myScalaJSSettings ++ fatalWarningsSettings
) ++ Seq(
name := "Scala.js library",
delambdafySetting,
scalacOptions in (Compile, doc) ++= Seq("-implicits", "-groups"),
exportJars := !isGeneratingEclipse,
previousArtifactSetting,
binaryIssueFilters ++= BinaryIncompatibilities.Library,
libraryDependencies +=
"org.scala-lang" % "scala-reflect" % scalaVersion.value % "provided"
) ++ (
scalaJSExternalCompileSettings
) ++ inConfig(Compile)(Seq(
/* Add the .sjsir files from other lib projects
* (but not .class files)
*/
mappings in packageBin := {
        /* From library, we must take everything, except the
* java.nio.TypedArrayBufferBridge object, whose actual
* implementation is in javalib.
*/
val superMappings = (mappings in packageBin).value
val libraryMappings = superMappings.filter(
            _._2.replace('\\', '/') !=
"scala/scalajs/js/typedarray/TypedArrayBufferBridge$.sjsir")
val filter = ("*.sjsir": NameFilter)
val javalibProducts = (products in javalib).value
val javalibMappings =
javalibProducts.flatMap(base => Path.selectSubpaths(base, filter))
val javalibFilteredMappings = javalibMappings.filter(
          _._2.replace('\\', '/') != "java/lang/MathJDK8Bridge$.sjsir")
val otherProducts = (
(products in javalanglib).value ++
(products in scalalib).value ++
(products in libraryAux).value)
val otherMappings =
otherProducts.flatMap(base => Path.selectSubpaths(base, filter))
libraryMappings ++ otherMappings ++ javalibFilteredMappings
}
))
).withScalaJSCompiler
lazy val javalibEx: Project = Project(
id = "javalibEx",
base = file("javalib-ex"),
settings = (
commonSettings ++ publishSettings ++ myScalaJSSettings ++ fatalWarningsSettings
) ++ Seq(
name := "Scala.js JavaLib Ex",
delambdafySetting,
noClassFilesSettings,
exportJars := true,
jsDependencies +=
"org.webjars" % "jszip" % "2.4.0" / "jszip.min.js" commonJSName "JSZip"
) ++ (
scalaJSExternalCompileSettings
)
).withScalaJSCompiler.dependsOn(library)
lazy val stubs: Project = Project(
id = "stubs",
base = file("stubs"),
settings = commonSettings ++ publishSettings ++ Seq(
name := "Scala.js Stubs",
libraryDependencies += "org.scala-lang" % "scala-reflect" % scalaVersion.value,
previousArtifactSetting
)
)
// Scala.js command line interface
lazy val cli: Project = Project(
id = "cli",
base = file("cli"),
settings = (
commonSettings ++ publishSettings ++ fatalWarningsSettings
) ++ Seq(
name := "Scala.js CLI",
libraryDependencies ++= Seq(
"com.github.scopt" %% "scopt" % "3.2.0"
),
previousArtifactSetting,
binaryIssueFilters ++= BinaryIncompatibilities.CLI,
// assembly options
mainClass in assembly := None, // don't want an executable JAR
assemblyOption in assembly ~= { _.copy(includeScala = false) },
assemblyJarName in assembly :=
s"${normalizedName.value}-assembly_${scalaBinaryVersion.value}-${version.value}.jar"
)
).dependsOn(tools)
// Test framework
lazy val testInterface = Project(
id = "testInterface",
base = file("test-interface"),
settings = (
commonSettings ++ publishSettings ++ myScalaJSSettings ++ fatalWarningsSettings
) ++ Seq(
name := "Scala.js test interface",
delambdafySetting,
previousArtifactSetting,
binaryIssueFilters ++= BinaryIncompatibilities.TestInterface
)
).withScalaJSCompiler.dependsOn(library)
lazy val jasmineTestFramework = Project(
id = "jasmineTestFramework",
base = file("jasmine-test-framework"),
settings = (
commonSettings ++ myScalaJSSettings ++ fatalWarningsSettings
) ++ Seq(
name := "Scala.js jasmine test framework",
jsDependencies ++= Seq(
ProvidedJS / "jasmine-polyfills.js",
"org.webjars" % "jasmine" % "1.3.1" /
"jasmine.js" dependsOn "jasmine-polyfills.js"
)
)
).withScalaJSCompiler.dependsOn(library, testInterface)
lazy val jUnitRuntime = Project(
id = "jUnitRuntime",
base = file("junit-runtime"),
settings = commonSettings ++ publishSettings ++ myScalaJSSettings ++
fatalWarningsSettings ++ Seq(name := "Scala.js JUnit test runtime")
).withScalaJSCompiler.dependsOn(testInterface)
lazy val jUnitPlugin = Project(
id = "jUnitPlugin",
base = file("junit-plugin"),
settings = commonSettings ++ publishSettings ++ fatalWarningsSettings ++ Seq(
name := "Scala.js JUnit test plugin",
crossVersion := CrossVersion.full,
libraryDependencies += "org.scala-lang" % "scala-compiler" % scalaVersion.value,
exportJars := true
)
)
// Examples
lazy val examples: Project = Project(
id = "examples",
base = file("examples"),
settings = commonSettings ++ Seq(
name := "Scala.js examples"
)
).aggregate(helloworld, reversi, testingExample)
lazy val exampleSettings = commonSettings ++ myScalaJSSettings ++ fatalWarningsSettings
lazy val helloworld: Project = Project(
id = "helloworld",
base = file("examples") / "helloworld",
settings = exampleSettings ++ Seq(
name := "Hello World - Scala.js example",
moduleName := "helloworld",
persistLauncher := true
)
).withScalaJSCompiler.dependsOn(library)
lazy val reversi = Project(
id = "reversi",
base = file("examples") / "reversi",
settings = exampleSettings ++ Seq(
name := "Reversi - Scala.js example",
moduleName := "reversi"
)
).withScalaJSCompiler.dependsOn(library)
lazy val testingExample = Project(
id = "testingExample",
base = file("examples") / "testing",
settings = exampleSettings ++ Seq(
name := "Testing - Scala.js example",
moduleName := "testing",
jsDependencies ++= Seq(
RuntimeDOM % "test",
"org.webjars" % "jquery" % "1.10.2" / "jquery.js" % "test"
)
)
).withScalaJSCompiler.dependsOn(library, jasmineTestFramework % "test")
// Testing
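  /* Tags passed to the test framework describing the active JS environment, semantics,
   * stage and output mode (e.g. -tnodejs, -tsource-maps, -tcompliant-asinstanceofs), so
   * that environment-specific tests can be enabled or skipped accordingly.
   */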
val testTagSettings = Seq(
testOptions in Test ++= {
@tailrec
def envTagsFor(env: JSEnv): Seq[Tests.Argument] = env match {
case env: RhinoJSEnv =>
val baseArgs = Seq("-trhino")
val args =
if (env.sourceMap) baseArgs :+ "-tsource-maps"
else baseArgs
Seq(Tests.Argument(args: _*))
case env: NodeJSEnv =>
val baseArgs = Seq("-tnodejs", "-ttypedarray")
val args = {
if (env.sourceMap) {
if (!env.hasSourceMapSupport) {
val projectId = thisProject.value.id
sys.error("You must install Node.js source map support to " +
"run the full Scala.js test suite (npm install " +
"source-map-support). To deactivate source map " +
s"tests, do: set postLinkJSEnv in $projectId := " +
"NodeJSEnv().value.withSourceMap(false)")
}
baseArgs :+ "-tsource-maps"
} else
baseArgs
}
Seq(Tests.Argument(args: _*))
case _: PhantomJSEnv =>
Seq(Tests.Argument("-tphantomjs"))
case env: RetryingComJSEnv =>
envTagsFor(env.baseEnv)
case _ =>
throw new AssertionError(
s"Unknown JSEnv of class ${env.getClass.getName}: " +
"don't know what tags to specify for the test suite")
}
val envTags = envTagsFor((jsEnv in Test).value)
val sems = (scalaJSSemantics in Test).value
val semTags = (
if (sems.asInstanceOfs == CheckedBehavior.Compliant)
Seq(Tests.Argument("-tcompliant-asinstanceofs"))
else
Seq()
) ++ (
if (sems.moduleInit == CheckedBehavior.Compliant)
Seq(Tests.Argument("-tcompliant-moduleinit"))
else
Seq()
) ++ (
if (sems.strictFloats) Seq(Tests.Argument("-tstrict-floats"))
else Seq()
)
val stageTag = Tests.Argument((scalaJSStage in Test).value match {
case PreLinkStage => "-tprelink-stage"
case FastOptStage => "-tfastopt-stage"
case FullOptStage => "-tfullopt-stage"
})
val modeTags = (scalaJSOutputMode in Test).value match {
case org.scalajs.core.tools.javascript.OutputMode.ECMAScript6StrongMode =>
Seq(Tests.Argument("-tstrong-mode"))
case _ =>
Seq()
}
envTags ++ semTags ++ (stageTag +: modeTags)
}
)
def testSuiteCommonSettings(isJSTest: Boolean): Seq[Setting[_]] = Seq(
publishArtifact in Compile := false,
scalacOptions ~= (_.filter(_ != "-deprecation")),
// Need reflect for typechecking macros
libraryDependencies +=
"org.scala-lang" % "scala-reflect" % scalaVersion.value % "provided",
testOptions += Tests.Argument(TestFrameworks.JUnit, "-v", "-a"),
unmanagedSourceDirectories in Test ++= {
def includeIf(testDir: File, condition: Boolean): List[File] =
if (condition) List(testDir)
else Nil
val testDir = (sourceDirectory in Test).value
val sharedTestDir =
testDir.getParentFile.getParentFile.getParentFile / "shared/src/test"
includeIf(testDir / "require-jdk7", javaVersion.value >= 7) ++
includeIf(testDir / "require-jdk8", javaVersion.value >= 8) ++
List(sharedTestDir / "scala") ++
includeIf(sharedTestDir / "require-jdk7", javaVersion.value >= 7) ++
includeIf(sharedTestDir / "require-jdk8", javaVersion.value >= 8)
},
sources in Test ++= {
/* Can't add require-sam as unmanagedSourceDirectories because of the use
* of scalacOptions. Hence sources are added individually.
* Note that a testSuite/test will not trigger a compile when sources are
* modified in require-sam
*/
if (isJSTest && scalaBinaryVersion.value != "2.10" &&
scalacOptions.value.contains("-Xexperimental")) {
val sourceDir = (sourceDirectory in Test).value / "require-sam"
(sourceDir ** "*.scala").get
} else {
Nil
}
}
)
lazy val testSuite: Project = Project(
id = "testSuite",
base = file("test-suite/js"),
settings = commonSettings ++ myScalaJSSettings ++ testTagSettings ++
testSuiteCommonSettings(isJSTest = true) ++ Seq(
name := "Scala.js test suite",
jsDependencies += ProvidedJS / "ScalaJSDefinedTestNatives.js" % "test",
scalaJSSemantics ~= (_.withRuntimeClassName(_.fullName match {
case "org.scalajs.testsuite.compiler.ReflectionTest$RenamedTestClass" =>
"renamed.test.Class"
case fullName =>
fullName
})),
/* Generate a scala source file that throws exceptions in
* various places (while attaching the source line to the
* exception). When we catch the exception, we can then
* compare the attached source line and the source line
* calculated via the source maps.
*
* see test-suite/src/test/resources/SourceMapTestTemplate.scala
*/
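      // For example, a `/***/` marker on template line N is rewritten to
      //   `if (TC.is(i)) { throw new TestException(N) } else `
      // and a `/**/` marker to `; if (TC.is(i)) { throw new TestException(N) } ;`.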
sourceGenerators in Test <+= Def.task {
val dir = (sourceManaged in Test).value
IO.createDirectory(dir)
val template = IO.read((resourceDirectory in Test).value /
"SourceMapTestTemplate.scala")
def lineNo(cs: CharSequence) =
        (0 until cs.length).count(i => cs.charAt(i) == '\n') + 1
var i = 0
      val pat = "/\\*{2,3}/".r
val replaced = pat.replaceAllIn(template, { mat =>
val lNo = lineNo(mat.before)
val res =
if (mat.end - mat.start == 5)
// matching a /***/
s"if (TC.is($i)) { throw new TestException($lNo) } else "
else
// matching a /**/
s"; if (TC.is($i)) { throw new TestException($lNo) } ;"
i += 1
res
})
val outFile = dir / "SourceMapTest.scala"
IO.write(outFile, replaced.replace("0/*<testCount>*/", i.toString))
Seq(outFile)
},
scalacOptions in Test ++= {
if (isGeneratingEclipse) {
Seq.empty
} else {
val jar = (packageBin in (jUnitPlugin, Compile)).value
Seq(s"-Xplugin:$jar")
}
}
)
).withScalaJSCompiler.dependsOn(
library, jUnitRuntime, jasmineTestFramework % "test"
)
lazy val testSuiteJVM: Project = Project(
id = "testSuiteJVM",
base = file("test-suite/jvm"),
settings = commonSettings ++ testSuiteCommonSettings(isJSTest = false) ++ Seq(
name := "Scala.js test suite on JVM",
libraryDependencies +=
"com.novocode" % "junit-interface" % "0.11" % "test"
)
)
lazy val noIrCheckTest: Project = Project(
id = "noIrCheckTest",
base = file("no-ir-check-test"),
settings = commonSettings ++ myScalaJSSettings ++ testTagSettings ++ Seq(
name := "Scala.js not IR checked tests",
scalaJSOptimizerOptions ~= (_.
withCheckScalaJSIR(false).
withBypassLinkingErrors(true)
),
publishArtifact in Compile := false
)
).withScalaJSCompiler.dependsOn(library, jasmineTestFramework % "test")
lazy val javalibExTestSuite: Project = Project(
id = "javalibExTestSuite",
base = file("javalib-ex-test-suite"),
settings = (
commonSettings ++ myScalaJSSettings ++ testTagSettings
) ++ Seq(
name := "JavaLib Ex Test Suite",
publishArtifact in Compile := false,
scalacOptions in Test ~= (_.filter(_ != "-deprecation"))
)
).withScalaJSCompiler.dependsOn(javalibEx, jasmineTestFramework % "test")
lazy val partest: Project = Project(
id = "partest",
base = file("partest"),
settings = commonSettings ++ fatalWarningsSettings ++ Seq(
name := "Partest for Scala.js",
moduleName := "scalajs-partest",
resolvers += Resolver.typesafeIvyRepo("releases"),
artifactPath in fetchScalaSource :=
baseDirectory.value / "fetchedSources" / scalaVersion.value,
fetchScalaSource := {
import org.eclipse.jgit.api._
val s = streams.value
val ver = scalaVersion.value
val trgDir = (artifactPath in fetchScalaSource).value
if (!trgDir.exists) {
s.log.info(s"Fetching Scala source version $ver")
// Make parent dirs and stuff
IO.createDirectory(trgDir)
// Clone scala source code
new CloneCommand()
.setDirectory(trgDir)
.setURI("https://github.com/scala/scala.git")
.call()
}
// Checkout proper ref. We do this anyway so we fail if
// something is wrong
val git = Git.open(trgDir)
s.log.info(s"Checking out Scala source version $ver")
git.checkout().setName(s"v$ver").call()
trgDir
},
libraryDependencies ++= {
if (shouldPartest.value)
Seq(
"org.scala-sbt" % "sbt" % sbtVersion.value,
"org.scala-lang.modules" %% "scala-partest" % "1.0.9",
"com.google.javascript" % "closure-compiler" % "v20130603",
"io.apigee" % "rhino" % "1.7R5pre4",
"com.googlecode.json-simple" % "json-simple" % "1.1.1"
)
else Seq()
},
sources in Compile := {
if (shouldPartest.value) {
// Partest sources and some sources of sbtplugin (see above)
val baseSrcs = (sources in Compile).value
// Sources for tools (and hence IR)
val toolSrcs = (sources in (tools, Compile)).value
// Sources for js-envs
val jsenvSrcs = {
val jsenvBase = ((scalaSource in (jsEnvs, Compile)).value /
"org/scalajs/jsenv")
val scalaFilter: FileFilter = "*.scala"
val files = (
(jsenvBase * scalaFilter) +++
(jsenvBase / "nodejs" ** scalaFilter) +++
(jsenvBase / "rhino" ** scalaFilter))
files.get
}
toolSrcs ++ baseSrcs ++ jsenvSrcs
} else Seq()
}
)
).dependsOn(compiler)
lazy val partestSuite: Project = Project(
id = "partestSuite",
base = file("partest-suite"),
settings = commonSettings ++ fatalWarningsSettings ++ Seq(
name := "Scala.js partest suite",
fork in Test := true,
javaOptions in Test += "-Xmx1G",
testFrameworks ++= {
if (shouldPartest.value)
Seq(new TestFramework("scala.tools.partest.scalajs.Framework"))
else Seq()
},
definedTests in Test <++= Def.taskDyn[Seq[sbt.TestDefinition]] {
if (shouldPartest.value) Def.task {
val _ = (fetchScalaSource in partest).value
Seq(new sbt.TestDefinition(
s"partest-${scalaVersion.value}",
// marker fingerprint since there are no test classes
// to be discovered by sbt:
new sbt.testing.AnnotatedFingerprint {
def isModule = true
def annotationName = "partest"
},
true,
Array()
))
} else {
Def.task(Seq())
}
}
)
).dependsOn(partest % "test", library)
}
| ummels/scala-js | project/Build.scala | Scala | bsd-3-clause | 48,019 |
package worker
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.cluster.pubsub.DistributedPubSub
import akka.cluster.pubsub.DistributedPubSubMediator
class PostProcessor extends Actor with ActorLogging {
val mediator = DistributedPubSub(context.system).mediator
mediator ! DistributedPubSubMediator.Subscribe(Master.ResultsTopic, self)
def receive = {
case _: DistributedPubSubMediator.SubscribeAck =>
case WorkResult(workId, result) =>
log.info("Post-processor -> Got work result {} | Work Id {}", result, workId)
}
} | oel/akka-iot-mqtt | src/main/scala/worker/PostProcessor.scala | Scala | lgpl-3.0 | 562 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.api.python
import java.io.{ByteArrayOutputStream, DataOutputStream}
import org.apache.spark.SparkFunSuite
class PythonRDDSuite extends SparkFunSuite {
test("Writing large strings to the worker") {
val input: List[String] = List("a"*100000)
val buffer = new DataOutputStream(new ByteArrayOutputStream)
PythonRDD.writeIteratorToStream(input.iterator, buffer)
}
test("Handle nulls gracefully") {
val buffer = new DataOutputStream(new ByteArrayOutputStream)
// Should not have NPE when write an Iterator with null in it
// The correctness will be tested in Python
PythonRDD.writeIteratorToStream(Iterator("a", null), buffer)
PythonRDD.writeIteratorToStream(Iterator(null, "a"), buffer)
PythonRDD.writeIteratorToStream(Iterator("a".getBytes, null), buffer)
PythonRDD.writeIteratorToStream(Iterator(null, "a".getBytes), buffer)
PythonRDD.writeIteratorToStream(Iterator((null, null), ("a", null), (null, "b")), buffer)
PythonRDD.writeIteratorToStream(
Iterator((null, null), ("a".getBytes, null), (null, "b".getBytes)), buffer)
}
}
| ArvinDevel/onlineAggregationOnSparkV2 | core/src/test/scala/org/apache/spark/api/python/PythonRDDSuite.scala | Scala | apache-2.0 | 1,919 |
package cache
import db.{Authorization, ResolversDao}
import io.flow.dependency.v0.models.Resolver
import io.flow.util.CacheWithFallbackToStaleData
@javax.inject.Singleton
case class ResolversCache @javax.inject.Inject()(
resolversDao: ResolversDao
) extends CacheWithFallbackToStaleData[String, Option[Resolver]] {
override def refresh(resolverId: String): Option[Resolver] = {
resolversDao.findById(Authorization.All, resolverId)
}
def findByResolverId(resolverId: String): Option[Resolver] = {
get(resolverId)
}
}
| flowcommerce/dependency | api/app/cache/ResolversCache.scala | Scala | mit | 539 |
package org.jetbrains.plugins.scala
package codeInspection.booleans
import com.intellij.codeInspection.{ProblemHighlightType, ProblemsHolder}
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.codeInspection.{AbstractFixOnPsiElement, AbstractInspection}
import org.jetbrains.plugins.scala.lang.completion.ScalaKeyword
import org.jetbrains.plugins.scala.lang.psi.api.base.ScLiteral
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory
import org.jetbrains.plugins.scala.lang.psi.types.result.TypingContext
import org.jetbrains.plugins.scala.lang.refactoring.util.ScalaRefactoringUtil.getShortText
import scala.Predef._
/**
* Nikolay.Tropin
* 4/23/13
*
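 * Simplifies trivial boolean expressions, e.g. rewriting `a && true` to `a`,
 * `b || true` to `true` and `!false` to `true`.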
*/
class SimplifyBooleanInspection extends AbstractInspection("SimplifyBoolean", "Simplify boolean expression"){
def actionFor(holder: ProblemsHolder): PartialFunction[PsiElement, Any] = {
case _: ScParenthesisedExpr => //do nothing to avoid many similar expressions
case expr: ScExpression if SimplifyBooleanUtil.canBeSimplified(expr) =>
holder.registerProblem(expr, "Simplify boolean expression", ProblemHighlightType.GENERIC_ERROR_OR_WARNING, new SimplifyBooleanQuickFix(expr))
}
}
class SimplifyBooleanQuickFix(expr: ScExpression) extends AbstractFixOnPsiElement("Simplify " + getShortText(expr), expr) {
def doApplyFix(project: Project) {
val scExpr = getElement
if (scExpr.isValid && SimplifyBooleanUtil.canBeSimplified(scExpr)) {
val simplified = SimplifyBooleanUtil.simplify(scExpr)
scExpr.replaceExpression(simplified, removeParenthesis = true)
}
}
}
object SimplifyBooleanUtil {
val boolInfixOperations = Set("==", "!=", "&&", "&", "||", "|", "^")
def canBeSimplified(expr: ScExpression, isTopLevel: Boolean = true): Boolean = {
expr match {
case _: ScLiteral if !isTopLevel => booleanConst(expr).isDefined
case ScParenthesisedExpr(e) => canBeSimplified(e, isTopLevel)
case expression: ScExpression =>
val children = getScExprChildren(expr)
val isBooleanOperation = expression match {
case ScPrefixExpr(operation, operand) => operation.refName == "!" && isOfBooleanType(operand)
case ScInfixExpr(left, oper, right) =>
boolInfixOperations.contains(oper.refName) &&
isOfBooleanType(left) && isOfBooleanType(right)
case _ => false
}
isBooleanOperation && isOfBooleanType(expr) && children.exists(canBeSimplified(_, isTopLevel = false))
}
}
def simplify(expr: ScExpression, isTopLevel: Boolean = true): ScExpression = {
if (canBeSimplified(expr, isTopLevel) && booleanConst(expr).isEmpty) {
val exprCopy = ScalaPsiElementFactory.createExpressionWithContextFromText(expr.getText, expr.getContext, expr)
val children = getScExprChildren(exprCopy)
children.foreach(child => exprCopy.getNode.replaceChild(child.getNode, simplify(child, isTopLevel = false).getNode))
simplifyTrivially(exprCopy)
}
else expr
}
private def isOfBooleanType(expr: ScExpression): Boolean = expr.getType(TypingContext.empty).getOrAny.conforms(lang.psi.types.Boolean, checkWeak = true)
private def getScExprChildren(expr: ScExpression) = expr.children.collect { case expr: ScExpression => expr }.toList
private def booleanConst(expr: ScExpression): Option[Boolean] = expr match {
case literal: ScLiteral =>
literal.getText match {
case "true" => Some(true)
case "false" => Some(false)
case _ => None
}
case _ => None
}
private def simplifyTrivially(expr: ScExpression): ScExpression = expr match {
case parenthesized: ScParenthesisedExpr =>
val copy = parenthesized.copy.asInstanceOf[ScParenthesisedExpr]
copy.replaceExpression(copy.expr.getOrElse(copy), removeParenthesis = true)
case ScPrefixExpr(operation, operand) =>
if (operation.refName != "!") expr
else {
booleanConst(operand) match {
case Some(bool: Boolean) =>
ScalaPsiElementFactory.createExpressionFromText((!bool).toString, expr.getManager)
case None => expr
}
}
case ScInfixExpr(leftExpr, operation, rightExpr) =>
val operName = operation.refName
if (!boolInfixOperations.contains(operName)) expr
else {
booleanConst(leftExpr) match {
case Some(bool: Boolean) => simplifyInfixWithLiteral(bool, operName, rightExpr)
case None => booleanConst(rightExpr) match {
case Some(bool: Boolean) => simplifyInfixWithLiteral(bool, operName, leftExpr)
case None => expr
}
}
}
case _ => expr
}
private def simplifyInfixWithLiteral(value: Boolean, operation: String, expr: ScExpression): ScExpression = {
val manager = expr.getManager
val text: String = booleanConst(expr) match {
case Some(bool: Boolean) =>
val result: Boolean = operation match {
case "==" => bool == value
case "!=" | "^" => bool != value
case "&&" | "&" => bool && value
case "||" | "|" => bool || value
}
result.toString
case _ => (value, operation) match {
case (true, "==") | (false, "!=") | (false, "^") | (true, "&&") | (true, "&") | (false, "||") | (false, "|") => expr.getText
case (false, "==") | (true, "!=") | (true, "^") =>
val negated: ScPrefixExpr = ScalaPsiElementFactory.createExpressionFromText("!a", manager).asInstanceOf[ScPrefixExpr]
val copyExpr = expr.copy.asInstanceOf[ScExpression]
negated.operand.replaceExpression(copyExpr, removeParenthesis = true)
negated.getText
case (true, "||") | (true, "|") =>
ScalaKeyword.TRUE
case (false, "&&") | (false, "&") =>
ScalaKeyword.FALSE
case _ => throw new IllegalArgumentException("Wrong operation")
}
}
ScalaPsiElementFactory.createExpressionFromText(text, manager)
}
}
| advancedxy/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/booleans/SimplifyBooleanInspection.scala | Scala | apache-2.0 | 6,104 |
package gamelogic
import gameserver.GameServer
import networkcom._
import scala.collection.mutable
import scala.scalajs.js.timers.setTimeout
/**
 * A game hosted by a server.
* Manages all communications and game states during a game.
*/
class GamePlaying(val gameName: String, val password: Int,
val players: Vector[String], val maxNbrCards: Int,
val server: GameServer) {
def closeGame(msg: String): Unit = {
broadcastReliable(ClosingGame(gameName, msg))
}
private var _state: GamePlayingState = WaitingForPlayers
def state: GamePlayingState = _state
private val playersWithPeers: mutable.Map[String, Peer] = mutable.Map()
def playersPeers: Iterable[Peer] = playersWithPeers.values
def peerToPlayer: Map[Peer, String] = playersWithPeers.toSet.map(
(elem: (String, Peer)) => (elem._2, elem._1)
).toMap
private def broadcastReliable(message: InGameMessage): Unit = {
playersWithPeers.values.foreach(server.sendReliable(message, _))
}
private def broadcastOrderedReliable(message: InGameMessage): Unit = {
playersWithPeers.values.foreach(server.sendOrderedReliable(message, _))
}
private def broadcastOrderedReliableButOne(message: InGameMessage, player: String): Unit = {
playersWithPeers.filterNot(_._1 == player).values.foreach(server.sendOrderedReliable(message, _))
}
private def cardMessage2Card(cardMessage: CardMessage): Card =
DefaultCard(DefaultCardValue(cardMessage.value), CardColor(cardMessage.color))
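  /**
   * Handles a message coming from a player's peer: explicit or random card plays, trick
   * bets, player connections and in-game chat. Actions are checked against the current
   * game state's legal actions before being broadcast and applied.
   */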
def messageCallback(message: InGameMessage, peer: Peer): Unit = if (state != GamePlayingEnded) {
message match {
case PlayRandomCard(_, player) =>
val gameState = currentGameState
if (gameState.state == PlayingCardState && !gameState.turnOfPlayer._2 && gameState.turnOfPlayer._1 == player) {
val legalActions = gameState.legalActions
// if it is the turn of player player, their hand is not empty, so needless to check
val card = scala.util.Random.shuffle(
gameState
.hands(player)
.filter(card => legalActions(PlayCard(player, card)))
.toList
).head
val action = PlayCard(player, card)
actions :+= action
broadcastOrderedReliable(PlayCardMessage(gameName, player, CardMessage(card.value.value, card.color.color)))
performAction(action(gameState))
}
case PlayCardMessage(_, player, cardMessage) =>
val action = PlayCard(player, cardMessage2Card(cardMessage))
val gameState = currentGameState
if (gameState.legalActions.apply(action)) {
actions :+= action
broadcastOrderedReliable(PlayCardMessage(gameName, player, cardMessage))
performAction(action(gameState))
} else {
println(s"Received card $cardMessage from player $player but not their turn.")
}
case BetTrickNumberMessage(_, player, bet) =>
val action = BetTrickNumber(player, bet)
val gameState = currentGameState
if (gameState.legalActions.apply(action)) {
actions :+= action
broadcastOrderedReliable(BetTrickNumberMessage(gameName, player, bet))
performAction(action(gameState))
} else {
println(s"Received bet $bet of player $player but not their turn.")
}
case PlayerConnecting(gName, pName, pw) if gameName == gName && password == pw && players.contains(pName)
=>
playersWithPeers += (players.find(_ == pName).get -> peer)
val stillWaitFor = players.size - playersWithPeers.size
if (stillWaitFor > 0) {
broadcastOrderedReliable(StillWaitingForPlayers(gameName, stillWaitFor))
} else {
broadcastOrderedReliable(GameStarts(gameName, maxNbrCards))
_state = Playing
setTimeout(1000) {
performAction()
}
}
case InGameChatMessage(g, s, _, p) =>
if (s.trim != "") {
// the time of the message is the instant at which the server receives it
broadcastReliable(InGameChatMessage(g, s, new java.util.Date().getTime, p))
}
case _ =>
println(s"Unknown message: $message")
}
}
private val originalGameState: GameState = GameState.originalState(
scala.util.Random.shuffle(players), maxNbrCards
)
private var actions: List[GameAction] = Nil
def currentGameState: GameState = originalGameState(actions)
private var allCards: List[Card] = Card.shuffledDefaultDeck
def remainingCards(gameState: GameState = currentGameState): List[Card] = {
allCards.filterNot(gameState.distributedCards.contains)
}
private def shuffleCards(): Unit = {
allCards = scala.util.Random.shuffle(allCards)
}
private def givePlayerAHand(player: String, gameState: GameState): PlayerReceivesHand = {
val remaining = remainingCards(gameState)
PlayerReceivesHand(player, remaining.take(gameState.nbrCardsDistributed).toArray)
}
/**
* These four numbers are used to count points during the game.
   * In a future version, maybe we'll allow the game host to choose these numbers in order to customize the game further.
*/
private val successBonus: Int = 10
private val failurePenalty: Int = 0
private val bonusPerTrick: Int = 1
private val penaltyPerTrick: Int = 1
private def chooseTrump(gameState: GameState): GameAction = ChooseTrump(remainingCards(gameState).head)
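  /**
   * Advances the game's state machine: distributes hands, chooses the trump, waits for
   * player input while betting or playing cards, and schedules new hands and deals until
   * the game ends, at which point the game is closed.
   */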
private def performAction(gameState: GameState = currentGameState): Unit = {
gameState.state match {
case GameEnded =>
println(s"Game has ended, winner is ${gameState.points.toList.maxBy(_._2)._1}")
setTimeout(1000) {
closeGame("gameEndedNormally")
}
case DistributingCardState =>
val action = givePlayerAHand(gameState.players.filterNot(gameState.hands.keys.toSet.contains).head, gameState)
val nbrCardsDistributed = gameState.nbrCardsDistributed
actions :+= action
if (nbrCardsDistributed > 1) {
server.sendOrderedReliable(action.toMessage(gameName), playersWithPeers(action.player))
} else {
broadcastOrderedReliableButOne(action.toMessage(gameName), action.player)
}
setTimeout(500) {performAction(action(gameState))}
case ChoosingTrumpState =>
val action = chooseTrump(gameState)
actions :+= action
broadcastOrderedReliable(action.toMessage(gameName))
performAction(action(gameState))
case BettingState =>
shuffleCards() // shuffling, BettingState means that distributing is over, so we can shuffle.
// we do nothing as we should wait for player input
case PlayingCardState =>
// we do nothing as we should wait for player input
case NewDealState =>
setTimeout(4000) {
val action = NewDeal(successBonus, failurePenalty, bonusPerTrick, penaltyPerTrick)
actions :+= action
broadcastOrderedReliable(action.toMessage(gameName))
setTimeout(500) {
performAction(action(gameState))
}
}
case NewHandState =>
setTimeout(2000) {
actions :+= NewHand()
broadcastOrderedReliable(NewHand().toMessage(gameName))
setTimeout(500) {
performAction(NewHand()(gameState))
}
}
}
}
}
sealed trait GamePlayingState
case object WaitingForPlayers extends GamePlayingState
case object Playing extends GamePlayingState
case object GamePlayingEnded extends GamePlayingState
| sherpal/oh-hell-card-game | server/src/main/scala/gamelogic/GamePlaying.scala | Scala | mit | 7,813 |
package business
import models.{Pastes, Profiles}
/**
* Created by justin on 4/15/15.
*/
trait BaseDaoTrait {
def pastes: Pastes
def profiles: Profiles
}
| maximx1/lecarton | app/business/BaseDaoTrait.scala | Scala | mit | 162 |
package com.twitter.diffy
import com.twitter.diffy.analysis._
import com.twitter.diffy.thriftscala._
import com.twitter.diffy.lifter.JsonLifter
import scala.language.postfixOps
object Renderer {
def differences(diffs: Map[String, String]) =
diffs map { case (k, v) => k -> JsonLifter.decode(v) }
def differenceResults(drs: Iterable[DifferenceResult], includeRequestResponses: Boolean = false) =
drs map { differenceResult(_, includeRequestResponses) }
def differenceResult(dr: DifferenceResult, includeRequestResponses: Boolean = false) =
Map(
"id" -> dr.id.toString,
"trace_id" -> dr.traceId,
"timestamp_msec" -> dr.timestampMsec,
"endpoint" -> dr.endpoint,
"differences" -> differences(dr.differences.toMap)
) ++ {
if (includeRequestResponses) {
Map(
"request" -> JsonLifter.decode(dr.request),
"left" -> JsonLifter.decode(dr.responses.primary),
"right" -> JsonLifter.decode(dr.responses.candidate)
)
} else {
Map.empty[String, Any]
}
}
def endpoints(endpoints: Map[String, EndpointMetadata]) =
endpoints map { case (ep, meta) =>
ep -> endpoint(meta)
}
def endpoint(endpoint: EndpointMetadata) = Map(
"total" -> endpoint.total,
"differences" -> endpoint.differences
)
def field(field: FieldMetadata, includeWeight: Boolean) =
Map("differences" -> field.differences) ++ {
if (includeWeight) {
Map("weight" -> field.weight)
} else {
Map.empty[String, Any]
}
}
def field(field: JoinedField, includeWeight: Boolean) =
Map(
"differences" -> field.raw.differences,
"noise" -> field.noise.differences,
"relative_difference" -> field.relativeDifference,
"absolute_difference" -> field.absoluteDifference
) ++ {
if (includeWeight) Map("weight" -> field.raw.weight) else Map.empty
}
def fields(
fields: Map[String, JoinedField],
includeWeight: Boolean = false
) =
fields map { case (path, meta) =>
path -> field(meta, includeWeight)
} toMap
def error(message: String) =
Map("error" -> message)
def success(message: String) =
Map("success" -> message)
} | NateChambers/diffy | src/main/scala/com/twitter/diffy/Renderer.scala | Scala | apache-2.0 | 2,237 |
package org.example.remoting
import java.util.concurrent.atomic.AtomicInteger
import akka.actor._
import akka.util.Timeout
import com.typesafe.config.ConfigFactory
import org.example.remoting.RemotingProtocol._
import scala.concurrent.duration._
/**
* Created by kailianghe on 1/18/15.
*/
// http://tersesystems.com/2014/06/25/akka-clustering/
/**
* Akka remoting works by saying to the actor system either “I want you to create an actor on this remote host”:
* val ref = system.actorOf(FooActor.props.withDeploy(Deploy(scope = RemoteScope(address))))
*
* or “I want a reference to an existing actor on the remote host”:
 * val remoteFooActor = context.actorSelection("akka.tcp://ClusterSystem@127.0.0.1:2552/user/fooActor")
*
* After calling the actor, messages are sent to the remote server using Protocol Buffers for serialization, and
* reconstituted on the other end.
*
* Clustering allows you to create an actor somewhere on a cluster consisting of nodes which all share the same
* actor system, without knowing exactly which node it is on. Other machines can join and leave the cluster at run time.
*/
object TransformationFrontend {
def main(args: Array[String]): Unit = {
// Override the configuration of the port when specified as program argument
val port = if (args.isEmpty) "0" else args(0)
val config = ConfigFactory.parseString(s"akka.remote.netty.tcp.port=$port").
withFallback(ConfigFactory.parseString("akka.cluster.roles = [frontend]")).
withFallback(ConfigFactory.load("AkkaRemoting.conf")).
withFallback(ConfigFactory.load())
val system = ActorSystem("ClusterSystem", config)
val frontend = system.actorOf(Props[TransformationFrontend], name = "frontend")
val counter = new AtomicInteger
import system.dispatcher
import akka.pattern.ask
system.scheduler.schedule(2 seconds, 4 seconds) {
implicit val timeout = Timeout(5 seconds)
(frontend ? TransformationJob("hello-" + counter.incrementAndGet())) onSuccess {
case result => println(result)
}
}
}
}
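/**
 * Illustrative sketch only (this class is not part of the original sample): roughly what the
 * backend side described in the comment above could look like. It assumes akka-cluster is on
 * the classpath and that backend nodes run with the "backend" role; the reply payload here is
 * just a placeholder string rather than a real transformation result.
 */
class TransformationBackendSketch extends Actor {
  import akka.cluster.Cluster
  import akka.cluster.ClusterEvent.{CurrentClusterState, MemberUp}
  import akka.cluster.{Member, MemberStatus}
  val cluster = Cluster(context.system)
  // Subscribe to membership events so we learn about (re)joining frontend nodes.
  override def preStart(): Unit = cluster.subscribe(self, classOf[MemberUp])
  override def postStop(): Unit = cluster.unsubscribe(self)
  def receive = {
    case job: TransformationJob =>
      // Placeholder "work": echo the job back; a real backend would transform the text.
      sender() ! s"processed: $job"
    case state: CurrentClusterState =>
      state.members.filter(_.status == MemberStatus.Up) foreach register
    case MemberUp(m) =>
      register(m)
  }
  // Announce this backend to the frontend actor created in TransformationFrontend.main,
  // which adds the sender to its backend list on BackendRegistration.
  def register(member: Member): Unit =
    if (member.hasRole("frontend"))
      context.actorSelection(RootActorPath(member.address) / "user" / "frontend") ! BackendRegistration
}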
class TransformationFrontend extends Actor {
var backends = IndexedSeq.empty[ActorRef]
var jobCounter = 0
def receive = {
case job: TransformationJob if backends.isEmpty =>
sender() ! JobFailed("Service unavailable, try again later", job)
case job: TransformationJob =>
jobCounter += 1
backends(jobCounter % backends.size) forward job
case BackendRegistration if !backends.contains(sender()) =>
context watch sender()
backends = backends :+ sender()
case Terminated(a) =>
backends = backends.filterNot(_ == a)
}
} | hekailiang/akka-play | actor-samples/src/main/scala/org/example/remoting/TransformationFrontend.scala | Scala | apache-2.0 | 2,665 |
package scjson.converter
import minitest.*
import scutil.lang.*
import scjson.ast.*
import JsonFormat.given
object CollectionTest extends SimpleTestSuite {
test("special collections should serialize string maps") {
assertEquals(
JsonWriter[Map[String,Int]] convert Map("a" -> 1),
Validated.valid(JsonValue.obj("a" -> JsonValue.fromInt(1)))
)
}
test("special collections should serialize int maps") {
assertEquals(
JsonWriter[Map[Int,Int]] convert Map(2 -> 3),
Validated.valid(JsonValue.obj("2" -> JsonValue.fromInt(3)))
)
}
test("special collections should serialize long maps") {
assertEquals(
JsonWriter[Map[Long,Int]] convert Map(4L -> 5),
Validated.valid(JsonValue.obj("4" -> JsonValue.fromInt(5)))
)
}
//------------------------------------------------------------------------------
case object X
given XReader:JsonReader[X.type] = Converter.total(_ => X)
given XWriter:JsonWriter[X.type] = Converter.total(_ => JsonValue.Null)
test("keyless maps should serialize without having a key writer") {
assertEquals(
JsonWriter[Map[X.type,String]] convert Map(X -> "1"),
Validated.valid(JsonValue.arr(JsonValue.arr(JsonValue.Null, JsonValue.fromString("1"))))
)
}
test("keyless maps should deserialize without having a key reader") {
assertEquals(
JsonReader[Map[X.type,String]] convert JsonValue.arr(JsonValue.arr(JsonValue.Null, JsonValue.fromString("1"))),
Validated.valid(Map(X -> "1"))
)
}
//------------------------------------------------------------------------------
test("tuples should serialize 2-tuples") {
assertEquals(
JsonWriter[(String,Int)] convert (("a", 1)),
Validated.valid(JsonValue.arr(JsonValue.fromString("a"), JsonValue.fromInt(1)))
)
}
test("tuples should serialize 3-tuples") {
assertEquals(
JsonWriter[(String,Int,Boolean)] convert (("a", 1, true)),
Validated.valid(JsonValue.arr(JsonValue.fromString("a"), JsonValue.fromInt(1), JsonValue.True))
)
}
test("tuples should parse 2-tuples") {
assertEquals(
JsonReader[(String,Int)] convert JsonValue.arr(JsonValue.fromString("a"), JsonValue.fromInt(1)),
Validated.valid(("a", 1))
)
}
test("tuples should parse 3-tuples") {
assertEquals(
JsonReader[(String,Int,Boolean)] convert JsonValue.arr(JsonValue.fromString("a"), JsonValue.fromInt(1), JsonValue.True),
Validated.valid(("a", 1, true))
)
}
//------------------------------------------------------------------------------
/*
test("keyed tuples should serialize 2-tuples") {
assertEquals(
JsonWriter[(String,Int)] convert (("a", 1)),
Validated.valid(JsonValue.obj("1" -> JsonValue.fromString("a"), "2" -> JsonValue.fromInt(1)))
)
}
test("keyed tuples should serialize 3-tuples") {
assertEquals(
JsonWriter[(String,Int,Boolean)] convert (("a", 1, true)),
Validated.valid(JsonValue.obj("1" -> JsonValue.fromString("a"), "2" -> JsonValue.fromInt(1), "3" -> JsonValue.True))
)
}
test("keyed tuples should parse 2-tuples") {
assertEquals(
JsonReader[(String,Int)] convert JsonValue.obj("1" -> JsonValue.fromString("a"), "2" -> JsonValue.fromInt(1)),
Validated.valid(("a", 1))
)
}
test("keyed tuples should parse 3-tuples") {
assertEquals(
JsonReader[(String,Int,Boolean)] convert JsonValue.obj("1" -> JsonValue.fromString("a"), "2" -> JsonValue.fromInt(1), "3" -> JsonValue.True),
Validated.valid(("a", 1, true))
)
}
*/
}
| ritschwumm/scjson | modules/converter/src/test/scala/CollectionTest.scala | Scala | bsd-2-clause | 3,417 |
package net.composmin.akkahttp
import org.scalatest.{BeforeAndAfterEach, FunSuite}
/**
* Created by cfegan on 5/06/2016.
*/
class EnvDumperTest extends FunSuite with BeforeAndAfterEach {
override def beforeEach() {
}
override def afterEach() {
}
test("testDumpEnv") {
println(EnvDumper.dumpEnv())
}
}
| ComposMin/akka-http-example | src/test/scala/net/composmin/akkahttp/EnvDumperTest.scala | Scala | mit | 329 |
/**********************************************************************************
* Copyright (c) 2011, Monnet Project
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of the Monnet Project nor the
* names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE MONNET PROJECT BE LIABLE FOR ANY
* DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*********************************************************************************/
package eu.monnetproject.gelato.statements
import java.net.URI
import scala.util.parsing.combinator._
/**
* A gelato statement
*
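 * In the surface syntax produced by the `toString` methods below, a definite statement
 * renders as e.g. `<http://ex.org/p>(?x) -> #<http://ex.org/q>(?x)`: URIs appear in angle
 * brackets, variables are prefixed with `?`, and `#`, `%`, `!` mark must, may and not.
 *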
* @author John McCrae
*/
trait GelatoStatement
case class DefiniteStatement(val lhs : List[Mode], val rhs : Mode) extends GelatoStatement {
override def toString = lhs.mkString(", ") + " -> " + rhs
}
case class QuestionStatement(val target : Mode) extends GelatoStatement {
override def toString = "?" + target
}
sealed trait Mode {
def predicate : Predicate
}
case class Past(val mode : Mode) extends Mode {
def predicate = mode.predicate
}
case class Future(val mode : Mode) extends Mode {
def predicate = mode.predicate
}
case class Is(val predicate : Predicate) extends Mode {
override def toString = predicate.toString
}
case class Not(val predicate : Predicate) extends Mode {
override def toString = "!" + predicate
}
case class May(val predicate : Predicate) extends Mode {
override def toString = "%" + predicate
}
case class MayNot(val predicate : Predicate) extends Mode {
override def toString = "%!" + predicate
}
case class Must(val predicate : Predicate) extends Mode {
override def toString = "#"+predicate
}
case class MustNot(val predicate : Predicate) extends Mode {
override def toString = "#!"+predicate
}
sealed trait Predicate
case class UnaryPredicate(pred : URI, arg : Argument) extends Predicate {
override def toString = "<"+pred+">("+arg+")"
}
case class BinaryPredicate(pred : URI, arg1 : Argument, arg2 : Argument) extends Predicate {
override def toString = "<"+pred+">("+arg1+","+arg2+")"
}
sealed trait Argument
case class Variable(name : Symbol) extends Argument {
override def toString = "?"+name.name
}
case class Constant(individual : URI) extends Argument {
override def toString = "<"+individual+">"
}
object GelatoParser extends JavaTokenParsers {
  def stat = ((mode*) <~ "->") ~ (mode*)
def mode =
"%" ~> "!" ~> pred ^^ { case p => MayNot(p) } |
"#" ~> "!" ~> pred ^^ { case p => MustNot(p) } |
"!" ~> pred ^^ { case p => Not(p) } |
"%" ~> pred ^^ { case p => May(p) } |
"#" ~> pred ^^ { case p => Must(p) } |
pred ^^ { case p => Is(p) }
def pred = (pred2 <~ "(") ~ (arg <~ ")") ^^ { case p ~ a => UnaryPredicate(p,a) } |
(pred2 <~ "(") ~ (arg <~ ",") ~ (arg <~ ")") ^^ {
case p ~ a1 ~ a2 => BinaryPredicate(p,a1,a2)
}
  def pred2 = "<" ~> ("""[^>]+""".r) <~ ">" ^^ { case uri => URI.create(uri)}
  def arg = "?" ~> ("""\w+""".r) ^^ { case sym => Variable(Symbol(sym))} |
      "<" ~> ("""[^>]+""".r) <~ ">" ^^ { case uri => Constant(URI.create(uri))}
}
| monnetproject/kap | gelato/src/main/scala/eu/monnetproject/gelato/statements/GelatoStatement.scala | Scala | bsd-3-clause | 4,397 |
package org.allenai.common
import org.allenai.common.JsonFormats._
import org.allenai.common.testkit.UnitSpec
import spray.json._
import spray.json.DefaultJsonProtocol._
import scala.util.{ Try, Success, Failure }
class JsonFormatsSpec extends UnitSpec {
case class Foo(name: String)
implicit val fooFormat = jsonFormat1(Foo.apply)
"ThrowableWriter" should "write message and stackTrace" in {
val e = new Exception("my message")
val json = e.toJson
val jsonObj = json.asJsObject
assert(jsonObj.fields("message") === JsString("my message"))
assert(jsonObj.fields("stackTrace") !== JsString("()"))
}
"TryWriter" should "write success" in {
val success: Try[Foo] = Success(Foo("foo"))
val js = success.toJson
assert(js === JsObject("success" -> JsObject("name" -> JsString("foo"))))
}
it should "write failure" in {
val failure: Try[Foo] = Failure(new IllegalArgumentException("bar"))
val js = failure.toJson
val failureJs = js.asJsObject.fields("failure").asJsObject
assert(failureJs.fields("message") === JsString("bar"))
}
}
| ryanai3/common | core/src/test/scala/org/allenai/common/JsonFormatsSpec.scala | Scala | apache-2.0 | 1,097 |
package ch.acmesoftware.orientDbScalaDsl
import com.orientechnologies.orient.core.metadata.schema.OType._
import scala.collection.JavaConverters._
class VertexTypeDslSpec extends Spec {
"VertexTypeDsl" should "create vertex type by label" in {
notTx(g => {
g.dsl createVertexType "NoProp"
})
tx(g => {
val res = g.getVertexType("NoProp")
res should not be null
res.isVertexType should equal(true)
res.properties().size() should equal(0)
})
}
it should "create vertex type by label with properties" in {
notTx(g => {
g.dsl createVertexType "Person" withProperty "name" -> STRING and "active" -> BOOLEAN
})
tx(g => {
val res = g.getVertexType("Person")
res should not be null
res.isVertexType should equal(true)
res.properties().size() should equal(2)
res.properties().asScala.count(prop => prop.getName.equals("name") && prop.getType.eq(STRING)) should equal(1)
res.properties().asScala.count(prop => prop.getName.equals("active") && prop.getType.eq(BOOLEAN)) should equal(1)
})
}
it should "create vertex type by label with unique index" in {
notTx(g => {
g.dsl createVertexType "City" withProperty "name" -> STRING unique "name"
})
tx(g => {
val res = g.getVertexType("City")
res should not be null
res.isVertexType should equal(true)
res.properties().size() should equal(1)
res.properties().asScala.count(prop => prop.getName.equals("name") && prop.getType.eq(STRING)) should equal(1)
res.getClassIndexes.asScala.size should equal(1)
res.getClassIndexes.asScala.count(index => index.getName.startsWith("name-unique")) should equal(1)
})
}
}
| acme-software/orientdb-scala-dsl | src/test/scala/ch/acmesoftware/orientDbScalaDsl/VertexTypeDslSpec.scala | Scala | mit | 1,737 |
package domino.configuration_watching
import domino.capsule.CapsuleContext
import org.osgi.framework.{ServiceRegistration, BundleContext}
import domino.scala_osgi_metatype.interfaces.{ObjectClassDefinition, MetaTypeProvider}
import domino.scala_osgi_metatype.builders.SingleMetaTypeProvider
import domino.service_consuming.ServiceConsuming
import org.osgi.service.cm.{ManagedServiceFactory, ManagedService}
/**
* Provides convenient methods to add a configuration or factory configuration watcher capsule to the current capsule scope.
*
* @groupname WatchConfigurations Watch configurations
* @groupdesc WatchConfigurations Methods for listening to configuration updates
* @groupname WatchFactoryConfigurations Watch factory configurations
* @groupdesc WatchFactoryConfigurations Methods for listening to factory configuration additions, updates and removals
*/
trait ConfigurationWatching {
/** Dependency */
protected def capsuleContext: CapsuleContext
/** Dependency */
protected def bundleContext: BundleContext
/** Dependency */
protected def serviceConsuming: ServiceConsuming
/**
* Executes the given handler with the initial configuration or an empty map if none exists. Whenever
* the configuration is changed, the capsules registered in the handler are stopped and the handler is executed
* again with the new configuration.
*
* @group WatchConfigurations
* @param servicePid service PID
* @param metaTypeProvider optional metatype provider
* @param f handler
* @return the managed service registration
*/
def whenConfigurationActive(servicePid: String, metaTypeProvider: Option[MetaTypeProvider] = None)
(f: (Map[String, Any]) => Unit): ServiceRegistration[ManagedService] = {
val s = new ConfigurationWatcherCapsule(servicePid, f, metaTypeProvider, serviceConsuming, bundleContext,
capsuleContext)
capsuleContext.addCapsule(s)
s.reg
}
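  // Editor's note: a hedged usage sketch, not part of the original source. Inside a Domino
  // bundle activator a call might look like the lines below; the PID, key and default value
  // are illustrative only:
  //
  //   whenConfigurationActive("com.example.service") { conf =>
  //     val port = conf.getOrElse("port", 8080)
  //     // start capsules / register services here; on a configuration update they are
  //     // stopped and this handler runs again with the new values
  //   }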
/**
* Like the same-named method which expects the service PID but takes the service PID from the given object class
* definition and registers a corresponding meta type provider so a nice configuration GUI will be created.
*
* @group WatchConfigurations
* @param objectClassDefinition object class definition
* @param f handler
* @return the managed service registration
*/
def whenConfigurationActive(objectClassDefinition: ObjectClassDefinition)
(f: (Map[String, Any]) => Unit): ServiceRegistration[ManagedService] = {
val metaTypeProvider = new SingleMetaTypeProvider(objectClassDefinition)
whenConfigurationActive(objectClassDefinition.id, Some(metaTypeProvider))(f)
}
/**
* Executes the given handler whenever a new factory configuration is created. Whenever a factory configuration
* is changed, the correct capsules registered in the corresponding handler are stopped and the handler is
* executed again with the new factory configuration. When the factory configuration is removed, the corresponding
* capsules are stopped.
*
* @group WatchFactoryConfigurations
* @param servicePid service PID
* @param name descriptive name for the factory
* @param metaTypeProvider optional metatype provider
* @param f handler
* @return the managed service factory registration
*/
def whenFactoryConfigurationActive(servicePid: String, name: String, metaTypeProvider: Option[MetaTypeProvider] = None)
(f: (Map[String, Any], String) => Unit): ServiceRegistration[ManagedServiceFactory] = {
val s = new FactoryConfigurationWatcherCapsule(servicePid, name, f, metaTypeProvider, serviceConsuming,
bundleContext, capsuleContext)
capsuleContext.addCapsule(s)
s.reg
}
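  // Editor's note: an illustrative sketch only, not in the original source; the PID, factory
  // name and handler body are hypothetical:
  //
  //   whenFactoryConfigurationActive("com.example.connector", "Connectors") { (conf, pid) =>
  //     // conf holds this instance's properties and pid identifies the instance; capsules
  //     // registered here are stopped when the instance is updated or removed
  //   }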
/**
* Like the same-named method which expects the service PID but takes the service PID from the given object class
* definition and registers a corresponding meta type provider so a nice configuration GUI will be created.
*
* @group WatchFactoryConfigurations
* @param objectClassDefinition object class definition
* @param f handler
* @return the managed service factory registration
*/
def whenFactoryConfigurationActive(objectClassDefinition: ObjectClassDefinition)
(f: (Map[String, Any], String) => Unit): ServiceRegistration[ManagedServiceFactory] = {
val metaTypeProvider = new SingleMetaTypeProvider(objectClassDefinition)
whenFactoryConfigurationActive(objectClassDefinition.id, objectClassDefinition.name, Some(metaTypeProvider))(f)
}
}
| helgoboss/domino | src/main/scala/domino/configuration_watching/ConfigurationWatching.scala | Scala | mit | 4,599 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600e.v2
import uk.gov.hmrc.ct.box.{CtBoxIdentifier, CtOptionalBoolean, Input}
case class E1010(value: Option[Boolean]) extends CtBoxIdentifier("Claiming exemption all or part") with CtOptionalBoolean with Input
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600e/v2/E1010.scala | Scala | apache-2.0 | 843 |
package org.automanlang.core.exception
case class FailedComputationException(err: String) extends Exception
| dbarowy/AutoMan | libautoman/src/main/scala/org/automanlang/core/exception/FailedComputationException.scala | Scala | gpl-2.0 | 109 |
import sbt._
/**
* Copied, with some modifications, from https://github.com/milessabin/shapeless/blob/master/project/Boilerplate.scala
*
* Generate a range of boilerplate classes, those offering alternatives with 0-22 params
* and would be tedious to craft by hand
*/
object Boilerplate {
import scala.StringContext._
implicit class BlockHelper(val sc: StringContext) extends AnyVal {
def block(args: Any*): String = {
val interpolated = sc.standardInterpolator(treatEscapes, args)
val rawLines = interpolated split '\n'
val trimmedLines = rawLines map { _ dropWhile (_.isWhitespace) }
trimmedLines mkString "\n"
}
}
val templates: Seq[Template] = List(
GenHListerInstances, GenFnForTupleInstances,
GenFuncInterfaces, GenFuncSyntax,
GenJavaArgsClasses, GenJavaArgsMethods
)
/** Returns a seq of the generated files. As a side-effect, it actually generates them... */
def gen(dir : File) = for(t <- templates) yield {
val tgtFile = dir / t.packageName / t.filename
IO.write(tgtFile, t.body)
tgtFile
}
/*
Blocks in the templates below use a custom interpolator, combined with post-processing to produce the body
    - The first block of lines beginning with '|' is output once
    - Then the block of lines beginning with '-' is replicated once for each arity,
      with the `templateVals` already pre-populated with the relevant vals for that arity
- Then the last block of lines prefixed with '|'
The block otherwise behaves as a standard interpolated string with regards to variable substitution.
*/
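  // Editor's note (illustrative, not part of the original source): as a concrete instance of
  // the '|' / '-' convention described above, for arity 1 the GenHListerInstances template
  // below expands its '-' block to roughly
  //   implicit def hlister1[A]: Aux[Tuple1[A], A::HNil] = new HLister[Tuple1[A]] {
  //     type Out = A::HNil
  //     def apply(a: Tuple1[A]): A::HNil = a._1::HNil
  //   }
  // while the '|' lines around it are emitted exactly once per generated file.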
object GenHListerInstances extends ScalaTemplate {
val filename = "HListerInstances.scala"
def content(tv: TemplateVals): String = {
import tv._
block"""
|package mist.api.internal
|import shadedshapeless._
|
|trait HListerInstances extends LowerPriorityHLister {
|
- implicit def hlister${arity}[${`A..N`}]: Aux[${`(A..N)`}, ${`A::N`}] = new HLister[${`(A..N)`}] {
- type Out = ${`A::N`}
- def apply(a: ${`(A..N)`}): ${`A::N`} = ${`(a._1::a._n)`}
- }
|}
"""
}
}
object GenFnForTupleInstances extends ScalaTemplate {
val filename = "FnForTuple.scala"
def content(tv: TemplateVals): String = {
import tv._
val FnIn = if (arity == 1) "A" else tv.`(A..N)`
val Fn = s"$FnIn => Res"
val fnApply = if (arity == 1) "f(in)" else "f.tupled(in)"
block"""
|package mist.api.internal
|import scala.annotation.implicitNotFound
|
|trait FnForTuple[In, F] {
| type Out
| def apply(f: F, in: In): Out
|}
|trait FnForTupleInstances {
| @implicitNotFound("couldn't find FnForTuple for {$$F} instance. Ensure that your function is receiving the same parameters as declared in Arg")
| type Aux[In, F, Out0] = FnForTuple[In, F] { type Out = Out0 }
- implicit def fn${arity}[${`A..N`}, Res]: Aux[$FnIn, $Fn, Res] = new FnForTuple[$FnIn, $Fn] {
- type Out = Res
- def apply(f: $Fn, in: $FnIn): Res = $fnApply
- }
|}
|object FnForTuple extends FnForTupleInstances
"""
}
}
object GenFuncInterfaces extends JavaTemplate {
val filename = "functions.scala"
def content(tv: TemplateVals) = {
import tv._
block"""
|package mist.api.jdsl
|
-@FunctionalInterface
-trait Func${arity}[${`-T1..N`}, +R] extends java.io.Serializable {
- @throws(classOf[Exception])
- def apply(${`a1:T1..aN:TN`}): R
-}
|
"""
}
}
object GenFuncSyntax extends JavaTemplate {
val filename = "functionsSyntax.scala"
def content(tv: TemplateVals) = {
import tv._
block"""
|package mist.api.jdsl
|
|trait FuncSyntax {
|
- implicit class FuncSyntax${arity}[${`-T1..N`}, R](f: Func${arity}[${`T1..N`}, R]) {
- def toScalaFunc: Function${arity}[${`T1..N`}, R]= (${`a1:T1..aN:TN`}) => f.apply(${`a1..aN`})
- }
|
|}
|object FuncSyntax extends FuncSyntax
"""
}
}
object GenJavaArgsClasses extends JavaTemplate {
val filename = "args.scala"
override def range: Range.Inclusive = (2 to 21)
def content(tv: TemplateVals) = {
import tv._
val extrasMethod = {
if (arity != 21) {
block"""
-
- def withMistExtras(): Args${arity}[${`T1..N-1`}, MistExtras] =
- new Args${arity}[${`T1..N-1`}, MistExtras](${`a1..aN-1`}, MistExtras.mistExtras)
"""
} else ""
}
block"""
|package mist.api.jdsl
|
|import org.apache.spark.api.java.JavaSparkContext
|import org.apache.spark.sql.SparkSession
|import org.apache.spark.streaming.api.java.JavaStreamingContext
|import mist.api._
|import FuncSyntax._
|import mist.api.SparkArgs._
|import mist.api.ArgDef
|
-class Args${arity-1}[${`T1..N-1`}](${`ArgDef1..n-1`}){
-
- /**
- * Define job execution that use JavaSparkContext for invocation
- */
- def onSparkContext[R](f: Func${arity}[${`T1..N-1`}, JavaSparkContext, R]): RawHandle[R] = {
- (${`a1&aN-1`} & javaSparkContextArg).apply(f.toScalaFunc)
- }
-
- /**
- * Define job execution that use JavaStreamingContext for invocation
- */
- def onStreamingContext[R](f: Func${arity}[${`T1..N-1`}, JavaStreamingContext, R]): RawHandle[R] = {
- (${`a1&aN-1`} & javaStreamingContextArg).apply(f.toScalaFunc)
- }
-
- def onSparkSession[R](f: Func${arity}[${`T1..N-1`}, SparkSession, R]): RawHandle[R] = {
- (${`a1&aN-1`} & sparkSessionArg).apply(f.toScalaFunc)
- }
-
- def onSparkSessionWithHive[R](f: Func${arity}[${`T1..N-1`}, SparkSession, R]): RawHandle[R] = {
- (${`a1&aN-1`} & sparkSessionWithHiveArg).apply(f.toScalaFunc)
- }
-
- def extract(ctx: FnContext): Extraction[${`(T1..N-1)`}] = (${`a1&aN-1`}).extract(ctx)
-
${extrasMethod}
-}
"""
}
}
object GenJavaArgsMethods extends JavaTemplate {
val filename = "WithArgs.scala"
override def range: Range.Inclusive = (1 to 20)
def content(tv: TemplateVals) = {
import tv._
block"""
|package mist.api.jdsl
|
|import mist.api.ArgDef
|
|trait WithArgs {
|
- /**
- * Declare ${arity} required arguments for job
- */
- def withArgs[${`T1..N`}](${`JArg1..n`}): Args${arity}[${`T1..N`}] =
- new Args${arity}(${`a1..aN_asScala`})
|
|}
|
|object WithArgs extends WithArgs
"""
}
}
trait Template { self =>
def packageName: String
def createVals(arity: Int): TemplateVals = new TemplateVals(arity)
def filename: String
def content(tv: TemplateVals): String
def range = 1 to 22
def body: String = {
val rawContents = range map { n => content(createVals(n)) split '\n' filterNot (_.isEmpty) }
val preBody = rawContents.head takeWhile (_ startsWith "|") map (_.tail)
val instances = rawContents flatMap {_ filter (_ startsWith "-") map (_.tail) }
val postBody = rawContents.head dropWhile (_ startsWith "|") dropWhile (_ startsWith "-") map (_.tail)
(preBody ++ instances ++ postBody) mkString "\n"
}
}
trait ScalaTemplate extends Template {
override def packageName = "mist/api/internal"
}
trait JavaTemplate extends Template {
override def packageName = "mist/api/jdsl"
}
class TemplateVals(val arity: Int) {
val synTypes = (0 until arity) map (n => (n+'A').toChar)
val synVals = (0 until arity) map (n => (n+'a').toChar)
val synTypedVals = (synVals zip synTypes) map { case (v,t) => v + ":" + t}
val `A..N` = synTypes.mkString(", ")
val `A..N,Res` = (synTypes :+ "Res") mkString ", "
val `a..n` = synVals.mkString(", ")
val `A::N` = (synTypes :+ "HNil") mkString "::"
val `a::n` = (synVals :+ "HNil") mkString "::"
val `_.._` = Seq.fill(arity)("_").mkString(", ")
val `(A..N)` = if (arity == 1) "Tuple1[A]" else synTypes.mkString("(", ", ", ")")
val `(_.._)` = if (arity == 1) "Tuple1[_]" else Seq.fill(arity)("_").mkString("(", ", ", ")")
val `(a..n)` = if (arity == 1) "Tuple1(a)" else synVals.mkString("(", ", ", ")")
val `(a._1::a._n)` = ((1 to arity).map(i => s"a._$i") :+ "HNil").mkString("::")
val `a:A..n:N` = synTypedVals mkString ", "
val synJavaTypes = (1 to arity) map (n => "T" + n)
val synJavaVals = (1 to arity) map (n => "a" + n)
val `T1..N` = synJavaTypes.mkString(",")
val `(T1..N)` = if (arity == 1) "T1" else synJavaTypes.mkString("(", ", ", ")")
val `(T1..N-1)` = if (arity == 1) "T1" else synJavaTypes.dropRight(1).mkString("(", ", ", ")")
val `T1..N-1` = synJavaTypes.dropRight(1).mkString(",")
val `-T1..N` = synJavaTypes.map("-" + _).mkString(",")
val `a1:T1..aN:TN` = (synJavaVals zip synJavaTypes).map({case (a, t) => a + ":" + t}).mkString(",")
val `a1..aN` = synJavaVals.mkString(",")
val `a1..aN_asScala` = synJavaVals.map(a => a + ".asScala").mkString(",")
val `a1..aN-1` = synJavaVals.dropRight(1).mkString(",")
val `a1&aN` = synJavaVals.mkString(" & ")
val `a1&aN-1` = synJavaVals.dropRight(1).mkString(" & ")
val `ArgDef1..n` = {
val types = synJavaTypes.map(t => s"ArgDef[$t]")
(types zip synJavaVals).map({case (t, a) => a + ":" + t}).mkString(" ,")
}
val `JArg1..n` = {
val types = synJavaTypes.map(t => s"JArg[$t]")
(types zip synJavaVals).map({case (t, a) => a + ":" + t}).mkString(" ,")
}
val `ArgDef1..n-1` = {
val types = synJavaTypes.map(t => s"ArgDef[$t]")
(types zip synJavaVals).dropRight(1).map({case (t, a) => a + ":" + t}).mkString(" ,")
}
}
} | Hydrospheredata/mist | project/Boilerplate.scala | Scala | apache-2.0 | 10,437 |
package scalaprops
import scalaprops.derive.Singletons
object Util {
def compareGenHelper[T](first: Gen[T], second: Gen[T])(
len: Int
): Boolean = {
val seed = System.currentTimeMillis()
val generated = first.infiniteStream(seed = seed).zip(second.infiniteStream(seed = seed)).take(len)
generated.forall { case (a, b) => a == b }
}
/**
   * Draw a sequence of values from each `Gen[T]`, using the same parameters and initial seed,
   * and return false if the two sequences differ.
*/
def compareGen[T](first: Gen[T], second: Gen[T]): Boolean = {
compareGenHelper(first, second)(100)
}
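  // Editor's note: a hedged usage sketch, not in the original source; `derivedGen` and
  // `manualGen` stand for two hypothetical Gen[Foo] instances expected to behave identically:
  //   val sameBehaviour: Boolean = compareGen(derivedGen, manualGen)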
def compareCogenHelper[T: Gen](r: Rand)(first: Cogen[T], second: Cogen[T])(
len: Int
): Boolean = {
val values = Gen.infinite(Gen.defaultSize, r, Gen[T]).take(len).toList
val s = CogenState(r, Gen[T])
val firstSeeds = values.scanLeft(s)((x, y) => first.cogen(y, x))
val secondSeeds = values.scanLeft(s)((x, y) => second.cogen(y, x))
val seeds = firstSeeds zip secondSeeds
seeds.forall { case (a, b) =>
val s = System.currentTimeMillis()
val size = 20
(a.rand == b.rand) && {
a.gen.samples(seed = s, listSize = size) == b.gen.samples(seed = s, listSize = size)
}
}
}
def compareCogen[T: Gen](first: Cogen[T], second: Cogen[T]): Boolean =
compareCogenHelper(Rand.standard(System.currentTimeMillis()))(first, second)(50)
def compareShrink[T: Gen](first: Shrink[T], second: Shrink[T]): Property =
Property.forAll { t: T => first(t) == second(t) }
def validateSingletons[T: Singletons](expected: T*): Boolean = {
val found = Singletons[T].apply()
found == expected
}
}
| scalaprops/scalaprops-shapeless | test/shared/src/test/scala-2/scalaprops/Util.scala | Scala | apache-2.0 | 1,686 |
package play_crawler
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.{SQLContext, SaveMode}
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.sql.functions._
/**
* Created by mukul on 22/1/17.
*/
object scrap {
def main(args: Array[String]): Unit = {
val conf = new SparkConf().setMaster("local[*]").setAppName("dftry")
val sc = new SparkContext(conf)
val sqlContext = new SQLContext(sc)
val rootLogger = Logger.getRootLogger()
rootLogger.setLevel(Level.ERROR)
val playScrapCSVPath = "/home/mukul/IdeaProjects/spark/src/main/resources/scrap.csv"
val playScrapDF = sqlContext.read
.format("com.databricks.spark.csv")
.option("header","true")
.load(playScrapCSVPath)
playScrapDF.show()
playScrapDF.cache()
playScrapDF
.coalesce(10)
.write
.mode(SaveMode.Overwrite)
.format("com.databricks.spark.csv")
.option("header","true")
.save("/home/mukul/Documents/Play_Crawler/static/play-scrap-csv")
val total = playScrapDF.count()
val genreDF = playScrapDF
.select(playScrapDF("genre"), playScrapDF("name"))
.groupBy("genre")
.agg(
count("*").alias("no_of_Apps"),
count("*").multiply(100).divide(total).cast("integer").alias("percentage")
)
genreDF.show()
genreDF
.coalesce(1)
.write
.mode(SaveMode.Overwrite)
.format("com.databricks.spark.csv")
.option("header","true")
.save("/home/mukul/Documents/Play_Crawler/static/genre-csv")
val downloadsDF = playScrapDF
.select(playScrapDF("downloads"), playScrapDF("name"))
.groupBy("downloads")
.agg(
count("*").alias("no_of_Apps"),
count("*").multiply(100).divide(total).cast("integer").alias("percentage")
)
downloadsDF.show()
downloadsDF
.coalesce(1)
.write
.mode(SaveMode.Overwrite)
.format("com.databricks.spark.csv")
.option("header","true")
.save("/home/mukul/Documents/Play_Crawler/static/downloads-csv")
val contentRateDF = playScrapDF
.select(playScrapDF("contentRating"), playScrapDF("name"))
.groupBy("contentRating")
.agg(
count("*").alias("no_of_Apps"),
count("*").multiply(100).divide(total).cast("integer").alias("percentage")
)
contentRateDF.show()
contentRateDF
.coalesce(1)
.write
.mode(SaveMode.Overwrite)
.format("com.databricks.spark.csv")
.option("header","true")
.save("/home/mukul/Documents/Play_Crawler/static/content-rate-csv")
val scoreClassDF = playScrapDF
.select(playScrapDF("scoreClass"), playScrapDF("name"))
.groupBy("scoreClass")
.agg(
count("*").alias("no_of_Apps"),
count("*").multiply(100).divide(total).cast("integer").alias("percentage")
)
scoreClassDF.show()
scoreClassDF
.coalesce(1)
.write
.mode(SaveMode.Overwrite)
.format("com.databricks.spark.csv")
.option("header","true")
.save("/home/mukul/Documents/Play_Crawler/static/score-class-csv")
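    // Editor's note: a hedged refactoring sketch, not part of the original source. The four
    // groupBy/percentage/write blocks above follow the same pattern and could be factored as,
    // for example:
    //   def summarize(col: String, outDir: String): Unit =
    //     playScrapDF.groupBy(col)
    //       .agg(count("*").alias("no_of_Apps"),
    //            count("*").multiply(100).divide(total).cast("integer").alias("percentage"))
    //       .coalesce(1).write.mode(SaveMode.Overwrite)
    //       .format("com.databricks.spark.csv").option("header", "true").save(outDir)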
playScrapDF.unpersist()
}
} | devmukul44/Play_Crawler | play-analytics-core/src/main/scala/play_crawler/scrap.scala | Scala | mit | 3,171 |
package com.mdsol.mauth
import java.nio.charset.StandardCharsets
import java.security.Security
import java.util.UUID
import com.mdsol.mauth.test.utils.FixturesLoader
import com.mdsol.mauth.util.MAuthKeysHelper.{getPrivateKeyFromString, getPublicKeyFromString}
import com.mdsol.mauth.util.MAuthSignatureHelper
import org.bouncycastle.jce.provider.BouncyCastleProvider
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
class MAuthSignatureHelperSpec extends AnyFlatSpec with Matchers {
Security.addProvider(new BouncyCastleProvider)
private val CLIENT_APP_UUID = "92a1869e-c80d-4f06-8775-6c4ebb0758e0"
private val CLIENT_REQUEST_METHOD = "GET"
private val CLIENT_REQUEST_PATH = "/resource/path"
private val CLIENT_REQUEST_PAYLOAD = "message here"
private val CLIENT_REQUEST_QUERY_PARAMETERS = "key1=value1&key2=value2"
private val TEST_EPOCH_TIME = 1424700000L
private val TEST_PRIVATE_KEY = getPrivateKeyFromString(FixturesLoader.getPrivateKey2)
  // the same test data as the ruby and python implementations
private val CLIENT_APP_UUID_V2 = "5ff4257e-9c16-11e0-b048-0026bbfffe5e"
private val CLIENT_REQUEST_METHOD_V2 = "PUT"
private val CLIENT_REQUEST_PATH_V2 = "/v1/pictures"
  private val CLIENT_REQUEST_QUERY_PARAMETERS_V2 = "key=-_.~ !@#$%^*()+{}|:\"'`<>?&∞=v&キ=v&0=v&a=v&a=b&a=c&a=a&k=&k=v"
private val TEST_EPOCH_TIME_V2 = "1309891855"
behavior of "MAuthSignatureHelper"
it should "correctly generate string to sign for mAuth V1" in {
    val expectedString = CLIENT_REQUEST_METHOD + "\n" +
      CLIENT_REQUEST_PATH + "\n" +
      CLIENT_REQUEST_PAYLOAD + "\n" +
      CLIENT_APP_UUID + "\n" + String.valueOf(TEST_EPOCH_TIME)
MAuthSignatureHelper.generateUnencryptedSignature(
UUID.fromString(CLIENT_APP_UUID),
CLIENT_REQUEST_METHOD,
CLIENT_REQUEST_PATH,
CLIENT_REQUEST_PAYLOAD,
String.valueOf(TEST_EPOCH_TIME)
) shouldBe expectedString
}
it should "correctly generate string to sign for mAuth V2" in {
    val payloadDigest = MAuthSignatureHelper.getHexEncodedDigestedString("message here")
    val expectedString = CLIENT_REQUEST_METHOD + "\n" +
      CLIENT_REQUEST_PATH + "\n" +
      payloadDigest + "\n" +
      CLIENT_APP_UUID + "\n" + String.valueOf(TEST_EPOCH_TIME) + "\n" +
      CLIENT_REQUEST_QUERY_PARAMETERS
MAuthSignatureHelper.generateStringToSignV2(
UUID.fromString(CLIENT_APP_UUID),
CLIENT_REQUEST_METHOD,
CLIENT_REQUEST_PATH,
CLIENT_REQUEST_QUERY_PARAMETERS,
CLIENT_REQUEST_PAYLOAD.getBytes(StandardCharsets.UTF_8),
String.valueOf(TEST_EPOCH_TIME)
) shouldBe expectedString
}
it should "correctly encode query string sort by code point" in {
val queryString = "∞=v&キ=v&0=v&a=v"
val expectedString = "0=v&a=v&%E2%88%9E=v&%E3%82%AD=v"
MAuthSignatureHelper.generateEncryptedQueryParams(queryString) shouldBe expectedString
}
it should "correctly encode query string sort by value if keys are the same" in {
val queryString = "a=b&a=c&a=a"
val expectedString = "a=a&a=b&a=c"
MAuthSignatureHelper.generateEncryptedQueryParams(queryString) shouldBe expectedString
}
it should "correctly encode query string with empty values" in {
val queryString = "k=&k=v"
MAuthSignatureHelper.generateEncryptedQueryParams(queryString) shouldBe queryString
}
it should "correctly encode query string" in {
val queryString = "key=The string ü@foo-bar"
val expectedString = "key=The%20string%20%C3%BC%40foo-bar"
MAuthSignatureHelper.generateEncryptedQueryParams(queryString) shouldBe expectedString
}
it should "correctly encode query string with special chars" in {
    val queryString = "key=-_.~ !@#$%^*()+{}|:\"'`<>?"
val expectedString = "key=-_.~%20%21%40%23%24%25%5E%2A%28%29%2B%7B%7D%7C%3A%22%27%60%3C%3E%3F"
MAuthSignatureHelper.generateEncryptedQueryParams(queryString) shouldBe expectedString
}
it should "correctly generate signature for V1" in {
val testString = "Hello world"
val expectedString = "F/GAuGYEykrtrmIE/XtETSi0QUoKxUwwTXljT1tUiqNHmyH2NRhKQ1flqusaB7H" +
"6bwPBb+FzXzfmiO32lJs6SxMjltqM/FjwucVNhn1BW+KXFnZniPh3M0+FwwspksX9xc/KcWEPebtIIEM5c" +
"X2rBl43xlvwYtS/+D+obo1AVPv2l5qd+Gwl9b61kYF/aoPGx+bVnmWZK8e8BZxZOjjGjmQAOYRYgGWzolL" +
"LnzIZ6xy6efY3D9jPXXDqgnqWQvwLStkKJIydrkXUTd0m36X6mD00qHgI7xoYSLgqxNSg1EgO8yuette8B" +
"Kl9D+YbIEJ3xFnaZmCfVGks0M9tmZ2PXg==".stripMargin.replaceAll("\\n", "")
MAuthSignatureHelper.encryptSignature(TEST_PRIVATE_KEY, testString) shouldBe expectedString
}
it should "correctly generate signature for V1 Unicode" in {
val testString = "こんにちはÆ"
val expectedString = ("cHrT3G7zCA2aRcY5jtvNlm0orafBOn924rQ9aSQS1lvNCwbg/LMnTsV+jHZUtOy" +
"DFSvErBwd9ga1FrsjOQDfhNoU1K+pVQ11nHU23cHQi0bsYByKPIDh1jMW4wNtP+A7Z/Xh0CIESBc+SaeIjP" +
"znMunocwci34kN4AXWudkZ2+xZxqfZiX6TVrwmREppsgoZD2ODVt6FtnBvcGd0sRAa9A3Iy+EaB8wOM5kaU" +
"yusfGcxeCYuCGN1FHjd1AkBkm2I4wbsxQInKDyYQXjMv3fA5oMw4nxhL/AJzUx3xWWCG5pub1/YB3jWwQgt" +
"Gjpxvb5LhHT9S/JtuT8RU01JukC8dQ==").stripMargin.replaceAll("\\n", "")
MAuthSignatureHelper.encryptSignature(TEST_PRIVATE_KEY, testString) shouldBe expectedString
}
it should "correctly generate signature for V2" in {
val testString = "Hello world"
val expectedString = ("KODkSEnqjr52EWOFvrRj2igwMR8EHsFYpBzDSEWge7UenB3u8OKP1nXeg1oJ0X" +
"1z8S+fpODMOh6NaGalEZgoyk0VRZ/BhFRiOg/xCMm6DA2J48EtBt8DYONVKTp4W2e2OU68NMGlj2upkjSs" +
"iD8MoIu2SHYwdkjx4PwKl2sPbQtKnsyl6kgSfhGd+1WsgTELDfeNdy3mSX7iJtKkpmUV5DZ1P0BcPCLbh/" +
"2KfAHx4sDIHFUf+U06ei/WVNzz1l5+fpwE0EV/lxtMLcCFUVQlM9li8Yjpsh0EbwzuV24pMB0xhwvci4B7" +
"JSYbLK76JUBthhwzUtXzyuzfQi4lNeXR7g==").stripMargin.replaceAll("\\n", "")
MAuthSignatureHelper.encryptSignatureRSA(TEST_PRIVATE_KEY, testString) shouldBe expectedString
}
it should "correctly generate signature for V2 Unicode" in {
val testString = "こんにちはÆ"
val expectedString = ("F9OqgCXr6vKAVBoU8Iogg09HhMZ+FpcJ8Q8DJ/M82vCDjVdxYQ1BYpuyXWN2jI" +
"H5CWKnYvXxF49aKwiXuo7bgUArNZZJuwRzI5hSEwsY6weVzlsO8DmdDR62MKozK9NBEr7nnVka8NFEWrpr" +
"WNPrgvy//YK5NAPSt+tLq/7qk5+qJZRjAjAhl09FD2pzYNGZkLx24UuPPfPSkvQKcybcAgY5y17FNkQTYY" +
"udjBy2hG6Df+Op77VjKx5yaLHZfoKcOmxc6UdE09kkoS5rsW2Y65kLi4xWbLK3i+VUC+WCqL8Vt7McJFMA" +
"wOyACDJPr4Z3VtHUZgnT9b5n7c7U/CItRg==").stripMargin.replaceAll("\\n", "")
MAuthSignatureHelper.encryptSignatureRSA(TEST_PRIVATE_KEY, testString) shouldBe expectedString
}
it should "verify signature for V2" in {
val testString = "Hello world"
val signString = MAuthSignatureHelper.encryptSignatureRSA(getPrivateKeyFromString(FixturesLoader.getPrivateKey), testString)
MAuthSignatureHelper.verifyRSA(testString, signString, getPublicKeyFromString(FixturesLoader.getPublicKey)) shouldBe true
}
it should "correctly generate signature of binary body for V2" in {
val testString = MAuthSignatureHelper.generateStringToSignV2(
UUID.fromString(CLIENT_APP_UUID_V2),
CLIENT_REQUEST_METHOD_V2,
CLIENT_REQUEST_PATH_V2,
CLIENT_REQUEST_QUERY_PARAMETERS_V2,
FixturesLoader.getBinaryFileBody,
TEST_EPOCH_TIME_V2
)
val expectedString = ("GpZIRB8RIxlfsjcROBElMEwa0r7jr632GkBe+R8lOv72vVV7bFMbJwQUHYm6vL/N" +
"KC7g4lJwvWcF60lllIUGwv/KWUOQwerqo5yCNoNumxjgDKjq7ILl8iFxsrV9LdvxwGyEBEwAPKzoTmW9xrad" +
"xmjn4ZZVMnQKEMns6iViBkwaAW2alp4ZtVfJIZHRRyiuFnITWH1PniyG0kI4Li16kY25VfmzfNkdAi0Cnl27" +
"Cy1+DtAl1zVnz6ObMAdtmsEtplvlqsRCRsdd37VfuUxUlolNpr5brjzTwXksScUjX80/HMnui5ZlFORGjHeb" +
"eZG5QVCouZPKBWTWsELGx1iyaw==").stripMargin.replaceAll("\\n", "")
MAuthSignatureHelper.encryptSignatureRSA(TEST_PRIVATE_KEY, testString) shouldBe expectedString
}
it should "correctly generate signature with empty body for V2" in {
val testString = MAuthSignatureHelper.generateStringToSignV2(
UUID.fromString(CLIENT_APP_UUID_V2),
CLIENT_REQUEST_METHOD_V2,
CLIENT_REQUEST_PATH_V2,
CLIENT_REQUEST_QUERY_PARAMETERS_V2,
Array.empty,
TEST_EPOCH_TIME_V2
)
val expectedString = ("jDB6fhwUA11ZSLb2W4ueS4l9hsguqmgcRez58kUo25iuMT5Uj9wWz+coHSpOd39B0" +
"cNW5D5UY6nWifw4RJIv/q8MdqS43WVgnCDSrNsSxpQ/ic6U3I3151S69PzSRZ+aR/I5A85Q9FgWB6wDNf4iX/" +
"BmZopfd5XjsLEyDymTRYedmB4DmONlTrsjVPs1DS2xY5xQyxIcxEUpVGDfTNroRTu5REBTttWbUB7BRXhKCc2" +
"pfRnUYPBo4Fa7nM8lI7J1/jUasMMLelr6hvcc6t21RCHhf4p9VlpokUOdN8slXU/kkC+OMUE04I021AUnZSpd" +
"hd/IoVR1JJDancBRzWA2HQ==").stripMargin.replaceAll("\\n", "")
MAuthSignatureHelper.encryptSignatureRSA(TEST_PRIVATE_KEY, testString) shouldBe expectedString
}
}
| mdsol/mauth-java-client | modules/mauth-common/src/test/scala/com/mdsol/mauth/MAuthSignatureHelperSpec.scala | Scala | mit | 8,679 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.test
import java.io.File
import java.util.Locale
import java.util.concurrent.ConcurrentLinkedQueue
import scala.collection.JavaConverters._
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.parquet.hadoop.ParquetFileReader
import org.apache.parquet.hadoop.util.HadoopInputFile
import org.apache.parquet.schema.PrimitiveType
import org.apache.parquet.schema.PrimitiveType.PrimitiveTypeName
import org.apache.parquet.schema.Type.Repetition
import org.scalatest.BeforeAndAfter
import org.apache.spark.SparkContext
import org.apache.spark.internal.io.FileCommitProtocol.TaskCommitMessage
import org.apache.spark.internal.io.HadoopMapReduceCommitProtocol
import org.apache.spark.scheduler.{SparkListener, SparkListenerJobStart}
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.TableIdentifier
import org.apache.spark.sql.catalyst.plans.logical.{AppendData, LogicalPlan, OverwriteByExpression}
import org.apache.spark.sql.execution.QueryExecution
import org.apache.spark.sql.execution.datasources.DataSourceUtils
import org.apache.spark.sql.execution.datasources.noop.NoopDataSource
import org.apache.spark.sql.execution.datasources.parquet.SpecificParquetRecordReaderBase
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.apache.spark.sql.util.QueryExecutionListener
import org.apache.spark.util.Utils
object LastOptions {
var parameters: Map[String, String] = null
var schema: Option[StructType] = null
var saveMode: SaveMode = null
def clear(): Unit = {
parameters = null
schema = null
saveMode = null
}
}
/** Dummy provider. */
class DefaultSource
extends RelationProvider
with SchemaRelationProvider
with CreatableRelationProvider {
case class FakeRelation(sqlContext: SQLContext) extends BaseRelation {
override def schema: StructType = StructType(Seq(StructField("a", StringType)))
}
override def createRelation(
sqlContext: SQLContext,
parameters: Map[String, String],
schema: StructType
): BaseRelation = {
LastOptions.parameters = parameters
LastOptions.schema = Some(schema)
FakeRelation(sqlContext)
}
override def createRelation(
sqlContext: SQLContext,
parameters: Map[String, String]
): BaseRelation = {
LastOptions.parameters = parameters
LastOptions.schema = None
FakeRelation(sqlContext)
}
override def createRelation(
sqlContext: SQLContext,
mode: SaveMode,
parameters: Map[String, String],
data: DataFrame): BaseRelation = {
LastOptions.parameters = parameters
LastOptions.schema = None
LastOptions.saveMode = mode
FakeRelation(sqlContext)
}
}
/** Dummy provider with only RelationProvider and CreatableRelationProvider. */
class DefaultSourceWithoutUserSpecifiedSchema
extends RelationProvider
with CreatableRelationProvider {
case class FakeRelation(sqlContext: SQLContext) extends BaseRelation {
override def schema: StructType = StructType(Seq(StructField("a", StringType)))
}
override def createRelation(
sqlContext: SQLContext,
parameters: Map[String, String]): BaseRelation = {
FakeRelation(sqlContext)
}
override def createRelation(
sqlContext: SQLContext,
mode: SaveMode,
parameters: Map[String, String],
data: DataFrame): BaseRelation = {
FakeRelation(sqlContext)
}
}
object MessageCapturingCommitProtocol {
val commitMessages = new ConcurrentLinkedQueue[TaskCommitMessage]()
}
class MessageCapturingCommitProtocol(jobId: String, path: String)
extends HadoopMapReduceCommitProtocol(jobId, path) {
// captures commit messages for testing
override def onTaskCommit(msg: TaskCommitMessage): Unit = {
MessageCapturingCommitProtocol.commitMessages.offer(msg)
}
}
class DataFrameReaderWriterSuite extends QueryTest with SharedSQLContext with BeforeAndAfter {
import testImplicits._
private val userSchema = new StructType().add("s", StringType)
private val userSchemaString = "s STRING"
private val textSchema = new StructType().add("value", StringType)
private val data = Seq("1", "2", "3")
private val dir = Utils.createTempDir(namePrefix = "input").getCanonicalPath
before {
Utils.deleteRecursively(new File(dir))
}
test("writeStream cannot be called on non-streaming datasets") {
val e = intercept[AnalysisException] {
spark.read
.format("org.apache.spark.sql.test")
.load()
.writeStream
.start()
}
Seq("'writeStream'", "only", "streaming Dataset/DataFrame").foreach { s =>
assert(e.getMessage.toLowerCase(Locale.ROOT).contains(s.toLowerCase(Locale.ROOT)))
}
}
test("resolve default source") {
spark.read
.format("org.apache.spark.sql.test")
.load()
.write
.format("org.apache.spark.sql.test")
.save()
}
test("resolve default source without extending SchemaRelationProvider") {
spark.read
.format("org.apache.spark.sql.test.DefaultSourceWithoutUserSpecifiedSchema")
.load()
.write
.format("org.apache.spark.sql.test.DefaultSourceWithoutUserSpecifiedSchema")
.save()
}
test("resolve full class") {
spark.read
.format("org.apache.spark.sql.test.DefaultSource")
.load()
.write
.format("org.apache.spark.sql.test")
.save()
}
test("options") {
val map = new java.util.HashMap[String, String]
map.put("opt3", "3")
val df = spark.read
.format("org.apache.spark.sql.test")
.option("opt1", "1")
.options(Map("opt2" -> "2"))
.options(map)
.load()
assert(LastOptions.parameters("opt1") == "1")
assert(LastOptions.parameters("opt2") == "2")
assert(LastOptions.parameters("opt3") == "3")
LastOptions.clear()
df.write
.format("org.apache.spark.sql.test")
.option("opt1", "1")
.options(Map("opt2" -> "2"))
.options(map)
.save()
assert(LastOptions.parameters("opt1") == "1")
assert(LastOptions.parameters("opt2") == "2")
assert(LastOptions.parameters("opt3") == "3")
}
test("pass partitionBy as options") {
Seq(true, false).foreach { flag =>
withSQLConf(SQLConf.LEGACY_PASS_PARTITION_BY_AS_OPTIONS.key -> s"$flag") {
Seq(1).toDF.write
.format("org.apache.spark.sql.test")
.partitionBy("col1", "col2")
.save()
if (flag) {
val partColumns = LastOptions.parameters(DataSourceUtils.PARTITIONING_COLUMNS_KEY)
assert(DataSourceUtils.decodePartitioningColumns(partColumns) === Seq("col1", "col2"))
} else {
assert(!LastOptions.parameters.contains(DataSourceUtils.PARTITIONING_COLUMNS_KEY))
}
}
}
}
test("save mode") {
spark.range(10).write
.format("org.apache.spark.sql.test")
.mode(SaveMode.ErrorIfExists)
.save()
assert(LastOptions.saveMode === SaveMode.ErrorIfExists)
spark.range(10).write
.format("org.apache.spark.sql.test")
.mode(SaveMode.Append)
.save()
assert(LastOptions.saveMode === SaveMode.Append)
// By default the save mode is `ErrorIfExists` for data source v1.
spark.range(10).write
.format("org.apache.spark.sql.test")
.save()
assert(LastOptions.saveMode === SaveMode.ErrorIfExists)
spark.range(10).write
.format("org.apache.spark.sql.test")
.mode("default")
.save()
assert(LastOptions.saveMode === SaveMode.ErrorIfExists)
}
test("save mode for data source v2") {
var plan: LogicalPlan = null
val listener = new QueryExecutionListener {
override def onSuccess(funcName: String, qe: QueryExecution, durationNs: Long): Unit = {
plan = qe.analyzed
}
override def onFailure(funcName: String, qe: QueryExecution, exception: Exception): Unit = {}
}
spark.listenerManager.register(listener)
try {
// append mode creates `AppendData`
spark.range(10).write
.format(classOf[NoopDataSource].getName)
.mode(SaveMode.Append)
.save()
sparkContext.listenerBus.waitUntilEmpty(1000)
assert(plan.isInstanceOf[AppendData])
// overwrite mode creates `OverwriteByExpression`
spark.range(10).write
.format(classOf[NoopDataSource].getName)
.mode(SaveMode.Overwrite)
.save()
sparkContext.listenerBus.waitUntilEmpty(1000)
assert(plan.isInstanceOf[OverwriteByExpression])
// By default the save mode is `ErrorIfExists` for data source v2.
spark.range(10).write
.format(classOf[NoopDataSource].getName)
.save()
sparkContext.listenerBus.waitUntilEmpty(1000)
assert(plan.isInstanceOf[AppendData])
spark.range(10).write
.format(classOf[NoopDataSource].getName)
.mode("default")
.save()
sparkContext.listenerBus.waitUntilEmpty(1000)
assert(plan.isInstanceOf[AppendData])
} finally {
spark.listenerManager.unregister(listener)
}
}
test("test path option in load") {
spark.read
.format("org.apache.spark.sql.test")
.option("intOpt", 56)
.load("/test")
assert(LastOptions.parameters("intOpt") == "56")
assert(LastOptions.parameters("path") == "/test")
LastOptions.clear()
spark.read
.format("org.apache.spark.sql.test")
.option("intOpt", 55)
.load()
assert(LastOptions.parameters("intOpt") == "55")
assert(!LastOptions.parameters.contains("path"))
LastOptions.clear()
spark.read
.format("org.apache.spark.sql.test")
.option("intOpt", 54)
.load("/test", "/test1", "/test2")
assert(LastOptions.parameters("intOpt") == "54")
assert(!LastOptions.parameters.contains("path"))
}
test("test different data types for options") {
val df = spark.read
.format("org.apache.spark.sql.test")
.option("intOpt", 56)
.option("boolOpt", false)
.option("doubleOpt", 6.7)
.load("/test")
assert(LastOptions.parameters("intOpt") == "56")
assert(LastOptions.parameters("boolOpt") == "false")
assert(LastOptions.parameters("doubleOpt") == "6.7")
LastOptions.clear()
df.write
.format("org.apache.spark.sql.test")
.option("intOpt", 56)
.option("boolOpt", false)
.option("doubleOpt", 6.7)
.save("/test")
assert(LastOptions.parameters("intOpt") == "56")
assert(LastOptions.parameters("boolOpt") == "false")
assert(LastOptions.parameters("doubleOpt") == "6.7")
}
test("check jdbc() does not support partitioning, bucketBy or sortBy") {
val df = spark.read.text(Utils.createTempDir(namePrefix = "text").getCanonicalPath)
var w = df.write.partitionBy("value")
var e = intercept[AnalysisException](w.jdbc(null, null, null))
Seq("jdbc", "partitioning").foreach { s =>
assert(e.getMessage.toLowerCase(Locale.ROOT).contains(s.toLowerCase(Locale.ROOT)))
}
w = df.write.bucketBy(2, "value")
e = intercept[AnalysisException](w.jdbc(null, null, null))
Seq("jdbc", "does not support bucketBy right now").foreach { s =>
assert(e.getMessage.toLowerCase(Locale.ROOT).contains(s.toLowerCase(Locale.ROOT)))
}
w = df.write.sortBy("value")
e = intercept[AnalysisException](w.jdbc(null, null, null))
Seq("sortBy must be used together with bucketBy").foreach { s =>
assert(e.getMessage.toLowerCase(Locale.ROOT).contains(s.toLowerCase(Locale.ROOT)))
}
w = df.write.bucketBy(2, "value").sortBy("value")
e = intercept[AnalysisException](w.jdbc(null, null, null))
Seq("jdbc", "does not support bucketBy and sortBy right now").foreach { s =>
assert(e.getMessage.toLowerCase(Locale.ROOT).contains(s.toLowerCase(Locale.ROOT)))
}
}
test("prevent all column partitioning") {
withTempDir { dir =>
val path = dir.getCanonicalPath
intercept[AnalysisException] {
spark.range(10).write.format("parquet").mode("overwrite").partitionBy("id").save(path)
}
intercept[AnalysisException] {
spark.range(10).write.format("csv").mode("overwrite").partitionBy("id").save(path)
}
}
}
test("load API") {
spark.read.format("org.apache.spark.sql.test").load()
spark.read.format("org.apache.spark.sql.test").load(dir)
spark.read.format("org.apache.spark.sql.test").load(dir, dir, dir)
spark.read.format("org.apache.spark.sql.test").load(Seq(dir, dir): _*)
Option(dir).map(spark.read.format("org.apache.spark.sql.test").load)
}
test("write path implements onTaskCommit API correctly") {
withSQLConf(
"spark.sql.sources.commitProtocolClass" ->
classOf[MessageCapturingCommitProtocol].getCanonicalName) {
withTempDir { dir =>
val path = dir.getCanonicalPath
MessageCapturingCommitProtocol.commitMessages.clear()
spark.range(10).repartition(10).write.mode("overwrite").parquet(path)
assert(MessageCapturingCommitProtocol.commitMessages.size() == 10)
}
}
}
test("read a data source that does not extend SchemaRelationProvider") {
val dfReader = spark.read
.option("from", "1")
.option("TO", "10")
.format("org.apache.spark.sql.sources.SimpleScanSource")
// when users do not specify the schema
checkAnswer(dfReader.load(), spark.range(1, 11).toDF())
// when users specify the schema
val inputSchema = new StructType().add("s", IntegerType, nullable = false)
val e = intercept[AnalysisException] { dfReader.schema(inputSchema).load() }
assert(e.getMessage.contains(
"org.apache.spark.sql.sources.SimpleScanSource does not allow user-specified schemas"))
}
test("read a data source that does not extend RelationProvider") {
val dfReader = spark.read
.option("from", "1")
.option("TO", "10")
.option("option_with_underscores", "someval")
.option("option.with.dots", "someval")
.format("org.apache.spark.sql.sources.AllDataTypesScanSource")
// when users do not specify the schema
val e = intercept[AnalysisException] { dfReader.load() }
assert(e.getMessage.contains("A schema needs to be specified when using"))
// when users specify the schema
val inputSchema = new StructType().add("s", StringType, nullable = false)
assert(dfReader.schema(inputSchema).load().count() == 10)
}
test("text - API and behavior regarding schema") {
// Writer
spark.createDataset(data).write.mode(SaveMode.Overwrite).text(dir)
testRead(spark.read.text(dir), data, textSchema)
// Reader, without user specified schema
testRead(spark.read.text(), Seq.empty, textSchema)
testRead(spark.read.text(dir, dir, dir), data ++ data ++ data, textSchema)
testRead(spark.read.text(Seq(dir, dir): _*), data ++ data, textSchema)
// Test explicit calls to single arg method - SPARK-16009
testRead(Option(dir).map(spark.read.text).get, data, textSchema)
// Reader, with user specified schema, should just apply user schema on the file data
testRead(spark.read.schema(userSchema).text(), Seq.empty, userSchema)
testRead(spark.read.schema(userSchema).text(dir), data, userSchema)
testRead(spark.read.schema(userSchema).text(dir, dir), data ++ data, userSchema)
testRead(spark.read.schema(userSchema).text(Seq(dir, dir): _*), data ++ data, userSchema)
}
test("textFile - API and behavior regarding schema") {
spark.createDataset(data).write.mode(SaveMode.Overwrite).text(dir)
// Reader, without user specified schema
testRead(spark.read.textFile().toDF(), Seq.empty, textSchema)
testRead(spark.read.textFile(dir).toDF(), data, textSchema)
testRead(spark.read.textFile(dir, dir).toDF(), data ++ data, textSchema)
testRead(spark.read.textFile(Seq(dir, dir): _*).toDF(), data ++ data, textSchema)
// Test explicit calls to single arg method - SPARK-16009
testRead(Option(dir).map(spark.read.text).get, data, textSchema)
// Reader, with user specified schema, should just apply user schema on the file data
val e = intercept[AnalysisException] { spark.read.schema(userSchema).textFile() }
assert(e.getMessage.toLowerCase(Locale.ROOT).contains(
"user specified schema not supported"))
intercept[AnalysisException] { spark.read.schema(userSchema).textFile(dir) }
intercept[AnalysisException] { spark.read.schema(userSchema).textFile(dir, dir) }
intercept[AnalysisException] { spark.read.schema(userSchema).textFile(Seq(dir, dir): _*) }
}
test("csv - API and behavior regarding schema") {
// Writer
spark.createDataset(data).toDF("str").write.mode(SaveMode.Overwrite).csv(dir)
val df = spark.read.csv(dir)
checkAnswer(df, spark.createDataset(data).toDF())
val schema = df.schema
// Reader, without user specified schema
val message = intercept[AnalysisException] {
testRead(spark.read.csv(), Seq.empty, schema)
}.getMessage
assert(message.contains("Unable to infer schema for CSV. It must be specified manually."))
testRead(spark.read.csv(dir), data, schema)
testRead(spark.read.csv(dir, dir), data ++ data, schema)
testRead(spark.read.csv(Seq(dir, dir): _*), data ++ data, schema)
// Test explicit calls to single arg method - SPARK-16009
testRead(Option(dir).map(spark.read.csv).get, data, schema)
// Reader, with user specified schema, should just apply user schema on the file data
testRead(spark.read.schema(userSchema).csv(), Seq.empty, userSchema)
testRead(spark.read.schema(userSchema).csv(dir), data, userSchema)
testRead(spark.read.schema(userSchema).csv(dir, dir), data ++ data, userSchema)
testRead(spark.read.schema(userSchema).csv(Seq(dir, dir): _*), data ++ data, userSchema)
}
test("json - API and behavior regarding schema") {
// Writer
spark.createDataset(data).toDF("str").write.mode(SaveMode.Overwrite).json(dir)
val df = spark.read.json(dir)
checkAnswer(df, spark.createDataset(data).toDF())
val schema = df.schema
// Reader, without user specified schema
intercept[AnalysisException] {
testRead(spark.read.json(), Seq.empty, schema)
}
testRead(spark.read.json(dir), data, schema)
testRead(spark.read.json(dir, dir), data ++ data, schema)
testRead(spark.read.json(Seq(dir, dir): _*), data ++ data, schema)
// Test explicit calls to single arg method - SPARK-16009
testRead(Option(dir).map(spark.read.json).get, data, schema)
// Reader, with user specified schema, data should be nulls as schema in file different
// from user schema
val expData = Seq[String](null, null, null)
testRead(spark.read.schema(userSchema).json(), Seq.empty, userSchema)
testRead(spark.read.schema(userSchema).json(dir), expData, userSchema)
testRead(spark.read.schema(userSchema).json(dir, dir), expData ++ expData, userSchema)
testRead(spark.read.schema(userSchema).json(Seq(dir, dir): _*), expData ++ expData, userSchema)
}
test("parquet - API and behavior regarding schema") {
// Writer
spark.createDataset(data).toDF("str").write.mode(SaveMode.Overwrite).parquet(dir)
val df = spark.read.parquet(dir)
checkAnswer(df, spark.createDataset(data).toDF())
val schema = df.schema
// Reader, without user specified schema
intercept[AnalysisException] {
testRead(spark.read.parquet(), Seq.empty, schema)
}
testRead(spark.read.parquet(dir), data, schema)
testRead(spark.read.parquet(dir, dir), data ++ data, schema)
testRead(spark.read.parquet(Seq(dir, dir): _*), data ++ data, schema)
// Test explicit calls to single arg method - SPARK-16009
testRead(Option(dir).map(spark.read.parquet).get, data, schema)
// Reader, with user specified schema, data should be nulls as schema in file different
// from user schema
val expData = Seq[String](null, null, null)
testRead(spark.read.schema(userSchema).parquet(), Seq.empty, userSchema)
testRead(spark.read.schema(userSchema).parquet(dir), expData, userSchema)
testRead(spark.read.schema(userSchema).parquet(dir, dir), expData ++ expData, userSchema)
testRead(
spark.read.schema(userSchema).parquet(Seq(dir, dir): _*), expData ++ expData, userSchema)
}
test("orc - API and behavior regarding schema") {
withSQLConf(SQLConf.ORC_IMPLEMENTATION.key -> "native") {
// Writer
spark.createDataset(data).toDF("str").write.mode(SaveMode.Overwrite).orc(dir)
val df = spark.read.orc(dir)
checkAnswer(df, spark.createDataset(data).toDF())
val schema = df.schema
// Reader, without user specified schema
intercept[AnalysisException] {
testRead(spark.read.orc(), Seq.empty, schema)
}
testRead(spark.read.orc(dir), data, schema)
testRead(spark.read.orc(dir, dir), data ++ data, schema)
testRead(spark.read.orc(Seq(dir, dir): _*), data ++ data, schema)
// Test explicit calls to single arg method - SPARK-16009
testRead(Option(dir).map(spark.read.orc).get, data, schema)
// Reader, with user specified schema, data should be nulls as schema in file different
// from user schema
val expData = Seq[String](null, null, null)
testRead(spark.read.schema(userSchema).orc(), Seq.empty, userSchema)
testRead(spark.read.schema(userSchema).orc(dir), expData, userSchema)
testRead(spark.read.schema(userSchema).orc(dir, dir), expData ++ expData, userSchema)
testRead(
spark.read.schema(userSchema).orc(Seq(dir, dir): _*), expData ++ expData, userSchema)
}
}
test("column nullability and comment - write and then read") {
withSQLConf(SQLConf.ORC_IMPLEMENTATION.key -> "native") {
Seq("json", "orc", "parquet", "csv").foreach { format =>
val schema = StructType(
StructField("cl1", IntegerType, nullable = false).withComment("test") ::
StructField("cl2", IntegerType, nullable = true) ::
StructField("cl3", IntegerType, nullable = true) :: Nil)
val row = Row(3, null, 4)
val df = spark.createDataFrame(sparkContext.parallelize(row :: Nil), schema)
// if we write and then read, the read will enforce schema to be nullable
val tableName = "tab"
withTable(tableName) {
df.write.format(format).mode("overwrite").saveAsTable(tableName)
// Verify the DDL command result: DESCRIBE TABLE
checkAnswer(
sql(s"desc $tableName").select("col_name", "comment").where($"comment" === "test"),
Row("cl1", "test") :: Nil)
// Verify the schema
val expectedFields = schema.fields.map(f => f.copy(nullable = true))
assert(spark.table(tableName).schema === schema.copy(fields = expectedFields))
}
}
}
}
test("parquet - column nullability -- write only") {
val schema = StructType(
StructField("cl1", IntegerType, nullable = false) ::
StructField("cl2", IntegerType, nullable = true) :: Nil)
val row = Row(3, 4)
val df = spark.createDataFrame(sparkContext.parallelize(row :: Nil), schema)
withTempPath { dir =>
val path = dir.getAbsolutePath
df.write.mode("overwrite").parquet(path)
val file = SpecificParquetRecordReaderBase.listDirectory(dir).get(0)
val hadoopInputFile = HadoopInputFile.fromPath(new Path(file), new Configuration())
val f = ParquetFileReader.open(hadoopInputFile)
val parquetSchema = f.getFileMetaData.getSchema.getColumns.asScala
.map(_.getPrimitiveType)
f.close()
// the write keeps nullable info from the schema
val expectedParquetSchema = Seq(
new PrimitiveType(Repetition.REQUIRED, PrimitiveTypeName.INT32, "cl1"),
new PrimitiveType(Repetition.OPTIONAL, PrimitiveTypeName.INT32, "cl2")
)
assert (expectedParquetSchema === parquetSchema)
}
}
test("SPARK-17230: write out results of decimal calculation") {
val df = spark.range(99, 101)
.selectExpr("id", "cast(id as long) * cast('1.0' as decimal(38, 18)) as num")
df.write.mode(SaveMode.Overwrite).parquet(dir)
val df2 = spark.read.parquet(dir)
checkAnswer(df2, df)
}
private def testRead(
df: => DataFrame,
expectedResult: Seq[String],
expectedSchema: StructType): Unit = {
checkAnswer(df, spark.createDataset(expectedResult).toDF())
assert(df.schema === expectedSchema)
}
test("saveAsTable with mode Append should not fail if the table not exists " +
"but a same-name temp view exist") {
withTable("same_name") {
withTempView("same_name") {
spark.range(10).createTempView("same_name")
spark.range(20).write.mode(SaveMode.Append).saveAsTable("same_name")
assert(
spark.sessionState.catalog.tableExists(TableIdentifier("same_name", Some("default"))))
}
}
}
test("saveAsTable with mode Append should not fail if the table already exists " +
"and a same-name temp view exist") {
withTable("same_name") {
withTempView("same_name") {
val format = spark.sessionState.conf.defaultDataSourceName
sql(s"CREATE TABLE same_name(id LONG) USING $format")
spark.range(10).createTempView("same_name")
spark.range(20).write.mode(SaveMode.Append).saveAsTable("same_name")
checkAnswer(spark.table("same_name"), spark.range(10).toDF())
checkAnswer(spark.table("default.same_name"), spark.range(20).toDF())
}
}
}
test("saveAsTable with mode ErrorIfExists should not fail if the table not exists " +
"but a same-name temp view exist") {
withTable("same_name") {
withTempView("same_name") {
spark.range(10).createTempView("same_name")
spark.range(20).write.mode(SaveMode.ErrorIfExists).saveAsTable("same_name")
assert(
spark.sessionState.catalog.tableExists(TableIdentifier("same_name", Some("default"))))
}
}
}
test("saveAsTable with mode Overwrite should not drop the temp view if the table not exists " +
"but a same-name temp view exist") {
withTable("same_name") {
withTempView("same_name") {
spark.range(10).createTempView("same_name")
spark.range(20).write.mode(SaveMode.Overwrite).saveAsTable("same_name")
assert(spark.sessionState.catalog.getTempView("same_name").isDefined)
assert(
spark.sessionState.catalog.tableExists(TableIdentifier("same_name", Some("default"))))
}
}
}
test("saveAsTable with mode Overwrite should not fail if the table already exists " +
"and a same-name temp view exist") {
withTable("same_name") {
withTempView("same_name") {
sql("CREATE TABLE same_name(id LONG) USING parquet")
spark.range(10).createTempView("same_name")
spark.range(20).write.mode(SaveMode.Overwrite).saveAsTable("same_name")
checkAnswer(spark.table("same_name"), spark.range(10).toDF())
checkAnswer(spark.table("default.same_name"), spark.range(20).toDF())
}
}
}
test("saveAsTable with mode Ignore should create the table if the table not exists " +
"but a same-name temp view exist") {
withTable("same_name") {
withTempView("same_name") {
spark.range(10).createTempView("same_name")
spark.range(20).write.mode(SaveMode.Ignore).saveAsTable("same_name")
assert(
spark.sessionState.catalog.tableExists(TableIdentifier("same_name", Some("default"))))
}
}
}
test("SPARK-18510: use user specified types for partition columns in file sources") {
import org.apache.spark.sql.functions.udf
withTempDir { src =>
val createArray = udf { (length: Long) =>
for (i <- 1 to length.toInt) yield i.toString
}
spark.range(4).select(createArray('id + 1) as 'ex, 'id, 'id % 4 as 'part).coalesce(1).write
.partitionBy("part", "id")
.mode("overwrite")
.parquet(src.toString)
// Specify a random ordering of the schema, partition column in the middle, etc.
// Also let's say that the partition columns are Strings instead of Longs.
// partition columns should go to the end
val schema = new StructType()
.add("id", StringType)
.add("ex", ArrayType(StringType))
val df = spark.read
.schema(schema)
.format("parquet")
.load(src.toString)
assert(df.schema.toList === List(
StructField("ex", ArrayType(StringType)),
StructField("part", IntegerType), // inferred partitionColumn dataType
StructField("id", StringType))) // used user provided partitionColumn dataType
checkAnswer(
df,
// notice how `part` is ordered before `id`
Row(Array("1"), 0, "0") :: Row(Array("1", "2"), 1, "1") ::
Row(Array("1", "2", "3"), 2, "2") :: Row(Array("1", "2", "3", "4"), 3, "3") :: Nil
)
}
}
test("SPARK-18899: append to a bucketed table using DataFrameWriter with mismatched bucketing") {
withTable("t") {
Seq(1 -> "a", 2 -> "b").toDF("i", "j").write.bucketBy(2, "i").saveAsTable("t")
val e = intercept[AnalysisException] {
Seq(3 -> "c").toDF("i", "j").write.bucketBy(3, "i").mode("append").saveAsTable("t")
}
assert(e.message.contains("Specified bucketing does not match that of the existing table"))
}
}
test("SPARK-18912: number of columns mismatch for non-file-based data source table") {
withTable("t") {
sql("CREATE TABLE t USING org.apache.spark.sql.test.DefaultSource")
val e = intercept[AnalysisException] {
Seq(1 -> "a").toDF("a", "b").write
.format("org.apache.spark.sql.test.DefaultSource")
.mode("append").saveAsTable("t")
}
assert(e.message.contains("The column number of the existing table"))
}
}
test("SPARK-18913: append to a table with special column names") {
withTable("t") {
Seq(1 -> "a").toDF("x.x", "y.y").write.saveAsTable("t")
Seq(2 -> "b").toDF("x.x", "y.y").write.mode("append").saveAsTable("t")
checkAnswer(spark.table("t"), Row(1, "a") :: Row(2, "b") :: Nil)
}
}
test("SPARK-16848: table API throws an exception for user specified schema") {
withTable("t") {
val schema = StructType(StructField("a", StringType) :: Nil)
val e = intercept[AnalysisException] {
spark.read.schema(schema).table("t")
}.getMessage
assert(e.contains("User specified schema not supported with `table`"))
}
}
test("SPARK-20431: Specify a schema by using a DDL-formatted string") {
spark.createDataset(data).write.mode(SaveMode.Overwrite).text(dir)
testRead(spark.read.schema(userSchemaString).text(), Seq.empty, userSchema)
testRead(spark.read.schema(userSchemaString).text(dir), data, userSchema)
testRead(spark.read.schema(userSchemaString).text(dir, dir), data ++ data, userSchema)
testRead(spark.read.schema(userSchemaString).text(Seq(dir, dir): _*), data ++ data, userSchema)
}
test("SPARK-20460 Check name duplication in buckets") {
Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
var errorMsg = intercept[AnalysisException] {
Seq((1, 1)).toDF("col", c0).write.bucketBy(2, c0, c1).saveAsTable("t")
}.getMessage
assert(errorMsg.contains("Found duplicate column(s) in the bucket definition"))
errorMsg = intercept[AnalysisException] {
Seq((1, 1)).toDF("col", c0).write.bucketBy(2, "col").sortBy(c0, c1).saveAsTable("t")
}.getMessage
assert(errorMsg.contains("Found duplicate column(s) in the sort definition"))
}
}
}
test("SPARK-20460 Check name duplication in schema") {
def checkWriteDataColumnDuplication(
format: String, colName0: String, colName1: String, tempDir: File): Unit = {
val errorMsg = intercept[AnalysisException] {
Seq((1, 1)).toDF(colName0, colName1).write.format(format).mode("overwrite")
.save(tempDir.getAbsolutePath)
}.getMessage
assert(errorMsg.contains("Found duplicate column(s) when inserting into"))
}
def checkReadUserSpecifiedDataColumnDuplication(
df: DataFrame, format: String, colName0: String, colName1: String, tempDir: File): Unit = {
val testDir = Utils.createTempDir(tempDir.getAbsolutePath)
df.write.format(format).mode("overwrite").save(testDir.getAbsolutePath)
val errorMsg = intercept[AnalysisException] {
spark.read.format(format).schema(s"$colName0 INT, $colName1 INT")
.load(testDir.getAbsolutePath)
}.getMessage
assert(errorMsg.contains("Found duplicate column(s) in the data schema:"))
}
def checkReadPartitionColumnDuplication(
format: String, colName0: String, colName1: String, tempDir: File): Unit = {
val testDir = Utils.createTempDir(tempDir.getAbsolutePath)
Seq(1).toDF("col").write.format(format).mode("overwrite")
.save(s"${testDir.getAbsolutePath}/$colName0=1/$colName1=1")
val errorMsg = intercept[AnalysisException] {
spark.read.format(format).load(testDir.getAbsolutePath)
}.getMessage
assert(errorMsg.contains("Found duplicate column(s) in the partition schema:"))
}
Seq((true, ("a", "a")), (false, ("aA", "Aa"))).foreach { case (caseSensitive, (c0, c1)) =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive.toString) {
withTempDir { src =>
// Check CSV format
checkWriteDataColumnDuplication("csv", c0, c1, src)
checkReadUserSpecifiedDataColumnDuplication(
Seq((1, 1)).toDF("c0", "c1"), "csv", c0, c1, src)
// If `inferSchema` is true, a CSV format is duplicate-safe (See SPARK-16896)
var testDir = Utils.createTempDir(src.getAbsolutePath)
Seq("a,a", "1,1").toDF().coalesce(1).write.mode("overwrite").text(testDir.getAbsolutePath)
val df = spark.read.format("csv").option("inferSchema", true).option("header", true)
.load(testDir.getAbsolutePath)
checkAnswer(df, Row(1, 1))
checkReadPartitionColumnDuplication("csv", c0, c1, src)
// Check JSON format
checkWriteDataColumnDuplication("json", c0, c1, src)
checkReadUserSpecifiedDataColumnDuplication(
Seq((1, 1)).toDF("c0", "c1"), "json", c0, c1, src)
// Inferred schema cases
testDir = Utils.createTempDir(src.getAbsolutePath)
Seq(s"""{"$c0":3, "$c1":5}""").toDF().write.mode("overwrite")
.text(testDir.getAbsolutePath)
val errorMsg = intercept[AnalysisException] {
spark.read.format("json").option("inferSchema", true).load(testDir.getAbsolutePath)
}.getMessage
assert(errorMsg.contains("Found duplicate column(s) in the data schema:"))
checkReadPartitionColumnDuplication("json", c0, c1, src)
// Check Parquet format
checkWriteDataColumnDuplication("parquet", c0, c1, src)
checkReadUserSpecifiedDataColumnDuplication(
Seq((1, 1)).toDF("c0", "c1"), "parquet", c0, c1, src)
checkReadPartitionColumnDuplication("parquet", c0, c1, src)
// Check ORC format
checkWriteDataColumnDuplication("orc", c0, c1, src)
checkReadUserSpecifiedDataColumnDuplication(
Seq((1, 1)).toDF("c0", "c1"), "orc", c0, c1, src)
checkReadPartitionColumnDuplication("orc", c0, c1, src)
}
}
}
}
test("Insert overwrite table command should output correct schema: basic") {
withTable("tbl", "tbl2") {
withView("view1") {
val df = spark.range(10).toDF("id")
df.write.format("parquet").saveAsTable("tbl")
spark.sql("CREATE VIEW view1 AS SELECT id FROM tbl")
spark.sql("CREATE TABLE tbl2(ID long) USING parquet")
spark.sql("INSERT OVERWRITE TABLE tbl2 SELECT ID FROM view1")
val identifier = TableIdentifier("tbl2")
val location = spark.sessionState.catalog.getTableMetadata(identifier).location.toString
val expectedSchema = StructType(Seq(StructField("ID", LongType, true)))
assert(spark.read.parquet(location).schema == expectedSchema)
checkAnswer(spark.table("tbl2"), df)
}
}
}
test("Insert overwrite table command should output correct schema: complex") {
withTable("tbl", "tbl2") {
withView("view1") {
val df = spark.range(10).map(x => (x, x.toInt, x.toInt)).toDF("col1", "col2", "col3")
df.write.format("parquet").saveAsTable("tbl")
spark.sql("CREATE VIEW view1 AS SELECT * FROM tbl")
spark.sql("CREATE TABLE tbl2(COL1 long, COL2 int, COL3 int) USING parquet PARTITIONED " +
"BY (COL2) CLUSTERED BY (COL3) INTO 3 BUCKETS")
spark.sql("INSERT OVERWRITE TABLE tbl2 SELECT COL1, COL2, COL3 FROM view1")
val identifier = TableIdentifier("tbl2")
val location = spark.sessionState.catalog.getTableMetadata(identifier).location.toString
val expectedSchema = StructType(Seq(
StructField("COL1", LongType, true),
StructField("COL3", IntegerType, true),
StructField("COL2", IntegerType, true)))
assert(spark.read.parquet(location).schema == expectedSchema)
checkAnswer(spark.table("tbl2"), df)
}
}
}
test("Create table as select command should output correct schema: basic") {
withTable("tbl", "tbl2") {
withView("view1") {
val df = spark.range(10).toDF("id")
df.write.format("parquet").saveAsTable("tbl")
spark.sql("CREATE VIEW view1 AS SELECT id FROM tbl")
spark.sql("CREATE TABLE tbl2 USING parquet AS SELECT ID FROM view1")
val identifier = TableIdentifier("tbl2")
val location = spark.sessionState.catalog.getTableMetadata(identifier).location.toString
val expectedSchema = StructType(Seq(StructField("ID", LongType, true)))
assert(spark.read.parquet(location).schema == expectedSchema)
checkAnswer(spark.table("tbl2"), df)
}
}
}
test("Create table as select command should output correct schema: complex") {
withTable("tbl", "tbl2") {
withView("view1") {
val df = spark.range(10).map(x => (x, x.toInt, x.toInt)).toDF("col1", "col2", "col3")
df.write.format("parquet").saveAsTable("tbl")
spark.sql("CREATE VIEW view1 AS SELECT * FROM tbl")
spark.sql("CREATE TABLE tbl2 USING parquet PARTITIONED BY (COL2) " +
"CLUSTERED BY (COL3) INTO 3 BUCKETS AS SELECT COL1, COL2, COL3 FROM view1")
val identifier = TableIdentifier("tbl2")
val location = spark.sessionState.catalog.getTableMetadata(identifier).location.toString
val expectedSchema = StructType(Seq(
StructField("COL1", LongType, true),
StructField("COL3", IntegerType, true),
StructField("COL2", IntegerType, true)))
assert(spark.read.parquet(location).schema == expectedSchema)
checkAnswer(spark.table("tbl2"), df)
}
}
}
test("use Spark jobs to list files") {
withSQLConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD.key -> "1") {
withTempDir { dir =>
val jobDescriptions = new ConcurrentLinkedQueue[String]()
val jobListener = new SparkListener {
override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
jobDescriptions.add(jobStart.properties.getProperty(SparkContext.SPARK_JOB_DESCRIPTION))
}
}
sparkContext.addSparkListener(jobListener)
try {
spark.range(0, 3).map(i => (i, i))
.write.partitionBy("_1").mode("overwrite").parquet(dir.getCanonicalPath)
// normal file paths
checkDatasetUnorderly(
spark.read.parquet(dir.getCanonicalPath).as[(Long, Long)],
0L -> 0L, 1L -> 1L, 2L -> 2L)
sparkContext.listenerBus.waitUntilEmpty(10000)
assert(jobDescriptions.asScala.toList.exists(
_.contains("Listing leaf files and directories for 3 paths")))
} finally {
sparkContext.removeSparkListener(jobListener)
}
}
}
}
}
| LantaoJin/spark | sql/core/src/test/scala/org/apache/spark/sql/test/DataFrameReaderWriterSuite.scala | Scala | apache-2.0 | 41,239 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.zeppelin.rinterpreter
import java.io.{BufferedInputStream, File, FileInputStream}
import java.nio.file.{Files, Paths}
import java.util._
import org.apache.commons.codec.binary.{Base64, StringUtils}
import org.apache.zeppelin.interpreter.Interpreter.FormType
import org.apache.zeppelin.interpreter.{InterpreterContext, _}
import org.apache.zeppelin.scheduler.Scheduler
import org.apache.zeppelin.spark.SparkInterpreter
import org.jsoup.Jsoup
import org.jsoup.nodes._
import org.jsoup.select.Elements
import org.slf4j.{Logger, LoggerFactory}
import scala.collection.JavaConversions._
import scala.io.Source
abstract class RInterpreter(properties : Properties, startSpark : Boolean = true) extends Interpreter (properties) {
protected val logger: Logger = RInterpreter.logger
logger.trace("Initialising an RInterpreter of class " + this.getClass.getName)
def getrContext: RContext = rContext
protected lazy val rContext : RContext = synchronized{ RContext(properties, this.getInterpreterGroup().getId()) }
def open: Unit = rContext.synchronized {
logger.trace("RInterpreter opening")
// We leave this as an Option[] because the pattern of nesting SparkInterpreter inside of wrapper interpreters
// has changed several times, and this allows us to fail more gracefully and handle those changes in one place.
val intp : Option[SparkInterpreter] = getSparkInterpreter()
rContext.open(intp)
rContext.testRPackage("htmltools", message =
"""You can continue
| without it, but some interactive visualizations will fail.
| You can install it from cran."""")
rContext.testRPackage("repr", license = true, message =
"""You can continue
| without it, but some forms of output from the REPL may not appear properly."""")
rContext.testRPackage("base64enc", license = true, message =
"""You can continue
| without it, but the REPL may not show images properly.""")
rContext.testRPackage("evaluate", license = false, message =
"""
|The REPL needs this to run. It can be installed from CRAN
| Thanks to Hadley Wickham and Yihui Xie for graciously making evaluate available under an Apache-compatible
| license so it can be used with this project.""".stripMargin)
}
def getSparkInterpreter() : Option[SparkInterpreter] =
getSparkInterpreter(getInterpreterInTheSameSessionByClassName(classOf[SparkInterpreter].getName))
def getSparkInterpreter(p1 : Interpreter) : Option[SparkInterpreter] = p1 match {
case s : SparkInterpreter => Some[SparkInterpreter](s)
case lzy : LazyOpenInterpreter => {
val p = lzy.getInnerInterpreter
lzy.open()
return getSparkInterpreter(p)
}
case w : WrappedInterpreter => return getSparkInterpreter(w.getInnerInterpreter)
case _ => None
}
def close: Unit = {
rContext.close
}
def getProgress(context :InterpreterContext): Int = rContext.getProgress
def cancel(context:InterpreterContext) : Unit = {}
def getFormType: FormType = {
return FormType.NONE
}
override def getScheduler : Scheduler = rContext.getScheduler
// TODO: completion is disabled because it could not be tested with current Zeppelin code
/*def completion(buf :String,cursor : Int) : List[String] = Array[String]("").toList
private[rinterpreter] def hiddenCompletion(buf :String,cursor : Int) : List[String] =
rContext.evalS1(s"""
|rzeppelin:::.z.completion("$buf", $cursor)
""".stripMargin).toList*/
}
object RInterpreter {
private val logger: Logger = LoggerFactory.getLogger(getClass)
logger.trace("logging inside the RInterpreter singleton")
// Some R interactive visualization packages insist on producing HTML that refers to javascript
// or css by file path. These functions are intended to load those files and embed them into the
// HTML as Base64 encoded DataURIs.
//FIXME These don't error but may not yet properly be converting script links
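  // For example, a tag such as <script src="/usr/lib/R/library/htmlwidgets/x.js"> (hypothetical
  // path) would, if that file exists locally, be rewritten to src="data:text/javascript;base64,...".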
def scriptToBase(doc : Element, testAttr : String, tag : String, mime : String): Unit = {
val elems : Elements = doc.getElementsByTag(tag)
elems.filter( (e : Element) => {
e.attributes().hasKey(testAttr) && e.attr(testAttr) != "" && e.attr(testAttr).slice(0,1) == "/"
}).foreach(scriptToBase(_, testAttr, mime))
}
def scriptToBase(node : Element, field : String, mime : String) : Unit = node.attr(field) match {
case x if Files.exists(Paths.get(x)) => node.attr(field, dataURI(x, mime))
case x if x.slice(0,4) == "http" => {}
case x if x.contains("ajax") => {}
case x if x.contains("googleapis") => {}
case x if x.slice(0,2) == "//" => node.attr(field, "http:" + x)
case _ => {}
}
def dataURI(file : String, mime : String) : String = {
val fp = new File(file)
val fdata = new Array[Byte](fp.length().toInt)
val fin = new BufferedInputStream(new FileInputStream(fp))
try {
fin.read(fdata)
} finally {
fin.close()
}
s"""data:${mime};base64,""" + StringUtils.newStringUtf8(Base64.encodeBase64(fdata, false))
}
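  // e.g. dataURI("/tmp/style.css", "text/css") (hypothetical path) returns "data:text/css;base64,"
  // followed by the Base64-encoded bytes of that file.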
// The purpose here is to deal with knitr producing HTML with script and css tags outside the <body>
  def processHTML(input: Array[String]): String = processHTML(input.mkString("\n"))
def processHTML(input: String) : String = {
val doc : Document = Jsoup.parse(input)
processHTML(doc)
}
private def processHTML(doc : Document) : String = {
val bod : Element = doc.body()
val head : Element = doc.head()
// Try to ignore the knitr script that breaks zeppelin display
head.getElementsByTag("script").reverseIterator.foreach(bod.prependChild(_))
// Only get css from head if it links to a file
head.getElementsByTag("link").foreach(bod.prependChild(_))
scriptToBase(bod, "href", "link", "text/css")
scriptToBase(bod, "src", "script", "text/javascript")
bod.html()
}
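  // Hypothetical round trip: given "<html><head><script src='//cdn.example/x.js'></script></head><body><p>hi</p></body></html>",
  // processHTML returns the body markup with the script element moved into it and its
  // protocol-relative src rewritten to "http://cdn.example/x.js".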
}
| tinkoff-dwh/zeppelin | r/src/main/scala/org/apache/zeppelin/rinterpreter/RInterpreter.scala | Scala | apache-2.0 | 6,722 |
package epic.parser.models
/*
Copyright 2012 David Hall
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import epic.framework._
import epic.parser._
import breeze.linalg._
import breeze.optimize._
import epic.trees.{ProcessedTreebank, AnnotatedLabel, TreeInstance}
import breeze.config.{CommandLineParser, Help}
import breeze.util.SerializableLogging
import epic.parser.projections.{GrammarRefinements, OracleParser, ParserChartConstraintsFactory}
import epic.util.CacheBroker
import epic.parser.ParserParams.XbarGrammar
import breeze.util._
import epic.trees.annotations._
import java.io.File
import epic.constraints.{ChartConstraints, CachedChartConstraintsFactory}
import breeze.util.Implicits._
import breeze.optimize.FirstOrderMinimizer.OptParams
import epic.parser.ParseEval.Statistics
import epic.features.LongestFrequentSuffixFeaturizer.LongestFrequentSuffix
import epic.features.LongestFrequentSuffixFeaturizer
import epic.util.Optional
import epic.dense.AdadeltaGradientDescentDVD
/**
* The main entry point for training discriminative parsers.
* Has a main method inherited from ParserPipeline.
* Use --help to see options, or just look at the Params class.
*
*
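 * A hypothetical invocation (flag names mirror the fields of Params; treebank options come from
 * ParserPipeline and are not shown here):
 *   ParserTrainer --useAdadelta true --maxIterations 500 --threads 8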
*/
object ParserTrainer extends epic.parser.ParserPipeline with SerializableLogging {
case class Params(@Help(text="What parser to build. LatentModelFactory,StructModelFactory,LexModelFactory,SpanModelFactory")
modelFactory: ParserExtractableModelFactory[AnnotatedLabel, String],
@Help(text="Name for the parser for saving and logging. will be inferrred if not provided.")
name: String = null,
implicit val cache: CacheBroker,
@Help(text="path for a baseline parser for computing constraints. will be built automatically if not provided.")
parser: File = null,
opt: OptParams,
@Help(text="Use Adadelta instead of Adagrad (hardcoded in here...)")
useAdadelta: Boolean = false,
@Help(text="Make training batches deterministic; useful for debugging / regression testing")
determinizeTraining: Boolean = false,
@Help(text="How often to run on the dev set.")
iterationsPerEval: Int = 100,
@Help(text="How many iterations to run.")
maxIterations: Int = 1002,
@Help(text="How often to look at a small set of the dev set.")
iterPerValidate: Int = 30,
@Help(text="How many threads to use, default is to use whatever Scala thinks is best.")
threads: Int = -1,
@Help(text="Should we randomize weights? Some models will force randomization.")
randomize: Boolean = false,
@Help(text="Should we enforce reachability? Can be useful if we're pruning the gold tree.")
enforceReachability: Boolean = true,
@Help(text="Whether or not we use constraints. Not using constraints is very slow.")
useConstraints: Boolean = true,
@Help(text="Should we check the gradient to make sure it's coded correctly?")
checkGradient: Boolean = false,
@Help(text="check specific indices, in addition to doing a full search.")
checkGradientsAt: String = null,
@Help(text="Max parse length")
maxParseLength: Int = 70,
@Help(text="Compute log likelihood on the training set")
computeTrainLL: Boolean = true,
annotator: TreeAnnotator[AnnotatedLabel, String, AnnotatedLabel] = GenerativeParser.defaultAnnotator())
protected val paramManifest = manifest[Params]
def trainParser( trainTrees: IndexedSeq[TreeInstance[AnnotatedLabel, String]],
validate: (Parser[AnnotatedLabel, String]) => Statistics, params: Params) = {
import params._
// if (threads >= 1)
// collection.parallel.ForkJoinTasks.defaultForkJoinPool.setParallelism(params.threads)
val initialParser = params.parser match {
case null =>
val (grammar, lexicon) = XbarGrammar().xbarGrammar(trainTrees)
GenerativeParser.annotatedParser(grammar, lexicon, annotator, trainTrees)
// GenerativeParser.annotatedParser(grammar, lexicon, Xbarize(), trainTrees)
case f =>
readObject[Parser[AnnotatedLabel, String]](f)
}
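    // Pruning constraints come from the baseline parser run in max-marginal mode; the cached
    // wrapper stores per-sentence constraints so they are not recomputed on later passes.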
val constraints = {
val maxMarginalized = initialParser.copy(marginalFactory=initialParser.marginalFactory match {
case StandardChartFactory(ref, mm) => StandardChartFactory(ref, maxMarginal = true)
case x => x
})
val uncached = new ParserChartConstraintsFactory[AnnotatedLabel, String](maxMarginalized, {(_:AnnotatedLabel).isIntermediate})
new CachedChartConstraintsFactory[AnnotatedLabel, String](uncached)
}
var theTrees = trainTrees.toIndexedSeq.filterNot(sentTooLong(_, params.maxParseLength))
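    // When pruning can make a gold tree unreachable, OracleParser swaps in the closest reachable
    // tree so training still has a consistent target (see the enforceReachability flag above).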
if (useConstraints && enforceReachability) {
val treebankGrammar = GenerativeParser.annotated(initialParser.topology, initialParser.lexicon, TreeAnnotator.identity, trainTrees)
val markovizedGrammar = GenerativeParser.annotated(initialParser.topology, initialParser.lexicon, annotator, trainTrees)
val proj = new OracleParser(treebankGrammar, markovizedGrammar)
theTrees = theTrees.par.map(ti => ti.copy(tree=proj.forTree(ti.tree, ti.words, constraints.constraints(ti.words)))).seq.toIndexedSeq
}
val baseMeasure = if (useConstraints) {
constraints
} else {
ChartConstraints.Factory.noSparsity[AnnotatedLabel, String]
}
val model = modelFactory.make(theTrees, initialParser.topology, initialParser.lexicon, constraints)
val obj = new ModelObjective(model, theTrees, params.threads)
val cachedObj = new CachedBatchDiffFunction(obj)
val init = obj.initialWeightVector(randomize)
if (checkGradient) {
val cachedObj2 = new CachedBatchDiffFunction(new ModelObjective(model, theTrees.take(opt.batchSize), params.threads))
val indices = (0 until 10).map(i => if (i < 0) model.featureIndex.size + i else i)
println("testIndices: " + indices)
GradientTester.testIndices(cachedObj2, obj.initialWeightVector(randomize = true), indices, toString={(i: Int) => model.featureIndex.get(i).toString}, skipZeros = true)
println("test")
GradientTester.test(cachedObj2, obj.initialWeightVector(randomize = true), toString={(i: Int) => model.featureIndex.get(i).toString}, skipZeros = false)
}
type OptState = FirstOrderMinimizer[DenseVector[Double], BatchDiffFunction[DenseVector[Double]]]#State
def evalAndCache(pair: (OptState, Int)) {
val (state, iter) = pair
val weights = state.x
if (iter % iterPerValidate == 0) {
logger.info("Validating...")
val parser = model.extractParser(weights)
val stats = validate(parser)
logger.info("Overall statistics for validation: " + stats)
}
}
val name = Option(params.name).orElse(Option(model.getClass.getSimpleName).filter(_.nonEmpty)).getOrElse("DiscrimParser")
val itr: Iterator[FirstOrderMinimizer[DenseVector[Double], BatchDiffFunction[DenseVector[Double]]]#State] = if (determinizeTraining) {
val scanningBatchesObj = cachedObj.withScanningBatches(params.opt.batchSize)
if (useAdadelta) {
println("OPTIMIZATION: Adadelta")
new AdadeltaGradientDescentDVD(params.opt.maxIterations).iterations(scanningBatchesObj, init).
asInstanceOf[Iterator[FirstOrderMinimizer[DenseVector[Double], BatchDiffFunction[DenseVector[Double]]]#State]]
} else {
println("OPTIMIZATION: Adagrad")
params.opt.iterations(scanningBatchesObj, init).asInstanceOf[Iterator[FirstOrderMinimizer[DenseVector[Double], BatchDiffFunction[DenseVector[Double]]]#State]]
}
} else {
if (useAdadelta) {
println("OPTIMIZATION: Adadelta")
new AdadeltaGradientDescentDVD(params.opt.maxIterations).iterations(cachedObj.withRandomBatches(params.opt.batchSize), init).
asInstanceOf[Iterator[FirstOrderMinimizer[DenseVector[Double], BatchDiffFunction[DenseVector[Double]]]#State]]
} else {
println("OPTIMIZATION: Adagrad")
params.opt.iterations(cachedObj, init)
}
}
for ((state, iter) <- itr.take(maxIterations).zipWithIndex.tee(evalAndCache _)
if iter != 0 && iter % iterationsPerEval == 0 || evaluateNow) yield try {
val parser = model.extractParser(state.x)
if (iter + iterationsPerEval >= maxIterations && computeTrainLL) {
computeLL(trainTrees, model, state.x)
}
(s"$name-$iter", parser)
} catch {
case e: Exception => e.printStackTrace(); throw e
}
}
def sentTooLong(p: TreeInstance[AnnotatedLabel, String], maxLength: Int): Boolean = {
p.words.count(x => x == "'s" || x(0).isLetterOrDigit) > maxLength
}
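  // Creating a file named EVALUATE_NOW in the working directory forces an evaluation on the next
  // iteration; the sentinel file is deleted as soon as it is noticed.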
def evaluateNow = {
val sentinel = new File("EVALUATE_NOW")
if (sentinel.exists()) {
sentinel.delete()
logger.info("Evaluating now!!!!")
true
} else {
false
}
}
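  // Reports the training-set log likelihood as the sum over sentences of the gold marginal's log
  // partition minus the full marginal's log partition; sentences that fail to parse are skipped.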
def computeLL(trainTrees: IndexedSeq[TreeInstance[AnnotatedLabel, String]], model: Model[TreeInstance[AnnotatedLabel, String]], weights: DenseVector[Double]) {
println("Computing final log likelihood on the whole training set...")
val inf = model.inferenceFromWeights(weights).forTesting
val ll = trainTrees.par.aggregate(0.0)((currLL, trainTree) => {
try {
val s = inf.scorer(trainTree)
currLL + inf.goldMarginal(s, trainTree).logPartition - inf.marginal(s, trainTree).logPartition
} catch {
case e: Exception => println("Couldn't parse")
currLL
}
}, _ + _)
println("Log likelihood on " + trainTrees.size + " examples: " + ll)
}
}
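/**
 * Small diagnostic utility: reads a ProcessedTreebank from the command line and prints each
 * training sentence alongside its longest-frequent-suffix encoding.
 */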
object Suffixes extends SerializableLogging {
def main(args: Array[String]):Unit = {
val tb = CommandLineParser.readIn[ProcessedTreebank](args)
val counts = GenerativeParser.extractCounts(tb.trainTrees)._1
val marginalized: Counter[String, Double] = sum(counts(::, *))
val lfs = LongestFrequentSuffixFeaturizer(marginalized)
for(ti <- tb.trainTrees) {
val suffixes = lfs.lookupSentence(ti.words)
println("original: " + ti.words.mkString(" "))
println("suffixes: " + suffixes.mkString(" "))
}
}
}
| langkilde/epic | src/main/scala/epic/parser/models/ParserTrainer.scala | Scala | apache-2.0 | 11,079 |