Dataset schema (column, dtype, value summary):

  code        stringlengths   5 – 1M
  repo_name   stringlengths   5 – 109
  path        stringlengths   6 – 208
  language    stringclasses   1 value
  license     stringclasses   15 values
  size        int64           5 – 1M
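Each row below pairs a flattened Scala source file (code) with its repository metadata. As a minimal sketch of how a row matching this schema can be modeled and filtered, here is a self-contained Scala example; the CodeRow case class, the RowFilterExample object, and the placeholder code strings are illustrative assumptions and not part of the dataset (only the repo_name, path, license, and size values are copied from rows in this dump).

// Illustrative only: CodeRow and RowFilterExample are assumptions, not part of the dataset.
final case class CodeRow(
  code: String,     // full source text of the file (flattened onto one line in this dump)
  repoName: String, // "owner/repository", e.g. "softprops/seqd"
  path: String,     // file path inside the repository
  language: String, // always "Scala" here (the schema reports 1 distinct value)
  license: String,  // one of 15 license identifiers, e.g. "mit" or "apache-2.0"
  size: Long        // reported size of the file
)

object RowFilterExample {
  def main(args: Array[String]): Unit = {
    // Two sample rows; the metadata values are copied from rows of this dump,
    // the `code` strings are placeholders.
    val rows = Seq(
      CodeRow("object Placeholder1", "softprops/seqd",
        "seqd-core/src/main/scala/Generator.scala", "Scala", "mit", 2723L),
      CodeRow("object Placeholder2", "rdbc-io/rdbc",
        "rdbc-tck/src/main/scala/io/rdbc/tck/NonExistingTableSpec.scala", "Scala", "apache-2.0", 3211L)
    )
    // Keep permissively licensed rows below a size threshold.
    val selected = rows.filter(r => Set("mit", "apache-2.0").contains(r.license) && r.size < 3000L)
    selected.foreach(r => println(s"${r.repoName} ${r.path} (size ${r.size})"))
  }
}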
package com.whisk.docker import com.github.dockerjava.core.DockerClientConfig import org.slf4j.LoggerFactory import scala.concurrent.duration._ import scala.concurrent.{Await, ExecutionContext, Future} trait DockerKit { implicit val docker: Docker = new Docker(DockerClientConfig.createDefaultConfigBuilder().build()) private lazy val log = LoggerFactory.getLogger(this.getClass) val PullImagesTimeout = 20.minutes val StartContainersTimeout = 20.seconds val StopContainersTimeout = 10.seconds // we need ExecutionContext in order to run docker.init() / docker.stop() there implicit def dockerExecutionContext: ExecutionContext = ExecutionContext.global def dockerContainers: List[DockerContainer] = Nil def listImages(): Future[Set[String]] = { import scala.collection.JavaConverters._ Future(docker.client.listImagesCmd().exec().asScala.flatMap(_.getRepoTags).toSet) } def stopRmAll(): Future[Seq[DockerContainer]] = Future.traverse(dockerContainers)(_.remove(force = true)) def pullImages(): Future[Seq[DockerContainer]] = { listImages().flatMap { images => val containersToPull = dockerContainers.filterNot { c => val cImage = if (c.image.contains(":")) c.image else c.image + ":latest" images(cImage) } Future.traverse(containersToPull)(_.pull()) } } def initReadyAll(): Future[Seq[(DockerContainer, Boolean)]] = Future.traverse(dockerContainers)(_.init()).flatMap(Future.traverse(_)(c => c.isReady().map(c -> _).recover { case e => log.error(e.getMessage, e) c -> false })) def startAllOrFail(): Unit = { Await.result(pullImages(), PullImagesTimeout) val allRunning: Boolean = try { val future: Future[Boolean] = initReadyAll().map(_.map(_._2).forall(identity)) Await.result(future, StartContainersTimeout) } catch { case e: Exception => log.error("Exception during container initialization", e) false } if (!allRunning) { Await.ready(stopRmAll(), StopContainersTimeout) throw new RuntimeException("Cannot run all required containers") } } def stopAllQuietly(): Unit = { try { Await.ready(stopRmAll(), StopContainersTimeout) } catch { case e: Throwable => log.error(e.getMessage, e) } } }
AdAgility/docker-it-scala
core/src/main/scala/com/whisk/docker/DockerKit.scala
Scala
mit
2,332
/*
 * Copyright 2017 PayPal
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.squbs.unicomplex.dummycubesvc

import akka.actor.{Actor, ActorLogging, ActorRef, Props}
import akka.http.scaladsl.server.Route
import akka.pattern.ask
import org.squbs.lifecycle.{GracefulStop, GracefulStopHelper}
import org.squbs.unicomplex.Timeouts._
import org.squbs.unicomplex.{Ping, Pong, RouteDefinition}

class PingPongSvc extends RouteDefinition {

  def route: Route = path("ping") {
    get {
      onSuccess((context.actorOf(Props(classOf[PingPongClient])) ? "ping").mapTo[String]) {
        case value => complete(value)
      }
    }
  } ~
  path("pong") {
    get {
      onSuccess((context.actorOf(Props(classOf[PingPongClient])) ? "pong").mapTo[String]) {
        case value => complete(value)
      }
    }
  }
}

private class PingPongClient extends Actor with ActorLogging {

  private val pingPongActor = context.actorSelection("/user/DummyCubeSvc/PingPongPlayer")

  def ping(responder: ActorRef): Receive = {
    case Pong => responder ! Pong.toString
  }

  def pong(responder: ActorRef): Receive = {
    case Ping => responder ! Ping.toString
  }

  def receive: Receive = {
    case "ping" =>
      pingPongActor ! Ping
      context.become(ping(sender()))
    case "pong" =>
      pingPongActor ! Pong
      context.become(pong(sender()))
  }
}

class PingPongActor extends Actor with ActorLogging with GracefulStopHelper {

  def receive = {
    case GracefulStop => defaultLeafActorStop
    case Ping => sender() ! Pong
    case Pong => sender() ! Ping
  }
}
akara/squbs
squbs-unicomplex/src/test/scala/org/squbs/unicomplex/dummycubesvc/PingPongSvc.scala
Scala
apache-2.0
2,076
package seqd object Generator { object Id { private[this] val workerMask = 0x000000000001F000L private[this] val datacenterMask = 0x00000000003E0000L private[this] val timestampMask = 0xFFFFFFFFFFC00000L def unapply(l: Long): Option[(Long, Long, Long, Long)] = Some( (l & timestampMask) >> timestampLeftShift, (l & datacenterMask) >> datacenterIdShift, (l & workerMask) >> workerIdShift, l & sequenceMask) } case class State(lts: Long, seq: Long) { def next(timestamp: Long, tick: => Long): State = if (lts == timestamp) ((seq + 1) & sequenceMask) match { case 0 => @annotation.tailrec def wind(ts: Long): Long = if (ts > lts) ts else wind(tick) State(wind(timestamp), 0L) case s => State(timestamp, s) } else State(timestamp, 0L) } val defaultTwepoch = 1288834974657L // Tue, 21 Mar 2006 20:50:14.000 GMT private[this] val workerIdBits = 5L private[this] val datacenterIdBits = 5L private[this] val maxWorkerId = -1L ^ (-1L << workerIdBits) private[this] val maxDatacenterId = -1L ^ (-1L << datacenterIdBits) private[this] val sequenceBits = 12L private[seqd] val workerIdShift = sequenceBits private[seqd] val datacenterIdShift = sequenceBits + workerIdBits private[seqd] val timestampLeftShift = sequenceBits + workerIdBits + datacenterIdBits private[seqd] val sequenceMask = -1L ^ (-1L << sequenceBits) def apply (twepoch: Long = defaultTwepoch, workerId: Long = 0, datacenterId: Long = 0, prev: State = State(-1, 0), clock: Clock = Clock.default): Either[String, Generator] = if (workerId > maxWorkerId || workerId < 0) Left(s"worker Id can't be greater than $maxWorkerId or less than 0") else if (datacenterId > maxDatacenterId || datacenterId < 0) Left(s"datacenter Id can't be greater than $maxDatacenterId or less than 0") else Right(new Generator(twepoch, workerId, datacenterId, prev, clock)) } class Generator private[seqd] (twepoch: Long, workerId: Long, datacenterId: Long, prev: Generator.State, clock: Clock) { import Generator._ private[this] var state = prev def next(): Either[String, Long] = synchronized { var timestamp = clock() if (timestamp < state.lts) Left(s"clock is moving backwards. Refusing to generate id for ${state.lts - timestamp} milliseconds") else { state = state.next(timestamp, clock()) Right( ((state.lts - twepoch) << timestampLeftShift) | (datacenterId << datacenterIdShift) | (workerId << workerIdShift) | state.seq) } } }
softprops/seqd
seqd-core/src/main/scala/Generator.scala
Scala
mit
2,723
/* * Copyright 2014–2020 SlamData Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package slamdata import sbt._, Keys._ import bintray.{BintrayKeys, BintrayPlugin}, BintrayKeys._ import sbtghactions.GitHubActionsPlugin, GitHubActionsPlugin.autoImport._ import scala.{sys, Some} import scala.collection.immutable.Seq object SbtSlamDataPlugin extends SbtSlamDataBase { override def requires = super.requires && BintrayPlugin object autoImport extends autoImport { lazy val noPublishSettings = Seq( publish := {}, publishLocal := {}, bintrayRelease := {}, publishArtifact := false, skip in publish := true, bintrayEnsureBintrayPackageExists := {}) } import autoImport._ override def projectSettings = super.projectSettings ++ addCommandAlias("releaseSnapshot", "; project /; reload; checkLocalEvictions; bintrayEnsureBintrayPackageExists; publish; bintrayRelease") ++ Seq( sbtPlugin := true, bintrayOrganization := Some("slamdata-inc"), bintrayRepository := "sbt-plugins", bintrayReleaseOnPublish := false, publishMavenStyle := false, // it's annoying that sbt-bintray doesn't do this for us credentials ++= { if (githubIsWorkflowBuild.value) { val creds = for { user <- sys.env.get("BINTRAY_USER") pass <- sys.env.get("BINTRAY_PASS") } yield Credentials("Bintray API Realm", "api.bintray.com", user, pass) creds.toSeq } else { Seq() } }) override def buildSettings = super.buildSettings ++ Seq( secrets += file("credentials.yml.enc"), transferPublishAndTagResources := { transferToBaseDir("plugin", (ThisBuild / baseDirectory).value, "credentials.yml.enc") transferPublishAndTagResources.value }) protected val autoImporter = autoImport }
slamdata/sbt-slamdata
plugin/src/main/scala/slamdata/SbtSlamDataPlugin.scala
Scala
apache-2.0
2,421
package gui import javax.swing._ import java.awt.event.{InputEvent, ActionEvent, ActionListener, KeyEvent} /** Class for creating MenuBar **/ class MenuBar extends JMenuBar with ActionListener { /** All items in the file menu */ private val fileMenuItems: Array[String] = Array[String]("File", "Exit") /** All items in the file */ private val adbMenuItems: Array[String] = Array[String]("ADB", "Restart", "Show devices") /** All items in the help menu */ private val helpMenuItems: Array[String] = Array[String]("Help", "About") { /** File menu */ val fileMenu: JMenu = new JMenu(fileMenuItems(0)) this.add(fileMenu) /** Exit submenu */ val exitMenuItem: JMenuItem = new JMenuItem(fileMenuItems(1)) exitMenuItem.setMnemonic(KeyEvent.VK_E) exitMenuItem.setActionCommand(fileMenuItems(1)) exitMenuItem.addActionListener(this) /** Adb menu */ val adbMenu: JMenu = new JMenu(adbMenuItems(0)) this.add(adbMenu) /** Adb Restart submenu */ val restartAdbMenu: JMenuItem = new JMenuItem(adbMenuItems(1)) restartAdbMenu.setMnemonic(KeyEvent.VK_R) restartAdbMenu.setActionCommand(adbMenuItems(1)) restartAdbMenu.addActionListener(this) /** Adb Show Devices submenu */ val adbShowDevicesMenu: JMenuItem = new JMenuItem(adbMenuItems(2)) adbShowDevicesMenu.setMnemonic(KeyEvent.VK_S) adbShowDevicesMenu.setActionCommand(adbMenuItems(2)) adbShowDevicesMenu.addActionListener(this) /** Help Menu*/ val helpMenu: JMenu = new JMenu(helpMenuItems(0)) this.add(helpMenu) /** About submenu */ val aboutMenuItem: JMenuItem = new JMenuItem(helpMenuItems(1)) aboutMenuItem.setMnemonic(KeyEvent.VK_A) aboutMenuItem.setActionCommand(helpMenuItems(1)) aboutMenuItem.addActionListener(this) fileMenu.add(exitMenuItem) adbMenu.add(restartAdbMenu) adbMenu.add(adbShowDevicesMenu) helpMenu.add(aboutMenuItem) } /** Functions to be defined in implementing class */ def showAdb(): Unit = {} def restartAdb(): Unit = {} /** * When a menu item is clicked. * @param e Event */ def actionPerformed(e: ActionEvent) { if (e.getActionCommand == fileMenuItems(1)) { // Exit System.exit(0) } else if (e.getActionCommand == adbMenuItems(1)) { // Restart ADB restartAdb() } else if (e.getActionCommand == adbMenuItems(2)) { // Show ADB devices showAdb() } else if (e.getActionCommand == helpMenuItems(1)) { JOptionPane.showMessageDialog(this, "© Slide 2015 - Loren Kuich\\nhttp://www.slide-app.com", "About", JOptionPane.PLAIN_MESSAGE) } } }
muthai/slide-desktop
src/gui/MenuBar.scala
Scala
gpl-2.0
2,858
/*********************************************************************** * Copyright (c) 2013-2016 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 * which accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. *************************************************************************/ package org.locationtech.geomesa.kafka import java.io.Closeable import java.{util => ju} import com.typesafe.scalalogging.LazyLogging import com.vividsolutions.jts.geom.Envelope import kafka.producer.{Producer, ProducerConfig} import org.geotools.data.store.{ContentEntry, ContentFeatureStore} import org.geotools.data.{FeatureReader, FeatureWriter, Query} import org.geotools.feature.FeatureCollection import org.geotools.feature.collection.BridgeIterator import org.geotools.geometry.jts.ReferencedEnvelope import org.locationtech.geomesa.features.ScalaSimpleFeature import org.locationtech.geomesa.kafka.KafkaDataStore.FeatureSourceFactory import org.locationtech.geomesa.utils.geotools._ import org.locationtech.geomesa.utils.text.ObjectPoolFactory import org.opengis.feature.simple.{SimpleFeature, SimpleFeatureType} import org.opengis.filter.identity.FeatureId import org.opengis.filter.{Filter, Id} import scala.collection.JavaConversions._ class KafkaProducerFeatureStore(entry: ContentEntry, sft: SimpleFeatureType, topic: String, broker: String, producer: Producer[Array[Byte], Array[Byte]], q: Query = null) extends ContentFeatureStore(entry, q) with Closeable with LazyLogging { private val writerPool = ObjectPoolFactory(new ModifyingFeatureWriter(query), 5) override def getBoundsInternal(query: Query) = ReferencedEnvelope.create(new Envelope(-180, 180, -90, 90), CRS_EPSG_4326) override def buildFeatureType(): SimpleFeatureType = sft override def addFeatures(featureCollection: FeatureCollection[SimpleFeatureType, SimpleFeature]): ju.List[FeatureId] = { writerPool.withResource { fw => val ret = Array.ofDim[FeatureId](featureCollection.size()) fw.setIter(new BridgeIterator[SimpleFeature](featureCollection.features())) var i = 0 while(fw.hasNext) { val sf = fw.next() ret(i) = sf.getIdentifier i+=1 fw.write() } ret.toList } } override def removeFeatures(filter: Filter): Unit = filter match { case Filter.INCLUDE => clearFeatures() case _ => super.removeFeatures(filter) } def clearFeatures(): Unit = { val msg = GeoMessage.clear() logger.debug("sending message: {}", msg) val encoder = new KafkaGeoMessageEncoder(sft) producer.send(encoder.encodeClearMessage(topic, msg)) } override def getWriterInternal(query: Query, flags: Int) = new ModifyingFeatureWriter(query) class ModifyingFeatureWriter(query: Query) extends FeatureWriter[SimpleFeatureType, SimpleFeature] with LazyLogging { val msgEncoder = new KafkaGeoMessageEncoder(sft) val reuse = new ScalaSimpleFeature("", sft) private var id = 1L def getNextId: String = { val ret = id id += 1 s"$ret" } var toModify: Iterator[SimpleFeature] = if(query == null) Iterator[SimpleFeature]() else if(query.getFilter == null) Iterator.continually { reuse.getIdentifier.setID(getNextId) reuse } else query.getFilter match { case ids: Id => ids.getIDs.map(id => new ScalaSimpleFeature(id.toString, sft)).iterator case Filter.INCLUDE => Iterator.continually(new ScalaSimpleFeature("", sft)) } def setIter(iter: Iterator[SimpleFeature]): Unit = { toModify = iter } var curFeature: SimpleFeature = null override def getFeatureType: 
SimpleFeatureType = sft override def next(): SimpleFeature = { curFeature = toModify.next() curFeature } override def remove(): Unit = { val msg = GeoMessage.delete(curFeature.getID) curFeature = null send(msg) } override def write(): Unit = { val msg = GeoMessage.createOrUpdate(curFeature) curFeature = null send(msg) } override def hasNext: Boolean = toModify.hasNext override def close(): Unit = {} private def send(msg: GeoMessage): Unit = { logger.debug("sending message: {}", msg) producer.send(msgEncoder.encodeMessage(topic, msg)) } } override def getCountInternal(query: Query): Int = 0 override def getReaderInternal(query: Query): FeatureReader[SimpleFeatureType, SimpleFeature] = null override def close(): Unit = producer.close() } object KafkaProducerFeatureStoreFactory { def apply(broker: String): FeatureSourceFactory = { val config = { val props = new ju.Properties() props.put("metadata.broker.list", broker) props.put("serializer.class", "kafka.serializer.DefaultEncoder") new ProducerConfig(props) } (entry: ContentEntry, schemaManager: KafkaDataStoreSchemaManager) => { val fc = schemaManager.getFeatureConfig(entry.getTypeName) val kafkaProducer = new Producer[Array[Byte], Array[Byte]](config) new KafkaProducerFeatureStore(entry, fc.sft, fc.topic, broker, kafkaProducer) } } }
mdzimmerman/geomesa
geomesa-kafka/geomesa-kafka-datastore/src/main/scala/org/locationtech/geomesa/kafka/KafkaProducerFeatureStore.scala
Scala
apache-2.0
5,519
// package benchmarks // package cec // package cec2013 // package niching // import benchmarks.cec.Helper // import benchmarks.dimension._ // import benchmarks.matrix._ // import shapeless._ // import shapeless.ops.nat.ToInt // trait F8Params[N <: Nat] { val params: Dimension[N, Int] } // trait F9Params[N <: Nat, A] { val params: (Dimension6[Dimension[N, A]], A) } // trait F10Params[N <: Nat, A] { val params: (Dimension8[Dimension[N, A]], A) } // trait F11Params[N <: Nat, A] { // val params: (Dimension6[Dimension[N, A]], Dimension6[Matrix[N, N, A]], A) // } // trait F12Params[N <: Nat, A] { // val params: (Dimension8[Dimension[N, A]], Dimension8[Matrix[N, N, A]], A) // } // sealed trait CEC2013Sized[N <: Nat] // trait Params { // implicit object cec2013Sized2 extends CEC2013Sized[nat._2] // implicit object cec2013Sized3 extends CEC2013Sized[nat._3] // implicit object cec2013Sized5 extends CEC2013Sized[nat._5] // implicit object cec2013Sized10 extends CEC2013Sized[nat._10] // implicit object cec2013Sized20 extends CEC2013Sized[nat._20] // implicit val f8Params2 = new F8Params[nat._2] { // val params = Sized(3, 4) // } // implicit val f8Params16 = new F8Params[nat._16] { // val params = Sized(1, 1, 1, 2, 1, 1, 1, 2, 1, 1, 1, 3, 1, 1, 1, 4) // } // implicit def cec2013NichingF9Params[N <: Nat: ToInt: CEC2013Sized] = new F9Params[N, Double] { // val helper = Helper("cec2013/niching") // val dim = implicitly[ToInt[N]].apply // val params = ( // Sized.wrap(helper.shiftsFromResource[N](s"CF1_M_D${dim}_opt.dat").toVector), // -2500.0 // ) // } // implicit def cec2013NichingF10Params[N <: Nat: ToInt: CEC2013Sized] = new F10Params[N, Double] { // val helper = Helper("cec2013/niching") // val dim = implicitly[ToInt[N]].apply // val params = ( // Sized.wrap(helper.shiftsFromResource[N](s"CF2_M_D${dim}_opt.dat").toVector), // -2500.0 // ) // } // implicit def cec2013NichingF11Params[N <: Nat: GTEq1: CEC2013Sized: ToInt] = new F11Params[N, Double] { // val helper = Helper("cec2013/niching") // val dim = implicitly[ToInt[N]].apply // val params = ( // Sized.wrap(helper.shiftsFromResource[N](s"CF3_M_D${dim}_opt.dat").toVector), // helper.matricesFromResource[nat._6, N](s"CF3_M_D${dim}.dat"), // -4000.0 // ) // } // implicit def cec2013NichingF12Params[N <: Nat: GTEq1: CEC2013Sized: ToInt] = new F12Params[N, Double] { // val helper = Helper("cec2013/niching") // val dim = implicitly[ToInt[N]].apply // val params = ( // Sized.wrap(helper.shiftsFromResource[N](s"CF4_M_D${dim}_opt.dat").toVector), // helper.matricesFromResource[nat._8, N](s"CF4_M_D${dim}.dat"), // -4000.0 // ) // } // }
cirg-up/benchmarks
src/main/scala/benchmarks/cec/cec2013/niching/Params.scala
Scala
apache-2.0
2,830
package is.hail.methods import breeze.linalg.{*, DenseMatrix, DenseVector} import is.hail.HailContext import is.hail.annotations._ import is.hail.expr.ir.functions.MatrixToTableFunction import is.hail.expr.ir.{ExecuteContext, MatrixValue, TableValue} import is.hail.types._ import is.hail.types.physical.{PCanonicalStruct, PStruct, PType} import is.hail.types.virtual._ import is.hail.rvd.{RVD, RVDContext, RVDType} import is.hail.sparkextras.ContextRDD import is.hail.utils._ import org.apache.spark.mllib.linalg.Vectors import org.apache.spark.mllib.linalg.distributed.{IndexedRow, IndexedRowMatrix} import org.apache.spark.sql.Row case class PCA(entryField: String, k: Int, computeLoadings: Boolean) extends MatrixToTableFunction { override def typ(childType: MatrixType): TableType = { TableType( childType.rowKeyStruct ++ TStruct("loadings" -> TArray(TFloat64)), childType.rowKey, TStruct("eigenvalues" -> TArray(TFloat64), "scores" -> TArray(childType.colKeyStruct ++ TStruct("scores" -> TArray(TFloat64))))) } def preservesPartitionCounts: Boolean = false def execute(ctx: ExecuteContext, mv: MatrixValue): TableValue = { if (k < 1) fatal(s"""requested invalid number of components: $k | Expect componenents >= 1""".stripMargin) val rowMatrix = mv.toRowMatrix(entryField) val indexedRows = rowMatrix.rows.map { case (i, a) => IndexedRow(i, Vectors.dense(a)) } .cache() val irm = new IndexedRowMatrix(indexedRows, rowMatrix.nRows, rowMatrix.nCols) info(s"pca: running PCA with $k components...") val svd = irm.computeSVD(k, computeLoadings) if (svd.s.size < k) fatal( s"Found only ${ svd.s.size } non-zero (or nearly zero) eigenvalues, " + s"but user requested ${ k } principal components.") def collectRowKeys(): Array[Annotation] = { val rowKeyIdx = mv.typ.rowKeyFieldIdx val rowKeyTypes = mv.typ.rowKeyStruct.types mv.rvd.toUnsafeRows.map[Any] { r => Row.fromSeq(rowKeyIdx.map(i => Annotation.copy(rowKeyTypes(i), r(i)))) } .collect() } val rowType = PCanonicalStruct.canonical(TStruct(mv.typ.rowKey.zip(mv.typ.rowKeyStruct.types): _*) ++ TStruct("loadings" -> TArray(TFloat64))) .setRequired(true) .asInstanceOf[PStruct] val rowKeysBc = HailContext.backend.broadcast(collectRowKeys()) val localRowKeySignature = mv.typ.rowKeyStruct.types val crdd: ContextRDD[Long] = if (computeLoadings) { ContextRDD.weaken(svd.U.rows).cmapPartitions { (ctx, it) => val rvb = ctx.rvb it.map { ir => rvb.start(rowType) rvb.startStruct() val rowKeys = rowKeysBc.value(ir.index.toInt).asInstanceOf[Row] var j = 0 while (j < localRowKeySignature.length) { rvb.addAnnotation(localRowKeySignature(j), rowKeys.get(j)) j += 1 } rvb.startArray(k) var i = 0 while (i < k) { rvb.addDouble(ir.vector(i)) i += 1 } rvb.endArray() rvb.endStruct() rvb.end() } } } else ContextRDD.empty() val rvd = RVD.coerce(ctx, RVDType(rowType, mv.typ.rowKey), crdd) val (t1, f1) = mv.typ.globalType.insert(TArray(TFloat64), "eigenvalues") val (globalScoreType, f3) = mv.typ.colKeyStruct.insert(TArray(TFloat64), "scores") val (newGlobalType, f2) = t1.insert(TArray(globalScoreType), "scores") val data = if (!svd.V.isTransposed) svd.V.asInstanceOf[org.apache.spark.mllib.linalg.DenseMatrix].values else svd.V.toArray val V = new DenseMatrix[Double](svd.V.numRows, svd.V.numCols, data) val S = DenseVector(svd.s.toArray) val eigenvalues = svd.s.toArray.map(math.pow(_, 2)) val scaledEigenvectors = V(*, ::) *:* S val scores = (0 until mv.nCols).iterator.map { i => (0 until k).iterator.map { j => scaledEigenvectors(i, j) }.toFastIndexedSeq }.toFastIndexedSeq val g1 = f1(mv.globals.value, 
eigenvalues.toFastIndexedSeq) val globalScores = mv.colValues.safeJavaValue.zipWithIndex.map { case (cv, i) => f3(mv.typ.extractColKey(cv.asInstanceOf[Row]), scores(i)) } val newGlobal = f2(g1, globalScores) TableValue(ctx, TableType(rowType.virtualType, mv.typ.rowKey, newGlobalType.asInstanceOf[TStruct]), BroadcastRow(ctx, newGlobal.asInstanceOf[Row], newGlobalType.asInstanceOf[TStruct]), rvd) } }
cseed/hail
hail/src/main/scala/is/hail/methods/PCA.scala
Scala
mit
4,513
/*
 * Copyright 2001-2013 Artima, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.scalatest.tagobjects

import org.scalatest.Tag

/**
 * Tag object that indicates a test is disk-intensive (<em>i.e.</em>, consumes a lot of disk-IO bandwidth when it runs).
 *
 * <p>
 * The corresponding tag annotation for this tag object is <code>org.scalatest.tags.Disk</code>.
 * This tag object can be used to tag test functions (in style traits other than <code>Spec</code>,
 * in which tests are methods not functions) as being disk-intensive.
 * See the "tagging tests" section in the documentation for your chosen styles to see the syntax.
 * Here's an example for <code>FlatSpec</code>:
 * </p>
 *
 * <pre class="stHighlight">
 * package org.scalatest.examples.tagobjects.disk
 *
 * import org.scalatest._
 * import tagobjects.Disk
 *
 * class SetSpec extends FlatSpec {
 *
 *   "An empty Set" should "have size 0" taggedAs(Disk) in {
 *     assert(Set.empty.size === 0)
 *   }
 * }
 * </pre>
 */
object Disk extends Tag("org.scalatest.tags.Disk")
travisbrown/scalatest
src/main/scala/org/scalatest/tagobjects/Disk.scala
Scala
apache-2.0
1,568
package imperial
package mocks

import collection.{mutable => mut}

import imperial.measures.Histogram
import imperial.health.{HealthCheck, HealthCheckable}

class MockArmoury extends RootArmoury {

  val metricMap: mut.Map[String, Any] = mut.Map.empty

  def getOrFetch[T](k: String)(v: T): T = metricMap.getOrElseUpdate(k, v).asInstanceOf[T]

  def describe: String = {
    metricMap.map { case (k, v) => s"$k = $v" }.mkString("\n")
  }

  override def gauge[A](name: String)(f: => A): MockGauge[A] = getOrFetch(name){ new MockGauge(f) }
  override def meter(name: String): MockMeter = getOrFetch(name){ new MockMeter }
  override def timer(name: String): MockTimer = getOrFetch(name){ new MockTimer }
  override def counter(name: String): MockCounter = getOrFetch(name){ new MockCounter }
  override def histogram(name: String): Histogram = ???

  def healthCheck[T]
    (name: String, unhealthyMessage: String = "Health check failed")
    (payload: => T)
    (implicit checkable: HealthCheckable[T])
  : HealthCheck = ???
}
thecoda/scala-imperial
src/test/scala/imperial/mocks/MockArmoury.scala
Scala
apache-2.0
1,077
package org.monarchinitiative.dosdp.cli import java.io.File import caseapp._ import caseapp.core.Error.MalformedValue import caseapp.core.argparser.{ArgParser, SimpleArgParser} import com.github.tototoshi.csv.{CSVFormat, DefaultCSVFormat, TSVFormat} import io.circe.generic.auto._ import io.circe.yaml.parser import org.monarchinitiative.dosdp.cli.Config.{AllAxioms, AxiomKind, BoolValue, FalseValue, LogicalAxioms, MultiArgList, inputDOSDPFrom} import org.monarchinitiative.dosdp.{DOSDP, OBOPrefixes, Utilities} import org.semanticweb.owlapi.model.OWLOntology import zio._ import zio.blocking.Blocking import scala.io.Source @AppName("dosdp-tools") @ProgName("dosdp-tools") sealed trait Config { def run: ZIO[ZEnv, DOSDPError, Unit] } final case class CommonOptions( @HelpMessage("OWL ontology (provide labels, query axioms)") @ValueDescription("file or URI") ontology: Option[String], @HelpMessage("A catalog XML file to use for resolving mapping ontology IRIs to URLs") @ValueDescription("file") @Name("catalog") catalog: Option[String], @HelpMessage("DOSDP file (YAML). If a local file is not found at the given path, the path will be attempted as a URL.") @ValueDescription("file") template: String, @HelpMessage("CURIE prefixes (YAML)") @ValueDescription("file") prefixes: Option[String], @HelpMessage("Assume prefixes are OBO ontologies; predefine rdf, rdfs, owl, dc, dct, skos, obo, and oio") @ValueDescription("true|false") oboPrefixes: BoolValue = FalseValue, @HelpMessage("Output file (OWL or TSV)") @ValueDescription("file") outfile: String = "dosdp.out", @HelpMessage("Tabular format: TSV (default) or CSV") @ValueDescription("tsv|csv") tableFormat: String = "tsv", @HelpMessage("List of patterns (without file extension) to process in batch (space separated, enclose list in quotes)") @ValueDescription("names") batchPatterns: MultiArgList = MultiArgList(Nil) ) { def inputDOSDP: IO[DOSDPError, DOSDP] = inputDOSDPFrom(template) def prefixesMap: ZIO[Any, DOSDPError, PartialFunction[String, String]] = { val possiblePrefixMap = prefixes.map { prefixesPath => val prefixesFile = new File(prefixesPath) for { prefixesText <- ZIO.effect(Source.fromFile(prefixesFile, "UTF-8")).bracketAuto(s => ZIO.effect(s.mkString)) .mapError(e => DOSDPError(s"Could not read prefixes file at $prefixesPath", e)) prefixesJson <- ZIO.fromEither(parser.parse(prefixesText)) .mapError(e => DOSDPError(s"Invalid JSON format for prefixes file at $prefixesPath", e)) prefixMap <- ZIO.fromEither(prefixesJson.as[Map[String, String]]) .mapError(e => DOSDPError(s"JSON for prefixes file at $prefixesPath should be a simple map of strings", e)) } yield prefixMap } for { prefixMapOpt <- ZIO.foreach(possiblePrefixMap)(identity) specifiedPrefixes = prefixMapOpt.getOrElse(Map.empty) } yield if (oboPrefixes.bool) specifiedPrefixes.orElse(OBOPrefixes) else specifiedPrefixes } def ontologyOpt: ZIO[Blocking, DOSDPError, Option[OWLOntology]] = ZIO.foreach(ontology)(ontPath => Utilities.loadOntology(ontPath, catalog)) } @CommandName("terms") @HelpMessage("dump terms referenced in TSV input and a Dead Simple OWL Design Pattern") final case class TermsConfig(@Recurse common: CommonOptions, @HelpMessage("Input file (TSV or CSV)") @ValueDescription("file") infile: String = "fillers.tsv") extends Config { override def run: ZIO[zio.ZEnv, DOSDPError, Unit] = Terms.run(this) } @CommandName("generate") @HelpMessage("generate ontology axioms for TSV input to a Dead Simple OWL Design Pattern") final case class GenerateConfig(@Recurse common: CommonOptions, 
@HelpMessage("Input file (TSV or CSV)") @ValueDescription("file") infile: String = "fillers.tsv", @HelpMessage("Restrict generated axioms to 'logical', 'annotation', or 'all' (default)") @ValueDescription("all|logical|annotation") restrictAxiomsTo: AxiomKind = AllAxioms, @HelpMessage("Data column containing local axiom output restrictions") @ValueDescription("name") restrictAxiomsColumn: Option[String], @HelpMessage("Compute defined class IRI from pattern IRI and variable fillers") @ValueDescription("true|false") generateDefinedClass: BoolValue = FalseValue, @HelpMessage("Add axiom annotation to generated axioms linking to pattern IRI") @ValueDescription("true|false") addAxiomSourceAnnotation: BoolValue = FalseValue, @HelpMessage("IRI for annotation property to use to link generated axioms to pattern IRI") @ValueDescription("IRI") axiomSourceAnnotationProperty: String = "http://www.geneontology.org/formats/oboInOwl#source" ) extends Config { override def run: ZIO[zio.ZEnv, DOSDPError, Unit] = Generate.run(this) } @CommandName("prototype") @HelpMessage("output \\"prototype\\" axioms using default fillers for a pattern or folder of patterns") final case class PrototypeConfig(@Recurse common: CommonOptions) extends Config { override def run: ZIO[zio.ZEnv, DOSDPError, Unit] = Prototype.run(this) } @CommandName("query") @HelpMessage("query an ontology for terms matching a Dead Simple OWL Design Pattern") final case class QueryConfig(@Recurse common: CommonOptions, @HelpMessage("Reasoner to use for expanding variable constraints. Valid options are ELK, HermiT, or JFact.") @ValueDescription("elk|hermit|jfact") reasoner: Option[String], @HelpMessage("Print generated query without running against ontology") @ValueDescription("true|false") printQuery: BoolValue = FalseValue, @HelpMessage("Restrict queried axioms to 'logical', 'annotation', or 'all' (default)") @ValueDescription("all|logical|annotation") restrictAxiomsTo: AxiomKind = LogicalAxioms ) extends Config { override def run: ZIO[zio.ZEnv, DOSDPError, Unit] = Query.run(this) } object Config { def tabularFormat(arg: String): Either[DOSDPError, CSVFormat] = arg.toLowerCase match { case "csv" => Right(new DefaultCSVFormat {}) case "tsv" => Right(new TSVFormat {}) case other => Left(DOSDPError(s"Invalid tabular format requested: $other")) } def inputDOSDPFrom(location: String): IO[DOSDPError, DOSDP] = for { file <- ZIO.effectTotal(new File(location)) fileExists <- ZIO.effect(file.exists).mapError(e => DOSDPError(s"Could not read pattern file at $location", e)) sourceZ = if (fileExists) ZIO.effect(Source.fromFile(file, "UTF-8")) else ZIO.effect(Source.fromURL(location, "UTF-8")) dosdpText <- sourceZ.bracketAuto(s => ZIO.effect(s.mkString)).mapError(e => DOSDPError(s"Could not read pattern file at $location", e)) json <- ZIO.fromEither(parser.parse(dosdpText)).mapError(e => DOSDPError(s"Invalid JSON format for pattern file at $location", e)) dosdp <- ZIO.fromEither(json.as[DOSDP]).mapError(e => DOSDPError(s"JSON does not conform to DOS-DP schema for pattern file at $location", e)) } yield dosdp /** * This works around some confusing behavior in case-app boolean parsing */ sealed trait BoolValue { def bool: Boolean } case object TrueValue extends BoolValue { def bool = true } case object FalseValue extends BoolValue { def bool = false } implicit val boolArgParser: ArgParser[BoolValue] = SimpleArgParser.from[BoolValue]("boolean value") { arg => arg.toLowerCase match { case "true" => Right(TrueValue) case "false" => Right(FalseValue) case "1" => 
Right(TrueValue) case "0" => Right(FalseValue) case _ => Left(MalformedValue("boolean value", arg)) } } final case class MultiArgList(items: List[String]) implicit val listArgParser: ArgParser[MultiArgList] = SimpleArgParser.from[MultiArgList]("multiple values") { arg => val trimmed = arg.trim if (trimmed.isEmpty) Left(MalformedValue("empty list input", arg)) else Right(MultiArgList(arg.split(" ", -1).toList)) } sealed trait AxiomKind case object LogicalAxioms extends AxiomKind case object AnnotationAxioms extends AxiomKind case object AllAxioms extends AxiomKind implicit val axiomKindArgParser: ArgParser[AxiomKind] = SimpleArgParser.from[AxiomKind]("axiom kind")(parseAxiomKind) def parseAxiomKind(arg: String): Either[MalformedValue, AxiomKind] = { arg.toLowerCase match { case "all" => Right(AllAxioms) case "logical" => Right(LogicalAxioms) case "annotation" => Right(AnnotationAxioms) case _ => Left(MalformedValue("Not a valid axiom type", arg)) } } } final case class DOSDPError(msg: String, cause: Throwable) extends Exception(msg, cause) object DOSDPError { def apply(msg: String): DOSDPError = DOSDPError(msg, new Exception(msg)) }
balhoff/dosdp-scala
src/main/scala/org/monarchinitiative/dosdp/cli/Config.scala
Scala
mit
10,589
// NOTE: commented out in order to avoid scope pollution for typecheckError // package scala.meta.tests // package api import org.scalatest._ import org.scalameta.tests._ class PublicSuite extends FunSuite { test("quasiquotes without import") { assert(typecheckError(""" q"hello" """) === "value q is not a member of StringContext") } test("quasiquotes without static dialect") { val currentDialect = scala.meta.Dialect.current assert(typecheckError(""" import scala.meta._ implicit val dialect: scala.meta.Dialect = ??? q"hello" """) === s"dialect does not have precise enough type to be used in quasiquotes (to fix this, import something from scala.dialects, e.g. scala.meta.dialects.${currentDialect.name})") } test("quasiquotes when everything's correct") { assert(typecheckError(""" import scala.meta._ import scala.meta.dialects.Scala211 q"hello" """) === "") } test("InputLike.parse without import") { assert(typecheckError(""" "".parse[scala.meta.Term] """) === "value parse is not a member of String") } test("InputLike.parse without input-likeness") { assert(typecheckError(""" import scala.meta._ 1.parse[Term] """) === "don't know how to convert Int to scala.meta.inputs.Input") } test("InputLike.parse without parseability") { assert(typecheckError(""" import scala.meta._ import scala.meta.dialects.Scala211 "".parse[Int] """) === "don't know how to parse into Int") } test("InputLike.parse when everything's correct (static dialect)") { assert(typecheckError(""" import scala.meta._ import scala.meta.dialects.Scala211 "".parse[Term] """) === "") } test("InputLike.parse when everything's correct (dynamic dialect)") { assert(typecheckError(""" import scala.meta._ implicit val dialect: scala.meta.Dialect = ??? "".parse[Term] """) === "") } test("InputLike.parse with various input types") { assert(typecheckError(""" import scala.meta._ import scala.meta.dialects.Scala211 (??? : Input).parse[Term] (??? : String).parse[Term] (??? : java.io.File).parse[Term] (??? : Tokens).parse[Term] (??? : Array[Char]).parse[Term] """) === "") } // NOTE: this works because implicit scope for Scala211 includes meta.`package` test("Dialect.parse without import") { assert(typecheckError(""" scala.meta.dialects.Scala211("").parse[scala.meta.Term] """) === "") } test("Dialect.parse without input-likeness") { assert(typecheckError(""" scala.meta.dialects.Scala211(1).parse[scala.meta.Term] """) === "don't know how to convert Int to scala.meta.inputs.Input") } test("Dialect.parse without parseability") { assert(typecheckError(""" scala.meta.dialects.Scala211("").parse[Int] """) === "don't know how to parse into Int") } test("Dialect.parse with various input types") { assert(typecheckError(""" scala.meta.dialects.Scala211(??? : scala.meta.Input).parse[scala.meta.Term] scala.meta.dialects.Scala211(??? : String).parse[scala.meta.Term] scala.meta.dialects.Scala211(??? : java.io.File).parse[scala.meta.Term] scala.meta.dialects.Scala211(??? : scala.meta.Tokens).parse[scala.meta.Term] scala.meta.dialects.Scala211(??? 
: Array[Char]).parse[scala.meta.Term] """) === "") } test("tokenize without import") { assert(typecheckError(""" "".tokenize """) === "value tokenize is not a member of String") } test("tokenize without input-likeness") { assert(typecheckError(""" import scala.meta._ 1.tokenize """) === "don't know how to convert Int to scala.meta.inputs.Input") } test("tokenize when everything's correct (static dialect)") { assert(typecheckError(""" import scala.meta._ import scala.meta.dialects.Scala211 "".tokenize """) === "") } test("tokenize when everything's correct (dynamic dialect)") { assert(typecheckError(""" import scala.meta._ implicit val dialect: scala.meta.Dialect = ??? "".tokenize """) === "") } test("tokenize with various input types") { assert(typecheckError(""" import scala.meta._ import scala.meta.dialects.Scala211 (??? : Input).tokenize (??? : String).tokenize (??? : java.io.File).tokenize (??? : Tokens).tokenize (??? : Array[Char]).tokenize """) === "") } // NOTE: this works because implicit scope for Scala211 includes meta.`package` test("Dialect.tokenize without import") { assert(typecheckError(""" scala.meta.dialects.Scala211("").tokenize """) === "") } test("Dialect.tokenize without input-likeness") { assert(typecheckError(""" scala.meta.dialects.Scala211(1).tokenize """) === "don't know how to convert Int to scala.meta.inputs.Input") } test("Dialect.tokenize when everything's correct") { assert(typecheckError(""" scala.meta.dialects.Scala211("").tokenize """) === "") } test("Dialect.tokenize with various input types") { assert(typecheckError(""" scala.meta.dialects.Scala211(??? : scala.meta.Input).tokenize scala.meta.dialects.Scala211(??? : String).tokenize scala.meta.dialects.Scala211(??? : java.io.File).tokenize scala.meta.dialects.Scala211(??? : scala.meta.Tokens).tokenize scala.meta.dialects.Scala211(??? : Array[Char]).tokenize """) === "") } test("show[Syntax] without import") { assert(typecheckError(""" (??? : scala.meta.Tree).show[Syntax] """) === "not found: type Syntax") } test("show[Syntax] when everything's correct (static dialect)") { assert(typecheckError(""" import scala.meta._ import scala.meta.dialects.Scala211 (??? : Tree).show[Syntax] (??? : Tree).syntax """) === "") } test("show[Syntax] when everything's correct (dynamic dialect)") { assert(typecheckError(""" import scala.meta._ implicit val dialect: scala.meta.Dialect = ??? (??? : Tree).show[Syntax] (??? : Tree).syntax dialect(??? : Tree).syntax """) === "") } test("show[Structure] without import") { assert(typecheckError(""" (??? : scala.meta.Tree).show[Structure] """) === "not found: type Structure") } test("show[Structure] when everything's correct") { assert(typecheckError(""" import scala.meta._ (??? : Tree).show[Structure] (??? : Tree).structure """) === "") } test("Token.is[T] without import") { assert(typecheckError(""" (??? : scala.meta.Token).is[scala.meta.Token] (??? : scala.meta.Token).is[scala.meta.Token.Ident] """) === "") } test("Tree.is[T] without import") { assert(typecheckError(""" (??? : scala.meta.Tree).is[scala.meta.Tree] (??? : scala.meta.Tree).is[scala.meta.Type] """) === "") } }
Dveim/scalameta
scalameta/scalameta/src/test/scala/scala/meta/tests/api/PublicSuite.scala
Scala
bsd-3-clause
7,007
package mesosphere.marathon package core.deployment.impl import akka.Done import akka.actor._ import akka.event.EventStream import com.typesafe.scalalogging.StrictLogging import mesosphere.marathon.core.deployment._ import mesosphere.marathon.core.deployment.impl.DeploymentActor.{ Cancel, Fail, NextStep } import mesosphere.marathon.core.deployment.impl.DeploymentManagerActor.DeploymentFinished import mesosphere.marathon.core.event.{ DeploymentStatus, DeploymentStepFailure, DeploymentStepSuccess } import mesosphere.marathon.core.health.HealthCheckManager import mesosphere.marathon.core.instance.Instance import mesosphere.marathon.core.launchqueue.LaunchQueue import mesosphere.marathon.core.pod.PodDefinition import mesosphere.marathon.core.readiness.ReadinessCheckExecutor import mesosphere.marathon.core.task.termination.{ KillReason, KillService } import mesosphere.marathon.core.task.tracker.InstanceTracker import mesosphere.marathon.state.{ AppDefinition, RunSpec } import mesosphere.mesos.Constraints import scala.async.Async._ import scala.concurrent.{ Future, Promise } import scala.util.{ Failure, Success } private class DeploymentActor( deploymentManager: ActorRef, promise: Promise[Done], killService: KillService, scheduler: SchedulerActions, plan: DeploymentPlan, instanceTracker: InstanceTracker, launchQueue: LaunchQueue, healthCheckManager: HealthCheckManager, eventBus: EventStream, readinessCheckExecutor: ReadinessCheckExecutor) extends Actor with StrictLogging { import context.dispatcher val steps = plan.steps.iterator var currentStepNr: Int = 0 override def preStart(): Unit = { self ! NextStep } override def postStop(): Unit = { deploymentManager ! DeploymentFinished(plan) } def receive: Receive = { case NextStep if steps.hasNext => val step = steps.next() currentStepNr += 1 logger.debug(s"Process next deployment step: stepNumber=$currentStepNr step=$step planId=${plan.id}") deploymentManager ! DeploymentStepInfo(plan, step, currentStepNr) performStep(step) onComplete { case Success(_) => self ! NextStep case Failure(t) => self ! 
Fail(t) } case NextStep => // no more steps, we're done logger.debug(s"No more deployment steps to process: planId=${plan.id}") promise.success(Done) context.stop(self) case Cancel(t) => promise.failure(t) context.stop(self) case Fail(t) => logger.debug(s"Deployment failed: planId=${plan.id}", t) promise.failure(t) context.stop(self) } // scalastyle:off def performStep(step: DeploymentStep): Future[Unit] = { logger.debug(s"Perform deployment step: step=$step planId=${plan.id}") if (step.actions.isEmpty) { Future.successful(()) } else { val status = DeploymentStatus(plan, step) eventBus.publish(status) val futures = step.actions.map { action => action.runSpec match { case app: AppDefinition => healthCheckManager.addAllFor(app, Seq.empty) case pod: PodDefinition => //ignore: no marathon based health check for pods } action match { case StartApplication(run, scaleTo) => startRunnable(run, scaleTo, status) case ScaleApplication(run, scaleTo, toKill) => scaleRunnable(run, scaleTo, toKill, status) case RestartApplication(run) => restartRunnable(run, status) case StopApplication(run) => stopRunnable(run.withInstances(0)) } } Future.sequence(futures).map(_ => ()) andThen { case Success(_) => logger.debug(s"Deployment step successful: step=$step plandId=${plan.id}") eventBus.publish(DeploymentStepSuccess(plan, step)) case Failure(e) => logger.debug(s"Deployment step failed: step=$step plandId=${plan.id}", e) eventBus.publish(DeploymentStepFailure(plan, step)) } } } // scalastyle:on def startRunnable(runnableSpec: RunSpec, scaleTo: Int, status: DeploymentStatus): Future[Unit] = { val promise = Promise[Unit]() instanceTracker.specInstances(runnableSpec.id).map { instances => context.actorOf(AppStartActor.props(deploymentManager, status, scheduler, launchQueue, instanceTracker, eventBus, readinessCheckExecutor, runnableSpec, scaleTo, instances, promise)) } promise.future } @SuppressWarnings(Array("all")) /* async/await */ def scaleRunnable(runnableSpec: RunSpec, scaleTo: Int, toKill: Option[Seq[Instance]], status: DeploymentStatus): Future[Unit] = { logger.debug("Scale runnable {}", runnableSpec) def killToMeetConstraints(notSentencedAndRunning: Seq[Instance], toKillCount: Int) = { Constraints.selectInstancesToKill(runnableSpec, notSentencedAndRunning, toKillCount) } async { val instances = await(instanceTracker.specInstances(runnableSpec.id)) val runningInstances = instances.filter(_.state.condition.isActive) val ScalingProposition(tasksToKill, tasksToStart) = ScalingProposition.propose( runningInstances, toKill, killToMeetConstraints, scaleTo, runnableSpec.killSelection) def killTasksIfNeeded: Future[Unit] = { logger.debug("Kill tasks if needed") tasksToKill.fold(Future.successful(())) { tasks => logger.debug("Kill tasks {}", tasks) killService.killInstances(tasks, KillReason.DeploymentScaling).map(_ => ()) } } await(killTasksIfNeeded) def startTasksIfNeeded: Future[Unit] = { tasksToStart.fold(Future.successful(())) { tasksToStart => logger.debug(s"Start next $tasksToStart tasks") val promise = Promise[Unit]() context.actorOf(TaskStartActor.props(deploymentManager, status, scheduler, launchQueue, instanceTracker, eventBus, readinessCheckExecutor, runnableSpec, scaleTo, promise)) promise.future } } await(startTasksIfNeeded) } } @SuppressWarnings(Array("all")) /* async/await */ def stopRunnable(runnableSpec: RunSpec): Future[Unit] = async { val instances = await(instanceTracker.specInstances(runnableSpec.id)) val launchedInstances = instances.filter(_.isLaunched) // TODO: the launch queue is purged in 
stopRunnable, but it would make sense to do that before calling kill(tasks) await(killService.killInstances(launchedInstances, KillReason.DeletingApp)) scheduler.stopRunSpec(runnableSpec) } def restartRunnable(run: RunSpec, status: DeploymentStatus): Future[Unit] = { if (run.instances == 0) { Future.successful(()) } else { val promise = Promise[Unit]() context.actorOf(TaskReplaceActor.props(deploymentManager, status, killService, launchQueue, instanceTracker, eventBus, readinessCheckExecutor, run, promise)) promise.future } } } object DeploymentActor { case object NextStep case object Finished case class Cancel(reason: Throwable) case class Fail(reason: Throwable) case class DeploymentActionInfo(plan: DeploymentPlan, step: DeploymentStep, action: DeploymentAction) @SuppressWarnings(Array("MaxParameters")) def props( deploymentManager: ActorRef, promise: Promise[Done], killService: KillService, scheduler: SchedulerActions, plan: DeploymentPlan, taskTracker: InstanceTracker, launchQueue: LaunchQueue, healthCheckManager: HealthCheckManager, eventBus: EventStream, readinessCheckExecutor: ReadinessCheckExecutor): Props = { Props(new DeploymentActor( deploymentManager, promise, killService, scheduler, plan, taskTracker, launchQueue, healthCheckManager, eventBus, readinessCheckExecutor )) } }
natemurthy/marathon
src/main/scala/mesosphere/marathon/core/deployment/impl/DeploymentActor.scala
Scala
apache-2.0
7,809
package controllers.admin import play.api.mvc._ import models._ import play.api.libs.json._ import play.api.libs.json.JsObject import play.api.libs.json.JsString import reactivemongo.bson.BSONObjectID import scala.concurrent.{ExecutionContext, Future} import ExecutionContext.Implicits.global import play.modules.reactivemongo.json.BSONFormats._ import play.api.Logger object MockGroups extends Controller { // use by Json : from scala to json private implicit object MockGroupsOptionsDataWrites extends Writes[(String, String)] { def writes(data: (String, String)): JsValue = { JsObject( List( "id" -> JsString(data._1), "name" -> JsString(data._2) )) } } /** * All Mock Grous attached to groups selected. * * @return JSON */ def findAll(groups: String) = Action.async { val futureDataList = MockGroup.findInGroups(groups) futureDataList.map { list => Ok(Json.toJson(Map("data" -> Json.toJson(list)))) } } /** * Display the 'edit form' of a existing MockGroup. * * @param id Id of the mockGroup to edit */ def edit(id: String) = Action.async { val futureMockGroup = MockGroup.findById(BSONObjectID(id)) futureMockGroup.map { mockGroup => Ok(Json.toJson(mockGroup)).as(JSON) } } /** * Insert or update a mockGroup. */ def create = Action.async(parse.json) { request => val id = BSONObjectID.generate val json = request.body.as[JsObject] ++ Json.obj("_id" -> id) try { json.validate(MockGroup.mockGroupFormat).map { mockGroup => { MockGroup.insert(mockGroup).map { lastError => if (lastError.ok) { Ok(id.stringify) } else { BadRequest("Detected error on insert :%s".format(lastError)) } } } }.recoverTotal { case e => Future.successful(BadRequest("Detected error on validation : " + JsError.toFlatJson(e))) } } catch { case e: Throwable => { Logger.error("Error:", e) Future.successful(BadRequest("Internal error : " + e.getMessage)) } } } /** * Update a group. */ def update(id: String) = Action.async(parse.json) { request => val idg = BSONObjectID.parse(id).toOption.get val json = JsObject(request.body.as[JsObject].fields.filterNot(f => f._1 == "_id")) ++ Json.obj("_id" -> idg) try { json.validate(MockGroup.mockGroupFormat).map { mockGroup => { MockGroup.update(mockGroup).map { lastError => if (lastError.ok) { Ok(id) } else { BadRequest("Detected error on update :%s".format(lastError)) } } } }.recoverTotal { case e => Future.successful(BadRequest("Detected error on validation : " + JsError.toFlatJson(e))) } } catch { case e: Throwable => { Logger.error("Error:", e) Future.successful(BadRequest("Internal error : " + e.getMessage)) } } } /** * Handle mockGroup deletion. */ def delete(id: String) = Action.async(parse.tolerantText) { request => MockGroup.delete(id).map { lastError => if (lastError.ok) { Ok(id) } else { BadRequest("Detected error:%s".format(lastError)) } } } }
soapower/soapower
app/controllers/admin/MockGroups.scala
Scala
gpl-3.0
3,543
/* * Copyright 2022 HM Revenue & Customs * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package iht.utils import iht.FakeIhtApp import iht.config.AppConfig import iht.models.application.exemptions.{AllExemptions, BasicExemptionElement, PartnerExemption} import iht.testhelpers.CommonBuilder class EstateNotDeclarableHelperTest extends FakeIhtApp with EstateNotDeclarableHelper { implicit val appConfig: AppConfig = app.injector.instanceOf[AppConfig] "EstateNotDeclarableHelper" when { "isEstateOverGrossEstateLimit is called" must { "return true if the estate is over the gross estate limit" in { val appDetails = CommonBuilder.buildApplicationDetails.copy(allAssets = Some(CommonBuilder.buildAllAssets.copy( money = Some(CommonBuilder.buildShareableBasicElement.copy(value = Some(2000000)))))) assert(isEstateOverGrossEstateLimit(appDetails)) } "return false if the estate is not over the gross estate limit" in { val appDetails = CommonBuilder.buildApplicationDetails.copy(allAssets = Some(CommonBuilder.buildAllAssets.copy( money = Some(CommonBuilder.buildShareableBasicElement.copy(value = Some(2000)))))) assert(!isEstateOverGrossEstateLimit(appDetails)) } } "isEstateValueMoreThanTaxThresholdBeforeExemptionsStarted" must { "return true if the estate value is more than the tax threshold " + "before exemptions has been started" in { val appDetails = CommonBuilder.buildApplicationDetails.copy(allAssets = Some(CommonBuilder.buildAllAssets.copy( money = Some(CommonBuilder.buildShareableBasicElement.copy(value = Some(326000)))))) assert(isEstateValueMoreThanTaxThresholdBeforeExemptionsStarted(appDetails)) } "return false if the estate value is not more than the tax threshold " + "before exemptions has been started" in { val appDetails = CommonBuilder.buildApplicationDetails.copy(allAssets = Some(CommonBuilder.buildAllAssets.copy( money = Some(CommonBuilder.buildShareableBasicElement.copy(value = Some(20000)))))) assert(!isEstateValueMoreThanTaxThresholdBeforeExemptionsStarted(appDetails)) } } "isEstateValueMoreThanTaxThresholdBeforeTnrbStarted" must { "return true if the estate value is more than the tax threshold " + "before TNRB has been started" in { val regDetails = CommonBuilder.buildRegistrationDetails4 val appDetails = CommonBuilder.buildApplicationDetails.copy( allAssets = Some(CommonBuilder.buildAllAssets.copy( money = Some(CommonBuilder.buildShareableBasicElement.copy(value = Some(326000))))), allExemptions = Some(CommonBuilder.buildAllExemptions.copy( partner = Some(CommonBuilder.buildPartnerExemption) )), widowCheck = None) assert(isEstateValueMoreThanTaxThresholdBeforeTnrbStarted(appDetails, regDetails)) } "return false if the estate value is not more than the tax threshold " + "before TNRB has been started" in { val regDetails = CommonBuilder.buildRegistrationDetails4 val appDetails = CommonBuilder.buildApplicationDetails.copy( allAssets = Some(CommonBuilder.buildAllAssets.copy( money = Some(CommonBuilder.buildShareableBasicElement.copy(value = Some(326000))))), allExemptions = 
Some(CommonBuilder.buildAllExemptions.copy( partner = Some(CommonBuilder.buildPartnerExemption.copy(totalAssets = Some(BigDecimal(26000)))) )), widowCheck = None) assert(!isEstateValueMoreThanTaxThresholdBeforeTnrbStarted(appDetails, regDetails)) } } "isEstateValueMoreThanTaxThresholdBeforeTnrbFinished" must { "return true if the estate value is more than the tax threshold " + "after TNRB has been started but before TNRB has been finished" in { val regDetails = CommonBuilder.buildRegistrationDetails4 val appDetails = CommonBuilder.buildApplicationDetails.copy( allAssets = Some(CommonBuilder.buildAllAssets.copy( money = Some(CommonBuilder.buildShareableBasicElement.copy(value = Some(326000))))), allExemptions = Some(AllExemptions( partner = Some(PartnerExemption( isAssetForDeceasedPartner = Some(false), isPartnerHomeInUK = None, firstName = None, lastName = None, dateOfBirth = None, nino = None, totalAssets = None)), charity = Some(BasicExemptionElement(Some(false))), qualifyingBody = Some(BasicExemptionElement(Some(false))))), widowCheck = Some(CommonBuilder.buildWidowedCheck), increaseIhtThreshold = Some(CommonBuilder.buildTnrbEligibility)) assert(isEstateValueMoreThanTaxThresholdBeforeTnrbFinished(appDetails, regDetails)) } "return false if the estate value is not more than the tax threshold " + "after TNRB has been started but before TNRB has been finished" in { val regDetails = CommonBuilder.buildRegistrationDetails4 val appDetails = CommonBuilder.buildApplicationDetails.copy( allAssets = Some(CommonBuilder.buildAllAssets.copy( money = Some(CommonBuilder.buildShareableBasicElement.copy(value = Some(324000))))), allExemptions = Some(AllExemptions( partner = Some(PartnerExemption( isAssetForDeceasedPartner = Some(false), isPartnerHomeInUK = None, firstName = None, lastName = None, dateOfBirth = None, nino = None, totalAssets = None)), charity = Some(BasicExemptionElement(Some(false))), qualifyingBody = Some(BasicExemptionElement(Some(false))))), widowCheck = Some(CommonBuilder.buildWidowedCheck), increaseIhtThreshold = Some(CommonBuilder.buildTnrbEligibility)) assert(!isEstateValueMoreThanTaxThresholdBeforeTnrbFinished(appDetails, regDetails)) } } } }
hmrc/iht-frontend
test/iht/utils/EstateNotDeclarableHelperTest.scala
Scala
apache-2.0
6,690
package jp.hotbrain.makecsv

import java.io.{ByteArrayInputStream, ByteArrayOutputStream, OutputStream}
import java.nio.charset.StandardCharsets

import org.junit.Assert._
import org.junit.Test

/**
  * Created by hidek on 2016/09/10.
  */
class XsvConvertTest {

  class XsvConv(val body: String) extends EncoderIf {
    def export(os: OutputStream): Unit = {
      os.write(body.getBytes(StandardCharsets.UTF_8))
    }
  }

  case class XsvEnc(aesParam: Option[AesParam], gzip: Boolean) extends FileConfig

  class XsvDecoder() extends DecoderIf {
    private[this] lazy val _buf = new ByteArrayOutputStream()

    override def exec(callback: OutputStream => Unit): Unit = callback(_buf)

    def buf(): Array[Byte] = _buf.toByteArray
  }

  @Test
  def aesTest(): Unit = {
    val body = """group,name,from_date
                 |"00001","グループ1","2016/09/03 22:37:59"
                 |"00002","ぐる~ぷ2","2016/09/03 22:37:59"
                 |"00003","Group3","2016/09/03 22:37:59"
                 |"00009","LastGroup","2016/09/03 22:37:59"
                 |
               """.stripMargin
    val conf = new XsvConv(body)
    Seq(
      None,
      Option(AesParam("32", "1234", "7056")),
      Option(AesParam("24", "0123456789abcdef0123456789ABCDEF", "9876543210fedcba9876543210FEDCBA"))
    ).flatMap(x => Seq(XsvEnc(x, false), XsvEnc(x, true))).foreach(z => aesTestSub(conf, z))
  }

  private[this] def aesTestSub(conf: XsvConv, enc: XsvEnc): Unit = {
    val buf = new ByteArrayOutputStream()
    Encoder.encode(conf, enc, buf)
    buf.close()
    val arr = buf.toByteArray
    println(s"""${enc.toString}: ${arr.length}""")
    val inpBuf = new ByteArrayInputStream(arr)
    val dec = new XsvDecoder()
    Decoder.decode(dec, inpBuf, enc)
    val result = new String(dec.buf(), StandardCharsets.UTF_8)
    assertEquals(enc.toString, conf.body, result)
  }
}
HidekiTak/make_csv
src/test/scala/jp/hotbrain/makecsv/XsvConvertTest.scala
Scala
apache-2.0
1,835
/* * Copyright 2016 rdbc contributors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.rdbc.tck import akka.actor.ActorSystem import akka.stream.Materializer import akka.stream.scaladsl.{Sink, Source} import io.rdbc.sapi._ import io.rdbc.sapi.exceptions.InvalidQueryException import io.rdbc.tck.util.Subscribers import scala.concurrent.Future trait NonExistingTableSpec extends RdbcSpec { protected implicit def system: ActorSystem protected implicit def materializer: Materializer "Error should be returned when referencing a non-existent table when" - { stmtTest("Select", _.statement(sql"select * from nonexistent"), errPos = 15) stmtTest("Insert", _.statement(sql"insert into nonexistent values (1)"), errPos = 13) stmtTest("Returning insert", _.statement(sql"insert into nonexistent values (1)", StatementOptions.ReturnGenKeys), errPos = 13 ) stmtTest("Delete", _.statement(sql"delete from nonexistent"), errPos = 13) stmtTest("Update", _.statement(sql"update nonexistent set x = 1"), errPos = 8) stmtTest("DDL", _.statement(sql"drop table nonexistent"), errPos = 12) } "Streaming arguments should" - { "fail with an InvalidQueryException" - { "when statement references a non-existing table" in { c => val stmt = c.statement("insert into nonexistent values (:x)") val src = Source(Vector(Vector(1), Vector(2))).runWith(Sink.asPublisher(fanout = false)) assertInvalidQueryThrown(errPos = 13) { stmt.streamArgsByIdx(src).get } } } } private def stmtTest(stmtType: String, stmt: Connection => ExecutableStatement, errPos: Int): Unit = { s"executing a $stmtType for" - { executedFor("nothing", _.execute()) executedFor("set", _.executeForSet()) executedFor("value", _.executeForValue(_.int(1))) executedFor("first row", _.executeForFirstRow()) executedFor("generated key", _.executeForKey[String]) executedFor("stream", stmt => { val rs = stmt.stream() val subscriber = Subscribers.eager() rs.subscribe(subscriber) subscriber.rows }) def executedFor[A](executorName: String, executor: ExecutableStatement => Future[A]): Unit = { s"executed for $executorName" in { c => assertInvalidQueryThrown(errPos) { executor(stmt(c)).get } } } } } private def assertInvalidQueryThrown(errPos: Int)(body: => Any): Unit = { val e = intercept[InvalidQueryException] { body } e.errorPosition.fold(alert("non-fatal: no error position reported")) { pos => pos shouldBe errPos } } }
rdbc-io/rdbc
rdbc-tck/src/main/scala/io/rdbc/tck/NonExistingTableSpec.scala
Scala
apache-2.0
3,211
import java.net.InetSocketAddress import java.security.spec.{PKCS8EncodedKeySpec, X509EncodedKeySpec} import java.security.{PrivateKey, PublicKey, KeyFactory, KeyPair} import scala.concurrent.duration._ import akka.io.Tcp.{Write, Received} import akka.actor.{ Actor, Props, ActorSystem } import akka.testkit._ import akka.event.Logging import rtmp.amf.{AmfMixedMap, AmfNull} import rtmp.packet.{Notify, Invoke, Packet} import rtmp.{Message, HandshakeDataProvider, ConnHandler} import org.scalatest.{ BeforeAndAfterAll, FlatSpec } import org.scalatest.concurrent._ import org.scalatest.matchers.ShouldMatchers import com.typesafe.config.ConfigFactory /** * Test parsing of the packets received by server during publish live stream from client */ /* class PublishStreamTest(_system: ActorSystem) extends RtmpStreamTest(_system: ActorSystem) { def this() = this(ActorSystem("PublishStreamTest", ConfigFactory.load)) override protected def dumpDir:String = "dump/publish" "An ConnHandler" should "be able to correctly register with the controller" in { // Check that ConnHandler send RegisterHandler message to the controller val msg = clientHandlerProbe.receiveOne(1000.millisecond) } "An ConnHandler" should "be able to correctly process handshake" in { // Check handshake response testInputResponse("in_1.rtmp", "out_1.rtmp") } "An ConnHandler" should "be able to correctly parse client messages" in { // Dump in_2.rtmp contain client handshake response and invoke connect // packet splitted to the two chunks connActor ! Received(readData("in_2.rtmp")) verifyReceivedPackets(List[Packet]( Invoke("connect", 1, List( Map( "app" -> "live", "type" -> "nonprivate", "flashVer" -> "FMLE/3.0 (compatible; Lavf55.2.0)", "tcUrl" -> "rtmp://127.0.0.1:1935/live" ) )) )) testInputPackets("in_3.rtmp", List[Packet]()) testInputPackets("in_4.rtmp", List[Packet]( Invoke("releaseStream", 2, List(AmfNull(), "mystream.sdp")), Invoke("FCPublish", 3, List(AmfNull(), "mystream.sdp")), Invoke("createStream", 4, List(AmfNull())) )) // Invoke publish ( mystream.sdp, live ) here !! // Invoke(publish, 5, List(AmfNull(), 'mystream.sdp', 'live')) testInputPackets("in_5.rtmp", List[Packet]()) testInputPackets("in_6.rtmp", List[Packet]( Invoke("publish", 5, List(AmfNull(), "mystream.sdp", "live")) )) // OnMetadata ( notify ) somewhere here and Video packets follow up testInputPackets("in_7.rtmp", List[Packet]( Notify("@setDataFrame", List( "onMetaData", AmfMixedMap(Map( "duration" -> 0.0, "filesize" -> 0.0, "creation_time" -> "2013-06-21 21:02:32", "videocodecid" -> 2.0, "height" -> 270.0, "videodatarate" -> 195.3125, "compatible_brands" -> "qt ", "encoder" -> "Lavf55.2.0", "minor_version" -> "537199360", "major_brand" -> "qt ", "width" -> 480.0, "framerate" -> 2997 )) )) )) // Video/Audio data here testInputPackets("in_8.rtmp", List[Packet]()) testInputPackets("in_9.rtmp", List[Packet]()) testInputPackets("in_10.rtmp", List[Packet]()) testInputPackets("in_11.rtmp", List[Packet]()) testInputPackets("in_12.rtmp", List[Packet]()) testInputPackets("in_13.rtmp", List[Packet]()) } } */
vimvim/AkkaTest
src/test/scala/PublishStreamTest.scala
Scala
agpl-3.0
3,476
package colossus.metrics import org.scalatest._ import scala.concurrent.duration._ import MetricValues._ class GaugeSpec extends WordSpec with MustMatchers with BeforeAndAfterAll { "BasicGauge" must { "set a value" in { val params = GaugeParams("/") val g = new BasicGauge(params) g.set(Some(5L)) g.value must equal (Some(5L)) g.set(None) g.value must equal (None) } "expire value" in { val params = GaugeParams("/", expireAfter = 1.second, expireTo = Some(9876)) val g = new BasicGauge(params) g.set(Some(5)) g.value must equal(Some(5)) g.tick(500.milliseconds) g.value must equal(Some(5)) g.tick(501.milliseconds) g.value must equal(Some(9876)) } } "ConcreteGauge" must { "set tagged values" in { val params = GaugeParams("/") val g = new ConcreteGauge(params) g.set(4, Map("foo" -> "a")) g.set(5, Map("foo" -> "b")) val expected = Map( MetricAddress.Root -> Map(Map("foo" -> "a") -> SumValue(4L), Map("foo" -> "b") -> SumValue(5L)) ) g.metrics(CollectionContext(Map(), 1.second)) must equal(expected) } "remove unset values" in { val params = GaugeParams("/", expireAfter = 1.second) val g = new ConcreteGauge(params) g.set(4, Map("foo" -> "a")) g.set(5, Map("foo" -> "b")) g.set(None, Map("foo" -> "a")) val expected = Map( MetricAddress.Root -> Map(Map("foo" -> "b") -> SumValue(5L)) ) g.metrics(CollectionContext(Map(), 1.second)) must equal(expected) } "remove expired values" in { val params = GaugeParams("/", expireAfter = 1.second) val g = new ConcreteGauge(params) g.set(4, Map("foo" -> "a")) g.set(5, Map("foo" -> "b")) g.tick(500.milliseconds) g.set(6, Map("foo" -> "b")) g.tick(501.milliseconds) val expected = Map( MetricAddress.Root -> Map(Map("foo" -> "b") -> SumValue(6L)) ) g.metrics(CollectionContext(Map(), 1.second)) must equal(expected) } } }
noikiy/colossus
colossus-metrics/src/test/scala/colossus/metrics/GaugeSpec.scala
Scala
apache-2.0
2,100
/** * Copyright (C) 2010 Orbeon, Inc. * * This program is free software; you can redistribute it and/or modify it under the terms of the * GNU Lesser General Public License as published by the Free Software Foundation; either version * 2.1 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Lesser General Public License for more details. * * The full text of the license is available at http://www.gnu.org/copyleft/lesser.html */ package org.orbeon.oxf.xforms.control.controls import java.io.File import java.net.{URI, URLEncoder} import org.apache.commons.lang3.StringUtils import org.orbeon.dom.Element import org.orbeon.oxf.common.{OXFException, ValidationException} import org.orbeon.oxf.http.Headers import org.orbeon.oxf.util.PathUtils._ import org.orbeon.oxf.util.StringUtils._ import org.orbeon.oxf.util.{NetUtils, SecureUtils} import org.orbeon.oxf.xforms.XFormsConstants._ import org.orbeon.oxf.xforms.control._ import org.orbeon.oxf.xforms.control.controls.XFormsUploadControl._ import org.orbeon.oxf.xforms.event.XFormsEvent._ import org.orbeon.oxf.xforms.event.events._ import org.orbeon.oxf.xforms.event.{Dispatch, XFormsEvent} import org.orbeon.oxf.xforms.upload.UploaderServer import org.orbeon.oxf.xforms.xbl.XBLContainer import org.orbeon.oxf.xforms.{XFormsContainingDocument, XFormsUtils} import org.orbeon.oxf.xml.Dom4j import org.orbeon.oxf.xml.XMLConstants._ import org.xml.sax.helpers.AttributesImpl import org.orbeon.xforms.XFormsId import scala.util.control.NonFatal /** * Represents an xf:upload control. */ class XFormsUploadControl(container: XBLContainer, parent: XFormsControl, element: Element, id: String) extends XFormsSingleNodeControl(container, parent, element, id) with XFormsValueControl with FocusableTrait with FileMetadata { def supportedFileMetadata = FileMetadata.AllMetadataNames // NOTE: `mediatype` is deprecated as of XForms 2.0, use `accept` instead def acceptValue = extensionAttributeValue(ACCEPT_QNAME) orElse extensionAttributeValue(MEDIATYPE_QNAME) override def evaluateImpl(relevant: Boolean, parentRelevant: Boolean): Unit = { super.evaluateImpl(relevant, parentRelevant) evaluateFileMetadata(relevant) } override def markDirtyImpl(): Unit = { super.markDirtyImpl() markFileMetadataDirty() } // NOTE: Perform all actions at target, so that user event handlers are called after these operations. 
override def performTargetAction(event: XFormsEvent): Unit = { super.performTargetAction(event) event match { case _: XXFormsUploadStartEvent ⇒ // Upload started containingDocument.startUpload(getUploadUniqueId) case _: XXFormsUploadProgressEvent ⇒ // NOP: upload progress information will be sent through the diff process case _: XXFormsUploadCancelEvent ⇒ // Upload canceled by the user containingDocument.endUpload(getUploadUniqueId) UploaderServer.removeUploadProgress(NetUtils.getExternalContext.getRequest, this) case doneEvent: XXFormsUploadDoneEvent ⇒ // Upload done: process upload to this control // Notify that the upload has ended containingDocument.endUpload(getUploadUniqueId) UploaderServer.removeUploadProgress(NetUtils.getExternalContext.getRequest, this) handleUploadedFile(doneEvent.file, doneEvent.filename, doneEvent.contentType, doneEvent.contentLength) visitWithAncestors() case _: XXFormsUploadErrorEvent ⇒ // Upload error: sent by the client in case of error containingDocument.endUpload(getUploadUniqueId) UploaderServer.removeUploadProgress(NetUtils.getExternalContext.getRequest, this) case _ ⇒ } } override def performDefaultAction(event: XFormsEvent): Unit = { super.performDefaultAction(event) event match { case errorEvent: XXFormsUploadErrorEvent ⇒ // Upload error: sent by the client in case of error // It would be good to support i18n at the XForms engine level, but form authors can handle // xxforms-upload-error in a custom way if needed. This is what Form Runner does. containingDocument.addMessageToRun("There was an error during the upload.", "modal") case _ ⇒ } } override def onDestroy(): Unit = { super.onDestroy() // Make sure to consider any upload associated with this control as ended containingDocument.endUpload(getUploadUniqueId) } // TODO: Need to move to using actual unique ids here, see: // http://wiki.orbeon.com/forms/projects/core-xforms-engine-improvements#TOC-Improvement-to-client-side-server-s def getUploadUniqueId = getEffectiveId // Called either upon Ajax xxforms-upload-done or upon client form POST (`replace="all"`) def handleUploadedFile(value: String, filename: String, mediatype: String, size: String): Unit = if (size != "0" || filename != "") { // Set value of uploaded file into the instance (will be xs:anyURI or xs:base64Binary) storeExternalValueAndMetadata(value, filename, mediatype, size) } // This can only be called from the client to clear the value override def storeExternalValue(value: String): Unit = { assert(value == "") storeExternalValueAndMetadata(value, "", "", "") } private def storeExternalValueAndMetadata(rawNewValue: String, filename: String, mediatype: String, size: String): Unit = { def isFileURL(url: String) = NetUtils.getProtocol(url) == "file" def deleteFileIfPossible(url: String): Unit = if (isFileURL(url)) try { val file = new File(new URI(splitQuery(url)._1)) if (file.exists) { if (file.delete()) debug("deleted temporary file upon upload", Seq("path" → file.getCanonicalPath)) else warn("could not delete temporary file upon upload", Seq("path" → file.getCanonicalPath)) } } catch { case NonFatal(_) ⇒ error("could not delete temporary file upon upload", Seq("path" → url)) } // Clean values val newValue = rawNewValue.trimAllToEmpty val oldValue = getValue.trimAllToEmpty try { // Only process if the new value is different from the old one val valueToStore = if (isFileURL(newValue)) { // Setting new file val convertedValue = { val isTargetBase64 = Set(XS_BASE64BINARY_QNAME, XFORMS_BASE64BINARY_QNAME)(valueType) if (isTargetBase64) { 
// Convert value to Base64 and delete incoming file val converted = NetUtils.anyURIToBase64Binary(newValue) deleteFileIfPossible(newValue) converted } else { // Leave value as is and make file expire with session val newFile = NetUtils.renameAndExpireWithSession(newValue, logger.getLogger) val newFileURL = newFile.toURI.toString // The result is a file: append a MAC hmacURL(newFileURL, Option(filename), Option(mediatype), Option(size)) } } // Store the converted value convertedValue } else if (StringUtils.isEmpty(newValue)) { // Setting blank value if (StringUtils.isNotEmpty(oldValue)) // TODO: This should probably take place during refresh instead. Dispatch.dispatchEvent(new XFormsDeselectEvent(this, EmptyGetter)) // Try to delete temporary file associated with old value if any deleteFileIfPossible(oldValue) // Store blank value "" } else // Only accept file or blank throw new OXFException("Unexpected incoming value for xf:upload: " + newValue) // Store the value doStoreExternalValue(valueToStore) // NOTE: We used to call markFileMetadataDirty() here, but it was wrong, because getBackCopy would then // obtain the new data, and control diffs wouldn't work properly. This was done for XFormsSubmissionUtils, // which is now modified to use boundFileMediatype/boundFilename instead. // Filename, mediatype and size setFilename(filename) setFileMediatype(mediatype) setFileSize(size) } catch { case NonFatal(t) ⇒ throw new ValidationException(t, getLocationData) } } // Don't expose an external value override def evaluateExternalValue(): Unit = setExternalValue(null) override def compareExternalUseExternalValue( previousExternalValue : Option[String], previousControl : Option[XFormsControl] ): Boolean = previousControl match { case Some(other: XFormsUploadControl) ⇒ compareFileMetadata(other) && super.compareExternalUseExternalValue(previousExternalValue, previousControl) case _ ⇒ false } override def addAjaxExtensionAttributes(attributesImpl: AttributesImpl, previousControlOpt: Option[XFormsControl]) = { var added = super.addAjaxExtensionAttributes(attributesImpl, previousControlOpt) added |= addFileMetadataAttributes(attributesImpl, previousControlOpt.asInstanceOf[Option[FileMetadata]]) added } override def findAriaByControlEffectiveId = Some( XFormsUtils.namespaceId( containingDocument, XFormsId.appendToEffectiveId(getEffectiveId, COMPONENT_SEPARATOR + "xforms-input") ) ) override def getBackCopy: AnyRef = { val cloned = super.getBackCopy.asInstanceOf[XFormsUploadControl] updateFileMetadataCopy(cloned) cloned } } object XFormsUploadControl { // XForms 1.1 mediatype is space-separated, XForms 2 accept is comma-separated like in HTML def mediatypeToAccept(s: String): String = s.splitTo() mkString "," // Append metadata and MAC to the URl // The idea is that whenever the upload control stores a local file: URL, that URL contains a MAC (message // authentication code). This allows: // // - making sure that the URL has not been tampered with (e.g. 
xf:output now uses this so that you can't point it to // any file: URL) // - easily searching instance for uploaded resources // // The MAC includes the URL protocol, path and metadata def hmacURL(url: String, filename: Option[String], mediatype: Option[String], size: Option[String]) = { val candidates = Seq( "filename" → filename, "mediatype" → mediatype, "size" → size ) val query = candidates collect { case (name, Some(value)) ⇒ name + '=' + URLEncoder.encode(value, "utf-8") } mkString "&" val urlWithQuery = NetUtils.appendQueryString(url, query) NetUtils.appendQueryString(urlWithQuery, "mac=" + hmac(urlWithQuery)) } // Get the MAC for a given string def hmac(value: String) = SecureUtils.hmacString(value, "hex") // Remove the MAC from the URL def removeMAC(url: String) = { val uri = new URI(url) // NOTE: Use getRawQuery, as the query might encode & and =, and we should not decode them before decoding the query val query = Option(uri.getRawQuery) map decodeSimpleQuery getOrElse Seq() val filteredQuery = query filterNot (_._1 == "mac") map { case (name, value) ⇒ name + '=' + URLEncoder.encode(value, "utf-8") } mkString "&" NetUtils.appendQueryString(url.substring(0, url.indexOf('?')), filteredQuery) } // For Java callers def getParameterOrNull(url: String, name: String) = getFirstQueryParameter(url, name).orNull // Get the MAC from the URL def getMAC(url: String) = getFirstQueryParameter(url, "mac") // Check that the given URL as a correct MAC def verifyMAC(url: String) = getMAC(url) match { case Some(mac) ⇒ hmac(removeMAC(url)) == mac case None ⇒ false } /** * Handle a construct of the form: * * <xxf:files> * <parameter> * <name>xforms-element-27</name> * <filename>my-filename.jpg</filename> * <content-type>image/jpeg</content-type> * <content-length>33204</content-length> * <value xmlns:request="http://orbeon.org/oxf/xml/request-private" xsi:type="xs:anyURI">file:/temp/upload_432dfead_11f1a9836128000_00000107.tmp</value> * </parameter> * <parameter> * ... * </parameter> * </xxf:files> */ def handleSubmittedFiles(containingDocument: XFormsContainingDocument, filesElement: Element): Unit = for { (name, value, filename, mediatype, size) ← iterateFileElement(filesElement) // In case of xf:repeat, the name of the template will not match an existing control // In addition, only set value on forControl control if specified uploadControl ← Option(containingDocument.getControlByEffectiveId(name).asInstanceOf[XFormsUploadControl]) } uploadControl.handleUploadedFile(value, filename, mediatype, size) // Check if an <xxf:files> element actually contains file uploads to process def hasSubmittedFiles(filesElement: Element) = iterateFileElement(filesElement).nonEmpty private def iterateFileElement(filesElement: Element) = for { parameterElement ← Option(filesElement).toIterator flatMap Dom4j.elements // Extract all parameters name = parameterElement.element("name").getTextTrim value = parameterElement.element("value").getTextTrim filename = Option(parameterElement.element("filename")) map (_.getTextTrim) getOrElse "" mediatype = Option(parameterElement.element(Headers.ContentTypeLower)) map (_.getTextTrim) getOrElse "" size = parameterElement.element(Headers.ContentLengthLower).getTextTrim // A file was selected in the UI (the file may be empty) if size != "0" || filename != "" } yield (name, value, filename, mediatype, size) }
brunobuzzi/orbeon-forms
xforms/jvm/src/main/scala/org/orbeon/oxf/xforms/control/controls/XFormsUploadControl.scala
Scala
lgpl-2.1
13,990
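The companion object above protects uploaded file: URLs by appending the file metadata and a MAC as query parameters and re-checking that MAC later (hmacURL / verifyMAC). Below is a self-contained sketch of that pattern only: it does not use Orbeon's SecureUtils or NetUtils, and the hard-coded HmacSHA256 key, query handling and parameter names are illustrative assumptions, not the real implementation.

import java.net.URLEncoder
import javax.crypto.Mac
import javax.crypto.spec.SecretKeySpec

object MacUrlSketch {
  // Illustrative key only; the real code derives its key via SecureUtils.
  private val key = new SecretKeySpec("demo-secret-key".getBytes("UTF-8"), "HmacSHA256")

  private def hmacHex(value: String): String = {
    val mac = Mac.getInstance("HmacSHA256")
    mac.init(key)
    mac.doFinal(value.getBytes("UTF-8")).map("%02x".format(_)).mkString
  }

  // Append metadata, then a MAC computed over the URL including that metadata.
  def hmacURL(url: String, filename: Option[String]): String = {
    val query    = filename.map(f => "filename=" + URLEncoder.encode(f, "UTF-8")).getOrElse("")
    val withMeta = if (query.isEmpty) url else url + "?" + query
    val sep      = if (withMeta.contains("?")) "&" else "?"
    withMeta + sep + "mac=" + hmacHex(withMeta)
  }

  // Verify by stripping the mac parameter and recomputing the MAC.
  def verifyMAC(url: String): Boolean =
    url.split("[?&]mac=") match {
      case Array(base, mac) => hmacHex(base) == mac
      case _                => false
    }

  def main(args: Array[String]): Unit = {
    val u = hmacURL("file:/tmp/upload_123.tmp", Some("photo.jpg"))
    println(u)
    println(verifyMAC(u))       // true
    println(verifyMAC(u + "0")) // false: value no longer matches the MAC
  }
}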
package mybot import scala.util.parsing.combinator._ /** * Created by cebrian on 18/10/14. */ trait Parser extends JavaTokenParsers { // Parser lazy val parser : Parser[List[ServerMsg]] = repsep(opcode,"|") // Server Messages lazy val opcode : Parser[ServerMsg] = welcomeOp | reactOp | goodbyeOp lazy val welcomeOp : Parser[Welcome] = "Welcome(" ~> repsep(kv,",") <~ ")" ^^ { case kvs => { val attrs = kvs.toMap Welcome(attrs("name"),attrs("apocalypse").toInt,attrs("round").toInt) } } // React(generation=int,name=string,time=int,view=string,energy=string,master=int:int,coltlision=int:int,slaves=int,...) lazy val reactOp : Parser[React] = "React(" ~> kvs <~ ")" ^^ { case kvs => val extraMap = List("generation","name","time","view","energy","master","collision","slaves") .foldLeft(kvs){case (m,s) => m-s} val extra = extraMap.map(t => t._1+"="+t._2).mkString(",") match { case "" => None case s:String => Some(s) } React(kvs("generation").toInt, kvs("name"), kvs("time").toInt, kvs("view"), kvs("energy").toInt, toTuple(kvs.get("master")), toTuple(kvs.get("collision")), kvs.get("slaves").map(_.toInt), extra) } // Goodbye(energy=int) lazy val goodbyeOp : Parser[Goodbye] = "Goodbye(energy=" ~> wholeNumber <~ ")" ^^ { case energy => Goodbye(energy.toInt) } // Helper functions private def kv : Parser[(String,String)] = ident ~ "=" ~ regex("""[\\?_WMmSsPpBb:\\-\\/\\w\\d]+""".r) ^^ { case key ~ "=" ~ value => (key,value) } private def kvs : Parser[Map[String,String]] = repsep(kv,",") ^^ {_.toMap} private def toTuple(data:Option[String]) = data.map { s => val array = s.split(":") (array(0).toInt,array(1).toInt) } } object ProtocolTranslator extends Parser{ def fromServer(input:String) = parseAll(parser,input) match { case Success(result, _) => result case result : NoSuccess => sys.error(result.get) } def toServer(messages : List[ClientMsg]) = messages.mkString("|") }
tonicebrian/scalatron-bot-example
src/main/scala/mybot/Parser.scala
Scala
apache-2.0
2,118
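The ProtocolTranslator above is built from repsep-ed key=value parsers on top of JavaTokenParsers. A minimal, self-contained sketch of that parsing approach follows; it deliberately avoids the Welcome/React/Goodbye message types referenced above (they are defined elsewhere in that repository) and assumes only that the scala-parser-combinators library is available.

import scala.util.parsing.combinator.JavaTokenParsers

object KeyValueSketch extends JavaTokenParsers {

  // ident "=" value; the value character class here is a simplification.
  private def kv: Parser[(String, String)] =
    ident ~ "=" ~ regex("""[\w:\-]+""".r) ^^ { case k ~ "=" ~ v => (k, v) }

  private def kvs: Parser[Map[String, String]] = repsep(kv, ",") ^^ (_.toMap)

  def parse(input: String): Map[String, String] =
    parseAll(kvs, input) match {
      case Success(result, _) => result
      case failure: NoSuccess => sys.error(failure.msg)
    }

  def main(args: Array[String]): Unit =
    // Map(generation -> 0, name -> MyBot, energy -> 100)
    println(parse("generation=0,name=MyBot,energy=100"))
}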
object Test: def main(args:Array[String]):Unit = val x1 = X.andThen(1){case x if (x%2 == 0) => x} val x2 = Macro.mThen{case x:Int if (x%2 == 0) => x}
dotty-staging/dotty
tests/run-macros/tasty-overload-secondargs/Test_2.scala
Scala
apache-2.0
160
/* Copyright 2014 Twitter, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.twitter.scalding import org.specs._ import org.scalacheck.Arbitrary import org.scalacheck.Arbitrary.arbitrary import org.scalacheck.Properties import org.scalacheck.Prop.forAll import org.scalacheck.Gen._ import scala.util.Success class ConfigTest extends Specification { "A Config" should { "cascadingAppJar works" in { val cls = getClass Config.default.setCascadingAppJar(cls) .getCascadingAppJar must be_==(Some(Success(cls))) } "default has serialization set" in { val sers = Config.default.get("io.serializations").get.split(",").toList sers.last must be_==(classOf[com.twitter.chill.hadoop.KryoSerialization].getName) } "default has chill configured" in { Config.default.get(com.twitter.chill.config.ConfiguredInstantiator.KEY).isDefined must beTrue } "setting timestamp twice does not change it" in { val date = RichDate.now val (oldDate, newConf) = Config.empty.maybeSetSubmittedTimestamp(date) oldDate.isEmpty must beTrue newConf.getSubmittedTimestamp must be_==(Some(date)) val (stillOld, new2) = newConf.maybeSetSubmittedTimestamp(date + Seconds(1)) stillOld must be_==(Some(date)) new2 must be_==(newConf) } } } object ConfigProps extends Properties("Config") { implicit def arbConfig: Arbitrary[Config] = Arbitrary(Arbitrary.arbitrary[Map[String, String]].map(Config(_))) property(".+(k, v).get(k) == Some(v)") = forAll { (c: Config, k: String, v: String) => (c + (k, v)).get(k) == Some(v) } property(".-(k).get(k) == None") = forAll { (c: Config, k: String) => (c - k).get(k) == None } property("++ unions keys") = forAll { (c1: Config, c2: Config) => (c1 ++ c2).toMap.keySet == (c1.toMap.keySet | c2.toMap.keySet) } property("++ == c2.orElse(c1)") = forAll { (c1: Config, c2: Config, keys: Set[String]) => val merged = c1 ++ c2 val testKeys = c1.toMap.keySet | c2.toMap.keySet ++ keys testKeys.forall { k => merged.get(k) == c2.get(k).orElse(c1.get(k)) } } }
wanyifu/scaldingtest
scalding-core/src/test/scala/com/twitter/scalding/ConfigTest.scala
Scala
apache-2.0
2,609
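The properties above exercise Config's Map-like surface. A quick sketch of those same operations on concrete values, assuming scalding-core is on the classpath (the key names below are arbitrary):

import com.twitter.scalding.Config

object ConfigSketch {
  def main(args: Array[String]): Unit = {
    val c = Config.empty + ("some.key", "some.value")

    println(c.get("some.key"))                       // Some(some.value)
    println((c - "some.key").get("some.key"))        // None
    println((c ++ Config(Map("a" -> "1"))).get("a")) // Some(1); per the ++ property, the right side wins on conflicts
  }
}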
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package ai.h2o.sparkling.backend.external import ai.h2o.sparkling.H2OConf import io.fabric8.kubernetes.client.DefaultKubernetesClient import org.apache.spark.expose.Logging object K8sExternalBackendClient extends K8sHeadlessService with K8sH2OStatefulSet with Logging { def stopExternalH2OOnKubernetes(conf: H2OConf): Unit = { val client = new DefaultKubernetesClient deleteH2OHeadlessService(client, conf) deleteH2OStatefulSet(client, conf) } def startExternalH2OOnKubernetes(conf: H2OConf): Unit = { val client = new DefaultKubernetesClient stopExternalH2OOnKubernetes(conf) installH2OHeadlessService(client, conf) installH2OStatefulSet(client, conf, getH2OHeadlessServiceURL(conf)) conf.setH2OCluster(s"${getH2OHeadlessServiceURL(conf)}:54321") } }
h2oai/sparkling-water
core/src/main/scala/ai/h2o/sparkling/backend/external/K8sExternalBackendClient.scala
Scala
apache-2.0
1,598
package funsets /** * 2. Purely Functional Sets. */ object FunSets { /** * We represent a set by its characteristic function, i.e. * its `contains` predicate. */ type Set = Int => Boolean /** * Indicates whether a set contains a given element. */ def contains(s: Set, elem: Int): Boolean = s(elem) /** * Returns the set of the one given element. */ def singletonSet(elem: Int): Set = x => x == elem /** * Returns the union of the two given sets, * the sets of all elements that are in either `s` or `t`. */ def union(s: Set, t: Set): Set = x => s(x) || t(x) /** * Returns the intersection of the two given sets, * the set of all elements that are both in `s` and `t`. */ def intersect(s: Set, t: Set): Set = x => s(x) && t(x) /** * Returns the difference of the two given sets, * the set of all elements of `s` that are not in `t`. */ def diff(s: Set, t: Set): Set = x => s(x) && !t(x) /** * Returns the subset of `s` for which `p` holds. */ def filter(s: Set, p: Int => Boolean): Set = intersect(s, p) /** * The bounds for `forall` and `exists` are +/- 1000. */ val bound = 1000 /** * Returns whether all bounded integers within `s` satisfy `p`. */ def forall(s: Set, p: Int => Boolean): Boolean = { def iter(a: Int): Boolean = { if (a > bound) true else if (s(a) && !p(a)) false else iter(a + 1) } iter(-bound) } /** * Returns whether there exists a bounded integer within `s` * that satisfies `p`. */ def exists(s: Set, p: Int => Boolean): Boolean = !forall(s, x => !p(x)) /** * Returns a set transformed by applying `f` to each element of `s`. */ def map(s: Set, f: Int => Int): Set = x => { exists(s, elem => x == f(elem)) } /** * Displays the contents of a set */ def toString(s: Set): String = { val xs = for (i <- -bound to bound if contains(s, i)) yield i xs.mkString("{", ",", "}") } /** * Prints the contents of a set on the console. */ def printSet(s: Set) { println(toString(s)) } }
adihubba/progfun1
progfun1-funsets/src/main/scala/funsets/FunSets.scala
Scala
mit
2,104
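A usage sketch for the characteristic-function sets defined above; it assumes only that the FunSets object compiles as shown.

import funsets.FunSets._

object FunSetsDemo {
  def main(args: Array[String]): Unit = {
    // Any Int => Boolean predicate is a Set under this representation.
    val evens: Set = x => x % 2 == 0
    val small      = union(singletonSet(1), union(singletonSet(2), singletonSet(3)))

    printSet(small)                   // {1,2,3}
    printSet(intersect(small, evens)) // {2}
    printSet(diff(small, evens))      // {1,3}
    printSet(map(small, x => x * 10)) // {10,20,30}

    println(forall(small, _ > 0))     // true
    println(exists(small, _ == 3))    // true
  }
}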
package com.arcusys.valamis.lesson.scorm.model.manifest

/**
 * A rule executed each time an attempt on a descendant activity terminates, to determine whether this activity needs to exit
 * @param conditions Set of conditions that determine whether the rule will be applied
 */
class ExitConditionRule(val conditions: RuleConditionSet)

ViLPy/Valamis
valamis-scorm-lesson/src/main/scala/com/arcusys/valamis/lesson/scorm/model/manifest/ExitConditionRule.scala
Scala
lgpl-3.0
334
package com.github.gigurra.glasciia import com.github.gigurra.glasciia.GameEvent.InputEvent /** * Created by johan on 2016-10-31. */ case class Act(scenes: Vector[Scene], var sceneIndex: Int = 0) extends InputEventHandler { require(sceneIndex >= 0, s"Cannot create scene with sceneIndex < 0") require(sceneIndex < scenes.length, s"Cannot create scene with sceneIndex >= scenes.length") def currentScene: Scene = scenes(sceneIndex) def finished: Boolean = _finished def size: Int = scenes.length def length: Int = size def last: Scene = scenes.last def update(time: Long): Unit = { if (!finished) { checkStartScene(time) updateScene(time) checkMoveToNextScene(time) } } def forceFinish(): Unit = { _finished = true } def onEnd(): Unit = {} val inputHandler = new PartialFunction[InputEvent, Unit] { def actualHandler: PartialFunction[InputEvent, Unit] = currentScene.inputHandler override def isDefinedAt(event: InputEvent): Boolean = actualHandler.isDefinedAt(event) override def applyOrElse[A1 <: InputEvent, B1 >: Unit](event: A1, default: (A1) => B1): B1 = { if (!finished && isDefinedAt(event)) { apply(event) } else { default(event) } } override def apply(event: InputEvent): Unit = { actualHandler.apply(event) } } private def updateScene(time: Long): Unit = { currentScene.update(time - currentSceneStartedAt) } private def checkMoveToNextScene(time: Long): Unit = { if (currentScene.finished) { val prevSceneIndex = sceneIndex sceneIndex = math.min(length - 1, sceneIndex + 1) if (prevSceneIndex == length - 1) { _finished = true onEnd() } } } private def checkStartScene(time: Long): Unit = { if (!currentScene.begun) { currentScene.begin() currentSceneStartedAt = time } } private var _finished = scenes.isEmpty private var currentSceneStartedAt = 0L } object Act { def apply(scenes: Scene*): Act = new Act(scenes.toVector) }
GiGurra/glasciia
glasciia-core/src/main/scala/com/github/gigurra/glasciia/Act.scala
Scala
mit
2,071
package examples.strictcontenttypes import com.twitter.finagle.http.Method.Get import com.twitter.finagle.http.Request import com.twitter.finagle.http.filter.Cors import com.twitter.finagle.http.filter.Cors.HttpFilter import com.twitter.finagle.http.path.Root import com.twitter.finagle.{Http, Service} import com.twitter.util.Await import io.fintrospect.ContentTypes.{APPLICATION_JSON, APPLICATION_XML} import io.fintrospect.filters.RequestFilters import io.fintrospect.parameters.Path import io.fintrospect.renderers.simplejson.SimpleJson import io.fintrospect.util.StrictContentTypeNegotiation import io.fintrospect.{RouteModule, RouteSpec} /** * Shows how to add routes which can serve multiple content types using strict content-type negotiation. * Basically, the Accept header is checked against the list of supplied services and a match found. If there is no * Accept header set in the request, the first service in the list is used. This means that there is NO sophisticated * content negotiation implemented, although Wildcard Accept headers is supported to match the first supplied mapping service. */ object StrictMultiContentTypeRoute extends App { private def serveJson(name: String) = Service.mk { req: Request => import io.fintrospect.formats.Argo.JsonFormat._ import io.fintrospect.formats.Argo.ResponseBuilder._ Ok(obj("field" -> string(name))) } private def serveXml(name: String) = Service.mk { import io.fintrospect.formats.Xml.ResponseBuilder._ req: Request => Ok(<root> <field> {name} </field> </root>) } val route = RouteSpec() .producing(APPLICATION_XML, APPLICATION_JSON) .at(Get) / "multi" / Path.string("name") bindTo StrictContentTypeNegotiation(APPLICATION_XML -> serveXml, APPLICATION_JSON -> serveJson) val jsonOnlyRoute = RouteSpec() .producing(APPLICATION_JSON) .at(Get) / "json" / Path.string("name") bindTo ((s) => RequestFilters.StrictAccept(APPLICATION_JSON).andThen(serveJson(s))) println("See the service description at: http://localhost:8080 . The route at /multi should match wildcard Accept headers set in a browser.") Await.ready( Http.serve(":8080", new HttpFilter(Cors.UnsafePermissivePolicy) .andThen(RouteModule(Root, SimpleJson()).withRoute(route).toService)) ) }
daviddenton/fintrospect
src/main/scala/examples/strictcontenttypes/StrictMultiContentTypeRoute.scala
Scala
apache-2.0
2,333
package uk.ac.ncl.openlab.intake24.services.fooddb.admin import uk.ac.ncl.openlab.intake24.errors.{LookupError, UnexpectedDatabaseError} import uk.ac.ncl.openlab.intake24.{NewNutrientTableRecord, NutrientTable, NutrientTableRecord} case class SingleNutrientTypeUpdate(nutrientTableRecordId: String, newValue: Option[Double]) trait NutrientTablesAdminService { def listNutrientTables(): Either[UnexpectedDatabaseError, Map[String, NutrientTable]] def searchNutrientTableRecords(nutrientTableId: String, query: Option[String]): Either[UnexpectedDatabaseError, Seq[NutrientTableRecord]] def getNutrientTable(id: String): Either[LookupError, NutrientTable] def createNutrientTable(data: NutrientTable): Either[UnexpectedDatabaseError, Unit] def updateNutrientTable(id: String, data: NutrientTable): Either[LookupError, Unit] def createOrUpdateNutrientTable(data: NutrientTable): Either[UnexpectedDatabaseError, Unit] def deleteNutrientTable(id: String): Either[LookupError, Unit] def deleteAllNutrientTables(): Either[UnexpectedDatabaseError, Unit] def createNutrientTableRecords(records: Seq[NewNutrientTableRecord]): Either[UnexpectedDatabaseError, Unit] def createOrUpdateNutrientTableRecords(records: Seq[NewNutrientTableRecord]): Either[UnexpectedDatabaseError, Unit] def updateSingleNutrientType(nutrientTableId: String, nutrientTypeId: Long, updates: Seq[SingleNutrientTypeUpdate]): Either[UnexpectedDatabaseError, Unit] def getNutrientTableRecordIds(nutrientTableId: String): Either[UnexpectedDatabaseError, Seq[String]] }
digitalinteraction/intake24
FoodDataServices/src/main/scala/uk/ac/ncl/openlab/intake24/services/fooddb/admin/NutrientTablesAdminService.scala
Scala
apache-2.0
1,568
/* * Copyright (c) 2013, Scodec * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * 3. Neither the name of the copyright holder nor the names of its contributors * may be used to endorse or promote products derived from this software without * specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package scodec.bits import java.util.concurrent.TimeUnit import org.openjdk.jmh.annotations.{Benchmark, BenchmarkMode, Mode, OutputTimeUnit, Scope, State} @State(Scope.Benchmark) @BenchmarkMode(Array(Mode.AverageTime)) @OutputTimeUnit(TimeUnit.MICROSECONDS) class ScodecBitsBenchmark { val N = 100000L val M = 1024L val bitChunks_N = (0L until N).map(b => BitVector(b.toByte)).toList val byteChunks_N = (0L until N).map(b => ByteVector(b.toByte)).toList val bytes_N = Array.tabulate(N.toInt)(i => i.toByte) val bitVector_N = bitChunks_N.foldLeft(BitVector.empty)(_ ++ _) val byteVector_N = byteChunks_N.foldLeft(ByteVector.empty)(_ ++ _) val bitChunks_M = (0L until M).map(b => BitVector(b.toByte)).toList val byteChunks_M = (0L until M).map(b => ByteVector(b.toByte)).toList val bytes_M = Array.tabulate(M.toInt)(i => i.toByte) val bitVector_M = bitChunks_M.foldLeft(BitVector.empty)(_ ++ _) val bitVector_M_compact = bitVector_M.copy val byteVector_M = byteChunks_M.foldLeft(ByteVector.empty)(_ ++ _) @Benchmark def listCons_N(): Int = bitChunks_N.foldLeft(List[BitVector]())((t, h) => h :: t).size @Benchmark def vectorSnoc_N(): Int = bitChunks_N.foldLeft(Vector[BitVector]())((a, b) => a :+ b).size @Benchmark def listCons_M(): Int = bitChunks_M.foldLeft(List[BitVector]())((t, h) => h :: t).size @Benchmark def vectorSnoc_M(): Int = bitChunks_M.foldLeft(Vector[BitVector]())((a, b) => a :+ b).size // N @Benchmark def bitVectorAppendSnoc_N(): Long = bitChunks_N.foldLeft(BitVector.empty)(_ ++ _).size @Benchmark def byteVectorAppendSnoc_N(): Long = byteChunks_N.foldLeft(ByteVector.empty)(_ ++ _).size @Benchmark def byteVectorSnoc_N(): Long = bytes_N.foldLeft(ByteVector.empty)(_ :+ _).size @Benchmark def byteVectorSnocUnboxed_N(): Long = { var b = ByteVector.empty var i = 0 while (i < bytes_N.length) { b = b :+ bytes_N(i) i += 1 } b.size } @Benchmark def bitVectorStride_N(): Long = (0L until (N / 512)).foldLeft(bitVector_N)((b, _) => b.drop(512 * 8)).size @Benchmark def byteVectorStride_N(): Long = (0L until 
(N / 512)).foldLeft(byteVector_N)((b, _) => b.drop(512)).size @Benchmark def bitVectorTake_N(): Long = (N until 0L by -512L).foldLeft(bitVector_N)((b, n) => b.take(n)).size @Benchmark def byteVectorTake_N(): Long = (N until 0 by -512).foldLeft(byteVector_N)((b, n) => b.take(n)).size // M @Benchmark def bitVectorAppendSnoc_M(): Long = bitChunks_M.foldLeft(BitVector.empty)(_ ++ _).size @Benchmark def byteVectorAppendSnoc_M(): Long = byteChunks_M.foldLeft(ByteVector.empty)(_ ++ _).size @Benchmark def byteVectorSnoc_M(): Long = bytes_M.foldLeft(ByteVector.empty)(_ :+ _).size @Benchmark def byteVectorSnocUnboxed_M(): Long = { var b = ByteVector.empty var i = 0 while (i < bytes_M.length) { b = b :+ bytes_M(i) i += 1 } b.size } @Benchmark def bitVectorStride_M(): Long = (0L until (M / 512)).foldLeft(bitVector_M)((b, _) => b.drop(512L * 8)).size @Benchmark def byteVectorStride_M(): Long = (0L until (M / 512)).foldLeft(byteVector_M)((b, _) => b.drop(512L)).size @Benchmark def bitVectorTake_M(): Long = (M until 0L by -512L).foldLeft(bitVector_M)((b, n) => b.take(n)).size @Benchmark def byteVectorTake_M(): Long = (M until 0 by -512).foldLeft(byteVector_M)((b, n) => b.take(n)).size @Benchmark def toBase64(): String = bitVector_M.toBase64 @Benchmark def toBase64_compact(): String = bitVector_M_compact.toBase64 @Benchmark def toBase64_JRE(): String = java.util.Base64.getEncoder.encodeToString(bitVector_M.toByteArray) @Benchmark def toBase64_JRE_compact(): String = java.util.Base64.getEncoder.encodeToString(bitVector_M_compact.toByteArray) private val bitVector_M_b64 = bitVector_M.toBase64 @Benchmark def fromBase64(): Option[ByteVector] = ByteVector.fromBase64(bitVector_M_b64) @Benchmark def fromBase64_JRE(): Array[Byte] = java.util.Base64.getDecoder.decode(bitVector_M_b64) private val crc32 = crc(hex"04c11db7".bits, hex"ffffffff".bits, true, true, hex"ffffffff".bits) private val crc32v = (a: BitVector) => crc .builderGeneric(hex"04c11db7".bits, hex"ffffffff".bits, true, true, hex"ffffffff".bits) .updated(a) .result private val crc32i = crc.int32(0x04c11db7, 0xffffffff, true, true, 0xffffffff).andThen(i => BitVector.fromInt(i)) @Benchmark def crc32_M(): BitVector = crc32(bitVector_M) @Benchmark def crc32v_M(): BitVector = crc32v(bitVector_M) @Benchmark def crc32i_M(): BitVector = crc32i(bitVector_M) }
scodec/scodec-bits
benchmark/src/main/scala/ScodecBitsBenchmark.scala
Scala
bsd-3-clause
6,223
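For orientation, these are the kinds of BitVector/ByteVector operations the benchmark above times (append, take/drop, Base64), shown on tiny values; it assumes only that scodec-bits is on the classpath.

import scodec.bits._

object ScodecBitsSketch {
  def main(args: Array[String]): Unit = {
    val bytes: ByteVector = hex"deadbeef"   // 4 bytes
    val bits:  BitVector  = bytes.bits      // the same 32 bits

    println(bytes ++ hex"cafe")             // append
    println(bytes.take(2))                  // first two bytes, 0xdead
    println(bits.drop(8))                   // drops the first byte's worth of bits

    val b64 = bytes.toBase64
    println(b64)                            // 3q2+7w==
    println(ByteVector.fromBase64(b64))     // Some(ByteVector(4 bytes, 0xdeadbeef))
  }
}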
package no.penger.crud import unfiltered.filter.Plan.Intent import unfiltered.filter.request.ContextPath import unfiltered.response._ trait resources { val whitelist = List( "/slick-crud/crud.js", "/slick-crud/crud.css", "/slick-crud/3rdparty/jquery-1.11.1.min.js" ) /* this will serve slick-cruds frontend resources, you can override it if you want to do that yourself */ def resourceIntent: Intent = { case ContextPath(_, resource) if whitelist.contains(resource) => Option(getClass.getResourceAsStream(resource)).fold[ResponseFunction[Any]](NotFound) ( is ⇒ ResponseString(io.Source.fromInputStream(is).mkString) ) } }
pengerno/slick-crud
unfiltered/src/main/scala/no/penger/crud/resources.scala
Scala
apache-2.0
673
import io.gatling.core.Predef._ import io.gatling.http.Predef._ object PatientQuestionnaires { var showFullYear = exec(http("Show year") .get("") .queryParam("filer", "year")) }
silverbullet-dk/opentele-performance-tests
src/test/scala/user-files/simulations/processes/clinician/PatientQuestionnaires.scala
Scala
apache-2.0
187
object test { def verifyKeyword(keyword : String, source : java.io.File, pos : Int) = { assert(keyword != null); } def verifyKeyword(source : java.io.File, pos : Int) = verifyKeyword("", source, pos); }
lampepfl/dotty
tests/untried/neg/t520.scala
Scala
apache-2.0
228
/** * Copyright (C) 2014 TU Berlin ([email protected]) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package eu.stratosphere.benchmarks.systemml.flink import java.io.FileWriter import java.nio.file.{Files, Paths} import com.typesafe.config.Config import org.peelframework.core.beans.data.{DataSet, ExperimentOutput} import org.peelframework.core.beans.experiment.Experiment import org.peelframework.core.beans.system.System import org.peelframework.core.util.{Version, shell} import org.peelframework.flink.beans.system.Flink import spray.json._ /** An `Experiment` implementation which handles the execution of a single Flink job. */ class FlinkExperimentSysML( command: String, systems: Set[System], runner : Flink, runs : Int, inputs : Set[DataSet], outputs: Set[ExperimentOutput], name : String, config : Config) extends Experiment(command, systems, runner, runs, inputs, outputs, name, config) { def this( command: String, runner : Flink, runs : Int, inputs : Set[DataSet], outputs: Set[ExperimentOutput], name : String, config : Config) = this(command, Set.empty[System], runner, runs, inputs, outputs, name, config) override def run(id: Int, force: Boolean): Experiment.Run[Flink] = { new FlinkExperimentSysML.SingleJobRun(id, this, force) } def copy(name: String = name, config: Config = config) = { new FlinkExperimentSysML(command, systems, runner, runs, inputs, outputs, name, config) } } object FlinkExperimentSysML { case class State( command : String, runnerID : String, runnerName : String, runnerVersion : String, var runExitCode : Option[Int] = None, var runTime : Long = 0, var plnExitCode : Option[Int] = None) extends Experiment.RunState object StateProtocol extends DefaultJsonProtocol with NullOptions { implicit val stateFormat = jsonFormat7(State) } /** A private inner class encapsulating the logic of single run. 
*/ class SingleJobRun(val id: Int, val exp: FlinkExperimentSysML, val force: Boolean) extends Experiment.SingleJobRun[Flink, State] { import eu.stratosphere.benchmarks.systemml.flink.FlinkExperimentSysML.StateProtocol._ val runnerLogPath = exp.config.getString(s"system.${exp.runner.configKey}.path.log") override def isSuccessful = state.runExitCode.getOrElse(-1) == 0 // FIXME: && state.plnExitCode.getOrElse(-1) == 0 override protected def loadState(): State = { if (Files.isRegularFile(Paths.get(s"$home/state.json"))) { try { scala.io.Source.fromFile(s"$home/state.json").mkString.parseJson.convertTo[State] } catch { case e: Throwable => State(command, exp.runner.beanName, exp.runner.name, exp.runner.version) } } else { State(command, exp.runner.beanName, exp.runner.name, exp.runner.version) } } override protected def writeState() = { val fw = new FileWriter(s"$home/state.json") fw.write(state.toJson.prettyPrint) fw.close() } override protected def runJob() = { // try to get the experiment run plan val (plnExit, _) = { // assemble options val opts = Seq( if (Version(exp.runner.version) <= Version("0.8")) Some("-e") else Option.empty[String] ) // execute command Experiment.time(this !(s"info ${opts.flatten.mkString(" ")} ${command.trim}", s"$home/run.pln", s"$home/run.pln")) } state.plnExitCode = Some(plnExit) // try to execute the experiment run plan val (runExit, t) = Experiment.time(this !(s"run ${command.trim}", s"$home/run.out", s"$home/run.err")) state.runTime = t state.runExitCode = Some(runExit) } override def cancelJob() = { val ids = (shell !! s"${exp.config.getString(s"system.${exp.runner.configKey}.path.home")}/bin/flink list -r | tail -n +2 | head -n 1 | cut -d':' -f4 | tr -d ' '").split(Array('\\n', ' ')) for (id <- ids) shell ! s"${exp.config.getString(s"system.${exp.runner.configKey}.path.home")}/bin/flink cancel -i $id" state.runTime = exp.config.getLong("experiment.timeout") * 1000 state.runExitCode = Some(-1) } private def !(command: String, outFile: String, errFile: String) = { val expHdp = s"export HADOOP_HOME=${exp.config.getString(s"system.hadoop-2.path.home")}" val runExp = s"${exp.config.getString(s"system.${exp.runner.configKey}.path.home")}/bin/flink ${command.trim} > $outFile 2> $errFile" shell ! s"$expHdp ; $runExp" } } }
fschueler/sysml-benchmark
sysml-benchmark-peelextensions/src/main/scala/eu/stratosphere/benchmarks/systemml/flink/FlinkExperimentSysML.scala
Scala
apache-2.0
5,471
/* * Copyright (c) 2013, Hidekatsu Hirose * Copyright (c) 2013, Hirose-Zouen * This file is subject to the terms and conditions defined in * This file is subject to the terms and conditions defined in * file 'LICENSE.txt', which is part of this source code package. */ package org.hirosezouen.hznet import scala.actors._ import scala.actors.Actor._ import org.hirosezouen.hzutil.HZActor._ import org.hirosezouen.hzutil.HZIO._ import org.hirosezouen.hzutil.HZLog._ import HZSocketServer._ object HZEchoServer { implicit val logger = getLogger(this.getClass.getName) def main(args: Array[String]) { log_info("HZEchoServer:Start") val port = if(args.length < 1) { log_error("Error : Argument required.") sys.exit(1) } else { args(0).toInt } var actors: Set[Actor] = Set.empty actors += startInputActor(System.in) { case "q" | "Q" => { exit(HZNormalStoped()) } } actors += startSocketServer(HZSoServerConf(port), SocketIOStaticDataBuilder, self) { case (_, HZIOStart(so_desc,_,_)) => { log_info("Client connected:%s".format(so_desc)) } case (_, HZDataReceived(receivedData)) => { log_info(new String(receivedData)) self ! HZDataSending(receivedData) } case (_, HZIOStop(_,reason,_,_,_)) => { log_info("Connection closed:%s".format(reason)) } } self.trapExit = true var loopFlag = true var mf: () => Unit = null def mainFun1() = receive { case Exit(stopedActor: Actor, reason) => { log_debug("main:mainFun1:Exit(%s,%s)".format(stopedActor,reason)) actors -= stopedActor if(actors.isEmpty) { loopFlag = false } else { actors.foreach(_ ! HZStop()) System.in.close() /* InputAcotorはclose()の例外で停止する */ mf = mainFun2 } } } def mainFun2() = receive { case Exit(stopedActor: Actor, reason) => { log_debug("main:mainFun2:Exit(%s,%s)".format(stopedActor,reason)) actors -= stopedActor if(actors.isEmpty) loopFlag = false } } /* * メイン処理 */ mf = mainFun1 while(loopFlag) { mf() } log_info("HZEchoServer:end") } }
chokopapashi/HZUtils1.6.x_Scala2.10.5
src/main/scala/org/hirosezouen/hznet/HZEchoServer.scala
Scala
bsd-3-clause
2,821
/* * Copyright (c) 2014-2018 by The Monix Project Developers. * See the project homepage at: https://monix.io * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package monix.tail import cats.effect.Sync import cats.laws._ import cats.laws.discipline._ import monix.eval.{Coeval, Task} import monix.execution.cancelables.BooleanCancelable import monix.execution.exceptions.DummyException import monix.execution.internal.Platform import monix.tail.batches.BatchCursor import org.scalacheck.Test import org.scalacheck.Test.Parameters import scala.util.Failure object IterantTakeEveryNthSuite extends BaseTestSuite { override lazy val checkConfig: Parameters = { if (Platform.isJVM) Test.Parameters.default.withMaxSize(256) else Test.Parameters.default.withMaxSize(32) } def naiveImp[F[_], A](iter: Iterant[F, A], takeEveryNth: Int)(implicit F: Sync[F]): Iterant[F, A] = iter .zipWithIndex .flatMap { case (a, idx) => if ((idx + 1) % takeEveryNth == 0) Iterant[F].pure(a) else Iterant[F].empty } test("naiveImp smoke test") { implicit s => val input = List(1, 2, 3, 4, 5, 6) val iter = Iterant[Coeval].fromList(input) assertEquals(naiveImp(iter, 1).toListL.value(), input) assertEquals(naiveImp(iter, 2).toListL.value(), List(2, 4, 6)) assertEquals(naiveImp(iter, 3).toListL.value(), List(3, 6)) assertEquals(naiveImp(iter, input.length + 1).toListL.value(), List.empty[Int]) } test("Iterant[Task].takeEveryNth equivalence with naiveImp") { implicit s => check3 { (list: List[Int], idx: Int, nr: Int) => val stream = arbitraryListToIterant[Task, Int](list, math.abs(idx) + 1, allowErrors = false) val length = list.length // scale down from (Int.MinValue to Int.MaxValue) to (1 to (length + 1)) range val n = math.round( (length * (nr.toDouble - Int.MinValue.toDouble)) / (Int.MaxValue.toDouble - Int.MinValue.toDouble) + 1 ).toInt val actual = stream.takeEveryNth(n).toListL.runToFuture val expected = naiveImp(stream, n).toListL.runToFuture s.tick() actual.value <-> expected.value } } test("Iterant.takeEveryNth protects against broken batches") { implicit s => check1 { (iter: Iterant[Task, Int]) => val dummy = DummyException("dummy") val suffix = Iterant[Task].nextBatchS[Int](new ThrowExceptionBatch(dummy), Task.now(Iterant[Task].empty)) val stream = iter.onErrorIgnore ++ suffix val received = stream.takeEveryNth(1) received <-> iter.onErrorIgnore ++ Iterant[Task].haltS[Int](Some(dummy)) } } test("Iterant.takeEveryNth protects against broken cursors") { implicit s => check1 { (iter: Iterant[Task, Int]) => val dummy = DummyException("dummy") val suffix = Iterant[Task].nextCursorS[Int](new ThrowExceptionCursor(dummy), Task.now(Iterant[Task].empty)) val stream = iter.onErrorIgnore ++ suffix val received = stream.takeEveryNth(1) received <-> iter.onErrorIgnore ++ Iterant[Task].haltS[Int](Some(dummy)) } } test("Iterant.takeEveryNth triggers early stop on exception") { _ => check1 { (iter: Iterant[Coeval, Int]) => val cancelable = BooleanCancelable() val dummy = DummyException("dummy") val suffix = Iterant[Coeval].nextCursorS[Int](new 
ThrowExceptionCursor(dummy), Coeval.now(Iterant[Coeval].empty)) val stream = (iter.onErrorIgnore ++ suffix).guarantee(Coeval.eval(cancelable.cancel())) assertEquals(stream.takeEveryNth(1).toListL.runTry(), Failure(dummy)) cancelable.isCanceled } } test("Iterant.takeEveryNth throws on invalid n") { implicit s => val source = Iterant[Coeval].nextCursorS(BatchCursor(1,2,3), Coeval.now(Iterant[Coeval].empty[Int])) intercept[IllegalArgumentException] { source.takeEveryNth(0).completedL.value() } } }
Wogan/monix
monix-tail/shared/src/test/scala/monix/tail/IterantTakeEveryNthSuite.scala
Scala
apache-2.0
4,419
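The naiveImp reference above pins down the semantics under test: takeEveryNth(n) keeps every n-th element, counting from 1. On a concrete list (assuming monix-eval and monix-tail on the classpath):

import monix.eval.Coeval
import monix.tail.Iterant

object TakeEveryNthSketch {
  def main(args: Array[String]): Unit = {
    val source = Iterant[Coeval].fromList(List(1, 2, 3, 4, 5, 6))

    println(source.takeEveryNth(1).toListL.value())  // List(1, 2, 3, 4, 5, 6)
    println(source.takeEveryNth(2).toListL.value())  // List(2, 4, 6)
    println(source.takeEveryNth(3).toListL.value())  // List(3, 6)
    println(source.takeEveryNth(7).toListL.value())  // List()
  }
}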
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. package com.azure.cosmos.spark.diagnostics import com.azure.cosmos.implementation.spark.OperationContext import com.azure.cosmos.implementation.{RxDocumentServiceRequest, RxDocumentServiceResponse} import com.azure.cosmos.models.FeedResponse import org.slf4j.{Logger, LoggerFactory} private[spark] class DefaultMinimalSlf4jLogger(classType: Class[_]) extends ILogger { // Make the log field transient so that objects with Logging can // be serialized and used on another machine @transient private lazy val log: Logger = LoggerFactory.getLogger(logName) // Method to get the logger name for this object protected def logName: String = { // Ignore trailing $'s in the class names for Scala objects classType.getName.stripSuffix("$") } // Log methods that take only a String def logInfo(msg: => String) { if (log.isInfoEnabled) log.info(msg) } def logDebug(msg: => String) { if (log.isDebugEnabled) log.debug(msg) } def logTrace(msg: => String) { if (log.isTraceEnabled) log.trace(msg) } def logWarning(msg: => String) { if (log.isWarnEnabled) log.warn(msg) } def logError(msg: => String) { if (log.isErrorEnabled) log.error(msg) } // Log methods that take Throwables (Exceptions/Errors) too def logInfo(msg: => String, throwable: Throwable) { if (log.isInfoEnabled) log.info(msg, throwable) } def isDebugLogEnabled: Boolean = { log.isDebugEnabled() } def logDebug(msg: => String, throwable: Throwable) { if (log.isDebugEnabled) log.debug(msg, throwable) } def logTrace(msg: => String, throwable: Throwable) { if (log.isTraceEnabled) log.trace(msg, throwable) } def logWarning(msg: => String, throwable: Throwable) { if (log.isWarnEnabled) log.warn(msg, throwable) } def logError(msg: => String, throwable: Throwable) { if (log.isErrorEnabled) log.error(msg, throwable) } override def logItemWriteCompletion(writeOperation: WriteOperation): Unit = { } override def logItemWriteSkipped(writeOperation: WriteOperation, detail: => String): Unit = { } override def logItemWriteFailure(writeOperation: WriteOperation): Unit = { } override def logItemWriteFailure(writeOperation: WriteOperation, throwable: Throwable): Unit = { } override def logItemWriteDetails(writeOperation: WriteOperation, detail: => String): Unit = { } override def requestListener(context: OperationContext, request: RxDocumentServiceRequest): Unit = { } override def responseListener(context: OperationContext, response: RxDocumentServiceResponse): Unit = { } override def feedResponseReceivedListener ( context: OperationContext, response: FeedResponse[_] ): Unit = {} override def feedResponseProcessedListener ( context: OperationContext, response: FeedResponse[_] ): Unit = {} override def exceptionListener(context: OperationContext, exception: Throwable): Unit = { } }
Azure/azure-sdk-for-java
sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/diagnostics/DefaultMinimalSlf4jLogger.scala
Scala
mit
3,036
/* * Copyright 2017-2020 47 Degrees Open Source <https://www.47deg.com> * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package higherkindness.mu.rpc.internal.metrics import cats.effect.IO import cats.effect.concurrent.Ref import cats.syntax.apply._ import higherkindness.mu.rpc.internal.interceptors.GrpcMethodInfo import io.grpc.Status case class MetricsOpsRegister( increaseActiveCallsReg: Ref[IO, List[(GrpcMethodInfo, Option[String])]], decreaseActiveCallsReg: Ref[IO, List[(GrpcMethodInfo, Option[String])]], recordMessageSentReg: Ref[IO, List[(GrpcMethodInfo, Option[String])]], recordMessageReceivedReg: Ref[IO, List[(GrpcMethodInfo, Option[String])]], recordHeadersTimeReg: Ref[IO, List[(GrpcMethodInfo, Long, Option[String])]], recordTotalTimeReg: Ref[IO, List[(GrpcMethodInfo, Status, Long, Option[String])]] ) extends MetricsOps[IO] { def increaseActiveCalls(methodInfo: GrpcMethodInfo, classifier: Option[String]): IO[Unit] = increaseActiveCallsReg.update(_ :+ ((methodInfo, classifier))) def decreaseActiveCalls(methodInfo: GrpcMethodInfo, classifier: Option[String]): IO[Unit] = decreaseActiveCallsReg.update(_ :+ ((methodInfo, classifier))) def recordMessageSent(methodInfo: GrpcMethodInfo, classifier: Option[String]): IO[Unit] = recordMessageSentReg.update(_ :+ ((methodInfo, classifier))) def recordMessageReceived(methodInfo: GrpcMethodInfo, classifier: Option[String]): IO[Unit] = recordMessageReceivedReg.update(_ :+ ((methodInfo, classifier))) def recordHeadersTime( methodInfo: GrpcMethodInfo, elapsed: Long, classifier: Option[String] ): IO[Unit] = recordHeadersTimeReg.update(_ :+ ((methodInfo, elapsed, classifier))) def recordTotalTime( methodInfo: GrpcMethodInfo, status: Status, elapsed: Long, classifier: Option[String] ): IO[Unit] = recordTotalTimeReg.update(_ :+ ((methodInfo, status, elapsed, classifier))) } object MetricsOpsRegister { def build: IO[MetricsOpsRegister] = ( Ref.of[IO, List[(GrpcMethodInfo, Option[String])]](Nil), Ref.of[IO, List[(GrpcMethodInfo, Option[String])]](Nil), Ref.of[IO, List[(GrpcMethodInfo, Option[String])]](Nil), Ref.of[IO, List[(GrpcMethodInfo, Option[String])]](Nil), Ref.of[IO, List[(GrpcMethodInfo, Long, Option[String])]](Nil), Ref.of[IO, List[(GrpcMethodInfo, Status, Long, Option[String])]](Nil) ).mapN(MetricsOpsRegister.apply) }
frees-io/freestyle-rpc
modules/tests/src/test/scala/higherkindness/mu/rpc/internal/metrics/MetricsOpsRegister.scala
Scala
apache-2.0
2,977
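A hedged sketch of how this register might be exercised in a test: build it, invoke a couple of the MetricsOps methods, then read the corresponding Ref. The GrpcMethodInfo value is a placeholder (real tests derive it from a gRPC MethodDescriptor), and the classifier string is illustrative.

import cats.effect.IO
import higherkindness.mu.rpc.internal.interceptors.GrpcMethodInfo

object MetricsOpsRegisterSketch {
  val methodInfo: GrpcMethodInfo = ??? // placeholder, not a real construction

  val recordedCalls: IO[List[(GrpcMethodInfo, Option[String])]] =
    for {
      register <- MetricsOpsRegister.build
      _        <- register.increaseActiveCalls(methodInfo, Some("client-A"))
      _        <- register.decreaseActiveCalls(methodInfo, Some("client-A"))
      calls    <- register.increaseActiveCallsReg.get // expect one (methodInfo, Some("client-A")) entry
    } yield calls
}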
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.ml.tuning import java.util.{List => JList} import scala.collection.JavaConverters._ import scala.language.existentials import org.apache.hadoop.fs.Path import org.json4s.DefaultFormats import org.apache.spark.annotation.Since import org.apache.spark.internal.Logging import org.apache.spark.ml.{Estimator, Model} import org.apache.spark.ml.evaluation.Evaluator import org.apache.spark.ml.param.{DoubleParam, ParamMap, ParamValidators} import org.apache.spark.ml.util._ import org.apache.spark.sql.{DataFrame, Dataset} import org.apache.spark.sql.types.StructType /** * Params for [[TrainValidationSplit]] and [[TrainValidationSplitModel]]. */ private[ml] trait TrainValidationSplitParams extends ValidatorParams { /** * Param for ratio between train and validation data. Must be between 0 and 1. * Default: 0.75 * * @group param */ val trainRatio: DoubleParam = new DoubleParam(this, "trainRatio", "ratio between training set and validation set (>= 0 && <= 1)", ParamValidators.inRange(0, 1)) /** @group getParam */ def getTrainRatio: Double = $(trainRatio) setDefault(trainRatio -> 0.75) } /** * Validation for hyper-parameter tuning. * Randomly splits the input dataset into train and validation sets, * and uses evaluation metric on the validation set to select the best model. * Similar to [[CrossValidator]], but only splits the set once. 
*/ @Since("1.5.0") class TrainValidationSplit @Since("1.5.0") (@Since("1.5.0") override val uid: String) extends Estimator[TrainValidationSplitModel] with TrainValidationSplitParams with MLWritable with Logging { @Since("1.5.0") def this() = this(Identifiable.randomUID("tvs")) /** @group setParam */ @Since("1.5.0") def setEstimator(value: Estimator[_]): this.type = set(estimator, value) /** @group setParam */ @Since("1.5.0") def setEstimatorParamMaps(value: Array[ParamMap]): this.type = set(estimatorParamMaps, value) /** @group setParam */ @Since("1.5.0") def setEvaluator(value: Evaluator): this.type = set(evaluator, value) /** @group setParam */ @Since("1.5.0") def setTrainRatio(value: Double): this.type = set(trainRatio, value) /** @group setParam */ @Since("2.0.0") def setSeed(value: Long): this.type = set(seed, value) @Since("2.0.0") override def fit(dataset: Dataset[_]): TrainValidationSplitModel = { val schema = dataset.schema transformSchema(schema, logging = true) val est = $(estimator) val eval = $(evaluator) val epm = $(estimatorParamMaps) val numModels = epm.length val metrics = new Array[Double](epm.length) val Array(trainingDataset, validationDataset) = dataset.randomSplit(Array($(trainRatio), 1 - $(trainRatio)), $(seed)) trainingDataset.cache() validationDataset.cache() // multi-model training logDebug(s"Train split with multiple sets of parameters.") val models = est.fit(trainingDataset, epm).asInstanceOf[Seq[Model[_]]] trainingDataset.unpersist() var i = 0 while (i < numModels) { // TODO: duplicate evaluator to take extra params from input val metric = eval.evaluate(models(i).transform(validationDataset, epm(i))) logDebug(s"Got metric $metric for model trained with ${epm(i)}.") metrics(i) += metric i += 1 } validationDataset.unpersist() logInfo(s"Train validation split metrics: ${metrics.toSeq}") val (bestMetric, bestIndex) = if (eval.isLargerBetter) metrics.zipWithIndex.maxBy(_._1) else metrics.zipWithIndex.minBy(_._1) logInfo(s"Best set of parameters:\\n${epm(bestIndex)}") logInfo(s"Best train validation split metric: $bestMetric.") val bestModel = est.fit(dataset, epm(bestIndex)).asInstanceOf[Model[_]] copyValues(new TrainValidationSplitModel(uid, bestModel, metrics).setParent(this)) } @Since("1.5.0") override def transformSchema(schema: StructType): StructType = transformSchemaImpl(schema) @Since("1.5.0") override def copy(extra: ParamMap): TrainValidationSplit = { val copied = defaultCopy(extra).asInstanceOf[TrainValidationSplit] if (copied.isDefined(estimator)) { copied.setEstimator(copied.getEstimator.copy(extra)) } if (copied.isDefined(evaluator)) { copied.setEvaluator(copied.getEvaluator.copy(extra)) } copied } @Since("2.0.0") override def write: MLWriter = new TrainValidationSplit.TrainValidationSplitWriter(this) } @Since("2.0.0") object TrainValidationSplit extends MLReadable[TrainValidationSplit] { @Since("2.0.0") override def read: MLReader[TrainValidationSplit] = new TrainValidationSplitReader @Since("2.0.0") override def load(path: String): TrainValidationSplit = super.load(path) private[TrainValidationSplit] class TrainValidationSplitWriter(instance: TrainValidationSplit) extends MLWriter { ValidatorParams.validateParams(instance) override protected def saveImpl(path: String): Unit = ValidatorParams.saveImpl(path, instance, sc) } private class TrainValidationSplitReader extends MLReader[TrainValidationSplit] { /** Checked against metadata when loading model */ private val className = classOf[TrainValidationSplit].getName override def load(path: String): 
TrainValidationSplit = { implicit val format = DefaultFormats val (metadata, estimator, evaluator, estimatorParamMaps) = ValidatorParams.loadImpl(path, sc, className) val trainRatio = (metadata.params \\ "trainRatio").extract[Double] val seed = (metadata.params \\ "seed").extract[Long] new TrainValidationSplit(metadata.uid) .setEstimator(estimator) .setEvaluator(evaluator) .setEstimatorParamMaps(estimatorParamMaps) .setTrainRatio(trainRatio) .setSeed(seed) } } } /** * Model from train validation split. * * @param uid Id. * @param bestModel Estimator determined best model. * @param validationMetrics Evaluated validation metrics. */ @Since("1.5.0") class TrainValidationSplitModel private[ml] ( @Since("1.5.0") override val uid: String, @Since("1.5.0") val bestModel: Model[_], @Since("1.5.0") val validationMetrics: Array[Double]) extends Model[TrainValidationSplitModel] with TrainValidationSplitParams with MLWritable { /** A Python-friendly auxiliary constructor. */ private[ml] def this(uid: String, bestModel: Model[_], validationMetrics: JList[Double]) = { this(uid, bestModel, validationMetrics.asScala.toArray) } @Since("2.0.0") override def transform(dataset: Dataset[_]): DataFrame = { transformSchema(dataset.schema, logging = true) bestModel.transform(dataset) } @Since("1.5.0") override def transformSchema(schema: StructType): StructType = { bestModel.transformSchema(schema) } @Since("1.5.0") override def copy(extra: ParamMap): TrainValidationSplitModel = { val copied = new TrainValidationSplitModel ( uid, bestModel.copy(extra).asInstanceOf[Model[_]], validationMetrics.clone()) copyValues(copied, extra) } @Since("2.0.0") override def write: MLWriter = new TrainValidationSplitModel.TrainValidationSplitModelWriter(this) } @Since("2.0.0") object TrainValidationSplitModel extends MLReadable[TrainValidationSplitModel] { @Since("2.0.0") override def read: MLReader[TrainValidationSplitModel] = new TrainValidationSplitModelReader @Since("2.0.0") override def load(path: String): TrainValidationSplitModel = super.load(path) private[TrainValidationSplitModel] class TrainValidationSplitModelWriter(instance: TrainValidationSplitModel) extends MLWriter { ValidatorParams.validateParams(instance) override protected def saveImpl(path: String): Unit = { import org.json4s.JsonDSL._ val extraMetadata = "validationMetrics" -> instance.validationMetrics.toSeq ValidatorParams.saveImpl(path, instance, sc, Some(extraMetadata)) val bestModelPath = new Path(path, "bestModel").toString instance.bestModel.asInstanceOf[MLWritable].save(bestModelPath) } } private class TrainValidationSplitModelReader extends MLReader[TrainValidationSplitModel] { /** Checked against metadata when loading model */ private val className = classOf[TrainValidationSplitModel].getName override def load(path: String): TrainValidationSplitModel = { implicit val format = DefaultFormats val (metadata, estimator, evaluator, estimatorParamMaps) = ValidatorParams.loadImpl(path, sc, className) val trainRatio = (metadata.params \\ "trainRatio").extract[Double] val seed = (metadata.params \\ "seed").extract[Long] val bestModelPath = new Path(path, "bestModel").toString val bestModel = DefaultParamsReader.loadParamsInstance[Model[_]](bestModelPath, sc) val validationMetrics = (metadata.metadata \\ "validationMetrics").extract[Seq[Double]].toArray val model = new TrainValidationSplitModel(metadata.uid, bestModel, validationMetrics) model.set(model.estimator, estimator) .set(model.evaluator, evaluator) .set(model.estimatorParamMaps, estimatorParamMaps) 
.set(model.trainRatio, trainRatio) .set(model.seed, seed) } } }
gioenn/xSpark
mllib/src/main/scala/org/apache/spark/ml/tuning/TrainValidationSplit.scala
Scala
apache-2.0
10,038
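A short usage example for the estimator above, using a logistic regression and a small parameter grid; the grid values and column assumptions are illustrative. TrainValidationSplit evaluates each ParamMap on a single 80/20 split and then refits the best one on the full training set.

import org.apache.spark.ml.classification.LogisticRegression
import org.apache.spark.ml.evaluation.BinaryClassificationEvaluator
import org.apache.spark.ml.tuning.{ParamGridBuilder, TrainValidationSplit}
import org.apache.spark.sql.DataFrame

object TrainValidationSplitExample {
  // `training` and `test` are assumed to carry the usual "features" and "label" columns
  def run(training: DataFrame, test: DataFrame): DataFrame = {
    val lr = new LogisticRegression()
    val paramGrid = new ParamGridBuilder()
      .addGrid(lr.regParam, Array(0.01, 0.1))
      .addGrid(lr.elasticNetParam, Array(0.0, 0.5))
      .build()

    val tvs = new TrainValidationSplit()
      .setEstimator(lr)
      .setEvaluator(new BinaryClassificationEvaluator())
      .setEstimatorParamMaps(paramGrid)
      .setTrainRatio(0.8)

    tvs.fit(training).transform(test)
  }
}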
package com.ponkotuy.proxy import io.lemonlabs.uri.Uri import com.ponkotuy.intercept.Interceptor import io.netty.buffer.ByteBuf import io.netty.channel.ChannelHandlerContext import io.netty.handler.codec.http.{HttpHeaders, HttpRequest} import org.littleshoot.proxy.{HttpFilters, HttpFiltersAdapter, HttpFiltersSourceAdapter} class KCFiltersSource(hosts: Set[String], interceptor: Interceptor) extends HttpFiltersSourceAdapter { private val noopFilters = new HttpFiltersAdapter(null) override def filterRequest(originalRequest: HttpRequest, ctx: ChannelHandlerContext): HttpFilters = if (hosts(HttpHeaders.getHost(originalRequest))) new AggregateContentFilters(originalRequest, ctx) { def finished(requestContent: ByteBuf, responseContent: ByteBuf): Unit = { val uri = Uri.parse(originalRequest.getUri) interceptor.input(uri, requestContent, responseContent) } } else noopFilters }
ttdoda/MyFleetGirls
client/src/main/scala/com/ponkotuy/proxy/KCFiltersSource.scala
Scala
mit
947
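A hedged sketch of wiring this filters source into a LittleProxy server. The host set, port, and `DefaultHttpProxyServer.bootstrap()` entry point are assumptions about the LittleProxy API on the classpath, not something the file above establishes.

import com.ponkotuy.intercept.Interceptor
import org.littleshoot.proxy.impl.DefaultHttpProxyServer

object ProxySketch {
  def start(interceptor: Interceptor): Unit = {
    // only requests whose Host header is in this set get intercepted; others pass through untouched
    val filtersSource = new KCFiltersSource(Set("125.6.184.15"), interceptor)
    DefaultHttpProxyServer.bootstrap()
      .withPort(8080)
      .withFiltersSource(filtersSource)
      .start()
  }
}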
package de.tototec.cmvn.cmdoption import de.tototec.cmdoption.CmdCommand import de.tototec.cmdoption.CmdOption @CmdCommand(names = Array("--generate"), description = "Generate files based on configuration") class GenerateCmd extends HelpAwareCmd { @CmdOption(names = Array("--force"), description = "Generate all files, even those already generated and up-to-date") var force: Boolean = false }
ToToTec/cmvn
de.tototec.cmvn/src/main/scala/de/tototec/cmvn/cmdoption/GenerateCmd.scala
Scala
apache-2.0
402
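A hypothetical sibling command following the same CmdOption annotation pattern as GenerateCmd; the command name and option are inventions for illustration and not part of cmvn.

import de.tototec.cmdoption.{CmdCommand, CmdOption}

@CmdCommand(names = Array("--clean"), description = "Remove previously generated files")
class CleanCmdSketch extends HelpAwareCmd {
  @CmdOption(names = Array("--dry-run"), description = "Only report what would be removed")
  var dryRun: Boolean = false
}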
/* * Copyright (c) 2014 Oculus Info Inc. * http://www.oculusinfo.com/ * * Released under the MIT License. * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the "Software"), to deal in * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to do * so, subject to the following conditions: * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ package com.oculusinfo.tilegen.graph.cluster import org.apache.spark.SparkContext import org.apache.spark.SparkContext._ import org.apache.spark.graphx._ import scala.reflect.ClassTag import org.apache.spark.broadcast.Broadcast import org.apache.spark.graphx.Graph.graphToGraphOps import scala.math.BigDecimal.double2bigDecimal import org.apache.spark.HashPartitioner import org.apache.spark.rdd.RDD /** * Provides low level louvain community detection algorithm functions. Generally used by LouvainHarness * to coordinate the correct execution of the algorithm though its several stages. * * For details on the sequential algorithm see: Fast unfolding of communities in large networks, Blondel 2008 * * Code adapted from Sotera's graphX implementation of the distributed Louvain modularity algorithm * https://github.com/Sotera/spark-distributed-louvain-modularity */ object LouvainCore { /** * Generates a new graph of type Graph[VertexState,Long] based on an input graph of type. * Graph[VD,Long]. The resulting graph can be used for louvain computation. * */ def createLouvainGraph[VD: ClassTag](graph: Graph[VD,Long]) : Graph[VertexState,Long]= { // Create the initial Louvain graph. val nodeWeightMapFunc = (e:EdgeTriplet[VD,Long]) => Iterator((e.srcId,e.attr), (e.dstId,e.attr)) val nodeWeightReduceFunc = (e1:Long,e2:Long) => e1+e2 val nodeWeights = graph.mapReduceTriplets(nodeWeightMapFunc,nodeWeightReduceFunc) val nodeDegrees = graph.degrees // get number of unweighted degrees for each community // TODO -- try to combine the outDegree calc with the mapReduceTriplet func above? 
val louvainGraph = graph.outerJoinVertices(nodeWeights)((vid,data,weightOption)=> { val weight = weightOption.getOrElse(0L) val state = new VertexState() state.community = vid // init each node in it's own community state.changed = false state.communitySigmaTot = weight // communitySigmaTot init as sum of all edge weights incident on this node state.internalWeight = 0L state.internalNodes = 1L state.nodeWeight = weight // nodeWeight also init as sum of all edge weights incident on this node state.nodeDegree = 0 state.extraAttributes = if ((data==null) || (data.getClass.getName != "java.lang.String")) { "" } else { data.toString } state }).outerJoinVertices(nodeDegrees)((vid,state,degreeOption)=> { // append degree info onto each node val degree = degreeOption.getOrElse(0) state.nodeDegree = degree state })//.partitionBy(PartitionStrategy.EdgePartition2D).groupEdges(_+_) return louvainGraph } /** * Transform a graph from [VD,Long] to a a [VertexState,Long] graph and label each vertex with a community * to maximize global modularity (without compressing the graph) */ def louvainFromStandardGraph[VD: ClassTag](sc:SparkContext,graph:Graph[VD,Long], minProgress:Int=1,progressCounter:Int=1) : (Double,Graph[VertexState,Long],Int) = { val louvainGraph = createLouvainGraph(graph) return louvain(sc,louvainGraph,minProgress,progressCounter) } /** * For a graph of type Graph[VertexState,Long] label each vertex with a community to maximize global modularity. * (without compressing the graph) */ def louvain(sc:SparkContext, graph:Graph[VertexState,Long], minProgress:Int=1,progressCounter:Int=1) : (Double,Graph[VertexState,Long],Int)= { var louvainGraph = graph.cache() val graphWeight = louvainGraph.vertices.values.map(vdata=> vdata.internalWeight+vdata.nodeWeight).reduce(_+_) var totalGraphWeight = sc.broadcast(graphWeight) println("totalEdgeWeight: "+totalGraphWeight.value) // gather community information from each vertex's local neighborhood var msgRDD = louvainGraph.mapReduceTriplets(sendMsg,mergeMsg) var activeMessages = msgRDD.count() //materializes the msgRDD and caches it in memory var updated = 0L - minProgress var even = false var count = 0 val maxIter = 100000 var stop = 0 var updatedLastPhase = 0L do { count += 1 even = ! 
even // label each vertex with its best community based on neighboring community information val labeledVerts = louvainVertJoin(louvainGraph,msgRDD,totalGraphWeight,even).cache() // calculate new sigma total value for each community (total weight of each community) val communtiyUpdate = labeledVerts .map( {case (vid,vdata) => (vdata.community,vdata.nodeWeight+vdata.internalWeight)}) .reduceByKey(_+_).cache() // map each vertex ID to its updated community information val communityMapping = labeledVerts .map( {case (vid,vdata) => (vdata.community,vid)}) .join(communtiyUpdate) .map({case (community,(vid,sigmaTot)) => (vid,(community,sigmaTot)) }) .cache() // join the community labeled vertices with the updated community info val updatedVerts = labeledVerts.join(communityMapping).map({ case (vid,(vdata,communityTuple) ) => vdata.community = communityTuple._1 vdata.communitySigmaTot = communityTuple._2 (vid,vdata) }).cache() updatedVerts.count() labeledVerts.unpersist(blocking = false) communtiyUpdate.unpersist(blocking=false) communityMapping.unpersist(blocking=false) val prevG = louvainGraph louvainGraph = louvainGraph.outerJoinVertices(updatedVerts)((vid, old, newOpt) => newOpt.getOrElse(old)) louvainGraph.cache() // gather community information from each vertex's local neighborhood val oldMsgs = msgRDD msgRDD = louvainGraph.mapReduceTriplets(sendMsg, mergeMsg).cache() activeMessages = msgRDD.count() // materializes the graph by forcing computation oldMsgs.unpersist(blocking=false) updatedVerts.unpersist(blocking=false) prevG.unpersistVertices(blocking=false) // half of the communites can swtich on even cycles // and the other half on odd cycles (to prevent deadlocks) // so we only want to look for progess on odd cycles (after all vertcies have had a chance to move) if (even) updated = 0 updated = updated + louvainGraph.vertices.filter(_._2.changed).count if (!even) { println(" # vertices moved: "+java.text.NumberFormat.getInstance().format(updated)) if (updated >= updatedLastPhase - minProgress) stop += 1 updatedLastPhase = updated } } while ( stop <= progressCounter && (even || (updated > 0 && count < maxIter))) println("\\nCompleted in "+count+" cycles") // Use each vertex's neighboring community data to calculate the global modularity of the graph val newVerts = louvainGraph.vertices.innerJoin(msgRDD)((vid,vdata,msgs)=> { // sum the nodes internal weight and all of its edges that are in its community val community = vdata.community var k_i_in = vdata.internalWeight var sigmaTot = vdata.communitySigmaTot.toDouble msgs.foreach({ case( (communityId,sigmaTotal),communityEdgeWeight ) => if (vdata.community == communityId) k_i_in += communityEdgeWeight}) val M = totalGraphWeight.value val k_i = vdata.nodeWeight + vdata.internalWeight var q = (k_i_in.toDouble / M) - ( ( sigmaTot *k_i) / math.pow(M, 2) ) //println(s"vid: $vid community: $community $q = ($k_i_in / $M) - ( ($sigmaTot * $k_i) / math.pow($M, 2) )") if (q < 0) 0 else q }) val actualQ = newVerts.values.reduce(_+_) //--------------------------- //Rename parent communities, if needed, based on max nodeWeight in each community val nodesByCommunity = louvainGraph.vertices.map(n => { // re-map nodes so communityID is key (n._2.community, (n._1, n._2)) }) // (community, (id, nodeWeight)) val communitiesWithMaxWeight = nodesByCommunity.reduceByKey((a,b) => { // find node with max nodeWeight in each community if (a._2.nodeWeight > b._2.nodeWeight) a else b }).map(n => (n._1, (n._2._1, n._2._2.nodeWeight))) val finalNodes = nodesByCommunity // 
join results together, and for each community .join(communitiesWithMaxWeight) // set communityID = nodeID with max weight .map(n => { val id = n._2._1._1 val newCommunityID = n._2._2._1 val vdata = n._2._1._2 vdata.community = newCommunityID (id, vdata) }) val finalLouvainGraph = Graph(finalNodes,louvainGraph.edges) //re-create graph for this hierarchy using new community IDs // return the modularity value of the graph along with the // graph. vertices are labeled with their community return (actualQ,finalLouvainGraph,count/2) //return (actualQ,louvainGraph,count/2) } /** * Creates the messages passed between each vertex to convey neighborhood community data. */ private def sendMsg(et:EdgeTriplet[VertexState,Long]) = { val m1 = (et.dstId,Map((et.srcAttr.community,et.srcAttr.communitySigmaTot)->et.attr)) val m2 = (et.srcId,Map((et.dstAttr.community,et.dstAttr.communitySigmaTot)->et.attr)) Iterator(m1, m2) } /** * Merge neighborhood community data into a single message for each vertex */ private def mergeMsg(m1:Map[(Long,Long),Long],m2:Map[(Long,Long),Long]) ={ val newMap = scala.collection.mutable.HashMap[(Long,Long),Long]() m1.foreach({case (k,v)=> if (newMap.contains(k)) newMap(k) = newMap(k) + v else newMap(k) = v }) m2.foreach({case (k,v)=> if (newMap.contains(k)) newMap(k) = newMap(k) + v else newMap(k) = v }) newMap.toMap } /** * Join vertices with community data form their neighborhood and select the best community for each vertex to maximize change in modularity. * Returns a new set of vertices with the updated vertex state. */ private def louvainVertJoin(louvainGraph:Graph[VertexState,Long], msgRDD:VertexRDD[Map[(Long,Long),Long]], totalEdgeWeight:Broadcast[Long], even:Boolean) = { louvainGraph.vertices.innerJoin(msgRDD)( (vid, vdata, msgs)=> { var bestCommunity = vdata.community var startingCommunityId = bestCommunity var maxDeltaQ = BigDecimal(0.0); var bestSigmaTot = 0L msgs.foreach({ case( (communityId,sigmaTotal),communityEdgeWeight ) => val deltaQ = q(startingCommunityId, communityId, sigmaTotal, communityEdgeWeight, vdata.nodeWeight, vdata.internalWeight,totalEdgeWeight.value) //println(" communtiy: "+communityId+" sigma:"+sigmaTotal+" edgeweight:"+communityEdgeWeight+" q:"+deltaQ) if (deltaQ > maxDeltaQ || (deltaQ > 0 && (deltaQ == maxDeltaQ && communityId > bestCommunity))){ maxDeltaQ = deltaQ bestCommunity = communityId bestSigmaTot = sigmaTotal } }) // only allow changes from low to high communties on even cyces and high to low on odd cycles if ( vdata.community != bestCommunity && ( (even && vdata.community > bestCommunity) || (!even && vdata.community < bestCommunity) ) ){ //println(" "+vid+" SWITCHED from "+vdata.community+" to "+bestCommunity) vdata.community = bestCommunity vdata.communitySigmaTot = bestSigmaTot vdata.changed = true } else{ vdata.changed = false } vdata }) } /** * Returns the change in modularity that would result from a vertex moving to a specified community. 
*/ private def q(currCommunityId:Long, testCommunityId:Long, testSigmaTot:Long, edgeWeightInCommunity:Long, nodeWeight:Long, internalWeight:Long, totalEdgeWeight:Long) : BigDecimal = { val isCurrentCommunity = (currCommunityId.equals(testCommunityId)); val M = BigDecimal(totalEdgeWeight); val k_i_in_L = if (isCurrentCommunity) edgeWeightInCommunity + internalWeight else edgeWeightInCommunity; val k_i_in = BigDecimal(k_i_in_L); val k_i = BigDecimal(nodeWeight + internalWeight); val sigma_tot = if (isCurrentCommunity) BigDecimal(testSigmaTot) - k_i else BigDecimal(testSigmaTot); var deltaQ = BigDecimal(0.0); if (!(isCurrentCommunity && sigma_tot.equals(0.0))) { deltaQ = k_i_in - ( k_i * sigma_tot / M) //println(s" $deltaQ = $k_i_in - ( $k_i * $sigma_tot / $M") } return deltaQ; } /** * Compress a graph by its communities, aggregate both internal node weights and edge * weights within communities. */ def compressGraph(graph:Graph[VertexState,Long],debug:Boolean=true) : Graph[VertexState,Long] = { // aggregate the edge weights of self loops. edges with both src and dst in the same community. // WARNING can not use graph.mapReduceTriplets because we are mapping to new vertexIds val internalEdgeWeights = graph.triplets.flatMap(et=>{ if (et.srcAttr.community == et.dstAttr.community){ Iterator( ( et.srcAttr.community, 2*et.attr) ) // count the weight from both nodes // count the weight from both nodes } else Iterator.empty }).reduceByKey(_+_) // aggregate the internal weights and internal number of nodes for each new community val internalNodeStats = graph.vertices.values.map(vdata=> (vdata.community,(vdata.internalWeight, vdata.internalNodes))) .reduceByKey((a,b) => (a._1 + b._1, a._2 + b._2)) // and save 'extraAttributes' for each new community (just save extraAttributes string for parent community node) val internalExtraAttributes = graph.vertices.flatMap(nodes => { if (nodes._1 == nodes._2.community) { Iterator( (nodes._2.community, nodes._2.extraAttributes) ) } else Iterator.empty }) // combine internal stats and extraAttributes results for each community val allInternalStats = internalNodeStats.leftOuterJoin(internalExtraAttributes).map({case (vid,(stats,attribute2Option)) => val attr2 = attribute2Option.getOrElse(("")) (vid, (stats._1, stats._2, attr2)) }) // join internal weights and self edges to find new internal weight of each community val newVerts = allInternalStats.leftOuterJoin(internalEdgeWeights).map({case (vid,(stats1,weight2Option)) => val weight2 = weight2Option.getOrElse((0L)) val state = new VertexState() state.community = vid state.changed = false state.communitySigmaTot = 0L state.internalWeight = stats1._1 + weight2 state.nodeWeight = 0L state.internalNodes = stats1._2 state.nodeDegree = 0 state.extraAttributes = stats1._3 (vid,state) }).cache() // translate each vertex edge to a community edge val edges = graph.triplets.flatMap(et=> { val src = math.min(et.srcAttr.community,et.dstAttr.community) val dst = math.max(et.srcAttr.community,et.dstAttr.community) if (src != dst) Iterator(new Edge(src, dst, et.attr)) else Iterator.empty }).cache() // generate a new graph where each community of the previous // graph is now represented as a single vertex val compressedGraph = (Graph(newVerts, tempPartitionBy(edges, PartitionStrategy.EdgePartition2D))) .groupEdges(_+_) //val compressedGraph = Graph(newVerts,edges) // //.partitionBy(PartitionStrategy.EdgePartition2D).groupEdges(_+_) // calculate the weighted degree of each node val nodeWeightMapFunc = 
(e:EdgeTriplet[VertexState,Long]) => Iterator((e.srcId,e.attr), (e.dstId,e.attr)) val nodeWeightReduceFunc = (e1:Long,e2:Long) => e1+e2 val nodeWeights = compressedGraph.mapReduceTriplets(nodeWeightMapFunc,nodeWeightReduceFunc) val nodeDegrees = compressedGraph.degrees // get number of unweighted degrees for each community // TODO -- try to combine the outDegree calc with the mapReduceTriplet func above? // fill in the weighted (and unweighted) degree of each node val louvainGraph = compressedGraph.outerJoinVertices(nodeWeights)((vid,data,weightOption)=> { val weight = weightOption.getOrElse(0L) data.communitySigmaTot = weight +data.internalWeight data.nodeWeight = weight data }).outerJoinVertices(nodeDegrees)((vid,data,degreeOption)=> { val degree = degreeOption.getOrElse(0) data.nodeDegree = degree data }).cache() louvainGraph.vertices.count() louvainGraph.triplets.count() // materialize the graph newVerts.unpersist(blocking=false) edges.unpersist(blocking=false) return louvainGraph } // debug printing private def printlouvain(graph:Graph[VertexState,Long]) = { print("\\ncommunity label snapshot\\n(vid,community,sigmaTot)\\n") graph.vertices.mapValues((vid,vdata)=> (vdata.community,vdata.communitySigmaTot)).collect().foreach(f=>println(" "+f)) } // debug printing private def printedgetriplets(graph:Graph[VertexState,Long]) = { print("\\ncommunity label snapshot FROM TRIPLETS\\n(vid,community,sigmaTot)\\n") (graph.triplets.flatMap(e=> Iterator((e.srcId,e.srcAttr.community,e.srcAttr.communitySigmaTot), (e.dstId,e.dstAttr.community,e.dstAttr.communitySigmaTot))).collect()).foreach(f=>println(" "+f)) } // TODO -- temporary PartitionBy func to use as a work-around because the graphX version is currently broken in Spark 1.0 // (see https://github.com/apache/spark/pull/908.patch ) def tempPartitionBy[ED](edges: RDD[Edge[ED]], partitionStrategy: PartitionStrategy): RDD[Edge[ED]] = { val numPartitions = edges.partitions.size edges.map(e => (partitionStrategy.getPartition(e.srcId, e.dstId, numPartitions), e)) .partitionBy(new HashPartitioner(numPartitions)) .mapPartitions(_.map(_._2), preservesPartitioning = true) } }
aashish24/aperture-tiles
tile-generation/src/main/scala/com/oculusinfo/tilegen/graph/cluster/LouvainCore.scala
Scala
mit
19,124
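A hedged sketch of driving one level of the algorithm with the functions above. The toy edge list is illustrative; in practice LouvainHarness owns this loop, repeatedly labelling vertices and compressing the graph until modularity stops improving.

import org.apache.spark.SparkContext
import org.apache.spark.graphx.{Edge, Graph}

object LouvainSketch {
  def onePass(sc: SparkContext): Unit = {
    val edges = sc.parallelize(Seq(Edge(1L, 2L, 1L), Edge(2L, 3L, 1L), Edge(1L, 3L, 1L)))
    val graph = Graph.fromEdges(edges, defaultValue = "") // Graph[String, Long]

    // label each vertex with the community that maximises the modularity gain
    val (modularity, labeled, passes) =
      LouvainCore.louvainFromStandardGraph(sc, graph, minProgress = 1, progressCounter = 1)

    // a second level would start from the compressed community graph
    val nextLevel = LouvainCore.compressGraph(labeled)
    println(s"modularity=$modularity after $passes passes; ${nextLevel.vertices.count()} communities")
  }
}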
/* Copyright 2009-2021 EPFL, Lausanne */ package stainless package frontends.scalac import extraction.xlang.{trees => xt} import scala.tools.nsc import scala.tools.nsc._ import stainless.frontend.{ UnsupportedCodeException, CallBack } /** Extract each compilation unit and forward them to the Compiler callback */ trait StainlessExtraction extends SubComponent with CodeExtraction with FragmentChecker { import global._ val phaseName = "stainless" val ctx: inox.Context import ctx.given protected val callback: CallBack def newPhase(prev: nsc.Phase): StdPhase = new Phase(prev) protected def onRun(run: () => Unit): Unit = { run() } class Phase(prev: nsc.Phase) extends StdPhase(prev) { override def apply(u: CompilationUnit): Unit = { val file = u.source.file.absolute.path val checker = new Checker checker(u.body) // then check ghost accesses val ghostChecker = new GhostAnnotationChecker ghostChecker(u.body) if (!hasErrors()) { val (unit, classes, functions, typeDefs) = extractUnit(u) callback(file, unit, classes, functions, typeDefs) } } override def run(): Unit = { onRun(() => super.run()) } } }
epfl-lara/stainless
frontends/scalac/src/main/scala/stainless/frontends/scalac/StainlessExtraction.scala
Scala
apache-2.0
1,229
package com.twitter.finagle.service import com.twitter.finagle.stats.InMemoryStatsReceiver import com.twitter.util.TimeConversions._ import com.twitter.util._ import com.twitter.finagle._ import com.twitter.finagle.context.Contexts import com.twitter.finagle.tracing._ import org.mockito.ArgumentCaptor import org.mockito.Mockito.{atLeastOnce, spy, verify} import org.scalatest.FunSuite import org.scalatest.junit.JUnitRunner import org.junit.runner.RunWith import org.scalatest.mock.MockitoSugar import scala.collection.JavaConverters._ import scala.language.reflectiveCalls private object TimeoutFilterTest { class TimeoutFilterHelper { val timer = new MockTimer val promise = new Promise[String] { @volatile var interrupted: Option[Throwable] = None setInterruptHandler { case exc => interrupted = Some(exc) } } val service = new Service[String, String] { def apply(request: String) = promise } val timeout = 1.second val exception = new IndividualRequestTimeoutException(timeout) val timeoutFilter = new TimeoutFilter[String, String](timeout, exception, timer) val timeoutService = timeoutFilter.andThen(service) } } @RunWith(classOf[JUnitRunner]) class TimeoutFilterTest extends FunSuite with MockitoSugar { import TimeoutFilterTest.TimeoutFilterHelper test("TimeoutFilter should request succeeds when the service succeeds") { val h = new TimeoutFilterHelper import h._ promise.setValue("1") val res = timeoutService("blah") assert(res.isDefined) assert(Await.result(res) == "1") } test("TimeoutFilter should times out a request that is not successful, cancels underlying") { val h = new TimeoutFilterHelper import h._ Time.withCurrentTimeFrozen { tc: TimeControl => val res = timeoutService("blah") assert(!res.isDefined) assert(promise.interrupted == None) tc.advance(2.seconds) timer.tick() assert(res.isDefined) val t = promise.interrupted intercept[java.util.concurrent.TimeoutException] { throw t.get } intercept[IndividualRequestTimeoutException] { Await.result(res) } } } class DeadlineCtx(val timeout: Duration) { val service = new Service[Unit, Option[Deadline]] { def apply(req: Unit) = Future.value(Deadline.current) } val timer = new MockTimer val exception = new IndividualRequestTimeoutException(timeout) val statsReceiver = new InMemoryStatsReceiver val timeoutFilter = new TimeoutFilter[Unit, Option[Deadline]](timeout, exception, timer, statsReceiver) val timeoutService = timeoutFilter andThen service } test("deadlines, finite timeout") { val ctx = new DeadlineCtx(1.second) import ctx._ Time.withCurrentTimeFrozen { tc => assert(Await.result(timeoutService((): Unit)) == Some(Deadline(Time.now, Time.now+1.second))) // Adjust existing ones. 
val f = Contexts.broadcast.let(Deadline, Deadline(Time.now-1.second, Time.now+200.milliseconds)) { timeoutService((): Unit) } assert(Await.result(f) == Some(Deadline(Time.now, Time.now+200.milliseconds))) } } test("deadlines, infinite timeout") { val ctx = new DeadlineCtx(Duration.Top) import ctx._ Time.withCurrentTimeFrozen { tc => assert(Await.result(timeoutService((): Unit)) == Some(Deadline(Time.now, Time.Top))) // Adjust existing ones val f = Contexts.broadcast.let(Deadline, Deadline(Time.now-1.second, Time.now+1.second)) { timeoutService((): Unit) } assert(Await.result(f) == Some(Deadline(Time.now, Time.now+1.second))) } } test("bug verification: TimeoutFilter incorrectly sends expired deadlines") { val ctx = new DeadlineCtx(1.second) import ctx._ Time.withCurrentTimeFrozen { tc => val now = Time.now val f = Contexts.broadcast.let(Deadline, Deadline(now, now+1.second)) { tc.advance(5.seconds) timeoutService((): Unit) } assert(Await.result(f) == Some(Deadline(now + 5.seconds, now + 1.second))) assert(statsReceiver.stats(Seq("expired_deadline_ms"))(0) == 4.seconds.inMillis) } } private def verifyFilterAddedOrNot( timoutModule: Stackable[ServiceFactory[Int, Int]] ) = { val svc = Service.mk { i: Int => Future.value(i) } val svcFactory = ServiceFactory.const(svc) val stack = timoutModule.toStack(Stack.Leaf(Stack.Role("test"), svcFactory)) def assertNoTimeoutFilter(duration: Duration): Unit = { val params = Stack.Params.empty + TimeoutFilter.Param(duration) val made = stack.make(params) // this relies on the fact that we do not compose // with a TimeoutFilter if the duration is not appropriate. assert(svcFactory == made) } assertNoTimeoutFilter(Duration.Bottom) assertNoTimeoutFilter(Duration.Top) assertNoTimeoutFilter(Duration.Undefined) assertNoTimeoutFilter(Duration.Zero) assertNoTimeoutFilter(-1.second) def assertTimeoutFilter(duration: Duration): Unit = { val params = Stack.Params.empty + TimeoutFilter.Param(duration) val made = stack.make(params) // this relies on the fact that we do compose // with a TimeoutFilter if the duration is appropriate. assert(svcFactory != made) } assertTimeoutFilter(10.seconds) } test("filter added or not to clientModule based on duration") { verifyFilterAddedOrNot(TimeoutFilter.clientModule[Int, Int]) } test("filter added or not to serverModule based on duration") { verifyFilterAddedOrNot(TimeoutFilter.serverModule[Int, Int]) } }
sveinnfannar/finagle
finagle-core/src/test/scala/com/twitter/finagle/service/TimeoutFilterTest.scala
Scala
apache-2.0
5,662
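A hedged sketch of composing a TimeoutFilter outside the test, mirroring the helper above. The timeout value is illustrative and the timer is left as a parameter because the preferred default timer differs across Finagle versions.

import com.twitter.finagle.{IndividualRequestTimeoutException, Service}
import com.twitter.finagle.service.TimeoutFilter
import com.twitter.util.TimeConversions._
import com.twitter.util.Timer

object TimeoutSketch {
  def withTimeout(underlying: Service[String, String], timer: Timer): Service[String, String] = {
    val timeout = 500.milliseconds
    val filter = new TimeoutFilter[String, String](
      timeout, new IndividualRequestTimeoutException(timeout), timer)
    // pending calls are interrupted and fail with the timeout exception once the deadline passes
    filter.andThen(underlying)
  }
}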
/* * Copyright 2001-2013 Artima, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.scalatest.suiteprop import org.scalatest._ import SharedHelpers._ import prop.TableDrivenPropertyChecks class SuiteMatrix extends PropSpec with Matchers with TableDrivenPropertyChecks { property("When info appears in the code of a successful test, it should be reported in the TestSucceeded.") { new InfoInsideTestFiredAfterTestExamples { forAll (examples) { suite => val (testStartingIndex, testSucceededIndex) = getIndexesForTestInformerEventOrderTests(suite, suite.theTestName, suite.msg) testStartingIndex should be < testSucceededIndex } } } // Add a property for completely empty suites and their empty tags property("should, if no test is marked as ignored and there are no tests tagged, return an empty tags map") { new InfoInsideTestFiredAfterTestExamples { forAll (examples) { suite => suite.tags should be (empty) } } } property("should, if the first test is marked as ignored, return a tags map from the tags method that says the first test is ignored") { new FirstTestIgnoredExamples { forAll (examples) { suite => val firstTestName = suite.theTestNames(0) suite.tags should be (Map(firstTestName -> Set("org.scalatest.Ignore"))) } } } property("should, if the second test is marked as ignored, return a tags map from the tags method that says the second test is ignored") { new SecondTestIgnoredExamples { forAll (examples) { suite => val secondTestName = suite.theTestNames(1) suite.tags should be (Map(secondTestName -> Set("org.scalatest.Ignore"))) } } } property("should, if two tests is marked as ignored, return a tags map from the tags method that says that both tests are ignored") { new TwoTestsIgnoredExamples { forAll (examples) { suite => val firstTestName = suite.theTestNames(0) val secondTestName = suite.theTestNames(1) suite.tags should be (Map(firstTestName -> Set("org.scalatest.Ignore"), secondTestName -> Set("org.scalatest.Ignore"))) } } } property("should, if both the second test is marked as ignored and both are marked Slow, return a tags map from the tags method that says the second test is ignored and both are Slow") { new TwoSlowTestsExample { forAll (examples) { suite => val firstTestName = suite.theTestNames(0) val secondTestName = suite.theTestNames(1) suite.tags should be ( Map( firstTestName -> Set("org.scalatest.SlowAsMolasses"), secondTestName -> Set("org.scalatest.Ignore", "org.scalatest.SlowAsMolasses") ) ) } } } property("should, if both tests are marked Slow and the first test Weak, return a tags map from the tags method that says both are Slow and the first also Weak") { new TwoSlowAndOneWeakTestExamples { forAll (examples) { suite => val firstTestName = suite.theTestNames(0) val secondTestName = suite.theTestNames(1) suite.tags should be ( Map( firstTestName -> Set("org.scalatest.SlowAsMolasses", "org.scalatest.WeakAsAKitten"), secondTestName -> Set("org.scalatest.SlowAsMolasses") ) ) } } } }
dotty-staging/scalatest
scalatest-test/src/test/scala/org/scalatest/suiteprop/SuiteMatrix.scala
Scala
apache-2.0
3,910
/* * Copyright 2012-2015 Comcast Cable Communications Management, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.comcast.money.http.client import java.io.Closeable import com.comcast.money.core.{ Tracer, Money } import com.comcast.money.core.Tracers._ import com.comcast.money.internal.SpanLocal import org.apache.http.client.methods.HttpUriRequest import org.apache.http.client.{ HttpClient, ResponseHandler } import org.apache.http.conn.ClientConnectionManager import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.params.HttpParams import org.apache.http.protocol.HttpContext import org.apache.http.{ HttpHost, HttpRequest, HttpResponse } import scala.util.Try object TraceFriendlyHttpSupport { def wrapSimpleExecute(httpRequest: HttpRequest, tracer: Tracer)(f: => HttpResponse): HttpResponse = { var responseCode = 0L try { // Put the X-MoneyTrace header in the request... addTraceHeader(httpRequest) // Time the execution of the request... val response = timed(HttpTraceConfig.HttpResponseTimeTraceKey, tracer)(f) // Get the response code, will be 0 if response is null responseCode = getResponseCode(response) response } finally { tracer.record(HttpTraceConfig.HttpResponseCodeTraceKey, responseCode) } } def getResponseCode(response: HttpResponse): Long = Option(response.getStatusLine).map { statusLine => statusLine.getStatusCode.toLong } getOrElse 0L def addTraceHeader(httpRequest: HttpRequest) { if (httpRequest != null) { SpanLocal.current.foreach { span => httpRequest.setHeader("X-MoneyTrace", span.toHttpHeader) } } } } /** * Provides a thin wrapper around HttpClient to allow support tracing */ class TraceFriendlyHttpClient(wrapee: HttpClient) extends HttpClient with java.io.Closeable { import com.comcast.money.http.client.TraceFriendlyHttpSupport._ val tracer = Money.tracer override def getParams: HttpParams = wrapee.getParams override def getConnectionManager: ClientConnectionManager = wrapee.getConnectionManager override def execute(request: HttpUriRequest): HttpResponse = wrapSimpleExecute(request, tracer) { wrapee.execute(request) } override def execute(request: HttpUriRequest, context: HttpContext): HttpResponse = wrapSimpleExecute(request, tracer) { wrapee.execute(request, context) } override def execute(target: HttpHost, request: HttpRequest): HttpResponse = wrapSimpleExecute(request, tracer) { wrapee.execute(target, request) } override def execute(target: HttpHost, request: HttpRequest, context: HttpContext): HttpResponse = wrapSimpleExecute( request, tracer ) { wrapee.execute(target, request, context) } /** * We are making a big assertion of how the response handler code works; it is expected that they * call one of the execute methods above that are instrumented. In that case, the http code will have been * instrumented already. 
If a client does something other than calling the already instrumented execute method * then we are screwed */ override def execute[T](request: HttpUriRequest, responseHandler: ResponseHandler[_ <: T]): T = { wrapee.execute(request, responseHandler) } override def execute[T](request: HttpUriRequest, responseHandler: ResponseHandler[_ <: T], context: HttpContext): T = { wrapee.execute(request, responseHandler, context) } override def execute[T](target: HttpHost, request: HttpRequest, responseHandler: ResponseHandler[_ <: T]): T = { wrapee.execute(target, request, responseHandler) } override def execute[T](target: HttpHost, request: HttpRequest, responseHandler: ResponseHandler[_ <: T], context: HttpContext): T = { wrapee.execute(target, request, responseHandler, context) } override def close(): Unit = { if (wrapee.isInstanceOf[CloseableHttpClient]) wrapee.asInstanceOf[CloseableHttpClient].close() else if (wrapee.isInstanceOf[Closeable]) wrapee.asInstanceOf[Closeable].close() else if (wrapee.isInstanceOf[AutoCloseable]) wrapee.asInstanceOf[AutoCloseable].close() } } class TraceFriendlyResponseHandler[T](wrapee: ResponseHandler[_ <: T], tracer: Tracer) extends ResponseHandler[T] { import com.comcast.money.http.client.TraceFriendlyHttpSupport._ override def handleResponse(response: HttpResponse): T = { // we always want to handle the response, so swallow any exception here Try { tracer.record(HttpTraceConfig.HttpResponseCodeTraceKey, getResponseCode(response)) } wrapee.handleResponse(response) } }
derjust/money
money-http-client/src/main/scala/com/comcast/money/http/client/TraceFriendlyHttpClient.scala
Scala
apache-2.0
5,173
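A hedged usage sketch: wrapping a stock Apache HttpClient so every execute() call gets the X-MoneyTrace header plus response-time and response-code tracing. Building the client via HttpClients.createDefault() assumes HttpClient 4.3+ is on the classpath; any HttpClient implementation can be wrapped the same way.

import org.apache.http.client.methods.HttpGet
import org.apache.http.impl.client.HttpClients

object TracedHttpSketch {
  def check(): Unit = {
    val traced = new TraceFriendlyHttpClient(HttpClients.createDefault())
    try {
      val response = traced.execute(new HttpGet("http://example.com/health"))
      // the response code and response time have been recorded on the current Money span
      println(response.getStatusLine)
    } finally {
      traced.close()
    }
  }
}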
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.streaming.kinesis import java.util.UUID import java.util.concurrent.ConcurrentHashMap import scala.collection.JavaConverters._ import scala.collection.mutable import scala.util.control.NonFatal import com.amazonaws.services.kinesis.clientlibrary.interfaces.{IRecordProcessor, IRecordProcessorCheckpointer, IRecordProcessorFactory} import com.amazonaws.services.kinesis.clientlibrary.lib.worker.{InitialPositionInStream, KinesisClientLibConfiguration, Worker} import com.amazonaws.services.kinesis.model.Record import org.apache.spark.internal.Logging import org.apache.spark.storage.{StorageLevel, StreamBlockId} import org.apache.spark.streaming.Duration import org.apache.spark.streaming.receiver.{BlockGenerator, BlockGeneratorListener, Receiver} import org.apache.spark.util.Utils /** * Custom AWS Kinesis-specific implementation of Spark Streaming's Receiver. * This implementation relies on the Kinesis Client Library (KCL) Worker as described here: * https://github.com/awslabs/amazon-kinesis-client * * The way this Receiver works is as follows: * * - The receiver starts a KCL Worker, which is essentially runs a threadpool of multiple * KinesisRecordProcessor * - Each KinesisRecordProcessor receives data from a Kinesis shard in batches. Each batch is * inserted into a Block Generator, and the corresponding range of sequence numbers is recorded. * - When the block generator defines a block, then the recorded sequence number ranges that were * inserted into the block are recorded separately for being used later. * - When the block is ready to be pushed, the block is pushed and the ranges are reported as * metadata of the block. In addition, the ranges are used to find out the latest sequence * number for each shard that can be checkpointed through the DynamoDB. * - Periodically, each KinesisRecordProcessor checkpoints the latest successfully stored sequence * number for it own shard. * * @param streamName Kinesis stream name * @param endpointUrl Url of Kinesis service (e.g., https://kinesis.us-east-1.amazonaws.com) * @param regionName Region name used by the Kinesis Client Library for * DynamoDB (lease coordination and checkpointing) and CloudWatch (metrics) * @param initialPositionInStream In the absence of Kinesis checkpoint info, this is the * worker's initial starting position in the stream. * The values are either the beginning of the stream * per Kinesis' limit of 24 hours * (InitialPositionInStream.TRIM_HORIZON) or * the tip of the stream (InitialPositionInStream.LATEST). * @param checkpointAppName Kinesis application name. Kinesis Apps are mapped to Kinesis Streams * by the Kinesis Client Library. If you change the App name or Stream name, * the KCL will throw errors. 
This usually requires deleting the backing * DynamoDB table with the same name this Kinesis application. * @param checkpointInterval Checkpoint interval for Kinesis checkpointing. * See the Kinesis Spark Streaming documentation for more * details on the different types of checkpoints. * @param storageLevel Storage level to use for storing the received objects * @param kinesisCreds SparkAWSCredentials instance that will be used to generate the * AWSCredentialsProvider passed to the KCL to authorize Kinesis API calls. * @param cloudWatchCreds Optional SparkAWSCredentials instance that will be used to generate the * AWSCredentialsProvider passed to the KCL to authorize CloudWatch API * calls. Will use kinesisCreds if value is None. * @param dynamoDBCreds Optional SparkAWSCredentials instance that will be used to generate the * AWSCredentialsProvider passed to the KCL to authorize DynamoDB API calls. * Will use kinesisCreds if value is None. */ private[kinesis] class KinesisReceiver[T]( val streamName: String, endpointUrl: String, regionName: String, initialPositionInStream: InitialPositionInStream, checkpointAppName: String, checkpointInterval: Duration, storageLevel: StorageLevel, messageHandler: Record => T, kinesisCreds: SparkAWSCredentials, dynamoDBCreds: Option[SparkAWSCredentials], cloudWatchCreds: Option[SparkAWSCredentials]) extends Receiver[T](storageLevel) with Logging { receiver => /* * ================================================================================= * The following vars are initialize in the onStart() method which executes in the * Spark worker after this Receiver is serialized and shipped to the worker. * ================================================================================= */ /** * workerId is used by the KCL should be based on the ip address of the actual Spark Worker * where this code runs (not the driver's IP address.) */ @volatile private var workerId: String = null /** * Worker is the core client abstraction from the Kinesis Client Library (KCL). * A worker can process more than one shards from the given stream. * Each shard is assigned its own IRecordProcessor and the worker run multiple such * processors. */ @volatile private var worker: Worker = null @volatile private var workerThread: Thread = null /** BlockGenerator used to generates blocks out of Kinesis data */ @volatile private var blockGenerator: BlockGenerator = null /** * Sequence number ranges added to the current block being generated. * Accessing and updating of this map is synchronized by locks in BlockGenerator. */ private val seqNumRangesInCurrentBlock = new mutable.ArrayBuffer[SequenceNumberRange] /** Sequence number ranges of data added to each generated block */ private val blockIdToSeqNumRanges = new ConcurrentHashMap[StreamBlockId, SequenceNumberRanges] /** * The centralized kinesisCheckpointer that checkpoints based on the given checkpointInterval. */ @volatile private var kinesisCheckpointer: KinesisCheckpointer = null /** * Latest sequence number ranges that have been stored successfully. * This is used for checkpointing through KCL */ private val shardIdToLatestStoredSeqNum = new ConcurrentHashMap[String, String] /** * This is called when the KinesisReceiver starts and must be non-blocking. * The KCL creates and manages the receiving/processing thread pool through Worker.run(). 
*/ override def onStart() { blockGenerator = supervisor.createBlockGenerator(new GeneratedBlockHandler) workerId = Utils.localHostName() + ":" + UUID.randomUUID() kinesisCheckpointer = new KinesisCheckpointer(receiver, checkpointInterval, workerId) val kinesisProvider = kinesisCreds.provider val kinesisClientLibConfiguration = new KinesisClientLibConfiguration( checkpointAppName, streamName, kinesisProvider, dynamoDBCreds.map(_.provider).getOrElse(kinesisProvider), cloudWatchCreds.map(_.provider).getOrElse(kinesisProvider), workerId) .withKinesisEndpoint(endpointUrl) .withInitialPositionInStream(initialPositionInStream) .withTaskBackoffTimeMillis(500) .withRegionName(regionName) /* * RecordProcessorFactory creates impls of IRecordProcessor. * IRecordProcessor adapts the KCL to our Spark KinesisReceiver via the * IRecordProcessor.processRecords() method. * We're using our custom KinesisRecordProcessor in this case. */ val recordProcessorFactory = new IRecordProcessorFactory { override def createProcessor: IRecordProcessor = new KinesisRecordProcessor(receiver, workerId) } worker = new Worker(recordProcessorFactory, kinesisClientLibConfiguration) workerThread = new Thread() { override def run(): Unit = { try { worker.run() } catch { case NonFatal(e) => restart("Error running the KCL worker in Receiver", e) } } } blockIdToSeqNumRanges.clear() blockGenerator.start() workerThread.setName(s"Kinesis Receiver ${streamId}") workerThread.setDaemon(true) workerThread.start() logInfo(s"Started receiver with workerId $workerId") } /** * This is called when the KinesisReceiver stops. * The KCL worker.shutdown() method stops the receiving/processing threads. * The KCL will do its best to drain and checkpoint any in-flight records upon shutdown. */ override def onStop() { if (workerThread != null) { if (worker != null) { worker.shutdown() worker = null } workerThread.join() workerThread = null logInfo(s"Stopped receiver for workerId $workerId") } workerId = null if (kinesisCheckpointer != null) { kinesisCheckpointer.shutdown() kinesisCheckpointer = null } } /** Add records of the given shard to the current block being generated */ private[kinesis] def addRecords(shardId: String, records: java.util.List[Record]): Unit = { if (records.size > 0) { val dataIterator = records.iterator().asScala.map(messageHandler) val metadata = SequenceNumberRange(streamName, shardId, records.get(0).getSequenceNumber(), records.get(records.size() - 1).getSequenceNumber(), records.size()) blockGenerator.addMultipleDataWithCallback(dataIterator, metadata) } } /** Return the current rate limit defined in [[BlockGenerator]]. */ private[kinesis] def getCurrentLimit: Int = { assert(blockGenerator != null) math.min(blockGenerator.getCurrentLimit, Int.MaxValue).toInt } /** Get the latest sequence number for the given shard that can be checkpointed through KCL */ private[kinesis] def getLatestSeqNumToCheckpoint(shardId: String): Option[String] = { Option(shardIdToLatestStoredSeqNum.get(shardId)) } /** * Set the checkpointer that will be used to checkpoint sequence numbers to DynamoDB for the * given shardId. */ def setCheckpointer(shardId: String, checkpointer: IRecordProcessorCheckpointer): Unit = { assert(kinesisCheckpointer != null, "Kinesis Checkpointer not initialized!") kinesisCheckpointer.setCheckpointer(shardId, checkpointer) } /** * Remove the checkpointer for the given shardId. The provided checkpointer will be used to * checkpoint one last time for the given shard. If `checkpointer` is `null`, then we will not * checkpoint. 
*/ def removeCheckpointer(shardId: String, checkpointer: IRecordProcessorCheckpointer): Unit = { assert(kinesisCheckpointer != null, "Kinesis Checkpointer not initialized!") kinesisCheckpointer.removeCheckpointer(shardId, checkpointer) } /** * Remember the range of sequence numbers that was added to the currently active block. * Internally, this is synchronized with `finalizeRangesForCurrentBlock()`. */ private def rememberAddedRange(range: SequenceNumberRange): Unit = { seqNumRangesInCurrentBlock += range } /** * Finalize the ranges added to the block that was active and prepare the ranges buffer * for next block. Internally, this is synchronized with `rememberAddedRange()`. */ private def finalizeRangesForCurrentBlock(blockId: StreamBlockId): Unit = { blockIdToSeqNumRanges.put(blockId, SequenceNumberRanges(seqNumRangesInCurrentBlock.toArray)) seqNumRangesInCurrentBlock.clear() logDebug(s"Generated block $blockId has $blockIdToSeqNumRanges") } /** Store the block along with its associated ranges */ private def storeBlockWithRanges( blockId: StreamBlockId, arrayBuffer: mutable.ArrayBuffer[T]): Unit = { val rangesToReportOption = Option(blockIdToSeqNumRanges.remove(blockId)) if (rangesToReportOption.isEmpty) { stop("Error while storing block into Spark, could not find sequence number ranges " + s"for block $blockId") return } val rangesToReport = rangesToReportOption.get var attempt = 0 var stored = false var throwable: Throwable = null while (!stored && attempt <= 3) { try { store(arrayBuffer, rangesToReport) stored = true } catch { case NonFatal(th) => attempt += 1 throwable = th } } if (!stored) { stop("Error while storing block into Spark", throwable) } // Update the latest sequence number that have been successfully stored for each shard // Note that we are doing this sequentially because the array of sequence number ranges // is assumed to be rangesToReport.ranges.foreach { range => shardIdToLatestStoredSeqNum.put(range.shardId, range.toSeqNumber) } } /** * Class to handle blocks generated by this receiver's block generator. Specifically, in * the context of the Kinesis Receiver, this handler does the following. * * - When an array of records is added to the current active block in the block generator, * this handler keeps track of the corresponding sequence number range. * - When the currently active block is ready to sealed (not more records), this handler * keep track of the list of ranges added into this block in another H */ private class GeneratedBlockHandler extends BlockGeneratorListener { /** * Callback method called after a data item is added into the BlockGenerator. * The data addition, block generation, and calls to onAddData and onGenerateBlock * are all synchronized through the same lock. */ def onAddData(data: Any, metadata: Any): Unit = { rememberAddedRange(metadata.asInstanceOf[SequenceNumberRange]) } /** * Callback method called after a block has been generated. * The data addition, block generation, and calls to onAddData and onGenerateBlock * are all synchronized through the same lock. */ def onGenerateBlock(blockId: StreamBlockId): Unit = { finalizeRangesForCurrentBlock(blockId) } /** Callback method called when a block is ready to be pushed / stored. 
*/ def onPushBlock(blockId: StreamBlockId, arrayBuffer: mutable.ArrayBuffer[_]): Unit = { storeBlockWithRanges(blockId, arrayBuffer.asInstanceOf[mutable.ArrayBuffer[T]]) } /** Callback called in case of any error internal to the BlockGenerator */ def onError(message: String, throwable: Throwable): Unit = { reportError(message, throwable) } } }
mike0sv/spark
external/kinesis-asl/src/main/scala/org/apache/spark/streaming/kinesis/KinesisReceiver.scala
Scala
apache-2.0
15,548
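A small self-contained sketch (not from the Spark sources) of the bounded-retry idiom that `storeBlockWithRanges` above uses when pushing a block to Spark; `RetryStoreSketch`, `storeWithRetry`, `storeOnce` and `maxAttempts` are illustrative names, not part of the original code.

import scala.util.control.NonFatal

object RetryStoreSketch {
  // Try a side-effecting store until it succeeds or the attempt budget is spent,
  // keeping the most recent failure for error reporting.
  def storeWithRetry(storeOnce: () => Unit, maxAttempts: Int = 3): Either[Throwable, Unit] = {
    var attempt = 0
    var stored = false
    var lastError: Throwable = null
    while (!stored && attempt <= maxAttempts) {
      try {
        storeOnce()
        stored = true
      } catch {
        case NonFatal(t) =>
          attempt += 1
          lastError = t
      }
    }
    if (stored) Right(()) else Left(lastError)
  }

  def main(args: Array[String]): Unit = {
    var calls = 0
    val result = storeWithRetry { () =>
      calls += 1
      if (calls < 3) sys.error(s"transient failure #$calls")
    }
    println(result) // Right(()) after two transient failures
  }
}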
/* * Copyright 2015 Mediative * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.mediative.sparrow class CodecLimitations211Test extends CodecLimitationsTestBase { import CodecLimitationsTest._ "toRDD should" - { "successfully marshall RDD => DataFrame => RDD an object containing" - { "Int, Double" in { assertCodec(TestToRdd4(1, 2.0)) } } } }
jonas/sparrow
core/src/test/scala-2.11/com.mediative.sparrow/CodecLimitations211Test.scala
Scala
apache-2.0
906
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.test import java.io.File import java.util.UUID import scala.concurrent.duration._ import scala.language.implicitConversions import scala.util.Try import scala.util.control.NonFatal import org.apache.hadoop.conf.Configuration import org.scalatest.BeforeAndAfterAll import org.scalatest.concurrent.Eventually import org.apache.spark.SparkFunSuite import org.apache.spark.sql._ import org.apache.spark.sql.catalyst.analysis.NoSuchTableException import org.apache.spark.sql.catalyst.catalog.SessionCatalog.DEFAULT_DATABASE import org.apache.spark.sql.catalyst.FunctionIdentifier import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.catalyst.util._ import org.apache.spark.sql.execution.FilterExec import org.apache.spark.util.{UninterruptibleThread, Utils} /** * Helper trait that should be extended by all SQL test suites. * * This allows subclasses to plugin a custom [[SQLContext]]. It comes with test data * prepared in advance as well as all implicit conversions used extensively by dataframes. * To use implicit methods, import `testImplicits._` instead of through the [[SQLContext]]. * * Subclasses should *not* create [[SQLContext]]s in the test suite constructor, which is * prone to leaving multiple overlapping [[org.apache.spark.SparkContext]]s in the same JVM. */ private[sql] trait SQLTestUtils extends SparkFunSuite with Eventually with BeforeAndAfterAll with SQLTestData { self => protected def sparkContext = spark.sparkContext // Whether to materialize all test data before the first test is run private var loadTestDataBeforeTests = false // Shorthand for running a query using our SQLContext protected lazy val sql = spark.sql _ /** * A helper object for importing SQL implicits. * * Note that the alternative of importing `spark.implicits._` is not possible here. * This is because we create the [[SQLContext]] immediately before the first test is run, * but the implicits import is needed in the constructor. */ protected object testImplicits extends SQLImplicits { protected override def _sqlContext: SQLContext = self.spark.sqlContext } /** * Materialize the test data immediately after the [[SQLContext]] is set up. * This is necessary if the data is accessed by name but not through direct reference. */ protected def setupTestData(): Unit = { loadTestDataBeforeTests = true } protected override def beforeAll(): Unit = { super.beforeAll() if (loadTestDataBeforeTests) { loadTestData() } } /** * Sets all SQL configurations specified in `pairs`, calls `f`, and then restore all SQL * configurations. 
* * @todo Probably this method should be moved to a more general place */ protected def withSQLConf(pairs: (String, String)*)(f: => Unit): Unit = { val (keys, values) = pairs.unzip val currentValues = keys.map { key => if (spark.conf.contains(key)) { Some(spark.conf.get(key)) } else { None } } (keys, values).zipped.foreach(spark.conf.set) try f finally { keys.zip(currentValues).foreach { case (key, Some(value)) => spark.conf.set(key, value) case (key, None) => spark.conf.unset(key) } } } /** * Generates a temporary path without creating the actual file/directory, then pass it to `f`. If * a file/directory is created there by `f`, it will be delete after `f` returns. * * @todo Probably this method should be moved to a more general place */ protected def withTempPath(f: File => Unit): Unit = { val path = Utils.createTempDir() path.delete() try f(path) finally Utils.deleteRecursively(path) } /** * Waits for all tasks on all executors to be finished. */ protected def waitForTasksToFinish(): Unit = { eventually(timeout(10.seconds)) { assert(spark.sparkContext.statusTracker .getExecutorInfos.map(_.numRunningTasks()).sum == 0) } } /** * Creates a temporary directory, which is then passed to `f` and will be deleted after `f` * returns. * * @todo Probably this method should be moved to a more general place */ protected def withTempDir(f: File => Unit): Unit = { val dir = Utils.createTempDir().getCanonicalFile try f(dir) finally { // wait for all tasks to finish before deleting files waitForTasksToFinish() Utils.deleteRecursively(dir) } } /** * Drops functions after calling `f`. A function is represented by (functionName, isTemporary). */ protected def withUserDefinedFunction(functions: (String, Boolean)*)(f: => Unit): Unit = { try { f } catch { case cause: Throwable => throw cause } finally { // If the test failed part way, we don't want to mask the failure by failing to remove // temp tables that never got created. functions.foreach { case (functionName, isTemporary) => val withTemporary = if (isTemporary) "TEMPORARY" else "" spark.sql(s"DROP $withTemporary FUNCTION IF EXISTS $functionName") assert( !spark.sessionState.catalog.functionExists(FunctionIdentifier(functionName)), s"Function $functionName should have been dropped. But, it still exists.") } } } /** * Drops temporary table `tableName` after calling `f`. */ protected def withTempView(tableNames: String*)(f: => Unit): Unit = { try f finally { // If the test failed part way, we don't want to mask the failure by failing to remove // temp tables that never got created. try tableNames.foreach(spark.catalog.dropTempView) catch { case _: NoSuchTableException => } } } /** * Drops table `tableName` after calling `f`. */ protected def withTable(tableNames: String*)(f: => Unit): Unit = { try f finally { tableNames.foreach { name => spark.sql(s"DROP TABLE IF EXISTS $name") } } } /** * Drops view `viewName` after calling `f`. */ protected def withView(viewNames: String*)(f: => Unit): Unit = { try f finally { viewNames.foreach { name => spark.sql(s"DROP VIEW IF EXISTS $name") } } } /** * Creates a temporary database and switches current database to it before executing `f`. This * database is dropped after `f` returns. * * Note that this method doesn't switch current database before executing `f`. 
*/ protected def withTempDatabase(f: String => Unit): Unit = { val dbName = s"db_${UUID.randomUUID().toString.replace('-', '_')}" try { spark.sql(s"CREATE DATABASE $dbName") } catch { case cause: Throwable => fail("Failed to create temporary database", cause) } try f(dbName) finally { if (spark.catalog.currentDatabase == dbName) { spark.sql(s"USE ${DEFAULT_DATABASE}") } spark.sql(s"DROP DATABASE $dbName CASCADE") } } /** * Activates database `db` before executing `f`, then switches back to `default` database after * `f` returns. */ protected def activateDatabase(db: String)(f: => Unit): Unit = { spark.sessionState.catalog.setCurrentDatabase(db) try f finally spark.sessionState.catalog.setCurrentDatabase("default") } /** * Strip Spark-side filtering in order to check if a datasource filters rows correctly. */ protected def stripSparkFilter(df: DataFrame): DataFrame = { val schema = df.schema val withoutFilters = df.queryExecution.sparkPlan.transform { case FilterExec(_, child) => child } spark.internalCreateDataFrame(withoutFilters.execute(), schema) } /** * Turn a logical plan into a [[DataFrame]]. This should be removed once we have an easier * way to construct [[DataFrame]] directly out of local data without relying on implicits. */ protected implicit def logicalPlanToSparkQuery(plan: LogicalPlan): DataFrame = { Dataset.ofRows(spark, plan) } /** * Disable stdout and stderr when running the test. To not output the logs to the console, * ConsoleAppender's `follow` should be set to `true` so that it will honors reassignments of * System.out or System.err. Otherwise, ConsoleAppender will still output to the console even if * we change System.out and System.err. */ protected def testQuietly(name: String)(f: => Unit): Unit = { test(name) { quietly { f } } } /** Run a test on a separate [[UninterruptibleThread]]. */ protected def testWithUninterruptibleThread(name: String, quietly: Boolean = false) (body: => Unit): Unit = { val timeoutMillis = 10000 @transient var ex: Throwable = null def runOnThread(): Unit = { val thread = new UninterruptibleThread(s"Testing thread for test $name") { override def run(): Unit = { try { body } catch { case NonFatal(e) => ex = e } } } thread.setDaemon(true) thread.start() thread.join(timeoutMillis) if (thread.isAlive) { thread.interrupt() // If this interrupt does not work, then this thread is most likely running something that // is not interruptible. There is not much point to wait for the thread to termniate, and // we rather let the JVM terminate the thread on exit. fail( s"Test '$name' running on o.a.s.util.UninterruptibleThread timed out after" + s" $timeoutMillis ms") } else if (ex != null) { throw ex } } if (quietly) { testQuietly(name) { runOnThread() } } else { test(name) { runOnThread() } } } } private[sql] object SQLTestUtils { def compareAnswers( sparkAnswer: Seq[Row], expectedAnswer: Seq[Row], sort: Boolean): Option[String] = { def prepareAnswer(answer: Seq[Row]): Seq[Row] = { // Converts data to types that we can do equality comparison using Scala collections. // For BigDecimal type, the Scala type has a better definition of equality test (similar to // Java's java.math.BigDecimal.compareTo). // For binary arrays, we convert it to Seq to avoid of calling java.util.Arrays.equals for // equality test. 
// This function is copied from Catalyst's QueryTest val converted: Seq[Row] = answer.map { s => Row.fromSeq(s.toSeq.map { case d: java.math.BigDecimal => BigDecimal(d) case b: Array[Byte] => b.toSeq case o => o }) } if (sort) { converted.sortBy(_.toString()) } else { converted } } if (prepareAnswer(expectedAnswer) != prepareAnswer(sparkAnswer)) { val errorMessage = s""" | == Results == | ${sideBySide( s"== Expected Answer - ${expectedAnswer.size} ==" +: prepareAnswer(expectedAnswer).map(_.toString()), s"== Actual Answer - ${sparkAnswer.size} ==" +: prepareAnswer(sparkAnswer).map(_.toString())).mkString("\\n")} """.stripMargin Some(errorMessage) } else { None } } }
u2009cf/spark-radar
sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala
Scala
apache-2.0
11,979
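A standalone sketch of the set/run/restore idiom behind `withSQLConf` above; `FakeConf` is a hypothetical stand-in for `spark.conf` so the snippet runs without a SparkSession, and the key names are illustrative strings only.

object WithConfSketch {
  // A tiny stand-in for spark.conf: just a mutable key-value store.
  final class FakeConf {
    private val values = scala.collection.mutable.Map.empty[String, String]
    def contains(key: String): Boolean = values.contains(key)
    def get(key: String): String = values(key)
    def set(key: String, value: String): Unit = values(key) = value
    def unset(key: String): Unit = values.remove(key)
  }

  // Set the given pairs, run f, then restore the previous values (or unset keys that were absent).
  def withConf(conf: FakeConf)(pairs: (String, String)*)(f: => Unit): Unit = {
    val (keys, values) = pairs.unzip
    val previous = keys.map(k => if (conf.contains(k)) Some(conf.get(k)) else None)
    keys.zip(values).foreach { case (k, v) => conf.set(k, v) }
    try f finally {
      keys.zip(previous).foreach {
        case (k, Some(v)) => conf.set(k, v)
        case (k, None)    => conf.unset(k)
      }
    }
  }

  def main(args: Array[String]): Unit = {
    val conf = new FakeConf
    conf.set("spark.sql.shuffle.partitions", "200")
    withConf(conf)("spark.sql.shuffle.partitions" -> "5", "spark.sql.ansi.enabled" -> "true") {
      println(conf.get("spark.sql.shuffle.partitions")) // 5, inside the block
    }
    println(conf.get("spark.sql.shuffle.partitions"))   // 200, restored
    println(conf.contains("spark.sql.ansi.enabled"))    // false, unset again
  }
}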
/* Copyright 2015 Mario Pastorelli ([email protected]) * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package purecsv.safe import scala.collection.immutable import scala.util.{Failure, Success, Try} package object tryutil { implicit class IterableOfTry[A](iter: Iterable[Try[A]]) { /** A tuple composed by the successes and the failures */ lazy val getSuccessesAndFailures: (immutable.List[(Int,A)], immutable.List[(Int,Throwable)]) = { val successes = scala.collection.mutable.Buffer[(Int,A)]() val failures = scala.collection.mutable.Buffer[(Int,Throwable)]() iter.zipWithIndex.foreach { case (Success(a),i) => successes += (i+1 -> a) case (Failure(f),i) => failures += (i+1 -> f) } (successes.toList,failures.toList) } } implicit class IteratorOfTry[A](iter: Iterator[Try[A]]) extends IterableOfTry[A](iter.toIterable) }
jyt109/PureCSV
src/main/scala/purecsv/safe/tryutil/TryUtil.scala
Scala
apache-2.0
1,476
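A possible usage sketch for the `IterableOfTry` helper above, assuming `purecsv-safe` is on the classpath; note that the returned indices are 1-based, per the `i + 1` in the implementation.

import scala.util.Try

import purecsv.safe.tryutil._

object TryUtilSketch {
  def main(args: Array[String]): Unit = {
    val parsed: List[Try[Int]] = List(Try("1".toInt), Try("oops".toInt), Try("3".toInt))

    // Split into successful rows and failed rows, each paired with its 1-based position.
    val (successes, failures) = parsed.getSuccessesAndFailures
    println(successes)          // List((1,1), (3,3))
    println(failures.map(_._1)) // List(2): the row that failed to parse
  }
}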
package breeze.linalg import org.scalatest.FunSuite /** * TODO * * @author dlwh **/ class BitVectorTest extends FunSuite { test("Ones") { val as = BitVector.ones(5) val expected = BitVector(5)(0, 1, 2, 3, 4) assert( as === expected) } test("Or") { val as = BitVector(10)(1, 3, 5, 7) val bs = BitVector(10)(1, 2, 3, 5) assert( (as | bs) === BitVector(10)(1, 2, 3, 5, 7)) } test("And") { val as = BitVector(10)(1, 3, 5, 7) val bs = BitVector(10)(1, 2, 3, 5) assert( (as & bs) === BitVector(10)(1, 3, 5)) } test("Xor") { val as = BitVector(10)(1, 3, 5, 7) val bs = BitVector(10)(1, 2, 3, 5) assert( (as ^^ bs) === BitVector(10)(2, 7)) } test("Eq") { val as = BitVector(10)(1, 3, 5, 7) val bs = BitVector(10)(1, 2, 3, 5) assert( (as :== bs) === BitVector(10)(0, 1, 3, 4, 5, 6, 8,9)) } test("Ne") { val as = BitVector(10)(1, 3, 5, 7) val bs = BitVector(10)(1, 2, 3, 5) assert( (as :!= bs) === BitVector(10)(2, 7)) } test("Op Not") { val a = BitVector(10)(2, 7) assert(!a === BitVector(10)(0,1,3,4, 5,6, 8,9)) } test("MulInner") { val a = BitVector (false, false, true , true, false, true, true) val b = SparseVector(1, 0, 2, 0, 3, 4, 0) val bd = DenseVector(1, 0, 2, 0, 3, 4, 0) b.compact() assert( (a dot b) === (b dot a)) assert( (a dot b) === 6) assert( (a dot bd) === 6) } test("axpy") { val a = BitVector (false, false, true , true, false, true, true) val b = SparseVector(1, 0, 2, 0, 3, 4, 0) b.compact() val bd = DenseVector(1, 0, 2, 0, 3, 4, 0) axpy(3, a, b) axpy(3, a, bd) assert(b === SparseVector(1, 0, 5, 3, 3, 7, 3)) assert(bd === DenseVector(1, 0, 5, 3, 3, 7, 3)) } test("sum") { assert(sum(BitVector(false, false, true)) === true) assert(sum(BitVector(false, false, false)) === false) } test("product") { assert(product(BitVector(true, true, true)) === true) assert(product(BitVector(false, false, true)) === false) assert(product(BitVector(false, false, false)) === false) } test("mapActivePairs doesn't touch false entries") { val a = BitVector(10)(1,3,5,7) a.mapActivePairs((k,v)=>assert(v)) } }
crealytics/breeze
math/src/test/scala/breeze/linalg/BitVectorTest.scala
Scala
apache-2.0
2,342
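A condensed usage sketch distilled from the tests above, assuming Breeze is on the classpath; the expected results in the comments mirror the assertions in the test suite.

import breeze.linalg._

object BitVectorSketch {
  def main(args: Array[String]): Unit = {
    // BitVector(length)(indices*) sets the listed positions to true.
    val a = BitVector(10)(1, 3, 5, 7)
    val b = BitVector(10)(1, 2, 3, 5)

    println(a & b)  // true at 1, 3, 5        (per the "And" test)
    println(a | b)  // true at 1, 2, 3, 5, 7  (per the "Or" test)
    println(a ^^ b) // true at 2, 7           (per the "Xor" test)

    // Boolean/numeric interop, as in the "MulInner" test: dot sums the vector
    // entries at the positions where the BitVector is true.
    val v = DenseVector(1, 0, 2, 0, 3, 4, 0, 0, 0, 0)
    println(a dot v) // 0 + 0 + 4 + 0 = 4
  }
}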
package org.json4s package native import org.json4s.JsonAST.JField import org.json4s.native.Document._ import org.json4s.prefs.EmptyValueStrategy trait JsonMethods extends org.json4s.JsonMethods[Document] { def parse[A: AsJsonInput](in: A, useBigDecimalForDouble: Boolean = false, useBigIntForLong: Boolean = true): JValue = JsonParser.parse(in, useBigDecimalForDouble = useBigDecimalForDouble, useBigIntForLong = useBigIntForLong) override def parseOpt[A: AsJsonInput]( in: A, useBigDecimalForDouble: Boolean = false, useBigIntForLong: Boolean = true ): Option[JValue] = { try { JsonParser .parse( s = AsJsonInput.asJsonInput(in).toReader(), useBigDecimalForDouble = useBigDecimalForDouble, useBigIntForLong = useBigIntForLong ) .toOption } catch { case _: Exception => None } } /** * Renders JSON. * @see Printer#compact * @see Printer#pretty */ def render( value: JValue, alwaysEscapeUnicode: Boolean = false, emptyValueStrategy: EmptyValueStrategy = EmptyValueStrategy.default ): Document = emptyValueStrategy.replaceEmpty(value) match { case null => text("null") case JBool(true) => text("true") case JBool(false) => text("false") case JDouble(n) => text(StreamingJsonWriter.handleInfinity(n)) case JDecimal(n) => text(n.toString) case JLong(n) => text(n.toString) case JInt(n) => text(n.toString) case JNull => text("null") case JNothing => sys.error("can't render 'nothing'") case JString(null) => text("null") case JString(s) => text("\\"" + ParserUtil.quote(s, alwaysEscapeUnicode) + "\\"") case JArray(arr) => text("[") :: series(trimArr(arr).map(render(_, alwaysEscapeUnicode, emptyValueStrategy))) :: text("]") case JSet(set) => text("[") :: series(trimArr(set).map(render(_, alwaysEscapeUnicode, emptyValueStrategy))) :: text("]") case JObject(obj) => val nested = break :: fields(trimObj(obj).map { case (n, v) => text("\\"" + ParserUtil.quote(n, alwaysEscapeUnicode) + "\\":") :: render( v, alwaysEscapeUnicode, emptyValueStrategy ) }) text("{") :: nest(2, nested) :: break :: text("}") } private def trimArr(xs: Iterable[JValue]) = xs.withFilter(_ != JNothing) private def trimObj(xs: List[JField]) = xs.filter(_._2 != JNothing) private def series(docs: Iterable[Document]) = punctuate(text(","), docs) private def fields(docs: List[Document]) = punctuate(text(",") :: break, docs) private def punctuate(p: Document, docs: Iterable[Document]): Document = if (docs.isEmpty) empty else docs.reduceLeft((d1, d2) => d1 :: p :: d2) def compact(d: Document): String = Printer.compact(d) def pretty(d: Document): String = Printer.pretty(d) } object JsonMethods extends native.JsonMethods
json4s/json4s
native-core/shared/src/main/scala/org/json4s/native/JsonMethods.scala
Scala
apache-2.0
2,953
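A round-trip usage sketch for the `JsonMethods` shown above, assuming `json4s-native` is on the classpath.

import org.json4s._
import org.json4s.native.JsonMethods._

object JsonMethodsSketch {
  def main(args: Array[String]): Unit = {
    val json: JValue = parse("""{"name":"widget","tags":["a","b"],"count":3}""")

    // render produces a Document; compact/pretty turn that Document into a String.
    println(compact(render(json)))
    println(pretty(render(json)))

    // parseOpt swallows parse errors and yields None instead of throwing.
    println(parseOpt("{not json")) // None
  }
}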
package almhirt.aggregates import scalaz._, Scalaz._ import almhirt.common._ /** * The version of an aggregate root. Used for optimistic concurrency; it always starts at 0, where version 0 * means that the aggregate root is in state [[Vacat]]. */ final case class AggregateRootVersion(val value: Long) extends AnyVal with Ordered[AggregateRootVersion] { def compare(that: AggregateRootVersion) = this.value.compareTo(that.value) def inc() = AggregateRootVersion(value + 1L) } object ValidatedAggregateRootVersion { def apply(value: Long): AlmValidation[AggregateRootVersion] = if (value >= 0L) AggregateRootVersion(value).success else BadDataProblem(s"$value is not a valid version. It must be greater than or equal to 0.").failure }
chridou/almhirt
almhirt-common/src/main/scala/almhirt/aggregates/AggregateRootVersion.scala
Scala
apache-2.0
756
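A brief usage sketch, assuming `almhirt-common` is on the classpath; the expected behavior follows directly from the definitions above.

import almhirt.aggregates._

object AggregateRootVersionSketch {
  def main(args: Array[String]): Unit = {
    val v0 = AggregateRootVersion(0L) // version 0: the aggregate root is still in state Vacat
    val v1 = v0.inc()
    println(v1)      // AggregateRootVersion(1)
    println(v0 < v1) // true, via Ordered[AggregateRootVersion]

    // The validated factory accepts non-negative versions and fails otherwise.
    println(ValidatedAggregateRootVersion(3L))  // succeeds with AggregateRootVersion(3)
    println(ValidatedAggregateRootVersion(-1L)) // fails with a BadDataProblem
  }
}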
/* * Original implementation (C) 2009-2016 Lightbend Inc. (https://www.lightbend.com). * Adapted and extended in 2016 by Eugene Yokota * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package gigahorse package support.asynchttpclient import shaded.ahc.org.asynchttpclient._ import scala.concurrent.duration._ import com.typesafe.sslconfig.ssl._ import shaded.ahc.io.netty.handler.ssl.SslContextBuilder import shaded.ahc.io.netty.handler.ssl.util.InsecureTrustManagerFactory import shaded.ahc.org.asynchttpclient.netty.ssl.JsseSslEngineFactory object AhcConfig { import AhcHttpClient.buildRealm /** Build `AsyncHttpClientConfig` */ def buildConfig(config: Config): AsyncHttpClientConfig = { val builder = new DefaultAsyncHttpClientConfig.Builder() // timeouts builder.setConnectTimeout(toMillis(config.connectTimeout)) builder.setRequestTimeout(toMillis(config.requestTimeout)) builder.setReadTimeout(toMillis(config.readTimeout)) // builder.setWebSocketTimeout(toMillis(config.webSocketIdleTimeout)) // http builder.setFollowRedirect(config.followRedirects) builder.setMaxRedirects(config.maxRedirects) builder.setCompressionEnforced(config.compressionEnforced) config.userAgentOpt foreach { builder.setUserAgent } config.authOpt foreach { x => builder.setRealm(buildRealm(x)) } builder.setMaxRequestRetry(config.maxRequestRetry) builder.setDisableUrlEncodingForBoundRequests(config.disableUrlEncoding) builder.setUseProxyProperties(config.useProxyProperties) // keep-alive builder.setKeepAlive(config.keepAlive) builder.setPooledConnectionIdleTimeout(toMillis(config.pooledConnectionIdleTimeout)) builder.setConnectionTtl(toMillis(config.connectionTtl)) builder.setMaxConnectionsPerHost(config.maxConnectionsPerHost) builder.setMaxConnections(config.maxConnections) configureSsl(config.ssl, builder) // websocket builder.setWebSocketMaxFrameSize(config.webSocketMaxFrameSize.bytes.toInt) builder.build() } def toMillis(duration: Duration): Int = if (duration.isFinite) duration.toMillis.toInt else -1 def configureSsl(sslConfig: SSLConfigSettings, builder: DefaultAsyncHttpClientConfig.Builder): Unit = { // context! val (sslContext, _) = SSL.buildContext(sslConfig) // protocols! val defaultParams = sslContext.getDefaultSSLParameters val defaultProtocols = defaultParams.getProtocols val protocols = configureProtocols(defaultProtocols, sslConfig) defaultParams.setProtocols(protocols) builder.setEnabledProtocols(protocols) // ciphers! val defaultCiphers = defaultParams.getCipherSuites builder.setEnabledCipherSuites(defaultCiphers) builder.setAcceptAnyCertificate(sslConfig.loose.acceptAnyCertificate) // If you wan't to accept any certificate you also want to use a loose netty based loose SslContext // Never use this in production. 
if (sslConfig.loose.acceptAnyCertificate) { builder.setSslContext(SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build()) } else { builder.setSslEngineFactory(new JsseSslEngineFactory(sslContext)) } } def configureProtocols(existingProtocols: Array[String], sslConfig: SSLConfigSettings): Array[String] = { val definedProtocols = sslConfig.enabledProtocols match { case Some(configuredProtocols) => // If we are given a specific list of protocols, then return it in exactly that order, // assuming that it's actually possible in the SSL context. configuredProtocols.filter(existingProtocols.contains).toArray case None => // Otherwise, we return the default protocols in the given list. Protocols.recommendedProtocols.filter(existingProtocols.contains).toArray } definedProtocols } }
eed3si9n/gigahorse
asynchttpclient/src/main/scala/gigahorse/support/asynchttpclient/AhcConfig.scala
Scala
apache-2.0
4,485
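A small sketch of the duration-to-milliseconds convention implemented by `AhcConfig.toMillis` above, assuming the gigahorse async-http-client module is on the classpath.

import scala.concurrent.duration._

import gigahorse.support.asynchttpclient.AhcConfig

object AhcConfigSketch {
  def main(args: Array[String]): Unit = {
    // Finite durations become whole milliseconds...
    println(AhcConfig.toMillis(30.seconds)) // 30000
    // ...while a non-finite duration maps to -1, per the isFinite check above.
    println(AhcConfig.toMillis(Duration.Inf)) // -1
  }
}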
/*
 * Copyright 2001-2005 Stephen Colebourne
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.joda.time.field

import org.joda.time.DurationFieldType

/**
 * Duration field class representing a field with a fixed unit length.
 * <p>
 * PreciseDurationField is thread-safe and immutable.
 *
 * @param type the field type
 * @param unitMillis the unit milliseconds
 *
 * @author Stephen Colebourne
 * @author Brian S O'Neill
 * @since 1.0
 */
@SerialVersionUID(-8346152187724495365L)
// Assumes BaseDurationField takes the field type in its constructor, as in upstream Joda-Time.
class PreciseDurationField(`type`: DurationFieldType, unitMillis: Long)
  extends BaseDurationField(`type`) {

  /** The size of the unit */
  private final val iUnitMillis: Long = unitMillis

  /**
   * This field is precise.
   *
   * @return true always
   */
  final def isPrecise: Boolean = true

  /**
   * Returns the amount of milliseconds per unit value of this field.
   *
   * @return the unit size of this field, in milliseconds
   */
  final def getUnitMillis: Long = iUnitMillis

  /**
   * Get the value of this field from the milliseconds.
   *
   * @param duration the milliseconds to query, which may be negative
   * @param instant ignored
   * @return the value of the field, in the units of the field, which may be
   * negative
   */
  def getValueAsLong(duration: Long, instant: Long): Long = duration / iUnitMillis

  /**
   * Get the millisecond duration of this field from its value.
   *
   * @param value the value of the field, which may be negative
   * @param instant ignored
   * @return the milliseconds that the field represents, which may be
   * negative
   */
  def getMillis(value: Int, instant: Long): Long = value * iUnitMillis

  /**
   * Get the millisecond duration of this field from its value.
   *
   * @param value the value of the field, which may be negative
   * @param instant ignored
   * @return the milliseconds that the field represents, which may be
   * negative
   */
  def getMillis(value: Long, instant: Long): Long = FieldUtils.safeMultiply(value, iUnitMillis)

  def add(instant: Long, value: Int): Long = {
    val addition: Long = value * iUnitMillis
    FieldUtils.safeAdd(instant, addition)
  }

  def add(instant: Long, value: Long): Long = {
    val addition: Long = FieldUtils.safeMultiply(value, iUnitMillis)
    FieldUtils.safeAdd(instant, addition)
  }

  def getDifferenceAsLong(minuendInstant: Long, subtrahendInstant: Long): Long = {
    val difference: Long = FieldUtils.safeSubtract(minuendInstant, subtrahendInstant)
    difference / iUnitMillis
  }

  /**
   * Compares this duration field to another.
   * Two fields are equal if of the same type and duration.
   *
   * @param obj the object to compare to
   * @return if equal
   */
  override def equals(obj: Any): Boolean = obj match {
    case other: PreciseDurationField =>
      (this eq other) || ((getType eq other.getType) && (iUnitMillis == other.iUnitMillis))
    case _ => false
  }

  /**
   * Gets a hash code for this instance.
   *
   * @return a suitable hashcode
   */
  override def hashCode: Int = {
    val millis: Long = iUnitMillis
    var hash: Int = (millis ^ (millis >>> 32)).toInt
    hash += getType.hashCode
    hash
  }
}
aparo/scalajs-joda
src/main/scala/org/joda/time/field/PreciseDurationField.scala
Scala
apache-2.0
4,063
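A hypothetical usage sketch for the class above; it assumes the two-argument constructor as reconstructed, that the port exposes `DurationFieldType.seconds()` as upstream Joda-Time does, and that `FieldUtils` behaves as in Joda-Time.

import org.joda.time.DurationFieldType
import org.joda.time.field.PreciseDurationField

object PreciseDurationFieldSketch {
  def main(args: Array[String]): Unit = {
    // A "seconds" field whose fixed unit is 1000 ms.
    val seconds = new PreciseDurationField(DurationFieldType.seconds(), 1000L)

    println(seconds.getMillis(5, 0L))                   // 5000: five units, in millis
    println(seconds.getValueAsLong(12345L, 0L))         // 12: whole units contained in 12345 ms
    println(seconds.getDifferenceAsLong(20000L, 5000L)) // 15: units between the two instants
    println(seconds.add(1000L, 3))                      // 4000: instant plus three units
  }
}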
package coursier.publish /** All things related to checksums. */ package object checksum
alexarchambault/coursier
modules/publish/src/main/scala/coursier/publish/checksum/package.scala
Scala
apache-2.0
92
package com.seanshubin.uptodate.integration import java.io.IOException import java.nio.charset.{Charset, StandardCharsets} import java.nio.file._ import java.nio.file.attribute.BasicFileAttributes import org.scalatest.{FunSuite, Matchers} import scala.collection.mutable.ArrayBuffer class FileSystemTest extends FunSuite with Matchers { val charset = StandardCharsets.UTF_8 def createFileSystem(): FileSystemImpl = { val charsetName: String = "utf-8" val charset: Charset = Charset.forName(charsetName) val fileSystem = new FileSystemImpl(charset) fileSystem } test("store and load file") { val fileSystem = createFileSystem() val file: Path = Paths.get("target", "test-store-and-load-file.txt") val content: String = "Hello, world!" deleteFileIfExists(file) assert(fileSystem.fileExists(file) === false) fileSystem.storeString(file, content) assert(fileSystem.fileExists(file) === true) val actual = fileSystem.loadString(file) assert(content === actual) deleteFile(file) assert(fileSystem.fileExists(file) === false) } test("walk file tree") { val fileSystem = createFileSystem() val baseDir = Paths.get("target", "test-find-pom") val samplePomFile = baseDir.resolve("pom.xml") fileSystem.ensureDirectoriesExist(baseDir) fileSystem.storeString(samplePomFile, "<xml/>") val found = new ArrayBuffer[Path]() fileSystem.walkFileTree(baseDir, new FileVisitor[Path] { override def visitFileFailed(file: Path, exc: IOException): FileVisitResult = ??? override def visitFile(file: Path, attributes: BasicFileAttributes): FileVisitResult = { found.append(file) FileVisitResult.CONTINUE } override def preVisitDirectory(dir: Path, attrs: BasicFileAttributes): FileVisitResult = FileVisitResult.CONTINUE override def postVisitDirectory(dir: Path, exc: IOException): FileVisitResult = FileVisitResult.CONTINUE }) assert(found.map(_.toString).contains(samplePomFile.toString)) } test("data io") { val fileSystem = createFileSystem() val file: Path = Paths.get("target", "test-data-io.txt") val intWritten = 12345 val stringWritten = "Hello, world!" deleteFileIfExists(file) assert(fileSystem.fileExists(file) === false) val dataOutput = fileSystem.dataOutputFor(file) dataOutput.writeInt(intWritten) dataOutput.writeUTF(stringWritten) dataOutput.close() assert(fileSystem.fileExists(file) === true) val dataInput = fileSystem.dataInputFor(file) val intRead = dataInput.readInt() val stringRead = dataInput.readUTF() assert(intRead === intWritten) assert(stringRead === stringWritten) deleteFile(file) assert(fileSystem.fileExists(file) === false) } test("last modified") { val fileSystem = createFileSystem() val file: Path = Paths.get("target", "test-last-modified.txt") val content: String = "Hello, world!" deleteFileIfExists(file) assert(fileSystem.fileExists(file) === false) val beforeCreateSeconds = System.currentTimeMillis() / 1000 fileSystem.storeString(file, content) val afterCreateSeconds = System.currentTimeMillis() / 1000 assert(fileSystem.fileExists(file) === true) val lastModifiedSeconds = fileSystem.lastModified(file) / 1000 lastModifiedSeconds should be >= beforeCreateSeconds lastModifiedSeconds should be <= afterCreateSeconds deleteFile(file) assert(fileSystem.fileExists(file) === false) } def deleteFile(path: Path): Unit = Files.delete(path) def deleteFileIfExists(path: Path): Unit = Files.deleteIfExists(path) def stringToBytes(s: String, charset: Charset): Array[Byte] = s.getBytes(charset) }
SeanShubin/up-to-date
integration/src/test/scala/com/seanshubin/uptodate/integration/FileSystemTest.scala
Scala
unlicense
3,740
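A distilled usage sketch of `FileSystemImpl` as exercised by the test above; it assumes the project's `target` directory convention and that the class is available on the classpath.

import java.nio.charset.StandardCharsets
import java.nio.file.{Files, Paths}

import com.seanshubin.uptodate.integration.FileSystemImpl

object FileSystemSketch {
  def main(args: Array[String]): Unit = {
    val fileSystem = new FileSystemImpl(StandardCharsets.UTF_8)
    val dir = Paths.get("target")
    val file = dir.resolve("file-system-sketch.txt")

    fileSystem.ensureDirectoriesExist(dir)
    fileSystem.storeString(file, "Hello, world!")
    println(fileSystem.fileExists(file))   // true
    println(fileSystem.loadString(file))   // Hello, world!
    println(fileSystem.lastModified(file)) // epoch millis of the write

    Files.deleteIfExists(file)
  }
}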
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.jackrabbit.oak.bark.web.view import scala.collection.JavaConversions.{ asScalaBuffer, iterableAsScalaIterable, seqAsJavaList } import org.apache.jackrabbit.oak.api.{ PropertyState, Tree, Type } import org.apache.jackrabbit.oak.bark.web.BaseTemplatePage import org.apache.jackrabbit.oak.commons.PathUtils import org.apache.wicket.Component import org.apache.wicket.markup.html.WebMarkupContainer import org.apache.wicket.markup.html.basic.Label import org.apache.wicket.markup.html.form.{ Button, DropDownChoice, RequiredTextField, StatelessForm } import org.apache.wicket.markup.html.link.BookmarkablePageLink import org.apache.wicket.markup.html.panel.FeedbackPanel import org.apache.wicket.markup.repeater.Item import org.apache.wicket.markup.repeater.data.{ DataView, ListDataProvider } import org.apache.wicket.model.{ LoadableDetachableModel, Model, PropertyModel } import org.apache.wicket.request.http.flow.AbortWithHttpErrorCodeException import org.apache.wicket.request.mapper.parameter.PageParameters import org.apache.jackrabbit.oak.spi.query.PropertyValues import org.apache.jackrabbit.oak.namepath.NamePathMapper import org.apache.wicket.markup.html.form.IChoiceRenderer import org.apache.jackrabbit.oak.plugins.value.Conversions import javax.jcr.PropertyType import org.apache.jackrabbit.oak.plugins.memory.PropertyStates import org.apache.wicket.markup.html.link.StatelessLink import org.apache.wicket.model.IModel class View(pp: PageParameters) extends BaseTemplatePage(pp) { def this() = this(null); val path: String = if (pp != null) { pp.get("p").toString("/"); } else { "/" } val root: LoadableDetachableModel[Tree] = new LoadableDetachableModel[Tree]() { def load(): Tree = { if (oakRoot.isEmpty) { throw new AbortWithHttpErrorCodeException(404); } try { val t = oakRoot.get.getTree(path); if (t.exists()) { return t; } throw new AbortWithHttpErrorCodeException(404); } catch { case e: IllegalArgumentException ⇒ throw new AbortWithHttpErrorCodeException(404); } } } // // -- setStatelessHint(true); buildBC(root.getObject(), path); buildChildren(root.getObject(), path); buildProps(root.getObject()); add(buildFormContainer().setVisibilityAllowed(!getS.isRO)); // // -- private[view] def buildBC(root: Tree, path: String) { add(new BookmarkablePageLink("root", classOf[View])); add(new Label("current", root.getName())); val c: List[String] = PathUtils.elements(path).toList.dropRight(1); add(new DataView[String]("paths", new ListDataProvider(c)) { override def populateItem(item: Item[String]) { val p: String = item.getModelObject(); val link = selfBPL("segment", path.substring(0, path.indexOf(p) + p.length())); link.add(new Label("name", p)); item.add(link); } }); } private[view] def buildChildren(root: Tree, path: String) { 
val c: List[String] = root.getChildren().map(x ⇒ x.getName()).toList; add(new DataView[String]("children", new ListDataProvider(c)) { override def populateItem(item: Item[String]) { val p: String = item.getModelObject(); val link = selfBPL("child", PathUtils.concat(path, p)); link.add(new Label("name", p)); item.add(link); } }); } private[view] def selfBPL(id: String, path: String): BookmarkablePageLink[View] = { val pp: PageParameters = new PageParameters(); if (!"/".equals(path)) { pp.set("p", path); } return new BookmarkablePageLink(id, classOf[View], pp); } private[view] def buildProps(root: Tree) { val p: List[(String, String, Type[_])] = root.getProperties().map(x ⇒ (x.getName(), psAsString(x), x.getType())).toList; add(new DataView[(String, String, Type[_])]("properties", new ListDataProvider(p)) { override def populateItem(item: Item[(String, String, Type[_])]) { val p: (String, String, Type[_]) = item.getModelObject(); item.add(new Label("name", p._1)); item.add(new Label("value", p._2)); item.add(new Label("type", p._3.toString())); } }); } private[view] def psAsString(ps: PropertyState): String = { if (ps.isArray()) { return "[" + ps.getValue(Type.STRINGS).foldLeft("")((s, v) ⇒ v + ", " + s) + "]"; } return ps.getValue(Type.STRING); } // //-- // var addName: String = ""; private[view] def buildFormContainer(): Component = { val con = new WebMarkupContainer("addFormContainer"); con.add(buildForm); con.add(buildPropertyForm); con.add(new FeedbackPanel("feedback")); return con; } private[view] def buildForm(): Component = { val form = new StatelessForm[Void]("addForm"); form.setOutputMarkupId(true); val a = new RequiredTextField[String]("add", new PropertyModel[String]( this, "addName")) a.setLabel(new Model("Node name")); val submit = new Button("submit") { override def onSubmit() = try { val c = oakRoot.get.getTree(path).addChild(addName); c.setProperty("jcr:primaryType", "nt:unstructured", Type.NAME); setResponseToMe(); } catch { case e: Exception ⇒ { e.printStackTrace() error(e.getMessage()); } } }; form.add(a); form.add(submit); form.setDefaultButton(submit); return form; } var addPName: String = ""; var addPVal: String = ""; var addPType: Int = Type.STRING.tag; private[view] def buildPropertyForm(): Component = { val form = new StatelessForm[Void]("addPropertyForm"); form.setOutputMarkupId(true); val n = new RequiredTextField[String]("name", new PropertyModel[String]( this, "addPName")) n.setLabel(new Model("Property name")); val v = new RequiredTextField[String]("val", new PropertyModel[String]( this, "addPVal")) v.setLabel(new Model("Property value")); val t = new DropDownChoice[Int]("ptype", new PropertyModel[Int](this, "addPType"), List[Int](Type.STRING.tag, Type.BOOLEAN.tag, Type.DATE.tag, Type.DECIMAL.tag, Type.DOUBLE.tag, Type.LONG.tag, Type.NAME.tag, Type.REFERENCE.tag, Type.WEAKREFERENCE.tag), new TypeChoiceRenderer()); val submit = new Button("submit") { override def onSubmit() = try { val p: PropertyState = addPType match { case PropertyType.STRING ⇒ PropertyStates.createProperty(addPName, addPVal, addPType); case PropertyType.BOOLEAN ⇒ PropertyStates.createProperty(addPName, addPVal, addPType); case PropertyType.DECIMAL ⇒ PropertyStates.createProperty(addPName, addPVal, addPType); case PropertyType.DOUBLE ⇒ PropertyStates.createProperty(addPName, addPVal, addPType); case PropertyType.LONG ⇒ PropertyStates.createProperty(addPName, addPVal, addPType); case PropertyType.DATE ⇒ PropertyStates.createProperty(addPName, addPVal, addPType); case PropertyType.NAME ⇒ 
PropertyStates.createProperty(addPName, addPVal, Type.NAME); case PropertyType.REFERENCE ⇒ PropertyStates.createProperty(addPName, addPVal, Type.REFERENCE); case PropertyType.WEAKREFERENCE ⇒ PropertyStates.createProperty(addPName, addPVal, Type.WEAKREFERENCE); } oakRoot.get.getTree(path).setProperty(p); setResponseToMe(); } catch { case e: Exception ⇒ { e.printStackTrace() error(e.getMessage()); } } }; form.add(n); form.add(v); form.add(t); form.add(submit); form.setDefaultButton(submit); return form; } def setResponseToMe() = { val pp: PageParameters = new PageParameters(); if (!"/".equals(path)) { pp.set("p", path); } setResponsePage(classOf[View], pp); } // --------------------------------------------------------------------------------------------------- add(new WebMarkupContainer("dirty").setVisibilityAllowed(oakRoot.isDefined && oakRoot.get.hasPendingChanges())); add(new StatelessLink("commit") { override def onClick() = oakRoot match { case Some(r) ⇒ { r.commit(); setResponseToMe(); } case _ ⇒ ; } }); add(new StatelessLink("rebase") { override def onClick() = oakRoot match { case Some(r) ⇒ { r.rebase(); setResponseToMe(); } case _ ⇒ ; } }); add(new StatelessLink("refresh") { override def onClick() = oakRoot match { case Some(r) ⇒ { r.refresh(); setResponseToMe(); } case _ ⇒ ; } }); // --------------------------------------------------------------------------------------------------- private class TypeChoiceRenderer extends IChoiceRenderer[Int] { override def getDisplayValue(id: Int) = Type.fromTag(id, false).toString(); override def getIdValue(id: Int, index: Int) = id.toString; def getObject(id: String, choices: IModel[_ <: java.util.List[_ <: Int]]): Int = { id.toInt } } }
alexparvulescu/bark-oak
src/main/scala/org/apache/jackrabbit/oak/bark/web/view/View.scala
Scala
apache-2.0
9,951
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.gearpump.streaming.appmaster import org.apache.gearpump.Time.MilliSeconds import org.apache.gearpump.cluster.AppMasterToMaster.AppMasterSummary import org.apache.gearpump.cluster.{ApplicationStatus, UserConfig} import org.apache.gearpump.streaming.appmaster.AppMaster.ExecutorBrief import org.apache.gearpump.streaming.{ExecutorId, LifeTime, ProcessorId} import org.apache.gearpump.util.Graph import org.apache.gearpump.util.HistoryMetricsService.HistoryMetricsConfig /** Stream application summary, used for REST API */ case class StreamAppMasterSummary( appType: String = "streaming", appId: Int, appName: String = null, actorPath: String = null, clock: MilliSeconds = 0L, status: ApplicationStatus = ApplicationStatus.ACTIVE, startTime: MilliSeconds = 0L, uptime: MilliSeconds = 0L, user: String = null, homeDirectory: String = "", logFile: String = "", dag: Graph[ProcessorId, String] = null, executors: List[ExecutorBrief] = null, processors: Map[ProcessorId, ProcessorSummary] = Map.empty[ProcessorId, ProcessorSummary], // Hiearachy level for each processor processorLevels: Map[ProcessorId, Int] = Map.empty[ProcessorId, Int], historyMetricsConfig: HistoryMetricsConfig = null) extends AppMasterSummary case class TaskCount(count: Int) case class ProcessorSummary( id: ProcessorId, taskClass: String, parallelism: Int, description: String, taskConf: UserConfig, life: LifeTime, executors: List[ExecutorId], taskCount: Map[ExecutorId, TaskCount])
manuzhang/incubator-gearpump
streaming/src/main/scala/org/apache/gearpump/streaming/appmaster/StreamAppMasterSummary.scala
Scala
apache-2.0
2,386
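A minimal construction sketch, assuming the gearpump streaming module is on the classpath; only `appId` lacks a default value, so every other field falls back to the defaults shown above.

import org.apache.gearpump.streaming.appmaster.StreamAppMasterSummary

object StreamAppMasterSummarySketch {
  def main(args: Array[String]): Unit = {
    val summary = StreamAppMasterSummary(appId = 7, appName = "wordcount")

    println(summary.appType)         // "streaming"
    println(summary.status)          // ApplicationStatus.ACTIVE, the default
    println(summary.processors.size) // 0: no processors populated yet
  }
}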
/** * Copyright (C) 2015 Orbeon, Inc. * * This program is free software; you can redistribute it and/or modify it under the terms of the * GNU Lesser General Public License as published by the Free Software Foundation; either version * 2.1 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * See the GNU Lesser General Public License for more details. * * The full text of the license is available at http://www.gnu.org/copyleft/lesser.html */ package org.orbeon.oxf.fr import org.orbeon.oxf.fb.FormBuilder._ import org.orbeon.oxf.fr.XMLNames._ import org.orbeon.oxf.util.ScalaUtils import org.orbeon.saxon.om.{NodeInfo, SequenceIterator} import org.orbeon.scaxon.XML._ trait FormRunnerEmail { // Given a form body and instance data: // // - find all controls with the given conjunction of class names // - for each control, find the associated bind // - return all data holders in the instance data to which the bind would apply // // The use case is, for example, to find all data holders pointed to by controls with the class // `fr-email-recipient` and, optionally, `fr-email-attachment`. // //@XPathFunction def searchHoldersForClassTopLevelOnly( body : NodeInfo, data : NodeInfo, classNames : String ): SequenceIterator = searchHolderInDoc( controls = body descendant * filter IsControl, inDoc = body, contextItem = data.rootElement, classNames = classNames ) // Given a form head, form body and instance data: // // - find all section templates in use // - for each section // - determine the associated data holder in instance data // - find the inline binding associated with the section template // - find all controls with the given conjunction of class names in the section template // - for each control, find the associated bind in the section template // - return all data holders in the instance data to which the bind would apply // // The use case is, for example, to find all data holders pointed to by controls with the class // `fr-email-recipient` and, optionally, `fr-email-attachment`, which appear within section templates. // //@XPathFunction def searchHoldersForClassUseSectionTemplates( head : NodeInfo, body : NodeInfo, data : NodeInfo, classNames : String ): SequenceIterator = for { section ← findSectionsWithTemplates(body) sectionName ← getControlNameOpt(section).toList sectionHolder ← findDataHoldersInDocument(body, sectionName, data.rootElement) binding ← bindingForSection(head, section).toList holder ← searchHolderInDoc( controls = binding.rootElement / XBLTemplateTest descendant * filter IsControl, inDoc = binding, contextItem = sectionHolder, classNames = classNames ) } yield holder private def searchHolderInDoc( controls : Seq[NodeInfo], inDoc : NodeInfo, contextItem : NodeInfo, classNames : String ): Seq[NodeInfo] = { val classNamesList = ScalaUtils.split[List](classNames) for { control ← controls controlClasses = control.attClasses if classNamesList forall controlClasses.contains bindId ← control /@ "bind" map (_.stringValue) bindName ← controlNameFromIdOpt(bindId).toList holder ← findDataHoldersInDocument(inDoc, bindName, contextItem) } yield holder } private def bindingForSection(head: NodeInfo, section: NodeInfo) = { val mapping = sectionTemplateXBLBindingsByURIQualifiedName(head / XBLXBLTest) sectionTemplateBindingName(section) flatMap mapping.get } }
joansmith/orbeon-forms
src/main/scala/org/orbeon/oxf/fr/FormRunnerEmail.scala
Scala
lgpl-2.1
3,952
/* * Copyright 2014–2017 SlamData Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package quasar.yggdrasil.table import quasar.yggdrasil._ import quasar.precog.util._ import quasar.precog._ import quasar.yggdrasil.util.CPathUtils import quasar.precog.common._ import quasar.yggdrasil.bytecode._ import quasar.precog.util.RingDeque import TransSpecModule._ import quasar.blueeyes._, json._ import scalaz._, Scalaz._, Ordering._ import java.nio.CharBuffer import java.time.ZonedDateTime import scala.annotation.{switch, tailrec} import scala.collection.mutable import scala.specialized trait Slice { source => import Slice._ import TableModule._ def size: Int def isEmpty: Boolean = size == 0 def nonEmpty = !isEmpty def columns: Map[ColumnRef, Column] def logicalColumns: JType => Set[Column] = { jtpe => // TODO Use a flatMap and: // If ColumnRef(_, CArrayType(_)) and jType has a JArrayFixedT of this type, // then we need to map these to multiple columns. // Else if Schema.includes(...), then return List(col). // Otherwise return Nil. columns collect { case (ColumnRef(cpath, ctype), col) if Schema.includes(jtpe, cpath, ctype) => col } toSet } lazy val valueColumns: Set[Column] = columns collect { case (ColumnRef(CPath.Identity, _), col) => col } toSet def isDefinedAt(row: Int) = columns.values.exists(_.isDefinedAt(row)) def definedAt: BitSet = { val defined = BitSetUtil.create() columns foreach { case (_, col) => defined.or(col.definedAt(0, size)) } defined } def mapRoot(f: CF1): Slice = new Slice { val size = source.size val columns: Map[ColumnRef, Column] = { val resultColumns = for { col <- source.columns collect { case (ref, col) if ref.selector == CPath.Identity => col } result <- f(col) } yield result resultColumns.groupBy(_.tpe) map { case (tpe, cols) => (ColumnRef(CPath.Identity, tpe), cols.reduceLeft((c1, c2) => Column.unionRightSemigroup.append(c1, c2))) } } } def mapColumns(f: CF1): Slice = new Slice { val size = source.size val columns: Map[ColumnRef, Column] = { val resultColumns: Seq[(ColumnRef, Column)] = for { (ref, col) <- source.columns.toSeq result <- f(col) } yield (ref.copy(ctype = result.tpe), result) resultColumns.groupBy(_._1) map { case (ref, pairs) => (ref, pairs.map(_._2).reduceLeft((c1, c2) => Column.unionRightSemigroup.append(c1, c2))) } toMap } } def toArray[A](implicit tpe0: CValueType[A]) = new Slice { val size = source.size val cols0 = (source.columns).toList sortBy { case (ref, _) => ref.selector } val cols = cols0 map { case (_, col) => col } def inflate[@specialized A: CTag](cols: Array[Int => A], row: Int) = { val as = new Array[A](cols.length) var i = 0 while (i < cols.length) { as(i) = cols(i)(row) i += 1 } as } def loopForall[A <: Column](cols: Array[A])(row: Int) = !cols.isEmpty && Loop.forall(cols)(_ isDefinedAt row) val columns: Map[ColumnRef, Column] = { Map((ColumnRef(CPath(CPathArray), CArrayType(tpe0)), tpe0 match { case CLong => val longcols = cols.collect { case (col: LongColumn) => col }.toArray new HomogeneousArrayColumn[Long] { private val cols: Array[Int => 
Long] = longcols map { col => col.apply _ } val tpe = CArrayType(CLong) def isDefinedAt(row: Int) = loopForall[LongColumn](longcols)(row) def apply(row: Int): Array[Long] = inflate(cols, row) } case CDouble => val doublecols = cols.collect { case (col: DoubleColumn) => col }.toArray new HomogeneousArrayColumn[Double] { private val cols: Array[Int => Double] = doublecols map { x => x(_) } val tpe = CArrayType(CDouble) def isDefinedAt(row: Int) = loopForall[DoubleColumn](doublecols)(row) def apply(row: Int): Array[Double] = inflate(cols, row) } case CNum => val numcols = cols.collect { case (col: NumColumn) => col }.toArray new HomogeneousArrayColumn[BigDecimal] { private val cols: Array[Int => BigDecimal] = numcols map { x => x(_) } val tpe = CArrayType(CNum) def isDefinedAt(row: Int) = loopForall[NumColumn](numcols)(row) def apply(row: Int): Array[BigDecimal] = inflate(cols, row) } case CBoolean => val boolcols = cols.collect { case (col: BoolColumn) => col }.toArray new HomogeneousArrayColumn[Boolean] { private val cols: Array[Int => Boolean] = boolcols map { x => x(_) } val tpe = CArrayType(CBoolean) def isDefinedAt(row: Int) = loopForall[BoolColumn](boolcols)(row) def apply(row: Int): Array[Boolean] = inflate(cols, row) } case CString => val strcols = cols.collect { case (col: StrColumn) => col }.toArray new HomogeneousArrayColumn[String] { private val cols: Array[Int => String] = strcols map { x => x(_) } val tpe = CArrayType(CString) def isDefinedAt(row: Int) = loopForall[StrColumn](strcols)(row) def apply(row: Int): Array[String] = inflate(cols, row) } case _ => sys.error("unsupported type") })) } } /** * Transform this slice such that its columns are only defined for row indices * in the given BitSet. */ def redefineWith(s: BitSet): Slice = mapColumns(cf.util.filter(0, size, s)) def definedConst(value: CValue): Slice = new Slice { val size = source.size val columns = { Map( value match { case CString(s) => (ColumnRef(CPath.Identity, CString), new StrColumn { def isDefinedAt(row: Int) = source.isDefinedAt(row) def apply(row: Int) = s }) case CBoolean(b) => (ColumnRef(CPath.Identity, CBoolean), new BoolColumn { def isDefinedAt(row: Int) = source.isDefinedAt(row) def apply(row: Int) = b }) case CLong(l) => (ColumnRef(CPath.Identity, CLong), new LongColumn { def isDefinedAt(row: Int) = source.isDefinedAt(row) def apply(row: Int) = l }) case CDouble(d) => (ColumnRef(CPath.Identity, CDouble), new DoubleColumn { def isDefinedAt(row: Int) = source.isDefinedAt(row) def apply(row: Int) = d }) case CNum(n) => (ColumnRef(CPath.Identity, CNum), new NumColumn { def isDefinedAt(row: Int) = source.isDefinedAt(row) def apply(row: Int) = n }) case CDate(d) => (ColumnRef(CPath.Identity, CDate), new DateColumn { def isDefinedAt(row: Int) = source.isDefinedAt(row) def apply(row: Int) = d }) case CPeriod(p) => (ColumnRef(CPath.Identity, CPeriod), new PeriodColumn { def isDefinedAt(row: Int) = source.isDefinedAt(row) def apply(row: Int) = p }) case value: CArray[a] => (ColumnRef(CPath.Identity, value.cType), new HomogeneousArrayColumn[a] { val tpe = value.cType def isDefinedAt(row: Int) = source.isDefinedAt(row) def apply(row: Int) = value.value }) case CNull => (ColumnRef(CPath.Identity, CNull), new NullColumn { def isDefinedAt(row: Int) = source.isDefinedAt(row) }) case CEmptyObject => (ColumnRef(CPath.Identity, CEmptyObject), new EmptyObjectColumn { def isDefinedAt(row: Int) = source.isDefinedAt(row) }) case CEmptyArray => (ColumnRef(CPath.Identity, CEmptyArray), new EmptyArrayColumn { def 
isDefinedAt(row: Int) = source.isDefinedAt(row) }) case CUndefined => sys.error("Cannot define a constant undefined value") } ) } } def deref(node: CPathNode): Slice = new Slice { val size = source.size val columns = node match { case CPathIndex(i) => source.columns collect { case (ColumnRef(CPath(CPathArray, xs @ _ *), CArrayType(elemType)), col: HomogeneousArrayColumn[_]) => (ColumnRef(CPath(xs: _*), elemType), col.select(i)) case (ColumnRef(CPath(CPathIndex(`i`), xs @ _ *), ctype), col) => (ColumnRef(CPath(xs: _*), ctype), col) } case _ => source.columns collect { case (ColumnRef(CPath(`node`, xs @ _ *), ctype), col) => (ColumnRef(CPath(xs: _*), ctype), col) } } } def wrap(wrapper: CPathNode): Slice = new Slice { val size = source.size // This is a little weird; CPathArray actually wraps in CPathIndex(0). // Unfortunately, CArrayType(_) cannot wrap CNullTypes, so we can't just // arbitrarily wrap everything in a CPathArray. val columns = wrapper match { case CPathArray => source.columns map { case (ColumnRef(CPath(nodes @ _ *), ctype), col) => (ColumnRef(CPath(CPathIndex(0) +: nodes: _*), ctype), col) } case _ => source.columns map { case (ColumnRef(CPath(nodes @ _ *), ctype), col) => (ColumnRef(CPath(wrapper +: nodes: _*), ctype), col) } } } // ARRAYS: // TODO Here, if we delete a JPathIndex/JArrayFixedT, then we need to // construct a new Homo*ArrayColumn that has some indices missing. // // -- I've added a col.without(indicies) method to H*ArrayColumn to support // this operation. // def delete(jtype: JType): Slice = new Slice { def fixArrays(columns: Map[ColumnRef, Column]): Map[ColumnRef, Column] = { columns.toSeq .sortBy(_._1) .foldLeft((Map.empty[Vector[CPathNode], Int], Map.empty[ColumnRef, Column])) { case ((arrayPaths, acc), (ColumnRef(jpath, ctype), col)) => val (arrayPaths0, nodes) = jpath.nodes.foldLeft((arrayPaths, Vector.empty[CPathNode])) { case ((ap, nodes), CPathIndex(_)) => val idx = ap.getOrElse(nodes, -1) + 1 (ap + (nodes -> idx), nodes :+ CPathIndex(idx)) case ((ap, nodes), fieldNode) => (ap, nodes :+ fieldNode) } (arrayPaths0, acc + (ColumnRef(CPath(nodes: _*), ctype) -> col)) } ._2 } // Used for homogeneous arrays. Constructs a function, suitable for use in a // flatMap, that will modify the homogeneous array according to `jType`. 
// def flattenDeleteTree[A](jType: JType, cType: CValueType[A], cPath: CPath): A => Option[A] = { val delete: A => Option[A] = _ => None val retain: A => Option[A] = Some(_) (jType, cType, cPath) match { case (JUnionT(aJType, bJType), _, _) => flattenDeleteTree(aJType, cType, cPath) andThen (_ flatMap flattenDeleteTree(bJType, cType, cPath)) case (JTextT, CString, CPath.Identity) => delete case (JBooleanT, CBoolean, CPath.Identity) => delete case (JNumberT, CLong | CDouble | CNum, CPath.Identity) => delete case (JObjectUnfixedT, _, CPath(CPathField(_), _ *)) => delete case (JObjectFixedT(fields), _, CPath(CPathField(name), cPath @ _ *)) => fields get name map (flattenDeleteTree(_, cType, CPath(cPath: _*))) getOrElse (retain) case (JArrayUnfixedT, _, CPath(CPathArray | CPathIndex(_), _ *)) => delete case (JArrayFixedT(elems), cType, CPath(CPathIndex(i), cPath @ _ *)) => elems get i map (flattenDeleteTree(_, cType, CPath(cPath: _*))) getOrElse (retain) case (JArrayFixedT(elems), CArrayType(cElemType), CPath(CPathArray, cPath @ _ *)) => val mappers = elems mapValues (flattenDeleteTree(_, cElemType, CPath(cPath: _*))) xs => Some(xs.zipWithIndex map { case (x, j) => mappers get j match { case Some(f) => f(x) case None => x } }) case (JArrayHomogeneousT(jType), CArrayType(cType), CPath(CPathArray, _ *)) if Schema.ctypes(jType)(cType) => delete case _ => retain } } val size = source.size val columns = fixArrays(source.columns flatMap { case (ColumnRef(cpath, ctype), _) if Schema.includes(jtype, cpath, ctype) => None case (ref @ ColumnRef(cpath, ctype: CArrayType[a]), col: HomogeneousArrayColumn[_]) if ctype == col.tpe => val trans = flattenDeleteTree(jtype, ctype, cpath) Some((ref, new HomogeneousArrayColumn[a] { val tpe = ctype def isDefinedAt(row: Int) = col.isDefinedAt(row) def apply(row: Int): Array[a] = trans(col(row).asInstanceOf[Array[a]]) getOrElse sys.error("Oh dear, this cannot be happening to me.") })) case (ref, col) => Some((ref, col)) }) } def deleteFields(prefixes: scala.collection.Set[CPathField]) = new Slice { private val (removed, withoutPrefixes) = source.columns partition { case (ColumnRef(CPath(head @ CPathField(_), _ @_ *), _), _) => prefixes contains head case _ => false } private val becomeEmpty = BitSetUtil.filteredRange(0, source.size) { i => Column.isDefinedAt(removed.values.toArray, i) && !Column.isDefinedAt(withoutPrefixes.values.toArray, i) } private val ref = ColumnRef(CPath.Identity, CEmptyObject) // The object might have become empty. Make the // EmptyObjectColumn defined at the row position. 
private lazy val emptyObjectColumn = withoutPrefixes get ref map { c => new EmptyObjectColumn { def isDefinedAt(row: Int) = c.isDefinedAt(row) || becomeEmpty(row) } } getOrElse { new EmptyObjectColumn { def isDefinedAt(row: Int) = becomeEmpty(row) } } val size = source.size val columns = if (becomeEmpty.isEmpty) withoutPrefixes else withoutPrefixes + (ref -> emptyObjectColumn) } def typed(jtpe: JType): Slice = new Slice { val size = source.size val columns = source.columns filter { case (ColumnRef(path, ctpe), _) => Schema.requiredBy(jtpe, path, ctpe) } } def typedSubsumes(jtpe: JType): Slice = { val tuples: Seq[(CPath, CType)] = source.columns.map({ case (ColumnRef(path, ctpe), _) => (path, ctpe) })(collection.breakOut) val columns = if (Schema.subsumes(tuples, jtpe)) { source.columns filter { case (ColumnRef(path, ctpe), _) => Schema.requiredBy(jtpe, path, ctpe) } } else { Map.empty[ColumnRef, Column] } Slice(columns, source.size) } /** * returns a BoolColumn that is true if row subsumes jtype, false otherwise (unless undefined) * determine if the supplied jtype subsumes all the columns * if false, return a BoolColumn with all falses, defined by union * if true, collect just those columns that the jtype specifies * then on a row-by-row basis, using a BitSet, we use `Schema.findTypes(...)` to determine the Boolean values */ def isType(jtpe: JType): Slice = new Slice { val size = source.size val pathsAndTypes: Seq[(CPath, CType)] = source.columns.toSeq map { case (ColumnRef(selector, ctype), _) => (selector, ctype) } // we cannot just use subsumes because there could be rows with undefineds in them val subsumes = Schema.subsumes(pathsAndTypes, jtpe) val definedBits = (source.columns).values.map(_.definedAt(0, size)).reduceOption(_ | _) getOrElse new BitSet val columns = if (subsumes) { val cols = source.columns filter { case (ColumnRef(path, ctpe), _) => Schema.requiredBy(jtpe, path, ctpe) } val included = Schema.findTypes(jtpe, CPath.Identity, cols, size) val includedBits = BitSetUtil.filteredRange(0, size)(included) Map(ColumnRef(CPath.Identity, CBoolean) -> BoolColumn.Either(definedBits, includedBits)) } else { Map(ColumnRef(CPath.Identity, CBoolean) -> BoolColumn.False(definedBits)) } } def arraySwap(index: Int) = new Slice { val size = source.size val columns = source.columns.collect { case (ColumnRef(cPath @ CPath(CPathArray, _ *), cType), col: HomogeneousArrayColumn[a]) => (ColumnRef(cPath, cType), new HomogeneousArrayColumn[a] { val tpe = col.tpe def isDefinedAt(row: Int) = col.isDefinedAt(row) def apply(row: Int) = { val xs = col(row) if (index >= xs.length) xs else { val ys = tpe.elemType.classTag.newArray(xs.length) var i = 1 while (i < ys.length) { ys(i) = xs(i) i += 1 } ys(0) = xs(index) ys(index) = xs(0) ys } } }) case (ColumnRef(CPath(CPathIndex(0), xs @ _ *), ctype), col) => (ColumnRef(CPath(CPathIndex(index) +: xs: _*), ctype), col) case (ColumnRef(CPath(CPathIndex(`index`), xs @ _ *), ctype), col) => (ColumnRef(CPath(CPathIndex(0) +: xs: _*), ctype), col) case c @ (ColumnRef(CPath(CPathIndex(i), xs @ _ *), ctype), col) => c } } // Takes an array where the indices correspond to indices in this slice, // and the values give the indices in the sparsened slice. 
def sparsen(index: Array[Int], toSize: Int): Slice = new Slice { val size = toSize val columns = source.columns lazyMapValues { col => cf.util.Sparsen(index, toSize)(col).get //sparsen is total } } def remap(indices: ArrayIntList) = new Slice { val size = indices.size val columns: Map[ColumnRef, Column] = source.columns lazyMapValues { col => cf.util.RemapIndices(indices).apply(col).get } } def map(from: CPath, to: CPath)(f: CF1): Slice = new Slice { val size = source.size val columns: Map[ColumnRef, Column] = { val resultColumns = for { col <- source.columns collect { case (ref, col) if ref.selector.hasPrefix(from) => col } result <- f(col) } yield result resultColumns.groupBy(_.tpe) map { case (tpe, cols) => (ColumnRef(to, tpe), cols.reduceLeft((c1, c2) => Column.unionRightSemigroup.append(c1, c2))) } } } def map2(froml: CPath, fromr: CPath, to: CPath)(f: CF2): Slice = new Slice { val size = source.size val columns: Map[ColumnRef, Column] = { val resultColumns = for { left <- source.columns collect { case (ref, col) if ref.selector.hasPrefix(froml) => col } right <- source.columns collect { case (ref, col) if ref.selector.hasPrefix(fromr) => col } result <- f(left, right) } yield result resultColumns.groupBy(_.tpe) map { case (tpe, cols) => (ColumnRef(to, tpe), cols.reduceLeft((c1, c2) => Column.unionRightSemigroup.append(c1, c2))) } } } def filterDefined(filter: Slice, definedness: Definedness) = { new Slice { private val colValues = filter.columns.values.toArray lazy val defined = definedness match { case AnyDefined => BitSetUtil.filteredRange(0, source.size) { i => colValues.exists(_.isDefinedAt(i)) } case AllDefined => if (colValues.isEmpty) new BitSet else BitSetUtil.filteredRange(0, source.size) { i => colValues.forall(_.isDefinedAt(i)) } } val size = source.size val columns: Map[ColumnRef, Column] = source.columns lazyMapValues { col => cf.util.filter(0, source.size, defined)(col).get } } } def compact(filter: Slice, definedness: Definedness): Slice = { new Slice { private val cols = filter.columns lazy val retained = definedness match { case AnyDefined => { val acc = new ArrayIntList Loop.range(0, filter.size) { i => if (cols.values.toArray.exists(_.isDefinedAt(i))) acc.add(i) } acc } case AllDefined => { val acc = new ArrayIntList val (numCols, otherCols) = cols partition { case (ColumnRef(_, ctype), _) => ctype.isNumeric } val grouped = numCols groupBy { case (ColumnRef(cpath, _), _) => cpath } Loop.range(0, filter.size) { i => val numBools = grouped.values map { case refs => refs.values.toArray.exists(_.isDefinedAt(i)) } val numBool = numBools reduce { _ && _ } val otherBool = otherCols.values.toArray.forall(_.isDefinedAt(i)) if (otherBool && numBool) acc.add(i) } acc } } lazy val size = retained.size lazy val columns: Map[ColumnRef, Column] = source.columns lazyMapValues { col => (col |> cf.util.RemapIndices(retained)).get } } } def retain(refs: Set[ColumnRef]) = { new Slice { val size = source.size val columns: Map[ColumnRef, Column] = source.columns.filterKeys(refs) } } /** * Assumes that this and the previous slice (if any) are sorted. 
*/ def distinct(prevFilter: Option[Slice], filter: Slice): Slice = { new Slice { lazy val retained: ArrayIntList = { val acc = new ArrayIntList def findSelfDistinct(prevRow: Int, curRow: Int) = { val selfComparator = rowComparatorFor(filter, filter)(_.columns.keys map (_.selector)) @tailrec def findSelfDistinct0(prevRow: Int, curRow: Int): ArrayIntList = { if (curRow >= filter.size) acc else { val retain = selfComparator.compare(prevRow, curRow) != EQ if (retain) acc.add(curRow) findSelfDistinct0(if (retain) curRow else prevRow, curRow + 1) } } findSelfDistinct0(prevRow, curRow) } def findStraddlingDistinct(prev: Slice, prevRow: Int, curRow: Int) = { val straddleComparator = rowComparatorFor(prev, filter)(_.columns.keys map (_.selector)) @tailrec def findStraddlingDistinct0(prevRow: Int, curRow: Int): ArrayIntList = { if (curRow >= filter.size) acc else { val retain = straddleComparator.compare(prevRow, curRow) != EQ if (retain) acc.add(curRow) if (retain) findSelfDistinct(curRow, curRow + 1) else findStraddlingDistinct0(prevRow, curRow + 1) } } findStraddlingDistinct0(prevRow, curRow) } val lastDefined = prevFilter.flatMap { slice => (slice.size - 1 to 0 by -1).find(row => slice.columns.values.exists(_.isDefinedAt(row))) }.map { (prevFilter.get, _) } val firstDefined = (0 until filter.size).find(i => filter.columns.values.exists(_.isDefinedAt(i))) (lastDefined, firstDefined) match { case (Some((prev, i)), Some(j)) => findStraddlingDistinct(prev, i, j) case (_, Some(j)) => acc.add(j); findSelfDistinct(j, j + 1) case _ => acc } } lazy val size = retained.size lazy val columns: Map[ColumnRef, Column] = source.columns lazyMapValues { col => (col |> cf.util.RemapIndices(retained)).get } } } def order: spire.algebra.Order[Int] = if (columns.size == 1) { val col = columns.head._2 Column.rowOrder(col) } else { // The 2 cases are handled differently. In the first case, we don't have // any pesky homogeneous arrays and only 1 column per path. In this case, // we don't need to use the CPathTraversal machinery. type GroupedCols = Either[Map[CPath, Column], Map[CPath, Set[Column]]] val grouped = columns.foldLeft(Left(Map.empty): GroupedCols) { case (Left(acc), (ColumnRef(path, CArrayType(_)), col)) => val acc0 = acc.map { case (k, v) => (k, Set(v)) } Right(acc0 + (path -> Set(col))) case (Left(acc), (ColumnRef(path, _), col)) => acc get path map { col0 => val acc0 = acc.map { case (k, v) => (k, Set(v)) } Right(acc0 + (path -> Set(col0, col))) } getOrElse Left(acc + (path -> col)) case (Right(acc), (ColumnRef(path, _), col)) => Right(acc + (path -> (acc.getOrElse(path, Set.empty[Column]) + col))) } grouped match { case Left(cols0) => val cols = cols0.toList .sortBy(_._1) .map { case (_, col) => Column.rowOrder(col) } .toArray new spire.algebra.Order[Int] { def compare(i: Int, j: Int): Int = { var k = 0 while (k < cols.length) { val cmp = cols(k).compare(i, j) if (cmp != 0) return cmp k += 1 } 0 } } case Right(cols) => val paths = cols.keys.toList val traversal = CPathTraversal(paths) traversal.rowOrder(paths, cols) } } def sortWith(keySlice: Slice, sortOrder: DesiredSortOrder = SortAscending): (Slice, Slice) = { // We filter out rows that are completely undefined. 
val order: Array[Int] = Array.range(0, source.size) filter { row => keySlice.isDefinedAt(row) && source.isDefinedAt(row) } val rowOrder = if (sortOrder == SortAscending) keySlice.order else keySlice.order.reverse spire.math.MergeSort.sort(order)(rowOrder, implicitly) val remapOrder = new ArrayIntList(order.size) var i = 0 while (i < order.length) { remapOrder.add(i, order(i)) i += 1 } val sortedSlice = source.remap(remapOrder) val sortedKeySlice = keySlice.remap(remapOrder) // TODO Remove the duplicate distinct call. Should be able to handle this in 1 pass. (sortedSlice.distinct(None, sortedKeySlice), sortedKeySlice.distinct(None, sortedKeySlice)) } def sortBy(prefixes: Vector[CPath], sortOrder: DesiredSortOrder = SortAscending): Slice = { // TODO This is slow... Faster would require a prefix map or something... argh. val keySlice = new Slice { val size = source.size val columns: Map[ColumnRef, Column] = { prefixes.zipWithIndex.flatMap({ case (prefix, i) => source.columns collect { case (ColumnRef(path, tpe), col) if path hasPrefix prefix => (ColumnRef(CPathIndex(i) \ path, tpe), col) } })(collection.breakOut) } } source.sortWith(keySlice)._1 } /** * Split the table at the specified index, exclusive. The * new prefix will contain all indices less than that index, and * the new suffix will contain indices >= that index. */ def split(idx: Int): (Slice, Slice) = { (take(idx), drop(idx)) } def take(sz: Int): Slice = { if (sz >= source.size) { source } else { new Slice { val size = sz val columns = source.columns lazyMapValues { col => (col |> cf.util.RemapFilter(_ < sz, 0)).get } } } } def drop(sz: Int): Slice = { if (sz <= 0) { source } else { new Slice { val size = source.size - sz val columns = source.columns lazyMapValues { col => (col |> cf.util.RemapFilter(_ < size, sz)).get } } } } def takeRange(startIndex: Int, numberToTake: Int): Slice = { val take2 = math.min(this.size, startIndex + numberToTake) - startIndex new Slice { val size = take2 val columns = source.columns lazyMapValues { col => (col |> cf.util.RemapFilter(_ < take2, startIndex)).get } } } def zip(other: Slice): Slice = { new Slice { val size = source.size min other.size val columns: Map[ColumnRef, Column] = other.columns.foldLeft(source.columns) { case (acc, (ref, col)) => acc + (ref -> (acc get ref flatMap { c => cf.util.UnionRight(c, col) } getOrElse col)) } } } /** * This creates a new slice with the same size and columns as this slice, but * whose values have been materialized and stored in arrays. 
*/ def materialized: Slice = { new Slice { val size = source.size val columns = source.columns lazyMapValues { case col: BoolColumn => val defined = col.definedAt(0, source.size) val values = BitSetUtil.filteredRange(0, source.size) { row => defined(row) && col(row) } ArrayBoolColumn(defined, values) case col: LongColumn => val defined = col.definedAt(0, source.size) val values = new Array[Long](source.size) Loop.range(0, source.size) { row => if (defined(row)) values(row) = col(row) } ArrayLongColumn(defined, values) case col: DoubleColumn => val defined = col.definedAt(0, source.size) val values = new Array[Double](source.size) Loop.range(0, source.size) { row => if (defined(row)) values(row) = col(row) } ArrayDoubleColumn(defined, values) case col: NumColumn => val defined = col.definedAt(0, source.size) val values = new Array[BigDecimal](source.size) Loop.range(0, source.size) { row => if (defined(row)) values(row) = col(row) } ArrayNumColumn(defined, values) case col: StrColumn => val defined = col.definedAt(0, source.size) val values = new Array[String](source.size) Loop.range(0, source.size) { row => if (defined(row)) values(row) = col(row) } ArrayStrColumn(defined, values) case col: DateColumn => val defined = col.definedAt(0, source.size) val values = new Array[ZonedDateTime](source.size) Loop.range(0, source.size) { row => if (defined(row)) values(row) = col(row) } ArrayDateColumn(defined, values) case col: PeriodColumn => val defined = col.definedAt(0, source.size) val values = new Array[Period](source.size) Loop.range(0, source.size) { row => if (defined(row)) values(row) = col(row) } ArrayPeriodColumn(defined, values) case col: EmptyArrayColumn => val ncol = MutableEmptyArrayColumn.empty() Loop.range(0, source.size) { row => ncol.update(row, col.isDefinedAt(row)) } ncol case col: EmptyObjectColumn => val ncol = MutableEmptyObjectColumn.empty() Loop.range(0, source.size) { row => ncol.update(row, col.isDefinedAt(row)) } ncol case col: NullColumn => val ncol = MutableNullColumn.empty() Loop.range(0, source.size) { row => ncol.update(row, col.isDefinedAt(row)) } ncol case col => sys.error("Cannot materialise non-standard (extensible) column") } } } def renderJson[M[+ _]](delimiter: String)(implicit M: Monad[M]): (StreamT[M, CharBuffer], Boolean) = { if (columns.isEmpty) { (StreamT.empty[M, CharBuffer], false) } else { val BufferSize = 1024 * 10 // 10 KB val optSchema = { def insert(target: SchemaNode, ref: ColumnRef, col: Column): SchemaNode = { val ColumnRef(selector, ctype) = ref selector.nodes match { case CPathField(name) :: tail => { target match { case SchemaNode.Obj(nodes) => { val subTarget = nodes get name getOrElse SchemaNode.Union(Set()) val result = insert(subTarget, ColumnRef(CPath(tail), ctype), col) SchemaNode.Obj(nodes + (name -> result)) } case SchemaNode.Union(nodes) => { val objNode = nodes find { case _: SchemaNode.Obj => true case _ => false } val subTarget = objNode getOrElse SchemaNode.Obj(Map()) SchemaNode.Union(nodes - subTarget + insert(subTarget, ref, col)) } case node => SchemaNode.Union(Set(node, insert(SchemaNode.Obj(Map()), ref, col))) } } case CPathIndex(idx) :: tail => { target match { case SchemaNode.Arr(map) => { val subTarget = map get idx getOrElse SchemaNode.Union(Set()) val result = insert(subTarget, ColumnRef(CPath(tail), ctype), col) SchemaNode.Arr(map + (idx -> result)) } case SchemaNode.Union(nodes) => { val objNode = nodes find { case _: SchemaNode.Arr => true case _ => false } val subTarget = objNode getOrElse SchemaNode.Arr(Map()) 
SchemaNode.Union(nodes - subTarget + insert(subTarget, ref, col)) } case node => SchemaNode.Union(Set(node, insert(SchemaNode.Arr(Map()), ref, col))) } } case CPathMeta(_) :: _ => target case CPathArray :: _ => sys.error("todo") case Nil => { val node = SchemaNode.Leaf(ctype, col) target match { case SchemaNode.Union(nodes) => SchemaNode.Union(nodes + node) case oldNode => SchemaNode.Union(Set(oldNode, node)) } } } } def normalize(schema: SchemaNode): Option[SchemaNode] = schema match { case SchemaNode.Obj(nodes) => { val nodes2 = nodes flatMap { case (key, value) => normalize(value) map { key -> _ } } val back = if (nodes2.isEmpty) None else Some(SchemaNode.Obj(nodes2)) back foreach { obj => obj.keys = new Array[String](nodes2.size) obj.values = new Array[SchemaNode](nodes2.size) } var i = 0 back foreach { obj => for ((key, value) <- nodes2) { obj.keys(i) = key obj.values(i) = value i += 1 } } back } case SchemaNode.Arr(map) => { val map2 = map flatMap { case (idx, value) => normalize(value) map { idx -> _ } } val back = if (map2.isEmpty) None else Some(SchemaNode.Arr(map2)) back foreach { arr => arr.nodes = new Array[SchemaNode](map2.size) } var i = 0 back foreach { arr => val values = map2.toSeq sortBy { _._1 } map { _._2 } for (value <- values) { arr.nodes(i) = value i += 1 } } back } case SchemaNode.Union(nodes) => { val nodes2 = nodes flatMap normalize if (nodes2.isEmpty) None else if (nodes2.size == 1) nodes2.headOption else { val union = SchemaNode.Union(nodes2) union.possibilities = nodes2.toArray Some(union) } } case lf: SchemaNode.Leaf => Some(lf) } val schema = columns.foldLeft(SchemaNode.Union(Set()): SchemaNode) { case (acc, (ref, col)) => insert(acc, ref, col) } normalize(schema) } // don't remove! @tailrec bugs if you use optSchema.map if (optSchema.isDefined) { val schema = optSchema.get val depth = { def loop(schema: SchemaNode): Int = schema match { case obj: SchemaNode.Obj => 4 + (obj.values map loop max) case arr: SchemaNode.Arr => 2 + (arr.nodes map loop max) case union: SchemaNode.Union => union.possibilities map loop max case SchemaNode.Leaf(_, _) => 0 } loop(schema) } // we have the schema, now emit var buffer = CharBuffer.allocate(BufferSize) val vector = new mutable.ArrayBuffer[CharBuffer](math.max(1, size / 10)) @inline def checkPush(length: Int) { if (buffer.remaining < length) { buffer.flip() vector += buffer buffer = CharBuffer.allocate(BufferSize) } } @inline def push(c: Char) { checkPush(1) buffer.put(c) } @inline def pushStr(str: String) { checkPush(str.length) buffer.put(str) } val in = new RingDeque[String](depth + 1) val inFlags = new RingDeque[Boolean](depth + 1) @inline def pushIn(str: String, flag: Boolean) { in.pushBack(str) inFlags.pushBack(flag) } @inline def popIn() { in.popBack() inFlags.popBack() } @inline @tailrec def flushIn() { if (!in.isEmpty) { val str = in.popFront() val flag = inFlags.popFront() if (flag) { renderString(str) } else { checkPush(str.length) buffer.put(str) } flushIn() } } // emitters @inline @tailrec def renderString(str: String, idx: Int = 0) { if (idx == 0) { push('"') } if (idx < str.length) { val c = str.charAt(idx) (c: @switch) match { case '"' => pushStr("\\\"") case '\\' => pushStr("\\\\") case '\b' => pushStr("\\b") case '\f' => pushStr("\\f") case '\n' => pushStr("\\n") case '\r' => pushStr("\\r") case '\t' => pushStr("\\t") case c => { if ((c >= '\u0000' && c < '\u001f') || (c >= '\u0080' && c < '\u00a0') || (c >= '\u2000' && c < '\u2100')) { pushStr("\\u") pushStr("%04x".format(Character.codePointAt(str, idx))) 
} else { push(c) } } } renderString(str, idx + 1) } else { push('"') } } @inline def renderLong(ln: Long) { @inline @tailrec def power10(ln: Long, seed: Long = 1): Long = { // note: we could be doing binary search here if (seed * 10 < 0) // overflow seed else if (seed * 10 > ln) seed else power10(ln, seed * 10) } @inline @tailrec def renderPositive(ln: Long, power: Long) { if (power > 0) { val c = Character.forDigit((ln / power % 10).toInt, 10) push(c) renderPositive(ln, power / 10) } } if (ln == Long.MinValue) { val MinString = "-9223372036854775808" checkPush(MinString.length) buffer.put(MinString) } else if (ln == 0) { push('0') } else if (ln < 0) { push('-') val ln2 = ln * -1 renderPositive(ln2, power10(ln2)) } else { renderPositive(ln, power10(ln)) } } // TODO is this a problem? @inline def renderDouble(d: Double) { val str = d.toString checkPush(str.length) buffer.put(str) } // TODO is this a problem? @inline def renderNum(d: BigDecimal) { val str = d.toString checkPush(str.length) buffer.put(str) } @inline def renderBoolean(b: Boolean) { if (b) { pushStr("true") } else { pushStr("false") } } @inline def renderNull() { pushStr("null") } @inline def renderEmptyObject() { pushStr("{}") } @inline def renderEmptyArray() { pushStr("[]") } @inline def renderDate(date: ZonedDateTime) { renderString(date.toString) } @inline def renderPeriod(period: Period) { renderString(period.toString) } @inline def renderArray[A](array: Array[A]) { renderString(array.deep.toString) } def traverseSchema(row: Int, schema: SchemaNode): Boolean = schema match { case obj: SchemaNode.Obj => { val keys = obj.keys val values = obj.values @inline @tailrec def loop(idx: Int, done: Boolean): Boolean = { if (idx < keys.length) { val key = keys(idx) val value = values(idx) if (done) { pushIn(",", false) } pushIn(key, true) pushIn(":", false) val emitted = traverseSchema(row, value) if (!emitted) { // less efficient popIn() popIn() if (done) { popIn() } } loop(idx + 1, done || emitted) } else { done } } pushIn("{", false) val done = loop(0, false) if (done) { push('}') } else { popIn() } done } case arr: SchemaNode.Arr => { val values = arr.nodes @inline @tailrec def loop(idx: Int, done: Boolean): Boolean = { if (idx < values.length) { val value = values(idx) if (done) { pushIn(",", false) } val emitted = traverseSchema(row, value) if (!emitted && done) { // less efficient popIn() } loop(idx + 1, done || emitted) } else { done } } pushIn("[", false) val done = loop(0, false) if (done) { push(']') } else { popIn() } done } case union: SchemaNode.Union => { val pos = union.possibilities @inline @tailrec def loop(idx: Int): Boolean = { if (idx < pos.length) { traverseSchema(row, pos(idx)) || loop(idx + 1) } else { false } } loop(0) } case SchemaNode.Leaf(tpe, col) => tpe match { case CString => { val specCol = col.asInstanceOf[StrColumn] if (specCol.isDefinedAt(row)) { flushIn() renderString(specCol(row)) true } else { false } } case CBoolean => { val specCol = col.asInstanceOf[BoolColumn] if (specCol.isDefinedAt(row)) { flushIn() renderBoolean(specCol(row)) true } else { false } } case CLong => { val specCol = col.asInstanceOf[LongColumn] if (specCol.isDefinedAt(row)) { flushIn() renderLong(specCol(row)) true } else { false } } case CDouble => { val specCol = col.asInstanceOf[DoubleColumn] if (specCol.isDefinedAt(row)) { flushIn() renderDouble(specCol(row)) true } else { false } } case CNum => { val specCol = col.asInstanceOf[NumColumn] if (specCol.isDefinedAt(row)) { flushIn() renderNum(specCol(row)) true } else { false 
} } case CNull => { val specCol = col.asInstanceOf[NullColumn] if (specCol.isDefinedAt(row)) { flushIn() renderNull() true } else { false } } case CEmptyObject => { val specCol = col.asInstanceOf[EmptyObjectColumn] if (specCol.isDefinedAt(row)) { flushIn() renderEmptyObject() true } else { false } } case CEmptyArray => { val specCol = col.asInstanceOf[EmptyArrayColumn] if (specCol.isDefinedAt(row)) { flushIn() renderEmptyArray() true } else { false } } case CDate => { val specCol = col.asInstanceOf[DateColumn] if (specCol.isDefinedAt(row)) { flushIn() renderDate(specCol(row)) true } else { false } } case CPeriod => { val specCol = col.asInstanceOf[PeriodColumn] if (specCol.isDefinedAt(row)) { flushIn() renderPeriod(specCol(row)) true } else { false } } case CArrayType(_) => { val specCol = col.asInstanceOf[HomogeneousArrayColumn[_]] if (specCol.isDefinedAt(row)) { flushIn() renderArray(specCol(row)) true } else { false } } case CUndefined => false } } @tailrec def render(row: Int, delimit: Boolean): Boolean = { if (row < size) { if (delimit) { pushIn(delimiter, false) } val rowRendered = traverseSchema(row, schema) if (delimit && !rowRendered) { popIn() } render(row + 1, delimit || rowRendered) } else { delimit } } val rendered = render(0, false) buffer.flip() vector += buffer val stream = StreamT.unfoldM(0) { idx => val back = if (idx < vector.length) Some((vector(idx), idx + 1)) else None M.point(back) } (stream, rendered) } else StreamT.empty[M, CharBuffer] -> false } } def toRValue(row: Int): RValue = { columns.foldLeft[RValue](CUndefined) { case (rv, (ColumnRef(selector, _), col)) if col.isDefinedAt(row) => rv.unsafeInsert(selector, col.cValue(row)) case (rv, _) => rv } } def toJValue(row: Int) = { columns.foldLeft[JValue](JUndefined) { case (jv, (ColumnRef(selector, _), col)) if col.isDefinedAt(row) => CPathUtils.cPathToJPaths(selector, col.cValue(row)).foldLeft(jv) { case (jv, (path, value)) => jv.unsafeInsert(path, value.toJValue) } case (jv, _) => jv } } def toJson(row: Int): Option[JValue] = { toJValue(row) match { case JUndefined => None case jv => Some(jv) } } def toJsonElements: Vector[JValue] = { @tailrec def rec(i: Int, acc: Vector[JValue]): Vector[JValue] = { if (i < source.size) { toJValue(i) match { case JUndefined => rec(i + 1, acc) case jv => rec(i + 1, acc :+ jv) } } else acc } rec(0, Vector()) } def toString(row: Int): Option[String] = { (columns.toList.sortBy(_._1) map { case (ref, col) => ref.toString + ": " + (if (col.isDefinedAt(row)) col.strValue(row) else "(undefined)") }) match { case Nil => None case l => Some(l.mkString("[", ", ", "]")) } } def toJsonString(prefix: String = ""): String = { (0 until size).map(i => prefix + " " + toJson(i)).mkString("\n") } override def toString = (0 until size).map(toString(_).getOrElse("")).mkString("\n") } object Slice { def empty: Slice = Slice(Map.empty, 0) def apply(columns0: Map[ColumnRef, Column], dataSize: Int): Slice = { new Slice { val size = dataSize val columns = columns0 } } def updateRefs(rv: RValue, into: Map[ColumnRef, ArrayColumn[_]], sliceIndex: Int, sliceSize: Int): Map[ColumnRef, ArrayColumn[_]] = { rv.flattenWithPath.foldLeft(into) { case (acc, (cpath, CUndefined)) => acc case (acc, (cpath, cvalue)) => val ref = ColumnRef(cpath, (cvalue.cType)) val updatedColumn: ArrayColumn[_] = cvalue match { case CBoolean(b) => acc.getOrElse(ref, ArrayBoolColumn.empty()).asInstanceOf[ArrayBoolColumn].unsafeTap { c => c.update(sliceIndex, b) } case CLong(d) => acc.getOrElse(ref, 
ArrayLongColumn.empty(sliceSize)).asInstanceOf[ArrayLongColumn].unsafeTap { c => c.update(sliceIndex, d.toLong) } case CDouble(d) => acc.getOrElse(ref, ArrayDoubleColumn.empty(sliceSize)).asInstanceOf[ArrayDoubleColumn].unsafeTap { c => c.update(sliceIndex, d.toDouble) } case CNum(d) => acc.getOrElse(ref, ArrayNumColumn.empty(sliceSize)).asInstanceOf[ArrayNumColumn].unsafeTap { c => c.update(sliceIndex, d) } case CString(s) => acc.getOrElse(ref, ArrayStrColumn.empty(sliceSize)).asInstanceOf[ArrayStrColumn].unsafeTap { c => c.update(sliceIndex, s) } case CDate(d) => acc.getOrElse(ref, ArrayDateColumn.empty(sliceSize)).asInstanceOf[ArrayDateColumn].unsafeTap { c => c.update(sliceIndex, d) } case CPeriod(p) => acc.getOrElse(ref, ArrayPeriodColumn.empty(sliceSize)).asInstanceOf[ArrayPeriodColumn].unsafeTap { c => c.update(sliceIndex, p) } case CArray(arr, cType) => acc.getOrElse(ref, ArrayHomogeneousArrayColumn.empty(sliceSize)(cType)).asInstanceOf[ArrayHomogeneousArrayColumn[cType.tpe]].unsafeTap { c => c.update(sliceIndex, arr) } case CEmptyArray => acc.getOrElse(ref, MutableEmptyArrayColumn.empty()).asInstanceOf[MutableEmptyArrayColumn].unsafeTap { c => c.update(sliceIndex, true) } case CEmptyObject => acc.getOrElse(ref, MutableEmptyObjectColumn.empty()).asInstanceOf[MutableEmptyObjectColumn].unsafeTap { c => c.update(sliceIndex, true) } case CNull => acc.getOrElse(ref, MutableNullColumn.empty()).asInstanceOf[MutableNullColumn].unsafeTap { c => c.update(sliceIndex, true) } case x => sys.error(s"Unexpected arg $x") } acc + (ref -> updatedColumn) } } def fromJValues(values: Stream[JValue]): Slice = fromRValues(values.map(RValue.fromJValue).collect({ case Some(x) => x })) def fromRValues(values: Stream[RValue]): Slice = { val sliceSize = values.size @tailrec def buildColArrays(from: Stream[RValue], into: Map[ColumnRef, ArrayColumn[_]], sliceIndex: Int): (Map[ColumnRef, ArrayColumn[_]], Int) = { from match { case jv #:: xs => val refs = updateRefs(jv, into, sliceIndex, sliceSize) buildColArrays(xs, refs, sliceIndex + 1) case _ => (into, sliceIndex) } } new Slice { val (columns, size) = buildColArrays(values, Map.empty[ColumnRef, ArrayColumn[_]], 0) } } /** * Concatenate multiple slices into 1 big slice. The slices will be * concatenated in the order they appear in `slices`. */ def concat(slices: Seq[Slice]): Slice = { val (_columns, _size) = slices.foldLeft((Map.empty[ColumnRef, List[(Int, Column)]], 0)) { case ((cols, offset), slice) if slice.size > 0 => (slice.columns.foldLeft(cols) { case (acc, (ref, col)) => acc + (ref -> ((offset, col) :: acc.getOrElse(ref, Nil))) }, offset + slice.size) case ((cols, offset), _) => (cols, offset) } val slice = new Slice { val size = _size val columns = _columns.flatMap { case (ref, parts) => cf.util.NConcat(parts) map ((ref, _)) } } slice } def rowComparatorFor(s1: Slice, s2: Slice)(keyf: Slice => Iterable[CPath]): RowComparator = { val paths = (keyf(s1) ++ keyf(s2)).toList val traversal = CPathTraversal(paths) val lCols = s1.columns groupBy (_._1.selector) map { case (path, m) => path -> m.values.toSet } val rCols = s2.columns groupBy (_._1.selector) map { case (path, m) => path -> m.values.toSet } val allPaths = (lCols.keys ++ rCols.keys).toList val order = traversal.rowOrder(allPaths, lCols, Some(rCols)) new RowComparator { def compare(r1: Int, r2: Int): Ordering = scalaz.Ordering.fromInt(order.compare(r1, r2)) } } /** * Given a JValue, an existing map of columnrefs to column data, * a sliceIndex, and a sliceSize, return an updated map. 
*/ def withIdsAndValues(jv: JValue, into: Map[ColumnRef, ArrayColumn[_]], sliceIndex: Int, sliceSize: Int, remapPath: Option[JPath => CPath] = None): Map[ColumnRef, ArrayColumn[_]] = { jv.flattenWithPath.foldLeft(into) { case (acc, (jpath, JUndefined)) => acc case (acc, (jpath, v)) => val ctype = CType.forJValue(v) getOrElse { sys.error("Cannot determine ctype for " + v + " at " + jpath + " in " + jv) } val ref = ColumnRef(remapPath.map(_ (jpath)).getOrElse(CPath(jpath)), ctype) val updatedColumn: ArrayColumn[_] = v match { case JBool(b) => acc.getOrElse(ref, ArrayBoolColumn.empty()).asInstanceOf[ArrayBoolColumn].unsafeTap { c => c.update(sliceIndex, b) } case JNum(d) => ctype match { case CLong => acc.getOrElse(ref, ArrayLongColumn.empty(sliceSize)).asInstanceOf[ArrayLongColumn].unsafeTap { c => c.update(sliceIndex, d.toLong) } case CDouble => acc.getOrElse(ref, ArrayDoubleColumn.empty(sliceSize)).asInstanceOf[ArrayDoubleColumn].unsafeTap { c => c.update(sliceIndex, d.toDouble) } case CNum => acc.getOrElse(ref, ArrayNumColumn.empty(sliceSize)).asInstanceOf[ArrayNumColumn].unsafeTap { c => c.update(sliceIndex, d) } case _ => sys.error("non-numeric type reached") } case JString(s) => acc.getOrElse(ref, ArrayStrColumn.empty(sliceSize)).asInstanceOf[ArrayStrColumn].unsafeTap { c => c.update(sliceIndex, s) } case JArray(Nil) => acc.getOrElse(ref, MutableEmptyArrayColumn.empty()).asInstanceOf[MutableEmptyArrayColumn].unsafeTap { c => c.update(sliceIndex, true) } case JObject.empty => acc.getOrElse(ref, MutableEmptyObjectColumn.empty()).asInstanceOf[MutableEmptyObjectColumn].unsafeTap { c => c.update(sliceIndex, true) } case JNull => acc.getOrElse(ref, MutableNullColumn.empty()).asInstanceOf[MutableNullColumn].unsafeTap { c => c.update(sliceIndex, true) } case _ => sys.error("non-flattened value reached") } acc + (ref -> updatedColumn) } } private sealed trait SchemaNode private object SchemaNode { final case class Obj(nodes: Map[String, SchemaNode]) extends SchemaNode { final var keys: Array[String] = _ final var values: Array[SchemaNode] = _ } final case class Arr(map: Map[Int, SchemaNode]) extends SchemaNode { final var nodes: Array[SchemaNode] = _ } final case class Union(nodes: Set[SchemaNode]) extends SchemaNode { final var possibilities: Array[SchemaNode] = _ } final case class Leaf(tpe: CType, col: Column) extends SchemaNode } }
drostron/quasar
yggdrasil/src/main/scala/quasar/yggdrasil/table/Slice.scala
Scala
apache-2.0
59,367
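The Slice methods above compose naturally, and a tiny driver makes the intent easier to see. The sketch below is only an illustration: it uses members that appear in this file (sortBy, takeRange, toJsonElements), but it assumes a `slice: Slice` built elsewhere (for example via Slice.fromJValues) and it assumes a string-based CPath constructor such as CPath("name"), which is defined outside this file.

// Hedged sketch: sort a slice by a field, take the first page, and render the rows as JValues.
def firstPageSortedByName(slice: Slice): Vector[JValue] = {
  val sorted = slice.sortBy(Vector(CPath("name")))                  // CPath("name") is an assumed constructor
  val page   = sorted.takeRange(startIndex = 0, numberToTake = 100)
  page.toJsonElements                                               // rows that are entirely undefined are skipped
}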
package in.thirumal.zookeeper.group

import scala.io.StdIn.readLine

/**
 * Lists the members inside a ZooKeeper group
 * Created by thirumal on 20/11/15.
 */
object ListGroup {

  /**
   * Main function
   * @param args Command line arguments: ListGroup <zkHosts> <groupName>
   */
  def main(args: Array[String]): Unit = {
    // Read the hosts and group name if not given
    val (hosts, groupName) = if (args.length != 2) {
      (readLine("ZooKeeper hosts: "), readLine("Group Name: "))
    } else {
      (args(0), args(1))
    }
    // List the members of the group
    println("Group Members:" + new ZooKeeperGroup(hosts).listGroup(groupName).mkString(", "))
  }
}
zapstar/ZooKeeperExamples
src/main/scala/in/thirumal/zookeeper/group/ListGroup.scala
Scala
mit
678
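ListGroup is a thin command-line wrapper, so exercising it is just a matter of passing the two arguments (or letting it prompt on stdin). A hedged sketch; the connection string and group name are placeholders, and ZooKeeperGroup comes from a sibling file that is not shown here.

object ListGroupDemo {
  def main(args: Array[String]): Unit = {
    // Requires a reachable ZooKeeper ensemble; both values below are placeholders.
    ListGroup.main(Array("localhost:2181", "workers"))
  }
}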
package org.homermultitext.edmodel

import org.scalatest.FlatSpec

class DeletionSpec extends FlatSpec {

  "A deletion" should "keep the deleted text as a reading" in {
    val example = """urn:cts:greekLit:tlg0012.tlg001.demo:24.212#<l n="212" >ανδρι <add>παρα</add> κρατὲρῳ τοῦ εγω <del>δε</del> <w part="N">μ<unclear instant="false">ε</unclear><gap extent="2" unit="letters"/>ον</w> ἧπαρ εχοιμι</l>"""
    val delReader = TeiReaderOld(example)
    val delTokens = delReader.tokens
    /*
    val analyses = TeiReaderOld.fromString(example).map(_.analysis)
    println("DeletionSpec: tokenized into " + analyses.size + " analyses.")
    for (a <- analyses) {
      val rdgs = a.readings.mkString("++")
      //println(rdgs + " ALT " + a.alternateReading)
    }
    */
    println("PARSED deletion example and got " + delTokens.size + " tokens")
    for ((t, i) <- delTokens.zipWithIndex) {
      println("\t" + i + " = " + t.analysis.readWithDiplomatic)
    }
  }

  it should "have an empty string as the alternate" in pending

  it should "create a valid CitableNode for an alternate reading" in pending /*{
    val xml = "urn:cts:greekLit:tlg0012.tlg001.demo:10.534#<l n=\"534\">ψεύσομαι. ἢ έτυμόν <del>τοι</del> ἐρέω, κέλεται δέ με θυμός·</l>"
    TeiReaderOld.clear
    val tokens = TeiReaderOld.fromString(xml)
    val analyses = tokens.map(_.analysis)
    for (tkn <- analyses) {
      //println(s"${tkn.editionUrn}==${tkn.readWithAlternate}")
    }
    for (tkn <- tokens) {
      //println(s"${tkn.analysis.editionUrn}==${tkn.analysis.readWithAlternate}")
    }
  }*/
}
homermultitext/edmodel
src/test/scala/org/homermultitext/edmodel/old/DeletionSpec.scala
Scala
gpl-3.0
1,689
package com.twitter.finagle.ssl.client

import com.twitter.finagle.{Address, SslHostVerificationException}
import com.twitter.util.Try
import java.security.cert.X509Certificate
import javax.net.ssl.SSLSession
import sun.security.util.HostnameChecker

private[finagle] object HostnameVerifier extends SslClientSessionVerifier {

  private[this] val checker = HostnameChecker.getInstance(HostnameChecker.TYPE_TLS)

  /**
   * Run hostname verification on the session. This will fail with a
   * [[com.twitter.finagle.SslHostVerificationException]] if the certificate is
   * invalid for the given session.
   *
   * This uses [[sun.security.util.HostnameChecker]]. Any bugs are theirs.
   */
  def apply(
    address: Address,
    config: SslClientConfiguration,
    session: SSLSession
  ): Boolean = {
    config.hostname match {
      case Some(host) =>
        // We take the first certificate from the given `getPeerCertificates` array since the expected
        // array structure is peer's own certificate first followed by any certificate authorities.
        val isValid = session.getPeerCertificates.headOption.exists {
          case x509: X509Certificate =>
            Try(checker.`match`(host, x509)).isReturn
          case _ => false
        }
        if (isValid) true
        else throw new SslHostVerificationException(session.getPeerPrincipal.getName)
      case None =>
        SslClientSessionVerifier.AlwaysValid(address, config, session)
    }
  }
}
koshelev/finagle
finagle-core/src/main/scala/com/twitter/finagle/ssl/client/HostnameVerifier.scala
Scala
apache-2.0
1,453
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.predictionio.core

import org.apache.predictionio.annotation.DeveloperApi
import org.apache.spark.SparkContext

/** :: DeveloperApi ::
  * Base class of all preparator controller classes
  *
  * Dev note: Probably will add an extra parameter for ad hoc JSON formatter
  *
  * @tparam TD Training data class
  * @tparam PD Prepared data class
  */
@DeveloperApi
abstract class BasePreparator[TD, PD] extends AbstractDoer {

  /** :: DeveloperApi ::
    * Engine developers should not use this directly. This is called by training
    * workflow to prepare data before handing it over to algorithm
    *
    * @param sc Spark context
    * @param td Training data
    * @return Prepared data
    */
  @DeveloperApi
  def prepareBase(sc: SparkContext, td: TD): PD
}
pferrel/PredictionIO
core/src/main/scala/org/apache/predictionio/core/BasePreparator.scala
Scala
apache-2.0
1,586
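BasePreparator is abstract, so its contract is easiest to see from a subclass that fills in prepareBase. A minimal sketch with made-up TrainingData and PreparedData types; real PredictionIO engines normally extend the higher-level preparator wrappers rather than BasePreparator directly, since the doc comment above says engine developers should not call prepareBase themselves.

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

// Hypothetical engine-specific types, not part of PredictionIO itself.
case class TrainingData(events: RDD[String])
case class PreparedData(tokens: RDD[Array[String]])

class WhitespacePreparator extends BasePreparator[TrainingData, PreparedData] {
  // Invoked by the training workflow before the data reaches the algorithm.
  def prepareBase(sc: SparkContext, td: TrainingData): PreparedData =
    PreparedData(td.events.map(_.split("\\s+")))
}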
package com.danielasfregola.twitter4s.http.clients.rest.statuses.parameters

import com.danielasfregola.twitter4s.entities.enums.TweetMode
import org.specs2.mutable.SpecificationLike

class PostParametersSpec extends SpecificationLike {

  "PostParameters" should {

    "correctly represents each field as the respective request parameter" in {
      PostParameters(
        trim_user = true,
        tweet_mode = TweetMode.Extended
      ).toString shouldEqual "trim_user=true&tweet_mode=extended"
    }

    "doesn't provide request parameter if the respective field is empty (tweet_mode is classic)" in {
      PostParameters(
        trim_user = false,
        tweet_mode = TweetMode.Classic
      ).toString shouldEqual "trim_user=false"
    }
  }
}
DanielaSfregola/twitter4s
src/test/scala/com/danielasfregola/twitter4s/http/clients/rest/statuses/parameters/PostParametersSpec.scala
Scala
apache-2.0
755
/*
 * Copyright 2014 Eric Zoerner
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package scalable.client.chat.views

import java.util
import javafx.beans.value.{ ObservableValue, ChangeListener }
import javafx.collections.ListChangeListener
import javafx.geometry.{ VPos, HPos, Insets }
import javafx.scene.Node
import javafx.scene.layout.Region
import javafx.scene.web.WebView
import javafx.concurrent.Worker.State

import scala.collection.JavaConverters._
import scalafx.application.Platform

/**
 * Code adapted from <a href="https://github.com/frtj/javafx_examples">github.com/frtj/javafx_examples</a>
 *
 * @author Eric Zoerner <a href="mailto:[email protected]">[email protected]</a>
 */
object Browser {
  private val ContentId = "browser_content"

  def getHtml(content: String): String = {
    "<html><body>" +
      "<div id=\"" + ContentId + "\">" + content + "</div>" +
      "</body></html>"
  }
}

import scalable.client.chat.views.Browser._

class Browser(content: String) extends Region {
  val webView = new WebView()
  val webEngine = webView.getEngine

  webView.setPrefHeight(5)
  setPadding(new Insets(20))

  widthProperty().addListener(new ChangeListener[Any] {
    override def changed(observable: ObservableValue[_], oldValue: Any, newValue: Any): Unit = {
      val width = newValue.asInstanceOf[Double]
      webView.setPrefWidth(width)
      adjustHeight()
    }
  })

  webEngine.getLoadWorker.stateProperty.addListener(new ChangeListener[State] {
    override def changed(arg0: ObservableValue[_ <: State], oldState: State, newState: State): Unit =
      if (newState == State.SUCCEEDED) adjustHeight()
  })

  // http://stackoverflow.com/questions/11206942/how-to-hide-scrollbars-in-the-javafx-webview
  webView.getChildrenUnmodifiable.addListener(new ListChangeListener[Node] {
    def onChanged(change: ListChangeListener.Change[_ <: Node]) = {
      val scrolls: util.Set[Node] = webView.lookupAll(".scroll-bar")
      for (scroll ← scrolls.asScala) {
        scroll.setVisible(false)
      }
    }
  })

  setContent(content)
  getChildren.add(webView)

  def setContent(content: String) = {
    Platform.runLater {
      webEngine.loadContent(getHtml(content))
      Platform.runLater(adjustHeight())
    }
  }

  protected override def layoutChildren() = {
    val w: Double = getWidth
    val h: Double = getHeight
    layoutInArea(webView, 0, 0, w, h, 0, HPos.CENTER, VPos.CENTER)
  }

  private def adjustHeight(): Unit = {
    Platform.runLater {
      val result: Any = webEngine.executeScript(
        "var e = document.getElementById('" + ContentId + "');" +
          "e ? e.offsetHeight : null")
      result match {
        case i: Integer ⇒
          var height = i.toDouble
          height = height + 20
          webView.setPrefHeight(height)
        case _ ⇒
      }
    }
  }
}
ezoerner/scalable-chat
client/src/main/scala/scalable/client/chat/views/Browser.scala
Scala
apache-2.0
3,344
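Because Browser extends javafx.scene.layout.Region and builds a WebView in its constructor, it has to be instantiated on the JavaFX application thread with the toolkit already running. A hedged ScalaFX-flavoured sketch; the window title and HTML are placeholders, and the exact JFXApp scaffolding can differ between ScalaFX versions.

import scalafx.application.JFXApp
import scalafx.application.JFXApp.PrimaryStage
import scalafx.scene.Scene
import scalafx.scene.layout.StackPane

object BrowserDemo extends JFXApp {
  stage = new PrimaryStage {
    title = "Browser demo"   // placeholder title
    scene = new Scene {
      root = new StackPane {
        // Browser is a plain JavaFX Node, so it can be added to a ScalaFX pane directly.
        children += new Browser("<p>Hello, <b>world</b></p>")
      }
    }
  }
}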
package config

import com.mongodb.casbah.Imports._
import com.typesafe.config.ConfigFactory

object OauthConfig {
  def get = {
    val config = ConfigFactory.load("oauth")
    OauthConfig(
      config.getString("clientId"),
      config.getString("clientSecret"),
      config.getString("redirectUri"))
  }
}

case class OauthConfig(
  clientId: String,
  clientSecret: String,
  redirectUri: String)

trait MongoClientConfig {
  def mongoDbUri: String
  val mongoClient = MongoClient(MongoClientURI(mongoDbUri))
}

case class KafkaConfig(server: String)

case class BlogQueryConfig(mongoDbUri: String, title: String) extends MongoClientConfig
case class DiscussionQueryConfig(mongoDbUri: String) extends MongoClientConfig
case class BlogQueryBuilderConfig(mongoDbUri: String) extends MongoClientConfig
case class DiscussionQueryBuilderConfig(mongoDbUri: String) extends MongoClientConfig

object DevelopmentKafkaConfig extends KafkaConfig("localhost:9094")
object DevelopmentDiscussionQueryConfig extends DiscussionQueryConfig("mongodb://localhost/blog")
object DevelopmentBlogQueryConfig extends BlogQueryConfig("mongodb://localhost/blog", "Blogok")
object DevelopmentBlogQueryBuilderConfig extends BlogQueryBuilderConfig("mongodb://localhost/blog")
object DevelopmentDiscussionQueryBuilderConfig extends DiscussionQueryBuilderConfig("mongodb://localhost/blog")

object ProductionKafkaConfig extends KafkaConfig("kafka:9092")
object ProductionDiscussionQueryConfig extends DiscussionQueryConfig("mongodb://mongodb/blog")
object ProductionBlogQueryConfig extends BlogQueryConfig("mongodb://mongodb/blog", "Blogok")
object ProductionBlogQueryBuilderConfig extends BlogQueryBuilderConfig("mongodb://mongodb/blog")
object ProductionDiscussionQueryBuilderConfig extends DiscussionQueryBuilderConfig("mongodb://mongodb/blog")
enpassant/rapids
modules/config/src/main/scala/config/KafkaConfig.scala
Scala
apache-2.0
1,849
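All of these are plain constant objects, so wiring them up is just a conditional at application startup. A small sketch; the APP_ENV variable name is an assumption for illustration, not something this module defines, and the collection lookup assumes the title field names a Casbah collection.

object AppWiring {
  // Pick a config set based on an environment variable (the variable name is made up for the example).
  val isProd = sys.env.get("APP_ENV").contains("production")

  val kafkaConfig: KafkaConfig =
    if (isProd) ProductionKafkaConfig else DevelopmentKafkaConfig
  val blogQueryConfig: BlogQueryConfig =
    if (isProd) ProductionBlogQueryConfig else DevelopmentBlogQueryConfig

  // MongoClientConfig opens its connection eagerly in the mongoClient val, so this
  // line already connects to the chosen URI (Casbah's db/collection apply syntax).
  val blogCollection = blogQueryConfig.mongoClient("blog")(blogQueryConfig.title)
}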
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.hive import java.net.URI import org.apache.spark.sql.{AnalysisException, Dataset, Row, SaveMode, SparkSession} import org.apache.spark.sql.catalyst.catalog.{CatalogTable, CatalogTableType, CatalogUtils} import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan import org.apache.spark.sql.execution.command.{AlterTableRecoverPartitionsCommand, AtomicRunnableCommand} import org.apache.spark.sql.execution.datasources.{DataSource, HadoopFsRelation} import org.apache.spark.sql.sources.BaseRelation import org.apache.spark.util.CarbonReflectionUtils /** * Create table 'using carbondata' and insert the query result into it. * * @param table the Catalog Table * @param mode SaveMode:Ignore,OverWrite,ErrorIfExists,Append * @param query the query whose result will be insert into the new relation * */ case class CreateCarbonSourceTableAsSelectCommand( table: CatalogTable, mode: SaveMode, query: LogicalPlan) extends AtomicRunnableCommand { override def processMetadata(sparkSession: SparkSession): Seq[Row] = { Seq.empty } override def processData(sparkSession: SparkSession): Seq[Row] = { assert(table.tableType != CatalogTableType.VIEW) assert(table.provider.isDefined) val sessionState = sparkSession.sessionState val db = table.identifier.database.getOrElse(sessionState.catalog.getCurrentDatabase) val tableIdentWithDB = table.identifier.copy(database = Some(db)) val tableName = tableIdentWithDB.unquotedString setAuditTable(db, table.identifier.table) if (sessionState.catalog.tableExists(tableIdentWithDB)) { assert(mode != SaveMode.Overwrite, s"Expect the table $tableName has been dropped when the save mode is Overwrite") if (mode == SaveMode.ErrorIfExists) { throw new AnalysisException(s"Table $tableName already exists. You need to drop it first.") } if (mode == SaveMode.Ignore) { // Since the table already exists and the save mode is Ignore, we will just return. return Seq.empty } saveDataIntoTable( sparkSession, table, table.storage.locationUri, query, SaveMode.Append, tableExists = true) } else { assert(table.schema.isEmpty) val tableLocation = if (table.tableType == CatalogTableType.MANAGED) { Some(sessionState.catalog.defaultTablePath(table.identifier)) } else { table.storage.locationUri } val result = saveDataIntoTable( sparkSession, table, tableLocation, query, SaveMode.Overwrite, tableExists = false) result match { case fs: HadoopFsRelation if table.partitionColumnNames.nonEmpty && sparkSession.sqlContext.conf.manageFilesourcePartitions => // Need to recover partitions into the metastore so our saved data is visible. 
sessionState.executePlan(AlterTableRecoverPartitionsCommand(table.identifier)).toRdd } } Seq.empty[Row] } private def saveDataIntoTable( session: SparkSession, table: CatalogTable, tableLocation: Option[URI], data: LogicalPlan, mode: SaveMode, tableExists: Boolean): BaseRelation = { // Create the relation based on the input logical plan: `data`. val pathOption = tableLocation.map("path" -> CatalogUtils.URIToString(_)) val dataSource = DataSource( session, className = table.provider.get, partitionColumns = table.partitionColumnNames, bucketSpec = table.bucketSpec, options = table.storage.properties ++ pathOption, catalogTable = if (tableExists) { Some(table) } else { None }) try { val physicalPlan = session.sessionState.executePlan(data).executedPlan CarbonReflectionUtils.invokeWriteAndReadMethod(dataSource, Dataset.ofRows(session, query), data, session, mode, query, physicalPlan) } catch { case ex: AnalysisException => logError(s"Failed to write to table ${ table.identifier.unquotedString }", ex) throw ex } } override protected def opName: String = "CREATE TABLE AS SELECT" }
zzcclp/carbondata
integration/spark/src/main/scala/org/apache/spark/sql/hive/CreateCarbonSourceTableAsSelectCommand.scala
Scala
apache-2.0
4,995
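In practice this command is reached through SQL, since it backs CREATE TABLE ... USING carbondata ... AS SELECT. A hedged sketch of driving it via SparkSession.sql; the table names are placeholders and the session is assumed to already have the CarbonData extensions installed.

import org.apache.spark.sql.SparkSession

// Placeholder table names; `spark` must be a CarbonData-enabled SparkSession.
def ctasExample(spark: SparkSession): Unit = {
  spark.sql(
    """CREATE TABLE IF NOT EXISTS events_carbon
      |USING carbondata
      |AS SELECT id, name FROM events_src""".stripMargin)
}

The IF NOT EXISTS clause corresponds to the SaveMode.Ignore branch in processData above; dropping it gives the ErrorIfExists behaviour instead.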
package domala.tests.layered.repository.rdb

import domala._
import domala.tests.layered.domain
import domala.tests.layered.repository.EmpRepository

import scala.reflect.ClassTag

case class Emp(
  @Id
  @GeneratedValue(GenerationType.IDENTITY)
  id: Option[domain.ID[domain.Emp]],
  name: domain.Name[domain.Emp],
  age: domain.Age,
  @domala.Version
  version: domain.Version
) extends domain.Emp

object Emp {
  def of(source: domain.Emp): Emp =
    if (source == null) null
    else Emp(
      source.id,
      source.name,
      source.age,
      source.version
    )
}

@Dao
trait EmpDao extends EmpRepository {

  @Script("""
    CREATE TABLE emp(
      id INT NOT NULL IDENTITY PRIMARY KEY,
      name VARCHAR(20) NOT NULL,
      age INT NOT NULL,
      version INT NOT NULL
    );
  """)
  def create(): Unit

  @Script("""
    DROP TABLE emp;
  """)
  def drop(): Unit

  def save(entities: Seq[domain.Emp]): Array[Int] = saveImpl(entities.map(Emp.of))

  @BatchInsert
  protected def saveImpl(entities: Seq[Emp]): Array[Int]

  def findByIds[R: ClassTag](id: Seq[domain.ID[domain.Emp]])(mapper: Iterator[domain.Emp] => R): R =
    findByIdsImpl[R](id)(mapper)

  @Select("""
    SELECT * FROM emp
    WHERE id IN /* id */()
  """, strategy = SelectType.ITERATOR)
  protected def findByIdsImpl[R: ClassTag](id: Seq[domain.ID[domain.Emp]])(mapper: Iterator[Emp] => R): R

  def findAll[R: ClassTag](mapper: Iterator[domain.Emp] => R): R = findAllImpl[R](mapper)

  @Select("""
    SELECT * FROM emp
  """, strategy = SelectType.ITERATOR)
  protected def findAllImpl[R: ClassTag](mapper: Iterator[Emp] => R): R

  def entry(entity: domain.Emp): Int = saveImpl(Seq(Emp.of(entity))).head
}
bakenezumi/domala
paradise/src/test/scala/domala/tests/layered/repository/rdb/dao.scala
Scala
apache-2.0
1,658
/**
 * Copyright (c) 2016 Intel Corporation
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *       http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.apache.spark.ml.org.trustedanalytics.sparktk.deeptrees.param

import org.json4s._
import org.json4s.jackson.JsonMethods._
//import org.json4s.JsonDSL._
import org.apache.spark.annotation.{ DeveloperApi, Experimental }
import org.apache.spark.ml.util.Identifiable
import org.apache.spark.mllib.linalg.{ DenseVector, SparseVector, Vector, Vectors }
import org.json4s.jackson.JsonMethods.{ parse => parseJson }

private[ml] object Param {

  /** Decodes a param value from JSON. */
  def jsonDecode[T](json: String): T = {
    parse(json) match {
      case JString(x) =>
        x.asInstanceOf[T]
      case JObject(v) =>
        val keys = v.map(_._1)
        assert(keys.contains("type") && keys.contains("values"),
          s"Expect a JSON serialized vector but cannot find fields 'type' and 'values' in $json.")
        JsonVectorConverter.fromJson(json).asInstanceOf[T]
      case _ =>
        throw new NotImplementedError(
          "The default jsonDecode only supports string and vector. " +
            s"${this.getClass.getName} must override jsonDecode to support its value type.")
    }
  }
}
aayushidwivedi01/spark-tk
sparktk-core/src/main/scala/org/apache/spark/ml/org/trustedanalytics/sparktk/deeptrees/param/params.scala
Scala
apache-2.0
1,814
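Param.jsonDecode accepts either a JSON string literal or the serialized vector layout that the assertion above checks for. A small sketch of both paths; note the object is private[ml], so a caller has to live under org.apache.spark.ml, and the {"type":1, ...} literal assumes type 1 denotes a dense vector as in Spark's JsonVectorConverter.

import org.apache.spark.mllib.linalg.Vector

object ParamJsonDemo {
  // String-typed params round-trip as plain JSON strings.
  val name: String = Param.jsonDecode[String]("\"maxDepth\"")

  // Vector-typed params use the {"type": ..., "values": ...} form checked by the assertion.
  val weights: Vector = Param.jsonDecode[Vector]("""{"type":1,"values":[0.5,1.5]}""")
}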
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.executor import java.io.{Externalizable, ObjectInput, ObjectOutput} import java.lang.Thread.UncaughtExceptionHandler import java.nio.ByteBuffer import java.util.Properties import java.util.concurrent.{ConcurrentHashMap, CountDownLatch, TimeUnit} import java.util.concurrent.atomic.AtomicBoolean import scala.collection.immutable import scala.collection.mutable.{ArrayBuffer, Map} import scala.concurrent.duration._ import org.mockito.ArgumentCaptor import org.mockito.ArgumentMatchers.{any, eq => meq} import org.mockito.Mockito.{inOrder, verify, when} import org.mockito.invocation.InvocationOnMock import org.mockito.stubbing.Answer import org.scalatest.PrivateMethodTester import org.scalatest.concurrent.Eventually import org.scalatest.mockito.MockitoSugar import org.apache.spark._ import org.apache.spark.TaskState.TaskState import org.apache.spark.broadcast.Broadcast import org.apache.spark.internal.config._ import org.apache.spark.internal.config.UI._ import org.apache.spark.memory.TestMemoryManager import org.apache.spark.metrics.MetricsSystem import org.apache.spark.rdd.RDD import org.apache.spark.resource.ResourceInformation import org.apache.spark.rpc.{RpcEndpointRef, RpcEnv, RpcTimeout} import org.apache.spark.scheduler.{DirectTaskResult, FakeTask, ResultTask, Task, TaskDescription} import org.apache.spark.serializer.{JavaSerializer, SerializerInstance, SerializerManager} import org.apache.spark.shuffle.FetchFailedException import org.apache.spark.storage.{BlockManager, BlockManagerId} import org.apache.spark.util.{LongAccumulator, UninterruptibleThread} class ExecutorSuite extends SparkFunSuite with LocalSparkContext with MockitoSugar with Eventually with PrivateMethodTester { override def afterEach() { // Unset any latches after each test; each test that needs them initializes new ones. ExecutorSuiteHelper.latches = null super.afterEach() } test("SPARK-15963: Catch `TaskKilledException` correctly in Executor.TaskRunner") { // mock some objects to make Executor.launchTask() happy val conf = new SparkConf val serializer = new JavaSerializer(conf) val env = createMockEnv(conf, serializer) val serializedTask = serializer.newInstance().serialize(new FakeTask(0, 0)) val taskDescription = createFakeTaskDescription(serializedTask) // we use latches to force the program to run in this order: // +-----------------------------+---------------------------------------+ // | main test thread | worker thread | // +-----------------------------+---------------------------------------+ // | executor.launchTask() | | // | | TaskRunner.run() begins | // | | ... | // | | execBackend.statusUpdate // 1st time | // | executor.killAllTasks(true) | | // | | ... | // | | task = ser.deserialize | // | | ... 
| // | | execBackend.statusUpdate // 2nd time | // | | ... | // | | TaskRunner.run() ends | // | check results | | // +-----------------------------+---------------------------------------+ val executorSuiteHelper = new ExecutorSuiteHelper val mockExecutorBackend = mock[ExecutorBackend] when(mockExecutorBackend.statusUpdate(any(), any(), any())) .thenAnswer(new Answer[Unit] { var firstTime = true override def answer(invocationOnMock: InvocationOnMock): Unit = { if (firstTime) { executorSuiteHelper.latch1.countDown() // here between latch1 and latch2, executor.killAllTasks() is called executorSuiteHelper.latch2.await() firstTime = false } else { // save the returned `taskState` and `testFailedReason` into `executorSuiteHelper` val taskState = invocationOnMock.getArguments()(1).asInstanceOf[TaskState] executorSuiteHelper.taskState = taskState val taskEndReason = invocationOnMock.getArguments()(2).asInstanceOf[ByteBuffer] executorSuiteHelper.testFailedReason = serializer.newInstance().deserialize(taskEndReason) // let the main test thread check `taskState` and `testFailedReason` executorSuiteHelper.latch3.countDown() } } }) var executor: Executor = null try { executor = new Executor("id", "localhost", env, userClassPath = Nil, isLocal = true) // the task will be launched in a dedicated worker thread executor.launchTask(mockExecutorBackend, taskDescription) if (!executorSuiteHelper.latch1.await(5, TimeUnit.SECONDS)) { fail("executor did not send first status update in time") } // we know the task will be started, but not yet deserialized, because of the latches we // use in mockExecutorBackend. executor.killAllTasks(true, "test") executorSuiteHelper.latch2.countDown() if (!executorSuiteHelper.latch3.await(5, TimeUnit.SECONDS)) { fail("executor did not send second status update in time") } // `testFailedReason` should be `TaskKilled`; `taskState` should be `KILLED` assert(executorSuiteHelper.testFailedReason.isInstanceOf[TaskKilled]) assert(executorSuiteHelper.testFailedReason.toErrorString === "TaskKilled (test)") assert(executorSuiteHelper.taskState === TaskState.KILLED) } finally { if (executor != null) { executor.stop() } } } test("SPARK-19276: Handle FetchFailedExceptions that are hidden by user exceptions") { val conf = new SparkConf().setMaster("local").setAppName("executor suite test") sc = new SparkContext(conf) val serializer = SparkEnv.get.closureSerializer.newInstance() val resultFunc = (context: TaskContext, itr: Iterator[Int]) => itr.size // Submit a job where a fetch failure is thrown, but user code has a try/catch which hides // the fetch failure. The executor should still tell the driver that the task failed due to a // fetch failure, not a generic exception from user code. 
val inputRDD = new FetchFailureThrowingRDD(sc) val secondRDD = new FetchFailureHidingRDD(sc, inputRDD, throwOOM = false, interrupt = false) val taskBinary = sc.broadcast(serializer.serialize((secondRDD, resultFunc)).array()) val taskDescription = createResultTaskDescription(serializer, taskBinary, secondRDD, 1) val failReason = runTaskAndGetFailReason(taskDescription) assert(failReason.isInstanceOf[FetchFailed]) } test("Executor's worker threads should be UninterruptibleThread") { val conf = new SparkConf() .setMaster("local") .setAppName("executor thread test") .set(UI_ENABLED.key, "false") sc = new SparkContext(conf) val executorThread = sc.parallelize(Seq(1), 1).map { _ => Thread.currentThread.getClass.getName }.collect().head assert(executorThread === classOf[UninterruptibleThread].getName) } test("SPARK-19276: OOMs correctly handled with a FetchFailure") { val (failReason, uncaughtExceptionHandler) = testFetchFailureHandling(true) assert(failReason.isInstanceOf[ExceptionFailure]) val exceptionCaptor = ArgumentCaptor.forClass(classOf[Throwable]) verify(uncaughtExceptionHandler).uncaughtException(any(), exceptionCaptor.capture()) assert(exceptionCaptor.getAllValues.size === 1) assert(exceptionCaptor.getAllValues().get(0).isInstanceOf[OutOfMemoryError]) } test("SPARK-23816: interrupts are not masked by a FetchFailure") { // If killing the task causes a fetch failure, we still treat it as a task that was killed, // as the fetch failure could easily be caused by interrupting the thread. val (failReason, _) = testFetchFailureHandling(false) assert(failReason.isInstanceOf[TaskKilled]) } /** * Helper for testing some cases where a FetchFailure should *not* get sent back, because it's * superseded by another error, either an OOM or intentionally killing a task. * @param oom if true, throw an OOM after the FetchFailure; else, interrupt the task after the * FetchFailure * @param poll if true, poll executor metrics after launching task */ private def testFetchFailureHandling( oom: Boolean, poll: Boolean = false): (TaskFailedReason, UncaughtExceptionHandler) = { // when there is a fatal error like an OOM, we don't do normal fetch failure handling, since it // may be a false positive. And we should call the uncaught exception handler. // SPARK-23816 also handle interrupts the same way, as killing an obsolete speculative task // does not represent a real fetch failure. val conf = new SparkConf().setMaster("local").setAppName("executor suite test") sc = new SparkContext(conf) val serializer = SparkEnv.get.closureSerializer.newInstance() val resultFunc = (context: TaskContext, itr: Iterator[Int]) => itr.size // Submit a job where a fetch failure is thrown, but then there is an OOM or interrupt. We // should treat the fetch failure as a false positive, and do normal OOM or interrupt handling. 
val inputRDD = new FetchFailureThrowingRDD(sc) // helper to coordinate between the task thread and this thread that will kill the task // (and to poll executor metrics if necessary) ExecutorSuiteHelper.latches = new ExecutorSuiteHelper val secondRDD = new FetchFailureHidingRDD(sc, inputRDD, throwOOM = oom, interrupt = !oom) val taskBinary = sc.broadcast(serializer.serialize((secondRDD, resultFunc)).array()) val taskDescription = createResultTaskDescription(serializer, taskBinary, secondRDD, 1) runTaskGetFailReasonAndExceptionHandler(taskDescription, killTask = !oom, poll) } test("Gracefully handle error in task deserialization") { val conf = new SparkConf val serializer = new JavaSerializer(conf) val env = createMockEnv(conf, serializer) val serializedTask = serializer.newInstance().serialize(new NonDeserializableTask) val taskDescription = createFakeTaskDescription(serializedTask) val failReason = runTaskAndGetFailReason(taskDescription) failReason match { case ef: ExceptionFailure => assert(ef.exception.isDefined) assert(ef.exception.get.getMessage() === NonDeserializableTask.errorMsg) case _ => fail(s"unexpected failure type: $failReason") } } test("Heartbeat should drop zero accumulator updates") { heartbeatZeroAccumulatorUpdateTest(true) } test("Heartbeat should not drop zero accumulator updates when the conf is disabled") { heartbeatZeroAccumulatorUpdateTest(false) } private def withHeartbeatExecutor(confs: (String, String)*) (f: (Executor, ArrayBuffer[Heartbeat]) => Unit): Unit = { val conf = new SparkConf confs.foreach { case (k, v) => conf.set(k, v) } val serializer = new JavaSerializer(conf) val env = createMockEnv(conf, serializer) val executor = new Executor("id", "localhost", SparkEnv.get, userClassPath = Nil, isLocal = true) val executorClass = classOf[Executor] // Save all heartbeats sent into an ArrayBuffer for verification val heartbeats = ArrayBuffer[Heartbeat]() val mockReceiver = mock[RpcEndpointRef] when(mockReceiver.askSync(any[Heartbeat], any[RpcTimeout])(any)) .thenAnswer((invocation: InvocationOnMock) => { val args = invocation.getArguments() heartbeats += args(0).asInstanceOf[Heartbeat] HeartbeatResponse(false) }) val receiverRef = executorClass.getDeclaredField("heartbeatReceiverRef") receiverRef.setAccessible(true) receiverRef.set(executor, mockReceiver) f(executor, heartbeats) } private def heartbeatZeroAccumulatorUpdateTest(dropZeroMetrics: Boolean): Unit = { val c = EXECUTOR_HEARTBEAT_DROP_ZERO_ACCUMULATOR_UPDATES.key -> dropZeroMetrics.toString withHeartbeatExecutor(c) { (executor, heartbeats) => val reportHeartbeat = PrivateMethod[Unit]('reportHeartBeat) // When no tasks are running, there should be no accumulators sent in heartbeat executor.invokePrivate(reportHeartbeat()) // invokeReportHeartbeat(executor) assert(heartbeats.length == 1) assert(heartbeats(0).accumUpdates.length == 0, "No updates should be sent when no tasks are running") // When we start a task with a nonzero accumulator, that should end up in the heartbeat val metrics = new TaskMetrics() val nonZeroAccumulator = new LongAccumulator() nonZeroAccumulator.add(1) metrics.registerAccumulator(nonZeroAccumulator) val executorClass = classOf[Executor] val tasksMap = { val field = executorClass.getDeclaredField("org$apache$spark$executor$Executor$$runningTasks") field.setAccessible(true) field.get(executor).asInstanceOf[ConcurrentHashMap[Long, executor.TaskRunner]] } val mockTaskRunner = mock[executor.TaskRunner] val mockTask = mock[Task[Any]] when(mockTask.metrics).thenReturn(metrics) 
when(mockTaskRunner.taskId).thenReturn(6) when(mockTaskRunner.task).thenReturn(mockTask) when(mockTaskRunner.startGCTime).thenReturn(1) tasksMap.put(6, mockTaskRunner) executor.invokePrivate(reportHeartbeat()) assert(heartbeats.length == 2) val updates = heartbeats(1).accumUpdates assert(updates.length == 1 && updates(0)._1 == 6, "Heartbeat should only send update for the one task running") val accumsSent = updates(0)._2.length assert(accumsSent > 0, "The nonzero accumulator we added should be sent") if (dropZeroMetrics) { assert(accumsSent == metrics.accumulators().count(!_.isZero), "The number of accumulators sent should match the number of nonzero accumulators") } else { assert(accumsSent == metrics.accumulators().length, "The number of accumulators sent should match the number of total accumulators") } } } test("Send task executor metrics in DirectTaskResult") { // Run a successful, trivial result task // We need to ensure, however, that executor metrics are polled after the task is started // so this requires some coordination using ExecutorSuiteHelper. val conf = new SparkConf().setMaster("local").setAppName("executor suite test") sc = new SparkContext(conf) val serializer = SparkEnv.get.closureSerializer.newInstance() ExecutorSuiteHelper.latches = new ExecutorSuiteHelper val resultFunc = (context: TaskContext, itr: Iterator[Int]) => { // latch1 tells the test that the task is running, so it can ask the metricsPoller // to poll; latch2 waits for the polling to be done ExecutorSuiteHelper.latches.latch1.countDown() ExecutorSuiteHelper.latches.latch2.await(5, TimeUnit.SECONDS) itr.size } val rdd = new RDD[Int](sc, Nil) { override def compute(split: Partition, context: TaskContext): Iterator[Int] = { Iterator(1) } override protected def getPartitions: Array[Partition] = { Array(new SimplePartition) } } val taskBinary = sc.broadcast(serializer.serialize((rdd, resultFunc)).array()) val taskDescription = createResultTaskDescription(serializer, taskBinary, rdd, 0) val mockBackend = mock[ExecutorBackend] var executor: Executor = null try { executor = new Executor("id", "localhost", SparkEnv.get, userClassPath = Nil, isLocal = true) executor.launchTask(mockBackend, taskDescription) // Ensure that the executor's metricsPoller is polled so that values are recorded for // the task metrics ExecutorSuiteHelper.latches.latch1.await(5, TimeUnit.SECONDS) executor.metricsPoller.poll() ExecutorSuiteHelper.latches.latch2.countDown() eventually(timeout(5.seconds), interval(10.milliseconds)) { assert(executor.numRunningTasks === 0) } } finally { if (executor != null) { executor.stop() } } // Verify that peak values for task metrics get sent in the TaskResult val orderedMock = inOrder(mockBackend) val statusCaptor = ArgumentCaptor.forClass(classOf[ByteBuffer]) orderedMock.verify(mockBackend) .statusUpdate(meq(0L), meq(TaskState.RUNNING), statusCaptor.capture()) orderedMock.verify(mockBackend) .statusUpdate(meq(0L), meq(TaskState.FINISHED), statusCaptor.capture()) val resultData = statusCaptor.getAllValues.get(1) val result = serializer.deserialize[DirectTaskResult[Int]](resultData) val taskMetrics = new ExecutorMetrics(result.metricPeaks) assert(taskMetrics.getMetricValue("JVMHeapMemory") > 0) } test("Send task executor metrics in TaskKilled") { val (taskFailedReason, _) = testFetchFailureHandling(false, true) assert(taskFailedReason.isInstanceOf[TaskKilled]) val metrics = taskFailedReason.asInstanceOf[TaskKilled].metricPeaks.toArray val taskMetrics = new ExecutorMetrics(metrics) 
assert(taskMetrics.getMetricValue("JVMHeapMemory") > 0) } test("Send task executor metrics in ExceptionFailure") { val (taskFailedReason, _) = testFetchFailureHandling(true, true) assert(taskFailedReason.isInstanceOf[ExceptionFailure]) val metrics = taskFailedReason.asInstanceOf[ExceptionFailure].metricPeaks.toArray val taskMetrics = new ExecutorMetrics(metrics) assert(taskMetrics.getMetricValue("JVMHeapMemory") > 0) } private def createMockEnv(conf: SparkConf, serializer: JavaSerializer): SparkEnv = { val mockEnv = mock[SparkEnv] val mockRpcEnv = mock[RpcEnv] val mockMetricsSystem = mock[MetricsSystem] val mockBlockManager = mock[BlockManager] when(mockEnv.conf).thenReturn(conf) when(mockEnv.serializer).thenReturn(serializer) when(mockEnv.serializerManager).thenReturn(mock[SerializerManager]) when(mockEnv.rpcEnv).thenReturn(mockRpcEnv) when(mockEnv.metricsSystem).thenReturn(mockMetricsSystem) when(mockEnv.memoryManager).thenReturn(new TestMemoryManager(conf)) when(mockEnv.closureSerializer).thenReturn(serializer) when(mockBlockManager.blockManagerId).thenReturn(BlockManagerId("1", "hostA", 1234)) when(mockEnv.blockManager).thenReturn(mockBlockManager) SparkEnv.set(mockEnv) mockEnv } private def createResultTaskDescription( serializer: SerializerInstance, taskBinary: Broadcast[Array[Byte]], rdd: RDD[Int], stageId: Int): TaskDescription = { val serializedTaskMetrics = serializer.serialize(TaskMetrics.registered).array() val task = new ResultTask( stageId = stageId, stageAttemptId = 0, taskBinary = taskBinary, partition = rdd.partitions(0), locs = Seq(), outputId = 0, localProperties = new Properties(), serializedTaskMetrics = serializedTaskMetrics ) val serTask = serializer.serialize(task) createFakeTaskDescription(serTask) } private def createFakeTaskDescription(serializedTask: ByteBuffer): TaskDescription = { new TaskDescription( taskId = 0, attemptNumber = 0, executorId = "", name = "", index = 0, partitionId = 0, addedFiles = Map[String, Long](), addedJars = Map[String, Long](), properties = new Properties, resources = immutable.Map[String, ResourceInformation](), serializedTask) } private def runTaskAndGetFailReason(taskDescription: TaskDescription): TaskFailedReason = { runTaskGetFailReasonAndExceptionHandler(taskDescription, false)._1 } private def runTaskGetFailReasonAndExceptionHandler( taskDescription: TaskDescription, killTask: Boolean, poll: Boolean = false): (TaskFailedReason, UncaughtExceptionHandler) = { val mockBackend = mock[ExecutorBackend] val mockUncaughtExceptionHandler = mock[UncaughtExceptionHandler] var executor: Executor = null val timedOut = new AtomicBoolean(false) try { executor = new Executor("id", "localhost", SparkEnv.get, userClassPath = Nil, isLocal = true, uncaughtExceptionHandler = mockUncaughtExceptionHandler) // the task will be launched in a dedicated worker thread executor.launchTask(mockBackend, taskDescription) if (killTask) { val killingThread = new Thread("kill-task") { override def run(): Unit = { // wait to kill the task until it has thrown a fetch failure if (ExecutorSuiteHelper.latches.latch1.await(10, TimeUnit.SECONDS)) { // now we can kill the task // but before that, ensure that the executor's metricsPoller is polled if (poll) { executor.metricsPoller.poll() } executor.killAllTasks(true, "Killed task, eg. 
because of speculative execution") } else { timedOut.set(true) } } } killingThread.start() } else { if (ExecutorSuiteHelper.latches != null) { ExecutorSuiteHelper.latches.latch1.await(5, TimeUnit.SECONDS) if (poll) { executor.metricsPoller.poll() } ExecutorSuiteHelper.latches.latch2.countDown() } } eventually(timeout(5.seconds), interval(10.milliseconds)) { assert(executor.numRunningTasks === 0) } assert(!timedOut.get(), "timed out waiting to be ready to kill tasks") } finally { if (executor != null) { executor.stop() } } val orderedMock = inOrder(mockBackend) val statusCaptor = ArgumentCaptor.forClass(classOf[ByteBuffer]) orderedMock.verify(mockBackend) .statusUpdate(meq(0L), meq(TaskState.RUNNING), statusCaptor.capture()) val finalState = if (killTask) TaskState.KILLED else TaskState.FAILED orderedMock.verify(mockBackend) .statusUpdate(meq(0L), meq(finalState), statusCaptor.capture()) // first statusUpdate for RUNNING has empty data assert(statusCaptor.getAllValues().get(0).remaining() === 0) // second update is more interesting val failureData = statusCaptor.getAllValues.get(1) val failReason = SparkEnv.get.closureSerializer.newInstance().deserialize[TaskFailedReason](failureData) (failReason, mockUncaughtExceptionHandler) } } class FetchFailureThrowingRDD(sc: SparkContext) extends RDD[Int](sc, Nil) { override def compute(split: Partition, context: TaskContext): Iterator[Int] = { new Iterator[Int] { override def hasNext: Boolean = true override def next(): Int = { throw new FetchFailedException( bmAddress = BlockManagerId("1", "hostA", 1234), shuffleId = 0, mapId = 0L, mapIndex = 0, reduceId = 0, message = "fake fetch failure" ) } } } override protected def getPartitions: Array[Partition] = { Array(new SimplePartition) } } class SimplePartition extends Partition { override def index: Int = 0 } // NOTE: When instantiating this class, except with throwOOM = false and interrupt = false, // ExecutorSuiteHelper.latches need to be set (not null). class FetchFailureHidingRDD( sc: SparkContext, val input: FetchFailureThrowingRDD, throwOOM: Boolean, interrupt: Boolean) extends RDD[Int](input) { override def compute(split: Partition, context: TaskContext): Iterator[Int] = { val inItr = input.compute(split, context) try { Iterator(inItr.size) } catch { case t: Throwable => if (throwOOM) { // Allow executor metrics to be polled (if necessary) before throwing the OOMError ExecutorSuiteHelper.latches.latch1.countDown() ExecutorSuiteHelper.latches.latch2.await(5, TimeUnit.SECONDS) // scalastyle:off throwerror throw new OutOfMemoryError("OOM while handling another exception") // scalastyle:on throwerror } else if (interrupt) { // make sure our test is setup correctly assert(TaskContext.get().asInstanceOf[TaskContextImpl].fetchFailed.isDefined) // signal we are ready for executor metrics to be polled (if necessary) and for // the task to get killed ExecutorSuiteHelper.latches.latch1.countDown() // then wait for another thread in the test to kill the task -- this latch // is never actually decremented, we just wait to get killed. 
ExecutorSuiteHelper.latches.latch2.await(10, TimeUnit.SECONDS) throw new IllegalStateException("timed out waiting to be interrupted") } else { throw new RuntimeException("User Exception that hides the original exception", t) } } } override protected def getPartitions: Array[Partition] = { Array(new SimplePartition) } } // Helps to test("SPARK-15963") private class ExecutorSuiteHelper { val latch1 = new CountDownLatch(1) val latch2 = new CountDownLatch(1) val latch3 = new CountDownLatch(1) @volatile var taskState: TaskState = _ @volatile var testFailedReason: TaskFailedReason = _ } // Helper for coordinating killing tasks as well as polling executor metrics private object ExecutorSuiteHelper { var latches: ExecutorSuiteHelper = null } private class NonDeserializableTask extends FakeTask(0, 0) with Externalizable { def writeExternal(out: ObjectOutput): Unit = {} def readExternal(in: ObjectInput): Unit = { throw new RuntimeException(NonDeserializableTask.errorMsg) } } private object NonDeserializableTask { val errorMsg = "failure in deserialization" }
bdrillard/spark
core/src/test/scala/org/apache/spark/executor/ExecutorSuite.scala
Scala
apache-2.0
26,987
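The suite above sequences the main test thread and the task's worker thread with pairs of CountDownLatches (see the table in the first test and the ExecutorSuiteHelper latches). A minimal, self-contained sketch of that handshake pattern; the object name and messages are illustrative, not taken from the suite:

import java.util.concurrent.{CountDownLatch, TimeUnit}

object LatchHandshakeSketch {
  def main(args: Array[String]): Unit = {
    val started = new CountDownLatch(1) // worker signals that it has reached the checkpoint
    val proceed = new CountDownLatch(1) // main thread releases the worker afterwards

    val worker = new Thread(new Runnable {
      def run(): Unit = {
        started.countDown()                // analogous to the first statusUpdate
        proceed.await(5, TimeUnit.SECONDS) // park until the main thread has intervened
        println("worker resumes after the main thread's intervention")
      }
    })
    worker.start()

    assert(started.await(5, TimeUnit.SECONDS), "worker never reached the checkpoint")
    // the main thread acts here (the suite calls executor.killAllTasks at this point)
    proceed.countDown()
    worker.join()
  }
}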
package notebook.kernel.pfork import java.io.{EOFException, ObjectInputStream, ObjectOutputStream, File} import java.net._ import java.util.concurrent.atomic.AtomicBoolean import java.util.concurrent.Executors import scala.collection.mutable import scala.collection.JavaConversions._ import scala.collection.mutable.ListBuffer import scala.concurrent._ import duration.Duration import com.sun.org.apache.xpath.internal.functions.FuncTrue import org.apache.commons.exec._ import org.apache.log4j.PropertyConfigurator import org.slf4j.LoggerFactory import com.typesafe.config.{ConfigFactory, Config} import play.api.{Play, Logger} trait ForkableProcess { /** * Called in the remote VM. Can return any useful information to the server through the return * @param args * @return */ def init(args: Seq[String]):String def waitForExit() } /** * I am so sick of this being a thing that gets implemented everywhere. Let's abstract. */ class BetterFork[A <: ForkableProcess : reflect.ClassTag](config:Config, executionContext: ExecutionContext) { private implicit val ec = executionContext import BetterFork._ val processClass = (implicitly[reflect.ClassTag[A]]).runtimeClass def workingDirectory = new File(if (config.hasPath("wd")) config.getString("wd") else ".") def heap: Long = if (config.hasPath("heap")) config.getBytes("heap") else defaultHeap def stack: Long = if (config.hasPath("stack")) config.getBytes("stack") else -1 def permGen: Long = if (config.hasPath("permGen")) config.getBytes("permGen") else -1 def reservedCodeCache: Long = if (config.hasPath("reservedCodeCache")) config.getBytes("reservedCodeCache") else -1 def server: Boolean = true def debugPort: Option[Int] = if (config.hasPath("debug.port")) Some(config.getInt("debug.port")) else None def logLevel: String = if (config.hasPath("log.level")) config.getString("log.level") else "info" def vmArgs:List[String] = if (config.hasPath("vmArgs")) config.getStringList("vmArgs").toList else Nil def classPath: IndexedSeq[String] = defaultClassPath def classPathString = classPath.mkString(File.pathSeparator) def jvmArgs = { val builder = IndexedSeq.newBuilder[String] def ifNonNeg(value: Long, prefix: String) { if (value >= 0) { builder += (prefix + value) } } ifNonNeg(heap, "-Xmx") ifNonNeg(stack, "-Xss") ifNonNeg(permGen, "-XX:MaxPermSize=") ifNonNeg(reservedCodeCache, "-XX:ReservedCodeCacheSize=") if (server) builder += "-server" debugPort.foreach { p => builder ++= IndexedSeq("-Xdebug", "-Xrunjdwp:transport=dt_socket,server=y,suspend=n,address="+p) } builder ++= vmArgs builder.result() } implicit protected def int2SuffixOps(i: Int) = new SuffixOps(i) protected final class SuffixOps(i: Int) { def k: Long = i.toLong << 10 def m: Long = i.toLong << 20 def g: Long = i.toLong << 30 } def execute(args: String*): Future[ProcessInfo] = { /* DK: Bi-directional liveness can be detected via redirected System.in (child), System.out (parent), avoids need for socket... 
*/ val ss = new ServerSocket(0) val cmd = new CommandLine(javaHome + "/bin/java") .addArguments(jvmArgs.toArray) .addArgument(classOf[ChildProcessMain].getName) .addArgument(processClass.getName) .addArgument(ss.getLocalPort.toString) .addArgument(logLevel) .addArguments(args.toArray) Future { log.info("Spawning %s".format(cmd.toString)) // use environment because classpaths can be longer here than as a command line arg val environment = System.getenv + ("CLASSPATH" -> (sys.env.get("HADOOP_CONF_DIR").map(_ + ":").getOrElse("")+classPathString)) val exec = new KillableExecutor val completion = Promise[Int] exec.setWorkingDirectory(workingDirectory) exec.execute(cmd, environment, new ExecuteResultHandler { Logger.info(s"Spawning $cmd") Logger.info(s"With Env $environment") Logger.info(s"In working directory $workingDirectory") def onProcessFailed(e: ExecuteException) { e.printStackTrace } def onProcessComplete(exitValue: Int) { completion.success(exitValue) } }) val socket = ss.accept() serverSockets += socket try { val ois = new ObjectInputStream(socket.getInputStream) val resp = ois.readObject().asInstanceOf[String] new ProcessInfo(() => exec.kill(), resp, completion.future) } catch { case ex:SocketException => throw new ExecuteException("Failed to start process %s".format(cmd), 1, ex) case ex:EOFException => throw new ExecuteException("Failed to start process %s".format(cmd), 1, ex) } } } } class ProcessInfo(killer: () => Unit, val initReturn: String, val completion: Future[Int]) { def kill() { killer() } } object BetterFork { // Keeps server sockets around so they are not GC'd private val serverSockets = new ListBuffer[Socket]() // →→→→→→→→→→→→→ NEEDED WHEN running in SBT/Play ... def defaultClassPath: IndexedSeq[String] = { def urls(cl:ClassLoader, acc:IndexedSeq[String]=IndexedSeq.empty):IndexedSeq[String] = { if (cl != null) { val us = if (!cl.isInstanceOf[URLClassLoader]) { //println(" ----- ") //println(cl.getClass.getSimpleName) acc } else { acc ++ (cl.asInstanceOf[URLClassLoader].getURLs map { u => val f = new File(u.getFile) URLDecoder.decode(f.getAbsolutePath, "UTF8") }) } urls(cl.getParent, us) } else { acc } } val loader = Play.current.classloader val gurls = urls(loader).distinct.filter(!_.contains("logback-classic"))//.filter(!_.contains("sbt/")) gurls } def defaultHeap = Runtime.getRuntime.maxMemory /* Override to expose ability to forcibly kill the process */ private class KillableExecutor extends DefaultExecutor { val killed = new AtomicBoolean(false) setWatchdog(new ExecuteWatchdog(ExecuteWatchdog.INFINITE_TIMEOUT) { override def start(p: Process) { if (killed.get()) p.destroy() } }) def kill() { if (killed.compareAndSet(false, true)) Option(getExecutorThread()) foreach(_.interrupt()) } } private lazy val javaHome = System.getProperty("java.home") private lazy val log = LoggerFactory.getLogger(getClass()) private[pfork] def main(args: Array[String]) { val className = args(0) val parentPort = args(1).toInt val logLevel = args(2) val kernelId = args(3) val path = args(4) val remainingArgs = args.drop(5).toIndexedSeq val propLog = new java.util.Properties() propLog.load(getClass().getResourceAsStream("/log4j.subprocess.properties")) val cleanPath = path.replaceAll("/", "\\\\").replaceAll("\"", "").replaceAll("'", "") propLog.setProperty("log4j.appender.rolling.File", s"logs/sn-session-$kernelId-$cleanPath.log") propLog.setProperty("log4j.rootLogger", s"$logLevel, rolling") PropertyConfigurator.configure(propLog) log.info("Remote process starting") val socket = new 
Socket("127.0.0.1", parentPort) val hostedClass = Class.forName(className).newInstance().asInstanceOf[ForkableProcess] val result = hostedClass.init(remainingArgs) val oos = new ObjectOutputStream(socket.getOutputStream) oos.writeObject(result) oos.flush() val executorService = Executors.newFixedThreadPool(10) implicit val ec = ExecutionContext.fromExecutorService(executorService) val parentDone = Future { socket.getInputStream.read() } val localDone = Future{ hostedClass.waitForExit() } val done = Future.firstCompletedOf(Seq(parentDone, localDone)) try { Await.result(done, Duration.Inf) } finally { log.warn("Parent process stopped; exiting.") sys.exit(0) } } }
jayfans3/spark-notebook
modules/subprocess/src/main/scala/notebook/kernel/pfork/BetterFork.scala
Scala
apache-2.0
7,982
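BetterFork spawns a child JVM around any ForkableProcess that has a no-argument constructor (it is instantiated via Class.forName(...).newInstance() in the child). A rough sketch of such an implementation; the class and its behaviour are hypothetical:

import java.util.concurrent.CountDownLatch

import notebook.kernel.pfork.ForkableProcess

// Hypothetical process: the name and behaviour are made up for illustration.
class EchoProcess extends ForkableProcess {
  private val stopped = new CountDownLatch(1)

  // Runs inside the child VM; the returned string travels back to the parent over the socket.
  override def init(args: Seq[String]): String = {
    println(s"child started with args: ${args.mkString(", ")}")
    s"echo:${args.mkString(",")}"
  }

  // Keeps the child VM alive until something releases the latch (or the parent kills it).
  override def waitForExit(): Unit = stopped.await()
}

It would then be spawned with something along the lines of new BetterFork[EchoProcess](config, executionContext).execute("some-arg"), which yields a Future[ProcessInfo] whose initReturn carries the string returned by init.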
/* * Copyright © 2020 University of Texas at Arlington * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package edu.uta.diablo object Normalizer { import AST._ import Translator.bag val debug = false def print ( e: Expr ): String = edu.uta.diql.core.Pretty.print(e.toString) /** rename the variables in the lambda abstraction to prevent name capture */ def renameVars ( f: Lambda ): Lambda = f match { case Lambda(p,b) => val m = patvars(p).map((_,newvar)) Lambda(m.foldLeft(p){ case (r,(from,to)) => subst(from,to,r) }, m.foldLeft(b){ case (r,(from,to)) => subst(from,Var(to),r) }) } def isSimple ( e: Expr ): Boolean = e match { case Var(_) => true case StringConst(_) => true case CharConst(_) => true case IntConst(_) => true case LongConst(_) => true case DoubleConst(_) => true case BoolConst(_) => true case Nth(u,_) => isSimple(u) case Project(u,_) => isSimple(u) case VectorIndex(u,i) => isSimple(u) && isSimple(i) case MatrixIndex(u,i,j) => isSimple(u) && isSimple(i) && isSimple(j) case Tuple(cs) => cs.isEmpty || cs.map(isSimple).reduce(_&&_) case Record(cs) => cs.isEmpty || cs.map{ case (_,u) => isSimple(u) }.reduce(_&&_) case Collection(_,cs) => cs.isEmpty || cs.map(isSimple).reduce(_&&_) case Empty(_) => true case Elem(_,x) => isSimple(x) case Merge(x,y) => isSimple(x) && isSimple(y) case ExternalVar(_,_) => true case _ => false } def freeEnv ( p: Pattern, env: Map[String,Expr] ): Map[String,Expr] = env.filter(x => !capture(x._1,p)) def bindEnv ( p: Pattern, e: Expr ): Map[String,Expr] = (p,e) match { case (TuplePat(ps),Tuple(ts)) => (ps zip ts).map{ case (q,x) => bindEnv(q,x) }.reduce(_++_) case (TuplePat(ps),u) => ps.zipWithIndex.map{ case (q,i) => bindEnv(q,Nth(u,i+1)) }.reduce(_++_) case (VarPat(v),_) => Map(v->e) case _ => Map() } def substE ( e: Expr, env: Map[String,Expr] ): Expr = env.foldLeft[Expr](e) { case (r,(v,u)) => subst(v,u,r) } def substP ( p: Pattern, env: Map[String,String] ): Pattern = p match { case VarPat(v) => if (env.contains(v)) VarPat(env(v)) else p case TuplePat(ts) => TuplePat(ts.map(substP(_,env))) case _ => p } def comprVars ( qs: List[Qualifier] ): List[String] = qs.flatMap { case Generator(p,_) => patvars(p) case LetBinding(p,_) => patvars(p) case GroupByQual(p,_) => patvars(p) case _ => Nil } def notGrouped ( qs: List[Qualifier] ): Boolean = qs.forall{ case GroupByQual(_,_) => false; case _ => true } def notGrouped ( p: Pattern, m: Monoid, head: Expr, qs: List[Qualifier] ): Boolean = qs match { case GroupByQual(gp,ge)::r if gp == p => notGrouped(p,m,head,r) case GroupByQual(gp,_)::r => patvars(p).map( s => occurrences(s,Comprehension(m,head,r)) ).sum == 0 case _::r => notGrouped(p,m,head,r) case Nil => true } def renameVars ( e: Comprehension ): Comprehension = e match { case Comprehension(m,h,qs) => val vs = comprVars(qs) val env = vs.map(_ -> newvar).toMap val enve = env.map{ case (v,w) => (v,Var(w)) } val nqs = qs.map { case Generator(p,u) => Generator(substP(p,env),substE(u,enve)) case LetBinding(p,u) => LetBinding(substP(p,env),substE(u,enve)) case GroupByQual(p,k) 
=> GroupByQual(substP(p,env),substE(k,enve)) case Predicate(u) => Predicate(substE(u,enve)) } Comprehension(m,substE(h,enve),nqs) } /** Normalize a comprehension */ def normalize ( m: Monoid, head: Expr, qs: List[Qualifier], env: Map[String,Expr], opts: Map[String,Expr] ): List[Qualifier] = qs match { case Nil => List(LetBinding(VarPat("@result"),normalize(substE(head,env)))) case Generator(p,c@Comprehension(_,_,s))::r if notGrouped(s) => val Comprehension(_,h,s) = renameVars(c) normalize(m,head,(s:+LetBinding(p,h))++r,env,opts) case Generator(p,Elem(_,u))::r => normalize(m,head,LetBinding(p,u)::r,env,opts) case Generator(_,Empty(_))::r => Nil case Generator(p,u)::r if m == BaseMonoid("option") && occurrences(patvars(p),Comprehension(m,head,r)) == 0 && notGrouped(r) => normalize(m,head,r,env,opts) case Generator(p@VarPat(v),u@Var(w))::r if (u.tpe match { case ParametricType("option",_) => true; case _ => false }) => if (opts.contains(w)) normalize(m,head,r,freeEnv(p,env)+((v,opts(w))),opts) else Generator(p,substE(u,env))::normalize(m,head,r,freeEnv(p,env),opts+(w->Var(v))) case Generator(p,u)::r => Generator(p,normalize(substE(u,env)))::normalize(m,head,r,freeEnv(p,env),opts) case LetBinding(TuplePat(ps),Tuple(es))::r => normalize(m,head,(ps zip es).map{ case (p,e) => LetBinding(p,e) }++r,env,opts) case LetBinding(p,u)::r => if (notGrouped(p,m,head,r)) normalize(m,head,r,bindEnv(p,normalize(substE(u,env)))++freeEnv(p,env),opts) else LetBinding(p,normalize(substE(u,env)))::normalize(m,head,r,env,opts) case Predicate(BoolConst(false))::r => Nil case Predicate(BoolConst(true))::r => normalize(m,head,r,env,opts) case Predicate(u)::r => Predicate(substE(u,env))::normalize(m,head,r,env,opts) case GroupByQual(p,u)::r => // lift all env vars except the group-by pattern vars val nenv = freeEnv(p,env).map{ case (v,x) => (v,Elem(bag,x)) } GroupByQual(p,normalize(substE(u,env)))::normalize(bag,head,r,nenv,Map()) } /** normalize an expression */ def normalize ( e: Expr ): Expr = e match { case Apply(Lambda(p@VarPat(v),b),u) => val nu = normalize(u) val nb = normalize(b) normalize(if (isSimple(nu) || occurrences(v,nb) <= 1) subst(Var(v),nu,nb) else Let(p,nu,nb)) case Let(VarPat(v),u,b) if isSimple(u) || occurrences(v,b) <= 1 => normalize(subst(Var(v),u,b)) case Comprehension(m,h,List()) => Elem(m,normalize(h)) case Comprehension(m,h,Predicate(p)::qs) => IfE(p,Comprehension(m,h,qs),Empty(m)) case Comprehension(m,h,Generator(p,c@Comprehension(_,_,_))::qs) => val Comprehension(_,h2,s) = renameVars(c) normalize(Comprehension(m,h,(s:+LetBinding(p,h2))++qs)) case Comprehension(m,h,qs) => normalize(m,h,qs,Map(),Map()) match { case nqs:+LetBinding(VarPat("@result"),nh) => val nc = Comprehension(m,nh,nqs) if (nc == e) apply(nc,normalize) else normalize(nc) case _ => Empty(m) } case reduce(m,Elem(_,x)) => normalize(x) case reduce(m,Empty(_)) => Empty(m) case IfE(BoolConst(true),e1,_) => normalize(e1) case IfE(BoolConst(false),_,e2) => normalize(e2) case Call(a,List(Tuple(s))) => val pat = """_(\\d+)""".r a match { case pat(x) if x.toInt <= s.length => normalize(s(x.toInt-1)) case _ => Call(a,List(Tuple(s.map(normalize)))) } case Call("!",List(Call("||",List(x,y)))) => normalize(Call("&&",List(Call("!",List(x)),Call("!",List(y))))) case Call("!",List(Call("&&",List(x,y)))) => normalize(Call("||",List(Call("!",List(x)),Call("!",List(y))))) case Call("!",List(Call("!",List(x)))) => normalize(x) case Call("!",List(Call("!=",List(x,y)))) => normalize(Call("==",List(x,y))) case Call("&&",List(BoolConst(b),x)) => if 
(b) normalize(x) else BoolConst(false) case Call("&&",List(x,BoolConst(b))) => if (b) normalize(x) else BoolConst(false) case Call("||",List(BoolConst(b),x)) => if (b) BoolConst(true) else normalize(x) case Call("||",List(x,BoolConst(b))) => if (b) BoolConst(true) else normalize(x) case Nth(Tuple(es),n) => normalize(es(n-1)) case Project(Record(es),a) => normalize(es(a)) case _ => apply(e,normalize) } def normalizeAll ( e: Expr ): Expr = { var olde = e var ne = olde do { olde = ne ne = normalize(ne) } while (olde != ne) ne } }
fegaras/DIQL
src/diablo/scala/edu/uta/diablo/Normalizer.scala
Scala
apache-2.0
9,459
package sorm.core import org.scalatest.FunSuite import org.scalatest.matchers.ShouldMatchers import org.junit.runner.RunWith import org.scalatest.junit.JUnitRunner @RunWith(classOf[JUnitRunner]) class PathSuite extends FunSuite with ShouldMatchers { import Path._ test("pathAndRemainder failure"){ pending } test("pathAndRemainder braced parsing"){ partAndRemainder("(asdf)") should be === (Part.Braced("asdf"), "") partAndRemainder("(asdf).sdf") should be === (Part.Braced("asdf"), ".sdf") partAndRemainder("(342).sdf") should be === (Part.Braced("342"), ".sdf") } test("pathAndRemainder dotted parsing"){ partAndRemainder("sdf") should be === (Part.Dotted("sdf"), "") partAndRemainder("sdf.dksfje") should be === (Part.Dotted("sdf"), ".dksfje") partAndRemainder(".sdf.dksfje") should be === (Part.Dotted("sdf"), ".dksfje") } }
sorm/sorm
src/test/scala/sorm/core/PathSuite.scala
Scala
mit
879
/* * Copyright (c) 2002-2018 "Neo Technology," * Network Engine for Objects in Lund AB [http://neotechnology.com] * * This file is part of Neo4j. * * Neo4j is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 3 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ package org.neo4j.cypher.internal.compiler.v2_3.ast.convert.plannerQuery import org.neo4j.cypher.internal.compiler.v2_3.ast.convert.plannerQuery.ClauseConverters._ import org.neo4j.cypher.internal.compiler.v2_3.planner._ import org.neo4j.cypher.internal.frontend.v2_3.{Foldable, ast} import org.neo4j.cypher.internal.frontend.v2_3.ast._ object StatementConverters { implicit class SingleQueryPartConverter(val q: SingleQuery) { def asPlannerQueryBuilder: PlannerQueryBuilder = q.clauses.foldLeft(PlannerQueryBuilder.empty) { case (acc, clause) => clause.addToLogicalPlanInput(acc) } } val NODE_BLACKLIST: Set[Class[_ <: ASTNode]] = Set( classOf[And], classOf[Or], // classOf[ReturnAll], classOf[UnaliasedReturnItem] ) import Foldable._ def findBlacklistedNodes(node: AnyRef): Seq[ASTNode] = { node.treeFold(Seq.empty[ASTNode]) { case node: ASTNode if NODE_BLACKLIST.contains(node.getClass) => (acc, children) => children(acc :+ node) } } implicit class QueryConverter(val query: Query) { def asUnionQuery: UnionQuery = { val nodes = findBlacklistedNodes(query) require(nodes.isEmpty, "Found a blacklisted AST node: " + nodes.head.toString) query match { case Query(None, queryPart: SingleQuery) => val builder = queryPart.asPlannerQueryBuilder UnionQuery(Seq(builder.build()), distinct = false, builder.returns) case Query(None, u: ast.Union) => val queries: Seq[SingleQuery] = u.unionedQueries val distinct = u match { case _: UnionAll => false case _: UnionDistinct => true } val plannedQueries: Seq[PlannerQueryBuilder] = queries.reverseMap(x => x.asPlannerQueryBuilder) //UNION requires all queries to return the same identifiers assert(plannedQueries.nonEmpty) val returns = plannedQueries.head.returns assert(plannedQueries.forall(_.returns == returns)) UnionQuery(plannedQueries.map(_.build()), distinct, returns) case _ => throw new CantHandleQueryException } } } }
HuangLS/neo4j
community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/ast/convert/plannerQuery/StatementConverters.scala
Scala
apache-2.0
2,961
object Macros { def foo[U <: String]: Unit = macro Impls.foo[U] } object Test extends App { import Macros._ foo[Int] }
yusuke2255/dotty
tests/untried/neg/macro-invalidusage-badbounds/Macros_Test_2.scala
Scala
bsd-3-clause
126
/* * Copyright 2014–2018 SlamData Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package quasar.physical.mongodb import slamdata.Predef._ import quasar._ import scala.collection.immutable.ListMap import scalaz._, Scalaz._ class BsonCodecSpecs_1_0 extends BsonCodecSpecs(BsonVersion.`1.0`) class BsonCodecSpecs_1_1 extends BsonCodecSpecs(BsonVersion.`1.1`) abstract class BsonCodecSpecs(v: BsonVersion) extends quasar.Qspec { import BsonCodec._ import DataGenerators._ implicit val ShowData = new Show[Data] { override def show(v: Data) = Cord(v.toString) } implicit val ShowBson = new Show[Bson] { override def show(v: Bson) = Cord(v.toString) } "fromData" should { "fail with bad Id" in { fromData(v, Data.Id("invalid")) must beLeftDisjunction } "of double does convert to Bson.Dec not Bson.Dec128" >> prop { (d: Double) => fromData(v, Data.Dec(d)) must_== \\/-(Bson.Dec(d)) } val resType = if (v lt BsonVersion.`1.1`) "Bson.Dec" else "Bson.Dec128" s"of bigdecimal that do not fit a double convert to $resType" in { val b = BigDecimal("123456E789") val r = v match { case BsonVersion.`1.0` => Bson.Dec(Double.PositiveInfinity) case BsonVersion.`1.1` => Bson.Dec128(b) } fromData(v, Data.Dec(b)) must_== \\/-(r) } "be isomorphic for preserved values" >> prop { (data: Data) => // (fromData >=> toData) == identity, except for values that are known not to be preserved import Data._ def preserved(d: Data): Boolean = d match { case Int(x) => x.isValidLong case Interval(i) => false case OffsetDateTime(_) => false case OffsetDate(_) => false case OffsetTime(_) => false case LocalDateTime(_) => false case LocalDate(_) => false case LocalTime(_) => false case Arr(value) => value.forall(preserved) case Obj(value) => value.values.forall(preserved) case _ => true } preserved(data) ==> { fromData(v, data).map(toData) must beRightDisjunction(data) } } implicit val arbitraryBson = BsonGen.arbBson(v) "be 'semi'-isomorphic for all Bson values" >> prop { (bson: Bson) => // (toData >=> fromData >=> toData) == toData val data = toData(bson) fromData(v, data).map(toData _) must beRightDisjunction(data) } } "toData" should { "convert MinKey to NA" in { toData(Bson.MinKey) must_== Data.NA } "be 'semi'-isomorphic for all Data values" >> prop { (data: Data) => // (fromData >=> toData >=> fromData) == fromData // Which is to say, every Bson value that results from conversion // can be converted to Data and back to Bson, recovering the same // Bson value. val r = fromData(v, data) r.isRight ==> { r.flatMap(bson => fromData(v, toData(bson))) must_== r } } } "round trip to repr (all Data types)" >> prop { (data: Data) => val r = fromData(v, data) r.isRight ==> { val wrapped = r.map(bson => Bson.Doc(ListMap("value" -> bson))) wrapped.map(w => Bson.fromRepr(w.repr)) must_== wrapped } } }
jedesah/Quasar
mongodb/src/test/scala/quasar/physical/mongodb/bsoncodec.scala
Scala
apache-2.0
3,789
package TAPL2.SimpleBool import TAPL2.TyArith.TypedBool import TAPL2.Untyped.VarApp import TAPL2.Util._ import TAPL2.{Term, Ty} case class TmVar(i: String) extends Term case class TmAbs(v: String, ty: Ty, t: Term) extends Term case class TmApp(t1: Term, t2: Term) extends Term case object TmTrue extends Term case object TmFalse extends Term case class TmIf(cond: Term, t1: Term, t2: Term) extends Term case class TyArr(t1: Ty, t2: Ty) extends Ty case object TyBool extends Ty /* <5> */ object Typed { trait Parser[F <: {val pE : PackratParser[Term]; val pT : PackratParser[Ty]}] extends VarApp.Parser[F] { lexical.delimiters += ("\\\\", ".", "(", ")", ":", "->") private val pAbsE: (=> F) => PackratParser[Term] = l => { lazy val e = l.pE lazy val t = l.pT ("\\\\" ~> lcid) ~ (":" ~> t) ~ ("." ~> e) ^^ { case x ~ t0 ~ e0 => TmAbs(x, t0, e0) } ||| "(" ~> e <~ ")" } val pTypedE: (=> F) => PackratParser[Term] = l => pVarAppE(l) ||| pAbsE(l) val pTypedT: (=> F) => PackratParser[Ty] = l => { lazy val t = l.pT t ~ ("->" ~> t) ^^ { case t1 ~ t2 => TyArr(t1, t2) } ||| "(" ~> t <~ ")" } } } object SimpleBool { trait Parser[L <: {val pE : PackratParser[Term]; val pT : PackratParser[Ty]}] extends Typed.Parser[L] with TypedBool.Parser[L] { val pSimpleBoolE: (=> L) => PackratParser[Term] = l => pTypedE(l) ||| pTypedBoolE(l) val pSimpleBoolT: (=> L) => PackratParser[Ty] = l => pTypedT(l) ||| pTypedBoolT(l) } } object TestSimpleBool { class List[E, T](pe: PackratParser[E], pt: PackratParser[T]) { val pE = pe val pT = pt } def parseAndPrint(inp: String) = { def parser(l: => List[Term, Ty]): List[Term, Ty] = { val lang = new SimpleBool.Parser[List[Term, Ty]] {} new List[Term, Ty](lang.pSimpleBoolE(l), lang.pSimpleBoolT(l)) } val t = phrase(fix(parser).pE)(new lexical.Scanner(inp)) if (t.successful) println(t.get) else scala.sys.error(t.toString) } }
hy-zhang/parser
Scala/Old/TAPL2/SimpleBool/SimpleBool.scala
Scala
bsd-3-clause
2,001
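A one-line way to exercise the combined parser above, assuming the TypedBool parser mixed in from TAPL2.TyArith (not shown in this record) accepts the usual true/false literals and if ... then ... else ... concrete syntax:

import TAPL2.SimpleBool.TestSimpleBool

// Prints the parsed Term (e.g. a TmIf node) or aborts with the parse error.
TestSimpleBool.parseAndPrint("if true then false else true")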
package com.gravity.goose

import scala.io.Source
import sys.process._

object FetchMany {

  def main(args: Array[String]) {

    try {
      val config: Configuration = new Configuration
      config.enableImageFetching = true
      config.imagemagickConvertPath = "/usr/bin/convert"
      config.imagemagickIdentifyPath = "/usr/bin/identify"
      config.localStoragePath = "/tmp/goose"
      config.minBytesForImages = 4500

      val goose = new Goose(config)

      var i = 0
      for(line <- Source.fromFile(args(0) + "urllist.txt").getLines()) {
        val out = new java.io.FileWriter(args(0) + i)
        val url: String = line
        println("FETCH: Goose is fetching into " + i + ": " + url)
        var done: Boolean = false
        for(attempt <- 1 to 5) {
          try {
            if(!done) {
              println("FETCH: -- Attempt " + attempt)
              val article = goose.extractContent(url)
              println("FETCH: -- Got: " + article.title)
              out.write(article.cleanedArticleText + "\n" + article.topImage.imageSrc + "\n" + article.title)
              done = true
            }
          } catch {
            case e: Exception => {
              e.printStackTrace()
            }
          }
        }
        out.close
        i = i + 1
      }
    } catch {
      case e: Exception => {
        e.printStackTrace();
      }
    }
  }
}
raisercostin/goose
src/main/scala/com/gravity/goose/FetchMany.scala
Scala
apache-2.0
1,403
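The batch fetcher above is built from a handful of Goose calls; a stripped-down, single-URL sketch using only the fields that appear in it (the URL is a placeholder):

import com.gravity.goose.{Configuration, Goose}

object FetchOne {
  def main(args: Array[String]): Unit = {
    val config = new Configuration
    config.enableImageFetching = false // text-only run, so no ImageMagick paths needed

    val goose = new Goose(config)
    val article = goose.extractContent("http://example.com/some-article") // placeholder URL
    println(article.title)
    println(article.cleanedArticleText)
  }
}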
package core.exception

/**
  * Permission error
  *
  * Created by zephyre on 1/22/16.
  */
class ForbiddenException(message: String, cause: Throwable) extends RuntimeException(message, cause) {
  def this() = this(null, null)

  def this(message: String) = this(message, null)

  def this(cause: Throwable) = this(null, cause)
}

object ForbiddenException {
  def apply(message: String, cause: Throwable) = new ForbiddenException(message, cause)

  def apply(message: String) = new ForbiddenException(message)

  def apply(cause: Throwable) = new ForbiddenException(cause)

  def apply() = new ForbiddenException()
}
Lvxingpai/Hanse
app/core/exception/ForbiddenException.scala
Scala
apache-2.0
612
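A small usage sketch for the overloaded apply methods above; the surrounding service logic is invented for illustration:

import core.exception.ForbiddenException

object AccessGuard {
  // Fail fast when a caller touches somebody else's resource.
  def requireOwner(userId: Long, ownerId: Long): Unit =
    if (userId != ownerId)
      throw ForbiddenException(s"user $userId may not modify resources owned by $ownerId")

  // Wrap a lower-level failure, preserving it as the cause.
  def guarded[A](op: => A): A =
    try op
    catch { case e: SecurityException => throw ForbiddenException("access denied", e) }
}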
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.mllib.clustering import scala.collection.JavaConverters._ import org.json4s._ import org.json4s.JsonDSL._ import org.json4s.jackson.JsonMethods._ import org.apache.spark.annotation.Since import org.apache.spark.api.java.JavaRDD import org.apache.spark.mllib.linalg.Vector import org.apache.spark.mllib.pmml.PMMLExportable import org.apache.spark.mllib.util.{Loader, Saveable} import org.apache.spark.rdd.RDD import org.apache.spark.SparkContext import org.apache.spark.sql.SQLContext import org.apache.spark.sql.Row /** * A clustering model for K-means. Each point belongs to the cluster with the closest center. */ @Since("0.8.0") class KMeansModel @Since("1.1.0") (@Since("1.0.0") val clusterCenters: Array[Vector]) extends Saveable with Serializable with PMMLExportable { /** * A Java-friendly constructor that takes an Iterable of Vectors. */ @Since("1.4.0") def this(centers: java.lang.Iterable[Vector]) = this(centers.asScala.toArray) /** * Total number of clusters. */ @Since("0.8.0") def k: Int = clusterCenters.length /** * Returns the cluster index that a given point belongs to. */ @Since("0.8.0") def predict(point: Vector): Int = { KMeans.findClosest(clusterCentersWithNorm, new VectorWithNorm(point))._1 } /** * Maps given points to their cluster indices. */ @Since("1.0.0") def predict(points: RDD[Vector]): RDD[Int] = { val centersWithNorm = clusterCentersWithNorm val bcCentersWithNorm = points.context.broadcast(centersWithNorm) points.map(p => KMeans.findClosest(bcCentersWithNorm.value, new VectorWithNorm(p))._1) } /** * Maps given points to their cluster indices. */ @Since("1.0.0") def predict(points: JavaRDD[Vector]): JavaRDD[java.lang.Integer] = predict(points.rdd).toJavaRDD().asInstanceOf[JavaRDD[java.lang.Integer]] /** * Return the K-means cost (sum of squared distances of points to their nearest center) for this * model on the given data. 
*/ @Since("0.8.0") def computeCost(data: RDD[Vector]): Double = { val centersWithNorm = clusterCentersWithNorm val bcCentersWithNorm = data.context.broadcast(centersWithNorm) data.map(p => KMeans.pointCost(bcCentersWithNorm.value, new VectorWithNorm(p))).sum() } private def clusterCentersWithNorm: Iterable[VectorWithNorm] = clusterCenters.map(new VectorWithNorm(_)) @Since("1.4.0") override def save(sc: SparkContext, path: String): Unit = { KMeansModel.SaveLoadV1_0.save(sc, this, path) } override protected def formatVersion: String = "1.0" } @Since("1.4.0") object KMeansModel extends Loader[KMeansModel] { @Since("1.4.0") override def load(sc: SparkContext, path: String): KMeansModel = { KMeansModel.SaveLoadV1_0.load(sc, path) } private case class Cluster(id: Int, point: Vector) private object Cluster { def apply(r: Row): Cluster = { Cluster(r.getInt(0), r.getAs[Vector](1)) } } private[clustering] object SaveLoadV1_0 { private val thisFormatVersion = "1.0" private[clustering] val thisClassName = "org.apache.spark.mllib.clustering.KMeansModel" def save(sc: SparkContext, model: KMeansModel, path: String): Unit = { val sqlContext = SQLContext.getOrCreate(sc) import sqlContext.implicits._ val metadata = compact(render( ("class" -> thisClassName) ~ ("version" -> thisFormatVersion) ~ ("k" -> model.k))) sc.parallelize(Seq(metadata), 1).saveAsTextFile(Loader.metadataPath(path)) val dataRDD = sc.parallelize(model.clusterCenters.zipWithIndex).map { case (point, id) => Cluster(id, point) }.toDF() dataRDD.write.parquet(Loader.dataPath(path)) } def load(sc: SparkContext, path: String): KMeansModel = { implicit val formats = DefaultFormats val sqlContext = SQLContext.getOrCreate(sc) val (className, formatVersion, metadata) = Loader.loadMetadata(sc, path) assert(className == thisClassName) assert(formatVersion == thisFormatVersion) val k = (metadata \ "k").extract[Int] val centroids = sqlContext.read.parquet(Loader.dataPath(path)) Loader.checkSchema[Cluster](centroids.schema) val localCentroids = centroids.map(Cluster.apply).collect() assert(k == localCentroids.size) new KMeansModel(localCentroids.sortBy(_.id).map(_.point)) } } }
chenc10/Spark-PAF
mllib/src/main/scala/org/apache/spark/mllib/clustering/KMeansModel.scala
Scala
apache-2.0
5,195
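A sketch of how the public methods above are typically driven, in spark-shell style; the SparkContext setup, the training call (KMeans.train from the same MLlib package) and the save path are assumptions for illustration:

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.mllib.clustering.{KMeans, KMeansModel}
import org.apache.spark.mllib.linalg.Vectors

val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("kmeans-sketch"))
val points = sc.parallelize(Seq(Vectors.dense(0.0, 0.0), Vectors.dense(9.0, 9.0)))

val model: KMeansModel = KMeans.train(points, 2, 10) // k = 2, maxIterations = 10 (assumed trainer API)
println(model.predict(Vectors.dense(0.1, 0.1)))      // cluster index of a single point
println(model.computeCost(points))                   // sum of squared distances to nearest centers

model.save(sc, "/tmp/kmeans-model")                  // goes through SaveLoadV1_0 shown above
val reloaded = KMeansModel.load(sc, "/tmp/kmeans-model")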
package com.nthportal.versions package object v2 { val V = Version val EV = ExtendedVersion }
NthPortal/versions
src/main/scala/com/nthportal/versions/v2/package.scala
Scala
apache-2.0
100
package org.leialearns.logic.utilities import scala.util.control.Breaks import org.scalatest.FunSuite import org.slf4j.LoggerFactory import java.io.{IOException, StringReader} class TestPrefixFree extends FunSuite { val logger = LoggerFactory.getLogger(getClass) def singlePrefixEncodeBigInteger(n: BigInt) { val encoded = PrefixFreeBigInt.prefixEncode(n) val encodedJava = PrefixFreeBigInt.prefixEncode(n.bigInteger) assert(encodedJava == encoded) logger.info(s"Prefix-free: $n: [$encoded]") val encodedReader = new StringReader(encoded) val decoded = PrefixFreeBigInt.prefixDecode(encodedReader) intercept[IOException] { PrefixFreeBigInt.readBit(encodedReader) } assert(n == decoded) val bitReader = new StringReader(encoded) val loop = new Breaks var i = 0 loop.breakable { while (true) { try { PrefixFreeBigInt.readBit(bitReader) i += 1 } catch { case exception: IOException => loop.break() } } } val length = PrefixFreeBigInt.descriptionLength(n) assert(i == length) } test("Composition of prefixDecode and prefixEncode should be the identity function") { for (i <- 0 to 40) { singlePrefixEncodeBigInteger(i) } for (i <- 1 to 16) { val n = BigInt.int2bigInt(2).pow(i) - 1 val m = n - 1 singlePrefixEncodeBigInteger(m) singlePrefixEncodeBigInteger(n) } } }
jeroenvanmaanen/leia
prefixencoding/src/test/java/org/leialearns/logic/utilities/TestPrefixFree.scala
Scala
lgpl-2.1
1,470
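A direct usage sketch of the PrefixFreeBigInt calls the test exercises; the import path assumes the utility lives next to the test in org.leialearns.logic.utilities:

import java.io.StringReader

import org.leialearns.logic.utilities.PrefixFreeBigInt

val n: BigInt = 42
val encoded = PrefixFreeBigInt.prefixEncode(n)
println(s"$n encodes to [$encoded] in ${PrefixFreeBigInt.descriptionLength(n)} bits")

val decoded = PrefixFreeBigInt.prefixDecode(new StringReader(encoded))
assert(decoded == n) // the round-trip identity the test above checks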
package chess class SituationTest extends ChessTest { "a game" should { "detect" in { "check" in { "by rook" in { (""" K r """ as White).check must beTrue } "by knight" in { (""" n K """ as White).check must beTrue } "not" in { (""" n K """ as White).check must beFalse } } "check mate" in { "by rook" in { (""" PP K r """ as White).checkMate must beTrue } "by knight" in { (""" PPn KR """ as White).checkMate must beTrue } "not" in { (""" n K """ as White).checkMate must beFalse } } "stale mate" in { "stuck in a corner" in { (""" prr K """ as White).staleMate must beTrue } "not" in { (""" b K """ as White).staleMate must beFalse } } } } }
cxd4/scalachess
src/test/scala/SituationTest.scala
Scala
mit
913
/* __ *\\ ** ________ ___ / / ___ __ ____ Scala.js API ** ** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL ** ** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-lang.org/ ** ** /____/\\___/_/ |_/____/_/ | |__/ /____/ ** ** |/____/ ** \\* */ package scala.scalajs.js.timers import scala.scalajs.js /** * <span class="badge badge-non-std" style="float: right;">Non-Standard</span> * Raw JavaScript timer methods. * * The methods on this object expose the raw JavaScript methods for timers. In * general it is more advisable to use the methods directly defined on * [[timers]] as they are more Scala-like. */ @js.native object RawTimers extends js.GlobalScope { /** Schedule [[handler]] for execution in [[interval]] milliseconds. * * @param handler the function to call after [[interval]] has passed * @param interval duration in milliseconds to wait * @return A handle that can be used to cancel the timeout by passing it * to [[clearTimeout]]. */ def setTimeout(handler: js.Function0[Any], interval: Double): SetTimeoutHandle = js.native /** Cancel a timeout execution * @param handle The handle returned by [[setTimeout]] */ def clearTimeout(handle: SetTimeoutHandle): Unit = js.native /** Schedule [[handler]] for repeated execution every [[interval]] * milliseconds. * * @param handler the function to call after each [[interval]] * @param interval duration in milliseconds between executions * @return A handle that can be used to cancel the interval by passing it * to [[clearInterval]]. */ def setInterval(handler: js.Function0[Any], interval: Double): SetIntervalHandle = js.native /** Cancel an interval execution * @param handle The handle returned by [[setInterval]] */ def clearInterval(handle: SetIntervalHandle): Unit = js.native }
CapeSepias/scala-js
library/src/main/scala/scala/scalajs/js/timers/RawTimers.scala
Scala
bsd-3-clause
2,144
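A short Scala.js sketch exercising the four raw calls documented above; the durations and log messages are arbitrary:

import scala.scalajs.js.timers.{RawTimers, SetTimeoutHandle}

// Fire once after 1000 ms.
val handle: SetTimeoutHandle = RawTimers.setTimeout(() => println("one second elapsed"), 1000)

// ...or cancel it again before it fires.
RawTimers.clearTimeout(handle)

// Repeat every 500 ms until cleared.
val ticker = RawTimers.setInterval(() => println("tick"), 500)
RawTimers.clearInterval(ticker)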
package sample { package lib { import scala.xml.{Text, NodeSeq} import net.liftweb.common.{Box,Full,Empty,Failure} import net.liftweb.util.NamedPF import net.liftweb.util.Helpers._ import net.liftweb.http.{SHtml,S,RewriteRequest,RewriteResponse,ParsePath} import net.liftweb.sitemap.Loc import net.liftweb.mapper.{OrderBy,Ascending,By} import net.liftweb.textile.TextileParser import net.liftweb.textile.TextileParser.WikiURLInfo import sample.model.WikiEntry /** * A wiki location * * @param page - the name of the page * @param edit - are we viewing or editing the page? */ case class Page(title: String, edit: Boolean) { /** * Get the underly database record for this page. * When requested, the page will go to the database and look * for an entry who's name matches that passed as the * paramerter to the Page instance. */ lazy val data: WikiEntry = WikiEntry.find(By(WikiEntry.name, title)) openOr WikiEntry.create.name(title) } /** * The WikiStuff object that provides menu, URL rewriting, * and snippet support for the page that displays wiki contents */ object Wiki extends Loc[Page] { object AllLoc extends Page("all", false) // the name of the page def name = "wiki" // the default parameters (used for generating the menu listing) def defaultValue = Full(Page("HomePage", false)) def params = Nil /** * Is the current page an "edit" or "view"? * This confusses most newbies, as its not always clear * where these values come from. * As this Loc defines its own RewritePF, it added extra S.params * to the incoming request, which is what this requestValue is accessing. */ def currentEdit = requestValue.is.map(_.edit) openOr false /** * Check for page-specific snippets and do appropriate dispatching */ override val snippets: SnippetTest = { case ("wiki", Full(AllLoc)) => showAll _ case ("wiki", Full(wp @ Page(_ , true))) => editRecord(wp.data) case ("wiki", Full(wp @ Page(_ , false))) if !wp.data.saved_? => editRecord(wp.data) case ("wiki", Full(wp: Page)) => displayRecord(wp.data) } /** * Generate a link based on the current page */ val link = new Loc.Link[Page](List("wiki"), false){ override def createLink(in: Page) = if(in.edit) Full(Text("/wiki/edit/"+urlEncode(in.title))) else Full(Text("/wiki/"+urlEncode(in.title))) } /** * What's the text of the link? */ val text = new Loc.LinkText(calcLinkText _) def calcLinkText(in: Page): NodeSeq = if(in.edit) Text("Wiki edit "+in.title) else Text("Wiki "+in.title) /** * Rewrite the request so that we a) get friendly URLs and * b) get the appropriate page and context (edit or view) params * in order to pass them to the Page */ override val rewrite: LocRewrite = Full(NamedPF("Wiki Rewrite"){ case RewriteRequest(ParsePath("wiki" :: "edit" :: title :: Nil, _, _,_),_, _) => (RewriteResponse("wiki" :: Nil), Page(title, true)) case RewriteRequest(ParsePath("wiki" :: title :: Nil, _, _,_),_,_) => (RewriteResponse("wiki" :: Nil), Page(title, false)) }) /** * Show all the pages that are part of the wiki */ def showAll(in: NodeSeq): NodeSeq = WikiEntry.findAll(OrderBy(WikiEntry.name, Ascending)).flatMap(entry => <div><a href={url(entry.name)}>{entry.name}</a></div>) /** * Make the URL to be accessed based up the page in the Page. * The createLink method already ensures that spaces etc are encoded */ def url(title: String) = createLink(Page(title, false)) /** * The render bind for actually displaying the wiki contents. This will * look just like what you are familiar with for creating snippets */ def editRecord(r: WikiEntry) = { val isNew = !r.saved_? 
val pageName = r.name.is "a [href]" #> url(pageName) & "form [action]" #> url(pageName) & "textarea" #> r.entry.toForm & "type=submit" #> SHtml.submit(isNew ? "Add" | "Save", () => r.save) & "message" #> (if(isNew) Text("Create Entry named '"+pageName+"'") else Text("Edit entry named '"+pageName+"'")) & "edit ^*" #> NodeSeq.Empty } /** * Pretty much the same as the editRecord method, but obviously * minus the err, editing. */ def displayRecord(entry: WikiEntry) = "content" #> TextileParser.toHtml(entry.entry, textileWriter) & "a [href]" #> createLink(Page(entry.name, true)) & "view ^*" #> NodeSeq.Empty private val textileWriter = Some((info: WikiURLInfo) => info match { case WikiURLInfo(page, _) => (stringUrl(page), Text(page), None) }) def stringUrl(page: String) = url(page).map(_.text) getOrElse "" } }}
timperrett/lift-in-action
chapter-7/src/main/scala/sample/lib/Wiki.scala
Scala
apache-2.0
4,719
val list = List(1,2,3,4,5) println(f"\\n=> list: $list%-35s") val filtered_mapped = list filter (_ < 4) map (_ * 2) println(f"\\n=> filtered_mapped list: $filtered_mapped%-35s") println
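/* Expected output (derived from the code above): "=> list: List(1, 2, 3, 4, 5)" then "=> filtered_mapped list: List(2, 4, 6)", since filter (_ < 4) keeps 1, 2 and 3 and map (_ * 2) doubles them; the final bare println only emits a newline. */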
nvijayap/scala-ruby
scala/filter_map.scala
Scala
apache-2.0
202
package scala.tools.nsc import java.nio.file.attribute.BasicFileAttributes import java.nio.file.{FileVisitResult, Files, Path, SimpleFileVisitor} import difflib.DiffUtils import scala.jdk.CollectionConverters._ import scala.reflect.io.PlainNioFile import scala.tools.nsc.backend.jvm.AsmUtils object FileUtils { def createDir(dir: Path, s: String): Path = { val subDir = dir.resolve(s) Files.createDirectories(subDir) } def assertDirectorySame(dir1: Path, dir2: Path, dir2Label: String): Unit = { val diffs = FileUtils.diff(dir1, dir2) def diffText = { val builder = new java.lang.StringBuilder var showDetail = 1 // limit printing of diff to first class diffs.foreach { diff => val showDiff = { try showDetail > 0 finally showDetail -= 1 } diff.diffString(builder, showDiff) } builder.toString } assert(diffs.isEmpty, s"Difference detected between recompiling $dir2Label Run:\\njardiff -r $dir1 $dir2\\n$diffText") } sealed abstract class Diff(path: Path) { def diffString(builder: java.lang.StringBuilder, showDiff: Boolean): Unit = builder.append(toString) } final case class ContentsDiffer(relativePath: Path, path1: Path, path2: Path, left: Array[Byte], right: Array[Byte]) extends Diff(relativePath) { override def toString: String = { s"ContentsDiffer($relativePath)" } override def diffString(builder: java.lang.StringBuilder, showDiff: Boolean): Unit = { builder.append(productPrefix).append("(").append(relativePath).append(")") if (relativePath.getFileName.toString.endsWith(".class")) { if (showDiff) { val class1 = AsmUtils.readClass(path1.toFile.getAbsolutePath) val class2 = AsmUtils.readClass(path2.toFile.getAbsolutePath) val text1 = AsmUtils.textify(class1) val text2 = AsmUtils.textify(class2) builder.append(unifiedDiff(path1, path2, text1, text2)) } else { builder.append("[diff suppressed for brevity]") } } } } final case class Missing(relativePath: Path, foundPath: Path) extends Diff(relativePath) def diff(dir1: Path, dir2: Path): List[Diff] = { import Ordering.Implicits._ val diffs = collection.mutable.ListBuffer[Diff]() def allFiles(dir: Path): Map[Path, Map[String, Path]] = { val classFiles: List[(Path, Path)] = Files.walk(dir).iterator().asScala.map(x => (dir.relativize(x), x)).toList.filter(_._2.getFileName.toString.endsWith(".class")).toList classFiles.groupBy(_._1).view.mapValues(ps => ps.map { case (_, p) => (p.getFileName.toString, p)}.toMap).toMap } val dir1Files = allFiles(dir1) val dir2Files = allFiles(dir2) val allSubDirs = dir1Files.keySet ++ dir2Files.keySet for (subDir <- allSubDirs.toList.sortBy(_.iterator().asScala.map(_.toString).to(Seq))) { val files1 = dir1Files.getOrElse(subDir, Map.empty) val files2 = dir2Files.getOrElse(subDir, Map.empty) val allFileNames = files1.keySet ++ files2.keySet for (name <- allFileNames.toList.sorted) { (files1.get(name), files2.get(name)) match { case (Some(file1), Some(file2)) => val bytes1 = Files.readAllBytes(file1) val bytes2 = Files.readAllBytes(file2) if (!java.util.Arrays.equals(bytes1, bytes2)) { diffs += ContentsDiffer(dir1.relativize(file1), file1, file2, bytes1, bytes2) } case (Some(file1), None) => val relativePath = file1.relativize(dir1) diffs += Missing(relativePath, file1) case (None, Some(file2)) => val relativePath = file2.relativize(dir2) diffs += Missing(relativePath, file2) case (None, None) => throw new IllegalStateException() } } } diffs.toList } def deleteRecursive(f: Path) = new PlainNioFile(f).delete() def copyRecursive(src: Path, dest: Path): Unit = { class CopyVisitor(src: Path, dest: Path) extends 
SimpleFileVisitor[Path] { override def preVisitDirectory(dir: Path, attrs: BasicFileAttributes): FileVisitResult = { Files.createDirectories(dest.resolve(src.relativize(dir))) super.preVisitDirectory(dir, attrs) } override def visitFile(file: Path, attrs: BasicFileAttributes): FileVisitResult = { Files.copy(file, dest.resolve(src.relativize(file))) super.visitFile(file, attrs) } } Files.walkFileTree(src, new CopyVisitor(src, dest)) } private def unifiedDiff(path1: Path, path2: Path, text1: String, text2: String) = { def lines(s: String) = { val result = new java.util.ArrayList[String]() s.linesIterator.foreach(result.add) result } val lines1 = lines(text1) val lines2 = lines(text2) val patch = DiffUtils.diff(lines1, lines2) val value = DiffUtils.generateUnifiedDiff(path1.toString, path2.toString, lines1, patch, 10) val diffToString = value.asScala.mkString("\\n") diffToString } }
scala/scala
test/junit/scala/tools/nsc/FileUtils.scala
Scala
apache-2.0
5,070
/* ,i::, :;;;;;;; ;:,,::;. 1ft1;::;1tL t1;::;1, :;::; _____ __ ___ __ fCLff ;:: tfLLC / ___/ / |/ /____ _ _____ / /_ CLft11 :,, i1tffLi \\__ \\ ____ / /|_/ // __ `// ___// __ \\ 1t1i .;; .1tf ___/ //___// / / // /_/ // /__ / / / / CLt1i :,: .1tfL. /____/ /_/ /_/ \\__,_/ \\___//_/ /_/ Lft1,:;: , 1tfL: ;it1i ,,,:::;;;::1tti s_mach.explain_json .t1i .,::;;; ;1tt Copyright (c) 2016 S-Mach, Inc. Lft11ii;::;ii1tfL: Author: [email protected] .L1 1tt1ttt,,Li ...1LLLL... */ package s_mach.explain_json import org.scalatest.{FlatSpec, Matchers} class JsonStringBuilderTest extends FlatSpec with Matchers { def mkBuilder() = JsonStringBuilder(1024) "JsonStringBuilder.append(Boolean)" should "append a boolean to built JSON" in { { val builder = mkBuilder() builder.append(true) builder.build() shouldBe "true" } { val builder = mkBuilder() builder.append(false) builder.build() shouldBe "false" } } "JsonStringBuilder.append(Int)" should "append a number to built JSON" in { val builder = mkBuilder() builder.append(123) builder.build() shouldBe "123" } "JsonStringBuilder.append(Long)" should "append a number to built JSON" in { val builder = mkBuilder() builder.append(123l) builder.build() shouldBe "123" } "JsonStringBuilder.append(BigDecimal)" should "append a number to built JSON" in { val builder = mkBuilder() builder.append(BigDecimal("123.00")) builder.build() shouldBe "123.00" } "JsonStringBuilder.append(String)" should "append a string to built JSON" in { val builder = mkBuilder() builder.append("abc123") builder.build() shouldBe """"abc123"""" } "JsonStringBuilder.append(null)" should "append a string to built JSON" in { val builder = mkBuilder() builder.append(null) builder.build() shouldBe "null" } "JsonStringBuilder.appendObject" should "append an empty object to built JSON" in { val builder = mkBuilder() builder.appendObject { } builder.build() shouldBe "{}" } "JsonStringBuilder.appendField" should "append fields contained in an object to built JSON" in { val builder = mkBuilder() builder.appendObject { builder.appendField("1") { builder.append(true) } builder.appendField("2") { builder.append(123) } } builder.build() shouldBe """{"1":true,"2":123}""" } "JsonStringBuilder.appendArray" should "append an empty array to built JSON" in { val builder = mkBuilder() builder.appendArray { } builder.build() shouldBe "[]" } "JsonStringBuilder.append/appendArray" should "append values in an array to built JSON" in { val builder = mkBuilder() builder.appendArray { builder.append(true) builder.append(123) builder.appendObject { } } builder.build() shouldBe "[true,123,{}]" } "JsonStringBuilder.lastIsNull" should "return TRUE if last appended JSON is null" in { val builder = mkBuilder() builder.lastIsNull shouldBe false builder.append(null) builder.lastIsNull shouldBe true } "JsonStringBuilder.lastIsEmptyString" should "return TRUE if last appended JSON is empty string" in { val builder = mkBuilder() builder.lastIsEmptyString shouldBe false builder.append("") builder.lastIsEmptyString shouldBe true } "JsonStringBuilder.lastIsEmptyObject" should "return TRUE if last appended JSON is an empty object" in { val builder = mkBuilder() builder.lastIsEmptyObject shouldBe false builder.appendObject { builder.appendField("1") { builder.append(true) } } builder.lastIsEmptyObject shouldBe false builder.appendObject { } builder.lastIsEmptyObject shouldBe true } "JsonStringBuilder.lastIsEmptyArray" should "return TRUE if last appended JSON is an empty array" in { val builder = mkBuilder() builder.lastIsEmptyArray shouldBe 
false builder.appendArray { builder.append(true) } builder.lastIsEmptyArray shouldBe false builder.appendArray { } builder.lastIsEmptyArray shouldBe true } "JsonStringBuilder.lastIsEmpty" should "return TRUE if last appended JSON is an empty object, empty array, empty string or null" in { val builder = mkBuilder() builder.lastIsEmpty shouldBe false builder.append(null) builder.lastIsEmpty shouldBe true builder.append(123) builder.lastIsEmpty shouldBe false builder.append("") builder.lastIsEmpty shouldBe true builder.appendObject { builder.appendField("1") { builder.append(true) } } builder.lastIsEmpty shouldBe false builder.appendObject { } builder.lastIsEmpty shouldBe true builder.appendObject { builder.appendField("1") { builder.append("") } builder.lastIsEmpty shouldBe true } builder.lastIsEmpty shouldBe false builder.appendArray { builder.append(true) } builder.lastIsEmpty shouldBe false builder.appendArray { } builder.lastIsEmpty shouldBe true } "JsonStringBuilder.save" should "save the current length of the accumulated JSON output" in { val builder = mkBuilder() builder.append(123) builder.save() shouldBe 4 builder.appendObject { builder.appendField("1") { builder.append(true) } } // "123,{"1":true}," // 123456789012345 builder.save() shouldBe 15 builder.appendObject { builder.save() shouldBe 16 builder.appendField("2") { builder.append(false) } builder.save() shouldBe 26 } // "123,{"1":true},{"2":false}" // 12345678901234567890123456 builder.save() shouldBe 27 builder.appendArray { } // "123,{"1":true},{"2":false},[]," // 123456789012345678901234567890 builder.save() shouldBe 30 builder.appendArray { builder.append("abc") builder.append("123") } // "123,{"1":true},{"2":false},[],["abc","123"]," // 12345678901234567890123456789012345678901234 builder.save() shouldBe 44 } "JsonStringBuilder.restore" should "restore the state of the accumulated JSON output to a previous saved state" in { val builder = mkBuilder() builder.append(123) val saved = builder.save() builder.append(true) builder.restore(saved) builder.build() shouldBe "123" val saved2 = builder.save() builder.appendObject { builder.appendField("1") { builder.append(true) } } builder.restore(saved2) builder.build() shouldBe "123" builder.appendObject { builder.appendField("1") { builder.append(true) } val saved3 = builder.save() builder.appendField("2") { builder.append(123) } builder.restore(saved3) } builder.build() shouldBe """123,{"1":true}""" val saved4 = builder.save() builder.appendArray { } builder.restore(saved4) builder.build() shouldBe """123,{"1":true}""" builder.appendArray { builder.append(true) val saved5 = builder.save() builder.append(123) builder.restore(saved5) } builder.build() shouldBe """123,{"1":true},[true]""" } }
S-Mach/s_mach.explain
explain_json/src/test/scala/s_mach/explain_json/JsonStringBuilderTest.scala
Scala
mit
7,537
package com.github.mogproject.dockersbttest.example import org.specs2.mutable._ import com.redis.RedisClientPool trait context extends BeforeAfter { lazy val clients = new RedisClientPool(host = "localhost", port = 6379, database = 0) val keys = (1 to 100).map { i => f"test-sbt-parallel-$i%03d" } // takes over 100 * 100 ms = 10 seconds def insertWithSleep(valueSuffix: String) = keys.foreach { key => clients.withClient(_.set(key, s"${key}-${valueSuffix}")) Thread.sleep(100L) } def before = clear def after = clear def clear = keys.foreach { key => clients.withClient(_.del(key)) } } object RedisConnectionSpec1 extends Specification { "test1" should { "insert 100 records and delete 1 record" in new context { insertWithSleep("test1") clients.withClient(_.del("test-sbt-parallel-100")) clients.withClient(_.dbsize) must beSome(99L) } } } object RedisConnectionSpec2 extends Specification { "test2" should { "insert 100 records and delete 1 record" in new context { insertWithSleep("test2") clients.withClient(_.del("test-sbt-parallel-099")) clients.withClient(_.dbsize) must beSome(99L) } } } object RedisConnectionSpec3 extends Specification { "test3" should { "insert 100 records and delete 1 record" in new context { insertWithSleep("test3") clients.withClient(_.del("test-sbt-parallel-098")) clients.withClient(_.dbsize) must beSome(99L) } } } object RedisConnectionSpec4 extends Specification { "test4" should { "insert 100 records and delete 1 record" in new context { insertWithSleep("test4") clients.withClient(_.del("test-sbt-parallel-097")) clients.withClient(_.dbsize) must beSome(99L) } } }
mogproject/docker-sbt-test
example/src/test/scala/com/github/mogproject/dockersbttest/example/RedisConnectionSpec.scala
Scala
apache-2.0
1,760
package com.verizon.bda.trapezium.dal.lucene import java.io.{File, _} import java.sql.Time import com.verizon.bda.trapezium.dal.exceptions.LuceneDAOException import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{FileStatus, FileSystem, FileUtil, PathFilter, Path => HadoopPath} import org.apache.log4j.Logger import org.apache.lucene.analysis.Analyzer import org.apache.lucene.analysis.core.KeywordAnalyzer import org.apache.lucene.analysis.standard.StandardAnalyzer import org.apache.lucene.document.Field import org.apache.lucene.index.IndexWriterConfig.OpenMode import org.apache.lucene.index._ import org.apache.lucene.store.{Directory, LockFactory, MMapDirectory, NoLockFactory} import org.apache.solr.store.hdfs.HdfsDirectory import org.apache.spark.broadcast.Broadcast import org.apache.spark.ml.linalg.Vectors import org.apache.spark.rdd.RDD import org.apache.spark.sql.functions._ import org.apache.spark.sql.types.{ArrayType, StructField, StructType} import org.apache.spark.sql.{DataFrame, Row} import org.apache.spark.storage.StorageLevel import org.apache.spark.util.{DalUtils, RDDUtils} import org.apache.spark.{SparkConf, SparkContext} import scala.collection.mutable import scala.util.Random class LuceneDAO(val location: String, val searchFields: Set[String], val searchAndStoredFields: Set[String], val storedFields: Set[String], val luceneAnalyzer: String = "keyword", val stored: Boolean = false, storageLevel: StorageLevel = StorageLevel.DISK_ONLY) extends Serializable { import LuceneDAO._ @transient lazy val log = Logger.getLogger(classOf[LuceneDAO]) @transient private lazy val analyzer: Analyzer = LuceneDAO.getAnalyzer(luceneAnalyzer) @transient private lazy val store: Field.Store = if (stored) Field.Store.YES else Field.Store.NO log.info(s"Using ${luceneAnalyzer} analyzer") @transient lazy val converter = OLAPConverter(searchFields, store, searchAndStoredFields, storedFields) @transient private var _dictionary: DictionaryManager = _ def encodeDictionary(df: DataFrame): LuceneDAO = { if (_dictionary != null) return this _dictionary = new DictionaryManager searchAndStoredFields.foreach(f => { val selectedDim = if (df.schema(f).dataType.isInstanceOf[ArrayType]) { df.select(explode(df(f))) } else { df.select(df(f)) } val filteredDim = selectedDim.rdd.map(_.getAs[String](0)).filter(_ != null) _dictionary.addToDictionary(f, filteredDim) }) this } /** * 2 step process to generate feature vectors: * Step 1: Collect all the featureColumns in arrFeatures that * should go into feature vector. * Step 2: Pass arrFeatures and the featureColumns to featureIndexUdf * to generate the vector */ def vectorize(df: DataFrame, vectorizedColumn: String, dimension: Seq[String], features: Seq[String]): DataFrame = { val dfWithFeatures = df.withColumn("arrFeatures", array(features.map(df(_)): _*)) val key = this var udfObject = ObjectManager.get(key) if (udfObject.isEmpty) { val broadcastDictionary: Broadcast[DictionaryManager] = df.sqlContext.sparkContext.broadcast(_dictionary) log.info("Creating a broadcast dictionary") var newUdfObject = new FeatureUDF(broadcastDictionary) ObjectManager.put(key, newUdfObject) udfObject = ObjectManager.get(key) } val udf = udfObject.get.asInstanceOf[FeatureUDF] .featureIndexUdf(array(dimension.map(lit(_)): _*), dfWithFeatures("arrFeatures")) val vectorized = dfWithFeatures.withColumn(vectorizedColumn, udf) val finalDF = vectorized.drop("arrFeatures") finalDF } /** * Dispose all objects as part of this call. 
*/ def close(): Unit = { val key = this if (!ObjectManager.get(key).isEmpty) { val broadcast = ObjectManager.get(key).get.asInstanceOf[FeatureUDF].broadcastDictionary broadcast.destroy() } ObjectManager.removeAll() } // TODO: If index already exist we have to merge dictionary and update indices def index(dataframe: DataFrame, time: Time): Unit = { val locationPath = location.stripSuffix("/") + "/" val indicesPath = locationPath + INDICES_PREFIX val dictionaryPath = locationPath + DICTIONARY_PREFIX val schemaPath = locationPath + SCHEMA_PREFIX val path = new HadoopPath(locationPath) val conf = new Configuration val fs = FileSystem.get(path.toUri, conf) if (fs.exists(path)) { log.warn(s"deleting indices at location ${path.getName}") fs.delete(path, true) } fs.mkdirs(path) encodeDictionary(dataframe) val sc: SparkContext = dataframe.rdd.sparkContext val dictionaryBr = dataframe.rdd.context.broadcast(dictionary) val parallelism = dataframe.rdd.context.defaultParallelism val inSchema = dataframe.schema dataframe.coalesce(parallelism).rdd.mapPartitionsWithIndex((i, itr) => { val sparkConf = new SparkConf() val localDir = new File(DalUtils.getLocalDir(sparkConf)) log.info(s"Created ${localDir} to write lucene shuffle") val indexWriterConfig = new IndexWriterConfig(analyzer) indexWriterConfig.setOpenMode(OpenMode.CREATE) // Open a directory on Standalone/YARN/Mesos disk cache val shuffleIndexFile = DalUtils.getTempFile(PREFIX, localDir) val shuffleIndexPath = shuffleIndexFile.toPath log.info(s"Created ${shuffleIndexPath} to write lucene partition $i shuffle") converter.setSchema(inSchema) converter.setDictionary(dictionaryBr.value) log.info(s"codec used for index creation ${indexWriterConfig.getCodec.getName}") val directory = new MMapDirectory(shuffleIndexPath) val indexWriter = new IndexWriter(directory, indexWriterConfig) itr.foreach { r => { try { val d = converter.rowToDoc(r) indexWriter.addDocument(d) } catch { case e: Exception => { throw new LuceneDAOException(s"Error with adding row ${r} " + s"to document ${e.getStackTraceString}", e) } } } } log.info(indexWriter.getConfig.getCodec.getName) indexWriter.commit() log.debug("Number of documents indexed in this partition: " + indexWriter.maxDoc()) indexWriter.close() directory.close() // Remove the lock for Solr HDFS/Local Uploads, Windows will handle / differently val lockFile = new File(shuffleIndexFile + File.separator + IndexWriter.WRITE_LOCK_NAME) if (!lockFile.delete()) { log.error(s"Error deleting lock file after index writer is closed ${lockFile.getAbsolutePath()}") } val conf = new Configuration // TODO: Use Java FileSystem API to clean up "/" hardcoding val fs = FileSystem.get(conf) val partitionLocation = indicesPath + "/" + SUFFIX + i + "/" + INDEX_PREFIX val dstPath = new HadoopPath(partitionLocation) val srcPath = new HadoopPath(shuffleIndexPath.toString) fs.copyFromLocalFile(true, srcPath, dstPath) log.info(s"Copied indices from ${srcPath.toString} to deep storage ${dstPath.toString}") if (shuffleIndexFile.exists() && !shuffleIndexFile.delete()) { log.error(s"Error while deleting temp file ${shuffleIndexFile.getAbsolutePath()}") } Iterator.empty }).count() val filesList = fs.listFiles(path, true) while (filesList.hasNext()) log.debug(filesList.next().getPath.toString()) FileSystem.closeAll() dictionary.save(dictionaryPath)(sc) // save the schema object sc.parallelize(dataframe.schema, 1).saveAsObjectFile(schemaPath) log.info("Number of partitions: " + dataframe.rdd.getNumPartitions) } // TODO: load logic will move to LuceneRDD 
@transient private var _shards: RDD[LuceneShard] = _ def load(sc: SparkContext, preload: Boolean = true): Unit = { val indexPath = location.stripSuffix("/") + "/" + INDICES_PREFIX val dictionaryPath = location.stripSuffix("/") + "/" + DICTIONARY_PREFIX val schemaPath = location.stripSuffix("/") + "/" + SCHEMA_PREFIX // load the schema object into RDD val schemaRDD = sc.objectFile(schemaPath).map { x: Any => x.asInstanceOf[StructField] } val schema = StructType(schemaRDD.collect) val indexDir = new HadoopPath(indexPath) val fs = FileSystem.get(indexDir.toUri, sc.hadoopConfiguration) val status: Array[FileStatus] = fs.listStatus(indexDir, new PathFilter { override def accept(path: HadoopPath): Boolean = { path.getName.startsWith(SUFFIX) } }) val numPartitions = status.length log.info(s"LuceneDAO_Load: Loading ${numPartitions} indices from path ${indexPath}") val partitionIds = sc.parallelize((0 until numPartitions).toList, sc.defaultParallelism) /* val f = (indices: Iterator[Int]) => { converter.setSchema(schema) indices.map((index: Int) => { val sparkConf = new SparkConf() val localDir = new File(DalUtils.getLocalDir(sparkConf)) log.info(s"LuceneDAO_Load: Created ${localDir} to write lucene shuffle") val hdfsPath = indexPath + "/" + SUFFIX + index + "/" + INDEX_PREFIX // Open a directory on Standalone/YARN/Mesos disk cache val shuffleIndexFile = DalUtils.getTempFile(PREFIX, localDir) val shuffleIndexPath = shuffleIndexFile.toPath log.info(s"LuceneDAO_Load: Created ${shuffleIndexPath} to read lucene partition $index shuffle") val shard: Option[LuceneShard] = { log.info(s"Copying data from deep storage: ${hdfsPath} to " + s"local shuffle: ${shuffleIndexPath}") try { LuceneShard.copyToLocal(hdfsPath, shuffleIndexPath.toString) Some(LuceneShard(hdfsPath, shuffleIndexFile.getAbsolutePath, preload, converter, luceneAnalyzer)) } catch { case e: IOException => throw new LuceneDAOException(s"Copy from: ${hdfsPath} to local " + s"shuffle: ${shuffleIndexPath} failed", e) case x: Throwable => throw new RuntimeException(x) } } shard.get }) } */ val copyTask = new CopyTask(converter, schema, indexPath, SUFFIX, INDEX_PREFIX, PREFIX, preload, luceneAnalyzer) _shards = RDDUtils.mapPartitionsInternal(partitionIds, copyTask.copy) _shards.cache() log.info("Number of shards: " + shards.count()) if (_dictionary == null) _dictionary = new DictionaryManager else _dictionary.clear() _dictionary.load(dictionaryPath)(sc) log.info(s"dictionary stats ${_dictionary}") } /** * @param sc * @param outputPath desired outpath of merged shards * @param numShards total shards */ def merge(sc: SparkContext, outputPath: String, numShards: Int): Unit = { val indexPath = location.stripSuffix("/") + "/" + INDICES_PREFIX val indexDir = new HadoopPath(indexPath) val fs = FileSystem.get(indexDir.toUri, sc.hadoopConfiguration) val dictionaryPath = location.stripSuffix("/") + "/" + DICTIONARY_PREFIX val schemaPath = location.stripSuffix("/") + "/" + SCHEMA_PREFIX val files: Seq[String] = fs.listStatus(indexDir, new PathFilter { override def accept(path: HadoopPath): Boolean = { path.getName.startsWith(SUFFIX) } }).map(status => s"${status.getPath.toString}/${INDEX_PREFIX}") val mergePathPrefix = outputPath.stripSuffix("/") + "/" + INDICES_PREFIX + "/" sc.parallelize(files, numShards).mapPartitionsWithIndex((partition, fileIterator) => { val mergePath = mergePathPrefix + s"${SUFFIX}${partition}" + "/" + INDEX_PREFIX val conf = new Configuration val mergedIndex = new HdfsDirectory(new HadoopPath(mergePath), 
NoLockFactory.INSTANCE.asInstanceOf[LockFactory], conf, 4096) val mergePolicy: TieredMergePolicy = new TieredMergePolicy() mergePolicy.setNoCFSRatio(0.0) mergePolicy.setMaxMergeAtOnce(10000) mergePolicy.setSegmentsPerTier(10000) val writerConfig = new IndexWriterConfig() .setOpenMode(OpenMode.CREATE) .setUseCompoundFile(false) .setRAMBufferSizeMB(1024.0) .setMergePolicy(mergePolicy) log.info(s"Using mergePolicy: ${mergePolicy}") val files = fileIterator.toArray val writer = new IndexWriter(mergedIndex, writerConfig) val indexes = new Array[Directory](files.length) var i = 0 while (i < files.length) { val fileName = files(i) indexes(i) = new HdfsDirectory(new HadoopPath(fileName), NoLockFactory.INSTANCE.asInstanceOf[LockFactory], conf, 4096) i += 1 } log.info(s"Logically merging ${files.size} shards into one shard") val start = System.currentTimeMillis writer.addIndexes(indexes: _*) val elapsedSecs = (System.currentTimeMillis() - start) / 1000.0f log.info(s"Logical merge took ${elapsedSecs} secs") writer.close() Iterator.empty }).count() val outputDictionaryPath = new HadoopPath(outputPath.stripSuffix("/") + "/" + DICTIONARY_PREFIX) val outputSchemaPath = new HadoopPath(outputPath.stripSuffix("/") + "/" + SCHEMA_PREFIX) FileUtil.copy(fs, new HadoopPath(dictionaryPath), fs, outputDictionaryPath, false, true, sc.hadoopConfiguration) FileUtil.copy(fs, new HadoopPath(schemaPath), fs, outputSchemaPath, false, true, sc.hadoopConfiguration) } def shards(): RDD[LuceneShard] = _shards def dictionary(): DictionaryManager = _dictionary def count(queryStr: String): Long = { if (shards == null) throw new LuceneDAOException(s"count called with null shards") shards.map(new CountTask(queryStr).count).sum().toLong } def search(queryStr: String, columns: Seq[String], sample: Double): RDD[Row] = { if (shards == null) throw new LuceneDAOException(s"search called with null shards") val searchTask = new SearchTask(converter, columns, queryStr, sample) /* val f = (shard: LuceneShard) => { converter.setColumns(columns) shard.search(queryStr, columns, sample) } */ val rows = shards.flatMap(searchTask.search) rows } // search a query and retrieve for all stored fields def search(queryStr: String, sample: Double = 1.0): RDD[Row] = { search(queryStr, storedFields.toSeq ++ searchAndStoredFields.toSeq, sample) } private val aggFunctions = Set("sum", "count_approx", "count", "sketch") // TODO: Aggregator will be instantiated based on the operator and measure // Eventually they will extend Expression from Catalyst but run columnar processing private def getAggregator(aggFunc: String): OLAPAggregator = { if (aggFunctions.contains(aggFunc)) { aggFunc match { case "sum" => new Sum case "count_approx" => new CardinalityEstimator case "count" => new Cardinality case "sketch" => new SketchAggregator } } else { throw new LuceneDAOException(s"unsupported aggFunc $aggFunc " + s"supported ${aggFunctions.mkString(",")}") } } // TODO: Multiple measures can be aggregated at same time def aggregate(queryStr: String, measure: String, aggFunc: String): Any = { if (shards == null) throw new LuceneDAOException(s"aggregate called with null shards") log.info(s"query ${queryStr} measure ${measure}, aggFunc $aggFunc") val seqOp = (agg: OLAPAggregator, shard: LuceneShard) => { shard.aggregate( queryStr, measure, agg) } val agg = getAggregator(aggFunc) val aggStart = System.nanoTime() agg.init(1) val results = shards.treeAggregate(agg)(seqOp, combOp) log.info(f"OLAP aggragation time ${(System.nanoTime() - aggStart) * 1e-9}%6.3f sec") 
results.eval()(0) } // TODO: time-series and group should be combined in group, bucket boundaries on any measure // is feasible to generate like time-series def group(queryStr: String, dimension: String, measure: String, aggFunc: String): Map[String, Any] = { if (shards == null) throw new LuceneDAOException(s"group called with null shards") val dimRange = dictionary.getRange(dimension) val dimOffset = dimRange._1 val dimSize = dimRange._2 - dimRange._1 + 1 log.info(s"query ${queryStr} dimension ${dimension}, " + s"range [${dimRange._1}, ${dimRange._2}] measure ${measure}") // TODO: Aggregator is picked based on the SQL functions sum, countDistinct, count val agg = getAggregator(aggFunc) // TODO: If aggregator is not initialized from driver and broadcasted, merge fails on NPE // TODO: RDD aggregate needs to be looked into val conf = shards.sparkContext.getConf val executorAggregate = conf.get("spark.trapezium.executoraggregate", "false").toBoolean log.info(s"executorAggregate ${executorAggregate}") val depth = conf.get("spark.trapezium.aggregationdepth", "2").toInt log.info(s"aggregation depth ${depth}") val seqOp = (agg: OLAPAggregator, shard: LuceneShard) => shard.group( queryStr, dimension, dimOffset, measure, agg = agg) agg.init(dimSize) val groups = if (executorAggregate) { val groupStart = System.nanoTime() val partitions = shards.sparkContext.defaultParallelism val executorId = Math.floor(Random.nextDouble() * partitions).toInt val results = RDDUtils.treeAggregateExecutor(agg)( shards, seqOp, (agg, other) => agg.merge(other), depth, executorId).map(_.eval().zipWithIndex) results.count() log.info(f"OLAP group time ${(System.nanoTime() - groupStart) * 1e-9}%6.3f sec") results.collect()(0) } else { val groupStart = System.nanoTime() // TODO: optimize on agg broadcast agg.init(dimSize) val results = shards.treeAggregate(agg)(seqOp, combOp, depth) log.info(f"OLAP group time ${(System.nanoTime() - groupStart) * 1e-9}%6.3f sec") results.eval().zipWithIndex } val transformed = groups.map { case (value: Any, index: Int) => (dictionary.getFeatureName(dimOffset + index), value) }.toMap transformed } // TODO: time-series for multiple measure can be aggregated in same call def timeseries(queryStr: String, minTime: Long, maxTime: Long, rollup: Long, measure: String, aggFunc: String): Array[Any] = { if (shards == null) throw new LuceneDAOException(s"timeseries called with null shards") log.info(s"query ${queryStr} measure ${measure}, time [$minTime, $maxTime] rollup $rollup") val dimSize = Math.floor((maxTime - minTime) / rollup).toInt log.info(s"Calculated time series size ${dimSize} " + s"from [$maxTime, $minTime] with rollup $rollup") val seqOp = (agg: OLAPAggregator, shard: LuceneShard) => { shard.timeseries( queryStr, minTime, maxTime, rollup, measure, agg) } val agg = getAggregator(aggFunc) val tsStart = System.nanoTime() agg.init(dimSize) val results = shards.treeAggregate(agg)(seqOp, combOp) log.info(f"OLAP timeseries time ${(System.nanoTime() - tsStart) * 1e-9}%6.3f sec") results.eval } def facet(queryStr: String, dimension: String): Map[String, Long] = { if (shards == null) throw new LuceneDAOException(s"timeseries called with null shards") log.info(s"query ${queryStr} dimension ${dimension}") val agg = getAggregator("sum") val dimRange = dictionary.getRange(dimension) val dimSize = dimRange._2 - dimRange._1 + 1 val dimOffset = dimRange._1 val fTask = new FacetTask(queryStr, dimension, dimOffset) // val seqOp = facet( val facetStart = System.nanoTime() agg.init(dimSize) val results = 
shards.treeAggregate(agg)(fTask.facet, combOp) log.info(f"OLAP facet time ${(System.nanoTime() - facetStart) * 1e-9}%6.3f sec") val facets = results.eval().zipWithIndex val transformed = facets.map { case (value: Any, index: Int) => (dictionary.getFeatureName(dimOffset + index), value.asInstanceOf[Long]) }.toMap transformed } } /** * A plain simple wrapper on the UDF that helps to hold the broadcasted dictionary for the * current request session * * @param broadcastDictionary */ class FeatureUDF(val broadcastDictionary: Broadcast[DictionaryManager]) extends Serializable { /** * Udf to compute the indices and values for sparse vector. * Here s is storedDimension and m is featureColumns mapped as Array * corresponding to the dimensions. Support measures that are MapType * with keys as the hierarchical dimension */ val featureIndexUdf = udf { (s: mutable.WrappedArray[String], m: mutable.WrappedArray[Map[String, Double]]) => val indVal = s.zip(m).flatMap { x => if (x._2 == null) { Map[Int, Double]() } else { val output: Map[Int, Double] = x._2.map(kv => (broadcastDictionary.value.indexOf(x._1, kv._1), kv._2)) output.filter(_._1 >= 0) } }.sortBy(_._1) Vectors.sparse(broadcastDictionary.value.size, indVal.map(_._1).toArray, indVal.map(_._2).toArray) } } /** * A simple holder of objects for LuceneDAO instance. Just so that * the instance leevl fields are not exposed for serialization * unnecessarily. * Created by sankma8 on 3/19/18. */ object ObjectManager { val repository = scala.collection.mutable.Map[Object, Object]() def put(key: Object, value: Object): Option[Object] = { repository.put(key, value) } def remove(key: Object): Option[Object] = { repository.remove(key) } def removeAll(): Unit = { repository.clear() } def get(key: Object): Option[Object] = { repository.get(key) } } object LuceneDAO { val PREFIX = "trapezium-lucenedao" val SUFFIX = "part-" val INDICES_PREFIX = "indices" val INDEX_PREFIX = "index" val DICTIONARY_PREFIX = "dictionary" val SCHEMA_PREFIX = "schema" def getAnalyzer(analyzerStr: String): Analyzer = { val analyzer: Analyzer = analyzerStr match { case "keyword" => new KeywordAnalyzer() case "standard" => new StandardAnalyzer() case _ => throw new LuceneDAOException("supported analyzers are keyword/standard") } analyzer } // TODO: Look into treeAggregate architecture for multiple queries def combOp = (agg: OLAPAggregator, other: OLAPAggregator) => { agg.merge(other) } } trait Task extends Serializable { } class FacetTask(queryStr: String, dimension: String, dimOffset: Int) extends Task { def facet(agg: OLAPAggregator, shard: LuceneShard): OLAPAggregator = { shard.facet( queryStr, dimension, dimOffset, agg) } } class CountTask(queryStr: String) extends Task { def count(shard: LuceneShard): Long = { shard.count(queryStr) } } class SearchTask(converter: OLAPConverter, columns: Seq[String], queryStr: String, sample: Double) extends Task { @transient lazy val log = Logger.getLogger(classOf[SearchTask]) def search(shard: LuceneShard) = { log.info("Inside search ...") converter.setColumns(columns) shard.search(queryStr, columns, sample) } } class CopyTask(converter: OLAPConverter, schema: StructType, indexPath: String, SUFFIX: String, INDEX_PREFIX: String, PREFIX: String, preload: Boolean, luceneAnalyzer: String) extends Task { @transient lazy val log = Logger.getLogger(classOf[CopyTask]) def copy(indices: Iterator[Int]) = { log.info("Inside Copy...") converter.setSchema(schema) indices.map((index: Int) => { val sparkConf = new SparkConf() val localDir = new 
File(DalUtils.getLocalDir(sparkConf)) log.info(s"LuceneDAO_Load: Created ${localDir} to write lucene shuffle") val hdfsPath = indexPath + "/" + SUFFIX + index + "/" + INDEX_PREFIX // Open a directory on Standalone/YARN/Mesos disk cache val shuffleIndexFile = DalUtils.getTempFile(PREFIX, localDir) val shuffleIndexPath = shuffleIndexFile.toPath log.info(s"LuceneDAO_Load: Created ${shuffleIndexPath} to read lucene partition $index shuffle") val shard: Option[LuceneShard] = { log.info(s"Copying data from deep storage: ${hdfsPath} to " + s"local shuffle: ${shuffleIndexPath}") try { LuceneShard.copyToLocal(hdfsPath, shuffleIndexPath.toString) Some(LuceneShard(hdfsPath, shuffleIndexFile.getAbsolutePath, preload, converter, luceneAnalyzer)) } catch { case e: IOException => throw new LuceneDAOException(s"Copy from: ${hdfsPath} to local " + s"shuffle: ${shuffleIndexPath} failed", e) case x: Throwable => throw new RuntimeException(x) } } shard.get }) } }
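/* A minimal usage sketch (illustrative; the DataFrame df, SparkContext sc, index location and field names are assumed): val dao = new LuceneDAO(location = "hdfs:///tmp/lucene-index", searchFields = Set("city"), searchAndStoredFields = Set("zip"), storedFields = Set("visits")); dao.index(df, new java.sql.Time(System.currentTimeMillis)); dao.load(sc); val rows = dao.search("city:paris"); val visitsByZip = dao.group("city:paris", dimension = "zip", measure = "visits", aggFunc = "sum"); dao.close() */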
Verizon/trapezium
dal/src/main/scala/com/verizon/bda/trapezium/dal/lucene/LuceneDAO.scala
Scala
apache-2.0
25,439
package breeze.linalg.support /* * * Copyright 2015 David Hall * * Licensed under the Apache License, Version 2.0 (the "License") * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * / */ import breeze.generic.UFunc import breeze.math.Complex import breeze.macros.cforRange import scala.reflect.ClassTag import scala.{specialized => spec} /** * UFunc for being able to map the keys and values in a value collection * to new values. * * @author dramage * @author dlwh */ trait CanMapValues[From, @specialized(Int, Float, Long, Double) V, @specialized(Int, Float, Long, Double) V2, +To] { // def apply(from: From, fn: V=>V2): To = map(from, fn) def map(from: From, fn: V => V2): To def mapActive(from: From, fn: V=>V2): To } object CanMapValues extends CanMapValuesLowPrio { trait DenseCanMapValues[From, V, V2, To] extends CanMapValues[From, V, V2, To] { final def mapActive(from: From, fn: V=>V2): To = map(from, fn) } implicit def canMapSelfDouble[V2]: CanMapValues[Double, Double, V2, V2] = canMapSelf[Double, V2] implicit def canMapSelfInt[V2]: CanMapValues[Int, Int, V2, V2] = canMapSelf[Int, V2] implicit def canMapSelfFloat[V2]: CanMapValues[Float, Float, V2, V2] = canMapSelf[Float, V2] implicit def canMapSelfLong[V2]: CanMapValues[Long, Long, V2, V2] = canMapSelf[Long, V2] implicit def canMapSelfShort[V2]: CanMapValues[Short, Short, V2, V2] = canMapSelf[Short, V2] implicit def canMapSelfByte[V2]: CanMapValues[Byte, Byte, V2, V2] = canMapSelf[Byte, V2] implicit def canMapSelfChar[V2]: CanMapValues[Char, Char, V2, V2] = canMapSelf[Char, V2] // // Arrays // class OpArray[@spec(Double, Int, Float, Long) A, @spec(Double, Int, Float, Long) B: ClassTag] extends CanMapValues[Array[A], A, B, Array[B]] { /**Maps all values from the given collection. */ def map(from: Array[A], fn: (A) => B): Array[B] = { val arr = new Array[B](from.length) cforRange(0 until from.length) { i => arr(i) = fn(from(i)) } arr } override def mapActive(from: Array[A], fn: A => B): Array[B] = map(from, fn) } implicit def opArray[@spec A, @spec B: ClassTag]: OpArray[A, B] = new OpArray[A, B] implicit object OpArrayII extends OpArray[Int, Int] implicit object OpArraySS extends OpArray[Short, Short] implicit object OpArrayLL extends OpArray[Long, Long] implicit object OpArrayFF extends OpArray[Float, Float] implicit object OpArrayDD extends OpArray[Double, Double] implicit object OpArrayCC extends OpArray[Complex, Complex] implicit object OpArrayID extends OpArray[Int, Double] implicit object OpArraySD extends OpArray[Short, Double] implicit object OpArrayLD extends OpArray[Long, Double] implicit object OpArrayFD extends OpArray[Float, Double] } sealed trait CanMapValuesLowPrio { self: CanMapValues.type => def canMapSelf[V, V2]: CanMapValues[V, V, V2, V2] = { new CanMapValues[V, V, V2, V2] { def map(from: V, fn: (V) => V2): V2 = fn(from) def mapActive(from: V, fn: (V) => V2): V2 = fn(from) } } }
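/* A minimal usage sketch (illustrative): implicitly[CanMapValues[Array[Int], Int, Double, Array[Double]]].map(Array(1, 2, 3), _.toDouble) returns Array(1.0, 2.0, 3.0); resolution favours the specific OpArrayID instance over the generic opArray def. */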
scalanlp/breeze
math/src/main/scala/breeze/linalg/support/CanMapValues.scala
Scala
apache-2.0
3,518
package org.bitcoins.rpc.common import org.bitcoins.rpc.client.common.BitcoindVersion import org.bitcoins.rpc.client.common.BitcoindVersion.{V19, V20, V21} import org.bitcoins.testkit.util.BitcoindRpcTest class BitcoindVersionTest extends BitcoindRpcTest { behavior of "BitcoindVersion" it should "return version 21" in { val version = BitcoindVersion.fromNetworkVersion(210100) assert(version.equals(V21)) } it should "return version 20" in { val version = BitcoindVersion.fromNetworkVersion(200309) assert(version.equals(V20)) } it should "return version 19" in { val version = BitcoindVersion.fromNetworkVersion(190100) assert(version.equals(V19)) } }
bitcoin-s/bitcoin-s
bitcoind-rpc-test/src/test/scala/org/bitcoins/rpc/common/BitcoindVersionTest.scala
Scala
mit
697
package com.twitter.server import com.twitter.app.GlobalFlag import com.twitter.app.lifecycle.Event import com.twitter.app.lifecycle.Event.{PrebindWarmup, WarmupComplete} import com.twitter.finagle.http.Method.Post import com.twitter.finagle.http.{HttpMuxer, Route, RouteIndex} import com.twitter.server.handler._ import java.lang.management.ManagementFactory import java.util.concurrent.atomic.AtomicBoolean import scala.collection.JavaConverters._ trait Lifecycle { self: TwitterServer => // Mesos/Aurora lifecycle endpoints val group = "Misc" HttpMuxer.addHandler( Route( pattern = "/abortabortabort", handler = new AbortHandler, index = Some(RouteIndex(alias = "Abort Server", group = group, method = Post)) ) ) HttpMuxer.addHandler( Route( pattern = "/quitquitquit", handler = new ShutdownHandler(this), index = Some(RouteIndex(alias = "Quit Server", group = group, method = Post)) ) ) HttpMuxer.addHandler( Route( pattern = "/health", handler = new ReplyHandler("OK\\n"), index = Some(RouteIndex(alias = "Health", group = group)) ) ) } object promoteBeforeServing extends GlobalFlag[Boolean]( true, "Promote objects in young generation to old generation before serving requests. " + "May shorten the following gc pauses by avoiding the copying back and forth between survivor " + "spaces of a service's long lived objects." ) object Lifecycle { private[server] class PromoteToOldGen( runtimeArgs: Seq[String] = ManagementFactory.getRuntimeMXBean.getInputArguments.asScala.toList) { private[this] val hasPromoted = new AtomicBoolean(false) private[server] def promoted: Boolean = hasPromoted.get() /** * Depending on the VM flags used, `System.gc` will not have the effect we want. */ private[this] def shouldExplicitGc: Boolean = { !runtimeArgs.contains("-XX:+ExplicitGCInvokesConcurrent") } /** * Helps make early gc pauses more consistent. * * Ideally this runs in the moments right before your service starts accepting traffic. */ def beforeServing(): Unit = { if (promoteBeforeServing() && shouldExplicitGc && hasPromoted.compareAndSet(false, true)) { // This relies on the side-effect of a full gc that all objects in the // young generation are promoted to the old generation. // If this side-effect were to disappear in a future version // of the jdk, it would not be disastrous. However, then we may choose // to add a `System.promoteAllLiveObjects()` hook. System.gc() } } } private[Lifecycle] object Warmup { /** * Initialize warmup code. Ensures that the /health endpoint will not return on "OK" response. */ def initializeWarmup(): Unit = HttpMuxer.addHandler(Route("/health", new ReplyHandler(""))) /** * Prebind warmup code. Used for warmup tasks that we want to run before we * accept traffic. */ def prebindWarmup(): Unit = new PromoteToOldGen().beforeServing() /** * The service is bound to a port and warmed up, announce health. */ def warmupComplete(): Unit = HttpMuxer.addHandler(Route("/health", new ReplyHandler("OK\\n"))) } /** * A [[Warmup]] that is detached from a [[TwitterServer]] lifecycle. As there is no guarantee that * this trait is used within the context of a [[TwitterServer]], the behavior and expectations of * this warmup are determined by the implementor. */ @deprecated( "Warmup behavior is a TwitterServer lifecycle concern. Please mixin Warmup to your TwitterServer.", "2020-06-25") trait DetatchedWarmup { Warmup.initializeWarmup() /** * Prebind warmup code. Used for warmup tasks that we want to run before we * accept traffic. 
*/ def prebindWarmup(): Unit = Warmup.prebindWarmup() /** * The service is bound to a port and warmed up, announce health. */ def warmupComplete(): Unit = Warmup.warmupComplete() } /** * Give the application control over when to present to Mesos as being ready * for traffic. When the method `warmupComplete()` is invoked, the application * is considered ready. * @note Mesos doesn't gate traffic on /health so all pre-bind warmup needs to * happen in `prebindWarmup()` */ trait Warmup { self: TwitterServer => Warmup.initializeWarmup() override protected[twitter] def startupCompletionEvent: Event = WarmupComplete /** * Prebind warmup code. Used for warmup tasks that we want to run before we * accept traffic. */ def prebindWarmup(): Unit = observe(PrebindWarmup) { Warmup.prebindWarmup() } /** * The service is bound to a port and warmed up, announce health. */ def warmupComplete(): Unit = observe(WarmupComplete) { Warmup.warmupComplete() } } }
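/* A minimal usage sketch (illustrative; MyServer and startMyService() are assumed): object MyServer extends TwitterServer with Lifecycle with Lifecycle.Warmup { def main(): Unit = { prebindWarmup(); val server = startMyService(); warmupComplete() } } where prebindWarmup() runs the pre-traffic work (including the promote-to-old-gen collection) and warmupComplete() switches /health back to replying "OK". */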
twitter/twitter-server
server/src/main/scala/com/twitter/server/Lifecycle.scala
Scala
apache-2.0
4,967
package com.monsanto.arch.kamon.spray.can import java.util.concurrent.TimeUnit import com.typesafe.config.{Config, ConfigFactory} import scala.concurrent.duration.FiniteDuration /** Container for the settings of the `KamonHttp` extension. All of the configuration is placed under the path * `spray.can.kamon`. Refer to the `reference.conf` for more details. * * @param config the configuration to use * * @author Daniel Solano Gómez */ class KamonHttpSettings(config: Config) { config.checkValid(ConfigFactory.defaultReference(), "spray.can.kamon") /** Scoped configuration. */ private val kamonConfig = config.getConfig("spray.can.kamon") /** The interval between each request to Spray for its statistics. */ val refreshInterval: FiniteDuration = FiniteDuration( kamonConfig.getDuration("refresh-interval", TimeUnit.MILLISECONDS), TimeUnit.MILLISECONDS) }
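/* A minimal usage sketch (illustrative; assumes the loaded config includes this library's reference.conf): val settings = new KamonHttpSettings(ConfigFactory.load()); the resulting settings.refreshInterval is the period between polls of Spray for its server statistics. */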
MonsantoCo/spray-kamon-metrics
src/main/scala/com/monsanto/arch/kamon/spray/can/KamonHttpSettings.scala
Scala
bsd-3-clause
897
package sri.universal.navigation import scala.scalajs.js /** * Make sure you check for null cases even if the value is defined. */ @js.native trait NavigationStateUtils extends js.Object { def getParent(state: NavigationState): js.UndefOr[NavigationParentState] = js.native def get(state: NavigationState, key: String): js.UndefOr[NavigationState] = js.native def indexOf(state: NavigationState, key: String): js.UndefOr[Integer] = js.native def push(state: NavigationState, newChildState: NavigationState): NavigationParentState = js.native def pop(state: NavigationParentState): NavigationParentState = js.native def reset(state: NavigationState, nextChildren: js.Array[NavigationState] = ???, nextIndex: Int = ???): NavigationState = js.native def set(state: NavigationState = ???, key: String, nextChildren: js.Array[NavigationState], nextIndex: Int): NavigationState = js.native def jumpToIndex(state: NavigationState, index: Int): NavigationState = js.native def jumpTo(state: NavigationState, key: String): NavigationState = js.native def replaceAt(state: NavigationState, key: String, newState: NavigationState): NavigationState = js.native def replaceAtIndex(state: NavigationState, index: Int, newState: NavigationState): NavigationState = js.native }
chandu0101/sri
universal/src/main/scala/sri/universal/navigation/NavigationStateUtils.scala
Scala
apache-2.0
1,289
package com.natalinobusa.wavr import akka.actor.Actor import com.datastax.driver.core._ trait CassandraClientActor extends Actor { // Cassandra Driver Futures to scala futures import CassandraQuery.ResultSet._ // Get the implicit Actor execution context import context.dispatcher // postFix conversions import scala.language.postfixOps // translations and piping import scala.collection.JavaConversions._ import akka.pattern.pipe // init the Cassandra cluster object val client: Cluster = init("127.0.0.1"); // connect to Cassandra val session = client.connect("ks") def init(node: String) = { Cluster.builder() .addContactPoint(node) .build(); } def getClusterName() = { client.getMetadata().getClusterName() } def getTransactions(aid:String, cid:String) = { val statement = session.prepare("SELECT v,d FROM tb WHERE a1=? and b1=? and b2>2 LIMIT 2;") val query = statement.bind(aid,cid) //session.executeAsync(query) map(_.all().getInt('v').toString() ) //.toList.mkString(",")) // map(_.all().map( r => (r.getInt('v'), r.getInt('v')) ).toList) val resultSetFuture = session.executeAsync(query) resultSetFuture map ( _.all() map (r => (r.getInt("d"), r.getInt("v")) ) toList ) } def term(): Unit = { client.shutdown(); } }
natalinobusa/wavr
src/main/scala/com/natalinobusa/wavr/CassandraClient.scala
Scala
apache-2.0
1,356
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.openwhisk.extension.whisk import io.gatling.core.Predef._ import io.gatling.core.action.Action import io.gatling.core.action.builder.ActionBuilder import io.gatling.core.session.Expression import io.gatling.core.structure.ScenarioContext import io.gatling.http.request.builder.{Http, HttpRequestBuilder} import spray.json.DefaultJsonProtocol._ import spray.json._ case class OpenWhiskActionBuilderBase(requestName: Expression[String]) { implicit private val http = new Http(requestName) /** Call the `/api/v1`-endpoint of the specified system */ def info() = { OpenWhiskActionBuilder(http.get("/api/v1")) } /** * Specify authentication data. This is needed to perform operations on namespaces or working with entities. * * @param uuid The UUID of the namespace * @param key The key of the namespace */ def authenticate(uuid: Expression[String], key: Expression[String]) = { OpenWhiskActionBuilderWithNamespace(uuid, key) } } case class OpenWhiskActionBuilderWithNamespace(private val uuid: Expression[String], private val key: Expression[String], private val namespace: String = "_")(implicit private val http: Http) { /** * Specify on which namespace you want to perform any action. * * @param namespace The namespace you want to use. */ def namespace(namespace: String) = { OpenWhiskActionBuilderWithNamespace(uuid, key, namespace) } /** List all namespaces you have access to, with your current authentication. */ def list() = { OpenWhiskActionBuilder(http.get("/api/v1/namespaces").basicAuth(uuid, key)) } /** * Perform any request against the actions-API. E.g. creating, invoking or deleting actions. * * @param actionName Name of the action in the Whisk-system. */ def action(actionName: String) = { OpenWhiskActionBuilderWithAction(uuid, key, namespace, actionName) } } case class OpenWhiskActionBuilderWithAction(private val uuid: Expression[String], private val key: Expression[String], private val namespace: String, private val action: String)(implicit private val http: Http) { private val path: Expression[String] = s"/api/v1/namespaces/$namespace/actions/$action" /** Fetch the action from OpenWhisk */ def get() = { OpenWhiskActionBuilder(http.get(path).basicAuth(uuid, key)) } /** Delete the action from OpenWhisk */ def delete() = { OpenWhiskActionBuilder(http.delete(path).basicAuth(uuid, key)) } /** * Create the action in OpenWhisk. * * @param code The code of the action to create. * @param kind The kind of the action you want to create. Default is `nodejs:default`. * @param main Main method of your action. This is only needed for java actions. 
*/ def create(code: Expression[String], kind: Expression[String] = "nodejs:default", main: Expression[String] = "") = { val json: Expression[String] = session => { code(session).flatMap { c => kind(session).flatMap { k => main(session).map { m => val exec = Map("kind" -> k, "code" -> c) ++ (if (m.size > 0) Map("main" -> m) else Map[String, String]()) JsObject("exec" -> exec.toJson).compactPrint } } } } OpenWhiskActionBuilder(http.put(path).basicAuth(uuid, key).body(StringBody(json))) } /** Invoke the action. */ def invoke() = { OpenWhiskActionBuilder(http.post(path).queryParam("blocking", "true").basicAuth(uuid, key)) } } case class OpenWhiskActionBuilder(http: HttpRequestBuilder) extends ActionBuilder { override def build(ctx: ScenarioContext, next: Action): Action = { http.build(ctx, next) } }
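/* A minimal Gatling scenario sketch (illustrative; the uuid, key and action name are assumed, and the surrounding Simulation wiring is elided): val whisk = OpenWhiskActionBuilderBase("invoke hello"); val scn = scenario("whisk warmup").exec(whisk.authenticate(uuid, key).action("hello").invoke()); setUp(scn.inject(atOnceUsers(1))) */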
starpit/openwhisk
tests/performance/gatling_tests/src/gatling/scala/org/apache/openwhisk/extension/whisk/OpenWhiskActionBuilder.scala
Scala
apache-2.0
4,680
package service import java.util.GregorianCalendar import dao.SemesterDao import database.SemesterDb import javax.inject.{Inject, Singleton} import org.joda.time.LocalDate import service.actor.NaturalDescribableYear @Singleton final class SemesterService @Inject()(private val semesterDao: SemesterDao) { def createSemester(year: NaturalDescribableYear) = (semesters _ andThen semesterDao.createManyPartial) (year) def semesters(year: NaturalDescribableYear): List[SemesterDb] = List(summerSemester(year), winterSemester(year)) def summerSemester(year: NaturalDescribableYear) = { val start = new LocalDate(year.year, 3, 1) val end = new LocalDate(year.year, 8, 31) makeSemester(s"Sommersemester ${year.long}", s"SoSe ${year.short}", start, end) } def winterSemester(year: NaturalDescribableYear) = { val nextYear = NaturalDescribableYear(year.year + 1) val start = new LocalDate(year.year, 9, 1) val maxDayConcerningLeapYear = if (new GregorianCalendar().isLeapYear(nextYear.year)) 29 else 28 val end = new LocalDate(nextYear.year, 2, maxDayConcerningLeapYear) makeSemester(s"Wintersemester ${year.long}/${nextYear.long}", s"WS ${year.short}/${nextYear.short}", start, end) } private def makeSemester(label: String, abbrev: String, start: LocalDate, end: LocalDate) = { import utils.date.DateTimeOps.LocalDateConverter SemesterDb(label, abbrev, start.sqlDate, end.sqlDate, end.minusWeeks(examWeekPadding).sqlDate) } private def examWeekPadding = 2 }
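/* A minimal usage sketch (illustrative; the injected SemesterDao is assumed): val service = new SemesterService(semesterDao); service.summerSemester(NaturalDescribableYear(2024)) covers 2024-03-01 to 2024-08-31, and service.createSemester(NaturalDescribableYear(2024)) persists both that summer semester and the following winter semester via the DAO. */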
THK-ADV/lwm-reloaded
app/service/SemesterService.scala
Scala
mit
1,520