| code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses: 1 value) | license (stringclasses: 15 values) | size (int64 5-1M) |
|---|---|---|---|---|---|
/*
* @author Carol Alexandru
*
* Copyright 2015 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.features
import org.scalatest.Matchers
import org.scalatest.FlatSpec
import com.signalcollect.util.TestAnnouncements
import com.signalcollect._
import com.signalcollect.examples.PageRankEdge
import com.signalcollect.examples.PageRankVertex
class MultipleGraphsSpec extends FlatSpec with Matchers with TestAnnouncements {
  def createComputation(): Graph[_, _] = {
    val graph = TestConfig.graphProvider().build
    graph.addVertex(new PageRankVertex(1))
    graph.addVertex(new PageRankVertex(2))
    graph.addEdge(1, new PageRankEdge(2))
    graph.addEdge(2, new PageRankEdge(1))
    graph
  }
  "Signal/Collect" should "support running multiple graph instances on the same actor system" in {
    val graph1 = createComputation()
    val graph2 = createComputation()
    val graph3 = createComputation()
    graph1.execute
    graph2.execute
    graph3.execute
    graph1.awaitIdle
    graph2.awaitIdle
    graph3.awaitIdle
    graph1.shutdown
    graph2.shutdown
    graph3.shutdown
  }
}
| hicolour/signal-collect | src/test/scala/com/signalcollect/features/MultipleGraphs.scala | Scala | apache-2.0 | 1,724 |
package example
object Example1 extends App {
  // import scalaxy.loops.optimize
  val n = 10
  // optimize { 10 }
  {
    for (i <- 0 to n) {
      println(i)
    }
  }
  println {
    for (i <- 0 to n) yield {
      i + 2
    }
  }
  println {
    for (i <- 0 to n; if i % 2 == 1) yield {
      i + 2
    }
  }
  println {
    for (i <- 0 to n; j <- i to 1 by -1; if i % 2 == 1) yield { i + j }
  }
}
| nativelibs4java/scalaxy-streams | Resources/example1.scala | Scala | bsd-3-clause | 412 |
/**
* Copyright 2011-2012 eBusiness Information, Groupe Excilys (www.excilys.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.excilys.ebi.gatling.http.check.header
import java.net.URLDecoder
import scala.collection.JavaConversions.asScalaBuffer
import com.excilys.ebi.gatling.core.check.ExtractorFactory
import com.excilys.ebi.gatling.core.check.extractor.Extractor
import com.excilys.ebi.gatling.core.check.extractor.regex.RegexExtractor
import com.excilys.ebi.gatling.core.config.GatlingConfiguration.configuration
import com.excilys.ebi.gatling.core.session.{ Session, EvaluatableString }
import com.excilys.ebi.gatling.http.Headers
import com.excilys.ebi.gatling.http.check.HttpMultipleCheckBuilder
import com.excilys.ebi.gatling.http.request.HttpPhase.HeadersReceived
import com.excilys.ebi.gatling.http.response.ExtendedResponse
object HttpHeaderRegexCheckBuilder extends Extractor {
private def findExtractorFactory(occurrence: Int): ExtractorFactory[ExtendedResponse, (String, String), String] =
(response: ExtendedResponse) =>
(headerAndPattern: (String, String)) => {
findAllExtractorFactory(response)(headerAndPattern) match {
case Some(results) if results.isDefinedAt(occurrence) => results(occurrence)
case _ => None
}
}
private val findAllExtractorFactory: ExtractorFactory[ExtendedResponse, (String, String), Seq[String]] = (response: ExtendedResponse) =>
(headerAndPattern: (String, String)) => {
val (headerName, pattern) = headerAndPattern
val decodedHeaderValues = Option(response.getHeaders(headerName))
.map { headerValues =>
if (headerName == Headers.Names.LOCATION)
headerValues.map(URLDecoder.decode(_, configuration.simulation.encoding))
else
headerValues.toSeq
}.getOrElse(Nil)
decodedHeaderValues.foldLeft(Seq.empty[String]) { (matches, header) =>
new RegexExtractor(header).extractMultiple(pattern).map(_ ++ matches).getOrElse(matches)
}
}
private val countExtractorFactory: ExtractorFactory[ExtendedResponse, (String, String), Int] =
(response: ExtendedResponse) => (headerAndPattern: (String, String)) => findAllExtractorFactory(response)(headerAndPattern).map(_.length).orElse(0)
def headerRegex(headerName: EvaluatableString, pattern: EvaluatableString) = {
val expression = (s: Session) => (headerName(s), pattern(s))
new HttpMultipleCheckBuilder(findExtractorFactory, findAllExtractorFactory, countExtractorFactory, expression, HeadersReceived)
}
}
| Tjoene/thesis | Case_Programs/gatling-1.4.0/gatling-http/src/main/scala/com/excilys/ebi/gatling/http/check/header/HttpHeaderRegexCheckBuilder.scala | Scala | gpl-2.0 | 3,009 |
package com.twitter.zipkin.config
import com.sun.net.httpserver.HttpExchange
import com.twitter.ostrich.admin.CustomHttpHandler
import org.specs.Specification
import org.specs.mock.{JMocker, ClassMocker}
import com.twitter.zipkin.config.sampler.AdjustableRateConfig
/**
* Test endpoints for getting and setting configurations for sample rate and storage request rate
*/
class ConfigRequestHandlerSpec extends Specification with JMocker with ClassMocker {
"ConfigRequestHandler" should {
val sampleRateConfig = mock[AdjustableRateConfig]
val exchange = mock[HttpExchange]
val customHttpHandler = mock[CustomHttpHandler]
val handler = new ConfigRequestHandler(sampleRateConfig) {
override def render(body: String, exchange: HttpExchange, code: Int) {
customHttpHandler.render(body, exchange, code)
}
}
"sampleRate" in {
"get" in {
expect {
one(exchange).getRequestMethod willReturn "GET"
one(sampleRateConfig).get willReturn 0.5
one(customHttpHandler).render("0.5", exchange, 200)
}
handler.handle(exchange, List("config", "sampleRate"), List.empty[(String, String)])
}
"set" in {
expect {
one(exchange).getRequestMethod willReturn "POST"
one(sampleRateConfig).set(0.3)
one(customHttpHandler).render("success", exchange, 200)
}
handler.handle(exchange, List("config", "sampleRate"), List(("value", "0.3")))
}
}
}
}
| cogitate/twitter-zipkin-uuid | zipkin-collector-core/src/test/scala/com/twitter/zipkin/config/ConfigRequestHandlerSpec.scala | Scala | apache-2.0 | 1,507 |
package io.finch
import cats.Id
import cats.effect.Effect
import com.twitter.finagle.http.Method
import com.twitter.util._
import scala.concurrent.duration.Duration
/**
* A result returned from an [[Endpoint]]. This models `Option[(Input, Future[Output])]` and
* represents two cases:
*
* - Endpoint is matched (think of 200).
* - Endpoint is not matched (think of 404, 405, etc).
*
* In its current state, `EndpointResult.NotMatched` is represented with two cases:
*
* - `EndpointResult.NotMatched` (very generic result usually indicating 404)
* - `EndpointResult.NotMatched.MethodNotAllowed` (indicates 405)
*
*/
sealed abstract class EndpointResult[F[_], +A] {
/**
* Whether the [[Endpoint]] is matched on a given [[Input]].
*/
final def isMatched: Boolean = this match {
case EndpointResult.Matched(_, _, _) => true
case _ => false
}
/**
* Returns the remainder of the [[Input]] after an [[Endpoint]] is matched.
*/
final def remainder: Option[Input] = this match {
case EndpointResult.Matched(rem, _, _) => Some(rem)
case _ => None
}
/**
* Returns the [[Trace]] if an [[Endpoint]] is matched.
*/
final def trace: Option[Trace] = this match {
case EndpointResult.Matched(_, trc, _) => Some(trc)
case _ => None
}
def awaitOutput(d: Duration = Duration.Inf)(implicit F: Effect[F]): Option[Either[Throwable, Output[A]]] = this match {
case EndpointResult.Matched(_, _, out) =>
try {
F.toIO(out).unsafeRunTimed(d) match {
case Some(a) => Some(Right(a))
case _ => Some(Left(new TimeoutException(s"Output wasn't returned in time: $d")))
}
} catch {
case e: Throwable => Some(Left(e))
}
case _ => None
}
def awaitOutputUnsafe(d: Duration = Duration.Inf)(implicit F: Effect[F]): Option[Output[A]] =
awaitOutput(d).map {
case Right(r) => r
case Left(ex) => throw ex
}
def awaitValue(d: Duration = Duration.Inf)(implicit F: Effect[F]): Option[Either[Throwable, A]]=
awaitOutput(d).map {
case Right(oa) => Right(oa.value)
case Left(ob) => Left(ob)
}
def awaitValueUnsafe(d: Duration = Duration.Inf)(implicit F: Effect[F]): Option[A] =
awaitOutputUnsafe(d).map(oa => oa.value)
}
object EndpointResult {
final case class Matched[F[_], A](
rem: Input,
trc: Trace,
out: F[Output[A]]
) extends EndpointResult[F, A]
abstract class NotMatched[F[_]] extends EndpointResult[F, Nothing]
object NotMatched extends NotMatched[Id] {
final case class MethodNotAllowed[F[_]](allowed: List[Method]) extends NotMatched[F]
def apply[F[_]]: NotMatched[F] = NotMatched.asInstanceOf[NotMatched[F]]
}
implicit class EndpointResultOps[F[_], A](val self: EndpointResult[F, A]) extends AnyVal {
/**
* Returns the [[Output]] if an [[Endpoint]] is matched.
*/
final def output: Option[F[Output[A]]] = self match {
case EndpointResult.Matched(_, _, out) => Some(out)
case _ => None
}
}
}
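
// Illustrative sketch (not part of the original file): the NotMatched companion above
// extends NotMatched[Id], so it can be used directly wherever no effect value is needed.
// This only exercises the pattern-matching helpers defined on EndpointResult.
object EndpointResultNotMatchedExample {
  def main(args: Array[String]): Unit = {
    val result: EndpointResult[Id, Nothing] = EndpointResult.NotMatched
    assert(!result.isMatched)        // not matched, think 404
    assert(result.remainder.isEmpty) // no remaining Input
    assert(result.trace.isEmpty)     // and no Trace
  }
}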
| ImLiar/finch | core/src/main/scala/io/finch/EndpointResult.scala | Scala | apache-2.0 | 3,034 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.template.similarproduct
import org.apache.predictionio.controller.PAlgorithm
import org.apache.predictionio.controller.Params
import org.apache.predictionio.controller.IPersistentModel
import org.apache.predictionio.controller.IPersistentModelLoader
import org.apache.predictionio.data.storage.BiMap
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.{Rating => MLlibRating}
import grizzled.slf4j.Logger
import scala.collection.mutable.PriorityQueue
case class ALSAlgorithmParams(
rank: Int,
numIterations: Int,
lambda: Double,
seed: Option[Long]) extends Params
class ALSModel(
val productFeatures: RDD[(Int, Array[Double])],
val itemStringIntMap: BiMap[String, Int],
val items: Map[Int, Item]
) extends IPersistentModel[ALSAlgorithmParams] with Serializable {
@transient lazy val itemIntStringMap = itemStringIntMap.inverse
def save(id: String, params: ALSAlgorithmParams,
sc: SparkContext): Boolean = {
productFeatures.saveAsObjectFile(s"/tmp/${id}/productFeatures")
sc.parallelize(Seq(itemStringIntMap))
.saveAsObjectFile(s"/tmp/${id}/itemStringIntMap")
sc.parallelize(Seq(items))
.saveAsObjectFile(s"/tmp/${id}/items")
true
}
override def toString = {
s" productFeatures: [${productFeatures.count()}]" +
s"(${productFeatures.take(2).toList}...)" +
s" itemStringIntMap: [${itemStringIntMap.size}]" +
s"(${itemStringIntMap.take(2).toString}...)]" +
s" items: [${items.size}]" +
s"(${items.take(2).toString}...)]"
}
}
object ALSModel
extends IPersistentModelLoader[ALSAlgorithmParams, ALSModel] {
def apply(id: String, params: ALSAlgorithmParams,
sc: Option[SparkContext]) = {
new ALSModel(
productFeatures = sc.get.objectFile(s"/tmp/${id}/productFeatures"),
itemStringIntMap = sc.get
.objectFile[BiMap[String, Int]](s"/tmp/${id}/itemStringIntMap").first,
items = sc.get
.objectFile[Map[Int, Item]](s"/tmp/${id}/items").first)
}
}
/**
* Use ALS to build item x feature matrix
*/
class ALSAlgorithm(val ap: ALSAlgorithmParams)
extends PAlgorithm[PreparedData, ALSModel, Query, PredictedResult] {
@transient lazy val logger = Logger[this.type]
def train(sc: SparkContext, data: PreparedData): ALSModel = {
require(!data.viewEvents.take(1).isEmpty,
s"viewEvents in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preparator generates PreparedData correctly.")
require(!data.users.take(1).isEmpty,
s"users in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preparator generates PreparedData correctly.")
require(!data.items.take(1).isEmpty,
s"items in PreparedData cannot be empty." +
" Please check if DataSource generates TrainingData" +
" and Preparator generates PreparedData correctly.")
// create User and item's String ID to integer index BiMap
val userStringIntMap = BiMap.stringInt(data.users.keys)
val itemStringIntMap = BiMap.stringInt(data.items.keys)
// collect Item as Map and convert ID to Int index
val items: Map[Int, Item] = data.items.map { case (id, item) =>
(itemStringIntMap(id), item)
}.collectAsMap.toMap
val mllibRatings = data.viewEvents
.map { r =>
// Convert user and item String IDs to Int index for MLlib
val uindex = userStringIntMap.getOrElse(r.user, -1)
val iindex = itemStringIntMap.getOrElse(r.item, -1)
if (uindex == -1)
logger.info(s"Couldn't convert nonexistent user ID ${r.user}"
+ " to Int index.")
if (iindex == -1)
logger.info(s"Couldn't convert nonexistent item ID ${r.item}"
+ " to Int index.")
((uindex, iindex), 1)
}.filter { case ((u, i), v) =>
// keep events with valid user and item index
(u != -1) && (i != -1)
}.reduceByKey(_ + _) // aggregate all view events of same user-item pair
.map { case ((u, i), v) =>
// MLlibRating requires integer index for user and item
MLlibRating(u, i, v)
}
.cache()
// MLLib ALS cannot handle empty training data.
require(!mllibRatings.take(1).isEmpty,
s"mllibRatings cannot be empty." +
" Please check if your events contain valid user and item ID.")
// seed for MLlib ALS
val seed = ap.seed.getOrElse(System.nanoTime)
val m = ALS.trainImplicit(
ratings = mllibRatings,
rank = ap.rank,
iterations = ap.numIterations,
lambda = ap.lambda,
blocks = -1,
alpha = 1.0,
seed = seed)
new ALSModel(
productFeatures = m.productFeatures,
itemStringIntMap = itemStringIntMap,
items = items
)
}
def predict(model: ALSModel, query: Query): PredictedResult = {
// convert items to Int index
val queryList: Set[Int] = query.items.map(model.itemStringIntMap.get(_))
.flatten.toSet
val queryFeatures: Vector[Array[Double]] = queryList.toVector.par
.map { item =>
// productFeatures may not contain the requested item
val qf: Option[Array[Double]] = model.productFeatures
.lookup(item).headOption
qf
}.seq.flatten
val whiteList: Option[Set[Int]] = query.whiteList.map( set =>
set.map(model.itemStringIntMap.get(_)).flatten
)
val blackList: Option[Set[Int]] = query.blackList.map ( set =>
set.map(model.itemStringIntMap.get(_)).flatten
)
val ord = Ordering.by[(Int, Double), Double](_._2).reverse
val indexScores: Array[(Int, Double)] = if (queryFeatures.isEmpty) {
logger.info(s"No productFeatures vector for query items ${query.items}.")
Array[(Int, Double)]()
} else {
model.productFeatures
.mapValues { f =>
queryFeatures.map{ qf =>
cosine(qf, f)
}.reduce(_ + _)
}
.filter(_._2 > 0) // keep items with score > 0
.collect()
}
val filteredScore = indexScores.view.filter { case (i, v) =>
isCandidateItem(
i = i,
items = model.items,
categories = query.categories,
queryList = queryList,
whiteList = whiteList,
blackList = blackList
)
}
val topScores = getTopN(filteredScore, query.num)(ord).toArray
val itemScores = topScores.map { case (i, s) =>
new ItemScore(
item = model.itemIntStringMap(i),
score = s
)
}
new PredictedResult(itemScores)
}
private
def getTopN[T](s: Seq[T], n: Int)(implicit ord: Ordering[T]): Seq[T] = {
val q = PriorityQueue()
for (x <- s) {
if (q.size < n)
q.enqueue(x)
else {
// q is full
if (ord.compare(x, q.head) < 0) {
q.dequeue()
q.enqueue(x)
}
}
}
q.dequeueAll.toSeq.reverse
}
private
def cosine(v1: Array[Double], v2: Array[Double]): Double = {
val size = v1.size
var i = 0
var n1: Double = 0
var n2: Double = 0
var d: Double = 0
while (i < size) {
n1 += v1(i) * v1(i)
n2 += v2(i) * v2(i)
d += v1(i) * v2(i)
i += 1
}
val n1n2 = (math.sqrt(n1) * math.sqrt(n2))
if (n1n2 == 0) 0 else (d / n1n2)
}
private
def isCandidateItem(
i: Int,
items: Map[Int, Item],
categories: Option[Set[String]],
queryList: Set[Int],
whiteList: Option[Set[Int]],
blackList: Option[Set[Int]]
): Boolean = {
whiteList.map(_.contains(i)).getOrElse(true) &&
blackList.map(!_.contains(i)).getOrElse(true) &&
// discard items in query as well
(!queryList.contains(i)) &&
// filter categories
categories.map { cat =>
items(i).categories.map { itemCat =>
// keep this item if it has overlapping categories with the query
!(itemCat.toSet.intersect(cat).isEmpty)
}.getOrElse(false) // discard this item if it has no categories
}.getOrElse(true)
}
}
| pferrel/PredictionIO | examples/scala-parallel-similarproduct/multi/src/main/scala/ALSAlgorithm.scala | Scala | apache-2.0 | 8,947 |
package jgo.tools.compiler
package interm
object LabelGroup {
private var curId: Long = 1
val User = new LabelGroup("<user>") //we should never actually see that string
}
final class LabelGroup(val id: String) {
def this() = {
this(LabelGroup.curId.toString)
LabelGroup.curId += 1
}
}
sealed class Label(val tag: String, val group: LabelGroup) {
def this(tag: String) = this(tag, new LabelGroup)
override def toString = tag + " " + group.id
}
final class UserLabel(val name: String) extends Label(name, LabelGroup.User) {
override def toString = name
}
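
// Illustrative usage (not part of the original file): compiler-generated labels render
// as their tag plus a fresh numeric group id, while user labels render as their bare name.
// The concrete ids printed depend on how many LabelGroups were created before.
object LabelExample {
  def main(args: Array[String]): Unit = {
    val break    = new Label("break")            // fresh LabelGroup, e.g. "break 1"
    val continue = new Label("continue", break.group) // shares break's group id
    val user     = new UserLabel("loopStart")    // prints just "loopStart"
    println(Seq(break, continue, user).mkString(", "))
  }
}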
| thomasmodeneis/jgo | src/src/main/scala/jgo/tools/compiler/interm/Label.scala | Scala | gpl-3.0 | 582 |
/*
* Copyright 2014–2017 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.fs.mount
import slamdata.Predef._
import quasar.fp.ski._
import quasar.contrib.pathy.APath
import argonaut._, Argonaut._, DecodeResultScalaz._
import monocle.Iso
import pathy.Path._
import scalaz.syntax.bifunctor._
import scalaz.syntax.foldable._
import scalaz.std.tuple._
import scalaz.std.list._
final case class MountingsConfig(toMap: Map[APath, MountConfig]) extends AnyVal
object MountingsConfig {
import APath._
val empty: MountingsConfig =
MountingsConfig(Map())
val mapIso: Iso[MountingsConfig, Map[APath, MountConfig]] =
Iso((_: MountingsConfig).toMap)(MountingsConfig(_))
implicit val mountingsConfigEncodeJson: EncodeJson[MountingsConfig] =
EncodeJson.of[Map[String, MountConfig]]
.contramap(_.toMap.map(_.leftMap(posixCodec.printPath(_))))
implicit val mountingsConfigDecodeJson: DecodeJson[MountingsConfig] =
DecodeJson.of[Map[String, MountConfig]]
.flatMap(m0 => DecodeJson(κ(m0.toList.foldLeftM(Map[APath, MountConfig]()) {
case (m, (s, mc)) => jString(s).as[APath].map(p => m + (p -> mc))
}))).map(MountingsConfig(_))
}
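
// Illustrative sketch (not part of the original file): round-tripping the empty
// configuration through the Argonaut codecs defined above. That the empty map renders
// as "{}" and decodes back unchanged is an assumption based on the codecs, not a fact
// taken from the original sources.
object MountingsConfigCodecExample {
  def roundTripsEmpty: Boolean = {
    val rendered = MountingsConfig.empty.asJson.nospaces
    Parse.decodeOption[MountingsConfig](rendered) == Some(MountingsConfig.empty)
  }
}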
| drostron/quasar | core/src/main/scala/quasar/fs/mount/MountingsConfig.scala | Scala | apache-2.0 | 1,717 |
package poly.collection
import poly.collection.specgroup._
/**
* @author Tongfei Chen
*/
trait IntervalMap[@sp(di) R, +V] extends Map[Interval[R], V] {
def keySet: IntervalSet[R]
def pairsCovering(x: R): Iterable[(Interval[R], V)]
def pairsOverlappingWith(r: Interval[R]): Iterable[(Interval[R], V)]
def valuesCovering(x: R): Iterable[V]
def valuesOverlappingWith(r: Interval[R]): Iterable[V]
}
| ctongfei/poly-collection | interval/src/main/scala/poly/collection/IntervalMap.scala | Scala | mit | 415 |
package uk.ac.ncl.openlab.intake24.systemsql.admin
import javax.inject.{Inject, Named}
import javax.sql.DataSource
import uk.ac.ncl.openlab.intake24.errors.UnexpectedDatabaseError
import uk.ac.ncl.openlab.intake24.services.systemdb.admin.{SigninAttempt, SigninLogService}
import uk.ac.ncl.openlab.intake24.sql.{SqlDataService, SqlResourceLoader}
import anorm.SQL
class SigninLogImpl @Inject()(@Named("intake24_system") val dataSource: DataSource) extends SigninLogService with SqlDataService with SqlResourceLoader {
def logSigninAttempt(event: SigninAttempt): Either[UnexpectedDatabaseError, Unit] = tryWithConnection {
implicit conn =>
SQL("INSERT INTO signin_log VALUES (DEFAULT, DEFAULT,{ip},{provider},{provider_key},{success},{userId},{message},{user_agent})")
.on('ip -> event.remoteAddress, 'provider -> event.provider, 'provider_key -> event.providerKey, 'success -> event.success, 'userId -> event.userId, 'message -> event.message,
'user_agent -> event.userAgent)
.execute()
Right(())
}
}
| digitalinteraction/intake24 | SystemDataSQL/src/main/scala/uk/ac/ncl/openlab/intake24/systemsql/admin/SigninLogImpl.scala | Scala | apache-2.0 | 1,050 |
package sri.mobile.examples.movies.android
import sri.mobile.ReactNative
import sri.mobile.all._
import sri.mobile.components.android.ToolbarAndroid
import sri.mobile.examples.images.AndroidBackWhiteImage
import sri.universal.router
import sri.universal.router.{NavigatorRoute, UniversalRouterComponent, UniversalRouterCtrl}
import sri.universal.styles.UniversalStyleSheet
import scala.scalajs.js
import scala.scalajs.js.annotation.ScalaJSDefined
object DefaultAndroidNavigationBar {
var rctrl: UniversalRouterCtrl = _
ReactNative.BackAndroid.addEventListener("hardwareBackPress", () => {
if (rctrl != null && rctrl.navigator.getCurrentRoutes().length > 1) {
rctrl.navigator.pop()
true
} else false
})
@ScalaJSDefined
class Component extends UniversalRouterComponent[Props, Unit] {
def render() = {
val androidback: js.UndefOr[js.Any] = if (previousRoute.isDefined) AndroidBackWhiteImage else js.undefined
ToolbarAndroid(
style = props.style.toolbar,
actions = Seq(),
navIconDynamic = androidback,
onIconClicked = () => navigateBack(),
titleColor = "white",
title = props.route.title.toString)()
}
override def componentDidMount(): Unit = {
rctrl = getRouterCtrl()
}
}
/**
* style for navigation bar
*/
trait Style extends UniversalStyleSheet {
def toolbar = style(
height := 56,
backgroundColor := "#a9a9a9"
)
}
object DefaultTheme extends Style {
override val toolbar = super.toolbar
}
case class Props(route: NavigatorRoute, style: Style)
js.constructorOf[Component].contextTypes = router.routerContextTypes
def apply(route: NavigatorRoute, style: Style = DefaultTheme, key: js.UndefOr[String] = js.undefined, ref: js.Function1[Component, Unit] = null) = makeElement[Component](props = Props(route, style), key = key, ref = ref)
}
| chandu0101/sri | mobile-examples/src/main/scala/sri/mobile/examples/movies/android/DefaultAndroidNavigationBar.scala | Scala | apache-2.0 | 1,911 |
package com.heluna.actor
import akka.actor.Actor
import com.typesafe.scalalogging.slf4j.Logging
import java.util.Date
import com.heluna.cache.Redis
import java.text.SimpleDateFormat
import scala.concurrent.duration._
import scala.language.postfixOps
import scala.concurrent.ExecutionContext.Implicits.global
import com.heluna.util.BlockedUtil
import com.heluna.actor.MetricsActor._
/**
* Created with IntelliJ IDEA.
* User: markbe
* Date: 5/1/13
* Time: 1:37 PM
*/
class MetricsActor extends Actor with Redis with Logging {
def receive = {
case Connection => resendOnException(Connection) {
redis.incr(metricsKey + "/connection")
}
case Message => resendOnException(Message) {
redis.incr(metricsKey + "/message")
}
case Blocked => resendOnException(Blocked) {
val b = redis.incr("blocked").getOrElse(0L)
redis.incr(metricsKey + "/blocked")
redis.publish(BlockedUtil.CHANNEL, b.toString)
}
case msg => logger error "Got unknown message in MetricsActor: " + msg.toString + " " + self.path.name + " at " + new Date().getTime
}
private def resendOnException(event: Event)(block: => Unit) =
try {
block
} catch {
case e: Exception =>
// Redis has gone away, just reschedule the message
context.system.scheduler.scheduleOnce(1 minute, self, event)
}
private val formatter = new SimpleDateFormat("yyyy/MM/dd")
private def metricsKey = formatter.format(new Date())
}
object MetricsActor {
sealed trait Event
case object Connection extends Event
case object Message extends Event
case object Blocked extends Event
}
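
// Illustrative wiring sketch (not part of the original file): the actor is created in an
// existing ActorSystem and driven by the Event messages above. Processing a message for
// real still needs the Redis connection supplied by the mixed-in Redis trait, and the
// pre-Akka-2.4 shutdown() call is an assumption about the Akka version in use.
object MetricsActorWiringExample {
  import akka.actor.{ActorSystem, Props}
  def main(args: Array[String]): Unit = {
    val system = ActorSystem("metrics-example")
    val metrics = system.actorOf(Props[MetricsActor], "metrics")
    metrics ! MetricsActor.Connection // increments today's connection counter
    metrics ! MetricsActor.Message    // increments today's message counter
    system.shutdown()
  }
}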
| ministryofjustice/maildrop | smtp/src/main/scala/com/heluna/actor/MetricsActor.scala | Scala | mit | 1,635 |
package scala.tools.tastytest
import scala.jdk.CollectionConverters._
object Diff {
def splitIntoLines(string: String): Seq[String] =
string.trim.replace("\r\n", "\n").split("\n").toSeq
def compareContents(output: String, check: String): String =
compareContents(splitIntoLines(output), splitIntoLines(check))
def compareContents(output: Seq[String], check: Seq[String]): String = {
val diff = difflib.DiffUtils.diff(check.asJava, output.asJava)
if (diff.getDeltas.isEmpty)
""
else
difflib.DiffUtils
.generateUnifiedDiff(
"check",
"output",
check.asJava,
diff,
1
)
.toArray()
.mkString("\n")
}
}
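
// Illustrative usage (not part of the original file): comparing two small outputs.
// An empty string means the contents match; otherwise a unified diff is returned.
object DiffExample {
  def main(args: Array[String]): Unit = {
    assert(Diff.compareContents("a\nb\nc", "a\nb\nc").isEmpty)
    val patch = Diff.compareContents(output = "a\nB\nc", check = "a\nb\nc")
    assert(patch.nonEmpty) // includes the hunk replacing "b" with "B"
    println(patch)
  }
}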
| scala/scala | src/tastytest/scala/tools/tastytest/Diff.scala | Scala | apache-2.0 | 722 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.joins
import java.io.{ByteArrayInputStream, ByteArrayOutputStream, ObjectInputStream, ObjectOutputStream}
import scala.collection.mutable.ArrayBuffer
import scala.util.Random
import org.apache.spark.SparkConf
import org.apache.spark.internal.config._
import org.apache.spark.internal.config.Kryo._
import org.apache.spark.memory.{TaskMemoryManager, UnifiedMemoryManager}
import org.apache.spark.serializer.KryoSerializer
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.map.BytesToBytesMap
import org.apache.spark.unsafe.types.UTF8String
import org.apache.spark.util.collection.CompactBuffer
class HashedRelationSuite extends SharedSparkSession {
val mm = new TaskMemoryManager(
new UnifiedMemoryManager(
new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, "false"),
Long.MaxValue,
Long.MaxValue / 2,
1),
0)
val rand = new Random(100)
// key arrays used for building different relations under test
val contiguousArray = (0 until 1000)
val sparseArray = (0 until 1000 by 10)
val randomArray = (0 until 1000).filter(_ => rand.nextBoolean())
val singleKey = Seq(BoundReference(0, LongType, false))
val projection = UnsafeProjection.create(singleKey)
// build the corresponding rows for each array type
val contiguousRows = contiguousArray.map(i => projection(InternalRow(i.toLong)).copy())
val sparseRows = sparseArray.map(i => projection(InternalRow(i.toLong)).copy())
val randomRows = randomArray.map(i => projection(InternalRow(i.toLong)).copy())
test("UnsafeHashedRelation") {
val schema = StructType(StructField("a", IntegerType, true) :: Nil)
val data = Array(InternalRow(0), InternalRow(1), InternalRow(2), InternalRow(2))
val toUnsafe = UnsafeProjection.create(schema)
val unsafeData = data.map(toUnsafe(_).copy())
val buildKey = Seq(BoundReference(0, IntegerType, false))
val hashed = UnsafeHashedRelation(unsafeData.iterator, buildKey, 1, mm)
assert(hashed.isInstanceOf[UnsafeHashedRelation])
assert(hashed.get(unsafeData(0)).toArray === Array(unsafeData(0)))
assert(hashed.get(unsafeData(1)).toArray === Array(unsafeData(1)))
assert(hashed.get(toUnsafe(InternalRow(10))) === null)
val data2 = CompactBuffer[InternalRow](unsafeData(2).copy())
data2 += unsafeData(2).copy()
assert(hashed.get(unsafeData(2)).toArray === data2.toArray)
val os = new ByteArrayOutputStream()
val out = new ObjectOutputStream(os)
hashed.asInstanceOf[UnsafeHashedRelation].writeExternal(out)
out.flush()
val in = new ObjectInputStream(new ByteArrayInputStream(os.toByteArray))
val hashed2 = new UnsafeHashedRelation()
hashed2.readExternal(in)
assert(hashed2.get(unsafeData(0)).toArray === Array(unsafeData(0)))
assert(hashed2.get(unsafeData(1)).toArray === Array(unsafeData(1)))
assert(hashed2.get(toUnsafe(InternalRow(10))) === null)
assert(hashed2.get(unsafeData(2)).toArray === data2)
val os2 = new ByteArrayOutputStream()
val out2 = new ObjectOutputStream(os2)
hashed2.asInstanceOf[UnsafeHashedRelation].writeExternal(out2)
out2.flush()
// This depends on that the order of items in BytesToBytesMap.iterator() is exactly the same
// as they are inserted
assert(java.util.Arrays.equals(os2.toByteArray, os.toByteArray))
}
test("test serialization empty hash map") {
val taskMemoryManager = new TaskMemoryManager(
new UnifiedMemoryManager(
new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, "false"),
Long.MaxValue,
Long.MaxValue / 2,
1),
0)
val binaryMap = new BytesToBytesMap(taskMemoryManager, 1, 1)
val os = new ByteArrayOutputStream()
val out = new ObjectOutputStream(os)
val hashed = new UnsafeHashedRelation(1, 1, binaryMap)
hashed.writeExternal(out)
out.flush()
val in = new ObjectInputStream(new ByteArrayInputStream(os.toByteArray))
val hashed2 = new UnsafeHashedRelation()
hashed2.readExternal(in)
val schema = StructType(StructField("a", IntegerType, true) :: Nil)
val toUnsafe = UnsafeProjection.create(schema)
val row = toUnsafe(InternalRow(0))
assert(hashed2.get(row) === null)
val os2 = new ByteArrayOutputStream()
val out2 = new ObjectOutputStream(os2)
hashed2.writeExternal(out2)
out2.flush()
assert(java.util.Arrays.equals(os2.toByteArray, os.toByteArray))
}
test("LongToUnsafeRowMap") {
val unsafeProj = UnsafeProjection.create(
Seq(BoundReference(0, LongType, false), BoundReference(1, IntegerType, true)))
val rows = (0 until 100).map(i => unsafeProj(InternalRow(Int.int2long(i), i + 1)).copy())
val key = Seq(BoundReference(0, LongType, false))
val longRelation = LongHashedRelation(rows.iterator, key, 10, mm)
assert(longRelation.keyIsUnique)
(0 until 100).foreach { i =>
val row = longRelation.getValue(i)
assert(row.getLong(0) === i)
assert(row.getInt(1) === i + 1)
}
val longRelation2 = LongHashedRelation(rows.iterator ++ rows.iterator, key, 100, mm)
assert(!longRelation2.keyIsUnique)
(0 until 100).foreach { i =>
val rows = longRelation2.get(i).toArray
assert(rows.length === 2)
assert(rows(0).getLong(0) === i)
assert(rows(0).getInt(1) === i + 1)
assert(rows(1).getLong(0) === i)
assert(rows(1).getInt(1) === i + 1)
}
val os = new ByteArrayOutputStream()
val out = new ObjectOutputStream(os)
longRelation2.writeExternal(out)
out.flush()
val in = new ObjectInputStream(new ByteArrayInputStream(os.toByteArray))
val relation = new LongHashedRelation()
relation.readExternal(in)
assert(!relation.keyIsUnique)
(0 until 100).foreach { i =>
val rows = relation.get(i).toArray
assert(rows.length === 2)
assert(rows(0).getLong(0) === i)
assert(rows(0).getInt(1) === i + 1)
assert(rows(1).getLong(0) === i)
assert(rows(1).getInt(1) === i + 1)
}
}
test("LongToUnsafeRowMap with very wide range") {
val taskMemoryManager = new TaskMemoryManager(
new UnifiedMemoryManager(
new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, "false"),
Long.MaxValue,
Long.MaxValue / 2,
1),
0)
val unsafeProj = UnsafeProjection.create(Seq(BoundReference(0, LongType, false)))
{
// SPARK-16740
val keys = Seq(0L, Long.MaxValue, Long.MaxValue)
val map = new LongToUnsafeRowMap(taskMemoryManager, 1)
keys.foreach { k =>
map.append(k, unsafeProj(InternalRow(k)))
}
map.optimize()
val row = unsafeProj(InternalRow(0L)).copy()
keys.foreach { k =>
assert(map.getValue(k, row) eq row)
assert(row.getLong(0) === k)
}
map.free()
}
{
// SPARK-16802
val keys = Seq(Long.MaxValue, Long.MaxValue - 10)
val map = new LongToUnsafeRowMap(taskMemoryManager, 1)
keys.foreach { k =>
map.append(k, unsafeProj(InternalRow(k)))
}
map.optimize()
val row = unsafeProj(InternalRow(0L)).copy()
keys.foreach { k =>
assert(map.getValue(k, row) eq row)
assert(row.getLong(0) === k)
}
assert(map.getValue(Long.MinValue, row) eq null)
map.free()
}
}
test("LongToUnsafeRowMap with random keys") {
val taskMemoryManager = new TaskMemoryManager(
new UnifiedMemoryManager(
new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, "false"),
Long.MaxValue,
Long.MaxValue / 2,
1),
0)
val unsafeProj = UnsafeProjection.create(Seq(BoundReference(0, LongType, false)))
val N = 1000000
val rand = new Random
val keys = (0 to N).map(x => rand.nextLong()).toArray
val map = new LongToUnsafeRowMap(taskMemoryManager, 10)
keys.foreach { k =>
map.append(k, unsafeProj(InternalRow(k)))
}
map.optimize()
val os = new ByteArrayOutputStream()
val out = new ObjectOutputStream(os)
map.writeExternal(out)
out.flush()
val in = new ObjectInputStream(new ByteArrayInputStream(os.toByteArray))
val map2 = new LongToUnsafeRowMap(taskMemoryManager, 1)
map2.readExternal(in)
val row = unsafeProj(InternalRow(0L)).copy()
keys.foreach { k =>
val r = map2.get(k, row)
assert(r.hasNext)
var c = 0
while (r.hasNext) {
val rr = r.next()
assert(rr.getLong(0) === k)
c += 1
}
}
var i = 0
while (i < N * 10) {
val k = rand.nextLong()
val r = map2.get(k, row)
if (r != null) {
assert(r.hasNext)
while (r.hasNext) {
assert(r.next().getLong(0) === k)
}
}
i += 1
}
map.free()
}
test("SPARK-24257: insert big values into LongToUnsafeRowMap") {
val taskMemoryManager = new TaskMemoryManager(
new UnifiedMemoryManager(
new SparkConf().set(MEMORY_OFFHEAP_ENABLED.key, "false"),
Long.MaxValue,
Long.MaxValue / 2,
1),
0)
val unsafeProj = UnsafeProjection.create(Array[DataType](StringType))
val map = new LongToUnsafeRowMap(taskMemoryManager, 1)
val key = 0L
// the page array is initialized with length 1 << 17 (1M bytes),
// so here we need a value larger than 1 << 18 (2M bytes), to trigger the bug
val bigStr = UTF8String.fromString("x" * (1 << 19))
map.append(key, unsafeProj(InternalRow(bigStr)))
map.optimize()
val resultRow = new UnsafeRow(1)
assert(map.getValue(key, resultRow).getUTF8String(0) === bigStr)
map.free()
}
test("SPARK-24809: Serializing LongToUnsafeRowMap in executor may result in data error") {
val unsafeProj = UnsafeProjection.create(Array[DataType](LongType))
val originalMap = new LongToUnsafeRowMap(mm, 1)
val key1 = 1L
val value1 = 4852306286022334418L
val key2 = 2L
val value2 = 8813607448788216010L
originalMap.append(key1, unsafeProj(InternalRow(value1)))
originalMap.append(key2, unsafeProj(InternalRow(value2)))
originalMap.optimize()
val ser = sparkContext.env.serializer.newInstance()
// Simulate serialize/deserialize twice on driver and executor
val firstTimeSerialized = ser.deserialize[LongToUnsafeRowMap](ser.serialize(originalMap))
val secondTimeSerialized =
ser.deserialize[LongToUnsafeRowMap](ser.serialize(firstTimeSerialized))
val resultRow = new UnsafeRow(1)
assert(secondTimeSerialized.getValue(key1, resultRow).getLong(0) === value1)
assert(secondTimeSerialized.getValue(key2, resultRow).getLong(0) === value2)
originalMap.free()
firstTimeSerialized.free()
secondTimeSerialized.free()
}
test("Spark-14521") {
val ser = new KryoSerializer(
(new SparkConf).set(KRYO_REFERENCE_TRACKING, false)).newInstance()
val key = Seq(BoundReference(0, LongType, false))
// Testing Kryo serialization of HashedRelation
val unsafeProj = UnsafeProjection.create(
Seq(BoundReference(0, LongType, false), BoundReference(1, IntegerType, true)))
val rows = (0 until 100).map(i => unsafeProj(InternalRow(Int.int2long(i), i + 1)).copy())
val longRelation = LongHashedRelation(rows.iterator ++ rows.iterator, key, 100, mm)
val longRelation2 = ser.deserialize[LongHashedRelation](ser.serialize(longRelation))
(0 until 100).foreach { i =>
val rows = longRelation2.get(i).toArray
assert(rows.length === 2)
assert(rows(0).getLong(0) === i)
assert(rows(0).getInt(1) === i + 1)
assert(rows(1).getLong(0) === i)
assert(rows(1).getInt(1) === i + 1)
}
// Testing Kryo serialization of UnsafeHashedRelation
val unsafeHashed = UnsafeHashedRelation(rows.iterator, key, 1, mm)
val os = new ByteArrayOutputStream()
val out = new ObjectOutputStream(os)
unsafeHashed.asInstanceOf[UnsafeHashedRelation].writeExternal(out)
out.flush()
val unsafeHashed2 = ser.deserialize[UnsafeHashedRelation](ser.serialize(unsafeHashed))
val os2 = new ByteArrayOutputStream()
val out2 = new ObjectOutputStream(os2)
unsafeHashed2.writeExternal(out2)
out2.flush()
assert(java.util.Arrays.equals(os.toByteArray, os2.toByteArray))
}
// This test require 4G heap to run, should run it manually
ignore("build HashedRelation that is larger than 1G") {
val unsafeProj = UnsafeProjection.create(
Seq(BoundReference(0, IntegerType, false),
BoundReference(1, StringType, true)))
val unsafeRow = unsafeProj(InternalRow(0, UTF8String.fromString(" " * 100)))
val key = Seq(BoundReference(0, IntegerType, false))
val rows = (0 until (1 << 24)).iterator.map { i =>
unsafeRow.setInt(0, i % 1000000)
unsafeRow.setInt(1, i)
unsafeRow
}
val unsafeRelation = UnsafeHashedRelation(rows, key, 1000, mm)
assert(unsafeRelation.estimatedSize > (2L << 30))
unsafeRelation.close()
val rows2 = (0 until (1 << 24)).iterator.map { i =>
unsafeRow.setInt(0, i % 1000000)
unsafeRow.setInt(1, i)
unsafeRow
}
val longRelation = LongHashedRelation(rows2, key, 1000, mm)
assert(longRelation.estimatedSize > (2L << 30))
longRelation.close()
}
// This test require 4G heap to run, should run it manually
ignore("build HashedRelation with more than 100 millions rows") {
val unsafeProj = UnsafeProjection.create(
Seq(BoundReference(0, IntegerType, false),
BoundReference(1, StringType, true)))
val unsafeRow = unsafeProj(InternalRow(0, UTF8String.fromString(" " * 100)))
val key = Seq(BoundReference(0, IntegerType, false))
val rows = (0 until (1 << 10)).iterator.map { i =>
unsafeRow.setInt(0, i % 1000000)
unsafeRow.setInt(1, i)
unsafeRow
}
val m = LongHashedRelation(rows, key, 100 << 20, mm)
m.close()
}
test("UnsafeHashedRelation: key set iterator on a contiguous array of keys") {
val hashedRelation = UnsafeHashedRelation(contiguousRows.iterator, singleKey, 1, mm)
val keyIterator = hashedRelation.keys()
assert(keyIterator.map(key => key.getLong(0)).toArray === contiguousArray)
}
test("UnsafeHashedRelation: key set iterator on a sparse array of keys") {
val hashedRelation = UnsafeHashedRelation(sparseRows.iterator, singleKey, 1, mm)
val keyIterator = hashedRelation.keys()
assert(keyIterator.map(key => key.getLong(0)).toArray === sparseArray)
}
test("LongHashedRelation: key set iterator on a contiguous array of keys") {
val longRelation = LongHashedRelation(contiguousRows.iterator, singleKey, 1, mm)
val keyIterator = longRelation.keys()
assert(keyIterator.map(key => key.getLong(0)).toArray === contiguousArray)
}
test("LongToUnsafeRowMap: key set iterator on a contiguous array of keys") {
val rowMap = new LongToUnsafeRowMap(mm, 1)
(contiguousArray, contiguousRows).zipped.map { (i, row) => rowMap.append(i, row) }
var keyIterator = rowMap.keys()
// in sparse mode the keys are unsorted
assert(keyIterator.map(key => key.getLong(0)).toArray.sortWith(_ < _) === contiguousArray)
// in dense mode the keys are already ordered
rowMap.optimize()
keyIterator = rowMap.keys()
assert(keyIterator.map(key => key.getLong(0)).toArray === contiguousArray)
}
test("LongToUnsafeRowMap: key set iterator on a sparse array with equidistant keys") {
val rowMap = new LongToUnsafeRowMap(mm, 1)
(sparseArray, sparseRows).zipped.map { (i, row) => rowMap.append(i, row) }
var keyIterator = rowMap.keys()
assert(keyIterator.map(_.getLong(0)).toArray.sortWith(_ < _) === sparseArray)
rowMap.optimize()
keyIterator = rowMap.keys()
assert(keyIterator.map(_.getLong(0)).toArray === sparseArray)
}
test("LongToUnsafeRowMap: key set iterator on an array with a single key") {
// build several maps each of which has a single valid key
(0 to 1000).foreach { i =>
val rowMap = new LongToUnsafeRowMap(mm, 1)
rowMap.append(i, projection(InternalRow((2 * i + 1).toLong)))
var keyIterator = rowMap.keys()
assert(keyIterator.next().getLong(0) === i)
rowMap.optimize()
keyIterator = rowMap.keys()
assert(keyIterator.next().getLong(0) === i)
rowMap.free()
}
}
test("LongToUnsafeRowMap: multiple hasNext calls before calling next() on the key iterator") {
val rowMap = new LongToUnsafeRowMap(mm, 1)
(randomArray, randomRows).zipped.map { (i, row) => rowMap.append(i, row) }
val buffer = new ArrayBuffer[Long]()
// hasNext should not change the cursor unless the key was read by a next() call
var keyIterator = rowMap.keys()
while (keyIterator.hasNext) {
keyIterator.hasNext
keyIterator.hasNext
buffer.append(keyIterator.next().getLong(0))
}
assert(buffer.sortWith(_ < _) === randomArray)
buffer.clear()
rowMap.optimize()
keyIterator = rowMap.keys()
while (keyIterator.hasNext) {
keyIterator.hasNext
keyIterator.hasNext
buffer.append(keyIterator.next().getLong(0))
}
assert(buffer === randomArray)
}
test("LongToUnsafeRowMap: no explicit hasNext calls on the key iterator") {
val rowMap = new LongToUnsafeRowMap(mm, 1)
(randomArray, randomRows).zipped.map { (i, row) => rowMap.append(i, row) }
val buffer = new ArrayBuffer[Long]()
// call next() until the buffer is filled with all keys
var keyIterator = rowMap.keys()
while (buffer.size < randomArray.size) {
buffer.append(keyIterator.next().getLong(0))
}
// attempt an illegal next() call
val caught = intercept[NoSuchElementException] {
keyIterator.next()
}
assert(caught.getLocalizedMessage === "End of the iterator")
assert(buffer.sortWith(_ < _) === randomArray)
buffer.clear()
rowMap.optimize()
keyIterator = rowMap.keys()
while (buffer.size < randomArray.size) {
buffer.append(keyIterator.next().getLong(0))
}
assert(buffer === randomArray)
}
test("LongToUnsafeRowMap: call hasNext at the end of the iterator") {
val rowMap = new LongToUnsafeRowMap(mm, 1)
(sparseArray, sparseRows).zipped.map { (i, row) => rowMap.append(i, row) }
var keyIterator = rowMap.keys()
assert(keyIterator.map(key => key.getLong(0)).toArray.sortWith(_ < _) === sparseArray)
assert(keyIterator.hasNext == false)
assert(keyIterator.hasNext == false)
rowMap.optimize()
keyIterator = rowMap.keys()
assert(keyIterator.map(key => key.getLong(0)).toArray === sparseArray)
assert(keyIterator.hasNext == false)
assert(keyIterator.hasNext == false)
}
test("LongToUnsafeRowMap: random sequence of hasNext and next() calls on the key iterator") {
val rowMap = new LongToUnsafeRowMap(mm, 1)
(randomArray, randomRows).zipped.map { (i, row) => rowMap.append(i, row) }
val buffer = new ArrayBuffer[Long]()
// call hasNext or next() at random
var keyIterator = rowMap.keys()
while (buffer.size < randomArray.size) {
if (rand.nextBoolean() && keyIterator.hasNext) {
buffer.append(keyIterator.next().getLong(0))
} else {
keyIterator.hasNext
}
}
assert(buffer.sortWith(_ < _) === randomArray)
buffer.clear()
rowMap.optimize()
keyIterator = rowMap.keys()
while (buffer.size < randomArray.size) {
if (rand.nextBoolean() && keyIterator.hasNext) {
buffer.append(keyIterator.next().getLong(0))
} else {
keyIterator.hasNext
}
}
assert(buffer === randomArray)
}
test("HashJoin: packing and unpacking with the same key type in a LongType") {
val row = InternalRow(0.toShort, 1.toShort, 2.toShort, 3.toShort)
val keys = Seq(BoundReference(0, ShortType, false),
BoundReference(1, ShortType, false),
BoundReference(2, ShortType, false),
BoundReference(3, ShortType, false))
val packed = HashJoin.rewriteKeyExpr(keys)
val unsafeProj = UnsafeProjection.create(packed)
val packedKeys = unsafeProj(row)
(0 to 3).foreach { i =>
val key = HashJoin.extractKeyExprAt(keys, i)
val proj = UnsafeProjection.create(key)
assert(proj(packedKeys).getShort(0) == i)
}
}
test("HashJoin: packing and unpacking with various key types in a LongType") {
val row = InternalRow((-1).toByte, (-2).toInt, (-3).toShort)
val keys = Seq(BoundReference(0, ByteType, false),
BoundReference(1, IntegerType, false),
BoundReference(2, ShortType, false))
val packed = HashJoin.rewriteKeyExpr(keys)
val unsafeProj = UnsafeProjection.create(packed)
val packedKeys = unsafeProj(row)
Seq((0, ByteType), (1, IntegerType), (2, ShortType)).foreach { case (i, dt) =>
val key = HashJoin.extractKeyExprAt(keys, i)
val proj = UnsafeProjection.create(key)
assert(proj(packedKeys).get(0, dt) == -i - 1)
}
}
}
| pgandhi999/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/joins/HashedRelationSuite.scala | Scala | apache-2.0 | 21,836 |
package org.allenai.plugins.archetypes
import org.allenai.plugins.CoreSettingsPlugin
import org.allenai.plugins.Ai2ReleasePlugin
import sbt.{ AutoPlugin, Plugins }
object LibraryPlugin extends AutoPlugin {
override def requires: Plugins = CoreSettingsPlugin && Ai2ReleasePlugin
}
| ryanai3/sbt-plugins | src/main/scala/org/allenai/plugins/archetypes/LibraryPlugin.scala | Scala | apache-2.0 | 285 |
package org.scalajs.openui5.sap.m
import org.scalajs.openui5.util.{Settings, SettingsMap, noSettings}
import scala.scalajs.js
import scala.scalajs.js.annotation.{JSName, ScalaJSDefined}
@ScalaJSDefined
trait MultiInputSettings extends InputSettings
object MultiInputSettings extends MultiInputSettingsBuilder(noSettings)
class MultiInputSettingsBuilder(val dict: SettingsMap)
extends Settings[MultiInputSettings, MultiInputSettingsBuilder](new MultiInputSettingsBuilder(_))
with MultiInputSetters[MultiInputSettings, MultiInputSettingsBuilder]
trait MultiInputSetters[T <: js.Object, B <: Settings[T,_]]
extends InputSetters[T, B] {
def enableMultiLineMode(v: Boolean) = setting("enableMultiLineMode", v)
def maxTokens(v: Int) = setting("maxTokens", v)
def tokens(v: js.Array[Token]) = setting("tokens", v)
def tokenChange(v: js.Function) = setting("tokenChange", v)
}
@JSName("sap.m.MultiInput")
@js.native
class MultiInput(id: js.UndefOr[String] = js.native,
settings: js.UndefOr[MultiInputSettings] = js.native) extends Input {
def this(id: String) = this(id, js.undefined)
def this(settings: MultiInputSettings) = this(js.undefined, settings)
def addValidator(validator: js.Function): this.type = js.native
def getTokens(): js.Array[Token] = js.native
}
| lastsys/scalajs-openui5 | src/main/scala/org/scalajs/openui5/sap/m/MultiInput.scala | Scala | mit | 1,310 |
/*
* Copyright (C) 2016-2017 Lightbend Inc. <http://www.lightbend.com>
*/
package akka.persistence.cassandra.compaction
import com.typesafe.config.Config
import LeveledCompactionStrategy._
/*
* https://github.com/apache/cassandra/blob/cassandra-2.2/src/java/org/apache/cassandra/db/compaction/LeveledCompactionStrategy.java
*/
class LeveledCompactionStrategy(config: Config) extends BaseCompactionStrategy(config, ClassName, propertyKeys) {
val ssTableSizeInMB: Long = if (config.hasPath("sstable_size_in_mb")) config.getLong("sstable_size_in_mb") else 160
require(ssTableSizeInMB > 0, s"sstable_size_in_mb must be larger than 0, but was $ssTableSizeInMB")
override def asCQL: String =
s"""{
|'class' : '${LeveledCompactionStrategy.ClassName}',
|${super.asCQL},
|'sstable_size_in_mb' : $ssTableSizeInMB
|}
""".stripMargin.trim
}
object LeveledCompactionStrategy extends CassandraCompactionStrategyConfig[LeveledCompactionStrategy] {
override val ClassName: String = "LeveledCompactionStrategy"
override def propertyKeys: List[String] = (
BaseCompactionStrategy.propertyKeys union List(
"sstable_size_in_mb"
)
).sorted
override def fromConfig(config: Config): LeveledCompactionStrategy = new LeveledCompactionStrategy(config)
}
| ktoso/akka-persistence-cassandra | core/src/main/scala/akka/persistence/cassandra/compaction/LeveledCompactionStrategy.scala | Scala | apache-2.0 | 1,305 |
import stainless.lang._
import stainless.annotation._
import stainless.collection._
object NewImpExamples {
final case class Box(var value: BigInt) extends AnyHeapRef
final case class BoxBox(var inner: Box) extends AnyHeapRef
sealed abstract class Op extends AnyHeapRef
case class Up(var bla: BigInt) extends Op
case class Down() extends Op
// @extern def mystery(x: BigInt): Unit = ???
def f(x: BigInt): BigInt =
-x
def g(c: Boolean, x: BigInt): BigInt =
if (c) f(f(x)) else f(x)
def h(b: Box): Unit = {
reads(Set(b))
modifies(Set(b))
b.value = g(b.value >= 0, b.value)
} ensuring(_ => b.value >= 0)
// Example inc
def inc(b: Box): Unit = {
reads(Set(b))
modifies(Set(b))
b.value = b.value + 1
} ensuring(_ => b.value > old(b.value))
// Example accumulate
def accumulateBox(b1: Box, b2: Box): Unit = {
reads(Set(b1, b2))
modifies(Set(b1))
b1.value += b2.value
}
def accumulateBoxBox(bb: BoxBox, b: Box): Unit = {
reads(Set(bb, bb.inner, b))
modifies(Set(bb.inner))
require(b.value > 0)
accumulateBox(bb.inner, b)
} ensuring(_ => bb.inner.value > old(bb.inner.value))
// Example Ops
def runOp(b: Box, op: Boolean): Unit = {
reads(Set(b))
modifies(Set(b))
if (op) b.value += 1 else b.value -= 1
}
def isWithin(x: BigInt, y: BigInt, k: BigInt): Boolean =
y - k <= x && x <= y + k
def runOps(b: Box, ops: List[Boolean]): Unit = {
reads(Set(b))
modifies(Set(b))
ops match {
case Cons(op, ops) =>
runOp(b, op)
runOps(b, ops)
case _ =>
}
} ensuring(_ => isWithin(b.value, old(b.value), ops.size))
// TODO(gsps): Add local heap to reason about allocation?
// def foo1a(op: Op): BigInt =
// if (op.isInstanceOf[Up]) 1 else -1
// def foo2a(): BigInt = {
// foo1a(Up(2))
// } ensuring(res => res == 1)
def foo1b(op: Op): BigInt = {
reads(Set(op))
op match {
case Up(_) => 1
case Down() => -1
case _ => 0 // TODO(gsps): Assume heap well-typedness in exhaustiveness checks
}
}
// def foo2b(): BigInt = {
// foo1b(Up(2))
// } ensuring(res => res == 1)
def bar(box: Box, x: BigInt): Unit = {
reads(Set(box))
modifies(Set(box))
val y = x + 1
box.value = y
}
// // `StateSpec[S]` is a first-class function acting as a two-state spec
// // It would expand to `(Heap, Heap, S) => Boolean`, and allow `old` to be used.
// def foreach[T](xs: List[T])(f: T => Unit)(spec: StateSpec[List[T]]): Unit = {
// require(f.post ===> spec(xs))
// xs match {
// case Cons(x, xs) => f(x); foreach(xs)(f)
// case _ =>
// }
// } ensuring(_ => spec(xs))
// def runOps2(b: Box, ops: List[Boolean]): Unit = {
// ops.foreach(op => runOp(b, op))(ops => isWithin(b.value, old(b.value), ops.size))
// } ensuring(_ => isWithin(b.value, old(b.value), ops.size))
}
| epfl-lara/stainless | frontends/benchmarks/full-imperative/valid/NewImperative.scala | Scala | apache-2.0 | 2,932 |
package ml.wolfe.util
import java.io._
import java.nio.charset.MalformedInputException
import java.util.concurrent.TimeUnit
import cc.factorie.util.{FastLogging, Logger}
import com.typesafe.scalalogging.slf4j.LazyLogging
import ml.wolfe.util._
import scala.collection.mutable
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
/**
* @author Sebastian Riedel
*/
object Util {
/**
* Loads a resource as stream. This returns either a resource in the classpath,
* or in case no such named resource exists, from the file system.
*/
def getStreamFromClassPathOrFile(name: String): InputStream = {
val is: InputStream = getClass.getClassLoader.getResourceAsStream(name)
if (is == null) {
new FileInputStream(name)
}
else {
is
}
}
/**
* Try a range of encodings on some statement until it doesn't fail.
* @param body body that uses encoding
* @param encodings list of encodings
* @tparam T type of value to return
* @return the value returned by the first successful encoding
*/
def tryEncodings[T](body: String => T, encodings: List[String] = List("UTF-8", "ISO-8859-1")): T = encodings match {
case Nil => sys.error("Need at least one encoding")
case enc :: Nil =>
body(enc)
case enc :: tail =>
try {
body(enc)
} catch {
case e: MalformedInputException => tryEncodings(body, tail)
}
}
def breakpoint() = {
var a = 0 // set breakpoint here for use in macro-generated code...
}
/**
* Recursively descend directory, returning a list of files.
*/
def files(directory: File): Seq[File] = {
if (!directory.exists) throw new Error("File " + directory + " does not exist")
if (directory.isFile) return List(directory)
val result = new ArrayBuffer[File]
for (entry: File <- directory.listFiles) {
if (entry.isFile) result += entry
else if (entry.isDirectory) result ++= files(entry)
}
result
}
/**
* Are x and y approximately equal, to within eps?
*/
def approxEqual(x: Double, y: Double, eps: Double = 1e-10) = {
math.abs(x - y) < eps
}
def sig(x: Double) = 1.0 / (1.0 + math.exp(-x))
def sq(x: Double) = x * x
}
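
// Illustrative usage (not part of the original file) of the small numeric helpers above.
object UtilHelpersExample {
  def main(args: Array[String]): Unit = {
    assert(Util.approxEqual(0.1 + 0.2, 0.3)) // within the default 1e-10 tolerance
    assert(Util.sig(0.0) == 0.5)             // logistic function at zero
    assert(Util.sq(3.0) == 9.0)
  }
}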
/**
* Code for IRIS dataset.
*/
object Iris {
implicit val classes = Seq(Label("Iris-setosa"), Label("Iris-versicolor"), Label("Iris-virginica"))
case class Label(label: String)
case class IrisData(features: IrisFeatures, irisClass: Label)
case class IrisFeatures(sepalLength: Double, sepalWidth: Double, petalLength: Double, petalWidth: Double)
def loadIris() = {
val stream = Util.getStreamFromClassPathOrFile("ml/wolfe/datasets/iris/iris.data")
val data = for (line <- Source.fromInputStream(stream).getLines().toBuffer if line.trim != "") yield {
val Array(sl, sw, pl, pw, ic) = line.split(",")
IrisData(IrisFeatures(sl.toDouble, sw.toDouble, pl.toDouble, pw.toDouble), Label(ic))
}
stream.close()
data
}
}
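
// Illustrative usage (not part of the original file): loading the bundled IRIS data and
// counting one of the three classes. Running this requires the iris.data resource to be
// on the classpath (or file system), since loadIris reads it via getStreamFromClassPathOrFile.
object IrisExample {
  def main(args: Array[String]): Unit = {
    val data = Iris.loadIris()
    val setosa = data.count(_.irisClass == Iris.Label("Iris-setosa"))
    println(s"loaded ${data.size} rows, $setosa setosa examples")
  }
}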
object Timer {
val timings = new mutable.HashMap[String, Long]()
def time[A](name: String)(f: => A) = {
val start = System.nanoTime
val result = f
val time: Long = TimeUnit.MILLISECONDS.convert(System.nanoTime - start, TimeUnit.NANOSECONDS)
timings(name) = time
result
}
def reported(name: String): Long = timings.getOrElse(name, -1)
def reportedVerbose(name: String): String = getTimeString(reported(name))
override def toString = timings.map({ case (name, time) => s"$name: ${getTimeString(time)}" }).mkString("\n")
}
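
// Illustrative usage (not part of the original file): timing a block and reading the
// recorded wall-clock duration back in milliseconds.
object TimerExample {
  def main(args: Array[String]): Unit = {
    val sum = Timer.time("sum") { (1 to 1000000).map(_.toLong).sum }
    println(s"sum = $sum, took ${Timer.reported("sum")} ms (${Timer.reportedVerbose("sum")})")
  }
}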
class ProgressBar(goal: Int, reportInterval: Int = 1) extends LazyLogging {
// FIXME: the "~" estimated-time display can get stuck at very long estimates
private var completed: Int = 1
private var startTime = 0l
def start() = {
startTime = System.currentTimeMillis()
}
def apply(msg: => String = "", lineBreak: Boolean = false) {
if (completed == 0 && startTime == 0) start()
if (completed % reportInterval == 0) {
val percent = completed.toDouble / goal * 100
val diffTime = System.currentTimeMillis() - startTime
val estimatedTime = (((diffTime * (goal.toDouble / completed)) - diffTime) / 1000).toInt
logger.info("[%6.2f".format(percent) + "%" + " %d/%d ".format(completed, goal) +
"%8s".format("~" + getTimeString(estimatedTime)) + "]\t" + msg + "\r")
//if (lineBreak) logger.info("")
}
if (goal == completed) logger.info("")
completed += 1
//printWriter.flush()
}
}
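
// Illustrative usage (not part of the original file): reporting every 10th step of a
// 100-step loop. Output goes through the slf4j logger provided by LazyLogging.
object ProgressBarExample {
  def main(args: Array[String]): Unit = {
    val bar = new ProgressBar(goal = 100, reportInterval = 10)
    bar.start()
    for (i <- 1 to 100) bar(s"processed item $i")
  }
}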
/**
* Hook into FACTORIE FastLogging that calls ProgressBar
*/
class ProgressLogger(maxIterations: Int, name: String, outputStream: => OutputStream = System.out) extends Logger(name, outputStream) {
val logEveryN = if (Conf.hasPath("logEveryN")) Conf.getInt("logEveryN") else 1
val progressBar = new ProgressBar(maxIterations, logEveryN)
progressBar.start()
override def info(msg: => Any): Unit = progressBar(msg.toString, lineBreak = true)
}
trait ProgressLogging extends FastLogging {
def maxIterations(): Int
override val logger: Logger =
Logger.loggerMap.getOrElseUpdate(this.getClass.getName + "progress", new ProgressLogger(maxIterations(), this.getClass.getName + "progress"))
}
/**
* A wrapper for objects that uses the identity hashmap and equals methods.
* @param value the value to be given an id.
* @tparam T type of value.
*/
class ObjectId[T <: AnyRef](val value: T) {
override def hashCode() = System.identityHashCode(value)
override def equals(obj: scala.Any) = obj match {
case o: ObjectId[_] => o.value eq value
case _ => false
}
override def toString = value.toString
}
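
// Illustrative usage (not part of the original file): ObjectId distinguishes values by
// reference identity, so two equal-but-distinct strings get different ids.
object ObjectIdExample {
  def main(args: Array[String]): Unit = {
    val a = new String("wolfe")
    val b = new String("wolfe")
    assert(a == b)                             // equal by value
    assert(new ObjectId(a) == new ObjectId(a)) // same reference, equal ids
    assert(new ObjectId(a) != new ObjectId(b)) // distinct references, different ids
  }
}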
/**
* A function that turns "lifted" functions to Options into partial functions such that repeated calls
* in isDefinedAt and apply are avoided by caching results.
* @param f the lifted function to turn into a partial function.
*/
case class CachedPartialFunction[A, B](f: A => Option[B]) extends PartialFunction[A, B] {
private var cacheArg: A = _
private var cacheResult: Option[B] = None
def cache(x: A) = {
if (x != cacheArg) {
cacheArg = x
cacheResult = f(cacheArg)
}
}
def isDefinedAt(x: A) = {
cache(x)
cacheResult.isDefined
}
def apply(x: A) = {
cache(x)
cacheResult.get
}
}
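/**
 * Hypothetical usage sketch (not part of the original file): lifts an Option-returning
 * function into a PartialFunction so that collect evaluates it only once per element.
 */
object CachedPartialFunctionExample {
  def demo(): Seq[Int] = {
    val evenSquare = CachedPartialFunction((i: Int) => if (i % 2 == 0) Some(i * i) else None)
    Seq(1, 2, 3, 4).collect(evenSquare) // Seq(4, 16)
  }
}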
| wolfe-pack/wolfe | wolfe-util/src/main/scala/ml/wolfe/util/Util.scala | Scala | apache-2.0 | 6,256 |
/*
* Copyright 2009-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package json
import org.specs2.mutable.Specification
object XmlBugs extends Specification {
import Xml._
import scala.xml.{Group, Text}
"HarryH's XML parses correctly" in {
val xml1 = <venue><id>123</id></venue>
val xml2 = <venue> <id>{"1"}{"23"}</id> </venue>
Xml.toJson(xml1) must_== Xml.toJson(xml2)
}
"HarryH's XML with attributes parses correctly" in {
val json = toJson(<tips><group type="Nearby"><tip><id>10</id></tip></group></tips>)
compactRender(json) mustEqual """{"tips":{"group":{"type":"Nearby","tip":{"id":"10"}}}}"""
}
"Jono's XML with attributes parses correctly" in {
val example1 = <word term="example" self="http://localhost:8080/word/example" available="true">content</word>
val expected1 = """{"word":"content","self":"http://localhost:8080/word/example","term":"example","available":"true"}"""
val example2 = <word term="example" self="http://localhost:8080/word/example" available="true"></word>
val expected2 = """{"self":"http://localhost:8080/word/example","term":"example","available":"true"}"""
(toJson(example1) diff parse(expected1)) mustEqual Diff(JNothing, JNothing, JNothing)
(toJson(example2) diff parse(expected2)) mustEqual Diff(JNothing, JNothing, JNothing)
}
"Nodes with attributes converted to correct JSON" in {
val xml =
<root>
<n id="10" x="abc" />
<n id="11" x="bcd" />
</root>
val expected = """{"root":{"n":[{"x":"abc","id":"10"},{"x":"bcd","id":"11"}]}}"""
val expected210 = """{"root":{"n":[{"id":"10","x":"abc"},{"id":"11","x":"bcd"}]}}"""
val json = compactRender(toJson(xml))
(json == expected || json == expected210) mustEqual true
}
"XML with empty node is converted correctly to JSON" in {
val xml =
<tips><group type="Foo"></group><group type="Bar"><tip><text>xxx</text></tip><tip><text>yyy</text></tip></group></tips>
val expected = """{"tips":{"group":[{"type":"Foo"},{"type":"Bar","tip":[{"text":"xxx"},{"text":"yyy"}]}]}}"""
compactRender(toJson(xml)) mustEqual expected
}
}
| lzpfmh/framework-2 | core/json/src/test/scala/net/liftweb/json/XmlBugs.scala | Scala | apache-2.0 | 2,718 |
package org.scalafmt.util
import java.io.File
import java.net.URL
import org.apache.commons.io.FileUtils
import org.rauschig.jarchivelib.ArchiverFactory
case class ScalaFile(filename: String, projectUrl: String, commit: String) {
def rawUrl = {
val raw = projectUrl.replace("github.com", "raw.githubusercontent.com")
s"$raw/$commit$filename"
}
def read: String = {
val toRead = new File(FileOps.getFile("target", "repos", repo), filename)
FileOps.readFile(toRead)
}
def githubUrl = s"$projectUrl/blob/$commit$filename"
def githubUrlAtLine(line: Int): String = s"$githubUrl#L$line"
def userRepo = projectUrl.stripPrefix("https://github.com/")
def repo = userRepo.split("/")(1)
def user = userRepo.split("/")(0)
override def toString: String = s"""ScalaFile(
| project: $user
| github: $githubUrl
| raw: $rawUrl
|)""".stripMargin
}
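/**
 * Hypothetical illustration (not part of the original file): what the derived URLs look
 * like for a made-up repository, commit hash and file path.
 */
object ScalaFileExample {
  def demo(): Unit = {
    val file = ScalaFile("/src/main/scala/Foo.scala", "https://github.com/someuser/somerepo", "abc1234")
    // https://raw.githubusercontent.com/someuser/somerepo/abc1234/src/main/scala/Foo.scala
    println(file.rawUrl)
    println(file.githubUrlAtLine(10))
  }
}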
object ScalaFile {
private val tarballName = "repos"
private val tarballNameWithExt = s"$tarballName.tar.gz"
private val reposTarballUrl =
s"https://github.com/scalameta/scalafmt/releases/download/v0.1.4/$tarballNameWithExt"
def getAll: Seq[ScalaFile] = {
val repos = FileOps.getFile("target", tarballName)
if (!repos.isDirectory) createReposDir()
val files = Option(repos.listFiles()).getOrElse {
throw new IllegalStateException(
s"""${repos.getAbsolutePath} is not a directory, run:
|* wget $reposTarballUrl
|* tar xvf $tarballNameWithExt
|""".stripMargin)
}
Seq(files: _*).flatMap { repo =>
val repoPrefix = repo.getPath
val commit = FileOps.readFile(new File(repo, "COMMIT")).trim
val url = FileOps.readFile(new File(repo, "URL")).trim
FileOps
.listFiles(repo)
.withFilter(_.endsWith(".scala"))
.withFilter(includeFile)
.map { sourceFile =>
val filename = sourceFile.stripPrefix(repoPrefix)
ScalaFile(filename.trim, url, commit)
}
}
}
/** If needed, downloads the tarball containing sources from different projects and extracts these files. */
private def createReposDir(): Unit = {
val currentDir = new File(".")
val localTarball = new File(currentDir, tarballNameWithExt)
if (!FileOps.getFile(tarballNameWithExt).isFile) {
downloadReposTar(destination = localTarball)
}
extractReposTar(localTarball, destination = currentDir)
}
private def downloadReposTar(destination: File): Unit = {
val fileToDownload = new URL(reposTarballUrl)
println(s"Downloading $reposTarballUrl...")
FileUtils.copyURLToFile(fileToDownload, destination)
println("Download finished.")
}
private def extractReposTar(tarball: File, destination: File): Unit = {
val archiver = ArchiverFactory.createArchiver("tar", "gz")
println(s"Extracting ${tarball.getAbsolutePath}...")
archiver.extract(tarball, destination)
println("Extracting finished.")
}
private def includeFile(filename: String): Boolean = {
!Seq(
// Computer generated
"library/src/main/scala/scala/scalajs/js/Tuple.scala",
// This fella seems to make the scalac parser hang (???)
"target/repos/scala/test/files/neg/t5510.scala",
// Unicode escapes in weird places
"target/repos/scala/test/files/neg/t8015-ffb.scala",
"target/repos/scala/test/files/pos/t389.scala",
"target/repos/scala/test/files/run/literals.scala",
"target/repos/scala/test/files/run/t3835.scala",
// Scalac parser seems to accept this, though it blows up later
"target/repos/scala/test/files/neg/t8266-invalid-interp.scala",
"target/repos/scala/test/disabled/",
"target/repos/scala/test/files/neg/",
// trailing . after number
"target/repos/scala/test/files/presentation/infix-completion/src/Snippet.scala",
// Unicode escapes in weird places
"target/repos/sbt/main/settings/src/main/scala/sbt/std/InputWrapper.scala",
// uses a package called `macro`
"target/repos/sbt/sbt/src/sbt-test/source-dependencies/inherited-macros",
"target/repos/sbt/sbt/src/sbt-test/source-dependencies/macro",
"target/repos/lila/modules/lobby/src/main/SocketHandler.scala"
).exists(filename.contains)
}
}
| Daxten/scalafmt | testUtils/src/main/scala/org/scalafmt/util/ScalaFile.scala | Scala | apache-2.0 | 4,409 |
object Test {
// Devalify shouldn't optimize this
def theTrap(cond: Boolean, t: => Unit) = {
val a,b = t
if (cond) println(a) else println(b)
}
def main(args: Array[String]): Unit = {
theTrap(true, println(1))
}
}
| som-snytt/dotty | tests/run/byname-param.scala | Scala | apache-2.0 | 237 |
/*
Copyright (c) 2017, Robby, Kansas State University
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package org.sireum
import org.sireum.test._
class BuiltInTypesTest extends TestSuite {
val tests = Tests {
* - assert(z"1" == Z(1))
* - assert(z"1" == Z(1L))
* - assert(z"1" == Z(BigInt(1)))
* - assertMatch(Z(1)) { case z"1" => }
* - assertMatch(Z(2)) { case z"2" => }
* - assert(c"0" == C('0'))
* - assertMatch(C('0')) { case c"0" => }
* - assert(f32"0" == F32(0f))
* - assertMatch(F32(0f)) { case f32"0" => }
* - assert(f64"0" == F64(0d))
* - assertMatch(F64(0d)) { case f64"0" => }
* - assert(string"abc" == String("abc"))
* - assertMatch(String("abc")) { case string"abc" => }
}
}
| sireum/v3-runtime | library/shared/src/test/scala/org/sireum/BuiltInTypesTest.scala | Scala | bsd-2-clause | 2,001 |
package lila
package object qa extends PackageObject with WithPlay {
type Tag = String
type QuestionId = Int
type AnswerId = Int
type RelId = String
type CommentId = String
}
| Happy0/lila | modules/qa/src/main/package.scala | Scala | mit | 187 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s.submit
import java.io.{File, StringWriter}
import java.util.Properties
import scala.collection.JavaConverters._
import scala.io.{Codec, Source}
import io.fabric8.kubernetes.api.model.{ConfigMap, ConfigMapBuilder, KeyToPath}
import org.apache.spark.SparkConf
import org.apache.spark.deploy.k8s.{Constants, KubernetesUtils}
import org.apache.spark.deploy.k8s.Constants.ENV_SPARK_CONF_DIR
import org.apache.spark.internal.Logging
private[spark] object KubernetesClientUtils extends Logging {
// Config map name can be 63 chars at max.
def configMapName(prefix: String): String = s"${prefix.take(54)}-conf-map"
val configMapNameExecutor: String = configMapName(s"spark-exec-${KubernetesUtils.uniqueID()}")
val configMapNameDriver: String = configMapName(s"spark-drv-${KubernetesUtils.uniqueID()}")
private def buildStringFromPropertiesMap(configMapName: String,
propertiesMap: Map[String, String]): String = {
val properties = new Properties()
propertiesMap.foreach { case (k, v) =>
properties.setProperty(k, v)
}
val propertiesWriter = new StringWriter()
properties.store(propertiesWriter,
s"Java properties built from Kubernetes config map with name: $configMapName")
propertiesWriter.toString
}
/**
* Build, file -> 'file's content' map of all the selected files in SPARK_CONF_DIR.
*/
def buildSparkConfDirFilesMap(configMapName: String,
sparkConf: SparkConf, resolvedPropertiesMap: Map[String, String]): Map[String, String] = {
val loadedConfFilesMap = KubernetesClientUtils.loadSparkConfDirFiles(sparkConf)
// Add resolved spark conf to the loaded configuration files map.
if (resolvedPropertiesMap.nonEmpty) {
val resolvedProperties: String = KubernetesClientUtils
.buildStringFromPropertiesMap(configMapName, resolvedPropertiesMap)
loadedConfFilesMap ++ Map(Constants.SPARK_CONF_FILE_NAME -> resolvedProperties)
} else {
loadedConfFilesMap
}
}
def buildKeyToPathObjects(confFilesMap: Map[String, String]): Seq[KeyToPath] = {
confFilesMap.map {
case (fileName: String, _: String) =>
val filePermissionMode = 420 // 420 is decimal for octal literal 0644.
new KeyToPath(fileName, filePermissionMode, fileName)
}.toList.sortBy(x => x.getKey) // List is sorted to make mocking based tests work
}
/**
* Build a Config Map that will hold the content for environment variable SPARK_CONF_DIR
* on remote pods.
*/
def buildConfigMap(configMapName: String, confFileMap: Map[String, String],
withLabels: Map[String, String] = Map()): ConfigMap = {
new ConfigMapBuilder()
.withNewMetadata()
.withName(configMapName)
.withLabels(withLabels.asJava)
.endMetadata()
.addToData(confFileMap.asJava)
.build()
}
private def loadSparkConfDirFiles(conf: SparkConf): Map[String, String] = {
val confDir = Option(conf.getenv(ENV_SPARK_CONF_DIR)).orElse(
conf.getOption("spark.home").map(dir => s"$dir/conf"))
if (confDir.isDefined) {
val confFiles = listConfFiles(confDir.get)
logInfo(s"Spark configuration files loaded from $confDir : ${confFiles.mkString(",")}")
confFiles.map { file =>
val source = Source.fromFile(file)(Codec.UTF8)
val mapping = (file.getName -> source.mkString)
source.close()
mapping
}.toMap
} else {
Map.empty[String, String]
}
}
private def listConfFiles(confDir: String): Seq[File] = {
// We exclude all the template files and user provided spark conf or properties.
// As spark properties are resolved in a different step.
val fileFilter = (f: File) => {
f.isFile && !(f.getName.endsWith("template") ||
f.getName.matches("spark.*(conf|properties)"))
}
val confFiles: Seq[File] = {
val dir = new File(confDir)
if (dir.isDirectory) {
dir.listFiles.filter(x => fileFilter(x)).toSeq
} else {
Nil
}
}
confFiles
}
}
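/**
 * Hypothetical usage sketch (not part of the original Spark sources): builds a driver
 * config map from the local SPARK_CONF_DIR plus one resolved property. The property
 * value "demo" is an arbitrary example.
 */
private[spark] object KubernetesClientUtilsExample {
  def demo(sparkConf: SparkConf): ConfigMap = {
    val name = KubernetesClientUtils.configMapNameDriver
    val confFiles = KubernetesClientUtils.buildSparkConfDirFilesMap(
      name, sparkConf, Map("spark.app.name" -> "demo"))
    KubernetesClientUtils.buildConfigMap(name, confFiles)
  }
}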
| shuangshuangwang/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/submit/KubernetesClientUtils.scala | Scala | apache-2.0 | 4,861 |
package twitch.api.usernotice
object UserNoticeMessageId extends Enumeration {
type UserNoticeMessageId = Value
val UNKNOWN: UserNoticeMessageId = Value("Unknown")
val RESUBSCRIPTION: UserNoticeMessageId = Value("resub")
val SUBSCRIPTION: UserNoticeMessageId = Value("sub")
val GIFTED_SUBSCRIPTION: UserNoticeMessageId = Value("subgift")
val MYSTERY_GIFTED_SUBSCRIPTION: UserNoticeMessageId = Value("submysterygift")
val GIFT_PAID_UPGRADE: UserNoticeMessageId = Value("giftpaidupgrade")
val CHARITY: UserNoticeMessageId = Value("charity")
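  /**
   * Hypothetical usage sketch (not part of the original file): maps a raw msg-id tag from
   * a Twitch IRC USERNOTICE onto this enumeration, falling back to UNKNOWN.
   */
  object Example {
    def parse(tag: String): UserNoticeMessageId =
      values.find(_.toString == tag).getOrElse(UNKNOWN)
  }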
} | Cobbleopolis/MonsterTruckBot | modules/twitch/app/twitch/api/usernotice/UserNoticeMessageId.scala | Scala | mit | 576 |
package com.tattyseal.hgp.block
import net.minecraft.block.Block
import net.minecraft.init.{Blocks, Items}
import net.minecraft.item.{ItemStack, ItemBlock}
/**
* Created by Toby on 05/02/2015.
*/
class ItemBlockStainedBlock(block: Block) extends ItemBlock(block)
{
setHasSubtypes(true);
setUnlocalizedName("hgp.coloredStainedPane");
override def getUnlocalizedName(stack: ItemStack): String =
{
return super.getUnlocalizedName() + "." + stack.getItemDamage();
}
override def getMetadata(damage: Int): Int =
{
return damage;
}
override def getItemStackDisplayName(stack: ItemStack): String = new ItemStack(Blocks.stained_glass_pane, 1, stack.getItemDamage()).getDisplayName();
}
| tattyseal/HGP | src/main/scala/com/tattyseal/hgp/block/ItemBlockStainedBlock.scala | Scala | bsd-3-clause | 711 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.classification
import org.apache.hadoop.fs.Path
import org.json4s.DefaultFormats
import org.apache.spark.annotation.Since
import org.apache.spark.ml.PredictorParams
import org.apache.spark.ml.functions.checkNonNegativeWeight
import org.apache.spark.ml.impl.Utils
import org.apache.spark.ml.linalg._
import org.apache.spark.ml.param.{DoubleParam, Param, ParamMap, ParamValidators}
import org.apache.spark.ml.param.shared.HasWeightCol
import org.apache.spark.ml.stat.Summarizer
import org.apache.spark.ml.util._
import org.apache.spark.ml.util.Instrumentation.instrumented
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.sql.{Dataset, Row}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types._
import org.apache.spark.util.VersionUtils
/**
* Params for Naive Bayes Classifiers.
*/
private[classification] trait NaiveBayesParams extends PredictorParams with HasWeightCol {
/**
* The smoothing parameter.
* (default = 1.0).
* @group param
*/
final val smoothing: DoubleParam = new DoubleParam(this, "smoothing", "The smoothing parameter.",
ParamValidators.gtEq(0))
/** @group getParam */
final def getSmoothing: Double = $(smoothing)
/**
* The model type which is a string (case-sensitive).
* Supported options: "multinomial", "complement", "bernoulli", "gaussian".
* (default = multinomial)
* @group param
*/
final val modelType: Param[String] = new Param[String](this, "modelType", "The model type " +
"which is a string (case-sensitive). Supported options: multinomial (default), complement, " +
"bernoulli and gaussian.",
ParamValidators.inArray[String](NaiveBayes.supportedModelTypes.toArray))
/** @group getParam */
final def getModelType: String = $(modelType)
setDefault(smoothing -> 1.0, modelType -> NaiveBayes.Multinomial)
}
// scalastyle:off line.size.limit
/**
* Naive Bayes Classifiers.
* It supports Multinomial NB
* (see <a href="http://nlp.stanford.edu/IR-book/html/htmledition/naive-bayes-text-classification-1.html">
* here</a>)
* which can handle finitely supported discrete data. For example, by converting documents into
* TF-IDF vectors, it can be used for document classification. By making every vector a
* binary (0/1) data, it can also be used as Bernoulli NB
* (see <a href="http://nlp.stanford.edu/IR-book/html/htmledition/the-bernoulli-model-1.html">
* here</a>).
* The input feature values for Multinomial NB and Bernoulli NB must be nonnegative.
* Since 3.0.0, it supports Complement NB which is an adaptation of the Multinomial NB. Specifically,
* Complement NB uses statistics from the complement of each class to compute the model's coefficients
* The inventors of Complement NB show empirically that the parameter estimates for CNB are more stable
* than those for Multinomial NB. Like Multinomial NB, the input feature values for Complement NB must
* be nonnegative.
* Since 3.0.0, it also supports Gaussian NB
* (see <a href="https://en.wikipedia.org/wiki/Naive_Bayes_classifier#Gaussian_naive_Bayes">
* here</a>)
* which can handle continuous data.
*/
// scalastyle:on line.size.limit
@Since("1.5.0")
class NaiveBayes @Since("1.5.0") (
@Since("1.5.0") override val uid: String)
extends ProbabilisticClassifier[Vector, NaiveBayes, NaiveBayesModel]
with NaiveBayesParams with DefaultParamsWritable {
import NaiveBayes._
@Since("1.5.0")
def this() = this(Identifiable.randomUID("nb"))
/**
* Set the smoothing parameter.
* Default is 1.0.
* @group setParam
*/
@Since("1.5.0")
def setSmoothing(value: Double): this.type = set(smoothing, value)
/**
* Set the model type using a string (case-sensitive).
* Supported options: "multinomial", "complement", "bernoulli", and "gaussian".
* Default is "multinomial"
* @group setParam
*/
@Since("1.5.0")
def setModelType(value: String): this.type = set(modelType, value)
/**
* Sets the value of param [[weightCol]].
* If this is not set or empty, we treat all instance weights as 1.0.
* Default is not set, so all instances have weight one.
*
* @group setParam
*/
@Since("2.1.0")
def setWeightCol(value: String): this.type = set(weightCol, value)
override protected def train(dataset: Dataset[_]): NaiveBayesModel = {
trainWithLabelCheck(dataset, positiveLabel = true)
}
/**
* ml assumes input labels in range [0, numClasses). But this implementation
* is also called by mllib NaiveBayes which allows other kinds of input labels
* such as {-1, +1}. `positiveLabel` is used to determine whether the label
* should be checked and it should be removed when we remove mllib NaiveBayes.
*/
private[spark] def trainWithLabelCheck(
dataset: Dataset[_],
positiveLabel: Boolean): NaiveBayesModel = instrumented { instr =>
instr.logPipelineStage(this)
instr.logDataset(dataset)
instr.logParams(this, labelCol, featuresCol, weightCol, predictionCol, rawPredictionCol,
probabilityCol, modelType, smoothing, thresholds)
if (positiveLabel && isDefined(thresholds)) {
val numClasses = getNumClasses(dataset)
instr.logNumClasses(numClasses)
require($(thresholds).length == numClasses, this.getClass.getSimpleName +
".train() called with non-matching numClasses and thresholds.length." +
s" numClasses=$numClasses, but thresholds has length ${$(thresholds).length}")
}
$(modelType) match {
case Bernoulli | Multinomial | Complement =>
trainDiscreteImpl(dataset, instr)
case Gaussian =>
trainGaussianImpl(dataset, instr)
case _ =>
// This should never happen.
throw new IllegalArgumentException(s"Invalid modelType: ${$(modelType)}.")
}
}
private def trainDiscreteImpl(
dataset: Dataset[_],
instr: Instrumentation): NaiveBayesModel = {
val spark = dataset.sparkSession
import spark.implicits._
val validateUDF = $(modelType) match {
case Multinomial | Complement =>
udf { vector: Vector => requireNonnegativeValues(vector); vector }
case Bernoulli =>
udf { vector: Vector => requireZeroOneBernoulliValues(vector); vector }
}
val w = if (isDefined(weightCol) && $(weightCol).nonEmpty) {
checkNonNegativeWeight(col($(weightCol)).cast(DoubleType))
} else {
lit(1.0)
}
// Aggregates term frequencies per label.
val aggregated = dataset.groupBy(col($(labelCol)))
.agg(sum(w).as("weightSum"), Summarizer.metrics("sum", "count")
.summary(validateUDF(col($(featuresCol))), w).as("summary"))
.select($(labelCol), "weightSum", "summary.sum", "summary.count")
.as[(Double, Double, Vector, Long)]
.collect().sortBy(_._1)
val numFeatures = aggregated.head._3.size
instr.logNumFeatures(numFeatures)
val numSamples = aggregated.map(_._4).sum
instr.logNumExamples(numSamples)
val numLabels = aggregated.length
instr.logNumClasses(numLabels)
val numDocuments = aggregated.map(_._2).sum
instr.logSumOfWeights(numDocuments)
val labelArray = new Array[Double](numLabels)
val piArray = new Array[Double](numLabels)
val thetaArray = new Array[Double](numLabels * numFeatures)
val aggIter = $(modelType) match {
case Multinomial | Bernoulli => aggregated.iterator
case Complement =>
val featureSum = Vectors.zeros(numFeatures)
aggregated.foreach { case (_, _, sumTermFreqs, _) =>
BLAS.axpy(1.0, sumTermFreqs, featureSum)
}
aggregated.iterator.map { case (label, n, sumTermFreqs, count) =>
val comp = featureSum.copy
BLAS.axpy(-1.0, sumTermFreqs, comp)
(label, n, comp, count)
}
}
val lambda = $(smoothing)
val piLogDenom = math.log(numDocuments + numLabels * lambda)
var i = 0
aggIter.foreach { case (label, n, sumTermFreqs, _) =>
labelArray(i) = label
piArray(i) = math.log(n + lambda) - piLogDenom
val thetaLogDenom = $(modelType) match {
case Multinomial | Complement =>
math.log(sumTermFreqs.toArray.sum + numFeatures * lambda)
case Bernoulli => math.log(n + 2.0 * lambda)
}
var j = 0
val offset = i * numFeatures
while (j < numFeatures) {
thetaArray(offset + j) = math.log(sumTermFreqs(j) + lambda) - thetaLogDenom
j += 1
}
i += 1
}
val pi = Vectors.dense(piArray)
$(modelType) match {
case Multinomial | Bernoulli =>
val theta = new DenseMatrix(numLabels, numFeatures, thetaArray, true)
new NaiveBayesModel(uid, pi.compressed, theta.compressed, Matrices.zeros(0, 0))
.setOldLabels(labelArray)
case Complement =>
// Since the CNB compute the coefficient in a complement way.
val theta = new DenseMatrix(numLabels, numFeatures, thetaArray.map(v => -v), true)
new NaiveBayesModel(uid, pi.compressed, theta.compressed, Matrices.zeros(0, 0))
}
}
private def trainGaussianImpl(
dataset: Dataset[_],
instr: Instrumentation): NaiveBayesModel = {
val spark = dataset.sparkSession
import spark.implicits._
val w = if (isDefined(weightCol) && $(weightCol).nonEmpty) {
checkNonNegativeWeight(col($(weightCol)).cast(DoubleType))
} else {
lit(1.0)
}
// Aggregates mean vector and square-sum vector per label.
val aggregated = dataset.groupBy(col($(labelCol)))
.agg(sum(w).as("weightSum"), Summarizer.metrics("mean", "normL2")
.summary(col($(featuresCol)), w).as("summary"))
.select($(labelCol), "weightSum", "summary.mean", "summary.normL2")
.as[(Double, Double, Vector, Vector)]
.map { case (label, weightSum, mean, normL2) =>
(label, weightSum, mean, Vectors.dense(normL2.toArray.map(v => v * v)))
}.collect().sortBy(_._1)
val numFeatures = aggregated.head._3.size
instr.logNumFeatures(numFeatures)
val numLabels = aggregated.length
instr.logNumClasses(numLabels)
val numInstances = aggregated.map(_._2).sum
instr.logSumOfWeights(numInstances)
// If the ratio of data variance between dimensions is too small, it
// will cause numerical errors. To address this, we artificially
// boost the variance by epsilon, a small fraction of the standard
// deviation of the largest dimension.
// Refer to scikit-learn's implementation
// [https://github.com/scikit-learn/scikit-learn/blob/0.21.X/sklearn/naive_bayes.py#L348]
// and discussion [https://github.com/scikit-learn/scikit-learn/pull/5349] for detail.
val epsilon = Iterator.range(0, numFeatures).map { j =>
var globalSum = 0.0
var globalSqrSum = 0.0
aggregated.foreach { case (_, weightSum, mean, squareSum) =>
globalSum += mean(j) * weightSum
globalSqrSum += squareSum(j)
}
globalSqrSum / numInstances -
globalSum * globalSum / numInstances / numInstances
}.max * 1e-9
val piArray = new Array[Double](numLabels)
    // thetaArray in Gaussian NB stores the means of features per label
    val thetaArray = new Array[Double](numLabels * numFeatures)
    // sigmaArray in Gaussian NB stores the variances of features per label
val sigmaArray = new Array[Double](numLabels * numFeatures)
var i = 0
val logNumInstances = math.log(numInstances)
aggregated.foreach { case (_, weightSum, mean, squareSum) =>
piArray(i) = math.log(weightSum) - logNumInstances
var j = 0
val offset = i * numFeatures
while (j < numFeatures) {
val m = mean(j)
thetaArray(offset + j) = m
sigmaArray(offset + j) = epsilon + squareSum(j) / weightSum - m * m
j += 1
}
i += 1
}
val pi = Vectors.dense(piArray)
val theta = new DenseMatrix(numLabels, numFeatures, thetaArray, true)
val sigma = new DenseMatrix(numLabels, numFeatures, sigmaArray, true)
new NaiveBayesModel(uid, pi.compressed, theta.compressed, sigma.compressed)
}
@Since("1.5.0")
override def copy(extra: ParamMap): NaiveBayes = defaultCopy(extra)
}
@Since("1.6.0")
object NaiveBayes extends DefaultParamsReadable[NaiveBayes] {
/** String name for multinomial model type. */
private[classification] val Multinomial: String = "multinomial"
/** String name for Bernoulli model type. */
private[classification] val Bernoulli: String = "bernoulli"
/** String name for Gaussian model type. */
private[classification] val Gaussian: String = "gaussian"
/** String name for Complement model type. */
private[classification] val Complement: String = "complement"
/* Set of modelTypes that NaiveBayes supports */
private[classification] val supportedModelTypes =
Set(Multinomial, Bernoulli, Gaussian, Complement)
private[ml] def requireNonnegativeValues(v: Vector): Unit = {
require(v.nonZeroIterator.forall(_._2 > 0.0),
s"Naive Bayes requires nonnegative feature values but found $v.")
}
private[ml] def requireZeroOneBernoulliValues(v: Vector): Unit = {
require(v.nonZeroIterator.forall(_._2 == 1.0),
s"Bernoulli naive Bayes requires 0 or 1 feature values but found $v.")
}
@Since("1.6.0")
override def load(path: String): NaiveBayes = super.load(path)
}
/**
* Model produced by [[NaiveBayes]]
*
* @param pi log of class priors, whose dimension is C (number of classes)
* @param theta log of class conditional probabilities, whose dimension is C (number of classes)
* by D (number of features)
* @param sigma variance of each feature, whose dimension is C (number of classes)
* by D (number of features). This matrix is only available when modelType
* is set Gaussian.
*/
@Since("1.5.0")
class NaiveBayesModel private[ml] (
@Since("1.5.0") override val uid: String,
@Since("2.0.0") val pi: Vector,
@Since("2.0.0") val theta: Matrix,
@Since("3.0.0") val sigma: Matrix)
extends ProbabilisticClassificationModel[Vector, NaiveBayesModel]
with NaiveBayesParams with MLWritable {
import NaiveBayes._
/**
* mllib NaiveBayes is a wrapper of ml implementation currently.
* Input labels of mllib could be {-1, +1} and mllib NaiveBayesModel exposes labels,
* both of which are different from ml, so we should store the labels sequentially
* to be called by mllib. This should be removed when we remove mllib NaiveBayes.
*/
private[spark] var oldLabels: Array[Double] = null
private[spark] def setOldLabels(labels: Array[Double]): this.type = {
this.oldLabels = labels
this
}
/**
* Bernoulli scoring requires log(condprob) if 1, log(1-condprob) if 0.
* This precomputes log(1.0 - exp(theta)) and its sum which are used for the linear algebra
* application of this condition (in predict function).
*/
@transient private lazy val thetaMinusNegTheta = $(modelType) match {
case Bernoulli =>
theta.map(value => value - math.log1p(-math.exp(value)))
case _ =>
// This should never happen.
throw new IllegalArgumentException(s"Invalid modelType: ${$(modelType)}. " +
"Variables thetaMinusNegTheta should only be precomputed in Bernoulli NB.")
}
@transient private lazy val piMinusThetaSum = $(modelType) match {
case Bernoulli =>
val negTheta = theta.map(value => math.log1p(-math.exp(value)))
val ones = new DenseVector(Array.fill(theta.numCols)(1.0))
val piMinusThetaSum = pi.toDense.copy
BLAS.gemv(1.0, negTheta, ones, 1.0, piMinusThetaSum)
piMinusThetaSum
case _ =>
// This should never happen.
throw new IllegalArgumentException(s"Invalid modelType: ${$(modelType)}. " +
"Variables piMinusThetaSum should only be precomputed in Bernoulli NB.")
}
/**
* Gaussian scoring requires sum of log(Variance).
* This precomputes sum of log(Variance) which are used for the linear algebra
* application of this condition (in predict function).
*/
@transient private lazy val logVarSum = $(modelType) match {
case Gaussian =>
Array.tabulate(numClasses) { i =>
Iterator.range(0, numFeatures).map { j =>
math.log(sigma(i, j))
}.sum
}
case _ =>
// This should never happen.
throw new IllegalArgumentException(s"Invalid modelType: ${$(modelType)}. " +
"Variables logVarSum should only be precomputed in Gaussian NB.")
}
@Since("1.6.0")
override val numFeatures: Int = theta.numCols
@Since("1.5.0")
override val numClasses: Int = pi.size
private def multinomialCalculation(features: Vector) = {
requireNonnegativeValues(features)
val prob = pi.toDense.copy
BLAS.gemv(1.0, theta, features, 1.0, prob)
prob
}
private def complementCalculation(features: Vector) = {
requireNonnegativeValues(features)
val probArray = theta.multiply(features).toArray
// the following lines equal to:
// val logSumExp = math.log(probArray.map(math.exp).sum)
// However, it easily returns Infinity/NaN values.
// Here follows 'scipy.special.logsumexp' (which is used in Scikit-Learn's ComplementNB)
// to compute the log of the sum of exponentials of elements in a numeric-stable way.
val max = probArray.max
var sumExp = 0.0
var j = 0
while (j < probArray.length) {
sumExp += math.exp(probArray(j) - max)
j += 1
}
val logSumExp = math.log(sumExp) + max
j = 0
while (j < probArray.length) {
probArray(j) = probArray(j) - logSumExp
j += 1
}
Vectors.dense(probArray)
}
private def bernoulliCalculation(features: Vector) = {
requireZeroOneBernoulliValues(features)
val prob = piMinusThetaSum.copy
BLAS.gemv(1.0, thetaMinusNegTheta, features, 1.0, prob)
prob
}
private def gaussianCalculation(features: Vector) = {
val prob = Array.ofDim[Double](numClasses)
var i = 0
while (i < numClasses) {
var s = 0.0
var j = 0
while (j < numFeatures) {
val d = features(j) - theta(i, j)
s += d * d / sigma(i, j)
j += 1
}
prob(i) = pi(i) - (s + logVarSum(i)) / 2
i += 1
}
Vectors.dense(prob)
}
@transient private lazy val predictRawFunc = {
$(modelType) match {
case Multinomial =>
features: Vector => multinomialCalculation(features)
case Complement =>
features: Vector => complementCalculation(features)
case Bernoulli =>
features: Vector => bernoulliCalculation(features)
case Gaussian =>
features: Vector => gaussianCalculation(features)
}
}
@Since("3.0.0")
override def predictRaw(features: Vector): Vector = predictRawFunc(features)
override protected def raw2probabilityInPlace(rawPrediction: Vector): Vector = {
rawPrediction match {
case dv: DenseVector =>
Utils.softmax(dv.values)
dv
case sv: SparseVector =>
throw new RuntimeException("Unexpected error in NaiveBayesModel:" +
" raw2probabilityInPlace encountered SparseVector")
}
}
@Since("1.5.0")
override def copy(extra: ParamMap): NaiveBayesModel = {
copyValues(new NaiveBayesModel(uid, pi, theta, sigma).setParent(this.parent), extra)
}
@Since("1.5.0")
override def toString: String = {
s"NaiveBayesModel: uid=$uid, modelType=${$(modelType)}, numClasses=$numClasses, " +
s"numFeatures=$numFeatures"
}
@Since("1.6.0")
override def write: MLWriter = new NaiveBayesModel.NaiveBayesModelWriter(this)
}
@Since("1.6.0")
object NaiveBayesModel extends MLReadable[NaiveBayesModel] {
@Since("1.6.0")
override def read: MLReader[NaiveBayesModel] = new NaiveBayesModelReader
@Since("1.6.0")
override def load(path: String): NaiveBayesModel = super.load(path)
/** [[MLWriter]] instance for [[NaiveBayesModel]] */
private[NaiveBayesModel] class NaiveBayesModelWriter(instance: NaiveBayesModel) extends MLWriter {
import NaiveBayes._
private case class Data(pi: Vector, theta: Matrix, sigma: Matrix)
override protected def saveImpl(path: String): Unit = {
// Save metadata and Params
DefaultParamsWriter.saveMetadata(instance, path, sc)
val dataPath = new Path(path, "data").toString
instance.getModelType match {
case Multinomial | Bernoulli | Complement =>
require(instance.sigma.numRows == 0 && instance.sigma.numCols == 0)
case Gaussian =>
require(instance.sigma.numRows != 0 && instance.sigma.numCols != 0)
}
val data = Data(instance.pi, instance.theta, instance.sigma)
sparkSession.createDataFrame(Seq(data)).repartition(1).write.parquet(dataPath)
}
}
private class NaiveBayesModelReader extends MLReader[NaiveBayesModel] {
/** Checked against metadata when loading model */
private val className = classOf[NaiveBayesModel].getName
override def load(path: String): NaiveBayesModel = {
implicit val format = DefaultFormats
val metadata = DefaultParamsReader.loadMetadata(path, sc, className)
val (major, minor) = VersionUtils.majorMinorVersion(metadata.sparkVersion)
val dataPath = new Path(path, "data").toString
val data = sparkSession.read.parquet(dataPath)
val vecConverted = MLUtils.convertVectorColumnsToML(data, "pi")
val model = if (major.toInt < 3) {
val Row(pi: Vector, theta: Matrix) =
MLUtils.convertMatrixColumnsToML(vecConverted, "theta")
.select("pi", "theta")
.head()
new NaiveBayesModel(metadata.uid, pi, theta, Matrices.zeros(0, 0))
} else {
val Row(pi: Vector, theta: Matrix, sigma: Matrix) =
MLUtils.convertMatrixColumnsToML(vecConverted, "theta", "sigma")
.select("pi", "theta", "sigma")
.head()
new NaiveBayesModel(metadata.uid, pi, theta, sigma)
}
metadata.getAndSetParams(model)
model
}
}
}
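/**
 * Hypothetical usage sketch (not part of the original Spark sources): wires up the
 * NaiveBayes estimator defined above. `training` and `test` are assumed DataFrames with
 * the default "label" and "features" columns.
 */
private[classification] object NaiveBayesUsageSketch {
  def demo(training: Dataset[_], test: Dataset[_]): Unit = {
    val nb = new NaiveBayes()
      .setModelType("multinomial") // or "bernoulli", "complement", "gaussian"
      .setSmoothing(1.0)
    val model = nb.fit(training) // fit is inherited from the Predictor base class
    model.transform(test).show() // adds rawPrediction, probability and prediction columns
  }
}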
| shaneknapp/spark | mllib/src/main/scala/org/apache/spark/ml/classification/NaiveBayes.scala | Scala | apache-2.0 | 22,798 |
package org.jetbrains.plugins.scala.failed.resolve
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReferenceElement
import org.jetbrains.plugins.scala.lang.resolve.ScalaResolveTestCase
import org.junit.Assert._
/**
* @author Nikolay.Tropin
*/
abstract class FailedResolveTest(dirName: String) extends ScalaResolveTestCase {
override def folderPath(): String = s"${super.folderPath()}resolve/failed/$dirName"
override def rootPath(): String = folderPath()
def doTest(): Unit = {
findReferenceAtCaret() match {
case ref: ScReferenceElement =>
val variants = ref.multiResolve(false)
assertTrue(s"Single resolve expected, was: ${variants.length}", variants.length == 1)
}
}
}
| whorbowicz/intellij-scala | test/org/jetbrains/plugins/scala/failed/resolve/FailedResolveTest.scala | Scala | apache-2.0 | 726 |
package glint.messages.server.request
/**
* A push request for matrices containing floats
*
* @param rows The row indices
* @param cols The column indices
* @param values The values to add
*/
private[glint] case class PushMatrixFloat(id: Int, rows: Array[Long], cols: Array[Int], values: Array[Float]) extends Request
| rjagerman/glint | src/main/scala/glint/messages/server/request/PushMatrixFloat.scala | Scala | mit | 331 |
package com.shellhive.angular.components.nodes
import biz.enef.angulate.Directive
import biz.enef.angulate.core.{Attributes, JQLite}
import com.shellhive.components.Icon
import org.scalajs.jquery.{JQuery, JQueryEventObject}
import com.shellhive.i18n._
import scala.scalajs.js
/**
* Created by Omar Castro on 29/05/2016.
*/
class ComponentTitleDirective extends Directive {
override type ScopeType = js.Dynamic
override val restrict = "A"
override val scope = true
override val template = {
import scalatags.Text.all._
val tooltip = div(
cls := "tooltip",
"ng-if".attr := "showTooltip",
"ng-style".attr := "{transform:'translate(-50%) scale('+(1/transformScale())+')'}")(
i18n.help.componentMove.tooltip.translate
)
val titleName = span(cls:="title-name", "ng-bind".attr := "title.name")
val buttonGroup = span(
cls:="button-group",
"ng-if".attr := "title.buttons")(
a("ng-click".attr := "togglecollapse()", "ng-class".attr := s"(collapsed)?'${Icon.chevronUp}':'${Icon.chevronDown}'"),
" ",
a("ng-click".attr := "$emit('removeComponent', data.id)", cls := s"close-button ${Icon.remove}")
)
tooltip.render + titleName.render + buttonGroup.render
}
override def postLink(scope: ScopeType,
element: JQLite,
attrs: Attributes,
controller: ControllerType) = {
}
}
| OmarCastro/ShellHive-scala | client/src/main/scala/com/shellhive/angular/components/nodes/ComponentTitleDirective.scala | Scala | mit | 1,452 |
/******************************************************************************************************************\
* Rapture Core, version 2.0.0. Copyright 2010-2015 Jon Pretty, Propensive Ltd. *
* *
* The primary distribution site is http://rapture.io/ *
* *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance   *
* with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0. *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed *
* on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License *
* for the specific language governing permissions and limitations under the License. *
\******************************************************************************************************************/
package rapture.core
import language.experimental.macros
import scala.reflect._
import scala.reflect.macros._
import annotation.unchecked._
class AllocApply[T](val unit: Int) extends AnyVal {
def apply()(implicit inst: Alloc0.Invariant[T]): T = inst.instantiate()
def apply[P1](p1: P1)(implicit inst: Alloc1.Invariant[T, P1]): T = inst.instantiate(p1)
def apply[P1, P2](p1: P1, p2: P2)(implicit inst: Alloc2.Invariant[T, P1, P2]): T = inst.instantiate(p1, p2)
def apply[P1, P2, P3](p1: P1, p2: P2, p3: P3)(implicit inst: Alloc3.Invariant[T, P1, P2, P3]): T = inst.instantiate(p1, p2, p3)
def apply[P1, P2, P3, P4](p1: P1, p2: P2, p3: P3, p4: P4)(implicit inst: Alloc4.Invariant[T, P1, P2, P3, P4]): T = inst.instantiate(p1, p2, p3, p4)
}
object Alloc0 {
implicit def alloc0[T]: Alloc0[T] = macro CoreMacros.allocMacro[T]
type Invariant[+T] = Alloc0[T @uncheckedVariance]
}
object Alloc1 {
implicit def alloc1[T, P1]: Alloc1[T, P1] = macro CoreMacros.allocMacro1[T, P1]
type Invariant[+T, P1] = Alloc1[T @uncheckedVariance, P1]
}
object Alloc2 {
implicit def alloc2[T, P1, P2]: Alloc2[T, P1, P2] = macro CoreMacros.allocMacro2[T, P1, P2]
type Invariant[+T, P1, P2] = Alloc2[T @uncheckedVariance, P1, P2]
}
object Alloc3 {
implicit def alloc3[T, P1, P2, P3]: Alloc3[T, P1, P2, P3] = macro CoreMacros.allocMacro3[T, P1, P2, P3]
type Invariant[+T, P1, P2, P3] = Alloc3[T @uncheckedVariance, P1, P2, P3]
}
object Alloc4 {
implicit def alloc4[T, P1, P2, P3, P4]: Alloc4[T, P1, P2, P3, P4] = macro CoreMacros.allocMacro4[T, P1, P2, P3, P4]
type Invariant[+T, P1, P2, P3, P4] = Alloc4[T @uncheckedVariance, P1, P2, P3, P4]
}
@implicitNotFound("No constructor exists for instantiating an object of this type")
trait Alloc0[T] { def instantiate(): T }
@implicitNotFound("No constructor exists for instantiating an object of this type")
trait Alloc1[T, P1] { def instantiate(p1: P1): T }
@implicitNotFound("No constructor exists for instantiating an object of this type")
trait Alloc2[T, P1, P2] { def instantiate(p1: P1, p2: P2): T }
@implicitNotFound("No constructor exists for instantiating an object of this type")
trait Alloc3[T, P1, P2, P3] { def instantiate(p1: P1, p2: P2, p3: P3): T }
@implicitNotFound("No constructor exists for instantiating an object of this type")
trait Alloc4[T, P1, P2, P3, P4] { def instantiate(p1: P1, p2: P2, p3: P3, p4: P4): T }
| utaal/rapture-core | src/alloc.scala | Scala | apache-2.0 | 3,801 |
package ranking
import controllers.routes
import models.db
import ranking.common.{Ranking, RankingElement}
import scalikejdbc._
/**
*
* @author ponkotuy
* Date: 14/12/05.
*/
case object FirstShipRate extends Ranking {
import ranking.common.Ranking._
  override val title: String = "初期艦" // "Starter ship" (the player's initial ship)
override def rankingQuery(limit: Int): List[RankingElement] = {
val counts = db.Ship.countAllShip(sqls"s.id = 1")
.groupBy(t => EvolutionBase(t._1))
.mapValues(_.values.sum)
val masters = db.MasterShipBase.findAllBy(sqls.in(db.MasterShipBase.ms.id, counts.keys.toSeq))
.map { it => it.id -> it }.toMap
val sum = counts.values.sum
counts.toList.sortBy(-_._2).map { case (sid, count) =>
val master = masters(sid)
val url = routes.ViewSta.shipBook(sid).toString()
RankingElement(master.name, <span><strong>{f"$count%,d"}</strong>{s" / $sum"}</span>, url, count)
}
}
  override def comment: List[String] = List("進化前で集計しています") // "Counted using the pre-remodel (base) form"
override def divClass: String = colmd3
}
| nekoworkshop/MyFleetGirls | server/app/ranking/FirstShipRate.scala | Scala | mit | 1,051 |
package safe.actor
import akka.actor.{ ActorContext, ActorRef, Props }
import akka.routing.RoundRobinPool
import safe.feature.Feature
import scala.collection.mutable
import scala.reflect.ClassTag
import com.codahale.metrics.MetricRegistry
trait FeatureActorCreation {
protected val count = new java.util.concurrent.atomic.AtomicInteger()
def name: String
def uniqueName: String = name + count.getAndIncrement()
def create(feat: Feature,
listeners: Seq[ActorRef],
poolSize: Int = 1)(implicit context: ActorContext, metrics: Option[MetricRegistry]): Option[ActorRef]
def pool(props: Props, size: Int, name: String)(implicit context: ActorContext): ActorRef = {
if (size == 1) context.actorOf(props, name)
else context.actorOf(RoundRobinPool(size).props(props), name)
}
} | devonbryant/safe | safe-core/src/main/scala/safe/actor/FeatureActorCreation.scala | Scala | epl-1.0 | 836 |
package com.anishathalye.turing.machine
import scala.language.implicitConversions
trait Implicits {
final implicit def tuple2Action(tuple: ((String, Char), (String, Char, Direction))): ((State, Symb), (State, Symb, Direction)) = {
tuple match {
case ((s, c), (ns, nc, d)) => ((s: State, c: Symb), (ns: State, nc: Symb, d))
}
}
final implicit def char2Symb(char: Char): Symb = Symb(char)
final implicit def string2State(string: String): State = State(string)
}
object Implicits extends Implicits
| anishathalye/turing | src/main/scala/com/anishathalye/turing/machine/Implicits.scala | Scala | mit | 524 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.network
import java.nio.ByteBuffer
import scala.collection.mutable.ArrayBuffer
import org.apache.spark.storage.BlockManager
private[spark]
class BufferMessage(id_ : Int, val buffers: ArrayBuffer[ByteBuffer], var ackId: Int)
extends Message(Message.BUFFER_MESSAGE, id_) {
val initialSize = currentSize()
var gotChunkForSendingOnce = false
def size = initialSize
def currentSize() = {
if (buffers == null || buffers.isEmpty) {
0
} else {
buffers.map(_.remaining).reduceLeft(_ + _)
}
}
def getChunkForSending(maxChunkSize: Int): Option[MessageChunk] = {
if (maxChunkSize <= 0) {
throw new Exception("Max chunk size is " + maxChunkSize)
}
if (size == 0 && gotChunkForSendingOnce == false) {
val newChunk = new MessageChunk(
new MessageChunkHeader(typ, id, 0, 0, ackId, senderAddress), null)
gotChunkForSendingOnce = true
return Some(newChunk)
}
while(!buffers.isEmpty) {
val buffer = buffers(0)
if (buffer.remaining == 0) {
BlockManager.dispose(buffer)
buffers -= buffer
} else {
val newBuffer = if (buffer.remaining <= maxChunkSize) {
buffer.duplicate()
} else {
buffer.slice().limit(maxChunkSize).asInstanceOf[ByteBuffer]
}
buffer.position(buffer.position + newBuffer.remaining)
val newChunk = new MessageChunk(new MessageChunkHeader(
typ, id, size, newBuffer.remaining, ackId, senderAddress), newBuffer)
gotChunkForSendingOnce = true
return Some(newChunk)
}
}
None
}
def getChunkForReceiving(chunkSize: Int): Option[MessageChunk] = {
// STRONG ASSUMPTION: BufferMessage created when receiving data has ONLY ONE data buffer
if (buffers.size > 1) {
throw new Exception("Attempting to get chunk from message with multiple data buffers")
}
val buffer = buffers(0)
if (buffer.remaining > 0) {
if (buffer.remaining < chunkSize) {
throw new Exception("Not enough space in data buffer for receiving chunk")
}
val newBuffer = buffer.slice().limit(chunkSize).asInstanceOf[ByteBuffer]
buffer.position(buffer.position + newBuffer.remaining)
val newChunk = new MessageChunk(new MessageChunkHeader(
typ, id, size, newBuffer.remaining, ackId, senderAddress), newBuffer)
return Some(newChunk)
}
None
}
def flip() {
buffers.foreach(_.flip)
}
def hasAckId() = (ackId != 0)
def isCompletelyReceived() = !buffers(0).hasRemaining
override def toString = {
if (hasAckId) {
"BufferAckMessage(aid = " + ackId + ", id = " + id + ", size = " + size + ")"
} else {
"BufferMessage(id = " + id + ", size = " + size + ")"
}
}
}
| mkolod/incubator-spark | core/src/main/scala/org/apache/spark/network/BufferMessage.scala | Scala | apache-2.0 | 3,600 |
package net.fwbrasil.smirror
class TypeParametersSpecTestClass(m1: List[Int]) {
def m2(m3: Array[String]) = null
val m4 = Map[String, Int]()
var m5 = Map[List[Int], String]()
}
class TypeParametersSpec extends SMirrorSpec {
"Constructor parameter" should "return its generic" in
test[TypeParametersSpecTestClass] { (sClass, jClass) =>
sClass.constructors.head.parameters.head.typeArguments should
equal(List(sClassOf[Int]))
}
"Method parameter" should "return its generic" in
test[TypeParametersSpecTestClass] { (sClass, jClass) =>
sClass.methods.find(_.name == "m2")
.get.parameters.head.typeArguments should
equal(List(sClassOf[String]))
}
"Field" should "return its generic" in
test[TypeParametersSpecTestClass] { (sClass, jClass) =>
sClass.vals.head.typeArguments should
equal(List(sClassOf[String], sClassOf[Int]))
}
"Field with nested generic" should "return its first level generic" in
test[TypeParametersSpecTestClass] { (sClass, jClass) =>
sClass.vars.head.typeArguments should
equal(List(sClassOf[List[Any]], sClassOf[String]))
}
} | fwbrasil/smirror | src/test/scala/net/fwbrasil/smirror/TypeParametersSpec.scala | Scala | lgpl-2.1 | 1,102 |
package no.skytteren.elasticala
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Await
import org.scalatest.FunSpec
import org.elasticsearch.client._
import org.elasticsearch.node.NodeBuilder._
import org.elasticsearch.common.settings.Settings
import org.scalatest.BeforeAndAfter
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent._
import org.scalatest.time._
import scala.util.Random
class IndexDSLSpec extends FunSpec with BeforeAndAfterAll with ScalaFutures {
import api._
  val node = nodeBuilder.local(true).data(true).settings(Settings.settingsBuilder().put("path.home", "target/es/" + Random.alphanumeric.take(10).mkString)).build()
val client = Client(node)
override def beforeAll() = {
node.start()
}
describe("Index admin"){
/*
* curl -XCREATE 'http://localhost:9200/indexname'
* curl -XHEAD 'http://localhost:9200/indexname'
* curl -XDELETE 'http://localhost:9200/indexname'
*/
it("should handle index life cycle"){
val index: Index = "indexname"
val result = for{
c <- client.execute(index create)
e <- client.execute(index exists)
r <- client.execute(index refresh)
del <- client.execute(index delete)
} yield{(c.acknowledged, e.exists, r.failedShards, del.acknowledged)}
whenReady(result, timeout(Span(6, Seconds))){case (c, e, r, del) => {
assert(c, "problems creating index")
assert(e, "problems checking if index exists")
assert(r === 0, "failures during refresh")
assert(del, "problems deleting index")
}}
}
it("should handle index aliases"){
val index: Index = "aliasiableindexname"
val indexAlias = "aliasname"
val result = for{
c <- client.execute(index create)
a1 <- client.execute(aliases(add(index, indexAlias)))
a2 <- client.execute(aliases(remove(index, indexAlias)))
del <- client.execute(index delete)
} yield{(c.acknowledged, a1.acknowledged, a2.acknowledged, del.acknowledged)}
whenReady(result, timeout(Span(6, Seconds))){case (c, a1, a2, del) => {
assert(c, "problems creating index")
assert(a1, "problems adding alias")
assert(a2, "problems removing alias")
assert(del, "problems deleting index")
}}
}
}
override def afterAll() = {
client.close()
node.close()
}
} | skytteren/elasticala | src/test/scala/no/skytteren/elasticala/IndexDSLSpec.scala | Scala | apache-2.0 | 2,408 |
package ore.permission.scope
import scala.language.implicitConversions
import ore.db.{DbRef, Model}
import ore.models.organization.Organization
import ore.models.project.Project
import simulacrum.typeclass
@typeclass trait HasScope[-A] {
def scope(a: A): Scope
}
object HasScope {
def orgScope[A](f: A => DbRef[Organization]): HasScope[A] = (a: A) => OrganizationScope(f(a))
def projectScope[A](f: A => DbRef[Project]): HasScope[A] = (a: A) => ProjectScope(f(a))
implicit def hasUnderlyingScope[A](implicit hasScope: HasScope[A]): HasScope[Model[A]] =
(a: Model[A]) => hasScope.scope(a.obj)
}
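/**
 * Hypothetical usage sketch (not part of the original file): a model carrying a project
 * reference exposes its scope by delegating to HasScope.projectScope. `ProjectChannel`
 * and its `projectId` field are made-up examples.
 */
object HasScopeExample {
  final case class ProjectChannel(projectId: DbRef[Project])
  implicit val channelHasScope: HasScope[ProjectChannel] = HasScope.projectScope(_.projectId)
}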
| SpongePowered/Ore | models/src/main/scala/ore/permission/scope/HasScope.scala | Scala | mit | 611 |
/**
* Copyright 2012-2013 greencheek.org (www.greencheek.org)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.greencheek.jms.yankeedo.scenarioexecution.producer
import org.greencheek.jms.yankeedo.structure.actions.{JmsProducerAction => JmsProd, Queue, Topic}
import akka.camel.{CamelMessage, Oneway, Producer}
import akka.actor.ActorRef
import akka.actor.Status.Failure
/**
* User: dominictootell
* Date: 08/01/2013
* Time: 08:23
*/
class AkkaProducer(val jmsAction : JmsProd, responseReciever : ActorRef) extends Producer with Oneway{
val endpoint = {
jmsAction destination match {
case Topic(destName) => "jms:topic:" + destName
case Queue(destName) => "jms:queue:" + destName
}
}
override def endpointUri = endpoint
override def routeResponse(msg: Any) = {
msg match {
case message : CamelMessage => {
responseReciever ! msg
}
case failure : Failure => {
responseReciever ! failure
}
}
}
}
| tootedom/yankeedo | yankeedo-core/src/main/scala/org/greencheek/jms/yankeedo/scenarioexecution/producer/AkkaProducer.scala | Scala | apache-2.0 | 1,510 |
/**
* Copyright 2011-2017 GatlingCorp (http://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.http.check.header
import io.gatling.core.check._
import io.gatling.core.check.extractor.Extractor
import io.gatling.core.check.extractor.regex.{ GroupExtractor, Patterns }
import io.gatling.core.session.{ Expression, Session }
import io.gatling.http.check.HttpCheck
import io.gatling.http.check.HttpCheckBuilders._
import io.gatling.http.response.Response
trait HttpHeaderRegexCheckType
trait HttpHeaderRegexOfType {
self: HttpHeaderRegexCheckBuilder[String] =>
def ofType[X: GroupExtractor] = new HttpHeaderRegexCheckBuilder[X](headerName, pattern, patterns)
}
object HttpHeaderRegexCheckBuilder {
def headerRegex(headerName: Expression[String], pattern: Expression[String], patterns: Patterns) =
new HttpHeaderRegexCheckBuilder[String](headerName, pattern, patterns) with HttpHeaderRegexOfType
}
class HttpHeaderRegexCheckBuilder[X: GroupExtractor](
private[header] val headerName: Expression[String],
private[header] val pattern: Expression[String],
private[header] val patterns: Patterns
)
extends DefaultMultipleFindCheckBuilder[HttpHeaderRegexCheckType, Response, X] {
import HttpHeaderRegexExtractorFactory._
private def withHeaderAndPattern[T](f: (String, String) => T): Expression[T] =
(session: Session) => for {
headerName <- headerName(session)
pattern <- pattern(session)
} yield f(headerName, pattern)
override def findExtractor(occurrence: Int): Expression[Extractor[Response, X]] =
withHeaderAndPattern(newHeaderRegexSingleExtractor(_, _, occurrence, patterns))
override def findAllExtractor: Expression[Extractor[Response, Seq[X]]] =
withHeaderAndPattern(newHeaderRegexMultipleExtractor(_, _, patterns))
override def countExtractor: Expression[Extractor[Response, Int]] =
withHeaderAndPattern(newHeaderRegexCountExtractor(_, _, patterns))
}
object HttpHeaderRegexProvider extends CheckProtocolProvider[HttpHeaderRegexCheckType, HttpCheck, Response, Response] {
override val specializer: Specializer[HttpCheck, Response] = HeaderSpecializer
override val preparer: Preparer[Response, Response] = PassThroughResponsePreparer
}
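// Hypothetical usage sketch (not part of the original file): in a simulation this builder
// typically surfaces through Gatling's HTTP DSL, roughly as
//   http("auth").get("/auth").check(headerRegex("Location", """code=(.+)""").saveAs("authCode"))
// where `headerRegex` is the DSL entry point assumed to delegate to
// HttpHeaderRegexCheckBuilder.headerRegex above.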
| timve/gatling | gatling-http/src/main/scala/io/gatling/http/check/header/HttpHeaderRegexCheckBuilder.scala | Scala | apache-2.0 | 2,764 |
package com.eevolution.context.dictionary.infrastructure.repository
import com.eevolution.context.dictionary.domain.model.InformationWindowTrl
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/EmerisScala
* Created by [email protected] , www.e-evolution.com on 20/10/17.
*/
/**
* Information Window Trl Mapping
*/
trait InformationWindowTrlMapping {
val queryInformationWindowTrl = quote {
querySchema[InformationWindowTrl]("AD_InfoWindow_Trl",
_.infoWindowId-> "AD_InfoWindow_ID",
_.language-> "AD_Language",
_.tenantId -> "AD_Client_ID" ,
_.organizationId -> "AD_Org_ID",
_.isActive-> "IsActive",
_.created-> "Created",
_.createdBy-> "CreatedBy",
_.updated-> "Updated",
_.updatedBy-> "UpdatedBy",
_.isTranslated-> "IsTranslated",
_.name-> "Name",
_.description-> "Description",
_.help-> "Help",
_.uuid-> "UUID")
}
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/InformationWindowTrlMapping.scala | Scala | gpl-3.0 | 1,791 |
/*
* Copyright 2013-2015 Websudos, Limited.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* - Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* - Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
*
* - Explicit consent must be obtained from the copyright owner, Outworkers Limited before any redistribution is made.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.websudos.phantom.builder.serializers
import com.websudos.phantom.builder.QueryBuilder
import com.websudos.phantom.builder.query.{CQLQuery, QueryBuilderTest}
class InsertQueryBuilderTest extends QueryBuilderTest {
"The INSERT query builder" - {
"should allow appending columns and values to a query" - {
"should serialize a sequence of appended values" in {
val query = QueryBuilder.Insert.values(List(CQLQuery("a"), CQLQuery("b"))).queryString
query shouldEqual "VALUES(a, b)"
}
"should serialize a sequence of value additions" in {
}
}
}
}
| levinson/phantom | phantom-dsl/src/test/scala/com/websudos/phantom/builder/serializers/InsertQueryBuilderTest.scala | Scala | bsd-2-clause | 2,109 |
package org.scalatra
import org.scalatra.test.specs2.ScalatraSpec
class CorsSupportSpec extends ScalatraSpec {
addServlet(new ScalatraServlet with CorsSupport {
get("/") { "OK" }
override def initialize(config: ConfigT) {
config.context.setInitParameter(CorsSupport.AllowedOriginsKey, "http://www.example.com")
config.context.setInitParameter(CorsSupport.AllowedHeadersKey, "X-Requested-With,Authorization,Content-Type,Accept,Origin")
config.context.setInitParameter(CorsSupport.AllowedMethodsKey, "GET,HEAD,POST")
super.initialize(config)
}
}, "/*")
def is =
"The CORS support should" ^
"augment a valid simple request" ! context.validSimpleRequest ^
"not touch a regular request" ! context.dontTouchRegularRequest ^
"respond to a valid preflight request" ! context.validPreflightRequest ^
"respond to a valid preflight request with headers" ! context.validPreflightRequestWithHeaders ^ end
object context {
def validSimpleRequest = {
get("/", headers = Map(CorsSupport.OriginHeader -> "http://www.example.com")) {
response.getHeader(CorsSupport.AccessControlAllowOriginHeader) must_== "http://www.example.com"
}
}
def dontTouchRegularRequest = {
get("/") {
response.getHeader(CorsSupport.AccessControlAllowOriginHeader) must beNull
}
}
def validPreflightRequest = {
options("/", headers = Map(CorsSupport.OriginHeader -> "http://www.example.com", CorsSupport.AccessControlRequestMethodHeader -> "GET", "Content-Type" -> "application/json")) {
response.getHeader(CorsSupport.AccessControlAllowOriginHeader) must_== "http://www.example.com"
}
}
def validPreflightRequestWithHeaders = {
val hdrs = Map(
CorsSupport.OriginHeader -> "http://www.example.com",
CorsSupport.AccessControlRequestMethodHeader -> "GET",
CorsSupport.AccessControlRequestHeadersHeader -> "Origin, Authorization, Accept",
"Content-Type" -> "application/json")
options("/", headers = hdrs) {
response.getHeader(CorsSupport.AccessControlAllowOriginHeader) must_== "http://www.example.com"
response.getHeader(CorsSupport.AccessControlAllowMethodsHeader) must_== "GET,HEAD,POST"
}
}
}
}
class DisabledCorsSupportSpec extends ScalatraSpec {
addServlet(new ScalatraServlet with CorsSupport {
get("/") { "OK" }
override def initialize(config: ConfigT) {
config.context.setInitParameter(CorsSupport.AllowedOriginsKey, "http://www.example.com")
config.context.setInitParameter(CorsSupport.AllowedHeadersKey, "X-Requested-With,Authorization,Content-Type,Accept,Origin")
config.context.setInitParameter(CorsSupport.AllowedMethodsKey, "GET,HEAD,POST")
config.context.setInitParameter(CorsSupport.EnableKey, "false")
super.initialize(config)
}
}, "/disabled")
def is =
"The CORS support should" ^
"be disabled with configuration" ! context.simpleRequestToDisabledCors ^ end
object context {
def simpleRequestToDisabledCors = {
get("/disabled/", headers = Map(CorsSupport.OriginHeader -> "http://www.example.com")) {
response.getHeader(CorsSupport.AccessControlAllowOriginHeader) must_== null
}
}
}
}
| seratch/scalatra | core/src/test/scala/org/scalatra/CorsSupportSpec.scala | Scala | bsd-2-clause | 3,295 |
package scalaprops
object Variant {
def variant[A](n: Long, g: CogenState[A]): CogenState[A] = {
val (next, int) = g.rand.nextInt
val seed = n + int
CogenState(next, Gen.gen((i, r) => g.gen.f(i, r.reseed(seed))))
}
def variantInt[A](n: Int, g: CogenState[A]): CogenState[A] = {
val (next, int) = g.rand.nextInt
val seed = n + int
CogenState(next, Gen.gen((i, r) => g.gen.f(i, r.setIntSeed(seed))))
}
}
| scalaprops/scalaprops | gen/js_native/src/main/scala/scalaprops/Variant.scala | Scala | mit | 435 |
package com.twitter.finagle.redis.integration
import com.twitter.conversions.time._
import com.twitter.finagle.redis.ServerError
import com.twitter.finagle.redis.naggati.RedisClientTest
import com.twitter.finagle.redis.tags.{RedisTest, ClientTest}
import com.twitter.finagle.redis.util.StringToBuf
import com.twitter.util.{Return, Await}
import org.junit.Ignore
import org.junit.runner.RunWith
import org.scalatest.junit.JUnitRunner
@Ignore
@RunWith(classOf[JUnitRunner])
final class TopologyClientIntegrationSuite extends RedisClientTest {
val TIMEOUT = 10.seconds
protected val bufKey = StringToBuf("1234")
protected val bufVal = StringToBuf("5")
protected val bufKeyNonNumeric = StringToBuf("asdf")
protected val bufValNonNumeric = StringToBuf("g")
protected val bufValLarge = StringToBuf("99999") // a value >= #databases (configured in redis)
test("Correctly perform TOPOLOGYADD", RedisTest, ClientTest) {
withRedisClient { client =>
assert(Await.result(client.topologyAdd(bufKey, bufVal).liftToTry, TIMEOUT) == Return.Unit)
}
}
test("Correctly perform TOPOLOGYGET after TOPOLOGYADD", RedisTest, ClientTest) {
withRedisClient { client =>
assert(Await.result(client.topologyAdd(bufKey, bufVal).liftToTry, TIMEOUT) == Return.Unit)
assert(Await.result(client.topologyGet(bufKey), TIMEOUT) == Some(bufVal))
}
}
test("Correctly perform TOPOLOGYDELETE", RedisTest, ClientTest) {
withRedisClient { client =>
assert(Await.result(client.topologyDelete(bufKey).liftToTry, TIMEOUT) == Return.Unit)
}
}
test("Correctly perform TOPOLOGYGET after TOPOLOGYDELETE", RedisTest, ClientTest) {
withRedisClient { client =>
assert(Await.result(client.topologyDelete(bufKey).liftToTry, TIMEOUT) == Return.Unit)
assert(Await.result(client.topologyGet(bufKey).liftToTry, TIMEOUT) == Return.None)
}
}
test("Throw a ServerError for non-numeric key to TOPOLOGYADD", RedisTest, ClientTest) {
withRedisClient { client =>
intercept[ServerError] {
Await.result(client.topologyAdd(bufKeyNonNumeric, bufVal), TIMEOUT)
}
}
}
test("Throw a ServerError for non-numeric val to TOPOLOGYADD", RedisTest, ClientTest) {
withRedisClient { client =>
intercept[ServerError] {
Await.result(client.topologyAdd(bufKey, bufValNonNumeric), TIMEOUT)
}
}
}
test("Throw a ServerError for non-numeric key and val to TOPOLOGYADD", RedisTest, ClientTest) {
withRedisClient { client =>
intercept[ServerError] {
Await.result(client.topologyAdd(bufKeyNonNumeric, bufValNonNumeric), TIMEOUT)
}
}
}
test("Throw a ServerError for large int val to TOPOLOGYADD", RedisTest, ClientTest) {
withRedisClient { client =>
intercept[ServerError] {
Await.result(client.topologyAdd(bufKey, bufValLarge), TIMEOUT)
}
}
}
} | lukiano/finagle | finagle-redis/src/test/scala/com/twitter/finagle/redis/commands/topology/TopologyClientIntegrationSuite.scala | Scala | apache-2.0 | 2,890 |
/*
* Copyright 2010 LinkedIn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.consumer
import java.util.concurrent._
import java.util.concurrent.atomic._
import scala.collection._
import org.apache.log4j.Logger
import kafka.cluster._
import kafka.utils._
import org.I0Itec.zkclient.exception.ZkNodeExistsException
import java.net.InetAddress
import org.I0Itec.zkclient.{IZkStateListener, IZkChildListener, ZkClient}
import org.apache.zookeeper.Watcher.Event.KeeperState
import kafka.api.OffsetRequest
/**
 * This class handles the consumer's interaction with zookeeper
*
* Directories:
* 1. Consumer id registry:
 *    /consumers/[group_id]/ids/[consumer_id] -> topic1,...topicN
* A consumer has a unique consumer id within a consumer group. A consumer registers its id as an ephemeral znode
* and puts all topics that it subscribes to as the value of the znode. The znode is deleted when the client is gone.
* A consumer subscribes to event changes of the consumer id registry within its group.
*
* The consumer id is picked up from configuration, instead of the sequential id assigned by ZK. Generated sequential
* ids are hard to recover during temporary connection loss to ZK, since it's difficult for the client to figure out
* whether the creation of a sequential znode has succeeded or not. More details can be found at
* (http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling)
*
* 2. Broker node registry:
* /brokers/[0...N] --> { "host" : "host:port",
* "topics" : {"topic1": ["partition1" ... "partitionN"], ...,
* "topicN": ["partition1" ... "partitionN"] } }
 * This is a list of all present brokers. A unique logical node id is configured on each broker node. A broker
* node registers itself on start-up and creates a znode with the logical node id under /brokers. The value of the znode
* is a JSON String that contains (1) the host name and the port the broker is listening to, (2) a list of topics that
* the broker serves, (3) a list of logical partitions assigned to each topic on the broker.
* A consumer subscribes to event changes of the broker node registry.
*
* 3. Partition owner registry:
* /consumers/[group_id]/owner/[topic]/[broker_id-partition_id] --> consumer_node_id
 * This stores the mapping between broker partitions and consumers. Each partition is owned by a unique consumer
* within a consumer group. The mapping is reestablished after each rebalancing.
*
* 4. Consumer offset tracking:
* /consumers/[group_id]/offsets/[topic]/[broker_id-partition_id] --> offset_counter_value
* Each consumer tracks the offset of the latest message consumed for each partition.
*
*/
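// Editor's illustration (hypothetical values, not part of the original source): for a group
// "group1", a consumer running on "host1" and a topic "topic1" with partition 3 hosted on
// broker 0, the registry described above would contain znodes such as:
//   /consumers/group1/ids/group1_host1-1320400000000  -> { "topic1": 1 }
//   /brokers/0                                        -> { "host" : "host1:9092", "topics" : { "topic1": ["3"] } }
//   /consumers/group1/owner/topic1/0-3                -> group1_host1-1320400000000-0
//   /consumers/group1/offsets/topic1/0-3              -> 42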
private[kafka] object ZookeeperConsumerConnector {
val MAX_N_RETRIES = 4
val shutdownCommand: FetchedDataChunk = new FetchedDataChunk(null, null, -1L)
}
/**
* JMX interface for monitoring consumer
*/
trait ZookeeperConsumerConnectorMBean {
def getPartOwnerStats: String
def getConsumerGroup: String
def getOffsetLag(topic: String, brokerId: Int, partitionId: Int): Long
def getConsumedOffset(topic: String, brokerId: Int, partitionId: Int): Long
def getLatestOffset(topic: String, brokerId: Int, partitionId: Int): Long
}
private[kafka] class ZookeeperConsumerConnector(val config: ConsumerConfig,
val enableFetcher: Boolean) // for testing only
extends ConsumerConnector with ZookeeperConsumerConnectorMBean {
private val logger = Logger.getLogger(getClass())
private val isShuttingDown = new AtomicBoolean(false)
private val rebalanceLock = new Object
private var fetcher: Option[Fetcher] = None
private var zkClient: ZkClient = null
private val topicRegistry = new Pool[String, Pool[Partition, PartitionTopicInfo]]
// queues : (topic,consumerThreadId) -> queue
private val queues = new Pool[Tuple2[String,String], BlockingQueue[FetchedDataChunk]]
private val scheduler = new KafkaScheduler(1, "Kafka-consumer-autocommit-", false)
connectZk
createFetcher
if (config.autoCommit) {
logger.info("starting auto committer every " + config.autoCommitIntervalMs + " ms")
scheduler.scheduleWithRate(autoCommit, config.autoCommitIntervalMs, config.autoCommitIntervalMs)
}
def this(config: ConsumerConfig) = this(config, true)
def createMessageStreams(topicCountMap: Map[String,Int]) : Map[String,List[KafkaMessageStream]] = {
consume(topicCountMap)
}
private def createFetcher() {
if (enableFetcher)
fetcher = Some(new Fetcher(config, zkClient))
}
private def connectZk() {
logger.info("Connecting to zookeeper instance at " + config.zkConnect)
zkClient = new ZkClient(config.zkConnect, config.zkSessionTimeoutMs, config.zkConnectionTimeoutMs, StringSerializer)
}
def shutdown() {
val canShutdown = isShuttingDown.compareAndSet(false, true);
if (canShutdown) {
logger.info("ZKConsumerConnector shutting down")
try {
scheduler.shutdown
fetcher match {
case Some(f) => f.shutdown
case None =>
}
        sendShutdownToAllQueues
if (zkClient != null) {
zkClient.close()
zkClient = null
}
}
catch {
case e =>
logger.fatal(e)
logger.fatal(Utils.stackTrace(e))
}
logger.info("ZKConsumerConnector shut down completed")
}
}
def consume(topicCountMap: scala.collection.Map[String,Int]): Map[String,List[KafkaMessageStream]] = {
logger.debug("entering consume ")
if (topicCountMap == null)
throw new RuntimeException("topicCountMap is null")
val dirs = new ZKGroupDirs(config.groupId)
var ret = new mutable.HashMap[String,List[KafkaMessageStream]]
var consumerUuid : String = null
config.consumerId match {
case Some(consumerId) // for testing only
=> consumerUuid = consumerId
case None // generate unique consumerId automatically
=> consumerUuid = InetAddress.getLocalHost.getHostName + "-" + System.currentTimeMillis
}
val consumerIdString = config.groupId + "_" + consumerUuid
val topicCount = new TopicCount(consumerIdString, topicCountMap)
// listener to consumer and partition changes
val loadBalancerListener = new ZKRebalancerListener(config.groupId, consumerIdString)
registerConsumerInZK(dirs, consumerIdString, topicCount)
zkClient.subscribeChildChanges(dirs.consumerRegistryDir, loadBalancerListener)
// create a queue per topic per consumer thread
val consumerThreadIdsPerTopic = topicCount.getConsumerThreadIdsPerTopic
for ((topic, threadIdSet) <- consumerThreadIdsPerTopic) {
var streamList: List[KafkaMessageStream] = Nil
for (threadId <- threadIdSet) {
val stream = new LinkedBlockingQueue[FetchedDataChunk](config.maxQueuedChunks)
queues.put((topic, threadId), stream)
streamList ::= new KafkaMessageStream(stream, config.consumerTimeoutMs)
}
ret += (topic -> streamList)
logger.debug("adding topic " + topic + " and stream to map..")
// register on broker partition path changes
val partitionPath = ZkUtils.brokerTopicsPath + "/" + topic
ZkUtils.makeSurePersistentPathExists(zkClient, partitionPath)
zkClient.subscribeChildChanges(partitionPath, loadBalancerListener)
}
// register listener for session expired event
zkClient.subscribeStateChanges(
      new ZKSessionExpireListener(dirs, consumerIdString, topicCount, loadBalancerListener))
// explicitly trigger load balancing for this consumer
loadBalancerListener.syncedRebalance
ret
}
private def registerConsumerInZK(dirs: ZKGroupDirs, consumerIdString: String, topicCount: TopicCount) = {
logger.info("begin registering consumer " + consumerIdString + " in ZK")
ZkUtils.createEphemeralPathExpectConflict(zkClient, dirs.consumerRegistryDir + "/" + consumerIdString, topicCount.toJsonString)
logger.info("end registering consumer " + consumerIdString + " in ZK")
}
  private def sendShutdownToAllQueues() = {
for (queue <- queues.values) {
logger.debug("Clearing up queue")
queue.clear
queue.put(ZookeeperConsumerConnector.shutdownCommand)
logger.debug("Cleared queue and sent shutdown command")
}
}
def autoCommit() {
if(logger.isTraceEnabled)
logger.trace("auto committing")
try {
commitOffsets
}
catch {
case t: Throwable =>
// log it and let it go
logger.error("exception during autoCommit: ", t)
}
}
def commitOffsets() {
if (zkClient == null)
return
for ((topic, infos) <- topicRegistry) {
val topicDirs = new ZKGroupTopicDirs(config.groupId, topic)
for (info <- infos.values) {
val newOffset = info.getConsumeOffset
try {
ZkUtils.updatePersistentPath(zkClient, topicDirs.consumerOffsetDir + "/" + info.partition.name,
newOffset.toString)
}
catch {
case t: Throwable =>
// log it and let it go
logger.warn("exception during commitOffsets: " + t + Utils.stackTrace(t))
}
if(logger.isDebugEnabled)
logger.debug("Committed offset " + newOffset + " for topic " + info)
}
}
}
// for JMX
def getPartOwnerStats(): String = {
val builder = new StringBuilder
for ((topic, infos) <- topicRegistry) {
builder.append("\\n" + topic + ": [")
val topicDirs = new ZKGroupTopicDirs(config.groupId, topic)
for(partition <- infos.values) {
builder.append("\\n {")
builder.append{partition.partition.name}
builder.append(",fetch offset:" + partition.getFetchOffset)
builder.append(",consumer offset:" + partition.getConsumeOffset)
builder.append("}")
}
builder.append("\\n ]")
}
builder.toString
}
// for JMX
def getConsumerGroup(): String = config.groupId
def getOffsetLag(topic: String, brokerId: Int, partitionId: Int): Long =
getLatestOffset(topic, brokerId, partitionId) - getConsumedOffset(topic, brokerId, partitionId)
def getConsumedOffset(topic: String, brokerId: Int, partitionId: Int): Long = {
val partition = new Partition(brokerId, partitionId)
val partitionInfos = topicRegistry.get(topic)
if (partitionInfos != null) {
val partitionInfo = partitionInfos.get(partition)
if (partitionInfo != null)
return partitionInfo.getConsumeOffset
}
//otherwise, try to get it from zookeeper
try {
val topicDirs = new ZKGroupTopicDirs(config.groupId, topic)
val znode = topicDirs.consumerOffsetDir + "/" + partition.name
val offsetString = ZkUtils.readDataMaybeNull(zkClient, znode)
if (offsetString != null)
return offsetString.toLong
else
return -1
}
catch {
case e =>
logger.error("error in getConsumedOffset JMX ", e)
}
return -2
}
def getLatestOffset(topic: String, brokerId: Int, partitionId: Int): Long = {
var simpleConsumer: SimpleConsumer = null
var producedOffset: Long = -1L
try {
val cluster = ZkUtils.getCluster(zkClient)
val broker = cluster.getBroker(brokerId)
simpleConsumer = new SimpleConsumer(broker.host, broker.port, ConsumerConfig.SOCKET_TIMEOUT,
ConsumerConfig.SOCKET_BUFFER_SIZE)
val latestOffset = simpleConsumer.getOffsetsBefore(topic, partitionId,
OffsetRequest.LATEST_TIME, 1)
producedOffset = latestOffset(0)
}
catch {
case e =>
logger.error("error in getLatestOffset jmx ", e)
}
finally {
if (simpleConsumer != null)
simpleConsumer.close
}
producedOffset
}
  class ZKSessionExpireListener(val dirs: ZKGroupDirs,
                                val consumerIdString: String,
                                val topicCount: TopicCount,
                                val loadBalancerListener: ZKRebalancerListener)
extends IZkStateListener {
@throws(classOf[Exception])
def handleStateChanged(state: KeeperState) {
// do nothing, since zkclient will do reconnect for us.
}
/**
* Called after the zookeeper session has expired and a new session has been created. You would have to re-create
* any ephemeral nodes here.
*
* @throws Exception
* On any error.
*/
@throws(classOf[Exception])
def handleNewSession() {
/**
* When we get a SessionExpired event, we lost all ephemeral nodes and zkclient has reestablished a
* connection for us. We need to release the ownership of the current consumer and re-register this
* consumer in the consumer registry and trigger a rebalance.
*/
logger.info("ZK expired; release old broker parition ownership; re-register consumer " + consumerIdString)
loadBalancerListener.resetState
registerConsumerInZK(dirs, consumerIdString, topicCount)
// explicitly trigger load balancing for this consumer
loadBalancerListener.syncedRebalance
// There is no need to resubscribe to child and state changes.
// The child change watchers will be set inside rebalance when we read the children list.
}
}
class ZKRebalancerListener(val group: String, val consumerIdString: String)
extends IZkChildListener {
private val dirs = new ZKGroupDirs(group)
private var oldPartitionsPerTopicMap: mutable.Map[String,List[String]] = new mutable.HashMap[String,List[String]]()
private var oldConsumersPerTopicMap: mutable.Map[String,List[String]] = new mutable.HashMap[String,List[String]]()
@throws(classOf[Exception])
def handleChildChange(parentPath : String, curChilds : java.util.List[String]) {
syncedRebalance
}
private def releasePartitionOwnership() {
for ((topic, infos) <- topicRegistry) {
val topicDirs = new ZKGroupTopicDirs(group, topic)
for(partition <- infos.keys) {
val znode = topicDirs.consumerOwnerDir + "/" + partition
ZkUtils.deletePath(zkClient, znode)
if(logger.isDebugEnabled)
logger.debug("Consumer " + consumerIdString + " releasing " + znode)
}
}
}
private def getConsumersPerTopic(group: String) : mutable.Map[String, List[String]] = {
val consumers = ZkUtils.getChildrenParentMayNotExist(zkClient, dirs.consumerRegistryDir)
val consumersPerTopicMap = new mutable.HashMap[String, List[String]]
for (consumer <- consumers) {
val topicCount = getTopicCount(consumer)
for ((topic, consumerThreadIdSet) <- topicCount.getConsumerThreadIdsPerTopic()) {
for (consumerThreadId <- consumerThreadIdSet)
consumersPerTopicMap.get(topic) match {
case Some(curConsumers) => consumersPerTopicMap.put(topic, consumerThreadId :: curConsumers)
case _ => consumersPerTopicMap.put(topic, List(consumerThreadId))
}
}
}
for ( (topic, consumerList) <- consumersPerTopicMap )
consumersPerTopicMap.put(topic, consumerList.sortWith((s,t) => s < t))
consumersPerTopicMap
}
private def getRelevantTopicMap(myTopicThreadIdsMap: Map[String, Set[String]],
newPartMap: Map[String,List[String]],
oldPartMap: Map[String,List[String]],
newConsumerMap: Map[String,List[String]],
oldConsumerMap: Map[String,List[String]]): Map[String, Set[String]] = {
var relevantTopicThreadIdsMap = new mutable.HashMap[String, Set[String]]()
for ( (topic, consumerThreadIdSet) <- myTopicThreadIdsMap )
if ( oldPartMap.get(topic) != newPartMap.get(topic) || oldConsumerMap.get(topic) != newConsumerMap.get(topic))
relevantTopicThreadIdsMap += (topic -> consumerThreadIdSet)
relevantTopicThreadIdsMap
}
private def getTopicCount(consumerId: String) : TopicCount = {
val topicCountJson = ZkUtils.readData(zkClient, dirs.consumerRegistryDir + "/" + consumerId)
TopicCount.constructTopicCount(consumerId, topicCountJson)
}
def resetState() {
topicRegistry.clear
oldConsumersPerTopicMap.clear
oldPartitionsPerTopicMap.clear
}
def syncedRebalance() {
rebalanceLock synchronized {
for (i <- 0 until ZookeeperConsumerConnector.MAX_N_RETRIES) {
logger.info("begin rebalancing consumer " + consumerIdString + " try #" + i)
var done = false
try {
done = rebalance
}
catch {
case e =>
// occasionally, we may hit a ZK exception because the ZK state is changing while we are iterating.
// For example, a ZK node can disappear between the time we get all children and the time we try to get
// the value of a child. Just let this go since another rebalance will be triggered.
logger.info("exception during rebalance " + e)
}
logger.info("end rebalancing consumer " + consumerIdString + " try #" + i)
if (done)
return
// release all partitions, reset state and retry
releasePartitionOwnership
resetState
Thread.sleep(config.zkSyncTimeMs)
}
}
      throw new RuntimeException(consumerIdString + " can't rebalance after " + ZookeeperConsumerConnector.MAX_N_RETRIES + " retries")
}
private def rebalance(): Boolean = {
// testing code
//if ("group1_consumer1" == consumerIdString) {
// logger.info("sleeping " + consumerIdString)
// Thread.sleep(20)
//}
val myTopicThreadIdsMap = getTopicCount(consumerIdString).getConsumerThreadIdsPerTopic
val cluster = ZkUtils.getCluster(zkClient)
val consumersPerTopicMap = getConsumersPerTopic(group)
val partitionsPerTopicMap = ZkUtils.getPartitionsForTopics(zkClient, myTopicThreadIdsMap.keys.iterator)
val relevantTopicThreadIdsMap = getRelevantTopicMap(myTopicThreadIdsMap, partitionsPerTopicMap, oldPartitionsPerTopicMap, consumersPerTopicMap, oldConsumersPerTopicMap)
if (relevantTopicThreadIdsMap.size <= 0) {
logger.info("Consumer " + consumerIdString + " with " + consumersPerTopicMap + " doesn't need to rebalance.")
return true
}
logger.info("Committing all offsets")
commitOffsets
logger.info("Releasing partition ownership")
releasePartitionOwnership
val queuesToBeCleared = new mutable.HashSet[BlockingQueue[FetchedDataChunk]]
for ((topic, consumerThreadIdSet) <- relevantTopicThreadIdsMap) {
topicRegistry.remove(topic)
topicRegistry.put(topic, new Pool[Partition, PartitionTopicInfo])
val topicDirs = new ZKGroupTopicDirs(group, topic)
val curConsumers = consumersPerTopicMap.get(topic).get
var curPartitions: List[String] = partitionsPerTopicMap.get(topic).get
val nPartsPerConsumer = curPartitions.size / curConsumers.size
val nConsumersWithExtraPart = curPartitions.size % curConsumers.size
logger.info("Consumer " + consumerIdString + " rebalancing the following partitions: " + curPartitions +
" for topic " + topic + " with consumers: " + curConsumers)
for (consumerThreadId <- consumerThreadIdSet) {
val myConsumerPosition = curConsumers.findIndexOf(_ == consumerThreadId)
assert(myConsumerPosition >= 0)
val startPart = nPartsPerConsumer*myConsumerPosition + myConsumerPosition.min(nConsumersWithExtraPart)
val nParts = nPartsPerConsumer + (if (myConsumerPosition + 1 > nConsumersWithExtraPart) 0 else 1)
/**
* Range-partition the sorted partitions to consumers for better locality.
* The first few consumers pick up an extra partition, if any.
*/
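          // Editor's example (illustrative): with 5 partitions and 2 consumer threads,
          // nPartsPerConsumer = 2 and nConsumersWithExtraPart = 1, so thread 0 is assigned
          // partitions 0..2 and thread 1 is assigned partitions 3..4.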
if (nParts <= 0)
logger.warn("No broker partions consumed by consumer thread " + consumerThreadId + " for topic " + topic)
else {
for (i <- startPart until startPart + nParts) {
val partition = curPartitions(i)
logger.info(consumerThreadId + " attempting to claim partition " + partition)
if (!processPartition(topicDirs, partition, topic, consumerThreadId))
return false
}
queuesToBeCleared += queues.get((topic, consumerThreadId))
}
}
}
updateFetcher(cluster, queuesToBeCleared)
oldPartitionsPerTopicMap = partitionsPerTopicMap
oldConsumersPerTopicMap = consumersPerTopicMap
true
}
private def updateFetcher(cluster: Cluster, queuesTobeCleared: Iterable[BlockingQueue[FetchedDataChunk]]) {
// update partitions for fetcher
var allPartitionInfos : List[PartitionTopicInfo] = Nil
for (partitionInfos <- topicRegistry.values)
for (partition <- partitionInfos.values)
allPartitionInfos ::= partition
logger.info("Consumer " + consumerIdString + " selected partitions : " +
allPartitionInfos.sortWith((s,t) => s.partition < t.partition).map(_.toString).mkString(","))
fetcher match {
case Some(f) => f.initConnections(allPartitionInfos, cluster, queuesTobeCleared)
case None =>
}
}
private def processPartition(topicDirs: ZKGroupTopicDirs, partition: String,
topic: String, consumerThreadId: String) : Boolean = {
val partitionOwnerPath = topicDirs.consumerOwnerDir + "/" + partition
try {
ZkUtils.createEphemeralPathExpectConflict(zkClient, partitionOwnerPath, consumerThreadId)
}
catch {
case e: ZkNodeExistsException =>
// The node hasn't been deleted by the original owner. So wait a bit and retry.
logger.info("waiting for the partition ownership to be deleted: " + partition)
return false
case e2 => throw e2
}
addPartitionTopicInfo(topicDirs, partition, topic, consumerThreadId)
true
}
private def addPartitionTopicInfo(topicDirs: ZKGroupTopicDirs, partitionString: String,
topic: String, consumerThreadId: String) {
val partition = Partition.parse(partitionString)
val partTopicInfoMap = topicRegistry.get(topic)
val znode = topicDirs.consumerOffsetDir + "/" + partition.name
val offsetString = ZkUtils.readDataMaybeNull(zkClient, znode)
// If first time starting a consumer, use default offset.
// TODO: handle this better (if client doesn't know initial offsets)
val offset : Long = if (offsetString == null) Long.MaxValue else offsetString.toLong
val queue = queues.get((topic, consumerThreadId))
val consumedOffset = new AtomicLong(offset)
val fetchedOffset = new AtomicLong(offset)
val partTopicInfo = new PartitionTopicInfo(topic,
partition.brokerId,
partition,
queue,
consumedOffset,
fetchedOffset,
new AtomicInteger(config.fetchSize))
partTopicInfoMap.put(partition, partTopicInfo)
if (logger.isDebugEnabled)
logger.debug(partTopicInfo + " selected new offset " + offset)
}
}
}
| quipo/kafka | core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala | Scala | apache-2.0 | 24,239 |
class EmptyFinalltDefaultTest {
def m(): Unit = {
object B {
def b(): Unit = {
try {
println("BT")
}
catch {
case e: Exception =>
println("BE")
}
finally {
println("BF")
}
}
}
trait C {
def c(): Unit = {
try {
println("CT")
} finally {}
}
}
try {
println("M1T")
}
catch {
case e: Exception =>
println("M1E")
}
try {
println("M2T")
}
finally {
println("M2F")
}
try {
println("M3T")
}
finally {
}
}
} | jean-andre-gauthier/scalasca | src/test/scala/lara/epfl/scalasca/tests/unit/executables/emptyfinally/EmptyFinallyDefaultTest.scala | Scala | bsd-3-clause | 529 |
package com.sksamuel.scapegoat.inspections.empty
import com.sksamuel.scapegoat.InspectionTest
/** @author Stephen Samuel */
class EmptyTryBlockTest extends InspectionTest {
override val inspections = Seq(new EmptyTryBlock)
"empty try block" - {
"should report warning" in {
val code = """object Test {
| try {
| } catch {
| case r: RuntimeException => throw r
| case e: Exception =>
| case t: Throwable =>
| }
|
| try {
| getClass
| } catch {
| case r: RuntimeException => throw r
| case e: Exception =>
| case t: Throwable =>
| }
} """.stripMargin
compileCodeSnippet(code)
compiler.scapegoat.feedback.warnings.size shouldBe 1
}
}
}
| sksamuel/scalac-scapegoat-plugin | src/test/scala/com/sksamuel/scapegoat/inspections/empty/EmptyTryBlockTest.scala | Scala | apache-2.0 | 1,064 |
package com.lrs.rest
import java.util.concurrent.TimeUnit
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model._
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server._
import akka.stream.{ActorMaterializer, ActorMaterializerSettings}
import com.lrs.rest.actors.{ RecordPersistWorker, RecordProcessWorker}
import com.lrs.common.models.errors.{ExternalResourceException, ExternalResourceNotFoundException}
import com.lrs.rest.routes.{HighwayRoutes, MonitoringRoutes}
import com.typesafe.config.ConfigFactory
import scala.concurrent.Await
import scala.concurrent.duration.Duration
object AkkaHttpScalaDockerSeed extends App {
implicit val system = ActorSystem("main-actor-system")
implicit val materializer = ActorMaterializer()
implicit val ec = system.dispatcher
val conf = ConfigFactory.load()
// actors
//val queueConnector = system.actorOf(QueueConnector.props(queueName), "queue-connector")
//val stockPriceConnector = system.actorOf(StockPriceConnector.props(apiKey), "stock-price-connector")
//val stockPriceWorker = system.actorOf(MessageWorker.props(queueConnector, messageToSend), "message-worker")
// route definitions
//val queueRoutes = new QueueRoutes(queueConnector, stockPriceConnector)
val recordPersistWorker = system.actorOf(RecordPersistWorker.props, "recordPersistWorker-actor")
val recordProcessWorker = system.actorOf(RecordProcessWorker.props(recordPersistWorker), "recordProcessWorker-actor")
val highwayRoutes = new HighwayRoutes(recordPersistWorker, recordProcessWorker)
val monitoringRoutes = new MonitoringRoutes()
// implicit exception handler - this will be picked up by the route definitions
implicit def myExceptionHandler: ExceptionHandler = customGlobalErrorHandler
// merge all routes here
def allRoutes = {
highwayRoutes.routes ~
monitoringRoutes.routes
}
Http().bindAndHandle(allRoutes, "0.0.0.0", 5000)
Await.ready(system.whenTerminated, Duration.Inf)
  // When handling and completing errors as results like this
  // you have the option (see below) of not logging errors, or logging them at a lower level when
  // failures are caused by the user (not the system itself)
def customGlobalErrorHandler = ExceptionHandler {
case ex: ExternalResourceNotFoundException =>
extractUri { uri =>
// no errors will be logged here
complete(HttpResponse(NotFound, entity = ex.message))
}
case ex: ExternalResourceException => {
// a WARN will be logged instead of an error
system.log.warning(ex.message)
complete(HttpResponse(BadGateway, entity = ex.message))
}
    // This behaves as a pipeline; errors that are not handled above will be passed on (bubbled up)
    // to the route.
    // The default behaviour is to log the error and return an InternalServerError with no body
}
}
| edmundgmail/HighwaySystem | highway-rest/src/main/scala/com/lrs/rest/AkkaHttpScalaDockerSeed.scala | Scala | apache-2.0 | 2,941 |
/**
* Test cases for NBody.scala.
*
* @author Yujian Zhang <yujian{dot}zhang[at]gmail(dot)com>
*
* License:
* GNU General Public License v2
* http://www.gnu.org/licenses/gpl-2.0.html
* Copyright (C) 2013 Yujian Zhang
*/
import net.whily.scasci.math._
import net.whily.scasci.math.linalg._
import net.whily.scasci.phys._
import org.scalatest.Matchers
import org.scalatest.FunSpec
class NBodySpec extends FunSpec with Matchers {
describe("Two body as in section 3.1 and 4.7 of http://www.artcompsci.org/kali/vol/n_body_problem/volume4.pdf") {
val sim = NBody.twoBodyParamSim
sim.evolve(10.0)
it("correct position and velocity for the 1st body") {
val pos = sim.bodies(0).pos
val vel = sim.bodies(0).vel
(pos ≈ Vec3(1.1992351e-01, -7.2126917e-02, 0.0)) should be (true)
(vel ≈ Vec3(2.0616138e-01, 4.2779061e-02, 0.0)) should be (true)
}
it("correct position and velocity for the 2nd body") {
val pos = sim.bodies(1).pos
val vel = sim.bodies(1).vel
(pos ≈ Vec3(-4.7969404e-01, 2.8850767e-01, 0.0)) should be (true)
(vel ≈ Vec3(-8.2464553e-01, -1.7111624e-01, 0.0)) should be (true)
}
it("should have small relative energy error") {
math.abs(sim.relativeEnergyError) should be < 2e-13
}
}
describe("Three body figure 8 as in section 5.1 and 5.2 of http://www.artcompsci.org/kali/vol/n_body_problem/volume4.pdf") {
val sim = NBody.figure8ParamSim
sim.evolve(2.1088)
it("correct position and velocity for the 1st body") {
val pos = sim.bodies(0).pos
val vel = sim.bodies(0).vel
(pos ≈ Vec3(2.5982241e-5, -2.0259655e-5, 0.0)) should be (true)
(vel ≈ Vec3(-0.93227637, -0.86473501, 0.0)) should be (true)
}
it("correct position and velocity for the 2nd body") {
val pos = sim.bodies(1).pos
val vel = sim.bodies(1).vel
(pos ≈ Vec3(0.97011046, -0.24305269, 0.0)) should be (true)
(vel ≈ Vec3(0.46619301, 0.43238574, 0.0)) should be (true)
}
it("correct position and velocity for the 3rd body") {
val pos = sim.bodies(2).pos
val vel = sim.bodies(2).vel
(pos ≈ Vec3(-0.97013644, 0.24307295, 0.0)) should be (true)
(vel ≈ Vec3(0.46608336, 0.43234927, 0.0)) should be (true)
}
it("should have small relative energy error") {
math.abs(sim.relativeEnergyError) should be < 1e-13
}
}
describe("Test energy of three body configurations from http://suki.ipb.ac.rs/3body/") {
it("Energy test") {
for (config <- NBody.threeBodyConfigs) {
val sim = new NBody(config, 0.0001)
Field.fieldD.≈(sim.totalEnergy(), config.energy) should be (true)
Field.fieldD.≈(sim.angularMomentum(), config.angularMomentum) should be (true)
sim.evolve(1.0)
math.abs(sim.relativeEnergyError) should be < 2e-11
}
}
}
}
| whily/scasci | src/test/scala/phys/NBodyTest.scala | Scala | gpl-2.0 | 2,898 |
package test.kofre
import kofre.primitives.{LastWriterWins, MultiValueRegister}
import kofre.sets.ORSet
import kofre.{IdUtil, Lattice}
import org.scalacheck.{Arbitrary, Gen}
import kofre.causality.VectorClock
object DataGenerator {
given arbId: Arbitrary[IdUtil.Id] = Arbitrary(Gen.oneOf('a' to 'g').map(_.toString))
given arbVersion: Arbitrary[VectorClock] = Arbitrary(for {
ids: Set[IdUtil.Id] <- Gen.nonEmptyListOf(arbId.arbitrary).map(_.toSet)
value: List[Long] <- Gen.listOfN(ids.size, Gen.oneOf(0L to 100L))
} yield VectorClock.fromMap(ids.zip(value).toMap))
given arbLww: Arbitrary[LastWriterWins[Int]] = Arbitrary(
for {
time <- Gen.long
value <- Gen.choose(Int.MinValue, Int.MaxValue)
} yield LastWriterWins(time, value)
)
given Lattice[Int] = _ max _
given arbORSet[A: Arbitrary]: Arbitrary[ORSet[A]] = Arbitrary(for {
added <- Gen.nonEmptyListOf(Arbitrary.arbitrary[A])
removed <- Gen.listOf(Gen.oneOf(added))
} yield {
val a = added.foldLeft(ORSet.empty[A])((s, v) => Lattice.merge(s, s.add(v)))
removed.foldLeft(a)((s, v) => Lattice.merge(s, s.remove(v)))
})
given arbMVR[A: Arbitrary]: Arbitrary[MultiValueRegister[A]] =
val pairgen = for {
version <- arbVersion.arbitrary
value <- Arbitrary.arbitrary[A]
} yield (version, value)
val map = Gen.listOf(pairgen).map(vs => MultiValueRegister(vs.toMap))
Arbitrary(map)
}
| guidosalva/REScala | Code/Extensions/Kofre/src/test/scala/test/kofre/DataGenerator.scala | Scala | apache-2.0 | 1,438 |
package controllers
import play.api.mvc.{ Action, Controller }
import play.api.libs.json.{ Json, JsError, JsValue }
import model.BallotBox
import helper.Schulze
/**
* User: Björn Reimer
* Date: 11/23/13
* Time: 8:24 PM
*/
object directSchulze extends Controller {
// def direct() = Action(parse.tolerantJson) {
//
// request =>
// val jsBody: JsValue = request.body
//
// jsBody.validate[BallotBox].fold (
// invalid = {e => BadRequest(JsError.toFlatJson(e))},
// valid = b => {
// val ranking = Schulze.getRanking(b.ballots.toList)
//
// Ok(Json.toJson(b))
// }
// )
// }
}
| Reimerei/prefr | app/controllers/directSchulze.scala | Scala | mit | 691 |
package se.stagehand.trpg
import se.stagehand.plugins._
import se.stagehand.lib.scripting._
class TRPGPlugin extends ScriptPlugin {
val name = "TRPG"
val guis = List(SceneGUI, EventGUI)
val scriptcomponents: Array[ScriptComponent] = Array(new Scene, new Event)
} | evilcandybag/Stagehand-TRPG | src/main/scala/se/stagehand/trpg/TRPGPlugin.scala | Scala | gpl-2.0 | 284 |
/*
*
* * Copyright (c) 2014-2016. National Institute of Advanced Industrial Science and Technology (AIST)
* * All rights reserved.
*
*/
package jp.go.aist.cspe
import jp.go.aist.cspe.CSPE._
private[cspe] class PartialParallel(p1 : Process, as : Set[Symbol]) {
def || (p2 : Process) = parallel(List(p1, p2), as)
}
| yoriyuki/cspe | src/main/scala/jp/go/aist/cspe/PartialParallel.scala | Scala | bsd-3-clause | 324 |
package scala.slick.testutil
import java.io.File
import java.util.logging.{Level, Logger}
import java.sql.SQLException
import scala.slick.driver._
import scala.slick.jdbc.{ResultSetInvoker, StaticQuery => Q}
import scala.slick.jdbc.GetResult._
import scala.slick.jdbc.meta.MTable
import scala.slick.session.{Database, Session}
import com.typesafe.slick.testkit.util.{ExternalTestDB, TestDB}
object TestDBs {
def H2Mem(cname: String) = new TestDB("h2mem", H2Driver) {
val url = "jdbc:h2:mem:test1"
val jdbcDriver = "org.h2.Driver"
override def isPersistent = false
override lazy val capabilities = driver.capabilities + TestDB.plainSql + TestDB.plainSqlWide
}
def H2Disk(cname: String) = new TestDB("h2disk", H2Driver) {
val dbName = "h2-"+cname
val url = "jdbc:h2:"+TestDB.testDBPath+"/"+dbName
val jdbcDriver = "org.h2.Driver"
override def cleanUpBefore() = TestDB.deleteDBFiles(dbName)
// Recreating the DB is faster than dropping everything individually
override def dropUserArtifacts(implicit session: Session) = {
session.close()
cleanUpBefore()
}
override lazy val capabilities = driver.capabilities + TestDB.plainSql + TestDB.plainSqlWide
}
def HsqldbMem(cname: String) = new HsqlDB("hsqldbmem") {
val dbName = "test1"
val url = "jdbc:hsqldb:mem:"+dbName+";user=SA;password=;shutdown=true"
override def isPersistent = false
}
def HsqldbDisk(cname: String) = new HsqlDB("hsqldbdisk") {
val dbName = "hsqldb-"+cname
val url = "jdbc:hsqldb:file:"+TestDB.testDBPath+"/"+dbName+";user=SA;password=;shutdown=true;hsqldb.applog=0"
override def cleanUpBefore() = TestDB.deleteDBFiles(dbName)
// Recreating the DB is faster than dropping everything individually
override def dropUserArtifacts(implicit session: Session) = {
session.close()
cleanUpBefore()
}
}
def SQLiteMem(cname: String) = new SQLiteTestDB("jdbc:sqlite::memory:", "sqlitemem") {
override def isPersistent = false
override def isShared = false
}
def SQLiteDisk(cname: String) = {
val prefix = "sqlite-"+cname
new SQLiteTestDB("jdbc:sqlite:"+TestDB.testDBPath+"/"+prefix+".db", "sqlitedisk") {
override def cleanUpBefore() = TestDB.deleteDBFiles(prefix)
}
}
def DerbyMem(cname: String) = new DerbyDB("derbymem") {
val dbName = "test1"
val url = "jdbc:derby:memory:"+dbName+";create=true"
override def cleanUpBefore() = {
val dropUrl = "jdbc:derby:memory:"+dbName+";drop=true"
try { Database.forURL(dropUrl, driver = jdbcDriver) withSession { s:Session => s.conn } }
catch { case e: SQLException => }
}
}
def DerbyDisk(cname: String) = new DerbyDB("derbydisk") {
val dbName = "derby-"+cname
val url = "jdbc:derby:"+TestDB.testDBPath+"/"+dbName+";create=true"
override def cleanUpBefore() = {
val dropUrl = "jdbc:derby:"+TestDB.testDBPath+"/"+dbName+";shutdown=true"
try { Database.forURL(dropUrl, driver = jdbcDriver) withSession { s:Session => s.conn } }
catch { case e: SQLException => }
TestDB.deleteDBFiles(dbName)
}
}
def Postgres(cname: String) = new ExternalTestDB("postgres", PostgresDriver) {
override def getLocalTables(implicit session: Session) = {
val tables = ResultSetInvoker[(String,String,String, String)](_.conn.getMetaData().getTables("", "public", null, null))
tables.list.filter(_._4.toUpperCase == "TABLE").map(_._3).sorted
}
override def getLocalSequences(implicit session: Session) = {
val tables = ResultSetInvoker[(String,String,String, String)](_.conn.getMetaData().getTables("", "public", null, null))
tables.list.filter(_._4.toUpperCase == "SEQUENCE").map(_._3).sorted
}
override lazy val capabilities = driver.capabilities + TestDB.plainSql + TestDB.plainSqlWide
}
def MySQL(cname: String) = new ExternalTestDB("mysql", MySQLDriver) {
// Recreating the DB is faster than dropping everything individually
override def dropUserArtifacts(implicit session: Session) = {
session.close()
cleanUpBefore()
}
/*override def dropUserArtifacts(implicit session: Session) = {
val constraints = (Q[(String, String)]+"""
select distinct constraint_name, table_name
from information_schema.key_column_usage
where referenced_table_name is not null
""").list
println("###### "+constraints)
for((c, t) <- constraints if !c.startsWith("SQL"))
(Q.u+"alter table "+driver.quoteIdentifier(t)+" drop foreign key "+driver.quoteIdentifier(c)).execute()
for(t <- getLocalTables)
(Q.u+"drop table if exists "+driver.quoteIdentifier(t)+" cascade").execute()
for(t <- getLocalSequences)
(Q.u+"drop sequence if exists "+driver.quoteIdentifier(t)+" cascade").execute()
}*/
override lazy val capabilities = driver.capabilities + TestDB.plainSql + TestDB.plainSqlWide
}
def SQLServerJTDS(cname: String) = new SQLServerDB("sqlserver")
def SQLServerSQLJDBC(cname: String) = new SQLServerDB("sqlserver-jdbc")
def MSAccess(cname: String) = new AccessDB("access")
}
class SQLiteTestDB(dburl: String, confName: String) extends TestDB(confName, SQLiteDriver) {
val url = dburl
val jdbcDriver = "org.sqlite.JDBC"
override def getLocalTables(implicit session: Session) =
super.getLocalTables.filter(s => !s.toLowerCase.contains("sqlite_"))
override def dropUserArtifacts(implicit session: Session) = {
for(t <- getLocalTables)
(Q.u+"drop table if exists "+driver.quoteIdentifier(t)).execute()
for(t <- getLocalSequences)
(Q.u+"drop sequence if exists "+driver.quoteIdentifier(t)).execute()
}
override lazy val capabilities = driver.capabilities + TestDB.plainSql
}
class AccessDB(confName: String) extends TestDB(confName, AccessDriver) {
val jdbcDriver = TestDB.get(confName, "driver").orNull
def dbName = TestDB.get(confName, "testDB").get
val dir = new File(TestDB.testDBDir)
val dbPath = dir.getAbsolutePath.replace("\\\\", "/")
lazy val emptyDBFile = TestDB.get(confName, "emptyDBFile").get
.replace("[DB]", dbName).replace("[DBPATH]", dbPath)
lazy val testDBFile = TestDB.get(confName, "testDBFile").get
.replace("[DB]", dbName).replace("[DBPATH]", dbPath)
lazy val url = TestDB.get(confName, "url").getOrElse("")
.replace("[DB]", dbName).replace("[DBPATH]", dbPath)
override def isEnabled = TestDB.isExternalEnabled(confName)
override def createDB() = Database.forURL(url, driver = jdbcDriver)
override def cleanUpBefore() {
cleanUpAfter()
TestDB.copy(new File(emptyDBFile), new File(testDBFile))
}
override def cleanUpAfter() = TestDB.deleteDBFiles(dbName)
override def dropUserArtifacts(implicit session: Session) = {
session.close()
cleanUpBefore()
}
/* Works in some situations but fails with "Optional feature not implemented" in others */
override def canGetLocalTables = false
override def getLocalTables(implicit session: Session) =
MTable.getTables.list.map(_.name.name).sorted
override lazy val capabilities = driver.capabilities + TestDB.plainSql
}
abstract class DerbyDB(confName: String) extends TestDB(confName, DerbyDriver) {
System.setProperty("derby.stream.error.method", classOf[DerbyDB].getName + ".DEV_NULL")
val jdbcDriver = "org.apache.derby.jdbc.EmbeddedDriver"
override def getLocalTables(implicit session: Session): List[String] = {
val tables = ResultSetInvoker[(String,String,String)](_.conn.getMetaData().getTables(null, "APP", null, null))
tables.list.map(_._3).sorted
}
override def dropUserArtifacts(implicit session: Session) = {
try {
try { (Q.u+"create table \\"__derby_dummy\\"(x integer primary key)").execute }
catch { case ignore: SQLException => }
val constraints = (Q[(String, String)]+"""
select c.constraintname, t.tablename
from sys.sysconstraints c, sys.sysschemas s, sys.systables t
where c.schemaid = s.schemaid and c.tableid = t.tableid and s.schemaname = 'APP'
""").list
for((c, t) <- constraints if !c.startsWith("SQL"))
(Q.u+"alter table "+driver.quoteIdentifier(t)+" drop constraint "+driver.quoteIdentifier(c)).execute()
for(t <- getLocalTables)
(Q.u+"drop table "+driver.quoteIdentifier(t)).execute()
for(t <- getLocalSequences)
(Q.u+"drop sequence "+driver.quoteIdentifier(t)).execute()
} catch {
case e: Exception =>
println("[Caught Exception while dropping user artifacts in Derby: "+e+"]")
session.close()
cleanUpBefore()
}
}
override lazy val capabilities = driver.capabilities + TestDB.plainSql
}
object DerbyDB {
val DEV_NULL = new java.io.OutputStream { def write(b: Int) {} };
}
abstract class HsqlDB(confName: String) extends TestDB(confName, HsqldbDriver) {
val jdbcDriver = "org.hsqldb.jdbcDriver"
override def getLocalTables(implicit session: Session): List[String] = {
val tables = ResultSetInvoker[(String,String,String)](_.conn.getMetaData().getTables(null, "PUBLIC", null, null))
tables.list.map(_._3).sorted
}
override def cleanUpBefore() {
// Try to turn Hsqldb logging off -- does not work :(
System.setProperty("hsqldb.reconfig_logging", "false")
Logger.getLogger("org.hsqldb.persist.Logger").setLevel(Level.OFF)
Logger.getLogger("org.hsqldb").setLevel(Level.OFF)
Logger.getLogger("hsqldb").setLevel(Level.OFF)
}
override lazy val capabilities = driver.capabilities + TestDB.plainSql
}
class SQLServerDB(confName: String) extends ExternalTestDB(confName, SQLServerDriver) {
val defaultSchema = TestDB.get(confName, "defaultSchema").getOrElse("")
override def getLocalTables(implicit session: Session): List[String] = {
val tables = ResultSetInvoker[(String,String,String)](_.conn.getMetaData().getTables(dbName, defaultSchema, null, null))
tables.list.map(_._3).sorted
}
override def dropUserArtifacts(implicit session: Session) = {
val constraints = (Q[(String, String)]+"""
select constraint_name, table_name
from information_schema.table_constraints
where constraint_type = 'FOREIGN KEY'
""").list
for((c, t) <- constraints if !c.startsWith("SQL"))
(Q.u+"alter table "+driver.quoteIdentifier(t)+" drop constraint "+driver.quoteIdentifier(c)).execute()
for(t <- getLocalTables)
(Q.u+"drop table "+driver.quoteIdentifier(t)).execute()
}
override lazy val capabilities = driver.capabilities + TestDB.plainSql
}
| zefonseca/slick-1.0.0-scala.2.11.1 | slick-testkit/src/test/scala/scala/slick/testutil/TestDBs.scala | Scala | bsd-2-clause | 10,604 |
package scalpel
import org.scalatest.FunSpec
import org.scalatest.ShouldMatchers
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class RunCaliper extends FunSpec with ShouldMatchers {
describe("run caliper") { it("runs") { port.CaliperRunner.run() } }
}
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class RunSM extends FunSpec with ShouldMatchers {
describe("run scalameter") { it("runs") { SMRunner.run(LocalBenchmark) } }
}
@org.junit.runner.RunWith(classOf[org.scalatest.junit.JUnitRunner])
class RunScalpel extends FunSpec with ShouldMatchers {
describe("run scalpel") { it("runs") { ScalpelRunner.run() } }
}
| lossyrob/scalpel | src/test/scala/scalpel/Runner.scala | Scala | bsd-3-clause | 667 |
package com.jejking.rprng.rng.actors
import akka.actor.ActorSystem
import akka.pattern.ask
import akka.testkit.{DefaultTimeout, ImplicitSender, TestActorRef, TestKit}
import akka.util.ByteString
import com.jejking.rprng.rng._
import org.scalamock.scalatest.MockFactory
import org.scalatest.BeforeAndAfterAll
import org.scalatest.concurrent.{Eventually, ScalaFutures}
import org.scalatest.flatspec.AnyFlatSpecLike
import org.scalatest.matchers.should.Matchers
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.language.postfixOps
/**
* Test of functionality around [[RngActor]].
*/
class RngActorSpec extends TestKit(ActorSystem("test")) with DefaultTimeout with ImplicitSender
with AnyFlatSpecLike with Matchers with BeforeAndAfterAll with MockFactory with Eventually with ScalaFutures {
import Protocol._
implicit override val patienceConfig = PatienceConfig(timeout = 1 second, interval = 100 milliseconds)
private val fourNotVeryRandomBytes = Array[Byte](1, 2, 3, 4)
private val eightNotVeryRandomBytes = Array[Byte](1, 2, 3, 4, 5, 6, 7, 8)
"the random byte source actor" should "respond with bytes from the wrapped byte source in response to a request" in {
val request = RandomByteRequest(4)
val mockByteSource = mock[Rng]
(mockByteSource.randomBytes _).expects(where {
(request: RandomByteRequest) => request.count == 8
}).returning(eightNotVeryRandomBytes)
(mockByteSource.reseed _).expects(*)
(mockByteSource.randomBytes _).expects(where {
(request: RandomByteRequest) => request.count == 4
}).returning(fourNotVeryRandomBytes)
val fixedSecureSeeder = stub[SecureSeeder]
(fixedSecureSeeder.generateSeed _).when().returning(Seed(0L))
val actorRef = system.actorOf(RngActor.props(mockByteSource, fixedSecureSeeder))
// send request for four "random" bytes
actorRef ! request
expectMsg(ByteString(fourNotVeryRandomBytes))
}
it should "initialise itself from a proper seed source" in {
val mockByteSource = mock[Rng]
val mockSecureSeeder = mock[SecureSeeder]
(mockSecureSeeder.generateSeed _).expects().returning(Seed(0L))
(mockByteSource.reseed _).expects(*)
(mockByteSource.randomBytes _).expects(where {
(request: RandomByteRequest) => request.count == 8
}).returning(eightNotVeryRandomBytes)
val actorRef = TestActorRef(new RngActor(mockByteSource, mockSecureSeeder))
val future = actorRef ? RandomByteRequest(4)
}
it should "schedule a message to itself to reseed" in {
val mockByteSource = mock[Rng]
(mockByteSource.reseed _).expects(*)
(mockByteSource.randomBytes _).expects(where {
(request: RandomByteRequest) => request.count == 8
}).returning(TestUtils.arrayOfEightZeroBytes())
val mockSecureSeeder = mock[SecureSeeder]
(mockSecureSeeder.generateSeed _).expects().returning(Seed(0))
val mockScheduleHelper = mock[ScheduleHelper]
// we expect that the scheduler is called to send a reseed message between min and max duration from now...
// the compiler warning emitted here is misleading as it doesn't quite get the particular combination
// of mocking and an apparently pure function
(mockScheduleHelper.scheduleOnce(_ : FiniteDuration)(_ : () => Unit)(_ : ExecutionContext)) expects (where {
(finiteDuration: FiniteDuration, *, ec: ExecutionContext) => finiteDuration === TimeRangeToReseed.defaultMinLifeTime
})
val scheduleHelperFactory: ActorSystem => ScheduleHelper = _ => mockScheduleHelper
val actorRef = TestActorRef(new RngActor(mockByteSource, mockSecureSeeder, scheduleHelperFactory))
}
it should "obtain new seed - in a way that does not block message processing" in {
val timeRange = TimeRangeToReseed(1 milliseconds, 2 milliseconds)
val mockByteSource = mock[Rng]
(mockByteSource.randomBytes _).expects(where {
(request: RandomByteRequest) => request.count == 8
}).returning(eightNotVeryRandomBytes)
(mockByteSource.reseed _).expects(Seed(0))
val mockSecureSeeder = mock[SecureSeeder]
(mockSecureSeeder.generateSeed _).expects().returning(Seed(0L)).atLeastTwice()
val actorRef = TestActorRef(new RngActor(mockByteSource, mockSecureSeeder, timeRangeToReseed = timeRange))
Thread.sleep(250) // wait for the async stuff to happen before evaluating the expectations
}
it should "apply new seed in a thread-safe way" in {
val timeRange = TimeRangeToReseed(1 milliseconds, 2 milliseconds)
val mockByteSource = mock[Rng]
(mockByteSource.randomBytes _).expects(where {
(request: RandomByteRequest) => request.count == 8
}).returning(eightNotVeryRandomBytes)
(mockByteSource.reseed _).expects(*).atLeastTwice()
val mockSecureSeeder = mock[SecureSeeder]
(mockSecureSeeder.generateSeed _).expects().returning(Seed(0L)).atLeastTwice()
val actorRef = TestActorRef(new RngActor(mockByteSource, mockSecureSeeder, timeRangeToReseed = timeRange))
Thread.sleep(250) // wait for the async stuff to happen before evaluating the expectations
}
it should "politely ignore other message types" in {
// create actor ref with byte source that wraps the fixed source
val mockByteSource = mock[Rng]
(mockByteSource.randomBytes _).expects(where {
(request: RandomByteRequest) => request.count == 8
}).returning(eightNotVeryRandomBytes)
(mockByteSource.reseed _).expects(*)
val fixedSecureSeeder = stub[SecureSeeder]
(fixedSecureSeeder.generateSeed _).when().returning(Seed(0L))
val actorRef = TestActorRef(new RngActor(mockByteSource, fixedSecureSeeder))
// send request for four "random" bytes
val future = actorRef ? "Hello"
future.value.get.get shouldBe UnknownInputType
}
override def afterAll(): Unit = {
TestKit.shutdownActorSystem(system)
}
}
| jejking/rprng | src/test/scala/com/jejking/rprng/rng/actors/RngActorSpec.scala | Scala | apache-2.0 | 5,884 |
/*
* Accio is a platform to launch computer science experiments.
* Copyright (C) 2016-2018 Vincent Primault <[email protected]>
*
* Accio is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Accio is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Accio. If not, see <http://www.gnu.org/licenses/>.
*/
package fr.cnrs.liris.accio.sdk
/**
 * Trait used to help implement an operator in Scala. Operators should be defined as case
* classes. The inputs of the operator are materialized as the parameters of this case class,
* while the outputs are materialized as the parameters of the output it produces, which itself is
* a case class.
*
* Fields of the input and output types must be annotated with the [[Arg]] annotation, while the
 * operator must be annotated with the [[Op]] annotation. Input and output parameters must be
* supported by one of the registered [[fr.cnrs.liris.lumos.domain.DataType]].
*
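 * A minimal sketch of what an operator could look like (the names below and the bare
 * `@Op`/`@Arg` usage are illustrative assumptions, not taken from this SDK's actual
 * annotation signatures):
 * {{{
 *   case class MultiplyOut(@Arg result: Int)
 *
 *   @Op
 *   case class Multiply(@Arg a: Int, @Arg b: Int) extends ScalaOperator[MultiplyOut] {
 *     override def execute(ctx: OpContext): MultiplyOut = MultiplyOut(a * b)
 *   }
 * }}}
 *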
* @tparam T Output type
*/
trait ScalaOperator[T] {
this: Product =>
/**
   * Execute this operator. Within the provided context, it should produce a deterministic output.
*
   * Implementations can use a seed if they need some randomness. Outside of this, the execution
   * should be perfectly deterministic. A working directory is provided for operators that need to
   * write results somewhere. This directory is only valid for the operator's lifetime; it can be
   * deleted at any point once the operator has completed.
*
* @param ctx Execution context.
*/
def execute(ctx: OpContext): T
} | privamov/accio | accio/java/fr/cnrs/liris/accio/sdk/ScalaOperator.scala | Scala | gpl-3.0 | 1,989 |
package ingraph.compiler.sql
import java.sql.Connection
import org.neo4j.driver.v1.Session
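/**
 * Exports the graph currently stored in Neo4j into relational staging tables
 * (vertex, edge, label, vertex_property, edge_property) by running the Cypher
 * queries below through [[ExportStep]].
 */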
object ExportSteps {
private val exportVertex = new ExportStep(
"""// vertex
|MATCH (n)
|RETURN id(n) AS vertex_id""".stripMargin, "vertex")
private val exportEdge = new ExportStep(
"""// edge
|MATCH (from)-[edge]->(to)
|RETURN id(edge) AS edge_id, id(from) AS from, id(to) AS to, type(edge) AS type""".stripMargin, "edge")
private val exportLabel = new ExportStep(
"""// label
|MATCH (n)
|UNWIND labels(n) AS name
|RETURN id(n) AS parent, name""".stripMargin, "label")
private val exportVertex_property = new ExportStep(
"""// vertex_property
|MATCH (n)
|UNWIND keys(n) AS key
|RETURN id(n) AS parent, key, properties(n)[key] AS value""".stripMargin, "vertex_property")
private val exportEdge_property = new ExportStep(
"""// edge_property
|MATCH ()-[e]->()
|UNWIND keys(e) AS key
|RETURN id(e) AS parent, key, properties(e)[key] AS value""".stripMargin, "edge_property")
private val steps = Array(exportVertex, exportEdge, exportLabel, exportVertex_property, exportEdge_property)
def execute(cypherSession: Session, sqlConnection: Connection): Unit = {
for (step <- steps)
step.exportToTable(cypherSession, sqlConnection)
}
}
| FTSRG/ingraph | cypher-to-sql/src/main/scala/ingraph/compiler/sql/ExportSteps.scala | Scala | epl-1.0 | 1,352 |
package xitrum.validator
object RangeLength {
def apply(min: Int, max: Int) = new RangeLength(min, max)
}
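/**
 * Validates that the length of a string value is between min and max, inclusive.
 */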
class RangeLength(min: Int, max: Int) extends Validator[String] {
def check(value: String) = {
val length = value.length
min <= length && length <= max
}
def message(name: String, value: String) = {
val length = value.length
if (min <= length && length <= max)
None
else
Some("%s must be at least %d and at most %d characters".format(name, min, max))
}
}
| caiiiycuk/xitrum | src/main/scala/xitrum/validator/RangeLength.scala | Scala | mit | 506 |
package progscala2.fp.categories
import org.scalatest.FunSpec
import org.scalatest.prop.PropertyChecks
/**
* Created by younggi on 1/24/17.
*/
class MonadProperties extends FunSpec with PropertyChecks {
// Arbitrary function:
val f1: Int => Seq[Int] = i => 0 until 10 by ((math.abs(i) % 10) + 1)
describe ("Monad law for unit") {
it ("works for Sequence Monads") {
import SeqM._
val unitInt: Int => Seq[Int] = (i: Int) => unit(i)
      // Property-based testing (default: 100 valid values, but only 10 here?)
forAll { (i: Int) =>
println(i)
val seq: Seq[Int] = Seq(i)
assert( flatMap(unit(i))(f1) === f1(i) )
assert( flatMap(seq)(unitInt) === seq )
}
}
}
describe ("Monad law for function composition") {
it ("works for Sequence Monads") {
val f2: Int => Seq[Int] = i => Seq(i+1)
import SeqM._
forAll { (i: Int) =>
val seq = Seq(i)
assert( flatMap(flatMap(seq)(f1))(f2) ===
flatMap(seq)(x => flatMap(f1(x))(f2)) )
}
}
}
}
| younggi/books | programming_scala/progscala2/src/test/scala/progscala2/fp/categories/MonadProperties.scala | Scala | mit | 1,052 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import org.scalatest.BeforeAndAfterEach
import org.apache.spark.sql.catalyst.plans.logical.Expand
import org.apache.spark.sql.functions._
import org.apache.spark.sql.test.SharedSparkSession
import org.apache.spark.sql.types.StringType
class DataFrameTimeWindowingSuite extends QueryTest with SharedSparkSession {
import testImplicits._
test("simple tumbling window with record at window start") {
val df = Seq(
("2016-03-27 19:39:30", 1, "a")).toDF("time", "value", "id")
checkAnswer(
df.groupBy(window($"time", "10 seconds"))
.agg(count("*").as("counts"))
.orderBy($"window.start".asc)
.select($"window.start".cast("string"), $"window.end".cast("string"), $"counts"),
Seq(
Row("2016-03-27 19:39:30", "2016-03-27 19:39:40", 1)
)
)
}
test("SPARK-21590: tumbling window using negative start time") {
val df = Seq(
("2016-03-27 19:39:30", 1, "a"),
("2016-03-27 19:39:25", 2, "a")).toDF("time", "value", "id")
checkAnswer(
df.groupBy(window($"time", "10 seconds", "10 seconds", "-5 seconds"))
.agg(count("*").as("counts"))
.orderBy($"window.start".asc)
.select($"window.start".cast("string"), $"window.end".cast("string"), $"counts"),
Seq(
Row("2016-03-27 19:39:25", "2016-03-27 19:39:35", 2)
)
)
}
test("tumbling window groupBy statement") {
val df = Seq(
("2016-03-27 19:39:34", 1, "a"),
("2016-03-27 19:39:56", 2, "a"),
("2016-03-27 19:39:27", 4, "b")).toDF("time", "value", "id")
checkAnswer(
df.groupBy(window($"time", "10 seconds"))
.agg(count("*").as("counts"))
.orderBy($"window.start".asc)
.select("counts"),
Seq(Row(1), Row(1), Row(1))
)
}
test("tumbling window groupBy statement with startTime") {
val df = Seq(
("2016-03-27 19:39:34", 1, "a"),
("2016-03-27 19:39:56", 2, "a"),
("2016-03-27 19:39:27", 4, "b")).toDF("time", "value", "id")
checkAnswer(
df.groupBy(window($"time", "10 seconds", "10 seconds", "5 seconds"), $"id")
.agg(count("*").as("counts"))
.orderBy($"window.start".asc)
.select("counts"),
Seq(Row(1), Row(1), Row(1)))
}
test("SPARK-21590: tumbling window groupBy statement with negative startTime") {
val df = Seq(
("2016-03-27 19:39:34", 1, "a"),
("2016-03-27 19:39:56", 2, "a"),
("2016-03-27 19:39:27", 4, "b")).toDF("time", "value", "id")
checkAnswer(
df.groupBy(window($"time", "10 seconds", "10 seconds", "-5 seconds"), $"id")
.agg(count("*").as("counts"))
.orderBy($"window.start".asc)
.select("counts"),
Seq(Row(1), Row(1), Row(1)))
}
test("tumbling window with multi-column projection") {
val df = Seq(
("2016-03-27 19:39:34", 1, "a"),
("2016-03-27 19:39:56", 2, "a"),
("2016-03-27 19:39:27", 4, "b")).toDF("time", "value", "id")
.select(window($"time", "10 seconds"), $"value")
.orderBy($"window.start".asc)
.select($"window.start".cast("string"), $"window.end".cast("string"), $"value")
val expands = df.queryExecution.optimizedPlan.find(_.isInstanceOf[Expand])
assert(expands.isEmpty, "Tumbling windows shouldn't require expand")
checkAnswer(
df,
Seq(
Row("2016-03-27 19:39:20", "2016-03-27 19:39:30", 4),
Row("2016-03-27 19:39:30", "2016-03-27 19:39:40", 1),
Row("2016-03-27 19:39:50", "2016-03-27 19:40:00", 2)
)
)
}
test("sliding window grouping") {
val df = Seq(
("2016-03-27 19:39:34", 1, "a"),
("2016-03-27 19:39:56", 2, "a"),
("2016-03-27 19:39:27", 4, "b")).toDF("time", "value", "id")
checkAnswer(
df.groupBy(window($"time", "10 seconds", "3 seconds", "0 second"))
.agg(count("*").as("counts"))
.orderBy($"window.start".asc)
.select($"window.start".cast("string"), $"window.end".cast("string"), $"counts"),
// 2016-03-27 19:39:27 UTC -> 4 bins
// 2016-03-27 19:39:34 UTC -> 3 bins
// 2016-03-27 19:39:56 UTC -> 3 bins
Seq(
Row("2016-03-27 19:39:18", "2016-03-27 19:39:28", 1),
Row("2016-03-27 19:39:21", "2016-03-27 19:39:31", 1),
Row("2016-03-27 19:39:24", "2016-03-27 19:39:34", 1),
Row("2016-03-27 19:39:27", "2016-03-27 19:39:37", 2),
Row("2016-03-27 19:39:30", "2016-03-27 19:39:40", 1),
Row("2016-03-27 19:39:33", "2016-03-27 19:39:43", 1),
Row("2016-03-27 19:39:48", "2016-03-27 19:39:58", 1),
Row("2016-03-27 19:39:51", "2016-03-27 19:40:01", 1),
Row("2016-03-27 19:39:54", "2016-03-27 19:40:04", 1))
)
}
test("sliding window projection") {
val df = Seq(
("2016-03-27 19:39:34", 1, "a"),
("2016-03-27 19:39:56", 2, "a"),
("2016-03-27 19:39:27", 4, "b")).toDF("time", "value", "id")
.select(window($"time", "10 seconds", "3 seconds", "0 second"), $"value")
.orderBy($"window.start".asc, $"value".desc).select("value")
val expands = df.queryExecution.optimizedPlan.find(_.isInstanceOf[Expand])
assert(expands.nonEmpty, "Sliding windows require expand")
checkAnswer(
df,
// 2016-03-27 19:39:27 UTC -> 4 bins
// 2016-03-27 19:39:34 UTC -> 3 bins
// 2016-03-27 19:39:56 UTC -> 3 bins
Seq(Row(4), Row(4), Row(4), Row(4), Row(1), Row(1), Row(1), Row(2), Row(2), Row(2))
)
}
test("windowing combined with explode expression") {
val df = Seq(
("2016-03-27 19:39:34", 1, Seq("a", "b")),
("2016-03-27 19:39:56", 2, Seq("a", "c", "d"))).toDF("time", "value", "ids")
checkAnswer(
df.select(window($"time", "10 seconds"), $"value", explode($"ids"))
.orderBy($"window.start".asc).select("value"),
// first window exploded to two rows for "a", and "b", second window exploded to 3 rows
Seq(Row(1), Row(1), Row(2), Row(2), Row(2))
)
}
test("null timestamps") {
val df = Seq(
("2016-03-27 09:00:05", 1),
("2016-03-27 09:00:32", 2),
(null, 3),
(null, 4)).toDF("time", "value")
checkDataset(
df.select(window($"time", "10 seconds"), $"value")
.orderBy($"window.start".asc)
.select("value")
.as[Int],
1, 2) // null columns are dropped
}
test("time window joins") {
val df = Seq(
("2016-03-27 09:00:05", 1),
("2016-03-27 09:00:32", 2),
(null, 3),
(null, 4)).toDF("time", "value")
val df2 = Seq(
("2016-03-27 09:00:02", 3),
("2016-03-27 09:00:35", 6)).toDF("time", "othervalue")
checkAnswer(
df.select(window($"time", "10 seconds"), $"value").join(
df2.select(window($"time", "10 seconds"), $"othervalue"), Seq("window"))
.groupBy("window")
.agg((sum("value") + sum("othervalue")).as("total"))
.orderBy($"window.start".asc).select("total"),
Seq(Row(4), Row(8)))
}
test("negative timestamps") {
val df4 = Seq(
("1970-01-01 00:00:02", 1),
("1970-01-01 00:00:12", 2)).toDF("time", "value")
checkAnswer(
df4.select(window($"time", "10 seconds", "10 seconds", "5 seconds"), $"value")
.orderBy($"window.start".asc)
.select($"window.start".cast(StringType), $"window.end".cast(StringType), $"value"),
Seq(
Row("1969-12-31 23:59:55", "1970-01-01 00:00:05", 1),
Row("1970-01-01 00:00:05", "1970-01-01 00:00:15", 2))
)
}
test("multiple time windows in a single operator throws nice exception") {
val df = Seq(
("2016-03-27 09:00:02", 3),
("2016-03-27 09:00:35", 6)).toDF("time", "value")
val e = intercept[AnalysisException] {
df.select(window($"time", "10 second"), window($"time", "15 second")).collect()
}
assert(e.getMessage.contains(
"Multiple time window expressions would result in a cartesian product"))
}
test("aliased windows") {
val df = Seq(
("2016-03-27 19:39:34", 1, Seq("a", "b")),
("2016-03-27 19:39:56", 2, Seq("a", "c", "d"))).toDF("time", "value", "ids")
checkAnswer(
df.select(window($"time", "10 seconds").as("time_window"), $"value")
.orderBy($"time_window.start".asc)
.select("value"),
Seq(Row(1), Row(2))
)
}
test("millisecond precision sliding windows") {
val df = Seq(
("2016-03-27 09:00:00.41", 3),
("2016-03-27 09:00:00.62", 6),
("2016-03-27 09:00:00.715", 8)).toDF("time", "value")
checkAnswer(
df.groupBy(window($"time", "200 milliseconds", "40 milliseconds", "0 milliseconds"))
.agg(count("*").as("counts"))
.orderBy($"window.start".asc)
.select($"window.start".cast(StringType), $"window.end".cast(StringType), $"counts"),
Seq(
Row("2016-03-27 09:00:00.24", "2016-03-27 09:00:00.44", 1),
Row("2016-03-27 09:00:00.28", "2016-03-27 09:00:00.48", 1),
Row("2016-03-27 09:00:00.32", "2016-03-27 09:00:00.52", 1),
Row("2016-03-27 09:00:00.36", "2016-03-27 09:00:00.56", 1),
Row("2016-03-27 09:00:00.4", "2016-03-27 09:00:00.6", 1),
Row("2016-03-27 09:00:00.44", "2016-03-27 09:00:00.64", 1),
Row("2016-03-27 09:00:00.48", "2016-03-27 09:00:00.68", 1),
Row("2016-03-27 09:00:00.52", "2016-03-27 09:00:00.72", 2),
Row("2016-03-27 09:00:00.56", "2016-03-27 09:00:00.76", 2),
Row("2016-03-27 09:00:00.6", "2016-03-27 09:00:00.8", 2),
Row("2016-03-27 09:00:00.64", "2016-03-27 09:00:00.84", 1),
Row("2016-03-27 09:00:00.68", "2016-03-27 09:00:00.88", 1))
)
}
private def withTempTable(f: String => Unit): Unit = {
val tableName = "temp"
Seq(
("2016-03-27 19:39:34", 1),
("2016-03-27 19:39:56", 2),
("2016-03-27 19:39:27", 4)).toDF("time", "value").createOrReplaceTempView(tableName)
try {
f(tableName)
} finally {
spark.catalog.dropTempView(tableName)
}
}
test("time window in SQL with single string expression") {
withTempTable { table =>
checkAnswer(
spark.sql(s"""select window(time, "10 seconds"), value from $table""")
.select($"window.start".cast(StringType), $"window.end".cast(StringType), $"value"),
Seq(
Row("2016-03-27 19:39:20", "2016-03-27 19:39:30", 4),
Row("2016-03-27 19:39:30", "2016-03-27 19:39:40", 1),
Row("2016-03-27 19:39:50", "2016-03-27 19:40:00", 2)
)
)
}
}
test("time window in SQL with two expressions") {
withTempTable { table =>
checkAnswer(
spark.sql(
s"""select window(time, "10 seconds", 10000000), value from $table""")
.select($"window.start".cast(StringType), $"window.end".cast(StringType), $"value"),
Seq(
Row("2016-03-27 19:39:20", "2016-03-27 19:39:30", 4),
Row("2016-03-27 19:39:30", "2016-03-27 19:39:40", 1),
Row("2016-03-27 19:39:50", "2016-03-27 19:40:00", 2)
)
)
}
}
test("time window in SQL with three expressions") {
withTempTable { table =>
checkAnswer(
spark.sql(
s"""select window(time, "10 seconds", 10000000, "5 seconds"), value from $table""")
.select($"window.start".cast(StringType), $"window.end".cast(StringType), $"value"),
Seq(
Row("2016-03-27 19:39:25", "2016-03-27 19:39:35", 1),
Row("2016-03-27 19:39:25", "2016-03-27 19:39:35", 4),
Row("2016-03-27 19:39:55", "2016-03-27 19:40:05", 2)
)
)
}
}
test("SPARK-21590: time window in SQL with three expressions including negative start time") {
withTempTable { table =>
checkAnswer(
spark.sql(
s"""select window(time, "10 seconds", 10000000, "-5 seconds"), value from $table""")
.select($"window.start".cast(StringType), $"window.end".cast(StringType), $"value"),
Seq(
Row("2016-03-27 19:39:25", "2016-03-27 19:39:35", 1),
Row("2016-03-27 19:39:25", "2016-03-27 19:39:35", 4),
Row("2016-03-27 19:39:55", "2016-03-27 19:40:05", 2)
)
)
}
}
}
| pgandhi999/spark | sql/core/src/test/scala/org/apache/spark/sql/DataFrameTimeWindowingSuite.scala | Scala | apache-2.0 | 12,965 |
package com.arcusys.valamis.certificate.service
import java.security.MessageDigest
import com.arcusys.learn.liferay.LiferayClasses._
import com.arcusys.learn.liferay.services.{PermissionHelper, UserLocalServiceHelper}
import com.arcusys.valamis.certificate.model.CertificateSortBy.CertificateSortBy
import com.arcusys.valamis.certificate.model.badge._
import com.arcusys.valamis.certificate.model._
import com.arcusys.valamis.certificate.service.util.OpenBadgesHelper
import com.arcusys.valamis.certificate.storage.{CertificateStateRepository, CertificateRepository}
import com.arcusys.valamis.lrs.api.StatementApi
import com.arcusys.valamis.settings.model
import com.arcusys.valamis.settings.model.SettingType
import com.arcusys.valamis.settings.storage.SettingStorage
import com.arcusys.valamis.user.service.UserService
import com.arcusys.valamis.util.HexHelper
import com.escalatesoft.subcut.inject.Injectable
import org.joda.time.DateTime
import org.joda.time.format.ISODateTimeFormat
//TODO refactor, move badge client code to BadgeClient class
trait CertificateUserServiceImpl extends Injectable with CertificateService {
private lazy val userLocalServiceHelper = inject[UserLocalServiceHelper]
private lazy val certificateRepository = inject[CertificateRepository]
private lazy val certificateToUserRepository = inject[CertificateStateRepository]
private lazy val userService = inject[UserService]
private lazy val settingStorage = inject[SettingStorage]
private lazy val checker = inject[CertificateStatusChecker]
//new CertificateStatusChecker(bindingModule)
def addUser(certificateId: Int, userId: Int) = {
val certificate = certificateRepository.getById(certificateId)
val user = userService.byId(userId)
certificateToUserRepository.create(CertificateState(user.getUserId, CertificateStatus.InProgress, DateTime.now, DateTime.now, certificate.id))//create(certificate, (DateTime.now, user.getUserId))
}
def deleteUser(certificateId: Int, userId: Int) = {
val certificate = certificateRepository.getById(certificateId)
val user = userService.byId(userId)
certificateToUserRepository.delete(user.getUserId, certificate.id)
}
def getForUser(companyId: Int, skip: Int, take: Int, filter: String, sortAZ: Boolean,
userId: Int, isOnlyPublished: Boolean): Seq[Certificate] = {
var certificates = getForUser(userId).filter(c => if (isOnlyPublished) c.isPublished else true)
certificates = filtering(certificates, filter)
.sortBy(_.title.toLowerCase)
if (!sortAZ) certificates = certificates.reverse
if (skip < 0)
certificates
else
certificates.drop(skip).take(take)
}
def forUserCount(companyId: Int, filter: String, userId: Int, isOnlyPublished: Boolean): Int = {
val certificates = getForUser(userId).filter(c => if (isOnlyPublished) c.isPublished else true)
val filteredCertificates = filtering(certificates, filter)
filteredCertificates.length
}
private def filtering(certificates: Seq[Certificate], titleFilter: String) = {
if (titleFilter.isEmpty)
certificates
else {
certificates.filter(i => i.title.toLowerCase.contains(titleFilter.toLowerCase))
}
}
def availableForUserCount(companyId: Int, userId: Int, filter: String, isOnlyPublished: Boolean, scope: Option[Long]): Int = {
val certificates = scope match {
case Some(value) => getAvailableForUser(companyId, userId, isOnlyPublished)
.filter(x => x.scope.isDefined)
.filter(x => x.scope.get == value)
case None => getAvailableForUser(companyId, userId, isOnlyPublished)
}
val filteredCertificates = filtering(certificates, filter)
filteredCertificates.length
}
def getForUserWithStatus(companyId: Int, skip: Int, take: Int, filter: String, sortAZ: Boolean,
userId: Int, isOnlyPublished: Boolean): Seq[Certificate] = {
var certificates = getForUser(userId).filter(c => if (isOnlyPublished) c.isPublished else true)
certificates = filtering(certificates, filter)
.sortBy(_.title.toLowerCase)
if (!sortAZ) certificates = certificates.reverse
if (skip < 0)
certificates
else
certificates.drop(skip).take(take)
}
def getForUser(userId: Int): Seq[Certificate] =
certificateRepository.getByIds(certificateToUserRepository.getBy(CertificateStateFilter(userId = Some(userId))).map(_.certificateId).toSet)
private def getAvailableForUser(companyId: Int, userId: Int, isOnlyPublished: Boolean): Seq[Certificate] = {
val usersCertificates = getForUser(userId)
val all = certificateRepository.getBy(companyId = companyId)
.filter(c => if (isOnlyPublished) c.isPublished else true)
all.filter(certificate => !usersCertificates.exists(c => c.id == certificate.id)).toSeq
}
def getAvailableForUser(companyId: Int, skip: Int, take: Int, filter: String, sortAZ: Boolean, userId: Int,
isOnlyPublished: Boolean, scope: Option[Long]): Seq[Certificate] = {
var certificates = scope match {
case Some(value) => getAvailableForUser(companyId, userId, isOnlyPublished)
.filter(x => x.scope.isDefined)
.filter(x => x.scope.get == value)
case None => getAvailableForUser(companyId, userId, isOnlyPublished)
}
certificates = filtering(certificates, filter)
.sortBy(_.title.toLowerCase)
if (!sortAZ) certificates = certificates.reverse
if (skip < 0)
certificates
else
certificates.drop(skip).take(take)
}
def getJoinedUsers(certificateId: Int, filterName: String, orgId: Int, sortBy: CertificateSortBy,
sortAscDirection: Boolean, skip: Int, take: Int): Iterable[(String, LUser)] = {
val certificate = certificateRepository.getById(certificateId)
val certificateStudents = certificateToUserRepository.getBy(CertificateStateFilter(certificateId = Some(certificateId)))
val studentUserIds = certificateStudents.map(_.userId)
val formatter = ISODateTimeFormat.dateTime()
var users = userService
.all(certificate.companyId)
.filter(user => studentUserIds.contains(user.getUserId.toInt))
.filter(user => if (orgId != -1) user.getOrganizationIds.contains(orgId) else true)
.filter(user => {
if (filterName != "")
user.getFullName.toLowerCase.contains(filterName.toLowerCase)
else
true
})
.map(user => {
PermissionHelper.preparePermissionChecker(user)
(formatter.print(certificateStudents.find(v => v.userId == user.getUserId).head.userJoinedDate), user)
})
.sortBy(u => sortBy match {
case CertificateSortBy.UserJoined => u._1
case _ => u._2.getFullName
})
if (!sortAscDirection) users = users.reverse
if (skip < 0) {
users
} else {
users.drop(skip).take(take)
}
}
def getJoinedUsersCount(certificateId: Int, filterName: String, orgId: Int): Int = {
val certificate = certificateRepository.getById(certificateId)
val certificateStudents = certificateToUserRepository.getBy(CertificateStateFilter(certificateId = Some(certificateId)))
val studentUserIds = certificateStudents.map(student => student.userId)
userService
.all(certificate.companyId)
.filter(user => studentUserIds.contains(user.getUserId.toInt))
.filter(user => if (orgId != -1) user.getOrganizationIds.contains(orgId) else true).count(user =>
if (filterName != "")
user.getFullName.toLowerCase.contains(filterName.toLowerCase)
else
true)
}
def getFreeStudents(certificateId: Int, filterName: String, orgId: Int, sortBy: CertificateSortBy,
sortAscDirection: Boolean, skip: Int, take: Int): Iterable[LUser] = {
val certificate = certificateRepository.getById(certificateId)
val certificateStudents = certificateToUserRepository.getBy(CertificateStateFilter(certificateId = Some(certificateId)))
val studentUserIds = certificateStudents.map(student => student.userId)
var users = userService
.all(certificate.companyId)
.filter(user => !studentUserIds.contains(user.getUserId.toInt))
.filter(user => if (orgId != -1) user.getOrganizationIds.contains(orgId) else true)
.filter(user => !user.getFullName.isEmpty)
users = users.filter(user => {
if (filterName != "")
user.getFullName.toLowerCase.contains(filterName.toLowerCase)
else
true
})
if (!sortAscDirection) users = users.reverse
users.drop(skip).take(take)
}
def getFreeStudentsCount(certificateId: Int, orgId: Int, filterName: String): Int = {
val certificate = certificateRepository.getById(certificateId)
val certificateStudents = certificateToUserRepository.getBy(CertificateStateFilter(certificateId = Some(certificateId)))
val studentUserIds = certificateStudents.map(student => student.userId)
var users = userService
.all(certificate.companyId)
.filter(user => !studentUserIds.contains(user.getUserId.toInt))
.filter(user => if (orgId != -1) user.getOrganizationIds.contains(orgId) else true)
users = users.filter(user => {
if (filterName != "")
user.getFullName.toLowerCase.contains(filterName.toLowerCase)
else
true
})
users.count(p => true)
}
def getCertificatesByUserWithOpenBadges(statementApi: StatementApi, companyId: Int, skip: Int, take: Int, filter: String, sortAZ: Boolean,
userId: Int, isOnlyPublished: Boolean): Seq[Certificate] = {
var certificates = getCertificatesByUserWithOpenBadges(statementApi, companyId, userId, isOnlyPublished)
if (!filter.isEmpty)
certificates = certificates.filter(i => i.title.toLowerCase.contains(filter.toLowerCase))
if (!sortAZ)
certificates = certificates.reverse
certificates.drop(skip).take(take)
}
def getCertificatesByUserWithOpenBadges(statementApi: StatementApi, companyId: Int, userId: Int, isOnlyPublished: Boolean): Seq[Certificate] = {
val all = getForUser(userId)
.filter(c => if (isOnlyPublished) c.isPublished else true)
.filter(c => checker.getStatus(statementApi, c.id, userId) == CertificateStatus.Success)
val allSortedAZ = all.sortBy(_.title.toLowerCase)
val userEmail = userService.byId(userId).getEmailAddress
val openbadges = OpenBadgesHelper.getOpenBadges(userEmail)
.map(x => Certificate(id = -1, title = x("title").toString, description = x("description").toString, logo = x("logo").toString, companyId = companyId, createdAt = DateTime.now))
.filter(p => !allSortedAZ.exists(c => c.title == p.title))
allSortedAZ ++ openbadges
}
def getCertificatesCountByUserWithOpenBadges(statementApi: StatementApi, companyId: Int, filter: String, userId: Int, isOnlyPublished: Boolean): Int = {
val all = getCertificatesByUserWithOpenBadges(statementApi, companyId, userId, isOnlyPublished)
val allFiltered = if (filter.isEmpty)
all
else
all.filter(i => i.title.toLowerCase.contains(filter.toLowerCase))
allFiltered.length
}
def getIssuerBadge(certificateId: Int, liferayUserId: Int, rootUrl: String): BadgeResponse = {
val recipient = "sha256$" + hashEmail(userLocalServiceHelper.getUser(liferayUserId).getEmailAddress)
val issueOn = DateTime.now.toString("yyyy-MM-dd")
val identity = IdentityModel(recipient)
val badgeUrl = "%s/delegate/certificates/%s?action=GETBADGEMODEL&userID=%s&rootUrl=%s".format(
rootUrl,
certificateId,
liferayUserId,
rootUrl)
val verificationUrl = "%s/delegate/certificates/%s?action=GETISSUEBADGE&userID=%s&rootUrl=%s".format(
rootUrl,
certificateId,
liferayUserId,
rootUrl)
val verification = VerificationModel(url = verificationUrl)
BadgeResponse(certificateId.toString, identity, badgeUrl, verification, issueOn)
}
private def hashEmail(email: String) = {
val md = MessageDigest.getInstance("SHA-256")
md.update(email.getBytes)
HexHelper().toHexString(md.digest())
}
def getBadgeModel(certificateId: Int, rootUrl: String): BadgeModel = {
val certificate = certificateRepository.getById(certificateId)
val name = certificate.title.replaceAll("%20", " ")
val imageUrl = if (certificate.logo == "")
"%s/learn-portlet/img/certificate-default.jpg".format(rootUrl)
else
"%s/delegate/files/images?folderId=%s&file=%s".format(rootUrl, certificate.id, certificate.logo)
val description = certificate.shortDescription.replaceAll("%20", " ")
val issuerUrl = "%s/delegate/certificates/%s?action=GETISSUERMODEL&rootUrl=%s".format(
rootUrl,
certificateId,
rootUrl)
BadgeModel(name, description, imageUrl, rootUrl, issuerUrl)
}
def getIssuerModel(rootUrl: String): IssuerModel = {
val issuerName = settingStorage
.getByKey(SettingType.IssuerName)
.getOrElse(model.EmptySetting(SettingType.IssuerName))
.value
val issuerUrl = settingStorage.getByKey(SettingType.IssuerURL)
.getOrElse(model.EmptySetting(SettingType.IssuerURL, rootUrl))
.value
val issuerEmail = settingStorage.getByKey(SettingType.IssuerEmail)
.getOrElse(model.EmptySetting(SettingType.IssuerEmail))
.value
IssuerModel(issuerName, issuerUrl, issuerEmail)
}
def getUsers(c: Certificate) = certificateToUserRepository.getBy(CertificateStateFilter(certificateId = Some(c.id))).map(p => (p.userJoinedDate, UserLocalServiceHelper().getUser(p.userId)))
def getUsersCount(c: Certificate) = certificateToUserRepository.getBy(CertificateStateFilter(certificateId = Some(c.id))).size
}
| ViLPy/Valamis | valamis-certificate/src/main/scala/com/arcusys/valamis/certificate/service/CertificateUserServiceImpl.scala | Scala | lgpl-3.0 | 13,661 |
/**
* FILE: HarvestResourceTest.scala
 * PATH: /Codice/sgad/servertier/src/test/scala/sgad/servertier/servertier/businesslogic/operations
 * CREATION DATE: 25 February 2014
 * AUTHOR: ProTech
 * EMAIL: [email protected]
 *
 * This file is the property of the ProTech group; it is released under the Apache v2 license.
 *
 * CHANGE LOG:
 * 2014-02-25 - Class created - Segantin Fabio
*/
import org.scalatest.FlatSpec
import scala.collection.mutable
import sgad.servertier.businesslogic.operations.HarvestResource
import sgad.servertier.dataaccess.data.shareddata._
import sgad.servertier.dataaccess.data.userdata._
/**
 * Test class for HarvestResource.
*/
class HarvestResourceTest extends FlatSpec {
var authentication = new AuthenticationData("uno", "due", "tre")
	// Create two UnitPossession instances and the map needed to build a UserData
val gold = new Resource("oro")
val chap = new `Unit`("fante", 15, 15, new Cost(10, Vector(new QuantityResource(gold, 200))), true)
val horse = new `Unit`("cavallo", 15, 15, new Cost(20, Vector(new QuantityResource(gold, 200))), true)
val chapUp = new UnitPossession(10, chap)
val knightUp = new UnitPossession(15, horse)
val mapUnitPossession = Map(chapUp.getKey -> chapUp, knightUp.getKey -> knightUp)
	// Create a BuildingPossession and the map needed to build the UserData
var timeNow = System.currentTimeMillis / 1000L
val quantityResource = Array[QuantityResource]()
var preconditions = Vector[BuildingWithLevel]()
var bonus = new Bonus("bonus1", 2, 3)
var potion = new Resource("pozione")
var quantityResourceVector = Vector(new QuantityResource(gold, 100), new QuantityResource(potion, 300))
var cost = new Cost(4, quantityResourceVector)
var productedResource1 = new ProductedResource(gold, 10, 100, 200)
var productedUnit1 = Vector[`Unit`]()
var productedUnit2 = new `Unit`("fante", 1, 3, new Cost(1, Vector(new QuantityResource(gold, 200))), true)
var buildingWithLevel1 = new BuildingWithLevel(true, bonus, cost, 2, "Torre", preconditions, null, productedUnit1, 2, false)
var buildingWithLevel2 = new BuildingWithLevel(true, bonus, cost, 2, "Miniera", preconditions, productedResource1, productedUnit1, 2, false)
var position = new Position(3, 8)
var position2 = new Position(1, 12)
var unitInProgress = new UnitInProgress(productedUnit2, timeNow, 500)
val buildingPossession1 = new BuildingPossession(buildingWithLevel1, position, false, timeNow, null)
val buildingPossession2 = new BuildingPossession(buildingWithLevel1, position2, true, timeNow, unitInProgress)
val buildingPossession3 = new BuildingPossession(buildingWithLevel2, position2, true, timeNow, null)
val mapBuildingPossession = mutable.HashMap(buildingPossession1.getKey -> buildingPossession1, buildingPossession2.getKey -> buildingPossession2, buildingPossession3.getKey -> buildingPossession3)
	// Create the map of owned resources for the user
val ownedPotions = new OwnedResource(potion, 100)
val ownedGold = new OwnedResource(gold, 200)
val mapResourcePoss: Map[String, OwnedResource] = Map(ownedGold.getKey -> ownedGold, ownedPotions.getKey -> ownedPotions)
val userData = new UserData(authentication, mapResourcePoss, mapBuildingPossession, mapUnitPossession)
"HarversResource" must "ritornare vero aggiornando le risorse se si può raccogliere" in {
val harvestRes1 = new HarvestResource
val past = (System.currentTimeMillis() / 1000L) - timeNow
if (past < 10) {
Thread.sleep((10 - past) * 1000)
}
assert(harvestRes1.execute(userData, "{key:" + buildingPossession3.getKey + ",authentication:471038b0dc530621b038559a5b840942b10b7623}", userAuthorization = true) contains "result:true")
assert(userData.getOwnedResource("oro").getQuantity == 200 + 100 * (((System.currentTimeMillis() / 1000L) - timeNow) / 10).toInt)
}
it must "ritornare falso se l'edificio non produce o se non è ancora costruito" in {
val harvestRes2 = new HarvestResource
assert(harvestRes2.execute(userData, "{key:" + buildingPossession1.getKey + ",authentication:471038b0dc530621b038559a5b840942b10b7623}", userAuthorization = true) == "{data:false, precondition:false, messages:[]}")
assert(harvestRes2.execute(userData, "{key:" + buildingPossession2.getKey + ",authentication:471038b0dc530621b038559a5b840942b10b7623}", userAuthorization = true) == "{data:false, precondition:false, messages:[]}")
}
it must "ritornare falso se non si riesce ad autenticare o ci sono dati sbagliati" in {
val harvestRes2 = new HarvestResource
assert(harvestRes2.execute(userData, "{key:pippo,authentication:471038b0dc530621b038559a5b840942b10b7623}", userAuthorization = true) == "{data:false, exception:true}")
assert(harvestRes2.execute(userData, "{key:" + buildingPossession1.getKey + ",authentication:42}", userAuthorization = true) == "{data:false, authentication:false}")
}
it must "raccogliere al massimo la quantità massima" in {
val harvestRes3 = new HarvestResource
userData.getOwnedResource("oro").setQuantity(0)
Thread.sleep(30000)
assert(harvestRes3.execute(userData, "{key:" + buildingPossession3.getKey + ",authentication:471038b0dc530621b038559a5b840942b10b7623}", userAuthorization = true) contains "result:true")
assert(userData.getOwnedResource("oro").getQuantity == 200)
assert(harvestRes3.execute(userData, "{key:" + buildingPossession3.getKey + ",authentication:471038b0dc530621b038559a5b840942b10b7623}", userAuthorization = true) contains "result:true")
assert(userData.getOwnedResource("oro").getQuantity < 250)
}
}
| protechunipd/SGAD | Codice/sgad/servertier/src/test/scala/sgad/servertier/businesslogic/operations/HarvestResourceTest.scala | Scala | apache-2.0 | 5,524 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.hortonworks.spark.sql.hive.llap.streaming.examples
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
/*
* A Hive Streaming example to ingest data from socket and push into hive table.
*
* Assumed HIVE table Schema:
* CREATE TABLE `streaming.web_sales`(
* `ws_sold_date_sk` int,
* `ws_sold_time_sk` int,
* `ws_ship_date_sk` int,
* `ws_item_sk` int,
* `ws_bill_customer_sk` int,
* `ws_bill_cdemo_sk` int,
* `ws_bill_hdemo_sk` int,
* `ws_bill_addr_sk` int,
* `ws_ship_customer_sk` int,
* `ws_ship_cdemo_sk` int,
* `ws_ship_hdemo_sk` int,
* `ws_ship_addr_sk` int,
* `ws_web_page_sk` int,
* `ws_web_site_sk` int,
* `ws_ship_mode_sk` int,
* `ws_warehouse_sk` int,
* `ws_promo_sk` int,
* `ws_order_number` int,
* `ws_quantity` int,
* `ws_wholesale_cost` float,
* `ws_list_price` float,
* `ws_sales_price` float,
* `ws_ext_discount_amt` float,
* `ws_ext_sales_price` float,
* `ws_ext_wholesale_cost` float,
* `ws_ext_list_price` float,
* `ws_ext_tax` float,
* `ws_coupon_amt` float,
* `ws_ext_ship_cost` float,
* `ws_net_paid` float,
* `ws_net_paid_inc_tax` float,
* `ws_net_paid_inc_ship` float,
* `ws_net_paid_inc_ship_tax` float,
* `ws_net_profit` float)
* PARTITIONED BY (
* `ws_sold_date` string)
* STORED AS ORC
* TBLPROPERTIES ('transactional'='true')
*/
case class Schema(ws_sold_date_sk: Int, ws_sold_time_sk: Int, ws_ship_date_sk: Int,
ws_item_sk: Int, ws_bill_customer_sk: Int, ws_bill_cdemo_sk: Int,
ws_bill_hdemo_sk: Int, ws_bill_addr_sk: Int, ws_ship_customer_sk: Int,
ws_ship_cdemo_sk: Int, ws_ship_hdemo_sk: Int, ws_ship_addr_sk: Int,
ws_web_page_sk: Int, ws_web_site_sk: Int, ws_ship_mode_sk: Int,
ws_warehouse_sk: Int, ws_promo_sk: Int, ws_order_number: Int,
ws_quantity: Int, ws_wholesale_cost: Float, ws_list_price: Float,
ws_sales_price: Float, ws_ext_discount_amt: Float, ws_ext_sales_price: Float,
ws_ext_wholesale_cost: Float, ws_ext_list_price: Float, ws_ext_tax: Float,
ws_coupon_amt: Float, ws_ext_ship_cost: Float, ws_net_paid: Float,
ws_net_paid_inc_tax: Float, ws_net_paid_inc_ship: Float,
ws_net_paid_inc_ship_tax: Float, ws_net_profit: Float, ws_sold_date: String)
object HiveStreamingExample {
def main(args: Array[String]): Unit = {
if (args.length != 3 && args.length != 5) {
// scalastyle:off println
System.err.println(s"Usage: HiveStreamingExample <socket host> <socket port>")
System.err.println(s"Usage: HiveStreamingExample " +
s"<socket host> <socket port> <database> <table>")
// scalastyle:on println
System.exit(1)
}
val host = args(0)
val port = args(1)
val metastoreUri = args(2)
val sparkConf = new SparkConf()
.set("spark.sql.streaming.checkpointLocation", "./checkpoint")
val sparkSession = SparkSession.builder()
.appName("HiveStreamingExample")
.config(sparkConf)
.enableHiveSupport()
.getOrCreate()
import sparkSession.implicits._
val socket = sparkSession.readStream
.format("socket")
.options(Map("host" -> host, "port" -> port))
.load()
.as[String]
val (dbName, tableName) = if (args.length == 5) {
(args(3), args(4))
} else {
(sparkConf.get("spark.datasource.hive.warehouse.dbname"),
sparkConf.get("spark.datasource.hive.warehouse.tablename"))
}
val writer =
socket.map { s =>
val x = s.split(",")
Schema(x(0).toInt, x(1).toInt, x(2).toInt, x(3).toInt, x(4).toInt, x(5).toInt,
x(6).toInt, x(7).toInt, x(8).toInt, x(9).toInt, x(10).toInt, x(11).toInt,
x(12).toInt, x(13).toInt, x(14).toInt, x(15).toInt, x(16).toInt, x(17).toInt,
x(18).toInt, x(19).toFloat, x(20).toFloat, x(21).toFloat, x(22).toFloat,
x(23).toFloat, x(24).toFloat, x(25).toFloat, x(26).toFloat, x(27).toFloat,
x(28).toFloat, x(29).toFloat, x(30).toFloat, x(31).toFloat, x(32).toFloat,
x(33).toFloat, x(34))
}
.writeStream
.format("com.hortonworks.spark.sql.hive.llap.streaming.HiveStreamingDataSource")
.option("metastoreUri", metastoreUri)
.option("database", dbName)
.option("table", tableName)
// before this, a new terminal that runs 'nc -l <port>' has to be started and
    // csv records for the web_sales table have to be pasted so that spark streaming
    // can read the rows from nc and pass them on to the hive data source
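    // One possible way to launch this example (the jar name, master, port and metastore URI
    // below are placeholders/assumptions for illustration, not values from this project):
    //   spark-submit --master local[2] \
    //     --class com.hortonworks.spark.sql.hive.llap.streaming.examples.HiveStreamingExample \
    //     <assembly jar> localhost 9999 thrift://<metastore host>:9083 streaming web_sales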
val query = writer.start()
query.awaitTermination()
query.stop()
sparkSession.stop()
}
}
| hortonworks-spark/spark-llap | src/main/scala/com/hortonworks/spark/sql/hive/llap/streaming/examples/HiveStreamingExample.scala | Scala | apache-2.0 | 5,538 |
package converter.client.logger
import scala.annotation.elidable
import scala.annotation.elidable._
trait Logger {
/*
* Use @elidable annotation to completely exclude functions from the compiler generated byte-code based on
* the specified level. In a production build most logging functions will simply disappear with no runtime
* performance penalty.
*
* Specify level as a compiler parameter
* > scalac -Xelide-below INFO
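   *
   * Illustrative usage (assuming a logger obtained from the LoggerFactory in this file):
   *   val log = LoggerFactory.getLogger("converter")
   *   log.debug("elided from the byte-code when built with -Xelide-below INFO")
   *   log.warn("kept at WARNING and above")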
*/
@elidable(FINEST) def trace(msg: String, e: Exception): Unit
@elidable(FINEST) def trace(msg: String): Unit
@elidable(FINE) def debug(msg: String, e: Exception): Unit
@elidable(FINE) def debug(msg: String): Unit
@elidable(INFO) def info(msg: String, e: Exception): Unit
@elidable(INFO) def info(msg: String): Unit
@elidable(WARNING) def warn(msg: String, e: Exception): Unit
@elidable(WARNING) def warn(msg: String): Unit
@elidable(SEVERE) def error(msg: String, e: Exception): Unit
@elidable(SEVERE) def error(msg: String): Unit
@elidable(SEVERE) def fatal(msg: String, e: Exception): Unit
@elidable(SEVERE) def fatal(msg: String): Unit
def enableServerLogging(url: String): Unit
def disableServerLogging(): Unit
}
object LoggerFactory {
private[logger] def createLogger(name: String) = {}
lazy val consoleAppender = new BrowserConsoleAppender
lazy val popupAppender = new PopUpAppender
/**
* Create a logger that outputs to browser console
*/
def getLogger(name: String): Logger = {
//val nativeLogger = Log4JavaScript.log4javascript.getLogger(name)
val nativeLogger = Log4JavaScript.getLogger(name)
nativeLogger.addAppender(consoleAppender)
new L4JSLogger(nativeLogger)
}
/**
* Create a logger that outputs to a separate popup window
*/
def getPopUpLogger(name: String): Logger = {
// val nativeLogger = Log4JavaScript.log4javascript.getLogger(name)
val nativeLogger = Log4JavaScript.getLogger(name)
nativeLogger.addAppender(popupAppender)
new L4JSLogger(nativeLogger)
}
}
| aparo/scalajs-converter | client/src/main/scala/converter/client/logger/LoggerFactory.scala | Scala | apache-2.0 | 2,030 |
package monocle
import monocle.macros.{GenLens, Lenses}
import org.specs2.execute.AnyValueAsResult
import org.specs2.scalaz.Spec
import shapeless.test.illTyped
class LensExample extends Spec {
"Lens for monomorphic case class fields" should {
// @Lenses generate Lens automatically in the companion object
@Lenses case class Address(streetNumber: Int, streetName: String)
@Lenses case class Person(name: String, age: Int, address: Address)
object Manual { // Lens created manually (i.e. without macro)
val _name = Lens[Person, String](_.name)(n => p => p.copy(name = n))
val _age = Lens[Person, Int](_.age)(a => p => p.copy(age = a))
val _address = Lens[Person, Address](_.address)(a => p => p.copy(address = a))
val _streetNumber = Lens[Address, Int](_.streetNumber)(n => a => a.copy(streetNumber = n))
}
object Semi { // Lens generated semi automatically using GenLens macro
val name = GenLens[Person](_.name)
val age = GenLens[Person](_.age)
val address = GenLens[Person](_.address)
val streetNumber = GenLens[Address](_.streetNumber)
}
val john = Person("John", 30, Address(126, "High Street"))
"get" in {
Manual._name.get(john) ==== "John"
Semi.name.get(john) ==== "John"
Person.name.get(john) ==== "John"
}
"set" in {
val changedJohn = john.copy(age = 45)
Manual._age.set(45)(john) ==== changedJohn
Semi.age.set(45)(john) ==== changedJohn
Person.age.set(45)(john) ==== changedJohn
}
"compose" in {
(Manual._address composeLens Manual._streetNumber).get(john) ==== 126
(Semi.address composeLens Semi.streetNumber).get(john) ==== 126
(Person.address composeLens Address.streetNumber).get(john) ==== 126
}
@Lenses("_") // this generates lenses prefixed with _ in the Cat companion object
case class Cat(age: Int)
val alpha = Cat(2)
"@Lenses takes an optional prefix string" in {
Cat._age.get(alpha) ==== 2
}
}
"Lens for polymorphic case class fields" should {
@Lenses case class Foo[A,B](q: Map[(A,B),Double], default: Double)
object Manual { // Lens created manually (i.e. without macro)
def q[A,B] = Lens((_: Foo[A,B]).q)(q => f => f.copy(q = q))
def default[A,B] = Lens((_: Foo[A,B]).default)(d => f => f.copy(default = d))
}
object Semi { // Lens generated semi automatically using GenLens macro
def q[A,B] = GenLens[Foo[A,B]](_.q)
def default[A,B] = GenLens[Foo[A,B]](_.default)
}
val candyTrade = Foo(Map[(Int,Symbol),Double]((0,'Buy) -> -3.0, (12,'Sell) -> 7), 0.0)
"get" in {
Manual.default.get(candyTrade) ==== 0.0
Semi.default.get(candyTrade) ==== 0.0
Foo.default.get(candyTrade) ==== 0.0
}
"set" in {
val changedTrade = candyTrade.copy(q = candyTrade.q.updated((0,'Buy), -2.0))
Foo.q.modify((_: Map[(Int,Symbol),Double]).updated((0,'Buy), -2.0))(candyTrade) ==== changedTrade
}
}
"Modifications through lenses are chainable" in {
@Lenses case class Point(x: Int, y: Int)
import Point._
val update = x.modify(_ + 100) compose y.set(7)
update(Point(1,2)) ==== Point(101,7)
}
"@Lenses is for case classes only" in {
new AnyValueAsResult[Unit].asResult(
illTyped("""@Lenses class C""", "Invalid annotation target: must be a case class")
)
}
}
| CapeSepias/Monocle | example/src/test/scala/monocle/LensExample.scala | Scala | mit | 3,439 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.admin
import kafka.server.DynamicConfig.Broker._
import kafka.server.KafkaConfig._
import org.apache.kafka.common.errors.{InvalidReplicaAssignmentException, InvalidReplicationFactorException, InvalidTopicException, TopicExistsException}
import org.apache.kafka.common.metrics.Quota
import org.easymock.EasyMock
import org.junit.Assert._
import org.junit.{After, Test}
import java.util.Properties
import kafka.utils._
import kafka.log._
import kafka.zk.ZooKeeperTestHarness
import kafka.utils.{Logging, TestUtils, ZkUtils}
import kafka.common.TopicAndPartition
import kafka.server.{ConfigType, KafkaConfig, KafkaServer}
import java.io.File
import java.util
import java.util.concurrent.LinkedBlockingQueue
import kafka.utils.TestUtils._
import kafka.admin.AdminUtils._
import scala.collection.{Map, Set, immutable}
import kafka.utils.CoreUtils._
import org.apache.kafka.common.TopicPartition
import scala.collection.JavaConverters._
import scala.util.Try
class AdminTest extends ZooKeeperTestHarness with Logging with RackAwareTest {
var servers: Seq[KafkaServer] = Seq()
@After
override def tearDown() {
TestUtils.shutdownServers(servers)
super.tearDown()
}
@Test
def testReplicaAssignment() {
val brokerMetadatas = (0 to 4).map(new BrokerMetadata(_, None))
// test 0 replication factor
intercept[InvalidReplicationFactorException] {
AdminUtils.assignReplicasToBrokers(brokerMetadatas, 10, 0)
}
// test wrong replication factor
intercept[InvalidReplicationFactorException] {
AdminUtils.assignReplicasToBrokers(brokerMetadatas, 10, 6)
}
// correct assignment
val expectedAssignment = Map(
0 -> List(0, 1, 2),
1 -> List(1, 2, 3),
2 -> List(2, 3, 4),
3 -> List(3, 4, 0),
4 -> List(4, 0, 1),
5 -> List(0, 2, 3),
6 -> List(1, 3, 4),
7 -> List(2, 4, 0),
8 -> List(3, 0, 1),
9 -> List(4, 1, 2))
val actualAssignment = AdminUtils.assignReplicasToBrokers(brokerMetadatas, 10, 3, 0)
assertEquals(expectedAssignment, actualAssignment)
}
@Test
def testManualReplicaAssignment() {
val brokers = List(0, 1, 2, 3, 4)
TestUtils.createBrokersInZk(zkUtils, brokers)
// duplicate brokers
intercept[InvalidReplicaAssignmentException] {
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, "test", Map(0->Seq(0,0)))
}
// inconsistent replication factor
intercept[InvalidReplicaAssignmentException] {
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, "test", Map(0->Seq(0,1), 1->Seq(0)))
}
// good assignment
val assignment = Map(0 -> List(0, 1, 2),
1 -> List(1, 2, 3))
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, "test", assignment)
val found = zkUtils.getPartitionAssignmentForTopics(Seq("test"))
assertEquals(assignment, found("test"))
}
@Test
def testTopicCreationInZK() {
val expectedReplicaAssignment = Map(
0 -> List(0, 1, 2),
1 -> List(1, 2, 3),
2 -> List(2, 3, 4),
3 -> List(3, 4, 0),
4 -> List(4, 0, 1),
5 -> List(0, 2, 3),
6 -> List(1, 3, 4),
7 -> List(2, 4, 0),
8 -> List(3, 0, 1),
9 -> List(4, 1, 2),
10 -> List(1, 2, 3),
11 -> List(1, 3, 4)
)
val leaderForPartitionMap = immutable.Map(
0 -> 0,
1 -> 1,
2 -> 2,
3 -> 3,
4 -> 4,
5 -> 0,
6 -> 1,
7 -> 2,
8 -> 3,
9 -> 4,
10 -> 1,
11 -> 1
)
val topic = "test"
TestUtils.createBrokersInZk(zkUtils, List(0, 1, 2, 3, 4))
// create the topic
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
// create leaders for all partitions
TestUtils.makeLeaderForPartition(zkUtils, topic, leaderForPartitionMap, 1)
val actualReplicaList = leaderForPartitionMap.keys.toArray.map(p => p -> zkUtils.getReplicasForPartition(topic, p)).toMap
assertEquals(expectedReplicaAssignment.size, actualReplicaList.size)
for(i <- 0 until actualReplicaList.size)
assertEquals(expectedReplicaAssignment.get(i).get, actualReplicaList(i))
intercept[TopicExistsException] {
// shouldn't be able to create a topic that already exists
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
}
}
@Test
def testTopicCreationWithCollision() {
val topic = "test.topic"
val collidingTopic = "test_topic"
TestUtils.createBrokersInZk(zkUtils, List(0, 1, 2, 3, 4))
// create the topic
AdminUtils.createTopic(zkUtils, topic, 3, 1)
intercept[InvalidTopicException] {
// shouldn't be able to create a topic that collides
AdminUtils.createTopic(zkUtils, collidingTopic, 3, 1)
}
}
@Test
def testConcurrentTopicCreation() {
val topic = "test.topic"
// simulate the ZK interactions that can happen when a topic is concurrently created by multiple processes
val zkMock = EasyMock.createNiceMock(classOf[ZkUtils])
EasyMock.expect(zkMock.pathExists(s"/brokers/topics/$topic")).andReturn(false)
EasyMock.expect(zkMock.getAllTopics).andReturn(Seq("some.topic", topic, "some.other.topic"))
EasyMock.replay(zkMock)
intercept[TopicExistsException] {
AdminUtils.validateCreateOrUpdateTopic(zkMock, topic, Map.empty, new Properties, update = false)
}
}
private def getBrokersWithPartitionDir(servers: Iterable[KafkaServer], topic: String, partitionId: Int): Set[Int] = {
servers.filter(server => new File(server.config.logDirs.head, topic + "-" + partitionId).exists)
.map(_.config.brokerId)
.toSet
}
@Test
def testPartitionReassignmentWithLeaderInNewReplicas() {
val expectedReplicaAssignment = Map(0 -> List(0, 1, 2))
val topic = "test"
// create brokers
servers = TestUtils.createBrokerConfigs(4, zkConnect, false).map(b => TestUtils.createServer(KafkaConfig.fromProps(b)))
// create the topic
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
// reassign partition 0
val newReplicas = Seq(0, 2, 3)
val partitionToBeReassigned = 0
val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
val reassignPartitionsCommand = new ReassignPartitionsCommand(zkUtils, None, Map(topicAndPartition -> newReplicas))
assertTrue("Partition reassignment attempt failed for [test, 0]", reassignPartitionsCommand.reassignPartitions())
// wait until reassignment is completed
TestUtils.waitUntilTrue(() => {
val partitionsBeingReassigned = zkUtils.getPartitionsBeingReassigned().mapValues(_.newReplicas)
ReassignPartitionsCommand.checkIfPartitionReassignmentSucceeded(zkUtils, topicAndPartition,
Map(topicAndPartition -> newReplicas), partitionsBeingReassigned) == ReassignmentCompleted
},
"Partition reassignment should complete")
val assignedReplicas = zkUtils.getReplicasForPartition(topic, partitionToBeReassigned)
// in sync replicas should not have any replica that is not in the new assigned replicas
checkForPhantomInSyncReplicas(zkUtils, topic, partitionToBeReassigned, assignedReplicas)
assertEquals("Partition should have been reassigned to 0, 2, 3", newReplicas, assignedReplicas)
ensureNoUnderReplicatedPartitions(zkUtils, topic, partitionToBeReassigned, assignedReplicas, servers)
TestUtils.waitUntilTrue(() => getBrokersWithPartitionDir(servers, topic, 0) == newReplicas.toSet,
"New replicas should exist on brokers")
}
@Test
def testPartitionReassignmentWithLeaderNotInNewReplicas() {
val expectedReplicaAssignment = Map(0 -> List(0, 1, 2))
val topic = "test"
// create brokers
servers = TestUtils.createBrokerConfigs(4, zkConnect, false).map(b => TestUtils.createServer(KafkaConfig.fromProps(b)))
// create the topic
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
// reassign partition 0
val newReplicas = Seq(1, 2, 3)
val partitionToBeReassigned = 0
val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
val reassignPartitionsCommand = new ReassignPartitionsCommand(zkUtils, None, Map(topicAndPartition -> newReplicas))
assertTrue("Partition reassignment failed for test, 0", reassignPartitionsCommand.reassignPartitions())
// wait until reassignment is completed
TestUtils.waitUntilTrue(() => {
val partitionsBeingReassigned = zkUtils.getPartitionsBeingReassigned().mapValues(_.newReplicas)
ReassignPartitionsCommand.checkIfPartitionReassignmentSucceeded(zkUtils, topicAndPartition,
Map(topicAndPartition -> newReplicas), partitionsBeingReassigned) == ReassignmentCompleted
},
"Partition reassignment should complete")
val assignedReplicas = zkUtils.getReplicasForPartition(topic, partitionToBeReassigned)
assertEquals("Partition should have been reassigned to 0, 2, 3", newReplicas, assignedReplicas)
checkForPhantomInSyncReplicas(zkUtils, topic, partitionToBeReassigned, assignedReplicas)
ensureNoUnderReplicatedPartitions(zkUtils, topic, partitionToBeReassigned, assignedReplicas, servers)
TestUtils.waitUntilTrue(() => getBrokersWithPartitionDir(servers, topic, 0) == newReplicas.toSet,
"New replicas should exist on brokers")
}
@Test
def testPartitionReassignmentNonOverlappingReplicas() {
val expectedReplicaAssignment = Map(0 -> List(0, 1))
val topic = "test"
// create brokers
servers = TestUtils.createBrokerConfigs(4, zkConnect, false).map(b => TestUtils.createServer(KafkaConfig.fromProps(b)))
// create the topic
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
// reassign partition 0
val newReplicas = Seq(2, 3)
val partitionToBeReassigned = 0
val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
val reassignPartitionsCommand = new ReassignPartitionsCommand(zkUtils, None, Map(topicAndPartition -> newReplicas))
assertTrue("Partition reassignment failed for test, 0", reassignPartitionsCommand.reassignPartitions())
// wait until reassignment is completed
TestUtils.waitUntilTrue(() => {
val partitionsBeingReassigned = zkUtils.getPartitionsBeingReassigned().mapValues(_.newReplicas)
ReassignPartitionsCommand.checkIfPartitionReassignmentSucceeded(zkUtils, topicAndPartition,
Map(topicAndPartition -> newReplicas), partitionsBeingReassigned) == ReassignmentCompleted
},
"Partition reassignment should complete")
val assignedReplicas = zkUtils.getReplicasForPartition(topic, partitionToBeReassigned)
assertEquals("Partition should have been reassigned to 2, 3", newReplicas, assignedReplicas)
checkForPhantomInSyncReplicas(zkUtils, topic, partitionToBeReassigned, assignedReplicas)
ensureNoUnderReplicatedPartitions(zkUtils, topic, partitionToBeReassigned, assignedReplicas, servers)
TestUtils.waitUntilTrue(() => getBrokersWithPartitionDir(servers, topic, 0) == newReplicas.toSet,
"New replicas should exist on brokers")
}
@Test
def testReassigningNonExistingPartition() {
val topic = "test"
// create brokers
servers = TestUtils.createBrokerConfigs(4, zkConnect, false).map(b => TestUtils.createServer(KafkaConfig.fromProps(b)))
// reassign partition 0
val newReplicas = Seq(2, 3)
val partitionToBeReassigned = 0
val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
val reassignPartitionsCommand = new ReassignPartitionsCommand(zkUtils, None, Map(topicAndPartition -> newReplicas))
assertFalse("Partition reassignment failed for test, 0", reassignPartitionsCommand.reassignPartitions())
val reassignedPartitions = zkUtils.getPartitionsBeingReassigned()
assertFalse("Partition should not be reassigned", reassignedPartitions.contains(topicAndPartition))
}
@Test
def testResumePartitionReassignmentThatWasCompleted() {
val expectedReplicaAssignment = Map(0 -> List(0, 1))
val topic = "test"
// create the topic
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
// put the partition in the reassigned path as well
// reassign partition 0
val newReplicas = Seq(0, 1)
val partitionToBeReassigned = 0
val topicAndPartition = TopicAndPartition(topic, partitionToBeReassigned)
val reassignPartitionsCommand = new ReassignPartitionsCommand(zkUtils, None, Map(topicAndPartition -> newReplicas))
reassignPartitionsCommand.reassignPartitions()
// create brokers
servers = TestUtils.createBrokerConfigs(2, zkConnect, false).map(b => TestUtils.createServer(KafkaConfig.fromProps(b)))
// wait until reassignment completes
TestUtils.waitUntilTrue(() => !checkIfReassignPartitionPathExists(zkUtils),
"Partition reassignment should complete")
val assignedReplicas = zkUtils.getReplicasForPartition(topic, partitionToBeReassigned)
assertEquals("Partition should have been reassigned to 0, 1", newReplicas, assignedReplicas)
checkForPhantomInSyncReplicas(zkUtils, topic, partitionToBeReassigned, assignedReplicas)
// ensure that there are no under replicated partitions
ensureNoUnderReplicatedPartitions(zkUtils, topic, partitionToBeReassigned, assignedReplicas, servers)
TestUtils.waitUntilTrue(() => getBrokersWithPartitionDir(servers, topic, 0) == newReplicas.toSet,
"New replicas should exist on brokers")
}
@Test
def testPreferredReplicaJsonData() {
// write preferred replica json data to zk path
val partitionsForPreferredReplicaElection = Set(TopicAndPartition("test", 1), TopicAndPartition("test2", 1))
PreferredReplicaLeaderElectionCommand.writePreferredReplicaElectionData(zkUtils, partitionsForPreferredReplicaElection)
// try to read it back and compare with what was written
val preferredReplicaElectionZkData = zkUtils.readData(ZkUtils.PreferredReplicaLeaderElectionPath)._1
val partitionsUndergoingPreferredReplicaElection =
PreferredReplicaLeaderElectionCommand.parsePreferredReplicaElectionData(preferredReplicaElectionZkData)
assertEquals("Preferred replica election ser-de failed", partitionsForPreferredReplicaElection,
partitionsUndergoingPreferredReplicaElection)
}
@Test
def testBasicPreferredReplicaElection() {
val expectedReplicaAssignment = Map(1 -> List(0, 1, 2))
val topic = "test"
val partition = 1
val preferredReplica = 0
// create brokers
val brokerRack = Map(0 -> "rack0", 1 -> "rack1", 2 -> "rack2")
val serverConfigs = TestUtils.createBrokerConfigs(3, zkConnect, false, rackInfo = brokerRack).map(KafkaConfig.fromProps)
// create the topic
AdminUtils.createOrUpdateTopicPartitionAssignmentPathInZK(zkUtils, topic, expectedReplicaAssignment)
servers = serverConfigs.reverseMap(s => TestUtils.createServer(s))
// broker 2 should be the leader since it was started first
val currentLeader = TestUtils.waitUntilLeaderIsElectedOrChanged(zkUtils, topic, partition, oldLeaderOpt = None)
// trigger preferred replica election
val preferredReplicaElection = new PreferredReplicaLeaderElectionCommand(zkUtils, Set(TopicAndPartition(topic, partition)))
preferredReplicaElection.moveLeaderToPreferredReplica()
val newLeader = TestUtils.waitUntilLeaderIsElectedOrChanged(zkUtils, topic, partition, oldLeaderOpt = Some(currentLeader))
assertEquals("Preferred replica election failed", preferredReplica, newLeader)
}
@Test
def testControlledShutdown() {
val expectedReplicaAssignment = Map(1 -> List(0, 1, 2))
val topic = "test"
val partition = 1
// create brokers
val serverConfigs = TestUtils.createBrokerConfigs(3, zkConnect, false).map(KafkaConfig.fromProps)
servers = serverConfigs.reverseMap(s => TestUtils.createServer(s))
// create the topic
TestUtils.createTopic(zkUtils, topic, partitionReplicaAssignment = expectedReplicaAssignment, servers = servers)
val controllerId = zkUtils.getController()
val controller = servers.find(p => p.config.brokerId == controllerId).get.kafkaController
val resultQueue = new LinkedBlockingQueue[Try[Set[TopicPartition]]]()
val controlledShutdownCallback = (controlledShutdownResult: Try[Set[TopicPartition]]) => resultQueue.put(controlledShutdownResult)
controller.controlledShutdown(2, controlledShutdownCallback)
var partitionsRemaining = resultQueue.take().get
var activeServers = servers.filter(s => s.config.brokerId != 2)
// wait for the update metadata request to trickle to the brokers
TestUtils.waitUntilTrue(() =>
activeServers.forall(_.apis.metadataCache.getPartitionInfo(topic,partition).get.basePartitionState.isr.size != 3),
"Topic test not created after timeout")
assertEquals(0, partitionsRemaining.size)
var partitionStateInfo = activeServers.head.apis.metadataCache.getPartitionInfo(topic,partition).get
var leaderAfterShutdown = partitionStateInfo.basePartitionState.leader
assertEquals(0, leaderAfterShutdown)
assertEquals(2, partitionStateInfo.basePartitionState.isr.size)
assertEquals(List(0,1), partitionStateInfo.basePartitionState.isr.asScala)
controller.controlledShutdown(1, controlledShutdownCallback)
partitionsRemaining = resultQueue.take().get
assertEquals(0, partitionsRemaining.size)
activeServers = servers.filter(s => s.config.brokerId == 0)
partitionStateInfo = activeServers.head.apis.metadataCache.getPartitionInfo(topic,partition).get
leaderAfterShutdown = partitionStateInfo.basePartitionState.leader
assertEquals(0, leaderAfterShutdown)
assertTrue(servers.forall(_.apis.metadataCache.getPartitionInfo(topic,partition).get.basePartitionState.leader == 0))
controller.controlledShutdown(0, controlledShutdownCallback)
partitionsRemaining = resultQueue.take().get
assertEquals(1, partitionsRemaining.size)
// leader doesn't change since all the replicas are shut down
assertTrue(servers.forall(_.apis.metadataCache.getPartitionInfo(topic,partition).get.basePartitionState.leader == 0))
}
/**
* This test creates a topic with a few config overrides and checks that the configs are applied to the new topic
* then changes the config and checks that the new values take effect.
*/
@Test
def testTopicConfigChange() {
val partitions = 3
val topic = "my-topic"
val server = TestUtils.createServer(KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, zkConnect)))
servers = Seq(server)
def makeConfig(messageSize: Int, retentionMs: Long, throttledLeaders: String, throttledFollowers: String) = {
val props = new Properties()
props.setProperty(LogConfig.MaxMessageBytesProp, messageSize.toString)
props.setProperty(LogConfig.RetentionMsProp, retentionMs.toString)
props.setProperty(LogConfig.LeaderReplicationThrottledReplicasProp, throttledLeaders)
props.setProperty(LogConfig.FollowerReplicationThrottledReplicasProp, throttledFollowers)
props
}
def checkConfig(messageSize: Int, retentionMs: Long, throttledLeaders: String, throttledFollowers: String, quotaManagerIsThrottled: Boolean) {
def checkList(actual: util.List[String], expected: String): Unit = {
assertNotNull(actual)
if (expected == "")
assertTrue(actual.isEmpty)
else
assertEquals(expected.split(",").toSeq, actual.asScala)
}
TestUtils.retry(10000) {
for (part <- 0 until partitions) {
val tp = new TopicPartition(topic, part)
val log = server.logManager.getLog(tp)
assertTrue(log.isDefined)
assertEquals(retentionMs, log.get.config.retentionMs)
assertEquals(messageSize, log.get.config.maxMessageSize)
checkList(log.get.config.LeaderReplicationThrottledReplicas, throttledLeaders)
checkList(log.get.config.FollowerReplicationThrottledReplicas, throttledFollowers)
assertEquals(quotaManagerIsThrottled, server.quotaManagers.leader.isThrottled(tp))
}
}
}
// create a topic with a few config overrides and check that they are applied
val maxMessageSize = 1024
val retentionMs = 1000 * 1000
AdminUtils.createTopic(server.zkUtils, topic, partitions, 1, makeConfig(maxMessageSize, retentionMs, "0:0,1:0,2:0", "0:1,1:1,2:1"))
//Standard topic configs will be propagated at topic creation time, but the quota manager will not have been updated.
checkConfig(maxMessageSize, retentionMs, "0:0,1:0,2:0", "0:1,1:1,2:1", false)
//Update dynamically and all properties should be applied
AdminUtils.changeTopicConfig(server.zkUtils, topic, makeConfig(maxMessageSize, retentionMs, "0:0,1:0,2:0", "0:1,1:1,2:1"))
checkConfig(maxMessageSize, retentionMs, "0:0,1:0,2:0", "0:1,1:1,2:1", true)
// now double the config values for the topic and check that it is applied
val newConfig = makeConfig(2 * maxMessageSize, 2 * retentionMs, "*", "*")
AdminUtils.changeTopicConfig(server.zkUtils, topic, makeConfig(2 * maxMessageSize, 2 * retentionMs, "*", "*"))
checkConfig(2 * maxMessageSize, 2 * retentionMs, "*", "*", quotaManagerIsThrottled = true)
// Verify that the same config can be read from ZK
val configInZk = AdminUtils.fetchEntityConfig(server.zkUtils, ConfigType.Topic, topic)
assertEquals(newConfig, configInZk)
//Now delete the config
AdminUtils.changeTopicConfig(server.zkUtils, topic, new Properties)
checkConfig(Defaults.MaxMessageSize, Defaults.RetentionMs, "", "", quotaManagerIsThrottled = false)
//Add config back
AdminUtils.changeTopicConfig(server.zkUtils, topic, makeConfig(maxMessageSize, retentionMs, "0:0,1:0,2:0", "0:1,1:1,2:1"))
checkConfig(maxMessageSize, retentionMs, "0:0,1:0,2:0", "0:1,1:1,2:1", quotaManagerIsThrottled = true)
//Now ensure updating to "" removes the throttled replica list also
AdminUtils.changeTopicConfig(server.zkUtils, topic, propsWith((LogConfig.FollowerReplicationThrottledReplicasProp, ""), (LogConfig.LeaderReplicationThrottledReplicasProp, "")))
checkConfig(Defaults.MaxMessageSize, Defaults.RetentionMs, "", "", quotaManagerIsThrottled = false)
}
@Test
def shouldPropagateDynamicBrokerConfigs() {
val brokerIds = Seq(0, 1, 2)
servers = createBrokerConfigs(3, zkConnect).map(fromProps).map(createServer(_))
def checkConfig(limit: Long) {
retry(10000) {
for (server <- servers) {
assertEquals("Leader Quota Manager was not updated", limit, server.quotaManagers.leader.upperBound)
assertEquals("Follower Quota Manager was not updated", limit, server.quotaManagers.follower.upperBound)
}
}
}
val limit: Long = 1000000
// Set the limit & check it is applied to the log
changeBrokerConfig(zkUtils, brokerIds, propsWith(
(LeaderReplicationThrottledRateProp, limit.toString),
(FollowerReplicationThrottledRateProp, limit.toString)))
checkConfig(limit)
// Now double the config values for the topic and check that it is applied
val newLimit = 2 * limit
changeBrokerConfig(zkUtils, brokerIds, propsWith(
(LeaderReplicationThrottledRateProp, newLimit.toString),
(FollowerReplicationThrottledRateProp, newLimit.toString)))
checkConfig(newLimit)
// Verify that the same config can be read from ZK
for (brokerId <- brokerIds) {
val configInZk = AdminUtils.fetchEntityConfig(servers(brokerId).zkUtils, ConfigType.Broker, brokerId.toString)
assertEquals(newLimit, configInZk.getProperty(LeaderReplicationThrottledRateProp).toInt)
assertEquals(newLimit, configInZk.getProperty(FollowerReplicationThrottledRateProp).toInt)
}
//Now delete the config
changeBrokerConfig(servers(0).zkUtils, brokerIds, new Properties)
checkConfig(DefaultReplicationThrottledRate)
}
/**
* This test simulates a client config change in ZK whose notification has been purged.
* Basically, it asserts that notifications are bootstrapped from ZK
*/
@Test
def testBootstrapClientIdConfig() {
val clientId = "my-client"
val props = new Properties()
props.setProperty("producer_byte_rate", "1000")
props.setProperty("consumer_byte_rate", "2000")
// Write config without notification to ZK.
val configMap = Map[String, String] ("producer_byte_rate" -> "1000", "consumer_byte_rate" -> "2000")
val map = Map("version" -> 1, "config" -> configMap)
zkUtils.updatePersistentPath(ZkUtils.getEntityConfigPath(ConfigType.Client, clientId), Json.encode(map))
val configInZk: Map[String, Properties] = AdminUtils.fetchAllEntityConfigs(zkUtils, ConfigType.Client)
assertEquals("Must have 1 overriden client config", 1, configInZk.size)
assertEquals(props, configInZk(clientId))
// Test that the existing clientId overrides are read
val server = TestUtils.createServer(KafkaConfig.fromProps(TestUtils.createBrokerConfig(0, zkConnect)))
servers = Seq(server)
assertEquals(new Quota(1000, true), server.apis.quotas.produce.quota("ANONYMOUS", clientId))
assertEquals(new Quota(2000, true), server.apis.quotas.fetch.quota("ANONYMOUS", clientId))
}
@Test
def testGetBrokerMetadatas() {
// broker 4 has no rack information
val brokerList = 0 to 5
val rackInfo = Map(0 -> "rack1", 1 -> "rack2", 2 -> "rack2", 3 -> "rack1", 5 -> "rack3")
val brokerMetadatas = toBrokerMetadata(rackInfo, brokersWithoutRack = brokerList.filterNot(rackInfo.keySet))
TestUtils.createBrokersInZk(brokerMetadatas, zkUtils)
val processedMetadatas1 = AdminUtils.getBrokerMetadatas(zkUtils, RackAwareMode.Disabled)
assertEquals(brokerList, processedMetadatas1.map(_.id))
assertEquals(List.fill(brokerList.size)(None), processedMetadatas1.map(_.rack))
val processedMetadatas2 = AdminUtils.getBrokerMetadatas(zkUtils, RackAwareMode.Safe)
assertEquals(brokerList, processedMetadatas2.map(_.id))
assertEquals(List.fill(brokerList.size)(None), processedMetadatas2.map(_.rack))
intercept[AdminOperationException] {
AdminUtils.getBrokerMetadatas(zkUtils, RackAwareMode.Enforced)
}
val partialList = List(0, 1, 2, 3, 5)
val processedMetadatas3 = AdminUtils.getBrokerMetadatas(zkUtils, RackAwareMode.Enforced, Some(partialList))
assertEquals(partialList, processedMetadatas3.map(_.id))
assertEquals(partialList.map(rackInfo), processedMetadatas3.flatMap(_.rack))
val numPartitions = 3
AdminUtils.createTopic(zkUtils, "foo", numPartitions, 2, rackAwareMode = RackAwareMode.Safe)
val assignment = zkUtils.getReplicaAssignmentForTopics(Seq("foo"))
assertEquals(numPartitions, assignment.size)
}
}
| themarkypantz/kafka | core/src/test/scala/unit/kafka/admin/AdminTest.scala | Scala | apache-2.0 | 27,920 |
/*
* Copyright 2014 Frugal Mechanic (http://frugalmechanic.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package fm.serializer
trait RawDeserializer[@specialized T] {
def deserializeRaw(input: RawInput): T
} | frugalmechanic/fm-serializer | src/main/scala/fm/serializer/RawDeserializer.scala | Scala | apache-2.0 | 732 |
package threesixty.algorithms
import org.scalatest.FunSpec
import threesixty.ProcessingMethods.TimeSelection.TimeSelection
import threesixty.data.{ProcessedData, TaggedDataPoint}
import threesixty.data.Data.Timestamp
import threesixty.data.tags._
class TimeSelectionTestSpec extends FunSpec {
describe("TimeSelection") {
describe("TimeSelection1") {
val aggregator = TimeSelection(new Timestamp(1), new Timestamp(2), Map("SomeID" -> "SomeID"))
describe("from (0,0) to (3, 3)") {
val sampleData = new ProcessedData("SomeID", List(
TaggedDataPoint(new Timestamp(0), 0.0, Set[Tag]()),
TaggedDataPoint(new Timestamp(1), 1.0, Set[Tag]()),
TaggedDataPoint(new Timestamp(2), 2.0, Set[Tag]()),
TaggedDataPoint(new Timestamp(3), 3.0, Set[Tag]())
))
it("should be (0,0.5), (2,2.5)") {
val expectedResult = Set(ProcessedData("SomeID", List(
TaggedDataPoint(new Timestamp(1), 1.0, Set[Tag]()),
TaggedDataPoint(new Timestamp(2), 2.0, Set[Tag]())
)))
assertResult(expectedResult) {
aggregator(sampleData)
}
}
}
}
}
}
| elordin/threesixty | src/test/scala/threesixty/algorithms/TimeSelectionTestSpec.scala | Scala | mit | 1,371 |
package io.argos.agent.sentinels
import java.lang.management.ManagementFactory
import io.argos.agent.SentinelConfiguration
import io.argos.agent.bean.CheckMetrics
import io.argos.agent.util.{HostnameProvider, OSBeanAccessor, WindowBuffer}
class LoadAverageSentinel(override val conf: SentinelConfiguration) extends Sentinel {
private lazy val threshold = conf.threshold
val wBuffer = new WindowBuffer[Double](conf.windowSize)
override def processProtocolElement: Receive = {
case CheckMetrics() => analyze
}
def analyze() : Unit = {
if (System.currentTimeMillis >= nextReact) {
val loadAvg = OSBeanAccessor.loadAvg
wBuffer.push(loadAvg)
if (log.isDebugEnabled) {
log.debug("LoadAvg=<{}>, threshold=<{}>", loadAvg.toString, threshold.toString)
}
if ( (conf.checkMean && !wBuffer.meanUnderThreshold(threshold, (x) => x))
|| (!conf.checkMean && !wBuffer.underThreshold(threshold, (x) => x))) {
react(loadAvg)
}
}
}
def react(loadAvg: Double): Unit = {
val message =
s"""Cassandra Node ${HostnameProvider.hostname} is overloaded.
|
|Current loadAvg : ${loadAvg}
|Threshold : ${threshold}
|
|Something wrong may append on this node...""".stripMargin
context.system.eventStream.publish(buildNotification(conf.messageHeader.map(h => h + " \\n\\n--####--\\n\\n" + message).getOrElse(message)))
updateNextReact()
}
}
| leleueri/argos | argos-agent/src/main/scala/io/argos/agent/sentinels/LoadAverageSentinel.scala | Scala | apache-2.0 | 1,476 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.http.ws.default
import play.api.libs.json.{Json, Writes}
import uk.gov.hmrc.http.{CorePut, HttpResponse, PutHttpTransport}
import uk.gov.hmrc.play.http.ws.{WSExecute, WSHttpResponse, WSRequestBuilder}
import scala.concurrent.{ExecutionContext, Future}
trait WSPut extends CorePut with PutHttpTransport with WSRequestBuilder with WSExecute {
override def doPut[A](
url: String,
body: A,
headers: Seq[(String, String)]
)(
implicit rds: Writes[A],
ec: ExecutionContext
): Future[HttpResponse] =
execute(buildRequest(url, headers).withBody(Json.toJson(body)), "PUT")
.map(WSHttpResponse.apply)
override def doPutString(
url: String,
body: String,
headers: Seq[(String, String)]
)(
implicit ec: ExecutionContext
): Future[HttpResponse] =
execute(buildRequest(url, headers).withBody(body), "PUT")
.map(WSHttpResponse.apply)
}
| hmrc/http-verbs | http-verbs-common/src/main/scala/uk/gov/hmrc/play/http/ws/default/WSPut.scala | Scala | apache-2.0 | 1,528 |
import sbt._
import Keys._
object MyDefaults {
lazy val settings =
Defaults.defaultSettings ++ Seq (
organization := "jeffmay.me",
version := Versions.mine,
scalaVersion := Versions.scala,
scalacOptions += "-feature",
scalacOptions += "-unchecked",
scalacOptions += "-deprecation",
resolvers ++= Seq(
Resolver.sbtPluginRepo("releases"),
Resolver.sonatypeRepo("snapshots"),
Resolver.sonatypeRepo("releases")
)
)
} | jeffmay/angular-play-multimodule-seed | project/MyDefaults.scala | Scala | apache-2.0 | 500 |
/*
* Copyright 2008 Sanjiv Sahayam
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.googlecode.pondskum.gui.swing.notifyer
import org.junit.Assert.assertThat
import notifyer._
import java.awt.Color
import org.hamcrest.core.IsEqual.equalTo
import org.junit.Test
import UsageColourChooserObject._
final class UsageColourChooserTest {
private val colourChooser = new UsageColourChooser
@Test def shouldReturnAGreenColourForARangeOfZeroToTwentyFivePercent { assertPercentageRangeWithColour(0, 25, greenColour) }
@Test def shouldReturnAYellowColourForARangeOfTwentySixToFifty { assertPercentageRangeWithColour(26, 50, yellowColour) }
@Test def shouldReturnAnOrangeColourForARangeOfFiftyOneToSeventyFive { assertPercentageRangeWithColour(51, 75, orangeColour) }
  @Test def shouldReturnADarkRedColourForARangeOfSeventySixToNinety { assertPercentageRangeWithColour(76, 90, darkRedColour) }
  @Test def shouldReturnABrightRedColourForARangeOfNinetyOneToHundred { assertPercentageRangeWithColour(91, 100, brightRedColour) }
private def assertPercentageRangeWithColour(min : Int, max : Int, expectedColor : Color) {
val randomPercentage = getRandomPercentage(min, max)
assertThat("failed with percentage of " + randomPercentage, expectedColor, equalTo(colourChooser getColor randomPercentage))
}
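  // Added illustrative helper so the snippet is self-contained: the original file's
  // implementation of getRandomPercentage is not shown here, so this sketch simply picks a
  // uniform random value in [min, max], which is all the assertions above require.
  private def getRandomPercentage(min: Int, max: Int): Int =
    min + scala.util.Random.nextInt(max - min + 1)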
} | ssanj/pondskum | src/test/scala/com/googlecode/pondskum/gui/swing/notifyer/UsageColourChooserTest.scala | Scala | apache-2.0 | 1,847 |
package scratchpad.monoid
import fpinscala.state.RNG
import fpinscala.testing.Prop._
import fpinscala.testing.{Gen, Prop}
object Main {
def main(args: Array[String]): Unit = {
// println("Try it")
// val result: Result = Monoid.monoidLaws(Monoid.intAddition, Gen.smallInt).run(100, 100, RNG.Simple(123))
// result match {
// case Passed => println("Succeeded")
// case Falsified(failCase, _) => println("Failed: " + failCase)
// }
val orderedList: IndexedSeq[Int] = IndexedSeq(1, 2, 2, 3, 4, 5, 6, 7, 10)
println(Monoid.isOrdered(orderedList))
val unorderedList = IndexedSeq(1, 2, 2, 3, 5, 4, 6, 7, 10)
println(Monoid.isOrdered(unorderedList))
}
}
object Monoid {
val intAddition: Monoid[Int] = new Monoid[Int] {
def op(a1: Int, a2: Int): Int = {
a1 + a2
}
def zero: Int = 0
}
val intMultiplication: Monoid[Int] = new Monoid[Int] {
def op(a1: Int, a2: Int): Int = {
a1 * a2
}
def zero: Int = 1
}
val booleanOr: Monoid[Boolean] = new Monoid[Boolean] {
def op(a1: Boolean, a2: Boolean): Boolean = {
a1 || a2
}
def zero: Boolean = false
}
val booleanAnd: Monoid[Boolean] = new Monoid[Boolean] {
def op(a1: Boolean, a2: Boolean): Boolean = {
a1 && a2
}
def zero: Boolean = true
}
def optionMonoid[A]: Monoid[Option[A]] = new Monoid[Option[A]]{
def op(a1: Option[A], a2: Option[A]): Option[A] = {
a1 orElse a2
}
def zero: Option[A] = None
}
def endoMonoid[A]: Monoid[A => A] = new Monoid[A => A] {
override def op(a1: A => A, a2: A => A): A => A = {
(a: A) => a2(a1(a))
}
override def zero: (A) => A = a => a
}
def productMonoid[A,B](a: Monoid[A], b: Monoid[B]): Monoid[(A,B)] = {
new Monoid[(A, B)] {
override def op(a1: (A, B), a2: (A, B)): (A, B) =
(a.op(a1._1, a2._1), b.op(a1._2, a2._2))
override def zero: (A, B) =
(a.zero, b.zero)
}
}
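  // Added illustrative sketch (not part of the original scratchpad): the product monoid
  // combines component-wise, pairing the two underlying operations.
  def productMonoidExample: (Int, Int) =
    productMonoid(intAddition, intMultiplication).op((1, 2), (3, 4)) // == (4, 8)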
def foldMap[A,B](as: List[A], m: Monoid[B])(f: A => B): B = {
as.foldLeft(m.zero)((coll: B, cur: A) => {
m.op(coll, f(cur))
})
}
def foldRight[A, B](as: List[A])(z: B)(f: (A, B) => B): B = {
val curried: (A) => (B) => B = f.curried
val endo: Monoid[B => B] = endoMonoid[B]
foldMap(as, endo)(curried)(z)
}
// We can get the dual of any monoid just by flipping the `op`.
def dual[A](m: Monoid[A]): Monoid[A] = new Monoid[A] {
def op(x: A, y: A): A = m.op(y, x)
val zero = m.zero
}
def foldLeft[A, B](list: List[A])(z: B)(f: (A, B) => B): B = {
val curried: (A) => (B) => B = f.curried
val flippedEndo: Monoid[B => B] = dual(endoMonoid[B])
foldMap(list, flippedEndo)(curried)(z)
}
def foldMapV[A,B](v: IndexedSeq[A], m: Monoid[B])(f: A => B): B = {
if (v.size == 0) {
m.zero
} else if (v.size == 1) {
f(v(0))
} else {
val (v1, v2) = v.splitAt(v.length / 2)
m.op(foldMapV(v1, m)(f), foldMapV(v2, m)(f))
}
}
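  // Added illustrative sketch (not part of the original scratchpad): foldMapV recursively
  // halves the input and combines the two halves with the monoid, so a balanced fold over
  // intAddition is just a sum.
  def foldMapVExample: Int =
    foldMapV(IndexedSeq(1, 2, 3), intAddition)(identity) // == 6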
type IsOrderedResult = Either[String, (Int, Int)]
def isOrdered(v: IndexedSeq[Int]): IsOrderedResult = {
val orderedMonoid = new Monoid[IsOrderedResult] {
override def op(a1: IsOrderedResult , a2: IsOrderedResult): IsOrderedResult = {
for {
range1 <- a1.right
range2 <- a2.right
result <- {
val firstSegmentEnd: Int = range1._2
val secondSegmentStart: Int = range2._1
if (firstSegmentEnd <= secondSegmentStart)
Right[String, (Int, Int)](range1._1, range2._2)
else
Left[String, (Int, Int)](firstSegmentEnd + " >= " + secondSegmentStart)
}.right
} yield result
}
override def zero: IsOrderedResult = Right((-1, -1))
}
foldMapV(v, orderedMonoid)(a => Right(a, a))
}
import fpinscala.parallelism.Nonblocking._
def par[A](m: Monoid[A]): Monoid[Par[A]] = new Monoid[Par[A]] {
override def op(a1: Par[A], a2: Par[A]): Par[A] = {
Par.map2(a1, a2)((a1v, a2v) => m.op(a1v, a2v))
}
override def zero: Par[A] = Par.unit(m.zero)
}
//Official version
// def parFoldMap[A,B](v: IndexedSeq[A], m: Monoid[B])(f: A => B): Par[B] =
// Par.parMap(v)(f).flatMap { bs =>
// foldMapV(bs, par(m))(b => Par.async(b))
// }
//I think this works too?
def parFoldMap[A,B](v: IndexedSeq[A], m: Monoid[B])(f: A => B): Par[B] = {
def go[A,B](v: IndexedSeq[A], m: Monoid[Par[B]])(f: A => B): Par[B] = {
if (v.size == 0) {
m.zero
} else if (v.size == 1) {
Par.delay(f(v(0)))
} else {
val (v1, v2) = v.splitAt(v.length / 2)
m.op(go(v1, m)(f), go(v2, m)(f))
}
}
go(v, par(m))(f)
}
def monoidLaws[A](m: Monoid[A], gen: Gen[A]): Prop = {
    val associativityProp = Prop.forAll(gen.listOfN(3))((s: List[A]) => {
      val (a1, a2, a3) = (s(0), s(1), s(2))
      m.op(m.op(a1, a2), a3) == m.op(a1, m.op(a2, a3))
    })
val zeroProp = Prop.forAll(gen)((a: A) => {
m.op(a, m.zero) == a
})
associativityProp && zeroProp
}
}
trait Monoid[A] {
def op(a1: A, a2: A): A
def zero: A
} | waxmittmann/fpinscala | answers/src/main/scala/scratchpad/done/Monoid.scala | Scala | mit | 5,112 |
import sbt._
import Keys._
import PlayProject._
object ApplicationBuild extends Build {
val appName = "tabledice"
val appVersion = "1.0-SNAPSHOT"
val appDependencies = Seq(
// Add your project dependencies here,
"org.mindrot" % "jbcrypt" % "0.3m",
"com.typesafe.akka" % "akka-actor" % "2.0",
"net.iharder" % "base64" % "2.3.8",
"postgresql" % "postgresql" % "9.1-901.jdbc4"
)
val main = PlayProject(appName, appVersion, appDependencies, mainLang = SCALA).settings(
// Add your own project settings here
)
}
| tommycli/tabledice | project/Build.scala | Scala | agpl-3.0 | 591 |
package phoenix.collection.immutable.trees
/**
* Created by Satya on 21/08/14.
*/
case class LeafBST[T <% Ordered[T]] (elem: T) extends BinarySearchTree[T] {
def value = Some(elem)
def left = EmptyBST
def right = EmptyBST
  def inorderTraversal = Seq(elem)
}
case object EmptyBST extends BinarySearchTree[Nothing] {
def value = None
def left = EmptyBST
def right = EmptyBST
def inorderTraversal = Seq()
}
case class RecursiveBinarySearchTree[T <% Ordered[T]] (elem: T, val left: BinarySearchTree[T], val right: BinarySearchTree[T]) extends BinarySearchTree[T] {
def value: Option[T] = Some(this.elem)
def inorderTraversal: Seq[T] = (if(left == EmptyBST) Seq() else left.inorderTraversal) ++ Seq(elem) ++ (if(right == EmptyBST) Seq() else right.inorderTraversal)
}
object RecursiveBinarySearchTree {
def buildFromSeq[T <% Ordered[T]](list: Seq[T]): BinarySearchTree[T] = list match {
case Seq() => EmptyBST
case _ => {
val (lt, rt) = list.splitAt(list.length/2)
val elem = rt.head
val left = buildFromSeq(lt)
val right = buildFromSeq(rt.tail)
RecursiveBinarySearchTree(elem, left, right)
}
}
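  // Added illustrative sketch: buildFromSeq appears to assume its input is already sorted,
  // since it always takes the middle element as the root without re-sorting. For example,
  // this yields a tree with root 2 and single-element subtrees holding 1 and 3.
  def buildFromSeqExample: BinarySearchTree[Int] = buildFromSeq(Seq(1, 2, 3))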
def build[T <% Ordered[T]](node: T, leftTree: BinarySearchTree[T], rightTree: BinarySearchTree[T]) = {
val l = if(leftTree == EmptyBST) Nil else leftTree.inorderTraversal
val r = if(rightTree == EmptyBST) Nil else rightTree.inorderTraversal
def merge(xs: Seq[T], ys: Seq[T]): Seq[T] = (xs, ys) match {
case (Nil, ys) => ys
case (xs, Nil) => xs
case (x::xs1, y::ys1) => {
if(x < y) x +: merge(xs1, ys)
else y +: merge(xs, ys1)
}
}
val mergedSeq = merge(merge(l, r), Seq(node))
val (lt, rt) = mergedSeq.splitAt(mergedSeq.length/2)
val left = buildFromSeq(lt)
val right = buildFromSeq(rt.tail)
RecursiveBinarySearchTree(rt.head, left, right)
}
} | Satyapr/data-structures-and-algorithms | Scala/src/phoenix/collection/immutable/trees/RecursiveBinarySearchTreee.scala | Scala | bsd-2-clause | 1,883 |
package scala.tools.nsc
import java.io.IOException
import java.nio.file.attribute.BasicFileAttributes
import java.nio.file.{FileVisitResult, Files, Path, SimpleFileVisitor}
import org.junit.{After, Before, Test}
import scala.jdk.CollectionConverters._
import FileUtils._
import scala.tools.nsc.PipelineMain._
import scala.tools.nsc.reporters.{ConsoleReporter, StoreReporter}
class PipelineMainTest {
private var base: Path = _
// Enables verbose output to console to help understand what the test is doing.
private val debug = false
private var deleteBaseAfterTest = true
@Before def before(): Unit = {
base = Files.createTempDirectory("pipelineBase")
}
@After def after(): Unit = {
if (base != null && !debug && deleteBaseAfterTest) {
deleteRecursive(base)
}
}
private def projectsBase = createDir(base, "projects")
@Test def pipelineMainBuildsSeparate(): Unit = {
check(allBuilds.map(_.projects))
}
@Test def pipelineMainBuildsCombined(): Unit = {
check(List(allBuilds.flatMap(_.projects)))
}
@Test def pipelineMainBuildsJavaAccessor(): Unit = {
    // Tests the special case in Typer::canSkipRhs that makes outline typing descend into
    // method bodies which might give rise to super accessors
check(List(b5SuperAccessor.projects), altStrategies = List(OutlineTypePipeline))
}
private val pipelineSettings = PipelineMain.defaultSettings.copy(
useJars = true,
parallelism = java.lang.Runtime.getRuntime.availableProcessors,
cacheMacro = true,
cachePlugin = true,
stripExternalClassPath = true,
useTraditionalForLeaf = true,
createReporter = ((s: Settings) => if (debug) new ConsoleReporter(s) else new StoreReporter(s))
)
private def check(projectss: List[List[Build#Project]], altStrategies: List[BuildStrategy] = List(Pipeline, OutlineTypePipeline)): Unit = {
def build(strategy: BuildStrategy): Unit = {
for (projects <- projectss) {
val argsFiles = projects.map(_.argsFile(Nil, printArgs = debug))
val main = new PipelineMainClass(argsFiles, pipelineSettings.copy(strategy = strategy, logDir = Some(base.resolve(strategy.toString))))
assert(main.process())
}
}
build(Traditional)
val reference = snapshotClasses(Traditional)
clean()
for (strategy <- altStrategies) {
build(strategy)
val recompiled = snapshotClasses(strategy)
// Bytecode should be identical regardless of compilation strategy.
deleteBaseAfterTest = false
assertDirectorySame(reference, recompiled, strategy.toString)
deleteBaseAfterTest = true
}
}
private lazy val allBuilds = List(m1, b2, b3, b4, b5SuperAccessor)
// Build containing a macro definition and a reference to it from another internal subproject
private lazy val m1: Build = {
val build = new Build(projectsBase, "m1")
val macroProject = build.project("p1")
macroProject.withSource("m1/p1/Macro.scala")(
"""
|package m1.p1
|import reflect.macros.blackbox.Context, language.experimental._
|object Macro {
| def m: Unit = macro impl
| def impl(c: Context): c.Tree = {
| import c.universe._
| q"()"
| }
|}
""".stripMargin)
val internalMacroClient = build.project("internalMacroClient")
internalMacroClient.scalacOptions ++= List("-Ymacro-classpath", macroProject.out.toString)
internalMacroClient.classpath += macroProject.out
internalMacroClient.withSource("m2/p2/InternalClient.scala")(
"""
|package m1.p2
|class InternalClient { m1.p1.Macro.m }
""".stripMargin)
build
}
  // Build containing a reference to the external macro defined in `m1`
private lazy val b2: Build = {
val build = new Build(projectsBase, "b1")
val p1 = build.project("p1")
val m1P1 = m1.project("p1")
p1.classpath += m1P1.out
p1.scalacOptions ++= List("-Ymacro-classpath", m1P1.out.toString)
p1.withSource("b1/p1/ExternalClient.scala")(
"""
|package b2.p2
|class ExternalClient { m1.p1.Macro.m }
""".stripMargin)
build
}
// Build containing projects with mixed Java/Scala source files.
// PipelineMain pickles the API of jointly compiled .java files and
// places these on the classpath of downstream scalac invocations.
private lazy val b3: Build = {
val build = new Build(projectsBase, "b3")
val p1 = build.project("p1")
p1.withSource("b3/p1/JavaDefined.java")(
"""
|package b3.p1;
|public class JavaDefined<T> {
| ScalaJoint<T> id(T t) { return new ScalaJoint<T>(); }
|}
""".stripMargin)
p1.withSource("b3/p1/ScalaJoint.scala")(
"""
|package b3.p1
|class ScalaJoint[T] {
| def foo: Unit = new JavaDefined[String]
|}
""".stripMargin)
val p2 = build.project("p2")
p2.classpath += p1.out
p2.withSource("b3/p2/JavaClient.java")(
"""
|package b3.p2;
|public class JavaClient {
| b3.p1.JavaDefined<String> test() { return null; }
|}
""".stripMargin)
p2.withSource("b3/p2/ScalaClient.scala")(
"""
|package b3.p2
|class ScalaClient {
| def test(): b3.p1.JavaDefined[String] = null;
|}
""".stripMargin)
build
}
  // External version of `b3.p2`: references `b3.p1` from a separate build.
private lazy val b4: Build = {
val build = new Build(projectsBase, "b4")
val b3P1 = b3.project("p1")
val p2 = build.project("p2")
p2.classpath += b3P1.out
p2.withSource("b4/p2/JavaClient.java")(
"""
|package b4.p2;
|public class JavaClient {
| b3.p1.JavaDefined<String> test() { return null; }
|}
""".stripMargin)
p2.withSource("b4/p2/ScalaClient.scala")(
"""
|package b4.p2
|class ScalaClient {
| def test(): b3.p1.JavaDefined[String] = null;
|}
""".stripMargin)
build
}
// Build containing motivating test case for special handling of `Super` AST nodes
// in outline typechecking implementation.
private lazy val b5SuperAccessor: Build = {
val build = new Build(projectsBase, "b5")
val p1 = build.project("p1")
p1.withSource("b5/p1/JavaProtectedMethod.java")(
"""
|package b5.p1;
|public class JavaProtectedMethod {
| protected String foo() { return "JavaProtectedMethod.foo"; }
|}
""".stripMargin)
p1.withSource("b5/p1/NeedSuperAccessor.scala")(
"""
|package b5.p1
|trait NeedSuperAccessor extends JavaProtectedMethod {
| protected override def foo = "NeedSuperAccessor.foo"
| class Inner {
| def test: Any = {
| NeedSuperAccessor.super[JavaProtectedMethod].foo
| }
| }
|}
""".stripMargin)
val p2 = build.project("p2")
p2.classpath += p1.out
p2.withSource("b5/p2/ScalaSub.scala")(
"""
|package b5.p2
|class ScalaSub extends b5.p1.NeedSuperAccessor {
|}
""".stripMargin)
build
}
private def clean(): Unit = {
class CleanVisitor() extends SimpleFileVisitor[Path] {
override def preVisitDirectory(dir: Path, attrs: BasicFileAttributes): FileVisitResult = {
if (dir.getFileName.toString == "target") {
for (p <- Files.list(dir).iterator.asScala)
deleteRecursive(p)
FileVisitResult.SKIP_SUBTREE
} else super.preVisitDirectory(dir, attrs)
}
}
Files.walkFileTree(projectsBase, new CleanVisitor())
}
private def snapshotClasses(strategy: BuildStrategy): Path = {
val src = projectsBase
val dest = createDir(base, strategy.toString + "/classes")
class CopyVisitor(src: Path, dest: Path) extends SimpleFileVisitor[Path] {
override def preVisitDirectory(dir: Path, attrs: BasicFileAttributes): FileVisitResult = {
Files.createDirectories(dest.resolve(src.relativize(dir)))
super.preVisitDirectory(dir, attrs)
}
override def postVisitDirectory(dir: Path, exc: IOException): FileVisitResult = {
val destDir = dest.resolve(src.relativize(dir))
val listing = Files.list(destDir)
try {
if (!listing.iterator().hasNext)
Files.delete(destDir)
} finally {
listing.close()
}
super.postVisitDirectory(dir, exc)
}
override def visitFile(file: Path, attrs: BasicFileAttributes): FileVisitResult = {
Files.copy(file, dest.resolve(src.relativize(file)))
super.visitFile(file, attrs)
}
}
Files.walkFileTree(src, new CopyVisitor(src, dest))
dest
}
}
| lrytz/scala | test/junit/scala/tools/nsc/PipelineMainTest.scala | Scala | apache-2.0 | 8,719 |
package org.jetbrains.plugins.scala
package annotator.createFromUsage
import com.intellij.codeInsight.template.TemplateBuilder
import com.intellij.openapi.editor.Editor
import com.intellij.openapi.fileEditor.{FileEditorManager, OpenFileDescriptor}
import com.intellij.psi.util.PsiTreeUtil
import com.intellij.psi.{PsiClass, PsiElement}
import org.jetbrains.plugins.scala.codeInspection.collections.MethodRepr
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns._
import org.jetbrains.plugins.scala.lang.psi.api.base.types.{ScParameterizedTypeElement, ScSimpleTypeElement, ScTupleTypeElement}
import org.jetbrains.plugins.scala.lang.psi.api.base.{ScConstructor, ScReferenceElement}
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScExpression, ScReferenceExpression}
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScFunction
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScParameter, ScTypeParam}
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScTypedDefinition
import org.jetbrains.plugins.scala.lang.psi.types.api.{Any, ExtractClass}
import org.jetbrains.plugins.scala.lang.psi.types.result.TypingContext
import org.jetbrains.plugins.scala.lang.psi.types.{ScType, ScTypeExt}
import org.jetbrains.plugins.scala.lang.refactoring.namesSuggester.NameSuggester
import org.jetbrains.plugins.scala.util.TypeAnnotationUtil
/**
* Nikolay.Tropin
* 2014-07-31
*/
object CreateFromUsageUtil {
def uniqueNames(names: Seq[String]): List[String] = {
names.foldLeft(List[String]()) { (r, h) =>
(h #:: Stream.from(1).map(h + _)).find(!r.contains(_)).get :: r
}.reverse
}
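  // Added illustrative sketch: uniqueNames keeps the first occurrence of a name and suffixes
  // later duplicates with increasing integers.
  def uniqueNamesExample: List[String] =
    uniqueNames(Seq("x", "x", "y")) // == List("x", "x1", "y")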
def nameByType(tp: ScType): String = NameSuggester.suggestNamesByType(tp).headOption.getOrElse("value")
def nameAndTypeForArg(arg: PsiElement): (String, ScType) = {
implicit val project = arg.projectContext
arg match {
case ref: ScReferenceExpression => (ref.refName, ref.getType().getOrAny)
case expr: ScExpression =>
val tp = expr.getType().getOrAny
(nameByType(tp), tp)
case bp: ScBindingPattern if !bp.isWildcard => (bp.name, bp.getType(TypingContext.empty).getOrAny)
case p: ScPattern =>
val tp: ScType = p.getType(TypingContext.empty).getOrAny
(nameByType(tp), tp)
case _ => ("value", Any)
}
}
def paramsText(args: Seq[PsiElement]): String = {
val (names, types) = args.map(nameAndTypeForArg).unzip
(uniqueNames(names), types).zipped.map((name, tpe) => s"$name: ${tpe.canonicalText}").mkString("(", ", ", ")")
}
def parametersText(ref: ScReferenceElement): String = {
ref.getParent match {
case p: ScPattern =>
paramsText(patternArgs(p))
case MethodRepr(_, _, _, args) => paramsText(args) //for case class
case _ =>
val fromConstrArguments = PsiTreeUtil.getParentOfType(ref, classOf[ScConstructor]) match {
case ScConstructor(simple: ScSimpleTypeElement, args) if ref.getParent == simple => args
case ScConstructor(pt: ScParameterizedTypeElement, args) if ref.getParent == pt.typeElement => args
case _ => Seq.empty
}
fromConstrArguments.map(argList => paramsText(argList.exprs)).mkString
}
}
def patternArgs(pattern: ScPattern): Seq[ScPattern] = {
pattern match {
case cp: ScConstructorPattern => cp.args.patterns
case inf: ScInfixPattern => inf.leftPattern +: inf.rightPattern.toSeq
case _ => Seq.empty
}
}
def addParametersToTemplate(elem: PsiElement, builder: TemplateBuilder): Unit = {
elem.depthFirst().filterByType[ScParameter].foreach { parameter =>
val id = parameter.getNameIdentifier
builder.replaceElement(id, id.getText)
parameter.paramType.foreach { it =>
builder.replaceElement(it, it.getText)
}
}
}
def addTypeParametersToTemplate(elem: PsiElement, builder: TemplateBuilder): Unit = {
elem.depthFirst().filterByType[ScTypeParam].foreach { tp =>
builder.replaceElement(tp.nameId, tp.name)
}
}
def addQmarksToTemplate(elem: PsiElement, builder: TemplateBuilder): Unit = {
val Q_MARKS = "???"
elem.depthFirst().filterByType[ScReferenceExpression].filter(_.getText == Q_MARKS)
.foreach { qmarks =>
builder.replaceElement(qmarks, Q_MARKS)
}
}
def addUnapplyResultTypesToTemplate(fun: ScFunction, builder: TemplateBuilder): Unit = {
TypeAnnotationUtil.removeTypeAnnotationIfNeeded(fun)
fun.returnTypeElement match {
case Some(ScParameterizedTypeElement(_, Seq(tuple: ScTupleTypeElement))) => //Option[(A, B)]
tuple.components.foreach(te => builder.replaceElement(te, te.getText))
case Some(ScParameterizedTypeElement(_, args)) =>
args.foreach(te => builder.replaceElement(te, te.getText))
case _ =>
}
}
def positionCursor(element: PsiElement): Editor = {
val offset = element.getTextRange.getEndOffset
val project = element.getProject
val descriptor = new OpenFileDescriptor(project, element.getContainingFile.getVirtualFile, offset)
FileEditorManager.getInstance(project).openTextEditor(descriptor, true)
}
def unapplyMethodText(pattern: ScPattern): String = {
import pattern.projectContext
val pType = pattern.expectedType.getOrElse(Any)
val pName = nameByType(pType)
s"def unapply($pName: ${pType.canonicalText}): ${unapplyMethodTypeText(pattern)} = ???"
}
def unapplyMethodTypeText(pattern: ScPattern): String = {
val types = CreateFromUsageUtil.patternArgs(pattern).map(_.getType(TypingContext.empty).getOrAny)
val typesText = types.map(_.canonicalText).mkString(", ")
types.size match {
case 0 => "Boolean"
case 1 => s"Option[$typesText]"
case _ => s"Option[($typesText)]"
}
}
}
object InstanceOfClass {
def unapply(elem: PsiElement): Option[PsiClass] = {
elem match {
case ScExpression.Type(TypeAsClass(psiClass)) => Some(psiClass)
case ResolvesTo(typed: ScTypedDefinition) =>
typed.getType().toOption match {
case Some(TypeAsClass(psiClass)) => Some(psiClass)
case _ => None
}
case _ => None
}
}
}
object TypeAsClass {
def unapply(scType: ScType): Option[PsiClass] = scType match {
case ExtractClass(aClass) => Some(aClass)
case t: ScType => t.extractDesignatorSingleton.flatMap(_.extractClass)
case _ => None
}
}
| ilinum/intellij-scala | src/org/jetbrains/plugins/scala/annotator/createFromUsage/CreateFromUsageUtil.scala | Scala | apache-2.0 | 6,460 |
package com.twitter.finagle.filter
import com.twitter.finagle.Service
import com.twitter.util.{Future, Promise, Return}
import org.specs.SpecificationWithJUnit
import org.specs.mock.Mockito
class MaskCancelFilterSpec extends SpecificationWithJUnit with Mockito {
"MaskCancelFilter" should {
val service = mock[Service[Int, Int]]
service.close(any) returns Future.Done
val filter = new MaskCancelFilter[Int, Int]
val filtered = filter andThen service
val p = new Promise[Int] {
@volatile var interrupted: Option[Throwable] = None
setInterruptHandler { case exc => interrupted = Some(exc) }
}
service(1) returns p
val f = filtered(1)
there was one(service).apply(1)
"mask interrupts" in {
p.interrupted must beNone
f.raise(new Exception)
p.interrupted must beNone
}
"propagate results" in {
f.poll must beNone
p.setValue(123)
      f.poll must beSome(Return(123))
}
}
}
| firebase/finagle | finagle-core/src/test/scala/com/twitter/finagle/filter/MaskCancelFilterSpec.scala | Scala | apache-2.0 | 972 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.descriptors
import org.apache.flink.table.api.ValidationException
import org.apache.flink.table.descriptors.StatisticsValidator.{STATISTICS_COLUMNS, STATISTICS_PROPERTY_VERSION, STATISTICS_ROW_COUNT, validateColumnStats}
import org.apache.flink.table.plan.stats.ColumnStats
import org.apache.flink.table.util.JavaScalaConversionUtil.toScala
import scala.collection.mutable
/**
  * Validator for [[Statistics]].
*/
class StatisticsValidator extends DescriptorValidator {
override def validate(properties: DescriptorProperties): Unit = {
properties.validateInt(STATISTICS_PROPERTY_VERSION, true, 0, Integer.MAX_VALUE)
properties.validateLong(STATISTICS_ROW_COUNT, true, 0)
validateColumnStats(properties, STATISTICS_COLUMNS)
}
}
object StatisticsValidator {
val STATISTICS_PROPERTY_VERSION = "statistics.property-version"
val STATISTICS_ROW_COUNT = "statistics.row-count"
val STATISTICS_COLUMNS = "statistics.columns"
// per column properties
val NAME = "name"
val DISTINCT_COUNT = "distinct-count"
val NULL_COUNT = "null-count"
val AVG_LENGTH = "avg-length"
val MAX_LENGTH = "max-length"
val MAX_VALUE = "max-value"
val MIN_VALUE = "min-value"
// utilities
def normalizeColumnStats(columnStats: ColumnStats): Map[String, String] = {
val stats = mutable.HashMap[String, String]()
if (columnStats.ndv != null) {
stats += DISTINCT_COUNT -> columnStats.ndv.toString
}
if (columnStats.nullCount != null) {
stats += NULL_COUNT -> columnStats.nullCount.toString
}
if (columnStats.avgLen != null) {
stats += AVG_LENGTH -> columnStats.avgLen.toString
}
if (columnStats.maxLen != null) {
stats += MAX_LENGTH -> columnStats.maxLen.toString
}
if (columnStats.max != null) {
stats += MAX_VALUE -> columnStats.max.toString
}
if (columnStats.min != null) {
stats += MIN_VALUE -> columnStats.min.toString
}
stats.toMap
}
def validateColumnStats(properties: DescriptorProperties, key: String): Unit = {
// filter for number of columns
val columnCount = properties.getIndexedProperty(key, NAME).size
for (i <- 0 until columnCount) {
properties.validateString(s"$key.$i.$NAME", false, 1)
properties.validateLong(s"$key.$i.$DISTINCT_COUNT", true, 0L)
properties.validateLong(s"$key.$i.$NULL_COUNT", true, 0L)
properties.validateDouble(s"$key.$i.$AVG_LENGTH", true, 0.0)
properties.validateInt(s"$key.$i.$MAX_LENGTH", true, 0)
properties.validateDouble(s"$key.$i.$MAX_VALUE", true, 0.0)
properties.validateDouble(s"$key.$i.$MIN_VALUE", true, 0.0)
}
}
def readColumnStats(properties: DescriptorProperties, key: String): Map[String, ColumnStats] = {
// filter for number of columns
val columnCount = properties.getIndexedProperty(key, NAME).size
val stats = for (i <- 0 until columnCount) yield {
val name = toScala(properties.getOptionalString(s"$key.$i.$NAME")).getOrElse(
throw new ValidationException(s"Could not find name of property '$key.$i.$NAME'."))
val stats = ColumnStats(
properties.getOptionalLong(s"$key.$i.$DISTINCT_COUNT").orElse(null),
properties.getOptionalLong(s"$key.$i.$NULL_COUNT").orElse(null),
properties.getOptionalDouble(s"$key.$i.$AVG_LENGTH").orElse(null),
properties.getOptionalInt(s"$key.$i.$MAX_LENGTH").orElse(null),
properties.getOptionalDouble(s"$key.$i.$MAX_VALUE").orElse(null),
properties.getOptionalDouble(s"$key.$i.$MIN_VALUE").orElse(null)
)
name -> stats
}
stats.toMap
}
}
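  // Added illustrative sketch of the flattened key layout consumed by validateColumnStats and
  // readColumnStats; the key names follow the constants above, the values are made up:
  //   statistics.row-count = 1000
  //   statistics.columns.0.name = user_id
  //   statistics.columns.0.distinct-count = 42
  //   statistics.columns.0.null-count = 0
  //   statistics.columns.1.name = amount
  //   statistics.columns.1.min-value = 0.0
  //   statistics.columns.1.max-value = 100.0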
| mylog00/flink | flink-libraries/flink-table/src/main/scala/org/apache/flink/table/descriptors/StatisticsValidator.scala | Scala | apache-2.0 | 4,457 |
package com.softwaremill.macwire
package object tagging {
@deprecated("Use com.softwaremill.tagging instead")
type Tag[+U] = com.softwaremill.tagging.Tag[U]
@deprecated("Use com.softwaremill.tagging instead")
type @@[T, +U] = com.softwaremill.tagging.@@[T, U]
@deprecated("Use com.softwaremill.tagging instead")
type Tagged[T, +U] = com.softwaremill.tagging.Tagged[T, U]
implicit class Tagger[T](t: T) {
@deprecated("Use com.softwaremill.tagging instead")
def taggedWith[U]: T @@ U = new com.softwaremill.tagging.Tagger(t).taggedWith[U]
}
implicit class AndTagger[T, U](t: T @@ U) {
@deprecated("Use com.softwaremill.tagging instead")
def andTaggedWith[V]: T @@ (U with V) = new com.softwaremill.tagging.AndTagger[T, U](t).andTaggedWith[V]
}
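  // Added illustrative usage sketch of the deprecated aliases above (`UserId` is a
  // hypothetical marker trait, not part of this library):
  //   trait UserId
  //   val id: Long @@ UserId = 42L.taggedWith[UserId]
  //   val refined: Long @@ (UserId with Serializable) = id.andTaggedWith[Serializable]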
}
| adamw/macwire | util/src/main/scala/com/softwaremill/macwire/tagging/package.scala | Scala | apache-2.0 | 785 |
package unfiltered.netty
import unfiltered.Async
import unfiltered.response.{ ResponseFunction, HttpResponse, Pass }
import unfiltered.request.{ Charset, HttpRequest, POST, PUT, RequestContentType, & }
import io.netty.buffer.{ ByteBufInputStream, ByteBufOutputStream, Unpooled }
import io.netty.channel.{ ChannelFuture, ChannelFutureListener, ChannelHandlerContext }
import io.netty.handler.codec.http.{
DefaultHttpResponse, DefaultFullHttpResponse, HttpContent,
HttpHeaderValues, HttpHeaderNames, HttpMessage, HttpUtil,
HttpRequest => NettyHttpRequest, HttpResponse => NettyHttpResponse, HttpResponseStatus,
HttpVersion }
import io.netty.handler.ssl.SslHandler
import io.netty.util.{ CharsetUtil, ReferenceCountUtil }
import java.io.{ BufferedReader, ByteArrayOutputStream, InputStreamReader }
import java.net.{ InetSocketAddress, URLDecoder }
import java.nio.charset.{ Charset => JNIOCharset }
import scala.collection.JavaConverters._
object HttpConfig {
val DEFAULT_CHARSET = CharsetUtil.UTF_8.name()
}
object Content {
def unapply(msg: HttpMessage) =
msg match {
case has: HttpContent => Some(has)
case _ => None
}
}
class RequestBinding(msg: ReceivedMessage)
extends HttpRequest(msg) with Async.Responder[NettyHttpResponse] {
private[this] val req = msg.request
private[this] val content = msg.content
private[this] lazy val params = queryParams ++ bodyParams
private def queryParams = req.uri.split("\\\\?", 2) match {
case Array(_, qs) => URLParser.urldecode(qs)
case _ => Map.empty[String,Seq[String]]
}
private def bodyParams = (this, content) match {
case ((POST(_) | PUT(_)) & RequestContentType(ct), Some(content))
if ct.contains(HttpHeaderValues.APPLICATION_X_WWW_FORM_URLENCODED.toString) =>
URLParser.urldecode(content.content.toString(JNIOCharset.forName(charset)))
case _ =>
Map.empty[String,Seq[String]]
}
private def charset = Charset(this).getOrElse {
HttpConfig.DEFAULT_CHARSET
}
lazy val inputStream =
new ByteBufInputStream(content.map(_.content).getOrElse(Unpooled.EMPTY_BUFFER))
lazy val reader: BufferedReader =
new BufferedReader(new InputStreamReader(inputStream, charset))
def protocol = req.protocolVersion.text()
def method = req.method.toString.toUpperCase
// todo should we call URLDecoder.decode(uri, charset) on this here?
def uri = req.uri
def parameterNames = params.keySet.iterator
def parameterValues(param: String) = params.getOrElse(param, Seq.empty)
def headerNames = req.headers.names.iterator.asScala
def headers(name: String) = req.headers.getAll(name).iterator.asScala
def isSecure =
Option(msg.context.pipeline.get(classOf[SslHandler])).isDefined
def remoteAddr = msg.context.channel.remoteAddress.asInstanceOf[InetSocketAddress].getAddress.getHostAddress
def respond(rf: ResponseFunction[NettyHttpResponse]) =
underlying.respond(rf)
}
/** Extension of basic request binding to expose Netty-specific attributes */
case class ReceivedMessage(
request: NettyHttpRequest,
context: ChannelHandlerContext,
  message: java.lang.Object) { // todo: remove this. It's the same as request?
def content: Option[HttpContent] =
Content.unapply(request)
/** Binds a Netty HttpResponse res to Unfiltered's HttpResponse to apply any
* response function to it. */
def response[T <: NettyHttpResponse](res: T)(rf: ResponseFunction[T]) =
rf(new ResponseBinding(res)).underlying
/** @return a new Netty FullHttpResponse bound to an Unfiltered HttpResponse */
lazy val defaultResponse = response(new DefaultFullHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK))_
  /** @return a new partial Netty HttpResponse bound to an Unfiltered HttpResponse. */
lazy val partialResponse = response(new DefaultHttpResponse(HttpVersion.HTTP_1_1, HttpResponseStatus.OK))_
/** @return a ChannelFutureListener which releases the NettyHttpRequest of this message */
lazy val releaser = new ChannelFutureListener {
def operationComplete(f: ChannelFuture): Unit =
ReferenceCountUtil.release(request)
}
/** Applies rf to a new `defaultResponse` and writes it out */
def respond: (ResponseFunction[NettyHttpResponse] => Unit) = {
case Pass =>
context.fireChannelRead(request)
case rf =>
val keepAlive = HttpUtil.isKeepAlive(request)
lazy val closer = new unfiltered.response.Responder[NettyHttpResponse] {
def respond(res: HttpResponse[NettyHttpResponse]): Unit = {
res.outputStream.close() // close() triggers writing content to response body
(
if (keepAlive) {
val defaults = unfiltered.response.Connection(HttpHeaderValues.KEEP_ALIVE.toString)
res.underlying match {
case Content(has) =>
defaults ~> unfiltered.response.ContentLength(
has.content.readableBytes.toString)
case _ =>
defaults
}
} else unfiltered.response.Connection(HttpHeaderValues.CLOSE.toString)
)(res)
}
}
val future = context.channel.writeAndFlush(
defaultResponse(rf ~> closer)
).addListener(releaser)
if (!keepAlive)
future.addListener(ChannelFutureListener.CLOSE)
}
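  // Added illustrative usage sketch (`Ok` and `ResponseString` are unfiltered's standard
  // response combinators, assumed to be on the classpath):
  //   receivedMessage.respond(unfiltered.response.Ok ~> unfiltered.response.ResponseString("hello"))
  // Responding with Pass instead forwards the original request down the channel pipeline.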
}
/** An unfiltered response implementation backed by a netty http response.
* Note the type of netty HttpResponse determines whether or not the unfiltered
* response combinators can write to it. As a general rule of thumb, only netty
* FullHttpResponses may be written to by calling respond with a response writer */
class ResponseBinding[U <: NettyHttpResponse](res: U)
extends HttpResponse(res) {
/** available when serving non-chunked responses */
private[netty] lazy val content: Option[HttpContent] =
Content.unapply(res)
/** Relays to httpContent, if defined. Otherwise this stream goes nowhere */
private lazy val outStream =
content.map(httpContent =>
new ByteBufOutputStream(httpContent.content)
).getOrElse(new ByteArrayOutputStream)
def status(code: Int) =
res.setStatus(HttpResponseStatus.valueOf(code))
def status: Int =
res.status.code()
def header(name: String, value: String) =
res.headers.add(name, value)
def redirect(url: String) =
res.setStatus(HttpResponseStatus.FOUND).headers.add(HttpHeaderNames.LOCATION, url)
def outputStream = outStream
}
private [netty] object URLParser {
def urldecode(enc: String) : Map[String, Seq[String]] = {
def decode(raw: String) = URLDecoder.decode(raw, HttpConfig.DEFAULT_CHARSET)
val pairs = enc.split('&').flatMap {
_.split('=') match {
case Array(key, value) => List((decode(key), decode(value)))
case Array(key) if key != "" => List((decode(key), ""))
case _ => Nil
}
}.reverse
pairs.foldLeft(Map.empty[String, List[String]].withDefaultValue(Nil): Map[String, List[String]]) {
case (m, (k, v)) => m + (k -> (v :: m(k)))
}
}
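  // Added illustrative sketch: keys without '=' decode to an empty value and repeated keys
  // accumulate in order.
  def urldecodeExample: Map[String, Seq[String]] =
    urldecode("a=1&a=2&b") // == Map("a" -> List("1", "2"), "b" -> List(""))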
}
| unfiltered/unfiltered | netty/src/main/scala/bindings.scala | Scala | mit | 7,047 |
/* ____ __ ____ ____ ____,,___ ____ __ __ ____
* ( _ \\ /__\\ (_ )(_ _)( ___)/ __) ( _ \\( )( )( _ \\ Read
* ) / /(__)\\ / /_ _)(_ )__) \\__ \\ )___/ )(__)( ) _ < README.txt
* (_)\\_)(__)(__)(____)(____)(____)(___/ (__) (______)(____/ LICENSE.txt
*/
package razie.diesel.engine.nodes
import razie.ctrace
import razie.diesel.dom.RDOM._
import razie.diesel.dom._
import razie.diesel.engine.exec.EApplicable
import razie.diesel.engine.{AstKinds, DomAst, DomState, EGenerated}
import razie.diesel.expr.{BoolExpr, CExpr, DieselExprException, ECtx}
import razie.tconf.EPos
import razie.wiki.Enc
/**
* these nodes support conditions
*/
trait EConditioned {
def cond: Option[EIf]
/** test cond and remember in ast */
def test(ast: DomAst, cole: Option[MatchCollector] = None)(implicit ctx: ECtx) = {
val res = cond.fold(true)(_.test(ast, List.empty, cole))
ast.guard = if (res) DomState.GUARD_TRUE else DomState.GUARD_FALSE
res
}
}
trait HasTestResult {
var testResult: Option[String] = None
}
/** test - expect a message m. optional guard */
case class ExpectM(not: Boolean, m: EMatch) extends CanHtml with HasPosition with HasTestResult {
var when: Option[EMatch] = None
var pos: Option[EPos] = None
var target: Option[DomAst] = None // if target then applies only in that sub-tree, otherwise guessing scope
// todo implement the cond
def withPos(p: Option[EPos]) = {
this.pos = p;
this
}
// clone because the original is a spec, reused in many stories
def withTarget(p: Option[DomAst]) = {
val x = this.copy();
x.pos = this.pos;
x.when = this.when;
x.target = p;
x
}
override def toHtml = kspan("expect::") + " " + m.toHtml
override def toString = "expect:: " + m.toString
def withGuard(guard: EMatch) = {
this.when = Some(guard); this
}
def withGuard(guard: Option[EMatch]) = {
this.when = guard; this
}
/** check to match the arguments */
def sketch(cole: Option[MatchCollector] = None)(implicit ctx: ECtx): List[EMsg] = {
var e = EMsg(m.cls, m.met, sketchAttrs(m.attrs, cole), AstKinds.GENERATED)
e.pos = pos
// e.spec = destSpec
// count += 1
List(e)
}
}
// todo use an EMatch and combine with ExpectM - empty e/a
/** test - expect a value or more. optional guard */
case class ExpectV(not: Boolean, pm: MatchAttrs, cond: Option[EIf] = None) extends CanHtml with HasPosition with
HasTestResult {
var when: Option[EMatch] = None
var pos: Option[EPos] = None
var target: Option[DomAst] = None // if target then applies only in that sub-tree, otherwise guessing scope
def withPos(p: Option[EPos]) = {
this.pos = p;
this
}
// clone because the original is a spec, reused in many stories
def withTarget(p: Option[DomAst]) = {
val x = this.copy()
x.pos = this.pos // need to copy vars myself
x.when = this.when
x.target = p
x
}
def restoHtml = testResult.map { value =>
(if (value == "ok")
kspan(value, "success")
else if (value startsWith "fail")
kspan(value, "danger", Some(EPos.EMPTY), None, Some("error"))
else
kspan(value, "warning")
)
}.mkString
override def toHtml =
restoHtml + kspan("expect::") + (if (not) "NOT" else "") + " " + toHtmlMAttrs(pm) + cond.map(_.toHtml).mkString
override def toString =
"expect:: " + (if (not) "NOT" else "") + " " + pm.mkString("(", ",", ")") + cond.map(_.toHtml).mkString
def withGuard(guard: Option[EMatch]) = {
this.when = guard;
this
}
/** check to match the arguments */
def applicable(ast: DomAst, a: Attrs)(implicit ctx: ECtx) = {
val res = cond.fold(true)(_.test(ast, a, None))
ast.guard = if (res) DomState.GUARD_TRUE else DomState.GUARD_FALSE
res
}
/** check to match the arguments
*
* @param a results of previous tree/message or Nil for context-only matches
* @param cole collector for debugging info
* @param nodes nodes that were targeted, to reference in collectors
* @param ctx
*/
def test(ast: DomAst, a: Attrs, cole: Option[MatchCollector] = None, nodes: List[DomAst])(implicit ctx: ECtx) = {
val res = testMatchAttrs(a, pm, cole, Some({ p =>
// start a new collector for each value we're looking for, to mark this value
cole.foreach { c =>
nodes
.find(_.value.asInstanceOf[EVal].p.name == p.name)
.foreach(n => c.newMatch(n))
}
}), !not)
// we don't check the cond - it just doesn't apply
// && cond.fold(true)(_.test(a, cole))
ast.guard = if (res) DomState.GUARD_TRUE else DomState.GUARD_FALSE
res
}
/** check to match the arguments */
def sketch(cole: Option[MatchCollector] = None)(implicit ctx: ECtx): Attrs = {
sketchAttrs(pm, cole)
}
}
/** test - expect a value or more. optional guard */
case class ExpectAssert(not: Boolean, exprs: List[BoolExpr]) extends CanHtml with HasPosition with HasTestResult {
var when: Option[EMatch] = None
var pos: Option[EPos] = None
var target: Option[DomAst] = None // if target then applies only in that sub-tree, otherwise guessing scope
def withPos(p: Option[EPos]) = {
this.pos = p;
this
}
// clone because the original is a spec, reused in many stories
def withTarget(p: Option[DomAst]) = {
val x = this.copy(); x.target = p; x
}
override def toHtml = kspan(pos.mkString+"assert::") + " " + exprs.map(_.toDsl).mkString("(", ",", ")")
override def toString = "assert:: " + exprs.mkString("(", ",", ")")
def withGuard(guard: EMatch) = {
this.when = Some(guard); this
}
def withGuard(guard: Option[EMatch]) = {
this.when = guard; this
}
/** check to match the arguments */
def test(ast: DomAst, a: Attrs, cole: Option[MatchCollector] = None, nodes: List[DomAst])(implicit ctx: ECtx) = {
// todo collect from bapply
val res = exprs.foldLeft(true)((a, b) => a && b.bapply("").value)
ast.guard = if (res) DomState.GUARD_TRUE else DomState.GUARD_FALSE
res
// testA(a, pm, cole, Some({ p =>
// start a new collector to mark this value
// cole.foreach(c => nodes.find(_.value.asInstanceOf[EVal].p.name == p.name).foreach(n => c.newMatch(n)))
// }))
}
}
// a match case
case class EMatch(cls: String, met: String, attrs: MatchAttrs, cond: Option[EIf] = None) extends CanHtml {
// todo match also the object parms if any and method parms if any
/** test if this rule should apply
*
* @param fallback if this is looking for fallbacks or not
*/
def test(ast: DomAst, e: EMsg, cole: Option[MatchCollector] = None, fallback: Boolean = false)(implicit ctx: ECtx) = {
if (testEA(e, cole, fallback))
testAttrCond(ast, e, cole, fallback)
else false
}
/** test that the EA matches */
def testEA(e: EMsg, cole: Option[MatchCollector] = None, fallback:Boolean = false)(implicit ctx: ECtx) = {
if(cls.length == 0 || met.length == 0) {
regexm(cls+met, e.ea) // full match
} else if ("*" == cls || e.entity == cls || regexm(cls, e.entity)) {
cole.map(_.plus(e.entity))
if ("*" == met || e.met == met || regexm(met, e.met)) {
cole.map(_.plus(e.met))
true
} else {
false
}
} else false
}
/** test that the attrs and cond match */
def testAttrCond(ast: DomAst, e: EMsg, cole: Option[MatchCollector] = None, fallback: Boolean = false)(implicit
ctx: ECtx) = {
cole.map(_.plus(e.met))
val testedok = testMatchAttrs(e.attrs, attrs, cole)
val condok = if (testedok) true else cond.fold(true)(_.test(ast: DomAst, e.attrs, cole)) // respect bool shortcut
fallback || {
if (!(testedok && condok)) ctrace << s"...rule skipped attr/cond: $this"
testedok && condok
}
}
def ea:String = cls + "." + met
/** extract a message signature from the match */
def asMsg = EMsg(cls, met, attrs.map { p =>
// extract the sample value
val df = if (p.dflt.nonEmpty) p.dflt else p.expr match {
case Some(CExpr(e, _)) => e.toString
case _ => ""
}
P(p.name, df, p.ttype)
})
override def toHtml = ea(cls, met, "", true, AstKinds.GENERATED) + " " + toHtmlMAttrs(attrs) + cond.map(
_.toHtml).mkString
override def toString = cls + "." + met + " " + attrs.mkString("(", ",", ")") + cond.map(_.toString).mkString
/** simple signature */
def toCAString = cls + "." + met + " " + attrs.map(_.name).mkString("(", ",", ")")
}
/** just a call to next.
*
* This is used to wrap async spawns ==> and
* normal => when there's more than one (they start one at a time)
*
* @param msg the message wrapped / to be executed next
* @param arrow - how to call next: wait => or no wait ==> or spawn wait <=>
* @param cond optional condition for this step
* @param deferred
*/
case class ENextPas(msg: EMsgPas, arrow: String, cond: Option[EIf] = None, deferred: Boolean = false,
indentLevel: Int = 0)
extends CanHtml with EConditioned with EGenerated {
var parent: Option[EMsg] = None
var spec: Option[EMsg] = None
def withParent(p: EMsg) = {
this.parent = Some(p);
this
}
def withSpec(p: Option[EMsg]) = {
this.spec = p;
this
}
// todo match also the object parms if any and method parms if any
def evaluateMsg(implicit ctx: ECtx) = {
// if evaluation was deferred, do it
val m = if (deferred) {
EMap.sourcePasAttrs(msg.attrs)
} else msg
m
}
override def toHtml = (if (arrow != "-") arrow + " " else "") + msg.toHtml
override def toString = (if (arrow != "-") arrow + " " else "") + msg.toString
}
/** something that needs to decompose later, in another decomp cycle - the basis for async execution
*
* This is used to wrap async spawns ==>
* and normal => when there's more than one (they start one at a time) so then decomp is async
*
* @param msg the message wrapped / to be executed next
* @param arrow - how to call next: wait => or no wait ==>
* @param cond optional condition for this step
* @param deferred
* @param indentLevel if > 0 then this is part of a subtree
*/
case class ENext(msg: EMsg, arrow: String, cond: Option[EIf] = None, deferred: Boolean = false, indentLevel: Int = 0)
extends CanHtml with EConditioned with EGenerated {
var parent: Option[EMsg] = None
var spec: Option[EMsg] = None
def withParent(p: EMsg) = {
this.parent = Some(p);
this
}
def withSpec(p: Option[EMsg]) = {
this.spec = p;
this
}
def copiedFrom(other: ENext) = {
this.parent = other.parent
this.spec = other.spec
this
}
// todo match also the object parms if any and method parms if any
/** apply - evaluate the message if not already */
def evaluateMsgCall(implicit ctx: ECtx) = {
// if evaluation was deferred, do it
val m = if (deferred) {
parent.map { parent =>
msg
.copy(attrs = EMap.sourceAttrs(parent, msg.attrs, spec.map(_.attrs)))
.copiedFrom(msg)
} getOrElse {
msg // todo evaluate something here as well...
}
} else msg
m
}
override def toHtml = (if (arrow != "-") arrow + " " else "") + msg.toHtml
override def toString = (if (arrow != "-") arrow + " " else "") + msg.toString
}
/** $when - match and decomposition rule
*
* @param e the match that triggers this rule
* @param arch archetype or tags
* @param i the mappings to execute
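 *
 * Example (illustrative only): a story rule such as `$when home.guest_arrived(name) => lights.on`.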
*/
case class ERule(e: EMatch, arch:String, i: List[EMap]) extends CanHtml with EApplicable with HasPosition {
var pos: Option[EPos] = None
val isFallback = arch contains "fallback"
val isExclusive = arch contains "exclusive"
override def test(ast: DomAst, m: EMsg, cole: Option[MatchCollector] = None)(implicit ctx: ECtx) =
e.test(ast, m, cole)
/** after testing, apply this rule and decomp the message */
override def apply(in: EMsg, destSpec: Option[EMsg])(implicit ctx: ECtx): List[Any] = {
in.withRulePos(pos) // set my pos on decomposed msg - I must have matched it
i.flatMap(_.apply(in, destSpec, pos, false, arch))
}
override def toHtml =
span(arch + "::") + " " +
e.asMsg.hrefBtnGlobal +
s" ${e.toHtml} <br>${i.map(_.toHtml).mkString("<br>")} <br>"
override def toString = arch+":: " + e + " => " + i.mkString
}
// base for conditions
trait EIf extends CanHtml {
def test(ast: DomAst, e: Attrs, cole: Option[MatchCollector] = None)(implicit ctx: ECtx): Boolean
}
// a match condition
case class EIfm(attrs: MatchAttrs) extends CanHtml with EIf {
override def test(ast: DomAst, e: Attrs, cole: Option[MatchCollector] = None)(implicit ctx: ECtx) = {
val res = testMatchAttrs(e, attrs, cole)
ast.guard = if (res) DomState.GUARD_TRUE else DomState.GUARD_FALSE
res
}
override def toHtml = span("$ifm::") + attrs.mkString("<small>(", ", ", ")</small>")
override def toString = "$ifm " + attrs.mkString
}
// a match condition
case class EElse() extends CanHtml with EIf {
override def test(ast: DomAst, e: Attrs, cole: Option[MatchCollector] = None)(implicit ctx: ECtx) = {
if (ast != null) {
// must find last applicable IF in rule and then find out if it was applied and then NOT that
var lastIF: Option[DomAst] = None
var found = false
      // walk the previous siblings in order, remembering the most recent guarded ($if) node seen before this $else
ctx.root
.engine
.flatMap(_.findParent(ast))
.toList
.flatMap(_.children)
.foreach { e =>
if (!found) {
if (e.id == ast.id) {
found = true
} else {
if (e.guard != DomState.GUARD_NONE) {
lastIF = Some(e)
}
}
}
}
if (lastIF.isDefined) {
lastIF.get.guard == DomState.GUARD_FALSE
} else {
// should I ignore silently?
        throw new DieselExprException("$else found no preceding $if")
}
} else {
false
}
}
override def toHtml = span("$else::")
override def toString = "$else "
}
/** an expression condition */
case class EIfc(cond: BoolExpr) extends CanHtml with EIf {
override def test(ast: DomAst, e: Attrs, cole: Option[MatchCollector] = None)(implicit ctx: ECtx) =
// todo collect from bapply
cond.bapply("").value
override def toHtml = span("$ifc::") + cond.toDsl
override def toString = "$ifc " + cond.toDsl
}
/** a mock, must like a rule */
case class EMock(rule: ERule) extends CanHtml with HasPosition {
var pos: Option[EPos] = rule.pos
override def toHtml = span(count.toString) + " " + rule.toHtml//.replaceFirst("when", "mock")
override def toString = count.toString + " " + rule.toString//.replaceFirst("when", "mock")
def count = rule.i.map(_.count).sum // todo is slow
}
/** a typed variable
*
* @param p
*/
case class EVal(p: RDOM.P) extends CanHtml with HasPosition with HasKind {
def this(name: String, value: String) = this(P(name, value))
var pos: Option[EPos] = None
def withPos(p: Option[EPos]) = {
this.pos = p;
this
}
def copyFrom(other: EVal) = {
this.pos = other.pos
this
}
// overwrite default, so some values don't show in info view
var kind: Option[String] = None
def withKind(k: String) = {
this.kind = Some(k);
this
}
def toj: Map[String, Any] =
Map(
"class" -> "EVal",
"name" -> p.name,
"value" -> p.currentStringValue
) ++ {
pos.map { p =>
Map("ref" -> p.toRef,
"pos" -> p.toJmap
)
}.getOrElse(Map.empty)
}
override def toHtml =
if(pos.isDefined) kspan("val") + p.toHtml
else spanClick("val", "info", Enc.escapeHtml(p.currentStringValue)) + p.toHtml
override def toHtmlFull =
if(pos.isDefined) kspan("val") + p.toHtml(false)
else spanClick("val", "info", Enc.escapeHtml(p.currentStringValue)) + p.toHtml(false)
override def toString = "val " + p.toString
}
| razie/diesel-rx | diesel/src/main/scala/razie/diesel/engine/nodes/ERules.scala | Scala | apache-2.0 | 16,122 |
package kafka.rest;
import scala.collection.JavaConversions._
import javax.servlet.http.HttpServletRequest
import javax.servlet.http.HttpServletResponse
import org.apache.log4j.Logger;
import org.bson.BSONObject
import org.bson.BasicBSONDecoder
import org.bson.BasicBSONEncoder
import org.bson.BasicBSONObject
import com.mongodb.util.JSON
trait ReplyFormatter {
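  /** Pick the reply content type from the Accept header; an empty or wildcard Accept defaults to JSON, any other value throws. */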
def getReplyContentType(request:HttpServletRequest):String = {
val replyType = request.getHeader("Accept") match {
case "application/json" => "application/json"
case "application/bson" => "application/bson"
case "" | "*/*"=> "application/json"
case _ => throw new Exception("Unsupported content type in Accept: '%s'".format(request.getHeader("Accept")))
}
replyType
}
def replyWithJson(obj:BSONObject, response:HttpServletResponse, status:Int){
response.setContentType("application/json")
response.setStatus(status)
response.getWriter().print(JSON.serialize(obj))
}
def replyWithBson(obj:BSONObject, response:HttpServletResponse, status:Int){
response.setContentType("application/bson")
response.setStatus(status)
response.getOutputStream().write(new BasicBSONEncoder().encode(obj))
}
def replyWithStatus(obj:BSONObject, request:HttpServletRequest, response:HttpServletResponse, status:Int) {
getReplyContentType(request) match {
case "application/json" => replyWithJson(obj, response, status)
case "application/bson" => replyWithBson(obj, response, status)
}
}
def replyWith(obj:BSONObject, request:HttpServletRequest, response:HttpServletResponse){
replyWithStatus(obj, request, response, HttpServletResponse.SC_OK)
}
}
| lakshmi-kannan/kafka-sashafied | contrib/rest/src/main/scala/kafka/rest/ReplyFormatter.scala | Scala | apache-2.0 | 1,697 |
package io.soheila.um.entities
import play.api.libs.json.Json
case class ContactInfo(
name: String,
street: String,
city: String,
state: String,
country: String,
postcode: String,
phoneNumbers: Set[String],
isPrimary: Boolean = false,
isBusiness: Boolean = false
)
object ContactInfo {
implicit val jsonFormat = Json.format[ContactInfo]
}
| esfand-r/soheila-um | src/main/scala/io/soheila/um/entities/ContactInfo.scala | Scala | apache-2.0 | 362 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.parquet
import java.io.File
import java.sql.Timestamp
import org.apache.hadoop.fs.{FileSystem, Path}
import org.apache.parquet.hadoop.ParquetOutputFormat
import org.apache.spark.SparkException
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.{InternalRow, TableIdentifier}
import org.apache.spark.sql.catalyst.expressions.SpecificInternalRow
import org.apache.spark.sql.execution.FileSourceScanExec
import org.apache.spark.sql.execution.datasources.SQLHadoopMapReduceCommitProtocol
import org.apache.spark.sql.execution.datasources.parquet.TestingUDT.{NestedStruct, NestedStructUDT, SingleElement}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
/**
* A test suite that tests various Parquet queries.
*/
class ParquetQuerySuite extends QueryTest with ParquetTest with SharedSQLContext {
import testImplicits._
test("simple select queries") {
withParquetTable((0 until 10).map(i => (i, i.toString)), "t") {
checkAnswer(sql("SELECT _1 FROM t where t._1 > 5"), (6 until 10).map(Row.apply(_)))
checkAnswer(sql("SELECT _1 FROM t as tmp where tmp._1 < 5"), (0 until 5).map(Row.apply(_)))
}
}
test("appending") {
val data = (0 until 10).map(i => (i, i.toString))
spark.createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
// Query appends, don't test with both read modes.
withParquetTable(data, "t", false) {
sql("INSERT INTO TABLE t SELECT * FROM tmp")
checkAnswer(spark.table("t"), (data ++ data).map(Row.fromTuple))
}
spark.sessionState.catalog.dropTable(
TableIdentifier("tmp"), ignoreIfNotExists = true, purge = false)
}
test("overwriting") {
val data = (0 until 10).map(i => (i, i.toString))
spark.createDataFrame(data).toDF("c1", "c2").createOrReplaceTempView("tmp")
withParquetTable(data, "t") {
sql("INSERT OVERWRITE TABLE t SELECT * FROM tmp")
checkAnswer(spark.table("t"), data.map(Row.fromTuple))
}
spark.sessionState.catalog.dropTable(
TableIdentifier("tmp"), ignoreIfNotExists = true, purge = false)
}
test("SPARK-15678: not use cache on overwrite") {
withTempDir { dir =>
val path = dir.toString
spark.range(1000).write.mode("overwrite").parquet(path)
val df = spark.read.parquet(path).cache()
assert(df.count() == 1000)
spark.range(10).write.mode("overwrite").parquet(path)
assert(df.count() == 10)
assert(spark.read.parquet(path).count() == 10)
}
}
test("SPARK-15678: not use cache on append") {
withTempDir { dir =>
val path = dir.toString
spark.range(1000).write.mode("append").parquet(path)
val df = spark.read.parquet(path).cache()
assert(df.count() == 1000)
spark.range(10).write.mode("append").parquet(path)
assert(df.count() == 1010)
assert(spark.read.parquet(path).count() == 1010)
}
}
test("self-join") {
// 4 rows, cells of column 1 of row 2 and row 4 are null
val data = (1 to 4).map { i =>
val maybeInt = if (i % 2 == 0) None else Some(i)
(maybeInt, i.toString)
}
// TODO: vectorized doesn't work here because it requires UnsafeRows
withParquetTable(data, "t", false) {
val selfJoin = sql("SELECT * FROM t x JOIN t y WHERE x._1 = y._1")
val queryOutput = selfJoin.queryExecution.analyzed.output
assertResult(4, "Field count mismatches")(queryOutput.size)
      assertResult(2, s"Duplicated expression ID in query plan:\\n $selfJoin") {
queryOutput.filter(_.name == "_1").map(_.exprId).size
}
checkAnswer(selfJoin, List(Row(1, "1", 1, "1"), Row(3, "3", 3, "3")))
}
}
test("nested data - struct with array field") {
    val data = (1 to 10).map(i => Tuple1((i, Seq(s"val_$i"))))
withParquetTable(data, "t") {
checkAnswer(sql("SELECT _1._2[0] FROM t"), data.map {
case Tuple1((_, Seq(string))) => Row(string)
})
}
}
test("nested data - array of struct") {
    val data = (1 to 10).map(i => Tuple1(Seq(i -> s"val_$i")))
withParquetTable(data, "t") {
checkAnswer(sql("SELECT _1[0]._2 FROM t"), data.map {
case Tuple1(Seq((_, string))) => Row(string)
})
}
}
test("SPARK-1913 regression: columns only referenced by pushed down filters should remain") {
withParquetTable((1 to 10).map(Tuple1.apply), "t") {
checkAnswer(sql("SELECT _1 FROM t WHERE _1 < 10"), (1 to 9).map(Row.apply(_)))
}
}
test("SPARK-5309 strings stored using dictionary compression in parquet") {
withParquetTable((0 until 1000).map(i => ("same", "run_" + i /100, 1)), "t") {
checkAnswer(sql("SELECT _1, _2, SUM(_3) FROM t GROUP BY _1, _2"),
(0 until 10).map(i => Row("same", "run_" + i, 100)))
checkAnswer(sql("SELECT _1, _2, SUM(_3) FROM t WHERE _2 = 'run_5' GROUP BY _1, _2"),
List(Row("same", "run_5", 100)))
}
}
test("SPARK-6917 DecimalType should work with non-native types") {
val data = (1 to 10).map(i => Row(Decimal(i, 18, 0), new java.sql.Timestamp(i)))
val schema = StructType(List(StructField("d", DecimalType(18, 0), false),
StructField("time", TimestampType, false)).toArray)
withTempPath { file =>
val df = spark.createDataFrame(sparkContext.parallelize(data), schema)
df.write.parquet(file.getCanonicalPath)
val df2 = spark.read.parquet(file.getCanonicalPath)
checkAnswer(df2, df.collect().toSeq)
}
}
test("SPARK-10634 timestamp written and read as INT64 - TIMESTAMP_MILLIS") {
val data = (1 to 10).map(i => Row(i, new java.sql.Timestamp(i)))
val schema = StructType(List(StructField("d", IntegerType, false),
StructField("time", TimestampType, false)).toArray)
withSQLConf(SQLConf.PARQUET_INT64_AS_TIMESTAMP_MILLIS.key -> "true") {
withTempPath { file =>
val df = spark.createDataFrame(sparkContext.parallelize(data), schema)
df.write.parquet(file.getCanonicalPath)
("true" :: "false" :: Nil).foreach { vectorized =>
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> vectorized) {
val df2 = spark.read.parquet(file.getCanonicalPath)
checkAnswer(df2, df.collect().toSeq)
}
}
}
}
}
test("SPARK-10634 timestamp written and read as INT64 - truncation") {
withTable("ts") {
sql("create table ts (c1 int, c2 timestamp) using parquet")
sql("insert into ts values (1, '2016-01-01 10:11:12.123456')")
sql("insert into ts values (2, null)")
sql("insert into ts values (3, '1965-01-01 10:11:12.123456')")
checkAnswer(
sql("select * from ts"),
Seq(
Row(1, Timestamp.valueOf("2016-01-01 10:11:12.123456")),
Row(2, null),
Row(3, Timestamp.valueOf("1965-01-01 10:11:12.123456"))))
}
// The microsecond portion is truncated when written as TIMESTAMP_MILLIS.
withTable("ts") {
withSQLConf(SQLConf.PARQUET_INT64_AS_TIMESTAMP_MILLIS.key -> "true") {
sql("create table ts (c1 int, c2 timestamp) using parquet")
sql("insert into ts values (1, '2016-01-01 10:11:12.123456')")
sql("insert into ts values (2, null)")
sql("insert into ts values (3, '1965-01-01 10:11:12.125456')")
sql("insert into ts values (4, '1965-01-01 10:11:12.125')")
sql("insert into ts values (5, '1965-01-01 10:11:12.1')")
sql("insert into ts values (6, '1965-01-01 10:11:12.123456789')")
sql("insert into ts values (7, '0001-01-01 00:00:00.000000')")
checkAnswer(
sql("select * from ts"),
Seq(
Row(1, Timestamp.valueOf("2016-01-01 10:11:12.123")),
Row(2, null),
Row(3, Timestamp.valueOf("1965-01-01 10:11:12.125")),
Row(4, Timestamp.valueOf("1965-01-01 10:11:12.125")),
Row(5, Timestamp.valueOf("1965-01-01 10:11:12.1")),
Row(6, Timestamp.valueOf("1965-01-01 10:11:12.123")),
Row(7, Timestamp.valueOf("0001-01-01 00:00:00.000"))))
// Read timestamps that were encoded as TIMESTAMP_MILLIS annotated as INT64
// with PARQUET_INT64_AS_TIMESTAMP_MILLIS set to false.
withSQLConf(SQLConf.PARQUET_INT64_AS_TIMESTAMP_MILLIS.key -> "false") {
checkAnswer(
sql("select * from ts"),
Seq(
Row(1, Timestamp.valueOf("2016-01-01 10:11:12.123")),
Row(2, null),
Row(3, Timestamp.valueOf("1965-01-01 10:11:12.125")),
Row(4, Timestamp.valueOf("1965-01-01 10:11:12.125")),
Row(5, Timestamp.valueOf("1965-01-01 10:11:12.1")),
Row(6, Timestamp.valueOf("1965-01-01 10:11:12.123")),
Row(7, Timestamp.valueOf("0001-01-01 00:00:00.000"))))
}
}
}
}
test("Enabling/disabling merging partfiles when merging parquet schema") {
def testSchemaMerging(expectedColumnNumber: Int): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(0, 10).toDF("a").write.parquet(new Path(basePath, "foo=1").toString)
spark.range(0, 10).toDF("b").write.parquet(new Path(basePath, "foo=2").toString)
// delete summary files, so if we don't merge part-files, one column will not be included.
Utils.deleteRecursively(new File(basePath + "/foo=1/_metadata"))
Utils.deleteRecursively(new File(basePath + "/foo=1/_common_metadata"))
assert(spark.read.parquet(basePath).columns.length === expectedColumnNumber)
}
}
withSQLConf(
SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key ->
classOf[SQLHadoopMapReduceCommitProtocol].getCanonicalName,
SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "true",
SQLConf.PARQUET_SCHEMA_RESPECT_SUMMARIES.key -> "true",
ParquetOutputFormat.ENABLE_JOB_SUMMARY -> "true"
) {
testSchemaMerging(2)
}
withSQLConf(
SQLConf.FILE_COMMIT_PROTOCOL_CLASS.key ->
classOf[SQLHadoopMapReduceCommitProtocol].getCanonicalName,
SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "true",
SQLConf.PARQUET_SCHEMA_RESPECT_SUMMARIES.key -> "false"
) {
testSchemaMerging(3)
}
}
test("Enabling/disabling schema merging") {
def testSchemaMerging(expectedColumnNumber: Int): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(0, 10).toDF("a").write.parquet(new Path(basePath, "foo=1").toString)
spark.range(0, 10).toDF("b").write.parquet(new Path(basePath, "foo=2").toString)
assert(spark.read.parquet(basePath).columns.length === expectedColumnNumber)
}
}
withSQLConf(SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "true") {
testSchemaMerging(3)
}
withSQLConf(SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "false") {
testSchemaMerging(2)
}
}
test("Enabling/disabling ignoreCorruptFiles") {
def testIgnoreCorruptFiles(): Unit = {
withTempDir { dir =>
val basePath = dir.getCanonicalPath
spark.range(1).toDF("a").write.parquet(new Path(basePath, "first").toString)
spark.range(1, 2).toDF("a").write.parquet(new Path(basePath, "second").toString)
spark.range(2, 3).toDF("a").write.json(new Path(basePath, "third").toString)
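        // the "third" directory holds JSON files, so reading it as Parquet exercises the corrupt-file handling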
val df = spark.read.parquet(
new Path(basePath, "first").toString,
new Path(basePath, "second").toString,
new Path(basePath, "third").toString)
checkAnswer(
df,
Seq(Row(0), Row(1)))
}
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "true") {
testIgnoreCorruptFiles()
}
withSQLConf(SQLConf.IGNORE_CORRUPT_FILES.key -> "false") {
val exception = intercept[SparkException] {
testIgnoreCorruptFiles()
}
assert(exception.getMessage().contains("is not a Parquet file"))
}
}
test("SPARK-8990 DataFrameReader.parquet() should respect user specified options") {
withTempPath { dir =>
val basePath = dir.getCanonicalPath
spark.range(0, 10).toDF("a").write.parquet(new Path(basePath, "foo=1").toString)
spark.range(0, 10).toDF("b").write.parquet(new Path(basePath, "foo=a").toString)
// Disables the global SQL option for schema merging
withSQLConf(SQLConf.PARQUET_SCHEMA_MERGING_ENABLED.key -> "false") {
assertResult(2) {
// Disables schema merging via data source option
spark.read.option("mergeSchema", "false").parquet(basePath).columns.length
}
assertResult(3) {
// Enables schema merging via data source option
spark.read.option("mergeSchema", "true").parquet(basePath).columns.length
}
}
}
}
test("SPARK-9119 Decimal should be correctly written into parquet") {
withTempPath { dir =>
val basePath = dir.getCanonicalPath
val schema = StructType(Array(StructField("name", DecimalType(10, 5), false)))
val rowRDD = sparkContext.parallelize(Array(Row(Decimal("67123.45"))))
val df = spark.createDataFrame(rowRDD, schema)
df.write.parquet(basePath)
val decimal = spark.read.parquet(basePath).first().getDecimal(0)
assert(Decimal("67123.45") === Decimal(decimal))
}
}
test("SPARK-10005 Schema merging for nested struct") {
withTempPath { dir =>
val path = dir.getCanonicalPath
def append(df: DataFrame): Unit = {
df.write.mode(SaveMode.Append).parquet(path)
}
// Note that both the following two DataFrames contain a single struct column with multiple
// nested fields.
append((1 to 2).map(i => Tuple1((i, i))).toDF())
append((1 to 2).map(i => Tuple1((i, i, i))).toDF())
withSQLConf(SQLConf.PARQUET_BINARY_AS_STRING.key -> "true") {
checkAnswer(
spark.read.option("mergeSchema", "true").parquet(path),
Seq(
Row(Row(1, 1, null)),
Row(Row(2, 2, null)),
Row(Row(1, 1, 1)),
Row(Row(2, 2, 2))))
}
}
}
test("SPARK-10301 requested schema clipping - same schema") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(1).selectExpr("NAMED_STRUCT('a', id, 'b', id + 1) AS s").coalesce(1)
df.write.parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("a", LongType, nullable = true)
.add("b", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(0L, 1L)))
}
}
test("SPARK-11997 parquet with null partition values") {
withTempPath { dir =>
val path = dir.getCanonicalPath
spark.range(1, 3)
.selectExpr("if(id % 2 = 0, null, id) AS n", "id")
.write.partitionBy("n").parquet(path)
checkAnswer(
spark.read.parquet(path).filter("n is null"),
Row(2, null))
}
}
// This test case is ignored because of parquet-mr bug PARQUET-370
ignore("SPARK-10301 requested schema clipping - schemas with disjoint sets of fields") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(1).selectExpr("NAMED_STRUCT('a', id, 'b', id + 1) AS s").coalesce(1)
df.write.parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("c", LongType, nullable = true)
.add("d", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(null, null)))
}
}
test("SPARK-10301 requested schema clipping - requested schema contains physical schema") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(1).selectExpr("NAMED_STRUCT('a', id, 'b', id + 1) AS s").coalesce(1)
df.write.parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("a", LongType, nullable = true)
.add("b", LongType, nullable = true)
.add("c", LongType, nullable = true)
.add("d", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(0L, 1L, null, null)))
}
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(1).selectExpr("NAMED_STRUCT('a', id, 'd', id + 3) AS s").coalesce(1)
df.write.parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("a", LongType, nullable = true)
.add("b", LongType, nullable = true)
.add("c", LongType, nullable = true)
.add("d", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(0L, null, null, 3L)))
}
}
test("SPARK-10301 requested schema clipping - physical schema contains requested schema") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark
.range(1)
.selectExpr("NAMED_STRUCT('a', id, 'b', id + 1, 'c', id + 2, 'd', id + 3) AS s")
.coalesce(1)
df.write.parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("a", LongType, nullable = true)
.add("b", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(0L, 1L)))
}
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark
.range(1)
.selectExpr("NAMED_STRUCT('a', id, 'b', id + 1, 'c', id + 2, 'd', id + 3) AS s")
.coalesce(1)
df.write.parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("a", LongType, nullable = true)
.add("d", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(0L, 3L)))
}
}
test("SPARK-10301 requested schema clipping - schemas overlap but don't contain each other") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark
.range(1)
.selectExpr("NAMED_STRUCT('a', id, 'b', id + 1, 'c', id + 2) AS s")
.coalesce(1)
df.write.parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("b", LongType, nullable = true)
.add("c", LongType, nullable = true)
.add("d", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(1L, 2L, null)))
}
}
test("SPARK-10301 requested schema clipping - deeply nested struct") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark
.range(1)
.selectExpr("NAMED_STRUCT('a', ARRAY(NAMED_STRUCT('b', id, 'c', id))) AS s")
.coalesce(1)
df.write.parquet(path)
val userDefinedSchema = new StructType()
.add("s",
new StructType()
.add(
"a",
ArrayType(
new StructType()
.add("b", LongType, nullable = true)
.add("d", StringType, nullable = true),
containsNull = true),
nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(Seq(Row(0, null)))))
}
}
test("SPARK-10301 requested schema clipping - out of order") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df1 = spark
.range(1)
.selectExpr("NAMED_STRUCT('a', id, 'b', id + 1, 'c', id + 2) AS s")
.coalesce(1)
val df2 = spark
.range(1, 2)
.selectExpr("NAMED_STRUCT('c', id + 2, 'b', id + 1, 'd', id + 3) AS s")
.coalesce(1)
df1.write.parquet(path)
df2.write.mode(SaveMode.Append).parquet(path)
val userDefinedSchema = new StructType()
.add("s",
new StructType()
.add("a", LongType, nullable = true)
.add("b", LongType, nullable = true)
.add("d", LongType, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Seq(
Row(Row(0, 1, null)),
Row(Row(null, 2, 4))))
}
}
test("SPARK-10301 requested schema clipping - schema merging") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df1 = spark
.range(1)
.selectExpr("NAMED_STRUCT('a', id, 'c', id + 2) AS s")
.coalesce(1)
val df2 = spark
.range(1, 2)
.selectExpr("NAMED_STRUCT('a', id, 'b', id + 1, 'c', id + 2) AS s")
.coalesce(1)
df1.write.mode(SaveMode.Append).parquet(path)
df2.write.mode(SaveMode.Append).parquet(path)
checkAnswer(
spark
.read
.option("mergeSchema", "true")
.parquet(path)
.selectExpr("s.a", "s.b", "s.c"),
Seq(
Row(0, null, 2),
Row(1, 2, 3)))
}
}
testStandardAndLegacyModes("SPARK-10301 requested schema clipping - UDT") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark
.range(1)
.selectExpr(
"""NAMED_STRUCT(
| 'f0', CAST(id AS STRING),
| 'f1', NAMED_STRUCT(
| 'a', CAST(id + 1 AS INT),
| 'b', CAST(id + 2 AS LONG),
| 'c', CAST(id + 3.5 AS DOUBLE)
| )
|) AS s
""".stripMargin)
.coalesce(1)
df.write.mode(SaveMode.Append).parquet(path)
val userDefinedSchema =
new StructType()
.add(
"s",
new StructType()
.add("f1", new NestedStructUDT, nullable = true),
nullable = true)
checkAnswer(
spark.read.schema(userDefinedSchema).parquet(path),
Row(Row(NestedStruct(1, 2L, 3.5D))))
}
}
test("expand UDT in StructType") {
val schema = new StructType().add("n", new NestedStructUDT, nullable = true)
val expected = new StructType().add("n", new NestedStructUDT().sqlType, nullable = true)
assert(ParquetReadSupport.expandUDT(schema) === expected)
}
test("expand UDT in ArrayType") {
val schema = new StructType().add(
"n",
ArrayType(
elementType = new NestedStructUDT,
containsNull = false),
nullable = true)
val expected = new StructType().add(
"n",
ArrayType(
elementType = new NestedStructUDT().sqlType,
containsNull = false),
nullable = true)
assert(ParquetReadSupport.expandUDT(schema) === expected)
}
test("expand UDT in MapType") {
val schema = new StructType().add(
"n",
MapType(
keyType = IntegerType,
valueType = new NestedStructUDT,
valueContainsNull = false),
nullable = true)
val expected = new StructType().add(
"n",
MapType(
keyType = IntegerType,
valueType = new NestedStructUDT().sqlType,
valueContainsNull = false),
nullable = true)
assert(ParquetReadSupport.expandUDT(schema) === expected)
}
test("returning batch for wide table") {
withSQLConf(SQLConf.WHOLESTAGE_MAX_NUM_FIELDS.key -> "10") {
withTempPath { dir =>
val path = dir.getCanonicalPath
val df = spark.range(10).select(Seq.tabulate(11) {i => ('id + i).as(s"c$i")} : _*)
df.write.mode(SaveMode.Overwrite).parquet(path)
      // do not return batch, because whole-stage codegen is disabled for wide tables (more fields than WHOLESTAGE_MAX_NUM_FIELDS, lowered to 10 above)
val df2 = spark.read.parquet(path)
val fileScan2 = df2.queryExecution.sparkPlan.find(_.isInstanceOf[FileSourceScanExec]).get
assert(!fileScan2.asInstanceOf[FileSourceScanExec].supportsBatch)
checkAnswer(df2, df)
// return batch
val columns = Seq.tabulate(9) {i => s"c$i"}
val df3 = df2.selectExpr(columns : _*)
val fileScan3 = df3.queryExecution.sparkPlan.find(_.isInstanceOf[FileSourceScanExec]).get
assert(fileScan3.asInstanceOf[FileSourceScanExec].supportsBatch)
checkAnswer(df3, df.selectExpr(columns : _*))
}
}
}
test("SPARK-15719: disable writing summary files by default") {
withTempPath { dir =>
val path = dir.getCanonicalPath
spark.range(3).write.parquet(path)
val fs = FileSystem.get(sparkContext.hadoopConfiguration)
val files = fs.listFiles(new Path(path), true)
while (files.hasNext) {
val file = files.next
assert(!file.getPath.getName.contains("_metadata"))
}
}
}
test("SPARK-15804: write out the metadata to parquet file") {
val df = Seq((1, "abc"), (2, "hello")).toDF("a", "b")
val md = new MetadataBuilder().putString("key", "value").build()
val dfWithmeta = df.select('a, 'b.as("b", md))
withTempPath { dir =>
val path = dir.getCanonicalPath
dfWithmeta.write.parquet(path)
readParquetFile(path) { df =>
assert(df.schema.last.metadata.getString("key") == "value")
}
}
}
test("SPARK-16344: array of struct with a single field named 'element'") {
withTempPath { dir =>
val path = dir.getCanonicalPath
Seq(Tuple1(Array(SingleElement(42)))).toDF("f").write.parquet(path)
checkAnswer(
sqlContext.read.parquet(path),
Row(Array(Row(42)))
)
}
}
test("SPARK-16632: read Parquet int32 as ByteType and ShortType") {
withSQLConf(SQLConf.PARQUET_VECTORIZED_READER_ENABLED.key -> "true") {
withTempPath { dir =>
val path = dir.getCanonicalPath
// When being written to Parquet, `TINYINT` and `SMALLINT` should be converted into
// `int32 (INT_8)` and `int32 (INT_16)` respectively. However, Hive doesn't add the `INT_8`
// and `INT_16` annotation properly (HIVE-14294). Thus, when reading files written by Hive
// using Spark with the vectorized Parquet reader enabled, we may hit error due to type
// mismatch.
//
// Here we are simulating Hive's behavior by writing a single `INT` field and then read it
// back as `TINYINT` and `SMALLINT` in Spark to verify this issue.
Seq(1).toDF("f").write.parquet(path)
val withByteField = new StructType().add("f", ByteType)
checkAnswer(spark.read.schema(withByteField).parquet(path), Row(1: Byte))
val withShortField = new StructType().add("f", ShortType)
checkAnswer(spark.read.schema(withShortField).parquet(path), Row(1: Short))
}
}
}
}
object TestingUDT {
case class SingleElement(element: Long)
@SQLUserDefinedType(udt = classOf[NestedStructUDT])
case class NestedStruct(a: Integer, b: Long, c: Double)
class NestedStructUDT extends UserDefinedType[NestedStruct] {
override def sqlType: DataType =
new StructType()
.add("a", IntegerType, nullable = true)
.add("b", LongType, nullable = false)
.add("c", DoubleType, nullable = false)
override def serialize(n: NestedStruct): Any = {
val row = new SpecificInternalRow(sqlType.asInstanceOf[StructType].map(_.dataType))
row.setInt(0, n.a)
row.setLong(1, n.b)
      row.setDouble(2, n.c)
      row // return the populated row (otherwise serialize would evaluate to Unit)
    }
override def userClass: Class[NestedStruct] = classOf[NestedStruct]
override def deserialize(datum: Any): NestedStruct = {
datum match {
case row: InternalRow =>
NestedStruct(row.getInt(0), row.getLong(1), row.getDouble(2))
}
}
}
}
| JerryLead/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/parquet/ParquetQuerySuite.scala | Scala | apache-2.0 | 29,226 |
package org.jmotor.sbt.parser
import scala.util.matching.Regex
/**
 * Component: VersionParser
 * Description: extracts `val name = "version"` declarations from a `Versions` object.
 * Date: 2018/3/1
 *
 * @author AI
 */
object VersionParser {
lazy val VersionRegex: Regex = """val ?(\\w+) ?= ?"(.*)"""".r
lazy val VersionsObjectRegex: Regex = """[\\t ]*object ?Versions ?\\{([^{]*)[\\t ]*\\}""".r
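  // Illustrative example: for source text containing
  //   object Versions {
  //     val akka = "2.5.23"
  //   }
  // parseVersionLines returns the trimmed, non-empty body lines - here just `val akka = "2.5.23"`.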
def parseVersionLines(text: String): Array[String] = {
(for (m ← VersionsObjectRegex.findFirstMatchIn(text)) yield m.group(1)) match {
case None ⇒ Array.empty
case Some(v) ⇒
v.split("\\n").map { line ⇒
line.replace("\\t", "").trim
}.filter(_.nonEmpty)
}
}
}
| aiyanbo/sbt-dependency-updates | src/main/scala/org/jmotor/sbt/parser/VersionParser.scala | Scala | apache-2.0 | 631 |
package dpla.ingestion3.entries.ingest
import dpla.ingestion3.confs.{CmdArgs, Ingestion3Conf, i3Conf}
import dpla.ingestion3.entries.reports.ReporterMain.executeAllReports
import dpla.ingestion3.utils.Utils
import org.apache.spark.SparkConf
/**
* Driver for reading DplaMapData records (mapped or enriched) and generating
* Reports
*
* Expects three parameters:
* 1) a path to the mapped/enriched data
* 2) a path to output the reports
* 3) a path to the application configuration file
* 4) provider short name
* 5) spark master (optional parameter that overrides a --master param submitted
* via spark-submit
*
* Usage
* -----
* To invoke via sbt:
* sbt "run-main dpla.ingestion3.ReportsEntry
* --input=/input/path/to/enriched/
* --output=/output/path/to/reports/
* --conf=/path/to/application.conf
* --name=shortName"
* --sparkMaster=local[*]
*/
object ReportsEntry {
def main(args: Array[String]): Unit = {
// Read in command line args.
val cmdArgs = new CmdArgs(args)
val dataIn = cmdArgs.getInput
val dataOut = cmdArgs.getOutput
val shortName = cmdArgs.getProviderName
val confFile = cmdArgs.getConfigFile
val sparkMaster: Option[String] = cmdArgs.getSparkMaster
// Load configuration from file
val i3Conf: i3Conf = new Ingestion3Conf(confFile).load()
// Get logger
val logger = Utils.createLogger("reports", shortName)
val baseConf =
new SparkConf()
.setAppName(s"Reports: $shortName")
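    // a master given on the command line takes precedence over any --master passed to spark-submit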
val sparkConf = sparkMaster match {
case Some(m) => baseConf.setMaster(m)
case None => baseConf
}
executeAllReports(sparkConf, dataIn, dataOut, shortName, logger)
}
}
| dpla/ingestion3 | src/main/scala/dpla/ingestion3/entries/ingest/ReportsEntry.scala | Scala | mit | 1,734 |
package controllers
import java.util.concurrent.TimeUnit
import com.github.athieriot._
import models.Stream
import models.message.{ReturnGetStreamIdList, ReturnGetPreferableAnalysis}
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
import play.api.libs.json.Json
import play.api.test.{FakeRequest, FakeApplication}
import play.api.test.Helpers._
import scala.concurrent.{Future, Await}
import scala.concurrent.duration.FiniteDuration
/**
* Created by calvin-pc on 6/10/2015.
*/
class ThridPartiesIT extends Specification{
//Use test database (dump in folder test)
import models.JsonFormats._
val timeout: FiniteDuration = FiniteDuration(5, TimeUnit.SECONDS)
"ThridParties" should {
"Return OK status when correct check password" in {
running(FakeApplication()) {
val request = FakeRequest.apply(GET, "/thridparty/check_password").withJsonBody(Json.obj(
"username" -> "coba",
"password" -> "benar"))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(OK)
}
}
"Return Bad Request status when check password if not valid json" in {
running(FakeApplication()) {
val request = FakeRequest.apply(GET, "/thridparty/check_password").withJsonBody(Json.obj(
"username" -> "coba",
"password" -> 9))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(BAD_REQUEST)
}
}
"Return Bad Request status when check password if wrong" in {
running(FakeApplication()) {
val request = FakeRequest.apply(GET, "/thridparty/check_password").withJsonBody(Json.obj(
"username" -> "coba",
"password" -> "coba"))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(BAD_REQUEST)
}
}
"add person with a valid json" in {
running(FakeApplication()) {
val request = FakeRequest.apply(POST, "/thridparty/add_user").withJsonBody(Json.obj(
"username" -> "munca",
"password" -> "whoknow"))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(CREATED)
val request2 = FakeRequest.apply(GET, "/thridparty/check_password").withJsonBody(Json.obj(
"username" -> "munca",
"password" -> "whoknow"))
val response2 = route(request2)
response2.isDefined mustEqual true
val result2 = Await.result(response2.get, timeout)
result2.header.status must equalTo(OK)
}
}
"fail when add person without valid json" in {
running(FakeApplication()) {
val request = FakeRequest.apply(POST, "/thridparty/add_user").withJsonBody(Json.obj(
"user" -> "munca",
"pass" -> "whoknow"))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(BAD_REQUEST)
}
}
"fail when add person with same username" in {
running(FakeApplication()) {
val request = FakeRequest.apply(POST, "/thridparty/add_user").withJsonBody(Json.obj(
"username" -> "munca",
"password" -> "mozart"))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(BAD_REQUEST)
val response2 = route(request)
response2.isDefined mustEqual true
val result2 = Await.result(response2.get, timeout)
result2.header.status must equalTo(BAD_REQUEST)
}
}
"Return OK status when update password correct" in {
running(FakeApplication()) {
var message = models.message.UpdatePassword("munca","whoknow","helloworld")
var request = FakeRequest.apply(POST, "/thridparty/update_password")
.withJsonBody(Json.toJson(message))
var response = route(request)
response.isDefined mustEqual true
var result = Await.result(response.get, timeout)
result.header.status must equalTo(OK)
request = FakeRequest.apply(GET, "/thridparty/check_password").withJsonBody(Json.obj(
"username" -> "munca",
"password" -> "helloworld"))
response = route(request)
response.isDefined mustEqual true
result = Await.result(response.get, timeout)
result.header.status must equalTo(OK)
}
}
"Return Bad Request status when update password old password wrong" in {
running(FakeApplication()) {
val message = models.message.UpdatePassword("munca","wawawa","hello")
val request = FakeRequest.apply(POST, "/thridparty/update_password")
.withJsonBody(Json.toJson(message))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(BAD_REQUEST)
}
}
"Return BAD status when update password not valid json format" in {
running(FakeApplication()) {
val message = models.message.CheckPassword("mega","mozart")
val request = FakeRequest.apply(POST, "/thridparty/update_password")
.withJsonBody(Json.toJson(message))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(BAD_REQUEST)
}
}
"get preferable analysis" in {
running(FakeApplication()) {
val message = models.message.GetPreferableAnalysis("coba","benar")
val request = FakeRequest.apply(GET, "/thridparty/get_preferable")
.withJsonBody(Json.toJson(message))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(OK)
val option = contentAsJson(response.get).asOpt[ReturnGetPreferableAnalysis]
option.isDefined mustEqual true
val returnMessage = option.get
returnMessage.id_analysis_list.length mustEqual 2
returnMessage.id_analysis_list.forall( string => { println("\\n\\n\\n\\n\\n[Hello]" + string)
string == "557a56037e26b8ab003c3f97" || string == "557a56b27e26b8b6003c3f98"}) mustEqual true
}
}
"return bad request when get preferable analysis because invalid json" in {
running(FakeApplication()) {
val message = models.message.ReturnGetPreferableAnalysis(Seq())
val request = FakeRequest.apply(GET, "/thridparty/get_preferable")
.withJsonBody(Json.toJson(message))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(BAD_REQUEST)
}
}
"return bad request when get preferable analysis because incorrect password" in {
running(FakeApplication()) {
val message = models.message.GetPreferableAnalysis("coba","coba")
val request = FakeRequest.apply(GET, "/thridparty/get_preferable")
.withJsonBody(Json.toJson(message))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(BAD_REQUEST)
}
}
"return bad request when get preferable analysis because user doen;t exist" in {
running(FakeApplication()) {
val message = models.message.GetPreferableAnalysis("xxxxxxxx","coba")
val request = FakeRequest.apply(GET, "/thridparty/get_preferable")
.withJsonBody(Json.toJson(message))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(BAD_REQUEST)
}
}
"update preferable" in {
running(FakeApplication()) {
val message = models.message.UpdatePreferable("munca", "helloworld" , Seq("557a56037e26b8ab003c3f97"))
val request = FakeRequest.apply(POST, "/thridparty/update_preferable")
.withJsonBody(Json.toJson(message))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(OK)
val message2 = models.message.GetPreferableAnalysis("munca", "helloworld")
val request2 = FakeRequest.apply(GET, "/thridparty/get_preferable")
.withJsonBody(Json.toJson(message2))
val response2 = route(request2)
response2.isDefined mustEqual true
val result2 = Await.result(response2.get, timeout)
result2.header.status must equalTo(OK)
val option = contentAsJson(response2.get).asOpt[ReturnGetPreferableAnalysis]
option.isDefined mustEqual true
val returnMessage = option.get
returnMessage.id_analysis_list.length mustEqual 1
returnMessage.id_analysis_list.head mustEqual "557a56037e26b8ab003c3f97"
}
}
"get Stream id list" in {
running(FakeApplication()) {
val message = models.message.GetStreamIdList("coba", "benar")
val request = FakeRequest.apply(GET, "/thridparty/get_all_stream_id")
.withJsonBody(Json.toJson(message))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(OK)
val option = contentAsJson(response.get).asOpt[ReturnGetStreamIdList]
option.isDefined mustEqual true
val returnMessage = option.get
returnMessage.id_list.length mustEqual 2
returnMessage.id_list.forall( string => {
string == "557a7c0a7e26b84302120c92" || string == "557a7c9d7e26b84302120c93"}) mustEqual true
}
}
"add raw Stream and parse it" in {
running(FakeApplication()) {
var id:String = {
"Hallo"
}
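        // dummy initial value; replaced below with the stream id returned by the service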
{ //Add
var message = models.message.AddRawStreamMessage("munca", "helloworld", 10, "raw", "ini coba - coba")
var request = FakeRequest.apply(POST, "/thridparty/add_stream")
.withJsonBody(Json.toJson(message))
var response = route(request)
response.isDefined mustEqual true
var result = Await.result(response.get, timeout)
result.header.status must equalTo(OK)
}
{ //check with get all id stream
val message = models.message.GetStreamIdList("munca", "helloworld")
val request = FakeRequest.apply(GET, "/thridparty/get_all_stream_id")
.withJsonBody(Json.toJson(message))
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(OK)
val option = contentAsJson(response.get).asOpt[ReturnGetStreamIdList]
option.isDefined mustEqual true
val returnMessage = option.get
returnMessage.id_list.length mustEqual 1
id = returnMessage.id_list.head
}
{//check the detail with getstream
val request = FakeRequest.apply(GET, "/stream/" + id)
val response = route(request)
response.isDefined mustEqual true
val result = Await.result(response.get, timeout)
result.header.status must equalTo(OK)
val option = contentAsJson(Future.successful(result)).asOpt[Stream]
option.isDefined mustEqual true
option.get._id mustEqual id
option.get.content mustEqual "ini coba - coba"
option.get.analysis.length mustEqual 1
option.get.analysis.head._id mustEqual "557a56037e26b8ab003c3f97"
option.get.max_validasi mustEqual 10
}
}
}
"fail to add raw Stream because file type not supported" in {
running(FakeApplication()) {
{ //Add
var message = models.message.AddRawStreamMessage("munca", "helloworld", 10, "magic", "ini coba - coba")
var request = FakeRequest.apply(POST, "/thridparty/add_stream")
.withJsonBody(Json.toJson(message))
var response = route(request)
response.isDefined mustEqual true
var result = Await.result(response.get, timeout)
result.header.status must equalTo(BAD_REQUEST)
}
}
}
}
} | calvinsadewa/backend | test/controllers/ThridPartiesIT.scala | Scala | apache-2.0 | 12,953 |
package edu.gemini.model.p1.immutable
import edu.gemini.model.p1.{ mutable => M }
object GmosNBlueprintLongslitNs {
def apply(m: M.GmosNBlueprintLongslitNs) = new GmosNBlueprintLongslitNs(
Altair(m.getAltair),
m.getDisperser,
m.getFilter,
m.getFpu)
}
case class GmosNBlueprintLongslitNs(altair: Altair, disperser: GmosNDisperser, filter: GmosNFilter, fpu: GmosNFpuNs)
extends GmosNBlueprintSpectrosopyBase {
def toChoice(n:Namer) = {
val c = Factory.createGmosNBlueprintChoice
c.setLongslitNs(mutable(n))
c.setRegime(M.GmosNWavelengthRegime.OPTICAL)
c
}
def mutable(n:Namer) = {
val m = Factory.createGmosNBlueprintLongslitNs
m.setId(n.nameOf(this))
m.setName(name)
m.setDisperser(disperser)
m.setAltair(altair.mutable)
m.setFilter(filter)
m.setFpu(fpu)
m
}
def name = s"GMOS-N LongSlit N+S $altair ${disperser.value} ${filter.value} ${fpu.value}"
} | arturog8m/ocs | bundle/edu.gemini.model.p1/src/main/scala/edu/gemini/model/p1/immutable/GmosNBlueprintLongslitNs.scala | Scala | bsd-3-clause | 936 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.qscript
import slamdata.Predef.{Boolean, Option, Some}
import quasar.RenderTree
import scalaz.{Enum, Show}
import scalaz.std.anyVal._
import scalaz.syntax.order._
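/** Whether an operation that can produce undefined values should emit them or omit (filter) them. */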
sealed abstract class OnUndefined {
def fold[A](emit: => A, omit: => A): A =
this match {
case OnUndefined.Emit => emit
case OnUndefined.Omit => omit
}
}
object OnUndefined {
case object Emit extends OnUndefined
case object Omit extends OnUndefined
val emit: OnUndefined = Emit
val omit: OnUndefined = Omit
implicit val enum: Enum[OnUndefined] =
new Enum[OnUndefined] {
def order(x: OnUndefined, y: OnUndefined) =
asBool(x) ?|? asBool(y)
def pred(x: OnUndefined): OnUndefined =
x.fold(omit, emit)
def succ(x: OnUndefined): OnUndefined =
x.fold(omit, emit)
override val min: Option[OnUndefined] =
Some(Emit)
override val max: Option[OnUndefined] =
Some(Omit)
private val asBool: OnUndefined => Boolean =
_.fold(false, true)
}
implicit def renderTree: RenderTree[OnUndefined] =
RenderTree.fromShow("OnUndefined")
implicit def show: Show[OnUndefined] =
Show.showFromToString
}
| slamdata/quasar | qscript/src/main/scala/quasar/qscript/OnUndefined.scala | Scala | apache-2.0 | 1,806 |
/*
* Copyright 2012-2014 Kieron Wilkinson.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package viper.util
import java.util.prefs.Preferences
import java.awt.{Point, Dimension}
import javax.swing.{JFrame, JSplitPane}
import collection.mutable
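/**
 * Mixin that persists window size/position and split-pane divider locations between runs
 * via java.util.prefs, keyed by a caller-supplied name.
 */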
trait Prefs {
private lazy val prefs = Preferences.userNodeForPackage(getClass)
private val saving = mutable.ListBuffer[() => Unit]()
private val loading = mutable.ListBuffer[() => Unit]()
def restorePrefs() {
loading.foreach(_())
}
def storePrefs() {
saving.foreach(_())
}
def registerPrefs(name: String, component: JFrame, defaultSize: Dimension) {
val key = name + ".frame"
loading += { () =>
component.setSize(restoreDimension(key).getOrElse(defaultSize))
restorePoint(key) match {
case Some(p) => component.setLocation(p)
case None => component.setLocationRelativeTo(null)
}
}
saving += { () =>
store(key, component.getSize)
store(key, component.getLocation)
}
}
def registerPrefs(name: String, component: JSplitPane) {
val key = name + ".divider"
loading += { () =>
val value = prefs.getInt(key, -1)
if (value != -1) {
component.setDividerLocation(value)
}
}
saving += { () =>
prefs.putInt(key, component.getDividerLocation)
}
}
private def store(name: String, value: Dimension) {
prefs.putInt(name + ".width", value.width)
prefs.putInt(name + ".height", value.height)
}
private def restoreDimension(name: String): Option[Dimension] = {
val width = prefs.getInt(name + ".width", -1)
val height = prefs.getInt(name + ".height", -1)
if (width == -1 || height == -1) None
else Some(new Dimension(width, height))
}
private def store(name: String, value: Point) {
prefs.putInt(name + ".x", value.x)
prefs.putInt(name + ".y", value.y)
}
private def restorePoint(name: String): Option[Point] = {
val x = prefs.getInt(name + ".x", -1)
val y = prefs.getInt(name + ".y", -1)
if (x == -1 || y == -1) None
else Some(new Point(x, y))
}
}
| vyadh/viper | util/src/main/scala/viper/util/Prefs.scala | Scala | apache-2.0 | 2,632 |
/*
* Copyright (c) 2012, The Broad Institute
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
package org.broadinstitute.sting.queue.extensions.picard
import org.broadinstitute.sting.commandline._
import java.io.File
/*
* Created by IntelliJ IDEA.
* User: carneiro
* Date: 6/22/11
* Time: 10:35 AM
*/
class MarkDuplicates extends org.broadinstitute.sting.queue.function.JavaCommandLineFunction with PicardBamFunction {
analysisName = "MarkDuplicates"
javaMainClass = "net.sf.picard.sam.MarkDuplicates"
@Input(doc="The input SAM or BAM files to analyze. Must be coordinate sorted.", shortName = "input", fullName = "input_bam_files", required = true)
var input: Seq[File] = Nil
@Output(doc="The output file to write marked records to", shortName = "output", fullName = "output_bam_file", required = true)
var output: File = _
@Output(doc="The output bam index", shortName = "out_index", fullName = "output_bam_index_file", required = false)
var outputIndex: File = _
@Output(doc="File to write duplication metrics to", shortName = "out_metrics", fullName = "output_metrics_file", required = false)
var metrics: File = new File(output + ".metrics")
@Argument(doc="If true do not write duplicates to the output file instead of writing them with appropriate flags set.", shortName = "remdup", fullName = "remove_duplicates", required = false)
var REMOVE_DUPLICATES: Boolean = false
@Argument(doc = "Maximum number of file handles to keep open when spilling read ends to disk. Set this number a little lower than the per-process maximum number of file that may be open. This number can be found by executing the 'ulimit -n' command on a Unix system.", shortName = "max_file_handles", fullName ="max_file_handles_for_read_ends_maps", required=false)
  var MAX_FILE_HANDLES_FOR_READ_ENDS_MAP: Int = -1
@Argument(doc = "This number, plus the maximum RAM available to the JVM, determine the memory footprint used by some of the sorting collections. If you are running out of memory, try reducing this number.", shortName = "sorting_ratio", fullName = "sorting_collection_size_ratio", required = false)
var SORTING_COLLECTION_SIZE_RATIO: Double = -1
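  // Derive the BAM index file name from the output BAM when no explicit index path was given.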
override def freezeFieldValues() {
super.freezeFieldValues()
if (outputIndex == null && output != null)
outputIndex = new File(output.getName.stripSuffix(".bam") + ".bai")
}
override def inputBams = input
override def outputBam = output
this.sortOrder = null
this.createIndex = Some(true)
override def commandLine = super.commandLine +
required("M=" + metrics) +
conditional(REMOVE_DUPLICATES, "REMOVE_DUPLICATES=true") +
conditional(MAX_FILE_HANDLES_FOR_READ_ENDS_MAP > 0, "MAX_FILE_HANDLES_FOR_READ_ENDS_MAP=" + MAX_FILE_HANDLES_FOR_READ_ENDS_MAP.toString) +
conditional(SORTING_COLLECTION_SIZE_RATIO > 0, "SORTING_COLLECTION_SIZE_RATIO=" + SORTING_COLLECTION_SIZE_RATIO.toString)
} | iontorrent/Torrent-Variant-Caller-stable | public/scala/src/org/broadinstitute/sting/queue/extensions/picard/MarkDuplicates.scala | Scala | mit | 4,059 |
package com.twitter.finagle.loadbalancer
import com.twitter.finagle.Group
import com.twitter.finagle.stats.NullStatsReceiver
import com.twitter.finagle.{
ClientConnection, NoBrokersAvailableException, Service, ServiceFactory
}
import com.twitter.util.{Future, Time}
import org.specs.SpecificationWithJUnit
import org.specs.mock.Mockito
import java.net.{InetSocketAddress, SocketAddress}
class HeapBalancerSpec extends SpecificationWithJUnit with Mockito {
// test: service creation failure
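  // Test ServiceFactory that tracks the number of outstanding services in `load` and exposes availability/close state.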
class LoadedFactory extends ServiceFactory[Unit, LoadedFactory] {
var load = 0
var _isAvailable = true
var _closed = false
def apply(conn: ClientConnection) = Future.value {
load += 1
new Service[Unit, LoadedFactory] {
def apply(req: Unit) = Future.value(LoadedFactory.this)
override def close(deadline: Time) = { load -= 1; Future.Done }
}
}
override def isAvailable = _isAvailable
def close(deadline: Time) = {
_closed = true
Future.Done
}
}
"HeapBalancer (nonempty)" should {
val N = 10
val statsReceiver = NullStatsReceiver // mock[StatsReceiver]
val socket: SocketAddress = new InetSocketAddress(0)
val half1, half2 = 0 until N/2 map { _ => (socket -> new LoadedFactory) }
val factories = half1 ++ half2
val group = Group.mutable[(SocketAddress, ServiceFactory[Unit, LoadedFactory])](factories:_*)
val b = new HeapBalancer[Unit, LoadedFactory](group, statsReceiver)
val newFactory = new LoadedFactory // the host to be added after creating heapbalancer
factories.size must be_==(N)
"balance according to load" in {
val made = 0 until N map { _ => b()() }
factories foreach { case (_, f) =>
f.load must be_==(1)
}
val made2 = 0 until N map { _ => b()() }
factories foreach { case (_, f) =>
f.load must be_==(2)
}
// apologies for the ascii art.
val f = made(0)(())()
made(0).close()
f.load must be_==(1)
// f is now least-loaded
val f1 = b()()(())()
f1 must be(f)
}
"pick only healthy services" in {
0 until N foreach { _ => b() }
factories(0)._2._isAvailable = false
factories(1)._2._isAvailable = false
0 until 2*(N-2) foreach { _=> b() }
factories(0)._2.load must be_==(1)
factories(1)._2.load must be_==(1)
factories drop 2 foreach { case (_, f) =>
f.load must be_==(3)
}
}
"be able to handle dynamically added factory" in {
// initially N factories, load them twice
val made = 0 until N*2 map { _ => b()() }
factories foreach { case (_, f) => f.load must be_==(2) }
      // add newFactory to the heap balancer. Initially it has load 0, so the next two b()() calls should both
      // pick newFactory
group() += (socket -> newFactory)
b()()
newFactory.load must be_==(1)
b()()
newFactory.load must be_==(2)
      // remove newFactory from the heap balancer. Further calls to b()() should not affect the load on newFactory
group() -= (socket -> newFactory)
val made2 = 0 until N foreach { _ => b()() }
factories foreach { case (_, f) => f.load must be_==(3) }
newFactory.load must be_==(2)
}
"be safe to remove a host from group before releasing it" in {
val made = 0 until N map { _ => b()() }
group() += (socket -> newFactory)
val made2 = b.apply().apply()
(factories :+ (socket -> newFactory)) foreach { case (_, f) => f.load must be_==(1) }
group() -= (socket -> newFactory)
made2.close()
newFactory.load must be_==(0)
}
"close a factory as it is removed from group" in {
val made = 0 until N map { _ => b()() }
group() --= half1
b()().release()
half1 foreach { case (a, f) => f._closed must beTrue }
}
}
"HeapBalancer (empty)" should {
"always return NoBrokersAvailableException" in {
val b = new HeapBalancer(Group.empty[(SocketAddress, ServiceFactory[Unit, LoadedFactory])])
b()() must throwA[NoBrokersAvailableException]
val heapBalancerEmptyGroup = "HeapBalancerEmptyGroup"
val c = new HeapBalancer(
Group.empty[(SocketAddress, ServiceFactory[Unit, LoadedFactory])],
NullStatsReceiver, NullStatsReceiver,
new NoBrokersAvailableException(heapBalancerEmptyGroup)
)
c()() must throwA[NoBrokersAvailableException].like {
case m => m.getMessage must beMatching(heapBalancerEmptyGroup)
}
}
}
}
| joshbedo/finagle | finagle-core/src/test/scala/com/twitter/finagle/loadbalancer/HeapBalancerSpec.scala | Scala | apache-2.0 | 4,555 |
/*
* Copyright 2014-2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.index
import java.util.Map.Entry
import com.typesafe.scalalogging.slf4j.Logging
import com.vividsolutions.jts.geom.{Geometry, Polygon}
import org.apache.accumulo.core.client.{BatchScanner, IteratorSetting, Scanner}
import org.apache.accumulo.core.data.{Key, Value}
import org.geotools.data.Query
import org.geotools.filter.text.ecql.ECQL
import org.joda.time.Interval
import org.locationtech.geomesa.core._
import org.locationtech.geomesa.core.data._
import org.locationtech.geomesa.core.index.QueryHints._
import org.locationtech.geomesa.core.index.QueryPlanner._
import org.locationtech.geomesa.core.index.Strategy._
import org.locationtech.geomesa.core.iterators.{FEATURE_ENCODING, _}
import org.locationtech.geomesa.core.util.{CloseableIterator, BatchMultiScanner, SelfClosingIterator}
import org.locationtech.geomesa.feature.FeatureEncoding.FeatureEncoding
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.opengis.feature.simple.SimpleFeatureType
import org.opengis.filter.Filter
import scala.collection.JavaConversions._
import scala.util.Random
trait Strategy extends Logging {
/**
* Plans the query - strategy implementations need to define this
*/
def getQueryPlan(query: Query, queryPlanner: QueryPlanner, output: ExplainerOutputType): QueryPlan
/**
* Execute a query against this strategy
*/
def execute(plan: QueryPlan, acc: AccumuloConnectorCreator, output: ExplainerOutputType): KVIter = {
try {
SelfClosingIterator(getScanner(plan, acc))
} catch {
case e: Exception =>
logger.error(s"Error in creating scanner: $e", e)
// since GeoTools would eat the error and return no records anyway,
// there's no harm in returning an empty iterator.
Iterator.empty
}
}
/**
* Creates a scanner based on a query plan
*/
private def getScanner(queryPlan: QueryPlan, acc: AccumuloConnectorCreator): KVIter =
queryPlan match {
case qp: ScanPlan =>
val scanner = acc.getScanner(qp.table)
configureScanner(scanner, qp)
SelfClosingIterator(scanner)
case qp: BatchScanPlan =>
if (qp.ranges.isEmpty) {
logger.warn("Query plan resulted in no valid ranges - nothing will be returned.")
CloseableIterator(Iterator.empty)
} else {
val batchScanner = acc.getBatchScanner(qp.table, qp.numThreads)
configureBatchScanner(batchScanner, qp)
SelfClosingIterator(batchScanner)
}
case qp: JoinPlan =>
val primary = if (qp.ranges.length == 1) {
val scanner = acc.getScanner(qp.table)
configureScanner(scanner, qp)
scanner
} else {
val batchScanner = acc.getBatchScanner(qp.table, qp.numThreads)
configureBatchScanner(batchScanner, qp)
batchScanner
}
val jqp = qp.joinQuery
val secondary = acc.getBatchScanner(jqp.table, jqp.numThreads)
configureBatchScanner(secondary, jqp)
val bms = new BatchMultiScanner(primary, secondary, qp.joinFunction)
SelfClosingIterator(bms.iterator, () => bms.close())
}
}
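/** Shared helpers for configuring Accumulo scanners and GeoMesa iterator settings used by Strategy implementations. */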
object Strategy {
def configureBatchScanner(bs: BatchScanner, qp: QueryPlan) {
qp.iterators.foreach { i => bs.addScanIterator(i) }
bs.setRanges(qp.ranges)
qp.columnFamilies.foreach { c => bs.fetchColumnFamily(c) }
}
def configureScanner(scanner: Scanner, qp: QueryPlan) {
qp.iterators.foreach { i => scanner.addScanIterator(i) }
qp.ranges.headOption.foreach(scanner.setRange)
qp.columnFamilies.foreach { c => scanner.fetchColumnFamily(c) }
}
def configureFeatureEncoding(cfg: IteratorSetting, featureEncoding: FeatureEncoding) {
cfg.addOption(FEATURE_ENCODING, featureEncoding.toString)
}
def configureStFilter(cfg: IteratorSetting, filter: Option[Filter]) = {
filter.foreach { f => cfg.addOption(ST_FILTER_PROPERTY_NAME, ECQL.toCQL(f)) }
}
def configureVersion(cfg: IteratorSetting, version: Int) =
cfg.addOption(GEOMESA_ITERATORS_VERSION, version.toString)
def configureFeatureType(cfg: IteratorSetting, featureType: SimpleFeatureType) = {
val encodedSimpleFeatureType = SimpleFeatureTypes.encodeType(featureType)
cfg.addOption(GEOMESA_ITERATORS_SIMPLE_FEATURE_TYPE, encodedSimpleFeatureType)
cfg.encodeUserData(featureType.getUserData, GEOMESA_ITERATORS_SIMPLE_FEATURE_TYPE)
}
def configureFeatureTypeName(cfg: IteratorSetting, featureType: String) =
cfg.addOption(GEOMESA_ITERATORS_SFT_NAME, featureType)
def configureIndexValues(cfg: IteratorSetting, featureType: SimpleFeatureType) = {
val encodedSimpleFeatureType = SimpleFeatureTypes.encodeType(featureType)
cfg.addOption(GEOMESA_ITERATORS_SFT_INDEX_VALUE, encodedSimpleFeatureType)
}
def configureEcqlFilter(cfg: IteratorSetting, ecql: Option[String]) =
ecql.foreach(filter => cfg.addOption(GEOMESA_ITERATORS_ECQL_FILTER, filter))
// store transform information into an Iterator's settings
def configureTransforms(cfg: IteratorSetting, query:Query) =
for {
transformOpt <- org.locationtech.geomesa.core.index.getTransformDefinition(query)
transform = transformOpt.asInstanceOf[String]
_ = cfg.addOption(GEOMESA_ITERATORS_TRANSFORM, transform)
sfType <- org.locationtech.geomesa.core.index.getTransformSchema(query)
encodedSFType = SimpleFeatureTypes.encodeType(sfType)
_ = cfg.addOption(GEOMESA_ITERATORS_TRANSFORM_SCHEMA, encodedSFType)
} yield Unit
def configureRecordTableIterator(
simpleFeatureType: SimpleFeatureType,
featureEncoding: FeatureEncoding,
ecql: Option[Filter],
query: Query): IteratorSetting = {
val cfg = new IteratorSetting(
iteratorPriority_SimpleFeatureFilteringIterator,
classOf[RecordTableIterator].getSimpleName,
classOf[RecordTableIterator]
)
configureFeatureType(cfg, simpleFeatureType)
configureFeatureEncoding(cfg, featureEncoding)
configureEcqlFilter(cfg, ecql.map(ECQL.toCQL))
configureTransforms(cfg, query)
cfg
}
  def randomPrintableString(length: Int = 5): String =
    (1 to length).map(_ => Random.nextPrintableChar()).mkString
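  // Builds an optional iterator config when the query hints request density, temporal-density or map-aggregation results.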
def getDensityIterCfg(query: Query,
geometryToCover: Geometry,
schema: String,
featureEncoding: FeatureEncoding,
featureType: SimpleFeatureType) = query match {
case _ if query.getHints.containsKey(DENSITY_KEY) =>
val clazz = classOf[DensityIterator]
val cfg = new IteratorSetting(iteratorPriority_AnalysisIterator,
"topfilter-" + randomPrintableString(5),
clazz)
val width = query.getHints.get(WIDTH_KEY).asInstanceOf[Int]
val height = query.getHints.get(HEIGHT_KEY).asInstanceOf[Int]
val polygon = if (geometryToCover == null) null else geometryToCover.getEnvelope.asInstanceOf[Polygon]
DensityIterator.configure(cfg, polygon, width, height)
cfg.addOption(DEFAULT_SCHEMA_NAME, schema)
configureFeatureEncoding(cfg, featureEncoding)
configureFeatureType(cfg, featureType)
Some(cfg)
case _ if query.getHints.containsKey(TEMPORAL_DENSITY_KEY) =>
val clazz = classOf[TemporalDensityIterator]
val cfg = new IteratorSetting(iteratorPriority_AnalysisIterator,
"topfilter-" + randomPrintableString(5),
clazz)
val interval = query.getHints.get(TIME_INTERVAL_KEY).asInstanceOf[Interval]
val buckets = query.getHints.get(TIME_BUCKETS_KEY).asInstanceOf[Int]
TemporalDensityIterator.configure(cfg, interval, buckets)
configureFeatureEncoding(cfg, featureEncoding)
configureFeatureType(cfg, featureType)
Some(cfg)
case _ if query.getHints.containsKey(MAP_AGGREGATION_KEY) =>
val clazz = classOf[MapAggregatingIterator]
val cfg = new IteratorSetting(iteratorPriority_AnalysisIterator,
"topfilter-" + randomPrintableString(5),
clazz)
val mapAttribute = query.getHints.get(MAP_AGGREGATION_KEY).asInstanceOf[String]
MapAggregatingIterator.configure(cfg, mapAttribute)
configureFeatureEncoding(cfg, featureEncoding)
configureFeatureType(cfg, featureType)
Some(cfg)
case _ => None
}
}
trait StrategyProvider {
/**
* Returns details on a potential strategy if the filter is valid for this strategy.
*
* @param filter
* @param sft
* @return
*/
def getStrategy(filter: Filter, sft: SimpleFeatureType, hints: StrategyHints): Option[StrategyDecision]
}
case class StrategyDecision(strategy: Strategy, cost: Long) | mmatz-ccri/geomesa | geomesa-core/src/main/scala/org/locationtech/geomesa/core/index/Strategy.scala | Scala | apache-2.0 | 9,345 |
// Generated by the Scala Plugin for the Protocol Buffer Compiler.
// Do not edit!
//
// Protofile syntax: PROTO3
package com.google.protobuf.wrappers
import scala.collection.JavaConversions._
@SerialVersionUID(0L)
final case class FloatValue(
value: Float = 0.0f
) extends com.trueaccord.scalapb.GeneratedMessage with com.trueaccord.scalapb.Message[FloatValue] with com.trueaccord.lenses.Updatable[FloatValue] {
@transient
lazy val serializedSize: Int = {
var __size = 0
if (value != 0.0f) { __size += com.google.protobuf.CodedOutputStream.computeFloatSize(1, value) }
__size
}
def writeTo(output: com.google.protobuf.CodedOutputStream): Unit = {
{
val __v = value
if (__v != 0.0f) {
output.writeFloat(1, __v)
}
};
}
def mergeFrom(__input: com.google.protobuf.CodedInputStream): com.google.protobuf.wrappers.FloatValue = {
var __value = this.value
var _done__ = false
while (!_done__) {
val _tag__ = __input.readTag()
_tag__ match {
case 0 => _done__ = true
case 13 =>
__value = __input.readFloat()
case tag => __input.skipField(tag)
}
}
com.google.protobuf.wrappers.FloatValue(
value = __value
)
}
def withValue(__v: Float): FloatValue = copy(value = __v)
def getField(__field: com.google.protobuf.Descriptors.FieldDescriptor): scala.Any = {
__field.getNumber match {
case 1 => {
val __t = value
if (__t != 0.0f) __t else null
}
}
}
override def toString: String = com.trueaccord.scalapb.TextFormat.printToUnicodeString(this)
def companion = com.google.protobuf.wrappers.FloatValue
}
object FloatValue extends com.trueaccord.scalapb.GeneratedMessageCompanion[FloatValue] with com.trueaccord.scalapb.JavaProtoSupport[FloatValue, com.google.protobuf.FloatValue] {
implicit def messageCompanion: com.trueaccord.scalapb.GeneratedMessageCompanion[FloatValue] with com.trueaccord.scalapb.JavaProtoSupport[FloatValue, com.google.protobuf.FloatValue] = this
def toJavaProto(scalaPbSource: com.google.protobuf.wrappers.FloatValue): com.google.protobuf.FloatValue = {
val javaPbOut = com.google.protobuf.FloatValue.newBuilder
javaPbOut.setValue(scalaPbSource.value)
javaPbOut.build
}
def fromJavaProto(javaPbSource: com.google.protobuf.FloatValue): com.google.protobuf.wrappers.FloatValue = com.google.protobuf.wrappers.FloatValue(
value = javaPbSource.getValue.floatValue
)
def fromFieldsMap(__fieldsMap: Map[com.google.protobuf.Descriptors.FieldDescriptor, scala.Any]): com.google.protobuf.wrappers.FloatValue = {
require(__fieldsMap.keys.forall(_.getContainingType() == descriptor), "FieldDescriptor does not match message type.")
val __fields = descriptor.getFields
com.google.protobuf.wrappers.FloatValue(
__fieldsMap.getOrElse(__fields.get(0), 0.0f).asInstanceOf[Float]
)
}
def descriptor: com.google.protobuf.Descriptors.Descriptor = GoogleProtobufWrappersProto.descriptor.getMessageTypes.get(1)
def messageCompanionForField(__field: com.google.protobuf.Descriptors.FieldDescriptor): com.trueaccord.scalapb.GeneratedMessageCompanion[_] = throw new MatchError(__field)
def enumCompanionForField(__field: com.google.protobuf.Descriptors.FieldDescriptor): com.trueaccord.scalapb.GeneratedEnumCompanion[_] = throw new MatchError(__field)
lazy val defaultInstance = com.google.protobuf.wrappers.FloatValue(
)
implicit class FloatValueLens[UpperPB](_l: com.trueaccord.lenses.Lens[UpperPB, FloatValue]) extends com.trueaccord.lenses.ObjectLens[UpperPB, FloatValue](_l) {
def value: com.trueaccord.lenses.Lens[UpperPB, Float] = field(_.value)((c_, f_) => c_.copy(value = f_))
}
final val VALUE_FIELD_NUMBER = 1
}
| eiennohito/ScalaPB | scalapb-runtime/jvm/src/main/scala/com/google/protobuf/wrappers/FloatValue.scala | Scala | apache-2.0 | 3,862 |
package extruder
import cats.Eq
import cats.data.NonEmptyList
import cats.laws.IsEq
import org.scalacheck.{Arbitrary, Gen, Prop}
import org.scalacheck.util.Pretty
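/** Test-support implicits: converts cats-laws `IsEq` results to ScalaCheck `Prop`s and derives `Arbitrary[NonEmptyList]`. */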
package object laws {
implicit def catsLawsIsEqToProp[A: Eq](isEq: IsEq[A])(implicit pp: A => Pretty): Prop =
cats.kernel.laws.discipline.catsLawsIsEqToProp[A](isEq)
implicit def nonEmptyListArb[T](implicit arb: Arbitrary[T]): Arbitrary[NonEmptyList[T]] =
Arbitrary(for {
head <- arb.arbitrary
tail <- Gen.listOf(arb.arbitrary)
} yield NonEmptyList.of(head, tail: _*))
}
| janstenpickle/extruder | laws/src/main/scala/extruder/laws/package.scala | Scala | mit | 565 |
package de.adesso.scalaspring.config
import org.springframework.context.annotation.Configuration
import javax.sql.DataSource
import org.springframework.beans.factory.annotation.Autowired
import org.springframework.context.annotation.Bean
import org.springframework.jdbc.datasource.DataSourceTransactionManager
import de.adesso.scalaspring.dao.NoTxCustomerDAO
import de.adesso.scalaspring.service.Service2
import de.adesso.scalaspring.service.Service1
import org.apache.commons.dbcp.BasicDataSource
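// Spring @Configuration written in Scala: wires the transaction manager, DAO and services against the autowired DataSource.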
@Configuration
class ScalaConfig {
@Autowired
val dataSource: DataSource = null
@Bean
def transactionManager() = new DataSourceTransactionManager(dataSource)
@Bean
def customerDAO() = new NoTxCustomerDAO(dataSource)
@Bean
def service1() = new Service1(customerDAO())
@Bean
def service2() = new Service2(customerDAO())
} | ewolff/scala-spring | src/test/scala/de/adesso/scalaspring/config/ScalaConfig.scala | Scala | apache-2.0 | 847 |