code (string, 5-1M chars) | repo_name (string, 5-109 chars) | path (string, 6-208 chars) | language (stringclasses, 1 value) | license (stringclasses, 15 values) | size (int64, 5-1M)
---|---|---|---|---|---|
package chess
package format
class ForsythPerfTest extends ChessTest {
args(skipAll = true)
val initialBoard = Board.init(variant.Standard)
val emptyBoard = (Forsyth << "8/8/8/8/8/8/8/8").get.board
// "export one position board" should {
// "many times" in {
// val nb = 10000
// val iterations = 10
// def runOne = Forsyth.exportBoard(initialBoard)
// def run { for (i <- 1 to nb) runOne }
// runOne must_== Forsyth.initial.takeWhile(' '!=)
// if (nb * iterations > 1) {
// println("warming up")
// run
// }
// println("running tests")
// val durations = for (i <- 1 to iterations) yield {
// val start = System.currentTimeMillis
// run
// val duration = System.currentTimeMillis - start
// println(s"$nb positions in $duration ms")
// duration
// }
// val nbPositions = iterations * nb
// val moveNanos = (1000000 * durations.sum) / nbPositions
// println(s"Average = $moveNanos nanoseconds per position")
// println(s" ${1000000000 / moveNanos} positions per second")
// true === true
// }
// }
"export castles" should {
"many times" in {
val nb = 100000
val iterations = 10
def runOne = {
// Forsyth.exportCastles(emptyBoard)
Forsyth.exportCastles(initialBoard)
}
def run: Unit = { for (i <- 1 to nb) runOne }
if (nb * iterations > 1) {
println("warming up")
run
}
println("running tests")
val durations = for (i <- 1 to iterations) yield {
val start = System.currentTimeMillis
run
val duration = System.currentTimeMillis - start
println(s"$nb positions in $duration ms")
duration
}
val nbPositions = iterations * nb
val moveNanos = (1000000 * durations.sum) / nbPositions
println(s"Average = $moveNanos nanoseconds per position")
println(s" ${1000000000 / moveNanos} positions per second")
true === true
}
}
}
| niklasf/scalachess | src/test/scala/format/ForsythPerfTest.scala | Scala | mit | 2,083 |
package vulkan.wrapper.registry.vtype
import vulkan.wrapper.registry.{Registry, VulkanComponentMappedData}
import scala.xml.Node
class VulkanUnionType(registry: Registry, node: Node) extends VulkanMemberType(registry, node) {
}
object VulkanUnionType {
def apply(registry: Registry): VulkanComponentMappedData[VulkanUnionType] =
VulkanComponentMappedData(registry,(registry.xml \\ "types" \\ "type").filter(t => Set("struct","union").contains(t \\@ "category")).map(new VulkanUnionType(registry,_)))
}
| MrInformatic/VulkanWrapper | src/vulkan/wrapper/registry/vtype/VulkanUnionType.scala | Scala | mit | 511 |
package uk.gov.gds.ier.model
case class KeyForError(key:String) | michaeldfallen/ier-frontend | app/uk/gov/gds/ier/model/KeyForError.scala | Scala | mit | 64 |
package com.ftchinese.jobs.push
import java.net.Socket
import javax.net.ssl.{KeyManagerFactory, SSLContext}
import com.ftchinese.jobs.common.KeystoreManager
/**
* Push server manager.
* Created by wanbo on 16/3/21.
*/
class PushServerManager(keyName: String, keyPassword: String) {
private var server_host = ""
private var server_port: Int = _
def getPushServer(production: Boolean): Socket ={
if(production){
server_host = PushServer.pro_host
server_port = PushServer.pro_port
} else {
server_host = PushServer.dev_host
server_port = PushServer.dev_port
}
val context = SSLContext.getInstance("TLS")
val kmf = KeyManagerFactory.getInstance("sunx509")
kmf.init(KeystoreManager.loadKeystore(keyName), KeystoreManager.getKeystorePassword)
context.init(kmf.getKeyManagers, null, null)
val factory = context.getSocketFactory
val socket = factory.createSocket(server_host, server_port)
socket.setSoTimeout(3000)
socket
}
}
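// Illustrative usage sketch (added for clarity, not part of the original source). The
// keystore name, password, and payload bytes below are assumptions for the example only:
//
//   val manager = new PushServerManager("apns-cert.p12", "secret")
//   val socket  = manager.getPushServer(production = false) // development gateway
//   try socket.getOutputStream.write(notificationBytes)     // pre-encoded APNs frame
//   finally socket.close()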
| FTChinese/push | src/main/scala/com/ftchinese/jobs/push/PushServerManager.scala | Scala | mit | 1,085 |
package colossus
package service
import core._
import akka.actor.ActorRef
import scala.concurrent.duration._
import scala.concurrent.ExecutionContext
import java.net.InetSocketAddress
import metrics.MetricAddress
import Codec._
trait CodecDSL {self =>
type Input
type Output
}
trait DefaultHandler extends CodecDSL {self =>
type ConnectionHandler = BasicServiceHandler[self.type]
}
object CodecDSL {
type PartialHandler[C <: CodecDSL] = PartialFunction[C#Input, Callback[C#Output]]
type HandlerGenerator[C <: CodecDSL] = ConnectionContext[C] => Unit
type Initializer[C <: CodecDSL] = ServiceContext[C] => HandlerGenerator[C]
type Receive = PartialFunction[Any, Unit]
type ErrorHandler[C <: CodecDSL] = PartialFunction[(C#Input, Throwable), C#Output]
}
import CodecDSL._
/**
* Provide a Codec as well as some convenience functions for usage within a Service.
* @tparam C the type of codec this provider will supply
*/
trait CodecProvider[C <: CodecDSL] {
/**
* The Codec which will be used.
* @return
*/
def provideCodec(): ServerCodec[C#Input, C#Output]
/**
* Basic error response
* @param request Request that caused the error
* @param reason The resulting failure
* @return A response which represents the failure encoded with the Codec
*/
def errorResponse(request: C#Input, reason: Throwable): C#Output
/**
* Provider of a ConnectionHandler using this Codec
* @param config ServiceConfig of the Server the ConnectionHandlers will operate in
* @param worker The Worker to which the Connection is bound
* @param ex ExecutionContext
* @return Handler
*/
def provideHandler(config: ServiceConfig[C#Input, C#Output], worker: WorkerRef, intializer: HandlerGenerator[C])(implicit ex: ExecutionContext, tagDecorator: TagDecorator[C#Input, C#Output] = TagDecorator.default[C#Input, C#Output]): DSLHandler[C] = {
new BasicServiceHandler[C](config,worker,this, intializer)
}
/**
* Provider of a Delegator using this Codec
* @param func Function which provides a Delegator for this Service
* @param server Server which this Delegator will operate in
* @param worker Worker which the Delegator will be bound
* @param provider The codecProvider
* @param config ServiceConfig
* @return Delegator
*/
def provideDelegator(func: Initializer[C], server: ServerRef, worker: WorkerRef, provider: CodecProvider[C], config: ServiceConfig[C#Input, C#Output]) = {
new BasicServiceDelegator(func, server, worker, provider, config)
}
}
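// Illustrative sketch (added for clarity, not part of the original source): a minimal
// CodecProvider for a hypothetical `Echo` protocol. `Echo`, `EchoServerCodec` and
// `EchoError` are assumed to be defined elsewhere; only the two abstract members need to
// be supplied, the handler and delegator factories above come for free.
//
//   implicit object EchoCodecProvider extends CodecProvider[Echo] {
//     def provideCodec() = new EchoServerCodec
//     def errorResponse(request: Echo#Input, reason: Throwable): Echo#Output =
//       EchoError(reason.getMessage)
//   }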
trait ClientCodecProvider[C <: CodecDSL] {
def name: String
def clientCodec(): ClientCodec[C#Input, C#Output]
}
/**
* This contains methods to interact with an individual connection.
*/
trait ConnectionContext[C <: CodecDSL] {
/**
* Set a partial function for the request processing handler for the connection. Any request that
* falls through the handler will be automatically converted into an error
* response with a `UnhandledRequestException` as the cause
*/
def become(p: PartialHandler[C])
/**
* Set a function for the request processing handler for the connection
*/
def process(f: C#Input => Callback[C#Output]){
become{case all => f(all)}
}
/**
* Set the handler for actor messages intended for this connection.
*/
def receive(receiver: PartialFunction[Any, Unit])
/**
* Gets the sender of the current message being processed in the receive handler
*/
def sender(): ActorRef
/**
* Immediately terminate the connection. Note that requests that are in the
* middle of being processed will continue processing although their final
* response will be discarded.
*/
def disconnect()
/**
* Terminate the connection, but allow any existing outstanding requests to
* complete processing before disconnecting. Any new requests that come in
* will be automatically completed with an error.
*/
def gracefulDisconnect()
/**
* Get current info about the connection
*/
def connectionInfo: Option[ConnectionInfo]
/**
* Attach a handler for non-recoverable errors. This includes uncaught
* exceptions, unhandled requests, request timeouts, and other server-level
* errors. In every case, this handler should not attempt to actually
* process the request, but instead simply return an appropriately formatted
* error response.
*/
def onError(handler: ErrorHandler[C])
implicit val callbackExecutor: CallbackExecutor
}
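// Illustrative sketch (added for clarity, not part of the original source) of combining the
// request handler with the actor-message hooks inside a connection initializer. The
// `Shutdown`/`Ping`/`Pong` messages and `handleRequest` are assumptions for the example:
//
//   connection.become { case request => handleRequest(request) }
//   connection.receive {
//     case Shutdown => connection.gracefulDisconnect()
//     case Ping     => connection.sender() ! Pong
//   }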
trait ServiceContext[C <: CodecDSL] {
def worker: WorkerRef
def handle(p: HandlerGenerator[C]) = p //this method is just for clarity
//delegator message handling
def receive(receiver: Receive)
def clientFor[D <: CodecDSL](config: ClientConfig)(implicit provider: ClientCodecProvider[D]): ServiceClient[D#Input, D#Output] = {
new ServiceClient(provider.clientCodec(), config, worker)
}
def clientFor[D <: CodecDSL](host: String, port: Int, requestTimeout: Duration = 1.second)(implicit provider: ClientCodecProvider[D]): ServiceClient[D#Input, D#Output] = {
clientFor[D](new InetSocketAddress(host, port), requestTimeout)
}
def clientFor[D <: CodecDSL]
(address: InetSocketAddress, requestTimeout: Duration)
(implicit provider: ClientCodecProvider[D]): ServiceClient[D#Input, D#Output] = {
val config = ClientConfig(
address = address,
requestTimeout = requestTimeout,
name = MetricAddress.Root / provider.name
)
clientFor[D](config)
}
}
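// Illustrative sketch (added for clarity, not part of the original source): opening a client
// to a downstream service during delegator initialization. The `Redis` codec, host and port
// are assumptions for the example:
//
//   val redis = context.clientFor[Redis]("localhost", 6379, requestTimeout = 50.milliseconds)
//   // redis: ServiceClient[Redis#Input, Redis#Output], bound to this worker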
abstract class DSLDelegator[C <: CodecDSL](server : ServerRef, worker : WorkerRef) extends Delegator(server, worker) with ServiceContext[C]
class BasicServiceDelegator[C <: CodecDSL](func: Initializer[C], server: ServerRef, worker: WorkerRef, provider: CodecProvider[C], config: ServiceConfig[C#Input, C#Output])
extends DSLDelegator[C](server, worker){
//note, this needs to be setup before func is called
private var currentReceiver: Receive = {case _ => ()}
val handlerInitializer: HandlerGenerator[C] = func(this)
def acceptNewConnection: Option[ServerConnectionHandler] = {
val handler: DSLHandler[C] = provider.provideHandler(config, worker, handlerInitializer)
Some(handler)
}
def receive(r: Receive) {
currentReceiver = r
}
//TODO: This should be settable, or the currentReceiver needs to be settable, that way,
//in the Initializer[T] function that is invoked, custom behavior can be supplied to this delegator
override def handleMessage: Receive = currentReceiver
}
trait DSLHandler[C <: CodecDSL] extends ServiceServer[C#Input, C#Output] with ConnectionContext[C]
class UnhandledRequestException(message: String) extends Exception(message)
class ReceiveException(message: String) extends Exception(message)
class BasicServiceHandler[C <: CodecDSL]
(config: ServiceConfig[C#Input, C#Output], worker: WorkerRef, provider: CodecProvider[C], val initializer: HandlerGenerator[C])
(implicit ex: ExecutionContext, tagDecorator: TagDecorator[C#Input, C#Output] = TagDecorator.default[C#Input, C#Output])
extends ServiceServer[C#Input, C#Output](provider.provideCodec(), config, worker)
with DSLHandler[C] {
protected def unhandled: PartialHandler[C] = PartialFunction[C#Input,Callback[C#Output]]{
case other => Callback.successful(processFailure(other, new UnhandledRequestException(s"Unhandled Request $other")))
}
protected def unhandledReceive: Receive = {
case _ => {}
}
protected def unhandledError: ErrorHandler[C] = {
case (request, reason) => provider.errorResponse(request, reason)
}
override def connected(e: WriteEndpoint) {
super.connected(e)
initializer(this)
}
private var currentHandler: PartialHandler[C] = unhandled
private var currentMessageReceiver: Receive = unhandledReceive
private var currentSender: Option[ActorRef] = None
private var currentErrorHandler: ErrorHandler[C] = unhandledError
def onError(handler: ErrorHandler[C]) {
currentErrorHandler = handler
}
def become(handler: PartialHandler[C]) {
currentHandler = handler
}
def sender() = currentSender.getOrElse(throw new ReceiveException("cannot call sender outside of receive"))
def receive(handler: PartialFunction[Any, Unit]) {
currentMessageReceiver = handler
}
def receivedMessage(message: Any, sender: ActorRef) {
currentSender = Some(sender)
(currentMessageReceiver orElse unhandledReceive)(message)
currentSender = None
}
protected def fullHandler: PartialFunction[C#Input, Callback[C#Output]] = currentHandler orElse unhandled
protected def processRequest(i: C#Input): Callback[C#Output] = fullHandler(i)
protected def processFailure(request: C#Input, reason: Throwable): C#Output = (currentErrorHandler orElse unhandledError)((request, reason))
}
/**
* The Service object is an entry point into the Service DSL, which provides some convenience functions for quickly
* creating a Server that serves responses to requests using a Codec (i.e. memcached, http, redis, etc).
*
* One thing to always keep in mind is that code inside the Service.serve is placed inside a Delegator and ConnectionHandler,
* which means that it directly runs inside of a Worker and its SelectLoop.
* Be VERY mindful of the code that you place in here: any blocking code will block the Worker. Not good.
*
*
* An example with full typing in place to illustrate :
*
* {{{
* import colossus.protocols.http._ //imports an implicit codec
*
* implicit val ioSystem : IOSystem = myBootStrappingFunction()
*
* Service.serve[Http]("my-app", 9090) { context : ServiceContext[Http] =>
*
* //everything in this block happens on delegator initialization, which is on application startup. One time only.
*
* context.handle { connection : ConnectionContext[Http] => {
* //This block is for connectionHandler initialization. Happens in the event loop. Don't block.
* //Note, that a connection can handle multiple requests.
* connection.become {
* //this partial function is what "handles" a request. Again.. don't block.
* case req @ Get on Text("test") => future(req.ok(someService.getDataFromStore()))
* case req @ Get on Text("path") => req.ok("path")
* }
* }
* }
* }
*}}}
*
*
*/
object Service {
/** Start a service with worker and connection initialization
*
* The basic structure of a service using this method is:{{{
Service.serve[Http]{ workerContext =>
//worker initialization
workerContext.handle { connectionContext =>
//connection initialization
connection.become {
case ...
}
}
}
}}}
*
* @param serverSettings Settings to provide the underlying server
* @param serviceConfig Config for the service
* @param handler The worker initializer to use for the service
* @tparam T The codec to use, eg Http, Redis
* @return A [[ServerRef]] for the server.
*/
def serve[T <: CodecDSL]
(serverSettings: ServerSettings, serviceConfig: ServiceConfig[T#Input, T#Output])
(handler: Initializer[T])
(implicit system: IOSystem, provider: CodecProvider[T]): ServerRef = {
val serverConfig = ServerConfig(
name = serviceConfig.name,
settings = serverSettings,
delegatorFactory = (s,w) => provider.provideDelegator(handler, s, w, provider, serviceConfig)
)
Server(serverConfig)
}
/** Quick-start a service, using default settings
*
* @param name The name of the service
* @param port The port to bind the server to
*/
def serve[T <: CodecDSL]
(name: String, port: Int, requestTimeout: Duration = 100.milliseconds)
(handler: Initializer[T])
(implicit system: IOSystem, provider: CodecProvider[T]): ServerRef = {
serve[T](ServerSettings(port), ServiceConfig[T#Input, T#Output](name = name, requestTimeout = requestTimeout))(handler)
}
/** Start a simple, stateless service
*
* Unlike `Service.serve`, there is no room for per-worker or per-connection
* initialization. Useful when starting simple services or testing
*
* @param name Name of this Service
* @param port Port on which this Server will accept connections
* @param handler
* @param system The IOSystem to which this Server will belong
* @param provider CodecProvider
* @tparam T the type of codec this service uses
* @return
*/
def become[T <: CodecDSL]
(name: String, port: Int, requestTimeout: Duration = 100.milliseconds)
(handler: PartialHandler[T])
(implicit system: IOSystem, provider: CodecProvider[T]): ServerRef = {
serve[T](name, port, requestTimeout){context =>
context.handle{connection =>
connection.become(handler)
}
}
}
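// Illustrative usage (added for clarity, not part of the original source), mirroring the
// scaladoc example above; the Http codec import and the route shown are assumptions:
//
//   Service.become[Http]("echo-service", 9090) {
//     case req @ Get on Text("ping") => req.ok("pong")
//   }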
}
| zgagnon/colossus | colossus/src/main/scala/colossus/service/ServiceDSL.scala | Scala | apache-2.0 | 12,771 |
package pl.bigpicture.wikipv
import java.net.URLEncoder
/**
* Created by kuba on 14/02/16.
*/
case class PageStat(page: String, lang: String, pv: Int, ts: Int) {
def pageTitleURL = URLEncoder.encode(page.replace(" ", "_"), "UTF-8")
/**
* Get URL to Wikipedia article for given page and lang
*/
def pageURL = "https://%s.wikipedia.org/wiki/%s".format(lang, pageTitleURL)
def pageTitleJson = page.replace("\"", "\\\"")
}
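// Illustrative usage (added for clarity, not part of the original source); the values are
// made up for the example:
//
//   PageStat("Albert Einstein", "en", 42, 0).pageURL
//   // => "https://en.wikipedia.org/wiki/Albert_Einstein" (spaces become underscores, then URL-encoded)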
| jpieprzyk/wikipv | src/main/scala/pl/bigpicture/wikipv/PageStat.scala | Scala | apache-2.0 | 445 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.matchers.dsl
import org.scalatest._
import Matchers._
class ResultOfOfTypeInvocationSpec extends FunSpec {
describe("ResultOfOfTypeInvocation ") {
it("should have pretty toString") {
val result = of[Exception]
result.toString should be ("of [java.lang.Exception]")
}
}
}
| dotty-staging/scalatest | scalatest-test/src/test/scala/org/scalatest/matchers/dsl/ResultOfOfTypeInvocationSpec.scala | Scala | apache-2.0 | 936 |
/* sbt -- Simple Build Tool
* Copyright 2009, 2010 Mark Harrah
*/
package sbt
import java.io.File
import xsbti.ArtifactInfo.{ ScalaCompilerID, ScalaLibraryID, ScalaOrganization }
/**
* Represents the source for Scala classes for a given version. The reason both a ClassLoader and the jars are required
* is that the compiler requires the location of the library jar on the (boot)classpath and the loader is used
* for the compiler itself.
* The 'version' field is the version used to obtain the Scala classes. This is typically the version for the maven repository.
* The 'actualVersion' field should be used to uniquely identify the compiler. It is obtained from the compiler.properties file.
*
* This should be constructed via the ScalaInstance.apply methods. The primary constructor is deprecated.
*/
final class ScalaInstance(val version: String, val loader: ClassLoader, val libraryJar: File,
@deprecated("Only `allJars` and `jars` can be reliably provided for modularized Scala.", "0.13.0") val compilerJar: File,
@deprecated("Only `allJars` and `jars` can be reliably provided for modularized Scala.", "0.13.0") val extraJars: Seq[File],
val explicitActual: Option[String]) extends xsbti.compile.ScalaInstance {
/**
* This tells us if the scalaInstance is from a managed (i.e. ivy-resolved) scala *or*
* if it's a free-floating ScalaInstance, in which case we need to do tricks to the classpaths we find
* because it won't be on them.
*/
final def isManagedVersion = explicitActual.isDefined
// These are to implement xsbti.ScalaInstance
@deprecated("Only `allJars` and `jars` can be reliably provided for modularized Scala.", "0.13.0")
def otherJars: Array[File] = extraJars.toArray
def allJars: Array[File] = jars.toArray
require(version.indexOf(' ') < 0, "Version cannot contain spaces (was '" + version + "')")
def jars = libraryJar :: compilerJar :: extraJars.toList
/** Gets the version of Scala in the compiler.properties file from the loader. This version may be different from the one given by 'version'. */
lazy val actualVersion = explicitActual getOrElse ScalaInstance.actualVersion(loader)("\n version " + version + ", " + jarStrings)
def jarStrings = "library jar: " + libraryJar + ", compiler jar: " + compilerJar
override def toString = "Scala instance{version label " + version + ", actual version " + actualVersion + ", " + jarStrings + "}"
}
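// Illustrative usage (added for clarity, not part of the original source): construction goes
// through the companion's apply methods. The `launcher` value and the Scala home path are
// assumptions for the example:
//
//   val fromLauncher = ScalaInstance("2.10.4", launcher)
//   val fromHome     = ScalaInstance(new File("/opt/scala-2.10.4"), launcher)
//   fromHome.actualVersion // read from compiler.properties; may differ from the version label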
object ScalaInstance {
val ScalaOrg = ScalaOrganization
val VersionPrefix = "version "
def apply(org: String, version: String, launcher: xsbti.Launcher): ScalaInstance =
// Due to incompatibility with previous launchers if scalaOrg has default value revert to an existing method
if (org == ScalaOrg)
apply(version, launcher)
else try {
apply(version, launcher.getScala(version, "", org))
} catch {
case x: NoSuchMethodError => sys.error("Incompatible version of the xsbti.Launcher interface. Use an sbt 0.12+ launcher instead.")
}
/** Creates a ScalaInstance using the given provider to obtain the jars and loader.*/
def apply(version: String, launcher: xsbti.Launcher): ScalaInstance =
apply(version, launcher.getScala(version))
def apply(version: String, provider: xsbti.ScalaProvider): ScalaInstance =
new ScalaInstance(version, provider.loader, provider.libraryJar, provider.compilerJar, (provider.jars.toSet - provider.libraryJar - provider.compilerJar).toSeq, None)
def apply(scalaHome: File, launcher: xsbti.Launcher): ScalaInstance =
apply(libraryJar(scalaHome), compilerJar(scalaHome), launcher, allJars(scalaHome): _*)
def apply(scalaHome: File)(classLoader: List[File] => ClassLoader): ScalaInstance =
apply(libraryJar(scalaHome), compilerJar(scalaHome), allJars(scalaHome): _*)(classLoader)
def apply(version: String, scalaHome: File, launcher: xsbti.Launcher): ScalaInstance =
apply(version, libraryJar(scalaHome), compilerJar(scalaHome), launcher, allJars(scalaHome): _*)
@deprecated("Does not handle modularized Scala. Use a variant that only accepts all jars.", "0.13.0")
def apply(libraryJar: File, compilerJar: File, launcher: xsbti.Launcher, extraJars: File*): ScalaInstance =
apply(libraryJar, compilerJar, extraJars: _*)(scalaLoader(launcher))
@deprecated("Does not handle modularized Scala. Use a variant that only accepts all jars.", "0.13.0")
def apply(libraryJar: File, compilerJar: File, extraJars: File*)(classLoader: List[File] => ClassLoader): ScalaInstance =
{
val loader = classLoader(libraryJar :: compilerJar :: extraJars.toList)
val version = actualVersion(loader)(" (library jar " + libraryJar.getAbsolutePath + ")")
new ScalaInstance(version, loader, libraryJar, compilerJar, extraJars, None)
}
@deprecated("Does not handle modularized Scala. Use a variant that only accepts all jars.", "0.13.0")
def apply(version: String, libraryJar: File, compilerJar: File, launcher: xsbti.Launcher, extraJars: File*): ScalaInstance =
apply(version, None, libraryJar, compilerJar, launcher, extraJars: _*)
@deprecated("Does not handle modularized Scala. Use a variant that only accepts all jars.", "0.13.0")
def apply(version: String, libraryJar: File, compilerJar: File, extraJars: File*)(classLoader: List[File] => ClassLoader): ScalaInstance =
apply(version, None, libraryJar, compilerJar, extraJars: _*)(classLoader)
@deprecated("Does not handle modularized Scala. Use a variant that only accepts all jars.", "0.13.0")
def apply(version: String, explicitActual: Option[String], libraryJar: File, compilerJar: File, launcher: xsbti.Launcher, extraJars: File*): ScalaInstance =
apply(version, explicitActual, libraryJar, compilerJar, extraJars: _*)(scalaLoader(launcher))
@deprecated("Does not handle modularized Scala. Use a variant that only accepts all jars.", "0.13.0")
def apply(version: String, explicitActual: Option[String], libraryJar: File, compilerJar: File, extraJars: File*)(classLoader: List[File] => ClassLoader): ScalaInstance =
new ScalaInstance(version, classLoader(libraryJar :: compilerJar :: extraJars.toList), libraryJar, compilerJar, extraJars, explicitActual)
@deprecated("Cannot be reliably provided for modularized Scala.", "0.13.0")
def extraJars(scalaHome: File): Seq[File] =
optScalaJar(scalaHome, "jline.jar") ++
optScalaJar(scalaHome, "fjbg.jar") ++
optScalaJar(scalaHome, "scala-reflect.jar")
def allJars(scalaHome: File): Seq[File] = IO.listFiles(scalaLib(scalaHome)).filter(f => !blacklist(f.getName))
private[this] def scalaLib(scalaHome: File): File = new File(scalaHome, "lib")
private[this] val blacklist: Set[String] = Set("scala-actors.jar", "scalacheck.jar", "scala-partest.jar", "scala-partest-javaagent.jar", "scalap.jar", "scala-swing.jar")
private def compilerJar(scalaHome: File) = scalaJar(scalaHome, "scala-compiler.jar")
private def libraryJar(scalaHome: File) = scalaJar(scalaHome, "scala-library.jar")
def scalaJar(scalaHome: File, name: String) = new File(scalaLib(scalaHome), name)
@deprecated("No longer used.", "0.13.0")
def optScalaJar(scalaHome: File, name: String): List[File] =
{
val jar = scalaJar(scalaHome, name)
if (jar.isFile) jar :: Nil else Nil
}
/** Gets the version of Scala in the compiler.properties file from the loader.*/
private def actualVersion(scalaLoader: ClassLoader)(label: String) =
try fastActualVersion(scalaLoader)
catch { case e: Exception => slowActualVersion(scalaLoader)(label) }
private def slowActualVersion(scalaLoader: ClassLoader)(label: String) =
{
val v = try { Class.forName("scala.tools.nsc.Properties", true, scalaLoader).getMethod("versionString").invoke(null).toString }
catch { case cause: Exception => throw new InvalidScalaInstance("Scala instance doesn't exist or is invalid: " + label, cause) }
if (v.startsWith(VersionPrefix)) v.substring(VersionPrefix.length) else v
}
private def fastActualVersion(scalaLoader: ClassLoader): String =
{
val stream = scalaLoader.getResourceAsStream("compiler.properties")
try {
val props = new java.util.Properties
props.load(stream)
props.getProperty("version.number")
} finally stream.close()
}
import java.net.{ URL, URLClassLoader }
private def scalaLoader(launcher: xsbti.Launcher): Seq[File] => ClassLoader = jars =>
new URLClassLoader(jars.map(_.toURI.toURL).toArray[URL], launcher.topLoader)
}
class InvalidScalaInstance(message: String, cause: Throwable) extends RuntimeException(message, cause) | pdalpra/sbt | util/classpath/src/main/scala/sbt/ScalaInstance.scala | Scala | bsd-3-clause | 8,626 |
/***********************************************************************
* Copyright (c) 2013-2016 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
*************************************************************************/
package org.locationtech.geomesa.tools.export
import java.io._
import java.util.zip.{Deflater, GZIPOutputStream}
import com.beust.jcommander.ParameterException
import com.typesafe.scalalogging.LazyLogging
import org.apache.commons.io.IOUtils
import org.geotools.data.Query
import org.geotools.data.simple.SimpleFeatureCollection
import org.geotools.filter.text.ecql.ECQL
import org.locationtech.geomesa.index.conf.QueryHints
import org.locationtech.geomesa.index.geotools.GeoMesaDataStore
import org.locationtech.geomesa.tools.export.formats.{BinExporter, NullExporter, ShapefileExporter, _}
import org.locationtech.geomesa.tools.utils.DataFormats
import org.locationtech.geomesa.tools.utils.DataFormats._
import org.locationtech.geomesa.tools.{Command, DataStoreCommand}
import org.locationtech.geomesa.utils.index.IndexMode
import org.locationtech.geomesa.utils.stats.{MethodProfiling, Timing}
import org.opengis.filter.Filter
import scala.util.control.NonFatal
trait ExportCommand[DS <: GeoMesaDataStore[_, _, _]] extends DataStoreCommand[DS] with MethodProfiling {
override val name = "export"
override def params: ExportParams
override def execute(): Unit = {
implicit val timing = new Timing
val count = profile(withDataStore(export))
Command.user.info(s"Feature export complete to ${Option(params.file).map(_.getPath).getOrElse("standard out")} " +
s"in ${timing.time}ms${count.map(" for " + _ + " features").getOrElse("")}")
}
protected def export(ds: DS): Option[Long] = {
import ExportCommand._
import org.locationtech.geomesa.tools.utils.DataFormats._
val fmt = DataFormats.values.find(_.toString.equalsIgnoreCase(params.outputFormat)).getOrElse {
throw new ParameterException(s"Invalid format '${params.outputFormat}'. Valid values are " +
DataFormats.values.filter(_ != Bin).map(_.toString.toLowerCase).mkString("'", "', '", "'"))
}
if (fmt == Bin) {
throw new ParameterException(s"This operation has been deprecated. Use the command 'export-bin' instead.")
}
val attributes = getAttributes(ds, fmt, params)
val features = getFeatureCollection(ds, fmt, attributes, params)
lazy val avroCompression = Option(params.gzip).map(_.toInt).getOrElse(Deflater.DEFAULT_COMPRESSION)
val exporter = fmt match {
case Csv | Tsv => new DelimitedExporter(getWriter(params), fmt, attributes, !params.noHeader)
case Shp => new ShapefileExporter(checkShpFile(params))
case GeoJson | Json => new GeoJsonExporter(getWriter(params))
case Gml => new GmlExporter(createOutputStream(params.file, params.gzip))
case Avro => new AvroExporter(features.getSchema, createOutputStream(params.file, null), avroCompression)
case Null => NullExporter
// shouldn't happen unless someone adds a new format and doesn't implement it here
case _ => throw new UnsupportedOperationException(s"Format $fmt can't be exported")
}
try {
val count = exporter.export(features)
exporter.flush()
count
} finally {
IOUtils.closeQuietly(exporter)
}
}
}
object ExportCommand extends LazyLogging {
def getFeatureCollection(ds: GeoMesaDataStore[_, _, _],
fmt: DataFormat,
attributes: Option[ExportAttributes],
params: BaseExportParams): SimpleFeatureCollection = {
val filter = Option(params.cqlFilter).map(ECQL.toFilter).getOrElse(Filter.INCLUDE)
logger.debug(s"Applying CQL filter ${ECQL.toCQL(filter)}")
logger.debug(s"Applying transform ${attributes.map(_.names.mkString(",")).orNull}")
val q = new Query(params.featureName, filter, attributes.map(_.names.toArray).orNull)
Option(params.maxFeatures).map(Int.unbox).foreach(q.setMaxFeatures)
params.loadIndex(ds, IndexMode.Read).foreach { index =>
q.getHints.put(QueryHints.QUERY_INDEX, index)
logger.debug(s"Using index ${index.identifier}")
}
// get the feature store used to query the GeoMesa data
val fs = ds.getFeatureSource(params.featureName)
try {
fs.getFeatures(q)
} catch {
case NonFatal(e) =>
throw new RuntimeException("Could not execute export query. Please ensure " +
"that all arguments are correct.", e)
}
}
def getAttributes(ds: GeoMesaDataStore[_, _, _], fmt: DataFormat, params: BaseExportParams): Option[ExportAttributes] = {
import scala.collection.JavaConversions._
lazy val sft = ds.getSchema(params.featureName)
val provided = Option(params.attributes).collect { case a if !a.isEmpty => a.toSeq }
if (fmt == DataFormats.Shp) {
val attributes = provided.map(ShapefileExporter.replaceGeom(sft, _)).getOrElse(ShapefileExporter.modifySchema(sft))
Some(ExportAttributes(attributes, fid = true))
} else if (fmt == DataFormats.Bin) {
val attributes = provided.getOrElse {
import org.locationtech.geomesa.utils.geotools.RichSimpleFeatureType.RichSimpleFeatureType
BinExporter.getAttributeList(params.asInstanceOf[BinExportParams], sft.getDtgField)
}
Some(ExportAttributes(attributes, fid = false))
} else {
provided.map { p =>
val (id, attributes) = p.partition(_.equalsIgnoreCase("id"))
ExportAttributes(attributes, id.nonEmpty)
}
}
}
def createOutputStream(file: File, compress: Integer): OutputStream = {
val out = Option(file).map(new FileOutputStream(_)).getOrElse(System.out)
val compressed = if (compress == null) { out } else new GZIPOutputStream(out) {
`def`.setLevel(compress) // sets the protected `def` (Deflater) field inherited from DeflaterOutputStream; backticks are needed because `def` is a Scala keyword
}
new BufferedOutputStream(compressed)
}
def getWriter(params: FileExportParams): Writer = new OutputStreamWriter(createOutputStream(params.file, params.gzip))
def checkShpFile(params: FileExportParams): File = {
if (params.file != null) { params.file } else {
throw new ParameterException("Error: -o or --output for file-based output is required for " +
"shapefile export (stdout not supported for shape files)")
}
}
case class ExportAttributes(names: Seq[String], fid: Boolean)
}
| nagavallia/geomesa | geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/export/ExportCommand.scala | Scala | apache-2.0 | 6,712 |
package Scalisp
import org.scalatest.FlatSpec
import org.scalatest.matchers.ShouldMatchers
class BugFixes extends FlatSpec with ShouldMatchers {
val repl = new REPL()
// https://github.com/Mononofu/Scalisp/issues/1
"Variables set in child scopes" should "change the parent scope too" in {
repl.execute("""
(define x 3)
(defun f (n) (set! x n))
""")
repl.executeLine("x") should equal (3)
repl.executeLine(("(f 2)"))
repl.executeLine("x") should equal (2)
}
}
| quantintel/Scalisp | src/test/scala/BugFixes.scala | Scala | mit | 507 |
package io.cronit.services
import io.cronit.common.SchedulerServiceException
import io.cronit.models.{CronScheduler, JobModel, RestJobModel, ScheduleOnce}
import io.cronit.utils.Configuration
import org.joda.time.DateTime
import org.mockito.Mockito.when
import org.scalatest.{FlatSpec, Matchers}
import org.specs2.mock.Mockito
class JobModelBuilderServiceTest extends FlatSpec with Matchers
with JobModelBuilderComponent
with JsonServiceComponent
with Configuration
with Mockito {
override val jobModelBuilderService = new JobModelBuilderService()
override val jsonService = mock[JsonService]
behavior of "Job Model builder service"
it should "create rest job model with CronScheduler from given Map when job type is rest" in {
val jobMapRepresentation = Map("id" -> "jobId", "name" -> "jobName", "jobType" -> "Rest",
"group" -> "executionGroup", "schedulerInfo" -> Map("expression" -> "* * * * *", "type" -> "CronScheduler"),
"url" -> "http://cronscheduler.it", "method" -> "POST", "body" -> "jobBody", "expectedStatus" -> 301D,
"headers" -> List(Map("foo" -> "bar")))
when(jsonService.deserializeAsMap("valid job content json")).thenReturn(jobMapRepresentation)
val jobModel: JobModel = jobModelBuilderService.from("valid job content json")
jobModel shouldBe a[RestJobModel]
jobModel.id shouldEqual "jobId"
jobModel.name shouldEqual "jobName"
jobModel.group shouldEqual "executionGroup"
jobModel.scheduleInfo shouldBe a[CronScheduler]
jobModel.scheduleInfo.asInstanceOf[CronScheduler].expression shouldEqual "* * * * *"
val restJobModel = jobModel.asInstanceOf[RestJobModel]
restJobModel.url shouldEqual "http://cronscheduler.it"
restJobModel.body shouldEqual Some("jobBody")
restJobModel.method shouldEqual "POST"
restJobModel.expectedStatus shouldEqual 301
restJobModel.headers shouldEqual Some(Map("foo" -> "bar"))
}
it should "create rest job model with CronScheduler and with empty header from given Map when job type is rest and headers are empty" in {
val jobMapRepresentation = Map("id" -> "jobId", "name" -> "jobName", "jobType" -> "Rest",
"group" -> "executionGroup", "schedulerInfo" -> Map("expression" -> "* * * * *", "type" -> "CronScheduler"),
"url" -> "http://cronscheduler.it", "method" -> "POST", "body" -> "jobBody", "expectedStatus" -> 301D)
when(jsonService.deserializeAsMap("valid job content json")).thenReturn(jobMapRepresentation)
val jobModel: JobModel = jobModelBuilderService.from("valid job content json")
jobModel shouldBe a[RestJobModel]
jobModel.id shouldEqual "jobId"
jobModel.name shouldEqual "jobName"
jobModel.group shouldEqual "executionGroup"
jobModel.scheduleInfo shouldBe a[CronScheduler]
jobModel.scheduleInfo.asInstanceOf[CronScheduler].expression shouldEqual "* * * * *"
val restJobModel = jobModel.asInstanceOf[RestJobModel]
restJobModel.url shouldEqual "http://cronscheduler.it"
restJobModel.body shouldEqual Some("jobBody")
restJobModel.method shouldEqual "POST"
restJobModel.expectedStatus shouldEqual 301
restJobModel.headers shouldEqual Some(Map())
}
it should "create rest job model with ScheduleOnce with Default execution group from given Map when job type is rest and group is not defined" in {
val jobMapRepresentation = Map("id" -> "jobId", "name" -> "jobName", "jobType" -> "Rest",
"schedulerInfo" -> Map("runAt" -> "2016-10-30T00:00:00.000Z", "type" -> "ScheduleOnce"),
"url" -> "http://cronscheduler.it", "method" -> "POST", "body" -> "jobBody", "expectedStatus" -> 301D,
"headers" -> List(Map("foo" -> "bar")))
when(jsonService.deserializeAsMap("valid job content json")).thenReturn(jobMapRepresentation)
val jobModel: JobModel = jobModelBuilderService.from("valid job content json")
jobModel shouldBe a[RestJobModel]
jobModel.id shouldEqual "jobId"
jobModel.name shouldEqual "jobName"
jobModel.group shouldEqual "Default"
jobModel.scheduleInfo shouldBe a[ScheduleOnce]
jobModel.scheduleInfo.asInstanceOf[ScheduleOnce].runAt shouldEqual DateTime.parse("2016-10-30T00:00:00.000Z")
val restJobModel = jobModel.asInstanceOf[RestJobModel]
restJobModel.url shouldEqual "http://cronscheduler.it"
restJobModel.body shouldEqual Some("jobBody")
restJobModel.method shouldEqual "POST"
restJobModel.expectedStatus shouldEqual 301
restJobModel.headers shouldEqual Some(Map("foo" -> "bar"))
}
it should "throw SchedulerServiceException when job type is not known" in {
val jobMapRepresentation = Map("id" -> "jobId", "name" -> "jobName", "jobType" -> "UnknownType",
"group" -> "executionGroup", "schedulerInfo" -> Map("runAt" -> "2016-10-30T00:00:00.000Z", "type" -> "ScheduleOnce"),
"url" -> "http://cronscheduler.it", "method" -> "POST", "body" -> "jobBody", "expectedStatus" -> 301D,
"headers" -> List(Map("foo" -> "bar")))
when(jsonService.deserializeAsMap("not valid job type job content json")).thenReturn(jobMapRepresentation)
intercept[SchedulerServiceException] {
jobModelBuilderService.from("not valid job type job content json")
}
}
}
| 212data/cronit-service | src/test/scala/io/cronit/services/JobModelBuilderServiceTest.scala | Scala | apache-2.0 | 5,223 |
/*                     __                                               *\
**     ________ ___   / /  ___     Scala API                            **
**    / __/ __// _ | / /  / _ |    (c) 2003-2010, LAMP/EPFL             **
**  __\ \/ /__/ __ |/ /__/ __ |    http://scala-lang.org/               **
** /____/\___/_/ |_/____/_/ | |                                         **
**                          |/                                          **
\*                                                                      */
package scala.collection
package mutable
import generic._
import scala.reflect.ClassManifest
/** A builder class for arrays.
*
* @tparam A type of elements that can be added to this builder.
* @param manifest class manifest for objects of type `A`.
*
* @since 2.8
*/
class WrappedArrayBuilder[A](manifest: ClassManifest[A]) extends Builder[A, WrappedArray[A]] {
private var elems: WrappedArray[A] = _
private var capacity: Int = 0
private var size: Int = 0
private def mkArray(size: Int): WrappedArray[A] = {
val newelems = manifest.newWrappedArray(size)
if (this.size > 0) Array.copy(elems.array, 0, newelems.array, 0, this.size)
newelems
}
private def resize(size: Int) {
elems = mkArray(size)
capacity = size
}
override def sizeHint(size: Int) {
if (capacity < size) resize(size)
}
private def ensureSize(size: Int) {
if (capacity < size) {
var newsize = if (capacity == 0) 16 else capacity * 2
while (newsize < size) newsize *= 2
resize(newsize)
}
}
def +=(elem: A): this.type = {
ensureSize(size + 1)
elems(size) = elem
size += 1
this
}
def clear() {
size = 0
}
def result() = {
if (capacity != 0 && capacity == size) elems
else mkArray(size)
}
// todo: add ++=
}
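// Illustrative usage (added for clarity, not part of the original source):
//
//   val builder = new WrappedArrayBuilder(ClassManifest.Int)
//   builder.sizeHint(3)
//   builder += 1; builder += 2; builder += 3
//   val wrapped: WrappedArray[Int] = builder.result()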
| cran/rkafkajars | java/scala/collection/mutable/WrappedArrayBuilder.scala | Scala | apache-2.0 | 1,851 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.unicomplex.dummyfailedextensions
import org.squbs.lifecycle.ExtensionLifecycle
trait DummyFailedExtension extends ExtensionLifecycle {
private[dummyfailedextensions] var _state = "start"
def state: String
override def preInit(): Unit = {
_state += "preInit"
}
override def init(): Unit = {
_state += "init"
}
override def postInit(): Unit = {
_state += "postInit"
}
}
class DummyFailedExtensionA extends DummyFailedExtension{
def state = "A" + _state
override def preInit(): Unit = {
throw new IllegalStateException("Test failing preInit()")
}
}
class DummyFailedExtensionB extends DummyFailedExtension{
def state = "B" + _state
override def postInit(): Unit = {
throw new IllegalStateException("Test failing postInit()")
}
}
| akara/squbs | squbs-unicomplex/src/test/scala/org/squbs/unicomplex/dummyfailedextensions/DummyFailedExtensions.scala | Scala | apache-2.0 | 1,392 |
package org.greencheek.spray.cache.memcached
import org.greencheek.util.memcached.{WithMemcached, MemcachedBasedSpec}
import akka.actor.ActorSystem
import net.spy.memcached.ConnectionFactoryBuilder.Protocol
import scala.concurrent._
import ExecutionContext.Implicits.global
import org.greencheek.util.PortUtil
import org.specs2.runner.JUnitRunner
import org.junit.runner.RunWith
import scala.reflect.io.{File, Path}
// adds await on the future
import spray.util._
object FileReader {
def using[A <: { def close():Unit},B](resource:A)(f: A => B) : B = {
try {
f(resource)
} finally {
resource.close
}
}
}
/**
* Created by dominictootell on 30/03/2014.
*/
@RunWith(classOf[JUnitRunner])
class LargeContentCachingSpec extends MemcachedBasedSpec {
val largeContent = LargeString.string
implicit val system = ActorSystem()
val memcachedContext = WithMemcached(false)
"A Memcached cache" >> {
"can store a large piece of content" in memcachedContext {
val hosts = "localhost:"+memcachedContext.memcached.port
val cache = new MemcachedCache[String] ( memcachedHosts = hosts, protocol = Protocol.TEXT,
doHostConnectionAttempt = true, waitForMemcachedSet = true)
cache("98765499")(largeContent).await === largeContent
cache("98765499")("B").await === largeContent
}
}
}
| tootedom/spray-cache-spymemcached | src/test/scala/org/greencheek/spray/cache/memcached/LargeContentCachingSpec.scala | Scala | apache-2.0 | 1,356 |
package com.twitter.finagle.memcached.protocol.text.client
import com.twitter.finagle.memcached.protocol.ServerError
import com.twitter.finagle.memcached.protocol.text.FrameDecoder
import com.twitter.finagle.memcached.util.ParserUtils
import com.twitter.io.Buf
import com.twitter.logging.Logger
import scala.collection.mutable
private object ClientDecoder {
private val log = Logger.get()
private val End: Buf = Buf.Utf8("END")
private val Item: Buf = Buf.Utf8("ITEM")
private val Stat: Buf = Buf.Utf8("STAT")
private val Value: Buf = Buf.Utf8("VALUE")
private def isEnd(tokens: Seq[Buf]): Boolean =
tokens.length == 1 && tokens.head == End
private def isStats(tokens: Seq[Buf]): Boolean = {
if (tokens.isEmpty) false
else
tokens.head match {
case Stat | Item => true
case _ => false
}
}
private def validateValueResponse(args: Seq[Buf]): Unit = {
if (args.length < 4) throw new ServerError("Too few arguments")
if (args.length > 5) throw new ServerError("Too many arguments")
if (args.length == 5 && !ParserUtils.isDigits(args(4)))
throw new ServerError("CAS must be a number")
if (!ParserUtils.isDigits(args(3))) throw new ServerError("Bytes must be number")
}
}
/**
* Decodes Buf-encoded protocol messages into protocol specific Responses. Used by the client.
*
* @note Class contains mutable state. Not thread-safe.
*/
private[finagle] abstract class ClientDecoder[R] extends FrameDecoder[R] {
import ClientDecoder._
/** Type that represents a complete cache value */
protected type Value
/** Sequence of tokens that represents a text line */
final protected type Tokens = Seq[Buf]
private sealed trait State
private case object AwaitingResponse extends State
private case class AwaitingResponseOrEnd(valuesSoFar: Seq[Value]) extends State
private case class AwaitingStatsOrEnd(valuesSoFar: Seq[Tokens]) extends State
private case class AwaitingData(valuesSoFar: Seq[Value], tokens: Seq[Buf], bytesNeeded: Int)
extends State
private case class Failed(error: Throwable) extends State
private[this] var state: State = AwaitingResponse
/** Parse a sequence of tokens into a response */
protected def parseResponse(tokens: Seq[Buf]): R
/** Parse a text line, its associated data, and the casUnique into a Value */
protected def parseValue(tokens: Seq[Buf], data: Buf): Value
/** Parse a collection of values into a single response */
protected def parseResponseValues(values: Seq[Value]): R
/** Parse a collection of token sequences into a single response */
protected def parseStatLines(lines: Seq[Tokens]): R
final def nextFrameBytes(): Int = state match {
case AwaitingData(_, _, bytesNeeded) => bytesNeeded
case _ => -1
}
final def decodeData(buffer: Buf, results: mutable.Buffer[R]): Unit = state match {
case AwaitingData(valuesSoFar, tokens, bytesNeeded) =>
// The framer should have given us the right sized Buf
if (buffer.length != bytesNeeded) {
throw new IllegalArgumentException(
s"Expected to receive a buffer of $bytesNeeded bytes but " +
s"only received ${buffer.length} bytes"
)
}
state = AwaitingResponseOrEnd(valuesSoFar :+ parseValue(tokens, buffer))
case AwaitingResponse =>
val tokens = ParserUtils.splitOnWhitespace(buffer)
val dataBytes = needsData(tokens)
if (dataBytes == -1) {
if (isEnd(tokens)) {
results += parseResponseValues(Nil)
} else if (isStats(tokens)) {
state = AwaitingStatsOrEnd(Vector(tokens))
} else {
results += parseResponse(tokens)
}
} else {
// We are waiting for data next
state = AwaitingData(Nil, tokens, dataBytes)
}
case AwaitingStatsOrEnd(linesSoFar) =>
val tokens = ParserUtils.splitOnWhitespace(buffer)
if (isEnd(tokens)) {
state = AwaitingResponse
results += parseStatLines(linesSoFar)
} else if (isStats(tokens)) {
state = AwaitingStatsOrEnd(linesSoFar :+ tokens)
} else {
val ex = new ServerError("Invalid reply from STATS command")
state = Failed(ex)
throw ex
}
case AwaitingResponseOrEnd(valuesSoFar) =>
val tokens = ParserUtils.splitOnWhitespace(buffer)
val bytesNeeded = needsData(tokens)
if (bytesNeeded == -1) {
if (isEnd(tokens)) {
state = AwaitingResponse
results += parseResponseValues(valuesSoFar)
} else {
// This is a problem: if it wasn't a value line, it should have been an END.
val bufString =
tokens.foldLeft("") { (acc, buffer) =>
acc + Buf.Utf8.unapply(buffer).getOrElse("<non-string token>") + " "
}
val ex = new ServerError(
s"Server returned invalid response when values or END was expected: $bufString"
)
state = Failed(ex)
throw ex
}
} else {
state = AwaitingData(valuesSoFar, tokens, bytesNeeded)
}
case Failed(cause) =>
val msg = "Failed Memcached decoder called after previous decoding failure."
val ex = new IllegalStateException(msg, cause)
log.error(msg, ex)
throw ex
}
private[this] def needsData(tokens: Seq[Buf]): Int = {
if (tokens.isEmpty) -1
else {
val responseName = tokens.head
if (responseName == Value) {
validateValueResponse(tokens)
val dataLengthAsBuf = tokens(3)
ParserUtils.bufToInt(dataLengthAsBuf)
} else -1
}
}
}
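// Illustrative sketch (added for clarity, not part of the original source) of how a framer is
// expected to drive this decoder: request a fixed-size data frame whenever nextFrameBytes()
// is non-negative, otherwise feed the next text line. `readLine`, `readBytes` and `moreInput`
// are assumed helpers, not Finagle API:
//
//   val results = mutable.Buffer.empty[R]
//   while (moreInput) {
//     val needed = decoder.nextFrameBytes()
//     val frame  = if (needed >= 0) readBytes(needed) else readLine()
//     decoder.decodeData(frame, results)
//   }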
| twitter/finagle | finagle-memcached/src/main/scala/com/twitter/finagle/memcached/protocol/text/client/ClientDecoder.scala | Scala | apache-2.0 | 5,630 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.status
import java.io.File
import java.util.{Arrays, List => JList}
import scala.collection.JavaConverters._
import org.apache.spark.{JobExecutionStatus, SparkConf}
import org.apache.spark.scheduler.SparkListener
import org.apache.spark.status.api.v1
import org.apache.spark.ui.scope._
import org.apache.spark.util.{Distribution, Utils}
import org.apache.spark.util.kvstore.{InMemoryStore, KVStore}
/**
* A wrapper around a KVStore that provides methods for accessing the API data stored within.
*/
private[spark] class AppStatusStore(
val store: KVStore,
listener: Option[AppStatusListener] = None) {
def applicationInfo(): v1.ApplicationInfo = {
store.view(classOf[ApplicationInfoWrapper]).max(1).iterator().next().info
}
def environmentInfo(): v1.ApplicationEnvironmentInfo = {
val klass = classOf[ApplicationEnvironmentInfoWrapper]
store.read(klass, klass.getName()).info
}
def jobsList(statuses: JList[JobExecutionStatus]): Seq[v1.JobData] = {
val it = store.view(classOf[JobDataWrapper]).reverse().asScala.map(_.info)
if (statuses != null && !statuses.isEmpty()) {
it.filter { job => statuses.contains(job.status) }.toSeq
} else {
it.toSeq
}
}
def job(jobId: Int): v1.JobData = {
store.read(classOf[JobDataWrapper], jobId).info
}
def executorList(activeOnly: Boolean): Seq[v1.ExecutorSummary] = {
val base = store.view(classOf[ExecutorSummaryWrapper])
val filtered = if (activeOnly) {
base.index("active").reverse().first(true).last(true)
} else {
base
}
filtered.asScala.map(_.info).toSeq
}
def executorSummary(executorId: String): v1.ExecutorSummary = {
store.read(classOf[ExecutorSummaryWrapper], executorId).info
}
/**
* This is used by ConsoleProgressBar to quickly fetch active stages for drawing the progress
* bar. It will only return anything useful when called from a live application.
*/
def activeStages(): Seq[v1.StageData] = {
listener.map(_.activeStages()).getOrElse(Nil)
}
def stageList(statuses: JList[v1.StageStatus]): Seq[v1.StageData] = {
val it = store.view(classOf[StageDataWrapper]).reverse().asScala.map(_.info)
if (statuses != null && !statuses.isEmpty()) {
it.filter { s => statuses.contains(s.status) }.toSeq
} else {
it.toSeq
}
}
def stageData(stageId: Int, details: Boolean = false): Seq[v1.StageData] = {
store.view(classOf[StageDataWrapper]).index("stageId").first(stageId).last(stageId)
.asScala.map { s =>
if (details) stageWithDetails(s.info) else s.info
}.toSeq
}
def lastStageAttempt(stageId: Int): v1.StageData = {
val it = store.view(classOf[StageDataWrapper]).index("stageId").reverse().first(stageId)
.closeableIterator()
try {
it.next().info
} finally {
it.close()
}
}
def stageAttempt(stageId: Int, stageAttemptId: Int, details: Boolean = false): v1.StageData = {
val stageKey = Array(stageId, stageAttemptId)
val stage = store.read(classOf[StageDataWrapper], stageKey).info
if (details) stageWithDetails(stage) else stage
}
def taskSummary(
stageId: Int,
stageAttemptId: Int,
quantiles: Array[Double]): v1.TaskMetricDistributions = {
val stage = Array(stageId, stageAttemptId)
val rawMetrics = store.view(classOf[TaskDataWrapper])
.index("stage")
.first(stage)
.last(stage)
.asScala
.flatMap(_.info.taskMetrics)
.toList
.view
def metricQuantiles(f: v1.TaskMetrics => Double): IndexedSeq[Double] =
Distribution(rawMetrics.map { d => f(d) }).get.getQuantiles(quantiles)
// We need to do a lot of similar munging to nested metrics here. For each one,
// we want to (a) extract the values for nested metrics, (b) make a distribution for each metric,
// (c) put the distribution into the right field in our return type, and (d) only return
// a result if the option is defined for any of the tasks. MetricHelper is a little util
// to make it a little easier to deal with all of the nested options. Mostly it lets us just
// implement one "build" method, which builds the quantiles for each field.
val inputMetrics =
new MetricHelper[v1.InputMetrics, v1.InputMetricDistributions](rawMetrics, quantiles) {
def getSubmetrics(raw: v1.TaskMetrics): v1.InputMetrics = raw.inputMetrics
def build: v1.InputMetricDistributions = new v1.InputMetricDistributions(
bytesRead = submetricQuantiles(_.bytesRead),
recordsRead = submetricQuantiles(_.recordsRead)
)
}.build
val outputMetrics =
new MetricHelper[v1.OutputMetrics, v1.OutputMetricDistributions](rawMetrics, quantiles) {
def getSubmetrics(raw: v1.TaskMetrics): v1.OutputMetrics = raw.outputMetrics
def build: v1.OutputMetricDistributions = new v1.OutputMetricDistributions(
bytesWritten = submetricQuantiles(_.bytesWritten),
recordsWritten = submetricQuantiles(_.recordsWritten)
)
}.build
val shuffleReadMetrics =
new MetricHelper[v1.ShuffleReadMetrics, v1.ShuffleReadMetricDistributions](rawMetrics,
quantiles) {
def getSubmetrics(raw: v1.TaskMetrics): v1.ShuffleReadMetrics =
raw.shuffleReadMetrics
def build: v1.ShuffleReadMetricDistributions = new v1.ShuffleReadMetricDistributions(
readBytes = submetricQuantiles { s => s.localBytesRead + s.remoteBytesRead },
readRecords = submetricQuantiles(_.recordsRead),
remoteBytesRead = submetricQuantiles(_.remoteBytesRead),
remoteBytesReadToDisk = submetricQuantiles(_.remoteBytesReadToDisk),
remoteBlocksFetched = submetricQuantiles(_.remoteBlocksFetched),
localBlocksFetched = submetricQuantiles(_.localBlocksFetched),
totalBlocksFetched = submetricQuantiles { s =>
s.localBlocksFetched + s.remoteBlocksFetched
},
fetchWaitTime = submetricQuantiles(_.fetchWaitTime)
)
}.build
val shuffleWriteMetrics =
new MetricHelper[v1.ShuffleWriteMetrics, v1.ShuffleWriteMetricDistributions](rawMetrics,
quantiles) {
def getSubmetrics(raw: v1.TaskMetrics): v1.ShuffleWriteMetrics =
raw.shuffleWriteMetrics
def build: v1.ShuffleWriteMetricDistributions = new v1.ShuffleWriteMetricDistributions(
writeBytes = submetricQuantiles(_.bytesWritten),
writeRecords = submetricQuantiles(_.recordsWritten),
writeTime = submetricQuantiles(_.writeTime)
)
}.build
new v1.TaskMetricDistributions(
quantiles = quantiles,
executorDeserializeTime = metricQuantiles(_.executorDeserializeTime),
executorDeserializeCpuTime = metricQuantiles(_.executorDeserializeCpuTime),
executorRunTime = metricQuantiles(_.executorRunTime),
executorCpuTime = metricQuantiles(_.executorCpuTime),
resultSize = metricQuantiles(_.resultSize),
jvmGcTime = metricQuantiles(_.jvmGcTime),
resultSerializationTime = metricQuantiles(_.resultSerializationTime),
memoryBytesSpilled = metricQuantiles(_.memoryBytesSpilled),
diskBytesSpilled = metricQuantiles(_.diskBytesSpilled),
inputMetrics = inputMetrics,
outputMetrics = outputMetrics,
shuffleReadMetrics = shuffleReadMetrics,
shuffleWriteMetrics = shuffleWriteMetrics
)
}
def taskList(stageId: Int, stageAttemptId: Int, maxTasks: Int): Seq[v1.TaskData] = {
val stageKey = Array(stageId, stageAttemptId)
store.view(classOf[TaskDataWrapper]).index("stage").first(stageKey).last(stageKey).reverse()
.max(maxTasks).asScala.map(_.info).toSeq.reverse
}
def taskList(
stageId: Int,
stageAttemptId: Int,
offset: Int,
length: Int,
sortBy: v1.TaskSorting): Seq[v1.TaskData] = {
val stageKey = Array(stageId, stageAttemptId)
val base = store.view(classOf[TaskDataWrapper])
val indexed = sortBy match {
case v1.TaskSorting.ID =>
base.index("stage").first(stageKey).last(stageKey)
case v1.TaskSorting.INCREASING_RUNTIME =>
base.index("runtime").first(stageKey ++ Array(-1L)).last(stageKey ++ Array(Long.MaxValue))
case v1.TaskSorting.DECREASING_RUNTIME =>
base.index("runtime").first(stageKey ++ Array(Long.MaxValue)).last(stageKey ++ Array(-1L))
.reverse()
}
indexed.skip(offset).max(length).asScala.map(_.info).toSeq
}
def rddList(cachedOnly: Boolean = true): Seq[v1.RDDStorageInfo] = {
store.view(classOf[RDDStorageInfoWrapper]).asScala.map(_.info).filter { rdd =>
!cachedOnly || rdd.numCachedPartitions > 0
}.toSeq
}
/**
* Calls a closure that may throw a NoSuchElementException and returns `None` when the exception
* is thrown.
*/
def asOption[T](fn: => T): Option[T] = {
try {
Some(fn)
} catch {
case _: NoSuchElementException => None
}
}
private def stageWithDetails(stage: v1.StageData): v1.StageData = {
val tasks = taskList(stage.stageId, stage.attemptId, Int.MaxValue)
.map { t => (t.taskId, t) }
.toMap
val stageKey = Array(stage.stageId, stage.attemptId)
val execs = store.view(classOf[ExecutorStageSummaryWrapper]).index("stage").first(stageKey)
.last(stageKey).closeableIterator().asScala
.map { exec => (exec.executorId -> exec.info) }
.toMap
new v1.StageData(
stage.status,
stage.stageId,
stage.attemptId,
stage.numTasks,
stage.numActiveTasks,
stage.numCompleteTasks,
stage.numFailedTasks,
stage.numKilledTasks,
stage.numCompletedIndices,
stage.executorRunTime,
stage.executorCpuTime,
stage.submissionTime,
stage.firstTaskLaunchedTime,
stage.completionTime,
stage.failureReason,
stage.inputBytes,
stage.inputRecords,
stage.outputBytes,
stage.outputRecords,
stage.shuffleReadBytes,
stage.shuffleReadRecords,
stage.shuffleWriteBytes,
stage.shuffleWriteRecords,
stage.memoryBytesSpilled,
stage.diskBytesSpilled,
stage.name,
stage.description,
stage.details,
stage.schedulingPool,
stage.rddIds,
stage.accumulatorUpdates,
Some(tasks),
Some(execs),
stage.killedTasksSummary)
}
def rdd(rddId: Int): v1.RDDStorageInfo = {
store.read(classOf[RDDStorageInfoWrapper], rddId).info
}
def streamBlocksList(): Seq[StreamBlockData] = {
store.view(classOf[StreamBlockData]).asScala.toSeq
}
def operationGraphForStage(stageId: Int): RDDOperationGraph = {
store.read(classOf[RDDOperationGraphWrapper], stageId).toRDDOperationGraph()
}
def operationGraphForJob(jobId: Int): Seq[RDDOperationGraph] = {
val job = store.read(classOf[JobDataWrapper], jobId)
val stages = job.info.stageIds
stages.map { id =>
val g = store.read(classOf[RDDOperationGraphWrapper], id).toRDDOperationGraph()
if (job.skippedStages.contains(id) && !g.rootCluster.name.contains("skipped")) {
g.rootCluster.setName(g.rootCluster.name + " (skipped)")
}
g
}
}
def pool(name: String): PoolData = {
store.read(classOf[PoolData], name)
}
def close(): Unit = {
store.close()
}
}
private[spark] object AppStatusStore {
val CURRENT_VERSION = 1L
/**
* Create an in-memory store for a live application.
*
* @param conf Configuration.
* @param addListenerFn Function to register a listener with a bus.
*/
def createLiveStore(conf: SparkConf, addListenerFn: SparkListener => Unit): AppStatusStore = {
val store = new InMemoryStore()
val listener = new AppStatusListener(store, conf, true)
addListenerFn(listener)
AppStatusPlugin.loadPlugins().foreach { p =>
p.setupListeners(conf, store, addListenerFn, true)
}
new AppStatusStore(store, listener = Some(listener))
}
}
/**
* Helper for getting distributions from nested metric types.
*/
private abstract class MetricHelper[I, O](
rawMetrics: Seq[v1.TaskMetrics],
quantiles: Array[Double]) {
def getSubmetrics(raw: v1.TaskMetrics): I
def build: O
val data: Seq[I] = rawMetrics.map(getSubmetrics)
/** applies the given function to all input metrics, and returns the quantiles */
def submetricQuantiles(f: I => Double): IndexedSeq[Double] = {
Distribution(data.map { d => f(d) }).get.getQuantiles(quantiles)
}
}
| ron8hu/spark | core/src/main/scala/org/apache/spark/status/AppStatusStore.scala | Scala | apache-2.0 | 13,336 |
package org.scalaide.core.internal.text
import org.eclipse.jface.text.IDocument
import org.eclipse.jface.text.IRegion
import org.eclipse.jface.text.TextUtilities
import org.scalaide.core.text.Document
import org.scalaide.core.text.InternalDocument
class TextDocument(private val doc: IDocument) extends Document with InternalDocument {
override def apply(i: Int): Char =
doc.getChar(i)
override def length: Int =
doc.getLength()
override def text: String =
doc.get()
override def textRange(start: Int, end: Int): String =
doc.get(start, end-start)
override def textRangeOpt(start: Int, end: Int): Option[String] =
if (isValidRange(start, end))
Some(doc.get(start, end-start))
else
None
override def lines: Seq[IRegion] =
0 until lineCount map lineInformation
override def lineCount: Int =
doc.getNumberOfLines()
override def lineInformation(lineNumber: Int): IRegion =
doc.getLineInformation(lineNumber)
override def lineInformationOfOffset(offset: Int): IRegion =
doc.getLineInformationOfOffset(offset)
override def replace(start: Int, end: Int, text: String): Unit =
doc.replace(start, end-start, text)
override def head: Char =
doc.getChar(0)
override def headOpt: Option[Char] =
if (!isEmpty)
Some(doc.getChar(0))
else
None
override def tail: String =
doc.get(1, length-1)
override def tailOpt: Option[String] =
if (!isEmpty)
Some(doc.get(1, length-1))
else
None
override def init: String =
doc.get(0, length-1)
override def initOpt: Option[String] =
if (!isEmpty)
Some(doc.get(0, length-1))
else
None
override def last: Char =
doc.getChar(length-1)
override def lastOpt: Option[Char] =
if (!isEmpty)
Some(doc.getChar(length-1))
else
None
override def defaultLineDelimiter: String =
TextUtilities.getDefaultLineDelimiter(doc)
override def toString(): String =
text
private def isEmpty: Boolean =
length == 0
private def isValidRange(start: Int, end: Int): Boolean =
start >= 0 && start <= end && end <= length
}
| dragos/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/core/internal/text/TextDocument.scala | Scala | bsd-3-clause | 2,152 |
package gmodlab
import reflect.{BeanProperty}, java.util.concurrent.atomic.{AtomicBoolean}
//serial port/socket operations
class Port extends ul.GetTextable {
@BeanProperty var serial:ul.commx.SerialPJC = null
@BeanProperty var socket:java.net.Socket = null
val _isOpened = new AtomicBoolean(false)
val _isSerial = new AtomicBoolean(true)
def isOpened = _isOpened.get
def isOpened_=(state:Boolean) = { _isOpened.set(state) }
def isSerial = _isSerial.get
def isSerial_=(state:Boolean) = { _isSerial.set(state) }
//get opened port name
def name:String = synchronized {
if (isOpened){
if (isSerial) serial.name else (socket.getInetAddress.toString + ":" + socket.getPort)
} else tr("new")
}
//get number of available received bytes
def available:Int = synchronized {
if (!isOpened) 0
else if (isSerial) serial.available
else socket.getInputStream.available
}
//write bytes
def write(buf:Array[Byte]) = synchronized {
if (isOpened){
if (isSerial) serial.write(buf)
else socket.getOutputStream.write(buf.toArray)
}
}
//read received bytes
def read:Array[Byte] = {
if (available == 0) Nil.toArray
else if (isSerial) serial.read(0).toArray
else {
val a = new Array[Byte](available)
socket.getInputStream.read(a)
a
}
}
//close port
def close = synchronized {
if (isOpened){
isOpened = false
if (isSerial) serial.close else socket.close
}
}
//get current baud rate
def baud = synchronized { if (isOpened && isSerial) serial.baud else 0 }
def bits = synchronized { if (isOpened && isSerial) serial.bits else 0 }
def stops = synchronized { if (isOpened && isSerial) serial.stops else 0 }
def parity = synchronized { if (isOpened && isSerial) serial.parity else "" }
def ip = synchronized { if (isOpened && !isSerial) socket.getInetAddress.getCanonicalHostName else "" }
def port = synchronized { if (isOpened && !isSerial) socket.getPort else "" }
}
| tardigrade888/gmodlab | repo/src/gmodlab/Port.scala | Scala | mit | 2,195 |
package it.polimi.genomics.GMQLServer
import it.polimi.genomics.core.DataStructures.Builtin.MetaAggregateFactory
import it.polimi.genomics.core.DataStructures.MetaAggregate.{MetaAggregateFunction}
object DefaultMetaAggregateFactory extends MetaAggregateFactory{
val castExc = "GMQL Casting Exception – Could not parse"
def get(name : String, output_name : Option[String]) = {
name.toUpperCase() match {
case "COUNTSAMP" => getCount("", output_name)
case _ => throw new Exception("No aggregate function with the given name (" + name + ") found.")
}
}
def get(name : String, input_name : String, output_name : Option[String]) = {
name.toUpperCase() match {
case "SUM" => getSum(input_name,output_name)
case "MIN" => getMin(input_name,output_name)
case "MAX" => getMax(input_name,output_name)
case "AVG" => getAvg(input_name,output_name)
case "BAG" => getBAG(input_name,output_name)
case "BAGD" => getBAGD(input_name,output_name)
case "STD" => getSTD(input_name,output_name)
case "MEDIAN" => getMEDIAN(input_name,output_name)
case _ => throw new Exception("No aggregate function with the given name (" + name + ") found.")
}
}
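
  // Example (sketch): get("AVG", "age", Some("avg_age")) returns an aggregate whose
  // newAttributeName is "avg_age" and whose function averages the numeric values of "age".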
/*
override def get(name: String, input : String, output: String): MetaAggregateFunction = {
new MetaAggregateFunction {
//fun should take the list of values of attribute inputAttributeNames[0]
override val fun: (Array[Traversable[String]]) => String = name.toLowerCase match {
case "sum" => get_sum()
//case "avg" =>
}
//notice that regardless the fact it's a list, inputAttributeNames will always have exactly one element
override val inputAttributeNames: List[String] = List(input)
override val newAttributeName: String = output
}
//this should be the sum aggregate function,
// not-tested!!!
def get_sum() = {
(x:Array[Traversable[String]]) =>
//Only consider the elem at 0: Array will only have one element.
x(0).toList.map(_.toDouble).sum.toString
}
}*/
private def getSTD(input_name:String,new_name:Option[String]) = new MetaAggregateFunction {
override val newAttributeName = if(new_name.isDefined) new_name.get else "STD"
override val inputName: String = input_name
override val fun: (Array[Traversable[String]]) => String = {
(line) =>{
val doubleVals = line.head.flatMap((value) => {
val v1 = castDoubleOrString(value);
if (v1.isInstanceOf[Double]) Some(v1.asInstanceOf[Double]) else None
}).toArray
if (!doubleVals.isEmpty) {
val std = stdev(doubleVals)
std.toString
}
else castExc
}
}
}
def avg(data: Array[Double]): Double = {
if (data.length < 1)
return Double.NaN
data.sum / data.length
}
def stdev(data: Array[Double]): Double = {
if (data.length < 2)
return Double.NaN
// average
val mean: Double = avg(data)
val sum = data.foldLeft(0.0)((sum, tail) => {
val dif = tail - mean
sum + dif * dif
})
Math.sqrt(sum / (data.length - 1))
}
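
  // Worked example (sample standard deviation, n-1 in the denominator):
  //   stdev(Array(2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0)) = sqrt(32 / 7.0) ≈ 2.138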
private def getMEDIAN(input_name:String,new_name:Option[String]) = new MetaAggregateFunction {
override val newAttributeName: String = if(new_name.isDefined) new_name.get else "MEDIAN"
override val inputName: String = input_name
override val fun: (Array[Traversable[String]]) => String = {
(line) => {
val values: List[Double] = line.head.flatMap{(value) =>
val v1=castDoubleOrString(value);
if (v1.isInstanceOf[Double]) Some(v1.asInstanceOf[Double]) else None
}.toList.sorted
if (!values.isEmpty) {
if (values.length % 2 == 0) {
val right = values.length / 2
val left = (values.length / 2) - 1
            val res = (values(left) + values(right)) / 2
res.toString
}
else {
val res = values(values.length / 2)
res.toString
}
}
else castExc
}
}
}
private def getSum(input_name:String,new_name:Option[String]) = new MetaAggregateFunction {
override val newAttributeName = if(new_name.isDefined) new_name.get else "SUM"
override val inputName: String = input_name
override val fun: (Array[Traversable[String]]) => String = {
(line) =>{
val ss = line.head.flatMap{(value) =>
val v1=castDoubleOrString(value);
if (v1.isInstanceOf[Double]) Some(v1.asInstanceOf[Double]) else None}
if (!ss.isEmpty) {
val dd = ss.reduce(_ + _);
dd.toString
}
else castExc
}
}
}
private def getCount(input_name:String,new_name:Option[String]) = new MetaAggregateFunction {
override val newAttributeName = if(new_name.isDefined) new_name.get else "COUNTSAMP"
override val inputName: String = input_name
override val fun: (Array[Traversable[String]]) => String = {
(line) =>{line.head.size.toString}
}
}
private def getMin(input_name:String, new_name:Option[String]) = new MetaAggregateFunction {
override val newAttributeName = if(new_name.isDefined) new_name.get else "MIN"
override val inputName: String = input_name
override val fun: (Array[Traversable[String]]) => String = {
(line) =>
val lines = line.head.flatMap{(value) =>
val v1=castDoubleOrString(value);
if (v1.isInstanceOf[Double]) Some(v1.asInstanceOf[Double]) else None}
if(!lines.isEmpty)
lines.reduce( (x,y) =>Math.min(x,y)).toString
else castExc
}
}
private def getMax(input_name:String, new_name:Option[String]) = new MetaAggregateFunction {
override val newAttributeName = if(new_name.isDefined) new_name.get else "MAX"
override val inputName: String = input_name
override val fun: (Array[Traversable[String]]) => String = {
(line) =>
val lines = line.head.flatMap{(value) =>
val v1=castDoubleOrString(value);
if (v1.isInstanceOf[Double]) Some(v1.asInstanceOf[Double]) else None}
if(!lines.isEmpty)
lines.reduce( (x,y) =>Math.max(x,y)).toString
else castExc
}
}
private def getAvg(input_name:String, new_name:Option[String]) = new MetaAggregateFunction {
override val newAttributeName = if(new_name.isDefined) new_name.get else "AVG"
override val inputName: String = input_name
override val fun: (Array[Traversable[String]]) => String = {
(line) =>
val lines = line.head.flatMap{(value) =>
val v1=castDoubleOrString(value);
if (v1.isInstanceOf[Double]) Some(v1.asInstanceOf[Double]) else None}
if(!lines.isEmpty)
(lines.reduce(_ + _) / lines.size).toString
else
castExc
}
}
private def getBAG(input_name:String, new_name:Option[String]) = new MetaAggregateFunction {
override val newAttributeName = if(new_name.isDefined) new_name.get else "Bag"
override val inputName: String = input_name
override val fun: (Array[Traversable[String]]) => String = {
(list) => {
if (list.nonEmpty) {
list.head.toArray.sorted.map((value) => {
if (!value.isEmpty) value else "."
}).mkString(",")
}
else "."
}
}
}
private def getBAGD(input_name:String, new_name:Option[String]) = new MetaAggregateFunction {
override val newAttributeName = if(new_name.isDefined) new_name.get else "BagD"
override val inputName: String = input_name
override val fun: (Array[Traversable[String]]) => String = {
(list) =>{
if (list.nonEmpty)
list.head.toArray.distinct.sorted.map((value)=> {
if (!value.isEmpty) value else "."
}).mkString(",")
else "."
}
}
}
def castDoubleOrString(value : Any) : Any = {
try{
value.toString.toDouble
} catch {
case e : Throwable => value.toString
}
}
}
| DEIB-GECO/GMQL | GMQL-Server/src/main/scala/it/polimi/genomics/GMQLServer/DefaultMetaAggregateFactory.scala | Scala | apache-2.0 | 8,039 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala
package collection
package mutable
import generic._
/** The base trait of all builders.
* A builder lets one construct a collection incrementally, by adding
* elements to the builder with `+=` and then converting to the required
* collection type with `result`.
*
* One cannot assume that a single `Builder` can build more than one
* instance of the desired collection. Particular subclasses may allow
* such behavior. Otherwise, `result` should be treated as a terminal
* operation: after it is called, no further methods should be called on
* the builder. Extend the [[collection.mutable.ReusableBuilder]] trait
* instead of `Builder` for builders that may be reused to build multiple
* instances.
*
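 *  A minimal usage sketch (shown with `List.newBuilder`, which is defined elsewhere in
 *  the standard library):
 *  {{{
 *    val b = List.newBuilder[Int]
 *    b += 1
 *    b += 2
 *    val xs = b.result()  // List(1, 2); the builder should not be reused afterwards
 *  }}}
 *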
* @tparam Elem the type of elements that get added to the builder.
* @tparam To the type of collection that it produced.
*
* @since 2.8
*/
trait Builder[-Elem, +To] extends Growable[Elem] {
/** Adds a single element to the builder.
* @param elem the element to be added.
* @return the builder itself.
*/
def +=(elem: Elem): this.type
/** Clears the contents of this builder.
* After execution of this method the builder will contain no elements.
*/
def clear()
/** Produces a collection from the added elements. This is a terminal operation:
* the builder's contents are undefined after this operation, and no further
* methods should be called.
*
* @return a collection containing the elements added to this builder.
*/
def result(): To
/** Gives a hint how many elements are expected to be added
* when the next `result` is called. Some builder classes
* will optimize their representation based on the hint. However,
* builder implementations are still required to work correctly even if the hint is
* wrong, i.e. a different number of elements is added.
*
* @param size the hint how many elements will be added.
*/
def sizeHint(size: Int) {}
  /** Gives a hint that one expects the `result` of this builder
   *  to have the same size as the given collection. This will
   *  provide a hint only if the collection is known to have a cheap
   *  `size` method, which is determined by calling `sizeHintIfCheap`.
*
* Some builder classes will optimize their representation based on the hint. However,
* builder implementations are still required to work correctly even if the hint is
* wrong, i.e. a different number of elements is added.
*
* @param coll the collection which serves as a hint for the result's size.
*/
def sizeHint(coll: TraversableLike[_, _]) {
coll.sizeHintIfCheap match {
case -1 =>
case n => sizeHint(n)
}
}
/** Gives a hint that one expects the `result` of this builder
* to have the same size as the given collection, plus some delta. This will
* provide a hint only if the collection is known to have a cheap
   *  `size` method, which is determined by calling `sizeHintIfCheap`.
* Some builder classes
* will optimize their representation based on the hint. However,
* builder implementations are still required to work correctly even if the hint is
* wrong, i.e. a different number of elements is added.
*
* @param coll the collection which serves as a hint for the result's size.
* @param delta a correction to add to the `coll.size` to produce the size hint.
*/
def sizeHint(coll: TraversableLike[_, _], delta: Int) {
coll.sizeHintIfCheap match {
case -1 =>
case n => sizeHint(n + delta)
}
}
/** Gives a hint how many elements are expected to be added
* when the next `result` is called, together with an upper bound
* given by the size of some other collection. Some builder classes
* will optimize their representation based on the hint. However,
* builder implementations are still required to work correctly even if the hint is
* wrong, i.e. a different number of elements is added.
*
* @param size the hint how many elements will be added.
   *  @param boundingColl  the bounding collection. If its size is known
   *                       cheaply, then sizes larger than the
   *                       collection's size are reduced.
*/
def sizeHintBounded(size: Int, boundingColl: TraversableLike[_, _]) {
boundingColl.sizeHintIfCheap match {
case -1 =>
case n => sizeHint(size min n)
}
}
/** Creates a new builder by applying a transformation function to
* the results of this builder.
* @param f the transformation function.
* @tparam NewTo the type of collection returned by `f`.
* @return a new builder which is the same as the current builder except
* that a transformation function is applied to this builder's result.
*
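   *  For example (a sketch): `List.newBuilder[Int].mapResult(_.toArray)` yields a builder
   *  that accepts `Int`s but produces an `Array[Int]` from `result()`.
   *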
* @note The original builder should no longer be used after `mapResult` is called.
*/
def mapResult[NewTo](f: To => NewTo): Builder[Elem, NewTo] =
new Builder[Elem, NewTo] with Proxy {
val self = Builder.this
def +=(x: Elem): this.type = { self += x; this }
def clear() = self.clear()
override def ++=(xs: TraversableOnce[Elem]): this.type = { self ++= xs; this }
override def sizeHint(size: Int) = self.sizeHint(size)
override def sizeHintBounded(size: Int, boundColl: TraversableLike[_, _]) = self.sizeHintBounded(size, boundColl)
def result: NewTo = f(self.result())
}
}
| felixmulder/scala | src/library/scala/collection/mutable/Builder.scala | Scala | bsd-3-clause | 6,040 |
/*
* Copyright 2014–2020 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.contrib.argonaut
import slamdata.Predef._
import scala.Float
import argonaut.DecodeJson
object JsonCodecs {
implicit def decodeDouble: DecodeJson[Double] =
DecodeJson.optionDecoder(_.number.flatMap(_.toDouble), "Double")
implicit def decodeFloat: DecodeJson[Float] =
DecodeJson.optionDecoder(_.number.flatMap(_.toFloat), "Float")
implicit def decodeInt: DecodeJson[Int] =
DecodeJson.optionDecoder(_.number.flatMap(_.toInt), "Int")
implicit def decodeLong: DecodeJson[Long] =
DecodeJson.optionDecoder(_.number.flatMap(_.toLong), "Long")
implicit def decodeShort: DecodeJson[Short] =
DecodeJson.optionDecoder(_.number.flatMap(_.toShort), "Short")
implicit def decodeBigInt: DecodeJson[BigInt] =
DecodeJson.optionDecoder(_.number.flatMap(_.toBigInt), "BigInt")
implicit def decodeBigDecimal: DecodeJson[BigDecimal] =
DecodeJson.optionDecoder(_.number.map(_.toBigDecimal), "BigDecimal")
implicit def decodeBoolean: DecodeJson[Boolean] =
DecodeJson.optionDecoder(_.bool, "Boolean")
}
| slamdata/quasar | foundation/src/main/scala/quasar/contrib/argonaut/JsonCodecs.scala | Scala | apache-2.0 | 1,659 |
/*
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*
*/
package org.apache.samza.util
import java.nio.channels.ClosedByInterruptException
import org.apache.samza.util.ExponentialSleepStrategy.RetryLoop
/**
* Encapsulates the pattern of retrying an operation until it succeeds.
* Before every retry there is a delay, which starts short and gets exponentially
* longer on each retry, up to a configurable maximum. There is no limit to the
* number of retries.
*
* @param backOffMultiplier The factor by which the delay increases on each retry.
* @param initialDelayMs Time in milliseconds to wait after the first attempt failed.
* @param maximumDelayMs Cap up to which we will increase the delay.
*/
class ExponentialSleepStrategy(
backOffMultiplier: Double = 2.0,
initialDelayMs: Long = 100,
maximumDelayMs: Long = 10000) {
require(backOffMultiplier > 1.0, "backOffMultiplier must be greater than 1")
require(initialDelayMs > 0, "initialDelayMs must be positive")
require(maximumDelayMs >= initialDelayMs, "maximumDelayMs must be >= initialDelayMs")
/**
* Given the delay before the last retry, calculate what the delay before the
* next retry should be.
*/
def getNextDelay(previousDelay: Long): Long = {
val nextDelay = (previousDelay * backOffMultiplier).asInstanceOf[Long]
math.min(math.max(initialDelayMs, nextDelay), maximumDelayMs)
}
/** Can be overridden by subclasses to customize looping behavior. */
def startLoop: RetryLoop = new RetryLoopState
/**
* Starts a retryable operation with the delay properties that were configured
* when the object was created. Every call to run is independent, so the same
* ExponentialSleepStrategy object can be used for several different retry loops.
*
* loopOperation is called on every attempt, and given as parameter a RetryLoop
* object. By default it is assumed that the operation failed. If the operation
* succeeded, you must call <code>done</code> on the RetryLoop object to indicate
* success. This method returns the return value of the successful loopOperation.
*
* If an exception is thrown during the execution of loopOperation, the onException
* handler is called. You can choose to re-throw the exception (so that it aborts
* the run loop and bubbles up), or ignore it (the operation will be retried),
* or call <code>done</code> (give up, don't retry).
*
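   * A minimal sketch (the `fetchRecord` and `isRetriable` calls below are hypothetical,
   * not part of this class):
   * <pre>
   * val result = new ExponentialSleepStrategy().run(
   *   loopOperation = { loop =>
   *     val r = fetchRecord()  // may throw
   *     loop.done              // mark success so the loop stops
   *     r
   *   },
   *   onException = { (e, loop) => if (!isRetriable(e)) throw e })  // rethrow to abort, otherwise retry
   * </pre>
   *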
* @param loopOperation The operation that should be attempted and may fail.
* @param onException Handler function that determines what to do with an exception.
* @return If loopOperation succeeded, an option containing the return value of
* the last invocation. If done was called in the exception handler or the
* thread was interrupted, None.
*/
def run[A](loopOperation: RetryLoop => A, onException: (Exception, RetryLoop) => Unit): Option[A] = {
val loop = startLoop
while (!loop.isDone && !Thread.currentThread.isInterrupted) {
try {
val result = loopOperation(loop)
if (loop.isDone) return Some(result)
} catch {
case e: InterruptedException => throw e
case e: ClosedByInterruptException => throw e
case e: OutOfMemoryError => throw e
case e: StackOverflowError => throw e
case e: Exception => onException(e, loop)
}
if (!loop.isDone && !Thread.currentThread.isInterrupted) loop.sleep
}
None
}
private[util] class RetryLoopState extends RetryLoop {
var previousDelay = 0L
var isDone = false
var sleepCount = 0
def sleep {
sleepCount += 1
val nextDelay = getNextDelay(previousDelay)
previousDelay = nextDelay
Thread.sleep(nextDelay)
}
def reset {
previousDelay = 0
isDone = false
}
def done {
isDone = true
}
}
}
object ExponentialSleepStrategy {
/**
* State of the retry loop, passed to every invocation of the loopOperation
* or the exception handler.
*/
trait RetryLoop {
/** Let the current thread sleep for the backoff time (called by run method). */
def sleep
/** Tell the retry loop to revert to initialDelayMs for the next retry. */
def reset
/** Tell the retry loop to stop trying (success or giving up). */
def done
/** Check whether <code>done</code> was called (used by the run method). */
def isDone: Boolean
/** Returns the number of times that the retry loop has called <code>sleep</code>. */
def sleepCount: Int
}
/** For tests using ExponentialSleepStrategy.Mock */
class CallLimitReached extends Exception
/**
* For writing tests of retryable code. Doesn't actually sleep, so that tests
* are quick to run.
*
* @param maxCalls The maximum number of retries to allow before throwing CallLimitReached.
*/
class Mock(maxCalls: Int) extends ExponentialSleepStrategy {
override def startLoop = new MockRetryLoop
class MockRetryLoop extends RetryLoop {
var isDone = false
var sleepCount = 0
def sleep { sleepCount += 1; if (sleepCount > maxCalls) throw new CallLimitReached }
def reset { isDone = false }
def done { isDone = true }
}
}
}
| TiVo/samza | samza-core/src/main/scala/org/apache/samza/util/ExponentialSleepStrategy.scala | Scala | apache-2.0 | 6,027 |
package speedith.mixr.isabelle
import org.junit.Test
import java.io.{LineNumberReader, InputStreamReader}
import java.nio.charset.Charset
import isabelle.Term.Term
import mixr.isabelle.pure.lib.TermYXML
import org.junit.Assert.assertTrue
import speedith.core.lang.{SpiderDiagram, CompoundSpiderDiagram}
import TranslationsTest._
class TranslationsTest {
@Test
def termToSpiderDiagram_must_return_a_compound_spider_diagram_with_spiders(): Unit = {
val spiderDiagram = translateSpiderDiagramFromIsaTerm(YXML_FILE_SPIDER_DIAGRAM_AB_WITH_SPIDERS)
assertTrue(spiderDiagram.isInstanceOf[CompoundSpiderDiagram])
}
}
object TranslationsTest {
/**
* (∃s1 s2. distinct[s1, s2] ∧ s1 ∈ A ∩ B ∧ s2 ∈ (A - B) ∪ (B - A)) ⟶ (∃t1 t2. distinct[t1, t2] ∧ t1 ∈ A ∧ t2 ∈ B) ∧ (A ∩ B) ≠ {}
*/
val YXML_FILE_SPIDER_DIAGRAM_AB_WITH_SPIDERS = "/speedith/mixr/isabelle/UnescapedYXML_spider_diagram_AB_with_spiders"
/**
* (∃s1 s2. distinct[s1, s2] ∧ s1 ∈ A ∩ B ∧ s2 ∈ (A - B) ∪ (B - A) ∧ A ⊆ {s1, s2}) ⟶ (∃t1 t2. distinct[t1, t2] ∧ t1 ∈ A ∧ t2 ∈ B) ∧ (A ∩ B) ≠ {}
*/
val YXML_FILE_SPIDER_DIAGRAM_AB_WITH_SPIDERS_AND_SHADED_ZONES = "/speedith/mixr/isabelle/UnescapedYXML_spider_diagram_AB_with_spiders_and_shaded_zones"
def translateSpiderDiagramFromIsaTerm(yxmlFilePath: String): SpiderDiagram = {
val isabelleTerm = parseYXMLFile(yxmlFilePath)
Translations.termToSpiderDiagram(isabelleTerm)
}
private def readFirstLine(file: String): String = {
val inputStream = classOf[TranslationsTest].getResourceAsStream(file)
val reader = new InputStreamReader(inputStream, Charset.forName("US-ASCII"))
new LineNumberReader(reader).readLine()
}
def parseYXMLFile(file: String): Term = {
TermYXML.parseYXML(readFirstLine(file))
}
} | urbas/mixr | libs/SpeedithIsabelleTranslator/src/test/scala/speedith/mixr/isabelle/TranslationsTest.scala | Scala | mit | 1,839 |
package org.photon.realm
import org.photon.common.{network => base}
import java.nio.charset.Charset
import com.twitter.util.Future
import org.photon.protocol.photon.UserInfos
final case class GrantAccessException(reason: String = "", underlying: Throwable = null)
extends RuntimeException(reason, underlying)
final case class AuthException(reason: String = "", underlying: Throwable = null)
extends RuntimeException(reason, underlying)
trait NetworkService extends base.NetworkService {
def grantUser(user: UserInfos, ticket: String): Future[Unit]
def auth(ticket: String): Future[UserInfos]
}
trait NetworkSession extends base.NetworkSession {
var userOption: Option[UserInfos]
def user = userOption.get
var playerOption: Option[Player]
def player = playerOption.get
}
trait NetworkComponent {
self: ConfigurationComponent =>
val networkConfig = config.getConfig("photon.network.realm")
val networkPort = networkConfig.getInt("port")
val networkCharset = Charset.forName(networkConfig.getString("charset"))
val networkService: NetworkService
}
| Emudofus/Photon | realm/main/src/org/photon/realm/NetworkComponent.scala | Scala | mit | 1,067 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2016 Ian McIntosh
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package f3
package web
import cats.implicits._
import diode._
import diode.ActionResult.ModelUpdate
import diode.react.ReactConnector
object AppCircuit extends Circuit[AppModel] with ReactConnector[AppModel] {
def initialModel = AppModel(List(), StandingsContent, none[Selection])
override val actionHandler: HandlerFunction = { (model, action) =>
action match {
case LoadLeagues(leagues) => {
model.initCache(leagues)
ModelUpdate(model.copy(leagues = leagues, selection = none[Selection])).some
}
case SelectLeague(leagueIx) => {
ModelUpdate(
model.copy(
selection = {
leagueIx.flatMap { ix =>
model.leagues.lift(ix).map { league =>
Selection(
ix,
league.currentSeason.year,
league.currentSeason.currentWeek
)
}
}
}
)
).some
}
case SelectSeason(season) => {
val week =
model.league.flatMap(_.seasons.find(_.year == season).map(_.currentWeek)).getOrElse(1)
ModelUpdate(
model.copy(selection = model.selection.map(_.copy(season = season, week = week)))).some
}
case SelectWeek(week) => {
ModelUpdate(model.copy(selection = model.selection.map(_.copy(week = week)))).some
}
case SelectContent(contentType) => ModelUpdate(model.copy(activeContent = contentType)).some
case _ => none[ModelUpdate[AppModel]]
}
}
}
| cranst0n/f3 | modules/web/src/main/scala/f3/web/circuit.scala | Scala | mit | 2,731 |
package com.airbnb.scheduler.state
import com.airbnb.scheduler.jobs._
/**
* @author Florian Leibert ([email protected])
*/
trait PersistenceStore {
/**
* Persists a job with the underlying persistence store
   * @param job the job to persist.
   * @return true if the job was persisted, false otherwise.
*/
def persistJob(job: BaseJob): Boolean
/**
* Saves a taskId in the state abstraction.
   * @param name the name of the task to persist.
* @param data the data to persist into the task.
* @return true if the taskId was saved, false if the taskId couldn't be saved.
*/
def persistTask(name: String, data: Array[Byte]): Boolean
/**
* Removes a task from the ZooKeeperState abstraction.
* @param taskId the taskId to remove.
   * @return true if the task was removed, false if the task couldn't be removed.
*/
def removeTask(taskId: String): Boolean
/**
* Removes a job from the ZooKeeperState abstraction.
* @param job the job to remove.
*/
def removeJob(job: BaseJob)
/**
* Loads a job from the underlying store
* @param name
* @return
*/
def getJob(name: String): BaseJob
/**
* Purges all tasks from the underlying store
*/
def purgeTasks()
/**
* Returns a list of all task names stored in the underlying store
* @param filter a filter that's matched on the taskId.
* @return
*/
def getTaskIds(filter: Option[String]): List[String]
/**
* Gets all tasks from the underlying store
* @return
*/
def getTasks: Map[String, Array[Byte]]
/**
* Returns all jobs from the underlying store
* @return
*/
def getJobs: Iterator[BaseJob]
}
| snegi/chronos | src/main/scala/com/airbnb/scheduler/state/PersistenceStore.scala | Scala | apache-2.0 | 1,663 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package main.scala
import scala.collection.mutable.{ListBuffer, Queue}
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.hive.HiveContext
case class Person(name: String, age: Int)
object SparkSqlExample {
def main(args: Array[String]) {
val conf = sys.env.get("SPARK_AUDIT_MASTER") match {
case Some(master) => new SparkConf().setAppName("Simple Sql App").setMaster(master)
case None => new SparkConf().setAppName("Simple Sql App")
}
val sc = new SparkContext(conf)
val hiveContext = new HiveContext(sc)
import hiveContext._
sql("DROP TABLE IF EXISTS src")
sql("CREATE TABLE IF NOT EXISTS src (key INT, value STRING)")
sql("LOAD DATA LOCAL INPATH 'data.txt' INTO TABLE src")
val results = sql("FROM src SELECT key, value WHERE key >= 0 AND KEY < 5").collect()
results.foreach(println)
def test(f: => Boolean, failureMsg: String) = {
if (!f) {
println(failureMsg)
System.exit(-1)
}
}
test(results.size == 5, "Unexpected number of selected elements: " + results)
println("Test succeeded")
sc.stop()
}
}
| hengyicai/OnlineAggregationUCAS | dev/audit-release/sbt_app_hive/src/main/scala/HiveApp.scala | Scala | apache-2.0 | 2,004 |
/*
* Licensed to Metamarkets Group Inc. (Metamarkets) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. Metamarkets licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.metamx.tranquility.test
import _root_.io.druid.data.input.impl.TimestampSpec
import _root_.io.druid.granularity.QueryGranularities
import _root_.io.druid.query.aggregation.LongSumAggregatorFactory
import _root_.scala.collection.JavaConverters._
import _root_.scala.reflect.runtime.universe.typeTag
import com.github.nscala_time.time.Imports._
import com.google.common.base.Charsets
import com.google.common.io.ByteStreams
import com.metamx.common.parsers.ParseException
import com.metamx.common.scala.timekeeper.TestingTimekeeper
import com.metamx.common.scala.timekeeper.Timekeeper
import com.metamx.common.scala.Jackson
import com.metamx.common.scala.Logging
import com.metamx.common.Granularity
import com.metamx.common.ISE
import com.metamx.tranquility.beam.ClusteredBeamTuning
import com.metamx.tranquility.beam.RoundRobinBeam
import com.metamx.tranquility.config.DataSourceConfig
import com.metamx.tranquility.config.PropertiesBasedConfig
import com.metamx.tranquility.config.TranquilityConfig
import com.metamx.tranquility.druid._
import com.metamx.tranquility.test.DirectDruidTest._
import com.metamx.tranquility.test.common._
import com.metamx.tranquility.tranquilizer.MessageDroppedException
import com.metamx.tranquility.tranquilizer.Tranquilizer
import com.metamx.tranquility.typeclass.DefaultJsonWriter
import com.metamx.tranquility.typeclass.JavaObjectWriter
import com.metamx.tranquility.typeclass.Timestamper
import com.twitter.util._
import java.io.ByteArrayInputStream
import java.nio.ByteBuffer
import java.{util => ju}
import javax.ws.rs.core.MediaType
import org.apache.curator.framework.CuratorFramework
import org.joda.time.DateTime
import org.scalatest.FunSuite
object DirectDruidTest
{
val TimeColumn = "ts"
val TimeFormat = "posix"
def generateEvents(now: DateTime): Seq[SimpleEvent] = {
// Need to use somewhat nowish timestamps for the timekeeper, because this is an integration test
// against unmodified Druid indexing, and it will use real wall clock time to make its decisions.
Seq(
// This event should be sent
SimpleEvent(now, "hey", 2, 37.7833, -122.4167),
// This event is intended to be dropped
SimpleEvent(now - 1.year, "hey", 4, 37.7833, -122.4167),
// This event should be sent
SimpleEvent(now + 1.minute, "what", 3, 37.7833, 122.4167)
)
}
def newBuilder(curator: CuratorFramework, timekeeper: Timekeeper): DruidBeams.Builder[SimpleEvent, SimpleEvent] = {
val dataSource = "xxx"
val tuning = ClusteredBeamTuning(Granularity.HOUR, 0.minutes, 10.minutes, 1, 1, 1, 1)
val rollup = DruidRollup(
SpecificDruidDimensions(
Vector("foo"),
Vector(MultipleFieldDruidSpatialDimension("coord.geo", Seq("lat", "lon")))
),
IndexedSeq(new LongSumAggregatorFactory("barr", "bar")),
QueryGranularities.MINUTE,
true
)
val druidEnvironment = new DruidEnvironment(
"druid/tranquility/indexer" /* Slashes should be converted to colons */ ,
"druid:tranquility:firehose:%s"
)
val druidLocation = new DruidLocation(druidEnvironment, dataSource)
DruidBeams.builder[SimpleEvent]()
.curator(curator)
.discoveryPath("/disco-fever")
.location(druidLocation)
.rollup(rollup)
.tuning(tuning)
.timekeeper(timekeeper)
.timestampSpec(new TimestampSpec(TimeColumn, TimeFormat, null))
.beamMergeFn(beams => new RoundRobinBeam(beams.toIndexedSeq))
}
def readDataSourceConfig(
zkConnect: String,
rsrc: String = "direct-druid-test.yaml"
): DataSourceConfig[PropertiesBasedConfig] =
{
val configString = new String(
ByteStreams.toByteArray(getClass.getClassLoader.getResourceAsStream(rsrc)),
Charsets.UTF_8
).replaceAll("@ZKPLACEHOLDER@", zkConnect)
val config = TranquilityConfig.read(new ByteArrayInputStream(configString.getBytes(Charsets.UTF_8)))
config.getDataSource("xxx")
}
}
class DirectDruidTest
extends FunSuite with DruidIntegrationSuite with CuratorRequiringSuite with Logging
{
JulUtils.routeJulThroughSlf4j()
test("Druid standalone") {
withDruidStack {
(curator, broker, coordinator, overlord) =>
val timekeeper = new TestingTimekeeper
val indexing = newBuilder(curator, timekeeper).buildTranquilizer()
indexing.start()
try {
timekeeper.now = new DateTime().hourOfDay().roundFloorCopy()
val eventsSent = Future.collect(
generateEvents(timekeeper.now) map { event =>
indexing.send(event) transform {
case Return(()) => Future.value(true)
case Throw(e: MessageDroppedException) => Future.value(false)
case Throw(e) => Future.exception(e)
}
}
)
assert(Await.result(eventsSent) === Seq(true, false, true))
runTestQueriesAndAssertions(broker, timekeeper)
}
catch {
case NonFatal(e) =>
throw new ISE(e, "Failed test")
}
finally {
indexing.stop()
}
}
}
test("Druid standalone - Custom ObjectWriter") {
withDruidStack {
(curator, broker, coordinator, overlord) =>
val timekeeper = new TestingTimekeeper
val beam = newBuilder(curator, timekeeper).objectWriter(
new JavaObjectWriter[SimpleEvent]
{
override def asBytes(obj: SimpleEvent) = throw new UnsupportedOperationException
override def batchAsBytes(objects: ju.Iterator[SimpleEvent]) = {
val strings = objects.asScala.map(o => Jackson.generate(o.toMap))
val packed = "[%s]" format strings.mkString(", ")
packed.getBytes
}
/**
* @return content type of the serialized form
*/
override def contentType: String = MediaType.APPLICATION_JSON
}
).buildBeam()
val indexing = Tranquilizer.create(beam)
indexing.start()
try {
timekeeper.now = new DateTime().hourOfDay().roundFloorCopy()
val eventsSent = Future.collect(
generateEvents(timekeeper.now) map { event =>
indexing.send(event) transform {
case Return(()) => Future.value(true)
case Throw(e: MessageDroppedException) => Future.value(false)
case Throw(e) => Future.exception(e)
}
}
)
assert(Await.result(eventsSent) === Seq(true, false, true))
runTestQueriesAndAssertions(broker, timekeeper)
}
catch {
case NonFatal(e) =>
throw new ISE(e, "Failed test")
}
finally {
indexing.stop()
}
}
}
test("Druid standalone - From config file - Smile mapper") {
withDruidStack {
(curator, broker, coordinator, overlord) =>
val timekeeper = new TestingTimekeeper
val config = readDataSourceConfig(
curator.getZookeeperClient.getCurrentConnectionString,
"direct-druid-test-smile.yaml"
)
val indexing = DruidBeams
.fromConfig(config)
.timekeeper(timekeeper)
.buildTranquilizer(config.tranquilizerBuilder())
indexing.start()
try {
timekeeper.now = new DateTime().hourOfDay().roundFloorCopy()
val eventsSent = Future.collect(
generateEvents(timekeeper.now) map { event =>
indexing.send(event.toMap.asJava.asInstanceOf[ju.Map[String, AnyRef]]) transform {
case Return(()) => Future.value(true)
case Throw(e: MessageDroppedException) => Future.value(false)
case Throw(e) => Future.exception(e)
}
}
)
assert(Await.result(eventsSent) === Seq(true, false, true))
runTestQueriesAndAssertions(broker, timekeeper)
}
catch {
case NonFatal(e) =>
throw new ISE(e, "Failed test")
}
finally {
indexing.stop()
}
}
}
test("Druid standalone - From config file - Custom type") {
withDruidStack {
(curator, broker, coordinator, overlord) =>
val timekeeper = new TestingTimekeeper
val config = readDataSourceConfig(curator.getZookeeperClient.getCurrentConnectionString)
val indexing = DruidBeams
.fromConfig(config, implicitly[Timestamper[SimpleEvent]], new DefaultJsonWriter)
.timekeeper(timekeeper)
.buildTranquilizer(config.tranquilizerBuilder())
indexing.start()
try {
timekeeper.now = new DateTime().hourOfDay().roundFloorCopy()
val eventsSent = Future.collect(
generateEvents(timekeeper.now) map { event =>
indexing.send(event) transform {
case Return(()) => Future.value(true)
case Throw(e: MessageDroppedException) => Future.value(false)
case Throw(e) => Future.exception(e)
}
}
)
assert(Await.result(eventsSent) === Seq(true, false, true))
runTestQueriesAndAssertions(broker, timekeeper)
}
catch {
case NonFatal(e) =>
throw new ISE(e, "Failed test")
}
finally {
indexing.stop()
}
}
}
test("Druid standalone - overlord based task discovery") {
withDruidStack {
(curator, broker, coordinator, overlord) =>
val timekeeper = new TestingTimekeeper
val indexing = newBuilder(curator, timekeeper)
.druidBeamConfig(DruidBeamConfig(taskLocator = TaskLocator.Overlord))
.timekeeper(timekeeper)
.buildTranquilizer()
indexing.start()
try {
timekeeper.now = new DateTime().hourOfDay().roundFloorCopy()
val eventsSent = Future.collect(
generateEvents(timekeeper.now) map { event =>
indexing.send(event) transform {
case Return(()) => Future.value(true)
case Throw(e: MessageDroppedException) => Future.value(false)
case Throw(e) => Future.exception(e)
}
}
)
assert(Await.result(eventsSent) === Seq(true, false, true))
runTestQueriesAndAssertions(broker, timekeeper)
}
catch {
case NonFatal(e) =>
throw new ISE(e, "Failed test")
}
finally {
indexing.stop()
}
}
}
test("Druid standalone - From config file - Java Map type") {
withDruidStack {
(curator, broker, coordinator, overlord) =>
val timekeeper = new TestingTimekeeper
val config = readDataSourceConfig(curator.getZookeeperClient.getCurrentConnectionString)
val indexing = DruidBeams
.fromConfig(config, typeTag[java.util.Map[String, AnyRef]])
.timekeeper(timekeeper)
.buildTranquilizer(config.tranquilizerBuilder())
indexing.start()
try {
timekeeper.now = new DateTime().hourOfDay().roundFloorCopy()
val eventsSent = Future.collect(
generateEvents(timekeeper.now) map { event =>
indexing.send(event.toMap.asJava.asInstanceOf[java.util.Map[String, AnyRef]]) transform {
case Return(()) => Future.value(true)
case Throw(e: MessageDroppedException) => Future.value(false)
case Throw(e) => Future.exception(e)
}
}
)
assert(Await.result(eventsSent) === Seq(true, false, true))
runTestQueriesAndAssertions(broker, timekeeper)
}
catch {
case NonFatal(e) =>
throw new ISE(e, "Failed test")
}
finally {
indexing.stop()
}
}
}
test("Druid standalone - From config file - Scala Map type") {
withDruidStack {
(curator, broker, coordinator, overlord) =>
val timekeeper = new TestingTimekeeper
val config = readDataSourceConfig(curator.getZookeeperClient.getCurrentConnectionString)
val indexing = DruidBeams
.fromConfig(config, typeTag[Map[String, Any]])
.buildTranquilizer(config.tranquilizerBuilder())
indexing.start()
try {
timekeeper.now = new DateTime().hourOfDay().roundFloorCopy()
val eventsSent = Future.collect(
generateEvents(timekeeper.now) map { event =>
indexing.send(event.toMap) transform {
case Return(()) => Future.value(true)
case Throw(e: MessageDroppedException) => Future.value(false)
case Throw(e) => Future.exception(e)
}
}
)
assert(Await.result(eventsSent) === Seq(true, false, true))
runTestQueriesAndAssertions(broker, timekeeper)
}
catch {
case NonFatal(e) =>
throw new ISE(e, "Failed test")
}
finally {
indexing.stop()
}
}
}
test("Druid standalone - From config file - String type (CSV with ParseExceptions)") {
withDruidStack {
(curator, broker, coordinator, overlord) =>
val timekeeper = new TestingTimekeeper
val config = readDataSourceConfig(curator.getZookeeperClient.getCurrentConnectionString)
val indexing = DruidBeams
.fromConfig(config, typeTag[String])
.buildTranquilizer(config.tranquilizerBuilder())
indexing.start()
try {
timekeeper.now = new DateTime().hourOfDay().roundFloorCopy()
val eventsSent = Future.collect(
(generateEvents(timekeeper.now).map(_.toCsv) ++ Seq("invalid \\"csv")) map { csv =>
indexing.send(csv) transform {
case Return(()) => Future.value(1)
case Throw(e: MessageDroppedException) => Future.value(2)
case Throw(e: ParseException) => Future.value(3)
case Throw(e) => Future.exception(e)
}
}
)
assert(Await.result(eventsSent) === Seq(1, 2, 1, 3))
runTestQueriesAndAssertions(broker, timekeeper)
}
catch {
case NonFatal(e) =>
throw new ISE(e, "Failed test")
}
finally {
indexing.stop()
}
}
}
test("Druid standalone - From config file - Array[Byte] type (CSV with ParseExceptions)") {
withDruidStack {
(curator, broker, coordinator, overlord) =>
val timekeeper = new TestingTimekeeper
val config = readDataSourceConfig(curator.getZookeeperClient.getCurrentConnectionString)
val indexing = DruidBeams
.fromConfig(config, typeTag[Array[Byte]])
.buildTranquilizer(config.tranquilizerBuilder())
indexing.start()
try {
timekeeper.now = new DateTime().hourOfDay().roundFloorCopy()
val eventsSent = Future.collect(
(generateEvents(timekeeper.now).map(_.toCsv) ++ Seq("invalid \\"csv")) map { csv =>
indexing.send(csv.getBytes(Charsets.UTF_8)) transform {
case Return(()) => Future.value(1)
case Throw(e: MessageDroppedException) => Future.value(2)
case Throw(e: ParseException) => Future.value(3)
case Throw(e) => Future.exception(e)
}
}
)
assert(Await.result(eventsSent) === Seq(1, 2, 1, 3))
runTestQueriesAndAssertions(broker, timekeeper)
}
catch {
case NonFatal(e) =>
throw new ISE(e, "Failed test")
}
finally {
indexing.stop()
}
}
}
test("Druid standalone - From config file - ByteBuffer type (CSV with ParseExceptions)") {
withDruidStack {
(curator, broker, coordinator, overlord) =>
val timekeeper = new TestingTimekeeper
val config = readDataSourceConfig(curator.getZookeeperClient.getCurrentConnectionString)
val indexing = DruidBeams
.fromConfig(config, typeTag[ByteBuffer])
.buildTranquilizer(config.tranquilizerBuilder())
indexing.start()
try {
timekeeper.now = new DateTime().hourOfDay().roundFloorCopy()
val eventsSent = Future.collect(
(generateEvents(timekeeper.now).map(_.toCsv) ++ Seq("invalid \\"csv")) map { csv =>
indexing.send(ByteBuffer.wrap(csv.getBytes(Charsets.UTF_8))) transform {
case Return(()) => Future.value(1)
case Throw(e: MessageDroppedException) => Future.value(2)
case Throw(e: ParseException) => Future.value(3)
case Throw(e) => Future.exception(e)
}
}
)
assert(Await.result(eventsSent) === Seq(1, 2, 1, 3))
runTestQueriesAndAssertions(broker, timekeeper)
}
catch {
case NonFatal(e) =>
throw new ISE(e, "Failed test")
}
finally {
indexing.stop()
}
}
}
test("Druid standalone - From config file - Array[Byte] type (JSON with flattenSpec)") {
withDruidStack {
(curator, broker, coordinator, overlord) =>
val timekeeper = new TestingTimekeeper
val config = readDataSourceConfig(
curator.getZookeeperClient.getCurrentConnectionString,
"direct-druid-test-flattenSpec.yaml"
)
val indexing = DruidBeams
.fromConfig(config, typeTag[Array[Byte]])
.buildTranquilizer(config.tranquilizerBuilder())
indexing.start()
try {
timekeeper.now = new DateTime().hourOfDay().roundFloorCopy()
val eventsSent = Future.collect(
generateEvents(timekeeper.now).map(_.toNestedMap) map { m =>
indexing.send(Jackson.bytes(m)) transform {
case Return(()) => Future.value(1)
case Throw(e: MessageDroppedException) => Future.value(2)
case Throw(e) => Future.exception(e)
}
}
)
assert(Await.result(eventsSent) === Seq(1, 2, 1))
runTestQueriesAndAssertions(broker, timekeeper)
}
catch {
case NonFatal(e) =>
throw new ISE(e, "Failed test")
}
finally {
indexing.stop()
}
}
}
}
| druid-io/tranquility | core/src/test/scala/com/metamx/tranquility/test/DirectDruidTest.scala | Scala | apache-2.0 | 19,292 |
package gui
import java.awt.Color
import java.awt.event.{ActionListener, KeyEvent}
import javax.swing._
import connections.usb.Adb
import slide.Device
import net.miginfocom.swing.MigLayout
class DeviceField(var onComponentsShown: () => Unit, var actionListener: ActionListener) extends JPanel {
this.setLayout(new MigLayout())
this.setFocusable(true)
private val searchingText: String =
"<html><center>No devices detected<br/><br/>Scanning for devices...</center></html>"
private var ip: String = "localhost"
private var name: String = ""
private var manufacturer: String = ""
private val connectButton: JButton = new JButton("Connect")
connectButton.setEnabled(false)
private val nameLabel: JLabel = new JLabel
private val manuLabel: JLabel = new JLabel
private val ipAddressLabel: JLabel = new JLabel
private val lblIcon: JLabel = new JLabel(searchingText)
this.lblIcon.setHorizontalAlignment(SwingConstants.CENTER)
this.add(lblIcon, "cell 0 0, grow")
this.add(manuLabel, "cell 0 1, grow")
this.add(nameLabel, "cell 0 2, grow")
this.add(ipAddressLabel, "cell 0 3, grow")
this.add(connectButton, "cell 0 4, w 150!, grow")
val alta: KeyBinder = new KeyBinder(KeyEvent.VK_ALT, KeyEvent.VK_A) {
override def onKeysDown(): Unit = new Console().runAdbProcess(Adb.adbDevices())
}
val altl: KeyBinder = new KeyBinder(KeyEvent.VK_ALT, KeyEvent.VK_L) {
override def onKeysDown(): Unit = Licence.showLicense()
}
this.addKeyListener(alta)
this.addKeyListener(altl)
this.connectButton.addKeyListener(alta)
this.connectButton.addKeyListener(altl)
this.setVisible(true)
def setUi(d: Device): Unit = {
val icon: ImageIcon = d.icon
lblIcon.setIcon(icon)
lblIcon.setText("")
if (d.ip != "USB") {
ip = d.ip
name = d.model
manufacturer = d.manufacturer
}
lblIcon.setHorizontalAlignment(SwingConstants.CENTER)
nameLabel.setText(this.name)
nameLabel.setHorizontalAlignment(SwingConstants.CENTER)
nameLabel.setBackground(Color.white)
manuLabel.setText(manufacturer)
manuLabel.setBackground(Color.white)
manuLabel.setHorizontalAlignment(SwingConstants.CENTER)
ipAddressLabel.setText(ip)
ipAddressLabel.setBackground(Color.white)
ipAddressLabel.setHorizontalAlignment(SwingConstants.CENTER)
connectButton.setHorizontalAlignment(SwingConstants.CENTER)
if (connectButton.getActionListeners.length <= 0) {
connectButton.addActionListener(actionListener)
}
this.onComponentsShown()
this.updateUI()
}
override def show(): Unit = {
showDeviceField(visibility = true)
}
def showDeviceField(visibility: Boolean): Unit = {
if (visibility) {
// Controls shown
this.lblIcon.setText("")
} else {
this.lblIcon.setText(searchingText)
this.lblIcon.setIcon(null)
}
this.nameLabel.setVisible(visibility)
this.nameLabel.setText("")
this.manuLabel.setVisible(visibility)
this.manuLabel.setText("")
this.ipAddressLabel.setVisible(visibility)
this.ipAddressLabel.setText("localhost")
this.connectButton.setEnabled(visibility)
}
}
| itstar4tech/slide-desktop | src/main/java/gui/DeviceField.scala | Scala | gpl-2.0 | 3,415 |
package com.scalaAsm.x86
package Instructions
package System
// Description: Load Segment Limit
// Category: general
trait LSL extends InstructionDefinition {
val mnemonic = "LSL"
}
object LSL extends TwoOperands[LSL] with LSLImpl
trait LSLImpl extends LSL {
implicit object _0 extends TwoOp[r16, m16] {
val opcode: TwoOpcodes = (0x0F, 0x03) /r
val format = RegRmFormat
}
implicit object _1 extends TwoOp[r32, m16] {
val opcode: TwoOpcodes = (0x0F, 0x03) /r
val format = RegRmFormat
}
implicit object _2 extends TwoOp[r64, m16] {
val opcode: TwoOpcodes = (0x0F, 0x03) /r
override def prefix = REX.W(true)
val format = RegRmFormat
}
implicit object _3 extends TwoOp[r16, r16] {
val opcode: TwoOpcodes = (0x0F, 0x03) /r
val format = RegRmFormat
}
implicit object _4 extends TwoOp[r32, r32] {
val opcode: TwoOpcodes = (0x0F, 0x03) /r
val format = RegRmFormat
}
implicit object _5 extends TwoOp[r64, r32] {
val opcode: TwoOpcodes = (0x0F, 0x03) /r
override def prefix = REX.W(true)
val format = RegRmFormat
}
}
| bdwashbu/scala-x86-inst | src/main/scala/com/scalaAsm/x86/Instructions/System/LSL.scala | Scala | apache-2.0 | 1,098 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.internal.config
import java.util.concurrent.TimeUnit
import org.apache.spark.network.util.ByteUnit
private[spark] object Python {
val PYTHON_WORKER_REUSE = ConfigBuilder("spark.python.worker.reuse")
.version("1.2.0")
.booleanConf
.createWithDefault(true)
val PYTHON_TASK_KILL_TIMEOUT = ConfigBuilder("spark.python.task.killTimeout")
.version("2.2.2")
.timeConf(TimeUnit.MILLISECONDS)
.createWithDefaultString("2s")
val PYTHON_USE_DAEMON = ConfigBuilder("spark.python.use.daemon")
.version("2.3.0")
.booleanConf
.createWithDefault(true)
val PYTHON_DAEMON_MODULE = ConfigBuilder("spark.python.daemon.module")
.version("2.4.0")
.stringConf
.createOptional
val PYTHON_WORKER_MODULE = ConfigBuilder("spark.python.worker.module")
.version("2.4.0")
.stringConf
.createOptional
val PYSPARK_EXECUTOR_MEMORY = ConfigBuilder("spark.executor.pyspark.memory")
.version("2.4.0")
.bytesConf(ByteUnit.MiB)
.createOptional
val PYTHON_AUTH_SOCKET_TIMEOUT = ConfigBuilder("spark.python.authenticate.socketTimeout")
.internal()
.version("3.1.0")
.timeConf(TimeUnit.SECONDS)
.createWithDefaultString("15s")
val PYTHON_WORKER_FAULTHANLDER_ENABLED = ConfigBuilder("spark.python.worker.faulthandler.enabled")
.doc("When true, Python workers set up the faulthandler for the case when the Python worker " +
"exits unexpectedly (crashes), and shows the stack trace of the moment the Python worker " +
"crashes in the error message if captured successfully.")
.version("3.2.0")
.booleanConf
.createWithDefault(false)
}
| ueshin/apache-spark | core/src/main/scala/org/apache/spark/internal/config/Python.scala | Scala | apache-2.0 | 2,459 |
package connectionpools.impl
import java.util
import javax.sql.DataSource
import com.zaxxer.hikari.{HikariConfig, HikariDataSource}
import connectionpools.DataSources._
import connectionpools._
import scala.collection.JavaConverters._
/**
* HikariCPDataSourceFactory
* @author Sunghyouk Bae [email protected]
*/
class HikariCPDataSourceFactory extends DataSourceFactorySupport {
/**
   * Creates a HikariCP DataSource.
   * @param dataSourceClassName the DataSource class name
   *                            (note: this is not the legacy driverClass; for MySQL use com.mysql.jdbc.jdbc2.optional.MysqlDataSource)
   * @param url      database URL
   * @param username user name
   * @param passwd   user password
   * @return a [[javax.sql.DataSource]] instance
*/
override def createDataSource(dataSourceClassName: String = "",
driverClass: String = JdbcDrivers.DRIVER_CLASS_H2,
url: String = "jdbc:h2:mem:test",
username: String = "",
passwd: String = "",
props: util.Map[String, String] = new util.HashMap(),
maxPoolSize: Int = MAX_POOL_SIZE): DataSource = {
    log.info("Building a Hikari DataSource... " +
             s"dataSourceClassName=[$dataSourceClassName], driverClass=[$driverClass] url=[$url], username=[$username]")
val config = new HikariConfig()
config.setInitializationFailFast(true)
config.setIdleTimeout(30000)
    // Leave autoCommit at the driver default (for MySQL, auto commit = true).
//config.setAutoCommit(false)
if (dataSourceClassName != null && dataSourceClassName.length > 0) {
config.setDataSourceClassName(dataSourceClassName)
config.addDataSourceProperty("url", url)
config.addDataSourceProperty("user", username)
config.addDataSourceProperty("password", passwd)
} else {
config.setDriverClassName(driverClass)
config.setJdbcUrl(url)
config.setUsername(username)
config.setPassword(passwd)
}
    // For MySQL, apply the settings below to improve performance.
val isMySQL = JdbcDrivers.DATASOURCE_CLASS_MYSQL.equals(dataSourceClassName) ||
JdbcDrivers.DRIVER_CLASS_MYSQL.equals(driverClass)
if (isMySQL) {
config.addDataSourceProperty("cachePrepStmts", "true")
config.addDataSourceProperty("prepStmtCacheSize", "500")
config.addDataSourceProperty("prepStmtCacheSqlLimit", "4096")
config.addDataSourceProperty("useServerPrepStmts", "true")
}
config.setConnectionTestQuery("SELECT 1")
val poolSize = maxPoolSize max MIN_POOL_SIZE
config.setMaximumPoolSize(poolSize)
    // NOTE: this setting matters a lot!
    // NOTE: if left unset, it defaults to the max pool size, which can exhaust the connection pool;
    //       keep only a minimal number of idle connections.
config.setMinimumIdle(processCount min MIN_IDLE_SIZE)
if (props != null) {
props.asScala foreach {
case (name, value) => config.addDataSourceProperty(name, value)
}
}
new HikariDataSource(config)
}
}
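// A minimal usage sketch (illustrative only, not part of the factory): builds an in-memory H2
// pool from the defaults above and borrows one connection to verify the pool works. The object
// name and the validity-check style are assumptions, not existing project code.
object HikariCPDataSourceFactoryExample {
  def checkConnection(): Boolean = {
    val dataSource = new HikariCPDataSourceFactory().createDataSource()
    val connection = dataSource.getConnection
    try connection.isValid(1) finally connection.close()
  }
}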
| debop/connectionpool-benchmark | src/main/scala/connectionpools/impl/HikariCPDataSourceFactory.scala | Scala | apache-2.0 | 3,266 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.rules.logical
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.planner.plan.nodes.FlinkConventions
import org.apache.flink.table.planner.plan.nodes.logical.{FlinkLogicalCalc, FlinkLogicalTableSourceScan}
import org.apache.flink.table.planner.plan.optimize.program._
import org.apache.flink.table.planner.plan.rules.FlinkBatchRuleSets
import org.apache.flink.table.planner.runtime.utils.JavaUserDefinedScalarFunctions.NonDeterministicUdf
import org.apache.flink.table.planner.utils.TableTestBase
import org.apache.calcite.plan.hep.HepMatchOrder
import org.apache.calcite.rel.rules._
import org.apache.calcite.tools.RuleSets
import org.junit.{Before, Test}
/**
* Test for [[FlinkCalcMergeRule]].
*/
class FlinkCalcMergeRuleTest extends TableTestBase {
private val util = batchTestUtil()
@Before
def setup(): Unit = {
val programs = new FlinkChainedProgram[BatchOptimizeContext]()
programs.addLast(
"table_ref",
FlinkHepRuleSetProgramBuilder.newBuilder
.setHepRulesExecutionType(HEP_RULES_EXECUTION_TYPE.RULE_SEQUENCE)
.setHepMatchOrder(HepMatchOrder.BOTTOM_UP)
.add(FlinkBatchRuleSets.TABLE_REF_RULES)
.build())
programs.addLast(
"logical",
FlinkVolcanoProgramBuilder.newBuilder
.add(RuleSets.ofList(
FilterToCalcRule.INSTANCE,
ProjectToCalcRule.INSTANCE,
FlinkCalcMergeRule.INSTANCE,
FlinkLogicalCalc.CONVERTER,
FlinkLogicalTableSourceScan.CONVERTER
))
.setRequiredOutputTraits(Array(FlinkConventions.LOGICAL))
.build())
util.replaceBatchProgram(programs)
util.addTableSource[(Int, Int, String)]("MyTable", 'a, 'b, 'c)
}
@Test
def testCalcMergeWithSameDigest(): Unit = {
util.verifyPlan("SELECT a, b FROM (SELECT * FROM MyTable WHERE a = b) t WHERE b = a")
}
@Test
def testCalcMergeWithNonDeterministicExpr1(): Unit = {
util.addFunction("random_udf", new NonDeterministicUdf)
val sqlQuery = "SELECT a, a1 FROM (SELECT a, random_udf(a) AS a1 FROM MyTable) t WHERE a1 > 10"
util.verifyPlan(sqlQuery)
}
@Test
def testCalcMergeWithNonDeterministicExpr2(): Unit = {
util.addFunction("random_udf", new NonDeterministicUdf)
val sqlQuery = "SELECT a FROM (SELECT a FROM MyTable) t WHERE random_udf(a) > 10"
util.verifyPlan(sqlQuery)
}
}
| bowenli86/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/rules/logical/FlinkCalcMergeRuleTest.scala | Scala | apache-2.0 | 3,261 |
package lampion.scala;
import _root_.scala.collection._
class Foo extends mutable.HashMap
| yusuke2255/dotty | tests/untried/pos/t595.scala | Scala | bsd-3-clause | 92 |
package com.pfalabs.tinyfilter
import org.osgi.service.component.annotations.Component
import javax.servlet.Servlet
import javax.servlet.http.HttpServlet
import org.osgi.service.component.annotations.ServiceScope.SINGLETON
import org.slf4j.Logger
import org.slf4j.LoggerFactory
@Component(service = Array(classOf[Servlet]), immediate = true, scope = SINGLETON,
property = {
Array("osgi.http.whiteboard.servlet.pattern=/*",
"osgi.http.whiteboard.filter.asyncSupported=true",
"osgi.http.whiteboard.context.select=(osgi.http.whiteboard.context.name=org.osgi.service.http)")
})
class DefaultServlet extends HttpServlet {
val log: Logger = LoggerFactory.getLogger(classOf[DefaultServlet])
override def init() = log.info("#init")
} | stillalex/scala-osgi-trials | tiny-filter/src/main/scala/com/pfalabs/tinyfilter/DefaultServlet.scala | Scala | apache-2.0 | 754 |
/*
* FILE: predicateJoinTestScala.scala
* Copyright (c) 2015 - 2018 GeoSpark Development Team
*
* MIT License
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
package org.datasyslab.geosparksql
import org.apache.spark.sql.Row
import org.apache.spark.sql.geosparksql.strategy.join.JoinQueryDetector
import org.apache.spark.sql.types._
import org.datasyslab.geospark.utils.GeoSparkConf
class predicateJoinTestScala extends TestBaseScala {
describe("GeoSpark-SQL Predicate Join Test") {
it("Passed ST_Contains in a join") {
val geosparkConf = new GeoSparkConf(sparkSession.sparkContext.getConf)
println(geosparkConf)
var polygonCsvDf = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPolygonInputLocation)
polygonCsvDf.createOrReplaceTempView("polygontable")
polygonCsvDf.show()
var polygonDf = sparkSession.sql("select ST_PolygonFromEnvelope(cast(polygontable._c0 as Decimal(24,20)),cast(polygontable._c1 as Decimal(24,20)), cast(polygontable._c2 as Decimal(24,20)), cast(polygontable._c3 as Decimal(24,20))) as polygonshape from polygontable")
polygonDf.createOrReplaceTempView("polygondf")
polygonDf.show()
var pointCsvDF = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPointInputLocation)
pointCsvDF.createOrReplaceTempView("pointtable")
pointCsvDF.show()
var pointDf = sparkSession.sql("select ST_Point(cast(pointtable._c0 as Decimal(24,20)),cast(pointtable._c1 as Decimal(24,20))) as pointshape from pointtable")
pointDf.createOrReplaceTempView("pointdf")
pointDf.show()
var rangeJoinDf = sparkSession.sql("select * from polygondf, pointdf where ST_Contains(polygondf.polygonshape,pointdf.pointshape) ")
rangeJoinDf.explain()
rangeJoinDf.show(3)
assert(rangeJoinDf.count() == 1000)
}
it("Passed ST_Intersects in a join") {
val geosparkConf = new GeoSparkConf(sparkSession.sparkContext.getConf)
println(geosparkConf)
var polygonCsvDf = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPolygonInputLocation)
polygonCsvDf.createOrReplaceTempView("polygontable")
polygonCsvDf.show()
var polygonDf = sparkSession.sql("select ST_PolygonFromEnvelope(cast(polygontable._c0 as Decimal(24,20)),cast(polygontable._c1 as Decimal(24,20)), cast(polygontable._c2 as Decimal(24,20)), cast(polygontable._c3 as Decimal(24,20))) as polygonshape from polygontable")
polygonDf.createOrReplaceTempView("polygondf")
polygonDf.show()
var pointCsvDF = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPointInputLocation)
pointCsvDF.createOrReplaceTempView("pointtable")
pointCsvDF.show()
var pointDf = sparkSession.sql("select ST_Point(cast(pointtable._c0 as Decimal(24,20)),cast(pointtable._c1 as Decimal(24,20))) as pointshape from pointtable")
pointDf.createOrReplaceTempView("pointdf")
pointDf.show()
var rangeJoinDf = sparkSession.sql("select * from polygondf, pointdf where ST_Intersects(polygondf.polygonshape,pointdf.pointshape) ")
rangeJoinDf.explain()
rangeJoinDf.show(3)
assert(rangeJoinDf.count() == 1000)
}
it("Passed ST_Touches in a join") {
val geosparkConf = new GeoSparkConf(sparkSession.sparkContext.getConf)
println(geosparkConf)
var polygonCsvDf = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPolygonInputLocation)
polygonCsvDf.createOrReplaceTempView("polygontable")
polygonCsvDf.show()
var polygonDf = sparkSession.sql("select ST_PolygonFromEnvelope(cast(polygontable._c0 as Decimal(24,20)),cast(polygontable._c1 as Decimal(24,20)), cast(polygontable._c2 as Decimal(24,20)), cast(polygontable._c3 as Decimal(24,20))) as polygonshape from polygontable")
polygonDf.createOrReplaceTempView("polygondf")
polygonDf.show()
var pointCsvDF = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPointInputLocation)
pointCsvDF.createOrReplaceTempView("pointtable")
pointCsvDF.show()
var pointDf = sparkSession.sql("select ST_Point(cast(pointtable._c0 as Decimal(24,20)),cast(pointtable._c1 as Decimal(24,20))) as pointshape from pointtable")
pointDf.createOrReplaceTempView("pointdf")
pointDf.show()
var rangeJoinDf = sparkSession.sql("select * from polygondf, pointdf where ST_Touches(polygondf.polygonshape,pointdf.pointshape) ")
rangeJoinDf.explain()
rangeJoinDf.show(3)
assert(rangeJoinDf.count() == 1000)
}
it("Passed ST_Within in a join") {
val geosparkConf = new GeoSparkConf(sparkSession.sparkContext.getConf)
println(geosparkConf)
var polygonCsvDf = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPolygonInputLocation)
polygonCsvDf.createOrReplaceTempView("polygontable")
polygonCsvDf.show()
var polygonDf = sparkSession.sql("select ST_PolygonFromEnvelope(cast(polygontable._c0 as Decimal(24,20)),cast(polygontable._c1 as Decimal(24,20)), cast(polygontable._c2 as Decimal(24,20)), cast(polygontable._c3 as Decimal(24,20))) as polygonshape from polygontable")
polygonDf.createOrReplaceTempView("polygondf")
polygonDf.show()
var pointCsvDF = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPointInputLocation)
pointCsvDF.createOrReplaceTempView("pointtable")
pointCsvDF.show()
var pointDf = sparkSession.sql("select ST_Point(cast(pointtable._c0 as Decimal(24,20)),cast(pointtable._c1 as Decimal(24,20))) as pointshape from pointtable")
pointDf.createOrReplaceTempView("pointdf")
pointDf.show()
var rangeJoinDf = sparkSession.sql("select * from polygondf, pointdf where ST_Within(pointdf.pointshape, polygondf.polygonshape) ")
rangeJoinDf.explain()
rangeJoinDf.show(3)
assert(rangeJoinDf.count() == 1000)
}
it("Passed ST_Overlaps in a join") {
val geosparkConf = new GeoSparkConf(sparkSession.sparkContext.getConf)
var polygonCsvDf = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPolygonInputLocation)
polygonCsvDf.createOrReplaceTempView("polygontable")
var polygonDf = sparkSession.sql("select ST_PolygonFromEnvelope(cast(polygontable._c0 as Decimal(24,20)),cast(polygontable._c1 as Decimal(24,20)), cast(polygontable._c2 as Decimal(24,20)), cast(polygontable._c3 as Decimal(24,20))) as polygonshape from polygontable")
polygonDf.createOrReplaceTempView("polygondf")
var polygonCsvOverlapDf = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(overlapPolygonInputLocation)
polygonCsvOverlapDf.createOrReplaceTempView("polygonoverlaptable")
var polygonOverlapDf = sparkSession.sql("select ST_PolygonFromEnvelope(cast(polygonoverlaptable._c0 as Decimal(24,20)),cast(polygonoverlaptable._c1 as Decimal(24,20)), cast(polygonoverlaptable._c2 as Decimal(24,20)), cast(polygonoverlaptable._c3 as Decimal(24,20))) as polygonshape from polygonoverlaptable")
polygonOverlapDf.createOrReplaceTempView("polygonodf")
var rangeJoinDf = sparkSession.sql("select * from polygondf, polygonodf where ST_Overlaps(polygondf.polygonshape, polygonodf.polygonshape)")
rangeJoinDf.explain()
rangeJoinDf.show(3)
assert(rangeJoinDf.count() == 57)
}
it("Passed ST_Crosses in a join") {
val geosparkConf = new GeoSparkConf(sparkSession.sparkContext.getConf)
println(geosparkConf)
var polygonCsvDf = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPolygonInputLocation)
polygonCsvDf.createOrReplaceTempView("polygontable")
polygonCsvDf.show()
var polygonDf = sparkSession.sql("select ST_PolygonFromEnvelope(cast(polygontable._c0 as Decimal(24,20)),cast(polygontable._c1 as Decimal(24,20)), cast(polygontable._c2 as Decimal(24,20)), cast(polygontable._c3 as Decimal(24,20))) as polygonshape from polygontable")
polygonDf.createOrReplaceTempView("polygondf")
polygonDf.show()
var pointCsvDF = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPointInputLocation)
pointCsvDF.createOrReplaceTempView("pointtable")
pointCsvDF.show()
var pointDf = sparkSession.sql("select ST_Point(cast(pointtable._c0 as Decimal(24,20)),cast(pointtable._c1 as Decimal(24,20))) as pointshape from pointtable")
pointDf.createOrReplaceTempView("pointdf")
pointDf.show()
var rangeJoinDf = sparkSession.sql("select * from polygondf, pointdf where ST_Crosses(pointdf.pointshape, polygondf.polygonshape) ")
rangeJoinDf.explain()
rangeJoinDf.show(3)
assert(rangeJoinDf.count() == 1000)
}
it("Passed ST_Distance <= radius in a join") {
sparkSession.experimental.extraStrategies = JoinQueryDetector :: Nil
var pointCsvDF1 = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPointInputLocation)
pointCsvDF1.createOrReplaceTempView("pointtable")
pointCsvDF1.show()
var pointDf1 = sparkSession.sql("select ST_Point(cast(pointtable._c0 as Decimal(24,20)),cast(pointtable._c1 as Decimal(24,20))) as pointshape1 from pointtable")
pointDf1.createOrReplaceTempView("pointdf1")
pointDf1.show()
var pointCsvDF2 = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPointInputLocation)
pointCsvDF2.createOrReplaceTempView("pointtable")
pointCsvDF2.show()
var pointDf2 = sparkSession.sql("select ST_Point(cast(pointtable._c0 as Decimal(24,20)),cast(pointtable._c1 as Decimal(24,20))) as pointshape2 from pointtable")
pointDf2.createOrReplaceTempView("pointdf2")
pointDf2.show()
var distanceJoinDf = sparkSession.sql("select * from pointdf1, pointdf2 where ST_Distance(pointdf1.pointshape1,pointdf2.pointshape2) <= 2")
distanceJoinDf.explain()
distanceJoinDf.show(10)
assert(distanceJoinDf.count() == 2998)
}
it("Passed ST_Distance < radius in a join") {
sparkSession.experimental.extraStrategies = JoinQueryDetector :: Nil
var pointCsvDF1 = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPointInputLocation)
pointCsvDF1.createOrReplaceTempView("pointtable")
pointCsvDF1.show()
var pointDf1 = sparkSession.sql("select ST_Point(cast(pointtable._c0 as Decimal(24,20)),cast(pointtable._c1 as Decimal(24,20))) as pointshape1 from pointtable")
pointDf1.createOrReplaceTempView("pointdf1")
pointDf1.show()
var pointCsvDF2 = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPointInputLocation)
pointCsvDF2.createOrReplaceTempView("pointtable")
pointCsvDF2.show()
var pointDf2 = sparkSession.sql("select ST_Point(cast(pointtable._c0 as Decimal(24,20)),cast(pointtable._c1 as Decimal(24,20))) as pointshape2 from pointtable")
pointDf2.createOrReplaceTempView("pointdf2")
pointDf2.show()
var distanceJoinDf = sparkSession.sql("select * from pointdf1, pointdf2 where ST_Distance(pointdf1.pointshape1,pointdf2.pointshape2) < 2")
distanceJoinDf.explain()
distanceJoinDf.show(10)
assert(distanceJoinDf.count() == 2998)
}
it("Passed ST_Contains in a range and join") {
val geosparkConf = new GeoSparkConf(sparkSession.sparkContext.getConf)
println(geosparkConf)
var polygonCsvDf = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPolygonInputLocation)
polygonCsvDf.createOrReplaceTempView("polygontable")
polygonCsvDf.show()
var polygonDf = sparkSession.sql("select ST_PolygonFromEnvelope(cast(polygontable._c0 as Decimal(24,20)),cast(polygontable._c1 as Decimal(24,20)), cast(polygontable._c2 as Decimal(24,20)), cast(polygontable._c3 as Decimal(24,20))) as polygonshape from polygontable")
polygonDf.createOrReplaceTempView("polygondf")
polygonDf.show()
var pointCsvDF = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPointInputLocation)
pointCsvDF.createOrReplaceTempView("pointtable")
pointCsvDF.show()
var pointDf = sparkSession.sql("select ST_Point(cast(pointtable._c0 as Decimal(24,20)),cast(pointtable._c1 as Decimal(24,20))) as pointshape from pointtable")
pointDf.createOrReplaceTempView("pointdf")
pointDf.show()
var rangeJoinDf = sparkSession.sql("select * from polygondf, pointdf where ST_Contains(polygondf.polygonshape,pointdf.pointshape) " +
"and ST_Contains(ST_PolygonFromEnvelope(1.0,101.0,501.0,601.0), polygondf.polygonshape)")
rangeJoinDf.explain()
rangeJoinDf.show(3)
assert(rangeJoinDf.count() == 500)
}
it("Passed super small data join") {
val rawPointDf = sparkSession.createDataFrame(
sparkSession.sparkContext.parallelize(
Seq(Row(1, "40.0", "-120.0"), Row(2, "30.0", "-110.0"), Row(3, "20.0", "-100.0"))),
StructType(
List(StructField("id", IntegerType, true), StructField("lat", StringType, true), StructField("lon", StringType, true))
))
rawPointDf.createOrReplaceTempView("rawPointDf")
val pointDF = sparkSession.sql("select id, ST_Point(cast(lat as Decimal(24,20)), cast(lon as Decimal(24,20))) AS latlon_point FROM rawPointDf")
pointDF.createOrReplaceTempView("pointDf")
pointDF.show(false)
val rawPolygonDf = sparkSession.createDataFrame(
sparkSession.sparkContext.parallelize(
Seq(Row("A", 25.0, -115.0, 35.0, -105.0), Row("B", 25.0, -135.0, 35.0, -125.0))),
StructType(
List(StructField("id", StringType, true), StructField("latmin", DoubleType, true),
StructField("lonmin", DoubleType, true), StructField("latmax", DoubleType, true),
StructField("lonmax", DoubleType, true))
))
rawPolygonDf.createOrReplaceTempView("rawPolygonDf")
val polygonEnvelopeDF = sparkSession.sql("select id, ST_PolygonFromEnvelope(" +
"cast(latmin as Decimal(24,20)), cast(lonmin as Decimal(24,20)), " +
"cast(latmax as Decimal(24,20)), cast(lonmax as Decimal(24,20))) AS polygon FROM rawPolygonDf")
polygonEnvelopeDF.createOrReplaceTempView("polygonDf")
val withinEnvelopeDF = sparkSession.sql("select * FROM pointDf, polygonDf WHERE ST_Within(pointDf.latlon_point, polygonDf.polygon)")
assert(withinEnvelopeDF.count() == 1)
}
it("Passed ST_Equals in a join for ST_Point") {
val geosparkConf = new GeoSparkConf(sparkSession.sparkContext.getConf)
println(geosparkConf)
var pointCsvDf1 = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPoint1InputLocation)
pointCsvDf1.createOrReplaceTempView("pointtable1")
pointCsvDf1.show()
var pointDf1 = sparkSession.sql("select ST_Point(cast(pointtable1._c0 as Decimal(24,20)),cast(pointtable1._c1 as Decimal(24,20)) ) as pointshape1 from pointtable1")
pointDf1.createOrReplaceTempView("pointdf1")
pointDf1.show()
var pointCsvDF2 = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPoint2InputLocation)
pointCsvDF2.createOrReplaceTempView("pointtable2")
pointCsvDF2.show()
var pointDf2 = sparkSession.sql("select ST_Point(cast(pointtable2._c0 as Decimal(24,20)),cast(pointtable2._c1 as Decimal(24,20))) as pointshape2 from pointtable2")
pointDf2.createOrReplaceTempView("pointdf2")
pointDf2.show()
var equalJoinDf = sparkSession.sql("select * from pointdf1, pointdf2 where ST_Equals(pointdf1.pointshape1,pointdf2.pointshape2) ")
equalJoinDf.explain()
equalJoinDf.show(3)
assert(equalJoinDf.count() == 100, s"Expected 100 but got ${equalJoinDf.count()}")
}
it("Passed ST_Equals in a join for ST_Polygon") {
val geosparkConf = new GeoSparkConf(sparkSession.sparkContext.getConf)
println(geosparkConf)
var polygonCsvDf1 = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPolygon1InputLocation)
polygonCsvDf1.createOrReplaceTempView("polygontable1")
polygonCsvDf1.show()
var polygonDf1 = sparkSession.sql("select ST_PolygonFromEnvelope(cast(polygontable1._c0 as Decimal(24,20)),cast(polygontable1._c1 as Decimal(24,20)), cast(polygontable1._c2 as Decimal(24,20)), cast(polygontable1._c3 as Decimal(24,20))) as polygonshape1 from polygontable1")
polygonDf1.createOrReplaceTempView("polygondf1")
polygonDf1.show()
var polygonCsvDf2 = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPolygon2InputLocation)
polygonCsvDf2.createOrReplaceTempView("polygontable2")
polygonCsvDf2.show()
var polygonDf2 = sparkSession.sql("select ST_PolygonFromEnvelope(cast(polygontable2._c0 as Decimal(24,20)),cast(polygontable2._c1 as Decimal(24,20)), cast(polygontable2._c2 as Decimal(24,20)), cast(polygontable2._c3 as Decimal(24,20))) as polygonshape2 from polygontable2")
polygonDf2.createOrReplaceTempView("polygondf2")
polygonDf2.show()
var equalJoinDf = sparkSession.sql("select * from polygondf1, polygondf2 where ST_Equals(polygondf1.polygonshape1,polygondf2.polygonshape2) ")
equalJoinDf.explain()
equalJoinDf.show(3)
assert(equalJoinDf.count() == 100, s"Expected 100 but got ${equalJoinDf.count()}")
}
it("Passed ST_Equals in a join for ST_Polygon Random Shuffle") {
val geosparkConf = new GeoSparkConf(sparkSession.sparkContext.getConf)
println(geosparkConf)
var polygonCsvDf1 = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPolygon1RandomInputLocation)
polygonCsvDf1.createOrReplaceTempView("polygontable1")
polygonCsvDf1.show()
var polygonDf1 = sparkSession.sql("select ST_PolygonFromEnvelope(cast(polygontable1._c0 as Decimal(24,20)),cast(polygontable1._c1 as Decimal(24,20)), cast(polygontable1._c2 as Decimal(24,20)), cast(polygontable1._c3 as Decimal(24,20))) as polygonshape1 from polygontable1")
polygonDf1.createOrReplaceTempView("polygondf1")
polygonDf1.show()
var polygonCsvDf2 = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPolygon2RandomInputLocation)
polygonCsvDf2.createOrReplaceTempView("polygontable2")
polygonCsvDf2.show()
var polygonDf2 = sparkSession.sql("select ST_PolygonFromEnvelope(cast(polygontable2._c0 as Decimal(24,20)),cast(polygontable2._c1 as Decimal(24,20)), cast(polygontable2._c2 as Decimal(24,20)), cast(polygontable2._c3 as Decimal(24,20))) as polygonshape2 from polygontable2")
polygonDf2.createOrReplaceTempView("polygondf2")
polygonDf2.show()
var equalJoinDf = sparkSession.sql("select * from polygondf1, polygondf2 where ST_Equals(polygondf1.polygonshape1,polygondf2.polygonshape2) ")
equalJoinDf.explain()
equalJoinDf.show(3)
assert(equalJoinDf.count() == 100, s"Expected 100 but got ${equalJoinDf.count()}")
}
it("Passed ST_Equals in a join for ST_Point and ST_Polygon") {
val geosparkConf = new GeoSparkConf(sparkSession.sparkContext.getConf)
println(geosparkConf)
var pointCsvDf = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPoint1InputLocation)
pointCsvDf.createOrReplaceTempView("pointtable")
pointCsvDf.show()
var pointDf = sparkSession.sql("select ST_Point(cast(pointtable._c0 as Decimal(24,20)),cast(pointtable._c1 as Decimal(24,20)) ) as pointshape from pointtable")
pointDf.createOrReplaceTempView("pointdf")
pointDf.show()
var polygonCsvDf = sparkSession.read.format("csv").option("delimiter", ",").option("header", "false").load(csvPolygon1InputLocation)
polygonCsvDf.createOrReplaceTempView("polygontable")
polygonCsvDf.show()
var polygonDf = sparkSession.sql("select ST_PolygonFromEnvelope(cast(polygontable._c0 as Decimal(24,20)),cast(polygontable._c1 as Decimal(24,20)), cast(polygontable._c2 as Decimal(24,20)), cast(polygontable._c3 as Decimal(24,20))) as polygonshape from polygontable")
polygonDf.createOrReplaceTempView("polygondf")
polygonDf.show()
var equalJoinDf = sparkSession.sql("select * from pointdf, polygondf where ST_Equals(pointdf.pointshape,polygondf.polygonshape) ")
equalJoinDf.explain()
equalJoinDf.show(3)
assert(equalJoinDf.count() == 0, s"Expected 0 but got ${equalJoinDf.count()}")
}
}
}
| Sarwat/GeoSpark | sql/src/test/scala/org/datasyslab/geosparksql/predicateJoinTestScala.scala | Scala | mit | 22,081 |
//
// NamelessToNamed.scala -- Scala trait NamelessToNamed
// Project OrcScala
//
// Created by dkitchin on Jul 10, 2010.
//
// Copyright (c) 2017 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.ast.oil.nameless
import orc.ast.oil.named
import orc.ast.hasOptionalVariableName
import orc.ast.oil.named.{ BoundTypevar, BoundVar }
/** @author dkitchin
*/
// Conversions from nameless to named representations
trait NamelessToNamed {
def namelessToNamed(e: Expression, context: List[BoundVar], typecontext: List[BoundTypevar]): named.Expression = {
import hasOptionalVariableName._
def recurse(e: Expression): named.Expression = namelessToNamed(e, context, typecontext)
e -> {
case Stop() => named.Stop()
case a: Argument => namelessToNamed(a, context)
case Call(target, args, typeargs) => {
val newtarget = namelessToNamed(target, context)
val newargs = args map { namelessToNamed(_, context) }
val newtypeargs = typeargs map { _ map { namelessToNamed(_, typecontext) } }
named.Call(newtarget, newargs, newtypeargs)
}
case Parallel(left, right) => named.Parallel(recurse(left), recurse(right))
case Sequence(left, right) => {
val x = new BoundVar(Some(unusedVariable))
named.Sequence(recurse(left), x, namelessToNamed(right, x :: context, typecontext))
}
case Graft(value, body) => {
val x = new BoundVar(Some(unusedVariable))
named.Graft(x, recurse(value), namelessToNamed(body, x :: context, typecontext))
}
case Trim(f) => named.Trim(recurse(f))
case Otherwise(left, right) => named.Otherwise(recurse(left), recurse(right))
case New(st, bindings, t) => {
        // FIXME: this probably loses the self name information.
val self = new BoundVar(Some(id"self${st match { case Some(v) => v; case _ => "" }}"))
val defcontext = self :: context
val newbindings = Map() ++ bindings.mapValues(namelessToNamed(_, defcontext, typecontext))
named.New(self, st.map(namelessToNamed(_, typecontext)), newbindings, t.map(namelessToNamed(_, typecontext)))
}
case FieldAccess(obj, field) => named.FieldAccess(namelessToNamed(obj, context), field)
case DeclareCallables(openvars, defs, body) => {
val opennames = openvars map context
val defnames = defs map { d => new BoundVar(d.optionalVariableName) }
val defcontext = defnames.reverse ::: opennames.reverse ::: context
val bodycontext = defnames.reverse ::: context
val newdefs = for ((x, d) <- defnames zip defs) yield namelessToNamed(x, d, defcontext, typecontext)
val newbody = namelessToNamed(body, bodycontext, typecontext)
named.DeclareCallables(newdefs, newbody)
}
case DeclareType(t, body) => {
val x = new BoundTypevar()
val newTypeContext = x :: typecontext
/* A type may be defined recursively, so its name is in scope for its own definition */
val newt = namelessToNamed(t, newTypeContext)
val newbody = namelessToNamed(body, context, newTypeContext)
named.DeclareType(x, newt, newbody)
}
case HasType(body, expectedType) => {
named.HasType(recurse(body), namelessToNamed(expectedType, typecontext))
}
case VtimeZone(timeOrder, body) => named.VtimeZone(namelessToNamed(timeOrder, context), recurse(body))
case Hole(holeContext, holeTypeContext) => {
val newHoleContext = holeContext mapValues { namelessToNamed(_, context) }
val newHoleTypeContext = holeTypeContext mapValues { namelessToNamed(_, typecontext) }
named.Hole(newHoleContext, newHoleTypeContext)
}
}
}
def namelessToNamed(a: Argument, context: List[BoundVar]): named.Argument =
a -> {
case Constant(v) => named.Constant(v)
case Variable(i) => context(i)
case UnboundVariable(s) => named.UnboundVar(s)
}
def namelessToNamed(t: Type, typecontext: List[BoundTypevar]): named.Type = {
def toType(t: Type): named.Type = namelessToNamed(t, typecontext)
t -> {
case TypeVar(i) => typecontext(i)
case UnboundTypeVariable(u) => named.UnboundTypevar(u)
case Top() => named.Top()
case Bot() => named.Bot()
case FunctionType(typearity, argtypes, returntype) => {
val typeformals = (for (_ <- 0 until typearity) yield new BoundTypevar()).toList
val newTypeContext = typeformals ::: typecontext
val newArgTypes = argtypes map { namelessToNamed(_, newTypeContext) }
val newReturnType = namelessToNamed(returntype, newTypeContext)
named.FunctionType(typeformals, newArgTypes, newReturnType)
}
case TupleType(elements) => named.TupleType(elements map toType)
case RecordType(entries) => {
val newEntries = entries map { case (s, t) => (s, toType(t)) }
named.RecordType(newEntries)
}
case TypeApplication(i, typeactuals) => {
val tycon = typecontext(i)
val newTypeActuals = typeactuals map toType
named.TypeApplication(tycon, newTypeActuals)
}
case AssertedType(assertedType) => named.AssertedType(toType(assertedType))
case TypeAbstraction(typearity, t) => {
val typeformals = (for (_ <- 0 until typearity) yield new BoundTypevar()).toList
val newTypeContext = typeformals ::: typecontext
val newt = namelessToNamed(t, newTypeContext)
named.TypeAbstraction(typeformals, newt)
}
case ImportedType(classname) => named.ImportedType(classname)
case ClassType(classname) => named.ClassType(classname)
case VariantType(typearity, variants) => {
val self = new BoundTypevar()
val typeformals = (for (_ <- 0 until typearity) yield new BoundTypevar()).toList
val newTypeContext = self :: typeformals ::: typecontext
val newVariants =
for ((name, variant) <- variants) yield {
(name, variant map { namelessToNamed(_, newTypeContext) })
}
named.VariantType(self, typeformals, newVariants)
}
case IntersectionType(a, b) => named.IntersectionType(toType(a), toType(b))
case UnionType(a, b) => named.UnionType(toType(a), toType(b))
case StructuralType(members) => named.StructuralType(members.mapValues(toType))
case NominalType(a) => named.NominalType(toType(a))
}
}
def namelessToNamed(x: BoundVar, defn: Callable, context: List[BoundVar], typecontext: List[BoundTypevar]): named.Callable = {
defn -> {
case Callable(typearity, arity, body, argtypes, returntype) => {
val formals = (for (_ <- 0 until arity) yield new BoundVar(None)).toList
val typeformals = (for (_ <- 0 until typearity) yield new BoundTypevar()).toList
val newContext = formals ::: context
val newTypeContext = typeformals ::: typecontext
val newbody = namelessToNamed(body, newContext, newTypeContext)
val newArgTypes = argtypes map { _ map { namelessToNamed(_, newTypeContext) } }
val newReturnType = returntype map { namelessToNamed(_, newTypeContext) }
defn match {
case _: Def =>
named.Def(x, formals, newbody, typeformals, newArgTypes, newReturnType)
case _: Site =>
named.Site(x, formals, newbody, typeformals, newArgTypes, newReturnType)
}
}
}
}
}
| orc-lang/orc | OrcScala/src/orc/ast/oil/nameless/NamelessToNamed.scala | Scala | bsd-3-clause | 7,646 |
package io.iohk.ethereum.vm
import io.iohk.ethereum.domain.UInt256
object Stack {
/**
* Stack max size as defined in the YP (9.1)
*/
val DefaultMaxSize = 1024
def empty(maxSize: Int = DefaultMaxSize): Stack =
new Stack(Vector(), maxSize)
}
//TODO: consider a List with head being the top of the stack (DUP and SWAP reach at most depth 16) [EC-251]
/**
  * Stack for the EVM. Instructions pop their arguments from it and push their results to it.
  * The Stack doesn't handle overflow and underflow errors. Any operation that transcends the given stack bounds
  * returns the stack unchanged. Pop will always return zeroes in such a case.
  */
class Stack private (private val underlying: Vector[UInt256], val maxSize: Int) {
def pop: (UInt256, Stack) = underlying.lastOption match {
case Some(word) =>
val updated = underlying.dropRight(1)
(word, copy(updated))
case None =>
(UInt256.Zero, this)
}
/**
* Pop n elements from the stack. The first element in the resulting sequence will be the top-most element
* in the current stack
*/
def pop(n: Int): (Seq[UInt256], Stack) = {
val (updated, popped) = underlying.splitAt(underlying.length - n)
if (popped.length == n)
(popped.reverse, copy(updated))
else
(Seq.fill(n)(UInt256.Zero), this)
}
def push(word: UInt256): Stack = {
val updated = underlying :+ word
if (updated.length <= maxSize)
copy(updated)
else
this
}
/**
* Push a sequence of elements to the stack. That last element of the sequence will be the top-most element
* in the resulting stack
*/
def push(words: Seq[UInt256]): Stack = {
val updated = underlying ++ words
if (updated.length > maxSize)
this
else
copy(updated)
}
/**
* Duplicate i-th element of the stack, pushing it to the top. i=0 is the top-most element.
*/
def dup(i: Int): Stack = {
val j = underlying.length - i - 1
if (i < 0 || i >= underlying.length || underlying.length >= maxSize)
this
else
copy(underlying :+ underlying(j))
}
/**
* Swap i-th and the top-most elements of the stack. i=0 is the top-most element (and that would be a no-op)
*/
def swap(i: Int): Stack = {
val j = underlying.length - i - 1
if (i <= 0 || i >= underlying.length)
this
else {
val a = underlying.last
val b = underlying(j)
val updated = underlying.updated(j, a).init :+ b
copy(updated)
}
}
def size: Int = underlying.size
/**
* @return the elements of the stack as a sequence, with the top-most element of the stack
* as the first element in the sequence
*/
def toSeq: Seq[UInt256] = underlying.reverse
override def equals(that: Any): Boolean = that match {
case that: Stack => this.underlying == that.underlying
case _ => false
}
override def hashCode(): Int = underlying.hashCode
override def toString: String =
underlying.reverse.mkString("Stack(", ",", ")")
private def copy(updated: Vector[UInt256]): Stack =
new Stack(updated, maxSize)
}
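// A minimal, hypothetical usage sketch (not referenced by the VM): pushes two words, duplicates
// the element below the top, swaps it back up and pops the result, exercising the bounds-safe API.
// The object name and the sample values are illustrative assumptions only.
private[vm] object StackExample {
  def demo(): (UInt256, Stack) = {
    val stack = Stack.empty()
      .push(UInt256.Zero)
      .push(UInt256.Zero)
      .dup(1)   // copy the element one below the top onto the top
      .swap(1)  // swap the new top with the element below it
    stack.pop   // returns the popped word together with the remaining stack
  }
}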
| input-output-hk/etc-client | src/main/scala/io/iohk/ethereum/vm/Stack.scala | Scala | mit | 3,125 |
package com.twitter.scalding
import cascading.flow.FlowException
import org.scalatest.{ Matchers, WordSpec }
import scala.util.Try
class StatsTestJob1(args: Args) extends Job(args) with CounterVerification {
val nonZero = Stat("number of non-zero records", "stats")
TypedPipe.from(TypedTsv[(String, Int)](args("input")))
.map { kv =>
if (kv._2 != 0) nonZero.inc()
(kv._1.toLowerCase, kv._2)
}
.write(TypedTsv[(String, Int)](args("output")))
override def verifyCounters(counters: Map[StatKey, Long]): Try[Unit] = Try {
assert(counters(nonZero) > 0)
}
}
class StatsTestJob2(args: Args) extends StatsTestJob1(args) {
override def verifyCountersInTest: Boolean = false
}
class StatsTest extends WordSpec with Matchers {
val goodInput = List(("a", 0), ("b", 1), ("c", 2))
val badInput = List(("a", 0), ("b", 0), ("c", 0))
def runJobTest[T: TupleSetter](f: Args => Job, input: List[T]): Unit = {
JobTest(f)
.arg("input", "input")
.arg("output", "output")
.source(TypedTsv[(String, Int)]("input"), input)
.sink[(String, Int)](TypedTsv[(String, Int)]("output")){ outBuf => outBuf shouldBe input }
.run
}
"StatsTestJob" should {
"pass if verifyCounters() is true" in {
runJobTest(new StatsTestJob1(_), goodInput)
}
}
it should {
"fail if verifyCounters() is false" in {
an[FlowException] should be thrownBy runJobTest(new StatsTestJob1(_), badInput)
}
}
it should {
"skip verifyCounters() if job fails" in {
(the[FlowException] thrownBy runJobTest(new StatsTestJob1(_), List((null, 0)))).getCause.getCause shouldBe a[NullPointerException]
}
}
it should {
"skip verifyCounters() if verifyCountersInTest is false" in {
runJobTest(new StatsTestJob2(_), badInput)
}
}
} | tdyas/scalding | scalding-core/src/test/scala/com/twitter/scalding/StatsTest.scala | Scala | apache-2.0 | 1,822 |
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frs102.boxes
import uk.gov.hmrc.ct.accounts.frs102.calculations.IntangibleAssetsCalculator
import uk.gov.hmrc.ct.accounts.frs102.retriever.{Frs102AccountsBoxRetriever, FullAccountsBoxRetriever}
import uk.gov.hmrc.ct.box._
case class AC123A(value: Option[Int]) extends CtBoxIdentifier(name = "Intangible assets - Goodwill - Net book value at [POA START]")
with CtOptionalInteger
with Input
with ValidatableBox[Frs102AccountsBoxRetriever]
with Validators {
override def validate(boxRetriever: Frs102AccountsBoxRetriever): Set[CtValidation] = {
collectErrors(
validateMoney(value, min = 0)
)
}
}
object AC123A extends Calculated[AC123A, FullAccountsBoxRetriever]
with IntangibleAssetsCalculator {
override def calculate(boxRetriever: FullAccountsBoxRetriever): AC123A = {
import boxRetriever._
calculateAC123A(ac114A(), ac118A())
}
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frs102/boxes/AC123A.scala | Scala | apache-2.0 | 1,515 |
package tastytest
object TestDefaults extends Suite("TestDefaults") {
test("new Defaults()()") {
val defaults = new Defaults()()
assert(defaults.i == 33)
assert(defaults.s == "foo")
assert(defaults.b == false)
}
test("""new Defaults(s="bar")()""") {
val defaults = new Defaults(s="bar")()
assert(defaults.i == 33)
assert(defaults.s == "bar")
assert(defaults.b == false)
}
test("""new Defaults(s="baz", i=21)()""") {
val defaults = new Defaults(s="baz", i=21)()
assert(defaults.i == 21)
assert(defaults.s == "baz")
assert(defaults.b == false)
}
test("""new Defaults(s="qux", i=93)(b=true)""") {
val defaults = new Defaults(s="qux", i=93)(b=true)
assert(defaults.i == 93)
assert(defaults.s == "qux")
assert(defaults.b == true)
}
test("""new Defaults(101, "bip")(true)""") {
val defaults = new Defaults(101, "bip")(true)
assert(defaults.i == 101)
assert(defaults.s == "bip")
assert(defaults.b == true)
}
test("""Defaults(101, "bip")(true)""") {
val defaults = Defaults(101, "bip")(true)
assert(defaults.i == 101)
assert(defaults.s == "bip")
assert(defaults.b == true)
}
test("""new Defaults()().foo()()() = (0,"",false)""") {
val foo = new Defaults()().foo()()()
assert(foo === (0,"",false))
}
test("""new Defaults()().foo()(b="wow")() = (0,"wow",false)""") {
val foo = new Defaults()().foo()(b="wow")()
assert(foo === (0,"wow",false))
}
test("""new Defaults.Specialised().bar() = (12L,"abc",true)""") {
val bar = new Defaults.Specialised().bar()
assert(bar === (12L,"abc",true))
}
test("""new Defaults.OutOfOrder().bar() = (12L,"abc",true)""") {
val bar = new Defaults.OutOfOrder().bar()
assert(bar === (12L,"abc",true))
}
}
| lrytz/scala | test/tasty/run/src-2/tastytest/TestDefaults.scala | Scala | apache-2.0 | 1,803 |
package nl.dekkr.eversync.scala.model
import java.io.File
case class FileDetails(file: File, mimeType: String)
| dekkr/eversync | src/main/scala/nl/dekkr/eversync/scala/model/FileDetails.scala | Scala | mit | 114 |
package org.jetbrains.plugins.scala
package lang
package psi
package stubs
package elements
package signatures
import com.intellij.psi.PsiElement
import com.intellij.psi.stubs.{IndexSink, StubElement, StubInputStream, StubOutputStream}
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.{ScClassParameter, ScParameter}
import org.jetbrains.plugins.scala.lang.psi.stubs.impl.ScParameterStubImpl
/**
* User: Alexander Podkhalyuzin
* Date: 19.10.2008
*/
abstract class ScParamElementType[Param <: ScParameter](debugName: String)
extends ScStubElementType[ScParameterStub, ScParameter](debugName) {
def createStubImpl[ParentPsi <: PsiElement](psi: ScParameter, parentStub: StubElement[ParentPsi]): ScParameterStub = {
val typeText: String = psi.typeElement match {
case Some(t) => t.getText
case None => ""
}
val (isVal, isVar) = psi match {
case c: ScClassParameter => (c.isVal, c.isVar)
case _ => (false, false)
}
val isCallByName = psi.isCallByNameParameter
val defaultExprText = psi.getActualDefaultExpression.map(_.getText)
val deprecatedName = psi.deprecatedName
new ScParameterStubImpl[ParentPsi](parentStub, this, psi.name, typeText, psi.isStable, psi.baseDefaultParam,
psi.isRepeatedParameter, isVal, isVar, isCallByName, defaultExprText, deprecatedName)
}
def serialize(stub: ScParameterStub, dataStream: StubOutputStream) {
dataStream.writeName(stub.getName)
dataStream.writeName(stub.getTypeText)
dataStream.writeBoolean(stub.isStable)
dataStream.writeBoolean(stub.isDefaultParam)
dataStream.writeBoolean(stub.isRepeated)
dataStream.writeBoolean(stub.isVal)
dataStream.writeBoolean(stub.isVar)
dataStream.writeBoolean(stub.isCallByNameParameter)
stub.getDefaultExprText match {
case None =>
dataStream.writeBoolean(false)
case Some(str) =>
dataStream.writeBoolean(true)
dataStream.writeName(str)
}
stub.deprecatedName match {
case None => dataStream.writeBoolean(false)
case Some(name) =>
dataStream.writeBoolean(true)
dataStream.writeName(name)
}
}
def deserializeImpl(dataStream: StubInputStream, parentStub: Any): ScParameterStub = {
val name = dataStream.readName
val parent = parentStub.asInstanceOf[StubElement[PsiElement]]
val typeText = dataStream.readName
val stable = dataStream.readBoolean
val default = dataStream.readBoolean
val repeated = dataStream.readBoolean
val isVal = dataStream.readBoolean
val isVar = dataStream.readBoolean
val isCallByName = dataStream.readBoolean()
val defaultExpr = if (dataStream.readBoolean()) Some(dataStream.readName().toString) else None
val deprecatedName = if (dataStream.readBoolean()) Some(dataStream.readName().toString) else None
new ScParameterStubImpl(parent, this, name, typeText, stable, default, repeated, isVal, isVar, isCallByName,
defaultExpr, deprecatedName)
}
def indexStub(stub: ScParameterStub, sink: IndexSink) {}
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/signatures/ScParamElementType.scala | Scala | apache-2.0 | 3,055 |
package org.jetbrains.plugins.scala
package base
import java.io.File
import com.intellij.openapi.module.Module
import com.intellij.openapi.project.Project
import com.intellij.openapi.projectRoots.{JavaSdk, Sdk}
import com.intellij.openapi.roots._
import com.intellij.openapi.roots.libraries.Library
import com.intellij.openapi.util.io.FileUtil
import com.intellij.openapi.vfs.impl.VirtualFilePointerManagerImpl
import com.intellij.openapi.vfs.newvfs.impl.VfsRootAccess
import com.intellij.openapi.vfs.pointers.VirtualFilePointerManager
import com.intellij.openapi.vfs.{JarFileSystem, LocalFileSystem, VfsUtil, VirtualFile}
import com.intellij.testFramework.PsiTestUtil
import org.jetbrains.plugins.scala.extensions._
import org.jetbrains.plugins.scala.lang.psi.impl.toplevel.synthetic.SyntheticClasses
import org.jetbrains.plugins.scala.project._
import org.jetbrains.plugins.scala.project.template.Artifact
import org.jetbrains.plugins.scala.util.TestUtils
import org.jetbrains.plugins.scala.util.TestUtils.ScalaSdkVersion
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
/**
* Nikolay.Tropin
* 5/29/13
*/
class ScalaLibraryLoader(project: Project, module: Module, rootPath: String,
isIncludeScalazLibrary: Boolean = false, isIncludeReflectLibrary: Boolean = false,
isIncludeSprayLibrary: Boolean = false, isIncludeSlickLibrary: Boolean = false, javaSdk: Option[Sdk] = None) {
private val addedLibraries = ArrayBuffer[Library]()
def loadScala(libVersion: TestUtils.ScalaSdkVersion) {
initScalaComponents()
addSyntheticClasses()
VfsRootAccess.allowRootAccess(TestUtils.getTestDataPath)
if (rootPath != null) {
FileUtil.createIfDoesntExist(new File(rootPath))
val testDataRoot: VirtualFile = LocalFileSystem.getInstance.refreshAndFindFileByPath(rootPath)
assert(testDataRoot != null)
PsiTestUtil.addSourceRoot(module, testDataRoot)
}
addScalaSdk(module, libVersion, isIncludeReflectLibrary)
if (isIncludeScalazLibrary) addLibrary(module, "scalaz", TestUtils.getMockScalazLib(libVersion))
if (isIncludeSprayLibrary) addLibrary(module, "spray", TestUtils.getMockSprayLib(libVersion))
if (isIncludeSlickLibrary) addLibrary(module, "slick", TestUtils.getMockSlickLib(libVersion))
javaSdk.foreach { sdk =>
val rootModel = ModuleRootManager.getInstance(module).getModifiableModel
rootModel.setSdk(sdk)
inWriteAction(rootModel.commit())
}
}
def initScalaComponents(): Unit = {
ScalaLoader.loadScala()
}
def addSyntheticClasses(): Unit = {
val syntheticClasses: SyntheticClasses = project.getComponent(classOf[SyntheticClasses])
if (!syntheticClasses.isClassesRegistered) {
syntheticClasses.registerClasses()
}
}
def clean() {
if (rootPath != null) {
val testDataRoot: VirtualFile = LocalFileSystem.getInstance.refreshAndFindFileByPath(rootPath)
PsiTestUtil.removeSourceRoot(module, testDataRoot)
}
inWriteAction {
addedLibraries.foreach(module.detach)
}
}
def addScalaSdk(module: Module, sdkVersion: ScalaSdkVersion, loadReflect: Boolean) = {
val compilerPath = TestUtils.getScalaCompilerPath(sdkVersion)
val libraryPath = TestUtils.getScalaLibraryPath(sdkVersion)
val reflectPath = TestUtils.getScalaReflectPath(sdkVersion)
val scalaSdkJars = Seq(libraryPath, compilerPath) ++ (if (loadReflect) Seq(reflectPath) else Seq.empty)
val classRoots = scalaSdkJars.map(path => JarFileSystem.getInstance.refreshAndFindFileByPath(path + "!/")).asJava
val scalaLibrarySrc = TestUtils.getScalaLibrarySrc(sdkVersion)
val srcsRoots = Seq(JarFileSystem.getInstance.refreshAndFindFileByPath(scalaLibrarySrc + "!/")).asJava
val scalaSdkLib = PsiTestUtil.addProjectLibrary(module, "scala-sdk", classRoots, srcsRoots)
val languageLevel = Artifact.ScalaCompiler.versionOf(new File(compilerPath))
.flatMap(ScalaLanguageLevel.from).getOrElse(ScalaLanguageLevel.Default)
inWriteAction {
scalaSdkLib.convertToScalaSdkWith(languageLevel, scalaSdkJars.map(new File(_)))
module.attach(scalaSdkLib)
addedLibraries += scalaSdkLib
}
VirtualFilePointerManager.getInstance.asInstanceOf[VirtualFilePointerManagerImpl].storePointers()
}
private def addLibrary(module: Module, libraryName: String, mockLib: String): Unit = {
if (module.libraries.exists(_.getName == libraryName)) return
VfsRootAccess.allowRootAccess(mockLib)
val rootModel = ModuleRootManager.getInstance(module).getModifiableModel
val libraryTable = rootModel.getModuleLibraryTable
val library = libraryTable.createLibrary(libraryName)
val libModel = library.getModifiableModel
val libRoot: File = new File(mockLib)
assert(libRoot.exists)
libModel.addRoot(VfsUtil.getUrlForLibraryRoot(libRoot), OrderRootType.CLASSES)
inWriteAction {
libModel.commit()
rootModel.commit()
}
VirtualFilePointerManager.getInstance.asInstanceOf[VirtualFilePointerManagerImpl].storePointers()
}
}
object ScalaLibraryLoader {
def getSdkNone: Option[Sdk] = None
def withMockJdk(project: Project, module: Module, rootPath: String,
isIncludeScalazLibrary: Boolean = false, isIncludeReflectLibrary: Boolean = false,
isIncludeSprayLibrary: Boolean = false, isIncludeSlickLibrary: Boolean = false): ScalaLibraryLoader = {
val mockJdk = TestUtils.getDefaultJdk
VfsRootAccess.allowRootAccess(mockJdk)
val javaSdk = Some(JavaSdk.getInstance.createJdk("java sdk", mockJdk, false))
new ScalaLibraryLoader(project, module, rootPath, isIncludeScalazLibrary, isIncludeReflectLibrary,
isIncludeSprayLibrary, isIncludeSlickLibrary, javaSdk)
}
}
| ghik/intellij-scala | test/org/jetbrains/plugins/scala/base/ScalaLibraryLoader.scala | Scala | apache-2.0 | 5,817 |
package com.thoughtworks.datacommons.prepbuddy.rdds
import org.apache.spark.rdd.RDD
class NumericIndexedRDD(doubleRDD: RDD[Double]) {
private val sortedRDDWithIndex = doubleRDD.sortBy(x => x, ascending = true).zipWithIndex()
private val RDDWithIndex = sortedRDDWithIndex.map { case (rowValue, rowIndex) => (rowIndex, rowValue) }.cache
private val totalCount = RDDWithIndex.count
private val secondQuartileIndex = getMedianIndex(totalCount)
private val firstQuartileIndex = getMedianIndex(secondQuartileIndex - 1)
private val thirdQuartileIndex = firstQuartileIndex + secondQuartileIndex
def lookup(key: Long): Double = RDDWithIndex.lookup(key).head
def interQuartileRange: Double = thirdQuartileValue - firstQuartileValue
def firstQuartileValue: Double = lookup(firstQuartileIndex)
def thirdQuartileValue: Double = lookup(thirdQuartileIndex)
private def getMedianIndex(count: Long): Long = {
def isOdd(num: Long): Boolean = num % 2 != 0
val middleIndex = count / 2
if (isOdd(count)) return middleIndex
middleIndex + 1
}
}
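// A hypothetical usage sketch (not part of the library): given an active SparkContext, index a
// small numeric RDD and read off its quartiles and inter-quartile range. The object name, the
// `sc` parameter and the sample values are illustrative assumptions only.
object NumericIndexedRDDExample {
  import org.apache.spark.SparkContext
  def quartileSummary(sc: SparkContext): (Double, Double, Double) = {
    val indexed = new NumericIndexedRDD(sc.parallelize(Seq(1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0)))
    (indexed.firstQuartileValue, indexed.thirdQuartileValue, indexed.interQuartileRange)
  }
}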
| data-commons/prep-buddy | src/main/scala/com/thoughtworks/datacommons/prepbuddy/rdds/NumericIndexedRDD.scala | Scala | apache-2.0 | 1,111 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver
import java.io._
import java.util.{ArrayList => JArrayList, Locale}
import org.apache.spark.sql.AnalysisException
import scala.collection.JavaConverters._
import jline.console.ConsoleReader
import jline.console.history.FileHistory
import org.apache.commons.lang3.StringUtils
import org.apache.commons.logging.LogFactory
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.cli.{CliDriver, CliSessionState, OptionsProcessor}
import org.apache.hadoop.hive.common.{HiveInterruptCallback, HiveInterruptUtils}
import org.apache.hadoop.hive.conf.HiveConf
import org.apache.hadoop.hive.ql.Driver
import org.apache.hadoop.hive.ql.exec.Utilities
import org.apache.hadoop.hive.ql.processors.{AddResourceProcessor, SetProcessor, CommandProcessor, CommandProcessorFactory}
import org.apache.hadoop.hive.ql.session.SessionState
import org.apache.thrift.transport.TSocket
import org.apache.spark.Logging
import org.apache.spark.sql.hive.HiveContext
import org.apache.spark.util.{ShutdownHookManager, Utils}
/**
* This code doesn't support remote connections in Hive 1.2+, as the underlying CliDriver
* has dropped its support.
*/
private[hive] object SparkSQLCLIDriver extends Logging {
private var prompt = "spark-sql"
private var continuedPrompt = "".padTo(prompt.length, ' ')
private var transport: TSocket = _
installSignalHandler()
/**
* Install an interrupt callback to cancel all Spark jobs. In Hive's CliDriver#processLine(),
* a signal handler will invoke this registered callback if a Ctrl+C signal is detected while
* a command is being processed by the current thread.
*/
def installSignalHandler() {
HiveInterruptUtils.add(new HiveInterruptCallback {
override def interrupt() {
// Handle remote execution mode
if (SparkSQLEnv.sparkContext != null) {
SparkSQLEnv.sparkContext.cancelAllJobs()
} else {
if (transport != null) {
// Force closing of TCP connection upon session termination
transport.getSocket.close()
}
}
}
})
}
def main(args: Array[String]) {
val oproc = new OptionsProcessor()
if (!oproc.process_stage1(args)) {
System.exit(1)
}
val cliConf = new HiveConf(classOf[SessionState])
// Override the location of the metastore since this is only used for local execution.
HiveContext.newTemporaryConfiguration().foreach {
case (key, value) => cliConf.set(key, value)
}
val sessionState = new CliSessionState(cliConf)
sessionState.in = System.in
try {
sessionState.out = new PrintStream(System.out, true, "UTF-8")
sessionState.info = new PrintStream(System.err, true, "UTF-8")
sessionState.err = new PrintStream(System.err, true, "UTF-8")
} catch {
case e: UnsupportedEncodingException => System.exit(3)
}
if (!oproc.process_stage2(sessionState)) {
System.exit(2)
}
// Set all properties specified via command line.
val conf: HiveConf = sessionState.getConf
sessionState.cmdProperties.entrySet().asScala.foreach { item =>
val key = item.getKey.toString
val value = item.getValue.toString
// We do not propagate metastore options to the execution copy of hive.
if (key != "javax.jdo.option.ConnectionURL") {
conf.set(key, value)
sessionState.getOverriddenConfigurations.put(key, value)
}
}
SessionState.start(sessionState)
// Clean up after we exit
ShutdownHookManager.addShutdownHook { () => SparkSQLEnv.stop() }
val remoteMode = isRemoteMode(sessionState)
// "-h" option has been passed, so connect to Hive thrift server.
if (!remoteMode) {
// Hadoop-20 and above - we need to augment classpath using hiveconf
// components.
// See also: code in ExecDriver.java
var loader = conf.getClassLoader
val auxJars = HiveConf.getVar(conf, HiveConf.ConfVars.HIVEAUXJARS)
if (StringUtils.isNotBlank(auxJars)) {
loader = Utilities.addToClassPath(loader, StringUtils.split(auxJars, ","))
}
conf.setClassLoader(loader)
Thread.currentThread().setContextClassLoader(loader)
} else {
// Hive 1.2 + not supported in CLI
throw new RuntimeException("Remote operations not supported")
}
val cli = new SparkSQLCLIDriver
cli.setHiveVariables(oproc.getHiveVariables)
// TODO work around for set the log output to console, because the HiveContext
// will set the output into an invalid buffer.
sessionState.in = System.in
try {
sessionState.out = new PrintStream(System.out, true, "UTF-8")
sessionState.info = new PrintStream(System.err, true, "UTF-8")
sessionState.err = new PrintStream(System.err, true, "UTF-8")
} catch {
case e: UnsupportedEncodingException => System.exit(3)
}
if (sessionState.database != null) {
SparkSQLEnv.hiveContext.runSqlHive(s"USE ${sessionState.database}")
}
// Execute -i init files (always in silent mode)
cli.processInitFiles(sessionState)
if (sessionState.execString != null) {
System.exit(cli.processLine(sessionState.execString))
}
try {
if (sessionState.fileName != null) {
System.exit(cli.processFile(sessionState.fileName))
}
} catch {
case e: FileNotFoundException =>
logError(s"Could not open input file for reading. (${e.getMessage})")
System.exit(3)
}
val reader = new ConsoleReader()
reader.setBellEnabled(false)
reader.setExpandEvents(false)
// reader.setDebug(new PrintWriter(new FileWriter("writer.debug", true)))
CliDriver.getCommandCompleter.foreach((e) => reader.addCompleter(e))
val historyDirectory = System.getProperty("user.home")
try {
if (new File(historyDirectory).exists()) {
val historyFile = historyDirectory + File.separator + ".hivehistory"
reader.setHistory(new FileHistory(new File(historyFile)))
} else {
logWarning("WARNING: Directory for Hive history file: " + historyDirectory +
" does not exist. History will not be available during this session.")
}
} catch {
case e: Exception =>
logWarning("WARNING: Encountered an error while trying to initialize Hive's " +
"history file. History will not be available during this session.")
logWarning(e.getMessage)
}
// TODO: missing
/*
val clientTransportTSocketField = classOf[CliSessionState].getDeclaredField("transport")
clientTransportTSocketField.setAccessible(true)
transport = clientTransportTSocketField.get(sessionState).asInstanceOf[TSocket]
*/
transport = null
var ret = 0
var prefix = ""
val currentDB = ReflectionUtils.invokeStatic(classOf[CliDriver], "getFormattedDb",
classOf[HiveConf] -> conf, classOf[CliSessionState] -> sessionState)
def promptWithCurrentDB: String = s"$prompt$currentDB"
def continuedPromptWithDBSpaces: String = continuedPrompt + ReflectionUtils.invokeStatic(
classOf[CliDriver], "spacesForString", classOf[String] -> currentDB)
var currentPrompt = promptWithCurrentDB
var line = reader.readLine(currentPrompt + "> ")
while (line != null) {
if (!line.startsWith("--")) {
if (prefix.nonEmpty) {
prefix += '\\n'
}
if (line.trim().endsWith(";") && !line.trim().endsWith("\\\\;")) {
line = prefix + line
ret = cli.processLine(line, true)
prefix = ""
currentPrompt = promptWithCurrentDB
} else {
prefix = prefix + line
currentPrompt = continuedPromptWithDBSpaces
}
}
line = reader.readLine(currentPrompt + "> ")
}
sessionState.close()
System.exit(ret)
}
def isRemoteMode(state: CliSessionState): Boolean = {
// sessionState.isRemoteMode
state.isHiveServerQuery
}
}
private[hive] class SparkSQLCLIDriver extends CliDriver with Logging {
private val sessionState = SessionState.get().asInstanceOf[CliSessionState]
private val LOG = LogFactory.getLog("CliDriver")
private val console = new SessionState.LogHelper(LOG)
private val isRemoteMode = {
SparkSQLCLIDriver.isRemoteMode(sessionState)
}
private val conf: Configuration =
if (sessionState != null) sessionState.getConf else new Configuration()
  // Force initializing SparkSQLEnv. This is put here instead of in object SparkSQLCLIDriver
  // because the Hive unit tests do not go through the main() code path.
if (!isRemoteMode) {
SparkSQLEnv.init()
} else {
// Hive 1.2 + not supported in CLI
throw new RuntimeException("Remote operations not supported")
}
override def processCmd(cmd: String): Int = {
val cmd_trimmed: String = cmd.trim()
val cmd_lower = cmd_trimmed.toLowerCase(Locale.ENGLISH)
val tokens: Array[String] = cmd_trimmed.split("\\\\s+")
val cmd_1: String = cmd_trimmed.substring(tokens(0).length()).trim()
if (cmd_lower.equals("quit") ||
cmd_lower.equals("exit")) {
sessionState.close()
System.exit(0)
}
if (tokens(0).toLowerCase(Locale.ENGLISH).equals("source") ||
cmd_trimmed.startsWith("!") ||
tokens(0).toLowerCase.equals("list") ||
isRemoteMode) {
val start = System.currentTimeMillis()
super.processCmd(cmd)
val end = System.currentTimeMillis()
val timeTaken: Double = (end - start) / 1000.0
console.printInfo(s"Time taken: $timeTaken seconds")
0
} else {
var ret = 0
val hconf = conf.asInstanceOf[HiveConf]
val proc: CommandProcessor = CommandProcessorFactory.get(tokens, hconf)
if (proc != null) {
// scalastyle:off println
if (proc.isInstanceOf[Driver] || proc.isInstanceOf[SetProcessor] ||
proc.isInstanceOf[AddResourceProcessor]) {
val driver = new SparkSQLDriver
driver.init()
val out = sessionState.out
val err = sessionState.err
val start: Long = System.currentTimeMillis()
if (sessionState.getIsVerbose) {
out.println(cmd)
}
val rc = driver.run(cmd)
val end = System.currentTimeMillis()
val timeTaken: Double = (end - start) / 1000.0
ret = rc.getResponseCode
if (ret != 0) {
// For analysis exception, only the error is printed out to the console.
rc.getException() match {
case e : AnalysisException =>
err.println(s"""Error in query: ${e.getMessage}""")
case _ => err.println(rc.getErrorMessage())
}
driver.close()
return ret
}
val res = new JArrayList[String]()
if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_CLI_PRINT_HEADER)) {
// Print the column names.
Option(driver.getSchema.getFieldSchemas).foreach { fields =>
out.println(fields.asScala.map(_.getName).mkString("\\t"))
}
}
var counter = 0
try {
while (!out.checkError() && driver.getResults(res)) {
res.asScala.foreach { l =>
counter += 1
out.println(l)
}
res.clear()
}
} catch {
case e: IOException =>
console.printError(
s"""Failed with exception ${e.getClass.getName}: ${e.getMessage}
|${org.apache.hadoop.util.StringUtils.stringifyException(e)}
""".stripMargin)
ret = 1
}
val cret = driver.close()
if (ret == 0) {
ret = cret
}
var responseMsg = s"Time taken: $timeTaken seconds"
if (counter != 0) {
responseMsg += s", Fetched $counter row(s)"
}
          console.printInfo(responseMsg, null)
// Destroy the driver to release all the locks.
driver.destroy()
} else {
if (sessionState.getIsVerbose) {
sessionState.out.println(tokens(0) + " " + cmd_1)
}
ret = proc.run(cmd_1).getResponseCode
}
// scalastyle:on println
}
ret
}
}
}
| chenc10/Spark-PAF | sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/SparkSQLCLIDriver.scala | Scala | apache-2.0 | 13,187 |
/*
* Copyright (C) 2009-2014 Typesafe Inc. <http://www.typesafe.com>
*/
package test
import play.api.test._
import controllers.Assets.Asset
object RouterSpec extends PlaySpecification {
"reverse routes containing boolean parameters" in {
"in the query string" in {
controllers.routes.Application.takeBool(true).url must equalTo ("/take-bool?b=true")
controllers.routes.Application.takeBool(false).url must equalTo ("/take-bool?b=false")
}
"in the path" in {
controllers.routes.Application.takeBool2(true).url must equalTo ("/take-bool-2/true")
controllers.routes.Application.takeBool2(false).url must equalTo ("/take-bool-2/false")
}
}
"bind boolean parameters" in {
"from the query string" in new WithApplication() {
val Some(result) = route(FakeRequest(GET, "/take-bool?b=true"))
contentAsString(result) must equalTo ("true")
val Some(result2) = route(FakeRequest(GET, "/take-bool?b=false"))
contentAsString(result2) must equalTo ("false")
// Bind boolean values from 1 and 0 integers too
contentAsString(route(FakeRequest(GET, "/take-bool?b=1")).get) must equalTo ("true")
contentAsString(route(FakeRequest(GET, "/take-bool?b=0")).get) must equalTo ("false")
}
"from the path" in new WithApplication() {
val Some(result) = route(FakeRequest(GET, "/take-bool-2/true"))
contentAsString(result) must equalTo ("true")
val Some(result2) = route(FakeRequest(GET, "/take-bool-2/false"))
contentAsString(result2) must equalTo ("false")
// Bind boolean values from 1 and 0 integers too
contentAsString(route(FakeRequest(GET, "/take-bool-2/1")).get) must equalTo ("true")
contentAsString(route(FakeRequest(GET, "/take-bool-2/0")).get) must equalTo ("false")
}
}
"bind int parameters from the query string as a list" in {
"from a list of numbers" in new WithApplication() {
val Some(result) = route(FakeRequest(GET, controllers.routes.Application.takeList(List(1, 2, 3)).url))
contentAsString(result) must equalTo("1,2,3")
}
"from a list of numbers and letters" in new WithApplication() {
val Some(result) = route(FakeRequest(GET, "/take-list?x=1&x=a&x=2"))
contentAsString(result) must equalTo("1,2")
}
"when there is no parameter at all" in new WithApplication() {
val Some(result) = route(FakeRequest(GET, "/take-list"))
contentAsString(result) must equalTo("")
}
"using the Java API" in new WithApplication() {
val Some(result) = route(FakeRequest(GET, "/take-java-list?x=1&x=2&x=3"))
contentAsString(result) must equalTo("1,2,3")
}
}
"use a new instance for each instantiated controller" in new WithApplication() {
route(FakeRequest(GET, "/instance")) must beSome.like {
case result => contentAsString(result) must_== "1"
}
route(FakeRequest(GET, "/instance")) must beSome.like {
case result => contentAsString(result) must_== "1"
}
}
"URL encoding and decoding works correctly" in new WithApplication() {
def checkDecoding(
dynamicEncoded: String, staticEncoded: String, queryEncoded: String,
dynamicDecoded: String, staticDecoded: String, queryDecoded: String) = {
val path = s"/urlcoding/$dynamicEncoded/$staticEncoded?q=$queryEncoded"
val expected = s"dynamic=$dynamicDecoded static=$staticDecoded query=$queryDecoded"
val Some(result) = route(FakeRequest(GET, path))
val actual = contentAsString(result)
actual must equalTo(expected)
}
def checkEncoding(
dynamicDecoded: String, staticDecoded: String, queryDecoded: String,
dynamicEncoded: String, staticEncoded: String, queryEncoded: String) = {
val expected = s"/urlcoding/$dynamicEncoded/$staticEncoded?q=$queryEncoded"
val call = controllers.routes.Application.urlcoding(dynamicDecoded, staticDecoded, queryDecoded)
call.url must equalTo(expected)
}
checkDecoding("a", "a", "a", "a", "a", "a")
checkDecoding("%2B", "%2B", "%2B", "+", "%2B", "+")
checkDecoding("+", "+", "+", "+", "+", " ")
checkDecoding("%20", "%20", "%20", " ", "%20", " ")
checkDecoding("&", "&", "-", "&", "&", "-")
checkDecoding("=", "=", "-", "=", "=", "-")
checkEncoding("+", "+", "+", "+", "+", "%2B")
checkEncoding(" ", " ", " ", "%20", " ", "+")
checkEncoding("&", "&", "&", "&", "&", "%26")
checkEncoding("=", "=", "=", "=", "=", "%3D")
// We use java.net.URLEncoder for query string encoding, which is not
// RFC compliant, e.g. it percent-encodes "/" which is not a delimiter
// for query strings, and it percent-encodes "~" which is an "unreserved" character
// that should never be percent-encoded. The following tests, therefore
// don't really capture our ideal desired behaviour for query string
// encoding. However, the behaviour for dynamic and static paths is correct.
checkEncoding("/", "/", "/", "%2F", "/", "%2F")
checkEncoding("~", "~", "~", "~", "~", "%7E")
checkDecoding("123", "456", "789", "123", "456", "789")
checkEncoding("123", "456", "789", "123", "456", "789")
}
"allow reverse routing of routes includes" in new WithApplication() {
// Force the router to bootstrap the prefix
app.routes
controllers.module.routes.ModuleController.index().url must_== "/module/index"
}
"document the router" in new WithApplication() {
// The purpose of this test is to alert anyone that changes the format of the router documentation that
// it is being used by Swagger. So if you do change it, please let Tony Tam know at tony at wordnik dot com.
val someRoute = app.routes.documentation.find(r => r._1 == "GET" && r._2.startsWith("/with/"))
someRoute must beSome[(String, String, String)]
val route = someRoute.get
route._2 must_== "/with/$param<[^/]+>"
route._3 must startWith("controllers.Application.withParam")
}
"choose the first matching route for a call in reverse routes" in new WithApplication() {
controllers.routes.Application.hello().url must_== "/hello"
}
"The assets reverse route support" should {
"fingerprint assets" in new WithApplication() {
controllers.routes.Assets.versioned("css/main.css").url must_== "/public/css/abcd1234-main.css"
}
"selected the minified version" in new WithApplication() {
controllers.routes.Assets.versioned("css/minmain.css").url must_== "/public/css/abcd1234-minmain-min.css"
}
"work for non fingerprinted assets" in new WithApplication() {
controllers.routes.Assets.versioned("css/nonfingerprinted.css").url must_== "/public/css/nonfingerprinted.css"
}
"selected the minified non fingerprinted version" in new WithApplication() {
controllers.routes.Assets.versioned("css/nonfingerprinted-minmain.css").url must_== "/public/css/nonfingerprinted-minmain-min.css"
}
}
}
| jyotikamboj/container | pf-framework/src/sbt-plugin/src/sbt-test/routes-compiler-plugin/injected-routes-compilation/tests/RouterSpec.scala | Scala | mit | 7,061 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.akka
import java.io.IOException
import java.net._
import java.util.concurrent.{Callable, TimeUnit}
import akka.actor._
import akka.pattern.{ask => akkaAsk}
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.flink.api.common.time.Time
import org.apache.flink.configuration.{AkkaOptions, ConfigConstants, Configuration}
import org.apache.flink.runtime.net.SSLUtils
import org.apache.flink.util.{ConfigurationException, NetUtils, Preconditions}
import org.jboss.netty.logging.{InternalLoggerFactory, Slf4JLoggerFactory}
import org.slf4j.LoggerFactory
import scala.concurrent._
import scala.concurrent.duration._
import scala.language.postfixOps
/**
 * This object contains utility functions for Akka: methods to start an actor system with a given
 * Akka configuration, as well as the Akka configurations used for starting the different
 * actor systems.
*/
object AkkaUtils {
val LOG = LoggerFactory.getLogger(AkkaUtils.getClass)
val INF_TIMEOUT = 21474835 seconds
/**
* Creates a local actor system without remoting.
*
* @param configuration instance containing the user provided configuration values
* @return The created actor system
*/
def createLocalActorSystem(configuration: Configuration): ActorSystem = {
val akkaConfig = getAkkaConfig(configuration, None)
createActorSystem(akkaConfig)
}
/**
* Creates an actor system bound to the given hostname and port.
*
* @param configuration instance containing the user provided configuration values
* @param hostname of the network interface to bind to
* @param port of to bind to
* @return created actor system
*/
def createActorSystem(
configuration: Configuration,
hostname: String,
port: Int)
: ActorSystem = {
createActorSystem(configuration, Some((hostname, port)))
}
/**
* Creates an actor system. If a listening address is specified, then the actor system will listen
* on that address for messages from a remote actor system. If not, then a local actor system
* will be instantiated.
*
* @param configuration instance containing the user provided configuration values
* @param listeningAddress an optional tuple containing a bindAddress and a port to bind to.
* If the parameter is None, then a local actor system will be created.
* @return created actor system
*/
def createActorSystem(
configuration: Configuration,
listeningAddress: Option[(String, Int)])
: ActorSystem = {
val akkaConfig = getAkkaConfig(configuration, listeningAddress)
createActorSystem(akkaConfig)
}
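  // A minimal usage sketch of the factory methods above; the host name and port below are
  // illustrative assumptions, not values taken from this file:
  //
  //   val flinkConfig = new Configuration()
  //   val remoteSystem = AkkaUtils.createActorSystem(flinkConfig, Some(("localhost", 6123)))
  //   val localSystem = AkkaUtils.createLocalActorSystem(flinkConfig)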
/**
* Creates an actor system with the given akka config.
*
* @param akkaConfig configuration for the actor system
* @return created actor system
*/
def createActorSystem(akkaConfig: Config): ActorSystem = {
// Initialize slf4j as logger of Akka's Netty instead of java.util.logging (FLINK-1650)
InternalLoggerFactory.setDefaultFactory(new Slf4JLoggerFactory)
ActorSystem.create("flink", akkaConfig)
}
/**
* Creates an actor system with the default config and listening on a random port of the
* localhost.
*
* @return default actor system listening on a random port of the localhost
*/
def createDefaultActorSystem(): ActorSystem = {
createActorSystem(getDefaultAkkaConfig)
}
/**
* Return a remote Akka config for the given configuration values.
*
* @param configuration containing the user provided configuration values
* @param hostname to bind against. If null, then the loopback interface is used
* @param port to bind against
* @return A remote Akka config
*/
def getAkkaConfig(configuration: Configuration, hostname: String, port: Int): Config = {
getAkkaConfig(configuration, Some((hostname, port)))
}
/**
* Return a local Akka config for the given configuration values.
*
* @param configuration containing the user provided configuration values
* @return A local Akka config
*/
def getAkkaConfig(configuration: Configuration): Config = {
getAkkaConfig(configuration, None)
}
/**
* Creates an akka config with the provided configuration values. If the listening address is
* specified, then the actor system will listen on the respective address.
*
* @param configuration instance containing the user provided configuration values
* @param externalAddress optional tuple of bindAddress and port to be reachable at.
* If None is given, then an Akka config for local actor system
* will be returned
* @return Akka config
*/
@throws(classOf[UnknownHostException])
def getAkkaConfig(configuration: Configuration,
externalAddress: Option[(String, Int)]): Config = {
val defaultConfig = getBasicAkkaConfig(configuration)
externalAddress match {
case Some((hostname, port)) =>
val remoteConfig = getRemoteAkkaConfig(configuration,
// the wildcard IP lets us bind to all network interfaces
NetUtils.getWildcardIPAddress, port,
hostname, port)
remoteConfig.withFallback(defaultConfig)
case None =>
defaultConfig
}
}
/**
* Creates the default akka configuration which listens on a random port on the local machine.
* All configuration values are set to default values.
*
* @return Flink's Akka default config
*/
def getDefaultAkkaConfig: Config = {
getAkkaConfig(new Configuration(), Some(("", 0)))
}
/**
* Gets the basic Akka config which is shared by remote and local actor systems.
*
* @param configuration instance which contains the user specified values for the configuration
* @return Flink's basic Akka config
*/
private def getBasicAkkaConfig(configuration: Configuration): Config = {
val akkaThroughput = configuration.getInteger(ConfigConstants.AKKA_DISPATCHER_THROUGHPUT,
ConfigConstants.DEFAULT_AKKA_DISPATCHER_THROUGHPUT)
val lifecycleEvents = configuration.getBoolean(ConfigConstants.AKKA_LOG_LIFECYCLE_EVENTS,
ConfigConstants.DEFAULT_AKKA_LOG_LIFECYCLE_EVENTS)
val jvmExitOnFatalError = if (
configuration.getBoolean(ConfigConstants.AKKA_JVM_EXIT_ON_FATAL_ERROR, true)){
"on"
} else {
"off"
}
val logLifecycleEvents = if (lifecycleEvents) "on" else "off"
val logLevel = getLogLevel
val config =
s"""
|akka {
| daemonic = on
|
| loggers = ["akka.event.slf4j.Slf4jLogger"]
| logging-filter = "akka.event.slf4j.Slf4jLoggingFilter"
| log-config-on-start = off
|
| jvm-exit-on-fatal-error = $jvmExitOnFatalError
|
| serialize-messages = off
|
| loglevel = $logLevel
| stdout-loglevel = OFF
|
| log-dead-letters = $logLifecycleEvents
| log-dead-letters-during-shutdown = $logLifecycleEvents
|
| actor {
| guardian-supervisor-strategy = "akka.actor.StoppingSupervisorStrategy"
| default-dispatcher {
| throughput = $akkaThroughput
|
| fork-join-executor {
| parallelism-factor = 2.0
| }
| }
| }
|}
""".stripMargin
ConfigFactory.parseString(config)
}
def testDispatcherConfig: Config = {
val config =
s"""
|akka {
| actor {
| default-dispatcher {
| fork-join-executor {
| parallelism-factor = 1.0
| parallelism-min = 1
| parallelism-max = 4
| }
| }
| }
|}
""".stripMargin
ConfigFactory.parseString(config)
}
/**
* Creates a Akka config for a remote actor system listening on port on the network interface
* identified by bindAddress.
*
* @param configuration instance containing the user provided configuration values
* @param bindAddress of the network interface to bind on
* @param port to bind to or if 0 then Akka picks a free port automatically
* @param externalHostname The host name to expect for Akka messages
* @param externalPort The port to expect for Akka messages
* @return Flink's Akka configuration for remote actor systems
*/
private def getRemoteAkkaConfig(configuration: Configuration,
bindAddress: String, port: Int,
externalHostname: String, externalPort: Int): Config = {
val akkaAskTimeout = Duration(configuration.getString(
ConfigConstants.AKKA_ASK_TIMEOUT,
ConfigConstants.DEFAULT_AKKA_ASK_TIMEOUT))
val startupTimeout = configuration.getString(
ConfigConstants.AKKA_STARTUP_TIMEOUT,
(akkaAskTimeout * 10).toString)
val transportHeartbeatInterval = configuration.getString(
ConfigConstants.AKKA_TRANSPORT_HEARTBEAT_INTERVAL,
ConfigConstants.DEFAULT_AKKA_TRANSPORT_HEARTBEAT_INTERVAL)
val transportHeartbeatPause = configuration.getString(
ConfigConstants.AKKA_TRANSPORT_HEARTBEAT_PAUSE,
ConfigConstants.DEFAULT_AKKA_TRANSPORT_HEARTBEAT_PAUSE)
val transportThreshold = configuration.getDouble(
ConfigConstants.AKKA_TRANSPORT_THRESHOLD,
ConfigConstants.DEFAULT_AKKA_TRANSPORT_THRESHOLD)
    val watchHeartbeatInterval = configuration.getString(AkkaOptions.AKKA_WATCH_HEARTBEAT_INTERVAL)
    val watchHeartbeatPause = configuration.getString(AkkaOptions.AKKA_WATCH_HEARTBEAT_PAUSE)
val watchThreshold = configuration.getDouble(
ConfigConstants.AKKA_WATCH_THRESHOLD,
ConfigConstants.DEFAULT_AKKA_WATCH_THRESHOLD)
    val akkaTCPTimeout = configuration.getString(AkkaOptions.AKKA_TCP_TIMEOUT)
val akkaFramesize = configuration.getString(
ConfigConstants.AKKA_FRAMESIZE,
ConfigConstants.DEFAULT_AKKA_FRAMESIZE)
val lifecycleEvents = configuration.getBoolean(
ConfigConstants.AKKA_LOG_LIFECYCLE_EVENTS,
ConfigConstants.DEFAULT_AKKA_LOG_LIFECYCLE_EVENTS)
val logLifecycleEvents = if (lifecycleEvents) "on" else "off"
val akkaEnableSSLConfig = configuration.getBoolean(ConfigConstants.AKKA_SSL_ENABLED,
ConfigConstants.DEFAULT_AKKA_SSL_ENABLED) &&
SSLUtils.getSSLEnabled(configuration)
val akkaEnableSSL = if (akkaEnableSSLConfig) "on" else "off"
val akkaSSLKeyStore = configuration.getString(
ConfigConstants.SECURITY_SSL_KEYSTORE,
null)
val akkaSSLKeyStorePassword = configuration.getString(
ConfigConstants.SECURITY_SSL_KEYSTORE_PASSWORD,
null)
val akkaSSLKeyPassword = configuration.getString(
ConfigConstants.SECURITY_SSL_KEY_PASSWORD,
null)
val akkaSSLTrustStore = configuration.getString(
ConfigConstants.SECURITY_SSL_TRUSTSTORE,
null)
val akkaSSLTrustStorePassword = configuration.getString(
ConfigConstants.SECURITY_SSL_TRUSTSTORE_PASSWORD,
null)
val akkaSSLProtocol = configuration.getString(
ConfigConstants.SECURITY_SSL_PROTOCOL,
ConfigConstants.DEFAULT_SECURITY_SSL_PROTOCOL)
val akkaSSLAlgorithmsString = configuration.getString(
ConfigConstants.SECURITY_SSL_ALGORITHMS,
ConfigConstants.DEFAULT_SECURITY_SSL_ALGORITHMS)
val akkaSSLAlgorithms = akkaSSLAlgorithmsString.split(",").toList.mkString("[", ",", "]")
val configString =
s"""
|akka {
| actor {
| provider = "akka.remote.RemoteActorRefProvider"
| }
|
| remote {
| startup-timeout = $startupTimeout
|
| transport-failure-detector{
| acceptable-heartbeat-pause = $transportHeartbeatPause
| heartbeat-interval = $transportHeartbeatInterval
| threshold = $transportThreshold
| }
|
| watch-failure-detector{
| heartbeat-interval = $watchHeartbeatInterval
| acceptable-heartbeat-pause = $watchHeartbeatPause
| threshold = $watchThreshold
| }
|
| netty {
| tcp {
| transport-class = "akka.remote.transport.netty.NettyTransport"
| port = $externalPort
| bind-port = $port
| connection-timeout = $akkaTCPTimeout
| maximum-frame-size = $akkaFramesize
| tcp-nodelay = on
| }
| }
|
| log-remote-lifecycle-events = $logLifecycleEvents
| }
|}
""".stripMargin
val effectiveHostname =
if (externalHostname != null && externalHostname.nonEmpty) {
externalHostname
} else {
// if bindAddress is null or empty, then leave bindAddress unspecified. Akka will pick
// InetAddress.getLocalHost.getHostAddress
"\\"\\""
}
val hostnameConfigString =
s"""
|akka {
| remote {
| netty {
| tcp {
| hostname = $effectiveHostname
| bind-hostname = $bindAddress
| }
| }
| }
|}
""".stripMargin
val sslConfigString = if (akkaEnableSSLConfig) {
s"""
|akka {
| remote {
|
| enabled-transports = ["akka.remote.netty.ssl"]
|
| netty {
|
| ssl = $${akka.remote.netty.tcp}
|
| ssl {
|
| enable-ssl = $akkaEnableSSL
| security {
| key-store = "$akkaSSLKeyStore"
| key-store-password = "$akkaSSLKeyStorePassword"
| key-password = "$akkaSSLKeyPassword"
| trust-store = "$akkaSSLTrustStore"
| trust-store-password = "$akkaSSLTrustStorePassword"
| protocol = $akkaSSLProtocol
| enabled-algorithms = $akkaSSLAlgorithms
| random-number-generator = ""
| }
| }
| }
| }
|}
""".stripMargin
}else{
""
}
ConfigFactory.parseString(configString + hostnameConfigString + sslConfigString).resolve()
}
def getLogLevel: String = {
if (LOG.isTraceEnabled) {
"TRACE"
} else {
if (LOG.isDebugEnabled) {
"DEBUG"
} else {
if (LOG.isInfoEnabled) {
"INFO"
} else {
if (LOG.isWarnEnabled) {
"WARNING"
} else {
if (LOG.isErrorEnabled) {
"ERROR"
} else {
"OFF"
}
}
}
}
}
}
/** Returns a [[Future]] to the [[ActorRef]] of the child of a given actor. The child is specified
* by providing its actor name.
*
* @param parent [[ActorRef]] to the parent of the child to be retrieved
* @param child Name of the child actor
* @param system [[ActorSystem]] to be used
* @param timeout Maximum timeout for the future
* @return [[Future]] to the [[ActorRef]] of the child actor
*/
def getChild(
parent: ActorRef,
child: String,
system: ActorSystem,
timeout: FiniteDuration)
: Future[ActorRef] = {
system.actorSelection(parent.path / child).resolveOne()(timeout)
}
/** Returns a [[Future]] to the [[ActorRef]] of an actor. The actor is specified by its path.
*
* @param path Path to the actor to be retrieved
* @param system [[ActorSystem]] to be used
* @param timeout Maximum timeout for the future
* @return [[Future]] to the [[ActorRef]] of the actor
*/
def getActorRefFuture(
path: String,
system: ActorSystem,
timeout: FiniteDuration)
: Future[ActorRef] = {
system.actorSelection(path).resolveOne()(timeout)
}
/** Returns an [[ActorRef]] for the actor specified by the path parameter.
*
* @param path Path to the actor to be retrieved
* @param system [[ActorSystem]] to be used
* @param timeout Maximum timeout for the future
* @throws java.io.IOException
* @return [[ActorRef]] of the requested [[Actor]]
*/
@throws(classOf[IOException])
def getActorRef(
path: String,
system: ActorSystem,
timeout: FiniteDuration)
: ActorRef = {
try {
val future = AkkaUtils.getActorRefFuture(path, system, timeout)
Await.result(future, timeout)
}
catch {
case e @ (_ : ActorNotFound | _ : TimeoutException) =>
throw new IOException(
s"Actor at $path not reachable. " +
"Please make sure that the actor is running and its port is reachable.", e)
case e: IOException =>
throw new IOException(s"Could not connect to the actor at $path", e)
}
}
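  // Illustrative call; the path below follows the remote Akka URL format used elsewhere in
  // this class and is an assumption, not a value defined here:
  //
  //   val jobManager: ActorRef =
  //     AkkaUtils.getActorRef("akka.tcp://flink@10.0.0.1:6123/user/jobmanager", system, timeout)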
/**
* Utility function to construct a future which tries multiple times to execute itself if it
* fails. If the maximum number of tries are exceeded, then the future fails.
*
* @param body function describing the future action
* @param tries number of maximum tries before the future fails
* @param executionContext which shall execute the future
* @tparam T return type of the future
* @return future which tries to recover by re-executing itself a given number of times
*/
def retry[T](body: => T, tries: Int)(implicit executionContext: ExecutionContext): Future[T] = {
Future{ body }.recoverWith{
case t:Throwable =>
if(tries > 0){
retry(body, tries - 1)
}else{
Future.failed(t)
}
}
}
/**
* Utility function to construct a future which tries multiple times to execute itself if it
* fails. If the maximum number of tries are exceeded, then the future fails.
*
* @param callable future action
* @param tries maximum number of tries before the future fails
* @param executionContext which shall execute the future
* @tparam T return type of the future
* @return future which tries to recover by re-executing itself a given number of times
*/
def retry[T](callable: Callable[T], tries: Int)(implicit executionContext: ExecutionContext):
Future[T] = {
retry(callable.call(), tries)
}
/**
* Utility function to construct a future which tries multiple times to execute itself if it
* fails. If the maximum number of tries are exceeded, then the future fails.
*
* @param target actor which receives the message
* @param message to be sent to the target actor
* @param tries maximum number of tries before the future fails
* @param executionContext which shall execute the future
* @param timeout of the future
   * @return future which tries to recover by re-executing itself a given number of times
*/
def retry(target: ActorRef, message: Any, tries: Int)(implicit executionContext:
ExecutionContext, timeout: FiniteDuration): Future[Any] = {
(target ? message)(timeout) recoverWith{
case t: Throwable =>
if(tries > 0){
retry(target, message, tries-1)
}else{
Future.failed(t)
}
}
}
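  // Hedged usage sketch for the retry helpers above; the actor reference, message and the
  // implicit values are illustrative assumptions only:
  //
  //   implicit val ec: ExecutionContext = actorSystem.dispatcher
  //   implicit val askTimeout: FiniteDuration = 10 seconds
  //   val reply: Future[Any] = AkkaUtils.retry(jobManagerRef, "ping", tries = 3)
  //   val value: Future[Int] = AkkaUtils.retry(riskyComputation(), tries = 3)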
def getTimeout(config: Configuration): FiniteDuration = {
val duration = Duration(config.getString(ConfigConstants.AKKA_ASK_TIMEOUT,
ConfigConstants.DEFAULT_AKKA_ASK_TIMEOUT))
new FiniteDuration(duration.toMillis, TimeUnit.MILLISECONDS)
}
def getDefaultTimeout: Time = {
val duration = Duration(ConfigConstants.DEFAULT_AKKA_ASK_TIMEOUT)
Time.milliseconds(duration.toMillis)
}
def getDefaultTimeoutAsFiniteDuration: FiniteDuration = {
val timeout = getDefaultTimeout
new FiniteDuration(timeout.toMilliseconds, TimeUnit.MILLISECONDS)
}
def getLookupTimeout(config: Configuration): FiniteDuration = {
val duration = Duration(config.getString(
ConfigConstants.AKKA_LOOKUP_TIMEOUT,
ConfigConstants.DEFAULT_AKKA_LOOKUP_TIMEOUT))
new FiniteDuration(duration.toMillis, TimeUnit.MILLISECONDS)
}
def getDefaultLookupTimeout: FiniteDuration = {
val duration = Duration(ConfigConstants.DEFAULT_AKKA_LOOKUP_TIMEOUT)
new FiniteDuration(duration.toMillis, TimeUnit.MILLISECONDS)
}
def getClientTimeout(config: Configuration): FiniteDuration = {
val duration = Duration(
config.getString(
ConfigConstants.AKKA_CLIENT_TIMEOUT,
ConfigConstants.DEFAULT_AKKA_CLIENT_TIMEOUT
))
new FiniteDuration(duration.toMillis, TimeUnit.MILLISECONDS)
}
def getDefaultClientTimeout: FiniteDuration = {
val duration = Duration(ConfigConstants.DEFAULT_AKKA_CLIENT_TIMEOUT)
new FiniteDuration(duration.toMillis, TimeUnit.MILLISECONDS)
}
/** Returns the address of the given [[ActorSystem]]. The [[Address]] object contains
* the port and the host under which the actor system is reachable
*
* @param system [[ActorSystem]] for which the [[Address]] shall be retrieved
* @return [[Address]] of the given [[ActorSystem]]
*/
def getAddress(system: ActorSystem): Address = {
RemoteAddressExtension(system).address
}
/** Returns the given [[ActorRef]]'s path string representation with host and port of the
* [[ActorSystem]] in which the actor is running.
*
* @param system [[ActorSystem]] in which the given [[ActorRef]] is running
* @param actor [[ActorRef]] of the [[Actor]] for which the URL has to be generated
* @return String containing the [[ActorSystem]] independent URL of the [[Actor]]
*/
def getAkkaURL(system: ActorSystem, actor: ActorRef): String = {
val address = getAddress(system)
actor.path.toStringWithAddress(address)
}
/** Returns the AkkaURL for a given [[ActorSystem]] and a path describing a running [[Actor]] in
* the actor system.
*
* @param system [[ActorSystem]] in which the given [[Actor]] is running
* @param path Path describing an [[Actor]] for which the URL has to be generated
* @return String containing the [[ActorSystem]] independent URL of an [[Actor]] specified by
* path.
*/
def getAkkaURL(system: ActorSystem, path: String): String = {
val address = getAddress(system)
address.toString + path
}
/** Extracts the hostname and the port of the remote actor system from the given Akka URL. The
* result is an [[InetSocketAddress]] instance containing the extracted hostname and port. If
* the Akka URL does not contain the hostname and port information, e.g. a local Akka URL is
* provided, then an [[Exception]] is thrown.
*
* @param akkaURL The URL to extract the host and port from.
* @throws java.lang.Exception Thrown, if the given string does not represent a proper url
   * @return The InetSocketAddress with the extracted host and port.
*/
@throws(classOf[Exception])
def getInetSockeAddressFromAkkaURL(akkaURL: String): InetSocketAddress = {
// AkkaURLs have the form schema://systemName@host:port/.... if it's a remote Akka URL
try {
// we need to manually strip the protocol, because "akka.tcp" is not
// a valid protocol for Java's URL class
      val protocolColonPos = akkaURL.indexOf("://")
      if (protocolColonPos == -1 || protocolColonPos >= akkaURL.length - 4) {
        throw new MalformedURLException()
      }
      val url = new URL("http://" + akkaURL.substring(protocolColonPos + 3))
if (url.getHost == null || url.getPort == -1) {
throw new MalformedURLException()
}
new InetSocketAddress(url.getHost, url.getPort)
}
catch {
case _ : MalformedURLException =>
throw new Exception(s"Could not retrieve InetSocketAddress from Akka URL $akkaURL")
}
}
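  // For example (hypothetical URL): "akka.tcp://flink@10.0.0.1:6123/user/jobmanager" yields
  // new InetSocketAddress("10.0.0.1", 6123), while a local URL such as
  // "akka://flink/user/jobmanager" carries no host and port and makes this method throw.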
def formatDurationParingErrorMessage: String = {
"Duration format must be \\"val unit\\", where 'val' is a number and 'unit' is " +
"(d|day)|(h|hour)|(min|minute)|s|sec|second)|(ms|milli|millisecond)|" +
"(µs|micro|microsecond)|(ns|nano|nanosecond)"
}
/**
* Returns the local akka url for the given actor name.
*
* @param actorName Actor name identifying the actor
* @return Local Akka URL for the given actor
*/
def getLocalAkkaURL(actorName: String): String = {
"akka://flink/user/" + actorName
}
}
| oscarceballos/flink-1.3.2 | flink-runtime/src/main/scala/org/apache/flink/runtime/akka/AkkaUtils.scala | Scala | apache-2.0 | 25,186 |
package com.socrata.querycoordinator
import com.socrata.http.client.RequestBuilder
import com.socrata.soql.environment.TableName
import com.socrata.soql.typed._
import com.socrata.thirdparty.typesafeconfig.Propertizer
import com.typesafe.config.{Config, ConfigFactory}
import org.apache.log4j.PropertyConfigurator
import org.scalatest.{BeforeAndAfterAll, FunSuite, Matchers}
import org.scalatestplus.scalacheck.ScalaCheckPropertyChecks
abstract class TestBase extends FunSuite with Matchers with ScalaCheckPropertyChecks with BeforeAndAfterAll {
val config: Config = ConfigFactory.load().getConfig("com.socrata.query-coordinator")
PropertyConfigurator.configure(Propertizer("log4j", config.getConfig("log4j")))
val fakeRequestBuilder = RequestBuilder("")
}
| socrata-platform/query-coordinator | query-coordinator/src/test/scala/com/socrata/querycoordinator/TestBase.scala | Scala | apache-2.0 | 768 |
/*
* Copyright (C) 2016 Nikos Katzouris
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package experiments.datautils.tracknow
import java.io.{File, PrintWriter}
import scala.io.Source
import util.Try
object TK extends App {
val dataPath = "/media/nkatz/storage/Zelitron-data/data"
val rtecDataPath = "/media/nkatz/storage/Zelitron-data/RTEC_data"
/*
object MyFile {
def apply(f: File): MyFile = {
val split = f.getName.split("_")(2).split("-")
val (year, month, day) = (split(0).toInt, split(1).toInt, split(2).toInt)
MyFile(year, month, day, f)
}
}
case class MyFile(year: Int, month: Int, day: Int, file: File)
*/
def rename(oldName: String, newName: String) = {
Try(new File(oldName).renameTo(new File(newName))).getOrElse(false)
}
def getListOfFiles(dir: String) = {
val d = new File(dir)
var i = 0
d.listFiles foreach { x =>
val files = x.listFiles.sortBy{ f =>
val split = f.getName.split("_")(2).split("-")
val (year, month, day) = (split(0).toInt, split(1).toInt, split(2).toInt)
(year, month, day)
}
val newFileName = s"$rtecDataPath/$i.csv"
val pw = new PrintWriter(new File(newFileName))
var firstTime = "non-set-yet"
var endTime = "non-set-yet"
files foreach { f =>
val source = Source.fromFile(f.getAbsolutePath)
try {
val lines = source.getLines
lines foreach { line =>
val split = line.split(";")
val company = split(0)
val vehicle = split(1)
val format = new java.text.SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS")
val ttime = split(2)
val time = format.parse(ttime).getTime()
if (firstTime == "non-set-yet") firstTime = time.toString
endTime = time.toString
val engineStatus = split(3)
val speed = split(10)
val ttype = split(23).replaceAll("\\\\s", "").replaceAll(",", "").toLowerCase
/*
if (engineStatus != "0" && engineStatus != "3") {
val newLine = s"speed|$time|$time|$vehicle|$ttype|$speed"
pw.write(newLine+"\\n")
}
*/
val newLine = s"speed|$time|$time|$vehicle|$ttype|$speed"
pw.write(newLine + "\\n")
}
} finally { source.close() }
}
pw.close()
rename(newFileName, s"$rtecDataPath/$i-$firstTime-$endTime.csv")
i += 1
println(i + " complete.")
}
}
println(getListOfFiles(dataPath))
}
| nkatzz/OLED | src/main/scala/experiments/datautils/tracknow/TK.scala | Scala | gpl-3.0 | 3,202 |
// Copyright (c) 2011 Paul Butcher
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package com.borachio
import org.scalatest.WordSpec
trait Trait1 { def method1(x: Int, y: String): String }
trait Trait2
trait Trait3
abstract class Class1 extends Trait1 with Trait2
class ProxyTest extends WordSpec {
def unimplemented(proxy: AnyRef, name: Symbol, args: Array[AnyRef]): AnyRef =
throw new RuntimeException("unimplemented")
"A proxy" should {
"implement a single interface" in {
val p = Proxy.create(threadContextClassLoaderStrategy, classOf[Trait1]) {
unimplemented _
}
assert(p.isInstanceOf[Trait1])
}
"implement multiple interfaces" in {
val p = Proxy.create(threadContextClassLoaderStrategy, classOf[Trait1], classOf[Trait2], classOf[Trait3]) {
unimplemented _
}
assert(p.isInstanceOf[Trait1])
assert(p.isInstanceOf[Trait2])
assert(p.isInstanceOf[Trait3])
}
"fail if given a class" in {
intercept[IllegalArgumentException] {
val p = Proxy.create(threadContextClassLoaderStrategy, classOf[Class1]) {
unimplemented _
}
}
}
"forward calls" in {
val p = Proxy.create(threadContextClassLoaderStrategy, classOf[Trait1]) {
(proxy: AnyRef, name: Symbol, args: Array[AnyRef]) =>
expect(name) { 'method1 }
expect(2) { args.length }
expect(42) { args(0).asInstanceOf[Int] }
expect("foo") { args(1).asInstanceOf[String] }
"called"
}.asInstanceOf[Trait1]
expect("called") { p.method1(42, "foo") }
}
}
}
| paulbutcher/borachio | core_tests/src/test/scala/ProxyTest.scala | Scala | mit | 2,677 |
package com.stefansavev.randomprojections.interface
import com.stefansavev.randomprojections.datarepr.dense.PointIndexes
import com.stefansavev.randomprojections.implementation.{PointSignatures, RandomTreeLeaf}
//collects the points into buckets
trait BucketCollector {
def collectPoints(pointIds: PointIndexes): RandomTreeLeaf
//returns leaf id
def build(pointSignatures: PointSignatures, labels: Array[Int]): Index
}
| stefansavev/random-projections-at-berlinbuzzwords | src/main/scala/com/stefansavev/randomprojections/interface/BucketCollector.scala | Scala | apache-2.0 | 428 |
package com.seremis.geninfusion.model.animation
import com.seremis.geninfusion.api.GIApiInterface
import com.seremis.geninfusion.api.model.IModel
import com.seremis.geninfusion.api.model.animation.{IAnimation, IAnimator}
import scala.collection.mutable.Queue
class Animator(model: IModel) extends IAnimator {
protected val queuedAnimations: Queue[(IAnimation, () => Unit)] = Queue()
override def animate() {
//packet handling and stuff
}
//Should only be called on server
override def enqueueAnimation(animationName: String, startCallback: () => Unit): Unit = {
queuedAnimations += (GIApiInterface.animationRegistry.getAnimationForName(animationName) -> startCallback)
}
}
| Seremis/Genetic-Infusion | src/main/scala/com/seremis/geninfusion/model/animation/Animator.scala | Scala | gpl-3.0 | 719 |
package es.weso.rdf.turtle.parser
import es.weso.rdf.nodes._
import scala.language.postfixOps
import es.weso.rdf._
import es.weso.rdf.triples.RDFTriple
case class TurtleParserState(triples: List[RDFTriple], namespaces: PrefixMap, bNodeLabels: BNodeTable, baseIRI: IRI) {
def addTriple(t: RDFTriple): TurtleParserState = {
TurtleParserState(t :: triples, namespaces, bNodeLabels, baseIRI)
}
def addTriples(ts: List[RDFTriple]): TurtleParserState = {
TurtleParserState(ts ++ triples, namespaces, bNodeLabels, baseIRI)
}
def retrieveTriples: (List[RDFTriple], TurtleParserState) = {
(triples, TurtleParserState(List(), namespaces, bNodeLabels, baseIRI))
}
def newTable(table: BNodeTable): TurtleParserState =
TurtleParserState(triples, namespaces, table, baseIRI)
def addPrefix(prefix: String, iri: IRI): TurtleParserState =
TurtleParserState(triples, namespaces.addPrefix(prefix, iri), bNodeLabels, baseIRI)
def newBNode: (BNodeId, TurtleParserState) = {
    val (id, t) = bNodeLabels.newBNode
(id, TurtleParserState(triples, namespaces, t, baseIRI))
}
def newBase(newIRI: IRI) =
TurtleParserState(triples, namespaces, bNodeLabels, newIRI)
}
object TurtleParserState {
def initial: TurtleParserState = initial(IRI(""))
def initial(baseIRI: IRI) = TurtleParserState(List(), PrefixMap.empty, BNodeTable.empty, baseIRI)
} | labra/turtleparser-with-combinators | turtleparser/shared/src/main/scala/es/weso/rdf/turtle/parser/TurtleParserState.scala | Scala | lgpl-3.0 | 1,384 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.computations.Validators.DonationsValidation
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
case class CPQ21(value: Option[Boolean]) extends CtBoxIdentifier(name = "Donations made?")
with CtOptionalBoolean with Input with ValidatableBox[ComputationsBoxRetriever] with DonationsValidation {
override def validate(boxRetriever: ComputationsBoxRetriever): Set[CtValidation] = {
collectErrors(
validateBooleanAsMandatory("CPQ21", this),
validateLessThanTotalDonationsInPAndL(boxRetriever),
validateLessThanNetProfit(boxRetriever),
validateHasCharitableDonations(this, boxRetriever)
)
}
private def validateHasCharitableDonations(box: CtOptionalBoolean, retriever: ComputationsBoxRetriever): Set[CtValidation] = {
failIf(box.isTrue && !retriever.cp301().isPositive && !retriever.cp302().isPositive && !retriever.cp303().isPositive) {
Set(CtValidation(None, "error.CPQ21.no.charitable.donations"))
}
}
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CPQ21.scala | Scala | apache-2.0 | 1,672 |
package fpinscala.exercises.ch09parsing
import java.util.regex._
import scala.util.matching.Regex
trait Parsers[Parser[+_]] { self =>
def run[A](p: Parser[A])(input: String): Either[ParseError, A]
def succeed[A](a: A): Parser[A]
def or[A](s1: Parser[A], s2: Parser[A]): Parser[A]
def flatMap[A, B](p: Parser[A])(f: A => Parser[B]): Parser[B]
implicit def string(s: String): Parser[String]
implicit def operators[A](p: Parser[A]): ParserOps[A] = ParserOps[A](p)
implicit def regex(r: Regex): Parser[String]
def slice[A](p: Parser[A]): Parser[String]
def defaultSucceed[A](a: A): Parser[A] = string("") map (_ => a)
/**
* Exercise 9.7
*
* Implement product and map2 in terms of flatMap.
*/
def product[A, B](p1: Parser[A], p2: => Parser[B]): Parser[(A, B)] = {
flatMap(p1)(a => map(p2)((a, _)))
}
def map2ViaFlatMap[A, B, C](p: Parser[A], p2: => Parser[B])(f: (A, B) => C): Parser[C] = {
flatMap(p)(a => map(p2)(f(a, _)))
}
/**
* Exercise 9.6
*
* Using flatMap and any other combinators,
* write the context-sensitive parser we couldn't express earlier.
* To parse the digits, you can make use of a new primitive,
* regex, which promotes a regular expression to a Parser.
* In Scala, a string s can be promoted to a Regex object
* (which has methods for matching) using s.r,
* for instance, "[a-zA-Z_][a-zA-Z0-9_]*".r.
* implicit def regex(r: Regex): Parser[String]
*/
/**
* Exercise 9.8
*
* map is no longer primitive.
* Express it in terms of flatMap and/or other combinators.
*/
def map[A, B](p: Parser[A])(f: A => B): Parser[B] = {
flatMap(p)(f andThen succeed)
}
  def orString(s1: String, s2: String): Parser[String] = or(string(s1), string(s2))
val numA: Parser[Int] = many(char('a')).map(_.size)
def char(c: Char): Parser[Char] = string(c.toString).map(_.charAt(0))
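  // Hedged illustration of the behaviour the primitives above are meant to have (this follows
  // the usual convention for these exercises rather than a verified specification):
  //   run(char('a'))("a") == Right('a')
  //   run(string("abc"))("abcd") == Right("abc")
  //   run(numA)("aaab") == Right(3)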
def label[A](msg: String)(p: Parser[A]): Parser[A]
def scope[A](msg: String)(p: Parser[A]): Parser[A]
def attempt[A](p: Parser[A]): Parser[A]
def skipL[B](p: Parser[Any], p2: => Parser[B]): Parser[B] = {
map2(slice(p), p2)((_, b) => b)
}
def skipR[A](p: Parser[A], p2: => Parser[Any]): Parser[A] = {
map2(p, slice(p2))((a, _) => a)
}
/**
* EXERCISE 9.1
*
* Using product, implement the now-familiar combinator map2
* and then use this to implement many1 in terms of many.
* Note that we could have chosen to make map2 primitive
* and defined product in terms of map2 as we've done in previous chapters.
* The choice is up to you.
*/
def many1[A](p: Parser[A]): Parser[List[A]] = map2(p, many(p))(_ :: _)
def map2[A, B, C](p: Parser[A], p2: => Parser[B])(f: (A, B) => C): Parser[C] = {
for { a <- p; b <- p2 } yield f(a, b)
// p.product(p2).map(pair => f(pair._1, pair._2))
}
/**
* Exercise 9.3 - Hard
*
* Before continuing, see if you can define many in terms of or, map2, and succeed.
*/
def many[A](p: Parser[A]): Parser[List[A]] = {
map2(p, many(p))(_ :: _) or self.succeed(Nil: List[A])
// map2(p, wrap(many(p)))(_ :: _) or self.succeed(Nil: List[A])
}
/**
* Exercise 9.5
*
* We could also deal with non-strictness with a separate combinator
* like we did in chapter 7.
* Try this here and make the necessary changes to your existing combinators.
* What do you think of that approach in this instance?
*/
// def wrap[A](p: => Parser[A]): Parser[A]
// char('a').many.slice.map(_.size) ** char('b').many1.slice.map(_size)
/**
* Exercise 9.4 - Hard
*
* Using map2 and succeed, implement the listOfN combinator from earlier.
*/
def listOfN[A](n: Int, p: Parser[A]): Parser[List[A]] = {
if (n <= 0) succeed(List())
else map2(p, listOfN(n-1, p))(_ :: _)
}
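  // A hedged example of what listOfN is meant to satisfy (assuming the book's semantics):
  //   run(listOfN(3, "ab" | "cad"))("ababcad") == Right(List("ab", "ab", "cad"))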
implicit def asStringParser[A](a: A)(implicit f: A => Parser[String]): ParserOps[String] =
ParserOps(f(a))
def whitespace: Parser[String] = "\\\\s*".r
def digits: Parser[String] = "\\\\d+".r
def doubleString: Parser[String] =
token("[-+]?([0-9]*\\\\.)?[0-9]+([eE][-+]?[0-9]+)?".r)
def double: Parser[Double] = doubleString map (_.toDouble) label "double literal"
def thru(s: String): Parser[String] = (".*?" + Pattern.quote(s)).r
def quoted: Parser[String] = string("\\"") *> thru("\\"").map(_.dropRight(1))
def escapedQuoted: Parser[String] = {
token(quoted label "string literal")
}
def token[A](p: Parser[A]): Parser[A] = {
attempt(p) <* whitespace
}
def sep[A](p: Parser[A], p2: Parser[Any]): Parser[List[A]] = {
sep1(p, p2) or succeed(List())
}
def sep1[A](p: Parser[A], p2: Parser[Any]): Parser[List[A]] = {
map2(p, many(p2 *> p))(_ :: _)
}
def opL[A](p: Parser[A])(op: Parser[(A, A) => A]): Parser[A] =
map2(p, many(op ** p))((h, t) => t.foldLeft(h)((a, b) => b._1(a, b._2)))
def surround[A](start: Parser[Any], stop: Parser[Any])(p: => Parser[A]): Parser[A] = {
start *> p <* stop
}
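  // Hedged example: surround(char('['), char(']'))(digits) would accept "[123]" and produce
  // "123", assuming the usual semantics of the primitives it is built from.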
def eof: Parser[String] = {
regex("\\\\z".r).label("unexpected trailing characters")
}
def root[A](p: Parser[A]): Parser[A] = p <* eof
case class ParserOps[A](p: Parser[A]) {
def |[B>:A](p2: => Parser[B]): Parser[B] = self.or(p, p2)
def or[B>:A](p2: => Parser[B]): Parser[B] = self.or(p, p2)
def map[B](f: A => B): Parser[B] = self.map(p)(f)
def many: Parser[List[A]] = self.many(p)
def slice: Parser[String] = self.slice(p)
def **[B](p2: => Parser[B]): Parser[(A, B)] = p.product(p2)
def product[B](p2: => Parser[B]): Parser[(A, B)] = self.product(p, p2)
def flatMap[B](f: A => Parser[B]): Parser[B] = self.flatMap(p)(f)
def label(msg: String): Parser[A] = self.label(msg)(p)
def scope(msg: String): Parser[A] = self.scope(msg)(p)
def *>[B](p2: => Parser[B]): Parser[B] = self.skipL(p, p2)
def <*(p2: => Parser[Any]): Parser[A] = self.skipR(p, p2)
def token: Parser[A] = self.token(p)
def sep(separator: Parser[Any]): Parser[List[A]] = self.sep(p, separator)
def sep1(separator: Parser[Any]): Parser[List[A]] = self.sep1(p, separator)
def as[B](b: B): Parser[B] = self.map(self.slice(p))(_ => b)
def opL(op: Parser[(A, A) => A]): Parser[A] = self.opL(p)(op)
}
import fpinscala.exercises.ch08testing._
import fpinscala.exercises.ch08testing.Prop._
object Laws {
def equal[A](p1: Parser[A], p2: Parser[A])(in: Gen[String]): Prop = {
forAll(in)(s => run(p1)(s) == run(p2)(s))
}
def mapLaw[A](p: Parser[A])(in: Gen[String]): Prop = {
equal(p, p.map(a => a))(in)
}
def succeedLaw[A](a: A)(in: Gen[String]): Prop = {
forAll(in)(s => run(succeed(a))(s) == Right(a))
}
/**
* EXERCISE 9.2 - Hard
*
* Copied from
* https://github.com/fpinscala/fpinscala/blob/master/answerkey/parsing/02.answer.scala
*
* Try coming up with laws to specify the behavior of product.
*/
def unbiasL[A, B, C](p: ((A, B), C)): (A, B, C) = (p._1._1, p._1._2, p._2)
def unbiasR[A, B, C](p: (A, (B, C))): (A, B, C) = (p._1, p._2._1, p._2._2)
def productRaw[A, B, C, D, E](a: Parser[A], b: Parser[B], c: Parser[C])
(f: A => D, g: B => E)
(in: Gen[String]): Prop = {
equal((a ** b) ** c map unbiasL, a ** (b ** c) map unbiasR)(in) &&
equal((a ** b).map(pair => (f(pair._1), g(pair._2))), a.map(f) ** b.map(g))(in)
}
}
}
case class Location(input: String, offset: Int = 0) {
lazy val line = input.slice(0, offset + 1).count(_ == '\\n') + 1
lazy val col = input.slice(0, offset + 1).lastIndexOf('\\n') match {
case -1 => offset + 1
case lineStart => offset - lineStart
}
def toError(msg: String): ParseError = ParseError(List((this, msg)))
def advanceBy(n: Int): Location = copy(offset = offset + n)
def currentLine: String = {
if (input.length > 1) input.lines.drop(line - 1).next
else ""
}
def columnCaret: String = (" " * (col - 1)) + "^"
}
case class ParseError(stack: List[(Location, String)] = List()) {
def push(loc: Location, msg: String): ParseError = copy(stack = (loc, msg) :: stack)
def label[A](s: String): ParseError = ParseError(latestLoc.map((_, s)).toList)
def latest: Option[(Location, String)] = stack.lastOption
def latestLoc: Option[Location] = latest.map(_._1)
def formatLoc(l: Location): String = l.line + "." + l.col
override def toString: String = {
if (stack.isEmpty) "no error message"
else {
val collapsed = collapseStack(stack)
val context = collapsed.lastOption.map("\\n\\n" + _._1.currentLine).getOrElse("") +
collapsed.lastOption.map("\\n" + _._1.columnCaret).getOrElse("")
collapsed.map { case (loc, msg) => loc.line.toString + "." + loc.col + " " + msg }
.mkString("\\n") + context
}
}
def collapseStack(s: List[(Location, String)]): List[(Location, String)] = {
s.groupBy(_._1)
.mapValues(_.map(_._2).mkString("; "))
.toList.sortBy(_._1.offset)
}
}
object Parsers {
}
| huajianmao/fpinscala | src/main/scala/fpinscala/exercises/ch09parsing/Parsers.scala | Scala | mit | 9,026 |
/*
* Copyright (c) 2012, 2013, 2014, 2015, 2016 SURFnet BV
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
* following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this list of conditions and the following
* disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided with the distribution.
* * Neither the name of the SURFnet BV nor the names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package support
import javax.net.ssl._
import play.core.ApplicationProvider
import play.server.api._
/*
* This custom SSL engine creates an instance of the default SSL Engine and
* enables client authentication. For this class to be instantiated the
* following command line argument must be used when starting the application:
*
* -Dplay.http.sslengineprovider=support.CustomSSLEngineProvider
*/
class CustomSSLEngineProvider(appProvider: ApplicationProvider) extends SSLEngineProvider {
override def createSSLEngine(): SSLEngine = {
val sslEngine = SSLContext.getDefault.createSSLEngine
sslEngine.setNeedClientAuth(true)
sslEngine
}
}
| BandwidthOnDemand/nsi-requester | app/support/CustomSSLEngineProvider.scala | Scala | bsd-3-clause | 2,241 |
package lettergenerator
package validators
object ErrorMessageFactory {
def apply(validator: Validator): String = validator match {
    case PathValidator() => "Could not reach the %s. Please" +
      " check if the path is correct, or report this issue"
case DetailsValidator(_, _) => "Details file error: the row" +
" with values %s is incomplete. Please check it and try again"
case TemplateValidator(_) => "Error: could not find variable %s on template."
}
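  // Hedged usage sketch; the string passed to format is an illustrative placeholder:
  //   val msg = ErrorMessageFactory(PathValidator()).format("details file")
  //   // => "Could not reach the details file. Please check ..."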
}
| claudiusbr/LetterGenerator | src/main/scala/lettergenerator/validators/ErrorMessageFactory.scala | Scala | mit | 485 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka
import joptsimple.OptionParser
import java.util.Properties
import java.util.Random
import java.io._
import scala.io.Source
import scala.io.BufferedSource
import kafka.producer._
import kafka.consumer._
import kafka.serializer._
import kafka.utils._
import kafka.log.FileMessageSet
import kafka.log.Log
/**
* This is a torture test that runs against an existing broker. Here is how it works:
*
* It produces a series of specially formatted messages to one or more partitions. Each message it produces
* it logs out to a text file. The messages have a limited set of keys, so there is duplication in the key space.
*
* The broker will clean its log as the test runs.
*
* When the specified number of messages have been produced we create a consumer and consume all the messages in the topic
* and write that out to another text file.
*
* Using a stable unix sort we sort both the producer log of what was sent and the consumer log of what was retrieved by the message key.
* Then we compare the final message in both logs for each key. If this final message is not the same for all keys we
* print an error and exit with exit code 1, otherwise we print the size reduction and exit with exit code 0.
*/
object TestLogCleaning {
def main(args: Array[String]) {
val parser = new OptionParser
val numMessagesOpt = parser.accepts("messages", "The number of messages to send or consume.")
.withRequiredArg
.describedAs("count")
.ofType(classOf[java.lang.Long])
.defaultsTo(Long.MaxValue)
val numDupsOpt = parser.accepts("duplicates", "The number of duplicates for each key.")
.withRequiredArg
.describedAs("count")
.ofType(classOf[java.lang.Integer])
.defaultsTo(5)
val brokerOpt = parser.accepts("broker", "Url to connect to.")
.withRequiredArg
.describedAs("url")
.ofType(classOf[String])
val topicsOpt = parser.accepts("topics", "The number of topics to test.")
.withRequiredArg
.describedAs("count")
.ofType(classOf[java.lang.Integer])
.defaultsTo(1)
val percentDeletesOpt = parser.accepts("percent-deletes", "The percentage of updates that are deletes.")
.withRequiredArg
.describedAs("percent")
.ofType(classOf[java.lang.Integer])
.defaultsTo(0)
val zkConnectOpt = parser.accepts("zk", "Zk url.")
.withRequiredArg
.describedAs("url")
.ofType(classOf[String])
val sleepSecsOpt = parser.accepts("sleep", "Time to sleep between production and consumption.")
.withRequiredArg
.describedAs("ms")
.ofType(classOf[java.lang.Integer])
.defaultsTo(0)
val dumpOpt = parser.accepts("dump", "Dump the message contents of a topic partition that contains test data from this test to standard out.")
.withRequiredArg
.describedAs("directory")
.ofType(classOf[String])
val options = parser.parse(args:_*)
if(options.has(dumpOpt)) {
dumpLog(new File(options.valueOf(dumpOpt)))
System.exit(0)
}
if(!options.has(brokerOpt) || !options.has(zkConnectOpt) || !options.has(numMessagesOpt)) {
parser.printHelpOn(System.err)
System.exit(1)
}
// parse options
val messages = options.valueOf(numMessagesOpt).longValue
val percentDeletes = options.valueOf(percentDeletesOpt).intValue
val dups = options.valueOf(numDupsOpt).intValue
val brokerUrl = options.valueOf(brokerOpt)
val topicCount = options.valueOf(topicsOpt).intValue
val zkUrl = options.valueOf(zkConnectOpt)
val sleepSecs = options.valueOf(sleepSecsOpt).intValue
val testId = new Random().nextInt(Int.MaxValue)
val topics = (0 until topicCount).map("log-cleaner-test-" + testId + "-" + _).toArray
println("Producing %d messages...".format(messages))
val producedDataFile = produceMessages(brokerUrl, topics, messages, dups, percentDeletes)
println("Sleeping for %d seconds...".format(sleepSecs))
Thread.sleep(sleepSecs * 1000)
println("Consuming messages...")
val consumedDataFile = consumeMessages(zkUrl, topics)
val producedLines = lineCount(producedDataFile)
val consumedLines = lineCount(consumedDataFile)
val reduction = 1.0 - consumedLines.toDouble/producedLines.toDouble
println("%d rows of data produced, %d rows of data consumed (%.1f%% reduction).".format(producedLines, consumedLines, 100 * reduction))
println("Deduplicating and validating output files...")
validateOutput(producedDataFile, consumedDataFile)
producedDataFile.delete()
consumedDataFile.delete()
}
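  /** Prints the offset, key and content of every message in the Kafka log segment files under the given directory. */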
def dumpLog(dir: File) {
require(dir.exists, "Non-existant directory: " + dir.getAbsolutePath)
for(file <- dir.list.sorted; if file.endsWith(Log.LogFileSuffix)) {
val ms = new FileMessageSet(new File(dir, file))
for(entry <- ms) {
val key = Utils.readString(entry.message.key)
val content =
if(entry.message.isNull)
null
else
Utils.readString(entry.message.payload)
println("offset = %s, key = %s, content = %s".format(entry.offset, key, content))
}
}
}
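  /** Counts the number of lines in the given file. */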
def lineCount(file: File): Int = io.Source.fromFile(file).getLines.size
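  /**
   * Externally sorts both logs by key, deduplicates each down to the final record per key,
   * and verifies that the producer and consumer logs agree on every key's final value.
   */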
def validateOutput(producedDataFile: File, consumedDataFile: File) {
val producedReader = externalSort(producedDataFile)
val consumedReader = externalSort(consumedDataFile)
val produced = valuesIterator(producedReader)
val consumed = valuesIterator(consumedReader)
val producedDedupedFile = new File(producedDataFile.getAbsolutePath + ".deduped")
val producedDeduped = new BufferedWriter(new FileWriter(producedDedupedFile), 1024*1024)
val consumedDedupedFile = new File(consumedDataFile.getAbsolutePath + ".deduped")
val consumedDeduped = new BufferedWriter(new FileWriter(consumedDedupedFile), 1024*1024)
var total = 0
var mismatched = 0
while(produced.hasNext && consumed.hasNext) {
val p = produced.next()
producedDeduped.write(p.toString)
producedDeduped.newLine()
val c = consumed.next()
consumedDeduped.write(c.toString)
consumedDeduped.newLine()
if(p != c)
mismatched += 1
total += 1
}
producedDeduped.close()
consumedDeduped.close()
    require(!produced.hasNext, "Producer log contains additional values not found in the consumer log.")
    require(!consumed.hasNext, "Consumer log contains additional values not found in the producer log.")
println("Validated " + total + " values, " + mismatched + " mismatches.")
require(mismatched == 0, "Non-zero number of row mismatches.")
// if all the checks worked out we can delete the deduped files
producedDedupedFile.delete()
consumedDedupedFile.delete()
}
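  /** Iterates over the deduplicated records of an already-sorted log, skipping records whose final state is a delete. */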
def valuesIterator(reader: BufferedReader) = {
new IteratorTemplate[TestRecord] {
def makeNext(): TestRecord = {
var next = readNext(reader)
while(next != null && next.delete)
next = readNext(reader)
if(next == null)
allDone()
else
next
}
}
}
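  /** Reads the next record, collapsing consecutive lines for the same topic and key down to the last (most recent) one. */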
def readNext(reader: BufferedReader): TestRecord = {
var line = reader.readLine()
if(line == null)
return null
var curr = new TestRecord(line)
while(true) {
line = peekLine(reader)
if(line == null)
return curr
val next = new TestRecord(line)
if(next == null || next.topicAndKey != curr.topicAndKey)
return curr
curr = next
reader.readLine()
}
null
}
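  /** Returns the next line without consuming it, using mark/reset on the reader. */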
def peekLine(reader: BufferedReader) = {
reader.mark(4096)
val line = reader.readLine
reader.reset()
line
}
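  /** Runs the unix 'sort' command on the file (stable, keyed on topic and key) and returns a reader over the sorted output. */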
def externalSort(file: File): BufferedReader = {
val builder = new ProcessBuilder("sort", "--key=1,2", "--stable", "--buffer-size=20%", "--temporary-directory=" + System.getProperty("java.io.tmpdir"), file.getAbsolutePath)
val process = builder.start()
new Thread() {
override def run() {
val exitCode = process.waitFor()
if(exitCode != 0) {
System.err.println("Process exited abnormally.")
while(process.getErrorStream.available > 0) {
System.err.write(process.getErrorStream().read())
}
}
}
}.start()
new BufferedReader(new InputStreamReader(process.getInputStream()), 10*1024*1024)
}
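  /** Produces the requested number of messages per topic over a limited key space (so keys repeat), marking a percentage as deletes, and logs every produced record to a temp file that is returned. */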
def produceMessages(brokerUrl: String,
topics: Array[String],
messages: Long,
dups: Int,
percentDeletes: Int): File = {
val producerProps = new Properties
producerProps.setProperty("producer.type", "async")
producerProps.setProperty("metadata.broker.list", brokerUrl)
producerProps.setProperty("serializer.class", classOf[StringEncoder].getName)
producerProps.setProperty("key.serializer.class", classOf[StringEncoder].getName)
producerProps.setProperty("queue.enqueue.timeout.ms", "-1")
producerProps.setProperty("batch.num.messages", 1000.toString)
val producer = new Producer[String, String](new ProducerConfig(producerProps))
val rand = new Random(1)
val keyCount = (messages / dups).toInt
val producedFile = File.createTempFile("kafka-log-cleaner-produced-", ".txt")
println("Logging produce requests to " + producedFile.getAbsolutePath)
val producedWriter = new BufferedWriter(new FileWriter(producedFile), 1024*1024)
for(i <- 0L until (messages * topics.length)) {
val topic = topics((i % topics.length).toInt)
val key = rand.nextInt(keyCount)
val delete = i % 100 < percentDeletes
val msg =
if(delete)
new KeyedMessage[String, String](topic = topic, key = key.toString, message = null)
else
new KeyedMessage[String, String](topic = topic, key = key.toString, message = i.toString)
producer.send(msg)
producedWriter.write(TestRecord(topic, key, i, delete).toString)
producedWriter.newLine()
}
producedWriter.close()
producer.close()
producedFile
}
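  /** Creates a ZooKeeper-based consumer connector with a fresh group id, a 20 second consumer timeout and earliest-offset reset. */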
def makeConsumer(zkUrl: String, topics: Array[String]): ZookeeperConsumerConnector = {
val consumerProps = new Properties
consumerProps.setProperty("group.id", "log-cleaner-test-" + new Random().nextInt(Int.MaxValue))
consumerProps.setProperty("zookeeper.connect", zkUrl)
consumerProps.setProperty("consumer.timeout.ms", (20*1000).toString)
consumerProps.setProperty("auto.offset.reset", "smallest")
new ZookeeperConsumerConnector(new ConsumerConfig(consumerProps))
}
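  /** Consumes all messages from the test topics and logs each consumed record to a temp file that is returned. */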
def consumeMessages(zkUrl: String, topics: Array[String]): File = {
val connector = makeConsumer(zkUrl, topics)
val streams = connector.createMessageStreams(topics.map(topic => (topic, 1)).toMap, new StringDecoder, new StringDecoder)
val consumedFile = File.createTempFile("kafka-log-cleaner-consumed-", ".txt")
println("Logging consumed messages to " + consumedFile.getAbsolutePath)
val consumedWriter = new BufferedWriter(new FileWriter(consumedFile))
for(topic <- topics) {
val stream = streams(topic).head
try {
for(item <- stream) {
val delete = item.message == null
val value = if(delete) -1L else item.message.toLong
consumedWriter.write(TestRecord(topic, item.key.toInt, value, delete).toString)
consumedWriter.newLine()
}
} catch {
case e: ConsumerTimeoutException =>
}
}
consumedWriter.close()
connector.shutdown()
consumedFile
}
}
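/** A single record in the test logs: tab-separated topic, key, value and an update ("u") / delete ("d") flag. */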
case class TestRecord(val topic: String, val key: Int, val value: Long, val delete: Boolean) {
def this(pieces: Array[String]) = this(pieces(0), pieces(1).toInt, pieces(2).toLong, pieces(3) == "d")
def this(line: String) = this(line.split("\\t"))
override def toString() = topic + "\\t" + key + "\\t" + value + "\\t" + (if(delete) "d" else "u")
def topicAndKey = topic + key
} | unix1986/universe | tool/kafka-0.8.1.1-src/core/src/test/scala/other/kafka/TestLogCleaning.scala | Scala | bsd-2-clause | 13,382 |
package com.github.akiomik.leap_scala
import com.leapmotion.leap.Finger
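/** Scala-friendly access to the static members of Leap Motion's Finger class. */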
object StaticFinger extends StaticFinger
trait StaticFinger {
def invalid: Finger = Finger.invalid
}
| akiomik/leap-scala | src/main/scala/com/github/akiomik/leap_scala/StaticFinger.scala | Scala | mit | 179 |
package mesosphere.marathon
package core.matcher.base.util
import mesosphere.UnitTest
import mesosphere.marathon.core.instance.{ LocalVolume, LocalVolumeId }
import mesosphere.marathon.core.launcher.InstanceOpFactory
import mesosphere.marathon.core.launcher.impl.TaskLabels
import mesosphere.marathon.core.task.Task
import mesosphere.marathon.state._
import mesosphere.marathon.stream.Implicits._
import mesosphere.marathon.test.MarathonTestHelper
import mesosphere.mesos.protos.ResourceProviderID
import org.apache.mesos.{ Protos => Mesos }
class OfferOperationFactoryTest extends UnitTest {
"OfferOperationFactory" should {
"Launch operation succeeds even if principal/role are not set" in {
val f = new Fixture
Given("a factory without principal or role")
val factory = new OfferOperationFactory(None, None)
val taskInfo = MarathonTestHelper.makeOneCPUTask(f.taskId).build()
When("We create a launch operation")
val operation = factory.launch(taskInfo)
Then("the Offer Operation is created")
operation.hasLaunch shouldEqual true
operation.getLaunch.getTaskInfos(0) shouldEqual taskInfo
}
"Reserve operation fails when role is not set" in {
val f = new Fixture
Given("a factory without role")
val factory = new OfferOperationFactory(Some("principal"), None)
When("We create a reserve operation")
val error = intercept[WrongConfigurationException] {
factory.reserve(f.reservationLabels, Seq(Mesos.Resource.getDefaultInstance))
}
Then("A meaningful exception is thrown")
error.getMessage should startWith("No role set")
}
"Reserve operation succeeds" in {
val f = new Fixture
Given("A simple task")
val factory = new OfferOperationFactory(Some("principal"), Some("role"))
val task = MarathonTestHelper.makeOneCPUTask(f.taskId)
When("We create a reserve operation")
val operations = factory.reserve(f.reservationLabels, task.getResourcesList.to[Seq])
Then("The operation is as expected")
operations.length shouldEqual 1
val operation = operations.head
operation.getType shouldEqual Mesos.Offer.Operation.Type.RESERVE
operation.hasReserve shouldEqual true
operation.getReserve.getResourcesCount shouldEqual task.getResourcesCount
And("The resource is reserved")
val resource = operation.getReserve.getResources(0)
resource.getName shouldEqual "cpus"
resource.getType shouldEqual Mesos.Value.Type.SCALAR
resource.getScalar.getValue shouldEqual 1
resource.getRole shouldEqual "role"
resource.hasReservation shouldEqual true
resource.getReservation.getPrincipal shouldEqual "principal"
}
"CreateVolumes operation succeeds" in {
val f = new Fixture
Given("a factory without principal")
val factory = new OfferOperationFactory(Some("principal"), Some("role"))
val volume1 = f.localVolume("mount1")
val volume2 = f.localVolume("mount2")
val volumes = Seq(volume1, volume2)
When("We create a reserve operation")
val offeredVolume1 = InstanceOpFactory.OfferedVolume(None, DiskSource.root, volume1)
val offeredVolume2 =
InstanceOpFactory.OfferedVolume(Some(ResourceProviderID("pID")), DiskSource.root, volume2)
val offeredVolumes = Seq(offeredVolume1, offeredVolume2)
val operations = factory.createVolumes(f.reservationLabels, offeredVolumes)
Then("The operation is as expected")
operations.length shouldEqual 2
val (operationWithProviderId, operationWithoutProviderId) =
if (operations.head.getCreate.getVolumesList.exists(_.hasProviderId)) {
(operations.head, operations.last)
} else {
(operations.last, operations.head)
}
operationWithProviderId.getType shouldEqual Mesos.Offer.Operation.Type.CREATE
operationWithProviderId.hasCreate shouldEqual true
operationWithProviderId.getCreate.getVolumesCount shouldEqual 1
operationWithProviderId.getCreate.getVolumesList.exists(_.hasProviderId) shouldEqual true
operationWithoutProviderId.getType shouldEqual Mesos.Offer.Operation.Type.CREATE
operationWithoutProviderId.hasCreate shouldEqual true
operationWithoutProviderId.getCreate.getVolumesCount shouldEqual 1
operationWithoutProviderId.getCreate.getVolumesList.exists(_.hasProviderId) shouldEqual false
And("The volumes are correct")
val volumeWithProviderId = operationWithProviderId.getCreate.getVolumes(0)
val originalVolume = volume2
volumeWithProviderId.getName shouldEqual "disk"
volumeWithProviderId.getRole shouldEqual "role"
volumeWithProviderId.getScalar.getValue shouldEqual 10
volumeWithProviderId.hasReservation shouldEqual true
volumeWithProviderId.getReservation.getPrincipal shouldEqual "principal"
volumeWithProviderId.hasDisk shouldEqual true
volumeWithProviderId.getDisk.hasPersistence shouldEqual true
volumeWithProviderId.getDisk.getPersistence.getId shouldEqual volume2.id.idString
volumeWithProviderId.getDisk.hasVolume shouldEqual true
volumeWithProviderId.getDisk.getVolume.getContainerPath shouldEqual volume2.mount.mountPath
volumeWithProviderId.getDisk.getVolume.getMode shouldEqual Mesos.Volume.Mode.RW
val volumeWithoutProviderId = operationWithoutProviderId.getCreate.getVolumes(0)
volumeWithoutProviderId.getName shouldEqual "disk"
volumeWithoutProviderId.getRole shouldEqual "role"
volumeWithoutProviderId.getScalar.getValue shouldEqual 10
volumeWithoutProviderId.hasReservation shouldEqual true
volumeWithoutProviderId.getReservation.getPrincipal shouldEqual "principal"
volumeWithoutProviderId.hasDisk shouldEqual true
volumeWithoutProviderId.getDisk.hasPersistence shouldEqual true
volumeWithoutProviderId.getDisk.getPersistence.getId shouldEqual volume1.id.idString
volumeWithoutProviderId.getDisk.hasVolume shouldEqual true
volumeWithoutProviderId.getDisk.getVolume.getContainerPath shouldEqual volume1.mount.mountPath
volumeWithoutProviderId.getDisk.getVolume.getMode shouldEqual Mesos.Volume.Mode.RW
}
}
class Fixture {
val runSpecId = PathId("/my-app")
val taskId = Task.Id.forRunSpec(runSpecId)
val frameworkId = MarathonTestHelper.frameworkId
val reservationLabels = TaskLabels.labelsForTask(frameworkId, taskId)
val principal = Some("principal")
val role = Some("role")
val factory = new OfferOperationFactory(principal, role)
def localVolume(mountPath: String): LocalVolume = {
val pv = PersistentVolume(None, PersistentVolumeInfo(size = 10))
val mount = VolumeMount(None, mountPath)
LocalVolume(LocalVolumeId(runSpecId, pv, mount), pv, mount)
}
}
}
| janisz/marathon | src/test/scala/mesosphere/marathon/core/matcher/base/util/OfferOperationFactoryTest.scala | Scala | apache-2.0 | 6,875 |
/*
* Copyright (c) 2013 Habla Computing
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.hablapps.dofollow.test.portal.project
import org.scalatest.FunSpec
import org.scalatest.BeforeAndAfter
import org.scalatest.matchers.ShouldMatchers
import org.hablapps.updatable._
import org.hablapps.react
import org.hablapps.speech
import org.hablapps.speech._
import org.hablapps.speech.serializer._
import speech.serializer.SerializableComponent
import org.hablapps.dofollow
import org.hablapps.dofollow._
import org.hablapps.dofollow.portal._
import org.hablapps.dofollow.portal.administration._
import org.hablapps.dofollow.portal.administration.projectModel._
import org.hablapps.dofollow.portal.project._
import org.hablapps.dofollow.portal.project.task._
class SetUpProjectTest(System: speech.System with DoFollowSystem with react.Debug) extends FunSpec with ShouldMatchers with BeforeAndAfter {
describe( "SetUpProjectTest"){
it("SetUp Project with one task") {
import System._
// turn_on_log = true
// show_causes = true
// show_empty_reactions = true
val Output(portal1, admin1) = reset(for {
portal1 <- Initiate(Portal())
administration <- Initiate2(Administration(), portal1)
admin1 <- Play2(Admin(_forename = "adminForename", _surname = "SurnameAdmin", _name = Some("0")), administration)
dep1 <- Initiate2(Department(_departmentName = "Department 1", _name = Some("dep1")), portal1)
dep2 <- Initiate2(Department(_departmentName = "Admins", _name = Some("37")), portal1)
modelPro1 <- Initiate2(ProjectModel(_projectName = "ProjectType 1", _name = Some("1")), administration)
modelTask1 <- Initiate2(TaskModel(_taskName = "Task 1",
_description = "Description",
_dependencies = Set(),
_duration = 3,
_department = "dep1",
_waitingDate = None,
_temporalDependence = None,
_temporalDuration = None), modelPro1)
} yield (portal1, admin1))
val NextState(obtained) = attempt(Say(admin1, portal1, SetUpProject(__new = Some(Project(_projectName = "Project 1")),
_numProjectAdmin = "12", _codProject = "1")))
reset(for {
portal1 <- Initiate(Portal())
administration <- Initiate2(Administration(), portal1)
admin1 <- Play2(Admin(_forename = "adminForename", _surname = "SurnameAdmin", _name = Some("0")), administration)
dep1 <- Initiate2(Department(_departmentName = "Department 1", _name = Some("dep1")), portal1)
dep2 <- Initiate2(Department(_departmentName = "Admins", _name = Some("37")), portal1)
modelPro1 <- Initiate2(ProjectModel(_projectName = "ProjectType 1", _name = Some("1")), administration)
modelTask1 <- Initiate2(TaskModel(_taskName = "Task 1",
_description = "Description",
_dependencies = Set(),
_duration = 3,
_department = "dep1",
_waitingDate = None,
_temporalDependence = None,
_temporalDuration = None), modelPro1)
setUp1 <- Say(admin1, portal1, SetUpProject(__new = Some(Project(_projectName = "Project 1")),
_numProjectAdmin = "12", _codProject = "1"))
_ <- Done(setUp1, PERFORMED)
project1 <- Initiate2(Project(_persistent = true, _projectAdmin = "12", _projectName = "Project 1"), portal1)
operator1 <- Play2(Operator(_persistent = true, _substatus = Some(Hidden), _name = Some("12")), dep2)
task1 <- Initiate2(Task(_persistent = true, _taskName = "Task 1", _description = "Description",
_duration = 3, _launchType = "A", _deadline = Some(345600000.toLong),
_substatus = Some(Executing), _startDate = 0, _name = Some("11")), project1)
_ <- Let(task1.context += dep1)
_ <- Let(dep1.subinteraction += task1)
responsible1 <- Play3(Responsible(_persistent = true), admin1, task1)
} yield ())
obtained should be(getState())
}
it("SetUp Project with two tasks") {
import System._
val Output(portal1, admin1) = reset(for {
portal1 <- Initiate(Portal())
administration <- Initiate2(Administration(), portal1)
admin1 <- Play2(Admin(_forename = "forename", _surname = "SurnameAdmin", _name = Some("0")), administration)
dep1 <- Initiate2(Department(_departmentName = "Department 1", _name = Some("dep1")), portal1)
dep2 <- Initiate2(Department(_departmentName = "Admins", _name = Some("37")), portal1)
modelPro1 <- Initiate2(ProjectModel(_projectName = "ProjectType 1", _name = Some("1")), administration)
modelTask1 <- Initiate2(TaskModel(_name = Some("t1"), _taskName = "Task 1",
_description = "Description",
_dependencies = Set(),
_duration = 3,
_department = "dep1",
_waitingDate = None,
_temporalDependence = None,
_temporalDuration = None), modelPro1)
catTask2 <- Initiate2(TaskModel(_name = Some("t2"), _taskName = "Task 2",
_description = "description 2",
_dependencies = Set("t1"),
_duration = 3,
_department = "dep1",
_waitingDate = None,
_temporalDependence = None,
_temporalDuration = None), modelPro1)
} yield (portal1, admin1))
val NextState(obtained) = attempt(Say(admin1, portal1, SetUpProject(__new = Some(Project(_projectName = "Project 1")),
_numProjectAdmin = "12", _codProject = "1")))
reset(for {
portal1 <- Initiate(Portal())
administration <- Initiate2(Administration(), portal1)
admin1 <- Play2(Admin(_forename = "forename", _surname = "SurnameAdmin", _name = Some("0")), administration)
dep1 <- Initiate2(Department(_departmentName = "Department 1", _name = Some("dep1")), portal1)
dep2 <- Initiate2(Department(_departmentName = "Admins", _name = Some("37")), portal1)
modelPro1 <- Initiate2(ProjectModel(_projectName = "ProjectType 1", _name = Some("1")), administration)
modelTask1 <- Initiate2(TaskModel(_name = Some("t1"), _taskName = "Task 1",
_description = "Description",
_dependencies = Set(),
_duration = 3,
_department = "dep1",
_waitingDate = None,
_temporalDependence = None,
_temporalDuration = None), modelPro1)
catTask2 <- Initiate2(TaskModel(_name = Some("t2"), _taskName = "Task 2",
_description = "description 2",
_dependencies = Set("t1"),
_duration = 3,
_department = "dep1",
_waitingDate = None,
_temporalDependence = None,
_temporalDuration = None), modelPro1)
setUp1 <- Say(admin1, portal1, SetUpProject(__new = Some(Project(_projectName = "Project 1")),
_numProjectAdmin = "12", _codProject = "1"))
_ <- Done(setUp1, PERFORMED)
project1 <- Initiate2(Project(_persistent = true, _projectAdmin = "12", _projectName = "Project 1"), portal1)
operator1 <- Play2(Operator(_persistent = true, _substatus = Some(Hidden), _name = Some("12")), dep2)
task1 <- Initiate2(Task(_persistent = true, _name = Some("t1"), _taskName = "Task 1", _description = "Description",
_duration = 3, _launchType = "A", _deadline = Some(345600000.toLong),
_substatus = Some(Executing), _startDate = 0), project1)
_ <- Let(task1.context += dep1)
_ <- Let(dep1.subinteraction += task1)
task2 <- Initiate2(Task(_persistent = true, _name = Some("t2"), _taskName = "Task 2", _description = "description 2",
_duration = 3, _substatus = Some(Waiting), _launchType = "A", _deadline = Some(604800000)), project1)
_ <- Let(task1.context += dep1)
_ <- Let(dep1.subinteraction += task1)
_ <- Let(task2.context += task1)
_ <- Let(task1.subinteraction += task2)
_ <- Let(task2.context += dep1)
_ <- Let(dep1.subinteraction += task2)
responsible1 <- Play3(Responsible(_persistent = true), admin1, task1)
} yield ())
obtained should be(getState())
}
}
} | hablapps/app-dofollow | src/test/scala/org/hablapps/dofollow/test/portal/project/SetUpProjectTest.scala | Scala | apache-2.0 | 8,897 |
/**
* © 2019 Refinitiv. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the “License”); you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
*
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package cmwell.common.formats
import java.io._
import cmwell.domain._
import cmwell.syntaxutils._
import cmwell.common.file.MimeTypeIdentifier
import cmwell.common.{WriteCommand, _}
import com.fasterxml.jackson.core._
import com.typesafe.scalalogging.LazyLogging
/**
* Created with IntelliJ IDEA.
* User: israel
* Date: 12/19/13
* Time: 3:48 PM
* To change this template use File | Settings | File Templates.
*/
object JsonSerializer extends AbstractJsonSerializer with LazyLogging {
val typeChars = List(/*'s',*/ 'i', 'l', 'w', 'b', 'd', 'f')
def encodeCommand(command: Command): Array[Byte] = {
val baos = new ByteArrayOutputStream()
val jsonGenerator = jsonFactory.createGenerator(baos).enable(JsonGenerator.Feature.AUTO_CLOSE_TARGET)
encodeCommandWithGenerator(command, jsonGenerator)
jsonGenerator.close()
baos.toByteArray
}
def decodeCommand(in: Array[Byte]): Command = {
val bais = new ByteArrayInputStream(in)
val jsonParser = jsonFactory.createParser(bais).enable(JsonParser.Feature.AUTO_CLOSE_SOURCE)
val command = decodeCommandWithParser(in, jsonParser)
jsonParser.close()
command
}
//TODO: when TLog serialization is replaced (to binary serialization), ditch the toEs boolean (since it will always be to ES...)
def encodeInfoton(infoton: Infoton,
omitBinaryData: Boolean = false,
toEs: Boolean = false,
newBG: Boolean = false,
current: Boolean = true): Array[Byte] = {
val baos = new ByteArrayOutputStream()
val jsonGenerator = jsonFactory.createGenerator(baos).enable(JsonGenerator.Feature.AUTO_CLOSE_TARGET)
encodeInfotonWithGenerator(infoton, jsonGenerator, omitBinaryData, toEs, newBG, current)
jsonGenerator.close()
baos.toByteArray
}
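  // A "tid" value is either a single tracking-id string or an array of "count,trackingId" status entries.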
def decodeTrackingIDWithParser(jsonParser: JsonParser): Option[Either[Vector[StatusTracking], String]] = {
if (jsonParser.nextToken() == JsonToken.VALUE_STRING) Some(Right(jsonParser.getText()))
else {
assume(jsonParser.currentToken == JsonToken.START_ARRAY, s"expected value of tid is either a string or an array")
val b = Vector.newBuilder[StatusTracking]
while (jsonParser.nextToken() != JsonToken.END_ARRAY) {
assume(jsonParser.currentToken == JsonToken.VALUE_STRING,
s"expected value for tid field\\n${jsonParser.getCurrentLocation.toString}")
val t = StatusTrackingFormat.parseTrackingStatus(jsonParser.getText)
assume(t.isSuccess, s"expected success for tid field\\n$t")
b += t.get
}
Some(Left(b.result()))
}
}
def decodePrevUUIDWithParser(jsonParser: JsonParser): Option[String] = {
assume(jsonParser.nextToken() == JsonToken.VALUE_STRING,
s"expected uuid string for 'prevUUID' field\\n${jsonParser.getCurrentLocation.toString}")
val rv = Some(jsonParser.getText())
//consume "type" field name
assume(
jsonParser.nextToken() == JsonToken.FIELD_NAME && "type".equals(jsonParser.getCurrentName()),
s"expected 'type' field name\\n${jsonParser.getCurrentLocation.toString}"
)
rv
}
def decodeCommandWithParser(originalJson: Array[Byte],
jsonParser: JsonParser,
assumeStartObject: Boolean = true): Command = {
// If requested, expect start of command object
if (assumeStartObject) {
assume(jsonParser.nextToken() == JsonToken.START_OBJECT,
s"expected start of command object\\n${jsonParser.getCurrentLocation.toString}")
}
assume(jsonParser.nextToken() == JsonToken.FIELD_NAME, s"expected field")
jsonParser.getCurrentName() match {
case "type" => logger.error("This version (0) is not supported anymore!!!"); !!!
case "version" => {
val jt = jsonParser.nextToken()
val ver = jsonParser.getText
assume(jsonParser.nextToken() == JsonToken.FIELD_NAME,
s"expected 'type','tid',or 'prevUUID' field name\\n${jsonParser.getCurrentLocation.toString}")
val (tidOpt, prevUUIDOpt) = jsonParser.getCurrentName() match {
case "type" => None -> None
case "prevUUID" => None -> decodePrevUUIDWithParser(jsonParser)
case "tid" =>
decodeTrackingIDWithParser(jsonParser) -> {
assume(jsonParser.nextToken() == JsonToken.FIELD_NAME,
s"expected 'type',or 'prevUUID' field name\\n${jsonParser.getCurrentLocation.toString}")
jsonParser.getCurrentName() match {
case "type" => None
case "prevUUID" => decodePrevUUIDWithParser(jsonParser)
}
}
}
ver match {
case v @ ("1" | "2" | "3" | "4" | "5") =>
logger.error(
s"This version ($v) is not supported anymore!!! The original json was: ${new String(originalJson, "UTF-8")}"
); !!!
case "6" => JsonSerializer6.decodeCommandWithParser(jsonParser, tidOpt, prevUUIDOpt)
case x => logger.error(s"got: $x"); ???
}
}
}
}
private def encodeCommandWithGenerator(command: Command, jsonGenerator: JsonGenerator): Unit = {
jsonGenerator.writeStartObject()
jsonGenerator.writeStringField("version", cmwell.util.build.BuildInfo.encodingVersion)
command match {
case sc: SingleCommand => {
sc.trackingID.foreach(jsonGenerator.writeStringField("tid", _))
sc.prevUUID.foreach(jsonGenerator.writeStringField("prevUUID", _))
}
case ic: IndexCommand if ic.trackingIDs.nonEmpty =>
jsonGenerator.writeArrayFieldStart("tid")
ic.trackingIDs.foreach {
case StatusTracking(tid, 1) => jsonGenerator.writeString(tid)
case StatusTracking(t, n) => jsonGenerator.writeString(s"$n,$t")
}
jsonGenerator.writeEndArray()
case _ => //Do Nothing!
}
jsonGenerator.writeStringField("type", command.getClass.getSimpleName)
command match {
case WriteCommand(infoton, trackingID, prevUUID) =>
jsonGenerator.writeFieldName("infoton")
encodeInfotonWithGenerator(infoton, jsonGenerator)
case IndexNewInfotonCommand(uuid, isCurrent, path, infotonOpt, indexName, trackingIDs) =>
jsonGenerator.writeStringField("uuid", uuid)
jsonGenerator.writeBooleanField("isCurrent", isCurrent)
jsonGenerator.writeStringField("path", path)
jsonGenerator.writeStringField("indexName", indexName)
infotonOpt.foreach { infoton =>
jsonGenerator.writeFieldName("infoton")
encodeInfotonWithGenerator(infoton, jsonGenerator)
}
case IndexNewInfotonCommandForIndexer(uuid, isCurrent, path, infotonOpt, indexName, persistOffsets, trackingIDs) =>
jsonGenerator.writeStringField("uuid", uuid)
jsonGenerator.writeBooleanField("isCurrent", isCurrent)
jsonGenerator.writeStringField("path", path)
jsonGenerator.writeStringField("indexName", indexName)
jsonGenerator.writeFieldName("persistOffsets")
encodeOffsetSeqWithGenerator(persistOffsets, jsonGenerator)
infotonOpt.foreach { infoton =>
jsonGenerator.writeFieldName("infoton")
encodeInfotonWithGenerator(infoton, jsonGenerator)
}
case IndexExistingInfotonCommand(uuid, weight, path, indexName, trackingIDs) =>
jsonGenerator.writeStringField("uuid", uuid)
jsonGenerator.writeNumberField("weight", weight)
jsonGenerator.writeStringField("path", path)
jsonGenerator.writeStringField("indexName", indexName)
case IndexExistingInfotonCommandForIndexer(uuid, weight, path, indexName, persistOffsets, trackingIDs) =>
jsonGenerator.writeStringField("uuid", uuid)
jsonGenerator.writeNumberField("weight", weight)
jsonGenerator.writeStringField("path", path)
jsonGenerator.writeStringField("indexName", indexName)
jsonGenerator.writeFieldName("persistOffsets")
encodeOffsetSeqWithGenerator(persistOffsets, jsonGenerator)
case NullUpdateCommandForIndexer(uuid, path, indexName, persistOffsets, trackingIDs) =>
jsonGenerator.writeStringField("uuid", uuid)
jsonGenerator.writeStringField("path", path)
jsonGenerator.writeStringField("indexName", indexName)
jsonGenerator.writeFieldName("persistOffsets")
encodeOffsetSeqWithGenerator(persistOffsets, jsonGenerator)
case DeleteAttributesCommand(path, fields, lastModified, lastModifiedBy, trackingID, prevUUID) =>
jsonGenerator.writeStringField("path", path)
encodeFieldsWithGenerator(fields, jsonGenerator)
jsonGenerator.writeStringField("lastModified", dateFormatter.print(lastModified))
jsonGenerator.writeStringField("lastModifiedBy", lastModifiedBy)
case DeletePathCommand(path, lastModified, lastModifiedBy, trackingID, prevUUID) =>
jsonGenerator.writeStringField("path", path)
jsonGenerator.writeStringField("lastModified", dateFormatter.print(lastModified))
jsonGenerator.writeStringField("lastModifiedBy", lastModifiedBy)
case UpdatePathCommand(path, deleteFields, updateFields, lastModified, lastModifiedBy, trackingID, prevUUID, protocol) =>
jsonGenerator.writeStringField("path", path)
encodeUpdateFieldsWithGenerator(deleteFields, updateFields, jsonGenerator)
jsonGenerator.writeStringField("lastModified", dateFormatter.print(lastModified))
jsonGenerator.writeStringField("lastModifiedBy", lastModifiedBy)
jsonGenerator.writeStringField("protocol", protocol)
case OverwriteCommand(infoton, trackingID) =>
jsonGenerator.writeFieldName("infoton")
encodeInfotonWithGenerator(infoton, jsonGenerator)
case CommandRef(ref) =>
jsonGenerator.writeStringField("ref", ref)
case HeartbitCommand => ???
}
jsonGenerator.writeEndObject()
}
private def encodeUpdateFieldsWithGenerator(deleteFields: Map[String, Set[FieldValue]],
updateFields: Map[String, Set[FieldValue]],
jsonGenerator: JsonGenerator) = {
encodeFieldsWithGenerator(deleteFields, jsonGenerator, "deleteFields")
encodeFieldsWithGenerator(updateFields, jsonGenerator, "updateFields")
}
//TODO: when TLog serialization is replaced (to binary serialization), ditch the toEs boolean (since it will always be to ES...)
private def encodeFieldsWithGenerator(fields: Map[String, Set[FieldValue]],
jsonGenerator: JsonGenerator,
name: String = "fields",
toEs: Boolean = false) = {
def encodeESFieldValue(fv: FieldValue, jp: JsonGenerator): Unit = fv match {
case FBoolean(bool, _) => jp.writeBoolean(bool)
case FString(str, _, _) => jp.writeString(str)
case FReference(fr, _) => jp.writeString(fr)
case FDate(dateTime, _) => jp.writeString(dateTime)
case FExternal(value, _, _) => jp.writeString(value)
case FInt(int, _) => jp.writeNumber(int)
case FLong(long, _) => jp.writeNumber(long)
case FBigInt(bigInt, _) => jp.writeNumber(bigInt)
case FFloat(float, _) => jp.writeNumber(float)
case FDouble(double, _) => jp.writeNumber(double)
case FBigDecimal(bigDecimal, _) => jp.writeNumber(bigDecimal)
case _: FNull => !!! //this is just a marker for IMP, should not index it anywhere...
case _: FExtra[_] =>
!!! // FExtra is just a marker for outputting special properties, should not index it anywhere...
}
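    // The full (non-ES) encoding packs each value into one string: a single type character, optional language/quad info and the value, separated by newlines.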
def fullEncodeFieldValue(fv: FieldValue, jp: JsonGenerator): Unit = fv match {
case FString(str, l, q) => jp.writeString(s"s${l.getOrElse("")}\\n${q.getOrElse("")}\\n$str")
case FBoolean(bool, q) => jp.writeString(s"b${q.getOrElse("")}\\n${bool.toString.head}")
case FReference(fr, q) => jp.writeString(s"r${q.getOrElse("")}\\n$fr")
case FDate(dateTime, q) => jp.writeString(s"d${q.getOrElse("")}\\n$dateTime")
case FInt(int, q) => jp.writeString(s"i${q.getOrElse("")}\\n$int")
case FLong(long, q) => jp.writeString(s"j${q.getOrElse("")}\\n$long")
case FBigInt(bigInt, q) => jp.writeString(s"k${q.getOrElse("")}\\n$bigInt")
case FFloat(float, q) => jp.writeString(s"f${q.getOrElse("")}\\n$float")
case FDouble(double, q) => jp.writeString(s"g${q.getOrElse("")}\\n$double")
case FBigDecimal(bigDecimal, q) => jp.writeString(s"h${q.getOrElse("")}\\n$bigDecimal")
case FExternal(value, uri, q) =>
jp.writeString(s"x$uri\\n${q.getOrElse("")}\\n$value") //require !uri.exists(_ == '\\n')
case FNull(q) => jp.writeString(s"n${q.getOrElse("")}")
case _: FExtra[_] =>
!!! // FExtra is just a marker for outputting special properties, should not ingest it anywhere...
}
def escapeString(s: String): String = if (!s.isEmpty && s.head == '#' && !toEs) s"#$s" else s
jsonGenerator.writeObjectFieldStart(name)
def prefixByType(fValue: FieldValue): String =
FieldValue.prefixByType(fValue) match { //.fold("")(`type` => s"${`type`}$$")
case 's' => ""
case chr => s"$chr$$"
}
// fValue match {
// case _:FString | _:FBigInt | _:FReference | _:FExternal => "s$"
// case _:FInt => "i$"
// case _:FLong => "l$"
// case _:FBigDecimal | _:FDouble => "w$"
// case _:FBoolean => "b$"
// case _:FDate => "d$"
// case _:FFloat => "f$"
// }
val fieldsByType = fields.flatMap {
case (k, vs) if toEs && vs.isEmpty => ((k -> vs) :: typeChars.map(typeChar => (typeChar + k) -> vs)).toMap
case (k, vs) if toEs =>
vs.groupBy { v =>
s"${prefixByType(v)}$k"
}
case (k, vs) => Map(k -> vs)
}
fieldsByType.foreach {
case (key, values) =>
jsonGenerator.writeArrayFieldStart(key)
if (toEs) values.foreach(encodeESFieldValue(_, jsonGenerator))
else values.foreach(fullEncodeFieldValue(_, jsonGenerator))
jsonGenerator.writeEndArray()
}
jsonGenerator.writeEndObject()
}
private def encodeOffsetSeqWithGenerator(offsets: Seq[Offset],
jsonGenerator: JsonGenerator): Unit = {
jsonGenerator.writeStartArray()
offsets.foreach(offset => encodeOffsetWithGenerator(offset, jsonGenerator))
jsonGenerator.writeEndArray()
}
private def encodeOffsetWithGenerator(offset: Offset,
jsonGenerator: JsonGenerator): Unit = {
jsonGenerator.writeStartObject()
jsonGenerator.writeStringField("offsetType", offset.getClass.getSimpleName)
jsonGenerator.writeStringField("topic", offset.topic)
jsonGenerator.writeNumberField("offset", offset.offset)
jsonGenerator.writeNumberField("part", offset.part)
jsonGenerator.writeNumberField("ofParts", offset.ofParts)
jsonGenerator.writeEndObject()
}
//TODO: when TLog serialization is replaced (to binary serialization), ditch the toEs boolean (since it will always be to ES...)
//TODO: aren't `toEs` and `omitBinaryData` always the same? we only omit binary data if we're indexing ES...
private def encodeInfotonWithGenerator(infoton: Infoton,
jsonGenerator: JsonGenerator,
omitBinaryData: Boolean = false,
toEs: Boolean = false,
newBG: Boolean = false,
current: Boolean = true): Unit = {
jsonGenerator.writeStartObject()
if (!newBG || !toEs) {
jsonGenerator.writeStringField("type", infoton.kind)
}
jsonGenerator.writeObjectFieldStart("system") // start system object field
if (newBG && toEs) {
jsonGenerator.writeStringField("kind", infoton.kind)
}
jsonGenerator.writeStringField("path", infoton.systemFields.path)
jsonGenerator.writeStringField("lastModified", dateFormatter.print(infoton.systemFields.lastModified))
jsonGenerator.writeStringField("lastModifiedBy", infoton.systemFields.lastModifiedBy)
jsonGenerator.writeStringField("indexName", infoton.systemFields.indexName)
jsonGenerator.writeStringField("uuid", infoton.uuid)
jsonGenerator.writeStringField("parent", infoton.parent)
jsonGenerator.writeStringField("dc", infoton.systemFields.dc)
//will add quad under system containing all the quads available in the fields
if (toEs) {
if (infoton.systemFields.indexTime.nonEmpty && infoton.systemFields.dc == SettingsHelper.dataCenter) {
logger.debug(
s"should not happen when writing a new infoton! indexTime should only be created while indexing, and not before. uuid = ${infoton.uuid}"
)
}
val idxT = {
if (infoton.systemFields.indexTime.isEmpty) {
logger.error(
s"indexing an infoton with no indexTime defined! setting a value of 613 as default. uuid = [${infoton.uuid}]"
)
        // an indexTime in the future is problematic:
        // i.e. when dc sync reaches the "event horizon",
        // it will be synced, and new infotons indexed after it
        // with the current time will not be seen by the remote dc.
        // The default value MUST be set in the past,
        // though it really shouldn't happen.
613L
} else infoton.systemFields.indexTime.get
}
jsonGenerator.writeNumberField("indexTime", idxT)
val quadsOpt = infoton.fields.map(
_.values
.flatMap(_.collect {
case fv if fv.quad.isDefined =>
fv.quad.get
})
.toSet
)
quadsOpt match {
case None => //DO NOTHING!
case Some(quadsSet) => {
jsonGenerator.writeFieldName("quad")
jsonGenerator.writeStartArray(quadsSet.size)
quadsSet.foreach(jsonGenerator.writeString)
jsonGenerator.writeEndArray()
}
}
if (newBG) {
if (current)
jsonGenerator.writeBooleanField("current", true)
else
jsonGenerator.writeBooleanField("current", false)
}
} else if (infoton.systemFields.indexTime.isDefined) { //this means it's an overwrite command to tlog 1
if (infoton.systemFields.dc == SettingsHelper.dataCenter) {
logger.debug("if should not exist (I think...)")
}
val idxT = infoton.systemFields.indexTime.get
// p.success(None)//(Some(idxT))
jsonGenerator.writeNumberField("indexTime", idxT)
}
jsonGenerator.writeStringField("protocol", infoton.systemFields.protocol)
jsonGenerator.writeEndObject() // end system object field
// write field object, if not empty
infoton.fields.foreach { fields =>
encodeFieldsWithGenerator(fields, jsonGenerator, toEs = toEs)
}
infoton match {
case FileInfoton(_, _, Some(FileContent(dataOpt, mimeType, dl, dp))) =>
jsonGenerator.writeObjectFieldStart("content")
jsonGenerator.writeStringField("mimeType", mimeType)
dataOpt.foreach { data =>
if (MimeTypeIdentifier.isTextual(mimeType)) {
val charset = mimeType.lastIndexOf("charset=") match {
case i if i != -1 => mimeType.substring(i + 8).trim
case _ => "utf-8"
}
jsonGenerator.writeStringField("data", new String(data, charset))
} else if (!omitBinaryData) {
jsonGenerator.writeBinaryField("base64-data", data)
}
}
dp.foreach { dataPointer =>
jsonGenerator.writeStringField("data-pointer", dataPointer)
}
jsonGenerator.writeNumberField("length", dataOpt.fold(dl)(_.length))
jsonGenerator.writeEndObject()
case LinkInfoton(_, _, linkTo, linkType) =>
jsonGenerator.writeStringField("linkTo", linkTo)
jsonGenerator.writeNumberField("linkType", linkType)
case _ =>
}
jsonGenerator.writeEndObject() // end Infoton object
}
}
| dudi3001/CM-Well | server/cmwell-common/src/main/scala/cmwell/common/formats/JsonSerializer.scala | Scala | apache-2.0 | 21,046 |
/*
* Copyright 2013 the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.jayway.restassured.scalatra
import org.scalatra.ScalatraServlet
import collection.mutable.HashMap
import util.Random
class CustomAuthExample extends ScalatraServlet {
val authenticatedSessions = new HashMap[String, Int]()
before() {
contentType = "application/json"
}
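  // Challenge-response login: the client gets two random operands and a session id,
  // and must later supply their sum together with the id to read the secret messages.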
post("/login") {
val rand = new Random(System.currentTimeMillis());
val operandA = rand.nextInt(1000)
val operandB = rand.nextInt(1000)
val expectedSum = operandA + operandB
val id = rand.nextLong().toString
authenticatedSessions += id -> expectedSum
"{ \\"operandA\\" : "+operandA + ", \\"operandB\\" : "+operandB + ", \\"id\\" : \\""+id+"\\" }"
}
get("/secretMessage") {
returnIfLoggedIn("I'm secret")
}
get("/secretMessage2") {
returnIfLoggedIn("I'm also secret")
}
private def returnIfLoggedIn(message: => String) : String = {
val actualSum = request.getParameter("sum")
val id = request.getParameter("id")
val expectedSum = authenticatedSessions.getOrElse(id, -1)
if (actualSum == null || id == null || expectedSum == -1 || actualSum.toInt != expectedSum) {
"""{ "message" : "You're not authorized to see the secret message" }"""
} else {
"{ \\"message\\" : \\""+message+"\\" }"
}
}
} | lanwen/rest-assured | examples/scalatra-example/src/main/scala/com/jayway/restassured/scalatra/CustomAuthExample.scala | Scala | apache-2.0 | 1,882 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.plans.logical.sql
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
/**
* A DROP VIEW statement, as parsed from SQL.
*/
case class DropViewStatement(
viewName: Seq[String],
ifExists: Boolean) extends ParsedStatement {
override def output: Seq[Attribute] = Seq.empty
override def children: Seq[LogicalPlan] = Seq.empty
}
| aosagie/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/plans/logical/sql/DropViewStatement.scala | Scala | apache-2.0 | 1,250 |
package org.loklak.data
/**
* Created by Scott on 6/4/16.
*/
object DAO {
private val es = ES.client
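  /** Indexes the given JSON document under the "twitter" type of the "loklak" index with the supplied id. */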
def store(id:String, json:String) = {
es.index("loklak","twitter",json,id)
}
}
| DengYiping/loklak-scala | src/main/scala/org/loklak/data/DAO.scala | Scala | mit | 195 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.optimization.tfocs
import org.apache.spark.mllib.linalg.{ BLAS, DenseVector, Vectors }
import org.apache.spark.mllib.optimization.tfocs.VectorSpace._
/**
* Extra functions available on DVectors through an implicit conversion. DVectors are represented
* using RDD[DenseVector], and these helper functions apply operations to the values within each
* DenseVector of the RDD.
*/
private[tfocs] class DVectorFunctions(self: DVector) {
/** Apply a function to each DVector element. */
def mapElements(f: Double => Double): DVector =
self.map(part => new DenseVector(part.values.map(f)))
/**
* Zip a DVector's elements with those of another DVector and apply a function to each pair of
* elements.
*/
def zipElements(other: DVector, f: (Double, Double) => Double): DVector =
self.zip(other).map {
case (selfPart, otherPart) =>
if (selfPart.size != otherPart.size) {
throw new IllegalArgumentException("Can only call zipElements on DVectors with the " +
"same number of elements and consistent partitions.")
}
// NOTE DenseVectors are assumed here (not sparse safe).
val ret = new Array[Double](selfPart.size)
var i = 0
while (i < ret.size) {
ret(i) = f(selfPart(i), otherPart(i))
i += 1
}
new DenseVector(ret)
}
/** Apply aggregation functions to the DVector elements. */
def aggregateElements(zeroValue: Double)(
seqOp: (Double, Double) => Double,
combOp: (Double, Double) => Double): Double =
self.aggregate(zeroValue)(
seqOp = (aggregate, part) => {
// NOTE DenseVectors are assumed here (not sparse safe).
val partAggregate = part.values.aggregate(zeroValue)(seqop = seqOp, combop = combOp)
combOp(partAggregate, aggregate)
},
combOp = combOp)
/** Collect the DVector elements to a local array. */
def collectElements: Array[Double] =
// NOTE DenseVectors are assumed here (not sparse safe).
self.collect().flatMap(_.values)
/** Compute the elementwise difference of this DVector with another. */
def diff(other: DVector): DVector =
self.zip(other).map {
case (selfPart, otherPart) =>
val ret = selfPart.copy
BLAS.axpy(-1.0, otherPart, ret)
ret
}
/** Sum the DVector's elements. */
def sum: Double = self.aggregate(0.0)((sum, x) => sum + x.values.sum, _ + _)
/** Compute the dot product with another DVector. */
def dot(other: DVector): Double =
self.zip(other).aggregate(0.0)((sum, x) => sum + BLAS.dot(x._1, x._2), _ + _)
}
private[tfocs] object DVectorFunctions {
implicit def DVectorToDVectorFunctions(dVector: DVector): DVectorFunctions =
new DVectorFunctions(dVector)
}
| databricks/spark-tfocs | src/main/scala/org/apache/spark/mllib/optimization/tfocs/DVectorFunctions.scala | Scala | apache-2.0 | 3,593 |
// Copyright 2014 Pants project contributors (see CONTRIBUTORS.md).
// Licensed under the Apache License, Version 2.0 (see LICENSE).
package org.pantsbuild.example
// A simple jvm binary to test the jvm_run task on. Try, e.g.,
// ./pants -ldebug run --run-jvm-jvm-options='-Dfoo=bar' --run-jvm-args="Foo Bar" \\\\
// examples/src/scala/org/pantsbuild/example:jvm-run-example
object JvmRunExample {
def main(args: Array[String]) {
println("Hello, World")
println("args: " + args.mkString(", "))
}
}
| areitz/pants | examples/src/scala/org/pantsbuild/example/JvmRunExample.scala | Scala | apache-2.0 | 514 |
package redis
import scala.concurrent.Await
import redis.api.pubsub._
import redis.actors.RedisSubscriberActor
import java.net.InetSocketAddress
import akka.actor.{Props, ActorRef}
import akka.testkit.{TestActorRef, TestProbe}
import akka.util.ByteString
class RedisPubSubSpec extends RedisStandaloneServer {
sequential
"PubSub test" should {
"ok (client + callback)" in {
var redisPubSub: RedisPubSub = null
redisPubSub = RedisPubSub(
port = port,
channels = Seq("chan1", "secondChannel"),
patterns = Seq("chan*"),
onMessage = (m: Message) => {
redisPubSub.unsubscribe("chan1", "secondChannel")
redisPubSub.punsubscribe("chan*")
redisPubSub.subscribe(m.data.utf8String)
redisPubSub.psubscribe("next*")
}
)
Thread.sleep(2000)
val p = redis.publish("chan1", "nextChan")
val noListener = redis.publish("noListenerChan", "message")
Await.result(p, timeOut) mustEqual 2
Await.result(noListener, timeOut) mustEqual 0
Thread.sleep(2000)
val nextChan = redis.publish("nextChan", "message")
val p2 = redis.publish("chan1", "nextChan")
Await.result(p2, timeOut) mustEqual 0
Await.result(nextChan, timeOut) mustEqual 2
}
"ok (actor)" in {
val probeMock = TestProbe()
val channels = Seq("channel")
val patterns = Seq("pattern.*")
val subscriberActor = TestActorRef[SubscriberActor](
Props(classOf[SubscriberActor], new InetSocketAddress("localhost", port),
channels, patterns, probeMock.ref)
.withDispatcher(Redis.dispatcher.name),
"SubscriberActor"
)
import scala.concurrent.duration._
system.scheduler.scheduleOnce(2 seconds)(redis.publish("channel", "value"))
probeMock.expectMsgType[Message](5 seconds) mustEqual Message("channel", ByteString("value"))
redis.publish("pattern.1", "value")
probeMock.expectMsgType[PMessage] mustEqual PMessage("pattern.*", "pattern.1", ByteString("value"))
subscriberActor.underlyingActor.subscribe("channel2")
subscriberActor.underlyingActor.unsubscribe("channel")
system.scheduler.scheduleOnce(2 seconds)({
redis.publish("channel", "value")
redis.publish("channel2", "value")
})
probeMock.expectMsgType[Message](5 seconds) mustEqual Message("channel2", ByteString("value"))
subscriberActor.underlyingActor.unsubscribe("channel2")
system.scheduler.scheduleOnce(1 second)({
redis.publish("channel2", ByteString("value"))
})
probeMock.expectNoMessage(3 seconds)
subscriberActor.underlyingActor.subscribe("channel2")
system.scheduler.scheduleOnce(1 second)({
redis.publish("channel2", ByteString("value"))
})
probeMock.expectMsgType[Message](5 seconds) mustEqual Message("channel2", ByteString("value"))
subscriberActor.underlyingActor.psubscribe("pattern2.*")
subscriberActor.underlyingActor.punsubscribe("pattern.*")
system.scheduler.scheduleOnce(2 seconds)({
redis.publish("pattern2.match", ByteString("value"))
redis.publish("pattern.*", ByteString("value"))
})
probeMock.expectMsgType[PMessage](5 seconds) mustEqual PMessage("pattern2.*", "pattern2.match", ByteString("value"))
subscriberActor.underlyingActor.punsubscribe("pattern2.*")
system.scheduler.scheduleOnce(2 seconds)({
redis.publish("pattern2.match", ByteString("value"))
})
probeMock.expectNoMessage(3 seconds)
subscriberActor.underlyingActor.psubscribe("pattern.*")
system.scheduler.scheduleOnce(2 seconds)({
redis.publish("pattern.*", ByteString("value"))
})
probeMock.expectMsgType[PMessage](5 seconds) mustEqual PMessage("pattern.*", "pattern.*", ByteString("value"))
}
}
}
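/** Test subscriber that forwards every received Message and PMessage to the given probe. */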
class SubscriberActor(address: InetSocketAddress,
channels: Seq[String],
patterns: Seq[String],
probeMock: ActorRef
) extends RedisSubscriberActor(address, channels, patterns, None, (b:Boolean) => () ) {
override def onMessage(m: Message) = {
probeMock ! m
}
def onPMessage(pm: PMessage): Unit = {
probeMock ! pm
}
}
| etaty/rediscala | src/test/scala/redis/RedisPubSubSpec.scala | Scala | apache-2.0 | 4,315 |
package breeze.optimize
import breeze.math.MutableCoordinateSpace
import com.typesafe.scalalogging.slf4j.Logging
import breeze.collection.mutable.RingBuffer
/**
* SPG is a Spectral Projected Gradient minimizer; it minimizes a differentiable
* function subject to the optimum being in some set, given by the projection operator projection
* @tparam T vector type
 * @param tolerance termination criterion: tolerance for norm of projected gradient
 * @param suffDec sufficient decrease parameter for the line search
 * @param minImprovementWindow number of history entries kept for the nonmonotone line search
 * @param alphaMax longest step
 * @param alphaMin shortest step
 * @param maxIter maximum number of iterations
 * @param testOpt perform optimality check based on projected gradient at each iteration
 * @param initFeas is the initial guess feasible, or should it be projected?
 * @param maxSrchIt maximum number of line search attempts
 * @param projection the projection operator mapping a point onto the feasible set
*/
class SpectralProjectedGradient[T, -DF <: DiffFunction[T]](
val projection: T => T = { (t: T) => t },
tolerance: Double = 1e-6,
val suffDec: Double = 1e-4,
minImprovementWindow: Int = 10,
val alphaMax: Double = 1e10,
val alphaMin: Double = 1e-10,
maxIter: Int = 500,
val testOpt: Boolean = true,
val initFeas: Boolean = false,
val maxSrchIt: Int = 30)(implicit coord: MutableCoordinateSpace[T, Double]) extends FirstOrderMinimizer[T, DF](minImprovementWindow = minImprovementWindow, maxIter = maxIter, tolerance = tolerance) with Projecting[T] with Logging {
import coord._
type History = Double
protected def initialHistory(f: DF, init: T): History = 1.0
protected def chooseDescentDirection(state: State, f: DF): T = projectedVector(state.x, state.grad * -state.history)
override protected def adjust(newX: T, newGrad: T, newVal: Double):(Double,T) = (newVal,-projectedVector(newX, - newGrad))
protected def takeStep(state: State, dir: T, stepSize: Double): T = projection(state.x + dir * stepSize)
protected def updateHistory(newX: T, newGrad: T, newVal: Double, f: DF, oldState: State): History = {
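    // Spectral (Barzilai-Borwein) step length alpha = (s . s) / (s . y); falls back to 0 when NaN and to 1 when outside [alphaMin, alphaMax].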
val y = newGrad - oldState.grad
val s = newX - oldState.x
val alpha = s.dot(s) / s.dot(y)
if (alpha.isNaN())
0.0
else if (alpha < alphaMin || alpha > alphaMax)
1
else
alpha
}
protected def determineStepSize(state: State, f: DF, direction: T): Double = {
import state._
val funRef = if (fVals.isEmpty) Double.PositiveInfinity else fVals.max
val t = if (iter == 0) {
scala.math.min(1.0, (1.0 / norm(grad, 1)))
} else {
1.0
}
val searchStep = direction * t
val sufficientDecrease = grad.dot(searchStep) * suffDec
val requiredValue = funRef + sufficientDecrease
val lineSearchFunction = LineSearch.functionFromSearchDirection(f, x, direction)
val ls = new SimpleLineSearch(requiredValue, maxSrchIt)
ls.minimize(lineSearchFunction, t)
}
class SimpleLineSearch(requiredValue: Double, maxIterations: Int) extends ApproximateLineSearch {
def iterations(f: DiffFunction[Double], init: Double = 1.0): Iterator[State] = {
val (initfval, initfderiv) = f.calculate(init)
Iterator.iterate((State(init, initfval, initfderiv), 0)) {
case (State(alpha, fval, fderiv), iter) =>
val newAlpha = alpha / 2.0
val (fvalnew, fderivnew) = f.calculate(newAlpha)
(State(newAlpha, fvalnew, fderivnew), iter + 1)
}.takeWhile {
case (state, iterations) =>
(iterations == 0) ||
(iterations < maxIterations &&
state.value > requiredValue)
}.map(_._1)
}
}
}
| ktakagaki/breeze | src/main/scala/breeze/optimize/SpectralProjectedGradient.scala | Scala | apache-2.0 | 3,621 |
package org.jetbrains.plugins.scala.performance.typing
import scala.concurrent.duration.{Duration, DurationInt}
import scala.language.postfixOps
/**
* !!! Also see tests in [[org.jetbrains.plugins.scala.lang.actions.editor]] package
* TODO: unify tests and move to a common package
*/
class ScalaTypedHandlerTest extends TypingTestWithPerformanceTestBase {
implicit val typingTimeout: Duration = 150 milliseconds
private val SPACE: Char = ' '
override protected def folderPath: String = super.folderPath + "/typedHandler/"
def testCase(): Unit = doFileTest("case _")
def testDotAfterNewline(): Unit = doFileTest(".")
def testDotBeforeNewline(): Unit = doFileTest("a")
def testDefinitionAssignBeforeNewline(): Unit = doFileTest("a")
def testParametersComaBeforeNewline(): Unit = doFileTest("a")
def testCompleteScaladocOnSpace(): Unit = {
doTest(SPACE)(
s"""class X {
| /**$CARET
| def foo: Unit
|}
""".stripMargin,
s"""class X {
| /** $CARET */
| def foo: Unit
|}
""".stripMargin
)
}
def testNotCompleteScaladocOnSpaceIfLineIsNotEmpty(): Unit = {
doTest(SPACE)(
s"""class X {
| /**$CARET some text
| def foo: Unit
|}
|""".stripMargin,
s"""class X {
| /** $CARET some text
| def foo: Unit
|}
|""".stripMargin
)
}
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/performance/typing/ScalaTypedHandlerTest.scala | Scala | apache-2.0 | 1,438 |
package org.github.sguzman.scala.game.scalebra.mvc.view
import java.util.concurrent.Executors
import akka.actor.{Actor, ActorLogging}
import org.github.sguzman.scala.game.scalebra.Scalebra
import org.github.sguzman.scala.game.scalebra.actor.{Start, Stop}
import org.github.sguzman.scala.game.scalebra.util.log.L
import org.github.sguzman.scala.game.scalebra.mvc.model.Direction
import org.github.sguzman.scala.game.scalebra.mvc.model.artifact.Food
import org.github.sguzman.scala.game.scalebra.mvc.model.artifact.snake.Snake
import org.lwjgl.opengl.{Display, DisplayMode, GL11}
/**
* @author Salvador Guzman
* custom.user: sguzman
* custom.project: Scalebra
* @version org.github.sguzman.scala.game.scalebra.mvc.view
* @note This component represents the view aspect of the MVC model. It
* handles all the logistics of representing any visual aspect of the
* game. Preferably, it will have a thread to itself that will handle
* the communication with LWJGL's visual aspects, like calling the
* Display.* methods.
*
* custom.created: 5/5/16 2:29 AM
* @since 5/5/16
*/
class View extends Actor with ActorLogging {
/** Contains the snake and its body */
val snake = new Snake
/** Food */
var food = Food()
/**
* Render entire scene
*/
def render(): Unit = {
GL11.glClear(GL11.GL_COLOR_BUFFER_BIT | GL11.GL_DEPTH_BUFFER_BIT)
snake.render()
food.render()
}
class RenderTh extends Runnable {
/**
* Main logic of rendering thread
*/
def run() = {
View.init()
      L.i("Starting up render thread", "RenderThread")
while (!Display.isCloseRequested) {
do {
FPS.updateFPS()
render()
} while (View.paused)
Display.update(false)
Display.sync(60)
}
}
}
/**
* Akka actor mailbox for View subsystem
*/
override def receive: Actor.Receive = {
case _: Start =>
L.i("Start object received... Init View thread and starting Input", "View")
View.rendTh.execute(new RenderTh)
Scalebra.inputAc ! Start()
case _: Stop =>
L.i("Stop object received... Stop View thread and stop Input", "View")
Scalebra.inputAc ! Stop
Display.destroy()
View.rendTh.shutdown()
case dir: Direction =>
L.d("Direction received: {}", "View", dir)
if(!View.paused) snake.setDir(dir)
case pause.TogglePause =>
L.d("Toggle pause", "View")
View.pauseToggle()
}
}
object View {
/** Rendering thread */
val rendTh = Executors.newSingleThreadExecutor()
/** Is the game paused? */
@volatile private var paused = false
/** Hard code width and height */
val width = 800
val height = 600
/** Contain entire grid */
val allArea = (for (j <- 0 to (height / 10); i <- 0 to (width / 10))
yield (i,j)).toSet
/**
* Init all subsystems
*/
def init(): Unit = {
L.i("Init all display systems", "View")
View.initLWJGL()
View.initGL()
FPS.init()
}
/**
* Initiate JWJGL subsystem
*/
def initLWJGL(): Unit = {
L.i("Init JWJGL", "View")
Display.setDisplayMode(new DisplayMode(800, 600))
Display.create()
}
/**
* Initialize OpenGL draw subsystem
*/
def initGL(): Unit = {
L.i("Init OpenGL", "View")
GL11.glMatrixMode(GL11.GL_PROJECTION)
GL11.glLoadIdentity()
GL11.glOrtho(0.0f, width, 0.0f, height, 1.0f, -1.0f)
GL11.glMatrixMode(GL11.GL_MODELVIEW)
}
/**
* Set title text
*/
def title(msg: String): Unit = Display.setTitle(msg)
/**
* Pause the game
*/
def pause(): Unit = paused = true
/**
* Unpause the game
*/
def unPause(): Unit = paused = false
/**
* Toggle pause of the game
*/
def pauseToggle(): Unit = paused = !paused
}
| sguzman/Scalebra | src/main/scala/org/github/sguzman/scala/game/scalebra/mvc/view/View.scala | Scala | mit | 3,840 |
package io.sphere.mongo.generic
import com.mongodb.DBObject
import org.scalatest.matchers.must.Matchers
import org.scalatest.wordspec.AnyWordSpec
import io.sphere.mongo.MongoUtils.dbObj
import io.sphere.mongo.format.DefaultMongoFormats._
import io.sphere.mongo.format.MongoFormat
import org.scalatest.Assertion
class SumTypesDerivingSpec extends AnyWordSpec with Matchers {
import SumTypesDerivingSpec._
"Serializing sum types" must {
"use 'type' as default field" in {
check(Color1.format, Color1.Red, dbObj("type" -> "Red"))
check(Color1.format, Color1.Custom("2356"), dbObj("type" -> "Custom", "rgb" -> "2356"))
}
"use custom field" in {
check(Color2.format, Color2.Red, dbObj("color" -> "Red"))
check(Color2.format, Color2.Custom("2356"), dbObj("color" -> "Custom", "rgb" -> "2356"))
}
"use custom values" in {
check(Color3.format, Color3.Red, dbObj("type" -> "red"))
check(Color3.format, Color3.Custom("2356"), dbObj("type" -> "custom", "rgb" -> "2356"))
}
"use custom field & values" in pendingUntilFixed {
check(Color4.format, Color4.Red, dbObj("color" -> "red"))
check(Color4.format, Color4.Custom("2356"), dbObj("color" -> "custom", "rgb" -> "2356"))
}
"not allow specifying different custom field" in pendingUntilFixed {
// to serialize Custom, should we use type "color" or "color-custom"?
"deriveMongoFormat[Color5]" mustNot compile
}
"not allow specifying different custom field on intermediate level" in {
// to serialize Custom, should we use type "color" or "color-custom"?
"deriveMongoFormat[Color6]" mustNot compile
}
"use intermediate level" in {
deriveMongoFormat[Color7]
}
"do not use sealed trait info when using a case class directly" in {
check(Color8.format, Color8.Custom("2356"), dbObj("type" -> "Custom", "rgb" -> "2356"))
check(Color8.Custom.format, Color8.Custom("2356"), dbObj("rgb" -> "2356"))
// unless annotated
check(
Color8.format,
Color8.CustomAnnotated("2356"),
dbObj("type" -> "CustomAnnotated", "rgb" -> "2356"))
check(
Color8.CustomAnnotated.format,
Color8.CustomAnnotated("2356"),
dbObj("type" -> "CustomAnnotated", "rgb" -> "2356"))
}
"use default values if custom values are empty" in {
check(Color9.format, Color9.Red, dbObj("type" -> "Red"))
check(Color9.format, Color9.Custom("2356"), dbObj("type" -> "Custom", "rgb" -> "2356"))
}
}
}
object SumTypesDerivingSpec {
import Matchers._
def check[A, B <: A](format: MongoFormat[A], b: B, dbo: DBObject): Assertion = {
val serialized = format.toMongoValue(b)
serialized must be(dbo)
format.fromMongoValue(serialized) must be(b)
}
sealed trait Color1
object Color1 {
case object Red extends Color1
case class Custom(rgb: String) extends Color1
val format = deriveMongoFormat[Color1]
}
@MongoTypeHintField("color")
sealed trait Color2
object Color2 {
case object Red extends Color2
case class Custom(rgb: String) extends Color2
val format = deriveMongoFormat[Color2]
}
sealed trait Color3
object Color3 {
@MongoTypeHint("red") case object Red extends Color3
@MongoTypeHint("custom") case class Custom(rgb: String) extends Color3
val format = deriveMongoFormat[Color3]
}
@MongoTypeHintField("color")
sealed trait Color4
object Color4 {
@MongoTypeHint("red") case object Red extends Color4
@MongoTypeHint("custom") case class Custom(rgb: String) extends Color4
val format = deriveMongoFormat[Color4]
}
@MongoTypeHintField("color")
sealed trait Color5
object Color5 {
@MongoTypeHint("red")
case object Red extends Color5
@MongoTypeHintField("color-custom")
@MongoTypeHint("custom")
case class Custom(rgb: String) extends Color5
}
@MongoTypeHintField("color")
sealed trait Color6
object Color6 {
@MongoTypeHintField("color-custom")
abstract class MyColor extends Color6
@MongoTypeHint("red")
case object Red extends MyColor
@MongoTypeHint("custom")
case class Custom(rgb: String) extends MyColor
}
sealed trait Color7
sealed trait Color7a extends Color7
object Color7 {
case object Red extends Color7a
case class Custom(rgb: String) extends Color7a
}
sealed trait Color8
object Color8 {
case object Red extends Color8
case class Custom(rgb: String) extends Color8
object Custom {
val format = deriveMongoFormat[Custom]
}
@MongoTypeHintField("type")
case class CustomAnnotated(rgb: String) extends Color8
object CustomAnnotated {
val format = deriveMongoFormat[CustomAnnotated]
}
val format = deriveMongoFormat[Color8]
}
sealed trait Color9
object Color9 {
@MongoTypeHint("")
case object Red extends Color9
@MongoTypeHint(" ")
case class Custom(rgb: String) extends Color9
val format = deriveMongoFormat[Color9]
}
}
| sphereio/sphere-scala-libs | mongo/mongo-derivation-magnolia/src/test/scala/io/sphere/mongo/generic/SumTypesDerivingSpec.scala | Scala | apache-2.0 | 5,032 |
package sangria.parser
import language.postfixOps
import sangria.ast.AstLocation
import sangria.ast
import sangria.ast._
import sangria.util.{FileUtil, StringMatchers}
import scala.reflect.ClassTag
import scala.util.{Failure, Success}
import org.scalatest.matchers.should.Matchers
import org.scalatest.wordspec.AnyWordSpec
class QueryParserSpec extends AnyWordSpec with Matchers with StringMatchers {
def parseQuery(query: String)(implicit scheme: DeliveryScheme[ast.Document]): scheme.Result =
QueryParser.parse(query, ParserConfig.default.withEmptySourceId.withoutSourceMapper)(scheme)
"QueryParser" should {
"parse complex query" in {
val query = FileUtil.loadQuery("complex-query.graphql")
val expectedAst =
Document(
Vector(
OperationDefinition(
OperationType.Query,
Some("FetchLukeAndLeiaAliased"),
Vector(
VariableDefinition(
"someVar",
NamedType("Int", Some(AstLocation(53, 2, 41))),
Some(BigDecimalValue(1.23, Vector.empty, Some(AstLocation(59, 2, 47)))),
Vector.empty,
Vector.empty,
Some(AstLocation(43, 2, 31))
),
VariableDefinition(
"anotherVar",
NamedType("Int", Some(AstLocation(77, 2, 65))),
Some(BigIntValue(123, Vector.empty, Some(AstLocation(83, 2, 71)))),
Vector.empty,
Vector.empty,
Some(AstLocation(64, 2, 52))
)
),
Vector(
Directive(
"include",
Vector(
Argument(
"if",
BooleanValue(true, Vector.empty, Some(AstLocation(100, 2, 88))),
Vector.empty,
Some(AstLocation(96, 2, 84))
)),
Vector.empty,
Some(AstLocation(87, 2, 75))
),
Directive(
"include",
Vector(
Argument(
"if",
BooleanValue(false, Vector.empty, Some(AstLocation(119, 2, 107))),
Vector.empty,
Some(AstLocation(115, 2, 103))
)),
Vector.empty,
Some(AstLocation(106, 2, 94))
)
),
Vector(
Field(
Some("luke"),
"human",
Vector(
Argument(
"id",
StringValue("1000", false, None, Vector.empty, Some(AstLocation(145, 3, 19))),
Vector.empty,
Some(AstLocation(141, 3, 15))
)),
Vector(Directive(
"include",
Vector(
Argument(
"if",
BooleanValue(true, Vector.empty, Some(AstLocation(165, 3, 39))),
Vector.empty,
Some(AstLocation(161, 3, 35))
)),
Vector.empty,
Some(AstLocation(152, 3, 26))
)),
Vector(Field(
None,
"friends",
Vector(
Argument(
"sort",
EnumValue("NAME", Vector.empty, Some(AstLocation(190, 4, 19))),
Vector.empty,
Some(AstLocation(184, 4, 13))
)),
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(176, 4, 5))
)),
Vector.empty,
Vector.empty,
Some(AstLocation(129, 3, 3))
),
Field(
Some("leia"),
"human",
Vector(
Argument(
"id",
StringValue(
"10103\\n \\u00F6 \\u00F6",
false,
None,
Vector.empty,
Some(AstLocation(223, 6, 24))),
Vector.empty,
Some(AstLocation(214, 6, 15))
)),
Vector.empty,
Vector(
Field(
None,
"name",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(249, 7, 5))
)),
Vector.empty,
Vector(Comment(" some name", Some(AstLocation(254, 7, 10)))),
Some(AstLocation(202, 6, 3))
),
InlineFragment(
Some(NamedType("User", Some(AstLocation(280, 10, 10)))),
Vector.empty,
Vector(Field(
None,
"birth",
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"day",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(297, 11, 11))
)),
Vector.empty,
Vector.empty,
Some(AstLocation(291, 11, 5))
)),
Vector.empty,
Vector.empty,
Some(AstLocation(273, 10, 3))
),
FragmentSpread("Foo", Vector.empty, Vector.empty, Some(AstLocation(309, 14, 3)))
),
Vector(Comment(" test query", Some(AstLocation(0, 1, 1)))),
Vector.empty,
Some(AstLocation(13, 2, 1))
),
FragmentDefinition(
"Foo",
NamedType("User", Some(AstLocation(335, 17, 17))),
Vector(
Directive(
"foo",
Vector(
Argument(
"bar",
BigIntValue(1, Vector.empty, Some(AstLocation(350, 17, 32))),
Vector.empty,
Some(AstLocation(345, 17, 27))
)),
Vector.empty,
Some(AstLocation(340, 17, 22))
)),
Vector(
Field(
None,
"baz",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(356, 18, 3))
)),
Vector.empty,
Vector.empty,
Vector(Comment(" field in fragment!", Some(AstLocation(360, 18, 7)))),
Some(AstLocation(319, 17, 1))
)
),
Vector.empty,
Some(AstLocation(0, 1, 1)),
None
)
parseQuery(query) should be(Success(expectedAst))
}
"parse kitchen sink" in {
val query = FileUtil.loadQuery("kitchen-sink.graphql")
val expectedAst =
Document(
Vector(
OperationDefinition(
OperationType.Query,
Some("queryName"),
Vector(
VariableDefinition(
"foo",
NamedType("ComplexType", Some(AstLocation(310, 8, 23))),
None,
Vector.empty,
Vector.empty,
Some(AstLocation(304, 8, 17))
),
VariableDefinition(
"site",
NamedType("Site", Some(AstLocation(330, 8, 43))),
Some(EnumValue("MOBILE", Vector.empty, Some(AstLocation(337, 8, 50)))),
Vector.empty,
Vector.empty,
Some(AstLocation(323, 8, 36))
)
),
Vector.empty,
Vector(
Field(
Some("whoever123is"),
"node",
Vector(Argument(
"id",
ListValue(
Vector(
BigIntValue(123, Vector.empty, Some(AstLocation(373, 9, 27))),
BigIntValue(456, Vector.empty, Some(AstLocation(378, 9, 32)))),
Vector.empty,
Some(AstLocation(372, 9, 26))
),
Vector.empty,
Some(AstLocation(368, 9, 22))
)),
Vector.empty,
Vector(
Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(390, 10, 5))
),
InlineFragment(
Some(NamedType("User", Some(AstLocation(406, 11, 12)))),
Vector(
Directive(
"defer",
Vector.empty,
Vector.empty,
Some(AstLocation(411, 11, 17))
)),
Vector(Field(
None,
"field2",
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(443, 13, 9))
),
Field(
Some("alias"),
"field1",
Vector(
Argument(
"first",
BigIntValue(10, Vector.empty, Some(AstLocation(476, 14, 29))),
Vector.empty,
Some(AstLocation(470, 14, 23))
),
Argument(
"after",
VariableValue("foo", Vector.empty, Some(AstLocation(486, 14, 39))),
Vector.empty,
Some(AstLocation(480, 14, 33))
)
),
Vector(Directive(
"include",
Vector(Argument(
"if",
VariableValue("foo", Vector.empty, Some(AstLocation(506, 14, 59))),
Vector.empty,
Some(AstLocation(502, 14, 55))
)),
Vector.empty,
Some(AstLocation(493, 14, 46))
)),
Vector(
Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(524, 15, 11))
),
FragmentSpread(
"frag",
Vector.empty,
Vector.empty,
Some(AstLocation(538, 16, 11)))
),
Vector.empty,
Vector.empty,
Some(AstLocation(456, 14, 9))
)
),
Vector.empty,
Vector.empty,
Some(AstLocation(426, 12, 7))
)),
Vector.empty,
Vector.empty,
Some(AstLocation(399, 11, 5))
)
),
Vector.empty,
Vector.empty,
Some(AstLocation(349, 9, 3))
)),
Vector(
Comment(" Copyright (c) 2015, Facebook, Inc.", Some(AstLocation(0, 1, 1))),
Comment(" All rights reserved.", Some(AstLocation(37, 2, 1))),
Comment("", Some(AstLocation(60, 3, 1))),
Comment(
" This source code is licensed under the BSD-style license found in the",
Some(AstLocation(62, 4, 1))),
Comment(
" LICENSE file in the root directory of this source tree. An additional grant",
Some(AstLocation(134, 5, 1))),
Comment(
" of patent rights can be found in the PATENTS file in the same directory.",
Some(AstLocation(212, 6, 1)))
),
Vector.empty,
Some(AstLocation(288, 8, 1))
),
OperationDefinition(
OperationType.Mutation,
Some("likeStory"),
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"like",
Vector(
Argument(
"story",
BigIntValue(123, Vector.empty, Some(AstLocation(612, 24, 15))),
Vector.empty,
Some(AstLocation(605, 24, 8))
)),
Vector(
Directive(
"defer",
Vector.empty,
Vector.empty,
Some(AstLocation(617, 24, 20))
)),
Vector(Field(
None,
"story",
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(644, 26, 7))
)),
Vector.empty,
Vector.empty,
Some(AstLocation(630, 25, 5))
)),
Vector.empty,
Vector.empty,
Some(AstLocation(600, 24, 3))
)),
Vector.empty,
Vector.empty,
Some(AstLocation(577, 23, 1))
),
OperationDefinition(
OperationType.Subscription,
Some("StoryLikeSubscription"),
Vector(
VariableDefinition(
"input",
NamedType("StoryLikeSubscribeInput", Some(AstLocation(703, 31, 44))),
None,
Vector.empty,
Vector.empty,
Some(AstLocation(695, 31, 36))
)),
Vector.empty,
Vector(
Field(
None,
"storyLikeSubscribe",
Vector(
Argument(
"input",
VariableValue("input", Vector.empty, Some(AstLocation(758, 32, 29))),
Vector.empty,
Some(AstLocation(751, 32, 22))
)),
Vector.empty,
Vector(Field(
None,
"story",
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"likers",
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"count",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(803, 35, 9))
)),
Vector.empty,
Vector.empty,
Some(AstLocation(786, 34, 7))
),
Field(
None,
"likeSentence",
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"text",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(846, 38, 9))
)),
Vector.empty,
Vector.empty,
Some(AstLocation(823, 37, 7))
)
),
Vector.empty,
Vector.empty,
Some(AstLocation(772, 33, 5))
)),
Vector.empty,
Vector.empty,
Some(AstLocation(732, 32, 3))
)),
Vector.empty,
Vector.empty,
Some(AstLocation(660, 31, 1))
),
FragmentDefinition(
"frag",
NamedType("Friend", Some(AstLocation(889, 44, 18))),
Vector.empty,
Vector(
Field(
None,
"foo",
Vector(
Argument(
"size",
VariableValue("size", Vector.empty, Some(AstLocation(910, 45, 13))),
Vector.empty,
Some(AstLocation(904, 45, 7))
),
Argument(
"bar",
VariableValue("b", Vector.empty, Some(AstLocation(922, 45, 25))),
Vector.empty,
Some(AstLocation(917, 45, 20))
),
Argument(
"obj",
ObjectValue(
Vector(
ObjectField(
"key",
StringValue(
"value",
false,
None,
Vector.empty,
Some(AstLocation(937, 45, 40))),
Vector.empty,
Some(AstLocation(932, 45, 35))
)),
Vector.empty,
Some(AstLocation(931, 45, 34))
),
Vector.empty,
Some(AstLocation(926, 45, 29))
)
),
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(900, 45, 3))
)),
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(872, 44, 1))
),
OperationDefinition(
OperationType.Query,
None,
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"unnamed",
Vector(
Argument(
"truthy",
BooleanValue(true, Vector.empty, Some(AstLocation(970, 49, 19))),
Vector.empty,
Some(AstLocation(962, 49, 11))
),
Argument(
"falsey",
BooleanValue(false, Vector.empty, Some(AstLocation(984, 49, 33))),
Vector.empty,
Some(AstLocation(976, 49, 25))
)
),
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(954, 49, 3))
),
Field(
None,
"query",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(994, 50, 3))
),
InlineFragment(
None,
Vector(Directive(
"skip",
Vector(
Argument(
"unless",
VariableValue("foo", Vector.empty, Some(AstLocation(1021, 52, 21))),
Vector.empty,
Some(AstLocation(1013, 52, 13))
)),
Vector.empty,
Some(AstLocation(1007, 52, 7))
)),
Vector(
Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(1033, 53, 5))
)),
Vector.empty,
Vector.empty,
Some(AstLocation(1003, 52, 3))
),
InlineFragment(
None,
Vector.empty,
Vector(
Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(1052, 56, 5))
)),
Vector.empty,
Vector.empty,
Some(AstLocation(1042, 55, 3))
)
),
Vector.empty,
Vector.empty,
Some(AstLocation(950, 48, 1))
)
),
Vector.empty,
Some(AstLocation(0, 1, 1)),
None
)
parseQuery(query) should be(Success(expectedAst))
}
"parse kitchen sink without comments and locations" in {
val config =
ParserConfig.default.withoutLocations.withoutComments.withEmptySourceId.withoutSourceMapper
val query = FileUtil.loadQuery("kitchen-sink.graphql")
val expectedAst =
Document(
Vector(
OperationDefinition(
OperationType.Query,
Some("queryName"),
Vector(
VariableDefinition(
"foo",
NamedType("ComplexType", None),
None,
Vector.empty,
Vector.empty,
None
),
VariableDefinition(
"site",
NamedType("Site", None),
Some(EnumValue("MOBILE", Vector.empty, None)),
Vector.empty,
Vector.empty,
None
)
),
Vector.empty,
Vector(
Field(
Some("whoever123is"),
"node",
Vector(
Argument(
"id",
ListValue(
Vector(
BigIntValue(123, Vector.empty, None),
BigIntValue(456, Vector.empty, None)),
Vector.empty,
None
),
Vector.empty,
None
)),
Vector.empty,
Vector(
Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
None
),
InlineFragment(
Some(NamedType("User", None)),
Vector(
Directive(
"defer",
Vector.empty,
Vector.empty,
None
)),
Vector(Field(
None,
"field2",
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
None
),
Field(
Some("alias"),
"field1",
Vector(
Argument(
"first",
BigIntValue(10, Vector.empty, None),
Vector.empty,
None
),
Argument(
"after",
VariableValue("foo", Vector.empty, None),
Vector.empty,
None
)),
Vector(
Directive(
"include",
Vector(Argument(
"if",
VariableValue("foo", Vector.empty, None),
Vector.empty,
None
)),
Vector.empty,
None
)),
Vector(
Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
None
),
FragmentSpread("frag", Vector.empty, Vector.empty, None)),
Vector.empty,
Vector.empty,
None
)
),
Vector.empty,
Vector.empty,
None
)),
Vector.empty,
Vector.empty,
None
)
),
Vector.empty,
Vector.empty,
None
)),
Vector.empty,
Vector.empty,
None
),
OperationDefinition(
OperationType.Mutation,
Some("likeStory"),
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"like",
Vector(
Argument(
"story",
BigIntValue(123, Vector.empty, None),
Vector.empty,
None
)),
Vector(
Directive(
"defer",
Vector.empty,
Vector.empty,
None
)),
Vector(Field(
None,
"story",
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
None
)),
Vector.empty,
Vector.empty,
None
)),
Vector.empty,
Vector.empty,
None
)),
Vector.empty,
Vector.empty,
None
),
OperationDefinition(
OperationType.Subscription,
Some("StoryLikeSubscription"),
Vector(
VariableDefinition(
"input",
NamedType("StoryLikeSubscribeInput", None),
None,
Vector.empty,
Vector.empty,
None
)),
Vector.empty,
Vector(
Field(
None,
"storyLikeSubscribe",
Vector(
Argument(
"input",
VariableValue("input", Vector.empty, None),
Vector.empty,
None
)),
Vector.empty,
Vector(Field(
None,
"story",
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"likers",
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"count",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
None
)),
Vector.empty,
Vector.empty,
None
),
Field(
None,
"likeSentence",
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"text",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
None
)),
Vector.empty,
Vector.empty,
None
)
),
Vector.empty,
Vector.empty,
None
)),
Vector.empty,
Vector.empty,
None
)),
Vector.empty,
Vector.empty,
None
),
FragmentDefinition(
"frag",
NamedType("Friend", None),
Vector.empty,
Vector(
Field(
None,
"foo",
Vector(
Argument(
"size",
VariableValue("size", Vector.empty, None),
Vector.empty,
None
),
Argument(
"bar",
VariableValue("b", Vector.empty, None),
Vector.empty,
None
),
Argument(
"obj",
ObjectValue(
Vector(
ObjectField(
"key",
StringValue("value", false, None, Vector.empty, None),
Vector.empty,
None
)),
Vector.empty,
None
),
Vector.empty,
None
)
),
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
None
)),
Vector.empty,
Vector.empty,
Vector.empty,
None
),
OperationDefinition(
OperationType.Query,
None,
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"unnamed",
Vector(
Argument(
"truthy",
BooleanValue(true, Vector.empty, None),
Vector.empty,
None
),
Argument(
"falsey",
BooleanValue(false, Vector.empty, None),
Vector.empty,
None
)),
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
None
),
Field(
None,
"query",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
None
),
InlineFragment(
None,
Vector(
Directive(
"skip",
Vector(
Argument(
"unless",
VariableValue("foo", Vector.empty, None),
Vector.empty,
None
)),
Vector.empty,
None
)),
Vector(
Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
None
)),
Vector.empty,
Vector.empty,
None
),
InlineFragment(
None,
Vector.empty,
Vector(
Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
None
)),
Vector.empty,
Vector.empty,
None
)
),
Vector.empty,
Vector.empty,
None
)
),
Vector.empty,
None,
None
)
QueryParser.parse(query, config) should be(Success(expectedAst))
}
"parse anonymous query" in {
val query =
"""
query {
foo bar,
baz
}
"""
val expectedAst =
Document(
Vector(
OperationDefinition(
OperationType.Query,
None,
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"foo",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(31, 3, 13))),
Field(
None,
"bar",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(35, 3, 17))),
Field(
None,
"baz",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(52, 4, 13)))
),
Vector.empty,
Vector.empty,
Some(AstLocation(11, 2, 11))
)),
Vector.empty,
Some(AstLocation(11, 2, 11)),
None
)
parseQuery(stripCarriageReturns(query)) should be(Success(expectedAst))
}
"parse inline fragments without type condition" in {
val query =
"""
query {
... {
foo bar
}
... @include(if: true) {
baz
}
}
"""
val expectedAst =
Document(
Vector(
OperationDefinition(
OperationType.Query,
None,
Vector.empty,
Vector.empty,
Vector(
InlineFragment(
None,
Vector.empty,
Vector(
Field(
None,
"foo",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(51, 4, 15))),
Field(
None,
"bar",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(55, 4, 19)))
),
Vector.empty,
Vector.empty,
Some(AstLocation(31, 3, 13))
),
InlineFragment(
None,
Vector(Directive(
"include",
Vector(
Argument(
"if",
BooleanValue(true, Vector.empty, Some(AstLocation(103, 7, 30))),
Vector.empty,
Some(AstLocation(99, 7, 26)))),
Vector.empty,
Some(AstLocation(90, 7, 17))
)),
Vector(
Field(
None,
"baz",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(125, 8, 15)))),
Vector.empty,
Vector.empty,
Some(AstLocation(86, 7, 13))
)
),
Vector.empty,
Vector.empty,
Some(AstLocation(11, 2, 11))
)),
Vector.empty,
Some(AstLocation(11, 2, 11)),
None
)
parseQuery(stripCarriageReturns(query)) should be(Success(expectedAst))
}
"parse anonymous mutation" in {
val query =
"""
mutation {
foo bar,
baz
}
"""
val expectedAst =
Document(
Vector(
OperationDefinition(
OperationType.Mutation,
None,
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"foo",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(34, 3, 13))),
Field(
None,
"bar",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(38, 3, 17))),
Field(
None,
"baz",
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(55, 4, 13)))
),
Vector.empty,
Vector.empty,
Some(AstLocation(11, 2, 11))
)),
Vector.empty,
Some(AstLocation(11, 2, 11)),
None
)
parseQuery(stripCarriageReturns(query)) should be(Success(expectedAst))
}
"provide useful error message (fragment `on`)" in {
val Failure(error: SyntaxError) = parseQuery("""
{ ...MissingOn }
fragment MissingOn Type
""")
error.formattedError should equal(
"""Invalid input 'T', expected ExperimentalFragmentVariables or TypeCondition (line 3, column 30):
| fragment MissingOn Type
| ^""".stripMargin)(after.being(strippedOfCarriageReturns))
}
"provide useful error message (braces)" in {
val Failure(error: SyntaxError) = parseQuery("{ field: {} }")
error.formattedError should equal(
"""Invalid input "{ field: {", expected ExecutableDefinition or TypeSystemDefinition (line 1, column 1):
|{ field: {} }
|^""".stripMargin)(after.being(strippedOfCarriageReturns))
}
"provide useful error message (operation def)" in {
val Failure(error: SyntaxError) = parseQuery("notanoperation Foo { field }")
error.formattedError should equal(
"""Invalid input 'n', expected ExecutableDefinition or TypeSystemDefinition (line 1, column 1):
|notanoperation Foo { field }
|^""".stripMargin)(after.being(strippedOfCarriageReturns))
}
"provide useful error message (ellipsis)" in {
val Failure(error: SyntaxError) = parseQuery("...")
error.formattedError should equal(
"""Invalid input '.', expected ExecutableDefinition or TypeSystemDefinition (line 1, column 1):
|...
|^""".stripMargin)(after.being(strippedOfCarriageReturns))
}
"parses constant default values" in {
parseQuery("{ field(complex: { a: { b: [ $var ] } }) }").isSuccess should be(true)
}
"parses variable definition directives" in {
parseQuery("query Foo($x: Boolean = false @bar) { field }").isSuccess should be(true)
}
"parses variable inline values" in {
val Failure(error: SyntaxError) =
parseQuery("query Foo($x: Complex = { a: { b: [ $var ] } }) { field }")
error.getMessage should equal(
"""Syntax error while parsing GraphQL query. Invalid input '$', expected NumberValue, StringValue, BooleanValue, NullValue, EnumValue, ListValueConst or ObjectValueConst (line 1, column 37):
|query Foo($x: Complex = { a: { b: [ $var ] } }) { field }
| ^""".stripMargin)(
after.being(strippedOfCarriageReturns))
}
"produce parse error for `1.`" in {
val Failure(error: SyntaxError) = parseQuery("query Foo($x: Complex = 1.) { field }")
error.formattedError should equal(
"""Invalid input "1.)", expected ValueConst, DirectivesConst or VariableDefinition (line 1, column 25):
|query Foo($x: Complex = 1.) { field }
| ^""".stripMargin)(after.being(strippedOfCarriageReturns))
}
"produce parse error for `.123`" in {
val Failure(error: SyntaxError) = parseQuery("query Foo($x: Complex = .123) { field }")
error.formattedError should equal(
"""Invalid input '.', expected NumberValue, StringValue, BooleanValue, NullValue, EnumValue, ListValueConst or ObjectValueConst (line 1, column 25):
|query Foo($x: Complex = .123) { field }
| ^""".stripMargin)(after.being(strippedOfCarriageReturns))
}
"produce parse error for `1.0e`" in {
val Failure(error: SyntaxError) = parseQuery("query Foo($x: Complex = 1.0e) { field }")
error.formattedError should equal(
"""Invalid input "1.0e)", expected ValueConst, DirectivesConst or VariableDefinition (line 1, column 25):
|query Foo($x: Complex = 1.0e) { field }
| ^""".stripMargin)(after.being(strippedOfCarriageReturns))
}
"produce parse error for `1.A`" in {
val Failure(error: SyntaxError) = parseQuery("query Foo($x: Complex = 1.A) { field }")
error.formattedError should equal(
"""Invalid input "1.A", expected ValueConst, DirectivesConst or VariableDefinition (line 1, column 25):
|query Foo($x: Complex = 1.A) { field }
| ^""".stripMargin)(after.being(strippedOfCarriageReturns))
}
"produce parse error for `+1`" in {
val Failure(error: SyntaxError) = parseQuery("query Foo($x: Complex = +1) { field }")
error.formattedError should equal(
"""Invalid input '+', expected NumberValue, StringValue, BooleanValue, NullValue, EnumValue, ListValueConst or ObjectValueConst (line 1, column 25):
|query Foo($x: Complex = +1) { field }
| ^""".stripMargin)(after.being(strippedOfCarriageReturns))
}
"produce parse error for `1.0eA`" in {
val Failure(error: SyntaxError) = parseQuery("query Foo($x: Complex = 1.0eA) { field }")
error.formattedError should equal(
"""Invalid input "1.0eA", expected ValueConst, DirectivesConst or VariableDefinition (line 1, column 25):
|query Foo($x: Complex = 1.0eA) { field }
| ^""".stripMargin)(after.being(strippedOfCarriageReturns))
}
"disallows uncommon control characters" in {
parseQuery("{ field\\u0007 }").isSuccess should be(false)
parseQuery("{ field } \\u0007").isSuccess should be(false)
}
"accepts BOM header" in {
parseQuery("\\uFEFF{ field }").isSuccess should be(true)
}
"accepts new lines header" in {
parseQuery("{ field \\n another }").isSuccess should be(true)
parseQuery("{ field \\r\\n another }").isSuccess should be(true)
}
"accepts escape sequences" in {
parseQuery("{ field(id: \\"\\\\u000A\\") }").isSuccess should be(true)
parseQuery("{ field(id: \\"\\\\uXXXX\\") }").isSuccess should be(false)
parseQuery("{ field(id: \\"\\\\x\\") }").isSuccess should be(false)
}
"allow `null` to be the prefix of an enum value" in {
parseQuery("query Foo($x: Complex = null111) { field }").isSuccess should be(true)
parseQuery("query Foo($x: Complex = null_foo) { field }").isSuccess should be(true)
parseQuery("query Foo($x: Complex = nullFoo) { field }").isSuccess should be(true)
}
"parse leading vertical bar in union types" in {
val Success(ast) = parseQuery("union Hello = | Wo | Rld")
ast.withoutSourceMapper should be(
Document(
Vector(UnionTypeDefinition(
"Hello",
Vector(
NamedType("Wo", Some(AstLocation(16, 1, 17))),
NamedType("Rld", Some(AstLocation(21, 1, 22)))),
Vector.empty,
None,
Vector.empty,
Some(AstLocation(0, 1, 1))
)),
Vector.empty,
Some(AstLocation(0, 1, 1)),
None
))
}
"not parse invalid usage of vertical bar on union types" in {
parseQuery("union Hello = |").isSuccess should be(false)
parseQuery("union Hello = Wo | Rld |").isSuccess should be(false)
parseQuery("union Hello = || Wo | Rld").isSuccess should be(false)
parseQuery("union Hello = Wo || Rld").isSuccess should be(false)
parseQuery("union Hello = | Wo | Rld ||").isSuccess should be(false)
}
"parse leading vertical bar in directive definitions" in {
val Success(ast) = parseQuery("""
directive @include2(if: Boolean!) on
| FIELD
| FRAGMENT_SPREAD
| INLINE_FRAGMENT
""".stripCR)
ast.withoutSourceMapper should be(
Document(
Vector(DirectiveDefinition(
"include2",
Vector(InputValueDefinition(
"if",
NotNullType(
NamedType("Boolean", Some(AstLocation(33, 2, 33))),
Some(AstLocation(33, 2, 33))),
None,
Vector.empty,
None,
Vector.empty,
Some(AstLocation(29, 2, 29))
)),
Vector(
DirectiveLocation("FIELD", Vector.empty, Some(AstLocation(58, 3, 13))),
DirectiveLocation("FRAGMENT_SPREAD", Vector.empty, Some(AstLocation(76, 4, 13))),
DirectiveLocation("INLINE_FRAGMENT", Vector.empty, Some(AstLocation(104, 5, 13)))
),
None,
Vector.empty,
Some(AstLocation(9, 2, 9))
)),
Vector.empty,
Some(AstLocation(9, 2, 9)),
None
))
}
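    // Recursively searches the parsed AST for the first node of the requested type, descending
    // only through operation definitions and variable default values (enough for the value tests below).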
def findAst[T <: AstNode: ClassTag](ast: AstNode): Option[T] =
ast match {
case node if implicitly[ClassTag[T]].runtimeClass.isAssignableFrom(node.getClass) =>
Some(node.asInstanceOf[T])
case Document(defs, _, _, _) => defs.map(findAst[T]).find(_.isDefined) flatten
case OperationDefinition(_, _, vars, _, _, _, _, _) =>
vars.map(findAst[T]).find(_.isDefined) flatten
case VariableDefinition(_, _, default, _, _, _) => default.flatMap(findAst[T])
case _ => None
}
"parse int values" in {
val expectedTable = Vector(
"4" -> BigInt("4"),
"-4" -> BigInt("-4"),
"9" -> BigInt("9"),
"0" -> BigInt("0"),
"784236564875237645762347623147574756321" -> BigInt(
"784236564875237645762347623147574756321")
)
expectedTable.foreach { expected =>
findAst[BigIntValue](
parseQuery(s"query Foo($$x: Complex = ${expected._1}) { field }").get) should be(
Some(BigIntValue(expected._2, Vector.empty, Some(AstLocation(24, 1, 25)))))
}
}
"parse float values" in {
val expectedTable = Vector(
"4.123" -> BigDecimal("4.123"),
"-4.123" -> BigDecimal("-4.123"),
"0.123" -> BigDecimal("0.123"),
"123E4" -> BigDecimal("123E4"),
"123e-4" -> BigDecimal("123e-4"),
"-1.123e4" -> BigDecimal("-1.123e4"),
"-1.123E4" -> BigDecimal("-1.123E4"),
"-1.123e+4" -> BigDecimal("-1.123e+4"),
"-1.123e4567" -> BigDecimal("-1.123e4567")
)
expectedTable.foreach { expected =>
withClue(s"Parsing ${expected._1}.") {
findAst[BigDecimalValue](
parseQuery(s"query Foo($$x: Complex = ${expected._1}) { field }").get) should be(
Some(BigDecimalValue(expected._2, Vector.empty, Some(AstLocation(24, 1, 25)))))
}
}
}
"parse block string values" in {
val q = "\\"\\"\\""
val stringValue =
s"""
$q
hello,
world
$q
"""
QueryParser.parseInput(stripCarriageReturns(stringValue)) should be(
Success(
StringValue(
"hello,\\n world",
true,
Some("\\n hello,\\n world\\n "),
Vector.empty,
Some(AstLocation(11, 2, 11)))))
}
"parse input values independently" in {
val expectedTable = Vector(
"null" -> NullValue(Vector.empty, Some(AstLocation(0, 1, 1))),
"1.234" -> BigDecimalValue(BigDecimal("1.234"), Vector.empty, Some(AstLocation(0, 1, 1))),
"HELLO_WORLD" -> EnumValue("HELLO_WORLD", Vector.empty, Some(AstLocation(0, 1, 1))),
"[1, 2 \\"test\\"]" -> ListValue(
Vector(
BigIntValue(1, Vector.empty, Some(AstLocation(1, 1, 2))),
BigIntValue(2, Vector.empty, Some(AstLocation(4, 1, 5))),
StringValue("test", false, None, Vector.empty, Some(AstLocation(6, 1, 7)))
),
Vector.empty,
Some(AstLocation(0, 1, 1))
),
"{a: 1, b: \\"foo\\" c: {nest: true, oops: null, e: FOO_BAR}}" ->
ObjectValue(
Vector(
ObjectField(
"a",
BigIntValue(1, Vector.empty, Some(AstLocation(4, 1, 5))),
Vector.empty,
Some(AstLocation(1, 1, 2))),
ObjectField(
"b",
StringValue("foo", false, None, Vector.empty, Some(AstLocation(10, 1, 11))),
Vector.empty,
Some(AstLocation(7, 1, 8))),
ObjectField(
"c",
ObjectValue(
Vector(
ObjectField(
"nest",
BooleanValue(true, Vector.empty, Some(AstLocation(26, 1, 27))),
Vector.empty,
Some(AstLocation(20, 1, 21))),
ObjectField(
"oops",
NullValue(Vector.empty, Some(AstLocation(38, 1, 39))),
Vector.empty,
Some(AstLocation(32, 1, 33))),
ObjectField(
"e",
EnumValue("FOO_BAR", Vector.empty, Some(AstLocation(47, 1, 48))),
Vector.empty,
Some(AstLocation(44, 1, 45)))
),
Vector.empty,
Some(AstLocation(19, 1, 20))
),
Vector.empty,
Some(AstLocation(16, 1, 17))
)
),
Vector.empty,
Some(AstLocation(0, 1, 1))
),
"""
{
a: 1
# This is a test comment!
b: "foo"
}
""" ->
ObjectValue(
Vector(
ObjectField(
"a",
BigIntValue(1, Vector.empty, Some(AstLocation(26, 3, 15))),
Vector.empty,
Some(AstLocation(23, 3, 12))),
ObjectField(
"b",
StringValue("foo", false, None, Vector.empty, Some(AstLocation(80, 6, 15))),
Vector(Comment(" This is a test comment!", Some(AstLocation(40, 5, 12)))),
Some(AstLocation(77, 6, 12))
)
),
Vector.empty,
Some(AstLocation(10, 2, 10))
)
)
expectedTable.foreach { expected =>
withClue(s"Parsing ${expected._1}.") {
QueryParser.parseInput(stripCarriageReturns(expected._1)) should equal(
Success(expected._2))
}
}
}
"parse and collect comments in AST nodes" in {
val query = FileUtil.loadQuery("too-many-comments.graphql")
val expected =
Document(
Vector(
OperationDefinition(
OperationType.Query,
Some("queryName"),
Vector(
VariableDefinition(
"foo",
NamedType("ComplexType", Some(AstLocation(434, 23, 1))),
None,
Vector.empty,
Vector(
Comment(" comment 5", Some(AstLocation(354, 15, 1))),
Comment(" comment 6", Some(AstLocation(366, 16, 1)))),
Some(AstLocation(378, 17, 1))
),
VariableDefinition(
"site",
NamedType("Site", Some(AstLocation(565, 36, 1))),
Some(EnumValue(
"MOBILE",
Vector(
Comment(" comment 16.5", Some(AstLocation(602, 40, 1))),
Comment(" comment 16.6", Some(AstLocation(617, 41, 1)))),
Some(AstLocation(632, 42, 1))
)),
Vector.empty,
Vector(
Comment(" comment 11", Some(AstLocation(446, 24, 1))),
Comment(" comment 12", Some(AstLocation(459, 25, 1))),
Comment(" comment 13", Some(AstLocation(475, 28, 1))),
Comment(" comment 14", Some(AstLocation(488, 29, 1)))
),
Some(AstLocation(501, 30, 1))
),
VariableDefinition(
"foo",
NamedType("ComplexType", Some(AstLocation(703, 48, 7))),
Some(ObjectValue(
Vector(
ObjectField(
"field1",
StringValue(
"val",
false,
None,
Vector(
Comment(" comment 18.11", Some(AstLocation(849, 61, 1))),
Comment(" comment 18.12", Some(AstLocation(865, 62, 1)))),
Some(AstLocation(881, 63, 1))
),
Vector(
Comment(" comment 18.7", Some(AstLocation(779, 55, 1))),
Comment(" comment 18.8", Some(AstLocation(794, 56, 1)))),
Some(AstLocation(809, 57, 1))
),
ObjectField(
"list",
ListValue(
Vector(
BigIntValue(
1,
Vector(
Comment(" comment 18.21", Some(AstLocation(1026, 76, 1))),
Comment(" comment 18.22", Some(AstLocation(1042, 77, 1)))),
Some(AstLocation(1058, 78, 1))),
BigIntValue(
2,
Vector(
Comment(" comment 18.23", Some(AstLocation(1061, 79, 1))),
Comment(" comment 18.24", Some(AstLocation(1077, 80, 1)))),
Some(AstLocation(1093, 81, 1))),
BigIntValue(
3,
Vector(
Comment(" comment 18.25", Some(AstLocation(1096, 82, 1))),
Comment(" comment 18.26", Some(AstLocation(1112, 83, 1)))),
Some(AstLocation(1128, 84, 1)))
),
Vector(
Comment(" comment 18.19", Some(AstLocation(992, 73, 1))),
Comment(" comment 18.20", Some(AstLocation(1008, 74, 1)))),
Some(AstLocation(1024, 75, 1))
),
Vector(
Comment(" comment 18.13", Some(AstLocation(887, 64, 1))),
Comment(" comment 18.14", Some(AstLocation(903, 65, 1))),
Comment(" comment 18.15", Some(AstLocation(921, 67, 1))),
Comment(" comment 18.16", Some(AstLocation(937, 68, 1)))
),
Some(AstLocation(953, 69, 1))
),
ObjectField(
"field2",
BooleanValue(
true,
Vector(
Comment(" comment 18.35", Some(AstLocation(1271, 97, 1))),
Comment(" comment 18.36", Some(AstLocation(1287, 98, 1)))),
Some(AstLocation(1303, 99, 1))
),
Vector(
Comment(" comment 18.29", Some(AstLocation(1164, 88, 1))),
Comment(" comment 18.30", Some(AstLocation(1180, 89, 1))),
Comment(" comment 18.31", Some(AstLocation(1198, 91, 1))),
Comment(" comment 18.32", Some(AstLocation(1214, 92, 1)))
),
Some(AstLocation(1230, 93, 1))
)
),
Vector(
Comment(" comment 18.5", Some(AstLocation(747, 52, 1))),
Comment(" comment 18.6", Some(AstLocation(762, 53, 1)))),
Some(AstLocation(777, 54, 1))
)),
Vector.empty,
Vector(
Comment(" comment 17", Some(AstLocation(639, 43, 1))),
Comment(" comment 18", Some(AstLocation(652, 44, 1))),
Comment(" comment 18.1", Some(AstLocation(667, 46, 1))),
Comment(" comment 18.2", Some(AstLocation(682, 47, 1)))
),
Some(AstLocation(697, 48, 1))
)
),
Vector.empty,
Vector(
Field(
Some("whoever123is"),
"node",
Vector(Argument(
"id",
ListValue(
Vector(
BigIntValue(
123,
Vector(
Comment(" comment 35", Some(AstLocation(1660, 130, 3))),
Comment(" comment 36", Some(AstLocation(1675, 131, 3)))),
Some(AstLocation(1690, 132, 3))),
BigIntValue(
456,
Vector(
Comment(" comment 37", Some(AstLocation(1696, 133, 3))),
Comment(" comment 38", Some(AstLocation(1711, 134, 3)))),
Some(AstLocation(1726, 135, 3)))
),
Vector(
Comment(" comment 33", Some(AstLocation(1626, 127, 3))),
Comment(" comment 34", Some(AstLocation(1641, 128, 3)))),
Some(AstLocation(1656, 129, 3))
),
Vector(
Comment(" comment 29", Some(AstLocation(1557, 121, 3))),
Comment(" comment 30", Some(AstLocation(1572, 122, 3)))),
Some(AstLocation(1587, 123, 3))
)),
Vector.empty,
Vector(
Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector(
Comment(" comment 44", Some(AstLocation(1837, 145, 4))),
Comment(" comment 45", Some(AstLocation(1853, 146, 4)))),
Vector.empty,
Some(AstLocation(1870, 147, 5))
),
InlineFragment(
Some(NamedType("User", Some(AstLocation(1996, 156, 5)))),
Vector(Directive(
"defer",
Vector.empty,
Vector(
Comment(" comment 52", Some(AstLocation(2005, 157, 5))),
Comment(" comment 53", Some(AstLocation(2022, 158, 5)))),
Some(AstLocation(2039, 159, 5))
)),
Vector(Field(
None,
"field2",
Vector.empty,
Vector.empty,
Vector(Field(
Some("alias"),
"field1",
Vector(
Argument(
"first",
BigIntValue(
10,
Vector(
Comment(" comment 70", Some(AstLocation(2474, 185, 9))),
Comment(" comment 71", Some(AstLocation(2495, 186, 9)))),
Some(AstLocation(2516, 187, 9))),
Vector(
Comment(" comment 66", Some(AstLocation(2366, 179, 9))),
Comment(" comment 67", Some(AstLocation(2387, 180, 9)))),
Some(AstLocation(2408, 181, 9))
),
Argument(
"after",
VariableValue(
"foo",
Vector(
Comment(" comment 76", Some(AstLocation(2636, 194, 9))),
Comment(" comment 77", Some(AstLocation(2657, 195, 9)))),
Some(AstLocation(2678, 196, 9))),
Vector(
Comment(" comment 72", Some(AstLocation(2528, 188, 9))),
Comment(" comment 73", Some(AstLocation(2549, 189, 9)))),
Some(AstLocation(2570, 190, 9))
)
),
Vector(Directive(
"include",
Vector(Argument(
"if",
VariableValue(
"foo",
Vector(
Comment(" comment 88", Some(AstLocation(2961, 212, 10))),
Comment(" comment 89", Some(AstLocation(2983, 213, 10)))),
Some(AstLocation(3005, 214, 10))
),
Vector(
Comment(" comment 84", Some(AstLocation(2855, 206, 9))),
Comment(" comment 85", Some(AstLocation(2876, 207, 9)))),
Some(AstLocation(2897, 208, 9))
)),
Vector(
Comment(" comment 80", Some(AstLocation(2744, 200, 9))),
Comment(" comment 81", Some(AstLocation(2765, 201, 9)))),
Some(AstLocation(2786, 202, 9))
)),
Vector(
Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector(
Comment(" comment 94", Some(AstLocation(3130, 221, 11))),
Comment(" comment 95", Some(AstLocation(3153, 222, 11)))),
Vector.empty,
Some(AstLocation(3176, 223, 11))
),
FragmentSpread(
"frag",
Vector.empty,
Vector(
Comment(" comment 96", Some(AstLocation(3190, 224, 11))),
Comment(" comment 97", Some(AstLocation(3213, 225, 11)))),
Some(AstLocation(3237, 227, 11))
)
),
Vector(
Comment(" comment 58", Some(AstLocation(2151, 167, 7))),
Comment(" comment 59", Some(AstLocation(2170, 168, 7)))),
Vector(
Comment(" comment 100", Some(AstLocation(3312, 231, 11))),
Comment(" comment 101", Some(AstLocation(3336, 232, 11)))),
Some(AstLocation(2191, 169, 9))
)),
Vector.empty,
Vector(
Comment(" comment 102", Some(AstLocation(3368, 234, 9))),
Comment(" comment 103", Some(AstLocation(3390, 235, 9)))),
Some(AstLocation(2092, 163, 7))
)),
Vector(
Comment(" comment 46", Some(AstLocation(1879, 148, 5))),
Comment(" comment 47", Some(AstLocation(1896, 149, 5)))),
Vector(
Comment(" comment 104", Some(AstLocation(3418, 237, 7))),
Comment(" comment 105", Some(AstLocation(3438, 238, 7)))),
Some(AstLocation(1913, 150, 5))
)
),
Vector(
Comment(" comment 21", Some(AstLocation(1408, 109, 2))),
Comment(" comment 22", Some(AstLocation(1422, 110, 2)))),
Vector(
Comment(" comment 106", Some(AstLocation(3462, 240, 5))),
Comment(" comment 107", Some(AstLocation(3480, 241, 5)))),
Some(AstLocation(1437, 111, 3))
)),
Vector(
Comment(" Copyright (c) 2015, Facebook, Inc.", Some(AstLocation(0, 1, 1))),
Comment(" All rights reserved.", Some(AstLocation(37, 2, 1))),
Comment("", Some(AstLocation(60, 3, 1))),
Comment(
" This source code is licensed under the BSD-style license found in the",
Some(AstLocation(62, 4, 1))),
Comment(
" LICENSE file in the root directory of this source tree. An additional grant",
Some(AstLocation(134, 5, 1))),
Comment(
" of patent rights can be found in the PATENTS file in the same directory.",
Some(AstLocation(212, 6, 1)))
),
Vector(
Comment(" comment 108", Some(AstLocation(3500, 243, 3))),
Comment(" comment 109", Some(AstLocation(3516, 244, 3)))),
Some(AstLocation(288, 8, 1))
),
OperationDefinition(
OperationType.Mutation,
Some("likeStory"),
Vector.empty,
Vector.empty,
Vector(
Field(
None,
"like",
Vector(Argument(
"story",
BigIntValue(
123,
Vector(
Comment(" comment 124", Some(AstLocation(3793, 268, 3))),
Comment(" comment 125", Some(AstLocation(3809, 269, 3)))),
Some(AstLocation(3825, 270, 3))),
Vector(
Comment(" comment 120", Some(AstLocation(3717, 262, 3))),
Comment(" comment 121", Some(AstLocation(3733, 263, 3)))),
Some(AstLocation(3749, 264, 3))
)),
Vector(Directive(
"defer",
Vector.empty,
Vector(
Comment(" comment 128", Some(AstLocation(3867, 274, 3))),
Comment(" comment 129", Some(AstLocation(3883, 275, 3)))),
Some(AstLocation(3899, 276, 3))
)),
Vector(Field(
None,
"story",
Vector.empty,
Vector.empty,
Vector(Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector(
Comment(" comment 136", Some(AstLocation(4030, 286, 5))),
Comment(" comment 137", Some(AstLocation(4048, 287, 5))),
Comment(" comment 138", Some(AstLocation(4067, 289, 5))),
Comment(" comment 139", Some(AstLocation(4085, 290, 5)))
),
Vector.empty,
Some(AstLocation(4105, 291, 7))
)),
Vector(
Comment(" comment 132", Some(AstLocation(3944, 280, 3))),
Comment(" comment 133", Some(AstLocation(3960, 281, 3)))),
Vector(
Comment(" comment 140", Some(AstLocation(4114, 292, 7))),
Comment(" comment 141", Some(AstLocation(4134, 293, 7)))),
Some(AstLocation(3978, 282, 5))
)),
Vector(
Comment(" comment 116", Some(AstLocation(3644, 256, 1))),
Comment(" comment 117", Some(AstLocation(3658, 257, 1)))),
Vector(
Comment(" comment 142", Some(AstLocation(4158, 295, 5))),
Comment(" comment 143", Some(AstLocation(4176, 296, 5)))),
Some(AstLocation(3674, 258, 3))
)),
Vector(
Comment(" comment 110", Some(AstLocation(3536, 247, 4))),
Comment(" comment 111", Some(AstLocation(3553, 248, 4)))),
Vector(
Comment(" comment 144", Some(AstLocation(4196, 298, 3))),
Comment(" comment 145", Some(AstLocation(4212, 299, 3)))),
Some(AstLocation(3567, 249, 1))
),
FragmentDefinition(
"frag",
NamedType("Friend", Some(AstLocation(4358, 312, 1))),
Vector.empty,
Vector(
InlineFragment(
None,
Vector(Directive(
"skip",
Vector(Argument(
"unless",
VariableValue(
"foo",
Vector(
Comment(" comment 168", Some(AstLocation(4613, 334, 3))),
Comment(" comment 169", Some(AstLocation(4629, 335, 3)))),
Some(AstLocation(4645, 336, 3))
),
Vector(
Comment(" comment 164", Some(AstLocation(4536, 328, 3))),
Comment(" comment 165", Some(AstLocation(4552, 329, 3)))),
Some(AstLocation(4568, 330, 3))
)),
Vector(
Comment(" comment 160", Some(AstLocation(4460, 322, 3))),
Comment(" comment 161", Some(AstLocation(4476, 323, 3)))),
Some(AstLocation(4492, 324, 3))
)),
Vector(Field(
None,
"id",
Vector.empty,
Vector.empty,
Vector.empty,
Vector(
Comment(" comment 174", Some(AstLocation(4724, 343, 3))),
Comment(" comment 175", Some(AstLocation(4740, 344, 3)))),
Vector.empty,
Some(AstLocation(4758, 345, 5))
)),
Vector(
Comment(" comment 156", Some(AstLocation(4395, 316, 1))),
Comment(" comment 157", Some(AstLocation(4409, 317, 1))),
Comment(" comment 158", Some(AstLocation(4424, 319, 1))),
Comment(" comment 159", Some(AstLocation(4438, 320, 1)))
),
Vector(
Comment(" comment 176", Some(AstLocation(4765, 346, 5))),
Comment(" comment 177", Some(AstLocation(4783, 347, 5)))),
Some(AstLocation(4454, 321, 3))
)),
Vector.empty,
Vector(
Comment(" comment 146", Some(AstLocation(4228, 300, 3))),
Comment(" comment 147", Some(AstLocation(4242, 301, 1)))),
Vector(
Comment(" comment 178", Some(AstLocation(4803, 349, 3))),
Comment(" comment 179", Some(AstLocation(4819, 350, 3)))),
Some(AstLocation(4257, 303, 1))
)
),
Vector(
Comment(" comment 180", Some(AstLocation(4835, 352, 1))),
Comment(" comment 181", Some(AstLocation(4849, 353, 1)))),
Some(AstLocation(0, 1, 1)),
None
)
QueryParser.parse(
query,
ParserConfig.default.withEmptySourceId.withoutSourceMapper) should be(Success(expected))
}
"parse document with block strings" in {
val query = FileUtil.loadQuery("block-string.graphql")
val expected = Document(
Vector(
OperationDefinition(
OperationType.Query,
Some("FetchLukeAndLeiaAliased"),
Vector(
VariableDefinition(
"someVar",
NamedType("String", Some(AstLocation(40, 1, 41))),
Some(
StringValue(
"hello \\\\\\n world",
true,
Some("\\n hello \\\\\\n world"),
Vector.empty,
Some(AstLocation(53, 2, 5)))),
Vector.empty,
Vector.empty,
Some(AstLocation(30, 1, 31))
)),
Vector.empty,
Vector(
Field(
Some("luke"),
"human",
Vector(
Argument(
"id",
StringValue("1000", false, None, Vector.empty, Some(AstLocation(105, 5, 19))),
Vector.empty,
Some(AstLocation(101, 5, 15))
),
Argument(
"bar",
StringValue(
" \\\\\\"test\\n123 \\\\u0000",
true,
Some(" \\\\\\"test\\n 123 \\\\u0000\\n "),
Vector.empty,
Some(AstLocation(118, 5, 32))),
Vector.empty,
Some(AstLocation(113, 5, 27))
)
),
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(89, 5, 3))
),
FragmentSpread("Foo", Vector.empty, Vector.empty, Some(AstLocation(158, 9, 3)))
),
Vector.empty,
Vector.empty,
Some(AstLocation(0, 1, 1))
),
FragmentDefinition(
"Foo",
NamedType("User", Some(AstLocation(184, 12, 17))),
Vector(
Directive(
"foo",
Vector(
Argument(
"bar",
BigIntValue(1, Vector.empty, Some(AstLocation(199, 12, 32))),
Vector.empty,
Some(AstLocation(194, 12, 27))
)),
Vector.empty,
Some(AstLocation(189, 12, 22))
)),
Vector(
Field(
None,
"baz",
Vector.empty,
Vector(Directive(
"docs",
Vector(Argument(
"info",
StringValue(
"\\"\\"\\"\\n\\"\\"\\" this \\" is \\"\\"\\na description! \\"\\"\\"",
true,
Some("\\"\\"\\"\\n \\"\\"\\" this \\" is \\"\\"\\n a description! \\"\\"\\"\\n "),
Vector.empty,
Some(AstLocation(225, 14, 5))
),
Vector.empty,
Some(AstLocation(215, 13, 13))
)),
Vector.empty,
Some(AstLocation(209, 13, 7))
)),
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(205, 13, 3))
)),
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(168, 12, 1))
)
),
Vector.empty,
Some(AstLocation(0, 1, 1)),
None
)
parseQuery(query) should be(Success(expected))
}
"Experimental: allows parsing fragment defined variables" in {
val queryStr = "fragment a($v: Boolean = false) on t { f(v: $v) }"
parseQuery(queryStr).isFailure should be(true)
val Success(query) = QueryParser.parse(
queryStr,
ParserConfig.default.withEmptySourceId.withoutSourceMapper.withExperimentalFragmentVariables)
query should be(
Document(
Vector(FragmentDefinition(
"a",
NamedType("t", Some(AstLocation(35, 1, 36))),
Vector.empty,
Vector(Field(
None,
"f",
Vector(Argument(
"v",
VariableValue("v", Vector.empty, Some(AstLocation(44, 1, 45))),
Vector.empty,
Some(AstLocation(41, 1, 42))
)),
Vector.empty,
Vector.empty,
Vector.empty,
Vector.empty,
Some(AstLocation(39, 1, 40))
)),
Vector(VariableDefinition(
"v",
NamedType("Boolean", Some(AstLocation(15, 1, 16))),
Some(BooleanValue(false, Vector.empty, Some(AstLocation(25, 1, 26)))),
Vector.empty,
Vector.empty,
Some(AstLocation(11, 1, 12))
)),
Vector.empty,
Vector.empty,
Some(AstLocation(0, 1, 1))
)),
Vector.empty,
Some(AstLocation(0, 1, 1)),
None
))
}
}
"Ast" should {
"be equal for the same queries" in {
val query =
"""
{
id
name
friends {
name
}
}
"""
(parseQuery(query) == parseQuery(query)) should be(true)
}
"not be equal for the same queries with different AST node positions" in {
val query1 =
"""
{
id
name
friends {
name
}
}
"""
val query2 =
"""
{
id
name
friends {name}
}
"""
(parseQuery(query1) == parseQuery(query2)) should be(false)
}
}
}
| OlegIlyenko/sangria | modules/core/src/test/scala/sangria/parser/QueryParserSpec.scala | Scala | apache-2.0 | 86,421 |
package org.jetbrains.plugins.scala
package lang
package surroundWith
package surrounders
package expression
/**
* @author Dmitry Krasilschikov
*/
import com.intellij.lang.ASTNode
import com.intellij.openapi.util.TextRange
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.lang.psi.api.expr._
/*
 * Surrounds an expression with for / yield: for (<Cursor>) yield { Expression }
 */
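/*
 * For illustration (hypothetical selection): surrounding `x + 1` first produces
 * `for (a <- as) yield {x + 1}`, then getSurroundSelectionRange removes the generated
 * enumerators so the caret lands inside the parentheses: `for (<caret>) yield {x + 1}`.
 */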
class ScalaWithForYieldSurrounder extends ScalaExpressionSurrounder {
override def getTemplateAsString(elements: Array[PsiElement]): String =
"for (a <- as) yield {" + super.getTemplateAsString(elements) + "}"
override def getTemplateDescription = "for / yield"
override def getSurroundSelectionRange(withForNode: ASTNode): TextRange = {
val element: PsiElement = withForNode.getPsi match {
case x: ScParenthesisedExpr => x.expr match {
case Some(y) => y
case _ => return x.getTextRange
}
case x => x
}
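    // The template above inserts placeholder enumerators ("a <- as"); remove them from
    // the generated for-statement so the caret ends up inside the empty parentheses.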
val forStmt = element.asInstanceOf[ScForStatement]
val enums = (forStmt.enumerators: @unchecked) match {
case Some(x) => x.getNode
}
val offset = enums.getTextRange.getStartOffset
forStmt.getNode.removeChild(enums)
new TextRange(offset, offset)
}
}
| whorbowicz/intellij-scala | src/org/jetbrains/plugins/scala/lang/surroundWith/surrounders/expression/ScalaWithForYieldSurrounder.scala | Scala | apache-2.0 | 1,242 |
// TLCockpit
// Copyright 2017-2021 Norbert Preining
// Licensed according to GPLv3+
//
// Front end for tlmgr
package TLCockpit
import javafx.scene.Node
import TLCockpit.Utils._
import TeXLive._
import TeXLive.OsTools._
import scala.collection.{immutable, mutable}
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.{Future, Promise, SyncVar}
import scala.concurrent.ExecutionContext.Implicits.global
import scala.io.Source
import scala.util.{Failure, Success}
import scala.sys.process._
import scalafx.beans.property.BooleanProperty
import scalafx.scene.text.Font
// ScalaFX imports
import scalafx.event.Event
import scalafx.beans.property.{ObjectProperty, StringProperty}
import scalafx.geometry.{Pos, Orientation}
import scalafx.scene.Cursor
import scalafx.scene.control.Alert.AlertType
import scalafx.scene.image.{Image, ImageView}
import scalafx.scene.input.{KeyCode, KeyEvent, MouseEvent}
import scalafx.scene.paint.Color
// needed see https://github.com/scalafx/scalafx/issues/137
import scalafx.scene.control.TableColumn._
import scalafx.scene.control.TreeTableColumn._
import scalafx.scene.control.TreeItem
import scalafx.scene.control.Menu._
import scalafx.scene.control.ListCell
import scalafx.Includes._
import scalafx.application.{JFXApp, Platform}
import scalafx.application.JFXApp.PrimaryStage
import scalafx.geometry.Insets
import scalafx.scene.Scene
import scalafx.scene.layout._
import scalafx.scene.control._
import scalafx.event.ActionEvent
import scalafx.collections.ObservableBuffer
import scalafx.collections.ObservableMap
// configuration file handling support
import java.util.Properties
import java.io.{ File, FileOutputStream, FileInputStream }
// JSON support - important load TLPackageJsonProtocol later!
import spray.json._
import TeXLive.JsonProtocol._
import org.json4s._
import org.json4s.jackson.JsonMethods._
// logging
import com.typesafe.scalalogging.LazyLogging
import ch.qos.logback.classic.{Level,Logger}
import org.slf4j.LoggerFactory
object ApplicationMain extends JFXApp with LazyLogging {
val version: String = getClass.getPackage.getImplementationVersion
// parse command line arguments
// nothing => INFO
// -q WARN -qq ERROR
// -d => DEBUG -dd => TRACE
val cmdlnlog: Int = parameters.unnamed.map( {
case "-d" => Level.DEBUG_INT
case "-dd" => Level.TRACE_INT
case "-q" => Level.WARN_INT
case "-qq" => Level.ERROR_INT
case _ => -1
} ).foldLeft(Level.OFF_INT)(scala.math.min)
if (cmdlnlog == -1) {
// Unknown log level has been passed in, error out
Console.err.println("Unsupported command line argument passed in, terminating.")
Platform.exit()
    sys.exit(1)
}
// if nothing has been passed on the command line, use INFO
val newloglevel: Int = if (cmdlnlog == Level.OFF_INT) Level.INFO_INT else cmdlnlog
LoggerFactory.getLogger(org.slf4j.Logger.ROOT_LOGGER_NAME).
asInstanceOf[Logger].setLevel(Level.toLevel(newloglevel))
logger.trace("starting program tlcockpit")
val javaVersion = System.getProperty("java.specification.version")
val javaVersionSplit: Array[String] = javaVersion.split('.')
logger.debug(s"Got javaVersion ${javaVersion}")
val major = toInt(javaVersionSplit(0))
major match {
case Some(i) =>
if (major.get == 1) {
val minor = toInt(javaVersionSplit(1))
minor match {
case Some(j) =>
if (minor.get < 8) {
logger.error(s"Java version ${javaVersion} too old, need >= 1.8, terminating!")
Platform.exit()
sys.exit(1)
} else if (minor.get == 8) {
if (BuildInfo.javaVersion != 8) {
logger.warn(s"Build and run versions disagree: build: ${BuildInfo.javaVersion}, run: ${major.get}.${minor.get}, trying anyway!")
}
}
case None =>
logger.warn(s"Cannot find Java version from ${javaVersion}, continuing anyway!")
}
} else {
if (major.get > 9) {
if (major.get != BuildInfo.javaVersion) {
logger.warn(s"Build and run versions disagree: build: ${BuildInfo.javaVersion}, run: ${major.get}, trying anyway!")
}
} else {
logger.warn(s"Strange version number, please report: ${javaVersion}, continuing anyway!")
}
}
case None =>
logger.warn(s"Cannot find Java version from ${javaVersion}, continuing anyway!")
}
logger.info(s"Running on Java Version ${javaVersion}")
val userHomeDirectory = System.getProperty("user.home")
val confPath = userHomeDirectory + "/.tlcockpit.conf"
val props = new Properties()
val propsFile = new File(confPath)
if (propsFile.exists()) {
props.load(new FileInputStream(propsFile))
}
var tlmgrBusy = BooleanProperty(false)
// necessary action when Window is closed with X or some other operation
override def stopApp(): Unit = {
tlmgr.cleanup()
}
val iconImage = new Image(getClass.getResourceAsStream("tlcockpit-48.jpg"))
val logoImage = new Image(getClass.getResourceAsStream("tlcockpit-128.jpg"))
val busyImage = new Image(getClass.getResourceAsStream("spinner-small.gif"))
val msgFont = new Font(30f)
val busySpinner: ImageView = new ImageView(busyImage) {
// scaleX = 0.3
// scaleY = 0.3
}
def SpinnerPlaceHolder(txt: String): Node = {
val tmp = new Label(txt)
tmp.wrapText = true
tmp.opacity = 0.4f
tmp.font = msgFont
tmp.graphic = busySpinner
tmp
}
val tlpkgs: ObservableMap[String, TLPackageShort] = ObservableMap[String,TLPackageShort]()
val pkgs: ObservableMap[String, TLPackageDisplay] = ObservableMap[String, TLPackageDisplay]()
val upds: ObservableMap[String, TLUpdateDisplay] = ObservableMap[String, TLUpdateDisplay]()
val bkps: ObservableMap[String, Map[String, TLBackupDisplay]] = ObservableMap[String, Map[String,TLBackupDisplay]]() // pkgname -> (version -> TLBackup)*
val logText: ObservableBuffer[String] = ObservableBuffer[String]()
val outputText: ObservableBuffer[String] = ObservableBuffer[String]()
val errorText: ObservableBuffer[String] = ObservableBuffer[String]()
val outputfield: TextArea = new TextArea {
editable = false
wrapText = true
text = ""
}
val logfield: TextArea = new TextArea {
editable = false
wrapText = true
text = ""
}
val errorfield: TextArea = new TextArea {
editable = false
wrapText = true
text = ""
}
logText.onChange({
logfield.text = logText.mkString("\n")
logfield.scrollTop = Double.MaxValue
})
errorText.onChange({
errorfield.text = errorText.mkString("\n")
errorfield.scrollTop = Double.MaxValue
if (errorfield.text.value.nonEmpty) {
outerrpane.expanded = true
outerrtabs.selectionModel().select(2)
}
})
outputText.onChange({
outputfield.text = outputText.mkString("\n")
outputfield.scrollTop = Double.MaxValue
})
val update_all_menu: MenuItem = new MenuItem("Update all") {
val cmd: String = "--all" + {
if (disable_auto_install) " --no-auto-install" else "" } + {
if (disable_auto_removal) " --no-auto-remove" else "" } + {
if (enable_reinstall_forcible) " --reinstall-forcibly-removed" else "" }
onAction = (ae) => callback_update(cmd)
disable = true
}
val update_self_menu: MenuItem = new MenuItem("Update self") {
onAction = (ae) => callback_update("--self")
disable = true
}
val outerrtabs: TabPane = new TabPane {
minWidth = 400
tabs = Seq(
new Tab {
text = "Output"
closable = false
content = outputfield
},
new Tab {
text = "Logging"
closable = false
content = logfield
},
new Tab {
text = "Errors"
closable = false
content = errorfield
}
)
}
val outerrpane: TitledPane = new TitledPane {
text = "Debug"
collapsible = true
expanded = false
content = outerrtabs
}
val cmdline = new TextField()
cmdline.onKeyPressed = {
(ae: KeyEvent) => if (ae.code == KeyCode.Enter) callback_run_cmdline()
}
// read the perl dump of ctan mirrors by converting it to JSON code and parsing it
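  // Illustrative sketch (mirror names assumed, not taken from a real mirror file):
  // a Perl dump along the lines of
  //   $mirrors = { 'Europe' => { 'Germany' => { 'https://ctan.example.org/' => 1 } } };
  // is rewritten to JSON by dropping the leading assignment (substring(10)),
  // replacing "=>" with ":", single with double quotes and removing the ";",
  // and is then flattened into Map[continent, Map[country, Seq[mirrorUrl]]].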
def parse_ctan_mirrors(tlroot: String): Map[String,Map[String,Seq[String]]] = {
try {
val fileName = tlroot + "/tlpkg/installer/ctan-mirrors.pl"
val foo: String = Source.fromFile(fileName).getLines.mkString("")
val jsonMirrorString = foo.substring(10).replace("=>", ":").replace("""'""", "\"").replace(";", "")
val ast = jsonMirrorString.parseJson
ast.convertTo[Map[String, Map[String, Map[String, Int]]]].map {
contpair =>
(contpair._1, contpair._2.map {
countrypair => (countrypair._1, countrypair._2.keys.toSeq)
})
}
} catch { case e: Exception =>
logText.append("Cannot find or parse ctan-mirrors.pl")
logger.debug("Cannot find or parse ctan-mirrors.pl")
Map[String,Map[String,Seq[String]]]()
}
}
def callback_quit(): Unit = {
tlmgr.cleanup()
Platform.exit()
sys.exit(0)
}
def callback_run_text(s: String): Unit = {
tlmgr_send(s, (a: String, b: Array[String]) => {})
}
def callback_run_cmdline(): Unit = {
tlmgr_send(cmdline.text.value, (status,output) => {
outputText.append(output.mkString("\n"))
outerrpane.expanded = true
outerrtabs.selectionModel().select(0)
})
}
def not_implemented_info(): Unit = {
new Alert(AlertType.Warning) {
initOwner(stage)
title = "Warning"
headerText = "This functionality is not implemented by now!"
contentText = "Sorry for the inconveniences."
}.showAndWait()
}
val OutputBuffer: ObservableBuffer[String] = ObservableBuffer[String]()
var OutputBufferIndex:Int = 0
val OutputFlushLines = 100
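  // Output of external commands is buffered and flushed to the GUI in chunks of
  // OutputFlushLines lines so that a fast-producing process does not flood the
  // JavaFX application thread with one Platform.runLater call per line.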
OutputBuffer.onChange {
// length is number of lines!
var foo = ""
OutputBuffer.synchronized(
if (OutputBuffer.length - OutputBufferIndex > OutputFlushLines) {
foo = OutputBuffer.slice(OutputBufferIndex, OutputBufferIndex + OutputFlushLines).mkString("")
OutputBufferIndex += OutputFlushLines
Platform.runLater {
outputText.append(foo)
}
}
)
}
def reset_output_buffer(): Unit = {
OutputBuffer.clear()
OutputBufferIndex = 0
}
def callback_run_external(ss: Array[String], unbuffered: Boolean = true): Unit = {
outputText.clear()
// logText.clear()
outerrpane.expanded = true
outerrtabs.selectionModel().select(0)
// outputText.append(s"Running ${ss.mkString(" ")}" + (if (unbuffered) " (unbuffered)" else " (buffered)"))
val foo = Future {
ss.foreach { s =>
val runcmd = if (isCygwin) "bash -l -c \"" + s + "\"" else s
Platform.runLater {
outputText.append(s"Running ${s}" + (if (unbuffered) " (unbuffered)" else " (buffered)"))
actionLabel.text = s"[${s}]"
}
runcmd ! ProcessLogger(
line => if (unbuffered) Platform.runLater(outputText.append(line))
else OutputBuffer.synchronized(OutputBuffer.append(line + "\n")),
line => Platform.runLater(logText.append(line))
)
}
}
foo.onComplete {
case Success(ret) =>
Platform.runLater {
actionLabel.text = ""
outputText.append(OutputBuffer.slice(OutputBufferIndex,OutputBuffer.length).mkString(""))
outputText.append("Completed")
reset_output_buffer()
outputfield.scrollTop = Double.MaxValue
}
case Failure(t) =>
Platform.runLater {
actionLabel.text = ""
outputText.append(OutputBuffer.slice(OutputBufferIndex,OutputBuffer.length).mkString(""))
outputText.append("Completed")
reset_output_buffer()
outputfield.scrollTop = Double.MaxValue
errorText.append(s"An ERROR has occurred running one of ${ss.mkString(" ")}: " + t.getMessage)
errorfield.scrollTop = Double.MaxValue
outerrpane.expanded = true
outerrtabs.selectionModel().select(2)
}
}
}
def callback_about(): Unit = {
new Alert(AlertType.Information) {
initOwner(stage)
title = "About TLCockpit"
graphic = new ImageView(logoImage)
headerText = "TLCockpit version " + version + "\n\nManage your TeX Live with speed!"
contentText = "Copyright 2017-2021 Norbert Preining\nLicense: GPL3+\nSources: https://github.com/TeX-Live/tlcockpit"
}.showAndWait()
}
// Output of update --self
/*
tlmgr>
update --self
location-url /home/norbert/public_html/tlnet /home/norbert/public_html/tlcritical /home/norbert/Domains/server/texlive.info/contrib/2017 /home/norbert/public_html/tltexjp
total-bytes 381087
end-of-header
texlive.infra u 4629 46295 381087 ??:?? ??:?? tlcritical - -
end-of-updates
STDERR running mktexlsr ...
STDERR done running mktexlsr.
STDERR running mtxrun --generate ...
STDERR done running mtxrun --generate.
OK
STDOUT (with patch STDERR) tlmgr has been updated, restarting!
protocol 1
tlmgr>
The problem with the update function lies in the
protocol 1
which is not accepted/expected by the update function!
*/
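  // While an update/install/remove command runs, tlmgr prints one line per package as
  // it is processed; this hook updates the matching table row ("Updating ...",
  // "Installing ...", "Removing ...") as soon as a package appears and commits its new
  // revision/installed state when the following line (or end-of-updates) arrives.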
def set_line_update_function(mode: String): Unit = {
var prevName = ""
stdoutLineUpdateFunc = (l:String) => {
logger.trace("DEBUG line update: " + l + "=")
l match {
case u if u.startsWith("location-url") => None
case u if u.startsWith("total-bytes") => None
case u if u.startsWith("end-of-header") => None
// case u if u.startsWith("end-of-updates") => None
case u if u == "OK" => None
case u if u.startsWith("tlmgr>") => None
case u =>
if (prevName != "") {
if (mode == "update") {
// parallelism is a pain, I get concurrent access here, but don't know with whom?
// ConcurrentModificationExceptions often occur when you're modifying
// a collection while you are iterating over its elements.
// val newkids: ObservableBuffer[TreeItem[TLUpdateDisplay]] =
// updateTable.root.value.children.filter(_.value.value.name.value != prevName)
// .map(_.asInstanceOf[TreeItem[TLUpdateDisplay]])
          // the last map is only necessary because ScalaFX is buggy and does not produce
// proper types here!!! Probably similar to https://github.com/scalafx/scalafx/issues/137
// updateTable.root.value.children = newkids
upds.remove(prevName)
trigger_update("upds")
} else if (mode == "remove") {
tlpkgs(prevName).installed = false
} else { // install
tlpkgs(prevName).installed = true
}
if (mode == "remove") {
pkgs(prevName).lrev = ObjectProperty[Int](0)
pkgs(prevName).installed = StringProperty("Not installed")
tlpkgs(prevName).lrev = 0
} else { // install and update
pkgs(prevName).lrev = pkgs(prevName).rrev
pkgs(prevName).installed = StringProperty("Installed") // TODO support Mixed!!!
tlpkgs(prevName).lrev = tlpkgs(prevName).rrev
}
packageTable.refresh()
}
if (u.startsWith("end-of-updates")) {
if (mode == "update") {
Platform.runLater {
updateTable.placeholder = SpinnerPlaceHolder("Post actions running")
actionLabel.text = "[post actions running]"
}
}
// nothing to be done, all has been done above
logger.debug("DEBUG got end of updates")
// } else if (u.startsWith("protocol ")) {
// logger.debug("Got protocol line, seems tlmgr got updated and restarted!")
// // nothing else to be done
// } else if (u.startsWith("tlmgr has been updated, restarting")) {
// logger.debug("tlmgr got updated and restarted, ignoring output")
} else {
logger.debug("DEBUG getting update line")
prevName = if (mode == "update") {
val foo = parse_one_update_line(l)
val pkgname = foo.name.value
upds(pkgname).status = StringProperty("Updating ...")
updateTable.refresh()
pkgname
} else if (mode == "install") {
val fields = l.split("\t")
val pkgname = fields(0)
pkgs(pkgname).installed = StringProperty("Installing ...")
packageTable.refresh()
pkgname
} else { // remove
val fields = l.split("\t")
val pkgname = fields(0)
pkgs(pkgname).installed = StringProperty("Removing ...")
packageTable.refresh()
pkgname
}
}
}
}
}
def callback_update(s: String): Unit = {
val prevph = updateTable.placeholder.value
set_line_update_function("update")
val cmd = if (s == "--self") "update --self --no-restart" else s"update $s"
tlmgr_send(cmd, (a,b) => {
stdoutLineUpdateFunc = defaultStdoutLineUpdateFunc
Platform.runLater {
updateTable.placeholder = prevph
}
if (s == "--self") {
reinitialize_tlmgr()
// this doesn't work seemingly
// update_upds_list()
}
})
}
def callback_remove(pkg: String): Unit = {
set_line_update_function("remove")
tlmgr_send(s"remove $pkg", (_, _) => {
stdoutLineUpdateFunc = defaultStdoutLineUpdateFunc
})
}
def callback_install(pkg: String): Unit = {
set_line_update_function("install")
tlmgr_send(s"install $pkg", (_,_) => {
stdoutLineUpdateFunc = defaultStdoutLineUpdateFunc
})
}
def callback_restore(str: String, rev: String): Unit = {
tlmgr_send(s"restore --force $str $rev", (_,_) => {
tlpkgs(str).lrev = rev.toLong
pkgs(str).lrev = ObjectProperty[Int](rev.toInt)
packageTable.refresh()
Platform.runLater { actionLabel.text = "[running post actions]" }
})
}
bkps.onChange( (obs,chs) => {
val doit = chs match {
case ObservableMap.Add(k, v) => k.toString == "root"
case ObservableMap.Replace(k, va, vr) => k.toString == "root"
case ObservableMap.Remove(k, v) => k.toString == "root"
}
if (doit) {
logger.debug("DEBUG bkps.onChange called new length = " + bkps.keys.toArray.length)
val newroot = new TreeItem[TLBackupDisplay](new TLBackupDisplay("root", "", "")) {
children = bkps
.filter(_._1 != "root")
.map(p => {
val pkgname: String = p._1
// we sort by negative of revision number, which give inverse sort
val versmap: Array[(String, TLBackupDisplay)] = p._2.toArray.sortBy(-_._2.rev.value.toInt)
val foo: Seq[TreeItem[TLBackupDisplay]] = versmap.tail.sortBy(-_._2.rev.value.toInt).map { q =>
new TreeItem[TLBackupDisplay](q._2)
}.toSeq
new TreeItem[TLBackupDisplay](versmap.head._2) {
children = foo
}
}).toArray.sortBy(_.value.value.name.value)
}
Platform.runLater {
backupTable.root = newroot
}
}
})
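  // The package tree is shown either flat ("by package name") or grouped under the
  // collections that depend on the packages ("by collections"); in both views the
  // architecture-specific sub-packages (e.g. "foo.x86_64-linux") are attached as
  // children of their main package via compute_bin_pkg_mapping.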
def view_pkgs_by_collections(pkgbuf: scala.collection.mutable.Map[String, TLPackageDisplay],
binbuf: scala.collection.mutable.Map[String, ArrayBuffer[TLPackageDisplay]],
colbuf: scala.collection.mutable.Map[String, ArrayBuffer[TLPackageDisplay]]): Seq[TreeItem[TLPackageDisplay]] = {
val bin_pkg_map = compute_bin_pkg_mapping(pkgbuf, binbuf)
colbuf.map(
p => {
val colname: String = p._1
val coldeps: Seq[TLPackageDisplay] = p._2
val coltlpd: TLPackageDisplay = pkgbuf(colname)
new TreeItem[TLPackageDisplay](coltlpd) {
children = coldeps.filter(q => tlpkgs(q.name.value).category != "Collection").sortBy(_.name.value).map(sub => {
val binmap: (Boolean, Seq[TLPackageDisplay]) = bin_pkg_map(sub.name.value)
val ismixed: Boolean = binmap._1
val kids: Seq[TLPackageDisplay] = binmap._2.sortBy(_.name.value)
val ti = if (ismixed) {
// replace installed status with "Mixed"
new TreeItem[TLPackageDisplay](
new TLPackageDisplay(sub.name.value, sub.lrev.value.toString, sub.rrev.value.toString, sub.shortdesc.value, sub.size.value.toString, "Mixed")
) {
children = kids.map(new TreeItem[TLPackageDisplay](_))
}
} else {
new TreeItem[TLPackageDisplay](sub) {
children = kids.map(new TreeItem[TLPackageDisplay](_))
}
}
ti
}
)
}
}
).toSeq
// ArrayBuffer.empty[TreeItem[TLPackageDisplay]]
}
def view_pkgs_by_names(pkgbuf: scala.collection.mutable.Map[String, TLPackageDisplay],
binbuf: scala.collection.mutable.Map[String, ArrayBuffer[TLPackageDisplay]]): Seq[TreeItem[TLPackageDisplay]] = {
val bin_pkg_map: Map[String, (Boolean, Seq[TLPackageDisplay])] = compute_bin_pkg_mapping(pkgbuf, binbuf)
pkgbuf.map{
p => {
val binmap: (Boolean, Seq[TLPackageDisplay]) = bin_pkg_map(p._1)
val pkgtlp: TLPackageDisplay = p._2
val ismixed: Boolean = binmap._1
val kids: Seq[TLPackageDisplay] = binmap._2.sortBy(_.name.value)
if (ismixed) {
new TreeItem[TLPackageDisplay](
new TLPackageDisplay(pkgtlp.name.value, pkgtlp.lrev.value.toString, pkgtlp.rrev.value.toString, pkgtlp.shortdesc.value, pkgtlp.size.value.toString, "Mixed")
) {
children = kids.map(new TreeItem[TLPackageDisplay](_))
}
} else {
new TreeItem[TLPackageDisplay](pkgtlp) {
children = kids.map(new TreeItem[TLPackageDisplay](_))
}
}
}
}.toSeq
}
def compute_bin_pkg_mapping(pkgbuf: scala.collection.mutable.Map[String, TLPackageDisplay],
binbuf: scala.collection.mutable.Map[String, ArrayBuffer[TLPackageDisplay]]): Map[String, (Boolean, Seq[TLPackageDisplay])] = {
pkgbuf.map {
p => {
val kids: Seq[TLPackageDisplay] = if (binbuf.keySet.contains(p._2.name.value)) {
binbuf(p._2.name.value)
} else {
Seq()
}
// for ismixed we && all the installed status. If all are installed, we get true
val allinstalled = (kids :+ p._2).foldRight[Boolean](true)((k, b) => k.installed.value == "Installed" && b)
val someinstalled = (kids :+ p._2).exists(_.installed.value == "Installed")
val mixedinstalled = !allinstalled && someinstalled
(p._1, (mixedinstalled, kids))
}
}.toMap
}
pkgs.onChange( (obs,chs) => {
val doit = chs match {
case ObservableMap.Add(k, v) => k.toString == "root"
case ObservableMap.Replace(k, va, vr) => k.toString == "root"
// don't call the trigger on root removal!
// case ObservableMap.Remove(k, v) => k.toString == "root"
case ObservableMap.Remove(k,v) => false
}
if (doit) {
logger.debug("DEBUG: entering pkgs.onChange")
// val pkgbuf: ArrayBuffer[TLPackageDisplay] = ArrayBuffer.empty[TLPackageDisplay]
val pkgbuf = scala.collection.mutable.Map.empty[String, TLPackageDisplay]
val binbuf = scala.collection.mutable.Map.empty[String, ArrayBuffer[TLPackageDisplay]]
val colbuf = scala.collection.mutable.Map.empty[String, ArrayBuffer[TLPackageDisplay]]
pkgs.foreach(pkg => {
// complicated part, determine whether it is a sub package or not!
        // we strip off initial texlive. prefixes to make sure we deal
// with real packages
if ((pkg._1.startsWith("texlive.infra") && pkg._1.stripPrefix("texlive.infra").contains(".")) ||
pkg._1.stripPrefix("texlive.infra").contains(".")) {
val foo: Array[String] = if (pkg._1.startsWith("texlive.infra"))
Array("texlive.infra", pkg._1.stripPrefix("texlive.infra"))
else
pkg._1.split('.')
val pkgname = foo(0)
if (pkgname != "") {
val binname = foo(1)
if (binbuf.keySet.contains(pkgname)) {
binbuf(pkgname) += pkg._2
} else {
binbuf(pkgname) = ArrayBuffer[TLPackageDisplay](pkg._2)
}
}
} else if (pkg._1 == "root") {
// ignore the dummy root element,
// only used for speeding up event handling
} else {
pkgbuf(pkg._1) = pkg._2
}
})
// Another round to propagate purely .win32 packages like wintools.win32 or
// dviout.win32 from binpkg status to full pkg, since they don't have
// accompanying main packages
binbuf.foreach(p => {
if (!pkgbuf.contains(p._1)) {
if (p._2.length > 1) {
errorText += "THAT SHOULD NOT HAPPEN: >>" + p._1 + "<< >>" + p._2.length + "<<"
p._2.foreach(f => logger.trace("-> " + f.name.value))
} else {
logger.trace("DEBUG Moving " + p._2.head.name.value + " up to pkgbuf " + p._1)
pkgbuf(p._2.head.name.value) = p._2.head
// TODO will this work out with the foreach loop above???
binbuf -= p._1
}
}
})
      // another loop to collect dependencies and fill the collections buffer
pkgs.foreach(pkg => {
if (tlpkgs.contains(pkg._1)) {
if (tlpkgs(pkg._1).category == "Collection") {
val foo: immutable.Seq[String] = tlpkgs(pkg._1).depends
colbuf(pkg._1) = ArrayBuffer[TLPackageDisplay]()
// TODO we need to deal with packages that get removed!!!
// for now just make sure we don't add them here!
colbuf(pkg._1) ++= foo.filter(pkgbuf.contains).map(pkgbuf(_))
}
} else if (pkg._1 == "root") {
// do nothing
} else {
errorText += "Cannot find information for " + pkg._1
}
})
      // now we have all normal packages in pkgbuf, and their sub-packages in binbuf
// we need to create TreeItems
val viewkids: Seq[TreeItem[TLPackageDisplay]] =
if (ViewByPkg.selected.value)
view_pkgs_by_names(pkgbuf, binbuf)
else
view_pkgs_by_collections(pkgbuf, binbuf, colbuf)
logger.debug("DEBUG: leaving pkgs.onChange before runLater")
Platform.runLater {
packageTable.root = new TreeItem[TLPackageDisplay](new TLPackageDisplay("root", "0", "0", "", "0", "")) {
expanded = true
children = viewkids.sortBy(_.value.value.name.value)
}
}
}
})
upds.onChange( (obs, chs) => {
    val doit = chs match {
case ObservableMap.Add(k, v) => k.toString == "root"
case ObservableMap.Replace(k, va, vr) => k.toString == "root"
case ObservableMap.Remove(k, v) => k.toString == "root"
}
if (doit) {
val infraAvailable = upds.keys.exists(_.startsWith("texlive.infra"))
// only allow for updates of other packages when no infra update available
      val updatesAvailable = !infraAvailable && upds.keys.exists(p => !p.startsWith("texlive.infra") && p != "root")
val newroot = new TreeItem[TLUpdateDisplay](new TLUpdateDisplay("root", "", "", "", "", "")) {
children = upds
.filter(_._1 != "root")
.map(p => new TreeItem[TLUpdateDisplay](p._2))
.toArray
.sortBy(_.value.value.name.value)
}
Platform.runLater {
update_self_menu.disable = !infraAvailable
update_all_menu.disable = !updatesAvailable
updateTable.root = newroot
if (infraAvailable) {
texlive_infra_update_warning()
}
}
}
})
def texlive_infra_update_warning(): Unit = {
new Alert(AlertType.Warning) {
initOwner(stage)
title = "TeX Live Infrastructure Update Available"
headerText = "Updates to the TeX Live Manager (Infrastructure) available."
contentText = "Please use \"Update self\" from the Menu!"
}.showAndWait()
}
def load_backups_update_bkps_view(): Unit = {
val prevph = backupTable.placeholder.value
backupTable.placeholder = SpinnerPlaceHolder("Loading backups")
tlmgr_send("restore --json", (status, lines) => {
val jsonAst = lines.mkString("").parseJson
val backups: Map[String, Map[String, TLBackupDisplay]] =
jsonAst
.convertTo[List[TLBackup]]
.groupBy[String](_.name)
.map(p => (p._1, p._2.map(q => (q.rev, new TLBackupDisplay(q.name, q.rev, q.date))).toMap))
bkps.clear()
bkps ++= backups
trigger_update("bkps")
backupTable.placeholder = prevph
})
}
def update_pkgs_view(): Unit = {
val newpkgs: Map[String, TLPackageDisplay] =
tlpkgs
.filter { p =>
val searchTerm = searchEntry.text.value.toLowerCase
p._1.toLowerCase.contains(searchTerm) ||
p._2.shortdesc.getOrElse("").toLowerCase.contains(searchTerm)
}
.map { p =>
(p._2.name,
new TLPackageDisplay(
p._2.name, p._2.lrev.toString, p._2.rrev.toString,
p._2.shortdesc.getOrElse(""), "0", if (p._2.installed) "Installed" else "Not installed"
)
)
}.toMap
pkgs.clear()
pkgs ++= newpkgs
trigger_update("pkgs")
}
/*
def load_tlpdb_update_pkgs_view():Unit = {
val prevph = packageTable.placeholder.value
packageTable.placeholder = SpinnerPlaceHolder("Loading database")
tlmgr_send("info --json", (status, lines) => {
logger.debug(s"load tlpdb update pkgs: got status ${status}")
logger.trace(s"load tlpdb update pkgs: got lines = " + lines.head)
val jsonAst = lines.mkString("").parseJson
tlpkgs.clear()
tlpkgs ++= jsonAst.convertTo[List[TLPackage]].map { p => (p.name, p)}
update_pkgs_view()
packageTable.placeholder = prevph
})
}
*/
def load_tlpdb_update_pkgs_view_no_json():Unit = {
val prevph = packageTable.placeholder.value
packageTable.placeholder = SpinnerPlaceHolder("Loading database")
tlmgr_send("info --data name,localrev,remoterev,category,size,installed,depends,shortdesc", (status, lines) => {
logger.debug(s"load tlpdb update (no json) pkgs: got status ${status}")
logger.trace(s"load tlpdb update (no json) pkgs: got lines = " + lines.head)
val newtlpkgs: Map[String, TLPackageShort] = lines.map(l => {
// logger.debug(s"got line >>>${l}<<<")
val fields = l.split(",",8)
val pkgname = fields(0)
val shortdesc = fields(7).stripPrefix(""""""").stripSuffix(""""""").replaceAll("""\\"""",""""""")
val lrev = fields(1).toLong
val rrev = fields(2).toLong
val cat = fields(3)
val size = fields(4).toLong
val installed = fields(5) == "1"
val depends = fields(6).split(":").toList
TLPackageShort(pkgname, if (shortdesc == "") None else Some(shortdesc), lrev, rrev, cat, depends, installed, lrev > 0)
}).map{ p =>
logger.trace("Constructed TLPackage: " + p)
(p.name, p)
}.toMap
tlpkgs.clear()
tlpkgs ++= newtlpkgs
update_pkgs_view()
packageTable.placeholder = prevph
})
}
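  // "update --list" reports one tab-separated record per package; the field layout
  // assumed below is:
  //   name, status flag (d/f/u/r/a/i/I), localrev, serverrev, size, runtime,
  //   estimated total, repository tag, local CTAN version, remote CTAN version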
def parse_one_update_line(l: String): TLUpdateDisplay = {
logger.debug(s"Got update line >>${l}")
val fields = l.split("\t")
logger.debug(s"Splitting into ${fields}")
val pkgname = fields(0)
val status = fields(1) match {
case "d" => "Removed on server"
case "f" => "Forcibly removed"
case "u" => "Update available"
case "r" => "Local is newer"
case "a" => "New on server"
case "i" => "Not installed"
case "I" => "Reinstall"
}
val localrev = fields(2)
val serverrev = fields(3)
val size = if (fields(1) == "d" || fields(1) == "r") "0" else humanReadableByteSize(fields(4).toLong)
val runtime = fields(5)
val esttot = fields(6)
val tag = fields(7)
val lctanv = fields(8)
val rctanv = fields(9)
val tlpkg: TLPackageDisplay = pkgs(pkgname)
val shortdesc = tlpkg.shortdesc.value
new TLUpdateDisplay(pkgname, status,
localrev + {
if (lctanv != "-") s" ($lctanv)" else ""
},
serverrev + {
if (rctanv != "-") s" ($rctanv)" else ""
},
shortdesc, size)
}
def load_updates_update_upds_view(): Unit = {
val prevph = updateTable.placeholder.value
updateTable.placeholder = SpinnerPlaceHolder("Loading updates")
tlmgr_send("update --list", (status, lines) => {
logger.debug(s"got updates length ${lines.length}")
logger.trace(s"tlmgr last output = ${lines}")
val newupds: Map[String, TLUpdateDisplay] = lines.filter {
case u if u.startsWith("location-url") => false
case u if u.startsWith("total-bytes") => false
case u if u.startsWith("end-of-header") => false
case u if u.startsWith("end-of-updates") => false
case u => true
}.map { l =>
val foo = parse_one_update_line(l)
(foo.name.value, foo)
}.toMap
val infraAvailable = newupds.keys.exists(_.startsWith("texlive.infra"))
upds.clear()
if (infraAvailable) {
upds ++= Seq( ("texlive.infra", newupds("texlive.infra") ) )
} else {
upds ++= newupds
}
trigger_update("upds")
updateTable.placeholder = prevph
})
}
def trigger_update(s:String): Unit = {
logger.debug("DEBUG: Triggering update of " + s)
if (s == "pkgs") {
pkgs("root") = new TLPackageDisplay("root", "0", "0", "", "0", "")
} else if (s == "upds") {
upds("root") = new TLUpdateDisplay("root", "", "", "", "", "")
} else if (s == "bkps") {
bkps("root") = Map[String, TLBackupDisplay](("0", new TLBackupDisplay("root", "0", "0")))
}
}
def doListView(files: Seq[String], clickable: Boolean): scalafx.scene.Node = {
if (files.length <= 5) {
val vb = new VBox()
vb.children = files.map { f =>
val fields = f.split(" ")
new Label(fields(0)) {
if (clickable) {
textFill = Color.Blue
onMouseClicked = { me: MouseEvent => OsTools.openFile(tlmgr.tlroot + "/" + fields(0)) }
cursor = Cursor.Hand
}
}
}
vb
} else {
val vb = new ListView[String] {}
vb.minHeight = 150
vb.prefHeight = 150
vb.maxHeight = 200
vb.vgrow = Priority.Always
vb.orientation = Orientation.Vertical
vb.cellFactory = { p => {
val foo = new ListCell[String]
foo.item.onChange { (_, _, str) => foo.text = str }
if (clickable) {
foo.textFill = Color.Blue
foo.onMouseClicked = { me: MouseEvent => OsTools.openFile(tlmgr.tlroot + "/" + foo.text.value) }
foo.cursor = Cursor.Hand
}
foo
}
}
// vb.children = docFiles.map { f =>
vb.items = ObservableBuffer(files.map { f =>
val fields = f.split(" ")
fields(0)
})
vb
}
}
val mainMenu: Menu = new Menu("TLCockpit") {
items = List(
// temporarily move here as we disable the Help menu
new MenuItem("About") {
onAction = (ae) => callback_about()
},
new MenuItem("Exit") {
onAction = (ae: ActionEvent) => callback_quit()
})
}
val toolsMenu: Menu = new Menu("Tools") {
items = List(
new MenuItem("Update filename databases ...") {
onAction = (ae) => {
callback_run_external(Array("mktexlsr", "mtxrun --generate"))
// callback_run_external("mtxrun --generate")
}
},
      // too many lines are output too quickly -> the GUI hangs until
      // all the callbacks are done - call fmtutil with unbuffered = false
new MenuItem("Rebuild all formats ...") { onAction = (ae) => callback_run_external(Array("fmtutil --sys --all"), false) },
new MenuItem("Update font map database ...") {
onAction = (ae) => callback_run_external(Array("updmap --sys"))
}
)
}
val ViewByPkg = new RadioMenuItem("by package name") {
onAction = (ae) => {
searchEntry.text = ""
update_pkgs_view()
}
}
val ViewByCol = new RadioMenuItem("by collections") {
onAction = (ae) => {
searchEntry.text = ""
update_pkgs_view()
}
}
ViewByPkg.selected = true
ViewByCol.selected = false
val pkgsMenu: Menu = new Menu("Packages") {
val foo = new ToggleGroup
foo.toggles = Seq(ViewByPkg, ViewByCol)
items = List(ViewByPkg, ViewByCol)
}
var disable_auto_removal = false
var disable_auto_install = false
var enable_reinstall_forcible = false
val updMenu: Menu = new Menu("Updates") {
items = List(
update_all_menu,
update_self_menu,
new SeparatorMenuItem,
new CheckMenuItem("Disable auto removal") { onAction = (ae) => disable_auto_removal = selected.value },
new CheckMenuItem("Disable auto install") { onAction = (ae) => disable_auto_install = selected.value },
new CheckMenuItem("Reinstall forcibly removed") { onAction = (ae) => enable_reinstall_forcible = selected.value }
)
}
def callback_general_options(): Unit = {
tlmgr_send("option showall --json", (status, lines) => {
val jsonAst = lines.mkString("").parseJson
val tlpdopts: List[TLOption] = jsonAst.convertTo[List[TLOption]]
Platform.runLater {
val dg = new OptionsDialog(tlpdopts)
dg.showAndWait() match {
case Some(changedOpts) =>
changedOpts.foreach(p => {
              // believe it or not, \" does *NOT* work in Scala in
// interpolated strings, and it seems there is no better way
// than that one ...
tlmgr_send(s"option ${p._1} ${'"'}${p._2}${'"'}", (_,_) => None)
})
case None =>
}
}
})
}
def callback_paper(): Unit = {
tlmgr_send("paper --json", (status, lines) => {
val jsonAst = lines.mkString("").parseJson
val paperconfs: Map[String, TLPaperConf] = jsonAst.convertTo[List[TLPaperConf]].map { p => (p.program, p) }.toMap
val currentPapers: Map[String, String] = paperconfs.mapValues(p => p.options.head)
Platform.runLater {
val dg = new PaperDialog(paperconfs)
dg.showAndWait() match {
case Some(newPapers) =>
logger.debug(s"Got result ${newPapers}")
// collect changed settings
val changedPapers = newPapers.filter(p => currentPapers(p._1) != p._2)
logger.debug(s"Got changed papers ${changedPapers}")
changedPapers.foreach(p => {
tlmgr_send(s"paper ${p._1} paper ${p._2}", (_,_) => None)
})
case None =>
}
}
})
}
def callback_pkg_info(pkg: String) = {
tlmgr_send(s"info --json ${pkg}", (status, lines) => {
try {
val jsonAst = lines.mkString("").parseJson
val tlpkgs = jsonAst.convertTo[List[TLPackage]]
Platform.runLater {
new PkgInfoDialog(tlpkgs.head).showAndWait()
}
} catch {
case foo : spray.json.DeserializationException =>
new Alert(AlertType.Warning) {
initOwner(stage)
title = "Warning"
headerText = s"Cannot display information for ${pkg}"
contentText = s"We couldn't parse the output of\ntlmgr info --json ${pkg}\n"
}.showAndWait()
case bar : ArrayIndexOutOfBoundsException =>
new Alert(AlertType.Warning) {
initOwner(stage)
title = "Warning"
headerText = s"Cannot find information for ${pkg}"
contentText = s"We couldn't find information for ${pkg}\n"
}.showAndWait()
}
})
}
def save_properties(): Unit = {
props.store(new FileOutputStream(confPath), null)
}
val StartTabPkgs = new RadioMenuItem("Packages") {
onAction = (ae) => {
props.setProperty("StartupTab", "packages")
save_properties()
}
}
val StartTabUpds = new RadioMenuItem("Updates") {
onAction = (ae) => {
props.setProperty("StartupTab","updates")
save_properties()
}
}
val StartTabBcks = new RadioMenuItem("Backups") {
onAction = (ae) => {
props.setProperty("StartupTab","backups")
save_properties()
}
}
val startupTabMenu: Menu = new Menu("Startup Tab") {
val foo = new ToggleGroup
foo.toggles = Seq(StartTabPkgs, StartTabUpds, StartTabBcks)
items = List(StartTabPkgs, StartTabUpds, StartTabBcks)
}
val optionsMenu: Menu = new Menu("Options") {
items = List(
new MenuItem("General ...") { onAction = (ae) => callback_general_options() },
new MenuItem("Paper ...") { onAction = (ae) => callback_paper() },
startupTabMenu
/* new MenuItem("Platforms ...") { disable = true; onAction = (ae) => not_implemented_info() },
new SeparatorMenuItem,
new CheckMenuItem("Expert options") { disable = true },
new CheckMenuItem("Enable debugging options") { disable = true },
new CheckMenuItem("Disable auto-install of new packages") { disable = true },
new CheckMenuItem("Disable auto-removal of server-deleted packages") { disable = true } */
)
}
val expertPane: TitledPane = new TitledPane {
text = "Experts only"
collapsible = true
expanded = false
content = new VBox {
spacing = 10
children = List(
new HBox {
spacing = 10
alignment = Pos.CenterLeft
children = List(
new Label("tlmgr shell command:"),
cmdline,
new Button {
text = "Go"
onAction = (event: ActionEvent) => callback_run_cmdline()
}
)
}
)
}
}
val updateTable: TreeTableView[TLUpdateDisplay] = {
val colName = new TreeTableColumn[TLUpdateDisplay, String] {
text = "Package"
cellValueFactory = { _.value.value.value.name }
prefWidth = 150
}
val colStatus = new TreeTableColumn[TLUpdateDisplay, String] {
text = "Status"
cellValueFactory = { _.value.value.value.status }
prefWidth = 120
}
val colDesc = new TreeTableColumn[TLUpdateDisplay, String] {
text = "Description"
cellValueFactory = { _.value.value.value.shortdesc }
prefWidth = 300
}
val colLRev = new TreeTableColumn[TLUpdateDisplay, String] {
text = "Local rev"
cellValueFactory = { _.value.value.value.lrev }
prefWidth = 100
}
val colRRev = new TreeTableColumn[TLUpdateDisplay, String] {
text = "Remote rev"
cellValueFactory = { _.value.value.value.rrev }
prefWidth = 100
}
val colSize = new TreeTableColumn[TLUpdateDisplay, String] {
text = "Size"
cellValueFactory = { _.value.value.value.size }
prefWidth = 70
}
val table = new TreeTableView[TLUpdateDisplay](
new TreeItem[TLUpdateDisplay](new TLUpdateDisplay("root","","","","","")) {
expanded = false
}) {
columns ++= List(colName, colStatus, colDesc, colLRev, colRRev, colSize)
}
    colDesc.prefWidth.bind(table.width - colName.width - colLRev.width - colRRev.width - colSize.width - colStatus.width - 15)
table.prefHeight = 300
table.vgrow = Priority.Always
table.placeholder = new Label("No updates available") {
opacity = 0.4f
font = msgFont
}
table.showRoot = false
table.rowFactory = { _ =>
val row = new TreeTableRow[TLUpdateDisplay] {}
val infoMI = new MenuItem("Info") {
onAction = (ae) => callback_pkg_info(row.item.value.name.value)
}
val updateMI = new MenuItem("Update") {
onAction = (ae) => callback_update(row.item.value.name.value)
}
val installMI = new MenuItem("Install") {
onAction = (ae) => callback_install(row.item.value.name.value)
}
val removeMI = new MenuItem("Remove") {
onAction = (ae) => callback_remove(row.item.value.name.value)
}
val ctm = new ContextMenu(infoMI, updateMI, installMI, removeMI)
row.item.onChange { (_,_,newTL) =>
if (newTL != null) {
if (newTL.status.value == "New on server") {
installMI.disable = false
removeMI.disable = true
updateMI.disable = true
} else if (newTL.status.value == "Removed on server") {
installMI.disable = true
removeMI.disable = false
updateMI.disable = true
} else {
installMI.disable = true
removeMI.disable = false
updateMI.disable = false
}
}
}
row.contextMenu = ctm
row
}
table
}
val packageTable: TreeTableView[TLPackageDisplay] = {
val colName = new TreeTableColumn[TLPackageDisplay, String] {
text = "Package"
cellValueFactory = { _.value.value.value.name }
prefWidth = 150
}
val colDesc = new TreeTableColumn[TLPackageDisplay, String] {
text = "Description"
cellValueFactory = { _.value.value.value.shortdesc }
prefWidth = 300
}
val colInst = new TreeTableColumn[TLPackageDisplay, String] {
text = "Installed"
cellValueFactory = { _.value.value.value.installed }
prefWidth = 100
}
val table = new TreeTableView[TLPackageDisplay](
new TreeItem[TLPackageDisplay](new TLPackageDisplay("root","0","0","","0","")) {
expanded = false
}) {
columns ++= List(colName, colDesc, colInst)
}
colDesc.prefWidth.bind(table.width - colInst.width - colName.width - 15)
table.prefHeight = 300
table.showRoot = false
table.vgrow = Priority.Always
table.rowFactory = { p =>
val row = new TreeTableRow[TLPackageDisplay] {}
val infoMI = new MenuItem("Info") {
onAction = (ae) => callback_pkg_info(row.item.value.name.value)
}
val installMI = new MenuItem("Install") {
onAction = (ae) => callback_install(row.item.value.name.value)
}
val removeMI = new MenuItem("Remove") {
onAction = (ae) => callback_remove(row.item.value.name.value)
}
val ctm = new ContextMenu(infoMI, installMI, removeMI)
row.item.onChange { (_,_,newTL) =>
if (newTL != null) {
val is_installed: Boolean = !(newTL.installed.value == "Not installed")
installMI.disable = is_installed
removeMI.disable = !is_installed
}
}
row.contextMenu = ctm
row
}
table
}
val backupTable: TreeTableView[TLBackupDisplay] = {
val colName = new TreeTableColumn[TLBackupDisplay, String] {
text = "Package"
cellValueFactory = { _.value.value.value.name }
prefWidth = 150
}
val colRev = new TreeTableColumn[TLBackupDisplay, String] {
text = "Revision"
cellValueFactory = { _.value.value.value.rev }
prefWidth = 100
}
val colDate = new TreeTableColumn[TLBackupDisplay, String] {
text = "Date"
cellValueFactory = { _.value.value.value.date }
prefWidth = 300
}
val table = new TreeTableView[TLBackupDisplay](
new TreeItem[TLBackupDisplay](new TLBackupDisplay("root","","")) {
expanded = false
}) {
columns ++= List(colName, colRev, colDate)
}
colDate.prefWidth.bind(table.width - colRev.width - colName.width - 15)
table.prefHeight = 300
table.showRoot = false
table.vgrow = Priority.Always
table.placeholder = new Label("No backups available") {
opacity = 0.4f
font = msgFont
}
table.rowFactory = { _ =>
val row = new TreeTableRow[TLBackupDisplay] {}
val ctm = new ContextMenu(
new MenuItem("Info") {
onAction = (ae) => callback_pkg_info(row.item.value.name.value)
},
new MenuItem("Restore") {
onAction = (ae) => callback_restore(row.item.value.name.value, row.item.value.rev.value)
}
)
row.contextMenu = ctm
row
}
table
}
val searchEntry = new TextField()
searchEntry.hgrow = Priority.Sometimes
searchEntry.onKeyPressed = {
(ae: KeyEvent) => if (ae.code == KeyCode.Enter) update_pkgs_view()
}
val statusLabel = new Label("Idle")
val actionLabel = new Label("") {
hgrow = Priority.Always
maxWidth = Double.MaxValue
}
val statusBox = new HBox {
children = Seq(
new Label("Tlmgr status:") {
vgrow = Priority.Always
alignmentInParent = Pos.CenterLeft
},
statusLabel,
actionLabel
)
maxWidth = Double.MaxValue
hgrow = Priority.Always
alignment = Pos.Center
alignmentInParent = Pos.CenterLeft
padding = Insets(10)
spacing = 10
}
val searchBox = new HBox {
children = Seq(
new Label("Search:") {
vgrow = Priority.Always
alignmentInParent = Pos.CenterLeft
},
searchEntry,
new Button("Go") {
onAction = _ => update_pkgs_view()
},
new Button("Reset") {
onAction = _ => {
searchEntry.text = ""
update_pkgs_view()
}
}
)
alignment = Pos.Center
// do not add padding at the bottom as we have one from the status field
padding = Insets(10,10,0,10)
spacing = 10
}
val pkgsContainer = new VBox {
children = Seq(packageTable,searchBox)
}
val pkgstabs: TabPane = new TabPane {
minWidth = 400
vgrow = Priority.Always
tabs = Seq(
new Tab {
text = "Packages"
closable = false
content = pkgsContainer
},
new Tab {
text = "Updates"
closable = false
content = updateTable
},
new Tab {
text = "Backups"
closable = false
content = backupTable
}
)
}
// val spacerMenu: Menu = new Menu(" ")
// spacerMenu.disable = true
// spacerMenu.hgrow = Priority.Always
val menuBar: MenuBar = new MenuBar {
useSystemMenuBar = true
// menus.addAll(mainMenu, optionsMenu, helpMenu)
menus.addAll(mainMenu, pkgsMenu, toolsMenu, optionsMenu)
}
var updLoaded = false
var bckLoaded = false
pkgstabs.selectionModel().selectedItem.onChange(
(a,b,c) => {
if (a.value.text() == "Backups") {
if (!bckLoaded) {
load_backups_update_bkps_view()
bckLoaded = true
}
menuBar.menus = Seq(mainMenu, toolsMenu, optionsMenu)
} else if (a.value.text() == "Updates") {
// only update if not done already
if (!updLoaded) {
load_updates_update_upds_view()
updLoaded = true
}
menuBar.menus = Seq(mainMenu, updMenu, toolsMenu, optionsMenu)
} else if (a.value.text() == "Packages") {
menuBar.menus = Seq(mainMenu, pkgsMenu, toolsMenu, optionsMenu)
}
}
)
stage = new PrimaryStage {
title = "TLCockpit"
scene = new Scene {
root = {
// val topBox = new HBox {
// children = List(menuBar, statusLabel)
// }
// topBox.hgrow = Priority.Always
// topBox.maxWidth = Double.MaxValue
val topBox = menuBar
val centerBox = new VBox {
padding = Insets(10)
children = List(pkgstabs, statusBox, expertPane, outerrpane)
}
new BorderPane {
// padding = Insets(20)
top = topBox
// left = leftBox
center = centerBox
// bottom = bottomBox
}
}
}
icons.add(iconImage)
}
stage.onCloseRequest = (e: Event) => callback_quit()
stage.width = 800
val selectedTab = props.getOrDefault("StartupTab", "packages")
StartTabPkgs.selected = true
StartTabUpds.selected = false
StartTabBcks.selected = false
var StartupTab = 0
selectedTab match {
case "packages" => {}
case "updates" => {
StartTabPkgs.selected = false
StartTabUpds.selected = true
StartupTab = 1
}
case "backups" => {
StartTabPkgs.selected = false
StartTabBcks.selected = true
StartupTab = 2
}
case _ => {
logger.warn(s"Unrecognized setting for StartupTab in config file: $selectedTab")
}
}
var currentPromise = Promise[(String,Array[String])]()
val pendingJobs = scala.collection.mutable.Queue[(String,(String, Array[String]) => Unit)]()
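  // Only one tlmgr shell command is in flight at any time: currentPromise is completed
  // when the stdout reader sees the "tlmgr> " prompt (or the process terminates for a
  // restart), and commands issued while it is still pending are parked in pendingJobs
  // and drained one after another from the reader thread.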
def initialize_tlmgr(): TlmgrProcess = {
tlmgrBusy.value = true
actionLabel.text = "[initializing tlmgr]"
// create new sync vars for each process
val outputLine = new SyncVar[String]
val errorLine = new SyncVar[String]
val tt = new TlmgrProcess(
(s: String) => {
logger.trace(s"outputline put ${s}")
outputLine.put(s)
},
(s: String) => {
logger.trace(s"errorline put ${s}")
errorLine.put(s)
}
)
if (!tt.start_process()) {
logger.debug("Cannot start tlmgr process, terminating!")
Platform.exit()
sys.exit(1)
}
logger.debug("initialize_tlmgr: sleeping after starting process")
Thread.sleep(1000)
/* val tlmgrMonitor = Future {
while (true) {
if (!tlmgr.isAlive) {
logger.debug("TLMGR HAS DIED!!!!")
// Platform.exit()
// sys.exit(1)
}
Thread.sleep(5000)
}
}*/
val stdoutFuture = Future {
val tlmgrOutput = ArrayBuffer[String]()
var tlmgrStatus = ""
var alive = true
logger.debug("initialize tlmgr: starting stdout reader thread")
while (alive) {
logger.trace("stdout reader before outputLine.take")
val s = outputLine.take
logger.trace("stdout reader after outputLine.take")
if (s == null) {
alive = false
logger.debug("got null from stdout, tlmgr seems to be terminated for restart")
if (!currentPromise.isCompleted) {
logger.debug(s"Fulfilling remaining open promise with ${tlmgrStatus} and ${tlmgrOutput.toArray.mkString}")
currentPromise.success((tlmgrStatus, tlmgrOutput.toArray))
}
tlmgrStatus = ""
tlmgrOutput.clear()
tlmgrBusy.value = false
} else {
logger.trace(s"DEBUG: got ==" + s + "==")
if (s == "OK") {
tlmgrStatus = s
} else if (s == "ERROR") {
tlmgrStatus = s
} else if (s == "tlmgr> ") {
logger.trace(s"Fulfilling current promise with ${tlmgrStatus} and ${tlmgrOutput.toArray.mkString}!")
if (!currentPromise.isCompleted) {
currentPromise.success((tlmgrStatus, tlmgrOutput.toArray))
}
tlmgrStatus = ""
tlmgrOutput.clear()
tlmgrBusy.value = false
if (pendingJobs.nonEmpty) {
logger.debug("pending Job found!")
val nextCmd = pendingJobs.dequeue()
logger.debug(s"running ${nextCmd._1}")
tlmgr_run_one_cmd(nextCmd._1, nextCmd._2)
}
} else {
tlmgrOutput += s
stdoutLineUpdateFunc(s)
}
}
}
logger.debug("initialize tlmgr: finishing stdout reader thread")
}
tlmgrBusy.onChange({
Platform.runLater {
statusLabel.text = if (tlmgrBusy.value) "Busy" else "Idle"
if (!tlmgrBusy.value)
actionLabel.text = ""
}
})
stdoutFuture.onComplete {
case Success(value) =>
logger.debug(s"tlmgr stdout reader terminated: ${value}")
case Failure(e) =>
logger.debug(s"tlmgr stdout reader terminated with error: ${e}")
}
val stderrFuture = Future {
var alive = true
logger.debug("initialize tlmgr: starting stderr reader thread")
while (alive) {
val s = errorLine.take
if (s == null)
alive = false
else
stderrLineUpdateFunc(s)
}
logger.debug("initialize tlmgr: finishing stderr reader thread")
}
stderrFuture.onComplete {
case Success(value) =>
logger.debug(s"tlmgr stderr reader terminated: ${value}")
case Failure(e) =>
logger.debug(s"tlmgr stderr reader terminated with failure: ${e}")
}
tt
}
def tlmgr_post_init() = {
// check for tlmgr revision
tlmgr_send("version", (status,output) => {
logger.debug(s"Callback after version, got ${status} and ${output.mkString("\n")}")
output.foreach ( l => {
if (l.startsWith("revision ")) {
val tlmgrRev = l.stripPrefix("revision ")
if (tlmgrRev == "unknown") {
logger.debug("Unknown tlmgr revision, assuming git/svn version")
logText += "Unknown tlmgr revision, assuming git/svn version"
} else {
if (tlmgrRev.toInt < 45838) {
new Alert(AlertType.Error) {
initOwner(stage)
title = "TeX Live Manager tlmgr is too old"
headerText = "TeX Live Manager tlmgr is too old"
contentText = "Please update from the command line\nusing 'tlmgr update --self'\nTerminating!"
}.showAndWait()
callback_quit()
}
}
}
})
pkgs.clear()
upds.clear()
bkps.clear()
logger.debug("Before loading tlpdb")
load_tlpdb_update_pkgs_view_no_json()
logger.debug("after loading tlpdb")
pkgstabs.selectionModel().select(StartupTab)
})
}
def tlmgr_run_one_cmd(s: String, onCompleteFunc: (String, Array[String]) => Unit): Unit = {
currentPromise = Promise[(String, Array[String])]()
tlmgrBusy.value = true
Platform.runLater {
actionLabel.text = s"[${s}]"
}
currentPromise.future.onComplete {
case Success((a, b)) =>
logger.debug("tlmgr run one cmd: current future completed!")
Platform.runLater {
logger.debug("tlmgr run one cmd: running on complete function")
onCompleteFunc(a, b)
}
case Failure(ex) =>
logger.debug("Running tlmgr command did no succeed" + ex.getMessage)
errorText += "Running a tlmgr command did not succeed: " + ex.getMessage
}
logger.debug(s"sending to tlmgr: ${s}")
tlmgr.send_command(s)
}
def tlmgr_send(s: String, onCompleteFunc: (String, Array[String]) => Unit): Unit = {
// logText.clear()
outputText.clear()
// don't close debug panel when it is open
// outerrpane.expanded = false
if (!currentPromise.isCompleted) {
logger.debug(s"tlmgr busy, put onto pending jobs: $s")
logger.debug("Currently running job: " + currentPromise)
pendingJobs += ((s, onCompleteFunc))
} else {
logger.debug(s"tlmgr_send sending ${s}")
tlmgr_run_one_cmd(s, onCompleteFunc)
}
}
def reinitialize_tlmgr(): Unit = {
logger.debug("reinit tlmgr: entering, clearing pending jobs")
pendingJobs.clear()
logger.debug("reinit tlmgr: cleared pending jobs")
// if (!currentPromise.isCompleted) {
// logger.debug("reinit tlmgr: current promise not complete, completing it")
// currentPromise.success(("",Array[String]()))
// logger.debug("reinit tlmgr: after completing current promise")
// }
logger.debug("reinit tlmgr: cleaning up tlmgr")
tlmgr.cleanup()
logger.debug("reinit tlmgr: sleeping 1s")
Thread.sleep(1000)
logger.debug("reinit tlmgr: initializaing new tlmgr")
tlmgr = initialize_tlmgr()
logger.debug("reinit tlmgr: finished")
tlmgr_post_init()
logger.debug("reinit tlmgr: post init done")
updLoaded = false
pkgstabs.getSelectionModel.select(0)
}
def defaultStdoutLineUpdateFunc(l: String) : Unit = { logger.trace(s"DEBUG: got ==$l== from tlmgr") }
def defaultStderrLineUpdateFunc(l: String) : Unit = { Platform.runLater { logText.append(l) } }
var stdoutLineUpdateFunc: String => Unit = defaultStdoutLineUpdateFunc
var stderrLineUpdateFunc: String => Unit = defaultStderrLineUpdateFunc
var tlmgr = initialize_tlmgr()
tlmgr_post_init()
} // object ApplicationMain
// vim:set tabstop=2 expandtab : //
| TeX-Live/tlcockpit | src/main/scala/TLCockpit/ApplicationMain.scala | Scala | gpl-3.0 | 60,601 |
package freecli
package circe
import scala.io.Source
import cats.Show
import io.circe.generic.auto._
import io.circe.Json
import io.circe.parser._
import core.api.StringDecoder
import circe.all._
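// The StringDecoder instances provided by freecli-circe accept either an inline JSON
// string or a path to a file containing JSON; the tests below exercise both forms,
// including decoding into case classes via circe's generic derivation.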
class Test extends testkit.Test {
describe("Circe Json StringDecoder") {
it("decode inline string to json") {
implicitly[StringDecoder[Json]].apply("""{"test": "test"}""").valid should === (
parse("""{"test": "test"}""").toOption.get)
}
it("fail to decode inline string to json") {
implicitly[StringDecoder[Json]].apply("""{"test: "test"}""").invalid
}
it("decode inline to case class via json") {
case class Foo(test: String)
implicit val s = new Show[Foo] {
override def show(f: Foo): String = f.test
}
implicitly[StringDecoder[Foo]].apply("""{"test": "test"}""").valid should === (
Foo("test"))
}
it("fail decode inline to case class via json") {
case class Foo(test: String)
implicit val s = new Show[Foo] {
override def show(f: Foo): String = f.test
}
implicitly[StringDecoder[Foo]].apply("""{"test": 1}""").invalid
}
it("decode file to json") {
val file = getClass.getResource("/test.json").getFile
val fileContents = Source.fromFile(file).mkString
implicitly[StringDecoder[Json]].apply(file).valid should
=== (parse(fileContents).toOption.get)
}
it("fail decode file to json") {
val file = getClass.getResource("/invalid_test.json").getFile
implicitly[StringDecoder[Json]].apply(file).invalid
}
it("decode file to case class via json") {
val file = getClass.getResource("/test.json").getFile
val fileContents = Source.fromFile(file).mkString
case class Foo(foo: String, bar: Bar)
case class Bar(bar: String)
implicit val s = new Show[Foo] {
override def show(f: Foo): String = f.toString
}
implicitly[StringDecoder[Foo]].apply(file).valid should === (
decode[Foo](fileContents).toOption.get)
}
it("fail to decode file to case class via json") {
val file = getClass.getResource("/test.json").getFile
case class Foo(foo: String, bar1: Bar)
case class Bar(bar: String)
implicit val s = new Show[Foo] {
override def show(f: Foo): String = f.toString
}
implicitly[StringDecoder[Foo]].apply(file).invalid
}
}
}
| pavlosgi/freecli | circe/src/test/scala/freecli/circe/Test.scala | Scala | apache-2.0 | 2,430 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import java.util.concurrent.{ConcurrentHashMap, ConcurrentLinkedQueue, Semaphore, TimeUnit}
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import scala.concurrent.duration._
import org.scalatest.concurrent.Eventually
import org.apache.spark._
import org.apache.spark.internal.config
import org.apache.spark.scheduler._
import org.apache.spark.scheduler.cluster.StandaloneSchedulerBackend
import org.apache.spark.util.{ResetSystemProperties, SystemClock, ThreadUtils}
class BlockManagerDecommissionIntegrationSuite extends SparkFunSuite with LocalSparkContext
with ResetSystemProperties with Eventually {
val numExecs = 3
val numParts = 3
val TaskStarted = "TASK_STARTED"
val TaskEnded = "TASK_ENDED"
val JobEnded = "JOB_ENDED"
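  // Markers for when runDecomTest triggers the executor decommission: while a task is
  // still running (TASK_STARTED), after the first task has finished but before the job
  // ends (TASK_ENDED), or only after the whole job has completed (JOB_ENDED).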
Seq(false, true).foreach { isEnabled =>
test(s"SPARK-32850: BlockManager decommission should respect the configuration " +
s"(enabled=${isEnabled})") {
val conf = new SparkConf()
.setAppName("test-blockmanager-decommissioner")
.setMaster("local-cluster[2, 1, 1024]")
.set(config.DECOMMISSION_ENABLED, true)
.set(config.STORAGE_DECOMMISSION_ENABLED, isEnabled)
.set(config.STORAGE_DECOMMISSION_SHUFFLE_BLOCKS_ENABLED, isEnabled)
sc = new SparkContext(conf)
TestUtils.waitUntilExecutorsUp(sc, 2, 60000)
val executors = sc.getExecutorIds().toArray
val decommissionListener = new SparkListener {
override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = {
// ensure Tasks launched at executors before they're marked as decommissioned by driver
Thread.sleep(3000)
sc.schedulerBackend.asInstanceOf[StandaloneSchedulerBackend]
.decommissionExecutors(
executors.map { id => (id, ExecutorDecommissionInfo("test")) },
true,
false)
}
}
sc.addSparkListener(decommissionListener)
val decommissionStatus: Seq[Boolean] = sc.parallelize(1 to 100, 2).mapPartitions { _ =>
val startTime = System.currentTimeMillis()
while (SparkEnv.get.blockManager.decommissioner.isEmpty &&
// wait at most 6 seconds for BlockManager to start to decommission (if enabled)
System.currentTimeMillis() - startTime < 6000) {
Thread.sleep(300)
}
        val blockManagerDecommissionStatus = SparkEnv.get.blockManager.decommissioner.isDefined
Iterator.single(blockManagerDecommissionStatus)
}.collect()
assert(decommissionStatus.forall(_ == isEnabled))
sc.removeSparkListener(decommissionListener)
}
}
testRetry(s"verify that an already running task which is going to cache data succeeds " +
s"on a decommissioned executor after task start") {
runDecomTest(true, false, TaskStarted)
}
test(s"verify that an already running task which is going to cache data succeeds " +
s"on a decommissioned executor after one task ends but before job ends") {
runDecomTest(true, false, TaskEnded)
}
test(s"verify that shuffle blocks are migrated") {
runDecomTest(false, true, JobEnded)
}
test(s"verify that both migrations can work at the same time") {
runDecomTest(true, true, JobEnded)
}
private def runDecomTest(
persist: Boolean,
shuffle: Boolean,
whenToDecom: String): Unit = {
val migrateDuring = whenToDecom != JobEnded
val master = s"local-cluster[${numExecs}, 1, 1024]"
val conf = new SparkConf().setAppName("test").setMaster(master)
.set(config.DECOMMISSION_ENABLED, true)
.set(config.STORAGE_DECOMMISSION_ENABLED, true)
.set(config.STORAGE_DECOMMISSION_RDD_BLOCKS_ENABLED, persist)
.set(config.STORAGE_DECOMMISSION_SHUFFLE_BLOCKS_ENABLED, shuffle)
// Since we use the bus for testing we don't want to drop any messages
.set(config.LISTENER_BUS_EVENT_QUEUE_CAPACITY, 1000000)
      // Just replicate blocks quickly during testing; there isn't another
      // workload we need to worry about.
.set(config.STORAGE_DECOMMISSION_REPLICATION_REATTEMPT_INTERVAL, 10L)
if (whenToDecom == TaskStarted) {
// We are using accumulators below, make sure those are reported frequently.
conf.set(config.EXECUTOR_HEARTBEAT_INTERVAL.key, "10ms")
}
sc = new SparkContext(master, "test", conf)
// Wait for the executors to start
TestUtils.waitUntilExecutorsUp(sc = sc,
numExecutors = numExecs,
timeout = 60000) // 60s
val input = sc.parallelize(1 to numParts, numParts)
val accum = sc.longAccumulator("mapperRunAccumulator")
val sleepIntervalMs = whenToDecom match {
// Increase the window of time b/w task started and ended so that we can decom within that.
case TaskStarted => 10000
// Make one task take a really short time so that we can decommission right after it is
// done but before its peers are done.
case TaskEnded =>
if (TaskContext.getPartitionId() == 0) {
100
} else {
1000
}
// No sleep otherwise
case _ => 0
}
// Create a new RDD where we have sleep in each partition, we are also increasing
// the value of accumulator in each partition
val baseRdd = input.mapPartitions { x =>
accum.add(1)
if (sleepIntervalMs > 0) {
Thread.sleep(sleepIntervalMs)
}
x.map(y => (y, y))
}
val testRdd = shuffle match {
case true => baseRdd.reduceByKey(_ + _)
case false => baseRdd
}
// Listen for the job & block updates
val executorRemovedSem = new Semaphore(0)
val taskEndEvents = new ConcurrentLinkedQueue[SparkListenerTaskEnd]()
val executorsActuallyStarted = new ConcurrentHashMap[String, Boolean]()
val blocksUpdated = ArrayBuffer.empty[SparkListenerBlockUpdated]
def getCandidateExecutorToDecom: Option[String] = if (whenToDecom == TaskStarted) {
executorsActuallyStarted.keySet().asScala.headOption
} else {
taskEndEvents.asScala.filter(_.taskInfo.successful).map(_.taskInfo.executorId).headOption
}
sc.addSparkListener(new SparkListener {
override def onExecutorRemoved(execRemoved: SparkListenerExecutorRemoved): Unit = {
executorRemovedSem.release()
}
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = {
taskEndEvents.add(taskEnd)
}
override def onBlockUpdated(blockUpdated: SparkListenerBlockUpdated): Unit = synchronized {
blocksUpdated.append(blockUpdated)
}
override def onExecutorMetricsUpdate(
executorMetricsUpdate: SparkListenerExecutorMetricsUpdate): Unit = {
val executorId = executorMetricsUpdate.execId
if (executorId != SparkContext.DRIVER_IDENTIFIER) {
val validUpdate = executorMetricsUpdate
.accumUpdates
.flatMap(_._4)
.exists { accumInfo =>
accumInfo.name == accum.name && accumInfo.update.exists(_.asInstanceOf[Long] >= 1)
}
if (validUpdate) {
executorsActuallyStarted.put(executorId, java.lang.Boolean.TRUE)
}
}
}
})
// Cache the RDD lazily
if (persist) {
testRdd.persist()
}
// Start the computation of RDD - this step will also cache the RDD
val asyncCount = testRdd.countAsync()
// Make sure the job is either mid run or otherwise has data to migrate.
if (migrateDuring) {
// Wait for one of the tasks to succeed and finish writing its blocks.
// This way we know that this executor had real data to migrate when it is subsequently
// decommissioned below.
val intervalMs = if (whenToDecom == TaskStarted) {
3.milliseconds
} else {
10.milliseconds
}
eventually(timeout(20.seconds), interval(intervalMs)) {
assert(getCandidateExecutorToDecom.isDefined)
}
} else {
ThreadUtils.awaitResult(asyncCount, 1.minute)
}
// Decommission one of the executors.
val sched = sc.schedulerBackend.asInstanceOf[StandaloneSchedulerBackend]
val execToDecommission = getCandidateExecutorToDecom.get
logInfo(s"Decommissioning executor ${execToDecommission}")
// Decommission executor and ensure it is not relaunched by setting adjustTargetNumExecutors
sched.decommissionExecutor(
execToDecommission,
ExecutorDecommissionInfo("", None),
adjustTargetNumExecutors = true)
val decomTime = new SystemClock().getTimeMillis()
// Wait for job to finish.
val asyncCountResult = ThreadUtils.awaitResult(asyncCount, 1.minute)
assert(asyncCountResult === numParts)
// All tasks finished, so accum should have been increased numParts times.
assert(accum.value === numParts)
sc.listenerBus.waitUntilEmpty()
val taskEndEventsCopy = taskEndEvents.asScala
if (shuffle) {
// mappers & reducers which succeeded
assert(taskEndEventsCopy.count(_.reason == Success) === 2 * numParts,
s"Expected ${2 * numParts} tasks got ${taskEndEvents.size} (${taskEndEvents})")
} else {
// only mappers which executed successfully
assert(taskEndEventsCopy.count(_.reason == Success) === numParts,
s"Expected ${numParts} tasks got ${taskEndEvents.size} (${taskEndEvents})")
}
val minTaskEndTime = taskEndEventsCopy.map(_.taskInfo.finishTime).min
val maxTaskEndTime = taskEndEventsCopy.map(_.taskInfo.finishTime).max
// Verify that the decom time matched our expectations
val decomAssertMsg = s"$whenToDecom: decomTime: $decomTime, minTaskEnd: $minTaskEndTime," +
s" maxTaskEnd: $maxTaskEndTime"
assert(minTaskEndTime <= maxTaskEndTime, decomAssertMsg)
whenToDecom match {
case TaskStarted => assert(minTaskEndTime > decomTime, decomAssertMsg)
case TaskEnded => assert(minTaskEndTime <= decomTime &&
decomTime < maxTaskEndTime, decomAssertMsg)
case JobEnded => assert(maxTaskEndTime <= decomTime, decomAssertMsg)
}
// Wait for our respective blocks to have migrated
eventually(timeout(1.minute), interval(10.milliseconds)) {
if (persist) {
// One of our blocks should have moved.
val rddUpdates = blocksUpdated.filter { update =>
val blockId = update.blockUpdatedInfo.blockId
blockId.isRDD}
val blockLocs = rddUpdates.map { update =>
(update.blockUpdatedInfo.blockId.name,
update.blockUpdatedInfo.blockManagerId)}
val blocksToManagers = blockLocs.groupBy(_._1).mapValues(_.size)
assert(blocksToManagers.exists(_._2 > 1),
s"We should have a block that has been on multiple BMs in rdds:\n ${rddUpdates} from:\n" +
s"${blocksUpdated}\n but instead we got:\n ${blocksToManagers}")
}
// If we're migrating shuffles we look for any shuffle block updates
// as there is no block update on the initial shuffle block write.
if (shuffle) {
val numDataLocs = blocksUpdated.count { update =>
val blockId = update.blockUpdatedInfo.blockId
blockId.isInstanceOf[ShuffleDataBlockId]
}
val numIndexLocs = blocksUpdated.count { update =>
val blockId = update.blockUpdatedInfo.blockId
blockId.isInstanceOf[ShuffleIndexBlockId]
}
assert(numDataLocs === 1, s"Expect shuffle data block updates in ${blocksUpdated}")
assert(numIndexLocs === 1, s"Expect shuffle index block updates in ${blocksUpdated}")
}
}
    // Since the RDD is cached or shuffled, further usage of the same RDD should use the
    // cached data. Original RDD partitions should not be recomputed, i.e. accum
    // should have the same value as before
assert(testRdd.count() === numParts)
assert(accum.value === numParts)
val storageStatus = sc.env.blockManager.master.getStorageStatus
val execIdToBlocksMapping = storageStatus.map(
status => (status.blockManagerId.executorId, status.blocks)).toMap
// No cached blocks should be present on executor which was decommissioned
assert(
!execIdToBlocksMapping.contains(execToDecommission) ||
execIdToBlocksMapping(execToDecommission).keys.filter(_.isRDD).toSeq === Seq(),
"Cache blocks should be migrated")
if (persist) {
// There should still be all the RDD blocks cached
assert(execIdToBlocksMapping.values.flatMap(_.keys).count(_.isRDD) === numParts)
}
// Wait for the executor to be removed automatically after migration.
    // This is set to a high value since GitHub Actions sometimes has high latency,
    // but I've never seen this go for more than a minute.
assert(executorRemovedSem.tryAcquire(1, 5L, TimeUnit.MINUTES))
    // Since the RDD is cached or shuffled, further usage of the same RDD should use the
    // cached data. Original RDD partitions should not be recomputed, i.e. accum
    // should have the same value as before
assert(testRdd.count() === numParts)
assert(accum.value === numParts)
}
}
| wangmiao1981/spark | core/src/test/scala/org/apache/spark/storage/BlockManagerDecommissionIntegrationSuite.scala | Scala | apache-2.0 | 13,924 |
// Copyright (C) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License. See LICENSE in project root for information.
package com.microsoft.ml.spark.schema
/** Contains objects and functions to manipulate Categoricals */
import org.apache.spark.sql.DataFrame
import org.apache.spark.sql.types._
import org.apache.spark.ml.attribute._
import SchemaConstants._
import javassist.bytecode.DuplicateMemberException
import scala.reflect.ClassTag
object CategoricalUtilities {
/** Sets the given levels on the column.
* @return The modified dataset.
*/
def setLevels(dataset: DataFrame, column: String, levels: Array[_]): DataFrame = {
if (levels == null) dataset
else {
val nonNullLevels = levels.filter(_ != null)
val hasNullLevels = nonNullLevels.length != levels.length
dataset.withColumn(column,
dataset.col(column).as(column,
updateLevelsMetadata(dataset.schema(column).metadata,
nonNullLevels,
getCategoricalTypeForValue(nonNullLevels.head), hasNullLevels)))
}
}
/** Update the levels on the existing metadata.
* @param existingMetadata The existing metadata to add to.
* @param levels The levels to add to the metadata.
* @param dataType The datatype of the levels.
* @return The new metadata.
*/
def updateLevelsMetadata(existingMetadata: Metadata,
levels: Array[_],
dataType: DataType,
hasNullLevels: Boolean): Metadata = {
val bldr =
if (existingMetadata.contains(MMLTag)) {
new MetadataBuilder().withMetadata(existingMetadata.getMetadata(MMLTag))
} else {
new MetadataBuilder()
}
bldr.putBoolean(Ordinal, false)
bldr.putBoolean(HasNullLevels, hasNullLevels)
dataType match {
case DataTypes.StringType => bldr.putStringArray(ValuesString, levels.asInstanceOf[Array[String]])
case DataTypes.DoubleType => bldr.putDoubleArray(ValuesDouble, levels.asInstanceOf[Array[Double]])
// Ints require special treatment, because Spark does not have putIntArray yet:
case DataTypes.IntegerType => bldr.putLongArray(ValuesInt, levels.asInstanceOf[Array[Int]].map(_.toLong))
case DataTypes.LongType => bldr.putLongArray(ValuesLong, levels.asInstanceOf[Array[Long]])
case DataTypes.BooleanType => bldr.putBooleanArray(ValuesBool, levels.asInstanceOf[Array[Boolean]])
case _ => throw new UnsupportedOperationException("Unsupported categorical data type: " + dataType)
}
val metadata = bldr.build()
new MetadataBuilder().withMetadata(existingMetadata).putMetadata(MMLTag, metadata).build()
}
/** Gets the levels from the dataset.
* @param schema The schema to get the levels from.
* @param column The column to retrieve metadata levels from.
* @return The levels.
*/
def getLevels(schema: StructType, column: String): Option[Array[_]] = {
val metadata = schema(column).metadata
if (metadata.contains(MMLTag)) {
val dataType: Option[DataType] = CategoricalColumnInfo.getDataType(metadata, false)
if (dataType.isEmpty) None
else {
dataType.get match {
case DataTypes.StringType => Some(getMap[String](metadata).levels)
case DataTypes.LongType => Some(getMap[Long](metadata).levels)
case DataTypes.IntegerType => Some(getMap[Int](metadata).levels)
case DataTypes.DoubleType => Some(getMap[Double](metadata).levels)
case DataTypes.BooleanType => Some(getMap[Boolean](metadata).levels)
case default => throw new UnsupportedOperationException("Unknown categorical type: " + default.typeName)
}
}
} else {
None
}
}
/** Gets the number of levels from the dataset.
* @param dataset The dataset to get the levels count from.
* @param column The column to retrieve metadata levels count from.
* @return The number of levels.
*/
def getLevelCount(dataset: DataFrame, column: String): Option[Int] = {
val metadata = dataset.schema(column).metadata
if (metadata.contains(MMLTag)) {
val dataType: Option[DataType] = CategoricalColumnInfo.getDataType(metadata, false)
if (dataType.isEmpty) None
else {
val numLevels =
dataType.get match {
case DataTypes.StringType => getMap[String](metadata).numLevels
case DataTypes.LongType => getMap[Long](metadata).numLevels
case DataTypes.IntegerType => getMap[Int](metadata).numLevels
case DataTypes.DoubleType => getMap[Double](metadata).numLevels
case DataTypes.BooleanType => getMap[Boolean](metadata).numLevels
case default => throw new UnsupportedOperationException("Unknown categorical type: " + default.typeName)
}
Option(numLevels)
}
} else {
None
}
}
/** Get the map of array of T from the metadata.
*
* @param ct Implicit class tag.
* @param metadata The metadata to retrieve from.
* @tparam T The type of map to retrieve.
* @return The map of array of T.
*/
def getMap[T](metadata: Metadata)(implicit ct: ClassTag[T]): CategoricalMap[T] = {
val data =
if (metadata.contains(MMLTag)) {
metadata.getMetadata(MMLTag)
} else if (metadata.contains(MLlibTag)) {
metadata.getMetadata(MLlibTag)
} else {
sys.error("Invalid metadata to retrieve map from")
}
val hasNullLevel =
if (data.contains(HasNullLevels)) data.getBoolean(HasNullLevels)
else false
val isOrdinal = false
val categoricalMap = implicitly[ClassTag[T]] match {
case ClassTag.Int => new CategoricalMap[Int](data.getLongArray(ValuesInt).map(_.toInt), isOrdinal, hasNullLevel)
case ClassTag.Double => new CategoricalMap[Double](data.getDoubleArray(ValuesDouble), isOrdinal, hasNullLevel)
case ClassTag.Boolean => new CategoricalMap[Boolean](data.getBooleanArray(ValuesBool), isOrdinal, hasNullLevel)
case ClassTag.Long => new CategoricalMap[Long](data.getLongArray(ValuesLong), isOrdinal, hasNullLevel)
case _ => new CategoricalMap[String](data.getStringArray(ValuesString), isOrdinal, hasNullLevel)
}
categoricalMap.asInstanceOf[CategoricalMap[T]]
}
/** Get a type for the given value.
* @param value The value to get the type from.
* @tparam T The generic type of the value.
* @return The DataType based on the value.
*/
def getCategoricalTypeForValue[T](value: T): DataType = {
value match {
      // Complicated type matching is required to get around type erasure
case _: String => DataTypes.StringType
case _: Double => DataTypes.DoubleType
case _: Int => DataTypes.IntegerType
case _: Long => DataTypes.LongType
case _: Boolean => DataTypes.BooleanType
case _ => throw new UnsupportedOperationException("Unsupported categorical data type " + value)
}
}
}
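// Illustrative sketch (hypothetical DataFrame `df` with a string column "color"):
// attach explicit levels to a column and read them back from its schema metadata.
//   val withLevels = CategoricalUtilities.setLevels(df, "color", Array("red", "green", "blue"))
//   CategoricalUtilities.getLevels(withLevels.schema, "color")   // Some(Array("red", "green", "blue"))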
/** A wrapper around level maps: Map[T -> Int] and Map[Int -> T] that converts
  * the data to/from Spark Metadata in both MLlib and AzureML (MML) formats.
* @param levels The level values are assumed to be already sorted as needed
* @param isOrdinal A flag that indicates if the data are ordinal
* @tparam T Input levels could be String, Double, Int, Long, Boolean
*/
class CategoricalMap[T](val levels: Array[T],
val isOrdinal: Boolean = false,
val hasNullLevel: Boolean = false) extends Serializable {
require(levels.distinct.size == levels.size, "Categorical levels are not unique.")
require(!levels.isEmpty, "Levels should not be empty")
/** Total number of levels */
val numLevels = levels.length //TODO: add the maximum possible number of levels?
  /** Spark DataType corresponding to type T */
val dataType = CategoricalUtilities.getCategoricalTypeForValue(levels.find(_ != null).head)
/** Maps levels to the corresponding integer index */
private lazy val levelToIndex: Map[T, Int] = levels.zipWithIndex.toMap
/** Returns the index of the given level, can throw */
def getIndex(level: T): Int = levelToIndex(level)
/** Returns the index of a given level as Option; does not throw */
def getIndexOption(level: T): Option[Int] = levelToIndex.get(level)
/** Checks if the given level exists */
def hasLevel(level: T): Boolean = levelToIndex.contains(level)
/** Returns the level of the given index; can throw */
def getLevel(index: Int): T = levels(index)
/** Returns the level of the given index as Option; does not throw */
def getLevelOption(index: Int): Option[T] =
if (index < 0 || index >= numLevels) None else Some(levels(index))
  /** Stores levels in Spark Metadata in MLlib format */
private def toMetadataMllib(existingMetadata: Metadata): Metadata = {
require(!isOrdinal, "Cannot save Ordinal data in MLlib Nominal format currently," +
" because it does not have a public constructor that accepts Ordinal")
// Currently, MLlib converts all non-string categorical values to string;
// see org.apache.spark.ml.feature.StringIndexer
val strLevels = levels.filter(_ != null).map(_.toString).asInstanceOf[Array[String]]
NominalAttribute.defaultAttr.withValues(strLevels).toMetadata(existingMetadata)
}
/** Stores levels in Spark Metadata in MML format */
private def toMetadataMML(existingMetadata: Metadata): Metadata = {
CategoricalUtilities.updateLevelsMetadata(existingMetadata, levels, dataType, hasNullLevel)
}
/** Add categorical levels to existing Spark Metadata
* @param existingMetadata [tag, categorical metadata] pair is added to existingMetadata,
* where tag is either MLlib or MML
* @param mmlStyle MML (true) or MLlib metadata (false)
*/
def toMetadata(existingMetadata: Metadata, mmlStyle: Boolean): Metadata = {
// assert that metadata does not have data with this tag
def assertNoTag(tag: String) =
assert(!existingMetadata.contains(tag),
        //TODO: add tests to ensure
        s"Metadata already contains the tag $tag; all the data are erased")
if (mmlStyle) {
assertNoTag(MMLTag)
toMetadataMML(existingMetadata)
} else {
assertNoTag(MLlibTag)
toMetadataMllib(existingMetadata)
}
}
/** Add categorical levels and in either MML or MLlib style metadata
* @param mmlStyle MML (true) or MLlib metadata (false)
*/
def toMetadata(mmlStyle: Boolean): Metadata = toMetadata(Metadata.empty, mmlStyle)
}
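// Illustrative sketch (level values are made up): round-trip levels through column metadata.
//   val colors = new CategoricalMap(Array("red", "green", "blue"))
//   colors.getIndex("green")                             // 1
//   colors.getLevelOption(2)                             // Some("blue")
//   val meta = colors.toMetadata(mmlStyle = true)
//   CategoricalUtilities.getMap[String](meta).levels     // Array("red", "green", "blue")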
/** Utilities for getting categorical column info. */
object CategoricalColumnInfo {
  /** Gets the datatype from the column metadata.
    * @param metadata The column metadata
    * @param throwOnInvalid Whether to throw if no valid datatype is found
    * @return The datatype
    */
def getDataType(metadata: Metadata, throwOnInvalid: Boolean = true): Option[DataType] = {
val mmlMetadata =
if (metadata.contains(MMLTag)) {
metadata.getMetadata(MMLTag)
} else {
throw new NoSuchFieldException(s"Could not find valid $MMLTag metadata")
}
val keys = MetadataUtilities.getMetadataKeys(mmlMetadata)
val validatedDataType = keys.foldRight(None: Option[DataType])((metadataKey, result) => metadataKey match {
case ValuesString => getValidated(result, DataTypes.StringType)
case ValuesLong => getValidated(result, DataTypes.LongType)
case ValuesInt => getValidated(result, DataTypes.IntegerType)
case ValuesDouble => getValidated(result, DataTypes.DoubleType)
case ValuesBool => getValidated(result, DataTypes.BooleanType)
case _ => if (result.isDefined) result else None
})
if (validatedDataType.isEmpty && throwOnInvalid) {
throw new NoSuchElementException("Unrecognized datatype or no datatype found in MML metadata")
}
validatedDataType
}
private def getValidated(result: Option[DataType], dataType: DataType): Option[DataType] = {
if (result.isDefined) {
throw new DuplicateMemberException("DataType metadata specified twice")
}
Option(dataType)
}
}
/** Extract categorical info from the DataFrame column
* @param df dataframe
* @param column column name
*/
class CategoricalColumnInfo(df: DataFrame, column: String) {
private val columnSchema = df.schema(column)
private val metadata = columnSchema.metadata
/** Get the basic info: whether the column is categorical or not, actual type of the column, etc */
val (isCategorical, isMML, isOrdinal, dataType, hasNullLevels) = {
val notCategorical = (false, false, false, NullType, false)
if (columnSchema.dataType != DataTypes.IntegerType
&& columnSchema.dataType != DataTypes.DoubleType) notCategorical
else if (metadata.contains(MMLTag)) {
val columnMetadata = metadata.getMetadata(MMLTag)
if (!columnMetadata.contains(Ordinal)) notCategorical
else {
val isOrdinal = columnMetadata.getBoolean(Ordinal)
val hasNullLevels =
if (columnMetadata.contains(HasNullLevels)) columnMetadata.getBoolean(HasNullLevels)
else false
val dataType: DataType = CategoricalColumnInfo.getDataType(metadata).get
(true, true, isOrdinal, dataType, hasNullLevels)
}
} else if (metadata.contains(MLlibTag)) {
val columnMetadata = metadata.getMetadata(MLlibTag)
// nominal metadata has ["type" -> "nominal"] pair
val isCategorical = columnMetadata.contains(MLlibTypeTag) &&
columnMetadata.getString(MLlibTypeTag) == AttributeType.Nominal.name
if (!isCategorical) notCategorical
else {
val isOrdinal = if (columnMetadata.contains(Ordinal)) columnMetadata.getBoolean(Ordinal) else false
val hasNullLevels =
if (columnMetadata.contains(HasNullLevels)) columnMetadata.getBoolean(HasNullLevels)
else false
val dataType =
if (columnMetadata.contains(ValuesString)) DataTypes.StringType
else throw new UnsupportedOperationException("nominal attribute does not contain string levels")
(true, false, isOrdinal, dataType, hasNullLevels)
}
} else
notCategorical
}
}
| rastala/mmlspark | src/core/schema/src/main/scala/Categoricals.scala | Scala | mit | 14,254 |
package slinky.readwrite
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.scalajs.js
trait TypeConstructorWriters {
implicit def optionWriter[T](implicit writer: Writer[T]): Writer[Option[T]] =
_.map(v => writer.write(v)).orNull
implicit def eitherWriter[A, B](implicit aWriter: Writer[A], bWriter: Writer[B]): Writer[Either[A, B]] = { v =>
val written = v.fold(aWriter.write, bWriter.write)
js.Dynamic.literal(
isLeft = v.isLeft,
value = written
)
}
implicit def collectionWriter[T, C[_]](implicit writer: Writer[T], ev: C[T] <:< Iterable[T]): Writer[C[T]] = s => {
val ret = js.Array[js.Object]()
s.foreach(v => ret.push(writer.write(v)))
ret.asInstanceOf[js.Object]
}
implicit def arrayWriter[T](implicit writer: Writer[T]): Writer[Array[T]] = s => {
val ret = new js.Array[js.Object](s.length)
(0 until s.length).foreach(i => ret(i) = (writer.write(s(i))))
ret.asInstanceOf[js.Object]
}
implicit def mapWriter[A, B](implicit abWriter: Writer[(A, B)]): Writer[Map[A, B]] = s => {
collectionWriter[(A, B), Iterable].write(s)
}
implicit def futureWriter[O](implicit oWriter: Writer[O]): Writer[Future[O]] = s => {
import scala.scalajs.js.JSConverters._
s.map(v => oWriter.write(v)).toJSPromise.asInstanceOf[js.Object]
}
}
| shadaj/slinky | readWrite/src/main/scala-2/slinky/readwrite/TypeConstructorWriters.scala | Scala | mit | 1,369 |
package com.omearac.http
import akka.http.scaladsl.server.Directives._
import com.omearac.http.routes.{
ConsumerCommands,
ProducerCommands,
CollectorCommands
}
trait HttpService
extends ConsumerCommands
with ProducerCommands
with CollectorCommands {
def routes =
producerHttpCommands ~
collectorHttpCommands ~
dataConsumerHttpCommands ~
eventConsumerHttpCommands
}
| soujiro0725/market-analysis-microservices | src/main/scala/com/omearac/http/HttpService.scala | Scala | apache-2.0 | 409 |
package org.jetbrains.plugins.scala.lang.formatter.tests
import com.intellij.psi.codeStyle.CommonCodeStyleSettings
import org.jetbrains.plugins.scala.lang.formatter.AbstractScalaFormatterTestBase
/**
* @author Alexander Podkhalyuzin
*/
class ScalaBugsTest extends AbstractScalaFormatterTestBase {
/* stub:
def test {
val before =
"""
""".replace("\\r", "")
val after =
"""
""".replace("\\r", "")
doTextTest(before, after)
}
*/
def testSCL2424 {
val before =
"""
someMethod(new Something, abc, def)
""".replace("\\r", "")
val after =
"""
someMethod(new Something, abc, def)
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL2425 {
val before =
"""
import foo.{Foo, Bar}
""".replace("\\r", "")
val after =
"""
import foo.{Foo, Bar}
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL2477 {
val before =
"""
class Foo {
//some comment
private val i = 0;
/**
* @param p blah-blah-blah
*/
def doSmth(p: Int) {}
//comment
def foo = 1
}
""".replace("\\r", "")
val after =
"""
class Foo {
//some comment
private val i = 0;
/**
* @param p blah-blah-blah
*/
def doSmth(p: Int) {}
//comment
def foo = 1
}
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL1875 {
val before =
"""
/**
* something{@link Foo}
*something
*/
class A
""".replace("\\r", "")
val after =
"""
/**
* something{@link Foo}
* something
*/
class A
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL2066FromDiscussion {
val settings = getCommonSettings
settings.BRACE_STYLE = CommonCodeStyleSettings.NEXT_LINE
val before =
"""
val n = Seq(1,2,3)
n.foreach
{
x =>
{
println(x)
}
}
""".replace("\\r", "")
val after =
"""
val n = Seq(1, 2, 3)
n.foreach
{
x =>
{
println(x)
}
}
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL2775sTrue() {
getScalaSettings.KEEP_ONE_LINE_LAMBDAS_IN_ARG_LIST = true
val before =
"""
Set(1, 2, 3).filter{a => a % 2 == 0}
List((1, 2), (2, 3), (3, 4)).map {case (k: Int, n: Int) => k + n}
Map(1 -> "aa", 2 -> "bb", 3 -> "cc").filter{ case (1, "aa") => true; case _ => false}
""".replace("\\r", "")
val after =
"""
Set(1, 2, 3).filter { a => a % 2 == 0 }
List((1, 2), (2, 3), (3, 4)).map { case (k: Int, n: Int) => k + n }
Map(1 -> "aa", 2 -> "bb", 3 -> "cc").filter { case (1, "aa") => true; case _ => false }
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL2775sFalse() {
getScalaSettings.KEEP_ONE_LINE_LAMBDAS_IN_ARG_LIST = false
getScalaSettings.PLACE_CLOSURE_PARAMETERS_ON_NEW_LINE = true
val before =
"""
Set(1, 2, 3).filter{a => a % 2 == 0}
List((1, 2), (2, 3), (3, 4)).map {case (k: Int, n: Int) => k + n}
Map(1 -> "aa", 2 -> "bb", 3 -> "cc").filter{ case (1, "aa") => true; case _ => false}
""".replace("\\r", "")
val after =
"""
Set(1, 2, 3).filter {
a => a % 2 == 0
}
List((1, 2), (2, 3), (3, 4)).map {
case (k: Int, n: Int) => k + n
}
Map(1 -> "aa", 2 -> "bb", 3 -> "cc").filter {
case (1, "aa") => true;
case _ => false
}
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL2839sTrue() {
getScalaSettings.INSERT_WHITESPACES_IN_SIMPLE_ONE_LINE_METHOD = true
getCommonSettings.KEEP_SIMPLE_METHODS_IN_ONE_LINE = true
val before =
"""
def func() {println("test")}
def func2() {
println("test")}
""".replace("\\r", "")
val after =
"""
def func() { println("test") }
def func2() {
println("test")
}
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL2839sFalse() {
getCommonSettings.KEEP_SIMPLE_METHODS_IN_ONE_LINE = false
val before =
"""
def func() { println()}
def func2() { println()
}
""".replace("\\r", "")
val after =
"""
def func() {
println()
}
def func2() {
println()
}
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL2470() {
getScalaSettings.NOT_CONTINUATION_INDENT_FOR_PARAMS = true
val before =
"""
def m = {
() => 123
}
def m2 = {
() => {
123
}
}
def f[T](i: Int) {
val a = () => 123
}
(a: Int, b: Int, c: Int) => a + b + c
""".replace("\\r", "")
val after =
"""
def m = {
() => 123
}
def m2 = {
() => {
123
}
}
def f[T](i: Int) {
val a = () => 123
}
(a: Int, b: Int, c: Int) => a + b + c
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL3126AllTrue() {
getScalaSettings.SPACE_BEFORE_INFIX_LIKE_METHOD_PARENTHESES = true
getScalaSettings.PRESERVE_SPACE_AFTER_METHOD_DECLARATION_NAME = true
getCommonSettings.SPACE_BEFORE_METHOD_PARENTHESES = true
val before =
"""
def f() {
println()
}
def foo (){}
def g(): Int = 12
def gg(i: Int): Int = {
i*2
}
def test (i: Int) {}
def +++(s: StringBuilder): StringBuilder = {
s append this.toString
}
def ::= (o: Any) {}
""".replace("\\r", "")
val after =
"""
def f () {
println()
}
def foo () {}
def g (): Int = 12
def gg (i: Int): Int = {
i * 2
}
def test (i: Int) {}
def +++ (s: StringBuilder): StringBuilder = {
s append this.toString
}
def ::= (o: Any) {}
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL3126InfixFalse() {
getScalaSettings.SPACE_BEFORE_INFIX_LIKE_METHOD_PARENTHESES = false
getScalaSettings.PRESERVE_SPACE_AFTER_METHOD_DECLARATION_NAME = true
getCommonSettings.SPACE_BEFORE_METHOD_PARENTHESES = true
val before =
"""
def f() {
println()
}
def foo (){}
def g(): Int = 12
def gg(i: Int): Int = {
i*2
}
def test (i: Int) {}
def +++(s: StringBuilder): StringBuilder = {
s append this.toString
}
def ::= (o: Any) {}
""".replace("\\r", "")
val after =
"""
def f () {
println()
}
def foo () {}
def g (): Int = 12
def gg (i: Int): Int = {
i * 2
}
def test (i: Int) {}
def +++ (s: StringBuilder): StringBuilder = {
s append this.toString
}
def ::= (o: Any) {}
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL3126InfixTruePreservevTrue() {
getScalaSettings.SPACE_BEFORE_INFIX_LIKE_METHOD_PARENTHESES = true
getScalaSettings.PRESERVE_SPACE_AFTER_METHOD_DECLARATION_NAME = true
getCommonSettings.SPACE_BEFORE_METHOD_PARENTHESES = false
val before =
"""
def f() {
println()
}
def foo (){}
def g(): Int = 12
def gg(i: Int): Int = {
i*2
}
def test (i: Int) {}
def +++(s: StringBuilder): StringBuilder = {
s append this.toString
}
def ::= (o: Any) {}
""".replace("\\r", "")
val after =
"""
def f() {
println()
}
def foo () {}
def g(): Int = 12
def gg(i: Int): Int = {
i * 2
}
def test (i: Int) {}
def +++ (s: StringBuilder): StringBuilder = {
s append this.toString
}
def ::= (o: Any) {}
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL3126InfixTruePreserveFalse() {
getScalaSettings.SPACE_BEFORE_INFIX_LIKE_METHOD_PARENTHESES = true
getScalaSettings.PRESERVE_SPACE_AFTER_METHOD_DECLARATION_NAME = false
getCommonSettings.SPACE_BEFORE_METHOD_PARENTHESES = false
val before =
"""
def f() {
println()
}
def foo (){}
def g(): Int = 12
def gg(i: Int): Int = {
i*2
}
def test (i: Int) {}
def +++(s: StringBuilder): StringBuilder = {
s append this.toString
}
def ::= (o: Any) {}
""".replace("\\r", "")
val after =
"""
def f() {
println()
}
def foo() {}
def g(): Int = 12
def gg(i: Int): Int = {
i * 2
}
def test(i: Int) {}
def +++ (s: StringBuilder): StringBuilder = {
s append this.toString
}
def ::= (o: Any) {}
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL3126AllFalse() {
getScalaSettings.SPACE_BEFORE_INFIX_LIKE_METHOD_PARENTHESES = false
getScalaSettings.PRESERVE_SPACE_AFTER_METHOD_DECLARATION_NAME = false
getCommonSettings.SPACE_BEFORE_METHOD_PARENTHESES = false
val before =
"""
def f() {
println()
}
def foo (){}
def g(): Int = 12
def gg(i: Int): Int = {
i*2
}
def test (i: Int) {}
def +++(s: StringBuilder): StringBuilder = {
s append this.toString
}
def ::= (o: Any) {}
""".replace("\\r", "")
val after =
"""
def f() {
println()
}
def foo() {}
def g(): Int = 12
def gg(i: Int): Int = {
i * 2
}
def test(i: Int) {}
def +++(s: StringBuilder): StringBuilder = {
s append this.toString
}
def ::=(o: Any) {}
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL2474() {
getCommonSettings.SPACE_BEFORE_METHOD_CALL_PARENTHESES = true
getCommonSettings.SPACE_BEFORE_METHOD_PARENTHESES = true
val before =
"""
def f(i: Int)(j: Int) {}
f(1)(2)
""".replace("\\r", "")
val after =
"""
def f (i: Int)(j: Int) {}
f (1)(2)
""".replace("\\r", "")
doTextTest(before, after)
}
def testThisExtraSpace() {
getCommonSettings.SPACE_BEFORE_METHOD_PARENTHESES = false
getCommonSettings.SPACE_BEFORE_METHOD_CALL_PARENTHESES = false
val before =
"""
class A(i: Int) {
def this(s: String) {
this (s.length)
}
def this () {
this("")
}
}
class B(i: Int)(s: String) {
def this(s: String) {
this(s.length)(s)
}
def this () {
this ("")
}
}
""".replace("\\r", "")
val after =
"""
class A(i: Int) {
def this(s: String) {
this(s.length)
}
def this() {
this("")
}
}
class B(i: Int)(s: String) {
def this(s: String) {
this(s.length)(s)
}
def this() {
this("")
}
}
""".replace("\\r", "")
doTextTest(before, after)
}
def testSpaceInsideClosureBraces() {
getScalaSettings.SPACE_INSIDE_CLOSURE_BRACES = true
getScalaSettings.SPACE_BEFORE_INFIX_METHOD_CALL_PARENTHESES = true
getScalaSettings.KEEP_ONE_LINE_LAMBDAS_IN_ARG_LIST = true
getScalaSettings.PLACE_CLOSURE_PARAMETERS_ON_NEW_LINE = false
getCommonSettings.KEEP_SIMPLE_BLOCKS_IN_ONE_LINE = true
val before =
"""
Array.fill(34){scala.util.Random.nextInt(12) }
foos map{ t=>getCounts(t).toSeq sortBy {-_._2 } map {_._1 }}
bars foreach {case (x, y) =>
list.add(x + y)
}
bars foreach {
case (x,y) => list.add(x+y)
}
bars foreach{ case (x,y) => list.add(x + y) }
""".replace("\\r", "")
val after =
"""
Array.fill(34) { scala.util.Random.nextInt(12) }
foos map { t => getCounts(t).toSeq sortBy { -_._2 } map { _._1 } }
bars foreach { case (x, y) =>
list.add(x + y)
}
bars foreach {
case (x, y) => list.add(x + y)
}
bars foreach { case (x, y) => list.add(x + y) }
""".replace("\\r", "")
doTextTest(before, after)
}
def testNoSpaceInsideClosure() {
getScalaSettings.SPACE_INSIDE_CLOSURE_BRACES = false
getScalaSettings.SPACE_BEFORE_INFIX_METHOD_CALL_PARENTHESES = true
getScalaSettings.KEEP_ONE_LINE_LAMBDAS_IN_ARG_LIST = true
getScalaSettings.PLACE_CLOSURE_PARAMETERS_ON_NEW_LINE = false
getCommonSettings.KEEP_SIMPLE_BLOCKS_IN_ONE_LINE = true
val before =
"""
Array.fill(34){scala.util.Random.nextInt(12) }
foos map{ t=>getCounts(t).toSeq sortBy {-_._2 } map {_._1 }}
bars foreach {case (x, y) =>
list.add(x + y)
}
bars foreach {
case (x,y) => list.add(x+y)
}
bars foreach{ case (x,y) => list.add(x + y) }
""".replace("\\r", "")
val after =
"""
Array.fill(34) {scala.util.Random.nextInt(12)}
foos map {t => getCounts(t).toSeq sortBy {-_._2} map {_._1}}
bars foreach {case (x, y) =>
list.add(x + y)
}
bars foreach {
case (x, y) => list.add(x + y)
}
bars foreach {case (x, y) => list.add(x + y)}
""".replace("\\r", "")
doTextTest(before, after)
}
def testSCL6702() {
getCurrentCodeStyleSettings.FORMATTER_TAGS_ENABLED = true
val before =
"""
|//@formatter:off
|class SCL6702 {
| def foo(p: String ) {
| println(p )
| }
|
| //@formatter:on
| def foop(p: String ): Unit = {
| println(p )
| }
|}
""".stripMargin.replace("\\r", "")
val after =
"""
|//@formatter:off
|class SCL6702 {
| def foo(p: String ) {
| println(p )
| }
|
| //@formatter:on
| def foop(p: String): Unit = {
| println(p)
| }
|}
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL5488_1() {
getScalaSettings.SPACES_IN_ONE_LINE_BLOCKS = false
getScalaSettings.SPACE_INSIDE_CLOSURE_BRACES = false
getCommonSettings.KEEP_SIMPLE_BLOCKS_IN_ONE_LINE = true
val before =
"""
|class SCL5488 {
| val foos = List[List[Integer]]()
| foos map {t => t.toSeq sortBy {-_ } map { _ * 2} }
| val f4: (Int, Int) => Int = { _ + _}
| val f5: (Int, Int) => Int = {_ + _ }
|}
""".stripMargin.replace("\\r", "")
val after =
"""
|class SCL5488 {
| val foos = List[List[Integer]]()
| foos map {t => t.toSeq sortBy {-_} map {_ * 2}}
| val f4: (Int, Int) => Int = {_ + _}
| val f5: (Int, Int) => Int = {_ + _}
|}
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL5488_2() {
getScalaSettings.SPACES_IN_ONE_LINE_BLOCKS = true
getScalaSettings.SPACE_INSIDE_CLOSURE_BRACES = false
getCommonSettings.KEEP_SIMPLE_BLOCKS_IN_ONE_LINE = true
val before =
"""
|class SCL5488 {
| val foos = List[List[Integer]]()
| foos map {t => t.toSeq sortBy {-_ } map { _ * 2} }
| val f4: (Int, Int) => Int = { _ + _}
| val f5: (Int, Int) => Int = {_ + _ }
|}
""".stripMargin.replace("\\r", "")
val after =
"""
|class SCL5488 {
| val foos = List[List[Integer]]()
| foos map { t => t.toSeq sortBy { -_ } map { _ * 2 } }
| val f4: (Int, Int) => Int = { _ + _ }
| val f5: (Int, Int) => Int = { _ + _ }
|}
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL5488_3() {
getScalaSettings.SPACES_IN_ONE_LINE_BLOCKS = false
getScalaSettings.SPACE_INSIDE_CLOSURE_BRACES = true
getCommonSettings.KEEP_SIMPLE_BLOCKS_IN_ONE_LINE = true
val before =
"""
|class SCL5488 {
| val foos = List[List[Integer]]()
| foos map {t => t.toSeq sortBy {-_ } map { _ * 2} }
| val f4: (Int, Int) => Int = { _ + _}
| val f5: (Int, Int) => Int = {_ + _ }
|}
""".stripMargin.replace("\\r", "")
val after =
"""
|class SCL5488 {
| val foos = List[List[Integer]]()
| foos map { t => t.toSeq sortBy {-_} map {_ * 2} }
| val f4: (Int, Int) => Int = {_ + _}
| val f5: (Int, Int) => Int = {_ + _}
|}
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL5488_4() {
getScalaSettings.SPACES_IN_ONE_LINE_BLOCKS = true
getScalaSettings.SPACE_INSIDE_CLOSURE_BRACES = true
getCommonSettings.KEEP_SIMPLE_BLOCKS_IN_ONE_LINE = true
val before =
"""
|class SCL5488 {
| val foos = List[List[Integer]]()
| foos map {t => t.toSeq sortBy {-_ } map { _ * 2} }
| val f4: (Int, Int) => Int = { _ + _}
| val f5: (Int, Int) => Int = {_ + _ }
|}
""".stripMargin.replace("\\r", "")
val after =
"""
|class SCL5488 {
| val foos = List[List[Integer]]()
| foos map { t => t.toSeq sortBy { -_ } map { _ * 2 } }
| val f4: (Int, Int) => Int = { _ + _ }
| val f5: (Int, Int) => Int = { _ + _ }
|}
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL9243() {
getScalaSettings.INDENT_BRACED_FUNCTION_ARGS = false
val before =
"""
|class a {
| foo(
| {
| "b" + "a" + "r"
| }
| )
|}
""".stripMargin.replace("\\r", "")
val after = before
doTextTest(before, after)
}
def testSCL5427(): Unit = {
getScalaSettings.USE_SCALADOC2_FORMATTING = false
val before =
"""
|/**
| * Some comments
| */
|class A
""".stripMargin.replace("\\r", "")
val after =
"""
|/**
| * Some comments
| */
|class A
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL9264(): Unit = {
val before =
"""
|class X {
| (for {
| i <- 1 to 10
| } yield {
| 1
| }).map(_ + 1)
|}
""".stripMargin.replace("\\r", "")
val after =
"""
|class X {
| (for {
| i <- 1 to 10
| } yield {
| 1
| }).map(_ + 1)
|}
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL7898(): Unit = {
getCommonSettings.KEEP_FIRST_COLUMN_COMMENT = true
val before =
"""
|class Test {
| println(a)
|// println(b)
|}
""".stripMargin.replace("\\r", "")
val after =
"""
|class Test {
| println(a)
|// println(b)
|}
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL9387(): Unit = {
val before =
"""
|val x = for {
|//Comment
| x <- Nil
|} yield {
| x
| }
""".stripMargin.replace("\\r", "")
val after =
"""
|val x = for {
|//Comment
| x <- Nil
|} yield {
| x
|}
""".stripMargin.replace("\\r", "")
/* TODO this is only a temporary reference
actual result should be the following:
|val x = for {
| //Comment
| x <- Nil
|} yield {
| x
|}
But current implementation of formatting model does not provide reasonable means of implementing this case.
*/
doTextTest(before, after)
}
def testSCL5028_1(): Unit = {
getCommonSettings.BRACE_STYLE = CommonCodeStyleSettings.NEXT_LINE
val before =
"""
|try {
| expr
|} catch
|{
| case _: Throwable => println("gotcha!")
|}
""".stripMargin.replace("\\r", "")
val after =
"""
|try
|{
| expr
|} catch
|{
| case _: Throwable => println("gotcha!")
|}
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL5028_2(): Unit = {
getCommonSettings.BRACE_STYLE = CommonCodeStyleSettings.NEXT_LINE_SHIFTED2
getCommonSettings.CATCH_ON_NEW_LINE = true
val before =
"""
|try {
| expr
|} catch
|{
| case _: Throwable => println("gotcha!")
|}
""".stripMargin.replace("\\r", "")
val after =
"""
|try
| {
| expr
| }
|catch
| {
| case _: Throwable => println("gotcha!")
| }
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL8825(): Unit = {
getScalaSettings.DO_NOT_INDENT_CASE_CLAUSE_BODY = true
val before =
"""
|{
| case (i) =>
| testExpr
|}
""".stripMargin.replace("\\r", "")
val after =
"""
|{
| case (i) =>
| testExpr
|}
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL2454(): Unit = {
getCommonSettings.KEEP_LINE_BREAKS = false
val before =
"""
|val v
| =
| "smth"
""".stripMargin.replace("\\r", "")
val after =
"""
|val v = "smth"
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL2468(): Unit = {
getScalaSettings.NEWLINE_AFTER_ANNOTATIONS = true
val before =
"""
|@throws(classOf[IOException]) @deprecated def doSmth() {}
""".stripMargin.replace("\\r", "")
val after =
"""
|@throws(classOf[IOException])
|@deprecated
|def doSmth() {}
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL2469(): Unit = {
getCommonSettings.VARIABLE_ANNOTATION_WRAP = CommonCodeStyleSettings.WRAP_ALWAYS
val before =
"""
|class Test {
| def foo(): Unit = {
| @deprecated @deprecated
| val myLocalVal = 42
| }
|}
""".stripMargin.replace("\\r", "")
val after =
"""
|class Test {
| def foo(): Unit = {
| @deprecated
| @deprecated
| val myLocalVal = 42
| }
|}
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL2571(): Unit = {
getCommonSettings.EXTENDS_LIST_WRAP = CommonCodeStyleSettings.WRAP_ALWAYS
val before =
"""
|class Foo extends Object with Thread with Serializable {
| def foo(x: Int = 0,
| y: Int = 1,
| z: Int = 2) = new Foo
|}
""".stripMargin.replace("\\r", "")
val after =
"""
|class Foo extends Object with
| Thread with
| Serializable {
| def foo(x: Int = 0,
| y: Int = 1,
| z: Int = 2) = new Foo
|}
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL2571_1(): Unit = {
getCommonSettings.EXTENDS_KEYWORD_WRAP = CommonCodeStyleSettings.WRAP_ALWAYS
val before =
"""
|class Foo extends Object with Thread {
| def foo(x: Int = 0,
| y: Int = 1,
| z: Int = 2) = new Foo
|}
""".stripMargin.replace("\\r", "")
val after =
"""
|class Foo
| extends Object with Thread {
| def foo(x: Int = 0,
| y: Int = 1,
| z: Int = 2) = new Foo
|}
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL2571_2(): Unit = {
getCommonSettings.EXTENDS_KEYWORD_WRAP = CommonCodeStyleSettings.WRAP_ALWAYS
getCommonSettings.EXTENDS_LIST_WRAP = CommonCodeStyleSettings.WRAP_ALWAYS
val before =
"""
|class Foo extends Object with Thread with Serializable {
| def foo(x: Int = 0,
| y: Int = 1,
| z: Int = 2) = new Foo
|}
""".stripMargin.replace("\\r", "")
val after =
"""
|class Foo
| extends Object with
| Thread with
| Serializable {
| def foo(x: Int = 0,
| y: Int = 1,
| z: Int = 2) = new Foo
|}
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
def testSCL2999(): Unit = {
getCommonSettings.EXTENDS_LIST_WRAP = CommonCodeStyleSettings.WRAP_ON_EVERY_ITEM
getScalaSettings.WRAP_BEFORE_WITH_KEYWORD = true
getCommonSettings.getIndentOptions.CONTINUATION_INDENT_SIZE = 4
val before =
"""
|class MyLongClassName(someParam: String, someOtherParam: Int) extends SomeClass with SomeTrait with AnotherTrait with AndAnotherTrait with YetAnotherTrait {
|}
""".stripMargin.replace("\\r", "")
val after =
"""
|class MyLongClassName(someParam: String, someOtherParam: Int) extends SomeClass
| with SomeTrait
| with AnotherTrait
| with AndAnotherTrait
| with YetAnotherTrait {
|}
""".stripMargin.replace("\\r", "")
doTextTest(before, after)
}
} | JetBrains/intellij-scala-historical | test/org/jetbrains/plugins/scala/lang/formatter/tests/ScalaBugsTest.scala | Scala | apache-2.0 | 23,547 |
/**
* Copyright 2009 Jorge Ortiz
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
**/
package com.github.nscala_time.time
import org.joda.time._
import com.github.nscala_time.time.Implicits._
import org.joda.time.format.DateTimeFormatter
object StaticDateTime extends StaticDateTime
trait StaticDateTime {
type Property = DateTime.Property
def now() = new DateTime
def now(zone: DateTimeZone) = DateTime.now(zone)
def now(chronology: Chronology) = DateTime.now(chronology)
def parse(str: String) = DateTime.parse(str)
def parse(str: String, formatter: DateTimeFormatter) = DateTime.parse(str, formatter)
def nextSecond() = now() + 1.second
def nextMinute() = now() + 1.minute
def nextHour() = now() + 1.hour
def nextDay() = now() + 1.day
def tomorrow() = now() + 1.day
def nextWeek() = now() + 1.week
def nextMonth() = now() + 1.month
def nextYear() = now() + 1.year
def lastSecond() = now() - 1.second
def lastMinute() = now() - 1.minute
def lastHour() = now() - 1.hour
def lastDay() = now() - 1.day
def yesterday() = now() - 1.day
def lastWeek() = now() - 1.week
def lastMonth() = now() - 1.month
def lastYear() = now() - 1.year
}
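// Illustrative usage sketch:
//   import com.github.nscala_time.time.StaticDateTime._
//   val deadline = nextWeek()   // now() + 1.week
//   val recent = lastHour()     // now() - 1.hour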
| xuwei-k/nscala-time | src/main/scala/com/github/nscala_time/time/StaticDateTime.scala | Scala | apache-2.0 | 1,726 |
package transport
import scala.scalajs.js
package object jsapi {
type RTCVoidCallback = js.Function0[Unit]
type RTCSessionDescriptionCallback = js.Function1[RTCSessionDescription, Unit]
type RTCPeerConnectionErrorCallback = js.Function1[js.Object, Unit]
type RTCStatsCallback = js.Function1[RTCStatsReport, Unit]
type Event = org.scalajs.dom.Event
type EventTarget = org.scalajs.dom.EventTarget
type MessageEvent = org.scalajs.dom.MessageEvent
type CloseEvent = org.scalajs.dom.CloseEvent
type ErrorEvent = org.scalajs.dom.ErrorEvent
type WebSocket = org.scalajs.dom.WebSocket
}
| OlivierBlanvillain/scalajs-transport | transport/webrtc/js/src/main/scala/transport/jsapi/package.scala | Scala | mit | 602 |
package org.bitcoins.core.gen
import org.bitcoins.core.bloom._
import org.scalacheck.Gen
/**
* Created by chris on 8/7/16.
*/
abstract class BloomFilterGenerator {
/** Builds a generic bloom filter loaded with no hashes and returns it */
def bloomFilter: Gen[BloomFilter] = for {
size <- Gen.choose(1, 100)
falsePositiveRate <- Gen.choose(0.00001, 0.99999)
tweak <- NumberGenerator.uInt32s
flags <- bloomFlag
} yield BloomFilter(size, falsePositiveRate, tweak, flags)
/** Loads a generic bloom filter with the given byte vectors and returns it */
def bloomFilter(byteVectors: Seq[Seq[Byte]]): Gen[BloomFilter] = for {
filter <- bloomFilter
} yield filter.insertByteVectors(byteVectors)
/** Returns a bloom filter loaded with randomly generated byte vectors */
def loadedBloomFilter: Gen[(BloomFilter, Seq[Seq[Byte]])] = for {
filter <- bloomFilter
randomNum <- Gen.choose(0, filter.filterSize.num.toInt)
hashes <- CryptoGenerators.doubleSha256DigestSeq(randomNum)
loaded = filter.insertHashes(hashes)
} yield (loaded, hashes.map(_.bytes))
/** Generates a random bloom flag */
def bloomFlag: Gen[BloomFlag] = Gen.oneOf(BloomUpdateNone, BloomUpdateAll, BloomUpdateP2PKOnly)
}
object BloomFilterGenerator extends BloomFilterGenerator
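// Illustrative ScalaCheck sketch (the property body is hypothetical):
//   import org.scalacheck.Prop.forAll
//   forAll(BloomFilterGenerator.loadedBloomFilter) { case (filter, insertedBytes) =>
//     filter.filterSize.num.toInt >= insertedBytes.size
//   }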
| Christewart/bitcoin-s-core | src/main/scala/org/bitcoins/core/gen/BloomFilterGenerators.scala | Scala | mit | 1,299 |
package org.jetbrains.plugins.scala.lang.completion.weighter
import com.intellij.codeInsight.completion.{CompletionLocation, CompletionWeigher}
import com.intellij.codeInsight.lookup.LookupElement
import org.jetbrains.plugins.scala.lang.completion.lookups.ScalaLookupItem
/**
* @author Alexander Podkhalyuzin
*/
class ScalaParameterCompletionWeigher extends CompletionWeigher {
case class ParameterNameComparable(isNamedParameters: Boolean)
extends Comparable[ParameterNameComparable] {
def compareTo(o: ParameterNameComparable): Int = {
if (isNamedParameters == o.isNamedParameters) 0
else if (isNamedParameters && !o.isNamedParameters) -1
else 1
}
}
def weigh(element: LookupElement, location: CompletionLocation): Comparable[_] = {
ScalaLookupItem.original(element) match {
case item: ScalaLookupItem => ParameterNameComparable(item.isNamedParameter)
case _ => ParameterNameComparable(isNamedParameters = false)
}
}
} | triggerNZ/intellij-scala | src/org/jetbrains/plugins/scala/lang/completion/weighter/ScalaParameterCompletionWeigher.scala | Scala | apache-2.0 | 984 |
package score.discord.canti.collections
import java.util.NoSuchElementException
import scala.collection.mutable
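/** Fixed-capacity ring buffer that keeps the most recent `capacity` elements.
  * `::=` prepends an element, silently overwriting the oldest entry once the buffer is full;
  * indexing, iteration and `findAndUpdate` all see elements ordered from newest to oldest.
  */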
class LogBuffer[T](capacity: Int) extends mutable.IndexedSeq[T]:
private val buffer = new Array[Any](capacity)
private var readPos, writePos = 0
private var isEmpty_ = true
override def length: Int =
if isEmpty_ then 0
else
readPos - writePos match
case x if x > 0 => x
case x => x + buffer.length
override def isEmpty: Boolean = isEmpty_
def ::=(elem: T): Unit =
val nextWrite = writePos match
case 0 => buffer.length - 1
case x => x - 1
buffer(nextWrite) = elem
if readPos == writePos && !isEmpty then readPos = nextWrite
writePos = nextWrite
isEmpty_ = false
override def apply(idx: Int) =
buffer(idxToBufferIdx(idx)).asInstanceOf[T]
override def update(idx: Int, elem: T): Unit =
buffer(idxToBufferIdx(idx)) = elem
private def idxToBufferIdx(idx: Int) =
if idx < 0 || idx >= size then
throw IndexOutOfBoundsException(s"$idx out of bounds for buffer of size $size")
(idx + writePos) % buffer.length
def findAndUpdate(condition: T => Boolean)(replace: T => T): this.type =
val index = this.indexWhere(condition)
if index != -1 then this(index) = replace(this(index))
this
override def iterator: Iterator[T] = new Iterator[T]:
private var myPos = writePos
private var iterated = LogBuffer.this.isEmpty
override def hasNext: Boolean = myPos != readPos || !iterated
override def next(): T =
if !hasNext then throw NoSuchElementException()
val pos = myPos
val result = buffer(pos).asInstanceOf[T]
myPos = pos + 1 match
case x if x == buffer.length => 0
case x => x
iterated = true
result
end LogBuffer
| ScoreUnder/canti-bot | src/main/scala/score/discord/canti/collections/LogBuffer.scala | Scala | agpl-3.0 | 1,841 |
package org.broadinstitute.clio.client.dispatch
import akka.NotUsed
import akka.stream.scaladsl.Source
import org.broadinstitute.clio.client.commands.UndeliverCommand
import org.broadinstitute.clio.client.dispatch.MoveExecutor.IoOp
import org.broadinstitute.clio.client.util.IoUtil
import org.broadinstitute.clio.client.webclient.ClioWebClient
import org.broadinstitute.clio.transfer.model.DeliverableIndex
import scala.collection.immutable
import scala.concurrent.ExecutionContext
/**
 * Executor for "undeliver" commands, which are a specialized form of "move" commands.
 *
 * Undelivery executors can extend this class to add custom IO operations which
 * should be performed when moving files back out of a FireCloud workspace.
*/
class UndeliverExecutor[CI <: DeliverableIndex](
protected val undeliverCommand: UndeliverCommand[CI]
)(
implicit ec: ExecutionContext
) extends MoveExecutor(undeliverCommand) {
override def checkPreconditions(
ioUtil: IoUtil,
webClient: ClioWebClient
): Source[moveCommand.index.MetadataType, NotUsed] = {
val baseStream = super.checkPreconditions(ioUtil, webClient)
baseStream.flatMapConcat { metadata =>
metadata.workspaceName
.filterNot(n => undeliverCommand.force || !n.isEmpty)
.fold(Source.single(metadata)) { _ =>
Source.failed(
            new UnsupportedOperationException(
              s"Cannot undeliver $prettyKey" +
                " because it is currently not in a workspace." +
                " Use --force if you want to override this."
)
)
}
}
}
override protected[dispatch] def buildMove(
metadata: moveCommand.index.MetadataType
): Source[(moveCommand.index.MetadataType, immutable.Seq[IoOp]), NotUsed] = {
super.buildMove(metadata).map {
case (m, ops) =>
(
m.withWorkspace("", ""),
ops
)
}
}
}
| broadinstitute/clio | clio-client/src/main/scala/org/broadinstitute/clio/client/dispatch/UndeliverExecutor.scala | Scala | bsd-3-clause | 1,908 |
/** Copyright 2016 - 2021 Martin Mauch (@nightscape)
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
* specific language governing permissions and limitations under the License.
*/
package com.crealytics.spark.v2.excel
import org.apache.hadoop.mapreduce.TaskAttemptContext
import org.apache.spark.internal.Logging
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.execution.datasources.OutputWriter
import org.apache.spark.sql.types.StructType
class ExcelOutputWriter(val path: String, dataSchema: StructType, context: TaskAttemptContext, options: ExcelOptions)
extends OutputWriter
with Logging {
private val gen = new ExcelGenerator(path, dataSchema, context.getConfiguration, options)
if (options.header) { gen.writeHeaders() }
override def write(row: InternalRow): Unit = gen.write(row)
override def close(): Unit = gen.close()
}
| crealytics/spark-excel | src/main/3.2/scala/com/crealytics/spark/v2/excel/ExcelOutputWriter.scala | Scala | apache-2.0 | 1,313 |
/*
* Copyright (C) 2016-2019 Lightbend Inc. <https://www.lightbend.com>
*/
package com.lightbend.lagom.internal.scaladsl.registry
import akka.Done
import akka.actor.CoordinatedShutdown
import com.lightbend.lagom.internal.registry.serviceDnsRecords
import com.lightbend.lagom.scaladsl.api.ServiceInfo
import com.typesafe.config.Config
import play.api.Logger
import scala.concurrent.ExecutionContext
import scala.util.Failure
import scala.util.Success
import scala.collection._
class ServiceRegistration(
serviceInfo: ServiceInfo,
coordinatedShutdown: CoordinatedShutdown,
config: Config,
registry: ServiceRegistry
)(implicit ec: ExecutionContext) {
private val logger: Logger = Logger(this.getClass)
private val uris = serviceDnsRecords(config)
coordinatedShutdown.addTask(
CoordinatedShutdown.PhaseBeforeServiceUnbind,
"unregister-services-from-service-locator-scaladsl"
) { () =>
registry.unregister(serviceInfo.serviceName).invoke().map(_ => Done)
}
registry
.register(serviceInfo.serviceName)
.invoke(new ServiceRegistryService(uris, immutable.Seq(serviceInfo.acls.toSeq: _*)))
.onComplete {
case Success(_) =>
logger.debug(s"Service name=[${serviceInfo.serviceName}] successfully registered with service locator.")
case Failure(e) =>
logger.error(s"Service name=[${serviceInfo.serviceName}] couldn't register itself to the service locator.", e)
}
}
| ignasi35/lagom | dev/service-registry/devmode-scaladsl/src/main/scala/com/lightbend/lagom/internal/scaladsl/registry/ServiceRegistration.scala | Scala | apache-2.0 | 1,447 |
package com.arcusys.valamis.web.servlet.base
import java.util
import javax.servlet.http.{HttpServletRequest, HttpServletRequestWrapper, HttpServletResponse}
import org.scalatra.servlet.ServletApiImplicits
import org.scalatra.{Delete, Handler, Patch, Put}
import scala.collection.JavaConversions._
// Tomcat (with default settings, Liferay bundle) does not read parameters from the request body
// for Put | Delete | Patch, so we read the parameters here
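// Illustrative usage sketch (servlet and route are hypothetical): stack the trait onto a
// Scalatra servlet so PUT/DELETE/PATCH form parameters become visible through `params`.
//   class CourseServlet extends org.scalatra.ScalatraServlet with HTTPMethodsSupport {
//     put("/courses/:id") { params("title") }
//   }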
trait HTTPMethodsSupport extends Handler with ServletApiImplicits {
abstract override def handle(req: HttpServletRequest, res: HttpServletResponse): Unit = {
val req2 = req.requestMethod match {
case Put | Delete | Patch =>
if (req.getContentType.toLowerCase.contains("application/x-www-form-urlencoded")) {
new HttpServletRequestWrapper(req) {
val bodyParams = HttpUtilsHelper.parsePostData(req.getContentLength, req.getInputStream, req.getCharacterEncoding)
override def getParameter(name: String) = {
val fromRequest = Option(req.getParameter(name))
lazy val fromBody = Option(bodyParams.get(name)).map(_.head)
fromRequest orElse fromBody orNull
}
override def getParameterNames = super.getParameterNames ++ bodyParams.keys()
override def getParameterMap = {
val paramM: util.HashMap[String, Array[String]] = new util.HashMap
(super.getParameterMap.entrySet() ++ bodyParams.entrySet())
.foreach(e => paramM.put(e.getKey.toString, e.getValue.asInstanceOf[Array[String]]))
paramM
}
}
} else req
case _ => req
}
super.handle(req2, res)
}
} | igor-borisov/valamis | valamis-portlets/src/main/scala/com/arcusys/valamis/web/servlet/base/HTTPMethodsSupport.scala | Scala | gpl-3.0 | 1,712 |
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** @author John Miller
* @version 1.3
* @date Tue May 29 14:45:32 EDT 2012
* @see LICENSE (MIT style license file).
*/
package scalation.analytics.clusterer
import scala.collection.mutable.Set
import scala.util.control.Breaks.{breakable, break}
import scalation.math.double_exp
import scalation.linalgebra.{MatrixD, VectorD, VectorI}
import scalation.random.{Randi, Uniform, RandomVecD, RandomVecI}
import scalation.util.{banner, Error}
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `KMeansClusterer` class clusters several vectors/points using k-means
 *  clustering.  Either (1) randomly assign points to 'k' clusters or (2) randomly
 *  pick 'k' points as initial centroids (technique (1) tends to work better and is the
 *  primary technique).  Iteratively, reassign each point to the cluster containing
 *  the closest centroid.  Stop when there are no changes to the clusters.
*-----------------------------------------------------------------------------
* @param x the vectors/points to be clustered stored as rows of a matrix
* @param k the number of clusters to make
* @param s the random number stream (to vary the clusters made)
* @param primary true indicates use the primary technique for initiating the clustering
* @param remote whether to take a maximally remote or a randomly selected point
* @param post whether to perform post processing by randomly swapping points to reduce error
*/
class KMeansClusterer (x: MatrixD, k: Int, s: Int = 0, primary: Boolean = true, remote: Boolean = true, post: Boolean = true)
extends Clusterer with Error
{
if (k >= x.dim1) flaw ("constructor", "k must be less than the number of vectors")
protected val DEBUG = false // debug flag
protected val MAX_ITER = 200 // the maximum number of iterations
protected val cent = new MatrixD (k, x.dim2) // the k centroids of clusters
protected val sizes = new VectorI (k) // the cluster sizes
protected val myDist = new VectorD (x.dim1) // distances from centroids
protected val clustr = Array.ofDim [Int] (x.dim1) // assignment of vectors to clusters
protected val dist = new VectorD (x.dim1) // distance to closest centroid
var tc1: Double = 0.0
var tc2: Double = 0.0
dist.set (Double.PositiveInfinity)
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the sizes of the centroids. Should only be called after
* `cluster ()`.
*/
def csize (): VectorI = sizes
//::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Return the centroids. Should only be called after `cluster ()`.
*/
def centroids (): MatrixD = cent
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Randomly assign each vector/point 'x(i)' to a random cluster.
* Primary technique for initiating the clustering.
*/
def assign ()
{
val ran = new Randi (0, k-1, s) // for random integers: 0, ..., k-1
for (i <- x.range1) {
clustr(i) = ran.igen // randomly assign x(i) to a cluster
sizes(clustr(i)) += 1 // increment size of this cluster
} // for
} // assign
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Randomly pick vectors/points to serve as the initial 'k' centroids (cent).
* Secondary technique for initiating the clustering.
*/
def pickCentroids ()
{
val rvi = RandomVecI (k, x.dim1-1, 0, stream = s).igen // random vector of integers
for (i <- 0 until k) cent(i) = x(rvi(i)) // set the centroids
} // pickCentroids
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Fix any empty clusters by taking a point from the largest cluster.
* @param useDistance whether to pick a random or most remote point in cluster
*/
private def fixEmptyClusters (useDistance: Boolean = remote)
{
if (DEBUG) {
banner (s"fixemptyclusters()")
println (s"remote = $remote")
println (s"Initial clustering = ${clustr.deep}")
} // if
for (c <- 0 until k if ! (clustr contains c)) { // for each empty cluster
if (DEBUG) println (s"Cluster c=$c is empty!")
val biggest = sizes.argmax () // biggest cluster
val indices = clustr.indices.filter (clustr(_) == biggest) // indices of elements in biggest cluster
if (DEBUG) {
println (s"Current cluster sizes = $sizes")
println (s"Biggest cluster = $biggest")
println (s"Biggest cluster indices = $indices")
} // if
var i = 0 // element index to reassign
if (useDistance) {
i = clustr.indexOf (biggest) // first element in biggest cluster
var max = distance (x(i), cent(biggest)) // first distance in biggest cluster
for (ii <- indices) { // find furthest in biggest cluster
val dist = distance (x(ii), cent(biggest))
if (dist > max) { max = dist; i = ii }
} // for
} else {
val ran = new Randi (0, indices.size-1) // random integer generator
i = indices(ran.igen) // randomly pick one point from biggest cluster
} // if
sizes(clustr(i)) -= 1 // decrement size of previous cluster
clustr(i) = c // reassign vector x(i) to cluster c
sizes(c) += 1 // increment size of cluster c
if (DEBUG) println (s"New clustering = ${clustr.deep}")
} // for
} // fixEmptyClusters
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Check for empty clusters and throw an exception if found.
*/
private def emptyClusters ()
{
for (c <- 0 until k if ! (clustr contains c)) throw new Exception (s"Empty cluster c = $c")
} // emptyClusters
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Reassign each vector/point to the cluster with the closest centroid.
* Indicate done, if no points changed clusters (for stopping rule).
*/
def reassign (): Boolean =
{
var done = true // done indicates no changes
for (i <- x.range1) {
val v = x(i) // let v be the ith vector
for (c <- 0 until k) {
val newDist = distance (v, cent(c)) // calc distance to centroid c
if (newDist < dist(i)) { // is it closer than old distance
dist(i) = newDist // make it the new distance
sizes(clustr(i)) -= 1 // decrement size of previous cluster
clustr(i) = c // reassign vector x(i) to cluster c
sizes(c) += 1 // increment size of cluster c
done = false // changed clusters => not done
} // if
} // for
} // for
done // return whether there were no changes
} // reassign
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Calculate the centroids based on current assignment of points to clusters.
*/
def calcCentroids ()
{
val cx = new MatrixD (k, x.dim2) // to hold sum of vectors for each cluster
val cs = new VectorD (k) // to hold number of vectors in each cluster
for (i <- x.range1) {
val ci = clustr(i) // x(i) currently assigned to cluster ci
cx(ci) = cx(ci) + x(i) // add the next vector in cluster
cs(ci) += 1.0 // add 1 to number in cluster
} // for
for (c <- 0 until k) cent(c) = cx(c) / cs(c) // divide to get averages/means
} // calcCentroids
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Iteratively recompute clusters until the assignment of points does not
* change, returning the final cluster assignment vector.
*/
def cluster (): Array [Int] =
{
if (primary) {
assign () // randomly assign points to clusters
fixEmptyClusters (false) // swap points into empty clusters
calcCentroids () // calculate the initial centroids
} else {
pickCentroids () // alt., pick points for initial centroids
fixEmptyClusters (false) // swap points into empty clusters
} // if
if (DEBUG) {
println ("(" + 0 + ") clustr = " + clustr.deep)
println ("(" + 0 + ") cent = " + cent)
} // if
breakable { for (l <- 1 to MAX_ITER) {
if (reassign ()) break // reassign points to clusters (no change => break)
fixEmptyClusters () // check for empty clusters
calcCentroids () // re-calculate the centroids
if (DEBUG) {
println ("(" + l + ") clustr = " + clustr.deep)
println ("(" + l + ") cent = " + cent)
} // if
}} // for
emptyClusters () // should not have any empty clusters
if (post) trySwaps () // swap points to improve sse
clustr // return the cluster assignment vector
} // cluster
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
    /** Swap clusters for points 'x(i)' and 'x(j)'.
     *  @param i  the index of point x(i)
     *  @param j  the index of point x(j)
     */
private def swapPoints (i: Int, j: Int)
{
val temp = clustr(i)
clustr(i) = clustr(j)
clustr(j) = temp
calcCentroids ()
} // swapPoints
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Try all pairwise swaps and make them if 'sse' improves.
*/
private def trySwaps ()
{
for (i <- 0 until x.dim1-1; j <- i+1 until x.dim1 if clustr(i) != clustr(j)) {
val sum1 = sse (clustr(i)) + sse (clustr(j))
swapPoints (i, j)
val sum2 = sse (clustr(i)) + sse (clustr(j))
if (DEBUG) println (s"sum1 = $sum1 vs. sum2 = $sum2")
if (sum2 > sum1) { // if not better, swap back
swapPoints (i, j)
if (DEBUG) println (s"swapping back")
} // if
} // for
    } // trySwaps
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Given a new point/vector 'y', determine which cluster it belongs to,
* i.e., the cluster whose centroid it is closest to.
* @param y the vector to classify
*/
def classify (y: VectorD): Int =
{
var dist = distance (y, cent(0)) // calc distance to centroid 0
var clus = 0 // assign y to cluster 0
for (c <- 1 until k) {
val newDist = distance (y, cent(c)) // calc distance to centroid c
if (newDist < dist) { // is it closer than old distance
dist = newDist // make it the new distance
clus = c // assign y to cluster c
} // if
} // for
clus // return cluster y belongs to
} // classify
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Compute the sum of squared errors (distance squared) from all points in
* cluster 'c' to the cluster's centroid.
* @param c the current cluster
*/
def sse (c: Int): Double =
{
var sum = 0.0
for (i <- x.range1) {
val cli = clustr(i)
if (cli == c) sum += distance (x(i), cent(cli))
} // for
sum
} // sse
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Check to see if the sum of squared errors is optimum.
* @param opt the known (from human/oracle) optimum
*/
def checkOpt (opt: Double): Boolean = sse (x) <= opt
} // KMeansClusterer class
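//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** Usage sketch (illustrative only; the test objects below give runnable examples):
 *      val x  = new MatrixD ((4, 2), 1.0, 2.0,    2.0, 1.0,
 *                                    8.0, 9.0,    9.0, 8.0)
 *      val cl = new KMeansClusterer (x, k = 2)
 *      val clustr = cl.cluster ()                          // cluster assignment per row of x
 *      val c      = cl.classify (VectorD (8.5, 8.5))       // cluster for a new point
 *      val error  = cl.sse (x)                             // sum of squared errors
 */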
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `KMeansClustererTester` trait provides a common 'test' method for evaluating
 *  the `KMeansClusterer` class on a data matrix over many random streams.
*/
trait KMeansClustererTester
{
import scalation.stat.Statistic
def test (v: MatrixD, k: Int, primary: Boolean, remote: Boolean, post: Boolean, opt: Double = -1)
{
banner (s"test (primary = $primary, remote = $remote, post = $post)")
val statSSE = new Statistic ()
val statTC1 = new Statistic ()
val statTC2 = new Statistic ()
var ok = 0
for (s <- 0 until 1000) { // test with different random streams
//banner ("KMeansClusterer for stream s = " + s)
val cl = new KMeansClusterer (v, k, s, primary = primary, remote = remote, post = post)
cl.cluster ()
val sse = cl.sse (v)
//println ("--- final cluster = " + cl.cluster ().deep)
//println ("--- final sse = " + sse)
statSSE.tally (sse)
statTC1.tally (cl.tc1)
statTC2.tally (cl.tc2)
if ((opt != -1) && (cl.checkOpt (opt))) ok += 1
} // for
if (opt != -1) println (s"ok = $ok")
println (Statistic.labels)
println (statSSE)
println (statTC1)
println (statTC2)
} // test
} // KMeansClustererTester
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `KMeansClustererTest` object is used to test the `KMeansClusterer` class.
* > run-main scalation.analytics.clusterer.KMeansClustererTest
*/
object KMeansClustererTest extends App with KMeansClustererTester
{
val v = new MatrixD ((6, 2), 1.0, 2.0,
2.0, 1.0,
5.0, 4.0,
4.0, 5.0,
9.0, 8.0,
8.0, 9.0)
val k = 3
println ("v = " + v)
println ("k = " + k)
println ("----------------------------------------------------")
val tf = Array (true, false)
for (primary <- tf; remote <- tf; post <- tf) {
test (v, k, primary, remote, post, 3)
} // for
} // KMeansClustererTest object
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `KMeansClustererTest2` object is used to test the `KMeansClusterer` class.
 *  > run-main scalation.analytics.clusterer.KMeansClustererTest2
*/
object KMeansClustererTest2 extends App with KMeansClustererTester
{
val v = new MatrixD ((8, 2), 1.0, 1.0,
1.0, 3.0,
5.0, 18.0,
5.0, 20.0,
9.0, 10.0,
9.0, 12.0,
15.0, 30.0,
15.0, 32.0)
val k = 4
println ("v = " + v)
println ("k = " + k)
println ("----------------------------------------------------")
val tf = Array (true, false)
for (primary <- tf; remote <- tf; post <- tf) {
test (v, k, primary, remote, post, 8)
} // for
} // KMeansClustererTest2 object
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `KMeansClustererTest3` object is used to test the `KMeansClusterer` class.
* > run-main scalation.analytics.clusterer.KMeansClustererTest3
*/
object KMeansClustererTest3 extends App with KMeansClustererTester
{
import scalation.random.{Bernoulli, Normal}
val coin = Bernoulli ()
val dist1 = Normal (2.0, 1.0)
val dist2 = Normal (8.0, 1.0)
val v = new MatrixD (50, 2)
val k = 4
for (i <- v.range1) v(i) = VectorD (if (coin.gen == 0) dist1.gen else dist2.gen,
if (coin.gen == 0) dist1.gen else dist2.gen)
import scalation.plot.Plot
new Plot (v.col(0), v.col(1))
println ("v = " + v)
println ("k = " + k)
println ("----------------------------------------------------")
val tf = Array (true, false)
for (primary <- tf; remote <- tf; post <- tf) {
test (v, k, primary, remote, post, 76.6)
} // for
} // KMeansClustererTest3 object
//:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::
/** The `KMeansClustererTest4` object is used to test the `KMeansClusterer` class.
* > run-main scalation.analytics.clusterer.KMeansClustererTest4
*
object KMeansClustererTest4 extends App with KMeansClustererTester
{
import org.apache.commons.math3.ml.clustering.KMeansPlusPlusClusterer
import scalation.random.{Normal, Bernoulli}
val coin = Bernoulli ()
val dist1 = Normal (2.0, 1.0)
val dist2 = Normal (8.0, 1.0)
val v = new MatrixD (50, 2)
val k = 4
for (i <- v.range1) v(i) = VectorD (if (coin.gen == 0) dist1.gen else dist2.gen,
if (coin.gen == 0) dist1.gen else dist2.gen)
import scalation.plot.Plot
new Plot (v.col(0), v.col(1))
val cl = new KMeansPlusPlusClusterer (k)
println ("v = " + v)
println ("k = " + k)
println ("----------------------------------------------------")
val tf = Array (true, false)
for (primary <- tf; remote <- tf; post <- tf) {
test (v, k, primary, remote, post, 76.6)
} // for
} // KMeansClustererTest4 object
*/
| scalation/fda | scalation_1.3/scalation_modeling/src/main/scala/scalation/analytics/clusterer/KMeansClusterer.scala | Scala | mit | 19,438 |
import scala.quoted.*
object Exp {
private def compileImpl(e: Expr[Int], env: Map[String, Expr[Int]])(using Quotes): Expr[Int] = {
e match {
case '{$s:Int} => s
case exp =>
compileImpl(exp, env)
}
}
private def compileUnlift(e: Expr[Int])(using Quotes): Expr[Int] = {
val environment = Map[String, Expr[Int]]()
compileImpl(e, environment)
}
inline def compile(inline expr: Int): Int = {
${compileUnlift('expr)}
}
}
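// Usage sketch (illustrative): since `compile` is an inline macro, it must be invoked from a
// separate compilation unit, e.g.
//
//   val n: Int = Exp.compile(7)   // expanded at compile time via compileUnlift/compileImpl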
| dotty-staging/dotty | tests/run-macros/i9475/Macro_1.scala | Scala | apache-2.0 | 474 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
import java.io.{File, IOException}
import java.nio.file.{Files, NoSuchFileException}
import kafka.common.LogSegmentOffsetOverflowException
import kafka.log.UnifiedLog.{CleanedFileSuffix, DeletedFileSuffix, SwapFileSuffix, isIndexFile, isLogFile, offsetFromFile}
import kafka.server.{LogDirFailureChannel, LogOffsetMetadata}
import kafka.server.epoch.LeaderEpochFileCache
import kafka.utils.{CoreUtils, Logging, Scheduler}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.InvalidOffsetException
import org.apache.kafka.common.utils.Time
import scala.collection.{Set, mutable}
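/**
 * Holds the offsets determined by [[LogLoader.load]]: the recovered log start offset, the
 * recovery point, and the metadata of the next offset to be appended to the log.
 */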
case class LoadedLogOffsets(logStartOffset: Long,
recoveryPoint: Long,
nextOffsetMetadata: LogOffsetMetadata)
object LogLoader extends Logging {
/**
* Clean shutdown file that indicates the broker was cleanly shutdown in 0.8 and higher.
* This is used to avoid unnecessary recovery after a clean shutdown. In theory this could be
* avoided by passing in the recovery point, however finding the correct position to do this
* requires accessing the offset index which may not be safe in an unclean shutdown.
* For more information see the discussion in PR#2104
*/
val CleanShutdownFile = ".kafka_cleanshutdown"
}
/**
* @param dir The directory from which log segments need to be loaded
* @param topicPartition The topic partition associated with the log being loaded
* @param config The configuration settings for the log being loaded
* @param scheduler The thread pool scheduler used for background actions
* @param time The time instance used for checking the clock
* @param logDirFailureChannel The LogDirFailureChannel instance to asynchronously handle log
* directory failure
* @param hadCleanShutdown Boolean flag to indicate whether the associated log previously had a
* clean shutdown
* @param segments The LogSegments instance into which segments recovered from disk will be
* populated
* @param logStartOffsetCheckpoint The checkpoint of the log start offset
* @param recoveryPointCheckpoint The checkpoint of the offset at which to begin the recovery
* @param leaderEpochCache An optional LeaderEpochFileCache instance to be updated during recovery
* @param producerStateManager The ProducerStateManager instance to be updated during recovery
*/
class LogLoader(
dir: File,
topicPartition: TopicPartition,
config: LogConfig,
scheduler: Scheduler,
time: Time,
logDirFailureChannel: LogDirFailureChannel,
hadCleanShutdown: Boolean,
segments: LogSegments,
logStartOffsetCheckpoint: Long,
recoveryPointCheckpoint: Long,
leaderEpochCache: Option[LeaderEpochFileCache],
producerStateManager: ProducerStateManager
) extends Logging {
logIdent = s"[LogLoader partition=$topicPartition, dir=${dir.getParent}] "
/**
* Load the log segments from the log files on disk, and returns the components of the loaded log.
* Additionally, it also suitably updates the provided LeaderEpochFileCache and ProducerStateManager
* to reflect the contents of the loaded log.
*
* In the context of the calling thread, this function does not need to convert IOException to
* KafkaStorageException because it is only called before all logs are loaded.
*
* @return the offsets of the Log successfully loaded from disk
*
* @throws LogSegmentOffsetOverflowException if we encounter a .swap file with messages that
* overflow index offset
*/
def load(): LoadedLogOffsets = {
    // First pass: go through the files in the log directory, removing any temporary files
    // and collecting any interrupted swap operations
val swapFiles = removeTempFilesAndCollectSwapFiles()
// The remaining valid swap files must come from compaction or segment split operation. We can
// simply rename them to regular segment files. But, before renaming, we should figure out which
// segments are compacted/split and delete these segment files: this is done by calculating
// min/maxSwapFileOffset.
// We store segments that require renaming in this code block, and do the actual renaming later.
var minSwapFileOffset = Long.MaxValue
var maxSwapFileOffset = Long.MinValue
swapFiles.filter(f => UnifiedLog.isLogFile(new File(CoreUtils.replaceSuffix(f.getPath, SwapFileSuffix, "")))).foreach { f =>
val baseOffset = offsetFromFile(f)
val segment = LogSegment.open(f.getParentFile,
baseOffset = baseOffset,
config,
time = time,
fileSuffix = UnifiedLog.SwapFileSuffix)
info(s"Found log file ${f.getPath} from interrupted swap operation, which is recoverable from ${UnifiedLog.SwapFileSuffix} files by renaming.")
minSwapFileOffset = Math.min(segment.baseOffset, minSwapFileOffset)
maxSwapFileOffset = Math.max(segment.readNextOffset, maxSwapFileOffset)
}
// Second pass: delete segments that are between minSwapFileOffset and maxSwapFileOffset. As
// discussed above, these segments were compacted or split but haven't been renamed to .delete
// before shutting down the broker.
for (file <- dir.listFiles if file.isFile) {
try {
if (!file.getName.endsWith(SwapFileSuffix)) {
val offset = offsetFromFile(file)
if (offset >= minSwapFileOffset && offset < maxSwapFileOffset) {
info(s"Deleting segment files ${file.getName} that is compacted but has not been deleted yet.")
file.delete()
}
}
} catch {
// offsetFromFile with files that do not include an offset in the file name
case _: StringIndexOutOfBoundsException =>
case _: NumberFormatException =>
}
}
// Third pass: rename all swap files.
for (file <- dir.listFiles if file.isFile) {
if (file.getName.endsWith(SwapFileSuffix)) {
info(s"Recovering file ${file.getName} by renaming from ${UnifiedLog.SwapFileSuffix} files.")
file.renameTo(new File(CoreUtils.replaceSuffix(file.getPath, UnifiedLog.SwapFileSuffix, "")))
}
}
// Fourth pass: load all the log and index files.
// We might encounter legacy log segments with offset overflow (KAFKA-6264). We need to split such segments. When
// this happens, restart loading segment files from scratch.
retryOnOffsetOverflow(() => {
// In case we encounter a segment with offset overflow, the retry logic will split it after which we need to retry
// loading of segments. In that case, we also need to close all segments that could have been left open in previous
// call to loadSegmentFiles().
segments.close()
segments.clear()
loadSegmentFiles()
})
val (newRecoveryPoint: Long, nextOffset: Long) = {
if (!dir.getAbsolutePath.endsWith(UnifiedLog.DeleteDirSuffix)) {
val (newRecoveryPoint, nextOffset) = retryOnOffsetOverflow(recoverLog)
// reset the index size of the currently active log segment to allow more entries
segments.lastSegment.get.resizeIndexes(config.maxIndexSize)
(newRecoveryPoint, nextOffset)
} else {
if (segments.isEmpty) {
segments.add(
LogSegment.open(
dir = dir,
baseOffset = 0,
config,
time = time,
initFileSize = config.initFileSize))
}
(0L, 0L)
}
}
leaderEpochCache.foreach(_.truncateFromEnd(nextOffset))
val newLogStartOffset = math.max(logStartOffsetCheckpoint, segments.firstSegment.get.baseOffset)
// The earliest leader epoch may not be flushed during a hard failure. Recover it here.
leaderEpochCache.foreach(_.truncateFromStart(logStartOffsetCheckpoint))
// Any segment loading or recovery code must not use producerStateManager, so that we can build the full state here
// from scratch.
if (!producerStateManager.isEmpty)
throw new IllegalStateException("Producer state must be empty during log initialization")
// Reload all snapshots into the ProducerStateManager cache, the intermediate ProducerStateManager used
// during log recovery may have deleted some files without the LogLoader.producerStateManager instance witnessing the
// deletion.
producerStateManager.removeStraySnapshots(segments.baseOffsets.toSeq)
UnifiedLog.rebuildProducerState(
producerStateManager,
segments,
newLogStartOffset,
nextOffset,
config.recordVersion,
time,
reloadFromCleanShutdown = hadCleanShutdown,
logIdent)
val activeSegment = segments.lastSegment.get
LoadedLogOffsets(
newLogStartOffset,
newRecoveryPoint,
LogOffsetMetadata(nextOffset, activeSegment.baseOffset, activeSegment.size))
}
/**
* Removes any temporary files found in log directory, and creates a list of all .swap files which could be swapped
* in place of existing segment(s). For log splitting, we know that any .swap file whose base offset is higher than
* the smallest offset .clean file could be part of an incomplete split operation. Such .swap files are also deleted
* by this method.
*
* @return Set of .swap files that are valid to be swapped in as segment files and index files
*/
private def removeTempFilesAndCollectSwapFiles(): Set[File] = {
val swapFiles = mutable.Set[File]()
val cleanedFiles = mutable.Set[File]()
var minCleanedFileOffset = Long.MaxValue
for (file <- dir.listFiles if file.isFile) {
if (!file.canRead)
throw new IOException(s"Could not read file $file")
val filename = file.getName
if (filename.endsWith(DeletedFileSuffix)) {
debug(s"Deleting stray temporary file ${file.getAbsolutePath}")
Files.deleteIfExists(file.toPath)
} else if (filename.endsWith(CleanedFileSuffix)) {
minCleanedFileOffset = Math.min(offsetFromFile(file), minCleanedFileOffset)
cleanedFiles += file
} else if (filename.endsWith(SwapFileSuffix)) {
swapFiles += file
}
}
// KAFKA-6264: Delete all .swap files whose base offset is greater than the minimum .cleaned segment offset. Such .swap
// files could be part of an incomplete split operation that could not complete. See Log#splitOverflowedSegment
// for more details about the split operation.
val (invalidSwapFiles, validSwapFiles) = swapFiles.partition(file => offsetFromFile(file) >= minCleanedFileOffset)
invalidSwapFiles.foreach { file =>
debug(s"Deleting invalid swap file ${file.getAbsoluteFile} minCleanedFileOffset: $minCleanedFileOffset")
Files.deleteIfExists(file.toPath)
}
// Now that we have deleted all .swap files that constitute an incomplete split operation, let's delete all .clean files
cleanedFiles.foreach { file =>
debug(s"Deleting stray .clean file ${file.getAbsolutePath}")
Files.deleteIfExists(file.toPath)
}
validSwapFiles
}
/**
* Retries the provided function only whenever an LogSegmentOffsetOverflowException is raised by
* it during execution. Before every retry, the overflowed segment is split into one or more segments
* such that there is no offset overflow in any of them.
*
* @param fn The function to be executed
* @return The value returned by the function, if successful
* @throws Exception whenever the executed function throws any exception other than
* LogSegmentOffsetOverflowException, the same exception is raised to the caller
*/
private def retryOnOffsetOverflow[T](fn: () => T): T = {
while (true) {
try {
return fn()
} catch {
case e: LogSegmentOffsetOverflowException =>
info(s"Caught segment overflow error: ${e.getMessage}. Split segment and retry.")
val result = UnifiedLog.splitOverflowedSegment(
e.segment,
segments,
dir,
topicPartition,
config,
scheduler,
logDirFailureChannel,
logIdent)
deleteProducerSnapshotsAsync(result.deletedSegments)
}
}
throw new IllegalStateException()
}
/**
   * Loads segments from disk into the provided segments collection.
*
* This method does not need to convert IOException to KafkaStorageException because it is only called before all logs are loaded.
* It is possible that we encounter a segment with index offset overflow in which case the LogSegmentOffsetOverflowException
* will be thrown. Note that any segments that were opened before we encountered the exception will remain open and the
* caller is responsible for closing them appropriately, if needed.
*
* @throws LogSegmentOffsetOverflowException if the log directory contains a segment with messages that overflow the index offset
*/
private def loadSegmentFiles(): Unit = {
// load segments in ascending order because transactional data from one segment may depend on the
// segments that come before it
for (file <- dir.listFiles.sortBy(_.getName) if file.isFile) {
if (isIndexFile(file)) {
// if it is an index file, make sure it has a corresponding .log file
val offset = offsetFromFile(file)
val logFile = UnifiedLog.logFile(dir, offset)
if (!logFile.exists) {
warn(s"Found an orphaned index file ${file.getAbsolutePath}, with no corresponding log file.")
Files.deleteIfExists(file.toPath)
}
} else if (isLogFile(file)) {
// if it's a log file, load the corresponding log segment
val baseOffset = offsetFromFile(file)
val timeIndexFileNewlyCreated = !UnifiedLog.timeIndexFile(dir, baseOffset).exists()
val segment = LogSegment.open(
dir = dir,
baseOffset = baseOffset,
config,
time = time,
fileAlreadyExists = true)
try segment.sanityCheck(timeIndexFileNewlyCreated)
catch {
case _: NoSuchFileException =>
if (hadCleanShutdown || segment.baseOffset < recoveryPointCheckpoint)
error(s"Could not find offset index file corresponding to log file" +
s" ${segment.log.file.getAbsolutePath}, recovering segment and rebuilding index files...")
recoverSegment(segment)
case e: CorruptIndexException =>
warn(s"Found a corrupted index file corresponding to log file" +
s" ${segment.log.file.getAbsolutePath} due to ${e.getMessage}}, recovering segment and" +
" rebuilding index files...")
recoverSegment(segment)
}
segments.add(segment)
}
}
}
/**
   * Just recovers the given segment, without adding it to the provided segments collection.
*
* @param segment Segment to recover
*
* @return The number of bytes truncated from the segment
*
* @throws LogSegmentOffsetOverflowException if the segment contains messages that cause index offset overflow
*/
private def recoverSegment(segment: LogSegment): Int = {
val producerStateManager = new ProducerStateManager(
topicPartition,
dir,
this.producerStateManager.maxTransactionTimeoutMs,
this.producerStateManager.maxProducerIdExpirationMs,
time)
UnifiedLog.rebuildProducerState(
producerStateManager,
segments,
logStartOffsetCheckpoint,
segment.baseOffset,
config.recordVersion,
time,
reloadFromCleanShutdown = false,
logIdent)
val bytesTruncated = segment.recover(producerStateManager, leaderEpochCache)
// once we have recovered the segment's data, take a snapshot to ensure that we won't
// need to reload the same segment again while recovering another segment.
producerStateManager.takeSnapshot()
bytesTruncated
}
/**
* Recover the log segments (if there was an unclean shutdown). Ensures there is at least one
* active segment, and returns the updated recovery point and next offset after recovery. Along
   * the way, the method suitably updates the provided LeaderEpochFileCache and
   * ProducerStateManager.
*
* This method does not need to convert IOException to KafkaStorageException because it is only
* called before all logs are loaded.
*
* @return a tuple containing (newRecoveryPoint, nextOffset).
*
* @throws LogSegmentOffsetOverflowException if we encountered a legacy segment with offset overflow
*/
private[log] def recoverLog(): (Long, Long) = {
/** return the log end offset if valid */
def deleteSegmentsIfLogStartGreaterThanLogEnd(): Option[Long] = {
if (segments.nonEmpty) {
val logEndOffset = segments.lastSegment.get.readNextOffset
if (logEndOffset >= logStartOffsetCheckpoint)
Some(logEndOffset)
else {
warn(s"Deleting all segments because logEndOffset ($logEndOffset) " +
s"is smaller than logStartOffset ${logStartOffsetCheckpoint}. " +
"This could happen if segment files were deleted from the file system.")
removeAndDeleteSegmentsAsync(segments.values)
leaderEpochCache.foreach(_.clearAndFlush())
producerStateManager.truncateFullyAndStartAt(logStartOffsetCheckpoint)
None
}
} else None
}
// If we have the clean shutdown marker, skip recovery.
if (!hadCleanShutdown) {
val unflushed = segments.values(recoveryPointCheckpoint, Long.MaxValue).iterator
var truncated = false
while (unflushed.hasNext && !truncated) {
val segment = unflushed.next()
info(s"Recovering unflushed segment ${segment.baseOffset}")
val truncatedBytes =
try {
recoverSegment(segment)
} catch {
case _: InvalidOffsetException =>
val startOffset = segment.baseOffset
warn(s"Found invalid offset during recovery. Deleting the" +
s" corrupt segment and creating an empty one with starting offset $startOffset")
segment.truncateTo(startOffset)
}
if (truncatedBytes > 0) {
// we had an invalid message, delete all remaining log
warn(s"Corruption found in segment ${segment.baseOffset}," +
s" truncating to offset ${segment.readNextOffset}")
removeAndDeleteSegmentsAsync(unflushed.toList)
truncated = true
}
}
}
val logEndOffsetOption = deleteSegmentsIfLogStartGreaterThanLogEnd()
if (segments.isEmpty) {
// no existing segments, create a new mutable segment beginning at logStartOffset
segments.add(
LogSegment.open(
dir = dir,
baseOffset = logStartOffsetCheckpoint,
config,
time = time,
initFileSize = config.initFileSize,
preallocate = config.preallocate))
}
// Update the recovery point if there was a clean shutdown and did not perform any changes to
// the segment. Otherwise, we just ensure that the recovery point is not ahead of the log end
// offset. To ensure correctness and to make it easier to reason about, it's best to only advance
// the recovery point when the log is flushed. If we advanced the recovery point here, we could
// skip recovery for unflushed segments if the broker crashed after we checkpoint the recovery
// point and before we flush the segment.
(hadCleanShutdown, logEndOffsetOption) match {
case (true, Some(logEndOffset)) =>
(logEndOffset, logEndOffset)
case _ =>
val logEndOffset = logEndOffsetOption.getOrElse(segments.lastSegment.get.readNextOffset)
(Math.min(recoveryPointCheckpoint, logEndOffset), logEndOffset)
}
}
/**
* This method deletes the given log segments and the associated producer snapshots, by doing the
* following for each of them:
* - It removes the segment from the segment map so that it will no longer be used for reads.
* - It schedules asynchronous deletion of the segments that allows reads to happen concurrently without
* synchronization and without the possibility of physically deleting a file while it is being
* read.
*
* This method does not need to convert IOException to KafkaStorageException because it is either
* called before all logs are loaded or the immediate caller will catch and handle IOException
*
* @param segmentsToDelete The log segments to schedule for deletion
*/
private def removeAndDeleteSegmentsAsync(segmentsToDelete: Iterable[LogSegment]): Unit = {
if (segmentsToDelete.nonEmpty) {
      // Most callers hold an iterator into the `segments` collection and
// `removeAndDeleteSegmentAsync` mutates it by removing the deleted segment. Therefore,
// we should force materialization of the iterator here, so that results of the iteration
// remain valid and deterministic. We should also pass only the materialized view of the
// iterator to the logic that deletes the segments.
val toDelete = segmentsToDelete.toList
info(s"Deleting segments as part of log recovery: ${toDelete.mkString(",")}")
toDelete.foreach { segment =>
segments.remove(segment.baseOffset)
}
UnifiedLog.deleteSegmentFiles(
toDelete,
asyncDelete = true,
dir,
topicPartition,
config,
scheduler,
logDirFailureChannel,
logIdent)
deleteProducerSnapshotsAsync(segmentsToDelete)
}
}
private def deleteProducerSnapshotsAsync(segments: Iterable[LogSegment]): Unit = {
UnifiedLog.deleteProducerSnapshots(segments,
producerStateManager,
asyncDelete = true,
scheduler,
config,
logDirFailureChannel,
dir.getParent,
topicPartition)
}
}
| TiVo/kafka | core/src/main/scala/kafka/log/LogLoader.scala | Scala | apache-2.0 | 22,795 |
package io.udash.web.homepage.styles.partials
import io.udash.css.{CssBase, CssStyle}
import io.udash.web.commons.styles.attributes.Attributes
import io.udash.web.commons.styles.components.{HeaderButtonsStyles, HeaderNavStyles}
import io.udash.web.commons.styles.utils.{CommonStyleUtils, MediaQueries, StyleConstants}
import scalacss.internal.Literal
import scala.concurrent.duration.DurationInt
import scala.language.postfixOps
object HeaderStyles extends CssBase with HeaderButtonsStyles with HeaderNavStyles {
import dsl._
val header: CssStyle = style(
position.absolute,
top(`0`),
left(`0`),
width(100 %%),
height(StyleConstants.Sizes.LandingPageHeaderHeight px),
fontSize(1 rem),
zIndex(999),
&.attr(Attributes.data(Attributes.Pinned), "true")(
position.fixed,
height(StyleConstants.Sizes.HeaderHeightPin px),
backgroundColor.black,
animationName(headerAnimation),
animationIterationCount.count(1),
animationDuration(300 milliseconds),
MediaQueries.tabletLandscape(
height(StyleConstants.Sizes.HeaderHeightPin * .85 px)
),
unsafeChild(s".${headerLogo.className}")(
width(48 px),
height(56 px),
backgroundImage := "url(/assets/images/udash_logo.png)",
MediaQueries.tabletPortrait(
display.none
)
),
unsafeChild(s".${btnMobile.className}")(
CommonStyleUtils.middle
)
),
MediaQueries.tabletPortrait(
height(StyleConstants.Sizes.HeaderHeight * .9 px)
)
)
private lazy val headerAnimation: CssStyle = keyframes(
0d -> keyframe(
transform := "translateY(-100%)"
),
100d -> keyframe(
transform := "translateY(0)"
)
)
val headerLeft: CssStyle = style(
position.relative,
float.left,
height(100 %%)
)
lazy val headerLogo: CssStyle = style(
CommonStyleUtils.relativeMiddle,
display.inlineBlock,
verticalAlign.middle,
width(65 px),
height(96 px),
marginRight(25 px),
backgroundImage := "url(/assets/images/udash_logo_l.png)",
backgroundRepeat.noRepeat,
backgroundSize := "100%",
MediaQueries.tabletPortrait(
display.block,
width(StyleConstants.Sizes.GuideHeaderHeightMobile px),
height(14 px),
backgroundPosition := Literal.bottom,
transform := none,
top.auto
)
)
lazy val btnMobile: CssStyle = style(
position.relative
)
}
| UdashFramework/udash-core | guide/shared/src/main/scala/io/udash/web/homepage/styles/partials/HeaderStyles.scala | Scala | apache-2.0 | 2,468 |
package discord
import javax.inject.{Inject, Singleton}
import discord.api.DiscordCommand
import discord.commands._
@Singleton
class DiscordCommandRegistry @Inject()(
val helpCommand: DiscordHelpCommand,
val pingCommand: DiscordPingCommand,
val versionCommand: DiscordVersionCommand,
val sosCommand: DiscordSoSCommand,
val subCountCommand: DiscordSubCountCommand,
val uptimeCommand: DiscordUptimeCommand,
val bitsCommand: DiscordBitsCommand,
val bitGameCommand: DiscordBitGameCommand
) {
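  // Index the injected command handlers by their command name so that incoming
  // chat commands can be dispatched with a single Map lookup.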
val commands: Map[String, DiscordCommand] = Map(
helpCommand.name -> helpCommand,
pingCommand.name -> pingCommand,
versionCommand.name -> versionCommand,
sosCommand.name -> sosCommand,
subCountCommand.name -> subCountCommand,
uptimeCommand.name -> uptimeCommand,
bitsCommand.name -> bitsCommand,
bitGameCommand.name -> bitGameCommand
)
}
| Cobbleopolis/MonsterTruckBot | modules/discord/app/discord/DiscordCommandRegistry.scala | Scala | mit | 1,277 |
package com.xenopsconsulting.gamedayapi.batch
import org.junit.Test
class MySQLDatabaseImporterTest {
@Test
def testCreate() {
// MySQLDatabaseImporter.createTables()
// MySQLDatabaseImporter.importPitchesByYearAndTeam(2011, "sea")
// MySQLDatabaseImporter.importPitchesByYearsAndTeam(List(2010, 2011, 2012, 2013), "sea")
// MySQLDatabaseImporter.importPitchesByYear(2013)
}
}
| ecopony/scala-gameday-api | src/test/scala/com/xenopsconsulting/gamedayapi/batch/MySQLDatabaseImporterTest.scala | Scala | mit | 399 |
/*
* Copyright 2017-2022 John Snow Labs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.johnsnowlabs.nlp.embeddings
import java.nio.{ByteBuffer, ByteOrder}
trait ReadsFromBytes {
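  /** Decodes a byte array into 32-bit floats, assuming a packed 4-byte little-endian layout.
   *
   *  @param source the raw bytes; the length is expected to be a multiple of 4
   *  @return the decoded float values, one per 4-byte group
   */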
def fromBytes(source: Array[Byte]): Array[Float] = {
val wrapper = ByteBuffer.wrap(source)
wrapper.order(ByteOrder.LITTLE_ENDIAN)
val result = Array.fill[Float](source.length / 4)(0f)
for (i <- result.indices) {
result(i) = wrapper.getFloat(i * 4)
}
result
}
}
| JohnSnowLabs/spark-nlp | src/main/scala/com/johnsnowlabs/nlp/embeddings/ReadsFromBytes.scala | Scala | apache-2.0 | 1,006 |
/*
* Copyright 2012 Comcast Cable Communications Management, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.comcast.money.core.samplers
import com.comcast.money.api.SpanId
/**
* A sampler that tests whether or not a span should be recorded or sampled.
*/
trait Sampler {
/**
* Tests the span to determine if it should be recorded or sampled.
* @param spanId the id of the span
* @param parentSpanId the id of the parent span, if any
* @param spanName the name of the span
* @return a [[SamplerResult]] indicating whether or not the span should be recorded or sampled
*/
def shouldSample(spanId: SpanId, parentSpanId: Option[SpanId], spanName: String): SamplerResult
}
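// Illustrative sketch of an implementation (the concrete `SamplerResult` subtypes are assumed
// here and may be named differently in money-core):
//
//   final class AlwaysOnSampler extends Sampler {
//     override def shouldSample(spanId: SpanId, parentSpanId: Option[SpanId], spanName: String): SamplerResult =
//       RecordResult() // record and sample every span, regardless of its parent
//   }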
| Comcast/money | money-core/src/main/scala/com/comcast/money/core/samplers/Sampler.scala | Scala | apache-2.0 | 1,227 |
package ee.cone.c4gate.deep_session
import ee.cone.c4actor._
import ee.cone.c4assemble.Assemble
import ee.cone.c4gate.SessionAttrAccessFactory
import ee.cone.c4proto.Protocol
trait DeepSessionAttrApp
extends SessionDataProtocolApp
with DeepSessionAttrFactoryImplApp
with DeepSessionDataAssembleApp
trait SessionDataProtocolApp extends ProtocolsApp {
override def protocols: List[Protocol] = DeepSessionDataProtocol :: super.protocols
}
trait DeepSessionDataAssembleApp extends AssemblesApp {
def mortal: MortalFactory
def userModel: Class[_ <: Product]
def roleModel: Class[_ <: Product]
override def assembles: List[Assemble] =
DeepSessionDataAssembles(mortal, userModel, roleModel) ::: super.assembles
}
trait DeepSessionAttrFactoryImplApp {
def qAdapterRegistry: QAdapterRegistry
def defaultModelRegistry: DefaultModelRegistry
def modelAccessFactory: ModelAccessFactory
def idGenUtil: IdGenUtil
def sessionAttrAccessFactory: SessionAttrAccessFactory
lazy val deepSessionAttrAccessFactory: DeepSessionAttrAccessFactory =
new DeepSessionAttrAccessFactoryImpl(qAdapterRegistry, defaultModelRegistry, modelAccessFactory, idGenUtil, sessionAttrAccessFactory)
}
| wregs/c4proto | c4gate-extra/src/main/scala/ee/cone/c4gate/deep_session/DeepSessionAttrMix.scala | Scala | apache-2.0 | 1,213 |