code (stringlengths 5–1M) | repo_name (stringlengths 5–109) | path (stringlengths 6–208) | language (stringclasses 1) | license (stringclasses 15) | size (int64 5–1M) |
---|---|---|---|---|---|
package link.naicode.utils.math
/**
* Created by naicode on 8/31/14.
*
*/
import org.junit.runner.RunWith
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class TestClamp extends Specification {
"Clamp.range" should {
"clamp to big numbers" in {
Clamp.range(-2,10)(11) must_== 10
}
"clamp to smal numbers" in {
Clamp.range(-2,10)(-11) must_== -2
}
"not clamp fitting numbers" in {
Clamp.range(-2,10)(1) must_== 1
}
"work with Long" in {
Clamp.range(-2L,Long.MaxValue)(-11) must_== -2
}
"work with Byte" in {
val min:Byte = 12
val max:Byte = 24
Clamp.range(min,max)(12) must_== 12
}
"work with Float" in {
Clamp.range(.1f,.2f)(.15f) must_== .15f
}
"work with Double" in {
Clamp.range(.1d,.2d)(.15d) must_== .15d
}
"work with Char" in {
Clamp.range('A','Z')('+') must_== 'A'
}
"work with Short" in {
val min:Short = 12
val max:Short= 24
Clamp.range(min,max)(13) must_== 13
}
}
"Clamp.lenXXX" should {
"cut to long Strings (FillLeft)" in {
Clamp.lenFillLeft("0034",2,'0') must_== "34"
}
"cut to long Strings (FillRight)" in {
Clamp.lenFillRight("ab ",2,' ') must_== "ab"
}
"fill to short Strings (FillLeft)" in {
Clamp.lenFillLeft("34",4,'0') must_== "0034"
}
"fill to short Strings (FillRight)" in {
Clamp.lenFillRight("ab",4,' ') must_== "ab "
}
}
}
| naicode/DHmacAuth | src/test/scala/link/naicode/utils/math/TestClamp.scala | Scala | apache-2.0 | 1,535 |
package com.github.agourlay.cornichon.steps.regular
import cats.data.NonEmptyList
import cats.syntax.either._
import com.github.agourlay.cornichon.core._
import com.github.agourlay.cornichon.core.Engine._
import monix.eval.Task
import scala.concurrent.duration.Duration
case class DebugStep(message: Session ⇒ Either[CornichonError, String], title: String = "Debug step") extends ValueStep[String] {
def setTitle(newTitle: String) = copy(title = newTitle)
override def run(initialRunState: RunState) =
Task.delay {
message(initialRunState.session).leftMap(NonEmptyList.of(_))
}
override def onError(errors: NonEmptyList[CornichonError], initialRunState: RunState) = {
val debugErrorLogs = errorLogs(title, errors, initialRunState.depth)
val failedStep = FailedStep(this, errors)
(debugErrorLogs, failedStep)
}
override def onSuccess(result: String, initialRunState: RunState, executionTime: Duration) =
(Some(DebugLogInstruction(result, initialRunState.depth)), None)
} | OlegIlyenko/cornichon | cornichon-core/src/main/scala/com/github/agourlay/cornichon/steps/regular/DebugStep.scala | Scala | apache-2.0 | 1,019 |
/*
* Copyright ixias.net All Rights Reserved.
*
* Use of this source code is governed by an MIT-style license
* For the full copyright and license information,
* please view the LICENSE file that was distributed with this source code.
*/
package ixias.security
import scala.util.Try
import com.typesafe.config.ConfigFactory
import org.keyczar.Signer
import org.keyczar.HmacKey
import org.keyczar.interfaces.KeyczarReader
import org.apache.commons.codec.binary.{Hex, StringUtils}
import org.apache.commons.codec.digest.DigestUtils
/**
* TokenSigner signs session tokens and verifies the generated signatures.
*/
case class TokenSigner(reader: KeyczarReader) {
lazy val signer = new Signer(reader)
/**
* Signs the input and produces a signature.
*/
def sign(token: String): String = {
val signature = signer.sign(StringUtils.getBytesUsAscii(token))
new String(Hex.encodeHex(signature)) + token
}
/**
* Verify the session token signature on the given data
*/
def verify(signedToken: String): Try[String] =
Try {
val (signature, token) = signedToken.splitAt(signer.digestSize * 2)
signer.verify(
StringUtils.getBytesUsAscii(token),
Hex.decodeHex(signature.toCharArray)
) match {
case true => token
case false => throw new java.security.SignatureException
}
}
}
// Companion object
//~~~~~~~~~~~~~~~~~~
object TokenSigner {
/** Creates a TokenSigner. */
def apply() = {
val config = ConfigFactory.load()
val secret = config.getString("session.token.secret")
new TokenSigner(HmacKeyReader(new HmacKey(DigestUtils.sha256(secret))))
}
}
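// Hypothetical usage sketch (not part of the original file): builds the default
// signer from `session.token.secret` in the loaded configuration, signs a
// placeholder token and verifies the round trip.
object TokenSignerExample {
  def main(args: Array[String]): Unit = {
    val signer = TokenSigner()
    val signed = signer.sign("example-session-token")
    println(signer.verify(signed)) // Success(example-session-token)
  }
}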
| sp1rytus/ixias | framework/ixias-core/src/main/scala/ixias/security/TokenSigner.scala | Scala | mit | 1,647 |
package com.cleawing.akka.curator
import java.nio.file.Paths
import akka.actor._
import com.cleawing.akka.LikeActorRefExtension
import org.apache.curator.RetryPolicy
import org.apache.curator.ensemble.EnsembleProvider
import org.apache.curator.ensemble.exhibitor.{DefaultExhibitorRestClient, ExhibitorEnsembleProvider, Exhibitors}
import org.apache.curator.ensemble.exhibitor.Exhibitors.BackupConnectionStringProvider
import org.apache.curator.ensemble.fixed.FixedEnsembleProvider
import org.apache.curator.retry._
import org.apache.curator.utils.PathUtils
object Curator
extends ExtensionId[CuratorExt]
with ExtensionIdProvider {
override def lookup() = Curator
override def createExtension(system: ExtendedActorSystem) = new CuratorExt(system)
override def get(system: ActorSystem): CuratorExt = super.get(system)
object LeaderLatch {
import recipes.LeaderLatch.CloseMode
sealed trait Request
case class Join(path: String, closeMode: CloseMode.Value = CloseMode.SILENT) extends Request
case class Left(path: String) extends Request
}
object Reaper {
import recipes.Reaper.Mode
sealed trait Request
case class AddPath(path: String, recursiveFrom: Option[String] = None, mode: Mode.Value = Mode.REAP_INDEFINITELY) extends Request
object AddPath {
def apply(path: String, mode: Mode.Value): AddPath = new AddPath(path, None, mode)
}
case class RemovePath(path: String) extends Request
}
// TODO. Throws IllegalArgumentException
def buildPath(prefix: String, path: String) = PathUtils.validatePath(Paths.get("/", prefix, path).toString)
}
private[curator] class CuratorExt(val system: ExtendedActorSystem) extends LikeActorRefExtension {
val settings = CuratorSettings(system)
protected val target = system.systemActorOf(
CuratorGuardian.props(
settings.ensembleProvider,
settings.retryPolicy,
settings.namespace),
"curator"
)
}
private[curator] case class CuratorSettings (private val system: ActorSystem) {
private val config = system.settings.config.getConfig("akka.curator")
val namespace: String = s"akka/${if (config.getIsNull("namespace")) system.name else config.getString("namespace")}"
val retryPolicy: RetryPolicy = config.getString("use-retry-policy") match {
case "exponential-backoff" =>
val policy = config.getConfig("retry-policy.exponential-backoff")
new ExponentialBackoffRetry(
policy.getInt("base-sleep"),
policy.getInt("max-retries"),
policy.getInt("max-sleep") match {
case default if default <= 0 => Int.MaxValue
case other => other
}
)
case "bounded-exponential-backoff" =>
val policy = config.getConfig("retry-policy.bounded-exponential-backoff")
new BoundedExponentialBackoffRetry(
policy.getInt("base-sleep"),
policy.getInt("max-sleep"),
policy.getInt("max-retries")
)
case "only-once" => new RetryOneTime(config.getInt("retry-policy.one-time.sleep-between"))
case "capped-max" =>
val policy = config.getConfig("retry-policy.bounded-times")
new RetryNTimes(
policy.getInt("max-retries"),
policy.getInt("sleep-between")
)
case "until-elapsed" =>
val policy = config.getConfig("retry-policy.until-elapsed")
new RetryUntilElapsed(
policy.getInt("max-elapsed"),
policy.getInt("sleep-between")
)
case unsupported => throw new IllegalArgumentException(s"Unsupported RetryPolicy: $unsupported")
}
val ensembleProvider: EnsembleProvider = {
config.getString("use-ensemble-provider") match {
case "fixed" => new FixedEnsembleProvider(config.getString("ensemble-provider.fixed.zk"))
case "exhibitor" =>
val provider = config.getConfig("ensemble-provider.exhibitor")
val exhibitors = new Exhibitors(
provider.getStringList("hosts"),
provider.getInt("port"),
new BackupConnectionStringProvider {
val getBackupConnectionString = config.getString("ensemble-provider.fixed.zk")
}
)
new ExhibitorEnsembleProvider(
exhibitors,
new DefaultExhibitorRestClient(),
"/exhibitor/v1/cluster/list",
provider.getDuration("polling-interval").toMillis.toInt,
retryPolicy
)
case missedProvider => throw new IllegalArgumentException(s"Unsupported EnsembleProvider: $missedProvider")
}
}
val reaperThreshold: Int = config.getDuration("reaper-threshold").toMillis.toInt
val leaderLatchIdleTimeout: Long = config.getDuration("leader-latch-idle-timeout").toMillis
}
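// Illustrative HOCON sketch (not part of the original file) of the keys read by
// CuratorSettings above; all values are placeholders:
//
//   akka.curator {
//     namespace = null                      // falls back to the actor system name
//     use-retry-policy = "exponential-backoff"
//     retry-policy.exponential-backoff {
//       base-sleep  = 1000
//       max-retries = 5
//       max-sleep   = 0                     // <= 0 is treated as Int.MaxValue
//     }
//     use-ensemble-provider = "fixed"
//     ensemble-provider.fixed.zk = "127.0.0.1:2181"
//     reaper-threshold = 10s
//     leader-latch-idle-timeout = 60s
//   }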
| Cleawing/united | akka-extensions/src/main/scala/com/cleawing/akka/curator/Curator.scala | Scala | apache-2.0 | 4,661 |
package mr.merc.politics
import mr.merc.politics.Economy._
import mr.merc.politics.ForeignPolicy._
import mr.merc.politics.Migration._
import mr.merc.politics.Regime._
import mr.merc.politics.SocialPolicy._
import mr.merc.politics.VotersPolicy._
import scalafx.scene.paint.Color
case class Party(name: String,
color: Color,
migration: MigrationPosition,
regime: RegimePosition,
foreignPolicy: ForeignPolicyPosition,
economy: EconomyPosition,
socialPolicy: SocialPolicyPosition,
votersPolicy: VotersPolicyPosition) {
def politicalPosition:PoliticalPosition = {
PoliticalPosition(migration, regime, foreignPolicy, economy, socialPolicy, votersPolicy)
}
}
object Party {
val Absolute: Party = Party("party.monarchistic", Color.White, OpenBorders, Regime.Absolute, Expansionism, StateEconomy, LifeNeedsSocialSecurity, NoVoting)
val Benevolent: Party = Party("party.enlightenedMonarchistic", Color.LightGray, OpenBorders, Regime.Absolute, Pacifism, StateEconomy, LifeNeedsSocialSecurity, NoVoting)
val Isolationist: Party = Party("party.isolationist", Color.Gray, ClosedBorders, Regime.Absolute, Pacifism, StateEconomy, NoSocialSecurity, NoVoting)
val Militaristic: Party = Party("party.militaristic", Color.Black, ClosedBorders, Regime.Absolute, Expansionism, StateEconomy, NoSocialSecurity, NoVoting)
val Magocratic: Party = Party("party.magocratic", Color.Brown, ClosedBorders, Constitutional, Expansionism, StateEconomy, NoSocialSecurity, MagesOnly)
val Theocratic: Party = Party("party.theocratic", Color.Purple, ClosedBorders, Constitutional, Expansionism, StateEconomy, NoSocialSecurity, ClericsOnly)
val Aristocratic: Party = Party("party.aristocratic", Color.LightBlue, ClosedBorders, Constitutional, Expansionism, StateEconomy, NoSocialSecurity, PrimaryUpperClass)
val Capitalistic: Party = Party("party.capitalistic", Color.LightYellow, OpenBorders, Constitutional, Expansionism, FreeMarket, NoSocialSecurity, PrimaryUpperClass)
val Manufactorers: Party = Party("party.manufactorers", Color.LightGoldrenrodYellow, OpenBorders, Constitutional, Expansionism, FreeMarket, NoSocialSecurity, PrimaryUpperAndMiddleClass)
val ResponsibleManufactorers: Party = Party("party.responsibleManufactorers", Color.YellowGreen, OpenBorders, Constitutional, Expansionism, FreeMarket, LifeNeedsSocialSecurity, PrimaryUpperAndMiddleClass)
val Paternalistic: Party = Party("party.paternalistic", Color.IndianRed, OpenBorders, Constitutional, Expansionism, StateEconomy, LifeNeedsSocialSecurity, PrimaryUpperAndMiddleClass)
val Oligarchic: Party = Party("party.oligarchic", Color.CornflowerBlue, OpenBorders, Constitutional, Expansionism, StateEconomy, NoSocialSecurity, PrimaryUpperAndMiddleClass)
val Conservative: Party = Party("party.conservative", Color.Blue, OpenBorders, Democracy, Expansionism, FreeMarket, LifeNeedsSocialSecurity, Everyone)
val Patriot: Party = Party("party.patriotic", Color.DarkBlue, OpenBorders, Democracy, Expansionism, StateEconomy, LifeNeedsSocialSecurity, Everyone)
val Liberal: Party = Party("party.liberal", Color.Yellow, OpenBorders, Democracy, Pacifism, FreeMarket, LifeNeedsSocialSecurity, Everyone)
// dark yellow
val Libertarian: Party = Party("party.libertarian", Color.rgb(153,153,0), OpenBorders, Democracy, Pacifism, FreeMarket, NoSocialSecurity, Everyone)
val Socialist: Party = Party("party.socialist", Color.Red, OpenBorders, Democracy, Pacifism, StateEconomy, RegularNeedsSocialSecurity, Everyone)
val SocialDemocratic: Party = Party("party.socialDemocratic", Color.Pink, OpenBorders, Democracy, Pacifism, FreeMarket, RegularNeedsSocialSecurity, Everyone)
val AllParties:List[Party] = List(Absolute, Benevolent, Isolationist, Militaristic, Magocratic, Theocratic,
Aristocratic, Capitalistic, Manufactorers, ResponsibleManufactorers, Paternalistic, Oligarchic,
Conservative, Liberal, Libertarian, Socialist, SocialDemocratic, Patriot)
val RegimeParties:Map[RegimePosition, List[Party]] = AllParties.groupBy(_.regime)
} | RenualdMarch/merc | src/main/scala/mr/merc/politics/Party.scala | Scala | gpl-3.0 | 4,152 |
/**
* (C) Copyright IBM Corp. 2015 - 2017
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.ibm.sparktc.sparkbench.workload.sql
import com.ibm.sparktc.sparkbench.utils.GeneralFunctions._
import com.ibm.sparktc.sparkbench.utils.SaveModes
import com.ibm.sparktc.sparkbench.utils.SparkFuncs._
import com.ibm.sparktc.sparkbench.workload.{Workload, WorkloadDefaults}
import org.apache.spark.sql.{DataFrame, SparkSession}
case class SQLWorkloadResult(
name: String,
timestamp: Long,
loadTime: Long,
queryTime: Long,
saveTime: Long = 0L,
total_Runtime: Long
)
object SQLWorkload extends WorkloadDefaults {
val name = "sql"
def apply(m: Map[String, Any]) =
new SQLWorkload(input = m.get("input").map(_.asInstanceOf[String]),
output = m.get("output").map(_.asInstanceOf[String]),
saveMode = getOrDefault[String](m, "save-mode", SaveModes.error),
queryStr = getOrThrow(m, "query").asInstanceOf[String],
cache = getOrDefault[Boolean](m, "cache", false),
numPartitions = m.get("partitions").map(_.asInstanceOf[Int])
)
}
case class SQLWorkload (input: Option[String],
output: Option[String] = None,
saveMode: String,
queryStr: String,
cache: Boolean,
numPartitions: Option[Int] = None
) extends Workload {
def loadFromDisk(spark: SparkSession): (Long, DataFrame) = time {
val df = load(spark, input.get)
if(cache) df.cache()
df
}
def query(df: DataFrame, spark: SparkSession): (Long, DataFrame) = time {
df.createOrReplaceTempView("input")
spark.sqlContext.sql(queryStr)
}
def save(res: DataFrame, where: String, spark: SparkSession): (Long, Unit) = time {
if(numPartitions.nonEmpty){
writeToDisk(where, saveMode, res.repartition(numPartitions.get), spark)
}
else {
writeToDisk(where, saveMode, res, spark)
}
}
override def doWorkload(df: Option[DataFrame] = None, spark: SparkSession): DataFrame = {
val timestamp = System.currentTimeMillis()
val (loadtime, df) = loadFromDisk(spark)
val (querytime, res) = query(df, spark)
val (savetime, _) = output match {
case Some(dir) => save(res, dir, spark)
case _ => (0L, Unit)
}
val total = loadtime + querytime + savetime
spark.createDataFrame(Seq(
SQLWorkloadResult(
"sql",
timestamp,
loadtime,
querytime,
savetime,
total
)
))
}
}
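// Hypothetical usage sketch (not part of the original file): builds the workload
// from a plain configuration map, the way a workload factory would. The input
// path is a placeholder.
object SQLWorkloadExample {
  def main(args: Array[String]): Unit = {
    val workload = SQLWorkload(Map(
      "input" -> "/tmp/people.parquet",
      "query" -> "select name from input where age > 30",
      "cache" -> false
    ))
    println(workload)
  }
}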
| SparkTC/spark-bench | cli/src/main/scala/com/ibm/sparktc/sparkbench/workload/sql/SQLWorkload.scala | Scala | apache-2.0 | 3,269 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.amaterasu.leader.utilities
import java.io.{File, FileInputStream}
import java.nio.file.{Files, Paths}
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import org.apache.amaterasu.common.configuration.ClusterConfig
import org.apache.amaterasu.common.dataobjects.{ActionData, ExecData, TaskData}
import org.apache.amaterasu.common.execution.dependencies.{Dependencies, PythonDependencies}
import org.apache.amaterasu.common.logging.Logging
import org.apache.amaterasu.common.runtime.Environment
import org.apache.mesos.protobuf.ByteString
import org.yaml.snakeyaml.Yaml
import scala.collection.JavaConverters._
import scala.collection.mutable
import scala.io.Source
/**
* Created by karel_alfonso on 27/06/2016.
*/
object DataLoader extends Logging {
val mapper = new ObjectMapper()
mapper.registerModule(DefaultScalaModule)
val ymlMapper = new ObjectMapper(new YAMLFactory())
ymlMapper.registerModule(DefaultScalaModule)
def getTaskData(actionData: ActionData, env: String): ByteString = {
val srcFile = actionData.src
val src = Source.fromFile(s"repo/src/$srcFile").mkString
val envValue = Source.fromFile(s"repo/env/$env/job.yml").mkString
val envData = ymlMapper.readValue(envValue, classOf[Environment])
val data = mapper.writeValueAsBytes(TaskData(src, envData, actionData.groupId, actionData.typeId, actionData.exports))
ByteString.copyFrom(data)
}
def getExecutorData(env: String, clusterConf: ClusterConfig): ByteString = {
// loading the job configuration
val envValue = Source.fromFile(s"repo/env/$env/job.yml").mkString //TODO: change this to YAML
val envData = ymlMapper.readValue(envValue, classOf[Environment])
// loading all additional configurations
val files = new File(s"repo/env/$env/").listFiles().filter(_.isFile).filter(_.getName != "job.yml")
val config = files.map(yamlToMap).toMap
// loading the job's dependencies
var depsData: Dependencies = null
var pyDepsData: PythonDependencies = null
if (Files.exists(Paths.get("repo/deps/jars.yml"))) {
val depsValue = Source.fromFile(s"repo/deps/jars.yml").mkString
depsData = ymlMapper.readValue(depsValue, classOf[Dependencies])
}
if (Files.exists(Paths.get("repo/deps/python.yml"))) {
val pyDepsValue = Source.fromFile(s"repo/deps/python.yml").mkString
pyDepsData = ymlMapper.readValue(pyDepsValue, classOf[PythonDependencies])
}
val data = mapper.writeValueAsBytes(ExecData(envData, depsData, pyDepsData, config))
ByteString.copyFrom(data)
}
def yamlToMap(file: File): (String, Map[String, Any]) = {
val yaml = new Yaml()
val conf = yaml.load(new FileInputStream(file)).asInstanceOf[java.util.Map[String, Any]].asScala.toMap
(file.getName.replace(".yml",""), conf)
}
}
class ConfMap[String, T <: ConfMap[String, T]] extends mutable.ListMap[String, Either[String, T]] | eyalbenivri/amaterasu | leader/src/main/scala/org/apache/amaterasu/leader/utilities/DataLoader.scala | Scala | apache-2.0 | 3,841 |
package org.openurp.edu.eams.system.report.web.action
import org.beangle.commons.collection.Order
import org.beangle.data.jpa.dao.OqlBuilder
import org.beangle.data.model.Entity
import org.openurp.edu.eams.system.report.ReportTemplate
import org.openurp.edu.eams.web.action.common.ProjectSupportAction
class ReportTemplateAction extends ProjectSupportAction {
protected override def getEntityName(): String = classOf[ReportTemplate].getName
protected override def saveAndForward(entity: Entity[_]): String = {
val template = entity.asInstanceOf[ReportTemplate]
if (null == template.getProject) template.setProject(getProject)
super.saveAndForward(entity)
}
protected override def getQueryBuilder(): OqlBuilder[_] = {
val builder = OqlBuilder.from(getEntityName, getShortName)
populateConditions(builder)
builder.orderBy(get(Order.ORDER_STR)).limit(getPageLimit)
builder.where("reportTemplate.project=:project", getProject)
builder
}
}
| openurp/edu-eams-webapp | web/src/main/scala/org/openurp/edu/eams/system/report/web/action/ReportTemplateAction.scala | Scala | gpl-3.0 | 984 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.play.http.ws
import play.api.libs.json.JsValue
import play.api.libs.ws.WSResponse
import uk.gov.hmrc.http.HttpResponse
@deprecated("Use WsHttpResponse.apply and HttpResponse instead", "11.0.0")
class WSHttpResponse(wsResponse: WSResponse) extends HttpResponse {
override def allHeaders: Map[String, Seq[String]] = wsResponse.headers.mapValues(_.toSeq).toMap
override def status: Int = wsResponse.status
override def json: JsValue = wsResponse.json
override def body: String = wsResponse.body
}
object WSHttpResponse {
def apply(wsResponse: WSResponse): HttpResponse =
// Note that HttpResponse defines `def json` as `Json.parse(body)` - this may be different from wsResponse.json depending on version.
// https://github.com/playframework/play-ws/commits/master/play-ws-standalone-json/src/main/scala/play/api/libs/ws/JsonBodyReadables.scala shows that is was redefined
// to handle an encoding issue, but subsequently reverted.
HttpResponse(
status = wsResponse.status,
body = wsResponse.body,
headers = wsResponse.headers.mapValues(_.toSeq).toMap
)
}
| hmrc/http-verbs | http-verbs-common/src/main/scala/uk/gov/hmrc/play/http/ws/WSHttpResponse.scala | Scala | apache-2.0 | 1,736 |
package org.jetbrains.plugins.scala.worksheet
import java.io.File
import com.intellij.openapi.module.Module
import org.jetbrains.annotations.ApiStatus
import org.jetbrains.plugins.scala.ExtensionPointDeclaration
import org.jetbrains.plugins.scala.worksheet.actions.topmenu.TopComponentAction
/**
* NOTE: do not use in external plugins: `extraWorksheetActions` results are
* registered in the Worksheet top panel UI, so effectively the EP is not dynamic
*/
@ApiStatus.Internal
trait WorksheetCompilerExtension {
def worksheetClasspath(module: Module): Option[Seq[File]]
def extraWorksheetActions(): Seq[TopComponentAction]
}
object WorksheetCompilerExtension
extends ExtensionPointDeclaration[WorksheetCompilerExtension](
"org.intellij.scala.worksheetCompilerExtension"
) {
def worksheetClasspath(module: Module): Option[Seq[File]] =
implementations.iterator.map(_.worksheetClasspath(module)).collectFirst { case Some(cp) => cp }
def extraWorksheetActions(): Seq[TopComponentAction] =
implementations.flatMap(_.extraWorksheetActions())
}
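// Hypothetical sketch (not part of the original file): a minimal no-op
// implementation of the extension point declared above; the object name is
// illustrative only.
object NoOpWorksheetCompilerExtension extends WorksheetCompilerExtension {
  override def worksheetClasspath(module: Module): Option[Seq[File]] = None
  override def extraWorksheetActions(): Seq[TopComponentAction] = Seq.empty
}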
| JetBrains/intellij-scala | scala/worksheet/src/org/jetbrains/plugins/scala/worksheet/WorksheetCompilerExtension.scala | Scala | apache-2.0 | 1,071 |
package models.util
/**
* Author: matthijs
* Created on: 30 Dec 2013.
*/
object StringUtils {
implicit class StringImprovements(val s: String) {
import scala.util.control.Exception._
def toIntOpt = catching(classOf[NumberFormatException]) opt s.toInt
}
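  // Hypothetical usage sketch (not part of the original file):
  //   import models.util.StringUtils._
  //   "123".toIntOpt  // Some(123)
  //   "12a".toIntOpt  // None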
} | plamola/FeedR-V1 | app/models/util/StringUtils.scala | Scala | gpl-2.0 | 271 |
/**
* Copyright 2011-2012 eBusiness Information, Groupe Excilys (www.excilys.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.excilys.ebi.gatling.core.config
import java.util.{ HashMap => JHashMap, Map => JMap }
import scala.collection.JavaConversions.asScalaBuffer
import com.excilys.ebi.gatling.core.ConfigurationConstants._
import com.excilys.ebi.gatling.core.util.StringHelper.trimToOption
import com.typesafe.config.{ Config, ConfigFactory }
import grizzled.slf4j.Logging
/**
* Configuration loader of Gatling
*/
object GatlingConfiguration extends Logging {
var configuration: GatlingConfiguration = _
def setUp(props: JMap[String, Any] = new JHashMap) {
val classLoader = getClass.getClassLoader
val defaultsConfig = ConfigFactory.parseResources(classLoader, "gatling-defaults.conf")
val customConfig = ConfigFactory.parseResources(classLoader, "gatling.conf")
val propertiesConfig = ConfigFactory.parseMap(props)
val config = propertiesConfig.withFallback(customConfig).withFallback(defaultsConfig)
configuration = GatlingConfiguration(
simulation = SimulationConfiguration(
outputDirectoryBaseName = trimToOption(config.getString(CONF_SIMULATION_OUTPUT_DIRECTORY_BASE_NAME)),
runDescription = config.getString(CONF_SIMULATION_RUN_DESCRIPTION),
encoding = config.getString(CONF_SIMULATION_ENCODING),
clazz = trimToOption(config.getString(CONF_SIMULATION_CLASS))),
timeOut = TimeOutConfiguration(
simulation = config.getInt(CONF_TIME_OUT_SIMULATION),
actor = config.getInt(CONF_TIME_OUT_ACTOR)),
directory = DirectoryConfiguration(
data = config.getString(CONF_DIRECTORY_DATA),
requestBodies = config.getString(CONF_DIRECTORY_REQUEST_BODIES),
sources = config.getString(CONF_DIRECTORY_SIMULATIONS),
binaries = trimToOption(config.getString(CONF_DIRECTORY_BINARIES)),
reportsOnly = trimToOption(config.getString(CONF_DIRECTORY_REPORTS_ONLY)),
results = config.getString(CONF_DIRECTORY_RESULTS)),
charting = ChartingConfiguration(
noReports = config.getBoolean(CONF_CHARTING_NO_REPORTS),
maxPlotsPerSeries = config.getInt(CONF_CHARTING_MAX_PLOTS_PER_SERIES),
accuracy = config.getInt(CONF_CHARTING_ACCURACY),
indicators = IndicatorsConfiguration(
lowerBound = config.getInt(CONF_CHARTING_INDICATORS_LOWER_BOUND),
higherBound = config.getInt(CONF_CHARTING_INDICATORS_HIGHER_BOUND),
percentile1 = config.getInt(CONF_CHARTING_INDICATORS_PERCENTILE1),
percentile2 = config.getInt(CONF_CHARTING_INDICATORS_PERCENTILE2))),
http = HttpConfiguration(
provider = config.getString(CONF_HTTP_PROVIDER),
allowPoolingConnection = config.getBoolean(CONF_HTTP_ALLOW_POOLING_CONNECTION),
allowSslConnectionPool = config.getBoolean(CONF_HTTP_ALLOW_SSL_CONNECTION_POOL),
compressionEnabled = config.getBoolean(CONF_HTTP_COMPRESSION_ENABLED),
connectionTimeOut = config.getInt(CONF_HTTP_CONNECTION_TIMEOUT),
idleConnectionInPoolTimeOutInMs = config.getInt(CONF_HTTP_IDLE_CONNECTION_IN_POOL_TIMEOUT_IN_MS),
idleConnectionTimeOutInMs = config.getInt(CONF_HTTP_IDLE_CONNECTION_TIMEOUT_IN_MS),
ioThreadMultiplier = config.getInt(CONF_HTTP_IO_THREAD_MULTIPLIER),
maximumConnectionsPerHost = config.getInt(CONF_HTTP_MAXIMUM_CONNECTIONS_PER_HOST),
maximumConnectionsTotal = config.getInt(CONF_HTTP_MAXIMUM_CONNECTIONS_TOTAL),
maxRetry = config.getInt(CONF_HTTP_MAX_RETRY),
requestCompressionLevel = config.getInt(CONF_HTTP_REQUEST_COMPRESSION_LEVEL),
requestTimeOutInMs = config.getInt(CONF_HTTP_REQUEST_TIMEOUT_IN_MS),
useProxyProperties = config.getBoolean(CONF_HTTP_USE_PROXY_PROPERTIES),
userAgent = config.getString(CONF_HTTP_USER_AGENT),
useRawUrl = config.getBoolean(CONF_HTTP_USE_RAW_URL),
nonStandardJsonSupport = config.getStringList(CONF_HTTP_JSON_FEATURES).toList),
data = DataConfiguration(
dataWriterClasses = config.getStringList(CONF_DATA_WRITER_CLASS_NAMES).toList.map {
case "console" => "com.excilys.ebi.gatling.core.result.writer.ConsoleDataWriter"
case "file" => "com.excilys.ebi.gatling.core.result.writer.FileDataWriter"
case "graphite" => "com.excilys.ebi.gatling.metrics.GraphiteDataWriter"
case clazz => clazz
},
dataReaderClass = (config.getString(CONF_DATA_READER_CLASS_NAME)).trim match {
case "file" => "com.excilys.ebi.gatling.charts.result.reader.FileDataReader"
case clazz => clazz
}),
graphite = GraphiteConfiguration(
host = config.getString(CONF_GRAPHITE_HOST),
port = config.getInt(CONF_GRAPHITE_PORT),
bucketWidth = config.getInt(CONF_GRAPHITE_BUCKET_WIDTH)),
config)
}
}
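// Hypothetical usage sketch (not part of the original file): initialises the
// configuration with a single property override and reads a value back.
object GatlingConfigurationExample {
  def main(args: Array[String]): Unit = {
    val overrides = new JHashMap[String, Any]()
    overrides.put(CONF_SIMULATION_ENCODING, "UTF-8")
    GatlingConfiguration.setUp(overrides)
    println(GatlingConfiguration.configuration.simulation.encoding) // UTF-8
  }
}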
case class SimulationConfiguration(
outputDirectoryBaseName: Option[String],
runDescription: String,
encoding: String,
clazz: Option[String])
case class TimeOutConfiguration(
simulation: Int,
actor: Int)
case class DirectoryConfiguration(
data: String,
requestBodies: String,
sources: String,
binaries: Option[String],
reportsOnly: Option[String],
results: String)
case class ChartingConfiguration(
noReports: Boolean,
maxPlotsPerSeries: Int,
accuracy: Int,
indicators: IndicatorsConfiguration)
case class IndicatorsConfiguration(
lowerBound: Int,
higherBound: Int,
percentile1: Int,
percentile2: Int)
case class HttpConfiguration(
provider: String,
allowPoolingConnection: Boolean,
allowSslConnectionPool: Boolean,
compressionEnabled: Boolean,
connectionTimeOut: Int,
idleConnectionInPoolTimeOutInMs: Int,
idleConnectionTimeOutInMs: Int,
ioThreadMultiplier: Int,
maximumConnectionsPerHost: Int,
maximumConnectionsTotal: Int,
maxRetry: Int,
requestCompressionLevel: Int,
requestTimeOutInMs: Int,
useProxyProperties: Boolean,
userAgent: String,
useRawUrl: Boolean,
nonStandardJsonSupport: List[String])
case class DataConfiguration(
dataWriterClasses: List[String],
dataReaderClass: String)
case class GraphiteConfiguration(
host: String,
port: Int,
bucketWidth: Int)
case class GatlingConfiguration(
simulation: SimulationConfiguration,
timeOut: TimeOutConfiguration,
directory: DirectoryConfiguration,
charting: ChartingConfiguration,
http: HttpConfiguration,
data: DataConfiguration,
graphite: GraphiteConfiguration,
config: Config)
| Tjoene/thesis | Case_Programs/gatling-1.4.0/gatling-core/src/main/scala/com/excilys/ebi/gatling/core/config/GatlingConfiguration.scala | Scala | gpl-2.0 | 6,777 |
package com.rasterfoundry.datamodel
import io.circe._
import cats.syntax.either._
sealed abstract class FileType(val repr: String) {
override def toString = repr
}
object FileType {
case object Geotiff extends FileType("GEOTIFF")
case object GeotiffWithMetadata extends FileType("GEOTIFF_WITH_METADATA")
case object NonSpatial extends FileType("NON_SPATIAL")
def fromString(s: String): FileType = s.toUpperCase match {
case "GEOTIFF" => Geotiff
case "GEOTIFF_WITH_METADATA" => GeotiffWithMetadata
case "NON_SPATIAL" => NonSpatial
}
implicit val fileTypeEncoder: Encoder[FileType] =
Encoder.encodeString.contramap[FileType](_.toString)
implicit val fileTypeDecoder: Decoder[FileType] =
Decoder.decodeString.emap { str =>
Either.catchNonFatal(fromString(str)).leftMap(_ => "FileType")
}
}
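// Hypothetical usage sketch (not part of the original file): round-trips a
// FileType value through the circe codecs defined above.
object FileTypeCodecExample {
  import io.circe.syntax._

  def main(args: Array[String]): Unit = {
    val json = (FileType.GeotiffWithMetadata: FileType).asJson
    println(json.noSpaces)     // "GEOTIFF_WITH_METADATA"
    println(json.as[FileType]) // Right(GEOTIFF_WITH_METADATA)
  }
}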
| azavea/raster-foundry | app-backend/datamodel/src/main/scala/FileType.scala | Scala | apache-2.0 | 863 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.convert2.transforms
import org.junit.runner.RunWith
import org.locationtech.geomesa.convert.EvaluationContext
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class PredicateTest extends Specification {
implicit val ctx: EvaluationContext = EvaluationContext.empty
"Predicates" should {
"compare string equals" >> {
foreach(Seq("strEq($1, $2)", "$1 == $2")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "1", "2")) must beFalse
pred.apply(Array("", "1", "1")) must beTrue
pred.eval(Array("", "1", "2")) must beFalse
pred.eval(Array("", "1", "1")) must beTrue
}
}
"compare string not equals" >> {
val pred = Predicate("$1 != $2")
pred.apply(Array("", "1", "2")) must beTrue
pred.apply(Array("", "1", "1")) must beFalse
pred.eval(Array("", "1", "2")) must beTrue
pred.eval(Array("", "1", "1")) must beFalse
}
"compare int equals" >> {
foreach(Seq("intEq($1::int, $2::int)", "$1::int == $2::int")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "1", "2")) must beFalse
pred.apply(Array("", "1", "1")) must beTrue
pred.eval(Array("", "1", "2")) must beFalse
pred.eval(Array("", "1", "1")) must beTrue
}
}
"compare integer equals" >> {
foreach(Seq("integerEq($1::int, $2::int)", "$1::int == $2::int")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "1", "2")) must beFalse
pred.apply(Array("", "1", "1")) must beTrue
pred.eval(Array("", "1", "2")) must beFalse
pred.eval(Array("", "1", "1")) must beTrue
}
}
"compare nested int equals" >> {
foreach(Seq("intEq($1::int, strlen($2))", "$1::int == strlen($2)")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "3", "foo")) must beTrue
pred.apply(Array("", "4", "foo")) must beFalse
pred.eval(Array("", "3", "foo")) must beTrue
pred.eval(Array("", "4", "foo")) must beFalse
}
}
"compare int lteq" >> {
foreach(Seq("intLTEq($1::int, $2::int)", "$1::int <= $2::int")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "1", "2")) must beTrue
pred.apply(Array("", "1", "1")) must beTrue
pred.apply(Array("", "1", "0")) must beFalse
pred.eval(Array("", "1", "2")) must beTrue
pred.eval(Array("", "1", "1")) must beTrue
pred.eval(Array("", "1", "0")) must beFalse
}
}
"compare int lt" >> {
foreach(Seq("intLT($1::int, $2::int)", "$1::int < $2::int")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "1", "2")) must beTrue
pred.apply(Array("", "1", "1")) must beFalse
pred.eval(Array("", "1", "2")) must beTrue
pred.eval(Array("", "1", "1")) must beFalse
}
}
"compare int gteq" >> {
foreach(Seq("intGTEq($1::int, $2::int)", "$1::int >= $2::int")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "1", "2")) must beFalse
pred.apply(Array("", "1", "1")) must beTrue
pred.apply(Array("", "2", "1")) must beTrue
pred.eval(Array("", "1", "2")) must beFalse
pred.eval(Array("", "1", "1")) must beTrue
pred.eval(Array("", "2", "1")) must beTrue
}
}
"compare int gt" >> {
foreach(Seq("intGT($1::int, $2::int)", "$1::int > $2::int")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "1", "2")) must beFalse
pred.apply(Array("", "1", "1")) must beFalse
pred.apply(Array("", "2", "1")) must beTrue
pred.eval(Array("", "1", "2")) must beFalse
pred.eval(Array("", "1", "1")) must beFalse
pred.eval(Array("", "2", "1")) must beTrue
}
}
"compare double equals" >> {
foreach(Seq("doubleEq($1::double, $2::double)", "$1::double == $2::double")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "1.0", "2.0")) must beFalse
pred.apply(Array("", "1.0", "1.0")) must beTrue
pred.eval(Array("", "1.0", "2.0")) must beFalse
pred.eval(Array("", "1.0", "1.0")) must beTrue
}
}
"compare double lteq" >> {
foreach(Seq("doubleLTEq($1::double, $2::double)", "$1::double <= $2::double")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "1.0", "2.0")) must beTrue
pred.apply(Array("", "1.0", "1.0")) must beTrue
pred.apply(Array("", "1.0", "0.0")) must beFalse
pred.eval(Array("", "1.0", "2.0")) must beTrue
pred.eval(Array("", "1.0", "1.0")) must beTrue
pred.eval(Array("", "1.0", "0.0")) must beFalse
}
}
"compare double lt" >> {
foreach(Seq("doubleLT($1::double, $2::double)", "$1::double < $2::double")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "1.0", "2.0")) must beTrue
pred.apply(Array("", "1.0", "1.0")) must beFalse
pred.eval(Array("", "1.0", "2.0")) must beTrue
pred.eval(Array("", "1.0", "1.0")) must beFalse
}
}
"compare double gteq" >> {
foreach(Seq("doubleGTEq($1::double, $2::double)", "$1::double >= $2::double")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "1.0", "2.0")) must beFalse
pred.apply(Array("", "1.0", "1.0")) must beTrue
pred.apply(Array("", "2.0", "1.0")) must beTrue
pred.eval(Array("", "1.0", "2.0")) must beFalse
pred.eval(Array("", "1.0", "1.0")) must beTrue
pred.eval(Array("", "2.0", "1.0")) must beTrue
}
}
"compare double gt" >> {
foreach(Seq("doubleGT($1::double, $2::double)", "$1::double > $2::double")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "1.0", "2.0")) must beFalse
pred.apply(Array("", "1.0", "1.0")) must beFalse
pred.apply(Array("", "2.0", "1.0")) must beTrue
pred.eval(Array("", "1.0", "2.0")) must beFalse
pred.eval(Array("", "1.0", "1.0")) must beFalse
pred.eval(Array("", "2.0", "1.0")) must beTrue
}
}
"compare not predicates" >> {
foreach(Seq("not(strEq($1, $2))", "!($1 == $2)")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "1", "1")) must beFalse
pred.eval(Array("", "1", "1")) must beFalse
}
}
"compare and predicates" >> {
foreach(Seq("and(strEq($1, $2), strEq(concat($3, $4), $1))", "$1 == $2 && concat($3, $4) == $1")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "foo", "foo", "f", "oo")) must beTrue
pred.eval(Array("", "foo", "foo", "f", "oo")) must beTrue
}
}
"compare or predicates" >> {
foreach(Seq("or(strEq($1, $2), strEq($3, $1))", "$1 == $2 || $3 == $1")) { s =>
val pred = Predicate(s)
pred.apply(Array("", "foo", "foo", "f", "oo")) must beTrue
pred.eval(Array("", "foo", "foo", "f", "oo")) must beTrue
}
}
"compare grouped predicates" >> {
val preds = Seq(
"and(strEq($1, $4), or(strEq($1, $2), strEq($3, $1)))",
"$1 == $4 && ($1 == $2 || $3 == $1)",
"($1 == $2 || $3 == $1) && $1 == $4"
)
foreach(preds) { s =>
val pred = Predicate(s)
pred.apply(Array("", "foo", "foo", "f", "foo")) must beTrue
pred.apply(Array("", "foo", "foo", "f", "oo")) must beFalse
pred.apply(Array("", "foo", "fo", "f", "foo")) must beFalse
pred.eval(Array("", "foo", "foo", "f", "foo")) must beTrue
pred.eval(Array("", "foo", "foo", "f", "oo")) must beFalse
pred.eval(Array("", "foo", "fo", "f", "foo")) must beFalse
}
}
}
}
| locationtech/geomesa | geomesa-convert/geomesa-convert-common/src/test/scala/org/locationtech/geomesa/convert2/transforms/PredicateTest.scala | Scala | apache-2.0 | 8,188 |
package chapter03
import scala.collection.mutable
object Mapimplementation {
def main(args: Array[String]): Unit = {
val roman = mutable.Map[Int, String]() //Map declaration
//Map Initialization
roman += (1 -> "I")
roman += (2 -> "II")
roman += (3 -> "III")
roman += (4 -> "IV")
roman += (5 -> "V")
println("Map : " + roman)
println("Keys : " + roman.keys) //Map keys
println("Values : " + roman.values) //Map values
println("Is the map empty : " + roman.isEmpty) //isEmpty?
//Another initialization
val roman2 = mutable.Map(6 -> "VI", 7 -> "VII", 8 -> "VIII", 9 -> "IX", 10 -> "X")
//Concat 2 Maps
val romanFull = roman ++ roman2
println(romanFull)
}
}
| aakashmathai/ScalaTutorial | src/main/scala/chapter03/Mapimplementation.scala | Scala | apache-2.0 | 728 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.consumer
import java.nio.channels.ClosedByInterruptException
import kafka.api._
import kafka.network._
import kafka.utils._
import kafka.common.{ErrorMapping, TopicAndPartition}
import org.apache.kafka.common.utils.Utils._
/**
* A consumer of kafka messages
*/
@threadsafe
class SimpleConsumer(val host: String,
val port: Int,
val soTimeout: Int,
val bufferSize: Int,
val clientId: String) extends Logging {
ConsumerConfig.validateClientId(clientId)
private val lock = new Object()
private val blockingChannel = new BlockingChannel(host, port, bufferSize, BlockingChannel.UseDefaultBufferSize, soTimeout)
private val fetchRequestAndResponseStats = FetchRequestAndResponseStatsRegistry.getFetchRequestAndResponseStats(clientId)
private var isClosed = false
private def connect(): BlockingChannel = {
close
blockingChannel.connect()
blockingChannel
}
private def disconnect() = {
debug("Disconnecting from " + formatAddress(host, port))
blockingChannel.disconnect()
}
private def reconnect() {
disconnect()
connect()
}
def close() {
lock synchronized {
disconnect()
isClosed = true
}
}
private def sendRequest(request: RequestOrResponse): Receive = {
lock synchronized {
var response: Receive = null
try {
getOrMakeConnection()
blockingChannel.send(request)
response = blockingChannel.receive()
} catch {
case e : ClosedByInterruptException =>
throw e
case e : Throwable =>
info("Reconnect due to socket error: %s".format(e.toString))
// retry once
try {
reconnect()
blockingChannel.send(request)
response = blockingChannel.receive()
} catch {
case e: Throwable =>
disconnect()
throw e
}
}
response
}
}
def send(request: TopicMetadataRequest): TopicMetadataResponse = {
val response = sendRequest(request)
TopicMetadataResponse.readFrom(response.buffer)
}
def send(request: ConsumerMetadataRequest): ConsumerMetadataResponse = {
val response = sendRequest(request)
ConsumerMetadataResponse.readFrom(response.buffer)
}
/**
* Fetch a set of messages from a topic.
*
* @param request specifies the topic name, topic partition, starting byte offset, maximum bytes to be fetched.
* @return a set of fetched messages
*/
def fetch(request: FetchRequest): FetchResponse = {
var response: Receive = null
val specificTimer = fetchRequestAndResponseStats.getFetchRequestAndResponseStats(host, port).requestTimer
val aggregateTimer = fetchRequestAndResponseStats.getFetchRequestAndResponseAllBrokersStats.requestTimer
aggregateTimer.time {
specificTimer.time {
response = sendRequest(request)
}
}
val fetchResponse = FetchResponse.readFrom(response.buffer)
val fetchedSize = fetchResponse.sizeInBytes
fetchRequestAndResponseStats.getFetchRequestAndResponseStats(host, port).requestSizeHist.update(fetchedSize)
fetchRequestAndResponseStats.getFetchRequestAndResponseAllBrokersStats.requestSizeHist.update(fetchedSize)
fetchResponse
}
/**
* Get a list of valid offsets (up to maxSize) before the given time.
* @param request a [[kafka.api.OffsetRequest]] object.
* @return a [[kafka.api.OffsetResponse]] object.
*/
def getOffsetsBefore(request: OffsetRequest) = OffsetResponse.readFrom(sendRequest(request).buffer)
/**
* Commit offsets for a topic
* Version 0 of the request will commit offsets to Zookeeper and version 1 and above will commit offsets to Kafka.
* @param request a [[kafka.api.OffsetCommitRequest]] object.
* @return a [[kafka.api.OffsetCommitResponse]] object.
*/
def commitOffsets(request: OffsetCommitRequest) = {
// TODO: With KAFKA-1012, we have to first issue a ConsumerMetadataRequest and connect to the coordinator before
// we can commit offsets.
OffsetCommitResponse.readFrom(sendRequest(request).buffer)
}
/**
* Fetch offsets for a topic
* Version 0 of the request will fetch offsets from Zookeeper and version 1 and above will fetch offsets from Kafka.
* @param request a [[kafka.api.OffsetFetchRequest]] object.
* @return a [[kafka.api.OffsetFetchResponse]] object.
*/
def fetchOffsets(request: OffsetFetchRequest) = OffsetFetchResponse.readFrom(sendRequest(request).buffer)
private def getOrMakeConnection() {
if(!isClosed && !blockingChannel.isConnected) {
connect()
}
}
/**
* Get the earliest or latest offset of a given topic, partition.
* @param topicAndPartition Topic and partition of which the offset is needed.
* @param earliestOrLatest A value to indicate earliest or latest offset.
* @param consumerId Id of the consumer which could be a consumer client, SimpleConsumerShell or a follower broker.
* @return Requested offset.
*/
def earliestOrLatestOffset(topicAndPartition: TopicAndPartition, earliestOrLatest: Long, consumerId: Int): Long = {
val request = OffsetRequest(requestInfo = Map(topicAndPartition -> PartitionOffsetRequestInfo(earliestOrLatest, 1)),
clientId = clientId,
replicaId = consumerId)
val partitionErrorAndOffset = getOffsetsBefore(request).partitionErrorAndOffsets(topicAndPartition)
val offset = partitionErrorAndOffset.error match {
case ErrorMapping.NoError => partitionErrorAndOffset.offsets.head
case _ => throw ErrorMapping.exceptionFor(partitionErrorAndOffset.error)
}
offset
}
}
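// Hypothetical usage sketch (not part of the original file): looks up the latest
// offset of a topic partition and fetches one batch of messages. Host, topic
// and sizes are placeholders; -1 is the conventional "ordinary consumer" id.
object SimpleConsumerExample {
  def main(args: Array[String]): Unit = {
    val consumer = new SimpleConsumer("localhost", 9092, 30000, 64 * 1024, "example-client")
    try {
      val tp = TopicAndPartition("events", 0)
      val latest = consumer.earliestOrLatestOffset(tp, OffsetRequest.LatestTime, -1)
      val request = new FetchRequestBuilder().clientId("example-client").addFetch("events", 0, latest, 100000).build()
      val response = consumer.fetch(request)
      println(response.messageSet("events", 0).size)
    } finally consumer.close()
  }
}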
| crashlytics/kafka | core/src/main/scala/kafka/consumer/SimpleConsumer.scala | Scala | apache-2.0 | 6,568 |
package com.github.kimutansk.akka.exercise.message
import akka.actor.Actor
/**
* Child actor used to verify message sending and receiving
*
* @author kimutansk
*/
class ChildActor(name: String) extends Actor {
/** Handler for received messages */
def receive = {
case msg: String => {
val message = "ChildActor: Received String " + msg + " My name is " + name
println(message)
sender ! message.length
}
}
}
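// Hypothetical usage sketch (not part of the original file):
//   val system = ActorSystem("exercise")
//   val child  = system.actorOf(Props(classOf[ChildActor], "child1"))
//   child ! "hello"   // prints the message and replies with its length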
| togusafish/kimutansk-_-scala-exercise | akka-exercise/src/main/scala/com/github/kimutansk/akka/exercise/message/ChildActor.scala | Scala | mit | 432 |
/**
* TABuddy-Model - a human-centric K,V framework
*
* Copyright (c) 2012-2014 Alexey Aksenov [email protected]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.digimead.tabuddy.model.graph
import java.net.URI
import java.util.UUID
import java.util.concurrent.ConcurrentHashMap
import org.digimead.digi.lib.api.XDependencyInjection
import org.digimead.digi.lib.log.api.XLoggable
import org.digimead.tabuddy.model.Model
import org.digimead.tabuddy.model.element.{ Coordinate, Element }
import org.digimead.tabuddy.model.serialization.Serialization
import scala.collection.{ immutable, mutable }
import scala.collection.convert.Wrappers.JMapWrapperLike
import scala.language.implicitConversions
/**
* Graph is a container for nodes.
*
* @param node root element of the graph
* @param origin graph owner identifier
*/
class Graph[A <: Model.Like](val created: Element.Timestamp, val node: Node[A],
val origin: Symbol)(implicit val modelType: Manifest[A]) extends Modifiable.Read with ConsumerData with mutable.Publisher[Event] with Equals {
/** HashMap for index of graph nodes. */
// Please, keep it consistent.
val nodes = NodeMap(new ConcurrentHashMap[UUID, Node[_ <: Element]])
/** List of stored graphs. */
@volatile var retrospective = Graph.Retrospective.empty(origin)
/** Check if such node is already exists. */
@volatile var strict = true
/**
* Get explicit general graph.
*
* Derivative from Model.Like trait is invariant.
* Model.Like trait itself returns common type.
* Using .asInstanceOf[Graph[Model.Like]] here since A+ is not suitable.
*/
def **(): Graph[Model.Like] = this.asInstanceOf[Graph[Model.Like]]
/** Copy graph. */
def copy(created: Element.Timestamp = created,
id: Symbol = node.id,
modified: Element.Timestamp = node.modified,
origin: Symbol = this.origin,
unique: UUID = node.unique)(graphEarlyAccess: Graph[A] ⇒ Unit): Graph[A] = node.freezeRead { sourceModelNode ⇒
/*
* Create graph and model node
*/
val targetModelNode = Node.model[A](id, unique, modified)
val graph = new Graph[A](created, targetModelNode, origin)
graph.retrospective = this.retrospective
graphEarlyAccess(graph)
targetModelNode.safeWrite { targetNode ⇒
targetModelNode.initializeModelNode(graph, modified)
val projectionBoxes: Seq[(Coordinate, ElementBox[A])] = sourceModelNode.projectionBoxes.map {
case (coordinate, box) ⇒ coordinate -> box.copy(node = targetNode)
}.toSeq
if (graph.modelType != graph.node.elementType)
throw new IllegalArgumentException(s"Unexpected model type ${graph.modelType} vs ${graph.node.elementType}")
/*
* Copy model children
*/
targetNode.updateState(
children = sourceModelNode.children.map(_.copy(attach = false, target = targetNode, recursive = true)),
modified = null, // modification is already assigned
projectionBoxes = immutable.HashMap(projectionBoxes: _*))
}
graph
}
/** Get graph model. */
def model: A = node.rootBox.e
/** Get modification timestamp. */
def modified: Element.Timestamp = node.modified
/** Provide publish() public access */
override def publish(event: Event) = try {
super.publish(event)
} catch {
// catch all other subscriber exceptions
case e: Throwable ⇒
Graph.log.error(e.getMessage(), e)
throw e
}
/** Get actual storages. */
def storages: Seq[URI] = if (retrospective.history.isEmpty) Seq() else retrospective.getStorages(retrospective.history.keys.max)
/** Visit graph elements. */
def visit[A](visitor: Element.Visitor[A], onlyModified: Boolean = true,
multithread: Boolean = true)(implicit param: Element.Visitor.Param = Element.Visitor.defaultParam): Iterator[A] = {
val lazyIterator = node.freezeRead {
if (multithread) {
_.iteratorRecursive.grouped(param.multithreadGroupSize).flatMap { nodes ⇒
nodes.par.flatMap(_.projectionBoxes.values.flatMap(box ⇒
if (onlyModified) box.getModified.flatMap(_.eOnVisit(visitor)) else box.e.eOnVisit(visitor)))
}
} else {
_.iteratorRecursive.flatMap(_.projectionBoxes.values.flatMap(box ⇒
if (onlyModified) box.getModified.flatMap(_.eOnVisit(visitor)) else box.e.eOnVisit(visitor)))
}
}
if (param.lazyVizit) lazyIterator else lazyIterator.toVector.toIterator
}
override def canEqual(that: Any): Boolean = that.isInstanceOf[Graph[_]]
override def equals(other: Any) = other match {
case that: Graph[_] ⇒ (that eq this) ||
(that.canEqual(this) && this.## == that.## && this.modified == that.modified)
case _ ⇒ false
}
override def hashCode() = lazyHashCode
protected lazy val lazyHashCode = java.util.Arrays.hashCode(Array[AnyRef](this.created, this.node, this.origin, this.modelType))
override def toString() = s"Graph[${model.eId.name}@${origin.name}]#${modified}"
case class NodeMap[A, B <: Node[_ <: Element]](underlying: ConcurrentHashMap[A, B])
extends mutable.AbstractMap[A, B] with JMapWrapperLike[A, B, NodeMap[A, B]] {
override def empty = NodeMap(new ConcurrentHashMap[A, B])
/** Adds a single element to the map. */
override def +=(kv: (A, B)): this.type = { put(kv._1, kv._2); this }
/** Removes a key from this map. */
override def -=(key: A): this.type = { remove(key); this }
/** Adds a new key/value pair to this map and optionally returns previously bound value. */
override def put(key: A, value: B): Option[B] = {
if (isEmpty) {
// 1st node MUST be always a model
val undoF = () ⇒ {}
val result = super.put(key, value)
Graph.this.publish(Event.GraphChange(value, null, value)(undoF))
result
} else {
// Nth node MUST always have parent
val undoF = () ⇒ {}
val result = super.put(key, value)
if (strict)
result.foreach { previous ⇒
// restore
super.put(key, previous)
throw new IllegalStateException(s"Such node ${key} is already exists.")
}
Graph.this.publish(Event.GraphChange(value.parent.get, result.getOrElse(null.asInstanceOf[B]), value)(undoF))
result
}
}
/** Removes a key from this map, returning the value associated previously */
override def remove(key: A): Option[B] = super.remove(key).map { node ⇒
if (isEmpty) {
// last node MUST be always a model
val undoF = () ⇒ {}
Graph.this.publish(Event.GraphChange(node, node, null)(undoF))
} else {
// Nth node MUST always have parent
val undoF = () ⇒ {}
Graph.this.publish(Event.GraphChange(node.parent.get, node, null)(undoF))
}
node
}
/** Adds a new key/value pair to this map. */
override def update(key: A, value: B): Unit = put(key, value)
/**
* Removes all nodes from the map. After this operation has completed,
* the map will be empty.
*/
override def clear() = {
val undoF = () ⇒ {}
Graph.this.publish(Event.GraphReset(Graph.this)(undoF))
super.clear()
}
}
}
object Graph extends XLoggable {
implicit def graph2interface(g: Graph.type): Interface = DI.implementation
trait Interface {
def apply[A <: Model.Like: Manifest](node: Node[A], origin: Symbol): Graph[A] = new Graph(Element.timestamp(), node, origin)
/** Create a new graph. */
def apply[A <: Model.Like: Manifest](origin: Symbol, scope: A#StashType#ScopeType, serialization: Serialization.Identifier,
unique: UUID)(graphEarlyAccess: Graph[A] ⇒ Unit)(implicit stashClass: Class[_ <: A#StashType]): Graph[A] =
apply[A](origin, origin, scope, serialization, unique)(graphEarlyAccess)
/** Create a new graph. */
def apply[A <: Model.Like](id: Symbol, origin: Symbol, scope: A#StashType#ScopeType, serialization: Serialization.Identifier, unique: UUID,
timestamp: Element.Timestamp = Element.timestamp())(graphEarlyAccess: Graph[A] ⇒ Unit)(implicit m: Manifest[A], stashClass: Class[_ <: A#StashType]): Graph[A] = {
val modelNode = Node.model[A](id, unique, timestamp)
val modelGraph = new Graph[A](timestamp, modelNode, origin)
graphEarlyAccess(modelGraph)
modelNode.safeWrite { node ⇒
modelNode.initializeModelNode(modelGraph, timestamp)
val modelBox = ElementBox[A](Coordinate.root, timestamp, node, timestamp, scope, serialization)
node.updateBox(Coordinate.root, modelBox, timestamp)
if (modelGraph.modelType != modelGraph.node.elementType)
throw new IllegalArgumentException(s"Unexpected model type ${modelGraph.modelType} vs ${modelGraph.node.elementType}")
modelGraph
}
}
/** Dump the graph structure. */
def dump(graph: Graph[_ <: Model.Like], brief: Boolean, padding: Int = 2): String = synchronized {
val pad = " " * padding
val self = "graph origin:%s, model id:%s, model unique:%s".format(graph.origin, graph.node.id, graph.node.unique)
val childrenDump = Node.dump(graph.node, brief, padding)
if (childrenDump.isEmpty) self else self + "\n" + pad + childrenDump
}
}
/**
* Container with graph evolution.
*/
case class Retrospective(val history: Map[Element.Timestamp, Retrospective.Indexes], val origins: Seq[Symbol], val storages: Seq[URI]) {
/** Get last modification. */
def last: Option[Element.Timestamp] = if (history.isEmpty) None else Some(history.keys.max)
/** Get head modification. */
def head: Option[Element.Timestamp] = if (history.isEmpty) None else Some(history.keys.min)
/** Get origin. */
def getOrigin(ts: Element.Timestamp): Symbol = history.get(ts) match {
case Some(Retrospective.Indexes(originIndex, storageIndexes)) ⇒ origins(originIndex)
case None ⇒ throw new NoSuchElementException("Timestamp not found: " + ts)
}
/** Get storages. */
def getStorages(ts: Element.Timestamp): Seq[URI] = history.get(ts) match {
case Some(Retrospective.Indexes(originIndex, storageIndexes)) ⇒ storageIndexes.map(storages)
case None ⇒ throw new NoSuchElementException("Timestamp not found: " + ts)
}
}
object Retrospective {
/** Get empty retrospective. */
def empty(origin: Symbol) = Graph.Retrospective(immutable.HashMap(), Seq(origin), Seq.empty)
/**
* History value.
*/
case class Indexes(val originIndex: Int, val storageIndexes: Seq[Int])
}
/**
* Dependency injection routines.
*/
private object DI extends XDependencyInjection.PersistentInjectable {
lazy val implementation = injectOptional[Interface] getOrElse new AnyRef with Interface {}
}
}
| digimead/digi-TABuddy-model | src/main/scala/org/digimead/tabuddy/model/graph/Graph.scala | Scala | apache-2.0 | 11,294 |
package org.pico.atomic.syntax.std
import java.util.concurrent.atomic.AtomicReference
import scala.annotation.tailrec
package object atomicReference {
implicit class AtomicReferenceOps_YYKh2cf[A](val self: AtomicReference[A]) extends AnyVal {
/** Repeatedly attempt to update the reference using the update function f
* until the condition is satisfied and is able to set it atomically.
* @param f The function to transform the current reference
* @param cond The value predicate
* @return An old value and a new value if an update happened
*/
@inline
final def updateIf(cond: A => Boolean, f: A => A): Option[(A, A)] = {
@tailrec
def go(): Option[(A, A)] = {
val oldValue = self.get()
val isOk = cond(oldValue)
if (!isOk) None
else {
val newValue = f(oldValue)
if (self.compareAndSet(oldValue, newValue)) Some(oldValue -> newValue)
else go()
}
}
go()
}
/** Repeatedly attempt to update the reference using the update function f until able to
* set it atomically.
* @param f The function to transform the current reference
* @return A pair of the old and new values
*/
@inline
final def update(f: A => A): (A, A) = {
updateIf(_ => true, f).get //Safe to call .get by construction, the predicate is hardcoded to be true
}
/** Atomically swap a value for the existing value in an atomic reference. Same as getAndSet.
*
* @param newValue The new value to atomically swap into the atomic reference
* @return The old value that was swapped out.
*/
@inline
final def swap(newValue: A): A = self.getAndSet(newValue)
/** Get the value
*
* @return The value
*/
@inline
final def value: A = self.get
}
}
| pico-works/pico-atomic | pico-atomic/src/main/scala/org/pico/atomic/syntax/std/atomicReference/package.scala | Scala | bsd-3-clause | 1,875 |
package advancedScala.chapter1
// pull in the typeclass
import cats.Show
// pull in the implicit instances for the Int and String types
import cats.instances.int._
import cats.instances.string._
// pull in the show syntax (extension methods)
import cats.syntax.show._
object ShowableInstances {
implicit val catShow: Show[Cat] = {
// Show.show(cat => s"${cat.name.show} is a ${cat.age.show} year-old ${cat.color.show} cat.")
// val stringShow = Show[String]
// val intShow = Show[Int]
// Show.show(cat => s"${stringShow.show(cat.name)} is a ${intShow.show(cat.age)} year-old ${stringShow.show(cat.color)} cat.")
Show.show(cat => s"${Show[String].show(cat.name)} is a ${Show[Int].show(cat.age)} year-old ${Show[String].show(cat.color)} cat.")
}
}
object ShowMain {
def main(args: Array[String]): Unit = {
// if you go looking for the companion object in the Cats source,
// you won't find it. The boilerplate is being generated at compile
// time by the https://github.com/mpilquist/simulacrum library.
val showInt = Show.apply[Int]
// syntactic sugar for calling the apply method.
val alsoShowInt = Show[Int]
val showString = Show[String]
println("With extension methods...")
println(showInt.show(123))
println(alsoShowInt.show(123))
println(Show[Int].show(123))
println(showString.show("abc"))
println("With extension methods...")
println(123.show)
println("abc".show)
import ShowableInstances._
val cat = Cat("Betty", 6, "grey")
println(Show[Cat].show(cat))
}
}
| mikegehard/advanced-scala-with-cats-exercises | src/main/scala/advancedScala/chapter1/Show.scala | Scala | mit | 1,529 |
package io.mediachain.rpc.client
import java.util.concurrent.TimeUnit
import java.util.logging.Logger
import io.grpc.{ManagedChannel, ManagedChannelBuilder, StatusRuntimeException}
import io.mediachain.protos.Services._
import io.mediachain.protos.Services.LSpaceServiceGrpc.LSpaceServiceBlockingStub
import io.mediachain.rpc.RPCError
import io.mediachain.rpc.TypeConversions._
import cats.data.Xor
object LSpaceClient {
def apply(host: String, port: Int): LSpaceClient = {
val channel = ManagedChannelBuilder.forAddress(host, port)
.usePlaintext(true).build
val blockingStub = LSpaceServiceGrpc.blockingStub(channel)
new LSpaceClient(channel, blockingStub)
}
def main(args: Array[String]): Unit = {
val client = LSpaceClient("localhost", 50052)
try {
// FIXME: don't hardcode this (only works on staging server)
val canonical = client.fetchCanonical("0a84565b-2a43-4f6c-ba8f-6bd9802528b5")
println(s"Received canonical: $canonical")
} finally {
client.shutdown()
}
}
}
class LSpaceClient (
private val channel: ManagedChannel,
private val blockingStub: LSpaceServiceBlockingStub
) {
private[this] val logger = Logger.getLogger(classOf[LSpaceClient].getName)
def shutdown(): Unit =
channel.shutdown.awaitTermination(5, TimeUnit.SECONDS)
def tryRPCRequest[Response](f: => Response): Xor[RPCError, Response] = {
try Xor.Right(f)
catch {
case e: StatusRuntimeException => {
logger.warning(s"RPC request failed: ${e.getStatus}")
val err = RPCError.fromException(e)
Xor.Left(err)
}
case e: Throwable => throw e
}
}
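  // Illustrative error-handling pattern for the calls below (the endpoint and handling are
  // assumptions, not part of this client):
  //
  //   LSpaceClient("localhost", 50052).listCanonicals() match {
  //     case Xor.Right(canonicals) => println(canonicals)
  //     case Xor.Left(err)         => println(s"request failed: $err")
  //   }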
def listCanonicals(page: Int = 0)
: Xor[RPCError, CanonicalList] =
tryRPCRequest {
logger.info("Requesting canonicals")
val request = ListCanonicalsRequest(page = page.toLong)
blockingStub.listCanonicals(request)
}
def fetchCanonical(canonicalID: String, withRawMetadata: Boolean = false)
: Xor[RPCError, CanonicalWithRootRevision] =
tryRPCRequest {
logger.info(s"Fetching canonical with id $canonicalID")
val request = FetchCanonicalRequest(
canonicalID = canonicalID,
withRawMetadata = withRawMetadata)
blockingStub.fetchCanonical(request)
}
def fetchHistoryForCanonical(canonicalID: String)
: Xor[RPCError, CanonicalWithHistory] =
tryRPCRequest {
logger.info(s"Fetching history for canonical with id $canonicalID")
val request = FetchCanonicalRequest(canonicalID = canonicalID)
blockingStub.fetchCanonicalHistory(request)
}
def listWorksForAuthorWithCanonicalID(canonicalID: String)
: Xor[RPCError, WorksForAuthor] =
tryRPCRequest {
logger.info(s"Fetching works for author with canonical id $canonicalID")
val request = WorksForAuthorRequest(authorCanonicalID = canonicalID)
blockingStub.listWorksForAuthor(request)
}
}
| mediachain/L-SPACE | rpc/src/main/scala/io/mediachain/rpc/client/LSpaceClient.scala | Scala | mit | 2,922 |
import java.util.zip.CRC32
import java.nio.ByteBuffer
import com.jimjh.merkle.spec.UnitSpec
import com.jimjh.merkle._
/** Spec for [[com.jimjh.merkle.MerkleTree]].
*
* This spec was left outside the package on purpose to verify the public interface.
*
* @author Jim Lim - [email protected]
*/
class UsageSpec extends UnitSpec {
it should "construct a new Merkle Tree" in {
val blocks = Array(Seq[Byte](0, 1, 2, 3), Seq[Byte](4, 5, 6))
MerkleTree(blocks, crc32)
}
def crc32(b: Block) = {
val digest = new CRC32()
digest.update(b.toArray)
val buffer = ByteBuffer.allocate(8)
buffer.putLong(digest.getValue)
buffer.array
}
}
| jimjh/merkle-trees-impl | src/test/scala/UsageSpec.scala | Scala | mit | 669 |
package beam.agentsim.infrastructure.taz
import beam.router.BeamRouter.Location
import beam.sim.common.GeoUtils
import org.matsim.api.core.v01.{Coord, Id}
import org.matsim.core.utils.collections.QuadTree
import scala.collection.JavaConverters._
/**
* represents a Traffic Analysis Zone
* @param tazId unique identifier of this TAZ
* @param coord location of the centroid of this TAZ
* @param areaInSquareMeters area of TAZ
*/
class TAZ(val tazId: Id[TAZ], val coord: Coord, val areaInSquareMeters: Double) {
def this(tazIdString: String, coord: Coord, area: Double) {
this(Id.create(tazIdString, classOf[TAZ]), coord, area)
}
}
object TAZ {
val DefaultTAZId: Id[TAZ] = Id.create("default", classOf[TAZ])
val EmergencyTAZId: Id[TAZ] = Id.create("emergency", classOf[TAZ])
val DefaultTAZ: TAZ = new TAZ(DefaultTAZId, new Coord(), 0)
/**
    * performs a concentric disc search from the given location to find TAZs, expanding outward up to maxRadius
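    * A sketch of a typical call (the quad tree and coordinates are assumed inputs, not defined here):
    * {{{
    *   val hits: List[(TAZ, Double)] =
    *     TAZ.discSearch(tazQuadTree, new Coord(550000.0, 4180000.0), startRadius = 100.0, maxRadius = 10000.0)
    * }}}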
* @param tazQuadTree tree to search
* @param searchCenter central location from which concentric discs will be built with an expanding radius
* @param startRadius the beginning search radius
* @param maxRadius search constrained to this maximum search radius
    * @return the TAZs found in the first search disc that contains at least one TAZ centroid, along with their distances to the search center (unsorted)
*/
def discSearch(
tazQuadTree: QuadTree[TAZ],
searchCenter: Location,
startRadius: Double,
maxRadius: Double
): List[(TAZ, Double)] = {
def _find(thisRadius: Double): List[TAZ] = {
if (thisRadius > maxRadius) List.empty[TAZ]
else {
val found = tazQuadTree
.getDisk(searchCenter.getX, searchCenter.getY, thisRadius)
.asScala
.toList
if (found.nonEmpty) found
else _find(thisRadius * 2)
}
}
_find(startRadius).map { taz =>
// Note, this assumes both TAZs and SearchCenter are in local coordinates, and therefore in units of meters
(taz, GeoUtils.distFormula(taz.coord, searchCenter))
}
}
/**
    * performs a concentric ring search from the given location to find TAZs, expanding outward up to maxRadius
* @param tazQuadTree tree to search
* @param searchCenter central location from which concentric discs will be built with an expanding radius
* @param startRadius the beginning search radius
* @param maxRadius search constrained to this maximum search radius
    * @return the TAZs found in the first search ring that contains at least one TAZ centroid, along with their distances to the search center (unsorted)
*/
def ringSearch(
tazQuadTree: QuadTree[TAZ],
searchCenter: Location,
startRadius: Double,
maxRadius: Double
): List[(TAZ, Double)] = {
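    // Search expanding rings: each attempt covers the ring between innerRadius and outerRadius,
    // and on a miss the next ring spans from outerRadius to twice outerRadius, until the inner
    // radius exceeds maxRadius.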
def _find(innerRadius: Double, outerRadius: Double): List[TAZ] = {
if (innerRadius > maxRadius) List.empty[TAZ]
else {
val found = tazQuadTree
.getRing(searchCenter.getX, searchCenter.getY, innerRadius, outerRadius)
.asScala
.toList
if (found.nonEmpty) found
else _find(outerRadius, outerRadius * 2)
}
}
_find(0.0, startRadius).map { taz =>
// Note, this assumes both TAZs and SearchCenter are in local coordinates, and therefore in units of meters
(taz, GeoUtils.distFormula(taz.coord, searchCenter))
}
}
}
| colinsheppard/beam | src/main/scala/beam/agentsim/infrastructure/taz/TAZ.scala | Scala | gpl-3.0 | 3,364 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.io.File
import java.nio.charset.StandardCharsets.UTF_8
import java.util.concurrent.CountDownLatch
import scala.collection.mutable
import org.apache.commons.io.FileUtils
import org.apache.commons.lang3.RandomStringUtils
import org.apache.hadoop.fs.Path
import org.scalactic.TolerantNumerics
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatestplus.mockito.MockitoSugar
import org.apache.spark.{SparkConf, SparkException, TestUtils}
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{Column, DataFrame, Dataset, Row}
import org.apache.spark.sql.catalyst.expressions.{Literal, Rand, Randn, Shuffle, Uuid}
import org.apache.spark.sql.connector.read.InputPartition
import org.apache.spark.sql.connector.read.streaming.{Offset => OffsetV2}
import org.apache.spark.sql.execution.exchange.ReusedExchangeExec
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.sources.{MemorySink, TestForeachWriter}
import org.apache.spark.sql.functions._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.util.{BlockingSource, MockSourceProvider, StreamManualClock}
import org.apache.spark.sql.types.StructType
class StreamingQuerySuite extends StreamTest with BeforeAndAfter with Logging with MockitoSugar {
import AwaitTerminationTester._
import testImplicits._
override def sparkConf: SparkConf =
super.sparkConf
.setAppName("test")
.set("spark.sql.parquet.columnarReaderBatchSize", "4096")
.set("spark.sql.sources.useV1SourceList", "avro")
.set("spark.sql.extensions", "com.intel.oap.ColumnarPlugin")
.set("spark.sql.execution.arrow.maxRecordsPerBatch", "4096")
//.set("spark.shuffle.manager", "org.apache.spark.shuffle.sort.ColumnarShuffleManager")
.set("spark.memory.offHeap.enabled", "true")
.set("spark.memory.offHeap.size", "50m")
.set("spark.sql.join.preferSortMergeJoin", "false")
.set("spark.sql.columnar.codegen.hashAggregate", "false")
.set("spark.oap.sql.columnar.wholestagecodegen", "false")
.set("spark.sql.columnar.window", "false")
.set("spark.unsafe.exceptionOnMemoryLeak", "false")
//.set("spark.sql.columnar.tmp_dir", "/codegen/nativesql/")
.set("spark.sql.columnar.sort.broadcastJoin", "true")
.set("spark.oap.sql.columnar.preferColumnar", "true")
// To make === between double tolerate inexact values
implicit val doubleEquality = TolerantNumerics.tolerantDoubleEquality(0.01)
after {
sqlContext.streams.active.foreach(_.stop())
}
test("name unique in active queries") {
withTempDir { dir =>
def startQuery(name: Option[String]): StreamingQuery = {
val writer = MemoryStream[Int].toDS.writeStream
name.foreach(writer.queryName)
writer
.foreach(new TestForeachWriter)
.start()
}
// No name by default, multiple active queries can have no name
val q1 = startQuery(name = None)
assert(q1.name === null)
val q2 = startQuery(name = None)
assert(q2.name === null)
// Can be set by user
val q3 = startQuery(name = Some("q3"))
assert(q3.name === "q3")
// Multiple active queries cannot have same name
val e = intercept[IllegalArgumentException] {
startQuery(name = Some("q3"))
}
q1.stop()
q2.stop()
q3.stop()
}
}
test(
"id unique in active queries + persists across restarts, runId unique across start/restarts") {
val inputData = MemoryStream[Int]
withTempDir { dir =>
var cpDir: String = null
def startQuery(restart: Boolean): StreamingQuery = {
if (cpDir == null || !restart) cpDir = s"$dir/${RandomStringUtils.randomAlphabetic(10)}"
MemoryStream[Int].toDS().groupBy().count()
.writeStream
.format("memory")
.outputMode("complete")
.queryName(s"name${RandomStringUtils.randomAlphabetic(10)}")
.option("checkpointLocation", cpDir)
.start()
}
// id and runId unique for new queries
val q1 = startQuery(restart = false)
val q2 = startQuery(restart = false)
assert(q1.id !== q2.id)
assert(q1.runId !== q2.runId)
q1.stop()
q2.stop()
// id persists across restarts, runId unique across restarts
val q3 = startQuery(restart = false)
q3.stop()
val q4 = startQuery(restart = true)
q4.stop()
      assert(q3.id === q4.id)
assert(q3.runId !== q4.runId)
// Only one query with same id can be active
withSQLConf(SQLConf.STREAMING_STOP_ACTIVE_RUN_ON_RESTART.key -> "false") {
val q5 = startQuery(restart = false)
val e = intercept[IllegalStateException] {
startQuery(restart = true)
}
}
}
}
testQuietly("isActive, exception, and awaitTermination") {
val inputData = MemoryStream[Int]
val mapped = inputData.toDS().map { 6 / _}
testStream(mapped)(
AssertOnQuery(_.isActive),
AssertOnQuery(_.exception.isEmpty),
AddData(inputData, 1, 2),
CheckAnswer(6, 3),
TestAwaitTermination(ExpectBlocked),
TestAwaitTermination(ExpectBlocked, timeoutMs = 2000),
TestAwaitTermination(ExpectNotBlocked, timeoutMs = 10, expectedReturnValue = false),
StopStream,
AssertOnQuery(_.isActive === false),
AssertOnQuery(_.exception.isEmpty),
TestAwaitTermination(ExpectNotBlocked),
TestAwaitTermination(ExpectNotBlocked, timeoutMs = 2000, expectedReturnValue = true),
TestAwaitTermination(ExpectNotBlocked, timeoutMs = 10, expectedReturnValue = true),
StartStream(),
AssertOnQuery(_.isActive),
AddData(inputData, 0),
ExpectFailure[SparkException](),
AssertOnQuery(_.isActive === false),
TestAwaitTermination(ExpectException[SparkException]),
TestAwaitTermination(ExpectException[SparkException], timeoutMs = 2000),
TestAwaitTermination(ExpectException[SparkException], timeoutMs = 10),
AssertOnQuery(q => {
q.exception.get.startOffset ===
q.committedOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString &&
q.exception.get.endOffset ===
q.availableOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString
}, "incorrect start offset or end offset on exception")
)
}
testQuietly("OneTime trigger, commit log, and exception") {
import Trigger.Once
val inputData = MemoryStream[Int]
val mapped = inputData.toDS().map { 6 / _}
testStream(mapped)(
AssertOnQuery(_.isActive),
StopStream,
AddData(inputData, 1, 2),
StartStream(trigger = Once),
CheckAnswer(6, 3),
StopStream, // clears out StreamTest state
AssertOnQuery { q =>
// both commit log and offset log contain the same (latest) batch id
q.commitLog.getLatest().map(_._1).getOrElse(-1L) ==
q.offsetLog.getLatest().map(_._1).getOrElse(-2L)
},
AssertOnQuery { q =>
// blow away commit log and sink result
q.commitLog.purge(1)
q.sink.asInstanceOf[MemorySink].clear()
true
},
StartStream(trigger = Once),
CheckAnswer(6, 3), // ensure we fall back to offset log and reprocess batch
StopStream,
AddData(inputData, 3),
StartStream(trigger = Once),
CheckLastBatch(2), // commit log should be back in place
StopStream,
AddData(inputData, 0),
StartStream(trigger = Once),
ExpectFailure[SparkException](),
AssertOnQuery(_.isActive === false),
AssertOnQuery(q => {
q.exception.get.startOffset ===
q.committedOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString &&
q.exception.get.endOffset ===
q.availableOffsets.toOffsetSeq(Seq(inputData), OffsetSeqMetadata()).toString
}, "incorrect start offset or end offset on exception")
)
}
testQuietly("status, lastProgress, and recentProgress") {
import StreamingQuerySuite._
clock = new StreamManualClock
/** Custom MemoryStream that waits for manual clock to reach a time */
val inputData = new MemoryStream[Int](0, sqlContext) {
private def dataAdded: Boolean = currentOffset.offset != -1
// latestOffset should take 50 ms the first time it is called after data is added
override def latestOffset(): OffsetV2 = synchronized {
if (dataAdded) clock.waitTillTime(1050)
super.latestOffset()
}
// getBatch should take 100 ms the first time it is called
override def planInputPartitions(start: OffsetV2, end: OffsetV2): Array[InputPartition] = {
synchronized {
clock.waitTillTime(1150)
super.planInputPartitions(start, end)
}
}
}
// query execution should take 350 ms the first time it is called
val mapped = inputData.toDS.coalesce(1).as[Long].map { x =>
clock.waitTillTime(1500) // this will only wait the first time when clock < 1500
10 / x
}.agg(count("*")).as[Long]
case class AssertStreamExecThreadIsWaitingForTime(targetTime: Long)
extends AssertOnQuery(q => {
eventually(Timeout(streamingTimeout)) {
if (q.exception.isEmpty) {
assert(clock.isStreamWaitingFor(targetTime))
}
}
if (q.exception.isDefined) {
throw q.exception.get
}
true
}, "") {
override def toString: String = s"AssertStreamExecThreadIsWaitingForTime($targetTime)"
}
case class AssertClockTime(time: Long)
extends AssertOnQuery(q => clock.getTimeMillis() === time, "") {
override def toString: String = s"AssertClockTime($time)"
}
var lastProgressBeforeStop: StreamingQueryProgress = null
testStream(mapped, OutputMode.Complete)(
StartStream(Trigger.ProcessingTime(1000), triggerClock = clock),
AssertStreamExecThreadIsWaitingForTime(1000),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
// Test status and progress when `latestOffset` is being called
AddData(inputData, 1, 2),
AdvanceManualClock(1000), // time = 1000 to start new trigger, will block on `latestOffset`
AssertStreamExecThreadIsWaitingForTime(1050),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive),
AssertOnQuery(_.status.message.startsWith("Getting offsets from")),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
AdvanceManualClock(50), // time = 1050 to unblock `latestOffset`
AssertClockTime(1050),
      // will block on `planInputPartitions` that needs 1150
AssertStreamExecThreadIsWaitingForTime(1150),
AssertOnQuery(_.status.isDataAvailable),
AssertOnQuery(_.status.isTriggerActive),
AssertOnQuery(_.status.message === "Processing new data"),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
AdvanceManualClock(100), // time = 1150 to unblock `planInputPartitions`
AssertClockTime(1150),
AssertStreamExecThreadIsWaitingForTime(1500), // will block on map task that needs 1500
AssertOnQuery(_.status.isDataAvailable),
AssertOnQuery(_.status.isTriggerActive),
AssertOnQuery(_.status.message === "Processing new data"),
AssertOnQuery(_.recentProgress.count(_.numInputRows > 0) === 0),
// Test status and progress while batch processing has completed
AdvanceManualClock(350), // time = 1500 to unblock map task
AssertClockTime(1500),
CheckAnswer(2),
AssertStreamExecThreadIsWaitingForTime(2000), // will block until the next trigger
AssertOnQuery(_.status.isDataAvailable),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
AssertOnQuery { query =>
assert(query.lastProgress != null)
assert(query.recentProgress.exists(_.numInputRows > 0))
assert(query.recentProgress.last.eq(query.lastProgress))
val progress = query.lastProgress
assert(progress.id === query.id)
assert(progress.name === query.name)
assert(progress.batchId === 0)
assert(progress.timestamp === "1970-01-01T00:00:01.000Z") // 100 ms in UTC
assert(progress.numInputRows === 2)
assert(progress.processedRowsPerSecond === 4.0)
assert(progress.durationMs.get("latestOffset") === 50)
assert(progress.durationMs.get("queryPlanning") === 100)
assert(progress.durationMs.get("walCommit") === 0)
assert(progress.durationMs.get("addBatch") === 350)
assert(progress.durationMs.get("triggerExecution") === 500)
assert(progress.sources.length === 1)
assert(progress.sources(0).description contains "MemoryStream")
assert(progress.sources(0).startOffset === null) // no prior offset
assert(progress.sources(0).endOffset === "0")
assert(progress.sources(0).processedRowsPerSecond === 4.0) // 2 rows processed in 500 ms
assert(progress.stateOperators.length === 1)
assert(progress.stateOperators(0).numRowsUpdated === 1)
assert(progress.stateOperators(0).numRowsTotal === 1)
assert(progress.sink.description contains "MemorySink")
true
},
// Test whether input rate is updated after two batches
AssertStreamExecThreadIsWaitingForTime(2000), // blocked waiting for next trigger time
AddData(inputData, 1, 2),
AdvanceManualClock(500), // allow another trigger
AssertClockTime(2000),
AssertStreamExecThreadIsWaitingForTime(3000), // will block waiting for next trigger time
CheckAnswer(4),
AssertOnQuery(_.status.isDataAvailable),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
AssertOnQuery { query =>
assert(query.recentProgress.last.eq(query.lastProgress))
assert(query.lastProgress.batchId === 1)
assert(query.lastProgress.inputRowsPerSecond === 2.0)
assert(query.lastProgress.sources(0).inputRowsPerSecond === 2.0)
assert(query.lastProgress.sources(0).startOffset === "0")
assert(query.lastProgress.sources(0).endOffset === "1")
true
},
// Test status and progress after data is not available for a trigger
AdvanceManualClock(1000), // allow another trigger
AssertStreamExecThreadIsWaitingForTime(4000),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Waiting for next trigger"),
// Test status and progress after query stopped
AssertOnQuery { query =>
lastProgressBeforeStop = query.lastProgress
true
},
StopStream,
AssertOnQuery(_.lastProgress.json === lastProgressBeforeStop.json),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message === "Stopped"),
// Test status and progress after query terminated with error
StartStream(Trigger.ProcessingTime(1000), triggerClock = clock),
AdvanceManualClock(1000), // ensure initial trigger completes before AddData
AddData(inputData, 0),
AdvanceManualClock(1000), // allow another trigger
ExpectFailure[SparkException](),
AssertOnQuery(_.status.isDataAvailable === false),
AssertOnQuery(_.status.isTriggerActive === false),
AssertOnQuery(_.status.message.startsWith("Terminated with exception"))
)
}
test("lastProgress should be null when recentProgress is empty") {
BlockingSource.latch = new CountDownLatch(1)
withTempDir { tempDir =>
val sq = spark.readStream
.format("org.apache.spark.sql.streaming.util.BlockingSource")
.load()
.writeStream
.format("org.apache.spark.sql.streaming.util.BlockingSource")
.option("checkpointLocation", tempDir.toString)
.start()
// Creating source is blocked so recentProgress is empty and lastProgress should be null
assert(sq.lastProgress === null)
// Release the latch and stop the query
BlockingSource.latch.countDown()
sq.stop()
}
}
test("codahale metrics") {
val inputData = MemoryStream[Int]
    /** Whether metrics of a query are registered for reporting */
def isMetricsRegistered(query: StreamingQuery): Boolean = {
val sourceName = s"spark.streaming.${query.id}"
val sources = spark.sparkContext.env.metricsSystem.getSourcesByName(sourceName)
require(sources.size <= 1)
sources.nonEmpty
}
// Disabled by default
assert(spark.conf.get(SQLConf.STREAMING_METRICS_ENABLED.key).toBoolean === false)
withSQLConf(SQLConf.STREAMING_METRICS_ENABLED.key -> "false") {
testStream(inputData.toDF)(
AssertOnQuery { q => !isMetricsRegistered(q) },
StopStream,
AssertOnQuery { q => !isMetricsRegistered(q) }
)
}
// Registered when enabled
withSQLConf(SQLConf.STREAMING_METRICS_ENABLED.key -> "true") {
testStream(inputData.toDF)(
AssertOnQuery { q => isMetricsRegistered(q) },
StopStream,
AssertOnQuery { q => !isMetricsRegistered(q) }
)
}
}
test("SPARK-22975: MetricsReporter defaults when there was no progress reported") {
withSQLConf(SQLConf.STREAMING_METRICS_ENABLED.key -> "true") {
BlockingSource.latch = new CountDownLatch(1)
withTempDir { tempDir =>
val sq = spark.readStream
.format("org.apache.spark.sql.streaming.util.BlockingSource")
.load()
.writeStream
.format("org.apache.spark.sql.streaming.util.BlockingSource")
.option("checkpointLocation", tempDir.toString)
.start()
.asInstanceOf[StreamingQueryWrapper]
.streamingQuery
val gauges = sq.streamMetrics.metricRegistry.getGauges
assert(gauges.get("latency").getValue.asInstanceOf[Long] == 0)
assert(gauges.get("processingRate-total").getValue.asInstanceOf[Double] == 0.0)
assert(gauges.get("inputRate-total").getValue.asInstanceOf[Double] == 0.0)
assert(gauges.get("eventTime-watermark").getValue.asInstanceOf[Long] == 0)
assert(gauges.get("states-rowsTotal").getValue.asInstanceOf[Long] == 0)
assert(gauges.get("states-usedBytes").getValue.asInstanceOf[Long] == 0)
sq.stop()
}
}
}
test("input row calculation with same V1 source used twice in self-join") {
val streamingTriggerDF = spark.createDataset(1 to 10).toDF
val streamingInputDF = createSingleTriggerStreamingDF(streamingTriggerDF).toDF("value")
val progress = getStreamingQuery(streamingInputDF.join(streamingInputDF, "value"))
.recentProgress.head
assert(progress.numInputRows === 20) // data is read multiple times in self-joins
assert(progress.sources.size === 1)
assert(progress.sources(0).numInputRows === 20)
}
ignore("input row calculation with mixed batch and streaming V1 sources") {
val streamingTriggerDF = spark.createDataset(1 to 10).toDF
val streamingInputDF = createSingleTriggerStreamingDF(streamingTriggerDF).toDF("value")
val staticInputDF = spark.createDataFrame(Seq(1 -> "1", 2 -> "2")).toDF("value", "anotherValue")
// Trigger input has 10 rows, static input has 2 rows,
// therefore after the first trigger, the calculated input rows should be 10
val progress = getStreamingQuery(streamingInputDF.join(staticInputDF, "value"))
.recentProgress.head
assert(progress.numInputRows === 10)
assert(progress.sources.size === 1)
assert(progress.sources(0).numInputRows === 10)
}
test("input row calculation with trigger input DF having multiple leaves in V1 source") {
val streamingTriggerDF =
spark.createDataset(1 to 5).toDF.union(spark.createDataset(6 to 10).toDF)
require(streamingTriggerDF.logicalPlan.collectLeaves().size > 1)
val streamingInputDF = createSingleTriggerStreamingDF(streamingTriggerDF)
// After the first trigger, the calculated input rows should be 10
val progress = getStreamingQuery(streamingInputDF).recentProgress.head
assert(progress.numInputRows === 10)
assert(progress.sources.size === 1)
assert(progress.sources(0).numInputRows === 10)
}
test("input row calculation with same V2 source used twice in self-union") {
val streamInput = MemoryStream[Int]
testStream(streamInput.toDF().union(streamInput.toDF()))(
AddData(streamInput, 1, 2, 3),
CheckAnswer(1, 1, 2, 2, 3, 3),
AssertOnQuery { q =>
val lastProgress = getLastProgressWithData(q)
assert(lastProgress.nonEmpty)
assert(lastProgress.get.sources.length == 1)
// The source is scanned twice because of self-union
assert(lastProgress.get.numInputRows == 6)
true
}
)
}
ignore("input row calculation with same V2 source used twice in self-join") {
def checkQuery(check: AssertOnQuery): Unit = {
val memoryStream = MemoryStream[Int]
      // TODO: currently the streaming framework always adds a dummy Project above the streaming
      // source relation, which breaks exchange reuse, as the optimizer will remove the Project
      // from one side.
// Here we manually add a useful Project, to trigger exchange reuse.
val streamDF = memoryStream.toDF().select('value + 0 as "v")
testStream(streamDF.join(streamDF, "v"))(
AddData(memoryStream, 1, 2, 3),
CheckAnswer(1, 2, 3),
check
)
}
withSQLConf(SQLConf.EXCHANGE_REUSE_ENABLED.key -> "false") {
checkQuery(AssertOnQuery { q =>
val lastProgress = getLastProgressWithData(q)
assert(lastProgress.nonEmpty)
assert(lastProgress.get.sources.length == 1)
// The source is scanned twice because of self-join
assert(lastProgress.get.numInputRows == 6)
true
})
}
withSQLConf(SQLConf.EXCHANGE_REUSE_ENABLED.key -> "true") {
checkQuery(AssertOnQuery { q =>
val lastProgress = getLastProgressWithData(q)
assert(lastProgress.nonEmpty)
assert(lastProgress.get.sources.length == 1)
assert(q.lastExecution.executedPlan.collect {
case r: ReusedExchangeExec => r
}.length == 1)
// The source is scanned only once because of exchange reuse
assert(lastProgress.get.numInputRows == 3)
true
})
}
}
test("input row calculation with trigger having data for only one of two V2 sources") {
val streamInput1 = MemoryStream[Int]
val streamInput2 = MemoryStream[Int]
testStream(streamInput1.toDF().union(streamInput2.toDF()))(
AddData(streamInput1, 1, 2, 3),
CheckLastBatch(1, 2, 3),
AssertOnQuery { q =>
val lastProgress = getLastProgressWithData(q)
assert(lastProgress.nonEmpty)
assert(lastProgress.get.numInputRows == 3)
assert(lastProgress.get.sources.length == 2)
assert(lastProgress.get.sources(0).numInputRows == 3)
assert(lastProgress.get.sources(1).numInputRows == 0)
true
},
AddData(streamInput2, 4, 5),
CheckLastBatch(4, 5),
AssertOnQuery { q =>
val lastProgress = getLastProgressWithData(q)
assert(lastProgress.nonEmpty)
assert(lastProgress.get.numInputRows == 2)
assert(lastProgress.get.sources.length == 2)
assert(lastProgress.get.sources(0).numInputRows == 0)
assert(lastProgress.get.sources(1).numInputRows == 2)
true
}
)
}
ignore("input row calculation with mixed batch and streaming V2 sources") {
val streamInput = MemoryStream[Int]
val staticInputDF = spark.createDataFrame(Seq(1 -> "1", 2 -> "2")).toDF("value", "anotherValue")
testStream(streamInput.toDF().join(staticInputDF, "value"))(
AddData(streamInput, 1, 2, 3),
AssertOnQuery { q =>
q.processAllAvailable()
        // The number of leaves in the trigger's logical plan should be the same as in the executed plan.
require(
q.lastExecution.logical.collectLeaves().length ==
q.lastExecution.executedPlan.collectLeaves().length)
val lastProgress = getLastProgressWithData(q)
assert(lastProgress.nonEmpty)
assert(lastProgress.get.numInputRows == 3)
assert(lastProgress.get.sources.length == 1)
assert(lastProgress.get.sources(0).numInputRows == 3)
true
}
)
val streamInput2 = MemoryStream[Int]
val staticInputDF2 = staticInputDF.union(staticInputDF).cache()
testStream(streamInput2.toDF().join(staticInputDF2, "value"))(
AddData(streamInput2, 1, 2, 3),
AssertOnQuery { q =>
q.processAllAvailable()
// The number of leaves in the trigger's logical plan should be different from
        // that of the executed plan. The static input will have two leaves in the logical plan
// (due to the union), but will be converted to a single leaf in the executed plan
// (due to the caching, the cached subplan is replaced by a single InMemoryTableScanExec).
require(
q.lastExecution.logical.collectLeaves().length !=
q.lastExecution.executedPlan.collectLeaves().length)
// Despite the mismatch in total number of leaves in the logical and executed plans,
// we should be able to attribute streaming input metrics to the streaming sources.
val lastProgress = getLastProgressWithData(q)
assert(lastProgress.nonEmpty)
assert(lastProgress.get.numInputRows == 3)
assert(lastProgress.get.sources.length == 1)
assert(lastProgress.get.sources(0).numInputRows == 3)
true
}
)
}
testQuietly("StreamExecution metadata garbage collection") {
val inputData = MemoryStream[Int]
val mapped = inputData.toDS().map(6 / _)
withSQLConf(SQLConf.MIN_BATCHES_TO_RETAIN.key -> "1") {
      // Run 3 batches, and then assert that only 2 metadata files are at the end
// since the first should have been purged.
testStream(mapped)(
AddData(inputData, 1, 2),
CheckAnswer(6, 3),
AddData(inputData, 1, 2),
CheckAnswer(6, 3, 6, 3),
AddData(inputData, 4, 6),
CheckAnswer(6, 3, 6, 3, 1, 1),
AssertOnQuery("metadata log should contain only two files") { q =>
val metadataLogDir = new java.io.File(q.offsetLog.metadataPath.toUri)
val logFileNames = metadataLogDir.listFiles().toSeq.map(_.getName())
val toTest = logFileNames.filter(!_.endsWith(".crc")).sorted // Workaround for SPARK-17475
assert(toTest.size == 2 && toTest.head == "1")
true
}
)
}
val inputData2 = MemoryStream[Int]
withSQLConf(SQLConf.MIN_BATCHES_TO_RETAIN.key -> "2") {
      // Run 5 batches, and then assert that 3 metadata files are at the end
      // since the first two should have been purged.
testStream(inputData2.toDS())(
AddData(inputData2, 1, 2),
CheckAnswer(1, 2),
AddData(inputData2, 1, 2),
CheckAnswer(1, 2, 1, 2),
AddData(inputData2, 3, 4),
CheckAnswer(1, 2, 1, 2, 3, 4),
AddData(inputData2, 5, 6),
CheckAnswer(1, 2, 1, 2, 3, 4, 5, 6),
AddData(inputData2, 7, 8),
CheckAnswer(1, 2, 1, 2, 3, 4, 5, 6, 7, 8),
AssertOnQuery("metadata log should contain three files") { q =>
val metadataLogDir = new java.io.File(q.offsetLog.metadataPath.toUri)
val logFileNames = metadataLogDir.listFiles().toSeq.map(_.getName())
val toTest = logFileNames.filter(!_.endsWith(".crc")).sorted // Workaround for SPARK-17475
assert(toTest.size == 3 && toTest.head == "2")
true
}
)
}
}
testQuietly("StreamingQuery should be Serializable but cannot be used in executors") {
def startQuery(ds: Dataset[Int], queryName: String): StreamingQuery = {
ds.writeStream
.queryName(queryName)
.format("memory")
.start()
}
val input = MemoryStream[Int] :: MemoryStream[Int] :: MemoryStream[Int] :: Nil
val q1 = startQuery(input(0).toDS, "stream_serializable_test_1")
val q2 = startQuery(input(1).toDS.map { i =>
      // Emulate that `StreamingQuery` gets captured with normal usage unintentionally.
// It should not fail the query.
q1
i
}, "stream_serializable_test_2")
val q3 = startQuery(input(2).toDS.map { i =>
// Emulate that `StreamingQuery` is used in executors. We should fail the query with a clear
// error message.
q1.explain()
i
}, "stream_serializable_test_3")
try {
input.foreach(_.addData(1))
// q2 should not fail since it doesn't use `q1` in the closure
q2.processAllAvailable()
// The user calls `StreamingQuery` in the closure and it should fail
val e = intercept[StreamingQueryException] {
q3.processAllAvailable()
}
assert(e.getCause.isInstanceOf[SparkException])
assert(e.getCause.getCause.getCause.isInstanceOf[IllegalStateException])
TestUtils.assertExceptionMsg(e, "StreamingQuery cannot be used in executors")
} finally {
q1.stop()
q2.stop()
q3.stop()
}
}
test("StreamExecution should call stop() on sources when a stream is stopped") {
var calledStop = false
val source = new Source {
override def stop(): Unit = {
calledStop = true
}
override def getOffset: Option[Offset] = None
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
spark.emptyDataFrame
}
override def schema: StructType = MockSourceProvider.fakeSchema
}
MockSourceProvider.withMockSources(source) {
val df = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
testStream(df)(StopStream)
assert(calledStop, "Did not call stop on source for stopped stream")
}
}
testQuietly("SPARK-19774: StreamExecution should call stop() on sources when a stream fails") {
var calledStop = false
val source1 = new Source {
override def stop(): Unit = {
throw new RuntimeException("Oh no!")
}
override def getOffset: Option[Offset] = Some(LongOffset(1))
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
spark.range(2).toDF(MockSourceProvider.fakeSchema.fieldNames: _*)
}
override def schema: StructType = MockSourceProvider.fakeSchema
}
val source2 = new Source {
override def stop(): Unit = {
calledStop = true
}
override def getOffset: Option[Offset] = None
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
spark.emptyDataFrame
}
override def schema: StructType = MockSourceProvider.fakeSchema
}
MockSourceProvider.withMockSources(source1, source2) {
val df1 = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
.as[Int]
val df2 = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
.as[Int]
testStream(df1.union(df2).map(i => i / 0))(
AssertOnQuery { sq =>
intercept[StreamingQueryException](sq.processAllAvailable())
sq.exception.isDefined && !sq.isActive
}
)
assert(calledStop, "Did not call stop on source for stopped stream")
}
}
test("get the query id in source") {
@volatile var queryId: String = null
val source = new Source {
override def stop(): Unit = {}
override def getOffset: Option[Offset] = {
queryId = spark.sparkContext.getLocalProperty(StreamExecution.QUERY_ID_KEY)
None
}
override def getBatch(start: Option[Offset], end: Offset): DataFrame = spark.emptyDataFrame
override def schema: StructType = MockSourceProvider.fakeSchema
}
MockSourceProvider.withMockSources(source) {
val df = spark.readStream
.format("org.apache.spark.sql.streaming.util.MockSourceProvider")
.load()
testStream(df)(
AssertOnQuery { sq =>
sq.processAllAvailable()
assert(sq.id.toString === queryId)
assert(sq.runId.toString !== queryId)
true
}
)
}
}
test("processAllAvailable should not block forever when a query is stopped") {
val input = MemoryStream[Int]
input.addData(1)
val query = input.toDF().writeStream
.trigger(Trigger.Once())
.format("console")
.start()
failAfter(streamingTimeout) {
query.processAllAvailable()
}
}
ignore("SPARK-22238: don't check for RDD partitions during streaming aggregation preparation") {
val stream = MemoryStream[(Int, Int)]
val baseDf = Seq((1, "A"), (2, "b")).toDF("num", "char").where("char = 'A'")
val otherDf = stream.toDF().toDF("num", "numSq")
.join(broadcast(baseDf), "num")
.groupBy('char)
.agg(sum('numSq))
testStream(otherDf, OutputMode.Complete())(
AddData(stream, (1, 1), (2, 4)),
CheckLastBatch(("A", 1)))
}
test("Uuid in streaming query should not produce same uuids in each execution") {
val uuids = mutable.ArrayBuffer[String]()
def collectUuid: Seq[Row] => Unit = { rows: Seq[Row] =>
rows.foreach(r => uuids += r.getString(0))
}
val stream = MemoryStream[Int]
val df = stream.toDF().select(new Column(Uuid()))
testStream(df)(
AddData(stream, 1),
CheckAnswer(collectUuid),
AddData(stream, 2),
CheckAnswer(collectUuid)
)
assert(uuids.distinct.size == 2)
}
ignore("Rand/Randn in streaming query should not produce same results in each execution") {
val rands = mutable.ArrayBuffer[Double]()
def collectRand: Seq[Row] => Unit = { rows: Seq[Row] =>
rows.foreach { r =>
rands += r.getDouble(0)
rands += r.getDouble(1)
}
}
val stream = MemoryStream[Int]
val df = stream.toDF().select(new Column(new Rand()), new Column(new Randn()))
testStream(df)(
AddData(stream, 1),
CheckAnswer(collectRand),
AddData(stream, 2),
CheckAnswer(collectRand)
)
assert(rands.distinct.size == 4)
}
ignore("Shuffle in streaming query should not produce same results in each execution") {
val rands = mutable.ArrayBuffer[Seq[Int]]()
def collectShuffle: Seq[Row] => Unit = { rows: Seq[Row] =>
rows.foreach { r =>
rands += r.getSeq[Int](0)
}
}
val stream = MemoryStream[Int]
val df = stream.toDF().select(new Column(new Shuffle(Literal.create[Seq[Int]](0 until 100))))
testStream(df)(
AddData(stream, 1),
CheckAnswer(collectShuffle),
AddData(stream, 2),
CheckAnswer(collectShuffle)
)
assert(rands.distinct.size == 2)
}
test("StreamingRelationV2/StreamingExecutionRelation/ContinuousExecutionRelation.toJSON " +
"should not fail") {
val df = spark.readStream.format("rate").load()
assert(df.logicalPlan.toJSON.contains("StreamingRelationV2"))
testStream(df)(
AssertOnQuery(_.logicalPlan.toJSON.contains("StreamingDataSourceV2Relation"))
)
testStream(df)(
StartStream(trigger = Trigger.Continuous(100)),
AssertOnQuery(_.logicalPlan.toJSON.contains("StreamingDataSourceV2Relation"))
)
}
test("special characters in checkpoint path") {
withTempDir { tempDir =>
val checkpointDir = new File(tempDir, "chk @#chk")
val inputData = MemoryStream[Int]
inputData.addData(1)
val q = inputData.toDF()
.writeStream
.format("noop")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.start()
try {
q.processAllAvailable()
assert(checkpointDir.listFiles().toList.nonEmpty)
} finally {
q.stop()
}
}
}
/**
* Copy the checkpoint generated by Spark 2.4.0 from test resource to `dir` to set up a legacy
* streaming checkpoint.
*/
private def setUp2dot4dot0Checkpoint(dir: File): Unit = {
val input = getClass.getResource("/structured-streaming/escaped-path-2.4.0")
assert(input != null, "cannot find test resource '/structured-streaming/escaped-path-2.4.0'")
val inputDir = new File(input.toURI)
// Copy test files to tempDir so that we won't modify the original data.
FileUtils.copyDirectory(inputDir, dir)
// Spark 2.4 and earlier escaped the _spark_metadata path once
val legacySparkMetadataDir = new File(
dir,
new Path("output %@#output/_spark_metadata").toUri.toString)
// Migrate from legacy _spark_metadata directory to the new _spark_metadata directory.
    // Ideally we would copy "_spark_metadata" directly, just as a user migrating to the new
    // version is supposed to do. However, in our test, "tempDir" will be different in each run and
// we need to fix the absolute path in the metadata to match "tempDir".
val sparkMetadata = FileUtils.readFileToString(new File(legacySparkMetadataDir, "0"), UTF_8)
FileUtils.write(
new File(legacySparkMetadataDir, "0"),
sparkMetadata.replaceAll("TEMPDIR", dir.getCanonicalPath), UTF_8)
}
ignore("detect escaped path and report the migration guide") {
// Assert that the error message contains the migration conf, path and the legacy path.
def assertMigrationError(errorMessage: String, path: File, legacyPath: File): Unit = {
Seq(SQLConf.STREAMING_CHECKPOINT_ESCAPED_PATH_CHECK_ENABLED.key,
path.getCanonicalPath,
legacyPath.getCanonicalPath).foreach { msg =>
assert(errorMessage.contains(msg))
}
}
withTempDir { tempDir =>
setUp2dot4dot0Checkpoint(tempDir)
// Here are the paths we will use to create the query
val outputDir = new File(tempDir, "output %@#output")
val checkpointDir = new File(tempDir, "chk %@#chk")
val sparkMetadataDir = new File(tempDir, "output %@#output/_spark_metadata")
// The escaped paths used by Spark 2.4 and earlier.
// Spark 2.4 and earlier escaped the checkpoint path three times
val legacyCheckpointDir = new File(
tempDir,
new Path(new Path(new Path("chk %@#chk").toUri.toString).toUri.toString).toUri.toString)
// Spark 2.4 and earlier escaped the _spark_metadata path once
val legacySparkMetadataDir = new File(
tempDir,
new Path("output %@#output/_spark_metadata").toUri.toString)
// Reading a file sink output in a batch query should detect the legacy _spark_metadata
// directory and throw an error
val e = intercept[SparkException] {
spark.read.load(outputDir.getCanonicalPath).as[Int]
}
assertMigrationError(e.getMessage, sparkMetadataDir, legacySparkMetadataDir)
// Restarting the streaming query should detect the legacy _spark_metadata directory and
// throw an error
val inputData = MemoryStream[Int]
val e2 = intercept[SparkException] {
inputData.toDF()
.writeStream
.format("parquet")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.start(outputDir.getCanonicalPath)
}
assertMigrationError(e2.getMessage, sparkMetadataDir, legacySparkMetadataDir)
// Move "_spark_metadata" to fix the file sink and test the checkpoint path.
FileUtils.moveDirectory(legacySparkMetadataDir, sparkMetadataDir)
// Restarting the streaming query should detect the legacy
// checkpoint path and throw an error.
val e3 = intercept[SparkException] {
inputData.toDF()
.writeStream
.format("parquet")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.start(outputDir.getCanonicalPath)
}
assertMigrationError(e3.getMessage, checkpointDir, legacyCheckpointDir)
// Fix the checkpoint path and verify that the user can migrate the issue by moving files.
FileUtils.moveDirectory(legacyCheckpointDir, checkpointDir)
val q = inputData.toDF()
.writeStream
.format("parquet")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.start(outputDir.getCanonicalPath)
try {
q.processAllAvailable()
// Check the query id to make sure it did use checkpoint
assert(q.id.toString == "09be7fb3-49d8-48a6-840d-e9c2ad92a898")
// Verify that the batch query can read "_spark_metadata" correctly after migration.
val df = spark.read.load(outputDir.getCanonicalPath)
assert(df.queryExecution.executedPlan.toString contains "MetadataLogFileIndex")
checkDatasetUnorderly(df.as[Int], 1, 2, 3)
} finally {
q.stop()
}
}
}
ignore("ignore the escaped path check when the flag is off") {
withTempDir { tempDir =>
setUp2dot4dot0Checkpoint(tempDir)
val outputDir = new File(tempDir, "output %@#output")
val checkpointDir = new File(tempDir, "chk %@#chk")
withSQLConf(SQLConf.STREAMING_CHECKPOINT_ESCAPED_PATH_CHECK_ENABLED.key -> "false") {
// Verify that the batch query ignores the legacy "_spark_metadata"
val df = spark.read.load(outputDir.getCanonicalPath)
assert(!(df.queryExecution.executedPlan.toString contains "MetadataLogFileIndex"))
checkDatasetUnorderly(df.as[Int], 1, 2, 3)
val inputData = MemoryStream[Int]
val q = inputData.toDF()
.writeStream
.format("parquet")
.option("checkpointLocation", checkpointDir.getCanonicalPath)
.start(outputDir.getCanonicalPath)
try {
q.processAllAvailable()
// Check the query id to make sure it ignores the legacy checkpoint
assert(q.id.toString != "09be7fb3-49d8-48a6-840d-e9c2ad92a898")
} finally {
q.stop()
}
}
}
}
test("containsSpecialCharsInPath") {
Seq("foo/b ar",
"/foo/b ar",
"file:/foo/b ar",
"file://foo/b ar",
"file:///foo/b ar",
"file://foo:bar@bar/foo/b ar").foreach { p =>
assert(StreamExecution.containsSpecialCharsInPath(new Path(p)), s"failed to check $p")
}
Seq("foo/bar",
"/foo/bar",
"file:/foo/bar",
"file://foo/bar",
"file:///foo/bar",
"file://foo:bar@bar/foo/bar",
      // Special chars outside the path component should not be flagged, since such URLs won't
      // hit the escaped-path issue.
"file://foo:b ar@bar/foo/bar",
"file://foo:bar@b ar/foo/bar",
"file://f oo:bar@bar/foo/bar").foreach { p =>
assert(!StreamExecution.containsSpecialCharsInPath(new Path(p)), s"failed to check $p")
}
}
  /** Create a streaming DF that only executes one batch, in which it returns the given static DF */
private def createSingleTriggerStreamingDF(triggerDF: DataFrame): DataFrame = {
require(!triggerDF.isStreaming)
    // A streaming Source that generates only one trigger and returns the given DataFrame as the batch
val source = new Source() {
override def schema: StructType = triggerDF.schema
override def getOffset: Option[Offset] = Some(LongOffset(0))
override def getBatch(start: Option[Offset], end: Offset): DataFrame = {
sqlContext.internalCreateDataFrame(
triggerDF.queryExecution.toRdd, triggerDF.schema, isStreaming = true)
}
override def stop(): Unit = {}
}
StreamingExecutionRelation(source, spark)
}
/** Returns the query at the end of the first trigger of streaming DF */
private def getStreamingQuery(streamingDF: DataFrame): StreamingQuery = {
try {
val q = streamingDF.writeStream.format("memory").queryName("test").start()
q.processAllAvailable()
q
} finally {
spark.streams.active.map(_.stop())
}
}
/** Returns the last query progress from query.recentProgress where numInputRows is positive */
def getLastProgressWithData(q: StreamingQuery): Option[StreamingQueryProgress] = {
q.recentProgress.filter(_.numInputRows > 0).lastOption
}
/**
* A [[StreamAction]] to test the behavior of `StreamingQuery.awaitTermination()`.
*
* @param expectedBehavior Expected behavior (not blocked, blocked, or exception thrown)
* @param timeoutMs Timeout in milliseconds
* When timeoutMs is less than or equal to 0, awaitTermination() is
* tested (i.e. w/o timeout)
* When timeoutMs is greater than 0, awaitTermination(timeoutMs) is
* tested
* @param expectedReturnValue Expected return value when awaitTermination(timeoutMs) is used
*/
case class TestAwaitTermination(
expectedBehavior: ExpectedBehavior,
timeoutMs: Int = -1,
expectedReturnValue: Boolean = false
) extends AssertOnQuery(
TestAwaitTermination.assertOnQueryCondition(expectedBehavior, timeoutMs, expectedReturnValue),
"Error testing awaitTermination behavior"
) {
override def toString(): String = {
s"TestAwaitTermination($expectedBehavior, timeoutMs = $timeoutMs, " +
s"expectedReturnValue = $expectedReturnValue)"
}
}
object TestAwaitTermination {
/**
* Tests the behavior of `StreamingQuery.awaitTermination`.
*
* @param expectedBehavior Expected behavior (not blocked, blocked, or exception thrown)
* @param timeoutMs Timeout in milliseconds
* When timeoutMs is less than or equal to 0, awaitTermination() is
* tested (i.e. w/o timeout)
* When timeoutMs is greater than 0, awaitTermination(timeoutMs) is
* tested
* @param expectedReturnValue Expected return value when awaitTermination(timeoutMs) is used
*/
def assertOnQueryCondition(
expectedBehavior: ExpectedBehavior,
timeoutMs: Int,
expectedReturnValue: Boolean
)(q: StreamExecution): Boolean = {
def awaitTermFunc(): Unit = {
if (timeoutMs <= 0) {
q.awaitTermination()
} else {
val returnedValue = q.awaitTermination(timeoutMs)
assert(returnedValue === expectedReturnValue, "Returned value does not match expected")
}
}
AwaitTerminationTester.test(expectedBehavior, () => awaitTermFunc())
true // If the control reached here, then everything worked as expected
}
}
}
object StreamingQuerySuite {
// Singleton reference to clock that does not get serialized in task closures
var clock: StreamManualClock = null
}
| Intel-bigdata/OAP | oap-native-sql/core/src/test/scala/org/apache/spark/sql/streaming/StreamingQuerySuite.scala | Scala | apache-2.0 | 48,144 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import java.util.Locale
import scala.collection.JavaConverters._
import scala.language.implicitConversions
import org.apache.spark.annotation.Stable
import org.apache.spark.api.python.PythonEvalType
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.sql.catalyst.analysis.{Star, UnresolvedAlias, UnresolvedAttribute, UnresolvedFunction}
import org.apache.spark.sql.catalyst.encoders.encoderFor
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.util.toPrettySQL
import org.apache.spark.sql.execution.aggregate.TypedAggregateExpression
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types.{NumericType, StructType}
/**
* A set of methods for aggregations on a `DataFrame`, created by [[Dataset#groupBy groupBy]],
* [[Dataset#cube cube]] or [[Dataset#rollup rollup]] (and also `pivot`).
*
* The main method is the `agg` function, which has multiple variants. This class also contains
* some first-order statistics such as `mean`, `sum` for convenience.
*
* @note This class was named `GroupedData` in Spark 1.x.
*
* @since 2.0.0
*/
@Stable
class RelationalGroupedDataset protected[sql](
private[sql] val df: DataFrame,
private[sql] val groupingExprs: Seq[Expression],
groupType: RelationalGroupedDataset.GroupType) {
private[this] def toDF(aggExprs: Seq[Expression]): DataFrame = {
val aggregates = if (df.sparkSession.sessionState.conf.dataFrameRetainGroupColumns) {
groupingExprs ++ aggExprs
} else {
aggExprs
}
val aliasedAgg = aggregates.map(alias)
groupType match {
case RelationalGroupedDataset.GroupByType =>
Dataset.ofRows(df.sparkSession, Aggregate(groupingExprs, aliasedAgg, df.logicalPlan))
case RelationalGroupedDataset.RollupType =>
Dataset.ofRows(
df.sparkSession, Aggregate(Seq(Rollup(groupingExprs)), aliasedAgg, df.logicalPlan))
case RelationalGroupedDataset.CubeType =>
Dataset.ofRows(
df.sparkSession, Aggregate(Seq(Cube(groupingExprs)), aliasedAgg, df.logicalPlan))
case RelationalGroupedDataset.PivotType(pivotCol, values) =>
val aliasedGrps = groupingExprs.map(alias)
Dataset.ofRows(
df.sparkSession, Pivot(Some(aliasedGrps), pivotCol, values, aggExprs, df.logicalPlan))
}
}
// Wrap UnresolvedAttribute with UnresolvedAlias, as when we resolve UnresolvedAttribute, we
// will remove intermediate Alias for ExtractValue chain, and we need to alias it again to
// make it a NamedExpression.
private[this] def alias(expr: Expression): NamedExpression = expr match {
case u: UnresolvedAttribute => UnresolvedAlias(u)
case expr: NamedExpression => expr
case a: AggregateExpression if a.aggregateFunction.isInstanceOf[TypedAggregateExpression] =>
UnresolvedAlias(a, Some(Column.generateAlias))
case expr: Expression => Alias(expr, toPrettySQL(expr))()
}
private[this] def aggregateNumericColumns(colNames: String*)(f: Expression => AggregateFunction)
: DataFrame = {
val columnExprs = if (colNames.isEmpty) {
// No columns specified. Use all numeric columns.
df.numericColumns
} else {
// Make sure all specified columns are numeric.
colNames.map { colName =>
val namedExpr = df.resolve(colName)
if (!namedExpr.dataType.isInstanceOf[NumericType]) {
throw new AnalysisException(
s""""$colName" is not a numeric column. """ +
"Aggregation function can only be applied on a numeric column.")
}
namedExpr
}
}
toDF(columnExprs.map(expr => f(expr).toAggregateExpression()))
}
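  // Maps an aggregate method name used by the string-based `agg` variants below (e.g. "max",
  // "avg", "count") to a function that wraps an input expression into the corresponding
  // aggregate call.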
private[this] def strToExpr(expr: String): (Expression => Expression) = {
val exprToFunc: (Expression => Expression) = {
(inputExpr: Expression) => expr.toLowerCase(Locale.ROOT) match {
        // We special-case a few aliases that are not in the function registry.
case "avg" | "average" | "mean" =>
UnresolvedFunction("avg", inputExpr :: Nil, isDistinct = false)
case "stddev" | "std" =>
UnresolvedFunction("stddev", inputExpr :: Nil, isDistinct = false)
        // Also special-case count because we need to take care of count(*).
case "count" | "size" =>
// Turn count(*) into count(1)
inputExpr match {
case s: Star => Count(Literal(1)).toAggregateExpression()
case _ => Count(inputExpr).toAggregateExpression()
}
case name => UnresolvedFunction(name, inputExpr :: Nil, isDistinct = false)
}
}
(inputExpr: Expression) => exprToFunc(inputExpr)
}
/**
* Returns a `KeyValueGroupedDataset` where the data is grouped by the grouping expressions
* of current `RelationalGroupedDataset`.
*
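   * A rough sketch of the expected call pattern (the tuple dataset below is illustrative,
   * not taken from this file):
   * {{{
   *   val ds = Seq(("a", 1), ("a", 2), ("b", 3)).toDS()
   *   ds.groupBy($"_1").as[String, (String, Int)]
   * }}}
   *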
* @since 3.0.0
*/
def as[K: Encoder, T: Encoder]: KeyValueGroupedDataset[K, T] = {
val keyEncoder = encoderFor[K]
val valueEncoder = encoderFor[T]
// Resolves grouping expressions.
val dummyPlan = Project(groupingExprs.map(alias), LocalRelation(df.logicalPlan.output))
val analyzedPlan = df.sparkSession.sessionState.analyzer.execute(dummyPlan)
.asInstanceOf[Project]
df.sparkSession.sessionState.analyzer.checkAnalysis(analyzedPlan)
val aliasedGroupings = analyzedPlan.projectList
// Adds the grouping expressions that are not in base DataFrame into outputs.
val addedCols = aliasedGroupings.filter(g => !df.logicalPlan.outputSet.contains(g.toAttribute))
val qe = Dataset.ofRows(
df.sparkSession,
Project(df.logicalPlan.output ++ addedCols, df.logicalPlan)).queryExecution
new KeyValueGroupedDataset(
keyEncoder,
valueEncoder,
qe,
df.logicalPlan.output,
aliasedGroupings.map(_.toAttribute))
}
/**
* (Scala-specific) Compute aggregates by specifying the column names and
* aggregate methods. The resulting `DataFrame` will also contain the grouping columns.
*
* The available aggregate methods are `avg`, `max`, `min`, `sum`, `count`.
* {{{
* // Selects the age of the oldest employee and the aggregate expense for each department
* df.groupBy("department").agg(
* "age" -> "max",
* "expense" -> "sum"
* )
* }}}
*
* @since 1.3.0
*/
def agg(aggExpr: (String, String), aggExprs: (String, String)*): DataFrame = {
toDF((aggExpr +: aggExprs).map { case (colName, expr) =>
strToExpr(expr)(df(colName).expr)
})
}
/**
* (Scala-specific) Compute aggregates by specifying a map from column name to
* aggregate methods. The resulting `DataFrame` will also contain the grouping columns.
*
* The available aggregate methods are `avg`, `max`, `min`, `sum`, `count`.
* {{{
* // Selects the age of the oldest employee and the aggregate expense for each department
* df.groupBy("department").agg(Map(
* "age" -> "max",
* "expense" -> "sum"
* ))
* }}}
*
* @since 1.3.0
*/
def agg(exprs: Map[String, String]): DataFrame = {
toDF(exprs.map { case (colName, expr) =>
strToExpr(expr)(df(colName).expr)
}.toSeq)
}
/**
* (Java-specific) Compute aggregates by specifying a map from column name to
* aggregate methods. The resulting `DataFrame` will also contain the grouping columns.
*
* The available aggregate methods are `avg`, `max`, `min`, `sum`, `count`.
* {{{
* // Selects the age of the oldest employee and the aggregate expense for each department
* import com.google.common.collect.ImmutableMap;
* df.groupBy("department").agg(ImmutableMap.of("age", "max", "expense", "sum"));
* }}}
*
* @since 1.3.0
*/
def agg(exprs: java.util.Map[String, String]): DataFrame = {
agg(exprs.asScala.toMap)
}
/**
* Compute aggregates by specifying a series of aggregate columns. Note that this function by
* default retains the grouping columns in its output. To not retain grouping columns, set
* `spark.sql.retainGroupColumns` to false.
*
* The available aggregate methods are defined in [[org.apache.spark.sql.functions]].
*
* {{{
* // Selects the age of the oldest employee and the aggregate expense for each department
*
* // Scala:
* import org.apache.spark.sql.functions._
* df.groupBy("department").agg(max("age"), sum("expense"))
*
* // Java:
* import static org.apache.spark.sql.functions.*;
* df.groupBy("department").agg(max("age"), sum("expense"));
* }}}
*
   * Note that before Spark 1.4, the default behavior was to NOT retain grouping columns. To revert
   * to that behavior, set the config variable `spark.sql.retainGroupColumns` to `false`.
* {{{
* // Scala, 1.3.x:
* df.groupBy("department").agg($"department", max("age"), sum("expense"))
*
* // Java, 1.3.x:
* df.groupBy("department").agg(col("department"), max("age"), sum("expense"));
* }}}
*
* @since 1.3.0
*/
@scala.annotation.varargs
def agg(expr: Column, exprs: Column*): DataFrame = {
toDF((expr +: exprs).map {
case typed: TypedColumn[_, _] =>
typed.withInputType(df.exprEnc, df.logicalPlan.output).expr
case c => c.expr
})
}
/**
* Count the number of rows for each group.
* The resulting `DataFrame` will also contain the grouping columns.
*
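   * For example (a sketch, assuming a DataFrame `df` with a `department` column):
   * {{{
   *   df.groupBy("department").count()
   * }}}
   *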
* @since 1.3.0
*/
def count(): DataFrame = toDF(Seq(Alias(Count(Literal(1)).toAggregateExpression(), "count")()))
/**
   * Compute the average value for each numeric column for each group. This is an alias for `avg`.
   * The resulting `DataFrame` will also contain the grouping columns.
   * When specific columns are given, only compute the average values for them.
*
* @since 1.3.0
*/
@scala.annotation.varargs
def mean(colNames: String*): DataFrame = {
aggregateNumericColumns(colNames : _*)(Average)
}
/**
   * Compute the max value for each numeric column for each group.
   * The resulting `DataFrame` will also contain the grouping columns.
   * When specific columns are given, only compute the max values for them.
*
* @since 1.3.0
*/
@scala.annotation.varargs
def max(colNames: String*): DataFrame = {
aggregateNumericColumns(colNames : _*)(Max)
}
/**
   * Compute the mean value for each numeric column for each group.
   * The resulting `DataFrame` will also contain the grouping columns.
   * When specific columns are given, only compute the mean values for them.
*
* @since 1.3.0
*/
@scala.annotation.varargs
def avg(colNames: String*): DataFrame = {
aggregateNumericColumns(colNames : _*)(Average)
}
/**
* Compute the min value for each numeric column for each group.
* The resulting `DataFrame` will also contain the grouping columns.
   * When specific columns are given, only compute the min values for them.
*
* @since 1.3.0
*/
@scala.annotation.varargs
def min(colNames: String*): DataFrame = {
aggregateNumericColumns(colNames : _*)(Min)
}
/**
   * Compute the sum for each numeric column for each group.
   * The resulting `DataFrame` will also contain the grouping columns.
   * When specific columns are given, only compute the sum for them.
*
* @since 1.3.0
*/
@scala.annotation.varargs
def sum(colNames: String*): DataFrame = {
aggregateNumericColumns(colNames : _*)(Sum)
}
/**
* Pivots a column of the current `DataFrame` and performs the specified aggregation.
*
   * There are two versions of the `pivot` function: one that requires the caller to specify the list
* of distinct values to pivot on, and one that does not. The latter is more concise but less
* efficient, because Spark needs to first compute the list of distinct values internally.
*
* {{{
* // Compute the sum of earnings for each year by course with each course as a separate column
* df.groupBy("year").pivot("course", Seq("dotNET", "Java")).sum("earnings")
*
* // Or without specifying column values (less efficient)
* df.groupBy("year").pivot("course").sum("earnings")
* }}}
*
* @param pivotColumn Name of the column to pivot.
* @since 1.6.0
*/
def pivot(pivotColumn: String): RelationalGroupedDataset = pivot(Column(pivotColumn))
/**
* Pivots a column of the current `DataFrame` and performs the specified aggregation.
   * There are two versions of the pivot function: one that requires the caller to specify the list
* of distinct values to pivot on, and one that does not. The latter is more concise but less
* efficient, because Spark needs to first compute the list of distinct values internally.
*
* {{{
* // Compute the sum of earnings for each year by course with each course as a separate column
* df.groupBy("year").pivot("course", Seq("dotNET", "Java")).sum("earnings")
*
* // Or without specifying column values (less efficient)
* df.groupBy("year").pivot("course").sum("earnings")
* }}}
*
* From Spark 3.0.0, values can be literal columns, for instance, struct. For pivoting by
* multiple columns, use the `struct` function to combine the columns and values:
*
* {{{
* df.groupBy("year")
* .pivot("trainingCourse", Seq(struct(lit("java"), lit("Experts"))))
* .agg(sum($"earnings"))
* }}}
*
* @param pivotColumn Name of the column to pivot.
* @param values List of values that will be translated to columns in the output DataFrame.
* @since 1.6.0
*/
def pivot(pivotColumn: String, values: Seq[Any]): RelationalGroupedDataset = {
pivot(Column(pivotColumn), values)
}
/**
* (Java-specific) Pivots a column of the current `DataFrame` and performs the specified
* aggregation.
*
   * There are two versions of the pivot function: one that requires the caller to specify the list
* of distinct values to pivot on, and one that does not. The latter is more concise but less
* efficient, because Spark needs to first compute the list of distinct values internally.
*
* {{{
* // Compute the sum of earnings for each year by course with each course as a separate column
* df.groupBy("year").pivot("course", Arrays.<Object>asList("dotNET", "Java")).sum("earnings");
*
* // Or without specifying column values (less efficient)
* df.groupBy("year").pivot("course").sum("earnings");
* }}}
*
* @param pivotColumn Name of the column to pivot.
* @param values List of values that will be translated to columns in the output DataFrame.
* @since 1.6.0
*/
def pivot(pivotColumn: String, values: java.util.List[Any]): RelationalGroupedDataset = {
pivot(Column(pivotColumn), values)
}
/**
* Pivots a column of the current `DataFrame` and performs the specified aggregation.
   * This is an overloaded version of the `String`-typed `pivot` method that takes a `Column` as `pivotColumn`.
*
* {{{
* // Or without specifying column values (less efficient)
* df.groupBy($"year").pivot($"course").sum($"earnings");
* }}}
*
   * @param pivotColumn the column to pivot.
* @since 2.4.0
*/
def pivot(pivotColumn: Column): RelationalGroupedDataset = {
// This is to prevent unintended OOM errors when the number of distinct values is large
val maxValues = df.sparkSession.sessionState.conf.dataFramePivotMaxValues
    // Get the distinct values of the column and sort them so the resulting order is consistent
val values = df.select(pivotColumn)
.distinct()
.limit(maxValues + 1)
.sort(pivotColumn) // ensure that the output columns are in a consistent logical order
.collect()
.map(_.get(0))
.toSeq
if (values.length > maxValues) {
throw new AnalysisException(
s"The pivot column $pivotColumn has more than $maxValues distinct values, " +
"this could indicate an error. " +
s"If this was intended, set ${SQLConf.DATAFRAME_PIVOT_MAX_VALUES.key} " +
"to at least the number of distinct values of the pivot column.")
}
pivot(pivotColumn, values)
}
/**
* Pivots a column of the current `DataFrame` and performs the specified aggregation.
   * This is an overloaded version of the `String`-typed `pivot` method that takes a `Column` as `pivotColumn`.
*
* {{{
* // Compute the sum of earnings for each year by course with each course as a separate column
* df.groupBy($"year").pivot($"course", Seq("dotNET", "Java")).sum($"earnings")
* }}}
*
* @param pivotColumn the column to pivot.
* @param values List of values that will be translated to columns in the output DataFrame.
* @since 2.4.0
*/
def pivot(pivotColumn: Column, values: Seq[Any]): RelationalGroupedDataset = {
groupType match {
case RelationalGroupedDataset.GroupByType =>
        val valueExprs = values.map {
          case c: Column => c.expr
          case v => Literal.apply(v)
        }
new RelationalGroupedDataset(
df,
groupingExprs,
RelationalGroupedDataset.PivotType(pivotColumn.expr, valueExprs))
case _: RelationalGroupedDataset.PivotType =>
throw new UnsupportedOperationException("repeated pivots are not supported")
case _ =>
throw new UnsupportedOperationException("pivot is only supported after a groupBy")
}
}
/**
* (Java-specific) Pivots a column of the current `DataFrame` and performs the specified
   * aggregation. This is an overloaded version of the `String`-typed `pivot` method that takes
   * a `Column` as `pivotColumn`.
*
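   * {{{
   *   // A sketch of the Java usage, mirroring the String-typed overload above:
   *   df.groupBy(col("year")).pivot(col("course"), Arrays.<Object>asList("dotNET", "Java")).sum("earnings");
   * }}}
   *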
* @param pivotColumn the column to pivot.
* @param values List of values that will be translated to columns in the output DataFrame.
* @since 2.4.0
*/
def pivot(pivotColumn: Column, values: java.util.List[Any]): RelationalGroupedDataset = {
pivot(pivotColumn, values.asScala)
}
/**
* Applies the given serialized R function `func` to each group of data. For each unique group,
* the function will be passed the group key and an iterator that contains all of the elements in
* the group. The function can return an iterator containing elements of an arbitrary type which
* will be returned as a new `DataFrame`.
*
* This function does not support partial aggregation, and as a result requires shuffling all
* the data in the [[Dataset]]. If an application intends to perform an aggregation over each
* key, it is best to use the reduce function or an
* `org.apache.spark.sql.expressions#Aggregator`.
*
* Internally, the implementation will spill to disk if any given group is too large to fit into
* memory. However, users must take care to avoid materializing the whole iterator for a group
* (for example, by calling `toList`) unless they are sure that this is possible given the memory
* constraints of their cluster.
*
* @since 2.0.0
*/
private[sql] def flatMapGroupsInR(
f: Array[Byte],
packageNames: Array[Byte],
broadcastVars: Array[Broadcast[Object]],
outputSchema: StructType): DataFrame = {
val groupingNamedExpressions = groupingExprs.map(alias)
val groupingCols = groupingNamedExpressions.map(Column(_))
val groupingDataFrame = df.select(groupingCols : _*)
val groupingAttributes = groupingNamedExpressions.map(_.toAttribute)
Dataset.ofRows(
df.sparkSession,
FlatMapGroupsInR(
f,
packageNames,
broadcastVars,
outputSchema,
groupingDataFrame.exprEnc.deserializer,
df.exprEnc.deserializer,
df.exprEnc.schema,
groupingAttributes,
df.logicalPlan.output,
df.logicalPlan))
}
/**
* Applies a grouped vectorized python user-defined function to each group of data.
* The user-defined function defines a transformation: `pandas.DataFrame` -> `pandas.DataFrame`.
* For each group, all elements in the group are passed as a `pandas.DataFrame` and the results
* for all groups are combined into a new [[DataFrame]].
*
* This function does not support partial aggregation, and requires shuffling all the data in
* the [[DataFrame]].
*
* This function uses Apache Arrow as serialization format between Java executors and Python
* workers.
*/
private[sql] def flatMapGroupsInPandas(expr: PythonUDF): DataFrame = {
require(expr.evalType == PythonEvalType.SQL_GROUPED_MAP_PANDAS_UDF,
"Must pass a grouped map udf")
require(expr.dataType.isInstanceOf[StructType],
s"The returnType of the udf must be a ${StructType.simpleString}")
val groupingNamedExpressions = groupingExprs.map {
case ne: NamedExpression => ne
case other => Alias(other, other.toString)()
}
val groupingAttributes = groupingNamedExpressions.map(_.toAttribute)
val child = df.logicalPlan
val project = Project(groupingNamedExpressions ++ child.output, child)
val output = expr.dataType.asInstanceOf[StructType].toAttributes
val plan = FlatMapGroupsInPandas(groupingAttributes, expr, output, project)
Dataset.ofRows(df.sparkSession, plan)
}
/**
* Applies a vectorized python user-defined function to each cogrouped data.
* The user-defined function defines a transformation:
* `pandas.DataFrame`, `pandas.DataFrame` -> `pandas.DataFrame`.
* For each group in the cogrouped data, all elements in the group are passed as a
* `pandas.DataFrame` and the results for all cogroups are combined into a new [[DataFrame]].
*
* This function uses Apache Arrow as serialization format between Java executors and Python
* workers.
*/
private[sql] def flatMapCoGroupsInPandas(
r: RelationalGroupedDataset,
expr: PythonUDF): DataFrame = {
require(expr.evalType == PythonEvalType.SQL_COGROUPED_MAP_PANDAS_UDF,
"Must pass a cogrouped map udf")
require(expr.dataType.isInstanceOf[StructType],
s"The returnType of the udf must be a ${StructType.simpleString}")
val leftGroupingNamedExpressions = groupingExprs.map {
case ne: NamedExpression => ne
case other => Alias(other, other.toString)()
}
val rightGroupingNamedExpressions = r.groupingExprs.map {
case ne: NamedExpression => ne
case other => Alias(other, other.toString)()
}
val leftAttributes = leftGroupingNamedExpressions.map(_.toAttribute)
val rightAttributes = rightGroupingNamedExpressions.map(_.toAttribute)
val leftChild = df.logicalPlan
val rightChild = r.df.logicalPlan
val left = Project(leftGroupingNamedExpressions ++ leftChild.output, leftChild)
val right = Project(rightGroupingNamedExpressions ++ rightChild.output, rightChild)
val output = expr.dataType.asInstanceOf[StructType].toAttributes
val plan = FlatMapCoGroupsInPandas(leftAttributes, rightAttributes, expr, output, left, right)
Dataset.ofRows(df.sparkSession, plan)
}
override def toString: String = {
val builder = new StringBuilder
builder.append("RelationalGroupedDataset: [grouping expressions: [")
val kFields = groupingExprs.collect {
case expr: NamedExpression if expr.resolved =>
s"${expr.name}: ${expr.dataType.simpleString(2)}"
case expr: NamedExpression => expr.name
case o => o.toString
}
builder.append(kFields.take(2).mkString(", "))
if (kFields.length > 2) {
builder.append(" ... " + (kFields.length - 2) + " more field(s)")
}
builder.append(s"], value: ${df.toString}, type: $groupType]").toString()
}
}
private[sql] object RelationalGroupedDataset {
def apply(
df: DataFrame,
groupingExprs: Seq[Expression],
groupType: GroupType): RelationalGroupedDataset = {
new RelationalGroupedDataset(df, groupingExprs, groupType: GroupType)
}
/**
* The Grouping Type
*/
private[sql] trait GroupType {
override def toString: String = getClass.getSimpleName.stripSuffix("$").stripSuffix("Type")
}
/**
* To indicate it's the GroupBy
*/
private[sql] object GroupByType extends GroupType
/**
* To indicate it's the CUBE
*/
private[sql] object CubeType extends GroupType
/**
* To indicate it's the ROLLUP
*/
private[sql] object RollupType extends GroupType
/**
* To indicate it's the PIVOT
*/
private[sql] case class PivotType(pivotCol: Expression, values: Seq[Expression]) extends GroupType
}
| goldmedal/spark | sql/core/src/main/scala/org/apache/spark/sql/RelationalGroupedDataset.scala | Scala | apache-2.0 | 25,328 |
package com.lunatic.mlx.kddcup99.ml
import org.apache.spark.ml.Transformer
import org.apache.spark.ml.feature.VectorAssembler
import org.apache.spark.ml.param._
import org.json4s.JsonAST._
import org.json4s.jackson.Serialization
import org.json4s.jackson.Serialization._
import org.json4s.{CustomSerializer, NoTypeHints}
/**
 * Quick experiment with serializing Spark ML transformer parameters to and from JSON
 * using json4s custom serializers.
 */
object TestJson1 extends App {
import com.lunatic.mlx.kddcup99.ml.JsonHelper1._
val toFeaturesVector = new VectorAssembler().
setInputCols(Array("test1", "test2")).
setOutputCol("features")
println(toFeaturesVector.explainParams())
// implicit val formats = Serialization.formats(NoTypeHints) + new VectorAssemblerSerializer
implicit val formats = Serialization.formats(NoTypeHints) +
new TransformerSerializer[VectorAssembler] +
new ParamPairSerializer[String] +
new ParamPairSerializer[Array[String]]
// implicit val formats = DefaultFormats + FieldSerializer[ParamPair]()
println("---------------------------------")
println(writePretty(toFeaturesVector))
// println("---------------------------------")
println(writePretty(toFeaturesVector.extractParamMap().toSeq))
println("---------------------------------")
val r = read[VectorAssembler](writePretty(toFeaturesVector))
println(r.explainParams())
// println("---------------------------------")
// println(toFeaturesVector.explainParams())
// println("---------------------------------")
// println(writePretty(r.params))
}
object JsonHelper1 {
implicit class AParams[P <: Params]( xparams: P) extends Params {
override def copy(extra: ParamMap): Params = xparams.copy(extra)
override val uid: String = xparams.uid
def setPP(pp: ParamPair[_]): P = xparams.set(pp).asInstanceOf[P]
}
implicit def paramToJObject(param: Param[_]): JObject =
JObject(JField("parent", JString(param.parent)) ::
JField("name", JString(param.name)) ::
JField("doc", JString(param.doc)) ::
Nil)
implicit def paramToJField(param: Param[_]): JField =
JField("param", param)
class ParamSerializer extends CustomSerializer[Param[_]](format => ( {
case JObject(JField("parent", JString(parent)) ::
JField("name", JString(name)) ::
JField("doc", JString(doc)) ::
Nil) =>
new Param(parent, name, doc)
}, {
case x: Param[_] => x
}
))
class ParamPairSerializer[T](implicit m: Manifest[T]) extends CustomSerializer[ParamPair[T]](format => ( {
case JObject(JField("param", JObject(param)) ::
JField("value", JObject(value)) ::
Nil) =>
new ParamPair[T](param.asInstanceOf[Param[T]], value.asInstanceOf[T])
}, {
case pp: ParamPair[_] => paramPairToJValue(pp)
}
))
class TransformerSerializer[T <: Transformer](implicit m: Manifest[T]) extends CustomSerializer[T](format => ( {
case JObject(JField("class", JString(classname))
:: JField("uid", JString(uid))
:: JField("params", JArray(params : List[ParamPair[_]]))
:: Nil) =>
val newTransformer = Class.forName(classname).getConstructor(classOf[String]).newInstance(uid).asInstanceOf[T]
println("-----------")
println(params.getClass)
println(params.mkString("\\n"))
println("-----------")
val transformer = params.
map(_.asInstanceOf[ParamPair[_]]).
foldLeft(newTransformer)((trans, param) => trans.setPP(param))
transformer
}, {
case trans: Transformer =>
// TODO add defaults as well, just to be on the safe side
val xx = trans.extractParamMap().toSeq.map(pm => paramPairToJValue(pm)).toList
JObject(JField("class", JString(trans.getClass.getName)) ::
JField("uid", JString(trans.uid)) ::
JField("params", JArray(xx)) ::
Nil)
}
))
implicit def paramPairToJValue(p: ParamPair[_]): JValue = p match {
case ParamPair(param: Param[_], value: String) => JObject(paramToJField(param) ::
JField("value", JString(value)) ::
Nil)
case ParamPair(param: LongParam, value: Long) => JObject(paramToJField(param) ::
JField("value", JInt(value)) ::
Nil)
case ParamPair(param: FloatParam, value: Float) => JObject(paramToJField(param) ::
JField("value", JDouble(value)) ::
Nil)
case ParamPair(param: DoubleParam, value: Double) => JObject(paramToJField(param) ::
JField("value", JDouble(value)) ::
Nil)
case ParamPair(param: BooleanParam, value: Boolean) => JObject(paramToJField(param) ::
JField("value", JBool(value)) ::
Nil)
case ParamPair(param: StringArrayParam, value: Array[String]) => JObject(paramToJField(param) ::
JField("value", JArray(value.map(JString(_)).toList)) ::
Nil)
case ParamPair(param: IntArrayParam, value: Array[Int]) => JObject(paramToJField(param) ::
JField("value", JArray(value.map(JInt(_)).toList)) ::
Nil)
case ParamPair(param: DoubleArrayParam, value: Array[Double]) => JObject(paramToJField(param) ::
JField("value", JArray(value.map(JDouble(_)).toList)) ::
Nil)
case _ => JNothing
}
class VectorAssemblerSerializer extends CustomSerializer[VectorAssembler](format => ( {
case JObject(JField("uid", JString(uid)) :: JField("inputCols", JArray(inputCols))
:: JField("outputCol", JString(outputCol)) :: Nil) =>
new VectorAssembler(uid).
setInputCols(inputCols.map(_.toString).toArray).
setOutputCol(outputCol)
}, {
case x: VectorAssembler =>
JObject(JField("uid", JString(x.uid)) ::
JField("inputCols", JArray(x.getInputCols.map(JString(_)).toList)) ::
JField("outputCol", JString(x.getOutputCol)) :: Nil)
}
))
}
| tupol/sparx-mllib | src/main/scala/com/lunatic/mlx/kddcup99/ml/TestJson1.scala | Scala | apache-2.0 | 5,676 |
package com.tsukaby.bean_validation_scala
import org.hibernate.validator.constraints.Length
import scala.annotation.meta.field
class LengthValidatorForOptionSpec extends BaseSpec {
private[this] case class TestBean(
@(Length@field)(min = 1, max = 1)
value: Option[String]
)
Seq(
(TestBean(Some("")), 1),
(TestBean(Some("a")), 0)
) foreach { case (bean, expected) =>
s"Check violations count. bean = $bean, count = $expected" >> {
test(bean, expected)
}
}
}
| bean-validation-scala/bean-validation-scala | src/test/scala/com/tsukaby/bean_validation_scala/LengthValidatorForOptionSpec.scala | Scala | mit | 603 |
package com.aidan.chapter10
import java.beans.PropertyChangeEvent
import java.beans.PropertyChangeListener
import java.beans.PropertyChangeSupport
import scala.Array.canBuildFrom
object Chapter10 {
}
trait RectangleLike {
def setFrame(x: Double, y: Double, w: Double, h: Double)
def getX: Double
def getY: Double
def getWidth: Double
def getHeight: Double
def translate(x: Int, y: Int) = {
setFrame(getX + x, getY + y, getWidth, getHeight)
}
def grow(h: Int, v: Int) = {
setFrame(getX - h, getY - v, getWidth + (2 * h), getHeight + (2 * v))
}
}
class OrderedPoint(x: Int, y: Int) extends java.awt.Point(x, y) with scala.math.Ordered[OrderedPoint] {
def compare(that: OrderedPoint) = {
if (this.x == that.x) this.y - that.y
else this.x - that.x
}
}
trait MessageLogger {
def log(msg: String) = msg
}
trait CaesarCryptLogger extends MessageLogger {
var rotation: Int
override def log(msg: String) = {
val rotated = for (ch <- msg) yield {
// Uppercase ASCII A-Z
if (ch >= 65 && ch <= 90) {
val rotated = ch + rotation
if (rotated > 90) {
((rotated - 90) + 64).toChar
} else if (rotated < 65) {
(90 - (64 - rotated)).toChar
} else rotated.toChar
} // Lowercase ASCII a-z
else if (ch >= 97 && ch <= 122) {
val rotated = ch + rotation
if (rotated > 122) {
((rotated - 122) + 96).toChar
} else if (rotated < 97) {
(122 - (96 - rotated)).toChar
} else rotated.toChar
} else {
        // Not handling non-alpha characters
ch.toChar
}
}
rotated.mkString
}
}
class CryptoLogger extends CaesarCryptLogger {
var rotation = 3
def this(rotation: Int) {
this()
this.rotation = rotation
}
}
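// Example usage (a sketch):
//   val logger = new CryptoLogger         // default rotation of 3
//   logger.log("abc")                     // returns "def"
//   new CryptoLogger(-3).log("def")       // returns "abc"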
/*
* Only implementing one of the PropertyChangeSupport methods!
*
* Passes method calls on to PropertyChangeSupport delegate.
*/
trait PropertyChangeSupportT {
val propChange = new PropertyChangeSupport(this)
def addPropertyChangeListener(listener: PropertyChangeListener) = {
propChange.addPropertyChangeListener(listener)
}
}
/*
 * I don't really like my own solution! It means that each method in Point needs to be overridden to
* a) call the super class
* b) fire an event
*/
class ListeningPoint(x: Int, y: Int) extends java.awt.Point with PropertyChangeSupportT {
override def move(x: Int, y: Int) {
super.move(x, y)
    // Not populating the PropertyChangeEvent object properly - the work isn't worth it for this demo
propChange.firePropertyChange(new PropertyChangeEvent(this, "X", "Get old value", "Output new value"))
}
}
trait Drinker {
var thirsty = true;
val favouriteDrink: String
val usualAmount = "glass"
def drink = {
//println("Drinking a " + usualAmount + " of " + favouriteDrink)
thirsty = false
}
}
trait BeerDrinker extends Drinker {
val favouriteDrink = "beer"
override val usualAmount = "pint"
}
trait Speaker {
def speak(what: String) = what
}
trait NoLowerCaseVowelSpeaker extends Speaker {
override def speak(what: String) = {
val noVowels = what.filter(p => !Array('a', 'e', 'i', 'o', 'u').contains(p))
super.speak(noVowels.mkString)
}
}
trait LoudSpeaker extends Speaker {
override def speak(what: String) = {
super.speak(what.toUpperCase())
}
}
class Programmer {
}
trait BufferTrait extends java.io.InputStream with Logger {
val bufferLength: Int
override def read: Int = {
val content = new Array[Byte](bufferLength)
val dataLength = super.read(content, 0, bufferLength)
log("Read " + dataLength + " bytes")
dataLength
}
def readContent: String = {
val content = new Array[Byte](bufferLength)
val dataLength = super.read(content, 0, bufferLength)
log("Read " + dataLength + " bytes")
(for (ch <- content) yield ch.toChar).mkString
}
}
trait Logger {
def log(msg: String)
}
trait ConsoleLogger extends Logger {
def log(msg: String) { println(msg) }
}
trait TimestampLogger extends ConsoleLogger {
override def log(msg: String) {
super.log(new java.util.Date() + " " + msg)
}
}
// ************ No IterableInputStream built. Bored with working with java.io ! **********************/ | aidanwhiteley/scala_impatient | src/main/scala/com/aidan/chapter10/Chapter10.scala | Scala | apache-2.0 | 4,244 |
package com.github.mdr.mash.ns.git.branch
import com.github.mdr.mash.functions.{ BoundParams, MashFunction, ParameterModel }
import com.github.mdr.mash.ns.git.remote.RemoteNameClass
import com.github.mdr.mash.ns.git.{ CommitHashClass, GitCommon, GitHelper }
import com.github.mdr.mash.runtime.{ MashList, MashObject, MashString }
import org.eclipse.jgit.api.Git
import org.eclipse.jgit.api.ListBranchCommand.ListMode
import org.eclipse.jgit.lib.{ Ref, Repository }
import scala.collection.JavaConverters._
import scala.collection.immutable.ListMap
object ListRemoteFunction extends MashFunction("git.branch.remoteBranches") {
val params = ParameterModel.Empty
def call(boundParams: BoundParams): MashList = {
GitHelper.withRepository { repo ⇒
val git = new Git(repo)
val branches = git.branchList.setListMode(ListMode.REMOTE).call().asScala.filterNot(_.getName endsWith "/HEAD")
MashList(branches.map(asMashObject(repo)))
}
}
def asMashObject(repo: Repository)(ref: Ref): MashObject = {
val id = ref.getObjectId.getName
val name = GitCommon.trimRemoteBranchPrefix(ref.getName)
val Seq(remote, branchName) = name.split("/", 2).toSeq
import RemoteBranchClass.Fields._
MashObject.of(
ListMap(
Remote -> MashString(remote, RemoteNameClass),
Name -> MashString(branchName, RemoteBranchNameClass),
Commit -> MashString(id, CommitHashClass)),
RemoteBranchClass)
}
override def typeInferenceStrategy = Seq(RemoteBranchClass)
override def summaryOpt = Some("List remote branches in the repository")
} | mdr/mash | src/main/scala/com/github/mdr/mash/ns/git/branch/ListRemoteFunction.scala | Scala | mit | 1,598 |
package hr.element.etb
package object io {
import sys.process._
import sys.process.BasicIO.transferFully
import java.io.{ File, ByteArrayInputStream, ByteArrayOutputStream }
import java.lang.{ Thread, Runnable, InterruptedException }
implicit val WaitPeriod = 60000L
implicit val WorkingDir = new File(".")
object Runner {
def apply(cmd: Seq[String], input: Array[Byte] = Array())(implicit workingDir: File, waitPeriod: Long) = {
val pb = Process(cmd, workingDir)
val oS = new ByteArrayOutputStream
val eS = new ByteArrayOutputStream
val pio = new ProcessIO(
in = { iSP =>
val iS = new ByteArrayInputStream(input)
transferFully(iS, iSP)
iSP.close()
}, out = transferFully(_, oS), err = transferFully(_, eS)
)
val process = pb run pio
val unmonitor = after(waitPeriod)(process.destroy)
val retcode = process.exitValue
unmonitor()
(retcode, oS.toByteArray, eS.toByteArray)
}
def after(time: Long)(cont: => Any): () => Boolean = {
val t = soloThread {
try { Thread sleep time; cont }
catch { case _: InterruptedException => }
}
() => t.isAlive && { t.interrupt(); true }
}
def soloThread(wat: => Any) = {
val t = new Thread(new Runnable {
def run { wat }
})
t.start()
t
}
}
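  // Example usage (a sketch; picks up the implicit WorkingDir and WaitPeriod defined above):
  //   val (retcode, stdout, stderr) = Runner(Seq("echo", "hello"))
  //   println(new String(stdout))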
}
| melezov/etb | util/src/main/scala/hr/element/etb/Runner.scala | Scala | unlicense | 1,395 |
package ca.hyperreal.avconv
import java.awt.{Graphics2D}
import java.awt.Color._
import java.awt.image.{BufferedImage}
import javax.imageio.ImageIO
import java.io.{InputStream, OutputStream, File}
import sys.process.{Process, ProcessIO}
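// Example usage (a sketch; requires the `avconv` binary to be available on the PATH):
//   val enc = new Avconv("25", "out")   // 25 frames per second, writes out.mp4
//   val g = enc.frame(640, 480)         // Graphics2D to draw the next frame on
//   g.setColor(java.awt.Color.WHITE)
//   g.fillRect(0, 0, 640, 480)
//   enc.done                            // flushes the last frame and waits for avconv to exit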
class Avconv( rate: String, video: String )
{
var in: OutputStream = _
var out: InputStream = _
var err: InputStream = _
val avconv = Process( s"avconv -y -f image2pipe -r $rate -vcodec png -i - -vcodec libx264 -pix_fmt yuv420p $video.mp4" ).run(
new ProcessIO(os => in = os, is => out = is, is => err = is) )
var img: BufferedImage = null
private def write
{
if (img ne null)
{
ImageIO.write( img, "PNG", in )
}
}
def frame( width: Int, height: Int ) =
{
write
img = new BufferedImage( width, height, BufferedImage.TYPE_INT_ARGB )
img.getGraphics.asInstanceOf[Graphics2D]
}
def done =
{
write
in.flush
in.close
val result = avconv.exitValue
out.close
err.close
result
}
} | edadma/avconv | src/main/scala/Avconv.scala | Scala | gpl-3.0 | 973 |
package io.swagger.client.api
import io.swagger.client.model.Inline_response_200_9
import io.swagger.client.model.Credential
import io.swagger.client.model.Inline_response_200_10
import io.swagger.client.model.Inline_response_200_2
import io.swagger.client.core._
import io.swagger.client.core.CollectionFormats._
import io.swagger.client.core.ApiKeyLocations._
object CredentialApi {
/**
* Get all Credentials
*
* Expected answers:
* code 200 : Inline_response_200_9 (successful operation)
*
* @param connectorId connector_id
* @param attrKey attr_key
* @param attrValue attr_value
* @param createdAt created_at
* @param updatedAt updated_at
* @param limit limit
* @param offset offset
* @param sort sort
*/
def credentialsGet(connectorId: Option[Boolean] = None, attrKey: Option[String] = None, attrValue: Option[String] = None, createdAt: Option[String] = None, updatedAt: Option[String] = None, limit: Option[Int] = None, offset: Option[Int] = None, sort: Option[String] = None): ApiRequest[Inline_response_200_9] =
ApiRequest[Inline_response_200_9](ApiMethods.GET, "https://app.quantimo.do/api/v2", "/credentials", "application/json")
.withQueryParam("connector_id", connectorId)
.withQueryParam("attr_key", attrKey)
.withQueryParam("attr_value", attrValue)
.withQueryParam("created_at", createdAt)
.withQueryParam("updated_at", updatedAt)
.withQueryParam("limit", limit)
.withQueryParam("offset", offset)
.withQueryParam("sort", sort)
.withSuccessResponse[Inline_response_200_9](200)
/**
* Store Credential
*
* Expected answers:
* code 200 : Inline_response_200_10 (successful operation)
*
* @param body Credential that should be stored
*/
def credentialsPost(body: Option[Credential] = None): ApiRequest[Inline_response_200_10] =
ApiRequest[Inline_response_200_10](ApiMethods.POST, "https://app.quantimo.do/api/v2", "/credentials", "application/json")
.withBody(body)
.withSuccessResponse[Inline_response_200_10](200)
/**
* Get Credential
*
* Expected answers:
* code 200 : Inline_response_200_10 (successful operation)
*
* @param id connector id
* @param attrKey attrKey
*/
def credentialsIdGet(id: Int, attrKey: String): ApiRequest[Inline_response_200_10] =
ApiRequest[Inline_response_200_10](ApiMethods.GET, "https://app.quantimo.do/api/v2", "/credentials/{id}", "application/json")
.withQueryParam("attrKey", attrKey)
.withPathParam("id", id)
.withSuccessResponse[Inline_response_200_10](200)
/**
* Update Credential
*
* Expected answers:
* code 200 : Inline_response_200_2 (successful operation)
*
* @param id connector id
* @param attrKey attrKey
* @param body Credential that should be updated
*/
def credentialsIdPut(id: Int, attrKey: String, body: Option[Credential] = None): ApiRequest[Inline_response_200_2] =
ApiRequest[Inline_response_200_2](ApiMethods.PUT, "https://app.quantimo.do/api/v2", "/credentials/{id}", "application/json")
.withBody(body)
.withQueryParam("attrKey", attrKey)
.withPathParam("id", id)
.withSuccessResponse[Inline_response_200_2](200)
/**
* Delete Credential
*
* Expected answers:
* code 200 : Inline_response_200_2 (successful operation)
*
* @param id connector id
* @param attrKey attrKey
*/
def credentialsIdDelete(id: Int, attrKey: String): ApiRequest[Inline_response_200_2] =
ApiRequest[Inline_response_200_2](ApiMethods.DELETE, "https://app.quantimo.do/api/v2", "/credentials/{id}", "application/json")
.withQueryParam("attrKey", attrKey)
.withPathParam("id", id)
.withSuccessResponse[Inline_response_200_2](200)
}
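// Example usage (a sketch; the ApiRequest only describes the call and still has to be
// executed by the client's request invoker):
//   val request = CredentialApi.credentialsGet(limit = Some(10), sort = Some("created_at"))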
| QuantiModo/QuantiModo-SDK-Akka-Scala | src/main/scala/io/swagger/client/api/CredentialApi.scala | Scala | gpl-2.0 | 3,830 |
package sbtandroid
import java.util.Properties
import proguard.{Configuration=>ProGuardConfiguration, ProGuard, ConfigurationParser, ConfigurationWriter}
import sbt._
import Keys._
import AndroidPlugin._
import AndroidHelpers._
import java.io.{File => JFile}
object AndroidInstall {
/**
* Task that installs a package on the target
*/
private val installTask =
(adbTarget, dbPath, packageApkPath, streams) map { (t, dp, p, s) =>
s.log.info("Installing %s".format(p.name))
t.installPackage(dp, s, p)
()
}
/**
* Task that uninstalls a package from the target
*/
private val uninstallTask =
(adbTarget, dbPath, manifestPackage, streams) map { (t, dp, p, s) =>
s.log.info("Uninstalling %s".format(p))
t.uninstallPackage(dp, s, p)
()
}
private def aaptPackageTask: Project.Initialize[Task[File]] =
(aaptPath, manifestPath, resPath, mainAssetsPath, libraryJarPath, resourcesApkPath, streams) map {
(aaptPath, manifestPath, resPath, mainAssetsPath, libraryJarPath, resourcesApkPath, streams) =>
// Make assets directory
mainAssetsPath.mkdirs
// Resource arguments
val libraryResPathArgs = resPath.flatMap(p => Seq("-S", p.absolutePath))
// AAPT command line
val aapt = Seq(aaptPath.absolutePath, "package",
"--auto-add-overlay", "-f",
"-M", manifestPath.head.absolutePath,
"-A", mainAssetsPath.absolutePath,
"-I", libraryJarPath.absolutePath,
"-F", resourcesApkPath.absolutePath) ++
libraryResPathArgs
// Package resources
streams.log.info("Packaging resources in " + resourcesApkPath.absolutePath)
streams.log.debug("Running: " + aapt.mkString(" "))
if (aapt.run(false).exitValue != 0) sys.error("Error packaging resources")
// Return the path to the resources APK
resourcesApkPath
}
private def dxTask: Project.Initialize[Task[File]] =
(dxPath, dxMemory, target, proguard, dxInputs, dxPredex,
proguardOptimizations, classDirectory, dxOutputPath, scalaInstance, streams) map {
(dxPath, dxMemory, target, proguard, dxInputs, dxPredex,
proguardOptimizations, classDirectory, dxOutputPath, scalaInstance, streams) =>
// Main dex command
def dexing(inputs: Seq[JFile], output: JFile) {
val uptodate = output.exists && inputs.forall(input =>
input.isDirectory match {
case true =>
(input ** "*").get.forall(_.lastModified <= output.lastModified)
case false =>
input.lastModified <= output.lastModified
}
)
if (!uptodate) {
val noLocals = if (proguardOptimizations.isEmpty) "" else "--no-locals"
val dxCmd = (Seq(dxPath.absolutePath,
dxMemoryParameter(dxMemory),
"--dex", noLocals,
"--num-threads="+java.lang.Runtime.getRuntime.availableProcessors,
"--output="+output.getAbsolutePath) ++
inputs.map(_.absolutePath)).filter(_.length > 0)
streams.log.debug(dxCmd.mkString(" "))
streams.log.info("Dexing "+output.getAbsolutePath)
streams.log.debug(dxCmd !!)
        } else streams.log.debug("DEX file " + output.getAbsolutePath + " is up to date, skipping")
}
// First, predex the inputs in dxPredex
val dxPredexInputs = dxInputs filter (dxPredex contains _) map { jarPath =>
// Generate the output path
val outputPath = target / (jarPath.getName + ".apk")
// Predex the library
dexing(Seq(jarPath), outputPath)
// Return the output path
outputPath
}
// Non-predexed inputs
val dxClassInputs = dxInputs filterNot (dxPredex contains _)
// Generate the final DEX
dexing(dxClassInputs +++ dxPredexInputs get, dxOutputPath)
// Return the path to the generated final DEX file
dxOutputPath
}
private def proguardTask: Project.Initialize[Task[Option[File]]] =
(proguardConfiguration, proguardOutputPath, streams) map {
(proguardConfiguration, proguardOutputPath, streams) =>
proguardConfiguration map { configFile =>
// Execute Proguard
streams.log.info("Executing Proguard with configuration file " + configFile.getAbsolutePath)
// Parse the configuration
val config = new ProGuardConfiguration
val parser = new ConfigurationParser(configFile, new Properties)
parser.parse(config)
// Execute ProGuard
val proguard = new ProGuard(config)
proguard.execute
// Return the proguard-ed output JAR
proguardOutputPath
}
}
private def proguardConfigurationTask: Project.Initialize[Task[Option[File]]] =
(useProguard, proguardOptimizations, classDirectory,
generatedProguardConfigPath, includedClasspath, providedClasspath,
proguardOutputPath, manifestPackage, proguardOptions, sourceManaged,
proguardInJarsFilter) map {
(useProguard, proguardOptimizations, classDirectory,
genConfig, includedClasspath, providedClasspath,
proguardOutputPath, manifestPackage, proguardOptions, sourceManaged,
proguardInJarsFilter) =>
if (useProguard) {
val generatedOptions =
if(genConfig.exists())
scala.io.Source.fromFile(genConfig).getLines.filterNot(x => x.isEmpty || x.head == '#').toSeq
else Seq()
val optimizationOptions = if (proguardOptimizations.isEmpty) Seq("-dontoptimize") else proguardOptimizations
val sep = JFile.pathSeparator
// Input class files
val inClass = "\\"" + classDirectory.absolutePath + "\\""
// Input library JARs to be included in the APK
val inJars = includedClasspath
.map{ jar => "\\"%s\\"(%s)".format(jar, proguardInJarsFilter(jar).mkString(",")) }
.mkString(sep)
// Input library JARs to be provided at runtime
val inLibrary = providedClasspath
.map("\\"" + _.absolutePath + "\\"")
.mkString(sep)
// Output JAR
val outJar = "\\""+proguardOutputPath.absolutePath+"\\""
// Proguard arguments
val args = (
"-injars" :: inClass ::
"-injars" :: inJars ::
"-outjars" :: outJar ::
"-libraryjars" :: inLibrary ::
Nil) ++
generatedOptions ++
optimizationOptions ++ (
"-dontwarn" :: "-dontobfuscate" ::
"-dontnote scala.Enumeration" ::
"-dontnote org.xml.sax.EntityResolver" ::
"-keep public class * extends android.app.backup.BackupAgent" ::
"-keep public class * extends android.appwidget.AppWidgetProvider" ::
"-keep class scala.collection.SeqLike { public java.lang.String toString(); }" ::
"-keep class scala.reflect.ScalaSignature" ::
"-keep public class * implements junit.framework.Test { public void test*(); }" ::
"""
-keepclassmembers class * implements java.io.Serializable {
private static final java.io.ObjectStreamField[] serialPersistentFields;
private void writeObject(java.io.ObjectOutputStream);
private void readObject(java.io.ObjectInputStream);
java.lang.Object writeReplace();
java.lang.Object readResolve();
}
""" :: Nil) ++ proguardOptions
// Instantiate the Proguard configuration
val config = new ProGuardConfiguration
new ConfigurationParser(args.toArray[String], new Properties).parse(config)
// Write that to a file
val configFile = sourceManaged / "proguard.txt"
val writer = new ConfigurationWriter(configFile)
writer.write(config)
writer.close
// Return the configuration file
Some(configFile)
} else None
}
private val apkTask =
(useDebug, packageConfig, streams) map { (debug, c, s) =>
val builder = new ApkBuilder(c, debug)
builder.build.fold(sys.error(_), s.log.info(_))
s.log.debug(builder.outputStream.toString)
c.packageApkPath
}
lazy val settings: Seq[Setting[_]] = Seq(
// Resource package generation
aaptPackage <<= aaptPackageTask,
// Dexing (DX)
dx <<= dxTask,
// Clean generated APK
cleanApk <<= (packageApkPath) map (IO.delete(_)),
// Proguard
proguard <<= proguardTask,
proguard <<= proguard dependsOn (compile),
// Proguard configuration
proguardConfiguration <<= proguardConfigurationTask,
// Final APK generation
packageConfig <<=
(toolsPath, packageApkPath, resourcesApkPath, dxOutputPath,
nativeDirectories, dxInputs, resourceDirectory) map
(ApkConfig(_, _, _, _, _, _, _)),
apk <<= apkTask dependsOn (cleanApk, aaptPackage, dx, copyNativeLibraries),
// Package installation
install <<= installTask dependsOn apk,
// Package uninstallation
uninstall <<= uninstallTask
)
}
| jberkel/android-plugin | src/main/scala/AndroidInstall.scala | Scala | bsd-3-clause | 9,315 |
import org.apache.spark.SparkContext
import org.apache.spark.sql.SQLContext
/** Lazily instantiated singleton instance of SQLContext */
object SQLContextSingleton {
@transient private var instance: SQLContext = null
// Instantiate SQLContext on demand
def getInstance(sparkContext: SparkContext): SQLContext = synchronized {
if (instance == null) {
instance = new SQLContext(sparkContext)
}
instance
}
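  // Example usage (a sketch, typical of Spark Streaming jobs; assumes a DStream `stream` of
  // case-class records so that `toDF` is available via the SQLContext implicits):
  //   stream.foreachRDD { rdd =>
  //     val sqlContext = SQLContextSingleton.getInstance(rdd.sparkContext)
  //     import sqlContext.implicits._
  //     rdd.toDF().registerTempTable("records")
  //   }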
} | faganpe/KafkaStreamingPOC | src/main/scala/SQLContextSingleton.scala | Scala | apache-2.0 | 431 |
class Foo[+CC[X]] { type Coll = CC[_] }
| felixmulder/scala | test/files/neg/t8265.scala | Scala | bsd-3-clause | 40 |
package org.jetbrains.plugins.scala
package lang
package surroundWith
package surrounders
package expression
import com.intellij.lang.ASTNode
import com.intellij.openapi.util.TextRange
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.expr._
/**
* @author Alexander Podkhalyuzin
* Date: 04.05.2008
*/
class ScalaWithTryCatchFinallySurrounder extends ScalaExpressionSurrounder {
override def getTemplateAsString(elements: Array[PsiElement]): String = {
val arrow = if (elements.length == 0) "=>" else ScalaPsiUtil.functionArrow(elements(0).getProject)
"try {\\n" + super.getTemplateAsString(elements) + s"\\n} catch {\\n case _ $arrow \\n} finally {}"
}
//noinspection ScalaExtractStringToBundle
override def getTemplateDescription = "try / catch / finally"
override def getSurroundSelectionRange(withTryCatchNode: ASTNode): TextRange = {
val element: PsiElement = withTryCatchNode.getPsi match {
case x: ScParenthesisedExpr => x.innerElement match {
case Some(y) => y
case _ => return x.getTextRange
}
case x => x
}
val tryCatchStmt = element.asInstanceOf[ScTry]
val catchBlockPsiElement = tryCatchStmt.catchBlock.get
val caseClause =
catchBlockPsiElement.expression.get.asInstanceOf[ScBlockExpr].caseClauses.get.caseClauses.head.pattern.get
val offset = caseClause.getTextRange.getStartOffset
tryCatchStmt.getNode.removeChild(caseClause.getNode)
new TextRange(offset, offset)
}
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/surroundWith/surrounders/expression/ScalaWithTryCatchFinallySurrounder.scala | Scala | apache-2.0 | 1,581 |
package com.sksamuel.scapegoat.inspections.imports
import com.sksamuel.scapegoat.PluginRunner
import org.scalatest.{ FreeSpec, Matchers, OneInstancePerTest }
/** @author Stephen Samuel */
class WildcardImportTest extends FreeSpec with Matchers with PluginRunner with OneInstancePerTest {
override val inspections = Seq(new WildcardImport)
"WildcardImport" - {
"should report warning" - {
"for wildcard imports" in {
val code =
"""import scala.concurrent._
object Test { }""".stripMargin
compileCodeSnippet(code)
compiler.scapegoat.feedback.warnings.size shouldBe 1
}
}
}
}
| pwwpche/scalac-scapegoat-plugin | src/test/scala/com/sksamuel/scapegoat/inspections/imports/WildcardImportTest.scala | Scala | apache-2.0 | 652 |
package akka.cluster.client
import akka.actor.{ActorSystem, Props}
import akka.cluster.{AkkaCuratorClient, ZookeeperClusterSeedSettings}
import com.typesafe.config.{Config, ConfigValueFactory}
import org.apache.curator.framework.CuratorFramework
import org.apache.curator.framework.recipes.locks.{LockInternals, LockInternalsSorter, StandardLockInternalsDriver}
import scala.collection.JavaConverters._
import scala.collection.immutable
import scala.util.Try
object ZookeeperClusterClientSettings {
private val sorter = new LockInternalsSorter() {
override def fixForSorting(str: String, lockName: String): String =
StandardLockInternalsDriver.standardFixForSorting(str, lockName)
}
def apply(system: ActorSystem, overwrittenActorSettings: Option[Config] = None): ClusterClientSettings = {
val config = overwrittenActorSettings.getOrElse(system.settings.config).getConfig("akka.cluster.client")
val systemName = config.getString("zookeeper.name")
val receptionistPath = Try(config.getString("zookeeper.receptionistName")).getOrElse("/system/receptionist")
val settings = new ZookeeperClusterSeedSettings(system, "akka.cluster.client.zookeeper", overwrittenActorSettings)
val client = AkkaCuratorClient(settings)
val contacts = getClusterParticipants(client, settings.ZKPath + "/" + systemName).map(_ + receptionistPath)
system.log.info("component=zookeeper-cluster-client at=find-initial-contacts contacts={}", contacts)
client.close()
ClusterClientSettings(
config.withValue(
"initial-contacts",
ConfigValueFactory.fromIterable(immutable.List(contacts: _*).asJava)
)
)
}
private def getClusterParticipants(client: CuratorFramework, zkPath: String): Seq[String] = {
val participants = LockInternals.getParticipantNodes(client,
zkPath,
"latch-" /* magic string from LeaderLatch.LOCK_NAME */,
sorter).asScala
participants.map(path => new String(client.getData.forPath(path))).toSeq
}
}
object ZookeeperClusterClientProps {
def apply(system: ActorSystem): Props = ClusterClient.props(ZookeeperClusterClientSettings(system))
}
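// Example usage (a sketch; assumes the ActorSystem's config contains the
// akka.cluster.client.zookeeper settings read above):
//   val system = ActorSystem("client-system")
//   val clusterClient = system.actorOf(ZookeeperClusterClientProps(system), "cluster-client")
//   clusterClient ! ClusterClient.Send("/user/my-service", "ping", localAffinity = true)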
| sclasen/akka-zk-cluster-seed | src/main/scala/akka/cluster/client/ZookeeperClusterClientSettings.scala | Scala | apache-2.0 | 2,159 |
package io.youi.layout
case class HorizontalLayout(spacing: Double = 0.0, initialSpacing: Double = 0.0) extends Layout {
override def connect(container: Component): Unit = {
update(container, Vector.empty)
}
override def disconnect(container: Component): Unit = Component.childrenFor(container).foreach { c =>
Snap(c).horizontalReset()
}
override def childrenChanged(container: Component, removed: Vector[Component], added: Vector[Component]): Unit = {
super.childrenChanged(container, removed, added)
update(container, removed)
}
private def update(container: Component, removed: Vector[Component]): Unit = {
val items = Component.childrenFor(container)
removed.foreach { c =>
Snap(c).horizontalReset()
}
items.filter(c => c.visible() && c.includeInLayout()).foldLeft(Option.empty[Component])((previous, current) => {
Snap(current).horizontalReset().leftTo(previous.map(_.position.right + spacing).getOrElse(initialSpacing))
Some(current)
})
}
} | outr/youi | ui/js/src/main/scala/io/youi/layout/HorizontalLayout.scala | Scala | mit | 1,021 |
package org.jetbrains.sbt.shell
import java.io.File
import java.util
import java.util.UUID
import com.intellij.build.events.impl._
import com.intellij.build.{BuildViewManager, DefaultBuildDescriptor}
import com.intellij.compiler.impl.CompilerUtil
import com.intellij.execution.Executor
import com.intellij.execution.executors.DefaultRunExecutor
import com.intellij.execution.runners.ExecutionEnvironment
import com.intellij.openapi.compiler.ex.CompilerPathsEx
import com.intellij.openapi.components.ServiceManager
import com.intellij.openapi.externalSystem.model.ProjectKeys
import com.intellij.openapi.externalSystem.model.execution.ExternalSystemTaskExecutionSettings
import com.intellij.openapi.externalSystem.model.project.ExternalSystemSourceType
import com.intellij.openapi.externalSystem.service.project.ProjectDataManager
import com.intellij.openapi.externalSystem.util.{ExternalSystemUtil, ExternalSystemApiUtil => ES}
import com.intellij.openapi.fileEditor.FileDocumentManager
import com.intellij.openapi.module.{Module, ModuleType}
import com.intellij.openapi.progress.{PerformInBackgroundOption, ProgressIndicator, ProgressManager, Task}
import com.intellij.openapi.project.Project
import com.intellij.openapi.vfs.LocalFileSystem
import com.intellij.task._
import org.jetbrains.annotations.Nullable
import org.jetbrains.sbt.SbtUtil
import org.jetbrains.sbt.project.SbtProjectSystem
import org.jetbrains.sbt.project.module.SbtModuleType
import org.jetbrains.sbt.settings.SbtSystemSettings
import org.jetbrains.sbt.shell.SbtShellCommunication._
import scala.collection.JavaConverters._
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration.Duration
import scala.util.{Failure, Success}
/**
* Created by jast on 2016-11-25.
*/
class SbtProjectTaskRunner extends ProjectTaskRunner {
// will override the usual jps build thingies
override def canRun(projectTask: ProjectTask): Boolean = projectTask match {
case task: ModuleBuildTask =>
val module = task.getModule
ModuleType.get(module) match {
// TODO Android AARs are currently imported as modules. need a way to filter them away before building
case _: SbtModuleType =>
// SbtModuleType actually denotes `-build` modules, which are not part of the regular build
false
case _ =>
val project = task.getModule.getProject
val projectSettings = SbtSystemSettings.getInstance(project).getLinkedProjectSettings(module)
projectSettings.exists(_.useSbtShell) &&
ES.isExternalSystemAwareModule(SbtProjectSystem.Id, module)
}
case _: ArtifactBuildTask =>
// TODO should sbt handle this?
false
case _: ExecuteRunConfigurationTask =>
// TODO this includes tests (and what else?). sbt should handle it and test output should be parsed
false
case _ => false
}
override def run(project: Project,
context: ProjectTaskContext,
callback: ProjectTaskNotification,
tasks: util.Collection[_ <: ProjectTask]): Unit = {
val validTasks = tasks.asScala.collect {
case task: ModuleBuildTask => task
}
// the "build" button in IDEA always runs the build for all individual modules,
// and may work differently than just calling the products task from the main module in sbt
val moduleCommands = validTasks.flatMap(buildCommands)
val modules = validTasks.map(_.getModule)
// don't run anything if there's no module to run a build for
// TODO user feedback
if (moduleCommands.nonEmpty) {
val command =
if (moduleCommands.size == 1) moduleCommands.head
else moduleCommands.mkString("all ", " ", "")
FileDocumentManager.getInstance().saveAllDocuments()
// run this as a task (which blocks a thread) because it seems non-trivial to just update indicators asynchronously?
val task = new CommandTask(project, modules.toArray, command, Option(callback))
ProgressManager.getInstance().run(task)
}
}
private def buildCommands(task: ModuleBuildTask): Seq[String] = {
// TODO sensible way to find out what scopes to run it for besides compile and test?
// TODO make tasks should be user-configurable
SbtUtil.getSbtModuleData(task.getModule).toSeq.flatMap { sbtModuleData =>
val scope = SbtUtil.makeSbtProjectId(sbtModuleData)
// `products` task is a little more general than just `compile`
Seq(s"$scope/products", s"$scope/test:products")
}
}
@Nullable
override def createExecutionEnvironment(project: Project,
task: ExecuteRunConfigurationTask,
executor: Executor): ExecutionEnvironment = {
val taskSettings = new ExternalSystemTaskExecutionSettings
val executorId = Option(executor).map(_.getId).getOrElse(DefaultRunExecutor.EXECUTOR_ID)
ExternalSystemUtil.createExecutionEnvironment(
project,
SbtProjectSystem.Id,
taskSettings, executorId
)
}
}
private class CommandTask(project: Project, modules: Array[Module], command: String, callbackOpt: Option[ProjectTaskNotification]) extends
Task.Backgroundable(project, "sbt build", false, PerformInBackgroundOption.ALWAYS_BACKGROUND) {
import CommandTask._
override def run(indicator: ProgressIndicator): Unit = {
indicator.setIndeterminate(true)
indicator.setFraction(0) // TODO how does the fraction thing work?
indicator.setText("queued sbt build ...")
val shell = SbtShellCommunication.forProject(project)
val viewManager = ServiceManager.getService(project, classOf[BuildViewManager])
val taskId = UUID.randomUUID()
val buildDescriptor = new DefaultBuildDescriptor(taskId, "sbt build", project.getBasePath, System.currentTimeMillis())
val startEvent = new StartBuildEventImpl(buildDescriptor, "queued sbt build ...")
viewManager.onEvent(startEvent)
val resultAggregator: (TaskResultData,ShellEvent) => TaskResultData = { (data,event) =>
event match {
case TaskStart =>
// TODO looks like this isn't called?
indicator.setIndeterminate(true)
indicator.setFraction(0.1)
indicator.setText("building ...")
case TaskComplete =>
indicator.setText("")
case ErrorWaitForInput =>
// TODO should be a build error, but can only actually happen during reload anyway
case Output(text) =>
indicator.setText2(text)
}
taskResultAggregator(data,event)
}
val defaultTaskResult = TaskResultData(aborted = false, 0, 0)
val failedResult = new ProjectTaskResult(true, 1, 0)
// TODO consider running module build tasks separately
// may require collecting results individually and aggregating
val commandFuture = shell.command(command, defaultTaskResult, resultAggregator, showShell = true)
.map(data => new ProjectTaskResult(data.aborted, data.errors, data.warnings))
.recover {
case _ =>
// TODO some kind of feedback / rethrow
failedResult
}
.andThen {
case _ => refreshRoots(modules, indicator)
}
.andThen {
case Success(taskResult) =>
// TODO progress monitoring
callbackOpt.foreach(_.finished(taskResult))
indicator.setFraction(1)
indicator.setText("sbt build completed")
indicator.setText2("")
val successResult = new SuccessResultImpl
val successEvent =
new FinishEventImpl(taskId, null, System.currentTimeMillis(), "sbt build completed", successResult)
viewManager.onEvent(successEvent)
case Failure(err) =>
callbackOpt.foreach(_.finished(failedResult))
indicator.setText("sbt build failed")
indicator.setText2(err.getMessage)
val failureResult = new FailureResultImpl(err)
val failureEvent =
new FinishEventImpl(taskId, null, System.currentTimeMillis(), "sbt build failed", failureResult)
viewManager.onEvent(failureEvent)
}
// block thread to make indicator available :(
Await.ready(commandFuture, Duration.Inf)
}
// remove this if/when external system handles this refresh on its own
private def refreshRoots(modules: Array[Module], indicator: ProgressIndicator): Unit = {
indicator.setText("Synchronizing output directories...")
// simply refresh all the source roots to catch any generated files -- this MAY have a performance impact
// in which case it might be necessary to receive the generated sources directly from sbt and refresh them (see BuildManager)
val info = ProjectDataManager.getInstance().getExternalProjectData(project,SbtProjectSystem.Id, project.getBasePath)
val allSourceRoots = ES.findAllRecursively(info.getExternalProjectStructure, ProjectKeys.CONTENT_ROOT)
val generatedSourceRoots = allSourceRoots.asScala.flatMap { node =>
val data = node.getData
// sbt-side generated sources are still imported as regular sources
val generated = data.getPaths(ExternalSystemSourceType.SOURCE_GENERATED).asScala
val regular = data.getPaths(ExternalSystemSourceType.SOURCE).asScala
generated ++ regular
}.map(_.getPath).toSeq.distinct
val outputRoots = CompilerPathsEx.getOutputPaths(modules)
val toRefresh = generatedSourceRoots ++ outputRoots
CompilerUtil.refreshOutputRoots(toRefresh.asJavaCollection)
val toRefreshFiles = toRefresh.map(new File(_)).asJava
LocalFileSystem.getInstance().refreshIoFiles(toRefreshFiles, true, true, null)
indicator.setText("")
}
}
object CommandTask {
private case class TaskResultData(aborted: Boolean, errors: Int, warnings: Int)
private val taskResultAggregator: EventAggregator[TaskResultData] = (result, event) =>
event match {
case TaskStart => result
case TaskComplete => result
case Output(text) =>
if (text startsWith "[error]")
result.copy(errors = result.errors+1)
else if (text startsWith "[warning]")
result.copy(warnings = result.warnings+1)
else result
}
} | triplequote/intellij-scala | scala/scala-impl/src/org/jetbrains/sbt/shell/SbtProjectTaskRunner.scala | Scala | apache-2.0 | 10,270 |
package inloopio.util.nls
import java.util.Locale
import java.util.MissingResourceException
import java.util.ResourceBundle
import inloopio.util.ClassVar
/**
* Base class for all translation bundles that provides injection of translated
* texts into public String fields.
*/
abstract class TranslationBundle {
private var _effectiveLocale: Locale = _
private var _resourceBundle: ResourceBundle = _
/**
   * @return the locale used for loading the resource bundle from which
   * the field values were taken
*/
def effectiveLocale = _effectiveLocale
/**
* @return the resource bundle on which this translation bundle is based
*/
def resourceBundle = _resourceBundle
/**
* Injects locale specific text in all instance fields of this instance.
* Only public instance fields of type <code>String</code> are considered.
* <p>
* The name of this (sub)class plus the given <code>locale</code> parameter
* define the resource bundle to be loaded. In other words the
* <code>this.getClass().getName()</code> is used as the
* <code>baseName</code> parameter in the
* {@link ResourceBundle#getBundle(String, Locale)} parameter to load the
* resource bundle.
* <p>
*
* @param locale
* defines the locale to be used when loading the resource bundle
* @exception TranslationBundleLoadingException see {@link TranslationBundleLoadingException}
* @exception TranslationStringMissingException see {@link TranslationStringMissingException}
*/
@throws(classOf[TranslationBundleLoadingException])
def load(locale: Locale) {
val bundleClass = getClass
try {
_resourceBundle = ResourceBundle.getBundle(bundleClass.getName, locale)
} catch {
case e: MissingResourceException => throw new TranslationBundleLoadingException(bundleClass, locale, e)
}
_effectiveLocale = resourceBundle.getLocale
for (field @ ClassVar(name, getter, setter) <- ClassVar.getPublicVars(bundleClass) if field.getter.getReturnType == classOf[String]) {
try {
val translatedText = resourceBundle.getString(name)
field.asInstanceOf[ClassVar[TranslationBundle, String]].set(this, translatedText)
} catch {
case e: MissingResourceException =>
throw new TranslationStringMissingException(bundleClass, locale, name, e)
case e: IllegalArgumentException =>
throw new Error(e)
case e: IllegalAccessException =>
throw new Error(e)
}
}
}
} | dcaoyuan/inloopio-libs | inloopio-util/src/main/scala/inloopio/util/nls/TranslationBundle.scala | Scala | bsd-3-clause | 2,521 |
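// Illustrative sketch (not part of inloopio-libs): a minimal TranslationBundle
// subclass showing how load() injects translated texts into public String vars.
// The class name, field name and resource key are assumptions made up for this
// example; the bundle is resolved via ResourceBundle.getBundle(getClass.getName, locale),
// so a GreetingsBundle_en.properties on the classpath containing the line
// `helloWorld=Hello world` is assumed.
import java.util.Locale
import inloopio.util.nls.TranslationBundle

class GreetingsBundle extends TranslationBundle {
  // Public vars of type String are picked up by load() and filled from the
  // resource bundle entry whose key equals the field name.
  var helloWorld: String = _
}

object GreetingsBundleDemo {
  def main(args: Array[String]): Unit = {
    val bundle = new GreetingsBundle
    bundle.load(Locale.ENGLISH) // throws TranslationBundleLoadingException if the bundle is missing
    println(bundle.helloWorld)
  }
}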
package io.atal.butterfly.action
import io.atal.butterfly.{Editor, Clipboard, Cursor}
import org.scalatest._
import Matchers._
/** RemoveCursor action unit test
*/
class RemoveCursorTest extends FlatSpec {
"The RemoveCursor action" should "remove a cursor to the editor" in {
val action = new RemoveCursor((0, 0))
val editor = new Editor()
val clipboard = new Clipboard()
action.execute(editor, clipboard)
editor.cursors should have length 0
}
}
| Matthieu-Riou/Butterfly | src/test/scala/io/atal/butterfly/action/RemoveCursorTest.scala | Scala | mit | 477 |
package com.sksamuel.elastic4s.searches.suggestions
import com.sksamuel.elastic4s.searches.suggestion.TermSuggestionDefinition
import org.elasticsearch.search.suggest.SuggestBuilders
import org.elasticsearch.search.suggest.term.TermSuggestionBuilder
object TermSuggestionBuilderFn {
def apply(sugg: TermSuggestionDefinition): TermSuggestionBuilder = {
val builder = SuggestBuilders.termSuggestion(sugg.fieldname)
sugg.analyzer.foreach(builder.analyzer)
sugg.shardSize.foreach(builder.shardSize(_))
sugg.size.foreach(builder.size)
sugg.text.foreach(builder.text)
sugg.accuracy.map(_.toFloat).foreach(builder.accuracy)
sugg.maxEdits.foreach(builder.maxEdits)
sugg.maxInspections.foreach(builder.maxInspections)
sugg.maxTermFreq.map(_.toFloat).foreach(builder.maxTermFreq)
sugg.minDocFreq.map(_.toFloat).foreach(builder.minDocFreq)
sugg.minWordLength.foreach(builder.minWordLength)
sugg.prefixLength.foreach(builder.prefixLength)
sugg.sort.foreach(builder.sort)
sugg.stringDistance.foreach(builder.stringDistance)
sugg.suggestMode.foreach(builder.suggestMode)
builder
}
}
| aroundus-inc/elastic4s | elastic4s-tcp/src/main/scala/com/sksamuel/elastic4s/searches/suggestions/TermSuggestionBuilderFn.scala | Scala | apache-2.0 | 1,144 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import java.text.SimpleDateFormat
import java.util.{Calendar, Locale}
import org.scalatest.exceptions.TestFailedException
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.plans.PlanTestBase
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.types._
import org.apache.spark.unsafe.types.{CalendarInterval, UTF8String}
class CsvExpressionsSuite extends SparkFunSuite with ExpressionEvalHelper with PlanTestBase {
val badCsv = "\\u0000\\u0000\\u0000A\\u0001AAA"
val gmtId = Option(DateTimeUtils.TimeZoneGMT.getID)
test("from_csv") {
val csvData = "1"
val schema = StructType(StructField("a", IntegerType) :: Nil)
checkEvaluation(
CsvToStructs(schema, Map.empty, Literal(csvData), gmtId),
InternalRow(1)
)
}
test("from_csv - invalid data") {
val csvData = "---"
val schema = StructType(StructField("a", DoubleType) :: Nil)
checkEvaluation(
CsvToStructs(schema, Map("mode" -> PermissiveMode.name), Literal(csvData), gmtId),
InternalRow(null))
// Default mode is Permissive
checkEvaluation(CsvToStructs(schema, Map.empty, Literal(csvData), gmtId), InternalRow(null))
}
test("from_csv null input column") {
val schema = StructType(StructField("a", IntegerType) :: Nil)
checkEvaluation(
CsvToStructs(schema, Map.empty, Literal.create(null, StringType), gmtId),
null
)
}
test("from_csv bad UTF-8") {
val schema = StructType(StructField("a", IntegerType) :: Nil)
checkEvaluation(
CsvToStructs(schema, Map.empty, Literal(badCsv), gmtId),
InternalRow(null))
}
test("from_csv with timestamp") {
val schema = StructType(StructField("t", TimestampType) :: Nil)
val csvData1 = "2016-01-01T00:00:00.123Z"
var c = Calendar.getInstance(DateTimeUtils.TimeZoneGMT)
c.set(2016, 0, 1, 0, 0, 0)
c.set(Calendar.MILLISECOND, 123)
checkEvaluation(
CsvToStructs(schema, Map.empty, Literal(csvData1), gmtId),
InternalRow(c.getTimeInMillis * 1000L)
)
// The result doesn't change because the CSV string includes timezone string ("Z" here),
// which means the string represents the timestamp string in the timezone regardless of
// the timeZoneId parameter.
checkEvaluation(
CsvToStructs(schema, Map.empty, Literal(csvData1), Option("PST")),
InternalRow(c.getTimeInMillis * 1000L)
)
val csvData2 = "2016-01-01T00:00:00"
for (tz <- DateTimeTestUtils.outstandingTimezones) {
c = Calendar.getInstance(tz)
c.set(2016, 0, 1, 0, 0, 0)
c.set(Calendar.MILLISECOND, 0)
checkEvaluation(
CsvToStructs(
schema,
Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss"),
Literal(csvData2),
Option(tz.getID)),
InternalRow(c.getTimeInMillis * 1000L)
)
checkEvaluation(
CsvToStructs(
schema,
Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss",
DateTimeUtils.TIMEZONE_OPTION -> tz.getID),
Literal(csvData2),
gmtId),
InternalRow(c.getTimeInMillis * 1000L)
)
}
}
test("from_csv empty input column") {
val schema = StructType(StructField("a", IntegerType) :: Nil)
checkEvaluation(
CsvToStructs(schema, Map.empty, Literal.create(" ", StringType), gmtId),
InternalRow(null)
)
}
test("forcing schema nullability") {
val input = """1,,"foo""""
val csvSchema = new StructType()
.add("a", LongType, nullable = false)
.add("b", StringType, nullable = false)
.add("c", StringType, nullable = false)
val output = InternalRow(1L, null, UTF8String.fromString("foo"))
val expr = CsvToStructs(csvSchema, Map.empty, Literal.create(input, StringType), gmtId)
checkEvaluation(expr, output)
val schema = expr.dataType
val schemaToCompare = csvSchema.asNullable
assert(schemaToCompare == schema)
}
test("from_csv missing columns") {
val schema = new StructType()
.add("a", IntegerType)
.add("b", IntegerType)
checkEvaluation(
CsvToStructs(schema, Map.empty, Literal.create("1"), gmtId),
InternalRow(1, null)
)
}
test("unsupported mode") {
val csvData = "---"
val schema = StructType(StructField("a", DoubleType) :: Nil)
val exception = intercept[TestFailedException] {
checkEvaluation(
CsvToStructs(schema, Map("mode" -> DropMalformedMode.name), Literal(csvData), gmtId),
InternalRow(null))
}.getCause
assert(exception.getMessage.contains("from_csv() doesn't support the DROPMALFORMED mode"))
}
test("infer schema of CSV strings") {
checkEvaluation(new SchemaOfCsv(Literal.create("1,abc")), "struct<_c0:int,_c1:string>")
}
test("infer schema of CSV strings by using options") {
checkEvaluation(
new SchemaOfCsv(Literal.create("1|abc"), Map("delimiter" -> "|")),
"struct<_c0:int,_c1:string>")
}
test("to_csv - struct") {
val schema = StructType(StructField("a", IntegerType) :: Nil)
val struct = Literal.create(create_row(1), schema)
checkEvaluation(StructsToCsv(Map.empty, struct, gmtId), "1")
}
test("to_csv null input column") {
val schema = StructType(StructField("a", IntegerType) :: Nil)
val struct = Literal.create(null, schema)
checkEvaluation(
StructsToCsv(Map.empty, struct, gmtId),
null
)
}
test("to_csv with timestamp") {
val schema = StructType(StructField("t", TimestampType) :: Nil)
val c = Calendar.getInstance(DateTimeUtils.TimeZoneGMT)
c.set(2016, 0, 1, 0, 0, 0)
c.set(Calendar.MILLISECOND, 0)
val struct = Literal.create(create_row(c.getTimeInMillis * 1000L), schema)
checkEvaluation(StructsToCsv(Map.empty, struct, gmtId), "2016-01-01T00:00:00.000Z")
checkEvaluation(
StructsToCsv(Map.empty, struct, Option("PST")), "2015-12-31T16:00:00.000-08:00")
checkEvaluation(
StructsToCsv(
Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss",
DateTimeUtils.TIMEZONE_OPTION -> gmtId.get),
struct,
gmtId),
"2016-01-01T00:00:00"
)
checkEvaluation(
StructsToCsv(
Map("timestampFormat" -> "yyyy-MM-dd'T'HH:mm:ss",
DateTimeUtils.TIMEZONE_OPTION -> "PST"),
struct,
gmtId),
"2015-12-31T16:00:00"
)
}
test("parse date with locale") {
Seq("en-US", "ru-RU").foreach { langTag =>
val locale = Locale.forLanguageTag(langTag)
val date = new SimpleDateFormat("yyyy-MM-dd").parse("2018-11-05")
val schema = new StructType().add("d", DateType)
val dateFormat = "MMM yyyy"
val sdf = new SimpleDateFormat(dateFormat, locale)
val dateStr = sdf.format(date)
val options = Map("dateFormat" -> dateFormat, "locale" -> langTag)
checkEvaluation(
CsvToStructs(schema, options, Literal.create(dateStr), gmtId),
InternalRow(17836)) // number of days from 1970-01-01
}
}
test("verify corrupt column") {
checkExceptionInExpression[AnalysisException](
CsvToStructs(
schema = StructType.fromDDL("i int, _unparsed boolean"),
options = Map("columnNameOfCorruptRecord" -> "_unparsed"),
child = Literal.create("a"),
timeZoneId = gmtId),
expectedErrMsg = "The field for corrupt records must be string type and nullable")
}
test("from/to csv with intervals") {
val schema = new StructType().add("a", "interval")
checkEvaluation(
StructsToCsv(Map.empty, Literal.create(create_row(new CalendarInterval(1, 2, 3)), schema)),
"1 months 2 days 0.000003 seconds")
checkEvaluation(
CsvToStructs(schema, Map.empty, Literal.create("1 day")),
InternalRow(new CalendarInterval(0, 1, 0)))
}
}
| goldmedal/spark | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/CsvExpressionsSuite.scala | Scala | apache-2.0 | 8,725 |
package com.sksamuel.scapegoat.inspections.collections
import com.sksamuel.scapegoat.{ Inspection, InspectionContext, Inspector, Levels }
/** @author Stephen Samuel */
class AvoidSizeNotEqualsZero extends Inspection {
def inspector(context: InspectionContext): Inspector = new Inspector(context) {
override def postTyperTraverser = Some apply new context.Traverser {
import context.global._
private val Size = TermName("size")
private val Length = TermName("length")
override def inspect(tree: Tree): Unit = {
tree match {
case Apply(Select(Select(_, Length | Size), TermName("$bang$eq")), List(Literal(Constant(0)))) =>
context.warn("Avoid Traversable.size == 0", tree.pos, Levels.Warning,
"Traversable.size is slow for some implementations. Prefer .nonEmpty which is O(1): " + tree
.toString().take(100), AvoidSizeNotEqualsZero.this)
case _ => continue(tree)
}
}
}
}
}
| jasonchaffee/scalac-scapegoat-plugin | src/main/scala/com/sksamuel/scapegoat/inspections/collections/AvoidSizeNotEqualsZero.scala | Scala | apache-2.0 | 997 |
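// Illustrative snippet (not part of scapegoat itself): the pattern this
// inspection matches and the alternative its warning message recommends.
object AvoidSizeNotEqualsZeroExample {
  val xs: List[Int] = List(1, 2, 3)

  // Reported by the inspection: `size`/`length` compared against 0 with `!=`.
  // For a List, size is O(n).
  val hasElementsSlow: Boolean = xs.size != 0

  // Preferred: nonEmpty is O(1) for practical collection implementations.
  val hasElementsFast: Boolean = xs.nonEmpty
}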
package scala.collection.scalameter.immutable.TreeBag
import org.scalameter.api._
object TreeBag_exists extends TreeBagBenchmark {
def sizes = Gen.range("size")(20000, 200000, 20000)
def funName: String = "exists{result:=false}"
def fun(bag: TreeBag_product.Bag[BigInt]): Unit = bag.exists(_ => false)
def listFun(list: List[BigInt]): Unit = list.exists(_ => false)
runBenchmark()
}
| sageserpent-open/multisets | src/test/scala/scala/collection/scalameter/immutable/TreeBag/TreeBag_exists.scala | Scala | bsd-3-clause | 400 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.kernel.protocol.v5
object HeaderBuilder {
/**
* Creates a new Header instance with the provided id and type.
* @param msgType The type of the message
* @param msgId (Optional) The unique identifier of the message, generates a
* random UUID if none is provided
* @return The new Header instance
*/
def create(
msgType: String,
msgId: UUID = java.util.UUID.randomUUID.toString
) = Header(
msgId,
SparkKernelInfo.username,
SparkKernelInfo.session,
msgType,
SparkKernelInfo.protocolVersion
)
/**
* Represents an "empty" header where the message type and id are blank.
*/
val empty = create("", "")
}
| kapil-malik/incubator-toree | protocol/src/main/scala/org/apache/toree/kernel/protocol/v5/HeaderBuilder.scala | Scala | apache-2.0 | 1,526 |
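// Illustrative usage sketch (not from the Toree sources): creating headers for
// kernel protocol messages. The message type strings are example values; any
// type string accepted by the protocol could be passed.
import org.apache.toree.kernel.protocol.v5.HeaderBuilder

object HeaderBuilderExample {
  // A random UUID is generated for the message id when none is supplied.
  val requestHeader = HeaderBuilder.create("execute_request")

  // An explicit message id can be supplied instead.
  val replyHeader = HeaderBuilder.create("execute_reply", java.util.UUID.randomUUID.toString)

  // A placeholder header with blank type and id.
  val blankHeader = HeaderBuilder.empty
}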
package com.github.sonenko.elastichelper
package helper
import java.io.File
import org.joda.time.DateTime
import org.specs2.mutable.Specification
import org.specs2.specification.Scope
import org.specs2.mock.Mockito
import scala.concurrent.duration._
import scala.util.{Success, Failure, Try}
import com.sksamuel.elastic4s.mappings.FieldType._
import com.sksamuel.elastic4s.ElasticClient
import com.sksamuel.elastic4s.ElasticDsl._
import com.github.sonenko.elastichelper.helper.json.HelperJsonProtocol
class ElasticHelperTest extends Specification {
implicit val duration = 10.seconds
sequential
case class A(value: String) extends Insert
object TestJsonJsonProtocol extends HelperJsonProtocol {
implicit val aF = jsonFormat1(A.apply)
}
import TestJsonJsonProtocol._
val client: ElasticClient = ElasticTestClient()
val instance = new ElasticHelper(client)
trait TestScope extends Scope {
val indexName = "index-name"
val dirName = "dir"
def createIndexIfNotExistsSync(): Unit =
if (!instance.isIndexExists(indexName).await) {
instance.createIndex(indexName, Map(
dirName -> List(
"value" typed StringType
)
)).await
instance.waitForRefresh()
}
}
"ElasticHelperTest.createIndex" should {
"create new index" in new TestScope {
if (!instance.isIndexExists(indexName).await)
instance.createIndex(indexName, Map(
dirName -> List(
"value" typed StringType
)
)
).await
instance.waitForRefresh()
instance.isIndexExists(indexName).await mustEqual true
instance.deleteIndex(indexName).await
}
"hate if index already exists" in new TestScope {
instance.createIndex(indexName).await
Try(instance.createIndex(indexName).await).isFailure mustEqual true
instance.deleteIndex(indexName).await
}
}
"ElasticHelperTest.isIndexExists" should {
"return true if index exists" in new TestScope {
createIndexIfNotExistsSync()
instance.isIndexExists(indexName).await mustEqual true
instance.deleteIndex(indexName).await
instance.waitForRefresh()
}
"return false if index not exists" in new TestScope {
instance.isIndexExists(indexName).await mustEqual false
}
}
"ElasticHelper.deleteIndex" should {
"delete index" in new TestScope {
createIndexIfNotExistsSync()
instance.deleteIndex(indexName).await
instance.waitForRefresh()
instance.isIndexExists(indexName).await shouldEqual false
}
"throw exception if index not exists" in new TestScope {
Try(instance.deleteIndex(indexName).await).toOption shouldEqual None
}
}
"ElasticHelper.insert" should {
"insert new record" in new TestScope {
createIndexIfNotExistsSync()
val a = A("hello there")
instance.insert(indexName, dirName, a).await
instance.waitForRefresh(indexName)
instance.list[A](indexName, dirName).await mustEqual List(a)
instance.deleteIndex(indexName).await
instance.waitForRefresh()
}
}
"ElasticHelper.list" should {
"get all records" in new TestScope {
createIndexIfNotExistsSync()
val recs = (1 to 20).toList.map(x => A(s"hello-$x"))
recs.foreach{ x =>
instance.insert(indexName, dirName, x).await
}
instance.waitForRefresh(indexName)
instance.list[A](indexName, dirName).await.toSet mustEqual recs.toSet
instance.deleteIndex(indexName).await
instance.waitForRefresh()
}
}
"ElasticHelper.dump" should {
"save dump in location specified by config" in new TestScope {
createIndexIfNotExistsSync()
val dumpName = "dump-name" + DateTime.now.toString("YYYY-MM-dd_mm-ss") + "1"
instance.dump(dumpName)
val file = new File(ElasticHelper.dumpsLocation)
file.exists() mustEqual true
file.isDirectory mustEqual true
file.delete()
instance.deleteIndex(indexName).await
instance.waitForRefresh()
}
}
"ElasticHelper.restoreDump" should {
"restore previously dumped data" in new TestScope {
createIndexIfNotExistsSync()
val dumpName = "dump-name" + DateTime.now.toString("YYYY-MM-dd_mm-ss") + "2"
val recs = (1 to 20).toList.map(x => A(s"hello-$x"))
recs.foreach{ x =>
instance.insert(indexName, dirName, x).await
}
instance.waitForRefresh(indexName)
instance.dump(dumpName)
(21 to 40).toList.map(x => instance.insert(indexName, dirName, A(s"hello-$x")).await)
instance.waitForRefresh(indexName)
RichFuture(client.close(indexName)).await
instance.restoreDump(dumpName, indexName)
instance.waitForRefresh(indexName)
Thread.sleep(3000)
instance.list[A](indexName, dirName).await.toSet mustEqual recs.toSet
instance.deleteIndex(indexName).await
instance.waitForRefresh()
}
}
}
| sonenko/elastic-helper | helper/src/test/scala/com/github/sonenko/elastichelper/helper/ElasticHelperTest.scala | Scala | apache-2.0 | 4,922 |
package controllers
import java.io.{BufferedOutputStream, File, FileOutputStream}
import java.util.zip.Deflater
import akka.actor.ActorSystem
import akka.stream.Materializer
import com.mohiva.play.silhouette.api.Silhouette
import com.scalableminds.util.accesscontext.{DBAccessContext, GlobalAccessContext}
import com.scalableminds.util.io.{NamedEnumeratorStream, ZipIO}
import com.scalableminds.util.tools.{Fox, FoxImplicits, TextUtils}
import com.scalableminds.webknossos.datastore.SkeletonTracing.{SkeletonTracing, SkeletonTracingOpt, SkeletonTracings}
import com.scalableminds.webknossos.datastore.VolumeTracing.{VolumeTracing, VolumeTracingOpt, VolumeTracings}
import com.scalableminds.webknossos.datastore.helpers.ProtoGeometryImplicits
import com.scalableminds.webknossos.datastore.models.datasource.{
AbstractSegmentationLayer,
DataLayerLike,
GenericDataSource,
SegmentationLayer
}
import com.scalableminds.webknossos.tracingstore.tracings.TracingType
import com.scalableminds.webknossos.tracingstore.tracings.volume.VolumeTracingDefaults
import com.typesafe.scalalogging.LazyLogging
import io.swagger.annotations._
import javax.inject.Inject
import models.analytics.{AnalyticsService, DownloadAnnotationEvent, UploadAnnotationEvent}
import models.annotation.AnnotationState._
import models.annotation._
import models.annotation.nml.NmlResults.{NmlParseResult, NmlParseSuccess}
import models.annotation.nml.{NmlResults, NmlWriter}
import models.binary.{DataSet, DataSetDAO, DataSetService}
import models.organization.OrganizationDAO
import models.project.ProjectDAO
import models.task._
import models.user._
import oxalis.security.WkEnv
import play.api.i18n.{Messages, MessagesProvider}
import play.api.libs.Files.{TemporaryFile, TemporaryFileCreator}
import play.api.libs.json.Json
import play.api.mvc.{Action, AnyContent, MultipartFormData}
import utils.ObjectId
import scala.concurrent.{ExecutionContext, Future}
@Api
class AnnotationIOController @Inject()(
nmlWriter: NmlWriter,
annotationDAO: AnnotationDAO,
projectDAO: ProjectDAO,
dataSetDAO: DataSetDAO,
organizationDAO: OrganizationDAO,
dataSetService: DataSetService,
userService: UserService,
taskDAO: TaskDAO,
taskTypeDAO: TaskTypeDAO,
tracingStoreService: TracingStoreService,
temporaryFileCreator: TemporaryFileCreator,
annotationService: AnnotationService,
analyticsService: AnalyticsService,
sil: Silhouette[WkEnv],
provider: AnnotationInformationProvider,
annotationUploadService: AnnotationUploadService)(implicit ec: ExecutionContext, val materializer: Materializer)
extends Controller
with FoxImplicits
with ProtoGeometryImplicits
with LazyLogging {
implicit val actorSystem: ActorSystem = ActorSystem()
@ApiOperation(
value =
"""Upload NML(s) or ZIP(s) of NML(s) to create a new explorative annotation.
Expects:
- As file attachment:
- Any number of NML files or ZIP files containing NMLs, optionally with volume data ZIPs referenced from an NML in a ZIP
- If multiple annotations are uploaded, they are merged into one.
- This is not supported if any of the annotations has multiple volume layers.
- As form parameter: createGroupForEachFile [String] should be one of "true" or "false"
- If "true": in merged annotation, create tree group wrapping the trees of each file
- If "false": in merged annotation, rename trees with the respective file name as prefix""",
nickname = "annotationUpload"
)
@ApiResponses(
Array(
new ApiResponse(
code = 200,
message =
"JSON object containing annotation information about the newly created annotation, including the assigned id"),
new ApiResponse(code = 400, message = badRequestLabel)
))
def upload: Action[MultipartFormData[TemporaryFile]] = sil.SecuredAction.async(parse.multipartFormData) {
implicit request =>
log() {
val shouldCreateGroupForEachFile: Boolean =
request.body.dataParts("createGroupForEachFile").headOption.contains("true")
val overwritingDataSetName: Option[String] =
request.body.dataParts.get("datasetName").flatMap(_.headOption)
val attachedFiles = request.body.files.map(f => (f.ref.path.toFile, f.filename))
val parsedFiles =
annotationUploadService.extractFromFiles(attachedFiles, useZipName = true, overwritingDataSetName)
      val parsedFilesWrapped =
        annotationUploadService.wrapOrPrefixTrees(parsedFiles.parseResults, shouldCreateGroupForEachFile)
      val parseResultsFiltered: List[NmlParseResult] = parsedFilesWrapped.filter(_.succeeded)
if (parseResultsFiltered.isEmpty) {
returnError(parsedFiles)
} else {
for {
parseSuccesses <- Fox.serialCombined(parseResultsFiltered)(r => r.toSuccessBox)
name = nameForUploaded(parseResultsFiltered.map(_.fileName))
description = descriptionForNMLs(parseResultsFiltered.map(_.description))
_ <- assertNonEmpty(parseSuccesses)
skeletonTracings = parseSuccesses.flatMap(_.skeletonTracing)
// Create a list of volume layers for each uploaded (non-skeleton-only) annotation.
// This is what determines the merging strategy for volume layers
volumeLayersGroupedRaw = parseSuccesses.map(_.volumeLayers).filter(_.nonEmpty)
dataSet <- findDataSetForUploadedAnnotations(skeletonTracings,
volumeLayersGroupedRaw.flatten.map(_.tracing))
volumeLayersGrouped <- adaptVolumeTracingsToFallbackLayer(volumeLayersGroupedRaw, dataSet)
tracingStoreClient <- tracingStoreService.clientFor(dataSet)
mergedVolumeLayers <- mergeAndSaveVolumeLayers(volumeLayersGrouped,
tracingStoreClient,
parsedFiles.otherFiles)
mergedSkeletonLayers <- mergeAndSaveSkeletonLayers(skeletonTracings, tracingStoreClient)
annotation <- annotationService.createFrom(request.identity,
dataSet,
mergedSkeletonLayers ::: mergedVolumeLayers,
AnnotationType.Explorational,
name,
description)
_ = analyticsService.track(UploadAnnotationEvent(request.identity, annotation))
} yield
JsonOk(
Json.obj("annotation" -> Json.obj("typ" -> annotation.typ, "id" -> annotation.id)),
Messages("nml.file.uploadSuccess")
)
}
}
}
private def mergeAndSaveVolumeLayers(volumeLayersGrouped: Seq[List[UploadedVolumeLayer]],
client: WKRemoteTracingStoreClient,
otherFiles: Map[String, TemporaryFile]): Fox[List[AnnotationLayer]] = {
if (volumeLayersGrouped.isEmpty) return Fox.successful(List())
if (volumeLayersGrouped.length > 1 && volumeLayersGrouped.exists(_.length > 1))
return Fox.failure("Cannot merge multiple annotations that each have multiple volume layers.")
if (volumeLayersGrouped.length == 1) { // Just one annotation was uploaded, keep its layers separate
Fox.serialCombined(volumeLayersGrouped.toList.flatten) { uploadedVolumeLayer =>
for {
savedTracingId <- client.saveVolumeTracing(uploadedVolumeLayer.tracing,
uploadedVolumeLayer.getDataZipFrom(otherFiles))
} yield
AnnotationLayer(
savedTracingId,
AnnotationLayerType.Volume,
uploadedVolumeLayer.name
)
}
    } else { // Multiple annotations with volume layers (but at most one each) were uploaded; merge those volume layers into one
val uploadedVolumeLayersFlat = volumeLayersGrouped.toList.flatten
for {
mergedTracingId <- client.mergeVolumeTracingsByContents(
VolumeTracings(uploadedVolumeLayersFlat.map(v => VolumeTracingOpt(Some(v.tracing)))),
uploadedVolumeLayersFlat.map(v => v.getDataZipFrom(otherFiles)),
persistTracing = true
)
} yield
List(
AnnotationLayer(
mergedTracingId,
AnnotationLayerType.Volume,
None
))
}
}
private def mergeAndSaveSkeletonLayers(skeletonTracings: List[SkeletonTracing],
tracingStoreClient: WKRemoteTracingStoreClient): Fox[List[AnnotationLayer]] = {
if (skeletonTracings.isEmpty) return Fox.successful(List())
for {
mergedTracingId <- tracingStoreClient.mergeSkeletonTracingsByContents(
SkeletonTracings(skeletonTracings.map(t => SkeletonTracingOpt(Some(t)))),
persistTracing = true)
} yield List(AnnotationLayer(mergedTracingId, AnnotationLayerType.Skeleton, None))
}
private def assertNonEmpty(parseSuccesses: List[NmlParseSuccess]) =
bool2Fox(parseSuccesses.exists(p => p.skeletonTracing.nonEmpty || p.volumeLayers.nonEmpty)) ?~> "nml.file.noFile"
private def findDataSetForUploadedAnnotations(
skeletonTracings: List[SkeletonTracing],
volumeTracings: List[VolumeTracing])(implicit mp: MessagesProvider, ctx: DBAccessContext): Fox[DataSet] =
for {
dataSetName <- assertAllOnSameDataSet(skeletonTracings, volumeTracings) ?~> "nml.file.differentDatasets"
organizationNameOpt <- assertAllOnSameOrganization(skeletonTracings, volumeTracings) ?~> "nml.file.differentDatasets"
organizationIdOpt <- Fox.runOptional(organizationNameOpt) {
organizationDAO.findOneByName(_)(GlobalAccessContext).map(_._id)
} ?~> Messages("organization.notFound", organizationNameOpt.getOrElse("")) ~> NOT_FOUND
organizationId <- Fox.fillOption(organizationIdOpt) {
dataSetDAO.getOrganizationForDataSet(dataSetName)(GlobalAccessContext)
} ?~> Messages("dataSet.noAccess", dataSetName) ~> FORBIDDEN
dataSet <- dataSetDAO.findOneByNameAndOrganization(dataSetName, organizationId) ?~> Messages(
"dataSet.noAccess",
dataSetName) ~> FORBIDDEN
} yield dataSet
private def nameForUploaded(fileNames: Seq[String]) =
if (fileNames.size == 1)
fileNames.headOption.map(_.replaceAll("\\\\.nml$", "").replaceAll("\\\\.zip", ""))
else
None
private def descriptionForNMLs(descriptions: Seq[Option[String]]) =
if (descriptions.size == 1) descriptions.headOption.flatten.getOrElse("") else ""
private def returnError(zipParseResult: NmlResults.MultiNmlParseResult)(implicit messagesProvider: MessagesProvider) =
if (zipParseResult.containsFailure) {
val errors = zipParseResult.parseResults.flatMap {
case result: NmlResults.NmlParseFailure =>
Some("error" -> Messages("nml.file.invalid", result.fileName, result.error))
case _ => None
}
Future.successful(JsonBadRequest(errors))
} else {
Future.successful(JsonBadRequest(Messages("nml.file.noFile")))
}
private def assertAllOnSameDataSet(skeletons: List[SkeletonTracing], volumes: List[VolumeTracing]): Fox[String] =
for {
dataSetName <- volumes.headOption.map(_.dataSetName).orElse(skeletons.headOption.map(_.dataSetName)).toFox
_ <- bool2Fox(skeletons.forall(_.dataSetName == dataSetName))
_ <- bool2Fox(volumes.forall(_.dataSetName == dataSetName))
} yield dataSetName
private def assertAllOnSameOrganization(skeletons: List[SkeletonTracing],
volumes: List[VolumeTracing]): Fox[Option[String]] = {
// Note that organizationNames are optional. Tracings with no organization attribute are ignored here
val organizationNames = skeletons.flatMap(_.organizationName) ::: volumes.flatMap(_.organizationName)
for {
_ <- Fox.runOptional(organizationNames.headOption)(name => bool2Fox(organizationNames.forall(_ == name)))
} yield organizationNames.headOption
}
private def adaptVolumeTracingsToFallbackLayer(volumeLayersGrouped: List[List[UploadedVolumeLayer]],
dataSet: DataSet): Fox[List[List[UploadedVolumeLayer]]] =
for {
dataSource <- dataSetService.dataSourceFor(dataSet).flatMap(_.toUsable)
allAdapted <- Fox.serialCombined(volumeLayersGrouped) { volumeLayers =>
Fox.serialCombined(volumeLayers) { volumeLayer =>
for {
tracingAdapted <- adaptPropertiesToFallbackLayer(volumeLayer.tracing, dataSource)
} yield volumeLayer.copy(tracing = tracingAdapted)
}
}
} yield allAdapted
private def adaptPropertiesToFallbackLayer[T <: DataLayerLike](volumeTracing: VolumeTracing,
dataSource: GenericDataSource[T]): Fox[VolumeTracing] =
for {
_ <- Fox.successful(())
fallbackLayer = dataSource.dataLayers.flatMap {
case layer: SegmentationLayer if volumeTracing.fallbackLayer contains layer.name => Some(layer)
case layer: AbstractSegmentationLayer if volumeTracing.fallbackLayer contains layer.name => Some(layer)
case _ => None
}.headOption
} yield {
volumeTracing.copy(
boundingBox =
if (volumeTracing.boundingBox.isEmpty) boundingBoxToProto(dataSource.boundingBox)
else volumeTracing.boundingBox,
elementClass = fallbackLayer
.map(layer => elementClassToProto(layer.elementClass))
.getOrElse(elementClassToProto(VolumeTracingDefaults.elementClass)),
fallbackLayer = fallbackLayer.map(_.name),
largestSegmentId = fallbackLayer.map(_.largestSegmentId).getOrElse(VolumeTracingDefaults.largestSegmentId)
)
}
@ApiOperation(value = "Download an annotation as NML/ZIP", nickname = "annotationDownload")
@ApiResponses(
Array(
new ApiResponse(
code = 200,
message =
"NML or Zip file containing skeleton and/or volume data of this annotation. In case of Compound annotations, multiple such annotations wrapped in another zip"
),
new ApiResponse(code = 400, message = badRequestLabel)
))
def download(
@ApiParam(value =
"Type of the annotation, one of Task, Explorational, CompoundTask, CompoundProject, CompoundTaskType",
example = "Explorational") typ: String,
@ApiParam(
value =
"For Task and Explorational annotations, id is an annotation id. For CompoundTask, id is a task id. For CompoundProject, id is a project id. For CompoundTaskType, id is a task type id")
id: String,
skeletonVersion: Option[Long],
volumeVersion: Option[Long],
skipVolumeData: Option[Boolean]): Action[AnyContent] =
sil.UserAwareAction.async { implicit request =>
logger.trace(s"Requested download for annotation: $typ/$id")
for {
identifier <- AnnotationIdentifier.parse(typ, id)
_ = request.identity.foreach(user => analyticsService.track(DownloadAnnotationEvent(user, id, typ)))
result <- identifier.annotationType match {
case AnnotationType.View => Fox.failure("Cannot download View annotation")
case AnnotationType.CompoundProject => downloadProject(id, request.identity, skipVolumeData.getOrElse(false))
case AnnotationType.CompoundTask => downloadTask(id, request.identity, skipVolumeData.getOrElse(false))
case AnnotationType.CompoundTaskType =>
downloadTaskType(id, request.identity, skipVolumeData.getOrElse(false))
case _ =>
downloadExplorational(id,
typ,
request.identity,
skeletonVersion,
volumeVersion,
skipVolumeData.getOrElse(false))
}
} yield result
}
// TODO: select versions per layer
private def downloadExplorational(annotationId: String,
typ: String,
issuingUser: Option[User],
skeletonVersion: Option[Long],
volumeVersion: Option[Long],
skipVolumeData: Boolean)(implicit ctx: DBAccessContext) = {
def skeletonToTemporaryFile(dataSet: DataSet,
annotation: Annotation,
organizationName: String): Fox[TemporaryFile] =
for {
tracingStoreClient <- tracingStoreService.clientFor(dataSet)
fetchedAnnotationLayers <- Fox.serialCombined(annotation.skeletonAnnotationLayers)(
tracingStoreClient.getSkeletonTracing(_, skeletonVersion))
user <- userService.findOneById(annotation._user, useCache = true)
taskOpt <- Fox.runOptional(annotation._task)(taskDAO.findOne)
nmlStream = nmlWriter.toNmlStream(fetchedAnnotationLayers,
Some(annotation),
dataSet.scale,
None,
organizationName,
Some(user),
taskOpt)
nmlTemporaryFile = temporaryFileCreator.create()
temporaryFileStream = new BufferedOutputStream(new FileOutputStream(nmlTemporaryFile))
_ <- NamedEnumeratorStream("", nmlStream).writeTo(temporaryFileStream)
_ = temporaryFileStream.close()
} yield nmlTemporaryFile
def volumeOrHybridToTemporaryFile(dataset: DataSet,
annotation: Annotation,
name: String,
organizationName: String): Fox[TemporaryFile] =
for {
tracingStoreClient <- tracingStoreService.clientFor(dataset)
fetchedVolumeLayers: List[FetchedAnnotationLayer] <- Fox.serialCombined(annotation.volumeAnnotationLayers) {
volumeAnnotationLayer =>
tracingStoreClient.getVolumeTracing(volumeAnnotationLayer, volumeVersion, skipVolumeData)
}
fetchedSkeletonLayers: List[FetchedAnnotationLayer] <- Fox.serialCombined(annotation.skeletonAnnotationLayers) {
skeletonAnnotationLayer =>
tracingStoreClient.getSkeletonTracing(skeletonAnnotationLayer, skeletonVersion)
}
user <- userService.findOneById(annotation._user, useCache = true)
taskOpt <- Fox.runOptional(annotation._task)(taskDAO.findOne)
nmlStream = nmlWriter.toNmlStream(fetchedSkeletonLayers ::: fetchedVolumeLayers,
Some(annotation),
dataset.scale,
None,
organizationName,
Some(user),
taskOpt)
temporaryFile = temporaryFileCreator.create()
zipper = ZipIO.startZip(new BufferedOutputStream(new FileOutputStream(new File(temporaryFile.path.toString))))
_ <- zipper.addFileFromEnumerator(name + ".nml", nmlStream)
_ = fetchedVolumeLayers.zipWithIndex.map {
case (volumeLayer, index) =>
volumeLayer.volumeDataOpt.foreach { volumeData =>
val dataZipName = volumeLayer.volumeDataZipName(index, fetchedVolumeLayers.length == 1)
zipper.stream.setLevel(Deflater.BEST_SPEED)
zipper.addFileFromBytes(dataZipName, volumeData)
}
}
_ = zipper.close()
} yield temporaryFile
def annotationToTemporaryFile(dataSet: DataSet,
annotation: Annotation,
name: String,
organizationName: String): Fox[TemporaryFile] =
if (annotation.tracingType == TracingType.skeleton)
skeletonToTemporaryFile(dataSet, annotation, organizationName)
else
volumeOrHybridToTemporaryFile(dataSet, annotation, name, organizationName)
def exportExtensionForAnnotation(annotation: Annotation): String =
if (annotation.tracingType == TracingType.skeleton)
".nml"
else
".zip"
def exportMimeTypeForAnnotation(annotation: Annotation): String =
if (annotation.tracingType == TracingType.skeleton)
"application/xml"
else
"application/zip"
for {
annotation <- provider.provideAnnotation(typ, annotationId, issuingUser) ~> NOT_FOUND
restrictions <- provider.restrictionsFor(typ, annotationId)
name <- provider.nameFor(annotation) ?~> "annotation.name.impossible"
fileExtension = exportExtensionForAnnotation(annotation)
fileName = name + fileExtension
mimeType = exportMimeTypeForAnnotation(annotation)
_ <- restrictions.allowDownload(issuingUser) ?~> "annotation.download.notAllowed" ~> FORBIDDEN
dataSet <- dataSetDAO.findOne(annotation._dataSet)(GlobalAccessContext) ?~> "dataSet.notFoundForAnnotation" ~> NOT_FOUND
organization <- organizationDAO.findOne(dataSet._organization)(GlobalAccessContext) ?~> "organization.notFound" ~> NOT_FOUND
temporaryFile <- annotationToTemporaryFile(dataSet, annotation, name, organization.name)
} yield {
Ok.sendFile(temporaryFile, inline = false)
.as(mimeType)
.withHeaders(CONTENT_DISPOSITION ->
s"attachment;filename=${'"'}$fileName${'"'}")
}
}
private def downloadProject(projectId: String, userOpt: Option[User], skipVolumeData: Boolean)(
implicit ctx: DBAccessContext,
m: MessagesProvider) =
for {
user <- userOpt.toFox ?~> Messages("notAllowed") ~> FORBIDDEN
projectIdValidated <- ObjectId.parse(projectId)
project <- projectDAO.findOne(projectIdValidated) ?~> Messages("project.notFound", projectId) ~> NOT_FOUND
_ <- Fox.assertTrue(userService.isTeamManagerOrAdminOf(user, project._team)) ?~> "notAllowed" ~> FORBIDDEN
annotations <- annotationDAO.findAllFinishedForProject(projectIdValidated)
zip <- annotationService.zipAnnotations(annotations, project.name, skipVolumeData)
} yield {
val file = new File(zip.path.toString)
Ok.sendFile(file, inline = false, fileName = _ => Some(TextUtils.normalize(project.name + "_nmls.zip")))
}
private def downloadTask(taskId: String, userOpt: Option[User], skipVolumeData: Boolean)(
implicit ctx: DBAccessContext,
m: MessagesProvider) = {
def createTaskZip(task: Task): Fox[TemporaryFile] = annotationService.annotationsFor(task._id).flatMap {
annotations =>
val finished = annotations.filter(_.state == Finished)
annotationService.zipAnnotations(finished, task._id.toString, skipVolumeData)
}
for {
user <- userOpt.toFox ?~> Messages("notAllowed") ~> FORBIDDEN
task <- taskDAO.findOne(ObjectId(taskId)).toFox ?~> Messages("task.notFound") ~> NOT_FOUND
project <- projectDAO.findOne(task._project) ?~> Messages("project.notFound") ~> NOT_FOUND
_ <- Fox.assertTrue(userService.isTeamManagerOrAdminOf(user, project._team)) ?~> Messages("notAllowed") ~> FORBIDDEN
zip <- createTaskZip(task)
} yield {
val file = new File(zip.path.toString)
Ok.sendFile(file, inline = false, fileName = _ => Some(TextUtils.normalize(task._id.toString + "_nmls.zip")))
}
}
private def downloadTaskType(taskTypeId: String, userOpt: Option[User], skipVolumeData: Boolean)(
implicit ctx: DBAccessContext,
m: MessagesProvider) = {
def createTaskTypeZip(taskType: TaskType) =
for {
tasks <- taskDAO.findAllByTaskType(taskType._id)
annotations <- Fox
.serialCombined(tasks)(task => annotationService.annotationsFor(task._id))
.map(_.flatten)
.toFox
finishedAnnotations = annotations.filter(_.state == Finished)
zip <- annotationService.zipAnnotations(finishedAnnotations, taskType.summary, skipVolumeData)
} yield zip
for {
user <- userOpt.toFox ?~> Messages("notAllowed") ~> FORBIDDEN
taskTypeIdValidated <- ObjectId.parse(taskTypeId) ?~> "taskType.id.invalid"
taskType <- taskTypeDAO.findOne(taskTypeIdValidated) ?~> "taskType.notFound" ~> NOT_FOUND
_ <- Fox.assertTrue(userService.isTeamManagerOrAdminOf(user, taskType._team)) ?~> "notAllowed" ~> FORBIDDEN
zip <- createTaskTypeZip(taskType)
} yield {
val file = new File(zip.path.toString)
Ok.sendFile(file, inline = false, fileName = _ => Some(TextUtils.normalize(taskType.summary + "_nmls.zip")))
}
}
}
| scalableminds/webknossos | app/controllers/AnnotationIOController.scala | Scala | agpl-3.0 | 25,188 |
package com.github.log0ymxm
import org.scalatest._
import scala.math.BigDecimal.RoundingMode
import org.scalacheck.Properties
import org.scalacheck.Prop.forAll
class OverpunchSpec extends FunSuite with Matchers {
test("decode no decimal positive") {
Overpunch.decode("12345{") shouldEqual BigDecimal("1234.50")
}
test("decode no decimal negative") {
Overpunch.decode("12345}") shouldEqual BigDecimal("-1234.50")
}
test("decode 4 decimal negative") {
Overpunch.decode("12345}", decimals = 4) shouldEqual BigDecimal("-12.3450")
}
test("decode 0 decimal negative") {
Overpunch.decode("12345}", decimals = 0) shouldEqual BigDecimal("-123450")
}
test("encode no decimal positive") {
Overpunch.encode(BigDecimal("1234.50")) shouldEqual "12345{"
}
test("encode no decimal negative") {
Overpunch.encode(BigDecimal("-1234.50")) shouldEqual "12345}"
}
test("encode 4 decimal negative") {
Overpunch.encode(BigDecimal("-12.3450"), decimals = 4) shouldEqual "12345}"
}
test("encode 0 decimal negative") {
Overpunch.encode(BigDecimal("-123450"), decimals = 0) shouldEqual "12345}"
}
test("encode 2 decimal round default") {
Overpunch.encode(BigDecimal("12.3450"), decimals = 2) shouldEqual "123E"
}
test("encode 2 decimal negative round default") {
Overpunch.encode(BigDecimal("-12.3450"), decimals = 2) shouldEqual "123N"
}
test("encode 2 decimal round custom") {
Overpunch.encode(BigDecimal("12.3450"), decimals = 2, rounding = RoundingMode.FLOOR) shouldEqual "123D"
}
test("encode 2 decimal negative round custom") {
Overpunch.encode(BigDecimal("-12.3450"), decimals = 2, rounding = RoundingMode.FLOOR) shouldEqual "123N"
}
test("encode integer") {
Overpunch.encode(150, decimals = 0) shouldEqual "15{"
}
}
import org.scalacheck.Gen
import org.scalacheck.Arbitrary.arbitrary
object OverpunchCheckSpec extends Properties("BigDecimal") {
// TODO these don't pass yet, need to handle cases where these more exhaustive tests fail
// property("encode_decode") = forAll { (n: BigDecimal) =>
// Overpunch.decode(Overpunch.encode(n, decimals=n.precision), decimals=n.precision) == n
// }
val overpunchGen = for {
num <- arbitrary[Int]
punch <- Gen.oneOf('{', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', '}', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R')
} yield List(Math.abs(num), punch).mkString
// property("decode_encode") = forAll(overpunchGen) { (s) =>
// Overpunch.encode(Overpunch.decode(s)) == s
// }
}
| log0ymxm/scala-overpunch | src/test/scala/com/github/log0ymxm/OverpunchSpec.scala | Scala | mit | 2,546 |
package sri.universal.navigation.reducer
import scala.scalajs.js
@js.native
trait NavigationFindReducer extends js.Object {
  def apply(reducers: js.Array[NavigationReducer], action: js.Object = ???): NavigationReducer = js.native
}
| chandu0101/sri | universal/src/main/scala/sri/universal/navigation/reducer/NavigationFindReducer.scala | Scala | apache-2.0 | 238 |
package com.datastax.spark.connector.cql
import java.io.IOException
import java.net.InetAddress
import scala.collection.JavaConversions._
import scala.language.reflectiveCalls
import org.apache.spark.SparkConf
import com.datastax.driver.core.{Cluster, Host, Session}
import com.datastax.spark.connector.cql.CassandraConnectorConf.CassandraSSLConf
import com.datastax.spark.connector.util.SerialShutdownHooks
import com.datastax.spark.connector.util.Logging
/** Provides and manages connections to Cassandra.
*
* A `CassandraConnector` instance is serializable and
* can be safely sent over network,
* because it automatically reestablishes the connection
* to the same cluster after deserialization. Internally it saves
* a list of all nodes in the cluster, so a connection can be established
* even if the host given in the initial config is down.
*
* Multiple `CassandraConnector`s in the same JVM connected to the same
* Cassandra cluster will share a single underlying `Cluster` object.
* `CassandraConnector` will close the underlying `Cluster` object automatically
* whenever it is not used i.e. no `Session` or `Cluster` is open for longer
* than `spark.cassandra.connection.keep_alive_ms` property value.
*
* A `CassandraConnector` object is configured from [[CassandraConnectorConf]] object which
* can be either given explicitly or automatically configured from [[org.apache.spark.SparkConf SparkConf]].
* The connection options are:
* - `spark.cassandra.connection.host`: contact points to connect to the Cassandra cluster, defaults to spark master host
* - `spark.cassandra.connection.port`: Cassandra native port, defaults to 9042
* - `spark.cassandra.connection.factory`: name of a Scala module or class implementing [[CassandraConnectionFactory]] that allows to plugin custom code for connecting to Cassandra
* - `spark.cassandra.connection.keep_alive_ms`: how long to keep unused connection before closing it (default 250 ms)
* - `spark.cassandra.connection.timeout_ms`: how long to wait for connection to the Cassandra cluster (default 5 s)
* - `spark.cassandra.connection.reconnection_delay_ms.min`: initial delay determining how often to try to reconnect to a dead node (default 1 s)
* - `spark.cassandra.connection.reconnection_delay_ms.max`: final delay determining how often to try to reconnect to a dead node (default 60 s)
* - `spark.cassandra.auth.username`: login for password authentication
* - `spark.cassandra.auth.password`: password for password authentication
* - `spark.cassandra.auth.conf.factory`: name of a Scala module or class implementing [[AuthConfFactory]] that allows to plugin custom authentication configuration
* - `spark.cassandra.query.retry.count`: how many times to reattempt a failed query (default 10)
* - `spark.cassandra.query.retry.delay`: the delay between subsequent retries
* - `spark.cassandra.read.timeout_ms`: maximum period of time to wait for a read to return
* - `spark.cassandra.connection.ssl.enabled`: enable secure connection to Cassandra cluster
* - `spark.cassandra.connection.ssl.trustStore.path`: path for the trust store being used
* - `spark.cassandra.connection.ssl.trustStore.password`: trust store password
* - `spark.cassandra.connection.ssl.trustStore.type`: trust store type (default JKS)
* - `spark.cassandra.connection.ssl.protocol`: SSL protocol (default TLS)
* - `spark.cassandra.connection.ssl.enabledAlgorithms`: SSL cipher suites (default TLS_RSA_WITH_AES_128_CBC_SHA, TLS_RSA_WITH_AES_256_CBC_SHA)
*/
class CassandraConnector(conf: CassandraConnectorConf)
extends Serializable with Logging {
import com.datastax.spark.connector.cql.CassandraConnector._
private[this] var _config = conf
/** Known cluster hosts in the connected datacenter.*/
lazy val hosts: Set[InetAddress] =
// wrapped in a session, so we get full lists of hosts,
// not only those explicitly passed in the conf
withSessionDo { _ => _config.hosts }
/** Configured native port */
def port = _config.port
/** Configured authentication options */
def authConf = _config.authConf
/** Connection configurator */
def connectionFactory = _config.connectionFactory
/** Returns a shared session to Cassandra and increases the internal open
* reference counter. It does not release the session automatically,
* so please remember to close it after use. Closing a shared session
* decreases the session reference counter. If the reference count drops to zero,
* the session may be physically closed. */
def openSession() = {
val session = sessionCache.acquire(_config)
try {
val allNodes = session.getCluster.getMetadata.getAllHosts.toSet
val myNodes = LocalNodeFirstLoadBalancingPolicy
.nodesInTheSameDC(_config.hosts, allNodes)
.map(_.getAddress)
_config = _config.copy(hosts = myNodes)
// We need a separate SessionProxy here to protect against double closing the session.
// Closing SessionProxy is not really closing the session, because sessions are shared.
// Instead, refcount is decreased. But double closing the same Session reference must not
// decrease refcount twice. There is a guard in SessionProxy
// so any subsequent close calls on the same SessionProxy are no-ops.
SessionProxy.wrapWithCloseAction(session)(sessionCache.release(_, _config.keepAliveMillis))
}
catch {
case e: Throwable =>
sessionCache.release(session, 0)
throw e
}
}
/** Allows to use Cassandra `Session` in a safe way without
* risk of forgetting to close it. The `Session` object obtained through this method
* is a proxy to a shared, single `Session` associated with the cluster.
* Internally, the shared underlying `Session` will be closed shortly after all the proxies
* are closed. */
def withSessionDo[T](code: Session => T): T = {
closeResourceAfterUse(openSession()) { session =>
code(SessionProxy.wrap(session))
}
}
/** Allows to use Cassandra `Cluster` in a safe way without
* risk of forgetting to close it. Multiple, concurrent calls might share the same
* `Cluster`. The `Cluster` will be closed when not in use for some time.
* It is not recommended to obtain sessions from this method. Use [[withSessionDo]]
* instead which allows for proper session sharing. */
def withClusterDo[T](code: Cluster => T): T = {
withSessionDo { session =>
code(session.getCluster)
}
}
/** Returns the local node, if it is one of the cluster nodes. Otherwise returns any node. */
def closestLiveHost: Host = {
withClusterDo { cluster =>
LocalNodeFirstLoadBalancingPolicy
.sortNodesByStatusAndProximity(_config.hosts, cluster.getMetadata.getAllHosts.toSet)
.filter(_.isUp)
.headOption
.getOrElse(throw new IOException("Cannot connect to Cassandra: No live hosts found"))
}
}
/** Automatically closes resource after use. Handy for closing streams, files, sessions etc.
* Similar to try-with-resources in Java 7. */
def closeResourceAfterUse[T, C <: { def close() }](closeable: C)(code: C => T): T =
try code(closeable) finally {
closeable.close()
}
}
object CassandraConnector extends Logging {
private[cql] val sessionCache = new RefCountedCache[CassandraConnectorConf, Session](
createSession, destroySession, alternativeConnectionConfigs)
private def createSession(conf: CassandraConnectorConf): Session = {
lazy val endpointsStr = conf.hosts.map(_.getHostAddress).mkString("{", ", ", "}") + ":" + conf.port
logDebug(s"Attempting to open native connection to Cassandra at $endpointsStr")
val cluster = conf.connectionFactory.createCluster(conf)
try {
val clusterName = cluster.getMetadata.getClusterName
logInfo(s"Connected to Cassandra cluster: $clusterName")
cluster.connect()
}
catch {
case e: Throwable =>
cluster.close()
throw new IOException(s"Failed to open native connection to Cassandra at $endpointsStr", e)
}
}
private def destroySession(session: Session) {
val cluster = session.getCluster
val clusterName = cluster.getMetadata.getClusterName
session.close()
cluster.close()
PreparedStatementCache.remove(cluster)
logInfo(s"Disconnected from Cassandra cluster: $clusterName")
}
// This is to ensure the Cluster can be found by requesting for any of its hosts, or all hosts together.
private def alternativeConnectionConfigs(conf: CassandraConnectorConf, session: Session): Set[CassandraConnectorConf] = {
val cluster = session.getCluster
val hosts = LocalNodeFirstLoadBalancingPolicy.nodesInTheSameDC(conf.hosts, cluster.getMetadata.getAllHosts.toSet)
hosts.map(h => conf.copy(hosts = Set(h.getAddress))) + conf.copy(hosts = hosts.map(_.getAddress))
}
SerialShutdownHooks.add("Clearing session cache for C* connector")(() => {
sessionCache.shutdown()
})
/** Returns a CassandraConnector created from properties found in the [[org.apache.spark.SparkConf SparkConf]] object */
def apply(conf: SparkConf): CassandraConnector = {
new CassandraConnector(CassandraConnectorConf(conf))
}
/** Returns a CassandraConnector created from explicitly given connection configuration. */
def apply(hosts: Set[InetAddress],
port: Int = CassandraConnectorConf.ConnectionPortParam.default,
authConf: AuthConf = NoAuthConf,
localDC: Option[String] = None,
keepAliveMillis: Int = CassandraConnectorConf.KeepAliveMillisParam.default,
minReconnectionDelayMillis: Int = CassandraConnectorConf.MinReconnectionDelayParam.default,
maxReconnectionDelayMillis: Int = CassandraConnectorConf.MaxReconnectionDelayParam.default,
queryRetryCount: Int = CassandraConnectorConf.QueryRetryParam.default,
connectTimeoutMillis: Int = CassandraConnectorConf.ConnectionTimeoutParam.default,
readTimeoutMillis: Int = CassandraConnectorConf.ReadTimeoutParam.default,
connectionFactory: CassandraConnectionFactory = DefaultConnectionFactory,
cassandraSSLConf: CassandraSSLConf = CassandraConnectorConf.DefaultCassandraSSLConf,
queryRetryDelay: CassandraConnectorConf.RetryDelayConf = CassandraConnectorConf.QueryRetryDelayParam.default) = {
val config = CassandraConnectorConf(
hosts = hosts,
port = port,
authConf = authConf,
localDC = localDC,
keepAliveMillis = keepAliveMillis,
minReconnectionDelayMillis = minReconnectionDelayMillis,
maxReconnectionDelayMillis = maxReconnectionDelayMillis,
queryRetryCount = queryRetryCount,
connectTimeoutMillis = connectTimeoutMillis,
readTimeoutMillis = readTimeoutMillis,
connectionFactory = connectionFactory,
cassandraSSLConf = cassandraSSLConf,
queryRetryDelay = queryRetryDelay
)
new CassandraConnector(config)
}
def evictCache() {
sessionCache.evict()
}
}
| ponkin/spark-cassandra-connector | spark-cassandra-connector/src/main/scala/com/datastax/spark/connector/cql/CassandraConnector.scala | Scala | apache-2.0 | 11,369 |
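// Illustrative usage sketch (not part of the connector sources): building a
// CassandraConnector from a SparkConf and running a statement through the
// shared session. The contact point, port and query below are assumptions
// chosen for the example.
import org.apache.spark.SparkConf
import com.datastax.spark.connector.cql.CassandraConnector

object CassandraConnectorExample {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setAppName("connector-example")
      .set("spark.cassandra.connection.host", "127.0.0.1")
      .set("spark.cassandra.connection.port", "9042")

    val connector = CassandraConnector(conf)

    // The Session handed to the closure is a proxy to a shared session and is
    // released automatically when the closure returns.
    val releaseVersion = connector.withSessionDo { session =>
      session.execute("SELECT release_version FROM system.local").one().getString(0)
    }
    println(s"Connected to Cassandra $releaseVersion")
  }
}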
package asobu.dsl
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import scala.concurrent.Await
import org.specs2.specification.{After, Scope}
import scala.concurrent.duration.Duration
trait WithSystem extends Scope with After {
implicit val system = ActorSystem()
implicit val mat = ActorMaterializer()
def after = Await.result(system.terminate(), Duration.Inf)
}
| kailuowang/asobu | dsl/src/test/scala/asobu/dsl/WithSystem.scala | Scala | apache-2.0 | 392 |
package io.mpjsons.impl.serializer
import io.mpjsons.JsonTypeSerializer
import io.mpjsons.impl.SerializerFactory
import io.mpjsons.impl.util.{Context, TypesUtil}
import scala.reflect.runtime.universe._
class EitherSerializer[L, R](serializerFactory: SerializerFactory, tpe: Type, context: Context) extends JsonTypeSerializer[Either[L, R]] {
private val subtypes = TypesUtil.getDoubleSubElementsType(tpe)
val leftSerializer = serializerFactory.getSerializer(subtypes._1, context).asInstanceOf[JsonTypeSerializer[L]]
val rightSerializer = serializerFactory.getSerializer(subtypes._2, context).asInstanceOf[JsonTypeSerializer[R]]
override def serialize(obj: Either[L, R], jsonBuilder: StringBuilder): Unit = {
jsonBuilder.append('{')
if (obj.isLeft) {
jsonBuilder.append("\"left\":")
leftSerializer.serialize(obj.left.get, jsonBuilder)
} else {
jsonBuilder.append("\"right\":")
rightSerializer.serialize(obj.right.get, jsonBuilder)
}
jsonBuilder.append('}')
}
} | marpiec/mpjsons | src/main/scala/io/mpjsons/impl/serializer/EitherSerializer.scala | Scala | apache-2.0 | 1,021 |
package applicant.ml.regression
import applicant.nlp.LuceneTokenizer
import applicant.ml.score._
import applicant.ml.regression._
import applicant.ml.naivebayes._
import applicant.etl._
import scala.collection.mutable.{ListBuffer, Map, HashMap}
import java.io.File
import java.util.regex
import scopt.OptionParser
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.elasticsearch.spark._
import org.apache.spark.mllib.feature.{Word2Vec, Word2VecModel}
import org.apache.spark.mllib.linalg.{Vectors, Vector}
import org.apache.spark.mllib.regression.LabeledPoint
import org.slf4j.{Logger, LoggerFactory}
/**
* MlModelGenerator is a class that will query elasticsearch for applicants who are favorited and
* archived and build various machine learning models out of their data
*/
object MlModelGenerator {
//Class to store command line options
case class Command(word2vecModel: String = "", sparkMaster: String = "",
esNodes: String = "", esPort: String = "", esAppIndex: String = "",
esLabelIndex: String = "", logisticModelDirectory: String = "",
naiveBayesModelDirectory: String = "", idfModelDirectory: String = "",
cityfilelocation: String = "")
val log: Logger = LoggerFactory.getLogger(getClass())
var applicantDataList = ListBuffer[ApplicantData]()
var modelData = ListBuffer[LabeledPoint]()
val labelsHashMap = new HashMap[String,Double]()
/**
* Function to create and save a naive bayes model
*
* @param options the command line options given
* @param sc The current spark context
*/
private def generateNaiveBayesModel(options: Command, sc: SparkContext) = {
log.info("Creating naive bayes model.")
//First check if the idf folder option exists
val checkFolder = new File(options.idfModelDirectory)
if (checkFolder.exists()) {
val list: ListBuffer[Seq[String]] = new ListBuffer[Seq[String]]()
applicantDataList.foreach { applicant =>
val tokenList = LuceneTokenizer.getTokens(applicant.fullText)
tokenList.foreach { tokens =>
list += tokens
}
}
//Now create the model out of raw TF vectors
val featureRDD = sc.parallelize(list.map { tokens =>
NaiveBayesFeatureGenerator.getFeatureVec(tokens)
})
val idfModel = IDFHelper.createModel(featureRDD)
//Make sure that the model is saved
IDFHelper.saveModel(idfModel, options.idfModelDirectory)
//Turn the applicant objects into Labeled Points
applicantDataList.foreach { applicant =>
val applicantScore = labelsHashMap(applicant.applicantid)
val tokenList = LuceneTokenizer.getTokens(applicant.fullText)
tokenList.foreach { tokens =>
modelData += LabeledPoint(applicantScore, NaiveBayesFeatureGenerator.getAdjustedFeatureVec(tokens, idfModel))
}
}
//Create and save the NaiveBayes model
val bayesModel = NaiveBayesHelper.createModel(sc, modelData)
NaiveBayesHelper.saveModel(bayesModel, sc, options.naiveBayesModelDirectory)
modelData.clear()
log.info("Naive bayes model created.")
}
else {
log.warn("The specified IDF folder location does not exist. Naive Bayes model not created.")
}
}
/**
* Function to create and save a logistic regression model
*
* @param options the command line options given
* @param sc The current spark context
*/
private def generateLogisticRegressionModel(options: Command, sc: SparkContext) = {
log.info("Creating logistic regression model.")
val bayesModel = NaiveBayesHelper.loadModel(sc, options.naiveBayesModelDirectory) match {
case Some(model) =>
model
case None =>
null
}
if (bayesModel != null) {
//Check the IDF model can be loaded
IDFHelper.loadModel(options.idfModelDirectory) match {
case Some(idfModel) =>
val settings = RegressionSettings(sc)
val generator = LogisticFeatureGenerator(bayesModel, idfModel, settings, options.cityfilelocation)
applicantDataList.foreach { applicant =>
val applicantScore = labelsHashMap(applicant.applicantid)
log.debug("---------Label = " + applicantScore + ", id = " + applicant.applicantid)
modelData += LabeledPoint(applicantScore, generator.getLogisticFeatureVec(applicant))
}
//Create and save the logistic regression model
val logisticModel = LogisticRegressionHelper.createModel(sc, modelData)
log.debug("Weights:")
log.debug(generator.getFeatureList().toString())
log.debug(logisticModel.weights.toString())
LogisticRegressionHelper.saveModel(logisticModel, sc, options.logisticModelDirectory)
log.info("Logistic regression model created.")
case None =>
log.warn("No IDF model could be load. Logistic model not created.")
}
}
else {
log.warn("The bayes model could not be loaded. No logistic regression model created.")
}
}
/**
* Function to create the models that were specified by the options
* at the location also specified in the options
*
* @param options The object that contains the command line options
*/
def generateMLmodels(options: Command) {
val conf = new SparkConf().setMaster(options.sparkMaster)
.setAppName("generateMLmodel").set("es.nodes", options.esNodes)
.set("es.port", options.esPort)
.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
//Create Spark RDD using conf
val sc = new SparkContext(conf)
//Load the archived and favorited users from elasticsearch
val archiveLabelsSeq = sc.esRDD(options.esLabelIndex + "/label").values.map{label =>
if (label("type").asInstanceOf[String] == "archive") {
label("id").asInstanceOf[String] -> 0.0
}
else {
label("id").asInstanceOf[String] -> 1.0
}
}.collect()
//Add these results to a hash map for faster lookup during the filter
archiveLabelsSeq.foreach(x => labelsHashMap += x)
//Query elasticsearch for every item in the applicant index
val appRDD = sc.esRDD(options.esAppIndex + "/applicant").values
//Filter out all of the applicants who do not have a proper label
val applicantsArray = appRDD.filter(applicantMap => labelsHashMap.contains(applicantMap("id").asInstanceOf[String])).collect()
    //Turn the applicant Map structures into a workable type
for (app <- applicantsArray) {
applicantDataList += ApplicantData(app)
}
val modelData: ListBuffer[LabeledPoint] = new ListBuffer[LabeledPoint]()
//If the Naive Bayes flag was set
if (options.naiveBayesModelDirectory != "") {
generateNaiveBayesModel(options, sc)
modelData.clear()
}
//If the logistic regression flag was set
if (options.logisticModelDirectory != "") {
generateLogisticRegressionModel(options, sc)
}
modelData.clear()
applicantDataList.clear()
labelsHashMap.clear()
sc.stop()
}
/**
* Main method
*
* @param args Array of Strings: see options
*/
def main(args: Array[String]) {
//Command line option parser
val parser = new OptionParser[Command]("ResumeParser") {
opt[String]('m', "master") required() valueName("<master>") action { (x, c) =>
c.copy(sparkMaster = x)
} text ("Spark master argument.")
opt[String]('n', "nodes") required() valueName("<nodes>") action { (x, c) =>
c.copy(esNodes = x)
} text ("Elasticsearch node to connect to, usually IP address of ES server.")
opt[String]('p', "port") required() valueName("<port>") action { (x, c) =>
c.copy(esPort = x)
} text ("Default HTTP/REST port used for connecting to Elasticsearch, usually 9200.")
opt[String]('i', "applicantindex") required() valueName("<applicantindex>") action { (x, c) =>
c.copy(esAppIndex = x)
} text ("Name of the Elasticsearch index to read and write data.")
opt[String]('l', "labelindex") required() valueName("<labelindex>") action { (x, c) =>
c.copy(esLabelIndex = x)
} text ("Name of the Elasticsearch containing archived/favorited labels.")
opt[String]("logisticmodeldirectory") valueName("<logisticmodeldirectory>") action { (x, c) =>
c.copy(logisticModelDirectory = x)
} text("Path where the logistic regression model is to be saved")
opt[String]("naivebayesmodeldirectory") valueName("<naivebayesmodeldirectory>") action { (x, c) =>
c.copy(naiveBayesModelDirectory = x)
} text ("Path where the naive bayes model is be saved")
opt[String]("idfmodeldirectory") valueName("<idfmodeldirectory>") action { (x, c) =>
c.copy(idfModelDirectory = x)
} text ("Path where the IDF model is to be saved")
opt[String]("cityfilelocation") valueName("<cityfilelocation>") action { (x, c) =>
c.copy(cityfilelocation = x)
} text ("Path where the city file location data is saved")
note ("Pulls labeled resumes from elasticsearch and generates a logistic regression model \\n")
help("help") text("Prints this usage text")
}
// Parses command line arguments and passes them to the search
parser.parse(args, Command()) match {
//If the command line options were all present continue
case Some(options) =>
        //Generate the requested models from the labeled applicants stored in Elasticsearch
generateMLmodels(options)
//Elsewise, just exit
case None =>
}
}
}
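
// Hedged usage sketch (added for illustration, not part of the original source): an
// example spark-submit style invocation. The host, index names, jar name and model
// paths are hypothetical placeholders; the flag names come from the OptionParser above.
//
//   spark-submit --class applicant.ml.regression.MlModelGenerator ml-models.jar \
//     --master local[*] --nodes 10.0.0.5 --port 9200 \
//     --applicantindex applicants --labelindex labels \
//     --logisticmodeldirectory /models/logistic \
//     --naivebayesmodeldirectory /models/bayes \
//     --idfmodeldirectory /models/idf \
//     --cityfilelocation /data/cities.csv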
| dataworks/internship-2016 | etl/src/scala/applicant/ml/MlModelGenerator.scala | Scala | apache-2.0 | 9,803 |
package scala
package reflect
package macros
/**
* <span class="badge badge-red" style="float: right;">EXPERIMENTAL</span>
*
* A slice of [[scala.reflect.macros.blackbox.Context the Scala macros context]] that
* provides facilities to communicate with the compiler's front end
* (emit warnings, errors and other sorts of messages).
*/
trait FrontEnds {
self: blackbox.Context =>
/** For sending a message which should not be labelled as a warning/error,
* but also shouldn't require -verbose to be visible.
* Use `enclosingPosition` if you're in doubt what position to pass to `pos`.
*/
def echo(pos: Position, msg: String): Unit
/** Emits an informational message, suppressed unless `-verbose` or `force=true`.
* Use `enclosingPosition` if you're in doubt what position to pass to `pos`.
*/
def info(pos: Position, msg: String, force: Boolean): Unit
/** Does the compilation session have any warnings?
*/
def hasWarnings: Boolean
/** Emits a warning.
* Use `enclosingPosition` if you're in doubt what position to pass to `pos`.
*/
def warning(pos: Position, msg: String): Unit
/** Does the compilation session have any errors?
*/
def hasErrors: Boolean
/** Emits a compilation error.
* Use `enclosingPosition` if you're in doubt what position to pass to `pos`.
*/
def error(pos: Position, msg: String): Unit
/** Abruptly terminates current macro expansion leaving a note about what happened.
* Use `enclosingPosition` if you're in doubt what position to pass to `pos`.
*/
def abort(pos: Position, msg: String): Nothing
}
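
// Hedged illustration (added, not part of the original file): inside a blackbox macro
// implementation these methods are reached through the context value, conventionally
// named `c`. The macro below is a hypothetical sketch, not part of this API.
//
//   def assertPositive(c: blackbox.Context)(n: c.Expr[Int]): c.Expr[Int] = {
//     import c.universe._
//     n.tree match {
//       case Literal(Constant(v: Int)) if v <= 0 =>
//         c.abort(c.enclosingPosition, s"expected a positive literal, got $v")
//       case Literal(Constant(_: Int)) => // statically verified, nothing to report
//       case _ =>
//         c.warning(n.tree.pos, "could not verify positivity at compile time")
//     }
//     n
//   }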
| felixmulder/scala | src/reflect/scala/reflect/macros/FrontEnds.scala | Scala | bsd-3-clause | 1,614 |
package com.lucaswilkins.newtonfractals
import java.awt.event.{MouseListener, MouseEvent}
/**
 * Simplified MouseListener that reacts only to mouse-release events.
 *
 * @param onRelease callback invoked with the MouseEvent when a mouse button is released
 */
class MouseReleasedListener(onRelease: MouseEvent ⇒ Unit) extends MouseListener {
def mouseClicked(e: MouseEvent): Unit = {}
def mouseEntered(e: MouseEvent): Unit = {}
def mouseExited(e: MouseEvent): Unit = {}
def mousePressed(e: MouseEvent): Unit = {}
def mouseReleased(e: MouseEvent): Unit = onRelease(e)
}
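
/**
 * Hedged usage sketch (added for illustration): wiring the listener to a Swing
 * component. The `button` parameter and the println body are hypothetical.
 */
object MouseReleasedListenerExample {
  def wire(button: javax.swing.JButton): Unit =
    button.addMouseListener(new MouseReleasedListener(e ⇒ println(s"released at ${e.getX},${e.getY}")))
}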
| drlucaswilkins/newtonfractal | NewtonFractal/src/main/scala/com/lucaswilkins/newtonfractals/MouseReleasedListener.scala | Scala | gpl-2.0 | 525 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo.support
import io.truthencode.ddo.support.TraverseOps.MapOps
trait JoinAbleMap[X, Y, C <: Map[X, Y]] extends JoinAbleBase[(X, Y), C] {
implicit val joinOnKeys: Boolean // = false
private val someBool = implicitly[Boolean] // : Boolean = false
val source: Map[X, Y] // = Map[X, Y]()
private val list = source.take(sampleSize)
val listA: Map[X, Y] = list.take(portion)
val listB: Map[X, Y] = list.takeRight(portion)
val onlyB: Map[X, Y] = list.takeRight(remainder)
val onlyA: Map[X, Y] = list.take(remainder)
  // This is an intersection of full (key, value) pairs and NOT a key-only intersection
val common: Map[X, Y] = listA.toSeq.intersect(listB.toSeq).toMap
val commonWithA: Map[X, Y] = list.take(sampleSize - remainder)
val commonWithB: Map[X, Y] = list.takeRight(sampleSize - remainder)
val leftJoinA: Map[X, Y] = listA.leftJoin(listB)
val rightJoinA: Map[X, Y] = listA.rightJoin(listB)
val leftJoinB: Map[X, Y] = listB.leftJoin(listA)
val rightJoinB: Map[X, Y] = listB.rightJoin(listA)
}
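
// Hedged sketch (added for illustration): a concrete fixture might look like the
// following, assuming sampleSize/portion/remainder are supplied by JoinAbleBase.
// The element values and the joinOnKeys choice are illustrative only.
//
//   object IntStringJoinFixture extends JoinAbleMap[Int, String, Map[Int, String]] {
//     override implicit val joinOnKeys: Boolean = false
//     override val source: Map[Int, String] = (1 to 20).map(i => i -> i.toString).toMap
//   }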
| adarro/ddo-calc | subprojects/common/ddo-core/src/test/scala/io/truthencode/ddo/support/JoinAbleMap.scala | Scala | apache-2.0 | 1,665 |
/**
* Generated by API Builder - https://www.apibuilder.io
* Service version: 0.15.4
* apibuilder 0.15.3 app.apibuilder.io/apicollective/apibuilder-spec/latest/play_2_8_client
*/
package io.apibuilder.spec.v0.models {
sealed trait ResponseCode extends _root_.scala.Product with _root_.scala.Serializable
/**
* Used to indicate an API concern for a field that is specific to the field's
* usage but not necessarily its data type. For example, you might use annotations
* to mark that certain fields contain PII or PCI data and thus should not be
* stored once processing is complete. Annotations communicate meaning to consumers
* of an API and may also be used within an implementation or tooling; for example,
* using static analysis tools to detect logging of sensitive data.
*/
final case class Annotation(
name: String,
description: _root_.scala.Option[String] = None,
deprecation: _root_.scala.Option[io.apibuilder.spec.v0.models.Deprecation] = None
)
final case class Apidoc(
version: String
)
/**
* @param key Unique key identifying this application
*/
final case class Application(
key: String
)
/**
* Represents an additional attribute that is attached to one of the objects in
* apibuilder. The main use case is to capture additional metadata that doesn't
* necessarily define the API but aids in code generation. Examples would be hints
* for certain code generators about classes to extend, interfaces to implement,
* annotations to add, names to assign to certain methods, etc. The specific
* attributes will be applicable only in the context of the specific code
   * generators using them.
*/
final case class Attribute(
name: String,
value: _root_.play.api.libs.json.JsObject,
description: _root_.scala.Option[String] = None,
deprecation: _root_.scala.Option[io.apibuilder.spec.v0.models.Deprecation] = None
)
final case class Body(
`type`: String,
description: _root_.scala.Option[String] = None,
deprecation: _root_.scala.Option[io.apibuilder.spec.v0.models.Deprecation] = None,
attributes: Seq[io.apibuilder.spec.v0.models.Attribute] = Nil
)
/**
* Describes the primary contact for this service
*/
final case class Contact(
name: _root_.scala.Option[String] = None,
url: _root_.scala.Option[String] = None,
email: _root_.scala.Option[String] = None
)
/**
* Indicates that this particular element is considered deprecated in the API. See
* the description for details
*/
final case class Deprecation(
description: _root_.scala.Option[String] = None
)
final case class Enum(
name: String,
plural: String,
description: _root_.scala.Option[String] = None,
deprecation: _root_.scala.Option[io.apibuilder.spec.v0.models.Deprecation] = None,
values: Seq[io.apibuilder.spec.v0.models.EnumValue],
attributes: Seq[io.apibuilder.spec.v0.models.Attribute] = Nil
)
/**
* @param value The actual string representation of this value. If not specified, defaults to
* 'name'
*/
final case class EnumValue(
name: String,
description: _root_.scala.Option[String] = None,
deprecation: _root_.scala.Option[io.apibuilder.spec.v0.models.Deprecation] = None,
attributes: Seq[io.apibuilder.spec.v0.models.Attribute] = Nil,
value: _root_.scala.Option[String] = None
)
final case class Field(
name: String,
`type`: String,
description: _root_.scala.Option[String] = None,
deprecation: _root_.scala.Option[io.apibuilder.spec.v0.models.Deprecation] = None,
default: _root_.scala.Option[String] = None,
required: Boolean,
minimum: _root_.scala.Option[Long] = None,
maximum: _root_.scala.Option[Long] = None,
example: _root_.scala.Option[String] = None,
attributes: Seq[io.apibuilder.spec.v0.models.Attribute] = Nil,
annotations: Seq[String] = Nil
)
final case class Header(
name: String,
`type`: String,
description: _root_.scala.Option[String] = None,
deprecation: _root_.scala.Option[io.apibuilder.spec.v0.models.Deprecation] = None,
required: Boolean,
default: _root_.scala.Option[String] = None,
attributes: Seq[io.apibuilder.spec.v0.models.Attribute] = Nil
)
/**
* An import is used to declare a dependency on another application. This allows
   * you to reference the models and/or enums from that application in your own app.
*
* @param uri Full URI to the service.json file of the service we are importing
* @param namespace the fully qualified namespace that we have imported
* @param version The version of the service that we are importing
* @param enums Enums made available by this import
* @param interfaces Interfaces made available by this import
* @param unions Unions made available by this import
* @param models Models made available by this import
* @param annotations Annotations made available by this import
*/
final case class Import(
uri: String,
namespace: String,
organization: io.apibuilder.spec.v0.models.Organization,
application: io.apibuilder.spec.v0.models.Application,
version: String,
enums: Seq[String] = Nil,
interfaces: Seq[String] = Nil,
unions: Seq[String] = Nil,
models: Seq[String] = Nil,
annotations: Seq[io.apibuilder.spec.v0.models.Annotation] = Nil
)
/**
* General metadata about this service
*/
final case class Info(
license: _root_.scala.Option[io.apibuilder.spec.v0.models.License] = None,
contact: _root_.scala.Option[io.apibuilder.spec.v0.models.Contact] = None
)
final case class Interface(
name: String,
plural: String,
description: _root_.scala.Option[String] = None,
deprecation: _root_.scala.Option[io.apibuilder.spec.v0.models.Deprecation] = None,
fields: Seq[io.apibuilder.spec.v0.models.Field],
attributes: Seq[io.apibuilder.spec.v0.models.Attribute] = Nil
)
/**
* Describes the software license contact for this service
*/
final case class License(
name: String,
url: _root_.scala.Option[String] = None
)
final case class Model(
name: String,
plural: String,
description: _root_.scala.Option[String] = None,
deprecation: _root_.scala.Option[io.apibuilder.spec.v0.models.Deprecation] = None,
fields: Seq[io.apibuilder.spec.v0.models.Field],
attributes: Seq[io.apibuilder.spec.v0.models.Attribute] = Nil,
interfaces: Seq[String] = Nil
)
/**
* @param path The full path to this operation, relative to the service's base url.
*/
final case class Operation(
method: io.apibuilder.spec.v0.models.Method,
path: String,
description: _root_.scala.Option[String] = None,
deprecation: _root_.scala.Option[io.apibuilder.spec.v0.models.Deprecation] = None,
body: _root_.scala.Option[io.apibuilder.spec.v0.models.Body] = None,
parameters: Seq[io.apibuilder.spec.v0.models.Parameter] = Nil,
responses: Seq[io.apibuilder.spec.v0.models.Response] = Nil,
attributes: Seq[io.apibuilder.spec.v0.models.Attribute] = Nil
)
/**
* @param key Unique key identifying the organization that owns this service
*/
final case class Organization(
key: String
)
final case class Parameter(
name: String,
`type`: String,
location: io.apibuilder.spec.v0.models.ParameterLocation,
description: _root_.scala.Option[String] = None,
deprecation: _root_.scala.Option[io.apibuilder.spec.v0.models.Deprecation] = None,
required: Boolean,
default: _root_.scala.Option[String] = None,
minimum: _root_.scala.Option[Long] = None,
maximum: _root_.scala.Option[Long] = None,
example: _root_.scala.Option[String] = None,
attributes: _root_.scala.Option[Seq[io.apibuilder.spec.v0.models.Attribute]] = None
)
/**
* @param `type` The type of this resource will map to a defined model, enum, or union type
* @param path The path to this specific resource. This was added in 2016 to help us
* differentiate between the resource path and the operation path which can be
* helpful when, for example, generating method names for operations. This field is
* optional as some of our input formats (e.g. swagger) do not explicitly
   * differentiate resource paths.
*/
final case class Resource(
`type`: String,
plural: String,
path: _root_.scala.Option[String] = None,
description: _root_.scala.Option[String] = None,
deprecation: _root_.scala.Option[io.apibuilder.spec.v0.models.Deprecation] = None,
operations: Seq[io.apibuilder.spec.v0.models.Operation],
attributes: Seq[io.apibuilder.spec.v0.models.Attribute] = Nil
)
final case class Response(
code: io.apibuilder.spec.v0.models.ResponseCode,
`type`: String,
headers: _root_.scala.Option[Seq[io.apibuilder.spec.v0.models.Header]] = None,
description: _root_.scala.Option[String] = None,
deprecation: _root_.scala.Option[io.apibuilder.spec.v0.models.Deprecation] = None,
attributes: _root_.scala.Option[Seq[io.apibuilder.spec.v0.models.Attribute]] = None
)
/**
* @param apidoc Documents that this is an apibuilder document, noting the specific version used.
* Internally the version is then used for backwards compatibility when applicable
* as new features are added to apibuilder. Note naming refers to the original name
* of this project, 'apidoc', and is left here to avoid a breaking change for
* preexisting services.
* @param namespace Fully qualified namespace for this service
*/
final case class Service(
apidoc: io.apibuilder.spec.v0.models.Apidoc,
name: String,
organization: io.apibuilder.spec.v0.models.Organization,
application: io.apibuilder.spec.v0.models.Application,
namespace: String,
version: String,
baseUrl: _root_.scala.Option[String] = None,
description: _root_.scala.Option[String] = None,
info: io.apibuilder.spec.v0.models.Info,
headers: Seq[io.apibuilder.spec.v0.models.Header] = Nil,
imports: Seq[io.apibuilder.spec.v0.models.Import] = Nil,
enums: Seq[io.apibuilder.spec.v0.models.Enum] = Nil,
interfaces: Seq[io.apibuilder.spec.v0.models.Interface] = Nil,
unions: Seq[io.apibuilder.spec.v0.models.Union] = Nil,
models: Seq[io.apibuilder.spec.v0.models.Model] = Nil,
resources: Seq[io.apibuilder.spec.v0.models.Resource] = Nil,
attributes: Seq[io.apibuilder.spec.v0.models.Attribute] = Nil,
annotations: Seq[io.apibuilder.spec.v0.models.Annotation] = Nil
)
/**
* @param discriminator If a type discriminator is provided, serialization of these union types will
* always contain a field named with the value of the discriminator that will
* contain the name of the type. This provides a simpler (for many use cases) JSON
* serialization/deserialization mechanism. When specified, apibuilder itself will
* verify that none of the types in the union type itself contain a field with the
* same name as the discriminator
* @param types The names of the types that make up this union type
*/
final case class Union(
name: String,
plural: String,
discriminator: _root_.scala.Option[String] = None,
description: _root_.scala.Option[String] = None,
deprecation: _root_.scala.Option[io.apibuilder.spec.v0.models.Deprecation] = None,
types: Seq[io.apibuilder.spec.v0.models.UnionType],
attributes: Seq[io.apibuilder.spec.v0.models.Attribute] = Nil,
interfaces: Seq[String] = Nil
)
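
  /**
   * Illustrative note (added): with `discriminator = Some("discriminator")` and a union
   * member named `registered_user`, a serialized value carries the type name inline,
   * e.g. `{"discriminator": "registered_user", ...other fields...}`. The field names
   * other than the discriminator are hypothetical.
   */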
/**
* Metadata about one of the types that is part of a union type
*
* @param `type` The name of a type (a primitive, model name, or enum name) that makes up this
* union type
* @param default If true, indicates that this type should be used as the default when
* deserializing union types. This field is only used by union types that require a
   * discriminator and sets the default value for that discriminator during
* deserialization.
* @param discriminatorValue The discriminator value defines the string to use in the discriminator field to
* identify this type. If not specified, the discriminator value will default to
* the name of the type itself.
*/
final case class UnionType(
`type`: String,
description: _root_.scala.Option[String] = None,
deprecation: _root_.scala.Option[io.apibuilder.spec.v0.models.Deprecation] = None,
attributes: Seq[io.apibuilder.spec.v0.models.Attribute] = Nil,
default: _root_.scala.Option[Boolean] = None,
discriminatorValue: _root_.scala.Option[String] = None
)
/**
* Provides future compatibility in clients - in the future, when a type is added
* to the union ResponseCode, it will need to be handled in the client code. This
* implementation will deserialize these future types as an instance of this class.
*
* @param description Information about the type that we received that is undefined in this version of
* the client.
*/
final case class ResponseCodeUndefinedType(
description: String
) extends ResponseCode
/**
* Wrapper class to support the union types containing the datatype[integer]
*/
final case class ResponseCodeInt(
value: Int
) extends ResponseCode
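
  /**
   * Illustrative note (added): a literal HTTP status such as 200 is modelled as
   * `ResponseCodeInt(200)`, the special "Default" code as `ResponseCodeOption.Default`,
   * and any unrecognised future variant is surfaced as a `ResponseCodeUndefinedType`.
   */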
sealed trait Method extends _root_.scala.Product with _root_.scala.Serializable
object Method {
case object Get extends Method { override def toString = "GET" }
case object Post extends Method { override def toString = "POST" }
case object Put extends Method { override def toString = "PUT" }
case object Patch extends Method { override def toString = "PATCH" }
case object Delete extends Method { override def toString = "DELETE" }
case object Head extends Method { override def toString = "HEAD" }
case object Connect extends Method { override def toString = "CONNECT" }
case object Options extends Method { override def toString = "OPTIONS" }
case object Trace extends Method { override def toString = "TRACE" }
/**
* UNDEFINED captures values that are sent either in error or
* that were added by the server after this library was
* generated. We want to make it easy and obvious for users of
* this library to handle this case gracefully.
*
* We use all CAPS for the variable name to avoid collisions
* with the camel cased values above.
*/
final case class UNDEFINED(override val toString: String) extends Method
/**
* all returns a list of all the valid, known values. We use
* lower case to avoid collisions with the camel cased values
* above.
*/
val all: scala.List[Method] = scala.List(Get, Post, Put, Patch, Delete, Head, Connect, Options, Trace)
private[this]
val byName: Map[String, Method] = all.map(x => x.toString.toLowerCase -> x).toMap
def apply(value: String): Method = fromString(value).getOrElse(UNDEFINED(value))
def fromString(value: String): _root_.scala.Option[Method] = byName.get(value.toLowerCase)
}
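
  // Illustrative note (added): lookup is case-insensitive, so Method("get") and
  // Method("GET") both resolve to Method.Get, while an unknown value such as
  // Method("weird") yields Method.UNDEFINED("weird").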
sealed trait ParameterLocation extends _root_.scala.Product with _root_.scala.Serializable
object ParameterLocation {
case object Path extends ParameterLocation { override def toString = "Path" }
case object Query extends ParameterLocation { override def toString = "Query" }
case object Form extends ParameterLocation { override def toString = "Form" }
case object Header extends ParameterLocation { override def toString = "Header" }
/**
* UNDEFINED captures values that are sent either in error or
* that were added by the server after this library was
* generated. We want to make it easy and obvious for users of
* this library to handle this case gracefully.
*
* We use all CAPS for the variable name to avoid collisions
* with the camel cased values above.
*/
final case class UNDEFINED(override val toString: String) extends ParameterLocation
/**
* all returns a list of all the valid, known values. We use
* lower case to avoid collisions with the camel cased values
* above.
*/
val all: scala.List[ParameterLocation] = scala.List(Path, Query, Form, Header)
private[this]
val byName: Map[String, ParameterLocation] = all.map(x => x.toString.toLowerCase -> x).toMap
def apply(value: String): ParameterLocation = fromString(value).getOrElse(UNDEFINED(value))
def fromString(value: String): _root_.scala.Option[ParameterLocation] = byName.get(value.toLowerCase)
}
sealed trait ResponseCodeOption extends ResponseCode
object ResponseCodeOption {
case object Default extends ResponseCodeOption { override def toString = "Default" }
/**
* UNDEFINED captures values that are sent either in error or
* that were added by the server after this library was
* generated. We want to make it easy and obvious for users of
* this library to handle this case gracefully.
*
* We use all CAPS for the variable name to avoid collisions
* with the camel cased values above.
*/
final case class UNDEFINED(override val toString: String) extends ResponseCodeOption
/**
* all returns a list of all the valid, known values. We use
* lower case to avoid collisions with the camel cased values
* above.
*/
val all: scala.List[ResponseCodeOption] = scala.List(Default)
private[this]
val byName: Map[String, ResponseCodeOption] = all.map(x => x.toString.toLowerCase -> x).toMap
def apply(value: String): ResponseCodeOption = fromString(value).getOrElse(UNDEFINED(value))
def fromString(value: String): _root_.scala.Option[ResponseCodeOption] = byName.get(value.toLowerCase)
}
}
package io.apibuilder.spec.v0.models {
package object json {
import play.api.libs.json.__
import play.api.libs.json.JsString
import play.api.libs.json.Writes
import play.api.libs.functional.syntax._
import io.apibuilder.spec.v0.models.json._
private[v0] implicit val jsonReadsUUID = __.read[String].map { str =>
_root_.java.util.UUID.fromString(str)
}
private[v0] implicit val jsonWritesUUID = new Writes[_root_.java.util.UUID] {
def writes(x: _root_.java.util.UUID) = JsString(x.toString)
}
private[v0] implicit val jsonReadsJodaDateTime = __.read[String].map { str =>
_root_.org.joda.time.format.ISODateTimeFormat.dateTimeParser.parseDateTime(str)
}
private[v0] implicit val jsonWritesJodaDateTime = new Writes[_root_.org.joda.time.DateTime] {
def writes(x: _root_.org.joda.time.DateTime) = {
JsString(_root_.org.joda.time.format.ISODateTimeFormat.dateTime.print(x))
}
}
private[v0] implicit val jsonReadsJodaLocalDate = __.read[String].map { str =>
_root_.org.joda.time.format.ISODateTimeFormat.dateTimeParser.parseLocalDate(str)
}
private[v0] implicit val jsonWritesJodaLocalDate = new Writes[_root_.org.joda.time.LocalDate] {
def writes(x: _root_.org.joda.time.LocalDate) = {
JsString(_root_.org.joda.time.format.ISODateTimeFormat.date.print(x))
}
}
implicit val jsonReadsApibuilderSpecMethod = new play.api.libs.json.Reads[io.apibuilder.spec.v0.models.Method] {
def reads(js: play.api.libs.json.JsValue): play.api.libs.json.JsResult[io.apibuilder.spec.v0.models.Method] = {
js match {
case v: play.api.libs.json.JsString => play.api.libs.json.JsSuccess(io.apibuilder.spec.v0.models.Method(v.value))
case _ => {
(js \\ "value").validate[String] match {
case play.api.libs.json.JsSuccess(v, _) => play.api.libs.json.JsSuccess(io.apibuilder.spec.v0.models.Method(v))
case err: play.api.libs.json.JsError =>
(js \\ "method").validate[String] match {
case play.api.libs.json.JsSuccess(v, _) => play.api.libs.json.JsSuccess(io.apibuilder.spec.v0.models.Method(v))
case err: play.api.libs.json.JsError => err
}
}
}
}
}
}
def jsonWritesApibuilderSpecMethod(obj: io.apibuilder.spec.v0.models.Method) = {
play.api.libs.json.JsString(obj.toString)
}
def jsObjectMethod(obj: io.apibuilder.spec.v0.models.Method) = {
play.api.libs.json.Json.obj("value" -> play.api.libs.json.JsString(obj.toString))
}
implicit def jsonWritesApibuilderSpecMethod: play.api.libs.json.Writes[Method] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Method] {
def writes(obj: io.apibuilder.spec.v0.models.Method) = {
jsonWritesApibuilderSpecMethod(obj)
}
}
}
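
    // Illustrative note (added): with the reader above, the bare string "GET" and the
    // wrapped forms {"value": "GET"} or {"method": "GET"} all deserialize to Method.Get,
    // while serialization always emits the bare JsString form.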
implicit val jsonReadsApibuilderSpecParameterLocation = new play.api.libs.json.Reads[io.apibuilder.spec.v0.models.ParameterLocation] {
def reads(js: play.api.libs.json.JsValue): play.api.libs.json.JsResult[io.apibuilder.spec.v0.models.ParameterLocation] = {
js match {
case v: play.api.libs.json.JsString => play.api.libs.json.JsSuccess(io.apibuilder.spec.v0.models.ParameterLocation(v.value))
case _ => {
(js \\ "value").validate[String] match {
case play.api.libs.json.JsSuccess(v, _) => play.api.libs.json.JsSuccess(io.apibuilder.spec.v0.models.ParameterLocation(v))
case err: play.api.libs.json.JsError =>
(js \\ "parameter_location").validate[String] match {
case play.api.libs.json.JsSuccess(v, _) => play.api.libs.json.JsSuccess(io.apibuilder.spec.v0.models.ParameterLocation(v))
case err: play.api.libs.json.JsError => err
}
}
}
}
}
}
def jsonWritesApibuilderSpecParameterLocation(obj: io.apibuilder.spec.v0.models.ParameterLocation) = {
play.api.libs.json.JsString(obj.toString)
}
def jsObjectParameterLocation(obj: io.apibuilder.spec.v0.models.ParameterLocation) = {
play.api.libs.json.Json.obj("value" -> play.api.libs.json.JsString(obj.toString))
}
implicit def jsonWritesApibuilderSpecParameterLocation: play.api.libs.json.Writes[ParameterLocation] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.ParameterLocation] {
def writes(obj: io.apibuilder.spec.v0.models.ParameterLocation) = {
jsonWritesApibuilderSpecParameterLocation(obj)
}
}
}
implicit val jsonReadsApibuilderSpecResponseCodeOption = new play.api.libs.json.Reads[io.apibuilder.spec.v0.models.ResponseCodeOption] {
def reads(js: play.api.libs.json.JsValue): play.api.libs.json.JsResult[io.apibuilder.spec.v0.models.ResponseCodeOption] = {
js match {
case v: play.api.libs.json.JsString => play.api.libs.json.JsSuccess(io.apibuilder.spec.v0.models.ResponseCodeOption(v.value))
case _ => {
(js \\ "value").validate[String] match {
case play.api.libs.json.JsSuccess(v, _) => play.api.libs.json.JsSuccess(io.apibuilder.spec.v0.models.ResponseCodeOption(v))
case err: play.api.libs.json.JsError =>
(js \\ "response_code_option").validate[String] match {
case play.api.libs.json.JsSuccess(v, _) => play.api.libs.json.JsSuccess(io.apibuilder.spec.v0.models.ResponseCodeOption(v))
case err: play.api.libs.json.JsError => err
}
}
}
}
}
}
def jsonWritesApibuilderSpecResponseCodeOption(obj: io.apibuilder.spec.v0.models.ResponseCodeOption) = {
play.api.libs.json.JsString(obj.toString)
}
def jsObjectResponseCodeOption(obj: io.apibuilder.spec.v0.models.ResponseCodeOption) = {
play.api.libs.json.Json.obj("value" -> play.api.libs.json.JsString(obj.toString))
}
implicit def jsonWritesApibuilderSpecResponseCodeOption: play.api.libs.json.Writes[ResponseCodeOption] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.ResponseCodeOption] {
def writes(obj: io.apibuilder.spec.v0.models.ResponseCodeOption) = {
jsonWritesApibuilderSpecResponseCodeOption(obj)
}
}
}
implicit def jsonReadsApibuilderSpecAnnotation: play.api.libs.json.Reads[Annotation] = {
for {
name <- (__ \\ "name").read[String]
description <- (__ \\ "description").readNullable[String]
deprecation <- (__ \\ "deprecation").readNullable[io.apibuilder.spec.v0.models.Deprecation]
} yield Annotation(name, description, deprecation)
}
def jsObjectAnnotation(obj: io.apibuilder.spec.v0.models.Annotation): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"name" -> play.api.libs.json.JsString(obj.name)
) ++ (obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
}) ++
(obj.deprecation match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("deprecation" -> jsObjectDeprecation(x))
})
}
implicit def jsonWritesApibuilderSpecAnnotation: play.api.libs.json.Writes[Annotation] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Annotation] {
def writes(obj: io.apibuilder.spec.v0.models.Annotation) = {
jsObjectAnnotation(obj)
}
}
}
implicit def jsonReadsApibuilderSpecApidoc: play.api.libs.json.Reads[Apidoc] = {
(__ \\ "version").read[String].map { x => new Apidoc(version = x) }
}
def jsObjectApidoc(obj: io.apibuilder.spec.v0.models.Apidoc): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"version" -> play.api.libs.json.JsString(obj.version)
)
}
implicit def jsonWritesApibuilderSpecApidoc: play.api.libs.json.Writes[Apidoc] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Apidoc] {
def writes(obj: io.apibuilder.spec.v0.models.Apidoc) = {
jsObjectApidoc(obj)
}
}
}
implicit def jsonReadsApibuilderSpecApplication: play.api.libs.json.Reads[Application] = {
(__ \\ "key").read[String].map { x => new Application(key = x) }
}
def jsObjectApplication(obj: io.apibuilder.spec.v0.models.Application): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"key" -> play.api.libs.json.JsString(obj.key)
)
}
implicit def jsonWritesApibuilderSpecApplication: play.api.libs.json.Writes[Application] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Application] {
def writes(obj: io.apibuilder.spec.v0.models.Application) = {
jsObjectApplication(obj)
}
}
}
implicit def jsonReadsApibuilderSpecAttribute: play.api.libs.json.Reads[Attribute] = {
for {
name <- (__ \\ "name").read[String]
value <- (__ \\ "value").read[_root_.play.api.libs.json.JsObject]
description <- (__ \\ "description").readNullable[String]
deprecation <- (__ \\ "deprecation").readNullable[io.apibuilder.spec.v0.models.Deprecation]
} yield Attribute(name, value, description, deprecation)
}
def jsObjectAttribute(obj: io.apibuilder.spec.v0.models.Attribute): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"name" -> play.api.libs.json.JsString(obj.name),
"value" -> obj.value
) ++ (obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
}) ++
(obj.deprecation match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("deprecation" -> jsObjectDeprecation(x))
})
}
implicit def jsonWritesApibuilderSpecAttribute: play.api.libs.json.Writes[Attribute] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Attribute] {
def writes(obj: io.apibuilder.spec.v0.models.Attribute) = {
jsObjectAttribute(obj)
}
}
}
implicit def jsonReadsApibuilderSpecBody: play.api.libs.json.Reads[Body] = {
for {
`type` <- (__ \\ "type").read[String]
description <- (__ \\ "description").readNullable[String]
deprecation <- (__ \\ "deprecation").readNullable[io.apibuilder.spec.v0.models.Deprecation]
attributes <- (__ \\ "attributes").read[Seq[io.apibuilder.spec.v0.models.Attribute]]
} yield Body(`type`, description, deprecation, attributes)
}
def jsObjectBody(obj: io.apibuilder.spec.v0.models.Body): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"type" -> play.api.libs.json.JsString(obj.`type`),
"attributes" -> play.api.libs.json.Json.toJson(obj.attributes)
) ++ (obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
}) ++
(obj.deprecation match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("deprecation" -> jsObjectDeprecation(x))
})
}
implicit def jsonWritesApibuilderSpecBody: play.api.libs.json.Writes[Body] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Body] {
def writes(obj: io.apibuilder.spec.v0.models.Body) = {
jsObjectBody(obj)
}
}
}
implicit def jsonReadsApibuilderSpecContact: play.api.libs.json.Reads[Contact] = {
for {
name <- (__ \\ "name").readNullable[String]
url <- (__ \\ "url").readNullable[String]
email <- (__ \\ "email").readNullable[String]
} yield Contact(name, url, email)
}
def jsObjectContact(obj: io.apibuilder.spec.v0.models.Contact): play.api.libs.json.JsObject = {
(obj.name match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("name" -> play.api.libs.json.JsString(x))
}) ++
(obj.url match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("url" -> play.api.libs.json.JsString(x))
}) ++
(obj.email match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("email" -> play.api.libs.json.JsString(x))
})
}
implicit def jsonWritesApibuilderSpecContact: play.api.libs.json.Writes[Contact] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Contact] {
def writes(obj: io.apibuilder.spec.v0.models.Contact) = {
jsObjectContact(obj)
}
}
}
implicit def jsonReadsApibuilderSpecDeprecation: play.api.libs.json.Reads[Deprecation] = {
(__ \\ "description").readNullable[String].map { x => new Deprecation(description = x) }
}
def jsObjectDeprecation(obj: io.apibuilder.spec.v0.models.Deprecation): play.api.libs.json.JsObject = {
(obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
})
}
implicit def jsonWritesApibuilderSpecDeprecation: play.api.libs.json.Writes[Deprecation] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Deprecation] {
def writes(obj: io.apibuilder.spec.v0.models.Deprecation) = {
jsObjectDeprecation(obj)
}
}
}
implicit def jsonReadsApibuilderSpecEnum: play.api.libs.json.Reads[Enum] = {
for {
name <- (__ \\ "name").read[String]
plural <- (__ \\ "plural").read[String]
description <- (__ \\ "description").readNullable[String]
deprecation <- (__ \\ "deprecation").readNullable[io.apibuilder.spec.v0.models.Deprecation]
values <- (__ \\ "values").read[Seq[io.apibuilder.spec.v0.models.EnumValue]]
attributes <- (__ \\ "attributes").read[Seq[io.apibuilder.spec.v0.models.Attribute]]
} yield Enum(name, plural, description, deprecation, values, attributes)
}
def jsObjectEnum(obj: io.apibuilder.spec.v0.models.Enum): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"name" -> play.api.libs.json.JsString(obj.name),
"plural" -> play.api.libs.json.JsString(obj.plural),
"values" -> play.api.libs.json.Json.toJson(obj.values),
"attributes" -> play.api.libs.json.Json.toJson(obj.attributes)
) ++ (obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
}) ++
(obj.deprecation match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("deprecation" -> jsObjectDeprecation(x))
})
}
implicit def jsonWritesApibuilderSpecEnum: play.api.libs.json.Writes[Enum] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Enum] {
def writes(obj: io.apibuilder.spec.v0.models.Enum) = {
jsObjectEnum(obj)
}
}
}
implicit def jsonReadsApibuilderSpecEnumValue: play.api.libs.json.Reads[EnumValue] = {
for {
name <- (__ \\ "name").read[String]
description <- (__ \\ "description").readNullable[String]
deprecation <- (__ \\ "deprecation").readNullable[io.apibuilder.spec.v0.models.Deprecation]
attributes <- (__ \\ "attributes").read[Seq[io.apibuilder.spec.v0.models.Attribute]]
value <- (__ \\ "value").readNullable[String]
} yield EnumValue(name, description, deprecation, attributes, value)
}
def jsObjectEnumValue(obj: io.apibuilder.spec.v0.models.EnumValue): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"name" -> play.api.libs.json.JsString(obj.name),
"attributes" -> play.api.libs.json.Json.toJson(obj.attributes)
) ++ (obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
}) ++
(obj.deprecation match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("deprecation" -> jsObjectDeprecation(x))
}) ++
(obj.value match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("value" -> play.api.libs.json.JsString(x))
})
}
implicit def jsonWritesApibuilderSpecEnumValue: play.api.libs.json.Writes[EnumValue] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.EnumValue] {
def writes(obj: io.apibuilder.spec.v0.models.EnumValue) = {
jsObjectEnumValue(obj)
}
}
}
implicit def jsonReadsApibuilderSpecField: play.api.libs.json.Reads[Field] = {
for {
name <- (__ \\ "name").read[String]
`type` <- (__ \\ "type").read[String]
description <- (__ \\ "description").readNullable[String]
deprecation <- (__ \\ "deprecation").readNullable[io.apibuilder.spec.v0.models.Deprecation]
default <- (__ \\ "default").readNullable[String]
required <- (__ \\ "required").read[Boolean]
minimum <- (__ \\ "minimum").readNullable[Long]
maximum <- (__ \\ "maximum").readNullable[Long]
example <- (__ \\ "example").readNullable[String]
attributes <- (__ \\ "attributes").read[Seq[io.apibuilder.spec.v0.models.Attribute]]
annotations <- (__ \\ "annotations").readWithDefault[Seq[String]](Nil)
} yield Field(name, `type`, description, deprecation, default, required, minimum, maximum, example, attributes, annotations)
}
def jsObjectField(obj: io.apibuilder.spec.v0.models.Field): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"name" -> play.api.libs.json.JsString(obj.name),
"type" -> play.api.libs.json.JsString(obj.`type`),
"required" -> play.api.libs.json.JsBoolean(obj.required),
"attributes" -> play.api.libs.json.Json.toJson(obj.attributes),
"annotations" -> play.api.libs.json.Json.toJson(obj.annotations)
) ++ (obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
}) ++
(obj.deprecation match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("deprecation" -> jsObjectDeprecation(x))
}) ++
(obj.default match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("default" -> play.api.libs.json.JsString(x))
}) ++
(obj.minimum match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("minimum" -> play.api.libs.json.JsNumber(x))
}) ++
(obj.maximum match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("maximum" -> play.api.libs.json.JsNumber(x))
}) ++
(obj.example match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("example" -> play.api.libs.json.JsString(x))
})
}
implicit def jsonWritesApibuilderSpecField: play.api.libs.json.Writes[Field] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Field] {
def writes(obj: io.apibuilder.spec.v0.models.Field) = {
jsObjectField(obj)
}
}
}
implicit def jsonReadsApibuilderSpecHeader: play.api.libs.json.Reads[Header] = {
for {
name <- (__ \\ "name").read[String]
`type` <- (__ \\ "type").read[String]
description <- (__ \\ "description").readNullable[String]
deprecation <- (__ \\ "deprecation").readNullable[io.apibuilder.spec.v0.models.Deprecation]
required <- (__ \\ "required").read[Boolean]
default <- (__ \\ "default").readNullable[String]
attributes <- (__ \\ "attributes").read[Seq[io.apibuilder.spec.v0.models.Attribute]]
} yield Header(name, `type`, description, deprecation, required, default, attributes)
}
def jsObjectHeader(obj: io.apibuilder.spec.v0.models.Header): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"name" -> play.api.libs.json.JsString(obj.name),
"type" -> play.api.libs.json.JsString(obj.`type`),
"required" -> play.api.libs.json.JsBoolean(obj.required),
"attributes" -> play.api.libs.json.Json.toJson(obj.attributes)
) ++ (obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
}) ++
(obj.deprecation match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("deprecation" -> jsObjectDeprecation(x))
}) ++
(obj.default match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("default" -> play.api.libs.json.JsString(x))
})
}
implicit def jsonWritesApibuilderSpecHeader: play.api.libs.json.Writes[Header] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Header] {
def writes(obj: io.apibuilder.spec.v0.models.Header) = {
jsObjectHeader(obj)
}
}
}
implicit def jsonReadsApibuilderSpecImport: play.api.libs.json.Reads[Import] = {
for {
uri <- (__ \\ "uri").read[String]
namespace <- (__ \\ "namespace").read[String]
organization <- (__ \\ "organization").read[io.apibuilder.spec.v0.models.Organization]
application <- (__ \\ "application").read[io.apibuilder.spec.v0.models.Application]
version <- (__ \\ "version").read[String]
enums <- (__ \\ "enums").read[Seq[String]]
interfaces <- (__ \\ "interfaces").readWithDefault[Seq[String]](Nil)
unions <- (__ \\ "unions").read[Seq[String]]
models <- (__ \\ "models").read[Seq[String]]
annotations <- (__ \\ "annotations").readWithDefault[Seq[io.apibuilder.spec.v0.models.Annotation]](Nil)
} yield Import(uri, namespace, organization, application, version, enums, interfaces, unions, models, annotations)
}
def jsObjectImport(obj: io.apibuilder.spec.v0.models.Import): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"uri" -> play.api.libs.json.JsString(obj.uri),
"namespace" -> play.api.libs.json.JsString(obj.namespace),
"organization" -> jsObjectOrganization(obj.organization),
"application" -> jsObjectApplication(obj.application),
"version" -> play.api.libs.json.JsString(obj.version),
"enums" -> play.api.libs.json.Json.toJson(obj.enums),
"interfaces" -> play.api.libs.json.Json.toJson(obj.interfaces),
"unions" -> play.api.libs.json.Json.toJson(obj.unions),
"models" -> play.api.libs.json.Json.toJson(obj.models),
"annotations" -> play.api.libs.json.Json.toJson(obj.annotations)
)
}
implicit def jsonWritesApibuilderSpecImport: play.api.libs.json.Writes[Import] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Import] {
def writes(obj: io.apibuilder.spec.v0.models.Import) = {
jsObjectImport(obj)
}
}
}
implicit def jsonReadsApibuilderSpecInfo: play.api.libs.json.Reads[Info] = {
for {
license <- (__ \\ "license").readNullable[io.apibuilder.spec.v0.models.License]
contact <- (__ \\ "contact").readNullable[io.apibuilder.spec.v0.models.Contact]
} yield Info(license, contact)
}
def jsObjectInfo(obj: io.apibuilder.spec.v0.models.Info): play.api.libs.json.JsObject = {
(obj.license match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("license" -> jsObjectLicense(x))
}) ++
(obj.contact match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("contact" -> jsObjectContact(x))
})
}
implicit def jsonWritesApibuilderSpecInfo: play.api.libs.json.Writes[Info] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Info] {
def writes(obj: io.apibuilder.spec.v0.models.Info) = {
jsObjectInfo(obj)
}
}
}
implicit def jsonReadsApibuilderSpecInterface: play.api.libs.json.Reads[Interface] = {
for {
name <- (__ \\ "name").read[String]
plural <- (__ \\ "plural").read[String]
description <- (__ \\ "description").readNullable[String]
deprecation <- (__ \\ "deprecation").readNullable[io.apibuilder.spec.v0.models.Deprecation]
fields <- (__ \\ "fields").read[Seq[io.apibuilder.spec.v0.models.Field]]
attributes <- (__ \\ "attributes").read[Seq[io.apibuilder.spec.v0.models.Attribute]]
} yield Interface(name, plural, description, deprecation, fields, attributes)
}
def jsObjectInterface(obj: io.apibuilder.spec.v0.models.Interface): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"name" -> play.api.libs.json.JsString(obj.name),
"plural" -> play.api.libs.json.JsString(obj.plural),
"fields" -> play.api.libs.json.Json.toJson(obj.fields),
"attributes" -> play.api.libs.json.Json.toJson(obj.attributes)
) ++ (obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
}) ++
(obj.deprecation match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("deprecation" -> jsObjectDeprecation(x))
})
}
implicit def jsonWritesApibuilderSpecInterface: play.api.libs.json.Writes[Interface] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Interface] {
def writes(obj: io.apibuilder.spec.v0.models.Interface) = {
jsObjectInterface(obj)
}
}
}
implicit def jsonReadsApibuilderSpecLicense: play.api.libs.json.Reads[License] = {
for {
name <- (__ \\ "name").read[String]
url <- (__ \\ "url").readNullable[String]
} yield License(name, url)
}
def jsObjectLicense(obj: io.apibuilder.spec.v0.models.License): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"name" -> play.api.libs.json.JsString(obj.name)
) ++ (obj.url match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("url" -> play.api.libs.json.JsString(x))
})
}
implicit def jsonWritesApibuilderSpecLicense: play.api.libs.json.Writes[License] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.License] {
def writes(obj: io.apibuilder.spec.v0.models.License) = {
jsObjectLicense(obj)
}
}
}
implicit def jsonReadsApibuilderSpecModel: play.api.libs.json.Reads[Model] = {
for {
name <- (__ \\ "name").read[String]
plural <- (__ \\ "plural").read[String]
description <- (__ \\ "description").readNullable[String]
deprecation <- (__ \\ "deprecation").readNullable[io.apibuilder.spec.v0.models.Deprecation]
fields <- (__ \\ "fields").read[Seq[io.apibuilder.spec.v0.models.Field]]
attributes <- (__ \\ "attributes").read[Seq[io.apibuilder.spec.v0.models.Attribute]]
interfaces <- (__ \\ "interfaces").readWithDefault[Seq[String]](Nil)
} yield Model(name, plural, description, deprecation, fields, attributes, interfaces)
}
def jsObjectModel(obj: io.apibuilder.spec.v0.models.Model): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"name" -> play.api.libs.json.JsString(obj.name),
"plural" -> play.api.libs.json.JsString(obj.plural),
"fields" -> play.api.libs.json.Json.toJson(obj.fields),
"attributes" -> play.api.libs.json.Json.toJson(obj.attributes),
"interfaces" -> play.api.libs.json.Json.toJson(obj.interfaces)
) ++ (obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
}) ++
(obj.deprecation match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("deprecation" -> jsObjectDeprecation(x))
})
}
implicit def jsonWritesApibuilderSpecModel: play.api.libs.json.Writes[Model] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Model] {
def writes(obj: io.apibuilder.spec.v0.models.Model) = {
jsObjectModel(obj)
}
}
}
implicit def jsonReadsApibuilderSpecOperation: play.api.libs.json.Reads[Operation] = {
for {
method <- (__ \\ "method").read[io.apibuilder.spec.v0.models.Method]
path <- (__ \\ "path").read[String]
description <- (__ \\ "description").readNullable[String]
deprecation <- (__ \\ "deprecation").readNullable[io.apibuilder.spec.v0.models.Deprecation]
body <- (__ \\ "body").readNullable[io.apibuilder.spec.v0.models.Body]
parameters <- (__ \\ "parameters").read[Seq[io.apibuilder.spec.v0.models.Parameter]]
responses <- (__ \\ "responses").read[Seq[io.apibuilder.spec.v0.models.Response]]
attributes <- (__ \\ "attributes").read[Seq[io.apibuilder.spec.v0.models.Attribute]]
} yield Operation(method, path, description, deprecation, body, parameters, responses, attributes)
}
def jsObjectOperation(obj: io.apibuilder.spec.v0.models.Operation): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"method" -> play.api.libs.json.JsString(obj.method.toString),
"path" -> play.api.libs.json.JsString(obj.path),
"parameters" -> play.api.libs.json.Json.toJson(obj.parameters),
"responses" -> play.api.libs.json.Json.toJson(obj.responses),
"attributes" -> play.api.libs.json.Json.toJson(obj.attributes)
) ++ (obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
}) ++
(obj.deprecation match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("deprecation" -> jsObjectDeprecation(x))
}) ++
(obj.body match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("body" -> jsObjectBody(x))
})
}
implicit def jsonWritesApibuilderSpecOperation: play.api.libs.json.Writes[Operation] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Operation] {
def writes(obj: io.apibuilder.spec.v0.models.Operation) = {
jsObjectOperation(obj)
}
}
}
implicit def jsonReadsApibuilderSpecOrganization: play.api.libs.json.Reads[Organization] = {
(__ \\ "key").read[String].map { x => new Organization(key = x) }
}
def jsObjectOrganization(obj: io.apibuilder.spec.v0.models.Organization): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"key" -> play.api.libs.json.JsString(obj.key)
)
}
implicit def jsonWritesApibuilderSpecOrganization: play.api.libs.json.Writes[Organization] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Organization] {
def writes(obj: io.apibuilder.spec.v0.models.Organization) = {
jsObjectOrganization(obj)
}
}
}
implicit def jsonReadsApibuilderSpecParameter: play.api.libs.json.Reads[Parameter] = {
for {
name <- (__ \\ "name").read[String]
`type` <- (__ \\ "type").read[String]
location <- (__ \\ "location").read[io.apibuilder.spec.v0.models.ParameterLocation]
description <- (__ \\ "description").readNullable[String]
deprecation <- (__ \\ "deprecation").readNullable[io.apibuilder.spec.v0.models.Deprecation]
required <- (__ \\ "required").read[Boolean]
default <- (__ \\ "default").readNullable[String]
minimum <- (__ \\ "minimum").readNullable[Long]
maximum <- (__ \\ "maximum").readNullable[Long]
example <- (__ \\ "example").readNullable[String]
attributes <- (__ \\ "attributes").readNullable[Seq[io.apibuilder.spec.v0.models.Attribute]]
} yield Parameter(name, `type`, location, description, deprecation, required, default, minimum, maximum, example, attributes)
}
def jsObjectParameter(obj: io.apibuilder.spec.v0.models.Parameter): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"name" -> play.api.libs.json.JsString(obj.name),
"type" -> play.api.libs.json.JsString(obj.`type`),
"location" -> play.api.libs.json.JsString(obj.location.toString),
"required" -> play.api.libs.json.JsBoolean(obj.required)
) ++ (obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
}) ++
(obj.deprecation match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("deprecation" -> jsObjectDeprecation(x))
}) ++
(obj.default match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("default" -> play.api.libs.json.JsString(x))
}) ++
(obj.minimum match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("minimum" -> play.api.libs.json.JsNumber(x))
}) ++
(obj.maximum match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("maximum" -> play.api.libs.json.JsNumber(x))
}) ++
(obj.example match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("example" -> play.api.libs.json.JsString(x))
}) ++
(obj.attributes match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("attributes" -> play.api.libs.json.Json.toJson(x))
})
}
implicit def jsonWritesApibuilderSpecParameter: play.api.libs.json.Writes[Parameter] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Parameter] {
def writes(obj: io.apibuilder.spec.v0.models.Parameter) = {
jsObjectParameter(obj)
}
}
}
implicit def jsonReadsApibuilderSpecResource: play.api.libs.json.Reads[Resource] = {
for {
`type` <- (__ \\ "type").read[String]
plural <- (__ \\ "plural").read[String]
path <- (__ \\ "path").readNullable[String]
description <- (__ \\ "description").readNullable[String]
deprecation <- (__ \\ "deprecation").readNullable[io.apibuilder.spec.v0.models.Deprecation]
operations <- (__ \\ "operations").read[Seq[io.apibuilder.spec.v0.models.Operation]]
attributes <- (__ \\ "attributes").read[Seq[io.apibuilder.spec.v0.models.Attribute]]
} yield Resource(`type`, plural, path, description, deprecation, operations, attributes)
}
def jsObjectResource(obj: io.apibuilder.spec.v0.models.Resource): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"type" -> play.api.libs.json.JsString(obj.`type`),
"plural" -> play.api.libs.json.JsString(obj.plural),
"operations" -> play.api.libs.json.Json.toJson(obj.operations),
"attributes" -> play.api.libs.json.Json.toJson(obj.attributes)
) ++ (obj.path match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("path" -> play.api.libs.json.JsString(x))
}) ++
(obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
}) ++
(obj.deprecation match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("deprecation" -> jsObjectDeprecation(x))
})
}
implicit def jsonWritesApibuilderSpecResource: play.api.libs.json.Writes[Resource] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Resource] {
def writes(obj: io.apibuilder.spec.v0.models.Resource) = {
jsObjectResource(obj)
}
}
}
implicit def jsonReadsApibuilderSpecResponse: play.api.libs.json.Reads[Response] = {
for {
code <- (__ \\ "code").read[io.apibuilder.spec.v0.models.ResponseCode]
`type` <- (__ \\ "type").read[String]
headers <- (__ \\ "headers").readNullable[Seq[io.apibuilder.spec.v0.models.Header]]
description <- (__ \\ "description").readNullable[String]
deprecation <- (__ \\ "deprecation").readNullable[io.apibuilder.spec.v0.models.Deprecation]
attributes <- (__ \\ "attributes").readNullable[Seq[io.apibuilder.spec.v0.models.Attribute]]
} yield Response(code, `type`, headers, description, deprecation, attributes)
}
def jsObjectResponse(obj: io.apibuilder.spec.v0.models.Response): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"code" -> jsObjectResponseCode(obj.code),
"type" -> play.api.libs.json.JsString(obj.`type`)
) ++ (obj.headers match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("headers" -> play.api.libs.json.Json.toJson(x))
}) ++
(obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
}) ++
(obj.deprecation match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("deprecation" -> jsObjectDeprecation(x))
}) ++
(obj.attributes match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("attributes" -> play.api.libs.json.Json.toJson(x))
})
}
implicit def jsonWritesApibuilderSpecResponse: play.api.libs.json.Writes[Response] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Response] {
def writes(obj: io.apibuilder.spec.v0.models.Response) = {
jsObjectResponse(obj)
}
}
}
implicit def jsonReadsApibuilderSpecService: play.api.libs.json.Reads[Service] = {
for {
apidoc <- (__ \\ "apidoc").read[io.apibuilder.spec.v0.models.Apidoc]
name <- (__ \\ "name").read[String]
organization <- (__ \\ "organization").read[io.apibuilder.spec.v0.models.Organization]
application <- (__ \\ "application").read[io.apibuilder.spec.v0.models.Application]
namespace <- (__ \\ "namespace").read[String]
version <- (__ \\ "version").read[String]
baseUrl <- (__ \\ "base_url").readNullable[String]
description <- (__ \\ "description").readNullable[String]
info <- (__ \\ "info").read[io.apibuilder.spec.v0.models.Info]
headers <- (__ \\ "headers").read[Seq[io.apibuilder.spec.v0.models.Header]]
imports <- (__ \\ "imports").read[Seq[io.apibuilder.spec.v0.models.Import]]
enums <- (__ \\ "enums").read[Seq[io.apibuilder.spec.v0.models.Enum]]
interfaces <- (__ \\ "interfaces").readWithDefault[Seq[io.apibuilder.spec.v0.models.Interface]](Nil)
unions <- (__ \\ "unions").read[Seq[io.apibuilder.spec.v0.models.Union]]
models <- (__ \\ "models").read[Seq[io.apibuilder.spec.v0.models.Model]]
resources <- (__ \\ "resources").read[Seq[io.apibuilder.spec.v0.models.Resource]]
attributes <- (__ \\ "attributes").read[Seq[io.apibuilder.spec.v0.models.Attribute]]
annotations <- (__ \\ "annotations").readWithDefault[Seq[io.apibuilder.spec.v0.models.Annotation]](Nil)
} yield Service(apidoc, name, organization, application, namespace, version, baseUrl, description, info, headers, imports, enums, interfaces, unions, models, resources, attributes, annotations)
}
def jsObjectService(obj: io.apibuilder.spec.v0.models.Service): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"apidoc" -> jsObjectApidoc(obj.apidoc),
"name" -> play.api.libs.json.JsString(obj.name),
"organization" -> jsObjectOrganization(obj.organization),
"application" -> jsObjectApplication(obj.application),
"namespace" -> play.api.libs.json.JsString(obj.namespace),
"version" -> play.api.libs.json.JsString(obj.version),
"info" -> jsObjectInfo(obj.info),
"headers" -> play.api.libs.json.Json.toJson(obj.headers),
"imports" -> play.api.libs.json.Json.toJson(obj.imports),
"enums" -> play.api.libs.json.Json.toJson(obj.enums),
"interfaces" -> play.api.libs.json.Json.toJson(obj.interfaces),
"unions" -> play.api.libs.json.Json.toJson(obj.unions),
"models" -> play.api.libs.json.Json.toJson(obj.models),
"resources" -> play.api.libs.json.Json.toJson(obj.resources),
"attributes" -> play.api.libs.json.Json.toJson(obj.attributes),
"annotations" -> play.api.libs.json.Json.toJson(obj.annotations)
) ++ (obj.baseUrl match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("base_url" -> play.api.libs.json.JsString(x))
}) ++
(obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
})
}
implicit def jsonWritesApibuilderSpecService: play.api.libs.json.Writes[Service] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Service] {
def writes(obj: io.apibuilder.spec.v0.models.Service) = {
jsObjectService(obj)
}
}
}
implicit def jsonReadsApibuilderSpecUnion: play.api.libs.json.Reads[Union] = {
for {
name <- (__ \\ "name").read[String]
plural <- (__ \\ "plural").read[String]
discriminator <- (__ \\ "discriminator").readNullable[String]
description <- (__ \\ "description").readNullable[String]
deprecation <- (__ \\ "deprecation").readNullable[io.apibuilder.spec.v0.models.Deprecation]
types <- (__ \\ "types").read[Seq[io.apibuilder.spec.v0.models.UnionType]]
attributes <- (__ \\ "attributes").read[Seq[io.apibuilder.spec.v0.models.Attribute]]
interfaces <- (__ \\ "interfaces").readWithDefault[Seq[String]](Nil)
} yield Union(name, plural, discriminator, description, deprecation, types, attributes, interfaces)
}
def jsObjectUnion(obj: io.apibuilder.spec.v0.models.Union): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"name" -> play.api.libs.json.JsString(obj.name),
"plural" -> play.api.libs.json.JsString(obj.plural),
"types" -> play.api.libs.json.Json.toJson(obj.types),
"attributes" -> play.api.libs.json.Json.toJson(obj.attributes),
"interfaces" -> play.api.libs.json.Json.toJson(obj.interfaces)
) ++ (obj.discriminator match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("discriminator" -> play.api.libs.json.JsString(x))
}) ++
(obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
}) ++
(obj.deprecation match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("deprecation" -> jsObjectDeprecation(x))
})
}
implicit def jsonWritesApibuilderSpecUnion: play.api.libs.json.Writes[Union] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.Union] {
def writes(obj: io.apibuilder.spec.v0.models.Union) = {
jsObjectUnion(obj)
}
}
}
implicit def jsonReadsApibuilderSpecUnionType: play.api.libs.json.Reads[UnionType] = {
for {
`type` <- (__ \\ "type").read[String]
description <- (__ \\ "description").readNullable[String]
deprecation <- (__ \\ "deprecation").readNullable[io.apibuilder.spec.v0.models.Deprecation]
attributes <- (__ \\ "attributes").read[Seq[io.apibuilder.spec.v0.models.Attribute]]
default <- (__ \\ "default").readNullable[Boolean]
discriminatorValue <- (__ \\ "discriminator_value").readNullable[String]
} yield UnionType(`type`, description, deprecation, attributes, default, discriminatorValue)
}
def jsObjectUnionType(obj: io.apibuilder.spec.v0.models.UnionType): play.api.libs.json.JsObject = {
play.api.libs.json.Json.obj(
"type" -> play.api.libs.json.JsString(obj.`type`),
"attributes" -> play.api.libs.json.Json.toJson(obj.attributes)
) ++ (obj.description match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("description" -> play.api.libs.json.JsString(x))
}) ++
(obj.deprecation match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("deprecation" -> jsObjectDeprecation(x))
}) ++
(obj.default match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("default" -> play.api.libs.json.JsBoolean(x))
}) ++
(obj.discriminatorValue match {
case None => play.api.libs.json.Json.obj()
case Some(x) => play.api.libs.json.Json.obj("discriminator_value" -> play.api.libs.json.JsString(x))
})
}
implicit def jsonWritesApibuilderSpecUnionType: play.api.libs.json.Writes[UnionType] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.UnionType] {
def writes(obj: io.apibuilder.spec.v0.models.UnionType) = {
jsObjectUnionType(obj)
}
}
}
implicit def jsonReadsApibuilderSpecResponseCodeInt: play.api.libs.json.Reads[ResponseCodeInt] = {
(__ \\ "value").read[Int].map { x => new ResponseCodeInt(value = x) }
}
implicit def jsonReadsApibuilderSpecResponseCode: play.api.libs.json.Reads[ResponseCode] = {
(
(__ \\ "integer").read(jsonReadsApibuilderSpecResponseCodeInt).asInstanceOf[play.api.libs.json.Reads[ResponseCode]]
orElse
(__ \\ "response_code_option").read(jsonReadsApibuilderSpecResponseCodeOption).asInstanceOf[play.api.libs.json.Reads[ResponseCode]]
orElse
play.api.libs.json.Reads(jsValue => play.api.libs.json.JsSuccess(io.apibuilder.spec.v0.models.ResponseCodeUndefinedType(jsValue.toString))).asInstanceOf[play.api.libs.json.Reads[ResponseCode]]
)
}
def jsObjectResponseCode(obj: io.apibuilder.spec.v0.models.ResponseCode): play.api.libs.json.JsObject = {
obj match {
case x: io.apibuilder.spec.v0.models.ResponseCodeInt => play.api.libs.json.Json.obj("integer" -> play.api.libs.json.Json.obj("value" -> play.api.libs.json.JsNumber(x.value)))
case x: io.apibuilder.spec.v0.models.ResponseCodeOption => play.api.libs.json.Json.obj("response_code_option" -> play.api.libs.json.JsString(x.toString))
case x: io.apibuilder.spec.v0.models.ResponseCodeUndefinedType => sys.error(s"The type[io.apibuilder.spec.v0.models.ResponseCodeUndefinedType] should never be serialized")
}
}
implicit def jsonWritesApibuilderSpecResponseCode: play.api.libs.json.Writes[ResponseCode] = {
new play.api.libs.json.Writes[io.apibuilder.spec.v0.models.ResponseCode] {
def writes(obj: io.apibuilder.spec.v0.models.ResponseCode) = {
jsObjectResponseCode(obj)
}
}
}
}
}
package io.apibuilder.spec.v0 {
object Bindables {
import play.api.mvc.{PathBindable, QueryStringBindable}
// import models directly for backwards compatibility with prior versions of the generator
import Core._
import Models._
object Core {
implicit def pathBindableDateTimeIso8601(implicit stringBinder: QueryStringBindable[String]): PathBindable[_root_.org.joda.time.DateTime] = ApibuilderPathBindable(ApibuilderTypes.dateTimeIso8601)
implicit def queryStringBindableDateTimeIso8601(implicit stringBinder: QueryStringBindable[String]): QueryStringBindable[_root_.org.joda.time.DateTime] = ApibuilderQueryStringBindable(ApibuilderTypes.dateTimeIso8601)
implicit def pathBindableDateIso8601(implicit stringBinder: QueryStringBindable[String]): PathBindable[_root_.org.joda.time.LocalDate] = ApibuilderPathBindable(ApibuilderTypes.dateIso8601)
implicit def queryStringBindableDateIso8601(implicit stringBinder: QueryStringBindable[String]): QueryStringBindable[_root_.org.joda.time.LocalDate] = ApibuilderQueryStringBindable(ApibuilderTypes.dateIso8601)
}
object Models {
import io.apibuilder.spec.v0.models._
val methodConverter: ApibuilderTypeConverter[io.apibuilder.spec.v0.models.Method] = new ApibuilderTypeConverter[io.apibuilder.spec.v0.models.Method] {
override def convert(value: String): io.apibuilder.spec.v0.models.Method = io.apibuilder.spec.v0.models.Method(value)
override def convert(value: io.apibuilder.spec.v0.models.Method): String = value.toString
override def example: io.apibuilder.spec.v0.models.Method = io.apibuilder.spec.v0.models.Method.Get
override def validValues: Seq[io.apibuilder.spec.v0.models.Method] = io.apibuilder.spec.v0.models.Method.all
}
implicit def pathBindableMethod(implicit stringBinder: QueryStringBindable[String]): PathBindable[io.apibuilder.spec.v0.models.Method] = ApibuilderPathBindable(methodConverter)
implicit def queryStringBindableMethod(implicit stringBinder: QueryStringBindable[String]): QueryStringBindable[io.apibuilder.spec.v0.models.Method] = ApibuilderQueryStringBindable(methodConverter)
val parameterLocationConverter: ApibuilderTypeConverter[io.apibuilder.spec.v0.models.ParameterLocation] = new ApibuilderTypeConverter[io.apibuilder.spec.v0.models.ParameterLocation] {
override def convert(value: String): io.apibuilder.spec.v0.models.ParameterLocation = io.apibuilder.spec.v0.models.ParameterLocation(value)
override def convert(value: io.apibuilder.spec.v0.models.ParameterLocation): String = value.toString
override def example: io.apibuilder.spec.v0.models.ParameterLocation = io.apibuilder.spec.v0.models.ParameterLocation.Path
override def validValues: Seq[io.apibuilder.spec.v0.models.ParameterLocation] = io.apibuilder.spec.v0.models.ParameterLocation.all
}
implicit def pathBindableParameterLocation(implicit stringBinder: QueryStringBindable[String]): PathBindable[io.apibuilder.spec.v0.models.ParameterLocation] = ApibuilderPathBindable(parameterLocationConverter)
implicit def queryStringBindableParameterLocation(implicit stringBinder: QueryStringBindable[String]): QueryStringBindable[io.apibuilder.spec.v0.models.ParameterLocation] = ApibuilderQueryStringBindable(parameterLocationConverter)
val responseCodeOptionConverter: ApibuilderTypeConverter[io.apibuilder.spec.v0.models.ResponseCodeOption] = new ApibuilderTypeConverter[io.apibuilder.spec.v0.models.ResponseCodeOption] {
override def convert(value: String): io.apibuilder.spec.v0.models.ResponseCodeOption = io.apibuilder.spec.v0.models.ResponseCodeOption(value)
override def convert(value: io.apibuilder.spec.v0.models.ResponseCodeOption): String = value.toString
override def example: io.apibuilder.spec.v0.models.ResponseCodeOption = io.apibuilder.spec.v0.models.ResponseCodeOption.Default
override def validValues: Seq[io.apibuilder.spec.v0.models.ResponseCodeOption] = io.apibuilder.spec.v0.models.ResponseCodeOption.all
}
implicit def pathBindableResponseCodeOption(implicit stringBinder: QueryStringBindable[String]): PathBindable[io.apibuilder.spec.v0.models.ResponseCodeOption] = ApibuilderPathBindable(responseCodeOptionConverter)
implicit def queryStringBindableResponseCodeOption(implicit stringBinder: QueryStringBindable[String]): QueryStringBindable[io.apibuilder.spec.v0.models.ResponseCodeOption] = ApibuilderQueryStringBindable(responseCodeOptionConverter)
}
trait ApibuilderTypeConverter[T] {
def convert(value: String): T
def convert(value: T): String
def example: T
def validValues: Seq[T] = Nil
def errorMessage(key: String, value: String, ex: java.lang.Exception): String = {
val base = s"Invalid value '$value' for parameter '$key'. "
validValues.toList match {
case Nil => base + "Ex: " + convert(example)
        case values => base + "Valid values are: " + values.mkString("'", "', '", "'")
}
}
}
object ApibuilderTypes {
val dateTimeIso8601: ApibuilderTypeConverter[_root_.org.joda.time.DateTime] = new ApibuilderTypeConverter[_root_.org.joda.time.DateTime] {
override def convert(value: String): _root_.org.joda.time.DateTime = _root_.org.joda.time.format.ISODateTimeFormat.dateTimeParser.parseDateTime(value)
override def convert(value: _root_.org.joda.time.DateTime): String = _root_.org.joda.time.format.ISODateTimeFormat.dateTime.print(value)
override def example: _root_.org.joda.time.DateTime = _root_.org.joda.time.DateTime.now
}
val dateIso8601: ApibuilderTypeConverter[_root_.org.joda.time.LocalDate] = new ApibuilderTypeConverter[_root_.org.joda.time.LocalDate] {
override def convert(value: String): _root_.org.joda.time.LocalDate = _root_.org.joda.time.format.ISODateTimeFormat.dateTimeParser.parseLocalDate(value)
override def convert(value: _root_.org.joda.time.LocalDate): String = _root_.org.joda.time.format.ISODateTimeFormat.date.print(value)
override def example: _root_.org.joda.time.LocalDate = _root_.org.joda.time.LocalDate.now
}
}
final case class ApibuilderQueryStringBindable[T](
converters: ApibuilderTypeConverter[T]
) extends QueryStringBindable[T] {
override def bind(key: String, params: Map[String, Seq[String]]): _root_.scala.Option[_root_.scala.Either[String, T]] = {
params.getOrElse(key, Nil).headOption.map { v =>
try {
Right(
converters.convert(v)
)
} catch {
case ex: java.lang.Exception => Left(
converters.errorMessage(key, v, ex)
)
}
}
}
override def unbind(key: String, value: T): String = {
s"$key=${converters.convert(value)}"
}
}
final case class ApibuilderPathBindable[T](
converters: ApibuilderTypeConverter[T]
) extends PathBindable[T] {
override def bind(key: String, value: String): _root_.scala.Either[String, T] = {
try {
Right(
converters.convert(value)
)
} catch {
case ex: java.lang.Exception => Left(
converters.errorMessage(key, value, ex)
)
}
}
override def unbind(key: String, value: T): String = {
converters.convert(value)
}
}
}
}
package io.apibuilder.spec.v0 {
object Constants {
val Namespace = "io.apibuilder.spec.v0"
val UserAgent = "apibuilder 0.15.3 app.apibuilder.io/apicollective/apibuilder-spec/latest/play_2_8_client"
val Version = "0.15.4"
val VersionMajor = 0
}
class Client(
ws: play.api.libs.ws.WSClient,
val baseUrl: String,
auth: scala.Option[io.apibuilder.spec.v0.Authorization] = None,
defaultHeaders: Seq[(String, String)] = Nil
) extends interfaces.Client {
import io.apibuilder.spec.v0.models.json._
private[this] val logger = play.api.Logger("io.apibuilder.spec.v0.Client")
logger.info(s"Initializing io.apibuilder.spec.v0.Client for url $baseUrl")
def _requestHolder(path: String): play.api.libs.ws.WSRequest = {
val holder = ws.url(baseUrl + path).addHttpHeaders(
"User-Agent" -> Constants.UserAgent,
"X-Apidoc-Version" -> Constants.Version,
"X-Apidoc-Version-Major" -> Constants.VersionMajor.toString
).addHttpHeaders(defaultHeaders : _*)
auth.fold(holder) {
case Authorization.Basic(username, password) => {
holder.withAuth(username, password.getOrElse(""), play.api.libs.ws.WSAuthScheme.BASIC)
}
case a => sys.error("Invalid authorization scheme[" + a.getClass + "]")
}
}
def _logRequest(method: String, req: play.api.libs.ws.WSRequest): play.api.libs.ws.WSRequest = {
val queryComponents = for {
(name, values) <- req.queryString
value <- values
} yield s"$name=$value"
val url = s"${req.url}${queryComponents.mkString("?", "&", "")}"
auth.fold(logger.info(s"curl -X $method '$url'")) { _ =>
logger.info(s"curl -X $method -u '[REDACTED]:' '$url'")
}
req
}
def _executeRequest(
method: String,
path: String,
queryParameters: Seq[(String, String)] = Nil,
requestHeaders: Seq[(String, String)] = Nil,
body: Option[play.api.libs.json.JsValue] = None
): scala.concurrent.Future[play.api.libs.ws.WSResponse] = {
method.toUpperCase match {
case "GET" => {
_logRequest("GET", _requestHolder(path).addHttpHeaders(requestHeaders:_*).addQueryStringParameters(queryParameters:_*)).get()
}
case "POST" => {
_logRequest("POST", _requestHolder(path).addHttpHeaders(_withJsonContentType(requestHeaders):_*).addQueryStringParameters(queryParameters:_*)).post(body.getOrElse(play.api.libs.json.Json.obj()))
}
case "PUT" => {
_logRequest("PUT", _requestHolder(path).addHttpHeaders(_withJsonContentType(requestHeaders):_*).addQueryStringParameters(queryParameters:_*)).put(body.getOrElse(play.api.libs.json.Json.obj()))
}
case "PATCH" => {
_logRequest("PATCH", _requestHolder(path).addHttpHeaders(requestHeaders:_*).addQueryStringParameters(queryParameters:_*)).patch(body.getOrElse(play.api.libs.json.Json.obj()))
}
case "DELETE" => {
_logRequest("DELETE", _requestHolder(path).addHttpHeaders(requestHeaders:_*).addQueryStringParameters(queryParameters:_*)).delete()
}
case "HEAD" => {
_logRequest("HEAD", _requestHolder(path).addHttpHeaders(requestHeaders:_*).addQueryStringParameters(queryParameters:_*)).head()
}
case "OPTIONS" => {
_logRequest("OPTIONS", _requestHolder(path).addHttpHeaders(requestHeaders:_*).addQueryStringParameters(queryParameters:_*)).options()
}
case _ => {
_logRequest(method, _requestHolder(path).addHttpHeaders(requestHeaders:_*).addQueryStringParameters(queryParameters:_*))
sys.error("Unsupported method[%s]".format(method))
}
}
}
/**
* Adds a Content-Type: application/json header unless the specified requestHeaders
* already contain a Content-Type header
*/
def _withJsonContentType(headers: Seq[(String, String)]): Seq[(String, String)] = {
headers.find { _._1.toUpperCase == "CONTENT-TYPE" } match {
case None => headers ++ Seq(("Content-Type" -> "application/json; charset=UTF-8"))
case Some(_) => headers
}
}
}
object Client {
def parseJson[T](
className: String,
r: play.api.libs.ws.WSResponse,
f: (play.api.libs.json.JsValue => play.api.libs.json.JsResult[T])
): T = {
f(play.api.libs.json.Json.parse(r.body)) match {
case play.api.libs.json.JsSuccess(x, _) => x
case play.api.libs.json.JsError(errors) => {
throw io.apibuilder.spec.v0.errors.FailedRequest(r.status, s"Invalid json for class[" + className + "]: " + errors.mkString(" "))
}
}
}
}
sealed trait Authorization extends _root_.scala.Product with _root_.scala.Serializable
object Authorization {
final case class Basic(username: String, password: Option[String] = None) extends Authorization
}
package interfaces {
trait Client {
def baseUrl: String
}
}
package errors {
final case class FailedRequest(responseCode: Int, message: String, requestUri: Option[_root_.java.net.URI] = None) extends _root_.java.lang.Exception(s"HTTP $responseCode: $message")
}
} | gheine/apidoc | generated/app/ApicollectiveApibuilderSpecV0Client.scala | Scala | mit | 79,099 |
package com.wavesplatform.state.diffs.smart
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils.{Base64, EitherExt2}
import com.wavesplatform.lang.directives.DirectiveSet
import com.wavesplatform.lang.directives.values._
import com.wavesplatform.lang.utils._
import com.wavesplatform.lang.v1.compiler.ExpressionCompiler
import com.wavesplatform.lang.v1.compiler.Terms.EVALUATED
import com.wavesplatform.lang.v1.evaluator.EvaluatorV1
import com.wavesplatform.lang.v1.parser.Parser
import com.wavesplatform.lang.v1.traits.Environment
import com.wavesplatform.state.Blockchain
import com.wavesplatform.transaction.smart.BlockchainContext.In
import com.wavesplatform.transaction.smart.{BlockchainContext, buildThisValue}
import com.wavesplatform.transaction.transfer.TransferTransaction
import com.wavesplatform.transaction.{DataTransaction, Transaction}
import com.wavesplatform.utils.EmptyBlockchain
import monix.eval.Coeval
import shapeless.Coproduct
package object predef {
val chainId: Byte = 'u'
def runScript[T <: EVALUATED](script: String, version: StdLibVersion, t: In, blockchain: Blockchain, chainId: Byte): Either[String, T] = {
val expr = Parser.parseExpr(script).get.value
for {
compileResult <- ExpressionCompiler(compilerContext(version, Expression, isAssetScript = false), expr)
(typedExpr, _) = compileResult
directives = DirectiveSet(version, Account, Expression).explicitGet()
evalContext <- BlockchainContext.build(version,
chainId,
Coeval.evalOnce(buildThisValue(t, blockchain, directives, Coproduct[Environment.Tthis](Environment.AssetId(Array())))).map(_.explicitGet()),
Coeval.evalOnce(blockchain.height),
blockchain,
isTokenContext = false,
isContract = false,
Coproduct[Environment.Tthis](Environment.AssetId(Array())),
ByteStr.empty)
r <- EvaluatorV1().apply[T](evalContext, typedExpr)
} yield r
}
def runScript[T <: EVALUATED](script: String, t: In = null, ctxV: StdLibVersion = V1, chainId: Byte = chainId): Either[String, T] =
runScript[T](script, ctxV, t, EmptyBlockchain, chainId)
def runScript[T <: EVALUATED](script: String, t: In, chainId: Byte): Either[String, T] =
runScript[T](script, V1, t, EmptyBlockchain, chainId)
def runScript[T <: EVALUATED](script: String, tx: Transaction, blockchain: Blockchain): Either[String, T] =
runScript[T](script, V1, Coproduct(tx), blockchain, chainId)
def runScriptWithCustomContext[T <: EVALUATED](
script: String,
t: In,
chainId: Byte,
ctxV: StdLibVersion = V1,
blockchain: Blockchain = EmptyBlockchain
): Either[String, T] =
runScript[T](script, ctxV, t, blockchain, chainId)
private def dropLastLine(str: String): String = str.replace("\\r", "").split('\\n').init.mkString("\\n")
def scriptWithAllV1Functions(tx: DataTransaction, t: TransferTransaction): String =
s"""${dropLastLine(scriptWithV1PureFunctions(tx, t))}
|${dropLastLine(scriptWithV1WavesFunctions(tx, t))}
|${dropLastLine(scriptWithCryptoFunctions)}
|if rnd then pure && waves else crypto""".stripMargin
def scriptWithV1PureFunctions(tx: DataTransaction, t: TransferTransaction): String =
s"""
| # Pure context
| # 1) basic(+ eq) -> mulLong, divLong, modLong, sumLong, subLong, sumString, sumByteVector
|
| let rnd = tx.timestamp % 2 == 0
| let longAll = 1000 * 2 == 2000 && 1000 / 2 == 500 && 1000 % 2 == 0 && 1000 + 2 == 1002 && 1000 - 2 == 998
| let sumString = "ha" + "-" +"ha" == "ha-ha"
| let sumByteVector = match tx {
| case d0: DataTransaction =>
| let body = d0.bodyBytes
| body + base64'${Base64.encode(tx.bodyBytes())}' == base64'${Base64.encode(tx.bodyBytes())}' + base64'${Base64.encode(tx.bodyBytes())}'
| case _: TransferTransaction => true
| case _ => false
| }
|
| let eqUnion = match tx {
| case _: DataTransaction => true
| case t0: TransferTransaction => t0.recipient == Address(base58'${t.recipient.toString}')
| case _ => false
| }
|
| let basic = longAll && sumString && sumByteVector && eqUnion
|
| # 2) ne
| let nePrim = 1000 != 999 && "ha" +"ha" != "ha-ha" && tx.bodyBytes != base64'hahaha'
| let neDataEntryAndGetElement = match tx {
| case d1: DataTransaction => d1.data[0] != DataEntry("ha", true)
| case _: TransferTransaction => true
| case _ => false
| }
|
| let neOptionAndExtractHeight = match tx {
| case _: DataTransaction => true
| case _: TransferTransaction => extract(transactionHeightById(tx.id)) != 0
| case _ => false
| }
|
| let ne = nePrim && neDataEntryAndGetElement && neOptionAndExtractHeight
|
|# 3) gt, ge
| let gteLong = 1000 > 999 && 1000 >= 999
|
|# 4) getListSize
| let getListSize = match tx {
| case d2: DataTransaction => size(d2.data) != 0
| case _: TransferTransaction => true
| case _ => false
| }
|
|# 5) unary
| let unary = -1 == -1 && false == !true
|
|# 6) fraction, sizeBytes, takeBytes, dropBytes, takeRightBytes, dropRightBytes, sizeString, takeString, dropString,
|# takeRightString, dropRightString, isDefined
| let frAction = fraction(12, 3, 4) == 9
| let bytesOps = match tx {
| case d3: DataTransaction =>
| size(d3.bodyBytes) != 0 && take(d3.bodyBytes, 1) != base58'ha' && drop(d3.bodyBytes, 1) != base58'ha' && takeRight(d3.bodyBytes, 1) != base58'ha' && dropRight(d3.bodyBytes, 1) != base58'ha'
| case t1: TransferTransaction => isDefined(t1.feeAssetId) == false
| case _ => false
| }
| let strOps = size("haha") != 0 && take("haha", 1) != "" && drop("haha", 0) != "" && takeRight("haha", 1) != "" && dropRight("haha", 0) != ""
|
| let pure = basic && ne && gteLong && getListSize && unary && frAction && bytesOps && strOps
| pure""".stripMargin
def scriptWithV1WavesFunctions(tx: DataTransaction, t: TransferTransaction): String =
s""" # Waves context
| let txById = match tx {
| case _: DataTransaction => true
| case _: TransferTransaction =>
| let g = extract(transactionById(base58'${tx.id().toString}'))
| g.id == base58'${tx.id().toString}'
| case _ => false
| }
| let entries = match tx {
| case d: DataTransaction =>
| let int = extract(getInteger(d.data, "${tx.data(0).key}"))
| let bool = extract(getBoolean(d.data, "${tx.data(1).key}"))
| let blob = extract(getBinary(d.data, "${tx.data(2).key}"))
| let str = extract(getString(d.data, "${tx.data(3).key}"))
| let dataByKey = toString(int) == "${tx.data(0).value}" || toString(bool) == "${tx.data(1).value}" ||
| size(blob) > 0 || str == "${tx.data(3).value}"
|
| let d0 = extract(getInteger(d.data, 0))
| let d1 = extract(getBoolean(d.data, 1))
| let d2 = getBinary(d.data, 2)
| let d3 = getString(d.data, 3)
| let dataByIndex = toBytes(d0) == base64'abcdef' || toBytes(d1) == base64'ghijkl' ||
| isDefined(d2) || toBytes(extract(d3)) == base64'mnopqr'
|
| dataByKey && dataByIndex
|
| case _: TransferTransaction =>
| let add = Address(base58'${t.recipient}')
| let long = extract(getInteger(add,"${tx.data(0).key}")) == ${tx.data(0).value}
| let bool1 = extract(getBoolean(add,"${tx.data(1).key}")) == ${tx.data(1).value}
| let bin = extract(getBinary(add,"${tx.data(2).key}")) == base58'${tx.data(2).value}'
| let str1 = extract(getString(add,"${tx.data(3).key}")) == "${tx.data(3).value}"
| long && bool1 && bin && str1
|
| case _: CreateAliasTransaction => throw("oh no")
| case _: BurnTransaction => throw()
| case _ => false
| }
|
| let aFromPK = addressFromPublicKey(tx.senderPublicKey) == tx.sender
| let aFromStrOrRecip = match tx {
| case _: DataTransaction => addressFromString("${tx.sender.toAddress}") == Address(base58'${tx.sender.toAddress}')
| case t1: TransferTransaction => addressFromRecipient(t1.recipient) == Address(base58'${t.recipient}')
| case _ => false
| }
|
| let balances = assetBalance(tx.sender, unit) > 0 && wavesBalance(tx.sender) != 0
|
| let waves = txById && entries && balances && aFromPK && aFromStrOrRecip && height > 0
| waves""".stripMargin
def scriptWithCryptoFunctions: String =
s"""
| # Crypto context
| let bks = blake2b256(base58'') != base58'' && keccak256(base58'') != base58'' && sha256(base58'') != base58''
| let sig = sigVerify(base58'333', base58'123', base58'567') != true
| let str58 = fromBase58String(toBase58String(tx.id)) == tx.id
| let str64 = fromBase64String(toBase64String(tx.id)) == tx.id
|
| let crypto = bks && sig && str58 && str64
| crypto""".stripMargin
}
| wavesplatform/Waves | node/src/test/scala/com/wavesplatform/state/diffs/smart/predef/package.scala | Scala | mit | 9,723 |
package org.kirhgoff.morphoid.engine
import java.util.concurrent.atomic.AtomicLong
import scala.util.Random
/**
* Created by <a href="mailto:[email protected]">kirhgoff</a> on 2/9/17.
*/
object Dice {
def randomOne() = if (random.nextBoolean()) 1 else 0
val idCounter = new AtomicLong()
val tickCounter = new AtomicLong()
val random = Random
  def randomInt(maxExclusive: Int) = random.nextInt(maxExclusive) // Random.nextInt excludes the bound
def randomDirection = Direction.byIndex(random.nextInt(4))
//TODO format with zeros
def makeId(prefix:String = "") = prefix + idCounter.incrementAndGet().toString
def nextTickNumber = tickCounter.getAndIncrement()
}
| kirhgoff/morphoid | morphoid-engine/src/main/scala/org/kirhgoff/morphoid/engine/Dice.scala | Scala | lgpl-2.1 | 658 |
package dialectic.micro
import org.scalacheck._
import org.scalacheck.Arbitrary._
import org.specs2._
import scalaz.{ IMap, Order }
import scalaz.std.anyVal.intInstance
import scalaz.scalacheck.ScalazArbitrary.Arbitrary_==>>
import scalaz.scalacheck.ScalazProperties.equal
class GoalStateTest extends Specification with ScalaCheck {
import GoalStateTestHelper._
def is =
s2"""
GoalState
should have a lawful equality ${equal.laws[GoalState[Int]]}
"""
}
object GoalStateTestHelper {
import TermTestHelper._
implicit def goalStateArbitrary[A : Arbitrary : Order]: Arbitrary[GoalState[A]] =
Arbitrary(for {
m <- arbitrary[IMap[Term[A], Term[A]]]
i <- arbitrary[Int]
} yield GoalState(m, i))
}
| adelbertc/dialectic | micro/src/test/scala/dialectic/micro/GoalStateTest.scala | Scala | bsd-3-clause | 743 |
/*
* Copyright 2015 University of Basel, Graphics and Vision Research Group
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package scalismo.registration
import scalismo.common.{DifferentiableField, Field, Scalar}
import scalismo.geometry.NDSpace
import scalismo.numerics.Sampler
import scalismo.transformations.TransformationSpace
/**
* The mean squares image to image metric.
* It is implemented as the squared loss function in terms of the pointwise pixel differences.
*/
case class MeanSquaresMetric[D: NDSpace, A: Scalar](fixedImage: Field[D, A],
movingImage: DifferentiableField[D, A],
transformationSpace: TransformationSpace[D],
sampler: Sampler[D])
extends MeanPointwiseLossMetric[D, A](fixedImage, movingImage, transformationSpace, sampler) {
val scalar = Scalar[A]
override val ndSpace = implicitly[NDSpace[D]]
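  // Pointwise loss supplied to the base class: for a residual v = fixedImage(x) - movingImage(T(x)),
  // the contribution is v * v; MeanPointwiseLossMetric turns these contributions into a mean over the sampler's points.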
override protected def lossFunction(v: A): Double = {
val value = scalar.toDouble(v)
    value * value
}
override protected def lossFunctionDerivative(v: A): Double = {
2.0 * scalar.toDouble(v)
}
}
| unibas-gravis/scalismo | src/main/scala/scalismo/registration/MeanSquaresMetric.scala | Scala | apache-2.0 | 1,727 |
package us.theatr.spray.nozzle
/*
Copyright 2012 Yann Ramin
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import sun.misc.{BASE64Encoder, BASE64Decoder}
import javax.crypto.SecretKeyFactory
import javax.crypto.spec.PBEKeySpec
import java.security.SecureRandom
/**
*
* A standard/boring way to store passwords safely using PBKDF#2
* and a random salt. All parameters are encoded into the salt text string
* to allow porting of the default algorithms and iterations at a later date.
*/
object PasswordUtil {
/**
* The default iterations parameter - this is the number of rounds of PBKDF#2 to run
*/
val iterations = 200000
/**
* The default algorithm for hashing. Certain algorithms require GnuCrypto,
* such as HmacSHA256
*/
val default_algo = "PBKDF2WithHmacSHA1"
/**
* This must match the expected output of the algorithm in question.
*/
val hash_length = 160
/**
* Given a user password, the hashed password from the datastore, and the stored salt, check
* if the password matches.
* @param password User supplied password
* @param dbpassword Data-store password - previously generated by securePassword
* @param salt Data-store salt - previously generated by securePassword
* @return
*/
def checkPassword(password: String, dbpassword: String, salt: String) : Boolean = {
val spl = salt.split(':')
val saltarray = new BASE64Decoder().decodeBuffer(spl(2))
val algo = spl(0)
val iter = spl(1).toInt
securePassword(password, saltarray, iter, algo)._1 == dbpassword
}
/**
* Secure a password given in password. All other parameters are default.
* @param password User supplied password
* @param salt (optional)
* @param iter (optional) Number of iterations
* @param algo (optional) Algorithm
* @param hash_len (optional) Hash length output
* @return A tuple of Strings containing: (Base64(password_hash), Salt)
*/
def securePassword(password: String, salt: Array[Byte] = generateSalt, iter: Int = iterations, algo: String = default_algo, hash_len: Int = hash_length) : (String, String) = {
val fac = SecretKeyFactory.getInstance(algo)
    val pbks = new PBEKeySpec(password.toCharArray, salt, iter, hash_len)
    (new BASE64Encoder().encode(fac.generateSecret(pbks).getEncoded),
      algo + ":" + iter.toString + ":" + new BASE64Encoder().encode(salt))
}
/**
* Generate a salt (random array of bytes)
* @return Array of random bytes
*/
def generateSalt : Array[Byte] = {
val bytes = Array.fill(128) {0.toByte}
new SecureRandom().nextBytes(bytes)
bytes
}
}
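// Usage sketch (not part of the original utility): hash a password once, store both returned
// strings, and later verify a login attempt against them. The object name and the sample
// passwords below are illustrative only.
object PasswordUtilExample {
  def main(args: Array[String]): Unit = {
    // `salt` embeds the algorithm and iteration count, e.g. "PBKDF2WithHmacSHA1:200000:<base64 salt>"
    val (hash, salt) = PasswordUtil.securePassword("correct horse battery staple")
    val ok = PasswordUtil.checkPassword("correct horse battery staple", hash, salt)
    val bad = PasswordUtil.checkPassword("not the password", hash, salt)
    println(s"correct password verifies: $ok, wrong password verifies: $bad")
  }
}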
| theatrus/spray-nozzle | src/main/scala/us/theatr/spray/nozzle/PasswordUtil.scala | Scala | apache-2.0 | 3,035 |
package util.chrome
import scala.scalajs.js
@js.native
object api extends js.GlobalScope {
def chrome: Chrome = js.native
@js.native
trait Chrome extends js.Object {
def storage: ChromeStorage = js.native
def runtime: Runtime = js.native
}
@js.native
trait ChromeStorage extends js.Object {
def local: Storage = js.native
}
@js.native
trait Storage extends js.Object {
def set(map: js.Dynamic, callback: js.Function0[Any]): Unit = js.native
def get(key: String, callback: js.Function1[js.Dynamic, Any]): Unit = js.native
def remove(key: String, callback: js.Function0[Any]): Unit = js.native
}
@js.native
trait Runtime extends js.Object {
def getPlatformInfo(callback: js.Function1[PlatformInfo, Any]): Unit = js.native
def getManifest(): Manifest = js.native
}
@js.native
trait PlatformInfo extends js.Object {
def os: String = js.native
def arch: String = js.native
def nacl_arch: String = js.native
}
@js.native
trait Manifest extends js.Object {
def version: String = js.native
}
}
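// Usage sketch (not part of the facade above): writing and reading chrome.storage.local through
// the `api` facade from Scala.js. The storage key "lastUrl" is illustrative only.
object StorageExample {
  import api._
  def rememberUrl(url: String): Unit =
    chrome.storage.local.set(js.Dynamic.literal(lastUrl = url), () => println("url saved"))
  def printRememberedUrl(): Unit =
    chrome.storage.local.get("lastUrl", (items: js.Dynamic) => println(items.lastUrl))
}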
| felixgborrego/simple-docker-ui | chromeapp/src/main/scala/util/ChromeApi.scala | Scala | mit | 1,089 |
package ghtorrent.models
case class User(id: Int, login: String)
| PRioritizer/PRioritizer-analyzer | src/main/scala/ghtorrent/models/User.scala | Scala | mit | 66 |
package com.harrys.hyppo.worker.proto
import com.harrys.hyppo.executor.proto.com.CreateIngestionTasksCommand
import com.harrys.hyppo.executor.proto.res.CreateIngestionTasksResult
import com.harrys.hyppo.worker.{ProcessedDataStub, TestObjects}
import scala.concurrent.duration._
/**
* Created by jpetty on 7/23/15.
*/
class CreateTasksCommandTest extends ExecutorCommandTest {
override def integrationClass = classOf[ProcessedDataStub]
"The create tasks operation" must {
val testJob = TestObjects.testIngestionJob
val output = commander.executeCommand(new CreateIngestionTasksCommand(testJob))
val resultObject = output.result
"produce a correct result type" in {
resultObject shouldBe a [CreateIngestionTasksResult]
}
val taskResult = resultObject.asInstanceOf[CreateIngestionTasksResult]
"successfully produce an integration task result" in {
taskResult.getJob.getId shouldEqual testJob.getId
taskResult.getCreatedTasks.size should be > 0
}
"then exit cleanly" in {
commander.sendExitCommandAndWait(Duration(100, MILLISECONDS)) shouldEqual 0
}
}
}
| harrystech/hyppo-worker | worker/src/test/scala/com/harrys/hyppo/worker/proto/CreateTasksCommandTest.scala | Scala | mit | 1,132 |
/*
* Skylark
* http://skylark.io
*
* Copyright 2012-2017 Quantarray, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.quantarray.skylark.measure
import scala.annotation.implicitNotFound
/**
* Can divide measure type class.
*
* @author Araik Grigoryan
*/
@implicitNotFound("Cannot find CanDivideMeasure implementation that divides ${N} by ${D}, resulting in ${R}.")
trait CanDivideMeasure[N, D, R]
{
def divide(numerator: N, denominator: D): R
}
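// Illustrative sketch (not part of the library): a toy instance plus a helper that summons it,
// showing how CanDivideMeasure evidence is meant to be resolved. Plain Doubles are used only to
// keep the example self-contained; real instances divide concrete measure types.
object CanDivideMeasureExample {
  implicit val divideDoubles: CanDivideMeasure[Double, Double, Double] =
    new CanDivideMeasure[Double, Double, Double] {
      def divide(numerator: Double, denominator: Double): Double = numerator / denominator
    }
  // Resolves the evidence from implicit scope and applies it, e.g. ratio(10.0, 4.0) == 2.5.
  def ratio[N, D, R](numerator: N, denominator: D)(implicit cd: CanDivideMeasure[N, D, R]): R =
    cd.divide(numerator, denominator)
}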
| quantarray/skylark | skylark-measure/src/main/scala/com/quantarray/skylark/measure/CanDivideMeasure.scala | Scala | apache-2.0 | 986 |
package chapter11
object Exercise6 {
} | amolnayak311/functional-programming-in-scala | src/chapter11/Exercise6.scala | Scala | unlicense | 47 |
package haru.dao
import haru.util.RedisKeyGenerator
import com.redis.RedisClient
import com.redis.RedisClientPool
object SchemaDao {
val clients = new RedisClientPool("stage.haru.io", 6400)
  def getClasses(appid: String): Option[Set[Option[String]]] =
    clients.withClient { client =>
      val key = RedisKeyGenerator.getClass(appid)
      client.smembers(key)
    }
  def getSchema(appid: String, classes: String): Option[Map[String, String]] =
    clients.withClient { client =>
      val key = RedisKeyGenerator.getSchema(appid, classes)
      client.hgetall[String, String](key)
    }
  def addColumn(appid: String, classes: String, columnname: String, columntype: String): Boolean =
    clients.withClient { client =>
      val key = RedisKeyGenerator.getSchema(appid, classes)
      client.hsetnx(key, columnname, columntype)
    }
} | haruio/haru-admin | src/main/scala/haru/dao/SchemaDao.scala | Scala | mit | 1,101 |
package com.github.unisay.mockserver.scala
import java.util.concurrent.TimeUnit
import com.github.unisay.mockserver.scala.DSL.Headers._
import com.github.unisay.mockserver.scala.DSL.Statuses._
import com.github.unisay.mockserver.scala.DSL._
import org.mockserver.client.server.{ForwardChainExpectation, MockServerClient}
import org.mockserver.matchers.Times
import org.mockserver.model.HttpRequest.{request => mockRequest}
import org.mockserver.model.HttpResponse.{response => mockResponse}
import org.mockserver.model.{HttpRequest, HttpResponse, RegexBody}
import org.scalamock.scalatest.MockFactory
import org.scalatest.FlatSpec
import scala.language.postfixOps
class DSLTest extends FlatSpec with MockFactory {
val mockServerClient = mock[NoArgMockServerClient]
val forwardChain = mock[NoArgForwardChain]
"Any method for any path" must "respond with status code 200" in {
expectRequestResponse(mockRequest(), mockResponse().withStatusCode(200))
implicit val client = mockServerClient
forAnyRequest respond Ok always
}
"Any method for any path with explicit client" must "respond with status code 200" in {
expectRequestResponse(mockRequest(), mockResponse().withStatusCode(200))
forAnyRequest(mockServerClient) respond Ok always
}
"GET for any path" must "respond with status code 200" in {
expectRequestResponse(
mockRequest().withMethod("GET"),
mockResponse().withStatusCode(200))
implicit val client = mockServerClient
when get *** respond Ok always
}
"GET for any path with explicit client" must "respond with status code 200" in {
expectRequestResponse(mockRequest().withMethod("GET"), mockResponse().withStatusCode(200))
when(mockServerClient) get *** respond Ok always
}
"POST for any path" must "respond with status code 200" in {
expectRequestResponse(
mockRequest().withMethod("POST"),
mockResponse().withStatusCode(200))
implicit val client = mockServerClient
when post *** respond Ok always
}
"GET /path" must "respond with status code 200" in {
expectRequestResponse(
mockRequest().withMethod("GET").withPath("/path"),
mockResponse().withStatusCode(200))
implicit val client = mockServerClient
when get "/path" respond Ok always
}
"POST /path" must "respond with status code 200" in {
expectRequestResponse(
mockRequest().withMethod("POST").withPath("/path"),
mockResponse().withStatusCode(200))
implicit val client = mockServerClient
when post "/path" respond Ok always
}
"PUT /path" must "respond with status code 200" in {
expectRequestResponse(
mockRequest().withMethod("PUT").withPath("/path"),
mockResponse().withStatusCode(200))
implicit val client = mockServerClient
when put "/path" respond Ok always
}
"DELETE /path" must "respond with status code 200" in {
expectRequestResponse(
mockRequest().withMethod("DELETE").withPath("/path"),
mockResponse().withStatusCode(200))
implicit val client = mockServerClient
when delete "/path" respond Ok always
}
"GET /path with one query parameter" must "respond with status code 200" in {
expectRequestResponse(
mockRequest()
.withMethod("GET")
.withPath("/path")
.withQueryStringParameter("k", "v"),
mockResponse()
.withStatusCode(200))
implicit val client = mockServerClient
when get "/path" has param("k", "v") respond Ok always
}
"GET /path with many query parameters" must "respond with status code 200" in {
expectRequestResponse(
mockRequest()
.withMethod("GET")
.withPath("/path")
.withQueryStringParameter("k1", "v1")
.withQueryStringParameter("k2", "v2"),
mockResponse()
.withStatusCode(200))
implicit val client = mockServerClient
when get "/path" has {
param("k1", "v1") and param("k2", "v2")
} respond Ok always
}
"GET /path with one query parameter and one header" must "respond with status code 200" in {
expectRequestResponse(
mockRequest()
.withMethod("GET")
.withPath("/path")
.withQueryStringParameter("p", "1")
.withHeader("h", "2"),
mockResponse()
.withStatusCode(200))
implicit val client = mockServerClient
when get "/path" has {
param("p", 1) and header("h", 2)
} respond Ok always
}
"GET /path with many query parameters and many headers" must "respond with status code 400" in {
expectRequestResponse(
mockRequest()
.withMethod("GET")
.withPath("/path")
.withQueryStringParameter("p1", "pv1")
.withQueryStringParameter("p2", "pv2")
.withHeader("Connection", "keep-alive")
.withHeader("Cache-Control", "no-cache"),
mockResponse()
.withStatusCode(400))
implicit val client = mockServerClient
when get "/path" has {
param("p1", "pv1") and
param("p2", "pv2") and
Connection("keep-alive") and
CacheControl("no-cache")
} respond {
BadRequest
} always
}
"GET /path with many query parameters + many headers" must "respond with status code 200" in {
expectRequestResponse(
mockRequest()
.withMethod("GET")
.withPath("/path")
.withQueryStringParameter("p1", "pv1")
.withQueryStringParameter("p2", "pv2")
.withHeader("h1", "hv1")
.withHeader("h2", "hv2"),
mockResponse()
.withStatusCode(200))
implicit val client = mockServerClient
when get "/path" has param("p1", "pv1") + param("p2", "pv2") + header("h1", "hv1") + header("h2", "hv2") respond Ok always
}
"GET /path with string body" must "respond with status code 200 and string body" in {
expectRequestResponse(
mockRequest()
.withMethod("POST")
.withBody("The Request Body"),
mockResponse()
.withStatusCode(200)
.withBody("The Response Body"))
implicit val client = mockServerClient
when post *** has "The Request Body" respond Ok + "The Response Body" always
}
"GET /path with byte body" must "respond with status code 200 and byte body" in {
expectRequestResponse(
mockRequest()
.withMethod("POST")
.withBody("The Request Body".getBytes),
mockResponse()
.withStatusCode(200)
.withBody("The Response Body".getBytes))
implicit val client = mockServerClient
when post *** has "The Request Body".getBytes respond Ok + "The Response Body".getBytes always
}
"GET /path with regex body" must "respond with status code 200 and string body" in {
expectRequestResponse(
mockRequest()
.withMethod("POST")
.withBody(new RegexBody(".*Request.*")),
mockResponse()
.withStatusCode(200)
.withBody("The Response Body"))
implicit val client = mockServerClient
when post *** has regexBody(".*Request.*") respond Ok + "The Response Body" always
}
"PUT /path" must "after 10.seconds respond Ok" in {
expectRequestResponse(
mockRequest()
.withMethod("PUT")
.withPath("/path"),
mockResponse()
.withStatusCode(200)
.withDelay(TimeUnit.MILLISECONDS, 10000)
)
implicit val client = mockServerClient
import scala.concurrent.duration._
when put "/path" after 10.seconds respond Ok always
}
"PUT /path" must "respond with delay 10.seconds" in {
expectRequestResponse(
mockRequest()
.withMethod("PUT")
.withPath("/path"),
mockResponse()
.withStatusCode(200)
.withDelay(TimeUnit.MILLISECONDS, 10000)
)
implicit val client = mockServerClient
import scala.concurrent.duration._
when put "/path" respond Ok + delay(10.seconds) always
}
"GET /path" must "respond once" in {
expectRequestResponse(
mockRequest().withMethod("GET").withPath("/path"),
mockResponse().withStatusCode(200),
Times.once()
)
implicit val client = mockServerClient
when get "/path" respond Ok once
}
"GET /path" must "respond exactly 42 times" in {
expectRequestResponse(
mockRequest().withMethod("GET").withPath("/path"),
mockResponse().withStatusCode(200),
Times.exactly(42)
)
implicit val client = mockServerClient
when get "/path" respond Ok exactly 42.times
}
class NoArgMockServerClient extends MockServerClient("localhost", 1234)
class NoArgForwardChain extends ForwardChainExpectation(null, null)
private def expectRequestResponse(expectedRequest: HttpRequest,
expectedResponse: HttpResponse,
times: Times = Times.unlimited()) = {
{ mockServerClient.when(_: HttpRequest, _: Times) }.expects(expectedRequest, times).returns(forwardChain)
(forwardChain.respond _).expects(expectedResponse)
}
}
| Unisay/mockserver-client-scala | src/test/scala/com/github/unisay/mockserver/scala/DSLTest.scala | Scala | mit | 8,965 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import java.util.Properties
import scala.collection._
import kafka.log._
import kafka.utils._
import kafka.admin.AdminUtils
import org.I0Itec.zkclient.{IZkChildListener, ZkClient}
import collection.JavaConverters._
/**
* This class initiates and carries out topic config changes.
*
* It works as follows.
*
* Config is stored under the path
* /brokers/topics/<topic_name>/config
* This znode stores the topic-overrides for this topic (but no defaults) in properties format.
*
* To avoid watching all topics for changes instead we have a notification path
* /brokers/config_changes
* The TopicConfigManager has a child watch on this path.
*
* To update a topic config we first update the topic config properties. Then we create a new sequential
* znode under the change path which contains the name of the topic that was updated, say
* /brokers/config_changes/config_change_13321
*
* This will fire a watcher on all brokers. This watcher works as follows. It reads all the config change notifications.
 * It keeps track of the highest config change suffix number it has applied previously. For any change it has already applied,
 * it checks whether that notification is older than a static expiration time (say 10 mins) and, if so, deletes the notification.
* For any new changes it reads the new configuration, combines it with the defaults, and updates the log config
* for all logs for that topic (if any) that it has.
*
* Note that config is always read from the config path in zk, the notification is just a trigger to do so. So if a broker is
* down and misses a change that is fine--when it restarts it will be loading the full config anyway. Note also that
* if there are two consecutive config changes it is possible that only the last one will be applied (since by the time the
 * broker reads the config both changes may have been made). In this case the broker would needlessly refresh the config twice,
* but that is harmless.
*
* On restart the config manager re-processes all notifications. This will usually be wasted work, but avoids any race conditions
* on startup where a change might be missed between the initial config load and registering for change notifications.
*
*/
class TopicConfigManager(private val zkClient: ZkClient,
private val logManager: LogManager,
private val changeExpirationMs: Long = 10*60*1000,
private val time: Time = SystemTime) extends Logging {
private var lastExecutedChange = -1L
/**
* Begin watching for config changes
*/
def startup() {
ZkUtils.makeSurePersistentPathExists(zkClient, ZkUtils.TopicConfigChangesPath)
zkClient.subscribeChildChanges(ZkUtils.TopicConfigChangesPath, ConfigChangeListener)
processAllConfigChanges()
}
/**
* Process all config changes
*/
private def processAllConfigChanges() {
val configChanges = zkClient.getChildren(ZkUtils.TopicConfigChangesPath)
processConfigChanges(configChanges.asScala.toBuffer.sorted)
}
/**
* Process the given list of config changes
*/
private def processConfigChanges(notifications: Seq[String]) {
if (notifications.size > 0) {
info("Processing %d topic config change notification(s)...".format(notifications.size))
val now = time.milliseconds
val logs = logManager.logsByTopicPartition.toBuffer
val logsByTopic = logs.groupBy(_._1.topic).mapValues(_.map(_._2))
val lastChangeId = notifications.map(changeNumber).max
for (notification <- notifications) {
val changeId = changeNumber(notification)
if (changeId > lastExecutedChange) {
val changeZnode = ZkUtils.TopicConfigChangesPath + "/" + notification
val (topicJson, stat) = ZkUtils.readData(zkClient, changeZnode)
val topic = topicJson.substring(1, topicJson.length - 1) // dequote
if (logsByTopic.contains(topic)) {
/* combine the default properties with the overrides in zk to create the new LogConfig */
val props = new Properties(logManager.defaultConfig.toProps)
props.putAll(AdminUtils.fetchTopicConfig(zkClient, topic))
val logConfig = LogConfig.fromProps(props)
for (log <- logsByTopic(topic))
log.config = logConfig
lastExecutedChange = changeId
info("Processed topic config change %d for topic %s, setting new config to %s.".format(changeId, topic, props))
} else if (now - stat.getCtime > changeExpirationMs) {
/* this change is now obsolete, try to delete it unless it is the last change left */
ZkUtils.deletePath(zkClient, changeZnode)
}
}
}
}
}
/* get the change number from a change notification znode */
private def changeNumber(name: String): Long = name.substring(AdminUtils.TopicConfigChangeZnodePrefix.length).toLong
/**
* A listener that applies config changes to logs
*/
object ConfigChangeListener extends IZkChildListener {
override def handleChildChange(path: String, chillins: java.util.List[String]) {
try {
processConfigChanges(chillins.asScala.toBuffer)
} catch {
case e: Exception => error("Error processing config change:", e)
}
}
}
}
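
/*
 * Illustrative wiring sketch (not from the original source): a broker component could
 * construct and start the manager like this. The `zkClient` and `logManager` values are
 * assumed to be created elsewhere during broker startup; only the constructor shown above
 * is used.
 *
 *   val topicConfigManager = new TopicConfigManager(zkClient, logManager)
 *   topicConfigManager.startup()
 */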
| Digsolab/kafka_2.10 | core/src/main/scala/kafka/server/TopicConfigManager.scala | Scala | apache-2.0 | 6,147 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan
import org.apache.flink.api.scala._
import org.apache.flink.table.api.scala._
import org.apache.flink.table.utils.TableTestBase
import org.apache.flink.table.utils.TableTestUtil._
import org.junit.Test
class QueryDecorrelationTest extends TableTestBase {
@Test
def testCorrelationScalarAggAndFilter(): Unit = {
val util = batchTestUtil()
util.addTable[(Int, String, String, Int, Int)]("emp", 'empno, 'ename, 'job, 'salary, 'deptno)
util.addTable[(Int, String)]("dept", 'deptno, 'name)
val sql = "SELECT e1.empno\n" +
"FROM emp e1, dept d1 where e1.deptno = d1.deptno\n" +
"and e1.deptno < 10 and d1.deptno < 15\n" +
"and e1.salary > (select avg(salary) from emp e2 where e1.empno = e2.empno)"
val expectedQuery = unaryNode(
"DataSetCalc",
binaryNode(
"DataSetJoin",
unaryNode(
"DataSetCalc",
binaryNode(
"DataSetJoin",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "empno", "salary", "deptno"),
term("where", "<(deptno, 10)")
),
unaryNode(
"DataSetCalc",
batchTableNode(1),
term("select", "deptno"),
term("where", "<(deptno, 15)")
),
term("where", "=(deptno, deptno0)"),
term("join", "empno", "salary", "deptno", "deptno0"),
term("joinType", "InnerJoin")
),
term("select", "empno", "salary")
),
unaryNode(
"DataSetAggregate",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "empno", "salary"),
term("where", "IS NOT NULL(empno)")
),
term("groupBy", "empno"),
term("select", "empno", "AVG(salary) AS EXPR$0")
),
term("where", "AND(=(empno, empno0), >(salary, EXPR$0))"),
term("join", "empno", "salary", "empno0", "EXPR$0"),
term("joinType", "InnerJoin")
),
term("select", "empno")
)
util.verifySql(sql, expectedQuery)
}
@Test
def testDecorrelateWithMultiAggregate(): Unit = {
val util = batchTestUtil()
util.addTable[(Int, String, String, Int, Int)]("emp", 'empno, 'ename, 'job, 'salary, 'deptno)
util.addTable[(Int, String)]("dept", 'deptno, 'name)
val sql = "select sum(e1.empno) from emp e1, dept d1 " +
"where e1.deptno = d1.deptno " +
"and e1.salary > (" +
" select avg(e2.salary) from emp e2 where e2.deptno = d1.deptno" +
")"
val expectedQuery = unaryNode(
"DataSetAggregate",
unaryNode(
"DataSetCalc",
binaryNode(
"DataSetJoin",
unaryNode(
"DataSetCalc",
binaryNode(
"DataSetJoin",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "empno", "salary", "deptno")
),
unaryNode(
"DataSetCalc",
batchTableNode(1),
term("select", "deptno")
),
term("where", "=(deptno, deptno0)"),
term("join", "empno", "salary", "deptno", "deptno0"),
term("joinType", "InnerJoin")
),
term("select", "empno", "salary", "deptno0")
),
unaryNode(
"DataSetAggregate",
unaryNode(
"DataSetCalc",
batchTableNode(0),
term("select", "salary", "deptno"),
term("where", "IS NOT NULL(deptno)")
),
term("groupBy", "deptno"),
term("select", "deptno", "AVG(salary) AS EXPR$0")
),
term("where", "AND(=(deptno0, deptno), >(salary, EXPR$0))"),
term("join", "empno", "salary", "deptno0", "deptno", "EXPR$0"),
term("joinType", "InnerJoin")
),
term("select", "empno")
),
term("select", "SUM(empno) AS EXPR$0")
)
util.verifySql(sql, expectedQuery)
}
}
| mylog00/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/plan/QueryDecorrelationTest.scala | Scala | apache-2.0 | 4,971 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.rules.dataSet
import java.math.BigDecimal
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rex._
import org.apache.flink.table.api.TableException
import org.apache.flink.table.calcite.FlinkTypeFactory
import org.apache.flink.table.expressions._
import org.apache.flink.table.plan.logical.{LogicalWindow, SessionGroupWindow, SlidingGroupWindow, TumblingGroupWindow}
import org.apache.flink.table.plan.rules.common.LogicalWindowAggregateRule
import org.apache.flink.table.typeutils.TimeIntervalTypeInfo
import org.apache.flink.table.validate.BasicOperatorTable
class DataSetLogicalWindowAggregateRule
extends LogicalWindowAggregateRule("DataSetLogicalWindowAggregateRule") {
/** Returns the operand of the group window function. */
override private[table] def getInAggregateGroupExpression(
rexBuilder: RexBuilder,
windowExpression: RexCall): RexNode = windowExpression.getOperands.get(0)
/** Returns a zero literal of the correct type. */
override private[table] def getOutAggregateGroupExpression(
rexBuilder: RexBuilder,
windowExpression: RexCall): RexNode = {
val literalType = windowExpression.getOperands.get(0).getType
rexBuilder.makeZeroLiteral(literalType)
}
override private[table] def translateWindowExpression(
windowExpr: RexCall,
rowType: RelDataType): LogicalWindow = {
def getOperandAsLong(call: RexCall, idx: Int): Long =
call.getOperands.get(idx) match {
case v: RexLiteral => v.getValue.asInstanceOf[BigDecimal].longValue()
case _ => throw new TableException("Only constant window descriptors are supported")
}
def getFieldReference(operand: RexNode): PlannerExpression = {
operand match {
case ref: RexInputRef =>
// resolve field name of window attribute
val fieldName = rowType.getFieldList.get(ref.getIndex).getName
val fieldType = rowType.getFieldList.get(ref.getIndex).getType
ResolvedFieldReference(fieldName, FlinkTypeFactory.toTypeInfo(fieldType))
}
}
val timeField = getFieldReference(windowExpr.getOperands.get(0))
windowExpr.getOperator match {
case BasicOperatorTable.TUMBLE =>
val interval = getOperandAsLong(windowExpr, 1)
TumblingGroupWindow(
WindowReference("w$", Some(timeField.resultType)),
timeField,
Literal(interval, TimeIntervalTypeInfo.INTERVAL_MILLIS)
)
case BasicOperatorTable.HOP =>
val (slide, size) = (getOperandAsLong(windowExpr, 1), getOperandAsLong(windowExpr, 2))
SlidingGroupWindow(
WindowReference("w$", Some(timeField.resultType)),
timeField,
Literal(size, TimeIntervalTypeInfo.INTERVAL_MILLIS),
Literal(slide, TimeIntervalTypeInfo.INTERVAL_MILLIS)
)
case BasicOperatorTable.SESSION =>
val gap = getOperandAsLong(windowExpr, 1)
SessionGroupWindow(
WindowReference("w$", Some(timeField.resultType)),
timeField,
Literal(gap, TimeIntervalTypeInfo.INTERVAL_MILLIS)
)
}
}
}
object DataSetLogicalWindowAggregateRule {
val INSTANCE = new DataSetLogicalWindowAggregateRule
}
| ueshin/apache-flink | flink-table/flink-table-planner/src/main/scala/org/apache/flink/table/plan/rules/dataSet/DataSetLogicalWindowAggregateRule.scala | Scala | apache-2.0 | 4,066 |
package se.nimsa.sbx.forwarding
import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.testkit.{ImplicitSender, TestKit}
import akka.util.Timeout
import org.scalatest.{BeforeAndAfterAll, BeforeAndAfterEach, Matchers, WordSpecLike}
import se.nimsa.sbx.anonymization.{AnonymizationProfile, ConfidentialityOption}
import se.nimsa.sbx.app.GeneralProtocol._
import se.nimsa.sbx.box.BoxProtocol._
import se.nimsa.sbx.dicom.DicomHierarchy.Image
import se.nimsa.sbx.dicom.DicomPropertyValue._
import se.nimsa.sbx.forwarding.ForwardingProtocol._
import se.nimsa.sbx.metadata.MetaDataProtocol._
import se.nimsa.sbx.storage.RuntimeStorage
import se.nimsa.sbx.util.FutureUtil.await
import se.nimsa.sbx.util.TestUtil
import scala.concurrent.ExecutionContextExecutor
import scala.concurrent.duration.DurationInt
class ForwardingServiceActorTest(_system: ActorSystem) extends TestKit(_system) with ImplicitSender
with WordSpecLike with Matchers with BeforeAndAfterAll with BeforeAndAfterEach {
def this() = this(ActorSystem("ForwardingServiceActorTestSystem"))
implicit val ec: ExecutionContextExecutor = system.dispatcher
implicit val timeout: Timeout = Timeout(30.seconds)
val dbConfig = TestUtil.createTestDb("forwardingserviceactortest")
val db = dbConfig.db
val forwardingDao = new ForwardingDAO(dbConfig)
await(forwardingDao.create())
var deletedImages = Seq.empty[Long]
val storage: RuntimeStorage = new RuntimeStorage() {
override def deleteFromStorage(imageIds: Seq[Long]): Unit = {
deletedImages = deletedImages ++ imageIds
super.deleteFromStorage(imageIds)
}
}
case class SetSource(source: Source)
val setSourceReply = "Source set"
val metaDataService: ActorRef = system.actorOf(Props(new Actor {
var source: Option[Source] = None
def receive: Receive = {
case GetImage(imageId) =>
sender ! (imageId match {
case 10 => Some(image1)
case 23 => Some(image2)
case 38 => Some(image3)
case _ => None
})
case GetSourceForSeries(_) =>
sender ! source.map(SeriesSource(-1, _))
case DeleteMetaData(_) =>
sender ! MetaDataDeleted(Seq.empty, Seq.empty, Seq.empty, Seq.empty)
case SetSource(newSource) =>
source = Option(newSource)
sender ! setSourceReply
}
}), name = "MetaDataService")
case object ResetSentImages
case object GetSentImages
val resetSentImagesReply = "Send images reset"
val boxService: ActorRef = system.actorOf(Props(new Actor {
var sentImages = Seq.empty[Long]
def receive: Receive = {
case GetBoxById(id) =>
sender ! Some(Box(id, "box", "1.2.3.4", "http://example.com", BoxSendMethod.PUSH, AnonymizationProfile(Seq(ConfidentialityOption.BASIC_PROFILE)), online = false))
case SendToRemoteBox(box, bulkAnonymizationData) =>
sentImages = sentImages ++ bulkAnonymizationData.imageTagValuesSet.map(_.imageId)
sender ! ImagesAddedToOutgoing(box.id, bulkAnonymizationData.imageTagValuesSet.map(_.imageId))
case ResetSentImages =>
sentImages = Seq.empty[Long]
sender ! resetSentImagesReply
case GetSentImages =>
sender ! sentImages
}
}), name = "BoxService")
val forwardingService: ActorRef = system.actorOf(Props(new ForwardingServiceActor(forwardingDao, storage, 1000.hours)(Timeout(30.seconds))), name = "ForwardingService")
override def beforeEach(): Unit = {
deletedImages = Seq.empty[Long]
}
override def afterEach(): Unit = {
await(forwardingDao.clear())
metaDataService ! SetSource(null)
expectMsg(setSourceReply)
boxService ! ResetSentImages
expectMsg(resetSentImagesReply)
}
override def afterAll: Unit = TestKit.shutdownActorSystem(system)
"A ForwardingServiceActor" should {
"support adding and listing forwarding rules" in {
forwardingService ! GetForwardingRules(0, 1)
expectMsg(ForwardingRules(List.empty))
val rule1 = scpToBoxRule
val rule2 = userToBoxRule
forwardingService ! AddForwardingRule(rule1)
forwardingService ! AddForwardingRule(rule2)
val dbRule1 = expectMsgType[ForwardingRuleAdded].forwardingRule
val dbRule2 = expectMsgType[ForwardingRuleAdded].forwardingRule
forwardingService ! GetForwardingRules(0, 10)
expectMsg(ForwardingRules(List(dbRule1, dbRule2)))
}
"support deleting forwarding rules" in {
val rule1 = scpToBoxRule
forwardingService ! AddForwardingRule(rule1)
val dbRule1 = expectMsgType[ForwardingRuleAdded].forwardingRule
forwardingService ! GetForwardingRules(0, 10)
expectMsg(ForwardingRules(List(dbRule1)))
forwardingService ! RemoveForwardingRule(dbRule1.id)
expectMsg(ForwardingRuleRemoved(dbRule1.id))
forwardingService ! GetForwardingRules(0, 1)
expectMsg(ForwardingRules(List.empty))
}
}
"not forward an added image if there are no forwarding rules" in {
forwardingService ! ImageAdded(image1.id, scpSource, overwrite = false)
expectMsgPF() {
case ImageRegisteredForForwarding(imageId, applicableRules) =>
imageId shouldBe image1.id
applicableRules shouldBe empty
}
expectNoMessage(3.seconds)
await(forwardingDao.listForwardingRules(0, 1)) should be(empty)
await(forwardingDao.listForwardingTransactions) should be(empty)
await(forwardingDao.listForwardingTransactionImages) should be(empty)
}
"not forward an added image if there are no matching forwarding rules" in {
val rule = scpToBoxRule
forwardingService ! AddForwardingRule(rule)
expectMsgType[ForwardingRuleAdded]
forwardingService ! ImageAdded(image1.id, userSource, overwrite = false)
expectMsgPF() {
case ImageRegisteredForForwarding(imageId, applicableRules) =>
imageId shouldBe image1.id
applicableRules shouldBe empty
}
await(forwardingDao.listForwardingTransactions) should be(empty)
await(forwardingDao.listForwardingTransactionImages) should be(empty)
}
"forward an added image if there are matching forwarding rules" in {
val rule = userToBoxRule
metaDataService ! SetSource(userSource)
expectMsg(setSourceReply)
forwardingService ! AddForwardingRule(rule)
expectMsgType[ForwardingRuleAdded]
forwardingService ! ImageAdded(image1.id, userSource, overwrite = false)
expectMsgPF() {
case ImageRegisteredForForwarding(imageId, applicableRules) =>
imageId shouldBe image1.id
applicableRules should have length 1
}
await(forwardingDao.listForwardingTransactions).length should be(1)
await(forwardingDao.listForwardingTransactionImages).length should be(1)
}
"create multiple transactions when there are multiple rules with the same source and an image with that source is received" in {
val rule1 = userToBoxRule
val rule2 = userToAnotherBoxRule
metaDataService ! SetSource(userSource)
expectMsg(setSourceReply)
forwardingService ! AddForwardingRule(rule1)
forwardingService ! AddForwardingRule(rule2)
expectMsgType[ForwardingRuleAdded]
expectMsgType[ForwardingRuleAdded]
forwardingService ! ImageAdded(image1.id, userSource, overwrite = false)
expectMsgPF() {
case ImageRegisteredForForwarding(imageId, applicableRules) =>
imageId shouldBe image1.id
applicableRules should have length 2
}
await(forwardingDao.listForwardingTransactions).length should be(2)
await(forwardingDao.listForwardingTransactionImages).length should be(2)
}
"not send queued images if the corresponding transaction was recently updated" in {
val rule = userToBoxRule
metaDataService ! SetSource(userSource)
expectMsg(setSourceReply)
forwardingService ! AddForwardingRule(rule)
expectMsgType[ForwardingRuleAdded]
forwardingService ! ImageAdded(image1.id, userSource, overwrite = false)
expectMsgType[ImageRegisteredForForwarding]
forwardingService ! PollForwardingQueue
expectMsg(TransactionsEnroute(List.empty))
await(forwardingDao.listForwardingTransactions).length should be(1)
await(forwardingDao.listForwardingTransactionImages).length should be(1)
}
"send queued images if the corresponding transaction has expired (i.e. has not been updated in a while)" in {
val rule = userToBoxRule
metaDataService ! SetSource(userSource)
expectMsg(setSourceReply)
forwardingService ! AddForwardingRule(rule)
expectMsgType[ForwardingRuleAdded]
forwardingService ! ImageAdded(image1.id, userSource, overwrite = false)
expectMsgType[ImageRegisteredForForwarding]
expireTransaction(0)
forwardingService ! PollForwardingQueue
expectMsgPF() {
case TransactionsEnroute(transactions) => transactions.length should be(1)
}
await(forwardingDao.listForwardingTransactions).length should be(1)
await(forwardingDao.listForwardingTransactionImages).length should be(1)
val transaction = await(forwardingDao.listForwardingTransactions).head
transaction.enroute should be(true)
transaction.delivered should be(false)
}
"mark forwarding transaction as delivered after images have been sent" in {
val rule = userToBoxRule
metaDataService ! SetSource(userSource)
expectMsg(setSourceReply)
forwardingService ! AddForwardingRule(rule)
expectMsgType[ForwardingRuleAdded]
val image = image1
forwardingService ! ImageAdded(image.id, userSource, overwrite = false)
expectMsgType[ImageRegisteredForForwarding]
expireTransaction(0)
forwardingService ! PollForwardingQueue
expectMsgPF() {
case TransactionsEnroute(transactions) => transactions.length should be(1)
}
forwardingService ! ImagesSent(rule.destination, Seq(image.id))
expectMsgPF() {
case TransactionMarkedAsDelivered(transactionMaybe) => transactionMaybe should be(defined)
}
await(forwardingDao.listForwardingTransactions).length should be(1)
await(forwardingDao.listForwardingTransactionImages).length should be(1)
val transaction = await(forwardingDao.listForwardingTransactions).head
transaction.enroute should be(false)
transaction.delivered should be(true)
}
"remove transaction, transaction images and stored images when a forwarding transaction is finalized" in {
val rule = userToBoxRule
metaDataService ! SetSource(userSource)
expectMsg(setSourceReply)
forwardingService ! AddForwardingRule(rule)
expectMsgType[ForwardingRuleAdded]
val image = image1
forwardingService ! ImageAdded(image.id, userSource, overwrite = false)
expectMsgType[ImageRegisteredForForwarding]
expireTransaction(0)
forwardingService ! PollForwardingQueue
expectMsgPF() {
case TransactionsEnroute(transactions) => transactions.length should be(1)
}
forwardingService ! ImagesSent(rule.destination, Seq(image.id))
expectMsgPF() {
case TransactionMarkedAsDelivered(transactionMaybe) => transactionMaybe should be(defined)
}
forwardingService ! FinalizeSentTransactions
expectMsgPF() {
case TransactionsFinalized(removedTransactions) =>
removedTransactions.length should be(1)
}
await(forwardingDao.listForwardingTransactions) should be(empty)
await(forwardingDao.listForwardingTransactionImages) should be(empty)
// wait for deletion of images to finish
expectNoMessage(3.seconds)
deletedImages shouldBe Seq(image.id)
}
"remove transaction, transaction images but not stored images when a forwarding transaction is finalized for a rule with keepImages set to true" in {
val rule = userToBoxRuleKeepImages
metaDataService ! SetSource(userSource)
expectMsg(setSourceReply)
forwardingService ! AddForwardingRule(rule)
expectMsgType[ForwardingRuleAdded]
val image = image1
forwardingService ! ImageAdded(image.id, userSource, overwrite = false)
expectMsgType[ImageRegisteredForForwarding]
expireTransaction(0)
forwardingService ! PollForwardingQueue
expectMsgPF() {
case TransactionsEnroute(transactions) => transactions.length should be(1)
}
forwardingService ! ImagesSent(rule.destination, Seq(image.id))
expectMsgPF() {
case TransactionMarkedAsDelivered(transactionMaybe) => transactionMaybe should be(defined)
}
forwardingService ! FinalizeSentTransactions
expectMsgPF() {
case TransactionsFinalized(removedTransactions) =>
removedTransactions.length should be(1)
}
await(forwardingDao.listForwardingTransactions) should be(empty)
await(forwardingDao.listForwardingTransactionImages) should be(empty)
deletedImages shouldBe Seq.empty
}
"create a new transaction for a newly added image as soon as a transaction has been marked as enroute" in {
val rule = userToBoxRule
metaDataService ! SetSource(userSource)
expectMsg(setSourceReply)
forwardingService ! AddForwardingRule(rule)
expectMsgType[ForwardingRuleAdded]
forwardingService ! ImageAdded(image1.id, userSource, overwrite = false)
expectMsgType[ImageRegisteredForForwarding]
expireTransaction(0)
forwardingService ! PollForwardingQueue
expectMsgPF() {
case TransactionsEnroute(trans) => trans.length should be(1)
}
forwardingService ! ImageAdded(image2.id, userSource, overwrite = false)
expectMsgType[ImageRegisteredForForwarding]
val transactions = await(forwardingDao.listForwardingTransactions)
transactions.length should be(2)
val transaction1 = transactions.head
val transaction2 = transactions(1)
transaction1.enroute should be(true)
transaction1.delivered should be(false)
transaction2.enroute should be(false)
transaction2.delivered should be(false)
}
"create a single transaction when forwarding multiple transactions in succession with a box source" in {
val rule = boxToBoxRule
metaDataService ! SetSource(boxSource)
expectMsg(setSourceReply)
forwardingService ! AddForwardingRule(rule)
expectMsgType[ForwardingRuleAdded]
forwardingService ! ImageAdded(image3.id, boxSource, overwrite = false)
expectMsgType[ImageRegisteredForForwarding]
forwardingService ! ImageAdded(image1.id, boxSource, overwrite = false)
expectMsgType[ImageRegisteredForForwarding]
val transactions = await(forwardingDao.listForwardingTransactions)
transactions.length should be(1)
}
"forward the correct list of images" in {
val rule = userToBoxRule
metaDataService ! SetSource(userSource)
expectMsg(setSourceReply)
forwardingService ! AddForwardingRule(rule)
expectMsgType[ForwardingRuleAdded]
forwardingService ! ImageAdded(image1.id, userSource, overwrite = false)
forwardingService ! ImageAdded(image3.id, userSource, overwrite = false)
forwardingService ! ImageAdded(image2.id, userSource, overwrite = false)
expectMsgType[ImageRegisteredForForwarding]
expectMsgType[ImageRegisteredForForwarding]
expectMsgType[ImageRegisteredForForwarding]
await(forwardingDao.listForwardingTransactions) should have length 1
expireTransaction(0)
forwardingService ! PollForwardingQueue
expectMsgPF() {
case TransactionsEnroute(transactions) => transactions should have length 1
}
// wait for box transfer to complete
Thread.sleep(3000)
boxService ! GetSentImages
expectMsg(Seq(image1.id, image3.id, image2.id))
}
def scpSource = Source(SourceType.SCP, "My SCP", 1)
def userSource = Source(SourceType.USER, "Admin", 35)
def boxSource = Source(SourceType.BOX, "Source box", 11)
def boxDestination = Destination(DestinationType.BOX, "Remote box", 1)
def boxDestination2 = Destination(DestinationType.BOX, "Another remote box", 2)
def scpToBoxRule = ForwardingRule(-1, scpSource, boxDestination, keepImages = false)
def userToBoxRule = ForwardingRule(-1, userSource, boxDestination, keepImages = false)
def userToAnotherBoxRule = ForwardingRule(-1, userSource, boxDestination2, keepImages = false)
def boxToBoxRule = ForwardingRule(-1, boxSource, boxDestination, keepImages = false)
def userToBoxRuleKeepImages = ForwardingRule(-1, userSource, boxDestination, keepImages = true)
def image1 = Image(10, 22, SOPInstanceUID("sopuid1"), ImageType("it"), InstanceNumber("in1"))
def image2 = Image(23, 22, SOPInstanceUID("sopuid2"), ImageType("it"), InstanceNumber("in2"))
def image3 = Image(38, 22, SOPInstanceUID("sopuid3"), ImageType("it"), InstanceNumber("in3"))
def expireTransaction(index: Int): Int = {
val transactions = await(forwardingDao.listForwardingTransactions)
await(forwardingDao.updateForwardingTransaction(transactions(index).copy(updated = 0)))
}
} | slicebox/slicebox | src/test/scala/se/nimsa/sbx/forwarding/ForwardingServiceActorTest.scala | Scala | apache-2.0 | 16,832 |
/*
* Copyright 2014, by Vladimir Kostyukov and Contributors.
*
* This file is a part of a Finch library that may be found at
*
* https://github.com/finagle/finch
*
* Licensed under the Apache License, Version 2.0 (the "License");
* You may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Contributor(s):
* Ben Edwards
*/
package io.finch
import com.twitter.finagle.{SimpleFilter, Service}
import com.twitter.finagle.httpx.Request
import com.twitter.util.{Await, Future}
import org.scalatest.{Matchers, FlatSpec}
class FilterOpsSpec extends FlatSpec with Matchers {
private[finch] class PrefixFilter(val prefix: String) extends SimpleFilter[Request, String] {
def apply(req: Request, service: Service[Request, String]): Future[String] = {
service(req) map { rep => prefix ++ rep }
}
}
val bar = Service.mk { (_: Request) => Future.value("bar") }
val req = Request("/")
"FilterOps" should "allow for chaining a filter to a service" in {
val foo = new PrefixFilter("foo")
val combined = foo ! bar
Await.result(combined(req)) shouldBe "foobar"
}
it should "allow for chaining filters to filters" in {
val fo = new PrefixFilter("fo")
val oo = new PrefixFilter("oo")
val combined = fo ! oo ! bar
Await.result(combined(req)) shouldBe "fooobar"
}
}
| penland365/finch | core/src/test/scala/io/finch/FilterOpsSpec.scala | Scala | apache-2.0 | 1,760 |
/*
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing,
software distributed under the License is distributed on an
"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
KIND, either express or implied. See the License for the
specific language governing permissions and limitations
under the License.
* */
package io.github.mandar2812.dynaml.examples
import breeze.linalg.{DenseMatrix, DenseVector}
import io.github.mandar2812.dynaml.graphics.charts.Highcharts._
import io.github.mandar2812.dynaml.DynaMLPipe
import io.github.mandar2812.dynaml.evaluation.RegressionMetrics
import io.github.mandar2812.dynaml.kernels.{
CovarianceFunction,
LocalScalarKernel
}
import io.github.mandar2812.dynaml.models.svm.DLSSVM
import io.github.mandar2812.dynaml.optimization.{
GradBasedGlobalOptimizer,
GridSearch
}
import io.github.mandar2812.dynaml.pipes._
import org.apache.log4j.Logger
/**
* Created by mandar on 4/3/16.
*/
object DaisyPowerPlant {
def apply(
kernel: LocalScalarKernel[DenseVector[Double]],
deltaT: Int = 2,
timelag: Int = 0,
stepPred: Int = 3,
num_training: Int = 150,
column: Int = 7,
opt: Map[String, String]
) =
runExperiment(kernel, deltaT, timelag, stepPred, num_training, column, opt)
def runExperiment(
kernel: LocalScalarKernel[DenseVector[Double]],
deltaT: Int = 2,
timelag: Int = 0,
stepPred: Int = 3,
num_training: Int = 150,
column: Int = 7,
opt: Map[String, String]
): Seq[Seq[AnyVal]] = {
//Load Daisy data into a stream
    //Extract the time stamp and the selected output column values
val logger = Logger.getLogger(this.getClass)
val names = Map(
6 -> "steam pressure",
7 -> "main stem temperature",
8 -> "reheat steam temperature"
)
//pipe training data to model and then generate test predictions
//create RegressionMetrics instance and produce plots
val modelTrainTest =
(trainTest: (
(
Iterable[(DenseVector[Double], Double)],
Iterable[(DenseVector[Double], Double)]
),
(DenseVector[Double], DenseVector[Double])
)) => {
val model = new DLSSVM(trainTest._1._1.toStream, num_training, kernel)
val gs = opt("globalOpt") match {
case "GS" =>
new GridSearch[model.type](model)
.setGridSize(opt("grid").toInt)
.setStepSize(opt("step").toDouble)
.setLogScale(false)
case "ML" => new GradBasedGlobalOptimizer[DLSSVM](model)
}
val startConf = kernel.state ++ Map(
"regularization" ->
opt("regularization").toDouble
)
val (_, conf) = gs.optimize(startConf, opt)
model.setRegParam(opt("regularization").toDouble).learn()
val res = trainTest._1._2
.map(testpoint => (model.predict(testpoint._1), testpoint._2))
val scoresAndLabelsPipe = DataPipe(
(list: List[(Double, Double)]) =>
list.map { l =>
(
l._1 * trainTest._2._2(-1) + trainTest._2._1(-1),
l._2 * trainTest._2._2(-1) + trainTest._2._1(-1)
)
}
)
val scoresAndLabels = scoresAndLabelsPipe.run(res.toList)
val metrics =
new RegressionMetrics(scoresAndLabels, scoresAndLabels.length)
metrics.setName(names(column))
metrics.print()
metrics.generatePlots()
//Plotting time series prediction comparisons
line((1 to scoresAndLabels.length).toList, scoresAndLabels.map(_._2))
hold()
line((1 to scoresAndLabels.length).toList, scoresAndLabels.map(_._1))
legend(
List(
names(column),
"Predicted " + names(column) + " (one hour ahead)"
)
)
title("Pont-sur-Sambre 120 MW power plant: " + names(column))
unhold()
Seq(
Seq(
deltaT,
1,
num_training,
200 - num_training,
metrics.mae,
metrics.rmse,
metrics.Rsq,
metrics.corr,
metrics.modelYield
)
)
}
val preProcessPipe = DynaMLPipe.fileToStream >
DynaMLPipe.trimLines >
DynaMLPipe.replaceWhiteSpaces >
DynaMLPipe.extractTrainingFeatures(
List(0, column, 1, 2, 3, 4, 5),
Map()
) >
DynaMLPipe.removeMissingLines >
IterableDataPipe((line: String) => {
val splits = line.split(",")
val timestamp = splits.head.toDouble
val feat = DenseVector(splits.tail.map(_.toDouble))
(timestamp, feat)
}) >
DynaMLPipe.deltaOperationVec(deltaT)
val trainTestPipe = DynaMLPipe.duplicate(preProcessPipe) >
DynaMLPipe.splitTrainingTest(num_training, 200 - num_training) >
DynaMLPipe.trainTestGaussianStandardization >
DataPipe(modelTrainTest)
val dataFile = dataDir + "/powerplant.csv"
trainTestPipe((dataFile, dataFile))
}
}
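
/*
 * Illustrative invocation sketch (not from the original source). The RBF kernel and the
 * hyper-parameter values below are assumptions; they only show the shape of the `opt` map
 * consumed by runExperiment ("globalOpt", "grid", "step", "regularization").
 *
 *   DaisyPowerPlant(
 *     new RBFKernel(2.5),
 *     deltaT = 2, num_training = 150, column = 7,
 *     opt = Map("globalOpt" -> "GS", "grid" -> "10",
 *               "step" -> "0.1", "regularization" -> "1.5"))
 */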
| transcendent-ai-labs/DynaML | dynaml-examples/src/main/scala/io/github/mandar2812/dynaml/examples/DaisyPowerPlant.scala | Scala | apache-2.0 | 5,475 |
/*
Copyright 2013 Josh Conrad
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package groupcache
import java.net.URL
import scala.language.implicitConversions
object Implicits {
/**
* Implicitly converts a string to a URL. Convenient when
* constructing peers and groups.
* @param string
* @return
*/
implicit def string2Url(string: String): URL = new URL(string)
/**
* Implicitly converts a string to a byte view.
* @param string
* @return
*/
implicit def string2ByteView(string: String): ByteView = ByteView(string)
/**
* Implicitly converts a byte view to a string.
* @param byteView
* @return
*/
implicit def byteView2String(byteView: ByteView): String = byteView.toString
/**
* Implicitly converts a byte array to a byte view.
* @param byteArray
* @return
*/
implicit def byteArray2ByteView(byteArray: Array[Byte]): ByteView = ByteView(byteArray)
/**
* Implicitly converts a byte view to a byte array.
* @param byteView
* @return
*/
implicit def byteView2ByteArray(byteView: ByteView): Array[Byte] = byteView.byteSlice
}
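
/**
 * Illustrative usage sketch (not from the original source): with the conversions above
 * imported, strings, byte arrays and byte views interconvert implicitly. The literal
 * values below are arbitrary examples.
 */
private[groupcache] object ImplicitsUsageExample {
  import Implicits._

  val peerUrl: URL = "http://localhost:9000"   // via string2Url
  val view: ByteView = "cached-value"          // via string2ByteView
  val bytes: Array[Byte] = view                // via byteView2ByteArray
  val asString: String = ByteView(bytes)       // via byteView2String
}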
| jmconrad/scala-groupcache | src/main/scala/groupcache/Implicits.scala | Scala | apache-2.0 | 1,599 |
import org.sbtidea.test.util.AbstractScriptedTestBuild
import sbt._
import sbt.Keys._
import org.sbtidea.SbtIdeaPlugin._
object ScriptedTestBuild extends AbstractScriptedTestBuild("with-extra-tests") {
lazy val root = Project("main", file("."), settings = Defaults.defaultSettings ++ scriptedTestSettings ++ Seq(
libraryDependencies += "junit" % "junit" % "4.8.2",
ideaExtraTestConfigurations := Seq(LoadTest)
))
.configs( IntegrationTest )
.settings( Defaults.itSettings : _*)
.configs( LoadTest)
.settings( loadTestSettings : _*)
lazy val LoadTest = config("test-load") extend Test
lazy val loadTestSettings : Seq[Setting[_]] = inConfig(LoadTest)(Defaults.testSettings ++ Seq(sourceDirectory in LoadTest <<= (sourceDirectory in LoadTest)(_ / ".." / "test-load")))
}
| mpeltonen/sbt-idea | src/sbt-test/sbt-idea/with-extra-tests/project/Build.scala | Scala | bsd-3-clause | 803 |
package com.cloudray.scalapress.account.controller.admin
import org.springframework.stereotype.Controller
import org.springframework.web.bind.annotation.{PathVariable, ModelAttribute, RequestMethod, RequestMapping}
import scala.Array
import org.springframework.ui.ModelMap
import com.cloudray.scalapress.section.SectionDao
import com.cloudray.scalapress.theme.MarkupDao
import com.cloudray.scalapress.account.{AccountType, AccountTypeDao}
import org.springframework.beans.factory.annotation.Autowired
import com.cloudray.scalapress.framework.ScalapressContext
/** @author Stephen Samuel */
@Controller
@Autowired
@RequestMapping(Array("backoffice/accounttype/{id}"))
class AccountTypeEditController(accountTypeDao: AccountTypeDao,
markupDao: MarkupDao,
sectionDao: SectionDao,
context: ScalapressContext) {
@RequestMapping(method = Array(RequestMethod.GET), produces = Array("text/html"))
def edit(@ModelAttribute("accountType") t: AccountType) = "admin/account/type/edit.vm"
@RequestMapping(method = Array(RequestMethod.POST), produces = Array("text/html"))
def save(@ModelAttribute("accountType") t: AccountType) = {
accountTypeDao.save(t)
edit(t)
}
@ModelAttribute("accountType")
def accountType(@PathVariable("id") id: java.lang.Long, model: ModelMap) = accountTypeDao.find(id)
}
| vidyacraghav/scalapress | src/main/scala/com/cloudray/scalapress/account/controller/admin/AccountTypeEditController.scala | Scala | apache-2.0 | 1,407 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.internal.operators
import monix.eval.Task
import monix.execution.internal.Platform
import monix.reactive.{BaseTestSuite, Observable}
import concurrent.duration._
import scala.util.Success
object UncancelableSuite extends BaseTestSuite {
implicit val opts: Task.Options = Task.defaultOptions.disableAutoCancelableRunLoops
test("uncancelable works") { implicit ec =>
val obs = Observable
.eval(1)
.delayExecution(1.second)
.uncancelable
val f = obs.runAsyncGetFirst
ec.tick()
assertEquals(f.value, None)
f.cancel()
ec.tick()
assertEquals(f.value, None)
assert(ec.state.tasks.nonEmpty, "tasks.nonEmpty")
ec.tick(1.second)
assert(ec.state.tasks.isEmpty, "tasks.isEmpty")
assertEquals(f.value, Some(Success(Some(1))))
}
test("uncancelable works for suspend loop") { implicit ec =>
def loop(n: Int): Observable[Int] =
Observable.suspend {
if (n > 0)
loop(n - 1).executeAsync.uncancelable
else
Observable.now(1)
}
val n = if (Platform.isJVM) 10000 else 1000
val f = loop(n).runAsyncGetFirst
f.cancel()
ec.tick()
assertEquals(f.value, Some(Success(Some(1))))
}
}
| monix/monix | monix-reactive/shared/src/test/scala/monix/reactive/internal/operators/UncancelableSuite.scala | Scala | apache-2.0 | 1,908 |
package tastytest
object Refinements {
trait Foo {
type T
type U
def foo: (T, U)
}
class Bar[A, B] {
def bar[F <: Foo { type T = A; type U = B }](member: F): (member.T, member.U) = {
member.foo
}
}
class Baz[A, B, F <: Foo { type T = A; type U = B }] {
def baz(member: F): (member.T, member.U) = {
member.foo
}
}
}
| lrytz/scala | test/tasty/neg/src-3/Refinements.scala | Scala | apache-2.0 | 372 |
package org.bitcoins.spvnode.messages.data
import org.bitcoins.core.protocol.transaction.Transaction
import org.bitcoins.core.util.Factory
import org.bitcoins.spvnode.messages._
import org.bitcoins.spvnode.serializers.messages.data.RawTransactionMessageSerializer
/**
* Created by chris on 6/2/16.
* Companion factory object for the TransactionMessage on the p2p network
* https://bitcoin.org/en/developer-reference#tx
*/
object TransactionMessage extends Factory[TransactionMessage] {
private case class TransactionMessageImpl(transaction : Transaction) extends TransactionMessage
def fromBytes(bytes : Seq[Byte]) : TransactionMessage = RawTransactionMessageSerializer.read(bytes)
def apply(transaction: Transaction) : TransactionMessage = TransactionMessageImpl(transaction)
}
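
/*
 * Illustrative sketch (not from the original source): a message can be built from an
 * already-parsed Transaction, or decoded from raw wire bytes. `tx` and `rawBytes` are
 * assumed inputs produced elsewhere.
 *
 *   val fromTx   = TransactionMessage(tx)
 *   val fromWire = TransactionMessage.fromBytes(rawBytes)
 */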
| Christewart/bitcoin-s-spv-node | src/main/scala/org/bitcoins/spvnode/messages/data/TransactionMessage.scala | Scala | mit | 798 |
package eu.execom.FabutPresentation.rest
import javax.servlet.http.{HttpServletRequest, HttpServletResponse}
import org.joda.time.DateTime
import org.joda.time.format.DateTimeFormat
trait CacheControlSupport {
implicit def request: HttpServletRequest
implicit def response: HttpServletResponse
val RFC1123_PATTERN = "EEE, dd MMM yyyyy HH:mm:ss z"
val dateTimeFormat = DateTimeFormat.forPattern(RFC1123_PATTERN)
def noCache() = {
response.addHeader("Cache-Control", "no-store, no-cache, must-revalidate")
response.addHeader("Pragma", "no-cache")
}
def maxAge(seconds: Long) = {
response.addHeader("Cache-Control", "max-age=" + seconds)
}
def expires(expiration: DateTime) = {
response.addHeader("Expires", dateTimeFormat.print(expiration))
}
}
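
/*
 * Illustrative usage sketch (not from the original source): a servlet-backed controller
 * (e.g. Scalatra-style) mixing in CacheControlSupport can set caching headers inside an
 * action before rendering. The route and body below are hypothetical.
 *
 *   get("/invitations") {
 *     maxAge(300)                             // Cache-Control: max-age=300
 *     expires(DateTime.now.plusMinutes(5))    // matching Expires header
 *     ...
 *   }
 */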
| idostanic/FabutPresentation | src/main/scala/eu/execom/FabutPresentation/rest/CacheControlSupport.scala | Scala | apache-2.0 | 789 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.spanner
import com.google.cloud.spanner._
import com.spotify.scio.ScioContext
import com.spotify.scio.io.{EmptyTap, EmptyTapOf, ScioIO, Tap}
import com.spotify.scio.values.SCollection
import org.apache.beam.sdk.io.gcp.spanner.SpannerIO.FailureMode
import org.apache.beam.sdk.io.gcp.spanner.{SpannerConfig, SpannerIO => BSpannerIO}
import scala.jdk.CollectionConverters._
import com.spotify.scio.io.TapT
sealed trait SpannerIO[T] extends ScioIO[T] {
final override val tapT: TapT.Aux[T, Nothing] = EmptyTapOf[T]
val config: SpannerConfig
override def testId: String =
s"${getClass.getSimpleName}" +
s"(${config.getProjectId}, ${config.getInstanceId}, ${config.getDatabaseId})"
}
object SpannerRead {
object ReadParam {
private[spanner] val DefaultWithTransaction = false
private[spanner] val DefaultWithBatching = true
}
sealed trait ReadMethod
final case class FromTable(tableName: String, columns: Seq[String]) extends ReadMethod
final case class FromQuery(query: String) extends ReadMethod
final case class ReadParam private (
readMethod: ReadMethod,
withTransaction: Boolean = ReadParam.DefaultWithTransaction,
withBatching: Boolean = ReadParam.DefaultWithBatching
)
}
final case class SpannerRead(config: SpannerConfig) extends SpannerIO[Struct] {
import SpannerRead._
override type ReadP = ReadParam
override type WriteP = Nothing
override protected def read(sc: ScioContext, params: ReadP): SCollection[Struct] = {
var transform = BSpannerIO
.read()
.withSpannerConfig(config)
.withBatching(params.withBatching)
transform = params.readMethod match {
case x: FromTable => transform.withTable(x.tableName).withColumns(x.columns.asJava)
case y: FromQuery => transform.withQuery(y.query)
}
if (params.withTransaction) {
val txn = BSpannerIO.createTransaction().withSpannerConfig(config)
transform = transform.withTransaction(sc.applyInternal(txn))
}
sc.applyTransform(transform)
}
override protected def write(data: SCollection[Struct], params: WriteP): Tap[Nothing] =
throw new UnsupportedOperationException("SpannerRead is read-only")
override def tap(params: ReadP): Tap[Nothing] = EmptyTap
}
object SpannerWrite {
object WriteParam {
private[spanner] val DefaultFailureMode = FailureMode.FAIL_FAST
private[spanner] val DefaultBatchSizeBytes = 1024L * 1024L
}
final case class WriteParam private (
failureMode: FailureMode = WriteParam.DefaultFailureMode,
batchSizeBytes: Long = WriteParam.DefaultBatchSizeBytes
)
}
final case class SpannerWrite(config: SpannerConfig) extends SpannerIO[Mutation] {
override type ReadP = Nothing
override type WriteP = SpannerWrite.WriteParam
override protected def write(data: SCollection[Mutation], params: WriteP): Tap[Nothing] = {
val transform = BSpannerIO
.write()
.withSpannerConfig(config)
.withBatchSizeBytes(params.batchSizeBytes)
.withFailureMode(params.failureMode)
data.applyInternal(transform)
EmptyTap
}
override protected def read(sc: ScioContext, params: ReadP): SCollection[Mutation] = sc.wrap {
throw new UnsupportedOperationException("SpannerWrite is write-only")
}
override def tap(params: ReadP): Tap[Nothing] = EmptyTap
}
| spotify/scio | scio-google-cloud-platform/src/main/scala/com/spotify/scio/spanner/SpannerIO.scala | Scala | apache-2.0 | 3,942 |
package org.rovak.steamclient.steam3
object MsgUtils {
val ProtoMask = 0x80000000
val EMsgMask = ~ProtoMask
def ToMsg(msg: Int) = msg & EMsgMask
def IsProtoBuf(msg: Int) = (msg & 0xffffffffL & ProtoMask) > 0
}
| Rovak/scala-steamkit | steamkit/src/main/scala/org/rovak/steamclient/steam3/MsgUtils.scala | Scala | mit | 222 |
/*
* Copyright 2022 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package services.mocks
import models.api.PartyType
import models.external.IncorporatedEntity
import models.external.incorporatedentityid.IncorpIdJourneyConfig
import org.mockito.ArgumentMatchers
import org.mockito.Mockito.when
import org.scalatestplus.mockito.MockitoSugar
import services.IncorpIdService
import uk.gov.hmrc.http.HeaderCarrier
import scala.concurrent.Future
trait IncorpIdServiceMock {
this: MockitoSugar =>
val mockIncorpIdService: IncorpIdService = mock[IncorpIdService]
def mockCreateJourney(incorpIdJourneyConfig: IncorpIdJourneyConfig, partyType: PartyType)(response: Future[String]): Unit =
when(mockIncorpIdService.createJourney(
ArgumentMatchers.eq(incorpIdJourneyConfig),
ArgumentMatchers.eq(partyType)
)(ArgumentMatchers.any[HeaderCarrier]))
.thenReturn(response)
def mockGetDetails(journeyId: String)(response: Future[IncorporatedEntity]): Unit =
when(mockIncorpIdService.getDetails(ArgumentMatchers.eq(journeyId))(ArgumentMatchers.any[HeaderCarrier]))
.thenReturn(response)
}
| hmrc/vat-registration-frontend | test/services/mocks/IncorpIdServiceMock.scala | Scala | apache-2.0 | 1,660 |
sealed trait Option[+A]{
def map[B](f: A => B): Option[B] = {
this match {
case None => None
case Some(x) => Some(f(x))
}
}
//TODO: Return here and implement without using pattern matching
def flatMap[B](f: A => Option[B]): Option[B] = this match {
case None => None
case Some(x) => f(x)
}
def getOrElse[B >: A](default: => B): B = this match {
case None => default
case Some(x) => x
}
  // keep this Option when it holds a value, otherwise fall back to ob
  def orElse[B >: A](ob: => Option[B]): Option[B] = this map (Some(_)) getOrElse ob
def filter(f: A => Boolean): Option[A] = {
this.flatMap((x) => {
if(f(x) == true) Some(x)
else None
}
)
}
def lift[A,B](f: A => B): Option[A] => Option[B] = _ map f
}
case class Some[+A](get: A) extends Option[A]
case object None extends Option[Nothing]
/*
Write a generic function map2 that combines two Option values using a binary function.
If either Option value is None, then the return value is too. Here is its signature:
def map2[A,B,C](a: Option[A], b: Option[B])(f: (A, B) => C): Option[C]
*/
//TODO: Can I do this without pattern matching?
def map2[A,B,C](a: Option[A], b: Option[B])(f: (A, B) => C): Option[C] = {
(a,b) match {
case (None, _) => None
case (_, None) => None
case (Some(x), Some(y)) => Some(f(x,y))
}
}
assert(map2(Some(1), Some(2)) ((a,b) => a + b) == Some(3))
assert(map2(Some(1), None) ((a,b) => a + b) == None)
assert(map2(None, Some(2)) ((a: Int,b: Int) => a + b) == None)
assert(map2(None, None) ((a: Int,b: Int) => a + b) == None)
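
/*
 * Added illustration (not from the original exercise file): the TODOs above ask whether
 * flatMap and map2 can be written without pattern matching. Using only the combinators
 * already defined on this Option, flatMap can be expressed as `map(f) getOrElse None`,
 * and map2 as a nested flatMap/map (equivalently a for-comprehension):
 */
def map2ViaFlatMap[A,B,C](a: Option[A], b: Option[B])(f: (A, B) => C): Option[C] =
  a.flatMap(x => b.map(y => f(x, y)))

assert(map2ViaFlatMap(Some(1), Some(2)) ((a,b) => a + b) == Some(3))
assert(map2ViaFlatMap(Some(1), None) ((a,b) => a + b) == None)
assert(map2ViaFlatMap(None, Some(2)) ((a: Int,b: Int) => a + b) == None)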
| wkimeria/fp_scala_for_mortals | chapter_4/exercises/exercise_3.scala | Scala | mit | 1,493 |
class C {
val func = Seq { x += 1; 42 } apply _
}
class D {
var i = 0
def f(n: Unit)(j: Int): Int = ???
val g = x => f(y += 1)(x)
}
class E {
var i = 0
def f(n: Unit)(j: Int): Int = ???
val g = x => f(x += 1)(x)
}
class Convo {
def f(i: Int)(z: Any): Int = ???
val g = (x, y) => f(42)(x, y)
}
| scala/scala | test/files/neg/t9745.scala | Scala | apache-2.0 | 314 |
/*
* Copyright (C) 2011 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.core.tools.io
import java.io.OutputStream
class StringBuilderOutputStream(val builder: StringBuilder = new StringBuilder) extends OutputStream {
override def write(b: Int) = builder.append(b.toChar)
}
| ISCPIF/PSEExperiments | openmole-src/openmole/core/org.openmole.core.tools/src/main/scala/org/openmole/core/tools/io/StringBuilderOutputStream.scala | Scala | agpl-3.0 | 942 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.batch.sql
import java.util
import org.apache.flink.table.catalog.{CatalogPartitionImpl, CatalogPartitionSpec, ObjectPath}
import org.apache.flink.table.planner.expressions.utils.Func1
import org.apache.flink.table.planner.factories.TestValuesCatalog
import org.apache.flink.table.planner.utils.TableTestBase
import org.junit.{Before, Test}
import org.junit.runner.RunWith
import org.junit.runners.Parameterized
import scala.collection.JavaConversions._
@RunWith(classOf[Parameterized])
class PartitionableSourceTest(
val sourceFetchPartitions: Boolean,
val useCatalogFilter: Boolean) extends TableTestBase{
private val util = batchTestUtil()
@Before
def setup() : Unit = {
val partitionableTable =
"""
|CREATE TABLE PartitionableTable (
| id int,
| name string,
| part1 string,
| part2 int,
| virtualField as part2 + 1)
| partitioned by (part1, part2)
| with (
| 'connector' = 'values',
| 'bounded' = 'true',
| 'partition-list' = '%s'
|)
|""".stripMargin
    // test the case where filter push-down can consume all filters, including partition key fields
val partitionableAndFilterableTable =
"""
|CREATE TABLE PartitionableAndFilterableTable (
| id int,
| name string,
| part1 string,
| part2 int,
| virtualField as part2 + 1)
| partitioned by (part1, part2)
| with (
| 'connector' = 'values',
| 'bounded' = 'true',
| 'partition-list' = '%s',
| 'filterable-fields' = 'id;part1;part2'
|)
|""".stripMargin
if (sourceFetchPartitions) {
val partitions = "part1:A,part2:1;part1:A,part2:2;part1:B,part2:3;part1:C,part2:1"
util.tableEnv.executeSql(String.format(partitionableTable, partitions))
util.tableEnv.executeSql(String.format(partitionableAndFilterableTable, partitions))
} else {
val catalog =
new TestValuesCatalog("test_catalog", "test_database", useCatalogFilter)
util.tableEnv.registerCatalog("test_catalog", catalog)
util.tableEnv.useCatalog("test_catalog")
// register table without partitions
util.tableEnv.executeSql(String.format(partitionableTable, ""))
util.tableEnv.executeSql(String.format(partitionableAndFilterableTable, ""))
val partitionableTablePath = ObjectPath.fromString("test_database.PartitionableTable")
val partitionableAndFilterableTablePath =
ObjectPath.fromString("test_database.PartitionableAndFilterableTable")
// partition map
val partitions = Seq(
Map("part1"->"A", "part2"->"1"),
Map("part1"->"A", "part2"->"2"),
Map("part1"->"B", "part2"->"3"),
Map("part1"->"C", "part2"->"1"))
partitions.foreach(partition => {
val catalogPartitionSpec = new CatalogPartitionSpec(partition)
val catalogPartition = new CatalogPartitionImpl(
new java.util.HashMap[String, String](), "")
catalog.createPartition(
partitionableTablePath, catalogPartitionSpec, catalogPartition, true)
catalog.createPartition(
partitionableAndFilterableTablePath, catalogPartitionSpec, catalogPartition, true)
})
}
}
@Test
def testSimplePartitionFieldPredicate1(): Unit = {
util.verifyExecPlan("SELECT * FROM PartitionableTable WHERE part1 = 'A'")
}
@Test
def testPartialPartitionFieldPredicatePushDown(): Unit = {
util.verifyExecPlan(
"SELECT * FROM PartitionableTable WHERE (id > 2 OR part1 = 'A') AND part2 > 1")
}
@Test
def testWithUdfAndVirtualColumn(): Unit = {
util.addFunction("MyUdf", Func1)
util.verifyExecPlan("SELECT * FROM PartitionableTable WHERE id > 2 AND MyUdf(part2) < 3")
}
@Test
def testUnconvertedExpression(): Unit = {
util.verifyExecPlan("select * from PartitionableTable where trim(part1) = 'A' and part2 > 1")
}
@Test
def testPushDownPartitionAndFiltersContainPartitionKeys(): Unit = {
util.verifyExecPlan(
"select * from PartitionableAndFilterableTable " +
"where part1 = 'A' and part2 > 1 and id > 1")
}
@Test
def testPushDownPartitionAndFiltersContainPartitionKeysWithSingleProjection(): Unit = {
util.verifyExecPlan(
"select name from PartitionableAndFilterableTable " +
"where part1 = 'A' and part2 > 1 and id > 1")
}
}
object PartitionableSourceTest {
@Parameterized.Parameters(name = "sourceFetchPartitions={0}, useCatalogFilter={1}")
def parameters(): util.Collection[Array[Any]] = {
Seq[Array[Any]](
Array(true, false),
Array(false, false),
Array(false, true)
)
}
}
| apache/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/plan/batch/sql/PartitionableSourceTest.scala | Scala | apache-2.0 | 5,577 |
package com.twitter.finagle.service
import com.twitter.conversions.time._
import com.twitter.finagle._
import com.twitter.finagle.stats.StatsReceiver
import com.twitter.finagle.util.Proc
import com.twitter.util.{Future, Duration, Time, Throw, Return, Timer, TimerTask}
import scala.util.Random
private[finagle] object FailFastFactory {
private sealed trait State
private case object Ok extends State
private case class Retrying(since: Time, task: TimerTask, ntries: Int, backoffs: Stream[Duration]) extends State
private object Observation extends Enumeration {
type t = Value
val Success, Fail, Timeout, TimeoutFail, Close = Value
}
private val defaultBackoffs = (Backoff.exponential(1.second, 2) take 5) ++ Backoff.const(32.seconds)
private val rng = new Random
val role = Stack.Role("FailFast")
/**
* Creates a [[com.twitter.finagle.Stackable]] [[com.twitter.finagle.service.FailFastFactory]].
*/
def module[Req, Rep]: Stackable[ServiceFactory[Req, Rep]] =
new Stack.Simple[ServiceFactory[Req, Rep]] {
val role = FailFastFactory.role
val description =
"Backoff exponentially from hosts to which we cannot establish a connection"
def make(next: ServiceFactory[Req, Rep])(implicit params: Params) = {
val param.Stats(statsReceiver) = get[param.Stats]
val param.Timer(timer) = get[param.Timer]
new FailFastFactory(next, statsReceiver.scope("failfast"), timer)
}
}
}
/**
* A fail-fast factory that attempts to reduce the amount of requests dispatched
* to endpoints that will anyway fail. It works by marking a host dead on
* failure, launching a background process that attempts to reestablish the
* connection with the given backoff schedule. At this time, the factory is
* marked unavailable (and thus the load balancer above it will avoid its
* use). The factory becomes available again on success or when the backoff
* schedule runs out.
*
* Inflight attempts to connect will continue uninterrupted. However, trying to
* connect *after* being marked dead will fail fast until the background process
* is able to establish a connection.
*/
private[finagle] class FailFastFactory[Req, Rep](
self: ServiceFactory[Req, Rep],
statsReceiver: StatsReceiver,
timer: Timer,
backoffs: Stream[Duration] = FailFastFactory.defaultBackoffs
) extends ServiceFactoryProxy(self) {
import FailFastFactory._
// This perhaps should be a write exception, but in reality it's
// only dispatched when all hosts in the cluster are failed, and so
// we don't want to retry. This is a bit of a kludge--we should
// reconsider having this logic in the load balancer instead.
private[this] val failedFastExc = Future.exception {
val url = "https://twitter.github.io/finagle/guide/FAQ.html#why-do-clients-see-com-twitter-finagle-failedfastexception-s"
new FailedFastException(s"Endpoint is marked down. For more details see: $url")
}
private[this] def getBackoffs(): Stream[Duration] = backoffs map { duration =>
// Add a 10% jitter to reduce correlation.
val ms = duration.inMilliseconds
(ms + ms*(rng.nextFloat()*0.10)).toInt.milliseconds
}
@volatile private[this] var state: State = Ok
private[this] var proc: Proc[Observation.t] = _
proc = Proc[Observation.t] {
case Observation.Success if state != Ok =>
val Retrying(_, task, _, _) = state
task.cancel()
state = Ok
case Observation.Fail if state == Ok =>
val wait #:: rest = getBackoffs()
val now = Time.now
val task = timer.schedule(now + wait) { proc ! Observation.Timeout }
markedDeadCounter.incr()
state = Retrying(now, task, 0, rest)
case Observation.TimeoutFail if state != Ok =>
state match {
case Retrying(_, _, _, Stream.Empty) =>
state = Ok
case Retrying(since, _, ntries, wait #:: rest) =>
val task = timer.schedule(Time.now + wait) { proc ! Observation.Timeout }
state = Retrying(since, task, ntries+1, rest)
case Ok => assert(false)
}
case Observation.Timeout if state != Ok =>
self(ClientConnection.nil) respond {
case Throw(exc) => proc ! Observation.TimeoutFail
case Return(service) =>
proc ! Observation.Success
service.close()
}
case Observation.Close =>
state match {
case Retrying(_, task, _, _) =>
task.cancel()
case _ =>
}
state = Ok
proc.close()
case _ => ()
}
private[this] val unhealthyForMsGauge =
statsReceiver.addGauge("unhealthy_for_ms") {
state match {
case Retrying(since, _, _, _) => since.untilNow.inMilliseconds
case _ => 0
}
}
private[this] val unhealthyNumRetriesGauge =
statsReceiver.addGauge("unhealthy_num_tries") {
state match {
case Retrying(_, _, ntries, _) => ntries
case _ => 0
}
}
private[this] val markedDeadCounter = statsReceiver.counter("marked_dead")
override def apply(conn: ClientConnection) =
if (state != Ok) failedFastExc else {
self(conn) respond {
case Throw(_) => proc ! Observation.Fail
case Return(_) if state != Ok => proc ! Observation.Success
case _ =>
}
}
override def isAvailable = self.isAvailable && state == Ok
override val toString = "fail_fast_%s".format(self.toString)
override def close(deadline: Time) = {
proc ! Observation.Close
self.close(deadline)
}
}
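
/*
 * Illustrative composition sketch (not from the original source): the factory is normally
 * installed through FailFastFactory.module in a client stack, but an endpoint factory can
 * also be wrapped directly. The `underlying`, `stats` and `timer` values are assumed to be
 * supplied by the caller; the backoff combinators are the same ones referenced above.
 *
 *   val guarded: ServiceFactory[Req, Rep] =
 *     new FailFastFactory[Req, Rep](
 *       underlying,
 *       stats.scope("failfast"),
 *       timer,
 *       backoffs = Backoff.exponential(1.second, 2) take 5)
 */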
| yancl/finagle-6.22.0 | finagle-core/src/main/scala/com/twitter/finagle/service/FailFastFactory.scala | Scala | apache-2.0 | 5,522 |
package com.redhat.samples.camel
import org.apache.camel.builder.RouteBuilder
import org.apache.camel.test.junit4.CamelTestSupport
import org.junit.Test
class BasicTest extends CamelTestSupport {
override def createRouteBuilder = new RouteBuilder {
override def configure: Unit = {
from("direct:in").to("mock:out")
}
}
@Test
def hello: Unit = {
val out = getMockEndpoint("mock:out")
out.expectedMessageCount(1)
out.expectedBodiesReceived("Hello!")
template.sendBody("direct:in", "Hello!")
out.assertIsSatisfied()
}
}
| tadayosi/samples-camel | basic/src/test/scala/com/redhat/samples/camel/BasicTest.scala | Scala | apache-2.0 | 567 |
package org.jetbrains.plugins.scala
package codeInspection
package syntacticSimplification
import com.intellij.codeInspection._
import com.intellij.openapi.project.Project
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.extensions.PsiElementExt
import org.jetbrains.plugins.scala.lang.psi.api.expr.ScReturnStmt
import org.jetbrains.plugins.scala.lang.psi.api.statements.{ScFunction, ScFunctionDefinition}
class RemoveRedundantReturnInspection extends AbstractInspection("ScalaRedundantReturn", "Redundant Return") {
def actionFor(holder: ProblemsHolder): PartialFunction[PsiElement, Unit] = {
case function: ScFunctionDefinition =>
for (body <- function.body) {
val returns = body.calculateReturns()
body.depthFirst {
!_.isInstanceOf[ScFunction]
}.foreach {
case r: ScReturnStmt =>
if (returns.contains(r)) {
holder.registerProblem(r.returnKeyword, "Return keyword is redundant",
ProblemHighlightType.GENERIC_ERROR_OR_WARNING, new RemoveReturnKeywordQuickFix(r))
}
case _ =>
}
}
}
}
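// A hypothetical example of what this inspection reports (not taken from the
// plugin's test data): a `return` whose expression is already in the method's
// tail position is redundant, and the quick fix below removes the keyword.
//
//   def sign(x: Int): Int = { if (x < 0) return -1 else return 1 }  // both returns are flagged
//   def sign(x: Int): Int = { if (x < 0) -1 else 1 }                // after applying the fix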
class RemoveReturnKeywordQuickFix(r: ScReturnStmt)
extends AbstractFixOnPsiElement(ScalaBundle.message("remove.return.keyword"), r) {
def doApplyFix(project: Project) {
val ret = getElement
if (!ret.isValid) return
ret.expr match {
case Some(e) => ret.replace(e.copy())
case None => ret.delete()
}
}
} | ilinum/intellij-scala | src/org/jetbrains/plugins/scala/codeInspection/syntacticSimplification/RemoveRedundantReturnInspection.scala | Scala | apache-2.0 | 1,477 |
package org.deepdive.inference
import org.deepdive.settings.FactorFunctionVariable
import org.deepdive.calibration._
import org.deepdive.settings._
import java.io.File
trait InferenceRunner {
/* Initializes the data store. This method must be called before any other methods in this class. */
def init() : Unit
/*
* The number of tuples in each batch. If not defined, we use one large batch.
* The user can overwrite this number using the inference.batch_size config setting.
*/
def BatchSize : Option[Int]
/* Generate a grounded graph based on the factor description */
def groundFactorGraph(schema: Map[String, _ <: VariableDataType],
factorDescs: Seq[FactorDesc], calibrationSettings: CalibrationSettings,
skipLearning: Boolean, weightTable: String, dbSettings: DbSettings = null) : Unit
/*
* Writes inference results produced by the sampler back to the data store.
* The given file is a space-separated file with three columns:
* VariableID, LastSampleValue, ExpectedValue
*/
def writebackInferenceResult(variableSchema: Map[String, _ <: VariableDataType],
variableOutputFile: String, weightsOutputFile: String, dbSettings: DbSettings) : Unit
/*
* Gets calibration data for the given buckets.
* writebackInferenceResult must be called before this method can be called.
*/
def getCalibrationData(variable: String, dataType: VariableDataType, buckets: List[Bucket]) : Map[Bucket, BucketData]
}
/* Stores the factor graph and inference results. */
trait InferenceRunnerComponent {
def inferenceRunner : InferenceRunner
}
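/*
 * Sketch of the sampler output consumed by writebackInferenceResult, assuming the
 * space-separated three-column format described above (VariableID, LastSampleValue,
 * ExpectedValue). The object name and helper are illustrative, not part of DeepDive.
 */
private object InferenceResultLineSketch {
  // e.g. "42 1 0.87" -> (variableId = 42, lastSampleValue = 1.0, expectedValue = 0.87)
  def parse(line: String): (Long, Double, Double) = {
    val Array(id, lastSample, expected) = line.trim.split("\\s+")
    (id.toLong, lastSample.toDouble, expected.toDouble)
  }
}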
| sai16vicky/deepdive | src/main/scala/org/deepdive/inference/InferenceRunner.scala | Scala | apache-2.0 | 1,651 |
trait SCL10006 {
import cats._
val x: /*start*/Unapply.Aux2Right[Applicative, Either[String, Int], Either, String, Int]#M[List[Unapply.Aux2Right[Applicative, Either[String, Int], Either, String, Int]#A]]/*end*/
}
/*
trait SCL10006 {
import cats._
val x: Either[String, List[Int]]
}
*/ | ilinum/intellij-scala | testdata/adjustTypes/SCL10006.scala | Scala | apache-2.0 | 296 |
/*
* Copyright (c) 2014-2018 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0, and
* you may not use this file except in compliance with the Apache License
* Version 2.0. You may obtain a copy of the Apache License Version 2.0 at
* http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the Apache License Version 2.0 is distributed on an "AS
* IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
* implied. See the Apache License Version 2.0 for the specific language
* governing permissions and limitations there under.
*/
package com.snowplowanalytics.iglu.server
package service
import org.json4s.{JArray, JValue}
import org.json4s.jackson.parseJson
import org.json4s.JsonDSL._
// Scala
import scala.concurrent.duration._
import java.net.URLEncoder
import java.nio.charset.StandardCharsets
// Akka
import akka.actor.{ActorRef, Props}
// this project
import com.snowplowanalytics.iglu.server.actor.{ApiKeyActor, SchemaActor}
// Akka Http
import akka.http.scaladsl.model.HttpEntity
import akka.http.scaladsl.model.Multipart.FormData
import akka.http.scaladsl.testkit.{RouteTestTimeout, Specs2RouteTest}
import akka.http.scaladsl.model.StatusCodes._
import akka.http.scaladsl.model.ContentTypes._
import akka.http.scaladsl.server.Route
// Specs2
import org.specs2.mutable.Specification
class SchemaServiceSpec extends Specification
with Api with Specs2RouteTest with SetupAndDestroy {
override def afterAll() = super.afterAll()
val schemaActor: ActorRef = system.actorOf(Props(classOf[SchemaActor], config), "schemaActor2")
val apiKeyActor: ActorRef = system.actorOf(Props(classOf[ApiKeyActor], config), "apiKeyActor2")
implicit val routeTestTimeout: RouteTestTimeout = RouteTestTimeout(20 seconds)
val readKey = "6eadba20-9b9f-4648-9c23-770272f8d627"
val writeKey = "a89c5f27-fe76-4754-8a07-d41884af1074"
val faultyKey = "51ffa158-ba4b-4e27-a4ff-dfb5639b5453"
val wrongVendorKey = "83e7c051-cd68-4e44-8b36-09182fa158d5"
val notUuidKey = "83e7c051-cd68-4e44-8b36-09182f8d5"
val vendor = "com.snowplowanalytics.snowplow"
val vendor2 = "com.snowplowanalytics.self-desc"
val otherVendor = "com.benfradet.ben"
val otherVendor2 = "com.benfradet.snowplow"
val name = "ad_click"
val name2 = "ad_click2"
val format = "jsonschema"
val format2 = "jsontable"
val version = "1-0-0"
val version2 = "1-0-1"
val validSchema =
"""{
"$schema" : "http://iglucentral.com/schemas/com.snowplowanalytics.self-desc/schema/jsonschema/1-0-0#",""" + s"""
"self": {
"vendor": "$vendor",
"name": "$name",
"format": "$format",
"version": "$version"
}
}"""
val invalidSchema = """{ "some": "invalid schema" }"""
val notJson = "notjson"
val validSchemaUri = URLEncoder.encode(validSchema, StandardCharsets.UTF_8.toString).toLowerCase
val invalidSchemaUri = invalidSchema
.replaceAll(" ", "%20")
.replaceAll("\\"", "%22")
val start = "/api/schemas/"
//get urls
val publicSchemasUrl = s"${start}public"
val metaPublicSchemasUrl = s"${publicSchemasUrl}?filter=metadata"
val allSchemasUrl = s"${start}"
val metaAllSchemasUrl = s"${allSchemasUrl}?filter=metadata"
val url = s"${start}${vendor}/${name}/${format}/${version}"
val publicUrl = s"${start}${otherVendor}/${name}/${format}/${version}"
val multiUrl = s"${url},${vendor}/${name2}/${format}/${version}"
val multiPublicUrl = s"${url},${otherVendor}/${name}/${format}/${version}"
val faultyUrl = s"${start}${vendor}/${name}/jsonchema/${version}"
val multiVersionUrl = s"${url},${version2}"
val multiVersionPublicUrl = s"${publicUrl},${version2}"
val metaUrl = s"${url}?filter=metadata"
val metaPublicUrl = s"${publicUrl}?filter=metadata"
val metaMultiUrl = s"${multiUrl}?filter=metadata"
val metaMultiPublicUrl = s"${multiPublicUrl}?filter=metadata"
val metaMultiVersionUrl = s"${multiVersionUrl}?filter=metadata"
val metaMultiVersionPublicUrl = s"${multiVersionPublicUrl}?filter=metadata"
val multiVendor = s"${start}${vendor},${vendor2}/${name}/${format}/${version}"
val multiVendorPublic =
s"${start}${otherVendor},${otherVendor2}/${name}/${format}/${version}"
val metaMultiVendor = s"${multiVendor}?filter=metadata"
val metaMultiVendorPublic = s"${multiVendorPublic}?filter=metadata"
val multiFormat = s"${start}${vendor}/${name}/${format},${format2}/${version}"
val multiFormatPublic =
s"${start}${otherVendor}/${name}/${format},${format2}/${version}"
val metaMultiFormat = s"${multiFormat}?filter=metadata"
val metaMultiFormatPublic = s"${multiFormatPublic}?filter=metadata"
  val multiName = s"${start}${vendor}/${name},${name2}/${format}/${version}"
val multiNamePublic =
s"${start}${otherVendor}/${name},${name2}/${format}/${version}"
val metaMultiName = s"${multiName}?filter=metadata"
val metaMultiNamePublic = s"${multiNamePublic}?filter=metadata"
val vendorUrl = s"${start}${vendor}"
val vendorPublicUrl = s"${start}${otherVendor}"
val metaVendorUrl = s"${vendorUrl}?filter=metadata"
val metaVendorPublicUrl = s"${vendorPublicUrl}?filter=metadata"
val nameUrl = s"${start}${vendor}/${name}"
val namePublicUrl = s"${start}${otherVendor}/${name}"
val metaNameUrl = s"${nameUrl}?filter=metadata"
val metaNamePublicUrl = s"${namePublicUrl}?filter=metadata"
val formatUrl = s"${start}${vendor}/${name}/${format}"
val formatPublicUrl = s"${start}${otherVendor}/${name}/${format}"
val metaFormatUrl = s"${formatUrl}?filter=metadata"
val metaFormatPublicUrl = s"${formatPublicUrl}?filter=metadata"
val otherVendorUrl = s"${start}com.benfradet.project"
val otherNameUrl = s"${start}com.benfradet.project/${name}"
val otherFormatUrl = s"${start}com.benfradet.project/${name}/${format}"
val metadataIncludedUrl = s"$url?metadata=1"
//post urls
val postUrl1 = s"$start$vendor/unit_test1/$format/$version"
val postUrl2 = s"$start$vendor/unit_test2/$format/$version?schema=$validSchemaUri"
val postUrl3 = s"$start$vendor/unit_test3/$format/$version"
val postUrl4 = s"$start$vendor/unit_test4/$format/$version" +
s"?schema=$validSchemaUri"
val postUrl6 = s"$url?schema=$validSchemaUri"
val postUrl7 = s"$start$vendor/unit_test7/$format/$version" +
s"?schema=$invalidSchemaUri"
val postUrl8 = s"$start$vendor/unit_test8/$format/$version" +
s"?schema=$notJson"
val postUrl9 = s"${start}${vendor}/unit_test9/${format}/${version}" +
s"?isPublic=true"
val postUrl10 = s"${start}${vendor}/unit_test10/${format}/${version}" +
s"?schema=${validSchemaUri}&isPublic=true"
val postUrl11 = s"${start}${vendor}/unit_test11/${format}/${version}"
val postUrl12 = s"${start}${vendor}/unit_test12/${format}/${version}"
//put urls
val putUrl1 = s"${start}${vendor}/unit_test13/${format}/${version}"
val putUrl2 = s"${start}${vendor}/unit_test14/${format}/${version}" +
s"?schema=${validSchemaUri}"
val putUrl3 = s"${start}${vendor}/unit_test15/${format}/${version}"
sequential
"SchemaService" should {
"for GET requests" should {
"for the /api/schemas endpoint" should {
"return a proper catalog of public and private schemas if apikey is provided" in {
Get(allSchemasUrl) ~> addHeader("apikey", readKey) ~> routes ~>
check {
status === OK
contentType === `application/json`
responseAs[String] must contain(name2)
selfExtractor(responseAs[String]) must haveLength(5)
}
}
"return a proper catalog of public schemas without read key" in {
Get(allSchemasUrl) ~> routes ~>
check {
status === OK
contentType === `application/json`
responseAs[String] must contain(otherVendor)
selfExtractor(responseAs[String]) must haveLength(2)
}
}
"return proper metadata for every public schema" in {
Get(metaAllSchemasUrl) ~> addHeader("apikey", readKey) ~>
routes ~> check {
status === OK
contentType === `application/json`
responseAs[String] must contain(otherVendor)
}
}
}
"for version based urls" should {
"return a proper json for well-formed single GET requests" +
s"($url)" in {
Get(url) ~> addHeader("apikey", readKey) ~> routes ~> check {
status === OK
contentType === `application/json`
responseAs[String] must contain(name)
}
}
s"return a proper json for a public schema (${publicUrl})" in {
Get(publicUrl) ~> addHeader("apikey", wrongVendorKey) ~> routes ~> check {
status === OK
contentType === `application/json`
responseAs[String] must contain(otherVendor)
}
}
"return proper metadata for well-formed single GET requests" +
s"($metaUrl)" in {
Get(metaUrl) ~> addHeader("apikey", readKey) ~> routes ~> check {
status === OK
contentType === `application/json`
responseAs[String] must contain(vendor) and contain(name) and
contain(format) and contain(version)
}
}
"return schema without metadata for GET " +
s"($url)" in {
Get(url) ~> addHeader("apikey", readKey) ~> routes ~> check {
status === OK
contentType === `application/json`
responseAs[String] must contain(vendor) and contain(name) and
contain(format) and contain(version) and not contain("metadata")
}
}
"return schema with metadata for GET " +
s"($metadataIncludedUrl)" in {
Get(metadataIncludedUrl) ~> addHeader("apikey", readKey) ~> routes ~> check {
status === OK
contentType === `application/json`
responseAs[String] must contain(vendor) and contain(name) and
contain(format) and contain(version) and contain("metadata")
}
}
s"return proper metadata for a public schema $metaPublicUrl" in {
Get(metaPublicUrl) ~> addHeader("apikey", wrongVendorKey) ~> routes ~>
check {
status === OK
contentType === `application/json`
responseAs[String] must contain(otherVendor) and contain(name) and
contain(format) and contain(version)
}
}
"return a 404 for GET requests for which the schema is not in the db" in
{
Get(faultyUrl) ~> addHeader("apikey", readKey) ~> routes ~> check {
status === NotFound
contentType === `application/json`
responseAs[String] must
contain("There are no schemas available here")
}
}
"return a 404 if no apikey is found in the db" in {
Get(url) ~> addHeader("apikey", faultyKey) ~> Route.seal(routes) ~>
check {
status === NotFound
contentType === `application/json`
responseAs[String] must
contain("There are no schemas available here")
}
}
"return a 404 if the API key provided is not an uuid" in {
Get(url) ~> addHeader("apikey", notUuidKey) ~> Route.seal(routes) ~>
check {
status === NotFound
contentType === `application/json`
responseAs[String] must
contain("There are no schemas available here")
}
}
"return a 404 if no apikey is provided" in {
Get(url) ~> Route.seal(routes) ~> check {
status === NotFound
contentType === `application/json`
responseAs[String] must
contain("There are no schemas available here")
}
}
"""return a 404 if the owner of the API key is not a prefix of the
schema's vendor""" in {
Get(url) ~> addHeader("apikey", wrongVendorKey) ~>
Route.seal(routes) ~> check {
status === NotFound
contentType === `application/json`
responseAs[String] must
contain("There are no schemas available here")
}
}
"return a 200 if schema is public and no apikey is provided" in {
Get(s"$start$otherVendor/$name2/$format/$version") ~> Route.seal(routes) ~> check {
status === OK
contentType === `application/json`
}
}
"return a 404 if schema is private and no apikey is provided" in {
Get(s"$start$otherVendor/$name/$format/$version") ~> Route.seal(routes) ~> check {
status === NotFound
contentType === `application/json`
responseAs[String] must
contain("There are no schemas available here")
}
}
"leave GET requests to other paths unhandled" in {
Get("/test") ~> routes ~> check {
handled must beFalse
}
}
}
"for vendor based urls" should {
"return the catalog of available schemas for this vendor" +
s"(${vendorUrl})" in {
Get(vendorUrl) ~> addHeader("apikey", readKey) ~> routes ~> check {
status === OK
contentType === `application/json`
responseAs[String] must contain(name) and contain(name2)
}
}
"return the catalog of available public schemas for another vendor" +
s"(${vendorPublicUrl})" in {
Get(vendorPublicUrl) ~> addHeader("apikey", readKey) ~> routes ~>
check {
status === OK
contentType === `application/json`
responseAs[String] must contain(otherVendor) and contain(name) and
contain(name2)
}
}
"return metadata about every schema for this vendor" +
s"(${metaVendorUrl})" in {
Get(metaVendorUrl) ~> addHeader("apikey", readKey) ~> routes ~>
check {
status === OK
contentType === `application/json`
responseAs[String] must contain(vendor)
}
}
"return metadata about every public schema for another vendor" +
s"(${metaVendorPublicUrl})" in {
Get(metaVendorPublicUrl) ~> addHeader("apikey", readKey) ~> routes ~>
check {
status === OK
contentType === `application/json`
responseAs[String] must contain(otherVendor)
}
}
"return a 404 for a vendor which has no schemas" in {
Get(otherVendorUrl) ~> addHeader("apikey", wrongVendorKey) ~>
routes ~> check {
status === NotFound
contentType === `application/json`
responseAs[String] must
contain("There are no schemas available here")
}
}
"return a 404 if the owner is not a prefix of the vendor" in {
Get(vendorUrl) ~> addHeader("apikey", wrongVendorKey) ~> routes ~>
check {
status === NotFound
contentType === `application/json`
responseAs[String] must contain("There are no schemas available here")
}
}
}
"for name based urls" should {
"return a 200 if schema is public and no apikey is provided" in {
Get(s"$start$otherVendor/$name2") ~> Route.seal(routes) ~> check {
status === OK
contentType === `application/json`
}
}
"return a 404 if schema is private and no apikey is provided" in {
Get(s"$start$otherVendor/$name") ~> Route.seal(routes) ~> check {
status === NotFound
contentType === `application/json`
responseAs[String] must
contain("There are no schemas available here")
}
}
"return the catalog of available schemas for this name" +
s"(${nameUrl})" in {
Get(nameUrl) ~> addHeader("apikey", readKey) ~> routes ~> check {
status === OK
contentType === `application/json`
responseAs[String] must contain(version) and contain(version2)
}
}
"return metadata about every schema having this vendor, name" +
s"(${metaNameUrl})" in {
Get(metaNameUrl) ~> addHeader("apikey", readKey) ~> routes ~> check {
status === OK
contentType === `application/json`
responseAs[String] must contain(vendor) and contain(name)
}
}
"return a 404 for a vendor/name combination which has no schemas" in {
Get(otherNameUrl) ~> addHeader("apikey", wrongVendorKey) ~> routes ~>
check {
status === NotFound
contentType === `application/json`
responseAs[String] must
contain("There are no schemas available here")
}
}
"return a 404 if the owner is not a prefix of the vendor" in {
Get(nameUrl) ~> addHeader("apikey", wrongVendorKey) ~> routes ~>
check {
status === NotFound
contentType === `application/json`
responseAs[String] must contain("There are no schemas available here")
}
}
}
"for format based urls" should {
"return the catalog of available schemas for this format" +
s"(${formatUrl})" in {
Get(formatUrl) ~> addHeader("apikey", readKey) ~> routes ~> check {
status === OK
contentType === `application/json`
responseAs[String] must contain(version) and contain(version2)
}
}
"""return a 404 for a vendor/name/format combination which has
no schemas""" in {
Get(otherFormatUrl) ~> addHeader("apikey", wrongVendorKey) ~>
routes ~> check {
status === NotFound
contentType === `application/json`
responseAs[String] must
contain("There are no schemas available here")
}
}
"return a 404 if the owner is not a prefix of the vendor" in {
Get(formatUrl) ~> addHeader("apikey", wrongVendorKey) ~> routes ~>
check {
status === NotFound
contentType === `application/json`
responseAs[String] must contain("There are no schemas available here")
}
}
}
}
"for POST requests" should {
//should be removed from db before running tests for now
"return success if the schema is passed as form data" in {
Post(postUrl1, FormData(Map("schema" -> HttpEntity(`application/json`, validSchema)))) ~>
addHeader("apikey", writeKey) ~> Route.seal(routes) ~> check {
status === Created
contentType === `application/json`
responseAs[String] must contain("The schema has been successfully added") and
contain(vendor)
}
}
//should be removed from db before running tests for now
"return success if the schema is passed as query parameter" in {
Post(postUrl2) ~> addHeader("apikey", writeKey) ~> Route.seal(routes) ~>
check {
status === Created
contentType === `application/json`
responseAs[String] must contain("The schema has been successfully added") and contain(vendor)
}
}
//should be removed from db before running tests for now
"return success if the schema is passed as request body" in {
Post(postUrl11, HttpEntity(`application/json`, validSchema)) ~>
addHeader("apikey", writeKey) ~> Route.seal(routes) ~> check {
status === Created
contentType === `application/json`
responseAs[String] must contain("The schema has been successfully added") and
contain(vendor)
}
}
//should be removed from db before running tests for now
"return success if the schema is passed as form data and is public" in {
Post(postUrl9, FormData(Map("schema" -> HttpEntity(`application/json`, validSchema)))) ~>
addHeader("apikey", writeKey) ~> Route.seal(routes) ~> check {
status === Created
contentType === `application/json`
responseAs[String] must contain("The schema has been successfully added") and
contain(vendor)
}
}
//should be removed from db before running tests for now
"return success if the schema is passed as query param and is public" in {
Post(postUrl10) ~> addHeader("apikey", writeKey) ~>
Route.seal(routes) ~> check {
status === Created
contentType === `application/json`
responseAs[String] must contain("The schema has been successfully added") and
contain(vendor)
}
}
//should be removed from db before running tests for now
"""return success if the schemas is passed as request body and is
public""" in {
Post(postUrl12, HttpEntity(`application/json`, validSchema)) ~>
addHeader("apikey", writeKey) ~> Route.seal(routes) ~> check {
status === Created
contentType === `application/json`
responseAs[String] must contain("The schema has been successfully added") and
contain(vendor)
}
}
"""return a 400 if no form data or query param or body request is specified""" in {
Post(postUrl3) ~> addHeader("apikey", writeKey) ~>
Route.seal(routes) ~> check {
status === BadRequest
contentType === `text/plain(UTF-8)`
responseAs[String] must
contain("The schema provided is not valid")
}
}
"""return 401 if the API key doesn't have sufficient permissions with
query param""" in {
Post(postUrl4) ~> addHeader("apikey", readKey) ~>
Route.seal(routes) ~> check {
status === Unauthorized
contentType === `application/json`
responseAs[String] must
contain("You do not have sufficient privileges")
}
}
"""return a 401 if the API key doesn't have sufficient permissions with form data""" in {
Post(postUrl3, FormData(Map("schema" -> HttpEntity(`application/json`, validSchema)))) ~>
addHeader("apikey", readKey) ~> Route.seal(routes) ~> check {
status === Unauthorized
contentType === `application/json`
responseAs[String] must
contain("You do not have sufficient privileges")
}
}
"""return a 401 if the API key doesn't have sufficient permissions with body request""" in {
Post(postUrl3, HttpEntity(`application/json`, validSchema)) ~>
addHeader("apikey", readKey) ~> Route.seal(routes) ~> check {
status === Unauthorized
contentType === `application/json`
responseAs[String] must
contain("You do not have sufficient privileges")
}
}
"""return a 401 if the owner of the API key is not a prefix of the schema's vendor with query param""" in {
Post(postUrl6) ~> addHeader("apikey", wrongVendorKey) ~>
Route.seal(routes) ~> check {
status === Unauthorized
contentType === `application/json`
responseAs[String] must
contain("You do not have sufficient privileges")
}
}
"""return a 401 if the owner of the API key is not a prefix of the schema's vendor with form data""" in {
Post(postUrl3, FormData(Map("schema" -> HttpEntity(`application/json`, validSchema)))) ~>
addHeader("apikey", wrongVendorKey) ~> Route.seal(routes) ~> check {
status === Unauthorized
contentType === `application/json`
responseAs[String] must
contain("You do not have sufficient privileges")
}
}
"""return a 401 if the owner of the API key is not a prefix of the schema's vendor with body request""" in {
Post(postUrl3, HttpEntity(`application/json`, validSchema)) ~>
addHeader("apikey", wrongVendorKey) ~> Route.seal(routes) ~> check {
status === Unauthorized
contentType === `application/json`
responseAs[String] must
contain("You do not have sufficient privileges")
}
}
"""return a 400 if the supplied schema is not self-describing with query param and contain a validation failure report""" in {
Post(postUrl7) ~> addHeader("apikey", writeKey) ~> Route.seal(routes) ~>
check {
status === BadRequest
contentType === `text/plain(UTF-8)`
responseAs[String] must
contain("Schema is not self-describing")
}
}
"""return a 400 if the supplied schema is not self-describing with form data """ in {
Post(postUrl3, FormData(Map("schema" -> HttpEntity(`application/json`, invalidSchema)))) ~>
addHeader("apikey", writeKey) ~> Route.seal(routes) ~> check {
status === BadRequest
contentType === `text/plain(UTF-8)`
responseAs[String] must
contain("Schema is not self-describing")
}
}
"""return a 400 if the supplied schema is not self-describing with body request""" in {
Post(postUrl3, HttpEntity(`application/json`, invalidSchema)) ~>
addHeader("apikey", writeKey) ~> Route.seal(routes) ~> check {
status === BadRequest
contentType === `text/plain(UTF-8)`
responseAs[String] must
contain("Schema is not self-describing")
}
}
"return a 400 if the supplied string is not a schema with query param" in
{
Post(postUrl8) ~> addHeader("apikey", writeKey) ~> Route.seal(routes) ~>
check {
status === BadRequest
contentType === `text/plain(UTF-8)`
responseAs[String] must contain("The schema provided is not valid")
}
}
"return a 400 if the supplied string is not a schema with form data" in {
Post(postUrl3, FormData(Map("schema" -> HttpEntity(`text/plain(UTF-8)`, notJson)))) ~>
addHeader("apikey", writeKey) ~> Route.seal(routes) ~> check {
status === BadRequest
contentType === `text/plain(UTF-8)`
responseAs[String] must contain("The schema provided is not valid")
}
}
"""return a 400 if the supplied string is not a schema with body request""" in {
Post(postUrl3, HttpEntity(`application/json`, notJson)) ~>
addHeader("apikey", writeKey) ~> Route.seal(routes) ~> check {
status === BadRequest
contentType === `text/plain(UTF-8)`
responseAs[String] must contain("The schema provided is not valid")
}
}
}
"for PUT requests" should {
"return a 201 if the schema doesnt already exist with form data" in {
Put(putUrl1, FormData(Map("schema" -> HttpEntity(`application/json`, validSchema)))) ~>
addHeader("apikey", writeKey) ~> Route.seal(routes) ~> check {
status === Created
contentType === `application/json`
responseAs[String] must contain("The schema has been successfully added") and
contain(vendor)
}
}
"return a 201 if the schema doesnt already exist with query param" in {
Put(putUrl2) ~> addHeader("apikey", writeKey) ~> Route.seal(routes) ~>
check {
status === Created
contentType === `application/json`
responseAs[String] must contain("The schema has been successfully added") and
contain(vendor)
}
}
"return a 201 if the schema doesnt already exist with body request" in {
Put(putUrl3, HttpEntity(`application/json`, validSchema)) ~>
addHeader("apikey", writeKey) ~> Route.seal(routes) ~> check {
status === Created
contentType === `application/json`
responseAs[String] must contain("The schema has been successfully added") and
contain(vendor)
}
}
"""return a 400 if no form data or query param or body request is specified""" in {
Put(postUrl3) ~> addHeader("apikey", writeKey) ~>
Route.seal(routes) ~> check {
status === BadRequest
contentType === `text/plain(UTF-8)`
responseAs[String] must
contain("The schema provided is not valid")
}
}
"""return 401 if the API key doesn't have sufficient permissions with
query param""" in {
Put(postUrl4) ~> addHeader("apikey", readKey) ~>
Route.seal(routes) ~> check {
status === Unauthorized
contentType === `application/json`
responseAs[String] must
contain("You do not have sufficient privileges")
}
}
"""return a 401 if the API key doesn't have sufficient permissions with form data""" in {
Put(postUrl3, FormData(Map("schema" -> HttpEntity(`application/json`, validSchema)))) ~>
addHeader("apikey", readKey) ~> Route.seal(routes) ~> check {
status === Unauthorized
contentType === `application/json`
responseAs[String] must
contain("You do not have sufficient privileges")
}
}
"""return a 401 if the API key doesn't have sufficient permissions with body request""" in {
Put(postUrl3, HttpEntity(`application/json`, validSchema)) ~>
addHeader("apikey", readKey) ~> Route.seal(routes) ~> check {
status === Unauthorized
contentType === `application/json`
responseAs[String] must
contain("You do not have sufficient privileges")
}
}
"""return a 401 if the owner of the API key is not a prefix of the schema's vendor with query param""" in {
Put(postUrl6) ~> addHeader("apikey", wrongVendorKey) ~>
Route.seal(routes) ~> check {
status === Unauthorized
contentType === `application/json`
responseAs[String] must
contain("You do not have sufficient privileges")
}
}
"""return a 401 if the owner of the API key is not a prefix of the schema's vendor with form data""" in {
Put(postUrl3, FormData(Map("schema" -> HttpEntity(`application/json`, validSchema)))) ~>
addHeader("apikey", wrongVendorKey) ~> Route.seal(routes) ~> check {
status === Unauthorized
contentType === `application/json`
responseAs[String] must
contain("You do not have sufficient privileges")
}
}
"""return a 401 if the owner of the API key is not a prefix of the schema's vendor with body request""" in {
Put(postUrl3, HttpEntity(`application/json`, validSchema)) ~>
addHeader("apikey", wrongVendorKey) ~> Route.seal(routes) ~> check {
status === Unauthorized
contentType === `application/json`
responseAs[String] must
contain("You do not have sufficient privileges")
}
}
"""return a 400 if the supplied schema is not self-describing with query param""" in {
Put(postUrl7) ~> addHeader("apikey", writeKey) ~> Route.seal(routes) ~>
check {
status === BadRequest
contentType === `text/plain(UTF-8)`
responseAs[String] must contain("Schema is not self-describing")
}
}
"""return a 400 if the supplied schema is not self-describing with form data""" in {
Put(postUrl3, FormData(Map("schema" -> HttpEntity(`application/json`, invalidSchema)))) ~>
addHeader("apikey", writeKey) ~> Route.seal(routes) ~> check {
status === BadRequest
contentType === `text/plain(UTF-8)`
responseAs[String] must contain("Schema is not self-describing")
}
}
"""return a 400 if the supplied schema is not self-describing with body request""" in {
Put(postUrl3, HttpEntity(`application/json`, invalidSchema)) ~>
addHeader("apikey", writeKey) ~> Route.seal(routes) ~> check {
status === BadRequest
contentType === `text/plain(UTF-8)`
responseAs[String] must contain("Schema is not self-describing")
}
}
"return a 400 if the supplied string is not a schema with query param" in
{
Put(postUrl8) ~> addHeader("apikey", writeKey) ~> Route.seal(routes) ~>
check {
status === BadRequest
contentType === `text/plain(UTF-8)`
responseAs[String] must contain("The schema provided is not valid")
}
}
"return a 400 if the supplied string is not a schema with form data" in {
Put(postUrl3, FormData(Map("schema" -> HttpEntity(`text/plain(UTF-8)` , notJson)))) ~>
addHeader("apikey", writeKey) ~> Route.seal(routes) ~> check {
status === BadRequest
contentType === `text/plain(UTF-8)`
responseAs[String] must contain("The schema provided is not valid")
}
}
"""return a 400 if the supplied string is not a schema with body request""" in {
Put(postUrl3, HttpEntity(`application/json`, notJson)) ~>
addHeader("apikey", writeKey) ~> Route.seal(routes) ~> check {
status === BadRequest
contentType === `text/plain(UTF-8)`
responseAs[String] must contain("The schema provided is not valid")
}
}
}
}
private def selfExtractor(text: String): List[JValue] =
parseJson(text) match {
case JArray(objects) => objects.map(_ \\ "self")
case _ => throw new RuntimeException("Not an array")
}
}
| snowplow/iglu | 2-repositories/iglu-server/src/test/scala/com.snowplowanalytics.iglu.server/service/SchemaServiceSpec.scala | Scala | apache-2.0 | 34,071 |
package edu.gemini.phase2.template.factory.impl.gmos
import edu.gemini.pot.sp.{ISPObservation, ISPGroup}
import edu.gemini.spModel.gemini.gmos.blueprint.SpGmosSBlueprintIfuNs
import edu.gemini.spModel.gemini.gmos.GmosSouthType.FPUnitSouth._
import edu.gemini.spModel.gemini.gmos.GmosSouthType.FilterSouth._
import edu.gemini.phase2.template.factory.impl.TemplateDb
import edu.gemini.spModel.gemini.gmos.InstGmosCommon
import edu.gemini.spModel.gemini.gmos.GmosSouthType.FilterSouth
case class GmosSIfuNs(blueprint:SpGmosSBlueprintIfuNs) extends GmosSBase[SpGmosSBlueprintIfuNs] {
// IF SPECTROSCOPY MODE == IFU N&S
// INCLUDE FROM 'IFU N&S BP' IN,
// Target group: {43}-{44}
// Baseline folder: {45}-{50}
// Where FPU!=None in BP (static and iterator), SET FPU from PI
// IFU acq obs have an iterator titled "Field image" with
// FPU=None, the FPU must not be set here.
// IF FPU = 'IFU 1 SLIT' in PI then SET FPU='IFU N & S Right Slit (red)' in OT
// If not acq SET DISPERSER FROM PI
// For filter changes below, do not adjust exposure times.
// If acq ({43}, {49})
// If filter from PI != None, SET FILTER in static component
// to ugriz filter closest in central wavelength to the filter
// from PI
// else SET FILTER=r (as in BP)
// else SET FILTER FROM PI
// IF FPU = 'IFU 2 slits' in PI (IFU or IFU N&S mode):
// IF FILTER=None, SET FILTER=r_G0326
// SET CENTRAL WAVELENGTHS TO THE FILTER EFF WAVELENGTH
// AND EFF WAVELENGTH + 5nm (if iteration over wavelength)
// See http://www.gemini.edu/node/10637
val targetGroup = 43 to 44
val baselineFolder = 45 to 50
val notes = Seq.empty
val acq = Seq(43, 49)
def initialize(grp:ISPGroup, db:TemplateDb):Either[String, Unit] = forObservations(grp, forAll)
def piFpu = if (blueprint.fpu == IFU_2) IFU_3 else blueprint.fpu
def noneOrPiFpu(libFpu: Any) = if (libFpu == FPU_NONE) FPU_NONE else piFpu
def forAll(o:ISPObservation):Either[String, Unit] = for {
// Where FPU!=None in BP (static and iterator), SET FPU from PI
// (here BP means the library not the blueprint!)
fpuInLIB <- o.getFpu.right
_ <- o.setFpu(noneOrPiFpu(fpuInLIB)).right
_ <- o.ed.modifySeqAllKey(InstGmosCommon.FPU_PROP_NAME) {
case libFpu => noneOrPiFpu(libFpu)
}.right
// If not acq SET DISPERSER FROM PI
_ <- when(!o.memberOf(acq)) {
o.setDisperser(blueprint.disperser)
}.right
_ <- (if (o.memberOf(acq)) {
if (blueprint.filter != NONE)
o.setFilter(closestUGRIZ(blueprint.filter.getWavelength.toDouble))
else
o.setFilter(r_G0326)
} else {
for {
_ <- o.setFilter(blueprint.filter).right
// For IFU 2-slit N&S, set wavelengths
_ <- when(blueprint.fpu == IFU_N) {
// Handle empty filter
val f = if (blueprint.filter == FilterSouth.NONE) FilterSouth.r_G0326 else blueprint.filter
// Set observing wavelength
val lambda = f.getWavelength.toDouble * 1000.0 // seriously ?!??
for {
_ <- o.setFilter(f).right
_ <- o.setDisperserLambda(lambda).right
_ <- o.ed.modifySeqAllKey(InstGmosCommon.DISPERSER_LAMBDA_PROP.getName){
case 780 => lambda
case 785 => lambda + 5.0
}.right
} yield ()
}.right
} yield ()
}).right
} yield ()
}
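// Worked example of the wavelength substitution above, assuming the OT reports an
// effective wavelength of 0.63 micron for r_G0326: lambda = 0.63 * 1000.0 = 630.0 nm,
// so sequence steps that iterate over 780 / 785 become 630.0 / 635.0.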
| arturog8m/ocs | bundle/edu.gemini.phase2.skeleton.servlet/src/main/scala/edu/gemini/phase2/template/factory/impl/gmos/GmosSIfuNs.scala | Scala | bsd-3-clause | 3,622 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.fuberlin.wiwiss.silk.learning.active.linkselector
import math.log
import de.fuberlin.wiwiss.silk.evaluation.ReferenceEntities
import de.fuberlin.wiwiss.silk.entity.{Entity, Link}
import de.fuberlin.wiwiss.silk.util.DPair
import de.fuberlin.wiwiss.silk.linkagerule.LinkageRule
/**
* Selects links based on the Jensen-Shannon divergence from the closest reference link.
*/
case class JensenShannonDivergenceSelector(fulfilledOnly: Boolean = true) extends LinkSelector {
/**
* Returns the links with the highest Jensen-Shannon divergence from any reference link.
*/
override def apply(rules: Seq[WeightedLinkageRule], unlabeledLinks: Seq[Link], referenceEntities: ReferenceEntities): Seq[Link] = {
val posDist = referenceEntities.positive.values.map(referencePair => new ReferenceLinkDistance(referencePair, rules, true))
val negDist = referenceEntities.negative.values.map(referencePair => new ReferenceLinkDistance(referencePair, rules, false))
val dist = posDist ++ negDist
val rankedLinks = unlabeledLinks.par.map(rankLink(dist))
rankedLinks.seq.sortBy(-_.confidence.get).take(3)
}
/**
   * Ranks a link by updating its confidence to the distance from the closest reference link.
*/
def rankLink(dist: Traversable[ReferenceLinkDistance])(link: Link): Link = {
val minDist = dist.map(_(link)).min
link.update(confidence = Some(minDist))
}
/**
* Computes the Jensen-Shannon divergence from a specific reference link.
*/
private class ReferenceLinkDistance(entities: DPair[Entity], rules: Seq[LinkageRule], isPos: Boolean) {
/**
* Returns the Jensen-Shannon divergence from a reference link.
*/
def apply(link: Link) = {
val qLink = q(link)
jensenShannonDivergence(p, qLink) + 0.5 * entropy(qLink)
}
/** All linkage rules which fulfill this reference link */
private val fulfilledRules = {
if (fulfilledOnly) {
if(isPos) rules.filter(rule => rule(entities) > 0.0) else rules.filter(rule => rule(entities) <= 0.0)
}
else
rules
}
private val p = project(fulfilledRules, entities)
private def q(link: Link) = project(fulfilledRules, link.entities.get)
private def project(rules: Seq[LinkageRule], entityPair: DPair[Entity]) = {
rules.map(rule => probability(rule, entityPair)).sum / rules.size
}
private def probability(rule: LinkageRule, entityPair: DPair[Entity]) = {
rule(entityPair) * 0.5 + 0.5
}
/**
* Computes the Jensen-Shannon divergence between two binary variables.
*/
private def jensenShannonDivergence(p1: Double, p2: Double) = {
entropy(0.5 * (p1 + p2)) - 0.5 * (entropy(p1) + entropy(p2))
}
/**
* Computes the binary entropy.
*/
private def entropy(p: Double) = {
if(p <= 0.0 || p >= 1.0)
0.0
else
(-p * log(p) - (1 - p) * log(1 - p)) / log(2)
}
}
}
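// Worked example of the scoring above, for binary probabilities p = 1.0 (projection of
// the reference link) and q = 0.5 (a maximally uncertain candidate link):
// entropy(1.0) = 0, entropy(0.5) = 1 and entropy(0.75) ≈ 0.811, hence
// jensenShannonDivergence(1.0, 0.5) = 0.811 - 0.5 * (0 + 1) ≈ 0.311, and the rank
// value assigned to the link is 0.311 + 0.5 * entropy(0.5) ≈ 0.811.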
| fusepoolP3/p3-silk | silk-learning/src/main/scala/de/fuberlin/wiwiss/silk/learning/active/linkselector/JensenShannonDivergenceSelector.scala | Scala | apache-2.0 | 3,532 |
package fr.unice.polytech.ogl.championships.library
import eu.ace_design.island.bot.IExplorerRaid
trait Teams {
def players: Map[String, IExplorerRaid]
def playerNames: String = players.map { case (n,_) => n.toUpperCase }.toSeq.sorted mkString ", "
}
trait SI3 extends Teams {
def players: Map[String, IExplorerRaid] = g1 ++ g2 ++ g3 ++ g4
private lazy val g1 = Map(
"islaa" -> new fr.unice.polytech.ogl.islaa.Explorer(),
"islab" -> new fr.unice.polytech.ogl.islab.Explorer(),
"islac" -> new fr.unice.polytech.ogl.islac.Explorer(),
"islad" -> new fr.unice.polytech.ogl.islad.Explorer(),
"islae" -> new fr.unice.polytech.ogl.islae.Explorer()
)
private lazy val g2 = Map(
"islba" -> new fr.unice.polytech.ogl.islba.Explorer(),
"islbb" -> new fr.unice.polytech.ogl.islbb.Explorer(),
"islbc" -> new fr.unice.polytech.ogl.islbc.Explorer(),
"islbd" -> new fr.unice.polytech.ogl.islbd.Explorer(),
"islbe" -> new fr.unice.polytech.ogl.islbe.Explorer()
)
private lazy val g3 = Map(
"islca" -> new fr.unice.polytech.ogl.islca.Explorer(),
"islcb" -> new fr.unice.polytech.ogl.islcb.Explorer(),
"islcc" -> new fr.unice.polytech.ogl.islcc.Explorer(),
"islcd" -> new fr.unice.polytech.ogl.islcd.Explorer(),
"islce" -> new fr.unice.polytech.ogl.islce.Explorer(),
"islcf" -> new fr.unice.polytech.ogl.islcf.Explorer()
)
private lazy val g4 = Map(
"islda" -> new fr.unice.polytech.ogl.islda.Explorer(),
"isldb" -> new fr.unice.polytech.ogl.isldb.Explorer(),
"isldc" -> new fr.unice.polytech.ogl.isldc.Explorer(),
"isldd" -> new fr.unice.polytech.ogl.isldd.Explorer(),
"islde" -> new fr.unice.polytech.ogl.islde.Explorer(),
"isldf" -> new fr.unice.polytech.ogl.isldf.Explorer()
)
}
| mosser/ogl-2015 | src/main/scala/fr/unice/polytech/ogl/championships/library/Teams.scala | Scala | lgpl-3.0 | 1,785 |