code | repo_name | path | language | license | size
---|---|---|---|---|---|
package io.surfkit.clientlib.wysihtml5
import scala.scalajs.js
import scala.scalajs.js.annotation.JSName
/**
* Created by suroot on 17/05/16.
*/
@JSName("wysihtml.dom")
@js.native
object dom extends js.Object{
def getAsDom(html: String, context: org.scalajs.dom.raw.Document): org.scalajs.dom.raw.Element = js.native
}
| coreyauger/scala-js-wysihtml | src/main/scala/io/surfkit/clientlib/wysihtml5/dom.scala | Scala | mit | 328 |
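A minimal usage sketch for the facade above, assuming it runs in a browser page where scalajs-dom's `document` value is available; the HTML fragment is illustrative.

```scala
object DomFacadeExample {
  import org.scalajs.dom.document
  import io.surfkit.clientlib.wysihtml5.dom

  // Parse an HTML fragment into a DOM element via the wysihtml helper.
  val el: org.scalajs.dom.raw.Element =
    dom.getAsDom("<p>Hello <b>world</b></p>", document)
}
```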
package models.user
import models.utils.MyPostgresDriver.simple._
import play.api.Play.current
import java.sql.Timestamp
import models.survey._
import models.daos.slick.DBTableDefinitions.{DBUser, UserTable}
import scala.slick.lifted.ForeignKeyQuery
case class UserSurveyTextSubmission(userSurveyTextSubmissionId: Int, userId: String, surveyQuestionId: Int, surveyTextSubmission: Option[String], timeSubmitted: Timestamp, numMissionsCompleted: Int)
class UserSurveyTextSubmissionTable(tag: Tag) extends Table[UserSurveyTextSubmission](tag, Some("sidewalk"), "user_survey_text_submission") {
def userSurveyTextSubmissionId = column[Int]("user_survey_text_submission_id", O.PrimaryKey, O.AutoInc)
def userId = column[String]("user_id", O.NotNull)
def surveyQuestionId = column[Int]("survey_question_id", O.NotNull)
def surveyTextSubmission = column[Option[String]]("survey_text_submission", O.Nullable)
def timeSubmitted = column[Timestamp]("time_submitted", O.Nullable)
def numMissionsCompleted = column[Int]("num_missions_completed", O.NotNull)
def * = (userSurveyTextSubmissionId, userId, surveyQuestionId, surveyTextSubmission, timeSubmitted, numMissionsCompleted) <> ((UserSurveyTextSubmission.apply _).tupled, UserSurveyTextSubmission.unapply)
def user: ForeignKeyQuery[UserTable, DBUser] =
foreignKey("user_survey_text_submission_user_id_fkey", userId, TableQuery[UserTable])(_.userId)
def survey_question: ForeignKeyQuery[SurveyQuestionTable, SurveyQuestion] =
foreignKey("user_survey_text_submission_survey_question_id_fkey", surveyQuestionId, TableQuery[SurveyQuestionTable])(_.surveyQuestionId)
}
object UserSurveyTextSubmissionTable{
val db = play.api.db.slick.DB
val userSurveyTextSubmissions = TableQuery[UserSurveyTextSubmissionTable]
def save(userSurveyTextSubmission: UserSurveyTextSubmission): Int = db.withTransaction { implicit session =>
val userSurveyTextSubmissionId: Int =
(userSurveyTextSubmissions returning userSurveyTextSubmissions.map(_.userSurveyTextSubmissionId)) += userSurveyTextSubmission
userSurveyTextSubmissionId
}
}
| ProjectSidewalk/SidewalkWebpage | app/models/user/UserSurveyTextSubmission.scala | Scala | mit | 2,107 |
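A hedged sketch of saving a row with the table object above. The field values are placeholders, the auto-incremented primary key is passed as 0 because the database assigns the real id, and a running Play application is assumed so that `play.api.db.slick.DB` can open a session.

```scala
object UserSurveyTextSubmissionExample {
  import java.sql.Timestamp
  import models.user.{UserSurveyTextSubmission, UserSurveyTextSubmissionTable}

  val submission = UserSurveyTextSubmission(
    userSurveyTextSubmissionId = 0,            // placeholder; generated by the database
    userId = "some-user-uuid",                 // hypothetical user id
    surveyQuestionId = 1,
    surveyTextSubmission = Some("Free-form answer"),
    timeSubmitted = new Timestamp(System.currentTimeMillis),
    numMissionsCompleted = 3
  )

  // Returns the generated user_survey_text_submission_id.
  val newId: Int = UserSurveyTextSubmissionTable.save(submission)
}
```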
package asobu.dsl.extractors
import asobu.dsl.{ExtractResult, FallbackResult, Extractor}
import Extractor._
import ExtractResult._
import asobu.dsl.util.Read
import scala.concurrent.ExecutionContext
import scala.util.{Success, Failure, Try}
trait PrimitiveExtractors {
def stringOption[T: Read](ifEmpty: ⇒ Throwable)(implicit fbr: FallbackResult, ex: ExecutionContext): Extractor[Option[String], T] = (strO: Option[String]) ⇒ {
val parsed: Try[T] = for {
v ← strO.fold[Try[String]](Failure[String](ifEmpty))(Success(_))
r ← Read[T].parse(v)
} yield r
fromTry(parsed)
}
}
object PrimitiveExtractors extends PrimitiveExtractors
| iheartradio/asobu | dsl/src/main/scala/asobu/dsl/extractors/PrimitiveExtractors.scala | Scala | apache-2.0 | 666 |
package io.surfkit.driver
import play.api.libs.ws.DefaultWSClientConfig
import play.api.libs.ws.ning.NingAsyncHttpClientConfigBuilder
import play.api.libs.ws.ning.NingWSClient
import com.ning.http.client.AsyncHttpClientConfig
import scala.concurrent.ExecutionContext.Implicits.global
import scala.Predef._
/**
*
* Created by Corey Auger
*/
object Scraper extends App with LocalSetup{
override def main(args: Array[String]) {
import sqlContext.implicits._
val config = new NingAsyncHttpClientConfigBuilder(DefaultWSClientConfig()).build
val builder = new AsyncHttpClientConfig.Builder(config)
val WS = new NingWSClient(builder.build)
val p = new java.io.PrintWriter("./output/opento.json")
WS.url(nytimesEndpoint).get.map{ res =>
println(res.json)
}
p.close()
sc.stop()
}
}
| coreyauger/spark-ashley-madison-ml | src/main/scala/io/surfkit/driver/Scraper.scala | Scala | mit | 838 |
import scala.quoted.*
import scala.language.implicitConversions
case class Xml(parts: String, args: List[Any])
object XmlQuote {
implicit class SCOps(ctx: StringContext) {
inline def xml(args: => Any*): Xml = ${XmlQuote.impl('this, 'args)}
}
def impl(receiver: Expr[SCOps], args: Expr[Seq[Any]])
(using Quotes) : Expr[Xml] = {
import quotes.reflect.*
// for debugging purpose
def pp(tree: Tree): Unit = {
println(tree.show(using Printer.TreeStructure))
println(tree.show)
}
def liftListOfAny(lst: List[Term]): Expr[List[Any]] = lst match {
case x :: xs =>
val head = x.asExpr
val tail = liftListOfAny(xs)
'{ $head :: $tail }
case Nil => '{Nil}
}
def isStringConstant(tree: Term) = tree match {
case Literal(_) => true
case _ => false
}
def isSCOpsConversion(tree: Term) =
tree.symbol.fullName == "XmlQuote$.SCOps"
def isStringContextApply(tree: Term) =
tree.symbol.fullName == "scala.StringContext$.apply"
// XmlQuote.SCOps(StringContext.apply([p0, ...]: String*)
val parts = receiver.asTerm.underlyingArgument match {
case Apply(conv, List(Apply(fun, List(Typed(Repeated(values, _), _)))))
if isSCOpsConversion(conv) &&
isStringContextApply(fun) &&
values.forall(isStringConstant) =>
values.collect { case Literal(StringConstant(value)) => value }
case tree =>
report.error(s"String literal expected, but ${tree.show(using Printer.TreeStructure)} found")
return '{ ??? }
}
// [a0, ...]: Any*
val Typed(Repeated(args0, _), _) = args.asTerm.underlyingArgument
val string = parts.mkString("??")
'{new Xml(${Expr(string)}, ${liftListOfAny(args0)})}
}
}
| dotty-staging/dotty | tests/run-macros/xml-interpolation-1/XmlQuote_1.scala | Scala | apache-2.0 | 1,799 |
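A short sketch of how the interpolator above would be called from a separate compilation unit: the string parts must be literals because the macro inspects them, while the interpolated arguments may be arbitrary expressions.

```scala
object XmlQuoteExample {
  import scala.language.implicitConversions
  import XmlQuote.SCOps

  def greeting(name: String): Xml = xml"<hello>$name</hello>"

  // greeting("world") == Xml("<hello>??</hello>", List("world"))
}
```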
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.expressions.utils
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.java.typeutils.RowTypeInfo
import org.apache.flink.table.api.Types
import org.apache.flink.table.functions.ScalarFunction
import org.apache.flink.types.Row
class ScalarOperatorsTestBase extends ExpressionTestBase {
def testData: Row = {
val testData = new Row(14)
testData.setField(0, 1: Byte)
testData.setField(1, 1: Short)
testData.setField(2, 1)
testData.setField(3, 1L)
testData.setField(4, 1.0f)
testData.setField(5, 1.0d)
testData.setField(6, true)
testData.setField(7, 0.0d)
testData.setField(8, 5)
testData.setField(9, 10)
testData.setField(10, "String")
testData.setField(11, false)
testData.setField(12, null)
testData.setField(13, Row.of("foo", null))
testData
}
def typeInfo: TypeInformation[Any] = {
new RowTypeInfo(
Types.BYTE,
Types.SHORT,
Types.INT,
Types.LONG,
Types.FLOAT,
Types.DOUBLE,
Types.BOOLEAN,
Types.DOUBLE,
Types.INT,
Types.INT,
Types.STRING,
Types.BOOLEAN,
Types.BOOLEAN,
Types.ROW(Types.STRING, Types.STRING)
).asInstanceOf[TypeInformation[Any]]
}
override def functions: Map[String, ScalarFunction] = Map(
"shouldNotExecuteFunc" -> ShouldNotExecuteFunc
)
}
| mtunique/flink | flink-libraries/flink-table/src/test/scala/org/apache/flink/table/expressions/utils/ScalarOperatorsTestBase.scala | Scala | apache-2.0 | 2,210 |
package org.sisioh.aws4s.eb.model
import java.util.Date
import com.amazonaws.services.elasticbeanstalk.model._
import org.sisioh.aws4s.PimpedType
object TerminateEnvironmentResultFactory {
def create(): TerminateEnvironmentResult = new TerminateEnvironmentResult()
}
class RichTerminateEnvironmentResult(val underlying: TerminateEnvironmentResult)
extends AnyVal with PimpedType[TerminateEnvironmentResult] {
def environmentNameOpt: Option[String] = Option(underlying.getEnvironmentName)
def environmentNameOpt_=(value: Option[String]): Unit =
underlying.setEnvironmentName(value.orNull)
def withEnvironmentNameOpt(value: Option[String]): TerminateEnvironmentResult =
underlying.withEnvironmentName(value.orNull)
// ---
def environmentIdOpt: Option[String] = Option(underlying.getEnvironmentId)
def environmentIdOpt_=(value: Option[String]): Unit = underlying.setEnvironmentId(value.orNull)
def withEnvironmentIdOpt(value: Option[String]): TerminateEnvironmentResult =
underlying.withEnvironmentId(value.orNull)
// ---
def applicationNameOpt: Option[String] = Option(underlying.getApplicationName)
def applicationNameOpt_=(value: Option[String]): Unit =
underlying.setApplicationName(value.orNull)
def withApplicationNameOpt(value: Option[String]): TerminateEnvironmentResult =
underlying.withApplicationName(value.orNull)
// ---
def versionLabelOpt: Option[String] = Option(underlying.getVersionLabel)
def versionLabelOpt_=(value: Option[String]): Unit =
underlying.setVersionLabel(value.orNull)
def withVersionLabelOpt(value: Option[String]): TerminateEnvironmentResult =
underlying.withVersionLabel(value.orNull)
// ---
def solutionStackNameOpt: Option[String] = Option(underlying.getSolutionStackName)
def solutionStackNameOpt_=(value: Option[String]): Unit =
underlying.setSolutionStackName(value.orNull)
def withSolutionStackNameOpt(value: Option[String]): TerminateEnvironmentResult =
underlying.withSolutionStackName(value.orNull)
// ---
def templateNameOpt: Option[String] = Option(underlying.getTemplateName)
def templateNameOpt_=(value: Option[String]): Unit =
underlying.setTemplateName(value.orNull)
def withTemplateNameOpt(value: Option[String]): TerminateEnvironmentResult =
underlying.withTemplateName(value.orNull)
// ---
def descriptionOpt: Option[String] = Option(underlying.getDescription)
def descriptionOpt_=(value: Option[String]): Unit =
underlying.setDescription(value.orNull)
def withDescriptionOpt(value: Option[String]): TerminateEnvironmentResult =
underlying.withDescription(value.orNull)
// ---
def endpointURLOpt: Option[String] = Option(underlying.getEndpointURL)
def endpointURLOpt_=(value: Option[String]): Unit =
underlying.setEndpointURL(value.orNull)
def withEndpointURLOpt(value: Option[String]): TerminateEnvironmentResult =
underlying.withEndpointURL(value.orNull)
// ---
def cNAMEOpt: Option[String] = Option(underlying.getCNAME)
def cNAMEOpt_=(value: Option[String]): Unit =
underlying.setCNAME(value.orNull)
def withCNAMEOpt(value: Option[String]): TerminateEnvironmentResult =
underlying.withCNAME(value.orNull)
// ---
def dateCreatedOpt: Option[Date] = Option(underlying.getDateCreated)
def dateCreatedOpt_=(value: Option[Date]): Unit =
underlying.setDateCreated(value.orNull)
def withDateCreateOpt(value: Option[Date]): TerminateEnvironmentResult =
underlying.withDateCreated(value.orNull)
// ---
def dateUpdatedOpt: Option[Date] = Option(underlying.getDateUpdated)
def dateUpdatedOpt_=(value: Option[Date]): Unit =
underlying.setDateUpdated(value.orNull)
def withDateUpdatedOpt(value: Option[Date]): TerminateEnvironmentResult =
underlying.withDateUpdated(value.orNull)
// ---
def statusOpt: Option[String] = Option(underlying.getStatus)
def statusOpt_=(value: Option[String]): Unit =
underlying.setStatus(value.orNull)
def withStatusOpt(value: Option[String]): TerminateEnvironmentResult =
underlying.withStatus(value.orNull)
// ---
def healthOpt: Option[String] = Option(underlying.getHealth)
def healthOpt_=(value: Option[String]): Unit =
underlying.setHealth(value.orNull)
def withHealthOpt(value: Option[String]): TerminateEnvironmentResult =
underlying.withHealth(value.orNull)
// ---
def resourcesOpt: Option[EnvironmentResourcesDescription] = Option(underlying.getResources)
def resourcesOpt_=(value: Option[EnvironmentResourcesDescription]): Unit =
underlying.setResources(value.orNull)
def withResourcesOpt(value: Option[EnvironmentResourcesDescription]): TerminateEnvironmentResult =
underlying.withResources(value.orNull)
// ---
def tierOpt: Option[EnvironmentTier] = Option(underlying.getTier)
def tierOpt_=(value: Option[EnvironmentTier]): Unit =
underlying.setTier(value.orNull)
def withTierOpt(value: Option[EnvironmentTier]): TerminateEnvironmentResult =
underlying.withTier(value.orNull)
}
| everpeace/aws4s | aws4s-eb/src/main/scala/org/sisioh/aws4s/eb/model/RichTerminateEnvironmentResult.scala | Scala | mit | 5,062 |
package week5
import akka.actor.Actor
import akka.actor.Props
import akka.event.LoggingReceive
class TransferMain extends Actor {
val accountFrom = context.actorOf(Props[BankAccount], "accountFrom")
val accountTo = context.actorOf(Props[BankAccount], "accountTo")
accountFrom ! BankAccount.Deposit(100)
accountFrom ! BankAccount.Deposit(100)
def receive = {
case BankAccount.Done => transfer(150)
}
def transfer(amount: BigInt): Unit = {
val transaction = context.actorOf(Props[WireTransfer], "transfer")
transaction ! WireTransfer.Transfer(accountFrom, accountTo, amount)
context.become(LoggingReceive {
case WireTransfer.Done => {
println("Transfer succeeded.")
context.stop(self)
}
case WireTransfer.Failed => {
println("Transfer failed.")
context.stop(self)
}
})
}
}
| M4573R/playground-notes | principles-of-reactive-programming/week-5/src/main/scala/TransferMain.scala | Scala | mit | 871 |
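A hypothetical launcher for the actor above; `BankAccount` and `WireTransfer` are defined elsewhere in the same course project and are not part of this dump.

```scala
object TransferApp extends App {
  import akka.actor.{ActorSystem, Props}
  import week5.TransferMain

  // Creating the actor kicks off the deposits and the subsequent transfer.
  val system = ActorSystem("transfer-demo")
  system.actorOf(Props[TransferMain], "mainActor")
}
```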
package org.coursera.naptime.sbt
import sbtbuildinfo.BuildInfo
object ScalaLibraryLocator {
def getPath: Option[String] = {
val pattern = "library jar: ([^,]+)".r
pattern.findFirstMatchIn(BuildInfo.scalaInstance).map(_.group(1))
}
}
| vkuo-coursera/naptime | naptime-sbt-plugin/src/main/scala/org/coursera/naptime/sbt/ScalaLibraryLocator.scala | Scala | apache-2.0 | 247 |
/**
* Model.scala
*/
package bloom.model
import bloom.View
import bloom.ClockMode
import bloom.globals._
import scala.collection.mutable.ArrayBuffer
object Model {
/**
* Object members
*/
// Clock mode representation
val clock: ClockMode.Value = ClockMode.Standard
// Internal representation of minutes - if this changes, update view
var min: Int = -1
// ArrayBuffer list of user categories
var categories: Categories = ArrayBuffer[Category]()
// Keeps track of the open modal
var openModalId: String = "null"
/**
* Object functions
*/
// Return category by UUID
def getCategoryByID(id: Int): Category = categories(id)
// Return list of all categories as HTML
def toHTML(): String = categories.map(_.toHTML).mkString("")
// Callback function used for JavaScript chrome API
def callback(results: String): Unit = {}
// Storage location
def getLocation(): Unit = Storage.location(callback _)
// Update time based on clockMode value (military or standard)
def updateTime(m: Int, h: Int): Unit =
if (m != min) {
min = m
// Update the view time
if (clock == ClockMode.Military)
View.updateTime(Date.getMilitaryTime(m, h))
else
View.updateTime(Date.getStandardTime(m, h))
}
// Adds a category with given name
def addCategory(name: String): Unit = {
categories += new Category(uuid, name)
View.updateTodoList()
}
// Sets the id of the current modal displayed - null if none
def setCurrentOpenModal(id: String): Unit = {
// Hide current modal if it's displayed
View.hide(s"section.$openModalId")
// Don't display the modal if the id hasn't changed (ie. toggle it)
if (openModalId != id) {
openModalId = id
View.display(s"section.$openModalId")
} else
openModalId = "null"
}
}
}
| AndrewMcBurney/bloom | src/main/scala/bloom/model/Model.scala | Scala | mit | 1,852 |
package japgolly.scalajs.react.vdom
trait CssUnitsOps {
@inline final implicit def vdomAttrVtCssUnits[N: Numeric](n: N): CssUnits =
new CssUnits(n)
}
/**
* Extends numbers to provide a bunch of useful methods, allowing you to write
* CSS-lengths in a nice syntax without resorting to strings.
*/
final class CssUnits private[vdom] (private val _n: Any) extends AnyVal {
private def addSuffix(suffix: String): String = {
val s = _n.toString
if (s == "0") s else s + suffix
}
/**
* Relative to the viewing device. For screen display, typically one device
* pixel (dot) of the display.
*
* For printers and very high resolution screens one CSS pixel implies
* multiple device pixels, so that the number of pixel per inch stays around
* 96.
*/
def px = addSuffix("px")
/** One point which is 1/72 of an inch. */
def pt = addSuffix("pt")
/** One millimeter. */
def mm = addSuffix("mm")
  /** One centimeter, which is 10 millimeters. */
def cm = addSuffix("cm")
  /** One inch, which is 2.54 centimeters. */
def in = addSuffix("in")
/** One pica which is 12 points. */
def pc = addSuffix("pc")
/**
* This unit represents the calculated font-size of the element. If used on
* the font-size property itself, it represents the inherited font-size
* of the element.
*/
def em = addSuffix("em")
/**
* This unit represents the width, or more precisely the advance measure, of
* the glyph '0' zero, the Unicode character U+0030 in the element's font.
*/
def ch = addSuffix("ch")
/**
* This unit represents the x-height of the element's font. On fonts with the
* 'x' letter, this is generally the height of lowercase letters in the font;
* 1ex ≈ 0.5em in many fonts.
*/
def ex = addSuffix("ex")
/**
* This unit represents the font-size of the root element e.g. the font-size
* of the html element. When used on the font-size on this root element,
* it represents its initial value.
*/
def rem = addSuffix("rem")
/**
* An angle in degrees. One full circle is 360deg. E.g. 0deg, 90deg, 360deg.
*/
def deg = addSuffix("deg")
/**
* An angle in gradians. One full circle is 400grad. E.g. 0grad, 100grad,
* 400grad.
*/
def grad = addSuffix("grad")
/**
* An angle in radians. One full circle is 2π radians which approximates
* to 6.2832rad. 1rad is 180/π degrees. E.g. 0rad, 1.0708rad, 6.2832rad.
*/
def rad = addSuffix("rad")
/**
* The number of turns the angle is. One full circle is 1turn. E.g. 0turn,
* 0.25turn, 1turn.
*/
def turn = addSuffix("turn")
/** A percentage value */
def pct = addSuffix("%")
/** A percentage value */
def `%` = addSuffix("%")
}
| japgolly/scalajs-react | coreGeneric/src/main/scala-2/japgolly/scalajs/react/vdom/CssUnits.scala | Scala | apache-2.0 | 2,722 |
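A small sketch of the extension in use, assuming `CssUnitsOps` is mixed in directly (scalajs-react normally exposes it through its vdom import prefixes).

```scala
object CssUnitsExample extends japgolly.scalajs.react.vdom.CssUnitsOps {
  val width: String  = 100.px   // "100px"
  val margin: String = 1.5.em   // "1.5em"
  val half: String   = 50.pct   // "50%"
  val zero: String   = 0.px     // "0"; the suffix is omitted for zero
}
```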
package org.scalajs.jsenv.test
import org.scalajs.jsenv.phantomjs.PhantomJSEnv
import org.junit.Test
class PhantomJSTest extends JSEnvTest with ComTests {
protected def newJSEnv: PhantomJSEnv = new PhantomJSEnv
}
| mdedetrich/scala-js | js-envs/src/test/scala/org/scalajs/jsenv/test/PhantomJSTest.scala | Scala | bsd-3-clause | 218 |
// Jubatus: Online machine learning framework for distributed environment
// Copyright (C) 2014-2015 Preferred Networks and Nippon Telegraph and Telephone Corporation.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License version 2.1 as published by the Free Software Foundation.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
package us.jubat.yarn.client
import org.apache.hadoop.fs.Path
import scala.concurrent.Future
import scala.util.{Success, Failure, Try}
import java.net.InetAddress
import us.jubat.yarn.common._
import scala.Some
import us.jubat.yarn.client.JubatusYarnApplication.ApplicationContext
import org.apache.hadoop.yarn.api.records.{FinalApplicationStatus, ApplicationReport}
// TODO: ExecutionContext was added provisionally; confirm later whether this is acceptable.
import scala.concurrent.ExecutionContext.Implicits.global
case class Resource(priority: Int, memory: Int, virtualCores: Int)
case class JubatusYarnApplicationStatus(jubatusProxy: java.util.Map[String, java.util.Map[String, String]], jubatusServers: java.util.Map[String, java.util.Map[String, String]], yarnApplication: ApplicationReport)
object JubatusYarnApplication extends HasLogger {
private case class ApplicationContext(controller: YarnClientController, proxy: ApplicationMasterProxy, service: JubatusYarnService)
private def waitForStarted(aContext: ApplicationContext): JubatusYarnApplication = {
    // Block until JubatusProxy has started on the ApplicationMaster and a jubatusServer has started in every Container
logger.info("wait for ApplicationMaster status 'Wait'")
while (aContext.proxy.status != ControllerStatus.Wait) {
aContext.controller.getFinalStatus match {
case FinalApplicationStatus.FAILED =>
aContext.controller.kill()
throw new IllegalStateException("Application is failed.")
case FinalApplicationStatus.KILLED =>
aContext.controller.kill()
throw new IllegalStateException("Application is killed.")
case FinalApplicationStatus.SUCCEEDED =>
aContext.controller.kill()
throw new IllegalStateException("Application is finished.")
case FinalApplicationStatus.UNDEFINED =>
}
Thread.sleep(100)
}
val tJubatusProxy = aContext.proxy.jubatusLocation.get
val tJubatusServers = aContext.proxy.containers.values.map { tLocation =>
tLocation.jubatusLocation.get
}.toList
logger.info(
"new JubatusYarnApplication\\n"
+ s"\\n jubatusProxy: $tJubatusProxy\\n"
+ s"\\n jubatusServers: $tJubatusServers"
)
new JubatusYarnApplication(tJubatusProxy, tJubatusServers, aContext)
}
/**
   * Starts a JubatusYarnApplication.
   *
   * One juba${aLearningMachineType_proxy} and ${aNodeCount} juba${aLearningMachineType} processes are started.
   * Specify the resources used by each juba${aLearningMachineType} in ${aResource}.
*
* @param aLearningMachineName learning machine name
* @param aLearningMachineType learning machine type
* @param aZookeepers ZooKeeper locations
* @param aConfigString config json string
* @param aResource computer resources in the cluster
* @param aNodeCount number of cluster
* @return [[JubatusYarnApplication]]
*/
def start(aLearningMachineName: String, aLearningMachineType: LearningMachineType, aZookeepers: List[Location], aConfigString: String, aResource: Resource, aNodeCount: Int): Future[JubatusYarnApplication] = {
start(aLearningMachineName, aLearningMachineType, aZookeepers, aConfigString, aResource, aNodeCount, new Path("hdfs:///jubatus-on-yarn"))
}
/**
   * Starts a JubatusYarnApplication.
   *
   * One juba${aLearningMachineType_proxy} and ${aNodeCount} juba${aLearningMachineType} processes are started.
   * Specify the resources used by each juba${aLearningMachineType} in ${aResource}.
*
* @param aLearningMachineName learning machine name
* @param aLearningMachineType learning machine type
* @param aZookeepers ZooKeeper locations
* @param aConfigString config json string
* @param aResource computer resources in the cluster
* @param aNodeCount number of cluster
* @param aBasePath base path of jar and sh files
* @return [[JubatusYarnApplication]]
*/
def start(aLearningMachineName: String, aLearningMachineType: LearningMachineType, aZookeepers: List[Location], aConfigString: String, aResource: Resource, aNodeCount: Int, aBasePath: Path): Future[JubatusYarnApplication] = Future {
require(aResource.memory > 0, "specify memory than 1MB.")
require(aNodeCount > 0, "specify node count than 1")
val tService = new JubatusYarnService()
tService.start()
tService.yarnClientController match {
case None => throw new IllegalStateException("Service not running.")
case Some(tYarnClientController) =>
        // Launch the ApplicationMaster
logger.info(s"startJubatusApplication $aLearningMachineName, $aLearningMachineType, $aZookeepers, $aConfigString, $aResource, $aNodeCount")
val tApplicationMasterProxy = tYarnClientController.startJubatusApplication(aLearningMachineName, aLearningMachineType, aZookeepers, aConfigString, aResource, aNodeCount, aBasePath)
waitForStarted(ApplicationContext(tYarnClientController, tApplicationMasterProxy, tService))
}
}
/**
   * Starts a JubatusYarnApplication.
   *
   * One juba${aLearningMachineType_proxy} and ${aNodeCount} juba${aLearningMachineType} processes are started.
   * Specify the resources used by each juba${aLearningMachineType} in ${aResource}.
*
* @param aLearningMachineName learning machine name
* @param aLearningMachineType learning machine type
* @param aZookeepers ZooKeeper locations
* @param aConfigFile config file
* @param aResource computer resources in the cluster
* @param aNodeCount number of cluster
* @return [[JubatusYarnApplication]]
*/
def start(aLearningMachineName: String, aLearningMachineType: LearningMachineType, aZookeepers: List[Location], aConfigFile: Path, aResource: Resource, aNodeCount: Int): Future[JubatusYarnApplication] = {
start(aLearningMachineName, aLearningMachineType, aZookeepers, aConfigFile, aResource, aNodeCount, new Path("hdfs:///jubatus-on-yarn"))
}
/**
   * Starts a JubatusYarnApplication.
   *
   * One juba${aLearningMachineType_proxy} and ${aNodeCount} juba${aLearningMachineType} processes are started.
   * Specify the resources used by each juba${aLearningMachineType} in ${aResource}.
*
* @param aLearningMachineName learning machine name
* @param aLearningMachineType learning machine type
* @param aZookeepers ZooKeeper locations
* @param aConfigFile config file
* @param aResource computer resources in the cluster
* @param aNodeCount number of cluster
* @return [[JubatusYarnApplication]]
*/
def start(aLearningMachineName: String, aLearningMachineType: LearningMachineType, aZookeepers: List[Location], aConfigFile: Path, aResource: Resource, aNodeCount: Int, aBasePath: Path): Future[JubatusYarnApplication] = Future {
require(aResource.memory > 0, "specify memory than 1MB.")
require(aNodeCount > 0, "specify node count than 1")
val tService = new JubatusYarnService()
tService.start()
tService.yarnClientController match {
case None => throw new IllegalStateException("Service not running.")
case Some(tYarnClientController) =>
        // Launch the ApplicationMaster
logger.info(s"startJubatusApplication $aLearningMachineName, $aLearningMachineType, $aZookeepers, $aConfigFile, $aResource, $aNodeCount")
val tApplicationMasterProxy = tYarnClientController.startJubatusApplication(aLearningMachineName, aLearningMachineType, aZookeepers, aConfigFile, aResource, aNodeCount, aBasePath)
waitForStarted(ApplicationContext(tYarnClientController, tApplicationMasterProxy, tService))
}
}
}
class JubatusYarnApplication(val jubatusProxy: Location, val jubatusServers: List[Location], aContext: ApplicationContext) extends HasLogger {
/**
   * Gets the status of the JubatusYarnApplication.
*
* @return [[JubatusYarnApplicationStatus]]
*/
def status: JubatusYarnApplicationStatus = {
logger.info("status")
aContext.controller.status
}
/**
   * Stops the JubatusYarnApplication.
   *
   * The juba* and juba*_proxy processes running on the nodes are stopped as well.
*/
def stop(): Future[Unit] = Future {
logger.info(s"stop ${aContext.proxy.name}")
aContext.controller.stop() match {
case Success(_) =>
case Failure(e) =>
throw e
}
while (!aContext.controller.isFinished) {
Thread.sleep(100)
}
aContext.service.stop()
}
/**
   * Forcibly terminates the JubatusYarnApplication.
   *
   * The juba* and juba*_proxy processes running on the nodes are stopped as well.
*/
def kill(): Unit = {
logger.info("kill")
aContext.controller.kill()
aContext.service.stop()
}
/**
   * Loads model data.
   *
   * Model data is read from $aModelPathPrefix/$aModelId/$seq.jubatus,
   * where $seq is the node number.
*
* @param aModelPathPrefix HDFS path prefix
*/
def loadModel(aModelPathPrefix: Path, aModelId: String): Try[JubatusYarnApplication] = Try {
logger.info(s"loadModel $aModelPathPrefix, $aModelId")
aContext.controller.loadModel(aModelPathPrefix, aModelId)
this
}
/**
   * Saves model data.
   *
   * Model data is written to $aModelPathPrefix/$aModelId/$seq.jubatus,
   * where $seq is the node number.
*
* @param aModelPathPrefix HDFS path prefix
*/
def saveModel(aModelPathPrefix: Path, aModelId: String): Try[JubatusYarnApplication] = Try {
logger.info(s"saveModel $aModelPathPrefix, $aModelId")
aContext.controller.saveModel(aModelPathPrefix, aModelId)
this
}
}
| jubatus/jubatus-on-yarn | jubatusonyarn/jubatus-on-yarn-client/src/main/scala/us/jubat/yarn/client/JubatusYarnApplication.scala | Scala | lgpl-2.1 | 10,794 |
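A hedged sketch of driving the client API above. How a `LearningMachineType` and a ZooKeeper `Location` are constructed is not shown in this file, so they are taken as parameters; the config JSON, resource sizes and node count are illustrative.

```scala
object JubatusYarnApplicationExample {
  import scala.concurrent.ExecutionContext.Implicits.global
  import us.jubat.yarn.client.{JubatusYarnApplication, Resource}
  import us.jubat.yarn.common.{LearningMachineType, Location}

  def runOnce(machineType: LearningMachineType, zookeeper: Location): Unit = {
    val started = JubatusYarnApplication.start(
      aLearningMachineName = "demo",
      aLearningMachineType = machineType,
      aZookeepers = List(zookeeper),
      aConfigString = """{"method": "AROW"}""",   // illustrative config
      aResource = Resource(priority = 0, memory = 512, virtualCores = 1),
      aNodeCount = 2
    )
    started.foreach { app =>
      println(s"proxy: ${app.jubatusProxy}, servers: ${app.jubatusServers}")
      app.stop()                                  // tear the cluster down again
    }
  }
}
```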
package scalajs.antdesign
import japgolly.scalajs.react._
import org.scalajs.dom.Event
import scala.scalajs.js
import scala.scalajs.js.{Dynamic, Object, |}
/**
* @see https://ant.design/components/table/#API
* @param rowSelection row selection config
* @param pagination pagination config, hide it via setting to false
* @param size size of table: [[scalajs.antdesign.Table.Size.Default]], [[scalajs.antdesign.Table.Size.Middle]] or [[scalajs.antdesign.Table.Size.Small]]
* @param dataSource data record array to be rendered
* @param columns columns of table
* @param rowKey get row's key, could be a string or function
* @param rowClassName get row's className
* @param expandedRowRender expanded container render for each row
* @param defaultExpandedRowKeys initial expanded row keys
* @param expandedRowKeys current expanded rows keys
* @param defaultExpandAllRows expand all rows initially
* @param onExpandedRowsChange function to call when the expanded rows change
* @param onExpand function to call when click expand icon
* @param onChange callback that is called when pagination, filters, sorter is changed
* @param loading loading status of table
* @param locale i18n text include filter, sort, empty text...etc
* @param indentSize index pixel size of tree data
* @param onRowClick callback that is called when click a row
* @param bordered whether to show table border completely
* @param showHeader whether to show table header
* @param footer table footer renderer
* @param title table title renderer
* @param scroll whether table can be scroll in x/y direction, x or y can be a number that indicated the width and height of table body
*/
case class Table(
rowSelection: js.UndefOr[Table.RowSelection] = js.undefined,
pagination: js.UndefOr[Boolean | Pagination] = js.undefined,
size: js.UndefOr[Table.Size] = js.undefined,
dataSource: js.UndefOr[js.Dictionary[js.Any]] = js.undefined,
columns: js.UndefOr[js.Array[Table.Column]] = js.undefined,
rowKey: js.UndefOr[String | (js.Dictionary[js.Any] => CallbackTo[Unit])] = js.undefined,
rowClassName: js.UndefOr[(js.Dictionary[js.Any], Int) => CallbackTo[String]] = js.undefined,
expandedRowRender: js.UndefOr[js.Dictionary[js.Any] => CallbackTo[ReactNode]] = js.undefined,
defaultExpandedRowKeys: js.UndefOr[js.Array[String]] = js.undefined,
expandedRowKeys: js.UndefOr[js.Array[String]] = js.undefined,
defaultExpandAllRows: js.UndefOr[Boolean] = js.undefined,
onExpandedRowsChange: js.UndefOr[js.Array[js.Dictionary[js.Any]] => CallbackTo[Unit]] = js.undefined,
onExpand: js.UndefOr[(js.Array[js.Dictionary[js.Any]], js.Dictionary[js.Any]) => CallbackTo[Unit]] = js.undefined,
onChange: js.UndefOr[(js.Dictionary[js.Any], js.Dictionary[js.Any], js.Dictionary[js.Any]) => CallbackTo[Unit]] =
js.undefined,
loading: js.UndefOr[Boolean] = js.undefined,
locale: js.UndefOr[Table.Locale] = js.undefined,
indentSize: js.UndefOr[Int] = js.undefined,
onRowClick: js.UndefOr[(js.Dictionary[js.Any], Int) => CallbackTo[Unit]] = js.undefined,
bordered: js.UndefOr[Boolean] = js.undefined,
showHeader: js.UndefOr[Boolean] = js.undefined,
footer: js.UndefOr[js.Dictionary[js.Any] => CallbackTo[String]] = js.undefined,
title: js.UndefOr[js.Dictionary[js.Any] => CallbackTo[String]] = js.undefined,
scroll: js.UndefOr[Table.Scroll] = js.undefined) {
def toJS: Object with Dynamic = {
val p = js.Dynamic.literal()
rowSelection.foreach { x =>
p.updateDynamic("rowSelection")(x.toJS)
}
pagination.foreach { x =>
p.updateDynamic("pagination")((x: Any) match {
case b: Boolean => b
case p: Pagination => p.toJS
})
}
size.foreach { x =>
p.updateDynamic("size")(x.id)
}
dataSource.foreach { x =>
p.updateDynamic("dataSource")(x)
}
columns.foreach { x =>
p.updateDynamic("columns")(x.map(_.toJS))
}
rowKey.foreach { x =>
p.updateDynamic("rowKey")((x: Any) match {
case func: ((js.Dictionary[js.Any]) => CallbackTo[String]) @unchecked =>
(v1: js.Dictionary[js.Any]) =>
func(v1).runNow()
case s: String => s
})
}
rowClassName.foreach { x =>
p.updateDynamic("rowClassName")(
(v1: js.Dictionary[js.Any], v2: Int) => x(v1, v2).runNow()
)
}
expandedRowRender.foreach { x =>
p.updateDynamic("expandedRowRender")(
(v1: js.Dictionary[js.Any]) => x(v1).runNow()
)
}
defaultExpandedRowKeys.foreach { x =>
p.updateDynamic("defaultExpandedRowKeys")(x)
}
expandedRowKeys.foreach { x =>
p.updateDynamic("expandedRowKeys")(x)
}
defaultExpandAllRows.foreach { x =>
p.updateDynamic("defaultExpandAllRows")(x)
}
onExpandedRowsChange.foreach { x =>
p.updateDynamic("onExpandedRowsChange")((v1: js.Array[js.Dictionary[js.Any]]) => x(v1).runNow())
}
onExpand.foreach { x =>
p.updateDynamic("onExpand")(
(v1: js.Array[js.Dictionary[js.Any]], v2: js.Dictionary[js.Any]) => x(v1, v2).runNow()
)
}
onChange.foreach { x =>
p.updateDynamic("onChange")(
(v1: js.Dictionary[js.Any], v2: js.Dictionary[js.Any], v3: js.Dictionary[js.Any]) => x(v1, v2, v3).runNow()
)
}
loading.foreach { x =>
p.updateDynamic("loading")(x)
}
locale.foreach { x =>
p.updateDynamic("locale")(x.toJS)
}
indentSize.foreach { x =>
p.updateDynamic("indentSize")(x)
}
onRowClick.foreach { x =>
p.updateDynamic("onRowClick")(
(v1: js.Dictionary[js.Any], v2: Int) => x(v1, v2).runNow()
)
}
bordered.foreach { x =>
p.updateDynamic("bordered")(x)
}
showHeader.foreach { x =>
p.updateDynamic("showHeader")(x)
}
footer.foreach { x =>
p.updateDynamic("footer")(
(v1: js.Dictionary[js.Any]) => x(v1).runNow()
)
}
title.foreach { x =>
p.updateDynamic("title")(
(v1: js.Dictionary[js.Any]) => x(v1).runNow()
)
}
scroll.foreach { x =>
p.updateDynamic("scroll")(x.toJS)
}
p
}
def apply(children: ReactNode*): ReactComponentU_ = {
val f =
React.asInstanceOf[js.Dynamic].createFactory(js.Dynamic.global.antd.Table)
f(toJS, children.toJsArray).asInstanceOf[ReactComponentU_]
}
}
object Table {
/**
* https://ant.design/components/table/#rowSelection
* @param `type` [[scalajs.antdesign.Table.RowSelection.Type.Checkbox]] or [[scalajs.antdesign.Table.RowSelection.Type.Radio]]
* @param selectedRowKeys controlled selected row keys
* @param onChange callback that is called when selected rows change
* @param getCheckboxProps get Checkbox or Radio props
* @param onSelect callback that is called when select/deselect one row
* @param onSelectAll callback that is called when select/deselect all
*/
case class RowSelection(
`type`: js.UndefOr[RowSelection.Type] = js.undefined,
selectedRowKeys: js.UndefOr[js.Array[String]] = js.undefined,
onChange: js.UndefOr[(js.Array[String], js.Array[String]) => CallbackTo[Unit]] = js.undefined,
getCheckboxProps: js.UndefOr[js.Dictionary[js.Any] => CallbackTo[js.Dictionary[js.Any]]] = js.undefined,
onSelect: js.UndefOr[(js.Dictionary[js.Any], Boolean, js.Array[js.Dictionary[js.Any]]) => CallbackTo[Unit]] =
js.undefined,
onSelectAll: js.UndefOr[(Boolean, js.Array[js.Dictionary[js.Any]], js.Array[js.Dictionary[js.Any]]) => CallbackTo[
Unit]] = js.undefined) {
def toJS: Object with Dynamic = {
val p = js.Dynamic.literal()
`type`.foreach { x =>
p.updateDynamic("type")(x.id)
}
selectedRowKeys.foreach { x =>
p.updateDynamic("selectedRowKeys")(x)
}
onChange.foreach { x =>
p.updateDynamic("onChange")(
(v1: js.Array[String], v2: js.Array[String]) => x(v1, v2).runNow()
)
}
getCheckboxProps.foreach { x =>
p.updateDynamic("getCheckboxProps")(
(v1: js.Dictionary[js.Any]) => x(v1).runNow()
)
}
onSelect.foreach { x =>
p.updateDynamic("onSelect")(
(v1: js.Dictionary[js.Any], v2: Boolean, v3: js.Array[js.Dictionary[js.Any]]) => x(v1, v2, v3).runNow()
)
}
onSelectAll.foreach { x =>
p.updateDynamic("onSelectAll")(
(v1: Boolean, v2: js.Array[js.Dictionary[js.Any]], v3: js.Array[js.Dictionary[js.Any]]) =>
x(v1, v2, v3).runNow()
)
}
p
}
}
object RowSelection {
sealed abstract class Type(val id: String)
object Type {
case object Radio extends Type("radio")
case object Checkbox extends Type("checkbox")
}
}
sealed abstract class Size(val id: String)
object Size {
case object Default extends Size("default")
case object Middle extends Size("middle")
case object Small extends Size("small")
}
case class Locale(filterTitle: String, filterConfirm: String, filterReset: String, emptyText: String) {
def toJS: Object with Dynamic = {
val p = js.Dynamic.literal()
p.updateDynamic("filterTitle")(filterTitle)
p.updateDynamic("filterConfirm")(filterConfirm)
p.updateDynamic("filterReset")(filterReset)
p.updateDynamic("emptyText")(emptyText)
p
}
}
/**
* https://ant.design/components/table/#Column
* @param title title of this column
* @param key key of this column
* @param dataIndex display field of the data record, could be set like a.b.c
* @param render renderer of table cell, has three params: text, record and index of this row. The render value should be a ReactNode, or a object for colSpan/rowSpan config
* @param filters filter menu config
   * @param onFilter callback that is called when the confirm filter button is clicked
* @param filterMultiple whether to select multiple filtered item
* @param filterDropdown customized filter overlay
* @param filterDropdownVisible whether filterDropdown is visible
* @param onFilterDropdownVisibleChange called when filterDropdownVisible is changed
* @param filteredValue controlled filtered value
* @param sorter sorter sort function for local sort. If you need sort buttons only, set it true
* @param colSpan span of this column's title
* @param width width of this column
* @param className className of this column
* @param fixed set column to be fixed: true(same as left) [[scalajs.antdesign.Table.Column.Fixed.Left]] [[scalajs.antdesign.Table.Column.Fixed.Right]]
* @param sortOrder controlled sorted value: [[scalajs.antdesign.SortOrder.Ascend]] [[scalajs.antdesign.SortOrder.Descend]] false
* @param onCellClick callback when click cell
*/
case class Column(
title: js.UndefOr[String | ReactNode] = js.undefined,
key: js.UndefOr[String] = js.undefined,
dataIndex: js.UndefOr[String] = js.undefined,
render: js.UndefOr[(String, js.Dictionary[js.Any], Int) => CallbackTo[ReactNode]] = js.undefined,
filters: js.UndefOr[js.Array[js.Dictionary[js.Any]]] = js.undefined,
onFilter: js.UndefOr[(String, js.Dictionary[js.Any]) => CallbackTo[Boolean]] = js.undefined,
filterMultiple: js.UndefOr[Boolean] = js.undefined,
filterDropdown: js.UndefOr[ReactNode] = js.undefined,
filterDropdownVisible: js.UndefOr[Boolean] = js.undefined,
onFilterDropdownVisibleChange: js.UndefOr[Boolean => CallbackTo[Unit]] = js.undefined,
filteredValue: js.UndefOr[js.Array[String]] = js.undefined,
sorter: js.UndefOr[String | (js.Dictionary[js.Any], js.Dictionary[js.Any]) => CallbackTo[Boolean]] = js.undefined,
colSpan: js.UndefOr[Int] = js.undefined,
width: js.UndefOr[String | Boolean] = js.undefined,
className: js.UndefOr[String] = js.undefined,
fixed: js.UndefOr[Boolean | Column.Fixed] = js.undefined,
sortOrder: js.UndefOr[Boolean | SortOrder] = js.undefined,
onCellClick: js.UndefOr[(js.Dictionary[js.Any], Event) => CallbackTo[Unit]] = js.undefined) {
def toJS: Object with Dynamic = {
val p = js.Dynamic.literal()
title.foreach { x =>
p.updateDynamic("title")(x.asInstanceOf[js.Any])
}
key.foreach { x =>
p.updateDynamic("key")(x)
}
dataIndex.foreach { x =>
p.updateDynamic("dataIndex")(x)
}
render.foreach { x =>
p.updateDynamic("render")(
(v1: String, v2: js.Dictionary[js.Any], v3: Int) => x(v1, v2, v3).runNow()
)
}
filters.foreach { x =>
p.updateDynamic("filters")(x)
}
onFilter.foreach { x =>
p.updateDynamic("onFilter")(
(v1: String, v2: js.Dictionary[js.Any]) => x(v1, v2).runNow()
)
}
filterMultiple.foreach { x =>
p.updateDynamic("filterMultiple")(x)
}
filterDropdown.foreach { x =>
p.updateDynamic("filterDropdown")(x)
}
filterDropdownVisible.foreach { x =>
p.updateDynamic("filterDropdownVisible")(x)
}
onFilterDropdownVisibleChange.foreach { x =>
p.updateDynamic("onFilterDropdownVisibleChange")(
(v1: Boolean) => x(v1).runNow()
)
}
filteredValue.foreach { x =>
p.updateDynamic("filteredValue")(x)
}
sorter.foreach { x =>
p.updateDynamic("sorter")((x: Any) match {
case s: String => s
case func: ((js.Dictionary[js.Any], js.Dictionary[js.Any]) => CallbackTo[String]) @unchecked =>
(v1: js.Dictionary[js.Any], v2: js.Dictionary[js.Any]) =>
func(v1, v2).runNow()
})
}
colSpan.foreach { x =>
p.updateDynamic("colSpan")(x)
}
width.foreach { x =>
p.updateDynamic("width")(x.asInstanceOf[js.Any])
}
className.foreach { x =>
p.updateDynamic("className")(x)
}
fixed.foreach { x =>
p.updateDynamic("fixed")((x: Any) match {
case b: Boolean => b
case c: Column.Fixed => c.id
})
}
sortOrder.foreach { x =>
p.updateDynamic("sortOrder")((x: Any) match {
case b: Boolean => b
case s: SortOrder => s.id
})
}
onCellClick.foreach { x =>
p.updateDynamic("onCellClick")(
(v1: js.Dictionary[js.Any], v2: Event) => x(v1, v2).runNow()
)
}
p
}
}
object Column {
sealed abstract class Fixed(val id: String)
object Fixed {
case object Left extends Fixed("left")
case object Right extends Fixed("right")
}
}
case class Scroll(x: js.UndefOr[Int], y: js.UndefOr[Int]) {
def toJS: Object with Dynamic = {
val p = js.Dynamic.literal()
x.foreach { _x =>
p.updateDynamic("x")(_x)
}
y.foreach { y =>
p.updateDynamic("y")(y)
}
p
}
}
}
}
| mdedetrich/scalajs-antdesign | src/main/scala/scalajs/antdesign/Table.scala | Scala | bsd-3-clause | 15,010 |
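A minimal sketch of the wrapper above. Only simply-typed props are set; union-typed fields such as `title` or `pagination` are left out to keep the example small.

```scala
object TableExample {
  import scalajs.antdesign.Table

  // No children are passed; the call yields a ReactComponentU_ for scalajs-react to render.
  val loadingTable = Table(
    bordered = true,
    loading = true,
    size = Table.Size.Small
  )()
}
```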
package ml.combust.bundle.dsl
import ml.bundle.Socket
/** Companion object for holding constant values.
*/
object NodeShape {
val standardInputPort: String = "input"
val standardOutputPort: String = "output"
/** Default constructor.
*
* @return empty shape
*/
def apply(): NodeShape = new NodeShape(inputs = Seq(),
outputs = Seq(),
inputLookup = Map(),
outputLookup = Map())
/** Construct a shape with inputs and outputs.
*
* @param inputs input sockets
* @param outputs output sockets
* @return shape with inputs/outputs
*/
def apply(inputs: Seq[Socket],
outputs: Seq[Socket]): NodeShape = {
val inputLookup = inputs.map(s => (s.port, s)).toMap
val outputLookup = outputs.map(s => (s.port, s)).toMap
new NodeShape(inputs = inputs,
outputs = outputs,
inputLookup = inputLookup,
outputLookup = outputLookup)
}
/** Create a shape from a bundle shape.
*
* @param shape bundle shape
* @return dsl shape
*/
def fromBundle(shape: ml.bundle.NodeShape): NodeShape = NodeShape(inputs = shape.inputs,
outputs = shape.outputs)
}
/** Class for holding the input fields and output fields of a [[Node]].
* The shape also holds information for connecting the input/output fields
* to the underlying ML model.
*
* A [[NodeShape]] contains input and output sockets. Sockets map field data
* to certain functionality within a [[Model]]. For instance, say we want
* to run a "label" field through a string indexer and have the result
* output to the field "label_name". We could wire up the node like so:
*
* {{{
* scala> import ml.bundle.dsl._
* scala> Shape().withInput("label", "input"). // connect the "label" field to the model input
* withOutput("label_name", "output") // connect the model output to the "label_name" field
* }}}
*
* Or more concisely:
* {{{
* scala> import ml.bundle.dsl._
* scala> Shape().withStandardIO("label", "label_name") // shorthand for the above code
* }}}
*
* @param inputs input sockets
* @param outputs output sockets
* @param inputLookup input sockets lookup by port
* @param outputLookup output sockets lookup by port
*/
case class NodeShape private(inputs: Seq[Socket],
outputs: Seq[Socket],
inputLookup: Map[String, Socket],
outputLookup: Map[String, Socket]) {
/** Convert to bundle shape.
*
* @return bundle shape
*/
def asBundle: ml.bundle.NodeShape = ml.bundle.NodeShape(inputs = inputs,
outputs = outputs)
/** Get the standard input socket.
*
* The standard input socket is on port "input".
*
* @return standard input socket
*/
def standardInput: Socket = input(NodeShape.standardInputPort)
/** Get the standard output socket.
*
* The standard output socket is on port "output".
*
* @return standard output socket
*/
def standardOutput: Socket = output(NodeShape.standardOutputPort)
/** Add standard input/output sockets to the shape.
*
* This is the same as calling [[NodeShape#withStandardInput]] and
* [[NodeShape#withStandardOutput]].
*
* @param inputName name of the input socket
* @param outputName name of the output socket
* @return copy of the shape with standard input/output sockets added
*/
def withStandardIO(inputName: String,
outputName: String): NodeShape = {
withStandardInput(inputName).withStandardOutput(outputName)
}
/** Add standard input socket to the shape.
*
* @param name name of standard input socket
* @return copy of the shape with standard input socket added
*/
def withStandardInput(name: String): NodeShape = withInput(NodeShape.standardInputPort, name)
/** Add standard output socket to the shape.
*
* @param name name of standard output socket
* @return copy of the shape with standard output socket added
*/
def withStandardOutput(name: String): NodeShape = withOutput(NodeShape.standardOutputPort, name)
/** Get the bundle protobuf shape.
*
* @return bundle protobuf shape
*/
def bundleShape: ml.bundle.NodeShape = ml.bundle.NodeShape(inputs = inputs,
outputs = outputs)
/** Get an input by the port name.
*
* @param port name of port
* @return socket for named port
*/
def input(port: String): Socket = inputLookup(port)
/** Get an output by the port name.
*
* @param port name of port
* @return socket for named port
*/
def output(port: String): Socket = outputLookup(port)
/** Get an optional input by the port name.
*
* @param port name of the port
* @return optional socket for the named port
*/
def getInput(port: String): Option[Socket] = inputLookup.get(port)
/** Get an optional output by the port name.
*
* @param port name of the port
* @return optional socket for the named port
*/
def getOutput(port: String): Option[Socket] = outputLookup.get(port)
/** Add an input socket to the shape.
*
* @param port port of input socket
* @param name name of input socket
* @return copy of the shape with input socket added
*/
def withInput(port: String, name: String): NodeShape = {
require(!inputLookup.contains(port), s"input already exists for port: $port")
val socket = Socket(port, name)
val inputLookup2 = inputLookup + (port -> socket)
copy(inputs = inputs :+ socket, inputLookup = inputLookup2)
}
/** Add an output socket to the shape.
*
* @param port port of output socket
* @param name name of output socket
* @return copy of the shape with output socket added
*/
def withOutput(port: String, name: String): NodeShape = {
require(!outputLookup.contains(port), s"output already exists for port: $port")
val socket = Socket(port, name)
val outputLookup2 = outputLookup + (port -> socket)
copy(outputs = outputs :+ socket, outputLookup = outputLookup2)
}
}
| combust/mleap | bundle-ml/src/main/scala/ml/combust/bundle/dsl/NodeShape.scala | Scala | apache-2.0 | 6,138 |
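A small sketch of the wiring described in the scaladoc above: the standard ports are "input" and "output", and each socket records the field name bound to that port.

```scala
object NodeShapeExample {
  import ml.combust.bundle.dsl.NodeShape

  val shape = NodeShape().withStandardIO("label", "label_name")

  assert(shape.standardInput.name == "label")        // port "input"  -> field "label"
  assert(shape.standardOutput.name == "label_name")  // port "output" -> field "label_name"
}
```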
/*******************************************************************************
* Copyright (c) 2014 Łukasz Szpakowski.
*
* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/.
******************************************************************************/
package pl.luckboy.purfuncor.frontend.instant
import scalaz._
import scalaz.Scalaz._
case class InstantiationLambdaInfo[+T](insts: Seq[Instance[T]])
| luckboy/Purfuncor | src/main/scala/pl/luckboy/purfuncor/frontend/instant/InstantiationLambdaInfo.scala | Scala | mpl-2.0 | 560 |
import scala.scalajs.js
import js.annotation._
package importedjs {
package `node-slack` {
@JSName("node-slack.Slack")
class Slack protected () extends js.Object {
def this(hookUrl: String, option: Slack.Option = ???) = this()
def send(message: Slack.Message): js.Dynamic = js.native
def send(message: Slack.Message, callback: Slack.SendCallback): request.Request = js.native
def respond(query: Slack.Query): Slack.TextResponse = js.native
def respond(query: Slack.Query, callback: Slack.ResponseCallback): Slack.TextResponse = js.native
}
trait Option extends js.Object {
var proxy: String = js.native
}
trait Message extends js.Object {
var text: String = js.native
var channel: String = js.native
var username: String = js.native
var icon_emoji: String = js.native
var attachments: js.Array[js.Any] = js.native
var unfurl_links: Boolean = js.native
var link_names: Double = js.native
}
trait SendCallback extends js.Object {
def apply(err: js.Any, body: js.Any): js.Dynamic = js.native
}
trait Query extends js.Object {
var token: String = js.native
var team_id: String = js.native
var channel_id: String = js.native
var channel_name: String = js.native
var timestamp: Double = js.native
var user_id: String = js.native
var user_name: String = js.native
var text: String = js.native
}
trait TextResponse extends js.Object {
var text: String = js.native
}
trait ResponseCallback extends js.Object {
def apply(err: js.Any, query: Query): js.Dynamic = js.native
}
}
}
| ara-ta3/scalajs-hubot-doorkeeper-api | src/main/scala/dark/util/NodeSlack.scala | Scala | mit | 1,528 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.storage
import scala.collection.mutable
import scala.language.implicitConversions
import scala.util.Random
import org.scalatest.{BeforeAndAfter, Matchers}
import org.apache.spark.{LocalSparkContext, SparkFunSuite}
class RandomBlockReplicationPolicyBehavior extends SparkFunSuite
with Matchers
with BeforeAndAfter
with LocalSparkContext {
// Implicitly convert strings to BlockIds for test clarity.
protected implicit def StringToBlockId(value: String): BlockId = new TestBlockId(value)
val replicationPolicy: BlockReplicationPolicy = new RandomBlockReplicationPolicy
val blockId = "test-block"
/**
* Test if we get the required number of peers when using random sampling from
* BlockReplicationPolicy
*/
test("block replication - random block replication policy") {
val numBlockManagers = 10
val storeSize = 1000
val blockManagers = generateBlockManagerIds(numBlockManagers, Seq("/Rack-1"))
val candidateBlockManager = BlockManagerId("test-store", "localhost", 1000, None)
(1 to 10).foreach { numReplicas =>
logDebug(s"Num replicas : $numReplicas")
val randomPeers = replicationPolicy.prioritize(
candidateBlockManager,
blockManagers,
mutable.HashSet.empty[BlockManagerId],
blockId,
numReplicas
)
logDebug(s"Random peers : ${randomPeers.mkString(", ")}")
assert(randomPeers.toSet.size === numReplicas)
// choosing n peers out of n
val secondPass = replicationPolicy.prioritize(
candidateBlockManager,
randomPeers,
mutable.HashSet.empty[BlockManagerId],
blockId,
numReplicas
)
logDebug(s"Random peers : ${secondPass.mkString(", ")}")
assert(secondPass.toSet.size === numReplicas)
}
}
/**
* Returns a sequence of [[BlockManagerId]], whose rack is randomly picked from the given `racks`.
* Note that, each rack will be picked at least once from `racks`, if `count` is greater or equal
* to the number of `racks`.
*/
protected def generateBlockManagerIds(count: Int, racks: Seq[String]): Seq[BlockManagerId] = {
val randomizedRacks: Seq[String] = Random.shuffle(
racks ++ racks.length.until(count).map(_ => racks(Random.nextInt(racks.length)))
)
(0 until count).map { i =>
BlockManagerId(s"Exec-$i", s"Host-$i", 10000 + i, Some(randomizedRacks(i)))
}
}
}
class TopologyAwareBlockReplicationPolicyBehavior extends RandomBlockReplicationPolicyBehavior {
override val replicationPolicy = new BasicBlockReplicationPolicy
test("All peers in the same rack") {
val racks = Seq("/default-rack")
val numBlockManager = 10
(1 to 10).foreach {numReplicas =>
val peers = generateBlockManagerIds(numBlockManager, racks)
val blockManager = BlockManagerId("Driver", "Host-driver", 10001, Some(racks.head))
val prioritizedPeers = replicationPolicy.prioritize(
blockManager,
peers,
mutable.HashSet.empty,
blockId,
numReplicas
)
assert(prioritizedPeers.toSet.size == numReplicas)
assert(prioritizedPeers.forall(p => p.host != blockManager.host))
}
}
test("Peers in 2 racks") {
val racks = Seq("/Rack-1", "/Rack-2")
(1 to 10).foreach {numReplicas =>
val peers = generateBlockManagerIds(10, racks)
val blockManager = BlockManagerId("Driver", "Host-driver", 9001, Some(racks.head))
val prioritizedPeers = replicationPolicy.prioritize(
blockManager,
peers,
mutable.HashSet.empty,
blockId,
numReplicas
)
assert(prioritizedPeers.toSet.size == numReplicas)
val priorityPeers = prioritizedPeers.take(2)
assert(priorityPeers.forall(p => p.host != blockManager.host))
if(numReplicas > 1) {
// both these conditions should be satisfied when numReplicas > 1
assert(priorityPeers.exists(p => p.topologyInfo == blockManager.topologyInfo))
assert(priorityPeers.exists(p => p.topologyInfo != blockManager.topologyInfo))
}
}
}
}
| akopich/spark | core/src/test/scala/org/apache/spark/storage/BlockReplicationPolicySuite.scala | Scala | apache-2.0 | 4,905 |
package com.twitter.finatra.kafkastreams.internal.admin
import com.twitter.finagle.Service
import com.twitter.finagle.http._
import com.twitter.util.Future
import java.util.Properties
import scala.collection.JavaConverters._
private[kafkastreams] object KafkaStreamsPropertiesHandler {
/**
* Create a service function that extracts the key/value of kafka properties and formats it in
* HTML.
* @param properties Kafka Properties
* @return HTML formatted properties
*/
def apply(properties: Properties): Service[Request, Response] = {
new Service[Request, Response] {
override def apply(request: Request): Future[Response] = {
val response = Response(Version.Http11, Status.Ok)
response.setContentType(MediaType.Html)
val sortedProperties = properties
.propertyNames().asScala.map { property =>
s"$property=${properties.get(property)}"
}.toSeq.sorted.mkString("\\n")
ResponseWriter(response)(_.print(s"<pre>$sortedProperties</pre>"))
}
}
}
}
| twitter/finatra | kafka-streams/kafka-streams/src/main/scala/com/twitter/finatra/kafkastreams/internal/admin/KafkaStreamsPropertiesHandler.scala | Scala | apache-2.0 | 1,048 |
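A sketch only: the handler is `private[kafkastreams]`, so code like this would have to live under that package (hence the package declaration below), and the admin URI shown is illustrative.

```scala
package com.twitter.finatra.kafkastreams.internal.admin

object PropertiesHandlerExample {
  import java.util.Properties
  import com.twitter.finagle.http.{Method, Request}

  val props = new Properties()
  props.put("application.id", "my-streams-app")
  props.put("bootstrap.servers", "localhost:9092")

  val handler = KafkaStreamsPropertiesHandler(props)

  // The Future[Response] body lists the sorted key=value pairs wrapped in <pre> tags.
  val rsp = handler(Request(Method.Get, "/admin/kafka/streams/properties"))
}
```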
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.api.scala.completeness
import java.lang.reflect.Method
import org.apache.flink.util.TestLogger
import org.junit.Assert._
import org.junit.Test
import scala.language.existentials
/**
* Test base for checking whether the Scala API is up to feature parity with the Java API.
* Right now is very simple, it is only checked whether a method with the same name exists.
*
* When adding excluded methods to the lists you should give a good reason in a comment.
*
* Note: This is inspired by the JavaAPICompletenessChecker from Spark.
*/
abstract class ScalaAPICompletenessTestBase extends TestLogger {
/**
* Determines whether a method is excluded by name.
*/
protected def isExcludedByName(method: Method): Boolean
/**
* Utility to be called during the test.
*/
protected def checkMethods(
javaClassName: String,
scalaClassName: String,
javaClass: Class[_],
scalaClass: Class[_]) {
val javaMethods = javaClass.getMethods
.filterNot(_.isAccessible)
.filterNot(isExcludedByName)
.map(m => m.getName).toSet
val scalaMethods = scalaClass.getMethods
.filterNot(_.isAccessible)
.filterNot(isExcludedByName)
.map(m => m.getName).toSet
val missingMethods = javaMethods -- scalaMethods
for (javaMethod <- missingMethods) {
// check if the method simply follows different getter / setter conventions in Scala / Java
// for example Java: getFoo() should match Scala: foo()
if (!containsScalaGetterLike(javaMethod, scalaMethods)) {
fail(s"Method $javaMethod from $javaClass is missing from $scalaClassName.")
}
}
}
protected def containsScalaGetterLike(javaMethod: String, scalaMethods: Set[String]): Boolean = {
if (javaMethod.startsWith("get") && javaMethod.length >= 4) {
val scalaMethodName = Character.toLowerCase(javaMethod.charAt(3)) + javaMethod.substring(4)
scalaMethods.contains(scalaMethodName)
} else {
false
}
}
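  // Illustrative examples (not part of the original source) of the getter-convention
  // matching implemented above; the method names are assumptions:
  //   containsScalaGetterLike("getParallelism", Set("parallelism"))    == true
  //   containsScalaGetterLike("getParallelism", Set("getParallelism")) == false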
/**
* Tests to be performed to ensure API completeness.
*/
@Test
protected def testCompleteness(): Unit
}
| tzulitai/flink | flink-tests/src/test/scala/org/apache/flink/api/scala/completeness/ScalaAPICompletenessTestBase.scala | Scala | apache-2.0 | 2,943 |
/*
* Copyright (c) 2015 Alpine Data Labs
* All rights reserved.
*/
package com.alpine.json
import java.lang.reflect.Type
import com.google.gson._
/**
 * Gson adapter to be used to serialize and deserialize [[TypeWrapper]] to and from JSON
* with Gson.
*
 * To be used when the developer wishes to include an interface type in the model object,
* or one of its fields.
*
* e.g. to store a List[Bird], or just val b: Bird, where
* trait Bird
* class Robin() extends Bird
* class Eagle() extends Bird
* the Bird type should be wrapped in TypeWrapper:
* case class Aviary(specialBird: TypeWrapper[Bird], commonBirds: List[TypeWrapper[Bird]])
*/
class GsonTypeAdapter(typeHints: TypeHints = EmptyTypeHints()) extends JsonSerializer[TypeWrapper[_]] with JsonDeserializer[TypeWrapper[_]] {
@throws(classOf[JsonParseException])
def deserialize(jsonElement: JsonElement, t: Type, jdc: JsonDeserializationContext): TypeWrapper[_] = {
val jsonObj: JsonObject = jsonElement.getAsJsonObject
val valueClass: JsonElement = jsonObj.get(JsonUtil.typeKey)
val className: String = valueClass.getAsString
val value: JsonElement = jsonObj.get(JsonUtil.dataKey)
try {
val clz = typeHints.getTypeForKey(className)
val newValue: Any = jdc.deserialize(value, clz)
new TypeWrapper(newValue)
} catch {
case e: ClassNotFoundException => throw new JsonParseException(e)
}
}
def serialize(obj: TypeWrapper[_], t: Type, jdc: JsonSerializationContext): JsonElement = {
val resultObj: JsonObject = new JsonObject
val valueType: Class[_] = obj.value.getClass
val jsonEle2: JsonElement = jdc.serialize(typeHints.getKeyForType(valueType))
resultObj.add(JsonUtil.typeKey, jsonEle2)
val jsonEle: JsonElement = jdc.serialize(obj.value, valueType)
resultObj.add(JsonUtil.dataKey, jsonEle)
resultObj
}
}
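// Illustrative usage sketch (not part of the original source). The Bird/Robin types
// mirror the example in the Scaladoc above; the registration shown here is an
// assumption about how this adapter would typically be wired into Gson:
//
//   import com.google.gson.GsonBuilder
//   sealed trait Bird
//   case class Robin() extends Bird
//
//   val gson = new GsonBuilder()
//     .registerTypeHierarchyAdapter(classOf[TypeWrapper[_]], new GsonTypeAdapter())
//     .create()
//   val json = gson.toJson(TypeWrapper[Bird](Robin()))          // stores both the type key and the data
//   val back = gson.fromJson(json, classOf[TypeWrapper[Bird]])  // re-instantiates a Robin
//
// Passing KnownTypes(Map(classOf[Robin] -> "robin")) to the adapter would store the
// short key "robin" instead of the full class name.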
/**
* To be used as a wrapper for interfaces.
* It includes the actual class name in the serialization in order
* to instantiate the new object on deserialization.
* @param value Actual value to serialize.
* @tparam T Type of value.
*/
case class TypeWrapper[T](value: T) {
val valueClass: Class[_ <: T] = value.getClass
}
trait TypeHints {
def getKeyForType(t: Class[_]): String
def getTypeForKey(x: String): Type
}
case class EmptyTypeHints() extends TypeHints {
override def getKeyForType(t: Class[_]): String = t.getCanonicalName
override def getTypeForKey(x: String): Type = Class.forName(x)
}
/**
 * For known types, we store the type in JSON by a simple String key, which we map to and from the class.
* For types not in the type map, we use the class path as the key, and use Class.forName(name) to get the class.
*/
case class KnownTypes(typeToKeyMap: Map[Class[_], String]) extends TypeHints {
lazy val keyToTypeMap = typeToKeyMap.map(_.swap)
def getKeyForType(t: Class[_]): String = {
typeToKeyMap.get(t) match {
case Some(x: String) => x
case None => t.getCanonicalName
}
}
def getTypeForKey(x: String): Type = {
keyToTypeMap.get(x) match {
case Some(t: Type) => t
case None => Class.forName(x)
}
}
}
| holdenk/PluginSDK | alpine-model-api/src/main/scala/com/alpine/json/GsonTypeAdapter.scala | Scala | apache-2.0 | 3,162 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dllib.keras.objectives
import com.intel.analytics.bigdl.dllib.nn.{LogSoftMax, SoftMax}
import com.intel.analytics.bigdl.dllib.tensor.Tensor
import com.intel.analytics.bigdl.dllib.keras.layers.{KerasRunner, Loss}
import com.intel.analytics.bigdl.dllib.tensor.TensorNumericMath.TensorNumeric.NumericFloat
import com.intel.analytics.bigdl.dllib.keras.layers.KerasBaseSpec
import scala.math.abs
class SparseCategoricalCrossEntropySpec extends KerasBaseSpec {
"SparseCategoricalCrossEntropy" should "be the same as Keras" in {
val kerasCode =
"""
|input_tensor = Input(shape=[3, ])
|target_tensor = Input(batch_shape=[3, ])
|loss = sparse_categorical_crossentropy(target_tensor, input_tensor)
        |input = np.array([[0.6, 0.3, 0.1], [0.2, 0.5, 0.3], [0.1, 0.1, 0.8]])
|Y = np.array([0.0, 1.0, 2.0])
""".stripMargin
val loss = SparseCategoricalCrossEntropy[Float](logProbAsInput = false)
val (gradInput, gradWeight, weights, input, target, output) =
KerasRunner.run(kerasCode, Loss)
val boutput = loss.forward(input, target)
val koutput = output.mean()
NumericFloat.nearlyEqual(boutput, koutput, 1e-5) should be (true)
}
"SparseCategoricalCrossEntropy" should "generate correct output and grad" in {
val criterion = SparseCategoricalCrossEntropy[Double](logProbAsInput = true)
val input = Tensor[Double](3, 3)
input(Array(1, 1)) = -1.0262627674932
input(Array(1, 2)) = -1.2412600935171
input(Array(1, 3)) = -1.0423174168648
input(Array(2, 1)) = -0.90330565804228
input(Array(2, 2)) = -1.3686840144413
input(Array(2, 3)) = -1.0778380454479
input(Array(3, 1)) = -0.99131220658219
input(Array(3, 2)) = -1.0559142847536
input(Array(3, 3)) = -1.2692712660404
val target = Tensor[Double](3)
target(Array(1)) = 0
target(Array(2)) = 1
target(Array(3)) = 2
val expectedOutput = 1.2214060159916
val expectedGrad = Tensor[Double](3, 3)
expectedGrad(Array(1, 1)) = -0.33333333333333
expectedGrad(Array(1, 2)) = 0
expectedGrad(Array(1, 3)) = 0
expectedGrad(Array(2, 1)) = 0
expectedGrad(Array(2, 2)) = -0.33333333333333
expectedGrad(Array(2, 3)) = 0
expectedGrad(Array(3, 1)) = 0
expectedGrad(Array(3, 2)) = 0
expectedGrad(Array(3, 3)) = -0.33333333333333
val output = criterion.forward(input, target)
val gradInput = criterion.backward(input, target)
assert(abs(expectedOutput - output) < 1e-6)
expectedGrad.map(gradInput, (v1, v2) => {
assert(abs(v1 - v2) < 1e-6)
v1
})
}
"SparseCategoricalCrossEntropy with weight" should "generate correct output and grad" in {
val weight = Tensor[Double](3)
weight(Array(1)) = 0.539598016534
weight(Array(2)) = 0.20644677849486
weight(Array(3)) = 0.67927200254053
val criterion = SparseCategoricalCrossEntropy[Double](
weights = weight, logProbAsInput = true)
val input = Tensor[Double](3, 3)
input(Array(1, 1)) = -1.2412808758149
input(Array(1, 2)) = -1.4300331461186
input(Array(1, 3)) = -0.75144359487463
input(Array(2, 1)) = -1.2200775853117
input(Array(2, 2)) = -1.1747087276299
input(Array(2, 3)) = -0.92663456371434
input(Array(3, 1)) = -1.1718541533533
input(Array(3, 2)) = -1.0983546295516
input(Array(3, 3)) = -1.0306113735619
val target = Tensor[Double](3)
target(Array(1)) = 0
target(Array(2)) = 1
target(Array(3)) = 2
val expectedOutput = 1.1312383221403
val expectedGrad = Tensor[Double](3, 3)
expectedGrad(Array(1, 1)) = -0.37858111084791
expectedGrad(Array(1, 2)) = 0
expectedGrad(Array(1, 3)) = 0
expectedGrad(Array(2, 1)) = 0
expectedGrad(Array(2, 2)) = -0.14484273169791
expectedGrad(Array(2, 3)) = 0
expectedGrad(Array(3, 1)) = 0
expectedGrad(Array(3, 2)) = 0
expectedGrad(Array(3, 3)) = -0.47657615745419
val output = criterion.forward(input, target)
val gradInput = criterion.backward(input, target)
assert(abs(expectedOutput - output) < 1e-6)
expectedGrad.map(gradInput, (v1, v2) => {
assert(abs(v1 - v2) < 1e-6)
v1
})
}
"SparseCategoricalCrossEntropy with sizeAverage false and 1-based label" should
"generate correct output and grad" in {
val criterion = SparseCategoricalCrossEntropy[Double](
zeroBasedLabel = false, sizeAverage = false, logProbAsInput = true)
val input = Tensor[Double](3, 3)
input(Array(1, 1)) = -1.10821131127
input(Array(1, 2)) = -0.92179085988591
input(Array(1, 3)) = -1.3017876357682
input(Array(2, 1)) = -0.72992115377362
input(Array(2, 2)) = -1.2817109257719
input(Array(2, 3)) = -1.4250730090114
input(Array(3, 1)) = -1.1074577039332
input(Array(3, 2)) = -1.0506933510994
input(Array(3, 3)) = -1.1397251596433
val target = Tensor[Double](3)
target(Array(1)) = 1
target(Array(2)) = 2
target(Array(3)) = 3
val expectedOutput = 3.5296473966852
val expectedGrad = Tensor[Double](3, 3)
expectedGrad(Array(1, 1)) = -1
expectedGrad(Array(1, 2)) = 0
expectedGrad(Array(1, 3)) = 0
expectedGrad(Array(2, 1)) = 0
expectedGrad(Array(2, 2)) = -1
expectedGrad(Array(2, 3)) = 0
expectedGrad(Array(3, 1)) = 0
expectedGrad(Array(3, 2)) = 0
expectedGrad(Array(3, 3)) = -1
val output = criterion.forward(input, target)
val gradInput = criterion.backward(input, target)
assert(abs(expectedOutput - output) < 1e-6)
expectedGrad.map(gradInput, (v1, v2) => {
assert(abs(v1 - v2) < 1e-6)
v1
})
}
"SparseCategoricalCrossEntropy with probabilities input" should
"generate correct output and grad" in {
val input = Tensor[Float](Array(4, 4)).rand()
val target = Tensor[Float](Array[Float](0, 1, 2, 3), Array(4))
val logSoftMax = LogSoftMax[Float]()
val softMax = SoftMax[Float]()
val logProb = logSoftMax.forward(input)
val prob = softMax.forward(input)
val referenceLayer = SparseCategoricalCrossEntropy[Float](logProbAsInput = true)
val testedLayer = SparseCategoricalCrossEntropy[Float]()
val expectedLoss = referenceLayer.forward(logProb, target)
val loss = testedLayer.forward(prob, target)
val expectedGradInput = logSoftMax.backward(input, referenceLayer.backward(logProb, target))
val gradInput = softMax.backward(input, testedLayer.backward(prob, target))
math.abs(expectedLoss - loss) < 1e-5 should be (true)
expectedGradInput.almostEqual(gradInput, 1e-5) should be (true)
}
}
| intel-analytics/BigDL | scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/keras/objectives/SparseCategoricalCrossEntropySpec.scala | Scala | apache-2.0 | 7,189 |
/** When this file is opened within the IDE, a typing error is reported. */
class A[B] extends TestIterable[B] {
//import collection.convert.ImplicitConversionsToScala._
implicit def `iterator asScala`[A](it: ju.Iterator[A]): Iterator[A] = ???
implicit def `enumeration AsScalaIterator`[A](i: ju.Enumeration[A]): Iterator[A] = ???
def iterator: other.TestIterator[Nothing] = ???
iterator./*!*/
}
object other {
trait TestIterator[T] {
def hasNext: Boolean
def next: T
}
}
| scala/scala | test/files/presentation/ide-bug-1000531/src/CrashOnLoad.scala | Scala | apache-2.0 | 498 |
package org.eso.ias.tranfer
import java.util.Properties
import com.typesafe.scalalogging.Logger
import org.eso.ias.asce.transfer.{IasIO, IasioInfo, ScalaTransferExecutor}
import org.eso.ias.logging.IASLogger
import org.eso.ias.types.{Alarm, IASTypes, OperationalMode}
import scala.util.matching.Regex
/**
 * The AntPadInhibitor transfer function gets 2 inputs:
* - the association of antennas to pads (string)
* - an ALARM
* It inhibits the alarm if there are no antennas in the pads
* whose names match with the given pattern.
* It is also possible to add a filter by antenna type by setting
* antTypePropName to one of the possible antenna types: the
* alarm in output is set if the alarm in input is set AND
* there are antennas in the pads AND
* at least one antenna of the passed type.
*
* No check is done on the ID of the alarm in input.
* The TF produces an alarm that is always CLEAR if there
* are no antennas in the pads or the alarm in input is CLEAR.
* If the input is SET and there are antennas in the PAD, the output
* is set with the same priority of the alarm in input.
*
 * The association of antennas to pads is a monitor point that contains
 * all the antennas and the names of the respective pads where they sit.
* The matching is done by a regular expression provided as a java property.
*
* This is an example of the value (string) of the Array-AntennasToPads,
 * the monitor point that contains the pads of all the antennas:
* DV01:A045,DV02:A025,...
*/
class AntPadInhibitor(asceId: String, asceRunningId: String, validityTimeFrame:Long, props: Properties)
extends ScalaTransferExecutor[Alarm](asceId,asceRunningId,validityTimeFrame,props) {
/**
* Names of pads must match with this regular expression
*/
val antPadRegExp: Regex = {
val propVal = Option(props.getProperty(AntPadInhibitor.PadNameMatcherName))
require(propVal.isDefined,AntPadInhibitor.PadNameMatcherName+" property not defined")
new Regex(propVal.get)
}
AntPadInhibitor.logger.info("Pad names pattern: {}",antPadRegExp.toString())
/*
   * The optional antenna type used to add the filter by antenna type
   *
   * The type can only be one of the strings in AntPadInhibitor.AntennaTypes
*/
val antType: Option[String] = {
val propVal = Option(props.getProperty(AntPadInhibitor.AntTypePropName))
propVal.map(antTypeStr => {
val antTypeUpperCase = antTypeStr.toUpperCase
require(AntPadInhibitor.AntennaTypes.contains(antTypeUpperCase),
"Unrecognized antenna type: "+antTypeUpperCase+" not in "+AntPadInhibitor.AntTypePropName.mkString(","))
AntPadInhibitor.logger.info("Added a filter by antenna type {}",antTypeUpperCase)
antTypeUpperCase
})
}
/**
* Initialize the TF by making some consistency checks
*
* @param inputsInfo The IDs and types of the inputs
   * @param outputInfo The Id and type of the output
**/
override def initialize(inputsInfo: Set[IasioInfo], outputInfo: IasioInfo): Unit = {
    require(inputsInfo.size==2,"Expected inputs are the alarm and the antennas to pads monitor point")
val antsPadsIasioInfo = inputsInfo.filter(_.iasioId==AntPadInhibitor.AntennasToPadsID)
require(antsPadsIasioInfo.nonEmpty,AntPadInhibitor.AntennasToPadsID+" is not in input")
require (antsPadsIasioInfo.head.iasioType==IASTypes.STRING,AntPadInhibitor.AntennasToPadsID+" is not a STRING")
val otherIasios = inputsInfo--antsPadsIasioInfo
require(otherIasios.head.iasioType==IASTypes.ALARM,"Expected ALARM input missing")
AntPadInhibitor.logger.info("Input is {}",otherIasios.head.iasioId)
require(outputInfo.iasioType==IASTypes.ALARM,"Wrong type of output (ALARM expected)")
}
/**
* @see TransferExecutor#shutdown()
*/
override def shutdown() {}
/**
   * Build the comma separated list of names of affected antennas, i.e.
   * the names of the antennas sitting on pads whose names match
   * the regular expression passed in the java properties
   *
   * @param antsPadsMP: the antennas to pads string received in input
* @return the comma separated list of names of affected antennas
*/
def affectedAntennas(antsPadsMP: String): String = {
// Association of antennas to pad: one entry for each antenna like DV02:A507
val antsPads = antsPadsMP.split(",")
// Select only the antennas that are in the proper pads
val antennasInPads= antsPads.filter(antPad => {
assert(antPad.isEmpty || antPad.count(_==':')==1,"Antenna/Pad mismatch: \\""+antPad+"\\" should be name:pad")
val couple = antPad.split(":")
antPad.nonEmpty &&
antPadRegExp.pattern.matcher(couple(1)).matches() &&
antType.forall(aType => couple(0).toUpperCase().startsWith(aType))
})
// Extracts only the names of the antennas
antennasInPads.map(ap => ap.split(":")(0)).mkString(",")
}
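  // Illustrative sketch (not part of the original source): assuming the pad-name
  // property is set to the pattern "A0.*" and no antenna-type filter is configured,
  // the parsing above would behave like
  //   affectedAntennas("DV01:A045,DV02:W201,CM03:A025") == "DV01,CM03"
  // i.e. only antennas sitting on pads whose names match the pattern are kept.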
/**
* Produces the output of the component by evaluating the inputs.
*
* @return the computed output of the ASCE
*/
override def eval(compInputs: Map[String, IasIO[_]], actualOutput: IasIO[Alarm]): IasIO[Alarm] = {
val antPadMp= getValue(compInputs,AntPadInhibitor.AntennasToPadsID)
assert(antPadMp.isDefined,AntPadInhibitor.AntennasToPadsID+" inputs not defined!")
val antPadMPValue = antPadMp.get.value.get.asInstanceOf[String]
val antennasInPads = affectedAntennas(antPadMPValue)
val foundAntennaInPad = antennasInPads.nonEmpty
val alarmInput = compInputs.values.filter(_.iasType==IASTypes.ALARM).head
val alarmOut = if (foundAntennaInPad) {
actualOutput.updateValue(alarmInput.value.get)
} else {
// No antennas in the pads of the WS
actualOutput.updateValue(Alarm.CLEARED)
}
val mode = if (alarmInput.mode==OperationalMode.OPERATIONAL && antPadMp.get.mode==OperationalMode.OPERATIONAL) {
OperationalMode.OPERATIONAL
} else if (antPadMp.get.mode==OperationalMode.OPERATIONAL) {
alarmInput.mode
} else {
OperationalMode.UNKNOWN
}
    val outputWithUpdatedMode = alarmOut.updateMode(mode)
    if (foundAntennaInPad && alarmOut.value.get.isSet)
      outputWithUpdatedMode.updateProps(alarmInput.props++Map(AntPadInhibitor.AffectedAntennaAlarmPropName -> antennasInPads))
    else
      outputWithUpdatedMode.updateProps(alarmInput.props)
}
}
object AntPadInhibitor {
/** The logger */
  val logger: Logger = IASLogger.getLogger(AntPadInhibitor.getClass)
/** The name of the property to pass the regular expression */
val PadNameMatcherName: String = "org.eso.ias.antpadinhibitor.padnameregexp"
/** The name of the property to pass the regular expression */
val AntTypePropName: String = "org.eso.ias.antpadinhibitor.anttype"
/** The ID of the monitor point with the position (pad) of the antennas */
val AntennasToPadsID="Array-AntennasToPads"
/** The possible antenna type to be set in the AntTypePropName property */
val AntennaTypes = List("DV","DA","CM","PM")
/** The property set in the alarm in output ith the list of affected antennas */
val AffectedAntennaAlarmPropName = "affectedAntennas"
}
| IntegratedAlarmSystem-Group/ias | TransferFunctions/src/main/scala/org/eso/ias/tranfer/AntPadInhibitor.scala | Scala | lgpl-3.0 | 7,169 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.features.serialization
import java.util.{UUID, Collections => jCollections, List => jList, Map => jMap}
import org.locationtech.jts.geom._
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.AttributeConfigs.{UserDataListType, UserDataMapKeyType, UserDataMapValueType}
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes.AttributeOptions._
import org.opengis.feature.`type`.AttributeDescriptor
object ObjectType extends Enumeration {
type ObjectType = Value
val STRING, INT, LONG, FLOAT, DOUBLE, BOOLEAN, DATE, UUID, GEOMETRY, LIST, MAP, BYTES = Value
// geometry sub-types
val POINT, LINESTRING, POLYGON, MULTIPOINT, MULTILINESTRING, MULTIPOLYGON, GEOMETRY_COLLECTION = Value
// string sub-types
val JSON = Value
/**
* @see selectType(clazz: Class[_], metadata: java.util.Map[_, _])
*
* @param descriptor attribute descriptor
* @return
*/
def selectType(descriptor: AttributeDescriptor): Seq[ObjectType] =
selectType(descriptor.getType.getBinding, descriptor.getUserData)
/**
* Turns a SimpleFeatureType attribute class binding into an enumeration.
*
* The first element in the result will be the primary binding. For geometries, lists and maps,
* the result will also contain secondary types.
*
* Lists will contain the type of the list elements.
* Maps will contain the type of the map keys, then the type of the map values.
* Geometries will contain the specific geometry type.
*
* Note: geometries will always return GEOMETRY as the primary type to allow for generic matching.
*
* @param clazz class, must be valid for a SimpleFeatureType attribute
* @param metadata attribute metadata (user data)
* @return binding
*/
def selectType(clazz: Class[_], metadata: jMap[_, _] = jCollections.emptyMap()): Seq[ObjectType] = {
clazz match {
case c if classOf[java.lang.String].isAssignableFrom(c) =>
if (metadata.get(OptJson) == "true") { Seq(STRING, JSON) } else { Seq(STRING) }
case c if classOf[java.lang.Integer].isAssignableFrom(c) => Seq(INT)
case c if classOf[java.lang.Long].isAssignableFrom(c) => Seq(LONG)
case c if classOf[java.lang.Float].isAssignableFrom(c) => Seq(FLOAT)
case c if classOf[java.lang.Double].isAssignableFrom(c) => Seq(DOUBLE)
case c if classOf[java.lang.Boolean].isAssignableFrom(c) => Seq(BOOLEAN)
case c if classOf[java.util.Date].isAssignableFrom(c) => Seq(DATE)
case c if classOf[UUID].isAssignableFrom(c) => Seq(UUID)
case c if classOf[Geometry].isAssignableFrom(c) => geometryType(c.asInstanceOf[Class[_ <: Geometry]])
case c if classOf[Array[Byte]].isAssignableFrom(c) => Seq(BYTES)
case c if classOf[jList[_]].isAssignableFrom(c) => listType(metadata)
case c if classOf[jMap[_, _]].isAssignableFrom(c) => mapType(metadata)
case _ => throw new IllegalArgumentException(s"Class $clazz can't be serialized")
}
}
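  // Illustrative bindings (not part of the original source) produced by selectType;
  // the metadata map in the list case is an assumption:
  //   selectType(classOf[java.lang.String])  == Seq(STRING)
  //   selectType(classOf[Point])             == Seq(GEOMETRY, POINT)
  //   selectType(classOf[jList[_]], java.util.Collections.singletonMap(UserDataListType, "java.lang.Integer"))
  //     == Seq(LIST, INT)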
private def geometryType(clazz: Class[_ <: Geometry]): Seq[ObjectType] = {
val subtype = clazz match {
case c if c == classOf[Point] => POINT
case c if c == classOf[LineString] => LINESTRING
case c if c == classOf[Polygon] => POLYGON
case c if c == classOf[MultiLineString] => MULTILINESTRING
case c if c == classOf[MultiPolygon] => MULTIPOLYGON
case c if c == classOf[MultiPoint] => MULTIPOINT
case c if c == classOf[GeometryCollection] => GEOMETRY_COLLECTION
case _ => GEOMETRY
}
Seq(GEOMETRY, subtype)
}
private def listType(metadata: jMap[_, _]): Seq[ObjectType] = {
val clazz = Class.forName(metadata.get(UserDataListType).asInstanceOf[String])
selectType(clazz) match {
case Seq(binding) => Seq(LIST, binding)
case _ => throw new IllegalArgumentException(s"Can't serialize list sub-type of ${clazz.getName}")
}
}
private def mapType(metadata: jMap[_, _]): Seq[ObjectType] = {
val keyClass = Class.forName(metadata.get(UserDataMapKeyType).asInstanceOf[String])
val keyType = selectType(keyClass) match {
case Seq(binding) => binding
case _ => throw new IllegalArgumentException(s"Can't serialize map key type of ${keyClass.getName}")
}
val valueClass = Class.forName(metadata.get(UserDataMapValueType).asInstanceOf[String])
val valueType = selectType(valueClass) match {
case Seq(binding) => binding
case _ => throw new IllegalArgumentException(s"Can't serialize map value type of ${valueClass.getName}")
}
Seq(MAP, keyType, valueType)
}
}
| elahrvivaz/geomesa | geomesa-features/geomesa-feature-common/src/main/scala/org/locationtech/geomesa/features/serialization/ObjectType.scala | Scala | apache-2.0 | 5,164 |
package im.actor.server.http
import im.actor.api.rpc.ClientData
import im.actor.api.rpc.counters.UpdateCountersChanged
import im.actor.api.rpc.messaging.{ TextMessage, UpdateMessage }
import im.actor.api.rpc.misc.ResponseSeq
import im.actor.server._
import im.actor.server.api.http.json.Text
import im.actor.server.api.http.webhooks.WebhooksHandler
import im.actor.server.api.rpc.service.GroupsServiceHelpers
import im.actor.server.api.rpc.service.groups.{ GroupInviteConfig, GroupsServiceImpl }
import im.actor.server.group.GroupOffice
import im.actor.server.presences.{ GroupPresenceManager, PresenceManager }
class WebhookHandlerSpec
extends BaseAppSuite
with GroupsServiceHelpers
with MessageParsing
with ImplicitGroupRegions
with ImplicitSequenceService
with ImplicitSessionRegionProxy
with ImplicitAuthService
with SequenceMatchers {
behavior of "WebhookHandler"
it should "create group bot on group creation" in t.createGroupAndBot()
it should "allow bot to send message to it's group" in t.sendInGroup()
implicit val presenceManagerRegion = PresenceManager.startRegion()
implicit val groupPresenceManagerRegion = GroupPresenceManager.startRegion()
val groupInviteConfig = GroupInviteConfig("http://actor.im")
implicit val groupsService = new GroupsServiceImpl(groupInviteConfig)
object t {
val (user1, authId1, _) = createUser()
val (user2, authId2, _) = createUser()
val sessionId = createSessionId()
implicit val clientData = ClientData(authId1, sessionId, Some(user1.id))
def createGroupAndBot() = {
val groupOutPeer = createGroup("Bot test group", Set(user2.id)).groupPeer
whenReady(db.run(persist.GroupBot.findByGroup(groupOutPeer.groupId))) { optBot ⇒
optBot shouldBe defined
val bot = optBot.get
bot.groupId shouldEqual groupOutPeer.groupId
}
}
def sendInGroup() = {
val handler = new WebhooksHandler()
val groupResponse = createGroup("Bot test group", Set(user2.id))
val groupOutPeer = groupResponse.groupPeer
val initSeq = groupResponse.seq
val initState = groupResponse.state
Thread.sleep(1000)
val token = whenReady(GroupOffice.getIntegrationToken(groupOutPeer.groupId, user1.id)) { optToken ⇒
optToken shouldBe defined
optToken.get
}
val firstMessage = Text("Alert! All tests are failed!")
whenReady(handler.send(firstMessage, token)) { _ ⇒
expectUpdatesUnordered(failUnmatched)(initSeq, initState, Set(UpdateMessage.header, UpdateCountersChanged.header)) {
case (UpdateMessage.header, u) ⇒
val update = parseUpdate[UpdateMessage](u)
update.message shouldEqual TextMessage(firstMessage.text, Vector.empty, None)
case (UpdateCountersChanged.header, update) ⇒ parseUpdate[UpdateCountersChanged](update)
}
}
val (seq1, state1) = whenReady(sequenceService.handleGetState()) { resp ⇒
val ResponseSeq(seq, state) = resp.toOption.get
(seq, state)
}
val secondMessage = Text("It's ok now!")
whenReady(handler.send(secondMessage, token)) { _ ⇒
expectUpdatesUnordered(failUnmatched)(seq1, state1, Set(UpdateMessage.header, UpdateCountersChanged.header)) {
case (UpdateMessage.header, u) ⇒
val update = parseUpdate[UpdateMessage](u)
update.message shouldEqual TextMessage(secondMessage.text, Vector.empty, None)
case (UpdateCountersChanged.header, update) ⇒ parseUpdate[UpdateCountersChanged](update)
}
}
}
}
}
| luoxiaoshenghustedu/actor-platform | actor-server/actor-tests/src/test/scala/im/actor/server/http/WebhookHandlerSpec.scala | Scala | mit | 3,612 |
package de.zalando.swagger
import java.net.URI
import de.zalando.apifirst.Domain
import Domain._
import TypeMetaConverter._
import de.zalando.apifirst.naming._
import strictModel._
import scala.collection.mutable
import scala.language.{implicitConversions, postfixOps}
/**
* @author slasch
* @since 14.10.2015.
*/
class TypeConverter(base: URI, model: strictModel.SwaggerModel, keyPrefix: String) extends ParameterNaming with DiscriminatorMemoizer {
lazy val convert: NamedTypes =
fromDefinitions(model.definitions) ++
fromPaths(model.paths) ++
fromParameters(model.parameters)
private type TypeConstructor = TypeMeta => Type
private type TypeConstructors = Seq[TypeConstructor]
private def fromParameters(parameters: ParameterDefinitions): NamedTypes =
Option(parameters).toSeq.flatten flatMap { p =>
fromParamListItem(base / "parameters" / p._1, p._2)
}
private def fromDefinitions(definitions: Definitions): NamedTypes =
Option(definitions).toSeq.flatten flatMap { d =>
fromSchema(base / "definitions" / d._1, d._2, None)
}
private def fromPaths(paths: Paths): NamedTypes =
fromPathParameters(paths) ++ fromResponses(paths).flatten ++ fromOperationParameters(paths).toSeq.flatten
private def fromPathParameters(paths: Paths): NamedTypes =
allPathItems(paths) flatMap fromNamedParamListItem
private def forAllOperations[T](paths: Paths, logic: (Reference, Operation) => T) = for {
(prefix, path) <- Option(paths).toSeq.flatten
operationName <- path.operationNames
operation = path.operation(operationName)
name = base / "paths" / prefix / operationName
} yield logic(name, operation)
private def fromOperationParameters(paths: Paths): Iterable[NamedTypes] =
forAllOperations(paths, parametersCollector)
private def parametersCollector(name: Reference, operation: Operation): NamedTypes =
Option(operation.parameters).toSeq.flatten flatMap {
fromParamListItem(name, _)
}
private def allPathItems(paths: Paths): Seq[(Reference, ParametersListItem)] = for {
(url, pathItem) <- Option(paths).toSeq.flatten
parameterList <- Option(pathItem.parameters).toSeq
paramListItem <- parameterList
name = base / "paths" / url / ""
} yield name -> paramListItem
private def responseCollector: (Reference, Operation) => (Reference, Responses) = (name, op) => name -> op.responses
private def fromResponses(paths: Paths): Seq[NamedTypes] = for {
(prefix, responses) <- forAllOperations(paths, responseCollector)
(suffix, response) <- responses
fullName = prefix / Reference.responses / suffix
} yield fromSchemaOrFileSchema(fullName, response.schema, Some(Nil))
private def fromNamedParamListItem[T](pair: (Reference, ParametersListItem)): NamedTypes =
fromParamListItem(pair._1, pair._2)
private def fromParamListItem[T](name: Reference, param: ParametersListItem): NamedTypes = param match {
case r : JsonReference => Seq(fromReference(name, r, None))
case nb: NonBodyParameterCommons[_, _] => Seq(fromNonBodyParameter(name, nb))
case bp: BodyParameter[_] => fromBodyParameter(name, bp)
case nbp: NonBodyParameter[_] =>
throw new IllegalStateException("Something went wrong, this case should not be reachable")
}
private def fromBodyParameter[T](name: Reference, param: BodyParameter[T]): NamedTypes =
fromSchemaOrFileSchema(name / param.name, param.schema,
if (param.required) Some(Seq(param.name)) else Some(Nil))
private def fromSchemaOrReference[T](name: Reference, param: SchemaOrReference[T], required: Option[Seq[String]]): NamedTypes =
Option(param).toSeq flatMap {
case Left(s) => fromSchema(name, s, required)
case Right(r: JsonReference) => Seq(fromReference(name, r, required))
}
private def fromSchemaOrFileSchema[T](name: Reference, param: SchemaOrFileSchema[T], required: Option[Seq[String]]): NamedTypes =
param match {
case any if any == null => Seq(fromNull(name))
case Left(s: SchemaOrReference[_]) => fromSchemaOrReference(name, s, required)
case Right(fs: FileSchema[_]) => Seq(fromFileSchema(fs, required))
}
private def fromSchemaOrSchemaArray[T](name: Reference, param: SchemaOrSchemaArray[T], required: Option[Seq[String]]): NamedTypes =
param match {
case Right(sa) => fromSchemaArray(name, sa, required)
case Left(sr) => fromSchemaOrReference(name, sr, required)
}
private def fromSchemaArray(name: Reference, sa: SchemaArray, required: Option[Seq[String]]): NamedTypes =
sa flatMap { s => fromSchemaOrFileSchema(name, s, required) }
private def fromSchema[T](name: Reference, param: Schema[_], required: Option[Seq[String]]): NamedTypes = {
val tpe = if (param.`type` != null) param.`type` else PrimitiveType.OBJECT
tpe match {
case t: ArrayJsonSchemaType => Seq(fromArrayJsonSchema(name, param, t))
case p: PrimitiveType.Val => fromPrimitiveType(name, param, p, required)
}
}
private def fromPrimitiveType(name: Reference, param: Schema[_], p: PrimitiveType.Val, required: Option[Seq[String]]): NamedTypes = {
p match {
case PrimitiveType.ARRAY =>
require(param.items.nonEmpty, s"Items should not be empty for $name")
val types = fromSchemaOrSchemaArray(name, param.items.get, None)
val meta = arrayTypeMeta(param.comment.getOrElse(param.format), param)
checkRequired(name, required, wrapInArray(types.head, meta, None), param.default) +: types.tail
case PrimitiveType.OBJECT =>
val obj = param.allOf map { p =>
val everythingIsRequired = None
extensionType(name, everythingIsRequired)(p)
} getOrElse {
val typeName = typeNameFromInlinedReference(param) getOrElse name
val catchAll = fromSchemaOrBoolean(name / "additionalProperties", param.additionalProperties, param)
val normal = fromSchemaProperties(name, param.properties, paramRequired(param.required, param.default))
val types = fromTypes(name, normal ++ catchAll.toSeq.flatten, typeName)
Option(param.discriminator) foreach { d => memoizeDiscriminator(name, typeName / d) }
checkRequired(name, required, types, param.default)
}
Seq(obj)
case tpe if param.enum.isDefined =>
val meta = enumTypeMeta(param.enum.get.size)
val typeName = typeNameFromInlinedReference(param) getOrElse name
val primitiveType = (p, param.format)(param)
val leaves = param.enum.get map { value =>
EnumObject(primitiveType, value.toString, TypeMeta(Some(value.toString)))
}
val rootType = typeName -> EnumTrait(primitiveType, meta, leaves)
Seq(checkRequired(name, required, rootType, param.default))
case _ =>
val primitiveType = name -> (p, param.format)(param)
Seq(checkRequired(name, required, primitiveType, param.default))
}
}
private def typeNameFromInlinedReference(param: Schema[_]): Option[Reference] =
param.vendorExtensions.get("x-$ref").map(Reference.deref).map(Reference(base.toString, _))
private def fromSchemaProperties[T](name: Reference, param: SchemaProperties, required: Option[Seq[String]]): NamedTypes =
Option(param).toSeq.flatten flatMap { p =>
fromSchemaOrFileSchema(name / p._1, p._2, required)
}
// FIXME the boolean value is basically ignored here
private def fromSchemaOrBoolean[T](name: Reference, param: SchemaOrBoolean[T], meta: TypeMeta): Option[NamedTypes] =
Option(param) map {
case Left(s) =>
val typeDefs = fromSchemaOrReference(name, s, None)
val topMeta = s match {
case Left(schema) => schemaTypeMeta(schema)
case _ => meta
}
wrapInCatchAll(typeDefs.head, topMeta) +: typeDefs.tail
case Right(true) => Seq(wrapInCatchAll(name -> Str(None, meta), meta))
case Right(false) => Seq(wrapInCatchAll(name -> Str(None, meta), meta))
}
// ------------------------------------ Single Types ------------------------------------
private def fromNull(name: Reference): NamedType = name -> Null(TypeMeta(None))
private def extensionType[T](name: Reference, required: Option[Seq[String]])
(schema: SchemaArray): NamedType = {
val allOf = fromSchemaArray(name, schema, required).map(_._2)
val root = schema.collect {
case Left(Left(s: Schema[_])) if s.discriminator != null => typeNameFromInlinedReference(s).map(_ / s.discriminator)
}.flatten.headOption
val inheritedRoot = allOf.collect { case c: Composite => c.root }.flatten.headOption
name -> AllOf(name / name.simple, schema, allOf, root orElse inheritedRoot)
}
private def fromReference(name: Reference, ref: JsonReference, required: Option[Seq[String]]): NamedType = {
assert(ref != null && ref.$ref != null)
checkRequired(name, required, name -> TypeRef(base / Reference.deref(ref.$ref)), null: Default[String])
}
private def fromPrimitivesItems[T](name: Reference, items: PrimitivesItems[T]): NamedType = {
if (items.isArray) {
val meta = arrayTypeMeta(items.comment.getOrElse(items.format), items)
wrapInArray(fromPrimitivesItems(name, items.items), meta, Option(items.collectionFormat).map(_.toString))
} else {
name -> (items.`type`, items.format)(items)
}
}
private def fromArrayJsonSchema[T](name: Reference, param: Schema[_], t: ArrayJsonSchemaType): NamedType = {
val descendants = t.toSeq map { d =>
val typeDef = PrimitiveType.fromString(d)
fromPrimitiveType(typeDef, param.format)(param)
}
name -> OneOf(name, param, descendants)
}
private def fromNonBodyParameter[T, CF](name: Reference, param: NonBodyParameterCommons[T, CF]): NamedType = {
val fullName = name / param.name
val result =
if (param.isArray) {
val meta = arrayTypeMeta(param.comment.getOrElse(param.format), param.items)
wrapInArray(fromPrimitivesItems(fullName, param.items), meta, Option(param.collectionFormat).map(_.toString))
} else {
fullName -> (param.`type`, param.format)(param)
}
if (!param.required && param.default == null) wrapInOption(result) else result
}
private def fromTypes(name: Reference, types: NamedTypes, typeName: Reference): NamedType = {
val fields = types map { t => Field(typeName / t._1.simple, t._2) }
name -> Domain.TypeDef(typeName, fields, types)
}
private def fromFileSchema[T](schema: FileSchema[T], required: Option[Seq[String]]): NamedType = ???
// ------------------------------------ Primitives ------------------------------------
private implicit def fromParameterType(tpe: (ParameterType.Value, String)): TypeConstructor =
(tpe._1, Option(tpe._2).map(_.toLowerCase)) match {
case (ParameterType.INTEGER, Some("int64")) => Domain.Lng
case (ParameterType.INTEGER, Some("int32")) => Domain.Intgr
case (ParameterType.INTEGER, _) => Domain.BInt
case (ParameterType.NUMBER, Some("float")) => Domain.Flt
case (ParameterType.NUMBER, Some("double")) => Domain.Dbl
case (ParameterType.NUMBER, _) => Domain.BDcml
case (ParameterType.BOOLEAN, _) => Domain.Bool
case (ParameterType.STRING, Some("binary")) => Domain.BinaryString
case (ParameterType.STRING, Some("byte")) => Domain.Base64String
case (ParameterType.STRING, Some("date")) => Domain.Date
case (ParameterType.STRING, Some("date-time")) => Domain.DateTime
case (ParameterType.STRING, Some("password")) => Domain.Password
case (ParameterType.STRING, Some("uuid")) => Domain.UUID
case (ParameterType.STRING, fmt) => Domain.Str.curried(fmt)
case (ParameterType.FILE, _) => Domain.File
      case (a, b) => throw new IllegalArgumentException(s"Combination of $a and $b is not supported")
}
private implicit def fromPrimitiveType(tpe: (PrimitiveType.Val, String)): TypeConstructor =
(tpe._1, Option(tpe._2).map(_.toLowerCase)) match {
case (PrimitiveType.INTEGER, Some("int64")) => Domain.Lng
case (PrimitiveType.INTEGER, Some("int32")) => Domain.Intgr
case (PrimitiveType.INTEGER, _) => Domain.BInt
case (PrimitiveType.NUMBER, Some("float")) => Domain.Flt
case (PrimitiveType.NUMBER, Some("double")) => Domain.Dbl
case (PrimitiveType.NUMBER, _) => Domain.BDcml
case (PrimitiveType.BOOLEAN, _) => Domain.Bool
case (PrimitiveType.STRING, Some("binary")) => Domain.BinaryString
case (PrimitiveType.STRING, Some("byte")) => Domain.Base64String
case (PrimitiveType.STRING, Some("date")) => Domain.Date
case (PrimitiveType.STRING, Some("date-time")) => Domain.DateTime
case (PrimitiveType.STRING, Some("password")) => Domain.Password
case (PrimitiveType.STRING, Some("uuid")) => Domain.UUID
case (PrimitiveType.STRING, fmt) => Domain.Str.curried(fmt)
case (PrimitiveType.NULL, _) => Domain.Null
      case (a, b) => throw new IllegalArgumentException(s"Combination of $a and $b is not supported")
}
// ------------------------------------ Wrappers ------------------------------------
private def wrapInArray(t: NamedType, m: TypeMeta, collectionFormat: Option[String]): NamedType = {
val wrapper =
if (t._1.isResponsePath) Domain.ArrResult(t._2, m)
else Domain.Arr(t._2, m, collectionFormat.map(_.toString).getOrElse(CollectionFormat.default.toString))
t._1 -> wrapper
}
private def wrapInOption(t: NamedType): NamedType =
t._1 -> Domain.Opt(t._2, TypeMeta(None))
private def wrapInCatchAll(t: NamedType, m: TypeMeta): NamedType =
t._1 -> Domain.CatchAll(t._2, m)
// ------------------------------------ Helper methods ------------------------------------
private def paramRequired(required: Seq[String], default: Default[_]) =
Some(if (default != null || required == null) Nil else required)
private def checkRequired(name: Reference, required: Option[Seq[String]], tpe: NamedType, default: Default[_]): NamedType =
if (isRequired(name, required, default)) tpe else wrapInOption(tpe)
// Use required = None if everything is required
// Use required = Some(listOfFields) to specify what exactly is required
// Use required = Some(Nil) to define that everything is optional
// The return type is also required by definition
private def isRequired[T](name: Reference, required: Option[Seq[String]], default: Default[T]): Boolean =
default != null || required.isEmpty || required.get.contains(name.simple) || name.parent.isTopResponsePath
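  // Illustrative sketch (not part of the original source): with required = Some(Seq("id"))
  // and no default value, a property named "id" keeps its type while a property named
  // "label" is wrapped in Domain.Opt by checkRequired; required = None marks every
  // property as required, and required = Some(Nil) makes everything optional.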
}
/*
* It's safe to use mutable map without locking in current setup
*/
trait DiscriminatorMemoizer {
val discriminators = new mutable.HashMap[Reference, Reference]()
def memoizeDiscriminator(name: Reference, discriminator: Reference) =
Option(discriminator) map {
discriminators += name -> _
}
def findDiscriminator(allOf: Seq[Type]): Option[Reference] =
allOf find { t =>
discriminators.contains(t.name)
} map { t => discriminators(t.name) }
} | zalando/play-swagger | swagger-parser/src/main/scala/de/zalando/swagger/typeConverter.scala | Scala | mit | 15,106 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.benchmark
import java.time.Instant
import org.apache.spark.benchmark.Benchmark
import org.apache.spark.sql.internal.SQLConf
/**
* Synthetic benchmark for the extract function.
* To run this benchmark:
* {{{
* 1. without sbt:
* bin/spark-submit --class <this class> --jars <spark core test jar> <sql core test jar>
* 2. build/sbt "sql/test:runMain <this class>"
* 3. generate result:
* SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
* Results will be written to "benchmarks/ExtractBenchmark-results.txt".
* }}}
*/
object ExtractBenchmark extends SqlBasedBenchmark {
private def doBenchmark(cardinality: Long, exprs: String*): Unit = {
val sinceSecond = Instant.parse("2010-01-01T00:00:00Z").getEpochSecond
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> "true") {
spark
.range(sinceSecond, sinceSecond + cardinality, 1, 1)
.selectExpr(exprs: _*)
.write
.format("noop")
.save()
}
}
private def run(
benchmark: Benchmark,
cardinality: Long,
name: String,
exprs: String*): Unit = {
benchmark.addCase(name, numIters = 3) { _ =>
doBenchmark(cardinality, exprs: _*)
}
}
private def castExpr(from: String): String = from match {
case "timestamp" => s"cast(id as timestamp)"
case "date" => s"cast(cast(id as timestamp) as date)"
case other => throw new IllegalArgumentException(
s"Unsupported column type $other. Valid column types are 'timestamp' and 'date'")
}
private def run(
benchmark: Benchmark,
func: String,
cardinality: Long,
field: String,
from: String): Unit = {
val expr = func match {
case "extract" => s"EXTRACT($field FROM ${castExpr(from)})"
case "date_part" => s"DATE_PART('$field', ${castExpr(from)})"
case other => throw new IllegalArgumentException(
s"Unsupported function '$other'. Valid functions are 'extract' and 'date_part'.")
}
benchmark.addCase(s"$field of $from", numIters = 3) { _ =>
doBenchmark(cardinality, expr)
}
}
override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
val N = 10000000L
val fields = Seq(
"MILLENNIUM", "CENTURY", "DECADE", "YEAR",
"ISOYEAR", "QUARTER", "MONTH", "WEEK",
"DAY", "DAYOFWEEK", "DOW", "ISODOW",
"DOY", "HOUR", "MINUTE", "SECOND",
"MILLISECONDS", "MICROSECONDS", "EPOCH")
Seq("extract", "date_part").foreach { func =>
Seq("timestamp", "date").foreach { dateType =>
val benchmark = new Benchmark(s"Invoke $func for $dateType", N, output = output)
run(benchmark, N, s"cast to $dateType", castExpr(dateType))
fields.foreach(run(benchmark, func, N, _, dateType))
benchmark.run()
}
}
}
}
| bdrillard/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/ExtractBenchmark.scala | Scala | apache-2.0 | 3,667 |
package com.twitter.finatra.http.internal.exceptions
import com.twitter.finagle.http.{Request, Response}
import com.twitter.finatra.http.exceptions.{DefaultExceptionMapper, ExceptionMapper}
import com.twitter.inject.Injector
import com.twitter.inject.TypeUtils.singleTypeParam
import java.lang.reflect.Type
import java.util.concurrent.ConcurrentHashMap
import javax.inject.{Inject, Singleton}
import net.codingwell.scalaguice.typeLiteral
import scala.annotation.tailrec
import scala.collection.JavaConversions.mapAsScalaConcurrentMap
/**
* A class to register ExceptionMappers and handle exceptions.
*
* Given some exception, an ExceptionManager will find an ExceptionMapper
* to handle that particular class of exceptions. If the mapper for that
* exception isn't registered, ExceptionManager will try its parent
* class, and so on, until it reaches the Throwable class. At that point
* the DefaultExceptionMapper will run which should be defined over all
* Throwables.
*
* @throws java.lang.IllegalStateException when an exception type is
* registered twice.
*
* Note: When searching for the parent exception mapper, it would be nice
* to traverse the entire class linearization so it works for
* traits/mixins too [1]. Unfortunately, implementing this would require
* a lot more reflection and it might not be threadsafe [2]. Doing it in
* Scala 2.11 might be easier and safer.
*
* [1] http://stackoverflow.com/questions/15623498/handy-ways-to-show-linearization-of-a-class
* [2] http://docs.scala-lang.org/overviews/reflection/thread-safety.html
*/
@Singleton
class ExceptionManager @Inject()(
injector: Injector,
defaultExceptionMapper: DefaultExceptionMapper) {
// TODO (AF-112): Investigate using com.twitter.util.Memoize.
private val mappers = mapAsScalaConcurrentMap(
new ConcurrentHashMap[Type, ExceptionMapper[_]]())
/* Public */
def add[T <: Throwable : Manifest](mapper: ExceptionMapper[T]) {
add(manifest[T].runtimeClass, mapper)
}
def add[T <: ExceptionMapper[_]: Manifest] {
val mapperType = typeLiteral[T].getSupertype(classOf[ExceptionMapper[_]]).getType
val throwableType = singleTypeParam(mapperType)
add(throwableType, injector.instance[T])
}
def toResponse(request: Request, throwable: Throwable): Response = {
val mapper = cachedGetMapper(throwable.getClass)
mapper.asInstanceOf[ExceptionMapper[Throwable]].toResponse(request, throwable)
}
/* Private */
private def add(throwableType: Type, mapper: ExceptionMapper[_]) {
if (mappers.contains(throwableType)) {
throw new IllegalStateException(s"ExceptionMapper for $throwableType already registered")
} else {
mappers(throwableType) = mapper // mutation
}
}
// Assumes mappers are never explicitly registered after configuration
// phase, otherwise we'd need to invalidate the cache.
private def cachedGetMapper(cls: Class[_]): ExceptionMapper[_] = {
mappers.getOrElseUpdate(cls, getMapper(cls))
}
// Get mapper for this throwable class if it exists, otherwise
// search for parent throwable class. If we reach the Throwable
// class then return the default mapper.
//
// Note: we avoid getOrElse so we have tail recursion
@tailrec
private def getMapper(cls: Class[_]): ExceptionMapper[_] = {
if (cls == classOf[Throwable]) {
defaultExceptionMapper
} else {
mappers.get(cls) match {
case Some(mapper) => mapper
case None => getMapper(cls.getSuperclass)
}
}
}
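  // Illustrative resolution sketch (not part of the original source); the exception
  // classes and mapper are assumptions:
  //   class BaseError extends Exception
  //   class ChildError extends BaseError
  //   manager.add(baseMapper)                           // baseMapper: ExceptionMapper[BaseError]
  //   manager.toResponse(request, new ChildError)       // walks ChildError -> BaseError, uses baseMapper
  //   manager.toResponse(request, new RuntimeException) // falls through to the DefaultExceptionMapper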
}
| nkhuyu/finatra | http/src/main/scala/com/twitter/finatra/http/internal/exceptions/ExceptionManager.scala | Scala | apache-2.0 | 3,519 |
package com.reactific.jfxtend.scene.shape
import javafx.scene.shape.Sphere
/** An anchor node, rendered as a JavaFX sphere. */
class Anchor extends Sphere {
}
| reactific/jfxtensions | src/main/scala/com/reactific/jfxtend/scene/shape/Anchor.scala | Scala | apache-2.0 | 140 |
package inloopio.indicator.function
import inloopio.math.StatsFunctions
import inloopio.math.timeseries.Null
import inloopio.math.timeseries.TBaseSer
import inloopio.math.timeseries.TVar
import inloopio.math.indicator.Factor
/**
*
* @author Caoyuan Deng
*/
class SUMFunction(_baseSer: TBaseSer, var baseVar: TVar[Double], var period: Factor) extends Function(_baseSer) {
final protected def isum(idx: Int, baseVar: TVar[Double], period: Double, prev: Double): Double = {
StatsFunctions.isum(idx, baseVar.values, period.toInt, prev)
}
val _sum = TVar[Double]()
override def set(args: Any*): Unit = {
baseVar = args(0).asInstanceOf[TVar[Double]]
period = args(1).asInstanceOf[Factor]
}
protected def computeSpot(i: Int): Unit = {
if (i < period.value - 1) {
_sum(i) = Null.Double
} else {
_sum(i) = isum(i, baseVar, period.value, _sum(i - 1))
}
}
def sum(sessionId: Long, idx: Int): Double = {
computeTo(sessionId, idx)
_sum(idx)
}
}
| dcaoyuan/inloopio-libs | inloopio-indicator/src/main/scala/inloopio/indicator/function/SUMFunction.scala | Scala | bsd-3-clause | 1,011 |
/*
* Copyright 2010 LinkedIn
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.consumer
import java.util.concurrent.CountDownLatch
import org.apache.log4j.Logger
import java.nio.channels.{ClosedChannelException, ClosedByInterruptException}
import kafka.common.{OffsetOutOfRangeException, ErrorMapping}
import kafka.cluster.{Partition, Broker}
import kafka.api.{MultiFetchResponse, OffsetRequest, FetchRequest}
import org.I0Itec.zkclient.ZkClient
import kafka.utils._
import java.io.IOException
class FetcherRunnable(val name: String,
val zkClient : ZkClient,
val config: ConsumerConfig,
val broker: Broker,
val partitionTopicInfos: List[PartitionTopicInfo])
extends Thread(name) {
private val logger = Logger.getLogger(getClass())
private val shutdownLatch = new CountDownLatch(1)
private val simpleConsumer = new SimpleConsumer(broker.host, broker.port, config.socketTimeoutMs,
config.socketBufferSize)
@volatile
private var stopped = false
def shutdown(): Unit = {
stopped = true
interrupt
logger.debug("awaiting shutdown on fetcher " + name)
shutdownLatch.await
logger.debug("shutdown of fetcher " + name + " thread complete")
}
override def run() {
for (info <- partitionTopicInfos)
logger.info(name + " start fetching topic: " + info.topic + " part: " + info.partition.partId + " offset: "
+ info.getFetchOffset + " from " + broker.host + ":" + broker.port)
try {
while (!stopped) {
val fetches = partitionTopicInfos.map(info =>
new FetchRequest(info.topic, info.partition.partId, info.getFetchOffset, config.fetchSize))
if (logger.isTraceEnabled)
logger.trace("fetch request: " + fetches.toString)
val response = simpleConsumer.multifetch(fetches : _*)
var read = 0
for((messages, info) <- response.zip(partitionTopicInfos)) {
try {
var done = false
if(messages.errorCOde == ErrorMapping.OFFSET_OUT_OF_RANGE_CODE) {
logger.info("offset " + info.getFetchOffset + " out of range")
// see if we can fix this error
val resetOffset = resetConsumerOffsets(info.topic, info.partition)
if(resetOffset >= 0) {
info.resetFetchOffset(resetOffset)
info.resetConsumeOffset(resetOffset)
done = true
}
}
if (!done)
read += info.enqueue(messages, info.getFetchOffset)
}
catch {
case e1: IOException =>
// something is wrong with the socket, re-throw the exception to stop the fetcher
throw e1
case e2 =>
if (!stopped) {
// this is likely a repeatable error, log it and trigger an exception in the consumer
logger.error("error in FetcherRunnable for " + info, e2)
info.enqueueError(e2, info.getFetchOffset)
}
// re-throw the exception to stop the fetcher
throw e2
}
}
if (logger.isTraceEnabled)
logger.trace("fetched bytes: " + read)
if(read == 0) {
logger.debug("backing off " + config.backoffIncrementMs + " ms")
Thread.sleep(config.backoffIncrementMs)
}
}
}
catch {
case e =>
if (stopped)
logger.info("FecherRunnable " + this + " interrupted")
else
logger.error("error in FetcherRunnable ", e)
}
logger.info("stopping fetcher " + name + " to host " + broker.host)
Utils.swallow(logger.info, simpleConsumer.close)
shutdownComplete()
}
/**
* Record that the thread shutdown is complete
*/
private def shutdownComplete() = shutdownLatch.countDown
private def resetConsumerOffsets(topic : String,
partition: Partition) : Long = {
var offset : Long = 0
config.autoOffsetReset match {
case OffsetRequest.SMALLEST_TIME_STRING => offset = OffsetRequest.EARLIEST_TIME
case OffsetRequest.LARGEST_TIME_STRING => offset = OffsetRequest.LATEST_TIME
case _ => return -1
}
// get mentioned offset from the broker
val offsets = simpleConsumer.getOffsetsBefore(topic, partition.partId, offset, 1)
val topicDirs = new ZKGroupTopicDirs(config.groupId, topic)
// reset manually in zookeeper
logger.info("updating partition " + partition.name + " with " + (if(offset == OffsetRequest.EARLIEST_TIME) "earliest " else " latest ") + "offset " + offsets(0))
ZkUtils.updatePersistentPath(zkClient, topicDirs.consumerOffsetDir + "/" + partition.name, offsets(0).toString)
offsets(0)
}
}
| quipo/kafka | core/src/main/scala/kafka/consumer/FetcherRunnable.scala | Scala | apache-2.0 | 5,341 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.stream.sql
import org.apache.flink.api.scala.createTypeInformation
import org.apache.flink.table.api.bridge.scala.tableConversions
import org.apache.flink.table.planner.factories.TestValuesTableFactory
import org.apache.flink.table.planner.runtime.utils.BatchTestBase.row
import org.apache.flink.table.planner.runtime.utils.{StreamingTestBase, TestData, TestingAppendSink}
import org.apache.flink.table.utils.LegacyRowResource
import org.apache.flink.types.Row
import org.junit.Assert.assertEquals
import org.junit.{Rule, Test}
import java.time.LocalDateTime
/**
* Tests for pushing filters into a table scan
*/
class FilterableSourceITCase extends StreamingTestBase {
@Rule
def usesLegacyRows: LegacyRowResource = LegacyRowResource.INSTANCE
@Test
def testFilterPushdown(): Unit = {
val data = Seq(
row(1, 2L, LocalDateTime.parse("2020-11-21T19:00:05.23")),
row(2, 3L, LocalDateTime.parse("2020-11-21T21:00:05.23"))
)
val dataId = TestValuesTableFactory.registerData(data)
val ddl =
s"""
| CREATE TABLE MyTable(
| a INT,
| b BIGINT,
| c TIMESTAMP(3),
| WATERMARK FOR c AS c
| ) WITH (
| 'connector' = 'values',
| 'enable-watermark-push-down' = 'true',
| 'filterable-fields' = 'a;c;d',
| 'bounded' = 'false',
| 'disable-lookup' = 'true',
| 'data-id' = '$dataId'
| )
|""".stripMargin
tEnv.executeSql(ddl)
val query = "SELECT * FROM MyTable WHERE a > 1"
val expectedData = Seq("2,3,2020-11-21T21:00:05.230")
val result = tEnv.sqlQuery(query).toAppendStream[Row]
val sink = new TestingAppendSink()
result.addSink(sink)
env.execute()
assertEquals(expectedData.sorted, sink.getAppendResults.sorted)
}
@Test
def testWithRejectedFilter(): Unit = {
val data = Seq(
row(1, 2L, LocalDateTime.parse("2020-11-21T19:00:05.23")),
row(2, 3L, LocalDateTime.parse("2020-11-21T21:00:05.23"))
)
val dataId = TestValuesTableFactory.registerData(data)
// Reject the filter by leaving out 'a' from 'filterable-fields'
val ddl =
s"""
| CREATE TABLE MyTable(
| a INT,
| b BIGINT,
| c TIMESTAMP(3),
| WATERMARK FOR c AS c
| ) WITH (
| 'connector' = 'values',
| 'enable-watermark-push-down' = 'true',
| 'filterable-fields' = 'c;d',
| 'bounded' = 'false',
| 'disable-lookup' = 'true',
| 'data-id' = '$dataId'
| )
|""".stripMargin
tEnv.executeSql(ddl)
val query = "SELECT * FROM MyTable WHERE a > 1"
val expectedData = Seq("2,3,2020-11-21T21:00:05.230")
val result = tEnv.sqlQuery(query).toAppendStream[Row]
val sink = new TestingAppendSink()
result.addSink(sink)
env.execute()
assertEquals(expectedData.sorted, sink.getAppendResults.sorted)
}
@Test
def testProjectWithWatermarkFilterPushdown(): Unit = {
val data = Seq(
row(1, 2L, "Hello", LocalDateTime.parse("2020-11-21T19:00:05.23")),
row(2, 3L, "World", LocalDateTime.parse("2020-11-21T21:00:05.23"))
)
val dataId = TestValuesTableFactory.registerData(data)
val ddl =
s"""
|CREATE TABLE TableWithWatermark (
| a int,
| b bigint,
| c string,
| d timestamp(3),
| WATERMARK FOR d as d
|) WITH (
| 'connector' = 'values',
| 'filterable-fields' = 'c',
| 'enable-watermark-push-down' = 'true',
| 'data-id' = '$dataId',
| 'bounded' = 'false',
| 'disable-lookup' = 'true'
|)
""".stripMargin
tEnv.executeSql(ddl)
val result =
      tEnv.sqlQuery(
"select a,b from TableWithWatermark WHERE LOWER(c) = 'world'"
).toAppendStream[Row]
val sink = new TestingAppendSink
result.addSink(sink)
env.execute()
val expected = List("2,3")
assertEquals(expected.sorted, sink.getAppendResults.sorted)
}
}
| lincoln-lil/flink | flink-table/flink-table-planner/src/test/scala/org/apache/flink/table/planner/runtime/stream/sql/FilterableSourceITCase.scala | Scala | apache-2.0 | 4,958 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.image
import scala.util.Random
import org.apache.commons.io.FilenameUtils
import org.apache.hadoop.conf.{Configuration, Configured}
import org.apache.hadoop.fs.{Path, PathFilter}
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat
import org.apache.spark.sql.SparkSession
private object RecursiveFlag {
/**
* Sets the spark recursive flag and then restores it.
*
* @param value Value to set
* @param spark Existing spark session
* @param f The function to evaluate after setting the flag
* @return Returns the evaluation result T of the function
*/
def withRecursiveFlag[T](value: Boolean, spark: SparkSession)(f: => T): T = {
val flagName = FileInputFormat.INPUT_DIR_RECURSIVE
// scalastyle:off hadoopconfiguration
val hadoopConf = spark.sparkContext.hadoopConfiguration
// scalastyle:on hadoopconfiguration
val old = Option(hadoopConf.get(flagName))
hadoopConf.set(flagName, value.toString)
try f finally {
// avoid false positive of DLS_DEAD_LOCAL_STORE_IN_RETURN by SpotBugs
if (old.isDefined) {
hadoopConf.set(flagName, old.get)
} else {
hadoopConf.unset(flagName)
}
}
}
}
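// Usage sketch (illustrative only, not part of the original file): wrap a read so
// that nested directories are listed recursively, with the previous flag value
// restored afterwards. The session `spark` and the path are assumed placeholders.
//
//   val images = RecursiveFlag.withRecursiveFlag(value = true, spark) {
//     spark.read.format("image").load("/data/images")
//   }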
/**
* Filter that allows loading a fraction of HDFS files.
*/
private class SamplePathFilter extends Configured with PathFilter {
val random = new Random()
// Ratio of files to be read from disk
var sampleRatio: Double = 1
override def setConf(conf: Configuration): Unit = {
if (conf != null) {
sampleRatio = conf.getDouble(SamplePathFilter.ratioParam, 1)
val seed = conf.getLong(SamplePathFilter.seedParam, 0)
random.setSeed(seed)
}
}
override def accept(path: Path): Boolean = {
// Note: checking fileSystem.isDirectory is very slow here, so we use basic rules instead
!SamplePathFilter.isFile(path) || random.nextDouble() < sampleRatio
}
}
private object SamplePathFilter {
val ratioParam = "sampleRatio"
val seedParam = "seed"
def isFile(path: Path): Boolean = FilenameUtils.getExtension(path.toString) != ""
/**
* Sets the HDFS PathFilter flag and then restores it.
* Only applies the filter if sampleRatio is less than 1.
*
* @param sampleRatio Fraction of the files that the filter picks
* @param spark Existing Spark session
* @param seed Random number seed
* @param f The function to evaluate after setting the flag
* @return Returns the evaluation result T of the function
*/
def withPathFilter[T](
sampleRatio: Double,
spark: SparkSession,
seed: Long)(f: => T): T = {
val sampleImages = sampleRatio < 1
if (sampleImages) {
val flagName = FileInputFormat.PATHFILTER_CLASS
// scalastyle:off hadoopconfiguration
val hadoopConf = spark.sparkContext.hadoopConfiguration
// scalastyle:on hadoopconfiguration
val old = hadoopConf.getClass(flagName, null)
hadoopConf.setDouble(SamplePathFilter.ratioParam, sampleRatio)
hadoopConf.setLong(SamplePathFilter.seedParam, seed)
hadoopConf.setClass(flagName, classOf[SamplePathFilter], classOf[PathFilter])
try f finally {
hadoopConf.unset(SamplePathFilter.ratioParam)
hadoopConf.unset(SamplePathFilter.seedParam)
old match {
case null => hadoopConf.unset(flagName)
case v => hadoopConf.setClass(flagName, v, classOf[PathFilter])
}
}
} else {
f
}
}
}
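// Usage sketch (illustrative only, not part of the original file): read roughly 10%
// of the files and restore the original PathFilter configuration afterwards. The
// ratio, seed, and load call are assumed placeholders.
//
//   val sample = SamplePathFilter.withPathFilter(sampleRatio = 0.1, spark, seed = 42L) {
//     spark.read.format("image").load("/data/images")
//   }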
| mahak/spark | mllib/src/main/scala/org/apache/spark/ml/image/HadoopUtils.scala | Scala | apache-2.0 | 4,265 |
/*
* Copyright (C) 2010 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.core.tools.service
import java.util.UUID
import org.apache.commons.math3.random.{ RandomGenerator, Well44497b, RandomAdaptor }
object Random { random ⇒
implicit def uuid2long(uuid: UUID) = uuid.getMostSignificantBits ^ uuid.getLeastSignificantBits
val default = newRNG(UUID.randomUUID)
def newRNG(seed: Long) = new SynchronizedRandom(new Well44497b(seed))
def newUnsychronizedRNG(seed: Long) = new RandomAdaptor(new Well44497b(seed))
class SynchronizedRandom(generator: RandomGenerator) extends java.util.Random {
override def nextBoolean = synchronized { generator.nextBoolean }
override def nextBytes(bytes: Array[Byte]) = synchronized { generator.nextBytes(bytes) }
override def nextDouble = synchronized { generator.nextDouble }
override def nextFloat = synchronized { generator.nextFloat }
override def nextGaussian = synchronized { generator.nextGaussian }
override def nextInt = synchronized { generator.nextInt }
override def nextInt(n: Int) = synchronized { generator.nextInt(n) }
override def nextLong = synchronized { generator.nextLong }
override def setSeed(seed: Long) = synchronized { generator.setSeed(seed) }
def toScala = new util.Random(this)
}
@transient lazy val longInterval = {
val min = BigDecimal(Long.MinValue)
val max = BigDecimal(Long.MaxValue) + 1
max - min
}
def shuffle[T](a: Array[T])(implicit rng: util.Random) = {
for (i ← 1 until a.size reverse) {
val j = rng.nextInt(i + 1)
val t = a(i)
a(i) = a(j)
a(j) = t
}
a
}
def shuffled[T](a: Iterable[T])(implicit rng: util.Random) = {
val indexed = a.toIndexedSeq
shuffle((0 until a.size).toArray).map(i ⇒ indexed(i))
}
implicit def randomDecorator(rng: util.Random) = new {
def shuffle[T](a: Array[T]) = random.shuffle(a)(rng)
def nextLong(max: Long): Long = {
val v = BigDecimal(rng.nextLong)
((v - Long.MinValue) * (BigDecimal(max) / longInterval)).toLong
}
}
implicit def iterableShuffleDecorator[T](a: Iterable[T]) = new {
def shuffled(implicit rng: util.Random): Seq[T] = random.shuffled(a)(rng)
}
}
| ISCPIF/PSEExperiments | openmole-src/openmole/core/org.openmole.core.tools/src/main/scala/org/openmole/core/tools/service/Random.scala | Scala | agpl-3.0 | 2,897 |
object SCL4150 {
class U[T]
class JK extends U[Int]
class B[T] extends JK
trait Z[T]
trait C[T] extends Z[Int]
class A[T, K] extends B[T] with C[T]
def foo[T[_], H](t: T[H]): T[H] = ???
/*start*/foo(new A[Int, String])/*end*/
}
/*
SCL4150.B[Int]
[Scala_2_13]SCL4150.A[Int, String]
*/ | JetBrains/intellij-scala | scala/scala-impl/testdata/typeInference/bugs5/SCL4150B.scala | Scala | apache-2.0 | 300 |
package models
case class UserAddModel(userid: String,
                        email: String,
                        role: String,
                        templates: String,
                        admin: String,
                        master: String)
| HiP-App/HiPBackend | app/models/UserAddModel.scala | Scala | apache-2.0 | 270 |
package breeze
import breeze.generic.{UFunc, VariableUFunc}
import breeze.linalg.{DenseMatrix, DenseVector}
import breeze.macros.cforRange
import spire.math.poly.PolyDense
import breeze.macros._
import spire.implicits.DoubleAlgebra
package object polynomial {
object densePolyval extends UFunc {
implicit object doubleImpl extends Impl2[PolyDenseUFuncWrapper, Double, Double] {
def apply(k: PolyDenseUFuncWrapper, v: Double) = k.p(v)
}
implicit object denseVectorImpl extends Impl2[PolyDenseUFuncWrapper, DenseVector[Double], DenseVector[Double]] {
/* This implementation uses Horner's Algorithm:
* http://en.wikipedia.org/wiki/Horner's_method
*
* Iterating over the polynomial coefficients first and the
* vector coefficients second is about 3x faster than
* the other way around.
*/
def apply(k: PolyDenseUFuncWrapper, v: DenseVector[Double]) = {
val coeffs: Array[Double] = k.p.coeffs
var i = coeffs.length - 1
val result = DenseVector.fill[Double](v.size, coeffs(i))
while (i > 0) {
i -= 1
val c = coeffs(i)
cforRange(0 until result.size)(j => {
result(j) = result(j) * v(j) + c
})
}
result
}
}
implicit object denseMatrixImpl extends Impl2[PolyDenseUFuncWrapper, DenseMatrix[Double], DenseMatrix[Double]] {
/* This implementation uses Horner's Algorithm:
* http://en.wikipedia.org/wiki/Horner's_method
*
* Iterating over the polynomial coefficients first and the
* vector coefficients second is about 3x faster than
* the other way around.
*/
def apply(k: PolyDenseUFuncWrapper, v: DenseMatrix[Double]) = {
if (v.rows != v.cols) {
throw new IllegalArgumentException("Can only apply polynomial to square matrix.")
}
val n = v.rows
val coeffs: Array[Double] = k.p.coeffs
var i = coeffs.length - 1
var result = DenseMatrix.eye[Double](n) * coeffs(i)
while (i > 0) {
i -= 1
result = result * v //WILDLY INEFFICIENT, FIGURE OUT IN PLACE MULTIPLY
val c = coeffs(i)
cforRange(0 until n)(i => {
result.update(i, i, result(i, i) + c)
})
}
result
}
}
}
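  // Worked illustration of the Horner scheme used above (added for clarity, not in
  // the original source). For p(x) = 2 + 3x + 4x^2 the dense coefficient array is
  // Array(2.0, 3.0, 4.0); starting from the highest-order coefficient:
  //   result = 4
  //   result = 4 * x + 3
  //   result = (4 * x + 3) * x + 2      // = 4x^2 + 3x + 2 = p(x)
  // With the wrapper below (names assumed from this file's imports), evaluating at
  // x = 1 and x = 2 would give 9 and 24 respectively, e.g.:
  //   val p = spire.math.Polynomial.dense(Array(2.0, 3.0, 4.0))
  //   densePolyval(p, DenseVector(1.0, 2.0))   // DenseVector(9.0, 24.0) expected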
implicit class PolyDenseUFuncWrapper(val p: PolyDense[Double])
extends VariableUFunc[densePolyval.type, PolyDenseUFuncWrapper]
}
| scalanlp/breeze | math/src/main/scala/breeze/polynomial/package.scala | Scala | apache-2.0 | 2,495 |
package de.kaufhof.jsonhomeclient
import org.scalatest.mockito.MockitoSugar
import org.scalatest.{BeforeAndAfterAll, FunSpec, Matchers}
abstract class UnitSpec extends FunSpec with Matchers with BeforeAndAfterAll with MockitoSugar | Galeria-Kaufhof/jsonhomeclient | src/test/scala/de/kaufhof/jsonhomeclient/UnitSpec.scala | Scala | apache-2.0 | 232 |
package com.rafkind.paintown.animator
import javax.swing._
import java.awt.event._
import java.awt.Color
import java.io.File
import javax.swing.event.ChangeListener
import javax.swing.event.ChangeEvent
import javax.swing.event.DocumentListener
import javax.swing.event.DocumentEvent
import com.rafkind.paintown.Undo
import com.rafkind.paintown.MaskedImage
import org.swixml.SwingEngine;
object Tools{
def makeBackgroundTool(character:AnimatedObject, area:DrawArea):JPanel = {
var panel = new JPanel()
val color = new JColorChooser(area.backgroundColor());
color.setPreviewPanel(new JPanel());
panel.add(color);
color.getSelectionModel().addChangeListener(new ChangeListener(){
val self = this
def stateChanged(change:ChangeEvent){
val old = character.getDrawProperties().getBackgroundColor()
character.getDrawProperties().setBackgroundColor(color.getSelectionModel().getSelectedColor())
area.repaint()
Undo.addUndo("Set color to " + old, () => {
color.getSelectionModel().removeChangeListener(self)
character.getDrawProperties().setBackgroundColor(old)
color.getSelectionModel().setSelectedColor(old)
area.repaint()
color.getSelectionModel().addChangeListener(self)
})
}
});
character.getDrawProperties().addListener(new DrawProperties.Listener(){
override def updateBackgroundColor(newColor:Color){
color.setColor(newColor);
area.repaint();
}
});
panel
}
def makeGridTool(area:DrawArea):JPanel = {
val context = new SwingEngine("animator/animation-tools.xml")
val guide = context.find("guide").asInstanceOf[JSlider];
guide.setValue(area.getGuideSize());
guide.addChangeListener(new ChangeListener(){
def stateChanged(change:ChangeEvent){
area.setGuideSize(guide.getValue());
area.repaint();
}
});
context.find("grid").asInstanceOf[JPanel]
}
def makeOverlayImageTool(parent:JPanel, area:DrawArea):JPanel = {
val context = new SwingEngine("animator/animation-tools.xml")
val enableButton = context.find("overlay:enable").asInstanceOf[JCheckBox]
var lastFile:String = ""
def update(path:String){
lastFile = path
if (enableButton.isSelected()){
try{
area.setOverlayImage(MaskedImage.load(path))
} catch {
case e:Exception => {
area.setOverlayImage(null)
}
}
} else {
area.setOverlayImage(null)
}
area.repaint()
}
enableButton.addActionListener(new AbstractAction(){
def actionPerformed(event:ActionEvent){
if (enableButton.isSelected()){
enableButton.setText("Enabled")
} else {
enableButton.setText("Disabled")
}
update(lastFile)
}
})
val filename = context.find("overlay:file").asInstanceOf[JTextField]
val choose = context.find("overlay:choose").asInstanceOf[JButton]
choose.addActionListener(new AbstractAction(){
def actionPerformed(event:ActionEvent){
val file = new JFileChooser()
val value = file.showOpenDialog(parent)
if (value == JFileChooser.APPROVE_OPTION){
val selected = file.getSelectedFile()
filename.setText(selected.getPath())
update(selected.getPath())
}
}
})
filename.addActionListener(new AbstractAction(){
def actionPerformed(event:ActionEvent){
update(filename.getText())
}
})
val rotationText = context.find("overlay:rotation-text").asInstanceOf[JLabel]
rotationText.setText("Rotation: 0")
val rotation = context.find("overlay:rotation").asInstanceOf[JSlider]
rotation.setValue(0)
rotation.addChangeListener(new ChangeListener(){
def stateChanged(change:ChangeEvent){
rotationText.setText("Rotation: " + rotation.getValue().asInstanceOf[Int])
area.setOverlayImageRotation(rotation.getValue().asInstanceOf[Int])
area.repaint();
}
})
val flipx = context.find("overlay:flip-x").asInstanceOf[JCheckBox]
val flipy = context.find("overlay:flip-y").asInstanceOf[JCheckBox]
flipx.addActionListener(new AbstractAction(){
def actionPerformed(event:ActionEvent){
area.setOverlayImageFlipX(flipx.isSelected());
area.repaint();
}
})
flipy.addActionListener(new AbstractAction(){
def actionPerformed(event:ActionEvent){
area.setOverlayImageFlipY(flipy.isSelected());
area.repaint();
}
})
val relativeOffset = context.find("overlay:relative").asInstanceOf[JCheckBox]
relativeOffset.addActionListener(new AbstractAction(){
def actionPerformed(event:ActionEvent){
area.setOverlayRelativeOffset(relativeOffset.isSelected())
area.repaint()
}
})
val offsetx = context.find("overlay:x").asInstanceOf[JSpinner]
val offsety = context.find("overlay:y").asInstanceOf[JSpinner]
offsetx.setValue(new Integer(area.getOverlayImageOffsetX()))
offsetx.addChangeListener(new ChangeListener(){
def stateChanged(event:ChangeEvent){
area.setOverlayImageOffsetX(offsetx.getValue().asInstanceOf[Integer].intValue())
area.repaint()
}
})
offsety.setValue(new Integer(area.getOverlayImageOffsetY()))
offsety.addChangeListener(new ChangeListener(){
def stateChanged(event:ChangeEvent){
area.setOverlayImageOffsetY(offsety.getValue().asInstanceOf[Integer].intValue())
area.repaint()
}
})
val alphaText = context.find("overlay:alpha-text").asInstanceOf[JLabel]
val alpha = context.find("overlay:alpha").asInstanceOf[JSlider]
alpha.setValue((area.getOverlayImageAlpha() * alpha.getMaximum()).toInt);
alphaText.setText("Transparency " + area.getOverlayImageAlpha());
alpha.addChangeListener(new ChangeListener(){
def stateChanged(change:ChangeEvent){
area.setOverlayImageAlpha(alpha.getValue().asInstanceOf[Double].doubleValue() /
alpha.getMaximum().asInstanceOf[Double].doubleValue())
alphaText.setText("Transparency " + area.getOverlayImageAlpha())
area.repaint();
}
})
val front = context.find("overlay:front").asInstanceOf[JRadioButton];
val back = context.find("overlay:back").asInstanceOf[JRadioButton];
front.setActionCommand("front");
back.setActionCommand("back");
val change = new AbstractAction(){
def actionPerformed(event:ActionEvent){
if (event.getActionCommand().equals("front")){
area.setOverlayImageFront();
} else {
area.setOverlayImageBehind();
}
area.repaint();
}
}
front.addActionListener(change);
back.addActionListener(change);
context.find("overlay-image").asInstanceOf[JPanel]
}
}
| boyjimeking/paintown | editor/src/com/rafkind/paintown/animator/tools.scala | Scala | bsd-3-clause | 6,902 |
package net.white_azalea.datas.arguments
import java.io.File
/**
* Application argument parameter.
*
* @param template Result template file path.
* @param javaDocXml JavaDoc XML file path.
* @param junitResultDir JUnit test results XML dir path.
*/
case class Config(
template: File,
javaDocXml: File,
junitResultDir: File
) | Sunao-Yoshii/JUnitDocMarge | src/main/scala/net/white_azalea/datas/arguments/Config.scala | Scala | apache-2.0 | 351 |
package models
case class CategoryTree()
| mmx900/PlayCode-Wiki | app/models/CategoryTree.scala | Scala | apache-2.0 | 42 |
package franka
package lang
import scala.language.implicitConversions
abstract class Ast {
type Data
sealed trait Exp
case class Literal (data : Data) extends Exp
implicit def dataToLiteral (data : Data) : Exp = Literal (data)
case class Ident (name : Name) extends Exp
implicit def nameToIdent (name : Name) : Ident = Ident (name)
implicit def symbolToIdent (name : Symbol) : Ident = Ident (name)
case class Apply (fun : Exp, arg : Exp) extends Exp
case class Lambda (argName : Name, body : Exp) extends Exp
/** A [[Select]] node allows you to select a specific field of a [[Types.Record]] value.
*
* @param target the target expression
* @param tag the name of the field to select
*/
case class Select (target : Exp, tag : Name) extends Exp
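    // Illustrative example (added, not in the original source): selecting the field
    // `name` of a record bound to the identifier `user` would be written as
    //   Select(Ident('user), 'name)
    // assuming the usual Symbol-to-Name conversion; the Select.Names helper below
    // abbreviates the same expression to sel('user, 'name).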
case class Let (binding : (Name, Exp), in : Exp) extends Exp
object Apply {
object Curry {
def apply (fun : Exp, args : Exp*) : Exp =
args match {
case Seq (arg) =>
Apply (fun, arg)
case other =>
Apply (apply (fun, other.init : _*), other.last)
}
def unapplySeq (exp : Exp) : Option [Seq [Exp]] =
Some (exp) collect {
case Apply (Curry (fun, init @ _*), last) =>
fun +: (init :+ last)
case Apply (fun, last) =>
Seq (fun, last)
}
}
}
def app (fun: Exp, args: Exp*) : Exp =
Apply.Curry (fun, args : _*)
object Lambda {
object Curry {
def apply (args : Seq[Name], body : Exp) : Exp =
args match {
case Seq (arg) =>
Lambda (arg, body)
case args =>
Lambda (args.head, apply (args.tail, body))
}
def unapply (exp : Exp) : Option[(Seq[Name], Exp)] =
Some (exp) collect {
case Lambda (argHead, Curry (argTail, body)) =>
(argHead +: argTail, body)
case Lambda (arg, body) =>
(Seq (arg), body)
}
}
}
def lam (args : Name*)(body : Exp) : Exp =
Lambda.Curry (args, body)
object Select {
object Names {
def apply (path : Name*) : Exp =
path match {
case Seq (singleName) =>
Ident (singleName)
case other =>
Select (apply (other.init : _*), other.last)
}
}
object NameSeq {
def apply (path : Seq [Name]) : Exp =
path match {
case Seq (singleName) =>
Ident (singleName)
case other =>
Select (apply (other.init), other.last)
}
def unapply (exp : Exp) : Option [Seq [Name]] =
Some (exp) collect {
case Ident (singleName) =>
Seq (singleName)
case Select (NameSeq (init), last) =>
init :+ last
}
}
}
def sel (path : Name*) : Exp =
Select.Names (path : _*)
object Let {
object Curry {
def apply (bindings : Seq[(Name, Exp)], in : Exp) : Exp =
bindings match {
case Seq (binding) =>
Let (binding, in)
case other =>
Let (other.head, apply (other.tail, in))
}
def unapply (exp : Exp) : Option [(Seq[(Name, Exp)], Exp)] =
Some (exp) collect {
case Let (head, Curry (tail, in)) =>
(head +: tail, in)
case Let (binding, in) =>
(Seq (binding), in)
}
}
}
def let (bindings : (Name, Exp)*) (in : Exp) : Exp =
Let.Curry (bindings, in)
}
| bylt/franka | src/main/scala/franka/lang/Ast.scala | Scala | mit | 4,212 |
package org.yotchang4s.ch2.response
import java.io._
import scala.collection._
import org.yotchang4s.scala.Loan
import org.yotchang4s.scala.Loan._
import org.yotchang4s.ch2._
import org.yotchang4s.http.Http
import org.yotchang4s.ch2.thread.ThreadId
import java.util.regex.Pattern
private[ch2] trait ResponseComponentImpl extends ResponseComponent {
class ResponseRepositoryImpl extends ResponseRepository {
private val responseRegexPtn = Pattern.compile("""^(.*)<>(.*)<>(.*?)( ID:(.+?))?( BE:(.+?))?<> (.*)<>(.*)$""")
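    // Shape of a dat line this pattern is meant to match (made-up example values,
    // added for illustration): name<>mail<>date [ID:...] [BE:...]<> body<>subject
    //   "Anonymous<>sage<>2017/03/20 12:34 ID:AbCdEfGh<> Hello world<>Sample thread"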
def findResponses(threadId: ThreadId)(implicit config: Ch2Config): Either[Ch2Exception, (String, List[Response])] = {
try {
val http = new Http
config.userAgent.foreach(http.userAgent(_))
val boardId = threadId.value._1
val threadKey = threadId.value._2
val url = "http://" + boardId.host + "/" + boardId.name + "/dat/" + threadKey + ".dat"
val response = http.get(url)
var number = 1
var subject: String = ""
val responses = mutable.ListBuffer[Response]()
for (reader <- Loan(new BufferedReader(response.asReader("MS932")))) {
Iterator.continually(reader.readLine).takeWhile(_ != null).foreach { line =>
val responseMatcher = responseRegexPtn.matcher(line)
if (responseMatcher.find) {
val name = responseMatcher.group(1)
val emailAddress = responseMatcher.group(2)
val date = responseMatcher.group(3)
val id = Option(responseMatcher.group(5))
val be = Option(responseMatcher.group(7))
val body = responseMatcher.group(8)
if (number == 1) { subject = responseMatcher.group(9) }
val emailAddressOpt = if (emailAddress.isEmpty) None else Some(emailAddress)
responses += new ResponseImpl(ResponseId(threadId, number), name, emailAddressOpt, date, id, be, body)
number = number + 1
}
}
}
Right((subject, responses.toList))
} catch {
case e: IOException => Left(new Ch2Exception(Ch2Exception.IOError, e))
case e: Exception => Left(new Ch2Exception(Ch2Exception.UnknownError, e))
}
}
}
}
private[ch2] class ResponseImpl(
val identity: ResponseId,
val name: String,
val emailAddress: Option[String],
val date: String,
val id: Option[String],
val be: Option[String],
val body: String) extends Response {
}
| yotchang4s/gikolet | src/org/yotchang4s/ch2/response/ResponseComponentImpl.scala | Scala | bsd-3-clause | 2,484 |
package progscala2.typesystem.valuetypes
trait Logger {
def log(message: String): Unit
}
class ConsoleLogger extends Logger {
def log(message: String): Unit = println(s"log: $message")
}
trait Service {
  // Declare an abstract type alias upper-bounded by Logger
type Log <: Logger
val logger: Log
}
class Service1 extends Service {
type Log = ConsoleLogger
val logger: ConsoleLogger = new ConsoleLogger
} | younggi/books | programming_scala/progscala2/src/main/scala/progscala2/typesystem/valuetypes/type-projection.scala | Scala | mit | 403 |
package sds.util
import collection.mutable.{
ArrayBuffer => Buffer,
HashMap => Hash
}
import sds.Classfile
import sds.classfile.{MemberInfo => Member}
import sds.classfile.attribute.{
AttributeInfo, BootstrapMethods, Code, InnerClasses,
LineNumberTable, LocalVariable, RuntimeAnnotations,
RuntimeParameterAnnotations, RuntimeTypeAnnotations,
TypeAnnotation, StackMapTable
}
import sds.classfile.attribute.AnnotationGenerator.generate
import sds.classfile.bytecode.{OpcodeInfo => Opcode}
import sds.classfile.constant_pool.ConstantInfo
import sds.classfile.constant_pool.Utf8ValueExtractor.extract
import sds.util.AccessFlag.get
class ClassfilePrinter(cf: Classfile) {
private val pool: Array[ConstantInfo] = cf.pool
def _print(): Unit = {
println("<<< Magic Number >>>")
println(s" ${Integer.toHexString(cf.magic)}")
println("<<< Version >>>")
println(s" ${cf.major}.${cf.minor}")
printPool()
printClass()
printField()
printMethod()
printAttribute(" ", cf.attributes, "class")
}
private def printPool(): Unit = {
println("<<< Constant Pool >>>")
pool.indices.foreach((i) => println(s" [${i + 1}]: ${pool(i).toString()}"))
}
private def printClass(): Unit = {
println("<<< Class >>>")
val thisClass: String = extract(cf.thisClass, pool)
val superClass: String = if(check(cf.superClass)) s" extends ${extract(cf.superClass, pool)}" else " "
val interface: String = if(cf.interfaces.length > 0) {
s" implements ${cf.interfaces.map(extract(_, pool)).mkString(", ")}"
} else {
""
}
println(s" ${get(cf.access, "class")}$thisClass$superClass$interface")
}
private def printField(): Unit = {
println(" <<< Field >>>")
val fields: Array[Member] = cf.fields
fields.indices.foreach((i) => {
println(s" [${i + 1}]: ${fields(i).toString()}")
printAttribute(" ", fields(i).attributes, "field")
})
}
private def printMethod(): Unit = {
println(" <<< Method >>>")
val methods: Array[Member] = cf.methods
methods.indices.foreach((i) => {
println(s" [${i + 1}]: ${methods(i).toString()}")
printAttribute(" ", methods(i).attributes, "method")
})
}
private def printAttribute(indent: String, attr: Array[AttributeInfo], _type: String): Unit = {
if(attr.length == 0) return
println(s"$indent<<< Attribute in ${_type} >>>")
attr.indices.foreach((i) => {
attr(i) match {
case boot: BootstrapMethods =>
println(s"$indent [${i + 1} in ${_type}]: BootstrapMethods")
val bsm: Array[(String, Array[String])] = boot.bsm
bsm.indices.foreach((i: Int) => {
val t: (String, Array[String]) = bsm(i)
println(s"$indent ($i): ")
println(s"$indent bsm_ref : ${t._1}")
println(s"$indent bsm_args: ${t._2.mkString(", ")}")
})
case code: Code =>
println(s"$indent [${i + 1} in ${_type}]: Code")
println(s"$indent max_stack: ${code.maxStack}, max_locals: ${code.maxLocals}")
val opcodes: Array[Opcode] = code.opcodes
opcodes.indices.foreach((i: Int) => println(s"$indent ${opcodes(i)}"))
val table: Array[(Array[Int], String)] = code.exTable
if(table.length > 0) {
println(s"$indent Exception Table:")
table.indices.foreach((i: Int) => {
val t: Array[Int] = table(i)._1
val target: String = table(i)._2
println(s"$indent [$i]: ${t(0)}-${t(1)}, ${t(2)}: $target")
})
}
printAttribute(s"$indent ", code.attributes, "Code")
case ic: InnerClasses =>
println(s"$indent [${i + 1} in ${_type}]: InnerClasses")
val inner: Array[Array[String]] = ic.classes
inner.indices.foreach((i: Int) => {
val first: String = s"$indent (${i + 1}): "
val second: String = s"${inner(i)(3)}${inner(i)(0)} ${inner(i)(2)}"
val third: String = if(inner(i)(1).length > 0) s" {in ${inner(i)(1)}}" else ""
println(s"$first$second$third")
})
case line: LineNumberTable =>
println(s"$indent [${i + 1} in ${_type}]: LineNumberTable")
val lines: Array[String] = line.getTableStr()
lines.indices.foreach((i: Int) => println(s"$indent [$i]: ${lines(i)}"))
case local: LocalVariable =>
println(s"$indent [${i + 1} in ${_type}]: LocalVariable")
val name: Array[Array[String]] = local.getNameTable()
val table: Array[Array[Int]] = local.getTable()
name.indices.foreach((i: Int) => {
val first: String = s"$indent [$i]: "
val second: String = s"${name(i)(1)} ${name(i)(0)}"
val third: String = s" {${table(i)(0)}-${table(i)(1)}}"
println(s"$first$second$third")
})
case ra: RuntimeAnnotations =>
println(s"$indent [${i + 1} in ${_type}]: RuntimeAnnotation")
ra.annotations.foreach((a: String) => println(s"$indent $a"))
case rpa: RuntimeParameterAnnotations =>
println(s"$indent [${i + 1} in ${_type}]: RuntimeParameterAnnotation")
rpa.annotations.foreach((a: Array[String]) => println(s"$indent ${a.mkString(", ")}"))
case rta: RuntimeTypeAnnotations =>
println(s"$indent [${i + 1} in ${_type}]: RuntimeTypeAnnotation")
rta.annotations.foreach((_type: TypeAnnotation) => {
print(s"$indent ${generate(_type, pool)} (${_type.target})")
println(s", (${_type.path.map(_.toString()).mkString("_")})")
})
case stack: StackMapTable =>
println(s"$indent [${i + 1} in ${_type}]: StackMapTable")
stack.entries.foreach((entry: ((Int, Int), Hash[String, Buffer[String]])) => {
val key: (Int, Int) = entry._1
println(s"$indent ${getFrame(key._1)} - tag:${key._1}, offset:${key._2}")
println(s"$indent stack - ${entry._2("stack").mkString("[", ", ", "]")}")
println(s"$indent locals - ${entry._2("local").mkString("[", ", ", "]")}")
})
case _ => println(s"$indent [${i + 1} in ${_type}]: ${attr(i).toString()}")
}
})
}
private def getFrame(tag: Int): String = {
if((0 to 63).contains(tag)) return "SameFrame"
if((64 to 127).contains(tag)) return "SameLocals1StackItemFrame"
if(tag == 247) return "SameLocals1StackItemFrameExtended"
if((248 to 250).contains(tag)) return "ChopFrame"
if(tag == 251) return "SameFrameExtended"
if((252 to 254).contains(tag)) return "AppendFrame"
if(tag == 255) return "FullFrame"
throw new RuntimeException(s"unknown tag($tag)")
}
private def check(index: Int): Boolean = pool.indices.contains(index)
} | g1144146/sds_for_scala | src/main/scala/sds/util/ClassfilePrinter.scala | Scala | apache-2.0 | 7,936 |
package com.github.ldaniels528.trifecta.ui.models
import play.api.libs.json.{Json, Reads, Writes}
case class ConsumerOffsetJs(groupId: String,
topic: String,
partition: Int,
offset: Long,
topicStartOffset: Option[Long],
topicEndOffset: Option[Long],
messages: Option[Long],
lastModifiedTime: Option[Long])
object ConsumerOffsetJs {
implicit val ConsumerOffsetReads: Reads[ConsumerOffsetJs] = Json.reads[ConsumerOffsetJs]
implicit val ConsumerOffsetWrites: Writes[ConsumerOffsetJs] = Json.writes[ConsumerOffsetJs]
}
| ldaniels528/trifecta | app-play/app/com/github/ldaniels528/trifecta/ui/models/ConsumerOffsetJs.scala | Scala | apache-2.0 | 720 |
package spinoco.protocol.http.header
import spinoco.protocol.http.header.value.{EntityTagRange, HeaderCodecDefinition}
/**
* RFC 7231 section 3.2
*
* @see https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/If-None-Match
*/
sealed case class `If-None-Match`(value: EntityTagRange) extends DefaultHeader
object `If-None-Match` { val codec =
HeaderCodecDefinition[`If-None-Match`](EntityTagRange.codec.xmap (`If-None-Match`.apply, _.value))
}
| Spinoco/protocol | http/src/main/scala/spinoco/protocol/http/header/If-None-Match.scala | Scala | mit | 467 |
package com.github.mrpowers.spark.daria.sql
import utest._
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
object DataFrameSchemaCheckerTest extends TestSuite with SparkSessionTestWrapper {
val tests = Tests {
'missingStructFields - {
"returns the StructFields missing from a DataFrame" - {
val sourceData =
List(
Row(
1,
1
),
Row(
-8,
8
),
Row(
-5,
5
),
Row(
null,
null
)
)
val sourceSchema = List(
StructField(
"num1",
IntegerType,
true
),
StructField(
"num2",
IntegerType,
true
)
)
val sourceDF =
spark.createDataFrame(
spark.sparkContext.parallelize(sourceData),
StructType(sourceSchema)
)
val requiredSchema = StructType(
List(
StructField(
"num1",
IntegerType,
true
),
StructField(
"num2",
IntegerType,
true
),
StructField(
"name",
StringType,
true
)
)
)
val c = new DataFrameSchemaChecker(
sourceDF,
requiredSchema
)
assert(
c.missingStructFields == List(
StructField(
"name",
StringType,
true
)
)
)
}
"returns the empty list if StructFields aren't missing" - {
val sourceData =
List(
Row(
1,
1
),
Row(
-8,
8
),
Row(
-5,
5
),
Row(
null,
null
)
)
val sourceSchema = List(
StructField(
"num1",
IntegerType,
true
),
StructField(
"num2",
IntegerType,
true
)
)
val sourceDF =
spark.createDataFrame(
spark.sparkContext.parallelize(sourceData),
StructType(sourceSchema)
)
val requiredSchema =
StructType(
List(
StructField(
"num1",
IntegerType,
true
)
)
)
val c = new DataFrameSchemaChecker(
sourceDF,
requiredSchema
)
assert(c.missingStructFields == List())
}
}
'missingColumnsMessage - {
"provides a descriptive message of the StructFields that are missing" - {
val sourceData =
List(
Row(
1,
1
),
Row(
-8,
8
),
Row(
-5,
5
),
Row(
null,
null
)
)
val sourceSchema = List(
StructField(
"num1",
IntegerType,
true
),
StructField(
"num2",
IntegerType,
true
)
)
val sourceDF =
spark.createDataFrame(
spark.sparkContext.parallelize(sourceData),
StructType(sourceSchema)
)
val requiredSchema = StructType(
List(
StructField(
"num1",
IntegerType,
true
),
StructField(
"num2",
IntegerType,
true
),
StructField(
"name",
StringType,
true
)
)
)
val c = new DataFrameSchemaChecker(
sourceDF,
requiredSchema
)
val expected =
"The [StructField(name,StringType,true)] StructFields are not included in the DataFrame with the following StructFields [StructType(StructField(num1,IntegerType,true), StructField(num2,IntegerType,true))]"
assert(c.missingStructFieldsMessage() == expected)
}
}
'validateSchema - {
"throws an exception if a required StructField is missing" - {
val sourceData =
List(
Row(
1,
1
),
Row(
-8,
8
),
Row(
-5,
5
),
Row(
null,
null
)
)
val sourceSchema = List(
StructField(
"num1",
IntegerType,
true
),
StructField(
"num2",
IntegerType,
true
)
)
val sourceDF =
spark.createDataFrame(
spark.sparkContext.parallelize(sourceData),
StructType(sourceSchema)
)
val requiredSchema = StructType(
List(
StructField(
"num1",
IntegerType,
true
),
StructField(
"num2",
IntegerType,
true
),
StructField(
"name",
StringType,
true
)
)
)
val c = new DataFrameSchemaChecker(
sourceDF,
requiredSchema
)
val e = intercept[InvalidDataFrameSchemaException] {
c.validateSchema()
}
}
"does nothing if there aren't any StructFields missing" - {
val sourceData =
List(
Row(
1,
1
),
Row(
-8,
8
),
Row(
-5,
5
),
Row(
null,
null
)
)
val sourceSchema = List(
StructField(
"num1",
IntegerType,
true
),
StructField(
"num2",
IntegerType,
true
)
)
val sourceDF =
spark.createDataFrame(
spark.sparkContext.parallelize(sourceData),
StructType(sourceSchema)
)
val requiredSchema =
StructType(
List(
StructField(
"num1",
IntegerType,
true
)
)
)
val c = new DataFrameSchemaChecker(
sourceDF,
requiredSchema
)
c.validateSchema()
}
}
}
}
| MrPowers/spark-daria | src/test/scala/com/github/mrpowers/spark/daria/sql/DataFrameSchemaCheckerTest.scala | Scala | mit | 7,199 |
package main.scala
import java.util
import org.apache.spark.sql.simba.SimbaSession
import ca.pfv.spmf.algorithms.frequentpatterns.fpgrowth.AlgoFPMax
import org.apache.spark.sql.Row
import java.util.Calendar
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.simba.index.RTreeType
/**
* Created by and on 3/20/17.
*/
object PBFE {
case class PointItem(id: Int, x: Double, y: Double)
case class Pair(id1: Int, id2: Int, x1: Double, y1: Double, x2: Double, y2: Double)
// var master: String = "local[*]"
// var epsilon: Double = 100.0
// var mu: Integer = 3
// var filename: String = "/opt/Datasets/Beijing/P10K.csv"
// var logs: String = "ERROR"
var X = 0.0
var Y = 0.0
var D2 = 0.0
var root = 0.0
var h1 = 0.0
var h2 = 0.0
var k1 = 0.0
var k2 = 0.0
def calculateDisks(pair: Row, r2: Double): Pair = {
X = pair.getDouble(1) - pair.getDouble(4)
Y = pair.getDouble(2) - pair.getDouble(5)
D2 = math.pow(X, 2) + math.pow(Y, 2)
if (D2 == 0) throw new UnsupportedOperationException("Identical points...")
root = math.pow(math.abs(4.0 * (r2 / D2) - 1.0), 0.5)
h1 = ((X + Y * root) / 2) + pair.getDouble(4)
h2 = ((X - Y * root) / 2) + pair.getDouble(4)
k1 = ((Y - X * root) / 2) + pair.getDouble(5)
k2 = ((Y + X * root) / 2) + pair.getDouble(5)
Pair(pair.getInt(0), pair.getInt(3), h1, k1, h2, k2)
}
def main(args: Array[String]): Unit = {
val master = args(0)
val filename = args(1)
val epsilon = args(2).toDouble
val mu = args(3).toInt
val logs = args(4)
val r2: Double = math.pow(epsilon / 2, 2)
val simbaSession = SimbaSession
.builder()
.master(master)
.appName("PBFE")
.config("simba.index.partitions", "128")
.getOrCreate()
import simbaSession.simbaImplicits._
import simbaSession.implicits._
import scala.collection.JavaConversions._
val sc = simbaSession.sparkContext
sc.setLogLevel(logs)
    val tag = filename.substring(filename.lastIndexOf("/") + 1).split("\\.")(0).substring(1)
val p1 = sc.textFile(filename)
.map(_.split(","))
.map(p => PointItem(p(0).trim.toInt, p(1).trim.toDouble, p(2).trim.toDouble))
.toDF
p1.index(RTreeType, "pointsRT", Array("x", "y"))
val p2 = p1.toDF("id2", "x2", "y2")
p1.count()
var time1 = System.currentTimeMillis()
val pairs = p1.distanceJoin(p2, Array("x", "y"), Array("x2", "y2"), epsilon)
val disks = pairs.rdd.filter((x: Row) => x.getInt(0) > x.getInt(3)).map((x: Row) => calculateDisks(x, r2))
val ndisks = disks.count()
var time2 = System.currentTimeMillis()
val diskGenerationTime = (time2 - time1) / 1000.0
//disks.map(())
val centers1 = disks.toDF.select("x1", "y1")
val centers2 = disks.toDF.select("x2", "y2")
val centers = centers1.union(centers2)
centers.index(RTreeType, "centersRT", Array("x1", "y1"))
val membersRDD = centers.distanceJoin(p1, Array("x1", "y1"), Array("x", "y"), (epsilon / 2) + 0.01)
.select("x1", "y1", "id")
// TODO: run the group by here...
.show()
//.map { d => ( (d(0).asInstanceOf[Double], d(1).asInstanceOf[Double]) , d(2).asInstanceOf[Integer] ) }
/* val members = membersRDD.groupByKey()
.map{ m => ( m._1._1, m._1._2, m._2.toArray[Integer] ) }
.toDF("x", "y", "IDs")
.index(RTreeType, "membersRT", Array("x", "y"))
val temp = members.rdd.mapPartitionsWithIndex{ (index, partition) =>
System.out.println(s"$index : ")
partition.foreach(println)
partition.toIterator
//val b = {
// partition.map { t => new util.ArrayList(t.map(_.asInstanceOf[Integer])) }.toBuffer
//}
//val ts = new util.ArrayList(b)
//val fpmax = new AlgoFPMax
//val itemsets = fpmax.runAlgorithm(ts, 1)
//itemsets.getItemsets(mu).iterator()
}
temp.foreach(println)
temp.count()*/
/**************************************
* Begin of tests...
*************************************/
/**************************************
* End of tests...
*************************************/
/*
val arrList = new ArrayList[Integer]()
x.split(" ").map(y => arrList.add(y.toInt))
Collections.sort(arrList)
ts.add(arrList)
*/
/*
val minsup = 1
time1 = System.currentTimeMillis()
val dataset = new Dataset(ts)
val lcm = new AlgoLCM
var itemsets = lcm.runAlgorithm(minsup, dataset)
//lcm.printStats
//itemsets.printItemsets
time2 = System.currentTimeMillis()
val lcmTime = (time2 - time1) / 1000.0
val lcmNItemsets = itemsets.countItemsets(3)
*/
/* time1 = System.currentTimeMillis()
val fpmax = new AlgoFPMax
val itemsets = fpmax.runAlgorithm(new util.ArrayList(temp.toBuffer) , 1)
//fpmax.printStats
//itemsets.printItemsets
time2 = System.currentTimeMillis()
val fpMaxTime = (time2 - time1) / 1000.0
val fpMaxNItemsets = itemsets.countItemsets(3)
println("PBFE3,"
+ epsilon + ","
+ tag + ","
+ 2 * ndisks + ","
+ diskGenerationTime + ","
+ fpMaxTime + ","
+ fpMaxNItemsets + ","
+ Calendar.getInstance().getTime)*/
sc.stop()
}
}
| aocalderon/PhD | Y2Q3/PBFE4/src/main/scala/PBFE.scala | Scala | lgpl-3.0 | 5,274 |
package com.sksamuel.elastic4s
import com.sksamuel.elastic4s.ElasticDsl._
import org.scalatest.FlatSpec
import org.scalatest.mock.MockitoSugar
/** @author Stephen Samuel */
class DeleteTest extends FlatSpec with MockitoSugar with ElasticSugar {
client.execute(
bulk(
index into "places/cities" id 99 fields (
"name" -> "London",
"country" -> "UK"
),
index into "places/cities" id 44 fields (
"name" -> "Philadelphia",
"country" -> "USA"
),
index into "places/cities" id 615 fields (
"name" -> "Middlesbrough",
"country" -> "UK",
"continent" -> "Europe"
)
)
).await
refresh("places")
blockUntilCount(3, "places")
"an index" should "do nothing when deleting a document where the id does not exist using where" in {
client.execute {
delete from "places" -> "cities" where "name" -> "sammy"
}
refresh("places")
Thread.sleep(1000)
blockUntilCount(3, "places")
}
it should "do nothing when deleting a document where the id does not exist using id" in {
client.execute {
delete id 141212 from "places" -> "cities"
}
refresh("places")
Thread.sleep(1000)
blockUntilCount(3, "places")
}
it should "do nothing when deleting a document where the query returns no results" in {
client.execute {
delete from "places" types "cities" where "paris"
}
refresh("places")
Thread.sleep(1000)
blockUntilCount(3, "places")
}
it should "remove a document when deleting by id" in {
client.sync.execute {
delete id 99 from "places/cities"
}
refresh("places")
blockUntilCount(2, "places")
}
it should "remove a document when deleting by query" in {
client.execute {
delete from "places" types "cities" where matchQuery("continent", "Europe")
}.await
refresh("places")
blockUntilCount(1, "places")
}
}
| maxcom/elastic4s | src/test/scala/com/sksamuel/elastic4s/DeleteTest.scala | Scala | apache-2.0 | 1,933 |
/* *\\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2014, Gary Keorkunian **
** **
\\* */
package squants
import org.scalatest.matchers.{ MatchResult, Matcher }
trait CustomMatchers {
class QuantityWithinRangeMatcher[A <: Quantity[A]](range: QuantityRange[A]) extends Matcher[A] {
def apply(left: A) = {
implicit val r = range
MatchResult(
r.contains(left),
s"$left was not within $range",
s"$left was within $range")
}
}
def beWithin[A <: Quantity[A]](range: QuantityRange[A]) =
new QuantityWithinRangeMatcher(range)
class QuantityApproximatelyEqualTo[A <: Quantity[A]](expectedValue: A)(implicit tolerance: A)
extends Matcher[A] {
def apply(left: A) = {
MatchResult(
left approx expectedValue,
s"$left was not approximately equal to $expectedValue ($tolerance)",
s"$left was approximately equal to $expectedValue ($tolerance)")
}
}
def beApproximately[A <: Quantity[A]](expectedValue: A)(implicit tolerance: A) =
new QuantityApproximatelyEqualTo(expectedValue)(tolerance)
class DoubleApproximatelyEqualTo(expectedValue: Double)(implicit tolerance: Double)
extends Matcher[Double] {
def apply(left: Double) = {
MatchResult(
(expectedValue - tolerance) <= left & left <= (expectedValue + tolerance),
s"$left was not approximately equal to $expectedValue ($tolerance)",
s"$left was approximately equal to $expectedValue ($tolerance)")
}
}
def beApproximately(expectedValue: Double)(implicit tolerance: Double) =
new DoubleApproximatelyEqualTo(expectedValue)
}
| non/squants | src/test/scala/squants/CustomMatchers.scala | Scala | apache-2.0 | 2,065 |
package com.criteo.dev.cluster.copy
import com.criteo.dev.cluster.Node
/**
* Most of the cases, we create the metadata of the copied tables on the cluster.
*
* In S3 case (shared data/metadata), we save it on S3 itself.
*/
abstract class CreateMetadataAction(conf: Map[String, String], target: Node) {
def apply(tableInfos: TableInfo): Unit
}
| criteo/berilia | src/main/scala/com/criteo/dev/cluster/copy/CreateMetadataAction.scala | Scala | apache-2.0 | 356 |
package threesixty.ProcessingMethods.interpolation
import threesixty.data.metadata.{Resolution, Scaling}
import threesixty.data.{ProcessedData, TaggedDataPoint, InputDataSkeleton}
import threesixty.data.Data.{Identifier}
import threesixty.data.Implicits.timestamp2Long
import threesixty.processor.{ProcessingMixins, SingleProcessingMethod, ProcessingMethodCompanion, ProcessingStep}
import spray.json._
import DefaultJsonProtocol._
import threesixty.visualizer.VisualizationConfig
import threesixty.visualizer.visualizations.barChart.BarChartConfig
import threesixty.visualizer.visualizations.lineChart.LineChartConfig
import threesixty.visualizer.visualizations.pieChart.PieChartConfig
import threesixty.visualizer.visualizations.scatterChart.ScatterChartConfig
object SplineInterpolation extends ProcessingMethodCompanion {
trait Mixin extends ProcessingMixins {
abstract override def processingInfos: Map[String, ProcessingMethodCompanion] =
super.processingInfos + ("splineinterpolation" -> SplineInterpolation)
}
def name = "Spline Interpolation"
def fromString: (String) => ProcessingStep = { s => apply(s).asProcessingStep }
def usage = """ The spline interpolation is currently out of order """
def apply(jsonString: String): SplineInterpolation = {
implicit val splineInterpolationFormat =
jsonFormat({ idm: Map[Identifier, Identifier] => SplineInterpolation.apply(idm) }, "idMapping")
jsonString.parseJson.convertTo[SplineInterpolation]
}
def default(idMapping: Map[Identifier, Identifier]): ProcessingStep =
SplineInterpolation(idMapping).asProcessingStep
def computeDegreeOfFit(inputData: InputDataSkeleton): Double = {
var temp = 0.0
val meta = inputData.metadata
if (meta.scaling == Scaling.Ordinal) {
temp += 0.4
}
if (meta.size >= 5) {
temp += 0.2
}
if (meta.size >= 50) {
temp += 0.2 //overall 0.4 because >= 50 includes >= 5
}
if (meta.resolution == Resolution.High) {
temp += 0.1
}
if (meta.resolution == Resolution.Middle) {
temp += 0.2
}
temp
}
def computeDegreeOfFit(targetVisualization: VisualizationConfig, inputData: InputDataSkeleton): Double = {
val strategyFactor = computeDegreeOfFit(inputData)
val visFactor = targetVisualization match {
//good
case _:LineChartConfig => 1.0
case _:BarChartConfig => 0.8
//bad
case _:ScatterChartConfig => 0.2
case _:PieChartConfig => 0.1
//default
case _ => 0.5
}
strategyFactor * visFactor
}
}
/**
* Spline interpolator
* Creates the spline interpolated function out of a set of data
*
* @author Jens Woehrle
*/
case class SplineInterpolation(idMapping: Map[Identifier, Identifier])
extends SingleProcessingMethod {
def companion: ProcessingMethodCompanion = SplineInterpolation
/**
     * Creates a new dataset with the ID specified in idMapping.
* Inserts interpolated values along the original ones into
* this new dataset and adds tags to identify interpolated
* and original values.
*
* @param data Data to interpolate
* @return One element Set containing the new dataset
*/
@throws[NoSuchElementException]("if data.id can not be found in idMapping")
def apply(data: ProcessedData): Set[ProcessedData] = {
val odata = data.dataPoints.sortBy(d => timestamp2Long(d.timestamp))
val x = new Array[Long](odata.length)
val y = new Array[Double](odata.length)
for( j <- 0 until odata.length) {
x(j) = timestamp2Long(odata(j).timestamp)
y(j) = odata(j).value.value
}
if (x.length < 3) {
throw new NotImplementedError
}
// Number of intervals. The number of data points is n + 1.
val n = x.length - 1
// Differences between knot points
val h = Array.tabulate(n)(i => x(i+1) - x(i))
var mu: Array[Double] = Array.fill(n)(0)
var z: Array[Double] = Array.fill(n+1)(0)
var i = 1
while (i < n) {
val g = 2.0 * (x(i+1) - x(i-1)) - h(i-1) * mu(i-1)
mu(i) = h(i) / g
z(i) = (3.0 * (y(i+1) * h(i-1) - y(i) * (x(i+1) - x(i-1))+ y(i-1) * h(i)) /
(h(i-1) * h(i)) - h(i-1) * z(i-1)) / g
i += 1
}
// cubic spline coefficients -- b is linear, c quadratic, d is cubic (original y's are constants)
var b: Array[Double] = Array.fill(n)(0)
var c: Array[Double] = Array.fill(n+1)(0)
var d: Array[Double] = Array.fill(n)(0)
var j = n-1
while (j >= 0) {
c(j) = z(j) - mu(j) * c(j + 1)
b(j) = (y(j+1) - y(j)) / h(j) - h(j) * (c(j+1) + 2.0 * c(j)) / 3.0
d(j) = (c(j+1) - c(j)) / (3.0 * h(j))
j -= 1
}
        // By this point we have computed the polynomial coefficients of the splines.
        // Now we start generating the dataset points.
var tp = 0;
//val dataPoints : List[TaggedDataPoint] = new TaggedDataPoint(new Timestamp(x(0)), y(0), data.data(0).tags)
var dataPoints = List[TaggedDataPoint]()
while( tp < n ) {
//dataPoints += new TaggedDataPoint(new Timestamp(x(0)), y(0), data.data(0).tags)
tp += 1
            // insert the others...
}
val newID = idMapping(data.id)
Set(new ProcessedData(newID, dataPoints))
}
}
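// Usage sketch (illustrative only, not part of the original source; the identifiers
// are assumed placeholders). The step maps the input dataset id to a new id and is
// intended to emit interpolated points alongside the originals; note the
// point-generation loop above is still a stub, matching the companion's
// "currently out of order" usage string.
//
//   val step = SplineInterpolation(Map("temperature" -> "temperature-splined")).asProcessingStep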
| elordin/threesixty | src/main/scala/threesixty/ProcessingMethods/interpolation/SplineInterpolation.scala | Scala | mit | 5,742 |
package com.circusoc.simplesite.hire
import org.dbunit.DBTestCase
import org.scalatest.{BeforeAndAfter, FlatSpecLike}
import com.circusoc.simplesite._
import scalikejdbc.ConnectionPool
import java.sql.{DriverManager, Connection}
import org.dbunit.database.DatabaseConnection
import org.dbunit.operation.DatabaseOperation
import org.dbunit.dataset.IDataSet
import org.dbunit.dataset.xml.FlatXmlDataSetBuilder
import scala.concurrent.Await
import scala.concurrent.duration.Duration
import org.codemonkey.simplejavamail.{Mailer, Email}
import org.scalatest.Matchers._
import org.scalatest.prop.PropertyChecks
class HireSpec extends DBTestCase with FlatSpecLike with BeforeAndAfter with PropertyChecks {
val config = new PartialConfig({_ => Unit})
def getJDBC(): Connection = {
Class.forName("org.h2.Driver")
val c = DriverManager.getConnection("jdbc:h2:mem:hirespec;DB_CLOSE_DELAY=-1", "sa", "")
c.setAutoCommit(true)
c
}
config.db.setup()
DBSetup.setup()(config)
val conn = new DatabaseConnection(getJDBC())
DatabaseOperation.CLEAN_INSERT.execute(conn, getDataSet())
override def getDataSet: IDataSet = new FlatXmlDataSetBuilder().
build(classOf[HireSpec].
getResourceAsStream("/com/circusoc/simplesite/hire/HireDBSpec.xml"))
it should "send emails and delete their log entries" in {
var sends = 0
def mockSend(e: Email) {
sends += 1
}
implicit val mockConfig = new PartialConfig(mockSend)
val hire = Hire.hire(EmailAddress("[email protected]"), Some(Location("sydney")), List("Fire", "Juggles"))
Await.result(hire, Duration.Inf)
sends should be(1)
Hire.pendingHireQueueSize() should be(0)
}
it should "send emails and delete their log entries with non locations" in {
var sends = 0
def mockSend(e: Email) {
sends += 1
}
implicit val mockConfig = new PartialConfig(mockSend)
val hire = Hire.hire(EmailAddress("[email protected]"), None, List("Fire", "Juggles"))
Await.result(hire, Duration.Inf)
sends should be(1)
Hire.pendingHireQueueSize() should be(0)
}
it should "queue emails if they don't send and then send them" in {
var badSends = 0
def badSend(e: Email) {
badSends += 1
throw new Exception()
}
val mockConfig1 = new PartialConfig(badSend)
val hire = Hire.hire(EmailAddress("[email protected]"), Some(Location("sydney")), List("Fire", "Juggles"))(mockConfig1)
intercept[Exception]{
Await.result(hire, Duration.Inf)
}
Hire.pendingHireQueueSize()(mockConfig1) should be(1)
var goodSends = 0
def goodSend(e: Email) {
goodSends += 1
}
val mockConfig2 = new PartialConfig(goodSend)
Hire.processPendingQueue()(mockConfig2)
Hire.pendingHireQueueSize()(mockConfig2) should be(0)
goodSends should be(1)
}
it should "send nothing when there is nothing to send" in {
var goodSends = 0
def goodSend(e: Email) {
goodSends += 1
}
val mockConfig2 = new PartialConfig(goodSend)
Hire.pendingHireQueueSize()(mockConfig2) should be(0)
Hire.processPendingQueue()(mockConfig2)
Hire.pendingHireQueueSize()(mockConfig2) should be(0)
goodSends should be(0)
}
it should "not send made up emails" in {
var goodSends = 0
def goodSend(e: Email) {
goodSends += 1
}
val mockConfig2 = new PartialConfig(goodSend)
Hire.pendingHireQueueSize()(mockConfig2) should be(0)
Hire.processHireRequest(-1)(mockConfig2)
goodSends should be(0)
}
it should "really send an email" ignore {
val mailer = new Mailer(config.hire.smtpHost, config.hire.smtpPort, config.hire.smtpUser, config.hire.smtpPass)
def sendMail(email: Email): Unit = mailer.sendMail(email)
val realConfig = new PartialConfig(sendMail)
val hire = Hire.hire(EmailAddress("[email protected]"), Some(Location("sydney")), List("Fire", "Juggles"))(realConfig)
Await.result(hire, Duration.Inf)
}
}
class PartialConfig(mockMailer: Email => Unit) extends WithConfig {
override val port: Int = 8080
override val db: DB = new DB {
override val poolName = 'hirespec
override def setup() = {
Class.forName("org.h2.Driver")
val url = s"jdbc:h2:mem:${poolName.name};DB_CLOSE_DELAY=-1"
ConnectionPool.add(poolName, url, "sa", "")
}
}
override val hire: Hire = new Hire {}
override val mailer: MailerLike = new MailerLike{
override def sendMail(email: Email): Unit = mockMailer(email)
}
override val paths: PathConfig = new PathConfig {}
}
| ririw/circusoc-backend | src/test/scala/com/circusoc/simplesite/hire/HireSpec.scala | Scala | agpl-3.0 | 4,539 |
/*
* # Trove
*
* This file is part of Trove - A FREE desktop budgeting application that
* helps you track your finances, FREES you from complex budgeting, and
* enables you to build your TROVE of savings!
*
* Copyright © 2016-2019 Eric John Fredericks.
*
* Trove is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Trove is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Trove. If not, see <http://www.gnu.org/licenses/>.
*/
package trove.core.infrastructure.persist
import grizzled.slf4j.Logging
import slick.jdbc.SQLiteProfile.backend._
import trove.core.Project
import trove.core.accounts.AccountsServiceImpl
import trove.core.infrastructure.persist.lock.ProjectLock
private[persist] class ProjectImpl(
val name: String,
val lock: ProjectLock,
val db: DatabaseDef)
extends Project with Logging {
override def toString: String = s"Project($name)"
val accountsService = new AccountsServiceImpl
def close(): Unit = {
db.close()
logger.debug(s"Database for project $name closed")
lock.release()
logger.debug(s"Lock for project $name released")
logger.info(s"Closed project $name")
}
}
| emanchgo/budgetfree | src/main/scala/trove/core/infrastructure/persist/ProjectImpl.scala | Scala | gpl-3.0 | 1,610 |
import sbt._
import sbt.Keys._
object ApplicationBuild extends Build {
val appName = "test-sbt-parallel"
val appVersion = "0.1.0-SNAPSHOT"
val appScalaVersion = "2.10.3"
val appScalaOptions = Seq(
"-deprecation",
"-unchecked",
"-feature",
// "-optimize",
"-encoding", "utf-8"
)
val appResolvers = Seq(
"Sonatype OSS Releases" at "http://oss.sonatype.org/content/repositories/releases",
"Sonatype OSS Snapshots" at "http://oss.sonatype.org/content/repositories/snapshots",
"Typesafe Repository" at "http://repo.typesafe.com/typesafe/releases/"
)
val appDependencies = Seq(
"org.scalatest" %% "scalatest" % "1.9.1" % "test",
"org.specs2" %% "specs2" % "1.13" % "test",
"net.debasishg" % "redisclient_2.10" % "2.11"
)
lazy val main = Project(
id = appName.toLowerCase,
base = file(".")
).settings(
name := appName,
version := appVersion,
scalaVersion := appScalaVersion,
scalacOptions ++= appScalaOptions,
resolvers ++= appResolvers,
libraryDependencies ++= appDependencies,
parallelExecution in Test := false
)
}
| mogproject/docker-sbt-test | example/project/Build.scala | Scala | apache-2.0 | 1,122 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server
import com.yammer.metrics.core.Gauge
import kafka.cluster.BrokerEndPoint
import kafka.metrics.KafkaYammerMetrics
import kafka.utils.TestUtils
import org.apache.kafka.common.TopicPartition
import org.easymock.EasyMock
import org.junit.jupiter.api.{BeforeEach, Test}
import org.junit.jupiter.api.Assertions._
import scala.jdk.CollectionConverters._
class AbstractFetcherManagerTest {
@BeforeEach
def cleanMetricRegistry(): Unit = {
TestUtils.clearYammerMetrics()
}
private def getMetricValue(name: String): Any = {
KafkaYammerMetrics.defaultRegistry.allMetrics.asScala.filter { case (k, _) => k.getName == name }.values.headOption.get.
asInstanceOf[Gauge[Int]].value()
}
@Test
def testAddAndRemovePartition(): Unit = {
val fetcher: AbstractFetcherThread = EasyMock.mock(classOf[AbstractFetcherThread])
val fetcherManager = new AbstractFetcherManager[AbstractFetcherThread]("fetcher-manager", "fetcher-manager", 2) {
override def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndPoint): AbstractFetcherThread = {
fetcher
}
}
val fetchOffset = 10L
val leaderEpoch = 15
val tp = new TopicPartition("topic", 0)
val initialFetchState = InitialFetchState(
leader = new BrokerEndPoint(0, "localhost", 9092),
currentLeaderEpoch = leaderEpoch,
initOffset = fetchOffset)
EasyMock.expect(fetcher.start())
EasyMock.expect(fetcher.addPartitions(Map(tp -> initialFetchState)))
.andReturn(Set(tp))
EasyMock.expect(fetcher.fetchState(tp))
.andReturn(Some(PartitionFetchState(fetchOffset, None, leaderEpoch, Truncating, lastFetchedEpoch = None)))
EasyMock.expect(fetcher.removePartitions(Set(tp))).andReturn(Map.empty)
EasyMock.expect(fetcher.fetchState(tp)).andReturn(None)
EasyMock.replay(fetcher)
fetcherManager.addFetcherForPartitions(Map(tp -> initialFetchState))
assertEquals(Some(fetcher), fetcherManager.getFetcher(tp))
fetcherManager.removeFetcherForPartitions(Set(tp))
assertEquals(None, fetcherManager.getFetcher(tp))
EasyMock.verify(fetcher)
}
@Test
def testMetricFailedPartitionCount(): Unit = {
val fetcher: AbstractFetcherThread = EasyMock.mock(classOf[AbstractFetcherThread])
val fetcherManager = new AbstractFetcherManager[AbstractFetcherThread]("fetcher-manager", "fetcher-manager", 2) {
override def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndPoint): AbstractFetcherThread = {
fetcher
}
}
val tp = new TopicPartition("topic", 0)
val metricName = "FailedPartitionsCount"
// initial value for failed partition count
assertEquals(0, getMetricValue(metricName))
// partition marked as failed increments the count for failed partitions
fetcherManager.failedPartitions.add(tp)
assertEquals(1, getMetricValue(metricName))
// removing fetcher for the partition would remove the partition from set of failed partitions and decrement the
// count for failed partitions
fetcherManager.removeFetcherForPartitions(Set(tp))
assertEquals(0, getMetricValue(metricName))
}
@Test
def testDeadThreadCountMetric(): Unit = {
val fetcher: AbstractFetcherThread = EasyMock.mock(classOf[AbstractFetcherThread])
val fetcherManager = new AbstractFetcherManager[AbstractFetcherThread]("fetcher-manager", "fetcher-manager", 2) {
override def createFetcherThread(fetcherId: Int, sourceBroker: BrokerEndPoint): AbstractFetcherThread = {
fetcher
}
}
val fetchOffset = 10L
val leaderEpoch = 15
val tp = new TopicPartition("topic", 0)
val initialFetchState = InitialFetchState(
leader = new BrokerEndPoint(0, "localhost", 9092),
currentLeaderEpoch = leaderEpoch,
initOffset = fetchOffset)
EasyMock.expect(fetcher.start())
EasyMock.expect(fetcher.addPartitions(Map(tp -> initialFetchState)))
.andReturn(Set(tp))
EasyMock.expect(fetcher.isThreadFailed).andReturn(true)
EasyMock.replay(fetcher)
fetcherManager.addFetcherForPartitions(Map(tp -> initialFetchState))
assertEquals(1, fetcherManager.deadThreadCount)
EasyMock.verify(fetcher)
EasyMock.reset(fetcher)
EasyMock.expect(fetcher.isThreadFailed).andReturn(false)
EasyMock.replay(fetcher)
assertEquals(0, fetcherManager.deadThreadCount)
EasyMock.verify(fetcher)
}
}
| guozhangwang/kafka | core/src/test/scala/unit/kafka/server/AbstractFetcherManagerTest.scala | Scala | apache-2.0 | 5,216 |
/**
* This code is generated using [[https://www.scala-sbt.org/contraband/ sbt-contraband]].
*/
// DO NOT EDIT MANUALLY
package sbt.internal.langserver.codec
import _root_.sjsonnew.{ Unbuilder, Builder, JsonFormat, deserializationError }
trait DiagnosticFormats { self: sbt.internal.langserver.codec.RangeFormats with sjsonnew.BasicJsonProtocol =>
implicit lazy val DiagnosticFormat: JsonFormat[sbt.internal.langserver.Diagnostic] = new JsonFormat[sbt.internal.langserver.Diagnostic] {
override def read[J](__jsOpt: Option[J], unbuilder: Unbuilder[J]): sbt.internal.langserver.Diagnostic = {
__jsOpt match {
case Some(__js) =>
unbuilder.beginObject(__js)
val range = unbuilder.readField[sbt.internal.langserver.Range]("range")
val severity = unbuilder.readField[Option[Long]]("severity")
val code = unbuilder.readField[Option[String]]("code")
val source = unbuilder.readField[Option[String]]("source")
val message = unbuilder.readField[String]("message")
unbuilder.endObject()
sbt.internal.langserver.Diagnostic(range, severity, code, source, message)
case None =>
deserializationError("Expected JsObject but found None")
}
}
override def write[J](obj: sbt.internal.langserver.Diagnostic, builder: Builder[J]): Unit = {
builder.beginObject()
builder.addField("range", obj.range)
builder.addField("severity", obj.severity)
builder.addField("code", obj.code)
builder.addField("source", obj.source)
builder.addField("message", obj.message)
builder.endObject()
}
}
}
| sbt/sbt | protocol/src/main/contraband-scala/sbt/internal/langserver/codec/DiagnosticFormats.scala | Scala | apache-2.0 | 1,576 |
package pages
import java.io.InputStream
import java.util.zip.ZipInputStream
import code.model.User
import net.liftweb.http.js.JsCmd
import net.liftweb.util.{CssSel, ClearNodes}
import net.liftweb.util.Helpers._
import pages.theme.Libs.Lib
import pages.theme.{Styles, Libs}
import pages.theme.Styles._
import util.ImplicitHelpers.Pipe
import scala.io.Source
import scala.xml.{NodeSeq, Text}
trait DemoCodePage extends StandardPage {
override def libs: Set[Lib] = {
import Libs._
super.libs + sh_core + sh_brush
}
override def styles: Set[Style] = {
import Styles._
super.styles + shCore + shThemeDefault
}
def pageCodeFile: String
lazy val is: InputStream = this.getClass.getResourceAsStream("/public/app.zip")
lazy val zipIs: ZipInputStream = new ZipInputStream(is)
lazy val file: Option[String] = {
Iterator.continually(zipIs.getNextEntry).takeWhile(_ != null).collectFirst({
case entry if entry != null && entry.getName == pageCodeFile =>
Source.fromInputStream(zipIs).getLines().mkString("\\n")
})
}
override def render: (NodeSeq) => NodeSeq = super.render andThen {
file.map(file => {
".view-all-code *" #> {
<button class="btn btn-sm btn-primary pull-right" onclick={xsh.ajaxInvoke(() => new TH.DefaultModal() {
override def modalStyle: TH.ModalStyle.Style = TH.ModalStyle.Default
override def modalTitleStr: String = s"File: ${pageCodeFile}"
// Header:
override def modalEnableCloseBtn: Boolean = false
override def modalSaveBtnLbl: String = "Close"
override def modalOnSaveClientSide: JsCmd = hideAndDestroy()
override def modalFullscreen: Boolean = true
override def modalContents: NodeSeq = {
TH.Highlight(file).render
}
}.showAndInstall()).toJsCmd}>
<span class="fa fa-file-text-o"></span> View Full Code</button>
}
}).getOrElse(".view-all-code" #> ClearNodes)
}
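  // Illustrative note on the marker convention used by codeWidget below (an
  // assumption drawn from its implementation, not stated elsewhere in this file):
  // `codeWidget("demo-1")` keeps only the lines found between a matching pair of
  // `// demo-1` (Scala) or `<!-- demo-1 -->` (markup) markers in `pageCodeFile`,
  // and renders them with TH.Highlight, e.g.
  //   def demoSnippet = codeWidget("demo-1")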
def codeWidget(codeId: String) = file.map(file => {
new TH.Widget {
override def widgetTitle: String = "Code"
override def widgetBody: NodeSeq = {
TH.Highlight(
file
.split("\\\\n")
.dropWhile(l => !l.contains(s"// $codeId") && !l.contains(s"""<!-- $codeId -->"""))
.drop(1)
.takeWhile(l => !l.contains(s"// $codeId") && !l.contains(s"""<!-- $codeId -->"""))
.mkString("\\n")
).render
}
}.renderedWidget
}).getOrElse(NodeSeq.Empty)
} | slynx-fw/slynx-demo | app/pages/DemoCodePage.scala | Scala | apache-2.0 | 2,573 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.producer
import async.{CallbackHandler, EventHandler}
import kafka.serializer.Encoder
import kafka.utils._
import java.util.Properties
import kafka.cluster.{Partition, Broker}
import java.util.concurrent.atomic.AtomicBoolean
import kafka.common.{NoBrokersForPartitionException, InvalidConfigException, InvalidPartitionException}
import kafka.api.ProducerRequest
class Producer[K,V](config: ProducerConfig,
partitioner: Partitioner[K],
producerPool: ProducerPool[V],
populateProducerPool: Boolean,
private var brokerPartitionInfo: BrokerPartitionInfo) /* for testing purpose only. Applications should ideally */
/* use the other constructor*/
extends Logging {
private val hasShutdown = new AtomicBoolean(false)
private val random = new java.util.Random
// check if zookeeper based auto partition discovery is enabled
private val zkEnabled = Utils.propertyExists(config.zkConnect)
if(brokerPartitionInfo == null) {
zkEnabled match {
case true =>
val zkProps = new Properties()
zkProps.put("zk.connect", config.zkConnect)
zkProps.put("zk.sessiontimeout.ms", config.zkSessionTimeoutMs.toString)
zkProps.put("zk.connectiontimeout.ms", config.zkConnectionTimeoutMs.toString)
zkProps.put("zk.synctime.ms", config.zkSyncTimeMs.toString)
brokerPartitionInfo = new ZKBrokerPartitionInfo(new ZKConfig(zkProps), producerCbk)
case false =>
brokerPartitionInfo = new ConfigBrokerPartitionInfo(config)
}
}
// pool of producers, one per broker
if(populateProducerPool) {
val allBrokers = brokerPartitionInfo.getAllBrokerInfo
allBrokers.foreach(b => producerPool.addProducer(new Broker(b._1, b._2.host, b._2.host, b._2.port)))
}
/**
* This constructor can be used when all config parameters will be specified through the
* ProducerConfig object
* @param config Producer Configuration object
*/
def this(config: ProducerConfig) = this(config, Utils.getObject(config.partitionerClass),
new ProducerPool[V](config, Utils.getObject(config.serializerClass)), true, null)
/**
* This constructor can be used to provide pre-instantiated objects for all config parameters
* that would otherwise be instantiated via reflection. i.e. encoder, partitioner, event handler and
* callback handler. If you use this constructor, encoder, eventHandler, callback handler and partitioner
* will not be picked up from the config.
* @param config Producer Configuration object
* @param encoder Encoder used to convert an object of type V to a kafka.message.Message. If this is null it
* throws an InvalidConfigException
* @param eventHandler the class that implements kafka.producer.async.IEventHandler[T] used to
* dispatch a batch of produce requests, using an instance of kafka.producer.SyncProducer. If this is null, it
* uses the DefaultEventHandler
* @param cbkHandler the class that implements kafka.producer.async.CallbackHandler[T] used to inject
* callbacks at various stages of the kafka.producer.AsyncProducer pipeline. If this is null, the producer does
* not use the callback handler and hence does not invoke any callbacks
* @param partitioner class that implements the kafka.producer.Partitioner[K], used to supply a custom
* partitioning strategy on the message key (of type K) that is specified through the ProducerData[K, T]
* object in the send API. If this is null, producer uses DefaultPartitioner
*/
def this(config: ProducerConfig,
encoder: Encoder[V],
eventHandler: EventHandler[V],
cbkHandler: CallbackHandler[V],
partitioner: Partitioner[K]) =
this(config, if(partitioner == null) new DefaultPartitioner[K] else partitioner,
new ProducerPool[V](config, encoder, eventHandler, cbkHandler), true, null)
/**
* Sends the data, partitioned by key to the topic using either the
* synchronous or the asynchronous producer
* @param producerData the producer data object that encapsulates the topic, key and message data
*/
def send(producerData: ProducerData[K,V]*) {
zkEnabled match {
case true => zkSend(producerData: _*)
case false => configSend(producerData: _*)
}
}
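  /*
   * Illustrative usage sketch (not part of the original file; the exact
   * ProducerData constructor arguments shown here are an assumption):
   *
   *   val producer = new Producer[String, String](config)
   *   producer.send(new ProducerData[String, String]("my-topic", "my-key", List("hello")))
   *   producer.close()
   */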
private def zkSend(producerData: ProducerData[K,V]*) {
val producerPoolRequests = producerData.map { pd =>
var brokerIdPartition: Option[Partition] = None
var brokerInfoOpt: Option[Broker] = None
var numRetries: Int = 0
while(numRetries <= config.zkReadRetries && brokerInfoOpt.isEmpty) {
if(numRetries > 0) {
info("Try #" + numRetries + " ZK producer cache is stale. Refreshing it by reading from ZK again")
brokerPartitionInfo.updateInfo
}
val topicPartitionsList = getPartitionListForTopic(pd)
val totalNumPartitions = topicPartitionsList.length
val partitionId = getPartition(pd.getKey, totalNumPartitions)
brokerIdPartition = Some(topicPartitionsList(partitionId))
brokerInfoOpt = brokerPartitionInfo.getBrokerInfo(brokerIdPartition.get.brokerId)
numRetries += 1
}
brokerInfoOpt match {
case Some(brokerInfo) =>
debug("Sending message to broker " + brokerInfo.host + ":" + brokerInfo.port +
" on partition " + brokerIdPartition.get.partId)
case None =>
throw new NoBrokersForPartitionException("Invalid Zookeeper state. Failed to get partition for topic: " +
pd.getTopic + " and key: " + pd.getKey)
}
producerPool.getProducerPoolData(pd.getTopic,
new Partition(brokerIdPartition.get.brokerId, brokerIdPartition.get.partId),
pd.getData)
}
producerPool.send(producerPoolRequests: _*)
}
private def configSend(producerData: ProducerData[K,V]*) {
val producerPoolRequests = producerData.map { pd =>
// find the broker partitions registered for this topic
val topicPartitionsList = getPartitionListForTopic(pd)
val totalNumPartitions = topicPartitionsList.length
val randomBrokerId = random.nextInt(totalNumPartitions)
val brokerIdPartition = topicPartitionsList(randomBrokerId)
val brokerInfo = brokerPartitionInfo.getBrokerInfo(brokerIdPartition.brokerId).get
debug("Sending message to broker " + brokerInfo.host + ":" + brokerInfo.port +
" on a randomly chosen partition")
val partition = ProducerRequest.RandomPartition
debug("Sending message to broker " + brokerInfo.host + ":" + brokerInfo.port + " on a partition " +
brokerIdPartition.partId)
producerPool.getProducerPoolData(pd.getTopic,
new Partition(brokerIdPartition.brokerId, partition),
pd.getData)
}
producerPool.send(producerPoolRequests: _*)
}
private def getPartitionListForTopic(pd: ProducerData[K,V]): Seq[Partition] = {
debug("Getting the number of broker partitions registered for topic: " + pd.getTopic)
val topicPartitionsList = brokerPartitionInfo.getBrokerPartitionInfo(pd.getTopic).toSeq
debug("Broker partitions registered for topic: " + pd.getTopic + " = " + topicPartitionsList)
val totalNumPartitions = topicPartitionsList.length
if(totalNumPartitions == 0) throw new NoBrokersForPartitionException("Partition = " + pd.getKey)
topicPartitionsList
}
/**
* Retrieves the partition id and throws an InvalidPartitionException if
* the value of partition is not between 0 and numPartitions-1
* @param key the partition key
* @param numPartitions the total number of available partitions
   * @return the partition id
*/
private def getPartition(key: K, numPartitions: Int): Int = {
if(numPartitions <= 0)
throw new InvalidPartitionException("Invalid number of partitions: " + numPartitions +
"\\n Valid values are > 0")
val partition = if(key == null) random.nextInt(numPartitions)
else partitioner.partition(key , numPartitions)
if(partition < 0 || partition >= numPartitions)
throw new InvalidPartitionException("Invalid partition id : " + partition +
"\\n Valid values are in the range inclusive [0, " + (numPartitions-1) + "]")
partition
}
/**
* Callback to add a new producer to the producer pool. Used by ZKBrokerPartitionInfo
* on registration of new broker in zookeeper
* @param bid the id of the broker
* @param host the hostname of the broker
* @param port the port of the broker
*/
private def producerCbk(bid: Int, host: String, port: Int) = {
if(populateProducerPool) producerPool.addProducer(new Broker(bid, host, host, port))
else debug("Skipping the callback since populateProducerPool = false")
}
/**
* Close API to close the producer pool connections to all Kafka brokers. Also closes
* the zookeeper client connection if one exists
*/
def close() = {
val canShutdown = hasShutdown.compareAndSet(false, true)
if(canShutdown) {
producerPool.close
brokerPartitionInfo.close
}
}
}
| tnachen/kafka | core/src/main/scala/kafka/producer/Producer.scala | Scala | apache-2.0 | 10,002 |
package com.twitter.finagle.buoyant.h2
import com.twitter.finagle.{Dtab => FDtab, Status => _, _}
import com.twitter.finagle.buoyant.{Dst => BuoyantDst}
import com.twitter.finagle.context.{Contexts, Deadline => FDeadline}
import com.twitter.finagle.tracing._
import com.twitter.io.Buf
import com.twitter.util.{Future, Return, Throw, Time, Try}
import java.util.Base64
import scala.collection.breakOut
/**
* The finagle http stack manages a set of context headers that are
* read from server requests and written to client requests. The
* [[LinkerdHeaders]] module replaces these headers with
* linkerd-specific headers (prefixed by l5d-).
*
* Context headers, read and written by each linkerd instance, include:
*
* - `l5d-ctx-deadline`
* - `l5d-ctx-dtab`
* - `l5d-ctx-trace`
*
* Additionally, linkerd honors the following headers on incoming requests:
*
* - `l5d-dtab`: a client-specified delegation override
* - `l5d-sample`: a client-specified trace sample rate override
*
* In addition to the context headers, linkerd may emit the following
* headers on outgoing requests:
*
* - `l5d-dst-logical`: the logical name of the request as identified by linkerd
* - `l5d-dst-concrete`: the concrete client name after delegation
* - `l5d-dst-residual`: an optional residual path remaining after delegation
* - `l5d-reqid`: a token that may be used to correlate requests in
* a callgraph across services and linkerd instances
*
 * And in addition to the context headers, linkerd may emit the following
* headers on outgoing responses:
*
* - `l5d-err`: indicates a linkerd-generated error. Error responses
* that do not have this header are application errors.
*/
object LinkerdHeaders {
val Prefix = "l5d-"
object Ctx {
/**
* A serverside stack module that extracts contextual information
* from requests and configures the local
* `com.twitter.finagle.Context` appropriately. Currently this includes:
* - Deadline
* - Dtab
*
     * Note that the dtabs read by this module are appended to that specified
* by the `l5d-dtab` header.
*
* Note that trace configuration is handled separately.
*/
val serverModule: Stackable[ServiceFactory[Request, Response]] =
new Stack.Module0[ServiceFactory[Request, Response]] {
val role = Stack.Role("ServerContextFilter")
val description = "Extracts linkerd context from http headers"
val deadline = new Deadline.ServerFilter
val dtab = new Dtab.ServerFilter
def make(next: ServiceFactory[Request, Response]) =
deadline.andThen(dtab).andThen(next)
}
val clearServerModule: Stackable[ServiceFactory[Request, Response]] =
new Stack.Module0[ServiceFactory[Request, Response]] {
val role = serverModule.role
val description = "Clears linkerd context from http request headers"
val deadline = new Deadline.ClearServerFilter
val dtab = new Dtab.ClearServerFilter
val sample = new Sample.ClearServerFilter
val trace = new Trace.ClearServerFilter
val misc = new ClearMiscServerFilter
def make(next: ServiceFactory[Request, Response]) =
deadline.andThen(dtab).andThen(sample).andThen(trace).andThen(misc).andThen(next)
}
/**
* A clientside stack module that injects local contextual
* information onto downstream requests. Currently this includes:
* - Deadline
*
* Note that trace configuration is handled separately.
*/
val clientModule: Stackable[ServiceFactory[Request, Response]] =
new Stack.Module0[ServiceFactory[Request, Response]] {
val role = Stack.Role("ClientContextFilter")
val description = "Injects linkerd context into http headers"
val deadline = new Deadline.ClientFilter
val dtab = new Dtab.ClientFilter
def make(next: ServiceFactory[Request, Response]) =
deadline.andThen(dtab).andThen(next)
}
val Prefix = LinkerdHeaders.Prefix + "ctx-"
/**
* The `l5d-ctx-deadline` header propagates a request
* deadline. Each router server may use this deadline to cancel or
* reject work.
*
* Each router client sets a deadline that it is at least as
* strict as the deadline it received. If an incoming request has
* a deadline, the outgoing request MUST have a
* deadline. Otherwise, outgoing requests MAY have a deadline.
*/
object Deadline {
val Key = Prefix + "deadline"
def read(v: String): FDeadline = {
val values = v.split(' ')
val timestamp = Time.fromNanoseconds(values(0).toLong)
val deadline = Time.fromNanoseconds(values(1).toLong)
FDeadline(timestamp, deadline)
}
/**
* Read all `l5d-ctx-deadline` headers and return the strictest
* combination.
*/
def get(headers: Headers): Option[FDeadline] =
headers.getAll(Key).foldLeft[Option[FDeadline]](None) { (d0, v) =>
(d0, Try(read(v)).toOption) match {
case (Some(d0), Some(d1)) => Some(FDeadline.combined(d0, d1))
case (d0, d1) => d0.orElse(d1)
}
}
def write(d: FDeadline): String =
s"${d.timestamp.inNanoseconds} ${d.deadline.inNanoseconds}"
def set(headers: Headers, deadline: FDeadline): Unit = {
val _ = headers.set(Key, write(deadline))
}
def clear(headers: Headers): Unit = {
val _ = headers.remove(Key)
}
/**
* Extract the deadline from the request and, if it exists, use
* either the strictest combination of deadlines.
*
* Clears deadline headers from the request. This means that the
* client is responsible for encoding outgoing deadlines.
*/
class ServerFilter extends SimpleFilter[Request, Response] {
def apply(req: Request, service: Service[Request, Response]) =
get(req.headers) match {
case None => service(req)
case Some(reqDeadline) =>
clear(req.headers)
val deadline = FDeadline.current match {
case None => reqDeadline
case Some(current) => FDeadline.combined(reqDeadline, current)
}
Contexts.broadcast.let(FDeadline, deadline) {
service(req)
}
}
}
class ClearServerFilter extends SimpleFilter[Request, Response] {
def apply(req: Request, service: Service[Request, Response]) = {
clear(req.headers)
service(req)
}
}
/**
* If a deadline is set, encode it on downstream requests.
*
* Clears any existing deadline headers from the request.
*/
class ClientFilter extends SimpleFilter[Request, Response] {
def apply(req: Request, service: Service[Request, Response]) =
FDeadline.current match {
case None => service(req)
case Some(deadline) =>
set(req.headers, deadline)
service(req)
}
}
}
/**
* There are two headers used to control local Dtabs in linkerd:
*
* 1. `l5d-ctx-dtab` is read and _written_ by linkerd. It is
* intended to managed entirely by linkerd, and applications
* should only forward requests prefixed by `l5d-ctx-*`.
*
* 2. `l5d-dtab` is to be provided by users. Applications are
* not required to forward `l5d-dtab` when fronted by
* linkerd.
*
* `l5d-dtab` is appended to `l5d-ctx-dtab`, so that user-provided
     * delegations take precedence.
*/
object Dtab {
val CtxKey = Ctx.Prefix + "dtab"
val UserKey = LinkerdHeaders.Prefix + "dtab"
private val EmptyReturn = Return(FDtab.empty)
def get(headers: Headers, key: String): Try[FDtab] =
if (!headers.contains(key)) EmptyReturn
else Try { FDtab(headers.getAll(key).flatMap(FDtab.read(_))(breakOut)) }
def get(headers: Headers): Try[FDtab] =
for {
ctx <- get(headers, CtxKey)
user <- get(headers, UserKey)
} yield ctx ++ user
def clear(headers: Headers): Unit = {
val _c = headers.remove(CtxKey)
val _u = headers.remove(UserKey)
}
def set(dtab: FDtab, msg: Message): Unit =
if (dtab.nonEmpty) {
val _ = msg.headers.set(CtxKey, dtab.show)
}
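      // Hypothetical example of a user-supplied override carried in the UserKey
      // header (the value shown is illustrative only):
      //   l5d-dtab: /svc/users => /svc/users-v2
      // get() above appends it after the ctx dtab, so user-provided delegations
      // take precedence over the linkerd-propagated ones.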
/**
* Extract a Dtab from the L5d-Ctx-Dtab and L5d-Dtab headers (in
* that order) and append them to the local context.
*
* The L5d-Ctx-Dtab header is intended to be set by a linkerd
* instance, while the L5d-Dtab header is intended to be set by
* a user who wants to override delegation.
*
* @todo use DtabFilter.Injector once it is released.
*/
class ServerFilter extends SimpleFilter[Request, Response] {
def apply(req: Request, service: Service[Request, Response]) =
get(req.headers) match {
case Throw(e) =>
Future.value(Err.respond(e.getMessage, Status.BadRequest))
case Return(dtab) =>
clear(req.headers)
FDtab.local ++= dtab
service(req)
}
}
class ClearServerFilter extends SimpleFilter[Request, Response] {
def apply(req: Request, service: Service[Request, Response]) = {
clear(req.headers)
service(req)
}
}
/**
* Encodes the local dtab into the L5d-Ctx-Dtab header.
*
* @todo use DtabFilter.Extractor once it is released.
*/
class ClientFilter extends SimpleFilter[Request, Response] {
def apply(req: Request, service: Service[Request, Response]) = {
set(FDtab.local, req)
service(req)
}
}
}
object Trace {
val Key = Prefix + "trace"
/**
* Get a trace id from a base64 encoded buffer.
*
* Based on com.twitter.finagle.tracing.Trace.idCtx.tryUnmarshal
*
* The wire format is (big-endian):
* ''reqId:8 parentId:8 traceId:8 flags:8''
*/
def read(b64: String): Try[TraceId] =
Try { Base64.getDecoder.decode(b64) }.flatMap(TraceId.deserialize(_))
def get(headers: Headers): Option[TraceId] =
for {
header <- headers.get(Key)
traceId <- read(header).toOption
} yield traceId
def set(headers: Headers, id: TraceId): Unit = {
val bytes = TraceId.serialize(id)
val b64 = Base64.getEncoder.encodeToString(bytes)
val _ = headers.set(Key, b64)
}
def clear(headers: Headers): Unit = {
val _ = headers.remove(Key)
}
class ClearServerFilter extends SimpleFilter[Request, Response] {
def apply(req: Request, service: Service[Request, Response]) = {
clear(req.headers)
service(req)
}
}
}
}
/**
* The `l5d-reqid` header is used to provide applications with a
* token that can be used in logging to correlate requests. We use
* the _root_ span id so that this key can be used to correlate all
* related requests (i.e. in log messages) across services and
* linkerd instances.
*/
object RequestId {
val Key = Prefix + "reqid"
def set(headers: Headers, traceId: TraceId): Unit = {
val _ = headers.set(Key, traceId.traceId.toString)
}
}
/**
   * The `l5d-sample` header lets clients determine the sample rate of a
* given request. Tracers may, of course, choose to enforce
* additional sampling, so setting this header cannot ensure that a
* trace is recorded.
*
* `l5d-sample` values should be on [0.0, 1.0], however values
* outside of this range are rounded to the nearest valid value so
* that negative numbers are treated as 0 and positive numbers
* greater than 1 are rounded to 1. At 1.0, the trace is marked as
   * sampled on all downstream requests.
*/
object Sample {
val Key = Prefix + "sample"
def get(headers: Headers): Option[Float] =
headers.get(Key).flatMap { s =>
Try(s.toFloat).toOption.map {
case v if v < 0 => 0.0f
case v if v > 1 => 1.0f
case v => v
}
}
def clear(headers: Headers): Unit = {
val _ = headers.remove(Key)
}
class ClearServerFilter extends SimpleFilter[Request, Response] {
def apply(req: Request, service: Service[Request, Response]) = {
clear(req.headers)
service(req)
}
}
}
/**
* Dst headers are encoded on outgoing requests so that downstream
* services are able to know how they are named by
* linkerd. Specifically, the `l5d-dst-residual` header may be
* useful to services that act as proxies and need to determine the
* next hop.
*/
object Dst {
val Path = Prefix + "dst-service"
val Bound = Prefix + "dst-client"
val Residual = Prefix + "dst-residual"
/** Encodes `l5d-dst-service` on outgoing requests. */
class PathFilter(path: Path) extends SimpleFilter[Request, Response] {
private[this] val pathShow = path.show
def apply(req: Request, service: Service[Request, Response]) = {
req.headers.set(Path, pathShow)
service(req)
}
}
object PathFilter {
val module: Stackable[ServiceFactory[Request, Response]] =
new Stack.Module1[BuoyantDst.Path, ServiceFactory[Request, Response]] {
val role = Stack.Role("LinkerdHeaders.Path")
val description = s"Adds the '$Path' header to requests and responses"
def make(dst: BuoyantDst.Path, factory: ServiceFactory[Request, Response]) =
new PathFilter(dst.path).andThen(factory)
}
}
/**
* Encodes bound and residual paths onto downstream requests
*/
class BoundFilter(bound: Name.Bound) extends SimpleFilter[Request, Response] {
private[this] val boundShow = bound.idStr
private[this] val pathShow = bound.path match {
case com.twitter.finagle.Path.empty => None
case path => Some(path.show)
}
private[this] def annotate(msg: Message): Unit = {
msg.headers.set(Bound, boundShow)
pathShow match {
case None =>
case Some(p) => msg.headers.set(Residual, p); ()
}
}
def apply(req: Request, service: Service[Request, Response]) = {
annotate(req)
service(req)
}
}
object BoundFilter {
val module: Stackable[ServiceFactory[Request, Response]] =
new Stack.Module1[BuoyantDst.Bound, ServiceFactory[Request, Response]] {
val role = Stack.Role("LinkerdHeaders.Bound")
val description = s"Adds the $Bound and $Residual headers to requests and responses"
def make(dst: BuoyantDst.Bound, factory: ServiceFactory[Request, Response]) =
new BoundFilter(dst.name).andThen(factory)
}
}
}
class ClearMiscServerFilter extends SimpleFilter[Request, Response] {
def apply(req: Request, service: Service[Request, Response]) = {
for ((k, _) <- req.headers.toSeq) {
if (k.toLowerCase.startsWith(LinkerdHeaders.Prefix)) {
req.headers.remove(k)
}
}
service(req)
}
}
/**
* The `l5d-err` header is set on all responses in which linkerd
* encountered an error. It can be used to distinguish linkerd
* responses from application responses.
*/
object Err {
val Key = Prefix + "err"
def respond(msg: String, status: Status = Status.InternalServerError): Response = {
val rsp = Response(status, Stream.const(Buf.Utf8(msg)))
rsp.headers.add(Key, msg)
rsp.headers.set("content-type", "text/plain")
rsp
}
}
}
| denverwilliams/linkerd | linkerd/protocol/h2/src/main/scala/com/twitter/finagle/buoyant/h2/LinkerdHeaders.scala | Scala | apache-2.0 | 15,931 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.samza.serializers
import org.apache.samza.config.Config
import org.codehaus.jackson.map.ObjectMapper
import java.util.Map
import java.nio.ByteBuffer
import org.apache.samza.metrics.reporter.MetricsSnapshot
class MetricsSnapshotSerde extends Serde[MetricsSnapshot] {
val jsonMapper = new ObjectMapper
def toBytes(obj: MetricsSnapshot) = jsonMapper
.writeValueAsString(obj.getAsMap)
.getBytes("UTF-8")
def fromBytes(bytes: Array[Byte]) = {
val metricMap = jsonMapper.readValue(bytes, classOf[java.util.Map[String, java.util.Map[String, Object]]])
MetricsSnapshot.fromMap(metricMap)
}
}
class MetricsSnapshotSerdeFactory extends SerdeFactory[MetricsSnapshot] {
def getSerde(name: String, config: Config) = new MetricsSnapshotSerde
}
| InnovaCo/samza | samza-core/src/main/scala/org/apache/samza/serializers/MetricsSnapshotSerde.scala | Scala | apache-2.0 | 1,588 |
package ch6_purely_functional_state
/**
* Ex 6.1
* Write a function that uses RNG.nextInt to generate a random integer between 0 and Int.maxValue (inclusive).
* Make sure to handle the corner case when nextInt returns Int.MinValue,
* which doesn’t have a non-negative counterpart.
*
* def nonNegativeInt(rng: RNG): (Int, RNG)
*
* Ex 6.2
* Write a function to generate a Double between 0 and 1, not including 1. Note: You can
* use Int.MaxValue to obtain the maximum positive integer value, and you can use
* x.toDouble to convert an x: Int to a Double.
*
* def double(rng: RNG): (Double, RNG)
*
* Ex 6.3
* Write functions to generate an (Int, Double) pair, a (Double, Int) pair, and a
* (Double, Double, Double) 3-tuple. You should be able to reuse the functions you’ve
* already written.
*
* def intDouble(rng: RNG): ((Int,Double), RNG)
* def doubleInt(rng: RNG): ((Double,Int), RNG)
* def double3(rng: RNG): ((Double,Double,Double), RNG)
*
* Ex 6.4
* Write a function to generate a list of random integers.
*
* def ints(count: Int)(rng: RNG): (List[Int], RNG)
*
* Ex 6.5
* Use map to reimplement double in a more elegant way. See exercise 6.2.
*
* Ex 6.6
* Write the implementation of map2 based on the following signature. This function
* takes two actions, ra and rb, and a function f for combining their results, and returns
* a new action that combines them:
*
* def map2[A,B,C](ra: Rand[A], rb: Rand[B])(f: (A, B) => C): Rand[C]
*
* Ex 6.7
* Hard: If you can combine two RNG transitions, you should be able to combine a whole
* list of them. Implement sequence for combining a List of transitions into a single
* transition. Use it to reimplement the ints function you wrote before. For the latter,
* you can use the standard library function List.fill(n)(x) to make a list with x
* repeated n times.
*
* def sequence[A](fs: List[Rand[A]]): Rand[List[A]]
*
* Ex 6.8
* Implement flatMap, and then use it to implement nonNegativeLessThan.
*
* def flatMap[A,B](f: Rand[A])(g: A => Rand[B]): Rand[B]
*
* Ex 6.9
* Reimplement map and map2 in terms of flatMap.
* The fact that this is possible is what we’re referring to
* when we say that flatMap is more powerful than map and map2.
*
* Ex 6.10
* Generalize the functions unit, map, map2, flatMap, and sequence.
* Add them as methods on the State case class where possible.
* Otherwise you should put them in a State companion object.
*
* Ex 6.11
* Hard: To gain experience with the use of State, implement a finite state automaton
* that models a simple candy dispenser. The machine has two types of input: you can
* insert a coin, or you can turn the knob to dispense candy. It can be in one of two
* states: locked or unlocked. It also tracks how many candies are left and how many
* coins it contains.
*
* sealed trait Input
* case object Coin extends Input
* case object Turn extends Input
*
* case class Machine(locked: Boolean, candies: Int, coins: Int)
*
* The rules of the machine are as follows:
* * Inserting a coin into a locked machine will cause it to unlock if there’s any
* candy left.
* * Turning the knob on an unlocked machine will cause it to dispense candy and
* become locked.
* * Turning the knob on a locked machine or inserting a coin into an unlocked
* machine does nothing.
* * A machine that’s out of candy ignores all inputs.
*
* The method simulateMachine should operate the machine based on the list of inputs
* and return the number of coins and candies left in the machine at the end.
* For example, if the input Machine has 10 coins and 5 candies,
* and a total of 4 candies are successfully bought, the output should be (14, 1).
*
* def simulateMachine(inputs: List[Input]): State[Machine, (Int, Int)]
*/
object Ex6_State {
trait RNG {
def nextInt: (Int, RNG)
}
type Rand[+A] = RNG => (A, RNG)
case class SimpleRNG(seed: Long) extends RNG {
def nextInt: (Int, RNG) = {
val newSeed = (seed * 0x5DEECE66DL + 0xBL) & 0xFFFFFFFFFFFFL
val nextRNG = SimpleRNG(newSeed)
val n = (newSeed >>> 16).toInt
(n, nextRNG)
}
}
object RNG {
def nonNegativeInt(rng: RNG): (Int, RNG) = {
val (i, r) = rng.nextInt
(if (i < 0) -(i + 1) else i, r)
}
def double(rng: RNG): (Double, RNG) = {
val (i, r) = nonNegativeInt(rng)
(i/(Int.MaxValue.toDouble + 1), r)
}
def intDouble(rng: RNG): ((Int,Double), RNG) = {
val (i, r1) = rng.nextInt
val (d, r2) = double(r1)
((i, d), r2)
}
def doubleInt(rng: RNG): ((Double,Int), RNG) = {
val ((i, d), r) = intDouble(rng)
((d, i), r)
}
def double3(rng: RNG): ((Double,Double,Double), RNG) = {
val (d1, r1) = double(rng)
val (d2, r2) = double(r1)
val (d3, r3) = double(r2)
((d1, d2, d3), r3)
}
def ints(count: Int)(rng: RNG): (List[Int], RNG) =
if (count > 0) {
val (i, r) = rng.nextInt
val (l, rr) = ints(count - 1)(r)
(i :: l, rr)
} else {
(Nil, rng)
}
def unit[A](a: A): Rand[A] =
rng => (a, rng)
def map[A,B](s: Rand[A])(f: A => B): Rand[B] =
rng => {
val (a, rng2) = s(rng)
(f(a), rng2)
}
def nonNegativeEven: Rand[Int] =
map(nonNegativeInt)(i => i - i % 2)
def doubleViaMap: Rand[Double] =
map(nonNegativeInt)(i => i/(Int.MaxValue.toDouble + 1))
def map2[A,B,C](ra: Rand[A], rb: Rand[B])(f: (A, B) => C): Rand[C] =
/* my wrong answer:
rng => {
val (a, rnga) = ra(rng)
val (b, rngb) = rb(rng)
(f(a, b), rnga)
}
*/
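      // (The commented attempt above is wrong: it feeds the same `rng` to both
      //  actions, so `a` and `b` are derived from identical state, and it also
      //  discards `rngb`. The version below threads the state through instead.)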
rng => {
val (a, r1) = ra(rng)
val (b, r2) = rb(r1)
(f(a, b), r2)
}
// copied from fpinscala GitHub
def sequence[A](fs: List[Rand[A]]): Rand[List[A]] =
fs.foldRight(unit(List[A]()))((sa, acc) => map2(sa, acc)(_ :: _))
val int: Rand[Int] = _.nextInt
def _ints(count: Int): Rand[List[Int]] =
sequence(List.fill(count)(int))
def nonNegativeLessThan(n: Int): Rand[Int] = { rng =>
val (i, rng2) = nonNegativeInt(rng)
val mod = i % n
if (i + (n-1) - mod >= 0)
(mod, rng2)
        else nonNegativeLessThan(n)(rng2)
}
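    // Why the guard above: `i + (n-1) - mod` only goes negative (overflows) when
    // `i` lands in the final, partial block of size n just below Int.MaxValue;
    // keeping those values would skew results toward the smaller residues, so
    // the generator retries from the next RNG state instead.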
def flatMap[A,B](f: Rand[A])(g: A => Rand[B]): Rand[B] =
rng => {
val (a, r1) = f(rng)
g(a)(r1)
}
def nonNegativeLessThanViaFlatMap(n: Int): Rand[Int] =
flatMap(nonNegativeInt) { i =>
val mod = i % n
if (i + (n-1) - mod >= 0) unit(mod) else nonNegativeLessThan(n)
}
def mapFM[A,B](s: Rand[A])(f: A => B): Rand[B] =
flatMap(s)(a => unit(f(a)))
// copied from fpinscala GitHub
// TODO understand it
def map2FM[A,B,C](ra: Rand[A], rb: Rand[B])(f: (A, B) => C): Rand[C] =
flatMap(ra)(a => map(rb)(b => f(a, b)))
}
import State._
case class State[S, +A](run: S => (A, S)) {
def map[B](f: A => B): State[S, B] =
State(s => {
val (a, s1) = run(s)
(f(a), s1)
})
def mapFM[B](f: A => B): State[S, B] =
flatMap(a => unit(f(a)))
def map2[B, C](sb: State[S, B])(f: (A, B) => C): State[S, C] =
State(s => {
val (a, s1) = run(s)
val (b, s2) = sb.run(s1)
(f(a, b), s2)
})
def map2FM[B, C](sb: State[S, B])(f: (A, B) => C): State[S, C] =
flatMap(a => sb.map(b => f(a, b)))
def flatMap[B](f: A => State[S, B]): State[S, B] =
State(s => {
val (a, s1) = run(s)
f(a).run(s1)
})
}
object State {
type Rand[A] = State[RNG, A]
def unit[S, A](a: A): State[S, A] =
State(s => (a, s))
// TODO copied from github, understand it
// This implementation uses a loop internally and is the same recursion
// pattern as a left fold. It is quite common with left folds to build
// up a list in reverse order, then reverse it at the end.
// (We could also use a collection.mutable.ListBuffer internally.)
def sequence[S, A](sas: List[State[S, A]]): State[S, List[A]] = {
def go(s: S, actions: List[State[S,A]], acc: List[A]): (List[A],S) =
actions match {
case Nil => (acc.reverse,s)
case h :: t => h.run(s) match { case (a,s2) => go(s2, t, a :: acc) }
}
State((s: S) => go(s,sas,List()))
}
// TODO copied from github, understand it
// We can also write the loop using a left fold. This is tail recursive like the
// previous solution, but it reverses the list _before_ folding it instead of after.
// You might think that this is slower than the `foldRight` solution since it
// walks over the list twice, but it's actually faster! The `foldRight` solution
// technically has to also walk the list twice, since it has to unravel the call
// stack, not being tail recursive. And the call stack will be as tall as the list
// is long.
def sequenceViaFoldLeft[S,A](l: List[State[S, A]]): State[S, List[A]] =
l.reverse.foldLeft(unit[S, List[A]](List()))((acc, f) => f.map2(acc)( _ :: _ ))
def modify[S](f: S => S): State[S, Unit] = for {
s <- get // Gets the current state and assigns it to `s`.
_ <- set(f(s)) // Sets the new state to `f` applied to `s`.
} yield ()
def get[S]: State[S, S] = State(s => (s, s))
def set[S](s: S): State[S, Unit] = State(_ => ((), s))
}
sealed trait Input
case object Coin extends Input
case object Turn extends Input
case class Machine(locked: Boolean, candies: Int, coins: Int)
// TODO copied from github, understand it
object Candy {
def simulateMachine(inputs: List[Input]): State[Machine, (Int, Int)] = for {
_ <- sequence(inputs.map(i => modify((s: Machine) => (i, s) match {
case (_, Machine(_, 0, _)) => s
case (Coin, Machine(false, _, _)) => s
case (Turn, Machine(true, _, _)) => s
case (Coin, Machine(true, candy, coin)) =>
Machine(false, candy, coin + 1)
case (Turn, Machine(false, candy, coin)) =>
Machine(true, candy - 1, coin)
})))
s <- get
} yield (s.coins, s.candies)
}
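  // Illustrative check of the Ex 6.11 example from the exercise text (a
  // hypothetical snippet, not part of the original answers): starting from
  // Machine(locked = true, candies = 5, coins = 10), four successful purchases
  // should yield (coins, candies) == (14, 1).
  //   val ((coins, candies), _) =
  //     Candy.simulateMachine(List.fill(4)(List(Coin, Turn)).flatten)
  //       .run(Machine(locked = true, candies = 5, coins = 10))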
def main(args: Array[String]): Unit = {
val rng = SimpleRNG(1)
// Ex 6.1
assert(RNG.nonNegativeInt(rng)._1 > 0, "nonNegativeInt test case 1")
assert(RNG.nonNegativeInt(rng)._1 == RNG.nonNegativeInt(rng)._1, "nonNegativeInt test case 2")
// Ex 6.2
assert(RNG.double(rng)._1 >= 0 && RNG.double(rng)._1 < 1, "double test case 1")
assert(RNG.double(rng)._1 == RNG.double(rng)._1, "double test case 2")
println("All tests finished.")
// Ex 6.3
assert(RNG.intDouble(rng)._1._1 == RNG.doubleInt(rng)._1._2, "intDouble/doubleInt test case 1")
assert(RNG.intDouble(rng)._1._2 == RNG.doubleInt(rng)._1._1, "intDouble/doubleInt test case 2")
assert(RNG.double(RNG.double(RNG.double(rng)._2)._2)._1 == RNG.double3(rng)._1._3, "double3 test case 1")
// Ex 6.4
assert(RNG.ints(10)(rng)._1.length == 10, "ints test case 1")
// Ex 6.5
assert(RNG.doubleViaMap(rng) == RNG.double(rng), "doubleViaMap test case 1")
// Ex 6.6
assert(RNG.map2(RNG.nonNegativeEven, RNG.nonNegativeEven)(_ + _)(rng)._1 % 2 == 0, "map2 test case 1")
// Ex 6.7
assert(RNG.sequence(List[Rand[Double]](RNG.double, RNG.double, RNG.double))(rng)._1(2) == RNG.double3(rng)._1._3, "sequence test case 1")
// Ex 6.8
assert(RNG.nonNegativeLessThan(10)(rng)._1 == RNG.nonNegativeLessThanViaFlatMap(10)(rng)._1, "flatMap test case 1")
// Ex 6.9
assert(RNG.mapFM(RNG.double)(_ * 2)(rng) == RNG.map(RNG.double)(_ * 2)(rng), "mapFM test case 1")
assert(RNG.map2FM(RNG.double, RNG.nonNegativeEven)(_ * _)(rng) == RNG.map2(RNG.double, RNG.nonNegativeEven)(_ * _)(rng), "map2FM test case 1")
// Ex 6.10
// ignored
}
}
| zenja/exercise-fp-in-scala | src/ch6_purely_functional_state/Ex6_State.scala | Scala | mit | 11,851 |
package com.wavesplatform.state.patch
import com.wavesplatform.account.PublicKey
import com.wavesplatform.common.state.ByteStr
import com.wavesplatform.common.utils._
import com.wavesplatform.db.WithDomain
import com.wavesplatform.db.WithState.AddrWithBalance
import com.wavesplatform.features.BlockchainFeatures
import com.wavesplatform.history.Domain
import com.wavesplatform.settings.WavesSettings
import com.wavesplatform.test.FlatSpec
import com.wavesplatform.transaction.TxHelpers
import org.scalamock.scalatest.PathMockFactory
import org.scalatest.BeforeAndAfterAll
class CancelLeasesToDisabledAliasesSpec extends FlatSpec with PathMockFactory with WithDomain with BeforeAndAfterAll {
val MainnetSettings: WavesSettings = {
import SettingsFromDefaultConfig.blockchainSettings.{functionalitySettings => fs}
SettingsFromDefaultConfig.copy(
blockchainSettings = SettingsFromDefaultConfig.blockchainSettings.copy(
addressSchemeCharacter = 'W',
functionalitySettings = fs.copy(
preActivatedFeatures = fs.preActivatedFeatures ++ Map(
BlockchainFeatures.NG.id -> 0,
BlockchainFeatures.SmartAccounts.id -> 0,
BlockchainFeatures.SynchronousCalls.id -> 2
)
)
)
)
}
"CancelLeasesToDisabledAliases" should "be applied only once" in
withDomain(MainnetSettings, AddrWithBalance.enoughBalances(TxHelpers.defaultSigner)) { d =>
testLeaseBalance(d).out shouldBe 0L
d.appendKeyBlock()
testLeaseBalance(d).out shouldBe -2562590821L
d.appendMicroBlock(TxHelpers.transfer())
d.appendMicroBlock(TxHelpers.transfer())
d.appendMicroBlock(TxHelpers.transfer())
d.appendKeyBlock()
testLeaseBalance(d).out shouldBe -2562590821L
}
it should "be applied on extension apply" in
withDomain(MainnetSettings, AddrWithBalance.enoughBalances(TxHelpers.defaultSigner)) { d =>
testLeaseBalance(d).out shouldBe 0L
d.appendBlock()
testLeaseBalance(d).out shouldBe -2562590821L
d.appendBlock()
testLeaseBalance(d).out shouldBe -2562590821L
d.appendBlock()
testLeaseBalance(d).out shouldBe -2562590821L
}
private def testLeaseBalance(d: Domain) = {
d.blockchain.leaseBalance(PublicKey(ByteStr(Base58.decode("6NxhjzayDTd52MJL2r6XupGDb7E1xQW7QppSPqo63gsx"))).toAddress)
}
}
| wavesplatform/Waves | node/src/test/scala/com/wavesplatform/state/patch/CancelLeasesToDisabledAliasesSpec.scala | Scala | mit | 2,388 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// scalastyle:off println
package org.apache.spark.examples.ml
// $example on$
import org.apache.spark.ml.classification.NaiveBayes
import org.apache.spark.ml.evaluation.MulticlassClassificationEvaluator
// $example off$
import org.apache.spark.sql.SparkSession
object NaiveBayesExample {
def main(args: Array[String]): Unit = {
val spark = SparkSession
.builder
.appName("NaiveBayesExample")
.getOrCreate()
// $example on$
// Load the data stored in LIBSVM format as a DataFrame.
val data = spark.read.format("libsvm").load("data/mllib/sample_libsvm_data.txt")
// Split the data into training and test sets (30% held out for testing)
val Array(trainingData, testData) = data.randomSplit(Array(0.7, 0.3), seed = 1234L)
// Train a NaiveBayes model.
val model = new NaiveBayes()
.fit(trainingData)
// Select example rows to display.
val predictions = model.transform(testData)
predictions.show()
// Select (prediction, true label) and compute test error
val evaluator = new MulticlassClassificationEvaluator()
.setLabelCol("label")
.setPredictionCol("prediction")
.setMetricName("accuracy")
val accuracy = evaluator.evaluate(predictions)
println("Test set accuracy = " + accuracy)
// $example off$
spark.stop()
}
}
// scalastyle:on println
| alec-heif/MIT-Thesis | spark-bin/examples/src/main/scala/org/apache/spark/examples/ml/NaiveBayesExample.scala | Scala | mit | 2,162 |
package com.xah.chat.datamodel.tables
import android.database.sqlite.SQLiteDatabase
import java.sql.Timestamp
import android.provider.BaseColumns
import android.net.Uri
import com.xah.chat.datamodel.{TableHelper, xah}
import android.util.Log
import scala.language.implicitConversions
object MessageFields extends Enumeration {
type Field = Value
val _ID, ContactName, Message, Time, isSent = Value
val projection =
(for (v <- values) yield if (v == MessageFields._ID) BaseColumns._ID else v.toString).toArray
}
class Message(val Contact: String, val Message: String,
val TimeStamp: Timestamp, val isSent: Boolean)
object Messages {
val _ID = BaseColumns._ID
val _COUNT = BaseColumns._COUNT
val TABLE_NAME = "Messages"
final val CONTENT_URI = Uri.parse(s"content://${xah.AUTHORITY}/messages")
final val CONTENT_TYPE = "vnd.android.cursor.dir/vnd.xah.message"
final val CONTENT_ITEM_TYPE = "vnd.android.cursor.item/vnd.xah.message"
final val DEFAULT_SORT_ORDER = "Time DESC"
}
class MessagesHelper extends TableHelper {
def TAG = "com.xah.MessagesHelper"
def CreateStatement = s"""
create table ${Messages.TABLE_NAME} (
${BaseColumns._ID} integer primary key autoincrement,
${MessageFields.ContactName} Text,
${MessageFields.Message} Text,
${MessageFields.Time} long,
${MessageFields.isSent} Boolean
)"""
def onUpgrade(db: SQLiteDatabase, oldVersion: Int, newVersion: Int): Unit = {
db.execSQL(s"drop table if exists ${Messages.TABLE_NAME}")
onCreate(db)
}
} | lemonxah/xaHChat | src/main/scala/com/xah/chat/datamodel/tables/Messages.scala | Scala | mit | 1,569 |
package com.google.javascript.jscomp
import scalaxy.js._
import scala.collection.JavaConversions._
import com.google.javascript.rhino.Node
import com.google.javascript.rhino.JSTypeExpression
import com.google.javascript.rhino.jstype._
object ClosureCompilerUtils {
def defaultExterns: List[SourceFile] = CommandLineRunner.getDefaultExterns().toList
/**
* See node.js externs: https://github.com/dcodeIO/node.js-closure-compiler-externs
* Other extends: https://code.google.com/p/closure-compiler/wiki/ExternsForCommonLibraries
* And yet other ones: http://closureplease.com/externs/
*
* Automatic externs extractor:
* http://blog.dotnetwise.com/2009/11/closure-compiler-externs-extractor.html
*/
def scanExterns(externs: List[SourceFile] = defaultExterns): ClosureCompiler = {
val code = "window.console.loge('yay');"
val compiler = new Compiler
val options = new CompilerOptions()
options.checkTypes = true
options.inferTypes = true
//val externs = SourceFile.fromCode("externs.js", "") :: Nil
// val externs = CommandLineRunner.getDefaultExterns()
val inputs = java.util.Collections.singletonList(SourceFile.fromCode("input.js", code))
// compile() returns a Result, but it is not needed here.
compiler.compile(externs, inputs, options)
val scopeCreator = new TypedScopeCreator(compiler)
val scope = scopeCreator.createScope(compiler.getRoot, null)
new ClosureCompiler(compiler, scope)
}
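  // Illustrative usage (hypothetical): the dummy compile above is relatively
  // costly, so a caller would typically scan the externs once and reuse the
  // resulting wrapper, e.g.
  //   lazy val defaultScan: ClosureCompiler = ClosureCompilerUtils.scanExterns()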
}
| ochafik/ScalaScript | Generator/src/main/scala/scalaxy/js/externs/ClosureCompilerUtils.scala | Scala | bsd-3-clause | 1,484 |
/*
* Copyright 2008-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package net.liftweb
package util
import common._
/**
* This trait is used to represent a PartialFunction with additional
* associated metadata, a name that allows the NamedPartialFunction
* to be looked up dynamically.
*/
trait NamedPartialFunction[-A, +B] extends PartialFunction[A, B] {
def functionName: String
}
/**
* This class is the base implementation of the NamedPartialFunction trait.
*/
class NamedPF[-A, +B](name: String, f: PartialFunction[A, B]) extends NamedPartialFunction[A, B] {
override def isDefinedAt(x: A): Boolean = f.isDefinedAt(x)
override def apply(x: A): B = f(x)
def functionName = name
}
object NamedPF {
/**
* Curried constructor for NamedPF
*/
def apply[A, B](name: String)(f: PartialFunction[A,B]):
NamedPartialFunction[A,B] = new NamedPF(name, f)
/**
* Find the first partial function in the specified sequence that
* is defined at the given value.
*
* @param value the value to use to test each PartialFunction
* @param lst the sequence to search for a PartialFunction defined at <code>value</code>
* @return a Full Box containing the PartialFunction if found,
   * or Empty otherwise.
*/
def find[A, B](value: A, lst: Seq[PartialFunction[A, B]]):
Box[PartialFunction[A, B]] = lst.find(_.isDefinedAt(value))
/**
* Determine whether any PartialFunction in the specified sequence
* is defined at the specified value.
*
* @param value the value to use to test each PartialFunction
* @param lst the sequence to search for a PartialFunction defined at <code>value</code>
* @return whether such a PartialFunction is found
*/
def isDefinedAt[A, B](value: A, lst: Seq[PartialFunction[A, B]]): Boolean =
find(value, lst).isDefined
/**
* Find the first PartialFunction in the specified sequence that is defined
* at the specified value, apply it to that value and return the result
* or throw a MatchError on failure to find such a function.
*
* @param value the value to use to test each PartialFunction
* @param lst the sequence to search for a PartialFunction defined at <code>value</code>
* @return the result of applying any such PartialFunction to the specified value.
* @throws MatchError on failure to find such a PartialFunction
*/
def apply[A, B](value: A, lst: Seq[PartialFunction[A, B]]): B =
find(value, lst) match {
case Full(pf) => pf.apply(value)
case _ => throw new MatchError(value)
}
/**
* Find the first PartialFunction in the specified sequence that is defined
* at the specified value, apply it to that value and return the result
* in a Full Box if found; return Empty otherwise
*
* @param value the value to use to test each PartialFunction
* @param lst the sequence to search for a PartialFunction defined at <code>value</code>
* @return a Full Box containing the result of applying the first PartialFunction which is
* defined at the specified value to that value, or Empty if no such PartialFunction is found
*/
def applyBox[A, B](value: A, lst: Seq[PartialFunction[A, B]]): Box[B] =
find(value, lst).map(_.apply(value))
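  // Illustrative usage sketch (hypothetical, not from the original source):
  // dispatch a value through a list of named handlers and fall back to a default.
  //   val handlers: List[NamedPartialFunction[String, String]] =
  //     List(NamedPF("ping") { case "ping" => "pong" })
  //   val reply = NamedPF.applyBox("ping", handlers) openOr "unhandled"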
}
| lzpfmh/framework-2 | core/util/src/main/scala/net/liftweb/util/NamedPartialFunction.scala | Scala | apache-2.0 | 3,757 |
package io.udash.web.homepage.styles.partials
import io.udash.css.{CssBase, CssStyle}
import io.udash.web.commons.styles.utils._
import scala.concurrent.duration.DurationInt
import scala.language.postfixOps
object ButtonsStyle extends CssBase {
import dsl._
val btn: CssStyle = style(
CommonStyleUtils.transition(),
position.relative,
cursor.pointer,
whiteSpace.nowrap,
textAlign.center,
userSelect.none,
textDecoration := none,
overflow.hidden,
&.hover {
textDecoration := none
}
)
private val btnDefaultLine = mixin(
content.string(" "),
position.absolute,
backgroundColor.white
)
private val btnDefaultLineHor = mixin(
CommonStyleUtils.transition(),
left(`0`),
width(100 %%),
height(2 px),
transform := "scaleX(0)"
)
private val btnDefaultLineVert = mixin(
CommonStyleUtils.transition(),
transitionDelay(250 milliseconds),
width(2 px),
height(100 %%),
top(`0`),
transform := "scaleY(0)"
)
val btnDefault: CssStyle = style(
btn,
UdashFonts.roboto(FontWeight.Bold),
display.inlineBlock,
color.white,
fontSize(1.75 rem),
color.white,
backgroundColor(StyleConstants.Colors.Red),
&.before (
btnDefaultLine,
btnDefaultLineHor,
top(`0`),
transformOrigin := "0 50%"
),
&.after (
btnDefaultLine,
btnDefaultLineHor,
bottom(`0`),
transformOrigin := "100% 50%"
),
MediaQueries.desktop(
&.hover(
&.before (
transform := "scaleX(1)"
),
&.after (
transform := "scaleX(1)"
),
unsafeChild(s".${btnDefaultInner.className}") (
&.before (
transform := "scaleY(1)"
),
&.after (
transform := "scaleY(1)"
)
)
)
),
MediaQueries.phone(
width(100 %%),
textAlign.center
)
)
lazy val btnDefaultInner: CssStyle = style(
padding(.625 rem, 3.125 rem,.8125 rem, 3.125 rem),
transform := "translate3d(0,0,0)",
&.before (
btnDefaultLine,
btnDefaultLineVert,
left(`0`),
transformOrigin := "50% 100%"
),
&.after (
btnDefaultLine,
btnDefaultLineVert,
right(`0`),
transformOrigin := "50% 0"
),
MediaQueries.phone(
paddingLeft(`0`),
paddingRight(`0`)
)
)
val btnDefaultBlack: CssStyle = style(
&.before (
backgroundColor.black
),
&.after (
backgroundColor.black
)
)
val btnDefaultInnerBlack: CssStyle = style(
&.before (
backgroundColor.black
),
&.after (
backgroundColor.black
)
)
}
| UdashFramework/udash-core | guide/shared/src/main/scala/io/udash/web/homepage/styles/partials/ButtonsStyle.scala | Scala | apache-2.0 | 2,719 |
package play.api.cache.redis.configuration
import play.api.cache.redis._
import org.specs2.mutable.Specification
class RedisHostSpec extends Specification {
import Implicits._
private implicit val loader = RedisHost
"host with database and password" in new WithConfiguration(
"""
|play.cache.redis {
| host: localhost
| port: 6378
| database: 1
| password: something
|}
"""
) {
configuration.get[RedisHost]("play.cache.redis") mustEqual RedisHost("localhost", 6378, database = 1, password = "something")
}
"host without database and password" in new WithConfiguration(
"""
|play.cache.redis {
| host: localhost
| port: 6378
|}
"""
) {
configuration.get[RedisHost]("play.cache.redis") mustEqual RedisHost("localhost", 6378, database = 0)
}
"host from connection string" in {
RedisHost.fromConnectionString("redis://redis:something@localhost:6378") mustEqual RedisHost("localhost", 6378, password = "something")
RedisHost.fromConnectionString("redis://localhost:6378") mustEqual RedisHost("localhost", 6378)
// test invalid string
RedisHost.fromConnectionString("redis:/localhost:6378") must throwA[IllegalArgumentException]
}
}
| KarelCemus/play-redis | src/test/scala/play/api/cache/redis/configuration/RedisHostSpec.scala | Scala | mpl-2.0 | 1,276 |
/*
Stratagem is a model checker for transition systems described using rewriting
rules and strategies.
Copyright (C) 2013 - SMV@Geneva University.
Program written by Edmundo Lopez Bobeda <edmundo [at] lopezbobeda.net>.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package ch.unige.cui.smv.stratagem.petrinets
import ch.unige.cui.smv.stratagem.adt.PredefADT
/**
 * This object defines basic ADTs for Petri nets.
* @author mundacho
*
*/
object PetriNetADT {
val PLACE_SORT_NAME = "place"
val ENDPLACE = "endplace"
lazy val basicPetriNetSignature = PredefADT.basicNatSignature
.withSort(PLACE_SORT_NAME)
.withGenerator(ENDPLACE, PLACE_SORT_NAME)
} | didierbuchs/oldstratagem | src/main/scala/ch/unige/cui/smv/stratagem/petrinets/PetriNetADT.scala | Scala | gpl-2.0 | 1,298 |
package scala.util.control
/** Library implementation of nonlocal return.
*
* Usage:
*
* import scala.util.control.NonLocalReturns._
*
* returning { ... throwReturn(x) ... }
*/
object NonLocalReturns {
class ReturnThrowable[T] extends ControlThrowable {
private var myResult: T = _
def throwReturn(result: T): Nothing = {
myResult = result
throw this
}
def result: T = myResult
}
/** Performs a nonlocal return by throwing an exception. */
def throwReturn[T](result: T)(using returner: ReturnThrowable[T]): Nothing =
returner.throwReturn(result)
/** Enable nonlocal returns in `op`. */
def returning[T](op: ReturnThrowable[T] ?=> T): T = {
val returner = new ReturnThrowable[T]
try op(using returner)
catch {
case ex: ReturnThrowable[T] =>
if (ex.eq(returner)) ex.result else throw ex
}
}
}
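// Illustrative usage sketch, added for clarity (not part of the original dotty source): it relies
// only on the `returning`/`throwReturn` API defined above; the method name and data are made up.
object NonLocalReturnsExample {
  import NonLocalReturns._
  /** Index of the first negative element of `xs`, or -1 if there is none. */
  def firstNegativeIndex(xs: List[Int]): Int =
    returning {
      for ((x, i) <- xs.zipWithIndex)
        if (x < 0) throwReturn(i)
      -1
    }
}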
| lampepfl/dotty | library/src/scala/util/control/NonLocalReturns.scala | Scala | apache-2.0 | 885 |
package com.github.basp1.pulsar
import akka.actor.{ActorRef, ActorSystem}
import akka.pattern.ask
import com.github.basp1.pulsar.Worker.{Stop, Time}
import com.github.basp1.pulsar.radix.Chunk
import org.junit.runner.RunWith
import org.scalatest._
import org.scalatest.junit._
import org.scalatest.prop._
import scala.concurrent.Await
import scala.concurrent.duration._
@RunWith(classOf[JUnitRunner])
class WorkerTests extends FunSuite with Checkers {
val system: ActorSystem = ActorSystem("main")
test("Stop") {
val worker: ActorRef = system.actorOf(Worker.props())
worker ! Stop
}
test("Append") {
val worker: ActorRef = system.actorOf(Worker.props())
worker ! new Worker.Append(new Track(1L, 1001, 1, 0))
worker ! new Worker.Append(new Track(1L, 1002, 2, 0))
worker ! new Worker.Append(new Track(1L, 1003, 3, 0))
worker ! Stop
}
test("Time") {
val worker: ActorRef = system.actorOf(Worker.props())
worker ! new Worker.Append(new Track(1L, 1001, 1, 0))
worker ! new Worker.Append(new Track(1L, 1002, 2, 0))
worker ! new Worker.Append(new Track(1L, 1003, 3, 0))
worker ! Time(1003)
val track = new Track(1L, 1003, 3, 0)
assert(1L == track.owner.id)
assert(1003 == track.time)
assert(3 == track.value)
worker ! Stop
}
}
| basp1/pulsar | src/test/scala/WorkerTests.scala | Scala | mit | 1,315 |
package sbtrelease
import sbt.State
// State monad
trait Step[A] extends (State => (State,A)) { self =>
def apply(s: State) : (State, A)
def map[B](f: A => B) : Step[B] =
new Step[B] {
def apply(s: State) : (State, B) = {
val (s1,a) = self.apply(s)
(s1,f(a))
}
}
def flatMap[B](f: A => Step[B]) : Step[B] =
new Step[B] {
def apply(s: State) = {
val (s1,a) = self.apply(s)
f(a).apply(s1)
}
}
}
object Step {
def unit(f: State => State) : Step[Unit] = new Step[Unit] {
def apply(s: State) = (f(s),())
}
def sideEffect(f: State => Unit) : Step[Unit] = new Step[Unit] {
def apply(s: State) = (s,f(s))
}
def apply[A](f: State => (State,A)) : Step[A] = new Step[A] {
def apply(s: State) = f(s)
}
}
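// Illustrative sketch, added for clarity (not part of the original source): Step values compose
// with for-comprehensions because Step defines map and flatMap. The log message is made up and
// `State#log` is assumed to be available as in stock sbt.
object StepExample {
  val logAndKeepState: Step[Unit] =
    for {
      _ <- Step.sideEffect(st => st.log.info("running release step"))
      _ <- Step.unit(identity)
    } yield ()
}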
| lancegatlin/sbt-release-gitflow | src/main/scala/sbtrelease/Step.scala | Scala | mit | 800 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License
*/
package org.apache.toree.utils
import org.apache.spark.sql.{Dataset, Row}
import org.apache.toree.plugins.Plugin
import play.api.libs.json.{JsObject, Json}
import scala.util.{Failure, Try}
import org.apache.toree.plugins.annotations.Init
class DataFrameConverter extends Plugin with LogLike {
@Init def init() = {
register(this)
}
def convert(
df: Dataset[Row], outputType: String, limit: Int = 10
): Try[String] = {
Try(
outputType.toLowerCase() match {
case "html" =>
convertToHtml(df = df, limit = limit)
case "json" =>
convertToJson(df = df, limit = limit)
case "csv" =>
convertToCsv(df = df, limit = limit)
}
)
}
private def convertToHtml(df: Dataset[Row], limit: Int = 10): String = {
import df.sqlContext.implicits._
val columnFields = df.schema.fieldNames.map(columnName => {
s"<th>${columnName}</th>"
}).reduce(_ + _)
val columns = s"<tr>${columnFields}</tr>"
val rows = df.rdd.map(row => {
val fieldValues = row.toSeq.map(field => {
s"<td>${field.toString}</td>"
}).reduce(_ + _)
s"<tr>${fieldValues}</tr>"
}).take(limit).reduce(_ + _)
s"<table>${columns}${rows}</table>"
}
private def convertToJson(df: Dataset[Row], limit: Int = 10): String = {
import df.sqlContext.implicits._
val schema = Json.toJson(df.schema.fieldNames)
val transformed = df.rdd.map(row =>
row.toSeq.map(_.toString).toArray)
val rows = transformed.take(limit)
JsObject(Seq(
"columns" -> schema,
"rows" -> Json.toJson(rows)
)).toString()
}
private def convertToCsv(df: Dataset[Row], limit: Int = 10): String = {
import df.sqlContext.implicits._
val headers = df.schema.fieldNames.reduce(_ + "," + _)
val rows = df.rdd.map(row => {
row.toSeq.map(field => field.toString).reduce(_ + "," + _)
}).take(limit).reduce(_ + "\\n" + _)
s"${headers}\\n${rows}"
}
} | chipsenkbeil/incubator-toree | kernel/src/main/scala/org/apache/toree/utils/DataFrameConverter.scala | Scala | apache-2.0 | 2,823 |
package org.scaladebugger.test.info
/**
* Provides test of examining class information.
*
* @note Should have a class name of org.scaladebugger.test.info.Classes
*/
object Classes {
def main(args: Array[String]): Unit = {
// Ensure that all desired classes are loaded
val c1 = new ExternalNormalClass
val c2 = ExternalCaseClass(0, "")
val c3 = ExternalObjectClass
val c4 = ExternalCaseObjectClass
val c5 = new InternalNormalClass
val c6 = InternalCaseClass(0, "")
val c7 = InternalObjectClass
val c8 = InternalCaseObjectClass
while(true) Thread.sleep(1000)
}
class InternalNormalClass
case class InternalCaseClass(x: Int, y: String)
object InternalObjectClass
case object InternalCaseObjectClass
}
class ExternalNormalClass {
def method1(): Unit = {}
def method2: String = "some value"
def method3(x: Int): Int = x
}
case class ExternalCaseClass(x: Int, y: String)
object ExternalObjectClass
case object ExternalCaseObjectClass
| chipsenkbeil/scala-debugger | scala-debugger-test/src/main/scala/org/scaladebugger/test/info/Classes.scala | Scala | apache-2.0 | 1,002 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.bwsw.sj.common.si
import com.bwsw.sj.common.dal.repository.GenericMongoRepository
import com.bwsw.sj.common.si.result.{CreationResult, DeletionResult}
/**
* Provides methods to access entities in [[com.bwsw.sj.common.dal.repository.GenericMongoRepository]]
*
* @tparam M type of entities
* @tparam T type of domain entities, storing in [[com.bwsw.sj.common.dal.repository.GenericMongoRepository]]
*/
trait ServiceInterface[M, T] {
protected val entityRepository: GenericMongoRepository[T]
/**
* Saves entity to [[entityRepository]]
*
* @param entity
*/
def create(entity: M): CreationResult
def getAll(): Seq[M]
def get(name: String): Option[M]
/**
* Deletes entity from [[entityRepository]] by name
*
* @param name name of entity
*/
def delete(name: String): DeletionResult
}
| bwsw/sj-platform | core/sj-common/src/main/scala/com/bwsw/sj/common/si/ServiceInterface.scala | Scala | apache-2.0 | 1,659 |
package io.github.raptros.bson
import com.mongodb.{BasicDBObject, DBObject}
/** Builders are all the things you need to build up DBObjects using available [[EncodeBson]]s and [[EncodeBsonField]]s. */
trait Builders {
/** apply this to a bunch of [[DBOKV]]s and get a DBObject.
* {{{
    *   DBO("k0" :> true, "k1" :> List(1, 2, 3), "k3" :> "a string")
* }}}
*/
object DBO {
def empty: DBObject= new BasicDBObject
def apply(tuples: DBOKV[_]*): DBObject = (tuples foldRight empty) { _ write _ }
}
/** this enables a syntax for constructing key-value pairs with values that can be written to a DBObject -
* i.e. because the value type has a [[EncodeBsonField]] instance
* {{{
* "string" :> <some encodable>
* }}}
* also, you can get an Option of a DBOKV:
* {{{
    *   "string" :?> Some(<encodable>) //gives you Some(DBOKV("string", <encodable>))
    *   "string" :?> None //gives you None
* }}}
*/
implicit class StringToDBOKV(k: String) {
def :>[V: EncodeBsonField](v: V): DBOKV[V] = DBOKV(k, v)
def :?>[V: EncodeBsonField](v: Option[V]): Option[DBOKV[V]] = v map { DBOKV(k, _) }
}
/** wraps up a key and a value that can be encoded as a field along with the [[EncodeBsonField]] that will encode it.
* used by the syntax provided by [[StringToDBOKV]].
*/
case class DBOKV[V](k: String, v: V)(implicit encode: EncodeBsonField[V]) {
/** uses the encoder for V to write k and v to the passed in `DBObject`, returning the same instance. */
def write(dbo: DBObject): DBObject = encode(dbo, k, v)
}
/** allows you to call asBson on any value that some [[EncodeBson]] instance applies to. */
implicit class ValueToBson[A](a: A) {
def asBson(implicit e: EncodeBson[A]): DBObject = e(a)
}
/** this permits a syntax for adding key-value pairs to `DBObject`s.
* @note the methods given by this class mutate the underlying `DBObject`.
* you should really only construct a `DBObject` right near where you will use it -
* don't let those mutable, untyped things spread around your codebase.
*/
implicit class DBOBuilder(dbo: DBObject) {
/** directly appends a key and value to the dbo, with a Unit return type; no fancy syntax here. */
def write[A](k: String, v: A)(implicit f: EncodeBsonField[A]): Unit = f.writeTo(dbo, k, v)
/** this appends a [[io.github.raptros.bson.Builders.DBOKV]] to a dbo, and returns the same dbo.
* {{{
* dbo +@+ ("k0" :> <encodable1>) +@+ ("k1" :> <encodable2>)
* }}}
*/
def +@+[A](kv: DBOKV[A]): DBObject = kv.write(dbo)
/** this appends multiple [[DBOKV]]s to a dbo.
* {{{
* dbo ++@++ List("k0" :> <encodable1>, "k1" :> <encodable2>) +@+ ("k2" :> <encodable2>)
* }}}
*/
def ++@++(kvs: Seq[DBOKV[_]]): DBObject = {
kvs foreach { _.write(dbo) }
dbo
}
/** this optionally appends optional [[DBOKV]]s to a dbo.
* {{{
      *   //this will end up adding the keys k0 (with the encoded value of encodable0) and k2, but not the key k1.
* dbo +?+ ("k0" :?> Some(<encodable0>)) +?+ ("k1" :?> None) +@+ ("k2" :> <encodable1>)
* }}}
*/
def +?+[A](okv: Option[DBOKV[A]]): DBObject = (okv fold dbo) { _.write(dbo) }
}
}
object Builders extends Builders
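// Illustrative usage sketch, added for clarity (not part of the original source). The
// EncodeBsonField instances are taken as implicit parameters because this file does not show
// which codec imports the library provides; the keys and values below are made up.
object BuildersExample {
  import Builders._
  def sketch(implicit str: EncodeBsonField[String], int: EncodeBsonField[Int]): DBObject =
    DBO("name" :> "alice", "age" :> 30) +?+ ("nickname" :?> Option.empty[String])
}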
| raptros/the-bson | core/src/main/scala/io/github/raptros/bson/Builders.scala | Scala | bsd-3-clause | 3,317 |
/*
* Copyright (c) 2011-2017 Interfaculty Department of Geoinformatics, University of
* Salzburg (Z_GIS) & Institute of Geological and Nuclear Sciences Limited (GNS Science)
* in the SMART Aquifer Characterisation (SAC) programme funded by the New Zealand
* Ministry of Business, Innovation and Employment (MBIE)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models.db
import java.sql.Connection
import play.api.db.Database
/**
* trait to test with database mock
*/
trait AbstractDatabaseSessionHolder {
val db: Database
def viaConnection[T](func: Connection => T) : T
def viaTransaction[T](func: Connection => T) : T
}
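// Illustrative sketch, added for clarity (not part of the original source): a holder backed
// directly by Play's Database API, assuming its standard withConnection/withTransaction methods.
class SimpleDatabaseSessionHolder(val db: Database) extends AbstractDatabaseSessionHolder {
  def viaConnection[T](func: Connection => T): T = db.withConnection(func)
  def viaTransaction[T](func: Connection => T): T = db.withTransaction(func)
}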
| ZGIS/smart-portal-backend | app/models/db/AbstractDatabaseSessionHolder.scala | Scala | apache-2.0 | 1,164 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.ml.outlier
/** An implementation of the Stochastic Outlier Selection algorithm by Jeroen Jansen
*
* For more information about SOS, see https://github.com/jeroenjanssens/sos
* J.H.M. Janssens, F. Huszar, E.O. Postma, and H.J. van den Herik. Stochastic
* Outlier Selection. Technical Report TiCC TR 2012-001, Tilburg University,
* Tilburg, the Netherlands, 2012.
*
* @example
* {{{
* val data = env.fromCollection(List(
* LabeledVector(0.0, DenseVector(1.0, 1.0)),
* LabeledVector(1.0, DenseVector(2.0, 1.0)),
* LabeledVector(2.0, DenseVector(1.0, 2.0)),
* LabeledVector(3.0, DenseVector(2.0, 2.0)),
* LabeledVector(4.0, DenseVector(5.0, 8.0)) // The outlier!
* ))
*
* val sos = new StochasticOutlierSelection().setPerplexity(3)
*
* val outputVector = sos
* .transform(data)
* .collect()
*
* val expectedOutputVector = Map(
* 0 -> 0.2790094479202896,
* 1 -> 0.25775014551682535,
* 2 -> 0.22136130977995766,
* 3 -> 0.12707053787018444,
* 4 -> 0.9922779902453757 // The outlier!
* )
*
* outputVector.foreach(output => expectedOutputVector(output._1) should be(output._2))
* }}}
*
* =Parameters=
*
* - [[org.apache.flink.ml.outlier.StochasticOutlierSelection.Perplexity]]:
* Perplexity can be interpreted as the k in k-nearest neighbor algorithms. The difference is
  * that in SOS being a neighbor is not a binary property, but a probabilistic one, and is therefore expressed as
* a real number. Must be between 1 and n-1, where n is the number of points.
* (Default value: '''30''')
*
* - [[org.apache.flink.ml.outlier.StochasticOutlierSelection.ErrorTolerance]]:
* The accepted error tolerance when computing the perplexity. When increasing this number, it
* will sacrifice accuracy in return for reduced computational time.
* (Default value: '''1e-20''')
*
* - [[org.apache.flink.ml.outlier.StochasticOutlierSelection.MaxIterations]]:
* The maximum number of iterations to perform to constrain the computational time.
* (Default value: '''5000''')
*/
import breeze.linalg.functions.euclideanDistance
import breeze.linalg.{sum, DenseVector => BreezeDenseVector, Vector => BreezeVector}
import org.apache.flink.api.common.operators.Order
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.api.scala._
import org.apache.flink.api.scala.utils._
import org.apache.flink.ml.common.{LabeledVector, Parameter, ParameterMap, WithParameters}
import org.apache.flink.ml.math.Breeze._
import org.apache.flink.ml.math.{BreezeVectorConverter, Vector}
import org.apache.flink.ml.pipeline.{TransformDataSetOperation, Transformer}
import scala.language.implicitConversions
import scala.reflect.ClassTag
class StochasticOutlierSelection extends Transformer[StochasticOutlierSelection] {
import StochasticOutlierSelection._
/** Sets the perplexity of the outlier selection algorithm, can be seen as the k of kNN
* For more information, please read the Stochastic Outlier Selection algorithm technical paper.
*
* @param perplexity the perplexity of the affinity fit
* @return
*/
def setPerplexity(perplexity: Double): StochasticOutlierSelection = {
require(perplexity >= 1, "Perplexity must be at least one.")
parameters.add(Perplexity, perplexity)
this
}
/** The accepted error tolerance to reduce computational time when approximating the affinity.
*
* @param errorTolerance the accepted error tolerance with respect to the affinity
* @return
*/
def setErrorTolerance(errorTolerance: Double): StochasticOutlierSelection = {
require(errorTolerance >= 0, "Error tolerance cannot be negative.")
parameters.add(ErrorTolerance, errorTolerance)
this
}
/** The maximum number of iterations to approximate the affinity of the algorithm.
*
* @param maxIterations the maximum number of iterations.
* @return
*/
def setMaxIterations(maxIterations: Int): StochasticOutlierSelection = {
require(maxIterations > 0, "Maximum iterations must be positive.")
parameters.add(MaxIterations, maxIterations)
this
}
}
object StochasticOutlierSelection extends WithParameters {
// ========================================= Parameters ==========================================
case object Perplexity extends Parameter[Double] {
val defaultValue: Option[Double] = Some(30)
}
case object ErrorTolerance extends Parameter[Double] {
val defaultValue: Option[Double] = Some(1e-20)
}
case object MaxIterations extends Parameter[Int] {
val defaultValue: Option[Int] = Some(5000)
}
// ==================================== Factory methods ==========================================
def apply(): StochasticOutlierSelection = {
new StochasticOutlierSelection()
}
// ===================================== Operations ==============================================
case class BreezeLabeledVector(idx: Int, data: BreezeVector[Double])
implicit val transformLabeledVectors = {
new TransformDataSetOperation[StochasticOutlierSelection, LabeledVector, (Int, Double)] {
/** Overrides the method of the parent class and applies the stochastic outlier selection
* algorithm.
*
* @param instance Instance of the class
* @param transformParameters The user defined parameters of the algorithm
* @param input A data set which consists of all the LabeledVectors, which should have an
* index or unique integer label as vector.
* @return The outlierness of the vectors compared to each other
*/
override def transformDataSet(instance: StochasticOutlierSelection,
transformParameters: ParameterMap,
input: DataSet[LabeledVector]): DataSet[(Int, Double)] = {
val resultingParameters = instance.parameters ++ transformParameters
val vectorsWithIndex = input.map(labeledVector => {
BreezeLabeledVector(labeledVector.label.toInt, labeledVector.vector.asBreeze)
})
// Don't map back to a labeled-vector since the output of the algorithm is
        // a single double instead of a vector
outlierSelection(vectorsWithIndex, resultingParameters)
}
}
}
/** [[TransformDataSetOperation]] applies the stochastic outlier selection algorithm on a
* [[Vector]] which will transform the high-dimensional input to a single Double output.
*
* @tparam T Type of the input and output data which has to be a subtype of [[Vector]]
    * @return [[TransformDataSetOperation]] a single double which represents the outlierness of
* the input vectors, where the output is in [0, 1]
*/
implicit def transformVectors[T <: Vector : BreezeVectorConverter : TypeInformation : ClassTag]
= {
new TransformDataSetOperation[StochasticOutlierSelection, T, Double] {
override def transformDataSet(instance: StochasticOutlierSelection,
transformParameters: ParameterMap,
input: DataSet[T]): DataSet[Double] = {
val resultingParameters = instance.parameters ++ transformParameters
// Map to the right format
val vectorsWithIndex = input.zipWithUniqueId.map(vector => {
BreezeLabeledVector(vector._1.toInt, vector._2.asBreeze)
})
outlierSelection(vectorsWithIndex, resultingParameters).map(_._2)
}
}
}
/** Internal entry point which will execute the different stages of the algorithm using a single
* interface
*
* @param inputVectors Input vectors on which the stochastic outlier selection algorithm
* will be applied which should be the index or a unique integer value
* @param transformParameters The user defined parameters of the algorithm
* @return The outlierness of the vectors compared to each other
*/
private def outlierSelection(inputVectors: DataSet[BreezeLabeledVector],
transformParameters: ParameterMap): DataSet[(Int, Double)] = {
val dissimilarityVectors = computeDissimilarityVectors(inputVectors)
val affinityVectors = computeAffinity(dissimilarityVectors, transformParameters)
val bindingProbabilityVectors = computeBindingProbabilities(affinityVectors)
val outlierProbability = computeOutlierProbability(bindingProbabilityVectors)
outlierProbability
}
  /** Compute the pair-wise distance from each vector to all other vectors.
*
* @param inputVectors The input vectors, will compare the vector to all other vectors based
    *                     on a distance method.
* @return Returns new set of [[BreezeLabeledVector]] with dissimilarity vector
*/
def computeDissimilarityVectors(inputVectors: DataSet[BreezeLabeledVector]):
DataSet[BreezeLabeledVector] =
inputVectors.cross(inputVectors) {
(a, b) => (a.idx, b.idx, euclideanDistance(a.data, b.data))
}.filter(dist => dist._1 != dist._2) // Filter out the diagonal, this contains no information.
.groupBy(0)
.sortGroup(1, Order.ASCENDING)
.reduceGroup {
distancesIterator => {
val distances = distancesIterator.toList
val distanceVector = distances.map(_._3).toArray
BreezeLabeledVector(distances.head._1, BreezeDenseVector(distanceVector))
}
}
/** Approximate the affinity by fitting a Gaussian-like function
*
    * @param dissimilarityVectors The dissimilarity vectors which represent the distance to the
* other vectors in the data set.
* @param resultingParameters The user defined parameters of the algorithm
    * @return Returns new set of [[BreezeLabeledVector]] with affinity vectors
*/
def computeAffinity(dissimilarityVectors: DataSet[BreezeLabeledVector],
resultingParameters: ParameterMap): DataSet[BreezeLabeledVector] = {
val logPerplexity = Math.log(resultingParameters(Perplexity))
val maxIterations = resultingParameters(MaxIterations)
val errorTolerance = resultingParameters(ErrorTolerance)
dissimilarityVectors.map(vec => {
val breezeVec = binarySearch(vec.data, logPerplexity, maxIterations, errorTolerance)
BreezeLabeledVector(vec.idx, breezeVec)
})
}
/** Normalizes the input vectors so each row sums up to one.
*
    * @param affinityVectors The affinity vectors which are the quantification of the relationship
    *                        between the original vectors.
    * @return Returns new set of [[BreezeLabeledVector]] which represents the binding
* probabilities, which is in fact the affinity where each row sums up to one.
*/
def computeBindingProbabilities(affinityVectors: DataSet[BreezeLabeledVector]):
DataSet[BreezeLabeledVector] =
affinityVectors.map(vec => BreezeLabeledVector(vec.idx, vec.data :/ sum(vec.data)))
/** Compute the final outlier probability by taking the product of the column.
*
* @param bindingProbabilityVectors The binding probability vectors where the binding
* probability is based on the affinity and represents the
* probability of a vector binding with another vector.
* @return Returns a single double which represents the final outlierness of the input vector.
*/
def computeOutlierProbability(bindingProbabilityVectors: DataSet[BreezeLabeledVector]):
DataSet[(Int, Double)] = bindingProbabilityVectors
.flatMap(vec => vec.data.toArray.zipWithIndex.map(pair => {
// The DistanceMatrix removed the diagonal, but we need to compute the product
// of the column, so we need to correct the offset.
val columnIndex = if (pair._2 >= vec.idx) {
1
} else {
0
}
(columnIndex + pair._2, pair._1)
})).groupBy(0).reduceGroup {
probabilities => {
var rowNumber = -1
var outlierProbability = 1.0
for (probability <- probabilities) {
rowNumber = probability._1
outlierProbability = outlierProbability * (1.0 - probability._2)
}
(rowNumber, outlierProbability)
}
}
/** Performs a binary search to get affinities in such a way that each conditional Gaussian has
* the same perplexity.
*
* @param dissimilarityVector The input dissimilarity vector which represents the current
* vector distance to the other vectors in the data set
* @param logPerplexity The log of the perplexity, which represents the probability of having
* affinity with another vector.
* @param maxIterations The maximum iterations to limit the computational time.
* @param tolerance The allowed tolerance to sacrifice precision for decreased computational
* time.
* @param beta: The current beta
* @param betaMin The lower bound of beta
* @param betaMax The upper bound of beta
* @param iteration The current iteration
* @return Returns the affinity vector of the input vector.
*/
def binarySearch(
dissimilarityVector: BreezeVector[Double],
logPerplexity: Double,
maxIterations: Int,
tolerance: Double,
beta: Double = 1.0,
betaMin: Double = Double.NegativeInfinity,
betaMax: Double = Double.PositiveInfinity,
iteration: Int = 0)
: BreezeVector[Double] = {
val newAffinity = dissimilarityVector.map(d => Math.exp(-d * beta))
val sumA = sum(newAffinity)
val hCurr = Math.log(sumA) + beta * sum(dissimilarityVector :* newAffinity) / sumA
val hDiff = hCurr - logPerplexity
if (iteration < maxIterations && Math.abs(hDiff) > tolerance) {
// Compute the Gaussian kernel and entropy for the current precision
val (newBeta, newBetaMin, newBetaMax) = if (hDiff.isNaN) {
(beta / 10.0, betaMin, betaMax) // Reduce beta to get it in range
} else {
if (hDiff > 0) {
val newBeta =
if (betaMax == Double.PositiveInfinity || betaMax == Double.NegativeInfinity) {
beta * 2.0
} else {
(beta + betaMax) / 2.0
}
(newBeta, beta, betaMax)
} else {
val newBeta =
if (betaMin == Double.PositiveInfinity || betaMin == Double.NegativeInfinity) {
beta / 2.0
} else {
(beta + betaMin) / 2.0
}
(newBeta, betaMin, beta)
}
}
binarySearch(dissimilarityVector,
logPerplexity,
maxIterations,
tolerance,
newBeta,
newBetaMin,
newBetaMax,
iteration + 1)
}
else {
newAffinity
}
}
}
| zhangminglei/flink | flink-libraries/flink-ml/src/main/scala/org/apache/flink/ml/outlier/StochasticOutlierSelection.scala | Scala | apache-2.0 | 15,707 |
package io.giovannini.externalApi
import play.api.libs.json.{JsObject, Json}
class ExternalApi() {
def get: JsObject = Json.obj(
"title" -> "Utiliser un DTO pour s'interfacer avec une API",
"author" -> "Thomas GIOVANNINI",
"email" -> "[email protected]",
"publicationDate" -> "2018-03-18"
)
} | Giovannini/giovannini.github.io | projects/2018-03-18-utilite-d-un-dto/external-api/src/main/scala/io/giovannini/externalApi/ExternalApi.scala | Scala | mit | 326 |
package com.avsystem.commons
package redis.util
import akka.actor.ActorSystem
import com.avsystem.commons.concurrent.RunNowEC
import scala.concurrent.duration.{Duration, FiniteDuration}
object DelayedFuture {
def apply(delay: FiniteDuration)(implicit system: ActorSystem): Future[Unit] =
if (delay <= Duration.Zero) Future.unit
else {
val promise = Promise[Unit]()
system.scheduler.scheduleOnce(delay)(promise.success(()))(RunNowEC)
promise.future
}
}
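// Illustrative usage sketch, added for clarity (not part of the original source): resolve a
// made-up value after a delay. An implicit ActorSystem is assumed to be available and the
// trivial map step runs on RunNowEC.
object DelayedFutureExample {
  import scala.concurrent.duration._
  def delayedAnswer(implicit system: ActorSystem): Future[Int] =
    DelayedFuture(1.second).map(_ => 42)(RunNowEC)
}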
| AVSystem/scala-commons | commons-redis/src/main/scala/com/avsystem/commons/redis/util/DelayedFuture.scala | Scala | mit | 487 |
package edu.gemini.seqexec.server
import java.util.logging.Logger
import argonaut._
import Argonaut._
import org.apache.commons.httpclient.{HttpMethod, HttpClient}
import org.apache.commons.httpclient.methods.{EntityEnclosingMethod, PutMethod, PostMethod}
import scala.io.Source
import scalaz.concurrent.Task
import scalaz.EitherT
/**
* Created by jluhrs on 11/5/15.
*/
object DhsClient {
val baseURI = "http://cpodhsxx:9090/axis2/services/dhs/images"
type ObsId = String
sealed case class ErrorType(str: String)
object BadRequest extends ErrorType("BAD_REQUEST")
object DhsError extends ErrorType("DHS_ERROR")
object InternalServerError extends ErrorType("INTERNAL_SERVER_ERROR")
implicit def errorTypeDecode: DecodeJson[ErrorType] = DecodeJson[ErrorType]( c => c.as[String].map {
case BadRequest.str => BadRequest
case DhsError.str => DhsError
case InternalServerError.str => InternalServerError
}
)
final case class Error(t: ErrorType, msg: String) {
override def toString = "(" + t.str + ") " + msg
}
implicit def errorDecode: DecodeJson[Error] = DecodeJson[Error]( c => for {
t <- (c --\\ "type").as[ErrorType]
msg <- (c --\\ "message").as[String]
} yield Error(t, msg)
)
implicit def obsIdDecode: DecodeJson[TrySeq[ObsId]] = DecodeJson[TrySeq[ObsId]]( c => {
val r = c --\\ "response"
val s = (r --\\ "status").as[String]
s flatMap {
case "success" => (r --\\ "result").as[String].map(TrySeq(_))
case "error" => (r --\\ "errors").as[List[Error]].map(
l => TrySeq.fail[ObsId](SeqexecFailure.Unexpected(l.mkString(", "))))
}
} )
implicit def unitDecode: DecodeJson[TrySeq[Unit]] = DecodeJson[TrySeq[Unit]]( c => {
val r = c --\\ "response"
val s = (r --\\ "status").as[String]
s flatMap {
case "success" => DecodeResult.ok(TrySeq(()))
case "error" => (r --\\ "errors").as[List[Error]].map(
l => TrySeq.fail[Unit](SeqexecFailure.Unexpected(l.mkString(", "))))
}
} )
type Contributor = String
sealed case class Lifetime(str: String)
object Permanent extends Lifetime("PERMANENT")
object Temporary extends Lifetime("TEMPORARY")
object Transient extends Lifetime("TRANSIENT")
final case class ImageParameters(lifetime: Lifetime, contributors: List[Contributor])
implicit def imageParametersEncode: EncodeJson[ImageParameters] = EncodeJson[ImageParameters]( p =>
("lifetime" := p.lifetime.str) ->: ("contributors" := p.contributors) ->: Json.jEmptyObject )
// TODO: Implement the unsigned types, if needed.
sealed case class KeywordType protected (str: String)
object TypeInt8 extends KeywordType("INT8")
object TypeInt16 extends KeywordType("INT16")
object TypeInt32 extends KeywordType("INT32")
object TypeFloat extends KeywordType("FLOAT")
object TypeDouble extends KeywordType("DOUBLE")
object TypeBoolean extends KeywordType("BOOLEAN")
object TypeString extends KeywordType("STRING")
// The developer uses these classes to define all the typed keywords
sealed class Keyword[T] protected (val n: String, val t: KeywordType, val v: T)
final case class Int8Keyword(name: String, value: Byte) extends Keyword[Byte](name, TypeInt8, value)
final case class Int16Keyword(name: String, value: Short) extends Keyword[Short](name, TypeInt16, value)
final case class Int32Keyword(name: String, value: Int) extends Keyword[Int](name, TypeInt32, value)
final case class FloatKeyword(name: String, value: Float) extends Keyword[Float](name, TypeFloat, value)
final case class DoubleKeyword(name: String, value: Double) extends Keyword[Double](name, TypeDouble, value)
final case class BooleanKeyword(name: String, value: Boolean) extends Keyword[Boolean](name, TypeBoolean, value)
final case class StringKeyword(name: String, value: String) extends Keyword[String](name, TypeString, value)
// At the end, I want to just pass a list of keywords to be sent to the DHS. I cannot do this with Keyword[T],
// because I cannot mix different types in a List. But at the end I only care about the value as a String, so I
// use an internal representation, and offer a class to the developer (KeywordBag) to create the list from typed
// keywords.
final protected case class InternalKeyword(name: String, keywordType: KeywordType, value: String)
protected implicit def internalKeywordConvert[T](k: Keyword[T]): InternalKeyword = InternalKeyword(k.n, k.t, k.v.toString)
final case class KeywordBag(keywords: List[InternalKeyword]) {
def add[T](k: Keyword[T]): KeywordBag = KeywordBag(internalKeywordConvert(k) :: keywords)
def append(other: KeywordBag): KeywordBag = KeywordBag(keywords ::: other.keywords)
}
//TODO: Add more apply methods if necessary
object KeywordBag {
def apply: KeywordBag = KeywordBag(List())
def apply[A](k1: Keyword[A]): KeywordBag = KeywordBag(List(internalKeywordConvert(k1)))
def apply[A, B](k1: Keyword[A], k2: Keyword[B]): KeywordBag = KeywordBag(List(internalKeywordConvert(k1), internalKeywordConvert(k2)))
def apply[A, B, C](k1: Keyword[A], k2: Keyword[B], k3: Keyword[C]): KeywordBag =
KeywordBag(List(internalKeywordConvert(k1), internalKeywordConvert(k2), internalKeywordConvert(k3)))
def apply[A, B, C, D](k1: Keyword[A], k2: Keyword[B], k3: Keyword[C], k4: Keyword[D]): KeywordBag =
KeywordBag(List(internalKeywordConvert(k1), internalKeywordConvert(k2), internalKeywordConvert(k3),
internalKeywordConvert(k4)))
def apply[A, B, C, D, E](k1: Keyword[A], k2: Keyword[B], k3: Keyword[C], k4: Keyword[D], k5: Keyword[E]): KeywordBag =
KeywordBag(List(internalKeywordConvert(k1), internalKeywordConvert(k2), internalKeywordConvert(k3),
internalKeywordConvert(k4), internalKeywordConvert(k5)))
def apply[A, B, C, D, E, F](k1: Keyword[A], k2: Keyword[B], k3: Keyword[C], k4: Keyword[D], k5: Keyword[E], k6: Keyword[F]): KeywordBag =
KeywordBag(List(internalKeywordConvert(k1), internalKeywordConvert(k2), internalKeywordConvert(k3),
internalKeywordConvert(k4), internalKeywordConvert(k5), internalKeywordConvert(k6)))
}
implicit def keywordEncode: EncodeJson[InternalKeyword] = EncodeJson[InternalKeyword]( k =>
("name" := k.name) ->: ("type" := k.keywordType.str) ->: ("value" := k.value) ->: Json.jEmptyObject )
private def sendRequest[T](method: EntityEnclosingMethod, body: Json, errMsg: String)(implicit decoder: argonaut.DecodeJson[TrySeq[T]]): SeqAction[T] = EitherT ( Task.delay {
val client = new HttpClient()
method.addRequestHeader("Content-Type", "application/json")
method.setRequestBody(body.nospaces)
client.executeMethod(method)
val r = Source.fromInputStream(method.getResponseBodyAsStream).getLines().mkString.decodeOption[TrySeq[T]](decoder)
method.releaseConnection()
r.getOrElse(TrySeq.fail[T](SeqexecFailure.Execution(errMsg)))
} )
private def createImage(reqBody: Json): SeqAction[ObsId] =
sendRequest[ObsId](new PostMethod(baseURI), Json.jSingleObject("createImage", reqBody), "Unable to get label")
def createImage: SeqAction[ObsId] = createImage(Json.jEmptyObject)
def createImage(p: ImageParameters): SeqAction[ObsId] = createImage(p.asJson)
def setParameters(id: ObsId, p: ImageParameters): SeqAction[Unit] =
sendRequest[Unit](new PutMethod(baseURI + "/" + id), Json.jSingleObject("setParameters", p.asJson), "Unable to set parameters for image " + id)
def setKeywords(id: ObsId, keywords: KeywordBag, finalFlag: Boolean = false): SeqAction[Unit] =
sendRequest[Unit](new PutMethod(baseURI + "/" + id + "/keywords"),
Json.jSingleObject("setKeywords", ("final" := finalFlag) ->: ("keywords" := keywords.keywords) ->: Json.jEmptyObject ),
"Unable to write keywords for image " + id)
}
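// Illustrative composition sketch, added for clarity (not part of the original source): create an
// image and attach a keyword in a single SeqAction; the keyword name and value are made up.
object DhsClientExample {
  import DhsClient._
  def createWithInstrumentKeyword: SeqAction[ObsId] =
    for {
      id <- createImage
      _ <- setKeywords(id, KeywordBag(StringKeyword("instrument", "GMOS")), finalFlag = true)
    } yield id
}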
| arturog8m/ocs | bundle/edu.gemini.seqexec.server/src/main/scala/edu/gemini/seqexec/server/DhsClient.scala | Scala | bsd-3-clause | 7,872 |
package git
import org.scalatest.{Matchers, FlatSpec}
import java.io.File
import git.util.FileUtil
import org.joda.time.{DateTimeZone, DateTime}
class CommitSpec extends FlatSpec with Matchers {
// Create an example commit.
val commit = Commit(
id = ObjectId("6987b5626be09f59e84cb64c36b8e8a15a198798"),
header = ObjectHeader(ObjectType.Commit),
authorName = "Kai",
authorDate = new DateTime(2014, 1, 2, 3, 4, 5, DateTimeZone.forOffsetHours(2)).toDate,
authorEmail = "[email protected]",
commitDate = new DateTime(2014, 1, 2, 3, 4, 6, DateTimeZone.forOffsetHours(2)).toDate,
committerEmail = "[email protected]",
committerName = "Kai 2",
message = "foo bar baz qux",
treeId = ObjectId("b744d5cddb5095249299d95ee531cbd990741140"),
parentIds = Seq(ObjectId("b744d5cddb5095249299d95ee531cbd990741141"), ObjectId("b744d5cddb5095249299d95ee531cbd990741142"))
)
"an encoded commit" should "decode back to itself" in {
val commit2 = Commit.decode(Commit.encode(commit))
commit2.id.sha shouldBe commit.id.sha
commit2.header.typ shouldBe commit.header.typ
commit2.authorName shouldBe commit.authorName
commit2.authorDate shouldBe commit.authorDate
commit2.authorEmail shouldBe commit.authorEmail
commit2.commitDate shouldBe commit.commitDate
commit2.committerEmail shouldBe commit.committerEmail
commit2.committerName shouldBe commit.committerName
commit2.message shouldBe commit.message
commit2.treeId shouldBe commit.treeId
commit2.parentIds shouldBe commit.parentIds
}
// Testing fixtures.
val commit2 = Commit.decode(FileUtil.readContents(new File("src/test/resources/objects/commit/6987b5626be09f59e84cb64c36b8e8a15a198798")))
"commit 6987b5626be09f59e84cb64c36b8e8a15a198798" should "have an ID of '6987b5626be09f59e84cb64c36b8e8a15a198798'" in {
commit2.id.sha shouldBe "6987b5626be09f59e84cb64c36b8e8a15a198798"
}
it should "be type of Commit" in {
commit2.header.typ shouldBe ObjectType.Commit
}
it should "have correct author details" in {
commit2.authorDate shouldBe new DateTime(2014, 1, 2, 3, 4, 5, DateTimeZone.forOffsetHours(2)).toDate
commit2.authorEmail shouldBe "[email protected]"
commit2.authorName shouldBe "Kai"
}
it should "have correct committer details" in {
commit2.commitDate shouldBe new DateTime(2014, 1, 2, 3, 4, 6, DateTimeZone.forOffsetHours(2)).toDate
commit2.committerEmail shouldBe "[email protected]"
commit2.committerName shouldBe "Kai 2"
}
it should "have the message 'foo bar baz qux'" in {
commit2.message shouldBe "foo bar baz qux"
}
it should "point to tree ID 'b744d5cddb5095249299d95ee531cbd990741140'" in {
commit2.treeId shouldBe ObjectId("b744d5cddb5095249299d95ee531cbd990741140")
}
it should "have two parents with correct IDs." in {
commit2.parentIds.length shouldBe 2
commit2.parentIds should contain (ObjectId("b744d5cddb5095249299d95ee531cbd990741141"))
commit2.parentIds should contain (ObjectId("b744d5cddb5095249299d95ee531cbd990741142"))
}
// Search the default repository for a commit.
"The commit '6987b5626be09f59e84cb64c36b8e8a15a198798'" should "exist and be loaded properly" in {
val repo = Repository.open(new File("src/test/resources/repositories/default/.git").getAbsolutePath)
val commit = Commit.findById(ObjectId("6987b5626be09f59e84cb64c36b8e8a15a198798"))(repo)
assert(commit.isDefined)
}
} | kaisellgren/ScalaGit | src/test/scala/git/CommitSpec.scala | Scala | mit | 3,577 |
package uk.gov.dvla.vehicles.presentation.common.controllers.k2kacquire
import org.scalatest.mock.MockitoSugar
import play.api.mvc.{Request, Result}
import uk.gov.dvla.vehicles.presentation.common.clientsidesession.ClientSideSessionFactory
import uk.gov.dvla.vehicles.presentation.common.controllers.BusinessKeeperDetailsBase
import uk.gov.dvla.vehicles.presentation.common.model.{BusinessKeeperDetailsViewModel, CacheKeyPrefix}
import scala.collection.mutable.ArrayBuffer
object BusinessKeeperDetailsTesting extends MockitoSugar {
import play.api.mvc.Results.{Ok, BadRequest}
val presentTestResult = Ok("presentResult")
val successTestResult = Ok("successResult")
val missingVehicleDetailsTestResult = Ok("missingVehicleDetailsResult")
val invalidFormTestResult = BadRequest("invalidFormResult")
}
class BusinessKeeperDetailsTesting(implicit override val clientSideSessionFactory: ClientSideSessionFactory,
prefix: CacheKeyPrefix) extends BusinessKeeperDetailsBase {
import BusinessKeeperDetailsTesting._
val presentResultArgs = ArrayBuffer[BusinessKeeperDetailsViewModel]()
val invalidFormResultArgs = ArrayBuffer[BusinessKeeperDetailsViewModel]()
override protected def presentResult(model: BusinessKeeperDetailsViewModel)(implicit request: Request[_]): Result = {
presentResultArgs.append(model)
presentTestResult
}
override protected def success(implicit request: Request[_]): Result = successTestResult
override protected def missingVehicleDetails(implicit request: Request[_]): Result = missingVehicleDetailsTestResult
override protected def invalidFormResult(model: BusinessKeeperDetailsViewModel)
(implicit request: Request[_]): Result = {
invalidFormResultArgs.append(model)
invalidFormTestResult
}
}
| dvla/vehicles-presentation-common | test/uk/gov/dvla/vehicles/presentation/common/controllers/k2kacquire/BusinessKeeperDetailsTesting.scala | Scala | mit | 1,842 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.aliyun.tablestore
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql.aliyun.tablestore.TableStoreSourceProvider.isDefaultField
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.catalyst.expressions.GenericRow
class TableStoreDataSuite extends SparkFunSuite {
private val testUtils = new TableStoreTestUtil()
private val testSchema = {
val options =
testUtils.getTestOptions(
Map("catalog" -> TableStoreTestUtil.catalog)
)
TableStoreSource.tableStoreSchema(TableStoreCatalog(options).schema)
}
test("test tablestore data encoder") {
val schemaFieldPos: Map[String, Int] = testSchema.fieldNames
.filter(
fieldName => !isDefaultField(fieldName)
)
.zipWithIndex
.toMap
val schemaFieldPosSize = schemaFieldPos.size
val columnArray = Array.tabulate(schemaFieldPosSize)(
_ => (null, null).asInstanceOf[(String, Any)]
)
columnArray(4) = ("col1", null)
// val td = Array(("PkString", "1"), (null, null), ("PkInt", 2), (null, null), ("col1", ""))
val data = new SchemaTableStoreData("PUT", 12345678, columnArray)
val encoderForDataColumns = RowEncoder(testSchema).resolveAndBind()
encoderForDataColumns.toRow(new GenericRow(data.toArray))
}
}
| aliyun/aliyun-emapreduce-sdk | emr-tablestore/src/test/scala/org/apache/spark/sql/aliyun/tablestore/TableStoreDataSuite.scala | Scala | artistic-2.0 | 2,133 |
package metal
package macros
import spire.macros.compat.Context
import spire.macros.SyntaxUtil
trait Call[C <: Context with Singleton] {
val c: C
/** Instantiates a call to the function/tree `body`, using the elements pointed to by the pointer
* named `pointerName` on container `containerName`.
*/
def apply(util: SyntaxUtil[c.type], lhs: c.Tree, containerName: c.TermName, pointerName: c.TermName, body: c.Tree): c.Tree
/** Instantiates a call to the function/tree `body`, using the elements pointed to by the pointer
* named `pointerName` on container `containerName`, providing `value` as a first argument to the
* function (i.e. as in `foldLeft`).
*/
def withValue(util: SyntaxUtil[c.type], lhs: c.Tree, containerName: c.TermName, pointerName: c.TermName, body: c.Tree, value: c.TermName): c.Tree
}
trait CallElements1[C <: Context with Singleton, E1] extends Call[C] {
def tagE1: c.WeakTypeTag[E1]
def apply(util: SyntaxUtil[c.type], lhs: c.Tree, containerName: c.TermName, pointerName: c.TermName, body: c.Tree): c.Tree = {
import c.universe._
val e1 = util.name("e1")
q"""
val $e1: $tagE1 = $containerName.ptrElement1[$tagE1]($pointerName)
$body($e1)
"""
}
def withValue(util: SyntaxUtil[c.type], lhs: c.Tree, containerName: c.TermName, pointerName: c.TermName, body: c.Tree, value: c.TermName): c.Tree = {
import c.universe._
val e1 = util.name("e1")
q"""
val $e1: $tagE1 = $containerName.ptrElement1[$tagE1]($pointerName)
$body($value, $e1)
"""
}
}
object CallElements1 {
def apply[C <: Context with Singleton, E1:_c.WeakTypeTag](_c: C): CallElements1[C, E1] =
new CallElements1[C, E1] {
val c: C = _c
def tagE1 = implicitly[c.WeakTypeTag[E1]]
}
}
trait CallElements2[C <: Context with Singleton, E1, E2] extends Call[C] {
def tagE1: c.WeakTypeTag[E1]
def tagE2: c.WeakTypeTag[E2]
def apply(util: SyntaxUtil[c.type], lhs: c.Tree, containerName: c.TermName, pointerName: c.TermName, body: c.Tree): c.Tree = {
import c.universe._
val List(e1, e2) = util.names("e1", "e2")
q"""
val $e1: $tagE1 = $containerName.ptrElement1[$tagE1]($pointerName)
val $e2: $tagE2 = $containerName.ptrElement2[$tagE2]($pointerName)
$body($e1, $e2)
"""
}
def withValue(util: SyntaxUtil[c.type], lhs: c.Tree, containerName: c.TermName, pointerName: c.TermName, body: c.Tree, value: c.TermName): c.Tree = {
import c.universe._
val List(e1, e2) = util.names("e1", "e2")
q"""
val $e1: $tagE1 = $containerName.ptrElement1[$tagE1]($pointerName)
val $e2: $tagE2 = $containerName.ptrElement2[$tagE2]($pointerName)
$body($value, $e1, $e2)
"""
}
}
object CallElements2 {
def apply[C <: Context with Singleton, E1:_c.WeakTypeTag, E2:_c.WeakTypeTag](_c: C): CallElements2[C, E1, E2] =
new CallElements2[C, E1, E2] {
val c: C = _c
def tagE1 = implicitly[c.WeakTypeTag[E1]]
def tagE2 = implicitly[c.WeakTypeTag[E2]]
}
}
trait CallElements3[C <: Context with Singleton, E1, E2, E3] extends Call[C] {
def tagE1: c.WeakTypeTag[E1]
def tagE2: c.WeakTypeTag[E2]
def tagE3: c.WeakTypeTag[E3]
def apply(util: SyntaxUtil[c.type], lhs: c.Tree, containerName: c.TermName, pointerName: c.TermName, body: c.Tree): c.Tree = {
import c.universe._
val List(e1, e2, e3) = util.names("e1", "e2", "e3")
q"""
val $e1: $tagE1 = $containerName.ptrElement1[$tagE1]($pointerName)
val $e2: $tagE2 = $containerName.ptrElement2[$tagE2]($pointerName)
val $e3: $tagE3 = $containerName.ptrElement3[$tagE3]($pointerName)
$body($e1, $e2, $e3)
"""
}
def withValue(util: SyntaxUtil[c.type], lhs: c.Tree, containerName: c.TermName, pointerName: c.TermName, body: c.Tree, value: c.TermName): c.Tree = {
import c.universe._
val List(e1, e2, e3) = util.names("e1", "e2", "e3")
q"""
val $e1: $tagE1 = $containerName.ptrElement1[$tagE1]($pointerName)
val $e2: $tagE2 = $containerName.ptrElement2[$tagE2]($pointerName)
val $e3: $tagE3 = $containerName.ptrElement3[$tagE3]($pointerName)
$body($value, $e1, $e2, $e3)
"""
}
}
object CallElements3 {
def apply[C <: Context with Singleton, E1:_c.WeakTypeTag, E2:_c.WeakTypeTag, E3:_c.WeakTypeTag](_c: C): CallElements3[C, E1, E2, E3] =
new CallElements3[C, E1, E2, E3] {
val c: C = _c
def tagE1 = implicitly[c.WeakTypeTag[E1]]
def tagE2 = implicitly[c.WeakTypeTag[E2]]
def tagE3 = implicitly[c.WeakTypeTag[E3]]
}
}
| denisrosset/ptrcoll | core/src/main/scala/metal/macros/Call.scala | Scala | mit | 4,447 |
/*
* The MIT License (MIT)
*
* Copyright (c) 2016 Ian McIntosh
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package f3
package web
import scala.scalajs.js
import org.scalajs.dom._
import org.scalajs.jquery._
import cats.implicits._
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.prefix_<^._
import semanticui.js._
import f3.sample._
import f3.web.view._
object app extends js.JSApp {
override def main(): Unit = {
val appContentElem = document.getElementById("app-content")
val conn = AppCircuit.connect(x => x)
val renderable = conn { p =>
val content =
Map(
StandingsContent -> Standings(p),
PowerRankingsContent -> PowerRankings(p),
ELOContent -> ELORankings(p)
)
<.div(
TopNav(p),
ContentNav(p),
<.div(
^.cls := "ui main container",
Content(p, content)
),
BottomNav(p)
)
}
// TODO: This is hardcoded for the moment...
AppCircuit.dispatch(LoadLeagues(List(fantasy.football.league)))
AppCircuit.dispatch(SelectLeague(0.some))
ReactDOM.render(renderable, appContentElem)
initSemantic()
}
private[this] def initSemantic() = {
jQuery(document).ready {
jQuery(".ui.dropdown").dropdown(DropdownSettings(observeChanges = true))
}
}
}
| cranst0n/f3 | modules/web/src/main/scala/f3/web/app.scala | Scala | mit | 2,395 |
import diode._
import diode.react.{ModelProxy, ReactConnector}
import scala.scalajs.js
import org.scalajs.dom
import japgolly.scalajs.react._
import japgolly.scalajs.react.vdom.html_<^._
import japgolly.scalajs.react.WebpackRequire
import japgolly.scalajs.react.extra.router.{BaseUrl, Redirect, Router, RouterConfigDsl, RouterCtl}
sealed trait Pages
case object Page1 extends Pages
case object Page2 extends Pages
case class RootModel(data1: String, data2: Int)
case class ChangeData1(newVal: String) extends Action
object AppCircuit extends Circuit[RootModel] with ReactConnector[RootModel] {
override def initialModel: RootModel = RootModel("testData1", 123)
val data1Reader = AppCircuit.zoom(_.data1)
val firstHandler = new ActionHandler(AppCircuit.zoomTo(_.data1)) {
override def handle = {
case ChangeData1(newVal) => updated(newVal)
}
}
override val actionHandler = composeHandlers(firstHandler)
}
object Main extends js.JSApp {
val data1Connection = AppCircuit.connect(_.data1)
val routerConfig = RouterConfigDsl[Pages].buildConfig { dsl =>
import dsl._
(
staticRoute(root, Page1) ~> renderR {ctrl =>
Comp1.comp1(Comp1.Props("Root page", ctrl))
}
| staticRoute("/#test", Page2) ~> renderR { ctrl =>
data1Connection(proxy => Comp2.comp2(Comp2.Props(proxy)))
}
).notFound(redirectToPage(Page1)(Redirect.Replace))
}
def require(): Unit = {
WebpackRequire.React
WebpackRequire.ReactDOM
()
}
override def main(): Unit = {
println("Hello world..")
require()
val domTarget = dom.document.getElementById("root")
val router = Router(BaseUrl.fromWindowOrigin, routerConfig)
router().renderIntoDOM(domTarget)
}
}
object Comp1 {
case class Props(theProp: String, routerCtl: RouterCtl[Pages])
val comp1 = ScalaComponent.builder[Props]("MyComp")
.render_P(p => <.div(
"hello2 " + p.theProp,
^.onClick --> p.routerCtl.set(Page2)))
.build
}
object Comp2 {
case class Props(proxy: ModelProxy[String])
val comp2 = ScalaComponent.builder[Props]("MyComp")
.render_P {p =>
<.div(
"hello2 " + p.proxy.value,
^.onClick --> p.proxy.dispatchCB(ChangeData1("New value"))
)
}
.build
}
| makespacer/makespace | client/src/main/scala/Main.scala | Scala | apache-2.0 | 2,272 |
package com.sksamuel.elastic4s.searches.aggs
import com.sksamuel.elastic4s.ScriptBuilder
import com.sksamuel.elastic4s.script.SortBuilderFn
import com.sksamuel.elastic4s.searches.aggs.pipeline.PipelineAggregationBuilderFn
import com.sksamuel.elastic4s.searches.sort.SortDefinition
import org.elasticsearch.search.aggregations.AggregationBuilders
import org.elasticsearch.search.aggregations.metrics.tophits.TopHitsAggregationBuilder
import org.elasticsearch.search.sort.SortBuilder
import scala.collection.JavaConverters._
object TopHitsAggregationBuilder {
def apply(agg: TopHitsAggregationDefinition): TopHitsAggregationBuilder = {
val builder = AggregationBuilders.topHits(agg.name)
agg.explain.foreach(builder.explain)
agg.fetchSource.foreach(builder.fetchSource)
agg.trackScores.foreach(builder.trackScores)
agg.version.foreach(builder.version)
agg.size.foreach(builder.size)
agg.storedFields.foreach(builder.storedField)
agg.scripts.foreach { case (name, script) =>
builder.scriptField(name, ScriptBuilder(script))
}
def addSort[T <: SortBuilder[T]](sort: SortDefinition) = builder.sort(SortBuilderFn(sort).asInstanceOf[T])
agg.sorts.foreach(addSort)
agg.subaggs.map(AggregationBuilder.apply).foreach(builder.subAggregation)
agg.pipelines.map(PipelineAggregationBuilderFn.apply).foreach(builder.subAggregation)
if (agg.metadata.nonEmpty) builder.setMetaData(agg.metadata.asJava)
builder
}
}
| tyth/elastic4s | elastic4s-tcp/src/main/scala/com/sksamuel/elastic4s/searches/aggs/TopHitsAggregationBuilder.scala | Scala | apache-2.0 | 1,474 |
import sbt._
import Keys._
object Dependencies{
val playVersion = "2.2.1"
val dependenciesList = Seq(
"com.typesafe.play" %% "routes-compiler" % playVersion,
"com.typesafe.play" % "play-exceptions" % playVersion,
"commons-logging" % "commons-logging" % "1.1.3",
"org.raml" % "raml-parser" % "0.8.2",
"com.github.scala-incubator.io" %% "scala-io-file" % "0.4.2"
)
}
object Resolvers{
val resolversList = Seq(
"typesafe" at "http://repo.typesafe.com/typesafe/releases",
"mulesoft" at "https://repository-master.mulesoft.org/releases/"
)
}
object Publish {
object TargetRepository {
def scmio: Def.Initialize[Option[sbt.Resolver]] = version { (version: String) =>
val rootDir = "/srv/maven/"
val path =
if (version.trim.endsWith("SNAPSHOT"))
rootDir + "snapshots/"
else
rootDir + "releases/"
Some(Resolver.sftp("scm.io intern repo", "scm.io", 44144, path))
}
def sonatype: Def.Initialize[Option[sbt.Resolver]] = version { (version: String) =>
val nexus = "https://oss.sonatype.org/"
if (version.trim.endsWith("SNAPSHOT"))
Some("snapshots" at nexus + "content/repositories/snapshots")
else
Some("releases" at nexus + "service/local/staging/deploy/maven2")
}
}
lazy val settings = Seq(
publishMavenStyle := true,
publishTo <<= TargetRepository.scmio,
publishArtifact in Test := false,
pomIncludeRepository := { _ => false },
organization := "com.scalableminds",
organizationName := "scalable minds UG (haftungsbeschränkt) & Co. KG",
organizationHomepage := Some(url("http://scalableminds.com")),
startYear := Some(2014),
    description := "Sbt plugin to replace Play framework's integrated routes definitions with RAML files",
licenses := Seq("Apache 2" -> url("http://www.apache.org/licenses/LICENSE-2.0.txt")),
homepage := Some(url("https://github.com/sclableminds/sbt-play-raml")),
scmInfo := Some(ScmInfo(url("https://github.com/sclableminds/sbt-play-raml"), "https://github.com/scalableminds/sbt-play-raml.git"))
)
}
object BuildSettings{
val settings = Seq(
sbtPlugin := true,
resolvers := Resolvers.resolversList,
libraryDependencies ++= Dependencies.dependenciesList
)
}
object ApplicationBuild extends Build {
lazy val sbtPlayRaml = Project(
id = "sbt-play-raml",
base = file("."),
settings = Project.defaultSettings ++ BuildSettings.settings ++ Publish.settings)
} | scalableminds/sbt-play-raml | sbt-plugin/project/Build.scala | Scala | apache-2.0 | 2,505 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.webmvc.support.action
import org.beangle.commons.lang.Strings
import org.beangle.data.model.Entity
import org.beangle.data.transfer.importer.listener.ForeignerListener
import org.beangle.data.transfer.importer.{DefaultEntityImporter, ImportResult, ImportSetting}
import org.beangle.web.action.view.View
import org.beangle.webmvc.support.helper.{ImportHelper, PopulateHelper}
trait ImportSupport[T <: Entity[_]] {
self: EntityAction[T] =>
def importForm(): View = {
forward("/components/importData/form")
}
/**
   * Import the uploaded data.
*/
def importData(): View = {
val tr = new ImportResult()
val setting = new ImportSetting
val entityClazz = this.entityDao.domain.getEntity(this.entityName).get.clazz
val shortName = Strings.uncapitalize(Strings.substringAfterLast(entityClazz.getName, "."))
setting.entityClazz = entityClazz
setting.shortName = shortName
setting.reader = ImportHelper.buildReader()
configImport(setting)
if (null == setting.importer) {
val importer = new DefaultEntityImporter(setting.entityClazz, setting.shortName)
importer.domain = this.entityDao.domain
importer.populator = PopulateHelper.populator
setting.importer = importer
setting.listeners foreach { l =>
importer.addListener(l)
}
}
val importer = setting.importer
if (null == setting.reader) {
return forward("/components/importData/error")
}
try {
importer.reader = setting.reader
importer.transfer(tr)
put("importer", importer)
put("importResult", tr)
if (tr.hasErrors) {
forward("/components/importData/error")
} else {
forward("/components/importData/result")
}
} catch {
case e: Exception =>
logger.error("import error", e)
tr.addFailure(getText("error.importformat"), e.getMessage)
put("importResult", tr)
forward("/components/importData/error")
}
}
protected def configImport(setting: ImportSetting): Unit = {
setting.listeners = List(new ForeignerListener(entityDao))
}
}
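// Hypothetical usage sketch (Book/BookAction are illustrative names only): a
// concrete action mixes the trait into an EntityAction and can extend
// configImport to register additional import listeners.
//
//   class BookAction extends EntityAction[Book] with ImportSupport[Book] {
//     override protected def configImport(setting: ImportSetting): Unit = {
//       super.configImport(setting)
//       // append custom listeners to setting.listeners here
//     }
//   }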
| beangle/webmvc | support/src/main/scala/org/beangle/webmvc/support/action/ImportSupport.scala | Scala | lgpl-3.0 | 2,837 |
package assigner.search
import assigner.model._
import assigner.search.tabu._
import org.coinor.opents._
/** Entry point for running the algorithm. */
class Assigner(course: Course) {
val manager = new Manager (course)
val objective = new Objective(course)
def solution: (Assignment, Seq[(Move, Double)], Assignment) =
(1 to course.settings.startingPoints).par
.map { _ => singleSolution }
.maxBy { case (_, _, solution) => objective score solution }
def singleSolution: (Assignment, Seq[(Move, Double)], Assignment) = {
var log = List.empty[(Move, Double)]
val initial = StartingPoint(course)
val tabuSearch = new SingleThreadedTabuSearch(
initial.clone, manager, objective,
new TabuQueue(course.settings.tabuSize),
new BestEverAspirationCriteria,
true)
    // Add a listener to reproduce the behaviour described in the paper: the search
    // terminates once the objective function has not improved for a pre-set number
    // of moves.
tabuSearch.addTabuSearchListener(new TabuSearchAdapter {
override def newCurrentSolutionFound(e: TabuSearchEvent) = {
e.getTabuSearch.getCurrentSolution match {
case assignment: Assignment =>
log = (assignment.lastMove -> assignment.getObjectiveValue.head) :: log
case _ =>
}
}
override def newBestSolutionFound(e: TabuSearchEvent) = {
e.getTabuSearch.setIterationsToGo(course.settings.iterations)
}
})
tabuSearch.setIterationsToGo(course.settings.iterations)
tabuSearch.startSolving()
val solution = tabuSearch.getBestSolution.asInstanceOf[Assignment]
(initial, log.reverse, solution)
}
def swap(assignment: Assignment): (Assignment, Seq[(Move, Double)], Assignment) = {
var log = List.empty[(Move, Double)]
val studInCourse = assignment.studentMap
.filter { case (stud, group) => group == -1 }
.keys
.map(id => course.studentMap(id))
.toList
// TODO: Test the impact of using MultiThreadedTabuSearch
val tabuSearch = new SingleThreadedTabuSearch(
assignment.clone, manager, objective,
new TabuQueueWithAssignment(course.settings.tabuSize, studInCourse),
new BestEverAspirationCriteria,
true)
    // Add a listener to reproduce the behaviour described in the paper: the search
    // terminates once the objective function has not improved for a pre-set number
    // of moves.
tabuSearch.addTabuSearchListener(new TabuSearchAdapter {
override def newCurrentSolutionFound(e: TabuSearchEvent) = {
e.getTabuSearch.getCurrentSolution match {
case assignment: Assignment =>
log = (assignment.lastMove -> assignment.getObjectiveValue.head) :: log
case _ =>
}
}
override def newBestSolutionFound(e: TabuSearchEvent) = {
e.getTabuSearch.setIterationsToGo(course.settings.iterations)
}
})
tabuSearch.setIterationsToGo(course.settings.iterations)
tabuSearch.startSolving()
val solution = tabuSearch.getBestSolution.asInstanceOf[Assignment]
(assignment, log.reverse, solution)
}
// def swap(assignment: Assignment) = {
// var log = List.empty[(Move, Double)]
//
// val emptySpots = (for {
// (groupId, students) <- assignment.groupMap
// spots <- 0 until course.groupMap(groupId).maxSize - students.size
// } yield groupId).toList
// val waiting = assignment.waitingList
//
// waiting
// .combinations(emptySpots.size)
// .toList
// .permutations
// .map(_.map { per =>
// val assgn = emptySpots.zip(per) ++ assignment.studentMap
//
// })
// }
}
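// Hypothetical usage sketch (assumes a fully populated Course instance):
//
//   val assigner = new Assigner(course)
//   val (startingPoint, moveLog, best) = assigner.solution
//   val score = new Objective(course).score(best)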
| joroKr21/IoS-Algorithm | src/main/scala/assigner/search/Assigner.scala | Scala | mit | 3,762 |
/*
* Copyright 2014-2021 Netflix, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.netflix.atlas.wiki
import java.io.File
import java.io.InputStream
import java.io.OutputStream
import java.nio.charset.StandardCharsets
import java.util.Locale
import java.util.regex.Pattern
import com.netflix.atlas.core.db.StaticDatabase
import com.netflix.atlas.core.model.DataVocabulary
import com.netflix.atlas.core.model.FilterVocabulary
import com.netflix.atlas.core.model.MathVocabulary
import com.netflix.atlas.core.model.QueryVocabulary
import com.netflix.atlas.core.model.StatefulVocabulary
import com.netflix.atlas.core.model.StyleVocabulary
import com.netflix.atlas.core.stacklang.StandardVocabulary
import com.netflix.atlas.core.stacklang.Vocabulary
import com.netflix.atlas.core.util.Streams._
import com.netflix.atlas.json.Json
import com.netflix.atlas.wiki.pages._
import com.typesafe.config.ConfigFactory
import com.typesafe.scalalogging.StrictLogging
import scala.util.Using
/**
* Simple script for processing the wiki docs. Custom pages can be generated by creating a simple
* class. Markdown pages can include a line that starts with `/api/v1/graph` to include a
* rendered image using the graph api and a formatted expression.
*/
object Main extends StrictLogging {
type ListBuilder = scala.collection.mutable.Builder[String, List[String]]
val GraphImage = """(.*)<img[^><]+src="([^"]+)"[^><]+>(.*)""".r
val config = ConfigFactory.load()
val db = StaticDatabase.demo
val vocabs = List(
StandardVocabulary,
QueryVocabulary,
DataVocabulary,
MathVocabulary,
StatefulVocabulary,
FilterVocabulary,
StyleVocabulary
)
val vocabDocs = Map(
"std" ->
"""
|Standard operations for manipulating the stack.
""".stripMargin,
"query" ->
"""
|Query expression used to select a set of time series. For more information see the
|[stack language tutorial](Stack-Language#query).
""".stripMargin,
"data" ->
"""
|Expression for how to get data from the underlying storage. This is the minimal set that
|a storage layer would need to support. For more information see the
|[stack language tutorial](Stack-Language#aggregation).
""".stripMargin,
"math" ->
"""
|Defines mathematical operators to transform or combine time series. The base set can be
|supported in a global or online streaming context. For more information see the
|[stack language tutorial](Stack-Language#math).
""".stripMargin,
"stateful" ->
"""
|Mathematical operations that require state, i.e., data from previous time intervals to
|compute the result. May not be supported in all contexts.
""".stripMargin,
"filter" ->
"""
|Mathematical operations that require all data across the time being considered to
|compute. These are typically used for filtering after the fact. Only supported in a
|global evaluation context.
""".stripMargin,
"style" ->
"""
|Applies presentation attributes to the data. For more information see the
|[stack language tutorial](Stack-Language#presentation).
""".stripMargin
)
val overrides = Map(
DesEpicSignal.word -> DesEpicSignal,
DesEpicViz.word -> DesEpicViz,
DesFast.word -> DesFast,
DesSlow.word -> DesSlow,
DesSlower.word -> DesSlower,
DesSimple.word -> DesSimple,
SDesFast.word -> SDesFast,
SDesSlow.word -> SDesSlow,
SDesSlower.word -> SDesSlower,
DistAvg.word -> DistAvg,
DistMax.word -> DistMax,
DistStddev.word -> DistStddev,
Stddev.word -> Stddev,
Line.word -> Line,
Area.word -> Area,
Stack.word -> Stack,
VSpan.word -> VSpan
)
private def writeFile(data: String, f: File): Unit = {
Using.resource(fileOut(f)) { _.write(data.getBytes("UTF-8")) }
}
@scala.annotation.tailrec
private def process(
lines: List[String],
output: ListBuilder,
graph: GraphHelper
): List[String] = {
lines match {
case v :: vs if v.trim.startsWith("/api/v1/graph") =>
output += graph.image(v)
process(vs, output, graph)
case GraphImage(pre, v, post) :: vs if v.trim.startsWith("/api/v1/graph") =>
output += (pre + graph.imageHtml(v) + post)
process(vs, output, graph)
case v :: vs =>
output += v
process(vs, output, graph)
case Nil =>
output.result()
}
}
private def processTemplate(f: File, output: File): Unit = {
// atlas.wiki is the repo name, for templates in root path do not use that as a prefix
val path =
if (output.getName == "atlas.wiki") "gen-images" else s"${output.getName}/gen-images"
val graph = new GraphHelper(db, new File(output, "gen-images"), path)
val template = Using.resource(fileIn(f)) { in =>
lines(in).toList
}
val processed = process(template, List.newBuilder[String], graph)
    writeFile(processed.mkString("\n"), new File(output, f.getName))
}
private def copyVerbatim(f: File, output: File): Unit = {
logger.info(s"copy verbatim: $f to $output")
copyVerbatim(fileIn(f), fileOut(new File(output, f.getName)))
}
private def copyVerbatim(fin: InputStream, fout: OutputStream): Unit = {
Using.resources(fout, fin) { (out, in) =>
val buf = new Array[Byte](4096)
var length = in.read(buf)
while (length > 0) {
out.write(buf, 0, length)
length = in.read(buf)
}
}
}
private def copy(input: File, output: File): Unit = {
if (!output.exists) {
logger.info(s"creating directory: $output")
output.mkdir()
}
require(output.isDirectory, s"could not find or create directory: $output")
input.listFiles.foreach {
case f if f.isDirectory => copy(f, new File(output, f.getName))
case f if f.getName.endsWith(".md") => processTemplate(f, output)
case f => copyVerbatim(f, output)
}
}
private def generateStackLangRef(output: File): Unit = {
val dir = new File(output, "stacklang")
dir.mkdirs()
val graph = new GraphHelper(db, new File(dir, "gen-images"), "stacklang/gen-images")
val sidebar = new StringBuilder
vocabs.foreach { vocab =>
sidebar.append(s"* [${vocab.name}](Reference-${vocab.name})\\n")
}
writeFile(sidebar.toString(), new File(dir, "_Sidebar.md"))
vocabs.foreach { v =>
generateVocabRef(dir, graph, v)
}
}
def generateVocabRef(output: File, graph: GraphHelper, vocab: Vocabulary): Unit = {
val dir = new File(output, vocab.name)
dir.mkdirs()
val header = s"> [[Home]] ▸ [[Stack Language Reference]] ▸ __${vocab.name}__\\n\\n"
writeFile(header + vocabDocs(vocab.name), new File(dir, s"Reference-${vocab.name}.md"))
val sidebar = new StringBuilder
vocabs.foreach { v =>
if (v.name == vocab.name) {
sidebar.append(s"* __${v.name}__\\n")
vocab.words.sortWith(_.name < _.name).foreach { w =>
val page = overrides.getOrElse(w, BasicStackWordPage(vocab, w))
val fname = page.name
sidebar.append(s" * [${w.name}]($fname)\\n")
}
} else {
sidebar.append(s"* [${v.name}](Reference-${v.name})\\n")
}
}
writeFile(sidebar.toString(), new File(dir, "_Sidebar.md"))
vocab.words.sortWith(_.name < _.name).foreach { w =>
val page = overrides.getOrElse(w, BasicStackWordPage(vocab, w))
val fname = page.name
val f = new File(dir, s"$fname.md")
writeFile(header + page.content(graph), f)
}
}
def generateScriptedPages(output: File, pages: List[Page]): Unit = {
val graph = new GraphHelper(db, new File(output, "gen-images"), "gen-images")
pages.foreach { p =>
writeFile(p.content(graph), p.file(output))
}
}
private def listFiles(f: File): List[File] = {
if (f.isDirectory) f.listFiles().flatMap(listFiles).toList else List(f)
}
private def sectionDocs(name: String, text: String): List[Document] = {
val lines = text.split("\\n")
    val pattern = Pattern.compile("""^#+\s+(.+)$""")
val sections = List.newBuilder[Document]
var title = null.asInstanceOf[String]
val buffer = new StringBuilder
lines.foreach { line =>
val matcher = pattern.matcher(line)
if (matcher.matches()) {
if (title != null) {
sections += Document(toLink(name, Some(title)), buffer.toString(), title)
}
title = matcher.group(1)
buffer.clear()
} else {
        buffer.append(line).append('\n')
}
}
sections.result()
}
private def toLink(fname: String, title: Option[String] = None): String = {
val href = fname.replace(".md", "")
title.fold(href) { t =>
val anchor = t.trim
.toLowerCase(Locale.US)
.replace(' ', '-')
.replaceAll("[^-a-z0-9]", "")
s"$href#$anchor"
}
}
private def toTitle(fname: String): String = {
fname.replace(".md", "").replace('-', ' ')
}
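  // For illustration (derived from the two helpers above):
  //   toLink("Stack-Language.md", Some("Basic Syntax")) == "Stack-Language#basic-syntax"
  //   toTitle("Stack-Language.md") == "Stack Language"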
def generateSearchIndex(output: File): Unit = {
val files = listFiles(output).filter { f =>
val n = f.getName
n.endsWith(".md") && !n.startsWith("_")
}
val docs = files.flatMap { file =>
val text = new String(Using.resource(fileIn(file))(byteArray), StandardCharsets.UTF_8)
val loc = toLink(file.getName)
val title = toTitle(file.getName)
Document(loc, text, title) :: sectionDocs(file.getName, text)
}
val json = Json.encode(Index(docs))
Using.resource(fileOut(new File(output, "search_index.json"))) { out =>
out.write(json.getBytes(StandardCharsets.UTF_8))
}
}
def main(args: Array[String]): Unit = {
if (args.length != 2) {
System.err.println("Usage: Main <input-dir> <output-dir>")
System.exit(1)
}
val input = new File(args(0))
require(input.isDirectory, s"input-dir is not a directory: $input")
val output = new File(args(1))
output.mkdirs()
require(output.isDirectory, s"could not find or create output directory: $output")
copy(input, output)
generateStackLangRef(output)
generateScriptedPages(
output,
List(
new DES,
new StackLanguageReference(vocabs, vocabDocs),
new TimeZones
)
)
generateSearchIndex(output)
}
case class Index(docs: List[Document])
case class Document(location: String, text: String, title: String)
}
| brharrington/atlas | atlas-wiki/src/main/scala/com/netflix/atlas/wiki/Main.scala | Scala | apache-2.0 | 11,139 |
package notebook.front.widgets.charts
import scala.xml.{NodeSeq, UnprefixedAttribute, Null}
import play.api.libs.json._
import notebook._
import notebook.front._
import notebook.JsonCodec._
import notebook.front.widgets.{Texts, Utils}
import notebook.front.widgets.Utils.Defaults.DEFAULT_MAX_POINTS
import notebook.front.widgets.magic
import notebook.front.widgets.magic._
import notebook.front.widgets.magic.Implicits._
import notebook.front.widgets.magic.SamplerImplicits._
abstract class DataToRenderableConverter[C: ToPoints : Sampler](originalData: C, maxPoints: Int)
extends JsWorld[Seq[(String, Any)], Seq[(String, Any)]] {
// conversion from any renderable format (List, Array, DataFrame),
// into a generic Seq of items (Seq[MagicRenderPoint])
def sampler = implicitly[Sampler[C]]
def toPoints = implicitly[ToPoints[C]]
lazy val initialItems: Seq[MagicRenderPoint] = toPoints(originalData, maxPoints)
// conversion into data format passed into javascript widgets (via observable)
def mToSeq(t:MagicRenderPoint):Seq[(String, Any)]
def computeData(pts:Seq[MagicRenderPoint]) = pts.map(mToSeq)
// initial items to be displayed in JS (and later saved in notebook)
lazy val data: Seq[Seq[(String, Any)]] = computeData(initialItems)
lazy val headers = toPoints.headers(originalData)
lazy val numOfFields = headers.size
}
abstract class Chart[C:ToPoints:Sampler](originalData: C, maxPoints: Int)
extends DataToRenderableConverter[C](originalData, maxPoints)
with JsWorld[Seq[(String, Any)], Seq[(String, Any)]]
with Texts
with Utils {
import notebook.JSBus._
def sizes:(Int, Int)=(600, 400)
@volatile var currentC = originalData
@volatile var currentPoints = initialItems
@volatile var currentMax = maxPoints
def approxTotalItemCount(): String = {
sampler.samplingStrategy match {
// on DataFrames, do not call df.count() as it's rather expensive
case magic.LimitBasedSampling() =>
val sampledCount = currentPoints.length
if (currentMax > sampledCount) s"$sampledCount" else s"$sampledCount or more"
case _ => s"${toPoints.count(currentC)}"
}
}
def samplingWarningMsg(): String = {
sampler.samplingStrategy match {
case magic.LimitBasedSampling() =>
if (currentMax > currentPoints.length) ""
else " (Warning: showing only first " + currentMax + " rows)"
case _ if currentMax <= toPoints.count(currentC) =>
" (Warning: randomly sampled "+currentMax + " entries)"
case _ => ""
}
}
// initialize sampling warning on Chart initialization
val totalRowCount = outWithInitialValue(approxTotalItemCount)
val warnSamplingInUse = outWithInitialValue(samplingWarningMsg)
// ---- Helpers to mutate the chart reactively ----
// ------------------------------------------------
def updateChartStatus() = {
warnSamplingInUse(samplingWarningMsg)
totalRowCount(approxTotalItemCount)
}
def newMax(max:Int) = {
//update state
currentMax = max
applyOn(currentC)
}
def applyOn(newData:C) = apply {
currentC = newData
currentPoints = toPoints(newData, currentMax)
val d = currentPoints map mToSeq
updateChartStatus()
this.apply(d)
d
}
//val log = org.slf4j.LoggerFactory.getLogger("Chart")
private[this] var first = true
def addAndApply(otherData:C, resetInit:Boolean=false) = {
if (resetInit && first) {
first = false
applyOn(otherData)
} else {
apply {
currentC = toPoints.append(currentC, otherData)
currentPoints = toPoints(currentC, currentMax)
updateChartStatus()
val d = currentPoints map mToSeq
this.apply(d)
d
}
}
}
override val singleCodec = jsStringAnyCodec
override val singleToO = identity[Seq[(String, Any)]] _
val extendedContent:Option[scala.xml.Elem] = None
override val content = Some {
val container = <div>
<span class="chart-total-item-count">{totalRowCount.toHtml} entries total</span>
<span class="chart-sampling-warning">{warnSamplingInUse.toHtml}</span>
<div>
</div>
</div>
extendedContent.map(c => container.copy(child = container.child ++ c)).getOrElse(container)
}
}
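// Hypothetical usage sketch for the reactive helpers above (chart stands for any
// concrete Chart[C] instance; newData/moreData are illustrative):
//
//   chart.newMax(1000)          // re-sample with a larger point budget
//   chart.applyOn(newData)      // replace the underlying collection and re-render
//   chart.addAndApply(moreData) // append to the collection and re-render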
trait Sequencifiable[C] { self: Chart[C] =>
val fields: Option[(String, String)]
val groupField: Option[String]
val (f1, f2) = self.fields.getOrElse((headers(0), headers(1)))
def mToSeq(t:MagicRenderPoint):Seq[(String, Any)] = {
val stripedData = t.data.toSeq.filter{ case (k, v) =>
(groupField.isDefined && groupField.get == k) || (self.fields.isEmpty || f1 == k || f2 == k)
}
stripedData
}
}
trait Charts extends Utils {
def tabs[C:ToPoints:Sampler](originalData:C, pages:Seq[(String, Chart[C])]) = Tabs(originalData, pages)
def pairs[C:ToPoints:Sampler](originalData:C, maxPoints:Int=DEFAULT_MAX_POINTS) = {
val data:Seq[MagicRenderPoint] = implicitly[ToPoints[C]].apply(originalData, maxPoints)
val firstElem = data.head
val headers = firstElem.headers
lazy val dataMap = firstElem.data
val ds = for {
r <- headers
c <- headers
} yield {
val (f1, f2) = (dataMap(r), dataMap(c))
if (isNumber(f1) && isNumber(f2)) {
ScatterChart(originalData, Some((r, c)), (600/headers.size, 400/headers.size),maxPoints=maxPoints)
} else if (isNumber(f2)) {
BarChart(originalData, Some((r, c)), (600/headers.size, 400/headers.size),maxPoints=maxPoints)
} else {
TableChart(originalData, Some(List(r, c)), (600/headers.size, 400/headers.size),maxPoints=5)
}
}
val m = ds grouped headers.size
<table class="table" style="width: 100%">
<thead>{
<tr>{headers.map{ h =>
<th>{h}</th>
}}</tr>
}</thead>
<tbody>{
m.map { row =>
<tr>{
row.map { cell =>
<td>{cell}</td>
}
}</tr>
}
}</tbody></table>
}
def display[C:ToPoints:Sampler](originalData:C, fields:Option[(String, String)]=None, maxPoints:Int=DEFAULT_MAX_POINTS):Widget = {
val dataConverter = implicitly[ToPoints[C]]
val initialDataToDisplay: Seq[MagicRenderPoint] = dataConverter(originalData, maxPoints)
var allTabs: Seq[(String, Chart[C])] = Seq()
allTabs :+= "table" → new TableChart(originalData, maxPoints=maxPoints) {
override lazy val initialItems = initialDataToDisplay
}
// two field charts used only if dataset is non empty
initialDataToDisplay.headOption match {
case None => Nil
case Some(firstElem) =>
val members = firstElem.values
val dataMap = firstElem.data
val numOfFields = firstElem.numOfFields
if (numOfFields == 2 || fields.isDefined) {
val (f1, f2) = fields.map { case (f1, f2) => (dataMap(f1), dataMap(f2)) }
.getOrElse((members(0), members(1)))
if (isNumber(f1) && isNumber(f2)) {
allTabs ++= Seq(
"dot-circle-o" → new ScatterChart(originalData, fields, maxPoints = maxPoints) {
override lazy val initialItems = initialDataToDisplay
},
"line-chart" → new LineChart(originalData, fields, maxPoints = maxPoints) {
override lazy val initialItems = initialDataToDisplay
})
}
if (isNumber(f2)) {
allTabs :+= "bar-chart" → new BarChart(originalData, fields, maxPoints = maxPoints) {
override lazy val initialItems = initialDataToDisplay
}
}
if (!isNumber(f1)) {
allTabs :+= "pie-chart" → new PieChart(originalData, fields, maxPoints = maxPoints) {
override lazy val initialItems = initialDataToDisplay
}
}
}
}
allTabs :+= "cubes" → new PivotChart(originalData, maxPoints=maxPoints) {
override lazy val initialItems = initialDataToDisplay
}
tabs(originalData, allTabs)
}
}
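// Hypothetical notebook usage of the Charts trait above (data values and field
// names are illustrative):
//
//   display(salesData, fields = Some("month" -> "revenue"), maxPoints = 500)
//   pairs(salesData, maxPoints = 200)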
| andypetrella/spark-notebook | modules/common/src/main/scala/notebook/front/widgets/charts/Chart.scala | Scala | apache-2.0 | 8,050 |
package edu.umass.cs.iesl.scalacommons.layout
package boxter
object Commons {
def head[A](a:Seq[A]) = a.head
def head[A](a:List[A]) = a.head
def tail[A](a:Seq[A]) = a.tail
def tail[A](a:List[A]) = a.tail
def csv = (s:String) => (s.split(",") map (_.trim)).toList
def wsv = (s:String) => (s.split(" ") map (_.trim)).toList
}
import Commons._
object Boxes {
import scalaz._
import Scalaz._
// The basic data type. A box has a specified size and some sort of
// contents.
case class Box(rows:Int, cols:Int, content: Content) {
// Paste two boxes together horizontally, using a default (top) alignment.
def + : Box => Box = beside
def beside : Box => Box =
r => hcat(top) (List(this,r))
// Paste two boxes together horizontally with a single intervening
// column of space, using a default (top) alignment.
def +| : Box => Box = besideS
def besideS: Box => Box =
r => hcat(top)(List(this, emptyBox(0)(1), r))
// Paste two boxes together vertically, using a default (left)
// alignment.
def % : Box => Box = atop
def atop(b: Box): Box =
vcat(left)(List(this,b))
// Paste two boxes together vertically with a single intervening row
// of space, using a default (left) alignment.
def %| : Box => Box = atopS
def atopS : Box => Box =
b => vcat(left)(List(this,emptyBox(1)(0), b))
}
object Box {
val rows : Lens[Box, Int] = Lens(_.rows, (obj, v) => obj copy (rows = v))
val cols : Lens[Box, Int] = Lens(_.cols, (obj, v) => obj copy (cols = v))
val content : Lens[Box, Content] = Lens(_.content, (obj, v) => obj copy (content = v))
}
// Convenient ability to use bare string literals as boxes.
// implicit def str2box(s:String): Box = text(s)
// Data type for specifying the alignment of boxes.
sealed trait Alignment
case object AlignFirst extends Alignment
case object AlignLast extends Alignment
case object AlignCenter1 extends Alignment
case object AlignCenter2 extends Alignment
// Align boxes along their top/bottom/left/right
def top = AlignFirst
def bottom = AlignLast
def left = AlignFirst
def right = AlignLast
// Align boxes centered, but biased to the left/top (center1) or
// right/bottom (center2) in the case of unequal parities.
def center1 = AlignCenter1
def center2 = AlignCenter2
// Contents of a box.
sealed trait Content
case object Blank extends Content
case class Text(s:String) extends Content
case class Row(bs:List[Box]) extends Content
case class Col(bs:List[Box]) extends Content
case class SubBox(a1: Alignment, a2: Alignment, b:Box) extends Content
case class AnnotatedBox(props:Map[String, String], b:Box) extends Content
// The null box, which has no content and no size.
def nullBox = emptyBox(0)(0)
// @emptyBox r c@ is an empty box with @r@ rows and @c@ columns.
// Useful for effecting more fine-grained positioning of other
// boxes, by inserting empty boxes of the desired size in between
// them.
def emptyBox: Int => Int => Box =
r => c => Box(r, c, Blank)
// A @1x1@ box containing a single character.
def char: Char => Box =
c => Box(1, 1, Text(c.toString))
// A (@1 x len@) box containing a string of length @len@.
def text: String => Box =
s => Box(1, s.length, Text(s))
// Glue a list of boxes together horizontally, with the given alignment.
def hcat: Alignment => List[Box] => Box =
a => bs => {
def h = (0 :: (bs ∘ (_.rows))) max
def w = (bs ∘ (_.cols)) sum
val aligned = alignVert(a)(h)
Box(h, w, Row(bs ∘ aligned))
}
// @hsep sep a bs@ lays out @bs@ horizontally with alignment @a@,
// with @sep@ amount of space in between each.
def hsep: Int => Alignment => List[Box] => Box =
sep => a => bs => punctuateH(a)(emptyBox(0)(sep))(bs)
// Glue a list of boxes together vertically, with the given alignment.
def vcat: Alignment => List[Box] => Box =
a => bs => {
def h = (bs ∘ (_.rows)).sum
def w = (0 :: (bs ∘ (_.cols))) max
val aligned = alignHoriz(a)(w)
Box(h, w, Col(bs ∘ aligned))
}
// @vsep sep a bs@ lays out @bs@ vertically with alignment @a@,
// with @sep@ amount of space in between each.
def vsep: Int => Alignment => List[Box] => Box =
sep => a => bs => punctuateV(a)(emptyBox(sep)(0))(bs)
// @punctuateH a p bs@ horizontally lays out the boxes @bs@ with a
// copy of @p@ interspersed between each.
def punctuateH: Alignment => Box => List[Box] => Box =
a => p => bs => hcat(a)(bs intersperse p)
// A vertical version of 'punctuateH'.
def punctuateV: Alignment => Box => List[Box] => Box =
a => p => bs => vcat(a)(bs intersperse p)
//------------------------------------------------------------------------------
// Paragraph flowing ---------------------------------------------------------
//------------------------------------------------------------------------------
// @para algn w t@ is a box of width @w@, containing text @t@,
// aligned according to @algn@, flowed to fit within the given
// width.
def para: Alignment => Int => String => Box =
a => n => t =>
flow(n)(t) |> (ss => mkParaBox(a) (ss.length) (ss))
// ((ss:List[String]) => mkParaBox(a) (ss.length) (ss)) (flow(n)(t))
// @columns w h t@ is a list of boxes, each of width @w@ and height
// at most @h@, containing text @t@ flowed into as many columns as
// necessary.
def columns : (Alignment, Int, Int, String) => List[Box] =
(a, w, h, t) => flow(w)(t) ∘ (_.grouped(h).toList) ∘ (mkParaBox(a)(h))
// @mkParaBox a n s@ makes a box of height @n@ with the text @s@
// aligned according to @a@.
def mkParaBox : Alignment => Int => List[String] => Box =
a => n => alignVert(top)(n) compose vcat(a) compose (_.map(text))
def words = wsv
def unwords = (ws:List[String]) => ws.mkString(" ")
// Flow the given text into the given width.
def flow : Int => String => List[String] =
n => t => {
val wrds = words(t) ∘ mkWord
val para = wrds.foldl (emptyPara(n)) { addWordP }
para |> getLines |> (_.map(_.take(n)))
}
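  // For illustration (hand-traced against the implementation above):
  //   flow(10)("a bb ccc dddd") == List("a bb ccc", "dddd")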
sealed trait ParaContent
case class Para(paraWidth : Int, paraContent : ParaContent)
val paraWidth: Lens[Para, Int] = Lens(_.paraWidth, (obj, v) => obj copy (paraWidth = v))
val paraContent: Lens[Para, ParaContent] = Lens(_.paraContent, (obj, v) => obj copy (paraContent = v))
case class Block(fullLines : List[Line], lastLine : Line) extends ParaContent
val fullLines: Lens[Block, List[Line]] = Lens(_.fullLines, (obj, v) => obj copy (fullLines = v))
val lastLine: Lens[Block, Line] = Lens(_.lastLine, (obj, v) => obj copy (lastLine = v))
def emptyPara(pw: Int) : Para =
Para(pw, (Block(nil, (Line(0, nil)))))
def getLines : Para => List[String] =
p => {
def process = (l:List[Line]) => l.reverse ∘ Line.getWords ∘ (_.map(Word.getWord)) ∘ (_.reverse) ∘ unwords
p match {
case Para(_, (Block(ls, l))) =>
if (l.len == 0) process(ls)
else process(l::ls)
}
}
case class Line(len: Int, words: List[Word])
object Line {
val lenL: Lens[Line, Int] = Lens(_.len, (obj, v) => obj copy (len = v))
val wordsL: Lens[Line, List[Word]] = Lens(_.words, (obj, v) => obj copy (words = v))
val getLen = lenL.apply _
val getWords = wordsL.apply _
}
//
def mkLine : List[Word] => Line =
ws => Line((ws ∘ Word.getLen).sum + ws.length - 1, ws)
def startLine : Word => Line =
w => mkLine(w :: Nil)
case class Word(len:Int, word:String)
object Word {
val lenL: Lens[Word, Int] = Lens(_.len, (obj, v) => obj copy (len = v))
val wordL: Lens[Word, String] = Lens(_.word, (obj, v) => obj copy (word = v))
val getLen = lenL.apply _
val getWord = wordL.apply _
}
def mkWord : String => Word =
w => Word(w.length, w)
def addWordP : (Para, Word) => Para =
(p, w) => {
p match {
case Para(pw, (Block(fl,l))) =>
if (wordFits(pw,w,l))
Para(pw, Block(fl, addWordL(w, l)))
else
Para(pw, Block((l::fl), startLine(w)))
}
}
def addWordL : (Word, Line) => Line =
(w, l) => l match {
case Line(len, ws) => Line((len + w.len + 1), (w::ws))
}
def wordFits : (Int, Word, Line) => Boolean =
(pw, w, l) =>
l.len == 0 || l.len + w.len + 1 <= pw
//------------------------------------------------------------------------------
// Alignment -----------------------------------------------------------------
//------------------------------------------------------------------------------
// @alignHoriz algn n bx@ creates a box of width @n@, with the
// contents and height of @bx@, horizontally aligned according to
// @algn@.
def alignHoriz: Alignment => Int => Box => Box =
a => c => b => {
Box(b.rows, c, SubBox(a, AlignFirst, b))
}
// @alignVert algn n bx@ creates a box of height @n@, with the
// contents and width of @bx@, vertically aligned according to
// @algn@.
def alignVert: Alignment => Int => Box => Box =
a => r => b =>
Box(r, (b.cols), SubBox(AlignFirst, a, b))
// @align ah av r c bx@ creates an @r@ x @c@ box with the contents
// of @bx@, aligned horizontally according to @ah@ and vertically
// according to @av@.
def align : (Alignment, Alignment, Int, Int, Box) => Box =
(ah, av, r, c, bx) => Box(r, c, SubBox(ah, av, bx))
  // Move a box "up" by putting it in a larger box with extra rows,
// aligned to the top. See the disclaimer for 'moveLeft'.
def moveUp : Int => Box => Box =
n => b => alignVert(top)(b.rows + n)(b)
// Move a box down by putting it in a larger box with extra rows,
// aligned to the bottom. See the disclaimer for 'moveLeft'.
def moveDown : Int => Box => Box =
n => b => alignVert(bottom)(b.rows + n)(b)
// Move a box left by putting it in a larger box with extra columns,
// aligned left. Note that the name of this function is
// something of a white lie, as this will only result in the box
// being moved left by the specified amount if it is already in a
// larger right-aligned context.
def moveLeft : Int => Box => Box =
n => b => alignHoriz(left)(b.cols + n)(b)
// Move a box right by putting it in a larger box with extra
// columns, aligned right. See the disclaimer for 'moveLeft'.
def moveRight : Int => Box => Box =
n => b => alignHoriz(right)(b.cols + n)(b)
//------------------------------------------------------------------------------
// Implementation ------------------------------------------------------------
//------------------------------------------------------------------------------
// Render a 'Box' as a String, suitable for writing to the screen or
// a file.
def render : Box => String =
    b => renderBox(b) |> (_.mkString("\n"))
// \\"Padded take\\": @takeP a n xs@ is the same as @take n xs@, if @n
// <= length xs@; otherwise it is @xs@ followed by enough copies of
// @a@ to make the length equal to @n@.
def takeP[A] : A => Int => List[A] => List[A] =
a => n => aas => {
val pad = if (n <= aas.length) 0 else n - aas.length
// aas.take(n) ::: a.repeat[List].take(pad)
aas.take(n) ::: a.replicate[List](pad)
}
// @takePA @ is like 'takeP', but with alignment. That is, we
// imagine a copy of @xs@ extended infinitely on both sides with
// copies of @a@, and a window of size @n@ placed so that @xs@ has
// the specified alignment within the window; @takePA algn a n xs@
// returns the contents of this window.
def takePA[A] : Alignment => A => Int => List[A] => List[A] =
c => b => n => aas => {
def numFwd(a:Alignment, n:Int): Int = a match {
case AlignFirst => n
case AlignLast => 0
case AlignCenter1 => n / 2
case AlignCenter2 => (n+1) / 2
}
def numRev(n:Int) = (_:Alignment) match {
case AlignFirst => 0
case AlignLast => n
case AlignCenter1 => (n+1) / 2
case AlignCenter2 => n / 2
}
def split: List[A] => (List[A], List[A]) =
as => ((_:List[A]) reverse).first apply as.splitAt(numRev(as.length)(c))
def pad = (takeP(b)(numRev(n)(c)) *** takeP(b)(numFwd(c,n)));
pad apply split(aas) fold (_ ++ _)
}
// Generate a string of spaces.
def blanks : Int => String =
n => " " * n
// Render a box as a list of lines.
def renderBox : Box => List[String] =
box => box match {
case Box(r, c, Blank) => resizeBox(r, c, List(""))
case Box(r, c, Text(t)) => resizeBox(r, c, List(t))
case Box(r, c, Col(bs)) => (bs >>= renderBoxWithCols(c)) |> (resizeBox(r, c, _))
case Box(r, c, SubBox(ha, va, b)) => resizeBoxAligned(r, c, ha, va)(renderBox(b))
case Box(r, c, Row(bs)) => {
def merge: List[List[String]] => List[String] =
sss => sss.foldr("".repeat[Stream]) ({
case (a, b) => (a.toStream zip b) map {case (x, y) => x+y}
}).toList
bs ∘ renderBoxWithRows(r) |> merge |> (resizeBox(r, c, _))
}
}
// Render a box as a list of lines, using a given number of rows.
def renderBoxWithRows : Int => Box => List[String] =
r => b => renderBox (Box.rows.set(b, r))
// Render a box as a list of lines, using a given number of columns.
def renderBoxWithCols : Int => Box => List[String] =
c => b => renderBox (Box.cols.set(b, c))
// Resize a rendered list of lines.
def resizeBox : (Int, Int, List[String]) => List[String] =
(r, c, ss) => {
val taker = takeP(" "*c)(r)
val takec = ss map (s => (takeP(' ')(c)(s.toList)).mkString(""))
taker(takec)
}
// Resize a rendered list of lines, using given alignments.
def resizeBoxAligned : (Int, Int, Alignment, Alignment) => List[String] => List[String] =
(r, c, ha, va) => ss =>
takePA(va)(blanks(c))(r){
(ss.map (_.toList)) ∘ (takePA(ha)(' ')(c)) ∘ (_.mkString(""))
}
// A convenience function for rendering a box to stdout.
def printBox : Box => Unit =
box => println(render(box))
}
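// Hypothetical demo (not part of the original file); it only exercises the public
// Boxes API defined above.
object BoxesDemo {
  import Boxes._
  def main(args: Array[String]): Unit = {
    // +| pastes two boxes side by side with a single column of space in between
    printBox(text("hello") +| text("world"))
    // % stacks two boxes vertically with the default left alignment
    printBox(text("first line") % text("second"))
    // para flows a sentence into a column at most 12 characters wide
    printBox(para(left)(12)("The quick brown fox jumps over the lazy dog"))
  }
}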
| iesl/scalacommons | src-attic/layout/boxter/boxes.scala | Scala | apache-2.0 | 14,418 |
package com.toscaruntime.compiler.parser
import com.toscaruntime.compiler.tosca._
import scala.util.parsing.combinator.{JavaTokenParsers, PackratParsers}
trait YamlParser extends JavaTokenParsers with PackratParsers {
override def skipWhitespace = false
val keyPattern = regex( """\w+[^:\[\]\{\}>|\p{Blank},]*""".r).withFailureMessage("Expecting yaml key")
val nestedListStartPattern = regex( """\[ *""".r).withFailureMessage("Expecting '[' to start an inline list")
val nestedListEndPattern = regex( """ *\]""".r).withFailureMessage("Expecting ']' to end an inline list")
val nestedMapStartPattern = regex( """\{ *""".r).withFailureMessage("Expecting '{' to start an inline complex object")
val nestedMapEndPattern = regex( """ *\}""".r).withFailureMessage("Expecting '}' to end an inline complex object")
val nestedEntrySeparator = regex( """\s*,\s*""".r).withFailureMessage("Expecting ',' to separate nested map or list entry")
val nonQuotedTextValuePattern = regex( """[^\[\]\{\}\r\n#][^#\r\n]*""".r).withFailureMessage("Expecting non quoted text")
val nestedNonQuotedTextValuePattern = regex( """[^,\[\]\{\}\r\n#]*""".r).withFailureMessage("Expecting nested non quoted text")
val quotedTextValuePattern = regex( """"[^"\r\n]*"""".r).withFailureMessage("Expecting quoted text")
val nullValuePattern = regex( """(?:null|~)""".r).withFailureMessage("Expecting null value")
val trueValueToken = regex( """true""".r).withFailureMessage("Expecting true")
val falseValueToken = regex( """false""".r).withFailureMessage("Expecting false")
val commentRegex = """\p{Blank}*(?:#[^\r\n]*)?"""
val endOfLineRegex = """(?:(?:\r?\n)|\Z)"""
val blankLineRegex = commentRegex + endOfLineRegex
val lineEndingRegex = s"""(?:$blankLineRegex)+"""
val lineEndingPattern = regex(lineEndingRegex.r).withFailureMessage("Unexpected token, expecting new line or end of file")
val keyValueSeparatorPattern = regex( """: """.r).withFailureMessage("Expecting ': ' to separate key and value")
val keyLongTextSeparatorPattern = regex( """>[ \t]*\r?\n(?:\r?\n)*""".r).withFailureMessage("Expecting '>' to start a long text")
val keyLongTextWithNewLineSeparatorPattern = regex( """\|[ \t]*\r?\n(?:\r?\n)*""".r).withFailureMessage("Expecting '|' to start a multilines text")
val keyComplexSeparatorPattern = regex((""":""" + lineEndingRegex).r).withFailureMessage("Expecting ':' to start a complex object")
def listIndicator = regex("""- """.r).withFailureMessage("Expecting '- ' for list entry")
def indentAtLeast(numberOfWhitespaces: Int): Parser[Int] = regex(("^ {" + numberOfWhitespaces + ",}").r).withFailureMessage(s"Expecting at least $numberOfWhitespaces white space for indentation") ^^ (_.length)
def indent(numberOfWhitespaces: Int): Parser[Int] = regex(("^ {" + numberOfWhitespaces + "}").r).withFailureMessage(s"Expecting exactly $numberOfWhitespaces white space for indentation") ^^ (_.length)
def mapWithoutFirstIndentation[T](mapEntryParser: (Int => Parser[(ParsedValue[String], T)]))(indentLength: Int): Parser[Map[ParsedValue[String], T]] =
(mapEntryParser(indentLength) ~ opt(rep1(indent(indentLength) ~> mapEntryParser(indentLength)))) ^^ {
case (firstEntry ~ None) => Map(firstEntry)
case (firstEntry ~ Some(entryList)) => ((List() :+ firstEntry) ++ entryList).toMap
}
def map[T](mapEntryParser: (Int => Parser[(ParsedValue[String], T)]))(minIndent: Int): Parser[Map[ParsedValue[String], T]] =
(indentAtLeast(minIndent) into (newIndentLength => mapWithoutFirstIndentation(mapEntryParser)(newIndentLength))) | failure("Expecting complex object")
def nestedMap[T](mapEntryParser: Parser[(ParsedValue[String], T)]): Parser[Map[ParsedValue[String], T]] =
((nestedMapStartPattern ~> repsep(mapEntryParser, nestedEntrySeparator) <~ nestedMapEndPattern) ^^ (_.toMap)) | failure("Expecting nested complex object")
def listWithoutFirstIndentation[T](listEntryParser: (Int => Parser[T]))(indentLength: Int): Parser[List[T]] = {
val newListEntryLevel = indentLength + 2
listIndicator ~> listEntryParser(newListEntryLevel) ~ opt(rep1(indent(indentLength) ~> listIndicator ~> listEntryParser(newListEntryLevel)))
} ^^ {
case (firstEntry ~ None) => List(firstEntry)
case (firstEntry ~ Some(entryList)) => (List() :+ firstEntry) ++ entryList
}
def list[T](listEntryParser: (Int => Parser[T]))(indentLevel: Int): Parser[List[T]] =
(indentAtLeast(indentLevel) into { newIndentLength =>
listWithoutFirstIndentation(listEntryParser)(newIndentLength)
}) | failure("Expecting list")
def nestedList[T](listEntryParser: Parser[T]): Parser[List[T]] =
((nestedListStartPattern ~> repsep(listEntryParser, nestedEntrySeparator) <~ nestedListEndPattern) ^^ (_.toList)) | failure("Expecting nested list")
def textValueWithSeparator(indentLevel: Int) =
(((keyLongTextSeparatorPattern ~> longTextValue(indentLevel + 1, withNewLine = false)) |
(keyLongTextWithNewLineSeparatorPattern ~> longTextValue(indentLevel + 1, withNewLine = true)) |
textValue) <~ lineEndingPattern) | failure("Expecting text value")
def textEntry(key: String)(indentLevel: Int): Parser[(ParsedValue[String], ParsedValue[String])] =
textEntry(wrapTextWithPosition(key))(indentLevel) | failure(s"Expecting text entry with key '$key'")
def textEntry(keyParser: Parser[ParsedValue[String]])(indentLevel: Int): Parser[(ParsedValue[String], ParsedValue[String])] =
(((keyParser <~ keyValueSeparatorPattern) ~ textValueWithSeparator(indentLevel)) ^^ entryParseResultHandler) | failure("Expecting text entry")
def nestedTextEntry(key: String): Parser[(ParsedValue[String], ParsedValue[String])] =
nestedTextEntry(wrapTextWithPosition(key)) | failure(s"Expecting nested text entry with key '$key'")
def nestedTextEntry(keyParser: Parser[ParsedValue[String]]): Parser[(ParsedValue[String], ParsedValue[String])] =
((keyParser ~ (keyValueSeparatorPattern ~> nestedTextValue)) ^^ entryParseResultHandler) | failure("Expecting nested text entry")
def booleanEntry(key: String): Parser[(ParsedValue[String], ParsedValue[Boolean])] =
((wrapTextWithPosition(key) ~ (keyValueSeparatorPattern ~> booleanValue) <~ lineEndingPattern) ^^ entryParseResultHandler) | failure(s"Expecting boolean entry with key $key")
def intEntry(key: String): Parser[(ParsedValue[String], ParsedValue[Int])] =
((wrapTextWithPosition(key) ~ (keyValueSeparatorPattern ~> boundedIntValue) <~ lineEndingPattern) ^^ entryParseResultHandler) | failure(s"Expecting int entry with key $key")
def mapEntry[T](keyParser: Parser[ParsedValue[String]])(mapEntryParser: (Int => Parser[(ParsedValue[String], T)]))(indentLevel: Int): Parser[(ParsedValue[String], Map[ParsedValue[String], T])] =
((keyParser ~ (keyComplexSeparatorPattern ~> map(mapEntryParser)(indentLevel + 1))) ^^ entryParseResultHandler) | failure("Expecting map entry")
def mapEntry[T](key: String)(mapEntryParser: (Int => Parser[(ParsedValue[String], T)]))(indentLevel: Int): Parser[(ParsedValue[String], Map[ParsedValue[String], T])] =
mapEntry(wrapTextWithPosition(key))(mapEntryParser)(indentLevel) | failure(s"Expecting map entry with key '$key'")
def nestedMapEntry[T](keyParser: Parser[ParsedValue[String]])(mapEntryParser: Parser[(ParsedValue[String], T)]): Parser[(ParsedValue[String], Map[ParsedValue[String], T])] =
(keyParser ~ (keyValueSeparatorPattern ~> nestedMap(mapEntryParser))) ^^ entryParseResultHandler
def nestedMapEntryWithLineFeed[T](key: String)(mapEntryParser: Parser[(ParsedValue[String], T)]): Parser[(ParsedValue[String], Map[ParsedValue[String], T])] =
nestedMapEntryWithLineFeed(wrapTextWithPosition(key))(mapEntryParser) | failure(s"Expecting nested map entry with key '$key'")
def nestedMapEntryWithLineFeed[T](keyParser: Parser[ParsedValue[String]])(mapEntryParser: Parser[(ParsedValue[String], T)]): Parser[(ParsedValue[String], Map[ParsedValue[String], T])] =
(nestedMapEntry(keyParser)(mapEntryParser) <~ lineEndingPattern) | failure("Expecting map entry")
def complexEntry[T](keyParser: Parser[ParsedValue[String]])(complexParser: (Int => Parser[T]))(indentLevel: Int): Parser[(ParsedValue[String], T)] = {
((keyParser ~ (keyComplexSeparatorPattern ~> complexParser(indentLevel + 1))) ^^ entryParseResultHandler) | failure("Expecting complex entry")
}
def complexEntry[T](key: String)(complexParser: (Int => Parser[T]))(indentLevel: Int): Parser[(ParsedValue[String], T)] =
complexEntry(wrapTextWithPosition(key))(complexParser)(indentLevel) | failure(s"Expecting complex entry with key '$key'")
def nestedComplexEntry[T](keyParser: Parser[ParsedValue[String]])(complexParser: Parser[T]): Parser[(ParsedValue[String], T)] =
(keyParser ~ (keyValueSeparatorPattern ~> nestedMapStartPattern ~> complexParser <~ nestedMapEndPattern)) ^^ entryParseResultHandler
def nestedComplexEntryWithLineFeed[T](key: String)(complexParser: Parser[T]): Parser[(ParsedValue[String], T)] =
nestedComplexEntryWithLineFeed(wrapTextWithPosition(key))(complexParser) | failure(s"Expecting nested complex entry with key '$key'")
def nestedComplexEntryWithLineFeed[T](keyParser: Parser[ParsedValue[String]])(complexParser: Parser[T]): Parser[(ParsedValue[String], T)] =
(nestedComplexEntry(keyParser)(complexParser) <~ lineEndingPattern) | failure("Expecting nested complex entry")
def listEntry[T](keyParser: Parser[ParsedValue[String]])(listEntryParser: (Int => Parser[T]))(indentLevel: Int): Parser[(ParsedValue[String], List[T])] =
((keyParser ~ (keyComplexSeparatorPattern ~> list(listEntryParser)(indentLevel + 1))) ^^ entryParseResultHandler) | failure("Expecting list entry")
def listEntry[T](key: String)(listEntryParser: (Int => Parser[T]))(indentLevel: Int): Parser[(ParsedValue[String], List[T])] =
listEntry(wrapTextWithPosition(key))(listEntryParser)(indentLevel) | failure(s"Expecting list entry with key '$key'")
def nestedListEntry[T](keyParser: Parser[ParsedValue[String]])(listEntryParser: Parser[T]): Parser[(ParsedValue[String], List[T])] =
(keyParser ~ (keyValueSeparatorPattern ~> nestedList(listEntryParser))) ^^ entryParseResultHandler
def nestedListEntryWithLineFeed[T](key: String)(listEntryParser: Parser[T]): Parser[(ParsedValue[String], List[T])] =
nestedListEntryWithLineFeed(wrapTextWithPosition(key))(listEntryParser) | failure(s"Expecting nested list entry with key '$key'")
def nestedListEntryWithLineFeed[T](keyParser: Parser[ParsedValue[String]])(listEntryParser: Parser[T]): Parser[(ParsedValue[String], List[T])] =
(nestedListEntry(keyParser)(listEntryParser) <~ lineEndingPattern) | failure("Expecting nested list entry")
def keyValue = positioned(keyPattern ^^ (k => ParsedValue(k))) | failure("Expecting key")
def intValue = positioned(decimalNumber ^^ (i => ParsedValue(i.toInt))) | failure("Expecting int value")
def boundedIntValue = positioned((intValue | "unbounded" | "UNBOUNDED") ^^ {
case "unbounded" | "UNBOUNDED" => ParsedValue[Int](Int.MaxValue)
case i: ParsedValue[Int] => i
}) | failure("Expecting int value or unbounded")
def floatValue = positioned(floatingPointNumber ^^ (f => ParsedValue(f.toDouble))) | failure("Expecting float value")
def booleanValue = positioned((trueValueToken ^^ (_ => ParsedValue(true))) | (falseValueToken ^^ (_ => ParsedValue(false)))) | failure("Expecting boolean value")
private def quotedTextValue: Parser[ParsedValue[String]] =
nullValuePattern ^^ (_ => ParsedValue[String](null)) |
quotedTextValuePattern ^^ (scalarText => ParsedValue(scalarText.substring(1, scalarText.length - 1)))
def textValue: Parser[ParsedValue[String]] =
positioned(quotedTextValue | nonQuotedTextValuePattern ^^ (scalarText => ParsedValue(scalarText.trim))) | failure("Expecting text value")
def nestedTextValue: Parser[ParsedValue[String]] =
positioned(quotedTextValue | nestedNonQuotedTextValuePattern ^^ (scalarText => ParsedValue(scalarText.trim))) | failure("Expecting nested text value")
private def longTextLine = wrapParserWithPosition( """[^\n]+""".r)
def longTextValue(indentLevel: Int, withNewLine: Boolean): Parser[ParsedValue[String]] =
positioned((indentAtLeast(indentLevel) into (newIndentLength => longTextLine ~ opt(lineEndingPattern ~> rep1sep(indent(newIndentLength) ~> longTextLine, lineEndingPattern)))) ^^ {
case (firstLine ~ None) => firstLine
case (firstLine ~ Some(lineList)) => lineList.fold(firstLine) { (existingText, nextLine) =>
val sep = if (withNewLine) "\n" else " "
val concat = ParsedValue(existingText.value + sep + nextLine.value)
concat.setPos(firstLine.pos)
concat
}
}) | failure("Expecting long text " + (if (withNewLine) " multi-lines "))
def entryParseResultHandler[T](input: (ParsedValue[String] ~ T)) = {
input match {
case (key ~ value) => (key, value)
}
}
def wrapValueWithPosition[T](input: T) = ParsedValue(input)
def wrapParserWithPosition[T](parser: Parser[T]) = positioned(parser ^^ wrapValueWithPosition)
def wrapTextWithPosition[T](text: String) = positioned(text ^^ wrapValueWithPosition) | failure(s"Expecting token '$text'")
def wrapMapEntryParserWithPosition(parser: Parser[(ParsedValue[String], Map[ParsedValue[String], FieldValue])]) = {
parser ^^ {
case (key: ParsedValue[String], value: Map[ParsedValue[String], FieldValue]) => (key, ComplexValue(value))
}
}
def wrapListEntryParserWithPosition(parser: Parser[(ParsedValue[String], List[FieldValue])]) = {
parser ^^ {
case (key: ParsedValue[String], value: List[FieldValue]) => (key, ListValue(value))
}
}
def wrapMapValue(parser: Parser[Map[ParsedValue[String], FieldValue]]) = parser ^^ { map => ComplexValue(map) }
def wrapListValue(parser: Parser[List[FieldValue]]) = parser ^^ { list => ListValue(list) }
def wrapScalarValue(parser: Parser[ParsedValue[String]]) = parser ^^ { value => ScalarValue(value) }
def yamlNode(indentLevel: Int): PackratParser[FieldValue] = {
(wrapMapValue(mapWithoutFirstIndentation(yamlEntry)(indentLevel)) |
wrapListValue(listWithoutFirstIndentation(yamlNode)(indentLevel)) |
wrapScalarValue(textValueWithSeparator(indentLevel)) |
nestedYamlNodeWithLineFeed) | failure(s"Expecting yaml node")
}
def nestedYamlNode: PackratParser[FieldValue] = {
(wrapMapValue(nestedMap(nestedYamlEntry)) |
wrapListValue(nestedList(nestedYamlNode)) |
wrapScalarValue(nestedTextValue)) | failure(s"Expecting nested yaml node")
}
def nestedYamlNodeWithLineFeed: PackratParser[FieldValue] = {
(nestedYamlNode <~ lineEndingPattern) | failure(s"Expecting nested yaml node with line feed")
}
def yamlEntry(indentLevel: Int): PackratParser[(ParsedValue[String], FieldValue)] = {
(yamlMapEntry(indentLevel) |
yamlListEntry(indentLevel) |
yamlTextEntry(indentLevel) |
nestedYamlEntryWithLineFeed) | failure(s"Expecting a (key,value) yaml entry")
}
def nestedYamlEntry: PackratParser[(ParsedValue[String], FieldValue)] = {
(nestedYamlMapEntry | nestedYamlListEntry | nestedYamlTextEntry(keyValue)) | failure(s"Expecting a nested (key,value) yaml entry")
}
def nestedYamlEntryWithLineFeed: PackratParser[(ParsedValue[String], FieldValue)] = {
(nestedYamlEntry <~ lineEndingPattern) | failure(s"Expecting a nested (key,value) yaml entry with line feed")
}
def yamlMapEntry(keyParser: Parser[ParsedValue[String]])(indentLevel: Int): PackratParser[(ParsedValue[String], ComplexValue)] = {
wrapMapEntryParserWithPosition(mapEntry(keyParser)(yamlEntry)(indentLevel)) | failure(s"Expecting a (key,value) yaml entry, value of type complex")
}
def yamlMapEntry(indentLevel: Int): PackratParser[(ParsedValue[String], ComplexValue)] = {
yamlMapEntry(keyValue)(indentLevel) | failure(s"Expecting a (key,value) yaml entry, value of type complex")
}
def nestedYamlMapEntry: PackratParser[(ParsedValue[String], ComplexValue)] = {
nestedYamlMapEntry(keyValue) | failure(s"Expecting a nested (key,value) yaml entry, value of type complex")
}
def nestedYamlMapEntry(keyParser: Parser[ParsedValue[String]]): PackratParser[(ParsedValue[String], ComplexValue)] = {
wrapMapEntryParserWithPosition(nestedMapEntry(keyParser)(nestedYamlEntry)) | failure(s"Expecting a nested (key,value) yaml entry, value of type complex")
}
def nestedYamlMapEntryWithLineFeed: PackratParser[(ParsedValue[String], ComplexValue)] = {
(nestedYamlMapEntryWithLineFeed(keyValue) <~ lineEndingPattern) | failure(s"Expecting a nested (key,value) yaml entry, value of type complex, with line feed")
}
def nestedYamlMapEntryWithLineFeed(keyParser: Parser[ParsedValue[String]]): PackratParser[(ParsedValue[String], ComplexValue)] = {
(nestedYamlMapEntry(keyParser) <~ lineEndingPattern) | failure(s"Expecting a nested (key,value) yaml entry, value of type complex, with line feed")
}
def yamlListEntry(indentLevel: Int): PackratParser[(ParsedValue[String], ListValue)] = {
yamlListEntry(keyValue)(indentLevel) | failure(s"Expecting a (key,value) yaml entry, value of type list")
}
def yamlListEntry(keyParser: Parser[ParsedValue[String]])(indentLevel: Int): PackratParser[(ParsedValue[String], ListValue)] = {
wrapListEntryParserWithPosition(listEntry(keyParser)(yamlNode)(indentLevel)) | failure(s"Expecting a (key,value) yaml entry, value of type list")
}
def nestedYamlListEntry: PackratParser[(ParsedValue[String], ListValue)] = {
nestedYamlListEntry(keyValue) | failure(s"Expecting a nested (key,value) yaml entry, value of type list")
}
def nestedYamlListEntry(keyParser: Parser[ParsedValue[String]]): PackratParser[(ParsedValue[String], ListValue)] = {
wrapListEntryParserWithPosition(nestedListEntry(keyParser)(nestedYamlNode)) | failure(s"Expecting a nested (key,value) yaml entry, value of type list")
}
def nestedYamlListEntryWithLineFeed: PackratParser[(ParsedValue[String], ListValue)] = {
nestedYamlListEntryWithLineFeed(keyValue) | failure(s"Expecting a nested (key,value) yaml entry, value of type list, with line feed")
}
def nestedYamlListEntryWithLineFeed(keyParser: Parser[ParsedValue[String]]): PackratParser[(ParsedValue[String], ListValue)] = {
(nestedYamlListEntry(keyParser) <~ lineEndingPattern) | failure(s"Expecting a nested (key,value) yaml entry, value of type list, with line feed")
}
def yamlTextEntry(indentLevel: Int): PackratParser[(ParsedValue[String], ScalarValue)] = {
yamlTextEntry(keyValue)(indentLevel) | failure(s"Expecting a yaml text entry")
}
def yamlTextEntry(keyParser: Parser[ParsedValue[String]])(indentLevel: Int): PackratParser[(ParsedValue[String], ScalarValue)] = {
textEntry(keyParser)(indentLevel) ^^ {
case (key, value: ParsedValue[String]) => (key, ScalarValue(value))
} | failure(s"Expecting a yaml text entry")
}
def nestedYamlTextEntry: PackratParser[(ParsedValue[String], ScalarValue)] = {
nestedYamlTextEntry(keyValue) | failure(s"Expecting a yaml text entry")
}
def nestedYamlTextEntry(keyParser: Parser[ParsedValue[String]]): PackratParser[(ParsedValue[String], ScalarValue)] = {
nestedTextEntry(keyParser) ^^ {
case (key, value: ParsedValue[String]) => (key, ScalarValue(value))
} | failure(s"Expecting a nested yaml text entry")
  }
}
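// Hypothetical usage sketch (the entry point and input are illustrative; concrete
// callers of this trait decide which rule to start from):
//
//   object MiniYamlParser extends YamlParser {
//     def parseEntry(source: String) = parseAll(yamlEntry(0), source)
//   }
//   // MiniYamlParser.parseEntry("description: hello world\n")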
} | vuminhkh/tosca-runtime | compiler/src/main/scala/com/toscaruntime/compiler/parser/YamlParser.scala | Scala | mit | 19,476 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.concurrent.atomic.AtomicReference
import scala.collection.mutable.{ArrayBuffer, HashMap, HashSet}
import org.apache.spark.SparkConf
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config
import org.apache.spark.util.{Clock, SystemClock, Utils}
/**
* BlacklistTracker is designed to track problematic executors and nodes. It supports blacklisting
* executors and nodes across an entire application (with a periodic expiry). TaskSetManagers add
* additional blacklisting of executors and nodes for individual tasks and stages which works in
* concert with the blacklisting here.
*
 * The tracker needs to deal with a variety of workloads, e.g.:
*
* * bad user code -- this may lead to many task failures, but that should not count against
* individual executors
* * many small stages -- this may prevent a bad executor for having many failures within one
* stage, but still many failures over the entire application
* * "flaky" executors -- they don't fail every task, but are still faulty enough to merit
* blacklisting
*
* See the design doc on SPARK-8425 for a more in-depth discussion.
*
* THREADING: As with most helpers of TaskSchedulerImpl, this is not thread-safe. Though it is
* called by multiple threads, callers must already have a lock on the TaskSchedulerImpl. The
* one exception is [[nodeBlacklist()]], which can be called without holding a lock.
*/
private[scheduler] class BlacklistTracker (
conf: SparkConf,
clock: Clock = new SystemClock()) extends Logging {
BlacklistTracker.validateBlacklistConfs(conf)
private val MAX_FAILURES_PER_EXEC = conf.get(config.MAX_FAILURES_PER_EXEC)
private val MAX_FAILED_EXEC_PER_NODE = conf.get(config.MAX_FAILED_EXEC_PER_NODE)
val BLACKLIST_TIMEOUT_MILLIS = BlacklistTracker.getBlacklistTimeout(conf)
/**
* A map from executorId to information on task failures. Tracks the time of each task failure,
* so that we can avoid blacklisting executors due to failures that are very far apart. We do not
* actively remove from this as soon as tasks hit their timeouts, to avoid the time it would take
* to do so. But it will not grow too large, because as soon as an executor gets too many
* failures, we blacklist the executor and remove its entry here.
*/
private val executorIdToFailureList = new HashMap[String, ExecutorFailureList]()
val executorIdToBlacklistStatus = new HashMap[String, BlacklistedExecutor]()
val nodeIdToBlacklistExpiryTime = new HashMap[String, Long]()
/**
* An immutable copy of the set of nodes that are currently blacklisted. Kept in an
* AtomicReference to make [[nodeBlacklist()]] thread-safe.
*/
private val _nodeBlacklist = new AtomicReference[Set[String]](Set())
/**
* Time when the next blacklist will expire. Used as a
* shortcut to avoid iterating over all entries in the blacklist when none will have expired.
*/
var nextExpiryTime: Long = Long.MaxValue
/**
   * Mapping from nodes to all of the executors that have been blacklisted on that node. We do *not*
   * remove from this when executors are removed from Spark, so that we can track multiple
   * successive blacklisted executors on one node. Even so, it will not grow too large: only a few
   * executors can be blacklisted on one node before we stop requesting more executors on that
   * node, and the entry for an executor is cleaned up once it has been blacklisted for
   * BLACKLIST_TIMEOUT_MILLIS.
*/
val nodeToBlacklistedExecs = new HashMap[String, HashSet[String]]()
/**
* Un-blacklists executors and nodes that have been blacklisted for at least
* BLACKLIST_TIMEOUT_MILLIS
*/
def applyBlacklistTimeout(): Unit = {
val now = clock.getTimeMillis()
// quickly check if we've got anything to expire from blacklist -- if not, avoid doing any work
if (now > nextExpiryTime) {
// Apply the timeout to blacklisted nodes and executors
val execsToUnblacklist = executorIdToBlacklistStatus.filter(_._2.expiryTime < now).keys
if (execsToUnblacklist.nonEmpty) {
// Un-blacklist any executors that have been blacklisted longer than the blacklist timeout.
logInfo(s"Removing executors $execsToUnblacklist from blacklist because the blacklist " +
s"for those executors has timed out")
execsToUnblacklist.foreach { exec =>
val status = executorIdToBlacklistStatus.remove(exec).get
val failedExecsOnNode = nodeToBlacklistedExecs(status.node)
failedExecsOnNode.remove(exec)
if (failedExecsOnNode.isEmpty) {
nodeToBlacklistedExecs.remove(status.node)
}
}
}
val nodesToUnblacklist = nodeIdToBlacklistExpiryTime.filter(_._2 < now).keys
if (nodesToUnblacklist.nonEmpty) {
// Un-blacklist any nodes that have been blacklisted longer than the blacklist timeout.
logInfo(s"Removing nodes $nodesToUnblacklist from blacklist because the blacklist " +
s"has timed out")
nodeIdToBlacklistExpiryTime --= nodesToUnblacklist
_nodeBlacklist.set(nodeIdToBlacklistExpiryTime.keySet.toSet)
}
updateNextExpiryTime()
}
}
private def updateNextExpiryTime(): Unit = {
val execMinExpiry = if (executorIdToBlacklistStatus.nonEmpty) {
executorIdToBlacklistStatus.map{_._2.expiryTime}.min
} else {
Long.MaxValue
}
val nodeMinExpiry = if (nodeIdToBlacklistExpiryTime.nonEmpty) {
nodeIdToBlacklistExpiryTime.values.min
} else {
Long.MaxValue
}
nextExpiryTime = math.min(execMinExpiry, nodeMinExpiry)
}
def updateBlacklistForSuccessfulTaskSet(
stageId: Int,
stageAttemptId: Int,
failuresByExec: HashMap[String, ExecutorFailuresInTaskSet]): Unit = {
// if any tasks failed, we count them towards the overall failure count for the executor at
// this point.
val now = clock.getTimeMillis()
failuresByExec.foreach { case (exec, failuresInTaskSet) =>
val appFailuresOnExecutor =
executorIdToFailureList.getOrElseUpdate(exec, new ExecutorFailureList)
appFailuresOnExecutor.addFailures(stageId, stageAttemptId, failuresInTaskSet)
appFailuresOnExecutor.dropFailuresWithTimeoutBefore(now)
val newTotal = appFailuresOnExecutor.numUniqueTaskFailures
val expiryTimeForNewBlacklists = now + BLACKLIST_TIMEOUT_MILLIS
// If this pushes the total number of failures over the threshold, blacklist the executor.
      // If it's already blacklisted, we avoid "re-blacklisting" (which can happen if there were
// other tasks already running in another taskset when it got blacklisted), because it makes
// some of the logic around expiry times a little more confusing. But it also wouldn't be a
// problem to re-blacklist, with a later expiry time.
if (newTotal >= MAX_FAILURES_PER_EXEC && !executorIdToBlacklistStatus.contains(exec)) {
logInfo(s"Blacklisting executor id: $exec because it has $newTotal" +
s" task failures in successful task sets")
val node = failuresInTaskSet.node
executorIdToBlacklistStatus.put(exec, BlacklistedExecutor(node, expiryTimeForNewBlacklists))
updateNextExpiryTime()
// In addition to blacklisting the executor, we also update the data for failures on the
// node, and potentially put the entire node into a blacklist as well.
val blacklistedExecsOnNode = nodeToBlacklistedExecs.getOrElseUpdate(node, HashSet[String]())
blacklistedExecsOnNode += exec
// If the node is already in the blacklist, we avoid adding it again with a later expiry
// time.
if (blacklistedExecsOnNode.size >= MAX_FAILED_EXEC_PER_NODE &&
!nodeIdToBlacklistExpiryTime.contains(node)) {
logInfo(s"Blacklisting node $node because it has ${blacklistedExecsOnNode.size} " +
s"executors blacklisted: ${blacklistedExecsOnNode}")
nodeIdToBlacklistExpiryTime.put(node, expiryTimeForNewBlacklists)
_nodeBlacklist.set(nodeIdToBlacklistExpiryTime.keySet.toSet)
}
}
}
}
def isExecutorBlacklisted(executorId: String): Boolean = {
executorIdToBlacklistStatus.contains(executorId)
}
/**
* Get the full set of nodes that are blacklisted. Unlike other methods in this class, this *IS*
* thread-safe -- no lock required on a taskScheduler.
*/
def nodeBlacklist(): Set[String] = {
_nodeBlacklist.get()
}
def isNodeBlacklisted(node: String): Boolean = {
nodeIdToBlacklistExpiryTime.contains(node)
}
def handleRemovedExecutor(executorId: String): Unit = {
    // We intentionally do not clean up executors that are already blacklisted in
    // nodeToBlacklistedExecs, so that if another executor on the same node gets blacklisted, we can
    // blacklist the entire node. We also keep the entry in executorIdToBlacklistStatus, so that the
    // executor is still removed by the normal timeout path. Despite not clearing those structures
    // here, we don't expect them to grow too big, since there won't be many executors on one node
    // and the timeout clears them up periodically in any case.
executorIdToFailureList -= executorId
}
/**
* Tracks all failures for one executor (that have not passed the timeout).
*
* In general we actually expect this to be extremely small, since it won't contain more than the
* maximum number of task failures before an executor is failed (default 2).
*/
private[scheduler] final class ExecutorFailureList extends Logging {
private case class TaskId(stage: Int, stageAttempt: Int, taskIndex: Int)
/**
* All failures on this executor in successful task sets.
*/
private var failuresAndExpiryTimes = ArrayBuffer[(TaskId, Long)]()
/**
* As an optimization, we track the min expiry time over all entries in failuresAndExpiryTimes
     * so it's quick to tell if there are any failures with expiry before the current time.
*/
private var minExpiryTime = Long.MaxValue
def addFailures(
stage: Int,
stageAttempt: Int,
failuresInTaskSet: ExecutorFailuresInTaskSet): Unit = {
failuresInTaskSet.taskToFailureCountAndFailureTime.foreach {
case (taskIdx, (_, failureTime)) =>
val expiryTime = failureTime + BLACKLIST_TIMEOUT_MILLIS
failuresAndExpiryTimes += ((TaskId(stage, stageAttempt, taskIdx), expiryTime))
if (expiryTime < minExpiryTime) {
minExpiryTime = expiryTime
}
}
}
/**
* The number of unique tasks that failed on this executor. Only counts failures within the
* timeout, and in successful tasksets.
*/
def numUniqueTaskFailures: Int = failuresAndExpiryTimes.size
def isEmpty: Boolean = failuresAndExpiryTimes.isEmpty
/**
* Apply the timeout to individual tasks. This is to prevent one-off failures that are very
* spread out in time (and likely have nothing to do with problems on the executor) from
* triggering blacklisting. However, note that we do *not* remove executors and nodes from
* the blacklist as we expire individual task failures -- each have their own timeout. Eg.,
* suppose:
* * timeout = 10, maxFailuresPerExec = 2
* * Task 1 fails on exec 1 at time 0
* * Task 2 fails on exec 1 at time 5
* --> exec 1 is blacklisted from time 5 - 15.
* This is to simplify the implementation, as well as keep the behavior easier to understand
* for the end user.
*/
def dropFailuresWithTimeoutBefore(dropBefore: Long): Unit = {
if (minExpiryTime < dropBefore) {
var newMinExpiry = Long.MaxValue
val newFailures = new ArrayBuffer[(TaskId, Long)]
failuresAndExpiryTimes.foreach { case (task, expiryTime) =>
if (expiryTime >= dropBefore) {
newFailures += ((task, expiryTime))
if (expiryTime < newMinExpiry) {
newMinExpiry = expiryTime
}
}
}
failuresAndExpiryTimes = newFailures
minExpiryTime = newMinExpiry
}
}
override def toString(): String = {
s"failures = $failuresAndExpiryTimes"
}
}
}
private[scheduler] object BlacklistTracker extends Logging {
private val DEFAULT_TIMEOUT = "1h"
/**
* Returns true if the blacklist is enabled, based on checking the configuration in the following
* order:
* 1. Is it specifically enabled or disabled?
* 2. Is it enabled via the legacy timeout conf?
* 3. Default is off
*/
def isBlacklistEnabled(conf: SparkConf): Boolean = {
conf.get(config.BLACKLIST_ENABLED) match {
case Some(enabled) =>
enabled
case None =>
// if they've got a non-zero setting for the legacy conf, always enable the blacklist,
// otherwise, use the default.
val legacyKey = config.BLACKLIST_LEGACY_TIMEOUT_CONF.key
conf.get(config.BLACKLIST_LEGACY_TIMEOUT_CONF).exists { legacyTimeout =>
if (legacyTimeout == 0) {
logWarning(s"Turning off blacklisting due to legacy configuration: $legacyKey == 0")
false
} else {
logWarning(s"Turning on blacklisting due to legacy configuration: $legacyKey > 0")
true
}
}
}
}
def getBlacklistTimeout(conf: SparkConf): Long = {
conf.get(config.BLACKLIST_TIMEOUT_CONF).getOrElse {
conf.get(config.BLACKLIST_LEGACY_TIMEOUT_CONF).getOrElse {
Utils.timeStringAsMs(DEFAULT_TIMEOUT)
}
}
}
/**
* Verify that blacklist configurations are consistent; if not, throw an exception. Should only
* be called if blacklisting is enabled.
*
* The configuration for the blacklist is expected to adhere to a few invariants. Default
* values follow these rules of course, but users may unwittingly change one configuration
* without making the corresponding adjustment elsewhere. This ensures we fail-fast when
* there are such misconfigurations.
*/
def validateBlacklistConfs(conf: SparkConf): Unit = {
def mustBePos(k: String, v: String): Unit = {
throw new IllegalArgumentException(s"$k was $v, but must be > 0.")
}
Seq(
config.MAX_TASK_ATTEMPTS_PER_EXECUTOR,
config.MAX_TASK_ATTEMPTS_PER_NODE,
config.MAX_FAILURES_PER_EXEC_STAGE,
config.MAX_FAILED_EXEC_PER_NODE_STAGE,
config.MAX_FAILURES_PER_EXEC,
config.MAX_FAILED_EXEC_PER_NODE
).foreach { config =>
val v = conf.get(config)
if (v <= 0) {
mustBePos(config.key, v.toString)
}
}
val timeout = getBlacklistTimeout(conf)
if (timeout <= 0) {
// first, figure out where the timeout came from, to include the right conf in the message.
conf.get(config.BLACKLIST_TIMEOUT_CONF) match {
case Some(t) =>
mustBePos(config.BLACKLIST_TIMEOUT_CONF.key, timeout.toString)
case None =>
mustBePos(config.BLACKLIST_LEGACY_TIMEOUT_CONF.key, timeout.toString)
}
}
val maxTaskFailures = conf.get(config.MAX_TASK_FAILURES)
val maxNodeAttempts = conf.get(config.MAX_TASK_ATTEMPTS_PER_NODE)
if (maxNodeAttempts >= maxTaskFailures) {
throw new IllegalArgumentException(s"${config.MAX_TASK_ATTEMPTS_PER_NODE.key} " +
s"( = ${maxNodeAttempts}) was >= ${config.MAX_TASK_FAILURES.key} " +
s"( = ${maxTaskFailures} ). Though blacklisting is enabled, with this configuration, " +
s"Spark will not be robust to one bad node. Decrease " +
s"${config.MAX_TASK_ATTEMPTS_PER_NODE.key}, increase ${config.MAX_TASK_FAILURES.key}, " +
s"or disable blacklisting with ${config.BLACKLIST_ENABLED.key}")
}
}
}
private final case class BlacklistedExecutor(node: String, expiryTime: Long)
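// ---------------------------------------------------------------------------
// Hedged sketch (not part of the upstream Spark file): a minimal illustration
// of how the enable/timeout resolution in BlacklistTracker behaves for a
// hand-built SparkConf. The object name and the literal config values below
// are assumptions made for this example; only the BlacklistTracker calls and
// config entries already used in this file are relied upon.
// ---------------------------------------------------------------------------
private[scheduler] object BlacklistTrackerConfigExample {
  def demo(): Unit = {
    // Explicit flag wins: blacklisting is on regardless of any legacy timeout.
    val explicit = new SparkConf().set(config.BLACKLIST_ENABLED.key, "true")
    assert(BlacklistTracker.isBlacklistEnabled(explicit))
    // No explicit flag, non-zero legacy timeout: blacklisting is enabled and the
    // legacy value also serves as the timeout.
    val legacy = new SparkConf().set(config.BLACKLIST_LEGACY_TIMEOUT_CONF.key, "600s")
    assert(BlacklistTracker.isBlacklistEnabled(legacy))
    assert(BlacklistTracker.getBlacklistTimeout(legacy) == 600 * 1000L)
    // Nothing set: disabled by default, and the timeout falls back to 1 hour.
    val defaults = new SparkConf(false)
    assert(!BlacklistTracker.isBlacklistEnabled(defaults))
    assert(BlacklistTracker.getBlacklistTimeout(defaults) == Utils.timeStringAsMs("1h"))
  }
}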
| sh-cho/cshSpark | scheduler/BlacklistTracker.scala | Scala | apache-2.0 | 16,785 |
package chatapp.client
import java.net.InetSocketAddress
import akka.actor.{Actor, ActorSystem, Kill}
import akka.io.Tcp._
import akka.io.{IO, Tcp}
import akka.util.ByteString
import chatapp.client.ClientMessage.SendMessage
/**
* Created by Niels Bokmans on 30-3-2016.
*/
class ClientActor(address: InetSocketAddress, actorSystem: ActorSystem) extends Actor {
  // Kick off the TCP connection attempt as soon as the actor is created; the
  // reply (Connected or CommandFailed) arrives as a message to this actor.
  IO(Tcp)(actorSystem) ! Connect(address)
  def receive: Receive = {
    case CommandFailed(_: Connect) =>
      // The connection attempt failed: stop this actor and shut the system down,
      // since the client cannot do anything useful without a connection.
      println("Failed to connect to " + address.toString)
      self ! Kill
      actorSystem.terminate()
    case Connected(remote, local) =>
      println("Successfully connected to " + address)
      // Register this actor as the handler for events on the new connection,
      // then switch behaviour to the connected state.
      val connection = sender()
      connection ! Register(self)
      context become {
        case Received(data) =>
          // Bytes received from the server, shown as plain ASCII text.
          println(data.decodeString("US-ASCII"))
        case SendMessage(message) =>
          // Outgoing chat message, written to the socket as raw bytes.
          connection ! Write(ByteString(message))
      }
  }
}
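// ---------------------------------------------------------------------------
// Hedged usage sketch (not part of the original file): one way this actor
// might be wired up from a main entry point. The object name, host/port
// literals, and the assumption that SendMessage wraps a single String are
// illustrative; only ClientActor itself is defined above. Props is referenced
// fully qualified because it is not imported at the top of this file.
// ---------------------------------------------------------------------------
object ClientExample {
  def main(args: Array[String]): Unit = {
    val system = ActorSystem("chat-client")
    val client = system.actorOf(
      akka.actor.Props(new ClientActor(new InetSocketAddress("localhost", 8080), system)),
      "client")
    // Note: in a real client you would wait until the connection is established
    // before sending; messages arriving before Connected are simply unhandled here.
    client ! SendMessage("hello from the example client\n")
  }
}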
| nielsje41/7l7wScala | chatapp/client/ClientActor.scala | Scala | mit | 944 |