code (stringlengths 5-1M) | repo_name (stringlengths 5-109) | path (stringlengths 6-208) | language (stringclasses 1 value) | license (stringclasses 15 values) | size (int64 5-1M)
---|---|---|---|---|---|
package gitbucket.core.util
import com.typesafe.config.ConfigFactory
import java.io.File
import Directory._
import liquibase.database.AbstractJdbcDatabase
import liquibase.database.core.{PostgresDatabase, MySQLDatabase, H2Database}
import org.apache.commons.io.FileUtils
object DatabaseConfig {
private lazy val config = {
val file = new File(GitBucketHome, "database.conf")
if(!file.exists){
FileUtils.write(file,
"""db {
| url = "jdbc:h2:${DatabaseHome};MVCC=true"
| user = "sa"
| password = "sa"
|# connectionTimeout = 30000
|# idleTimeout = 600000
|# maxLifetime = 1800000
|# minimumIdle = 10
|# maximumPoolSize = 10
|}
|""".stripMargin, "UTF-8")
}
ConfigFactory.parseFile(file)
}
private lazy val dbUrl = config.getString("db.url")
def url(directory: Option[String]): String =
dbUrl.replace("${DatabaseHome}", directory.getOrElse(DatabaseHome))
lazy val url : String = url(None)
lazy val user : String = config.getString("db.user")
lazy val password : String = config.getString("db.password")
lazy val jdbcDriver : String = DatabaseType(url).jdbcDriver
lazy val slickDriver : slick.driver.JdbcProfile = DatabaseType(url).slickDriver
lazy val liquiDriver : AbstractJdbcDatabase = DatabaseType(url).liquiDriver
lazy val connectionTimeout : Option[Long] = getOptionValue("db.connectionTimeout", config.getLong)
lazy val idleTimeout : Option[Long] = getOptionValue("db.idleTimeout" , config.getLong)
lazy val maxLifetime : Option[Long] = getOptionValue("db.maxLifetime" , config.getLong)
lazy val minimumIdle : Option[Int] = getOptionValue("db.minimumIdle" , config.getInt)
lazy val maximumPoolSize : Option[Int] = getOptionValue("db.maximumPoolSize" , config.getInt)
private def getOptionValue[T](path: String, f: String => T): Option[T] = {
if(config.hasPath(path)) Some(f(path)) else None
}
}
sealed trait DatabaseType {
val jdbcDriver: String
val slickDriver: slick.driver.JdbcProfile
val liquiDriver: AbstractJdbcDatabase
}
object DatabaseType {
def apply(url: String): DatabaseType = {
if(url.startsWith("jdbc:h2:")){
H2
} else if(url.startsWith("jdbc:mysql:")){
MySQL
} else if(url.startsWith("jdbc:postgresql:")){
PostgreSQL
} else {
throw new IllegalArgumentException(s"${url} is not supported.")
}
}
object H2 extends DatabaseType {
val jdbcDriver = "org.h2.Driver"
val slickDriver = slick.driver.H2Driver
val liquiDriver = new H2Database()
}
object MySQL extends DatabaseType {
val jdbcDriver = "com.mysql.jdbc.Driver"
val slickDriver = slick.driver.MySQLDriver
val liquiDriver = new MySQLDatabase()
}
object PostgreSQL extends DatabaseType {
val jdbcDriver = "org.postgresql.Driver2"
val slickDriver = new slick.driver.PostgresDriver {
override def quoteIdentifier(id: String): String = {
val s = new StringBuilder(id.length + 4) append '"'
for(c <- id) if(c == '"') s append "\"\"" else s append c.toLower
(s append '"').toString
}
}
val liquiDriver = new PostgresDatabase()
}
}
| zhoffice/gitbucket | src/main/scala/gitbucket/core/util/DatabaseConfig.scala | Scala | apache-2.0 | 3,347 |
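// Editor's note: a self-contained sketch (not part of the GitBucket file above) showing the
// optional-setting pattern used by DatabaseConfig.getOptionValue: entries that are commented
// out in database.conf simply have no path, so they resolve to None. The HOCON snippet and
// object name here are illustrative assumptions, not GitBucket code.
import com.typesafe.config.ConfigFactory

object DatabaseConfigOptionSketch {
  private val config = ConfigFactory.parseString(
    """db {
      |  url = "jdbc:h2:/tmp/gitbucket/data;MVCC=true"
      |  connectionTimeout = 30000
      |}""".stripMargin)

  private def getOptionValue[T](path: String, f: String => T): Option[T] =
    if(config.hasPath(path)) Some(f(path)) else None

  def main(args: Array[String]): Unit = {
    println(getOptionValue("db.connectionTimeout", config.getLong)) // Some(30000)
    println(getOptionValue("db.maximumPoolSize", config.getInt))    // None
  }
}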
/*
* Copyright (C) 2009-2018 Lightbend Inc. <https://www.lightbend.com>
*/
package play.api.libs.typedmap
/**
* A TypedKey is a key that can be used to get and set values in a
* [[TypedMap]] or any object with typed keys. This class uses reference
* equality for comparisons, so each new instance is a different key.
*
* @param displayName The name to display for this key, or `None` if
* no display name has been provided. This name is only used for debugging.
* Keys with the same name are not considered to be equal.
* @tparam A The type of values associated with this key.
*/
final class TypedKey[A] private (val displayName: Option[String]) {
/**
* Bind this key to a value. This is equivalent to the `->` operator.
*
* @param value The value to bind this key to.
* @return A bound value.
*/
def bindValue(value: A): TypedEntry[A] = TypedEntry(this, value)
/**
* Bind this key to a value. Equivalent to [[bindValue]].
*
* @param value The value to bind.
* @return An entry binding this key to a value of the right type.
*/
def ->(value: A): TypedEntry[A] = bindValue(value)
override def toString: String = displayName.getOrElse(super.toString)
/**
* @return The Java version for this key.
*/
def asJava: play.libs.typedmap.TypedKey[A] = new play.libs.typedmap.TypedKey[A](this)
}
/**
* Helper for working with `TypedKey`s.
*/
object TypedKey {
/**
* Creates a [[TypedKey]] without a name.
*
* @tparam A The type of value this key is associated with.
* @return A fresh key.
*/
def apply[A]: TypedKey[A] = new TypedKey[A](None)
/**
* Creates a [[TypedKey]] with the given name.
*
* @param displayName The name to display when printing this key.
* @tparam A The type of value this key is associated with.
* @return A fresh key.
*/
def apply[A](displayName: String): TypedKey[A] = new TypedKey[A](Some(displayName))
}
| Shenker93/playframework | framework/src/play/src/main/scala/play/api/libs/typedmap/TypedKey.scala | Scala | apache-2.0 | 1,928 |
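// Editor's note: a short usage sketch (not part of the Play file above). It only uses the API
// shown in TypedKey itself plus TypedEntry from the same play.api.libs.typedmap package; the
// object and value names are illustrative.
import play.api.libs.typedmap.{ TypedEntry, TypedKey }

object TypedKeySketch {
  val UserId: TypedKey[Long] = TypedKey[Long]("userId")

  // `->` is just bindValue: it pairs the key with a value of the key's type.
  val entry: TypedEntry[Long] = UserId -> 42L

  def main(args: Array[String]): Unit = {
    println(UserId) // prints "userId" (the display name)
    // Keys use reference equality, so a second key with the same name is a different key:
    println(TypedKey[Long]("userId") == UserId) // false
  }
}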
package im.actor.server.presences
import akka.actor.PoisonPill
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import akka.testkit.TestProbe
import akka.util.Timeout
import org.scalatest.time.{ Seconds, Span }
import im.actor.server.ActorSuite
import im.actor.server.db.DbExtension
class GroupPresenceManagerSpec extends ActorSuite {
behavior of "GroupPresenceManager"
it should "subscribe/unsubscribe to group presences" in e1
it should "not consider presence change on second device online when first is online" in e2
it should "not consider presence change on second device offline when first is online" in e3
import GroupPresenceManager._
implicit val ec: ExecutionContext = system.dispatcher
override implicit val patienceConfig = PatienceConfig(timeout = Span(5, Seconds))
implicit val timeout: Timeout = Timeout(5.seconds)
DbExtension(system).clean()
DbExtension(system).migrate()
implicit val userPresenceRegion = PresenceManager.startRegion()
implicit val region = GroupPresenceManager.startRegion()
def e1() = {
val userId = util.Random.nextInt
val groupId = util.Random.nextInt
val probe = TestProbe()
whenReady(subscribe(groupId, probe.ref)) { _ ⇒ }
probe.expectMsgPF() {
case GroupPresenceState(g, 0) if g == groupId ⇒
}
GroupPresenceManager.notifyGroupUserAdded(groupId, userId)
PresenceManager.presenceSetOnline(userId, 1L, 1000)
probe.expectMsgPF() {
case GroupPresenceState(g, 1) if g == groupId ⇒
}
probe.expectMsgPF() {
case GroupPresenceState(g, 0) if g == groupId ⇒
}
whenReady(unsubscribe(groupId, probe.ref)) { _ ⇒ }
probe.expectNoMsg()
probe.ref ! PoisonPill
}
def e2() = {
val userId = util.Random.nextInt
val groupId = util.Random.nextInt
val probe = TestProbe()
GroupPresenceManager.notifyGroupUserAdded(groupId, userId)
whenReady(subscribe(groupId, probe.ref)) { _ ⇒ }
probe.expectMsgPF() {
case GroupPresenceState(g, 0) if g == groupId ⇒
}
PresenceManager.presenceSetOnline(userId, 1L, 300)
probe.expectMsgPF() {
case GroupPresenceState(g, 1) if g == groupId ⇒
}
PresenceManager.presenceSetOnline(userId, 2L, 600)
probe.expectNoMsg(400.millis)
probe.expectMsgPF() {
case GroupPresenceState(g, 0) if g == groupId ⇒
}
}
def e3() = {
val userId = util.Random.nextInt
val groupId = util.Random.nextInt
val probe = TestProbe()
GroupPresenceManager.notifyGroupUserAdded(groupId, userId)
whenReady(subscribe(groupId, probe.ref)) { _ ⇒ }
probe.expectMsgPF() {
case GroupPresenceState(g, 0) if g == groupId ⇒
}
PresenceManager.presenceSetOnline(userId, 1L, 300)
probe.expectMsgPF() {
case GroupPresenceState(g, 1) if g == groupId ⇒
}
PresenceManager.presenceSetOnline(userId, 2L, 300)
PresenceManager.presenceSetOffline(userId, 2L, 300)
// should not consider user offline as the first device is still online
probe.expectNoMsg(200.millis)
// finally consider user offline as first device's online is timed out
probe.expectMsgPF() {
case GroupPresenceState(g, 0) if g == groupId ⇒
}
}
}
| darioajr/actor-platform | actor-server/actor-tests/src/test/scala/im/actor/server/presences/GroupPresenceManagerSpec.scala | Scala | mit | 3,259 |
package chapter.twelve
object ExerciseNine {
def corresponds2[A, B](as: Array[A], bs: Array[B], fun: (A, B) => Boolean): Boolean = {
(as zip bs).map(tuple => fun(tuple._1, tuple._2)).reduce(_ & _)
}
}
| deekim/impatient-scala | src/main/scala/chapter/twelve/ExerciseNine.scala | Scala | apache-2.0 | 212 |
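// Editor's note: an alternative sketch, not from the repository above. The zip/map/reduce
// version throws on empty arrays (reduce needs at least one element) and evaluates the
// predicate for every pair; forall handles the empty case and stops at the first mismatch.
object ExerciseNineForallSketch {
  def corresponds2[A, B](as: Array[A], bs: Array[B], fun: (A, B) => Boolean): Boolean =
    (as zip bs).forall { case (a, b) => fun(a, b) }

  def main(args: Array[String]): Unit = {
    println(corresponds2(Array(1, 2, 3), Array("a", "bb", "ccc"), (n: Int, s: String) => s.length == n)) // true
    println(corresponds2(Array.empty[Int], Array.empty[String], (n: Int, s: String) => false))           // true
  }
}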
package com.boldradius.astrolabe.client.services
import com.boldradius.astrolabe.http.ClusterProtocol
import org.scalajs.dom
import org.scalajs.dom.raw._
import upickle.default._
import com.boldradius.astrolabe.http.Json._
object WebSocketClient {
var open: Boolean = false
lazy val websocket = new WebSocket(getWebsocketUri(dom.document))
websocket.onopen = { (event: Event) =>
ClusterService.findDiscoveringClusters()
ClusterService.findDiscoveredClusters()
event
}
websocket.onerror = { (event: ErrorEvent) => }
websocket.onmessage = { (event: MessageEvent) =>
val msg: ClusterProtocol = read[ClusterProtocol](event.data.toString)
MainDispatcher.dispatch(msg)
event
}
websocket.onclose = { (event: Event) => }
def getWebsocketUri(document: Document): String = {
val wsProtocol = if (dom.document.location.protocol == "https:") "wss" else "ws"
s"$wsProtocol://${dom.document.location.host}/events"
}
def send(msg: ClusterProtocol): Unit = {
websocket.send(write(msg))
}
}
| boldradius/cluster-console | js/src/main/scala/com/boldradius/astrolabe/client/services/WebSocketClient.scala | Scala | bsd-3-clause | 1,045 |
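// Editor's note: a standalone sketch (not part of the file above) of the protocol-selection
// rule in getWebsocketUri: pages served over HTTPS must open their WebSocket over wss,
// everything else falls back to ws. Host and path values here are illustrative.
object WebSocketUriSketch {
  def websocketUri(pageProtocol: String, host: String, path: String = "/events"): String = {
    val wsProtocol = if (pageProtocol == "https:") "wss" else "ws"
    s"$wsProtocol://$host$path"
  }

  def main(args: Array[String]): Unit = {
    println(websocketUri("https:", "example.org:9000")) // wss://example.org:9000/events
    println(websocketUri("http:", "localhost:9000"))    // ws://localhost:9000/events
  }
}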
/*******************************************************************************
* Copyright (c) 2016 Logimethods
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the MIT License (MIT)
* which accompanies this distribution, and is available at
* http://opensource.org/licenses/MIT
*******************************************************************************/
package com.logimethods.nats.demo
import akka.actor.{ActorRef, Props}
import io.gatling.core.Predef._
import io.gatling.core.action.builder.ActionBuilder
import com.logimethods.connector.gatling.to_nats._
import scala.concurrent.duration._
import java.util.Properties
import io.nats.client.Constants.PROP_URL
class NatsStreamingInjection extends Simulation {
val natsUrl = System.getenv("NATS_URI")
val clusterID = System.getenv("NATS_CLUSTER_ID")
var subject = System.getenv("GATLING_TO_NATS_SUBJECT")
if (subject == null) {
println("No Subject has been defined through the 'GATLING_TO_NATS_SUBJECT' Environment Variable!!!")
} else {
println("Will emit messages to " + subject)
val natsProtocol = NatsStreamingProtocol(natsUrl, clusterID, subject)
val natsScn = scenario("NATS call").exec(NatsStreamingBuilder(new ValueProvider("")))
setUp(
natsScn.inject(constantUsersPerSec(15) during (1 minute))
).protocols(natsProtocol)
}
}
| Logimethods/docker-nats-connector-spark | inject/user-files/simulations/nats/NatsStreamingInjection.scala | Scala | mit | 1,418 |
/*
* Copyright 2014 Commonwealth Computer Research, Inc.
*
* Licensed under the Apache License, Version 2.0 (the License);
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an AS IS BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.locationtech.geomesa.core.security
import org.apache.accumulo.core.security.Authorizations
import org.locationtech.geomesa.core.data.AccumuloDataStoreFactory
import scala.collection.JavaConversions._
/**
* AuthorizationsProvider that wraps another provider and ensures that the auths returned do not exceed a pre-set list
*/
class FilteringAuthorizationsProvider (val wrappedProvider: AuthorizationsProvider)
extends AuthorizationsProvider {
var filter: Option[Array[String]] = None
override def getAuthorizations: Authorizations =
filter match {
case None => wrappedProvider.getAuthorizations
case Some(_) => {
val filtered = wrappedProvider.getAuthorizations.getAuthorizations.map(new String(_)).intersect(filter.get)
new Authorizations(filtered:_*)
}
}
override def configure(params: java.util.Map[String, java.io.Serializable]) {
val authString = AccumuloDataStoreFactory.params.authsParam.lookUp(params).asInstanceOf[String]
if (authString != null && !authString.isEmpty)
filter = Option(authString.split(","))
wrappedProvider.configure(params)
}
}
| kevinwheeler/geomesa | geomesa-core/src/main/scala/org/locationtech/geomesa/core/security/FilteringAuthorizationsProvider.scala | Scala | apache-2.0 | 1,762 |
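// Editor's note: a standalone sketch (not part of the GeoMesa file above) of the filtering
// step: auths returned by the wrapped provider are intersected with the comma-separated list
// configured via authsParam, so wrapping can only narrow, never widen, a user's auths.
object FilteringAuthsSketch {
  def filterAuths(provided: Seq[String], configured: Option[String]): Seq[String] =
    configured match {
      case None       => provided
      case Some(list) => provided.intersect(list.split(",").toSeq)
    }

  def main(args: Array[String]): Unit =
    // "admin" from the provider is dropped, and "system" is never added: List(user)
    println(filterAuths(Seq("user", "admin"), Some("user,system")))
}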
/*
* @author Philip Stutz
* @author Mihaela Verman
*
* Copyright 2013 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.triplerush
import java.util.concurrent.atomic.AtomicInteger
import scala.language.implicitConversions
import com.signalcollect.triplerush.EfficientIndexPattern._
import com.signalcollect.triplerush.sparql.VariableEncoding
import QueryParticle._
object ParticleDebug {
def apply(p: Array[Int]): ParticleDebug = {
val particle = new QueryParticle(p)
ParticleDebug(
particle.queryId,
particle.tickets,
(-1 to -particle.bindings.length by -1).zip(particle.bindings).filter(_._2 != 0).toMap,
particle.patterns.toList.reverse,
particle.numberOfBindings,
particle.r.length - 4)
}
def validate(p: Array[Int], msg: String): Unit = {
if (p != null) {
val validTotalLength = (p.length - 4 - p.numberOfBindings) % 3 == 0
val validNumberOfBindingsLength = (p.length - 4 - p.numberOfBindings) >= 0
if (!validTotalLength) {
val debug = ParticleDebug(p)
throw new Exception(s"$msg remaining length after numberofbindings not divisible by 3 (cannot represent TP): $debug")
}
if (!validNumberOfBindingsLength) {
val debug = ParticleDebug(p)
throw new Exception(s"$msg array too short to accomodate bindings $debug")
}
} else {
throw new Exception(s"$msg validate particle was null")
}
}
}
case class ParticleDebug(
id: Int,
tickets: Long,
bindings: Map[Int, Int],
unmatched: List[TriplePattern],
numberOfBindings: Int,
intsForBindingsAndUnmatched: Int)
object QueryParticle {
implicit def arrayToParticle(a: Array[Int]): QueryParticle = new QueryParticle(a)
def apply(
patterns: Seq[TriplePattern],
queryId: Int,
numberOfSelectVariables: Int,
tickets: Long = Long.MaxValue): Array[Int] = {
val ints = 4 + numberOfSelectVariables + 3 * patterns.length
val r = new Array[Int](ints)
r.writeQueryId(queryId)
r.writeNumberOfBindings(numberOfSelectVariables)
r.writeTickets(tickets)
r.writePatterns(patterns)
r
}
val failed: Array[Int] = null.asInstanceOf[Array[Int]]
}
/**
* The array packs:
* 0 int: queryId,
* 1-2 long: tickets (long encoded as 2 ints)
* 3 int: numberOfBindings
* 4-? ints: bindings
* ? ?*3 ints: triple patterns in reverse matching order (last one
* gets matched first).
*/
class QueryParticle(val r: Array[Int]) extends AnyVal {
def validate(msg: String): Unit = ParticleDebug.validate(r, msg)
def isBindingQuery: Boolean = queryId > 0
def copy: Array[Int] = {
val c = new Array[Int](r.length)
System.arraycopy(r, 0, c, 0, r.length)
c
}
def bindSubject(
toMatchS: Int,
toMatchP: Int,
toMatchO: Int,
toBindS: Int,
toBindP: Int,
toBindO: Int,
copyBeforeWrite: Boolean): Array[Int] = {
// Subject is conflicting constant. No binding possible.
if (toMatchS > 0) {
return QueryParticle.failed
}
// Subject is a variable that needs to be bound to the constant in the triple pattern.
// Bind value to variable.
val variable = toMatchS
val boundValue = toBindS
if (isBindingIncompatible(toMatchP, toBindP, variable, boundValue)
|| isBindingIncompatible(toMatchO, toBindO, variable, boundValue)) {
return QueryParticle.failed
}
// No conflicts, we bind the value to the variable.
val currentParticle: Array[Int] = {
if (copyBeforeWrite) {
copyWithoutLastPattern
} else {
r
}
}
if (isBindingQuery) {
val variableIndex = VariableEncoding.variableIdToDecodingIndex(variable)
currentParticle.writeBinding(variableIndex, boundValue)
}
currentParticle.bindVariablesInPatterns(variable, boundValue)
val result = currentParticle.bindPredicate(toMatchP, toMatchO, toBindP, toBindO, false)
result
}
def bindPredicate(
toMatchP: Int,
toMatchO: Int,
toBindP: Int,
toBindO: Int,
copyBeforeWrite: Boolean): Array[Int] = {
if (toMatchP == toBindP) {
// Predicate is compatible constant. No binding necessary.
return bindObject(toMatchO, toBindO, copyBeforeWrite)
}
// Predicate is conflicting constant. No binding possible.
if (toMatchP > 0) {
return QueryParticle.failed
}
// Predicate is a variable that needs to be bound to the constant in the triple pattern.
// Bind value to variable.
val variable = toMatchP
val boundValue = toBindP
if (isBindingIncompatible(toMatchO, toBindO, variable, boundValue)) {
return QueryParticle.failed
}
// No conflicts, we bind the value to the variable.
val currentParticle = {
if (copyBeforeWrite) {
copyWithoutLastPattern
} else {
r
}
}
if (isBindingQuery) {
val variableIndex = -(variable + 1)
currentParticle.writeBinding(variableIndex, boundValue)
}
currentParticle.bindVariablesInPatterns(variable, boundValue)
val result = currentParticle.bindObject(toMatchO, toBindO, false)
result
}
def bindObject(
toMatchO: Int,
toBindO: Int,
copyBeforeWrite: Boolean): Array[Int] = {
if (toMatchO == toBindO) {
// Object is compatible constant. No binding necessary.
// val result = {
// if (copyBeforeWrite) {
// // We need to cut off the last pattern, even if we never write.
// particle.copyWithoutLastPattern
// } else {
// particle
// }
// }
// In theory the check above would be necessary. In practice this
// execution path is only reached if a particle copy was made before.
return r
}
// Object is conflicting constant. No binding possible.
if (toMatchO > 0) {
return QueryParticle.failed
}
// Object is a variable that needs to be bound to the constant in the triple pattern.
// Bind value to variable.
val variable = toMatchO
val boundValue = toBindO
// No conflicts, we bind the value to the variable.
val currentParticle = {
if (copyBeforeWrite) {
copyWithoutLastPattern
} else {
r
}
}
if (isBindingQuery) {
val variableIndex = -(variable + 1)
currentParticle.writeBinding(variableIndex, boundValue)
}
currentParticle.bindVariablesInPatterns(variable, boundValue)
currentParticle
}
// If the variable appears multiple times in the same pattern, then all the bindings have to be compatible.
@inline private[this] def isBindingIncompatible(otherAttribute: Int, tpAttribute: Int, variable: Int, boundValue: Int): Boolean = {
(otherAttribute == variable && tpAttribute != boundValue)
}
// Updates an attribute with a new binding.
@inline private[this] def updatedAttribute(attribute: Int, variable: Int, boundValue: Int): Int = {
if (attribute == variable) boundValue else attribute
}
def bindings: Array[Int] = {
val numBindings = numberOfBindings
val b = new Array[Int](numBindings)
System.arraycopy(r, 4, b, 0, numBindings)
b
}
def isResult: Boolean = r.length == 4 + numberOfBindings
def queryId: Int = r(0)
def writeQueryId(id: Int): Unit = r(0) = id
def tickets: Long = {
((r(1) | 0L) << 32) | (r(2) & 0x00000000FFFFFFFFL)
}
def writeTickets(t: Long) = {
r(1) = (t >> 32).toInt
r(2) = t.toInt
}
def numberOfBindings: Int = r(3)
def writeNumberOfBindings(numberOfBindings: Int): Unit = {
r(3) = numberOfBindings
}
def writeBindings(bindings: Seq[Int]): Unit = {
r(3) = bindings.length
var i = 0
while (i < bindings.length) {
writeBinding(i, bindings(i))
i += 1
}
}
def writeBinding(bindingIndex: Int, boundValue: Int): Unit = {
val numBindings = numberOfBindings
if (bindingIndex < numBindings) {
val baseBindingIndex = 4
r(baseBindingIndex + bindingIndex) = boundValue
}
}
def binding(bindingIndex: Int): Int = {
val contentIntIndex = bindingIndex + 4
r(contentIntIndex)
}
def writePatterns(unmatched: Seq[TriplePattern]): Unit = {
var i = 0
var tpByteIndex = r.length - 3 // index of subject of last pattern
while (i < unmatched.length) {
writePattern(tpByteIndex, unmatched(i))
tpByteIndex -= 3
i += 1
}
}
/**
* Requires the index where the subject will be written.
* Pattern is written in spo order.
*/
def writePattern(subjectIndex: Int, p: TriplePattern): Unit = {
r(subjectIndex) = p.s
r(subjectIndex + 1) = p.p
r(subjectIndex + 2) = p.o
}
def copyWithTickets(t: Long, complete: Boolean): Array[Int] = {
// It seems that for small arrays arraycopy is faster than clone:
// http://www.javaspecialists.co.za/archive/Issue124.html
val rLength = r.length
val newR = new Array[Int](rLength)
System.arraycopy(r, 0, newR, 0, rLength)
if (complete) {
newR.writeTickets(t)
} else {
newR.writeTickets(-t)
}
newR
}
def patterns: IndexedSeq[TriplePattern] = {
for {i <- numberOfPatterns - 1 to 0 by -1} yield pattern(i)
}
def numberOfPatterns: Int = (r.length - 4 - numberOfBindings) / 3
def pattern(index: Int): TriplePattern = {
val sIndex = 3 * index + 4 + numberOfBindings
val pIndex = sIndex + 1
val oIndex = sIndex + 2
TriplePattern(r(sIndex), r(pIndex), r(oIndex))
}
def lastPattern: TriplePattern = {
val sIndex = r.length - 3
val pIndex = r.length - 2
val oIndex = r.length - 1
TriplePattern(
r(sIndex),
r(pIndex),
r(oIndex))
}
def copyWithoutLastPattern: Array[Int] = {
val copyLength = r.length - 3
val rCopy = new Array[Int](copyLength)
System.arraycopy(r, 0, rCopy, 0, copyLength)
rCopy
}
// Update all patterns with this new binding.
def bindVariablesInPatterns(
variable: Int,
boundValue: Int): Unit = {
// Index of first subject of first TP.
var i = numberOfBindings + 4
while (i < r.length) {
if (r(i) == variable) {
r(i) = boundValue
}
i += 1
}
}
/**
* Routing address for this query.
*/
def routingAddress: Long = {
if (isResult) {
OperationIds.embedInLong(queryId)
} else {
// Query not complete yet, route onwards.
val s = lastPatternS
val p = lastPatternP
val o = lastPatternO
if (s > 0 && p > 0 && o > 0) {
EfficientIndexPattern(s, 0, o)
} else {
EfficientIndexPattern(math.max(s, 0), math.max(p, 0), math.max(o, 0))
}
}
}
/**
* Checks that the last pattern is not fully bound and that no variable appears multiple times in the last pattern.
* The same variable appearing multiple times might cause a binding to fail.
*/
def isSimpleToBind: Boolean = {
val s = lastPatternS
val p = lastPatternP
val o = lastPatternO
!(s > 0 && p > 0 && o > 0) &&
(s > 0 || (s != p && s != o)) &&
(o > 0 || (o != p))
}
@inline def lastPatternS: Int = r(r.length - 3)
@inline def lastPatternP: Int = r(r.length - 2)
@inline def lastPatternO: Int = r(r.length - 1)
/**
* Assumption: TP has all constants.
*/
def bind(toBindS: Int, toBindP: Int, toBindO: Int): Array[Int] = {
val patternS = lastPatternS
val patternP = lastPatternP
val patternO = lastPatternO
if (toBindS == patternS) {
// Subject is compatible constant. No binding necessary.
if (toBindP == patternP) {
// Predicate is compatible constant. No binding necessary.
if (toBindO == patternO) {
// Object is compatible constant. No binding necessary.
return copyWithoutLastPattern
}
return bindObject(patternO, toBindO, true)
}
return bindPredicate(patternP, patternO, toBindP, toBindO, true)
}
bindSubject(patternS, patternP, patternO, toBindS, toBindP, toBindO, true)
}
}
| hicolour/triplerush | src/main/scala/com/signalcollect/triplerush/QueryParticle.scala | Scala | apache-2.0 | 12,608 |
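// Editor's note: a worked, self-contained illustration (not part of the TripleRush file above)
// of the array layout documented on QueryParticle: [queryId, ticketsHi, ticketsLo,
// numberOfBindings, binding slots..., triple patterns in reverse matching order as (s, p, o)].
// The concrete ids (query 1, predicate 7, variables -1/-2) are made up for the example.
object QueryParticleLayoutSketch {
  def main(args: Array[String]): Unit = {
    val tickets = Long.MaxValue
    // Query id 1, two select variables (-1 and -2), one pattern (?a 7 ?b):
    val r = Array[Int](
      1,                     // queryId
      (tickets >> 32).toInt, // high 32 bits of the ticket count
      tickets.toInt,         // low 32 bits of the ticket count
      2,                     // numberOfBindings (select variables, still unbound = 0)
      0, 0,                  // binding slots for variables -1 and -2
      -1, 7, -2)             // the single pattern; the last pattern is matched first
    val decodedTickets = ((r(1) | 0L) << 32) | (r(2) & 0x00000000FFFFFFFFL)
    println(decodedTickets == tickets)   // true: same decoding as QueryParticle.tickets
    println(r.length == 4 + 2 + 3 * 1)   // true: header + bindings + one triple pattern
  }
}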
//
// Copyright 2016 Commonwealth Bank of Australia
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
// http://www.apache.org/licenses/LICENSE-2.0
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package commbank.coppersmith.scalding.lift
import com.twitter.scalding.typed.TypedPipe
import scalaz.Functor
/**
* There is good reason to include these somewhere more central to omnia, and soon.
*/
object ScaldingScalazInstances {
implicit val typedPipeFunctor: Functor[TypedPipe] = new Functor[TypedPipe] {
override def map[A, B](fa: TypedPipe[A])(f: (A) => B): TypedPipe[B] =
fa.map(f)
}
}
| CommBank/coppersmith | scalding/src/main/scala/commbank/coppersmith/scalding/lift/ScaldingScalazInstances.scala | Scala | apache-2.0 | 1,048 |
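// Editor's note: usage sketch, not part of the file above. With the implicit instance imported,
// scalaz's Functor operations become available for Scalding's TypedPipe. The exact scalaz and
// scalding versions and the TypedPipe.from overload used here are assumptions of the example.
import com.twitter.scalding.typed.TypedPipe
import scalaz.Functor
import commbank.coppersmith.scalding.lift.ScaldingScalazInstances.typedPipeFunctor

object TypedPipeFunctorSketch {
  val doubled: TypedPipe[Int] =
    Functor[TypedPipe].map(TypedPipe.from(List(1, 2, 3)))(_ * 2)
}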
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.openwhisk.core.database.s3
import akka.actor.ActorSystem
import akka.event.Logging
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.headers.CacheDirectives._
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.model.{ContentType, HttpRequest, HttpResponse, Uri}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.alpakka.s3.headers.CannedAcl
import akka.stream.alpakka.s3.scaladsl.S3
import akka.stream.alpakka.s3.{S3Attributes, S3Exception, S3Headers, S3Settings}
import akka.stream.scaladsl.{Sink, Source}
import akka.util.ByteString
import com.typesafe.config.Config
import org.apache.openwhisk.common.LoggingMarkers.{
DATABASE_ATTS_DELETE,
DATABASE_ATT_DELETE,
DATABASE_ATT_GET,
DATABASE_ATT_SAVE
}
import org.apache.openwhisk.common.{Logging, TransactionId}
import org.apache.openwhisk.core.ConfigKeys
import org.apache.openwhisk.core.database.StoreUtils._
import org.apache.openwhisk.core.database._
import org.apache.openwhisk.core.entity.DocId
import pureconfig._
import pureconfig.generic.auto._
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContext, Future}
import scala.reflect.ClassTag
object S3AttachmentStoreProvider extends AttachmentStoreProvider {
val alpakkaConfigKey = s"${ConfigKeys.s3}.alpakka"
case class S3Config(bucket: String, prefix: Option[String], cloudFrontConfig: Option[CloudFrontConfig] = None) {
def prefixFor[D](implicit tag: ClassTag[D]): String = {
val className = tag.runtimeClass.getSimpleName.toLowerCase
prefix.map(p => s"$p/$className").getOrElse(className)
}
def signer: Option[UrlSigner] = cloudFrontConfig.map(CloudFrontSigner)
}
override def makeStore[D <: DocumentSerializer: ClassTag]()(implicit actorSystem: ActorSystem,
logging: Logging): AttachmentStore = {
val config = loadConfigOrThrow[S3Config](ConfigKeys.s3)
new S3AttachmentStore(s3Settings(actorSystem.settings.config), config.bucket, config.prefixFor[D], config.signer)
}
def makeStore[D <: DocumentSerializer: ClassTag](config: Config)(implicit actorSystem: ActorSystem,
logging: Logging): AttachmentStore = {
val s3config = loadConfigOrThrow[S3Config](config, ConfigKeys.s3)
new S3AttachmentStore(s3Settings(config), s3config.bucket, s3config.prefixFor[D], s3config.signer)
}
private def s3Settings(config: Config) = S3Settings(config.getConfig(alpakkaConfigKey))
}
trait UrlSigner {
def getSignedURL(s3ObjectKey: String): Uri
}
class S3AttachmentStore(s3Settings: S3Settings, bucket: String, prefix: String, urlSigner: Option[UrlSigner])(
implicit system: ActorSystem,
logging: Logging)
extends AttachmentStore {
private val s3attributes = S3Attributes.settings(s3Settings)
private val commonS3Headers = {
val cache = `Cache-Control`(`max-age`(365.days.toSeconds))
S3Headers()
.withCannedAcl(CannedAcl.Private) //All contents are private
.withCustomHeaders(Map(cache.name -> cache.value)) //As objects are immutable cache them for long time
}
override val scheme = "s3"
override protected[core] implicit val executionContext: ExecutionContext = system.dispatcher
logging.info(this, s"Initializing S3AttachmentStore with bucket=[$bucket], prefix=[$prefix], signer=[$urlSigner]")
override protected[core] def attach(
docId: DocId,
name: String,
contentType: ContentType,
docStream: Source[ByteString, _])(implicit transid: TransactionId): Future[AttachResult] = {
require(name != null, "name undefined")
val start =
transid.started(this, DATABASE_ATT_SAVE, s"[ATT_PUT] uploading attachment '$name' of document 'id: $docId'")
//A possible optimization for small attachments < 5MB can be to use putObject instead of multipartUpload
//and thus use 1 remote call instead of 3
val f = docStream
.runWith(
combinedSink(
S3.multipartUploadWithHeaders(bucket, objectKey(docId, name), contentType, s3Headers = commonS3Headers)
.withAttributes(s3attributes)))
.map(r => AttachResult(r.digest, r.length))
f.foreach(_ =>
transid
.finished(this, start, s"[ATT_PUT] '$prefix' completed uploading attachment '$name' of document 'id: $docId'"))
reportFailure(
f,
start,
failure => s"[ATT_PUT] '$prefix' internal error, name: '$name', doc: '$docId', failure: '${failure.getMessage}'")
}
override protected[core] def readAttachment[T](docId: DocId, name: String, sink: Sink[ByteString, Future[T]])(
implicit transid: TransactionId): Future[T] = {
require(name != null, "name undefined")
val start =
transid.started(
this,
DATABASE_ATT_GET,
s"[ATT_GET] '$prefix' finding attachment '$name' of document 'id: $docId'")
val source = getAttachmentSource(objectKey(docId, name))
val f = source.flatMap {
case Some(x) => x.withAttributes(s3attributes).runWith(sink)
case None => Future.failed(NoDocumentException("Not found on 'readAttachment'."))
}
val g = f.transform(
{ s =>
transid
.finished(this, start, s"[ATT_GET] '$prefix' completed: found attachment '$name' of document 'id: $docId'")
s
}, {
case e: NoDocumentException =>
transid
.finished(
this,
start,
s"[ATT_GET] '$prefix', retrieving attachment '$name' of document 'id: $docId'; not found.",
logLevel = Logging.ErrorLevel)
e
case e => e
})
reportFailure(
g,
start,
failure =>
s"[ATT_GET] '$prefix' internal error, name: '$name', doc: 'id: $docId', failure: '${failure.getMessage}'")
}
private def getAttachmentSource(objectKey: String): Future[Option[Source[ByteString, Any]]] = urlSigner match {
case Some(signer) => getUrlContent(signer.getSignedURL(objectKey))
// When reading from S3 we get an optional source of ByteString and Metadata if the object exists
// For such case drop the metadata
case None =>
S3.download(bucket, objectKey)
.withAttributes(s3attributes)
.runWith(Sink.head)
.map(x => x.map(_._1))
}
private def getUrlContent(uri: Uri): Future[Option[Source[ByteString, Any]]] = {
val future = Http().singleRequest(HttpRequest(uri = uri))
future.flatMap {
case HttpResponse(status, _, entity, _) if status.isSuccess() && !status.isRedirection() =>
Future.successful(Some(entity.dataBytes))
case HttpResponse(_, _, entity, _) =>
Unmarshal(entity).to[String].map { err =>
// With CloudFront the error message also conforms to the same S3 exception format
val exp = new S3Exception(err)
if (isMissingKeyException(exp)) None else throw exp
}
}
}
override protected[core] def deleteAttachments(docId: DocId)(implicit transid: TransactionId): Future[Boolean] = {
val start =
transid.started(
this,
DATABASE_ATTS_DELETE,
s"[ATT_DELETE] deleting attachments of document 'id: $docId' with prefix ${objectKeyPrefix(docId)}")
val f = S3
.deleteObjectsByPrefix(bucket, Some(objectKeyPrefix(docId)))
.withAttributes(s3attributes)
.runWith(Sink.seq)
.map(_ => true)
f.foreach(_ =>
transid.finished(this, start, s"[ATTS_DELETE] completed: deleting attachments of document 'id: $docId'"))
reportFailure(
f,
start,
failure => s"[ATTS_DELETE] '$prefix' internal error, doc: '$docId', failure: '${failure.getMessage}'")
}
override protected[core] def deleteAttachment(docId: DocId, name: String)(
implicit transid: TransactionId): Future[Boolean] = {
val start =
transid.started(this, DATABASE_ATT_DELETE, s"[ATT_DELETE] deleting attachment '$name' of document 'id: $docId'")
val f = S3
.deleteObject(bucket, objectKey(docId, name))
.withAttributes(s3attributes)
.runWith(Sink.head)
.map(_ => true)
f.foreach(_ =>
transid.finished(this, start, s"[ATT_DELETE] completed: deleting attachment '$name' of document 'id: $docId'"))
reportFailure(
f,
start,
failure => s"[ATT_DELETE] '$prefix' internal error, doc: '$docId', failure: '${failure.getMessage}'")
}
override def shutdown(): Unit = {}
private def objectKey(id: DocId, name: String): String = s"$prefix/${id.id}/$name"
private def objectKeyPrefix(id: DocId): String =
s"$prefix/${id.id}/" //must end with a slash so that ".../<package>/<action>other" does not match for "<package>/<action>"
private def isMissingKeyException(e: Throwable): Boolean = {
// In some cases the S3Exception is a sub-cause, so we need to recurse
e match {
case s: S3Exception if s.code == "NoSuchKey" => true
// In case of CloudFront a missing key would be reflected as access denied
case s: S3Exception if s.code == "AccessDenied" && urlSigner.isDefined => true
case t if t != null && isMissingKeyException(t.getCause) => true
case _ => false
}
}
}
| style95/openwhisk | common/scala/src/main/scala/org/apache/openwhisk/core/database/s3/S3AttachmentStore.scala | Scala | apache-2.0 | 10,100 |
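// Editor's note: a standalone sketch (not part of the OpenWhisk file above) of the S3 key
// layout used by the store: every attachment lives at <prefix>/<docId>/<name>, and deleting
// all attachments of a document is a prefix delete on <prefix>/<docId>/. The sample prefix
// and ids below are made up.
object S3KeyLayoutSketch {
  def objectKey(prefix: String, docId: String, name: String): String = s"$prefix/$docId/$name"
  def objectKeyPrefix(prefix: String, docId: String): String = s"$prefix/$docId/"

  def main(args: Array[String]): Unit = {
    println(objectKey("whiskaction", "guest/hello", "codefile")) // whiskaction/guest/hello/codefile
    // The trailing slash keeps "guest/hello" from also matching "guest/helloOther":
    println(objectKeyPrefix("whiskaction", "guest/hello"))       // whiskaction/guest/hello/
  }
}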
package io.getquill.monad
import scala.language.higherKinds
import scala.collection.compat._
import language.experimental.macros
import io.getquill.context.Context
import scala.annotation.tailrec
import scala.util.Try
import io.getquill.{ Action, ActionReturning, BatchAction, Query, Quoted }
trait SyncIOMonad extends IOMonad {
this: Context[_, _] =>
type Result[T] = T
def runIO[T](quoted: Quoted[T]): IO[RunQuerySingleResult[T], Effect.Read] = macro IOMonadMacro.runIO
def runIO[T](quoted: Quoted[Query[T]]): IO[RunQueryResult[T], Effect.Read] = macro IOMonadMacro.runIO
def runIO(quoted: Quoted[Action[_]]): IO[RunActionResult, Effect.Write] = macro IOMonadMacro.runIO
def runIO[T](quoted: Quoted[ActionReturning[_, T]]): IO[RunActionReturningResult[T], Effect.Write] = macro IOMonadMacro.runIO
def runIO(quoted: Quoted[BatchAction[Action[_]]]): IO[RunBatchActionResult, Effect.Write] = macro IOMonadMacro.runIO
def runIO[T](quoted: Quoted[BatchAction[ActionReturning[_, T]]]): IO[RunBatchActionReturningResult[T], Effect.Write] = macro IOMonadMacro.runIO
case class Run[T, E <: Effect](f: () => Result[T]) extends IO[T, E]
def performIO[T](io: IO[T, _], transactional: Boolean = false): Result[T] = {
@tailrec def loop[U](io: IO[U, _]): Result[U] = {
def flatten[Y, M[X] <: IterableOnce[X]](seq: Sequence[Y, M, Effect]) =
seq.in.iterator.foldLeft(IO.successful(seq.cbfResultToValue.newBuilder)) {
(builder, item) =>
builder.flatMap(b => item.map(b += _))
}.map(_.result())
io match {
case FromTry(v) => v.get
case Run(f) => f()
case seq @ Sequence(_, _) => loop(flatten(seq))
case TransformWith(a, fA) =>
a match {
case FromTry(v) => loop(fA(v))
case Run(r) => loop(fA(Try(r())))
case seq @ Sequence(_, _) => loop(flatten(seq).transformWith(fA))
case TransformWith(b, fB) => loop(b.transformWith(fB(_).transformWith(fA)))
case Transactional(io) => loop(fA(Try(performIO(io, transactional = true))))
}
case Transactional(io) => performIO(io, transactional = true)
}
}
loop(io)
}
}
| getquill/quill | quill-core/src/main/scala/io/getquill/monad/SyncIOMonad.scala | Scala | apache-2.0 | 2,256 |
/*
* Copyright (C) 2005, The Beangle Software.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.beangle.ems.portal.admin.action.bulletin
import org.beangle.webmvc.support.action.RestfulAction
import org.beangle.ems.core.bulletin.model.News
class NewsAction extends RestfulAction[News] {
}
| beangle/ems | web/src/main/scala/org/beangle/ems/portal/admin/action/bulletin/NewsAction.scala | Scala | lgpl-3.0 | 929 |
/*
* SPDX-License-Identifier: Apache-2.0
*
* Copyright 2015-2021 Andre White.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.truthencode.ddo
import com.typesafe.scalalogging.LazyLogging
import org.scalatest.funspec.AnyFunSpec
import org.scalatest.matchers.should.Matchers
import org.scalatestplus.mockito.MockitoSugar
class IntegrationIT extends AnyFunSpec with Matchers with MockitoSugar with LazyLogging {
describe("Integration Tests")(pending)
}
| adarro/ddo-calc | subprojects/common/ddo-core/src/test/scala/io/truthencode/ddo/IntegrationIT.scala | Scala | apache-2.0 | 1,010 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.streaming
import java.io.{InputStreamReader, OutputStreamWriter}
import java.nio.charset.StandardCharsets
import scala.util.control.NonFatal
import org.apache.commons.io.IOUtils
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, FSDataInputStream, FSDataOutputStream, Path}
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization
import org.apache.spark.internal.Logging
import org.apache.spark.sql.streaming.StreamingQuery
/**
* Contains metadata associated with a [[StreamingQuery]]. This information is written
* in the checkpoint location the first time a query is started and recovered every time the query
* is restarted.
*
* @param id unique id of the [[StreamingQuery]] that needs to be persisted across restarts
*/
case class StreamMetadata(id: String) {
def json: String = Serialization.write(this)(StreamMetadata.format)
}
object StreamMetadata extends Logging {
implicit val format = Serialization.formats(NoTypeHints)
/** Read the metadata from file if it exists */
def read(metadataFile: Path, hadoopConf: Configuration): Option[StreamMetadata] = {
val fs = FileSystem.get(hadoopConf)
if (fs.exists(metadataFile)) {
var input: FSDataInputStream = null
try {
input = fs.open(metadataFile)
val reader = new InputStreamReader(input, StandardCharsets.UTF_8)
val metadata = Serialization.read[StreamMetadata](reader)
Some(metadata)
} catch {
case NonFatal(e) =>
logError(s"Error reading stream metadata from $metadataFile", e)
throw e
} finally {
IOUtils.closeQuietly(input)
}
} else None
}
/** Write metadata to file */
def write(
metadata: StreamMetadata,
metadataFile: Path,
hadoopConf: Configuration): Unit = {
var output: FSDataOutputStream = null
try {
val fs = FileSystem.get(hadoopConf)
output = fs.create(metadataFile)
val writer = new OutputStreamWriter(output)
Serialization.write(metadata, writer)
writer.close()
} catch {
case NonFatal(e) =>
logError(s"Error writing stream metadata $metadata to $metadataFile", e)
throw e
} finally {
IOUtils.closeQuietly(output)
}
}
}
| Panos-Bletsos/spark-cost-model-optimizer | sql/core/src/main/scala/org/apache/spark/sql/execution/streaming/StreamMetadata.scala | Scala | apache-2.0 | 3,118 |
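// Editor's note: a minimal standalone sketch (not part of the Spark file above) of the json4s
// round trip StreamMetadata relies on; the persisted metadata file is just {"id":"..."}. The
// case class and id value below are illustrative.
import org.json4s.NoTypeHints
import org.json4s.jackson.Serialization

object StreamMetadataJsonSketch {
  implicit val format = Serialization.formats(NoTypeHints)
  case class Metadata(id: String)

  def main(args: Array[String]): Unit = {
    val json = Serialization.write(Metadata("query-uuid")) // {"id":"query-uuid"}
    println(Serialization.read[Metadata](json).id)         // query-uuid
  }
}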
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import scala.collection.mutable
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.catalyst.analysis._
import org.apache.spark.sql.catalyst.catalog.{InMemoryCatalog, SessionCatalog}
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.expressions.aggregate._
import org.apache.spark.sql.catalyst.plans._
import org.apache.spark.sql.catalyst.plans.logical._
import org.apache.spark.sql.catalyst.rules._
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.types._
/**
* Abstract class that all optimizers should inherit from. It contains the standard batches
* (extending optimizers can override this).
*/
abstract class Optimizer(sessionCatalog: SessionCatalog)
extends RuleExecutor[LogicalPlan] {
protected def fixedPoint = FixedPoint(SQLConf.get.optimizerMaxIterations)
def batches: Seq[Batch] = {
Batch("Eliminate Distinct", Once, EliminateDistinct) ::
// Technically some of the rules in Finish Analysis are not optimizer rules and belong more
// in the analyzer, because they are needed for correctness (e.g. ComputeCurrentTime).
// However, because we also use the analyzer to canonicalized queries (for view definition),
// we do not eliminate subqueries or compute current time in the analyzer.
Batch("Finish Analysis", Once,
EliminateSubqueryAliases,
EliminateView,
ReplaceExpressions,
ComputeCurrentTime,
GetCurrentDatabase(sessionCatalog),
RewriteDistinctAggregates,
ReplaceDeduplicateWithAggregate) ::
//////////////////////////////////////////////////////////////////////////////////////////
// Optimizer rules start here
//////////////////////////////////////////////////////////////////////////////////////////
// - Do the first call of CombineUnions before starting the major Optimizer rules,
// since it can reduce the number of iteration and the other rules could add/move
// extra operators between two adjacent Union operators.
// - Call CombineUnions again in Batch("Operator Optimizations"),
// since the other rules might make two separate Unions operators adjacent.
Batch("Union", Once,
CombineUnions) ::
Batch("Pullup Correlated Expressions", Once,
PullupCorrelatedPredicates) ::
Batch("Subquery", Once,
OptimizeSubqueries) ::
Batch("Replace Operators", fixedPoint,
ReplaceIntersectWithSemiJoin,
ReplaceExceptWithAntiJoin,
ReplaceDistinctWithAggregate) ::
Batch("Aggregate", fixedPoint,
RemoveLiteralFromGroupExpressions,
RemoveRepetitionFromGroupExpressions) ::
Batch("Operator Optimizations", fixedPoint, Seq(
// Operator push down
PushProjectionThroughUnion,
ReorderJoin,
EliminateOuterJoin,
PushPredicateThroughJoin,
PushDownPredicate,
LimitPushDown,
ColumnPruning,
InferFiltersFromConstraints,
// Operator combine
CollapseRepartition,
CollapseProject,
CollapseWindow,
CombineFilters,
CombineLimits,
CombineUnions,
// Constant folding and strength reduction
NullPropagation,
ConstantPropagation,
FoldablePropagation,
OptimizeIn,
ConstantFolding,
ReorderAssociativeOperator,
LikeSimplification,
BooleanSimplification,
SimplifyConditionals,
RemoveDispensableExpressions,
SimplifyBinaryComparison,
PruneFilters,
EliminateSorts,
SimplifyCasts,
SimplifyCaseConversionExpressions,
RewriteCorrelatedScalarSubquery,
EliminateSerialization,
RemoveRedundantAliases,
RemoveRedundantProject,
SimplifyCreateStructOps,
SimplifyCreateArrayOps,
SimplifyCreateMapOps,
CombineConcats) ++
extendedOperatorOptimizationRules: _*) ::
Batch("Check Cartesian Products", Once,
CheckCartesianProducts) ::
Batch("Join Reorder", Once,
CostBasedJoinReorder) ::
Batch("Decimal Optimizations", fixedPoint,
DecimalAggregates) ::
Batch("Object Expressions Optimization", fixedPoint,
EliminateMapObjects,
CombineTypedFilters) ::
Batch("LocalRelation", fixedPoint,
ConvertToLocalRelation,
PropagateEmptyRelation) ::
Batch("OptimizeCodegen", Once,
OptimizeCodegen) ::
Batch("RewriteSubquery", Once,
RewritePredicateSubquery,
CollapseProject) :: Nil
}
/**
* Optimize all the subqueries inside expression.
*/
object OptimizeSubqueries extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformAllExpressions {
case s: SubqueryExpression =>
val Subquery(newPlan) = Optimizer.this.execute(Subquery(s.plan))
s.withNewPlan(newPlan)
}
}
/**
* Override to provide additional rules for the operator optimization batch.
*/
def extendedOperatorOptimizationRules: Seq[Rule[LogicalPlan]] = Nil
}
/**
* Remove useless DISTINCT for MAX and MIN.
* This rule should be applied before RewriteDistinctAggregates.
*/
object EliminateDistinct extends Rule[LogicalPlan] {
override def apply(plan: LogicalPlan): LogicalPlan = plan transformExpressions {
case ae: AggregateExpression if ae.isDistinct =>
ae.aggregateFunction match {
case _: Max | _: Min => ae.copy(isDistinct = false)
case _ => ae
}
}
}
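// Editor's note (illustration, not from the Spark sources): `max(DISTINCT v)` always equals
// `max(v)`, and likewise for min, because deduplicating the input cannot change an extreme
// value; that is why the isDistinct flag can simply be cleared for these two aggregates.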
/**
* An optimizer used in test code.
*
* To ensure extensibility, we leave the standard rules in the abstract optimizer, while
* specific rules go to the subclasses.
*/
object SimpleTestOptimizer extends SimpleTestOptimizer
class SimpleTestOptimizer extends Optimizer(
new SessionCatalog(
new InMemoryCatalog,
EmptyFunctionRegistry,
new SQLConf().copy(SQLConf.CASE_SENSITIVE -> true)))
/**
* Remove redundant aliases from a query plan. A redundant alias is an alias that does not change
* the name or metadata of a column, and does not deduplicate it.
*/
object RemoveRedundantAliases extends Rule[LogicalPlan] {
/**
* Create an attribute mapping from the old to the new attributes. This function will only
* return the attribute pairs that have changed.
*/
private def createAttributeMapping(current: LogicalPlan, next: LogicalPlan)
: Seq[(Attribute, Attribute)] = {
current.output.zip(next.output).filterNot {
case (a1, a2) => a1.semanticEquals(a2)
}
}
/**
* Remove the top-level alias from an expression when it is redundant.
*/
private def removeRedundantAlias(e: Expression, blacklist: AttributeSet): Expression = e match {
// Alias with metadata can not be stripped, or the metadata will be lost.
// If the alias name is different from attribute name, we can't strip it either, or we
// may accidentally change the output schema name of the root plan.
case a @ Alias(attr: Attribute, name)
if a.metadata == Metadata.empty &&
name == attr.name &&
!blacklist.contains(attr) &&
!blacklist.contains(a) =>
attr
case a => a
}
/**
* Remove redundant alias expression from a LogicalPlan and its subtree. A blacklist is used to
* prevent the removal of seemingly redundant aliases used to deduplicate the input for a (self)
* join or to prevent the removal of top-level subquery attributes.
*/
private def removeRedundantAliases(plan: LogicalPlan, blacklist: AttributeSet): LogicalPlan = {
plan match {
// We want to keep the same output attributes for subqueries. This means we cannot remove
// the aliases that produce these attributes
case Subquery(child) =>
Subquery(removeRedundantAliases(child, blacklist ++ child.outputSet))
// A join has to be treated differently, because the left and the right side of the join are
// not allowed to use the same attributes. We use a blacklist to prevent us from creating a
// situation in which this happens; the rule will only remove an alias if its child
// attribute is not on the black list.
case Join(left, right, joinType, condition) =>
val newLeft = removeRedundantAliases(left, blacklist ++ right.outputSet)
val newRight = removeRedundantAliases(right, blacklist ++ newLeft.outputSet)
val mapping = AttributeMap(
createAttributeMapping(left, newLeft) ++
createAttributeMapping(right, newRight))
val newCondition = condition.map(_.transform {
case a: Attribute => mapping.getOrElse(a, a)
})
Join(newLeft, newRight, joinType, newCondition)
case _ =>
// Remove redundant aliases in the subtree(s).
val currentNextAttrPairs = mutable.Buffer.empty[(Attribute, Attribute)]
val newNode = plan.mapChildren { child =>
val newChild = removeRedundantAliases(child, blacklist)
currentNextAttrPairs ++= createAttributeMapping(child, newChild)
newChild
}
// Create the attribute mapping. Note that the currentNextAttrPairs can contain duplicate
// keys in case of Union (this is caused by the PushProjectionThroughUnion rule); in this
// case we use the first mapping (which should be provided by the first child).
val mapping = AttributeMap(currentNextAttrPairs)
// Create an expression cleaning function for nodes that can actually produce redundant
// aliases, use identity otherwise.
val clean: Expression => Expression = plan match {
case _: Project => removeRedundantAlias(_, blacklist)
case _: Aggregate => removeRedundantAlias(_, blacklist)
case _: Window => removeRedundantAlias(_, blacklist)
case _ => identity[Expression]
}
// Transform the expressions.
newNode.mapExpressions { expr =>
clean(expr.transform {
case a: Attribute => mapping.getOrElse(a, a)
})
}
}
}
def apply(plan: LogicalPlan): LogicalPlan = removeRedundantAliases(plan, AttributeSet.empty)
}
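// Editor's note (illustration, not from the Spark sources): an alias such as `a AS a` in
// `Project(a AS a, b)` changes neither name nor metadata, so the rule rewrites the node to
// `Project(a, b)`, unless the attribute is blacklisted (for example because the alias
// deduplicates one side of a self-join or preserves a subquery's output attributes).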
/**
* Remove projections from the query plan that do not make any modifications.
*/
object RemoveRedundantProject extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case p @ Project(_, child) if p.output == child.output => child
}
}
/**
* Pushes down [[LocalLimit]] beneath UNION ALL and beneath the streamed inputs of outer joins.
*/
object LimitPushDown extends Rule[LogicalPlan] {
private def stripGlobalLimitIfPresent(plan: LogicalPlan): LogicalPlan = {
plan match {
case GlobalLimit(_, child) => child
case _ => plan
}
}
private def maybePushLimit(limitExp: Expression, plan: LogicalPlan): LogicalPlan = {
(limitExp, plan.maxRows) match {
case (IntegerLiteral(maxRow), Some(childMaxRows)) if maxRow < childMaxRows =>
LocalLimit(limitExp, stripGlobalLimitIfPresent(plan))
case (_, None) =>
LocalLimit(limitExp, stripGlobalLimitIfPresent(plan))
case _ => plan
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// Adding extra Limits below UNION ALL for children which are not Limit or do not have Limit
// descendants whose maxRows is larger. This heuristic is valid assuming there does not exist any
// Limit push-down rule that is unable to infer the value of maxRows.
// Note: right now Union means UNION ALL, which does not de-duplicate rows, so it is safe to
// pushdown Limit through it. Once we add UNION DISTINCT, however, we will not be able to
// pushdown Limit.
case LocalLimit(exp, Union(children)) =>
LocalLimit(exp, Union(children.map(maybePushLimit(exp, _))))
// Add extra limits below OUTER JOIN. For LEFT OUTER and FULL OUTER JOIN we push limits to the
// left and right sides, respectively. For FULL OUTER JOIN, we can only push limits to one side
// because we need to ensure that rows from the limited side still have an opportunity to match
// against all candidates from the non-limited side. We also need to ensure that this limit
// pushdown rule will not eventually introduce limits on both sides if it is applied multiple
// times. Therefore:
// - If one side is already limited, stack another limit on top if the new limit is smaller.
// The redundant limit will be collapsed by the CombineLimits rule.
// - If neither side is limited, limit the side that is estimated to be bigger.
case LocalLimit(exp, join @ Join(left, right, joinType, _)) =>
val newJoin = joinType match {
case RightOuter => join.copy(right = maybePushLimit(exp, right))
case LeftOuter => join.copy(left = maybePushLimit(exp, left))
case FullOuter =>
(left.maxRows, right.maxRows) match {
case (None, None) =>
if (left.stats.sizeInBytes >= right.stats.sizeInBytes) {
join.copy(left = maybePushLimit(exp, left))
} else {
join.copy(right = maybePushLimit(exp, right))
}
case (Some(_), Some(_)) => join
case (Some(_), None) => join.copy(left = maybePushLimit(exp, left))
case (None, Some(_)) => join.copy(right = maybePushLimit(exp, right))
}
case _ => join
}
LocalLimit(exp, newJoin)
}
}
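// Editor's note (illustration, not from the Spark sources): for
// `(SELECT * FROM a UNION ALL SELECT * FROM b) LIMIT 5`, a LocalLimit(5) can be pushed onto
// each child of the union, since at most 5 rows from either child can survive the outer limit.
// For a LEFT OUTER join the limit is pushed only to the left side; limiting the right side
// could drop rows that the surviving left rows still need to match against.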
/**
* Pushes Project operator to both sides of a Union operator.
* Operations that are safe to pushdown are listed as follows.
* Union:
* Right now, Union means UNION ALL, which does not de-duplicate rows. So, it is
* safe to pushdown Filters and Projections through it. Filter pushdown is handled by another
* rule PushDownPredicate. Once we add UNION DISTINCT, we will not be able to pushdown Projections.
*/
object PushProjectionThroughUnion extends Rule[LogicalPlan] with PredicateHelper {
/**
* Maps Attributes from the left side to the corresponding Attribute on the right side.
*/
private def buildRewrites(left: LogicalPlan, right: LogicalPlan): AttributeMap[Attribute] = {
assert(left.output.size == right.output.size)
AttributeMap(left.output.zip(right.output))
}
/**
* Rewrites an expression so that it can be pushed to the right side of a
* Union or Except operator. This method relies on the fact that the output attributes
* of a union/intersect/except are always equal to the left child's output.
*/
private def pushToRight[A <: Expression](e: A, rewrites: AttributeMap[Attribute]) = {
val result = e transform {
case a: Attribute => rewrites(a)
}
// We must promise the compiler that we did not discard the names in the case of project
// expressions. This is safe since the only transformation is from Attribute => Attribute.
result.asInstanceOf[A]
}
/**
* Splits the condition expression into small conditions by `And`, and partition them by
* deterministic, and finally recombine them by `And`. It returns an expression containing
* all deterministic expressions (the first field of the returned Tuple2) and an expression
* containing all non-deterministic expressions (the second field of the returned Tuple2).
*/
private def partitionByDeterministic(condition: Expression): (Expression, Expression) = {
val andConditions = splitConjunctivePredicates(condition)
andConditions.partition(_.deterministic) match {
case (deterministic, nondeterministic) =>
deterministic.reduceOption(And).getOrElse(Literal(true)) ->
nondeterministic.reduceOption(And).getOrElse(Literal(true))
}
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// Push down deterministic projection through UNION ALL
case p @ Project(projectList, Union(children)) =>
assert(children.nonEmpty)
if (projectList.forall(_.deterministic)) {
val newFirstChild = Project(projectList, children.head)
val newOtherChildren = children.tail.map { child =>
val rewrites = buildRewrites(children.head, child)
Project(projectList.map(pushToRight(_, rewrites)), child)
}
Union(newFirstChild +: newOtherChildren)
} else {
p
}
}
}
/**
* Attempts to eliminate the reading of unneeded columns from the query plan.
*
* Since adding Project before Filter conflicts with PushPredicatesThroughProject, this rule will
* remove the Project p2 in the following pattern:
*
* p1 @ Project(_, Filter(_, p2 @ Project(_, child))) if p2.outputSet.subsetOf(p2.inputSet)
*
 * p2 is usually inserted by this rule and is redundant, since p1 can prune the columns anyway.
*/
object ColumnPruning extends Rule[LogicalPlan] {
private def sameOutput(output1: Seq[Attribute], output2: Seq[Attribute]): Boolean =
output1.size == output2.size &&
output1.zip(output2).forall(pair => pair._1.semanticEquals(pair._2))
def apply(plan: LogicalPlan): LogicalPlan = removeProjectBeforeFilter(plan transform {
// Prunes the unused columns from project list of Project/Aggregate/Expand
case p @ Project(_, p2: Project) if (p2.outputSet -- p.references).nonEmpty =>
p.copy(child = p2.copy(projectList = p2.projectList.filter(p.references.contains)))
case p @ Project(_, a: Aggregate) if (a.outputSet -- p.references).nonEmpty =>
p.copy(
child = a.copy(aggregateExpressions = a.aggregateExpressions.filter(p.references.contains)))
case a @ Project(_, e @ Expand(_, _, grandChild)) if (e.outputSet -- a.references).nonEmpty =>
val newOutput = e.output.filter(a.references.contains(_))
val newProjects = e.projections.map { proj =>
proj.zip(e.output).filter { case (_, a) =>
newOutput.contains(a)
}.unzip._1
}
a.copy(child = Expand(newProjects, newOutput, grandChild))
// Prunes the unused columns from child of `DeserializeToObject`
case d @ DeserializeToObject(_, _, child) if (child.outputSet -- d.references).nonEmpty =>
d.copy(child = prunedChild(child, d.references))
// Prunes the unused columns from child of Aggregate/Expand/Generate
case a @ Aggregate(_, _, child) if (child.outputSet -- a.references).nonEmpty =>
a.copy(child = prunedChild(child, a.references))
case e @ Expand(_, _, child) if (child.outputSet -- e.references).nonEmpty =>
e.copy(child = prunedChild(child, e.references))
case g: Generate if !g.join && (g.child.outputSet -- g.references).nonEmpty =>
g.copy(child = prunedChild(g.child, g.references))
    // Turn off `join` for Generate if no column from its child is used
case p @ Project(_, g: Generate) if g.join && p.references.subsetOf(g.generatedSet) =>
p.copy(child = g.copy(join = false))
// Eliminate unneeded attributes from right side of a Left Existence Join.
case j @ Join(_, right, LeftExistence(_), _) =>
j.copy(right = prunedChild(right, j.references))
// all the columns will be used to compare, so we can't prune them
case p @ Project(_, _: SetOperation) => p
case p @ Project(_, _: Distinct) => p
// Eliminate unneeded attributes from children of Union.
case p @ Project(_, u: Union) =>
if ((u.outputSet -- p.references).nonEmpty) {
val firstChild = u.children.head
val newOutput = prunedChild(firstChild, p.references).output
// pruning the columns of all children based on the pruned first child.
val newChildren = u.children.map { p =>
val selected = p.output.zipWithIndex.filter { case (a, i) =>
newOutput.contains(firstChild.output(i))
}.map(_._1)
Project(selected, p)
}
p.copy(child = u.withNewChildren(newChildren))
} else {
p
}
// Prune unnecessary window expressions
case p @ Project(_, w: Window) if (w.windowOutputSet -- p.references).nonEmpty =>
p.copy(child = w.copy(
windowExpressions = w.windowExpressions.filter(p.references.contains)))
// Eliminate no-op Window
case w: Window if w.windowExpressions.isEmpty => w.child
// Eliminate no-op Projects
case p @ Project(_, child) if sameOutput(child.output, p.output) => child
// Can't prune the columns on LeafNode
case p @ Project(_, _: LeafNode) => p
    // for all other logical plans that inherit the output from their children
case p @ Project(_, child) =>
val required = child.references ++ p.references
if ((child.inputSet -- required).nonEmpty) {
val newChildren = child.children.map(c => prunedChild(c, required))
p.copy(child = child.withNewChildren(newChildren))
} else {
p
}
})
/** Applies a projection only when the child is producing unnecessary attributes */
private def prunedChild(c: LogicalPlan, allReferences: AttributeSet) =
if ((c.outputSet -- allReferences.filter(c.outputSet.contains)).nonEmpty) {
Project(c.output.filter(allReferences.contains), c)
} else {
c
}
/**
   * The Project before Filter is not necessary but conflicts with PushPredicatesThroughProject,
* so remove it.
*/
private def removeProjectBeforeFilter(plan: LogicalPlan): LogicalPlan = plan transform {
case p1 @ Project(_, f @ Filter(_, p2 @ Project(_, child)))
if p2.outputSet.subsetOf(child.outputSet) =>
p1.copy(child = f.copy(child = child))
}
}
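/*
 * A rough sketch of a ColumnPruning rewrite (illustrative plan notation): only the referenced
 * columns survive in the lower Project; the remaining adjacent Projects are merged later by
 * CollapseProject.
 * {{{
 *   Project(a) <- Project(a, b, c) <- t
 *   ==>  Project(a) <- Project(a) <- t
 * }}}
 */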
/**
 * Combines two adjacent [[Project]] operators into one and performs alias substitution,
* merging the expressions into one single expression.
*/
object CollapseProject extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
case p1 @ Project(_, p2: Project) =>
if (haveCommonNonDeterministicOutput(p1.projectList, p2.projectList)) {
p1
} else {
p2.copy(projectList = buildCleanedProjectList(p1.projectList, p2.projectList))
}
case p @ Project(_, agg: Aggregate) =>
if (haveCommonNonDeterministicOutput(p.projectList, agg.aggregateExpressions)) {
p
} else {
agg.copy(aggregateExpressions = buildCleanedProjectList(
p.projectList, agg.aggregateExpressions))
}
}
private def collectAliases(projectList: Seq[NamedExpression]): AttributeMap[Alias] = {
AttributeMap(projectList.collect {
case a: Alias => a.toAttribute -> a
})
}
private def haveCommonNonDeterministicOutput(
upper: Seq[NamedExpression], lower: Seq[NamedExpression]): Boolean = {
// Create a map of Aliases to their values from the lower projection.
// e.g., 'SELECT ... FROM (SELECT a + b AS c, d ...)' produces Map(c -> Alias(a + b, c)).
val aliases = collectAliases(lower)
// Collapse upper and lower Projects if and only if their overlapped expressions are all
// deterministic.
upper.exists(_.collect {
case a: Attribute if aliases.contains(a) => aliases(a).child
}.exists(!_.deterministic))
}
private def buildCleanedProjectList(
upper: Seq[NamedExpression],
lower: Seq[NamedExpression]): Seq[NamedExpression] = {
// Create a map of Aliases to their values from the lower projection.
// e.g., 'SELECT ... FROM (SELECT a + b AS c, d ...)' produces Map(c -> Alias(a + b, c)).
val aliases = collectAliases(lower)
// Substitute any attributes that are produced by the lower projection, so that we safely
// eliminate it.
// e.g., 'SELECT c + 1 FROM (SELECT a + b AS C ...' produces 'SELECT a + b + 1 ...'
// Use transformUp to prevent infinite recursion.
val rewrittenUpper = upper.map(_.transformUp {
case a: Attribute => aliases.getOrElse(a, a)
})
// collapse upper and lower Projects may introduce unnecessary Aliases, trim them here.
rewrittenUpper.map { p =>
CleanupAliases.trimNonTopLevelAliases(p).asInstanceOf[NamedExpression]
}
}
}
/**
* Combines adjacent [[RepartitionOperation]] operators
*/
object CollapseRepartition extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
// Case 1: When a Repartition has a child of Repartition or RepartitionByExpression,
// 1) When the top node does not enable the shuffle (i.e., coalesce API), but the child
// enables the shuffle. Returns the child node if the last numPartitions is bigger;
// otherwise, keep unchanged.
// 2) In the other cases, returns the top node with the child's child
case r @ Repartition(_, _, child: RepartitionOperation) => (r.shuffle, child.shuffle) match {
case (false, true) => if (r.numPartitions >= child.numPartitions) child else r
case _ => r.copy(child = child.child)
}
// Case 2: When a RepartitionByExpression has a child of Repartition or RepartitionByExpression
// we can remove the child.
case r @ RepartitionByExpression(_, child: RepartitionOperation, _) =>
r.copy(child = child.child)
}
}
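/*
 * Rough examples of the CollapseRepartition rewrite, in DataFrame terms:
 * {{{
 *   df.repartition(100).repartition(10)   ==>  df.repartition(10)    // only the top shuffle is kept
 *   df.repartition(5).coalesce(10)        ==>  df.repartition(5)     // coalescing to more partitions is a no-op
 * }}}
 */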
/**
* Collapse Adjacent Window Expression.
 * - If the partition specs and order specs are the same and the window expressions are
* independent, collapse into the parent.
*/
object CollapseWindow extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformUp {
case w1 @ Window(we1, ps1, os1, w2 @ Window(we2, ps2, os2, grandChild))
if ps1 == ps2 && os1 == os2 && w1.references.intersect(w2.windowOutputSet).isEmpty =>
w1.copy(windowExpressions = we2 ++ we1, child = grandChild)
}
}
/**
* Generate a list of additional filters from an operator's existing constraint but remove those
* that are either already part of the operator's condition or are part of the operator's child
 * constraints. These filters are currently inserted into the existing conditions in the Filter
* operators and on either side of Join operators.
*
* Note: While this optimization is applicable to all types of join, it primarily benefits Inner and
* LeftSemi joins.
*/
object InferFiltersFromConstraints extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = {
if (SQLConf.get.constraintPropagationEnabled) {
inferFilters(plan)
} else {
plan
}
}
private def inferFilters(plan: LogicalPlan): LogicalPlan = plan transform {
case filter @ Filter(condition, child) =>
val newFilters = filter.constraints --
(child.constraints ++ splitConjunctivePredicates(condition))
if (newFilters.nonEmpty) {
Filter(And(newFilters.reduce(And), condition), child)
} else {
filter
}
case join @ Join(left, right, joinType, conditionOpt) =>
// Only consider constraints that can be pushed down completely to either the left or the
// right child
val constraints = join.constraints.filter { c =>
c.references.subsetOf(left.outputSet) || c.references.subsetOf(right.outputSet)
}
// Remove those constraints that are already enforced by either the left or the right child
val additionalConstraints = constraints -- (left.constraints ++ right.constraints)
val newConditionOpt = conditionOpt match {
case Some(condition) =>
val newFilters = additionalConstraints -- splitConjunctivePredicates(condition)
if (newFilters.nonEmpty) Option(And(newFilters.reduce(And), condition)) else None
case None =>
additionalConstraints.reduceOption(And)
}
if (newConditionOpt.isDefined) Join(left, right, joinType, newConditionOpt) else join
}
}
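/*
 * A rough sketch of what InferFiltersFromConstraints can derive for an equi-join (illustrative
 * plan notation; the exact predicate set depends on constraint propagation):
 * {{{
 *   Join(Filter(a.id > 10) <- a, b, Inner, a.id = b.id)
 *   ==>  Join(Filter(a.id > 10) <- a, b, Inner, (b.id > 10) AND isnotnull(b.id) AND (a.id = b.id))
 * }}}
 * The inferred join-condition predicates can then be pushed to the right side by later rules.
 */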
/**
* Combines all adjacent [[Union]] operators into a single [[Union]].
*/
object CombineUnions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transformDown {
case u: Union => flattenUnion(u, false)
case Distinct(u: Union) => Distinct(flattenUnion(u, true))
}
private def flattenUnion(union: Union, flattenDistinct: Boolean): Union = {
val stack = mutable.Stack[LogicalPlan](union)
val flattened = mutable.ArrayBuffer.empty[LogicalPlan]
while (stack.nonEmpty) {
stack.pop() match {
case Distinct(Union(children)) if flattenDistinct =>
stack.pushAll(children.reverse)
case Union(children) =>
stack.pushAll(children.reverse)
case child =>
flattened += child
}
}
Union(flattened)
}
}
/**
* Combines two adjacent [[Filter]] operators into one, merging the non-redundant conditions into
* one conjunctive predicate.
*/
object CombineFilters extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Filter(fc, nf @ Filter(nc, grandChild)) =>
(ExpressionSet(splitConjunctivePredicates(fc)) --
ExpressionSet(splitConjunctivePredicates(nc))).reduceOption(And) match {
case Some(ac) =>
Filter(And(nc, ac), grandChild)
case None =>
nf
}
}
}
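/*
 * A rough sketch of the CombineFilters rewrite (illustrative plan notation):
 * {{{
 *   Filter(b > 0) <- Filter(a > 1) <- relation
 *   ==>  Filter(a > 1 AND b > 0) <- relation
 * }}}
 * Conditions already present in the inner Filter are not repeated.
 */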
/**
* Removes no-op SortOrder from Sort
*/
object EliminateSorts extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case s @ Sort(orders, _, child) if orders.isEmpty || orders.exists(_.child.foldable) =>
val newOrders = orders.filterNot(_.child.foldable)
if (newOrders.isEmpty) child else s.copy(order = newOrders)
}
}
/**
* Removes filters that can be evaluated trivially. This can be done through the following ways:
* 1) by eliding the filter for cases where it will always evaluate to `true`.
* 2) by substituting a dummy empty relation when the filter will always evaluate to `false`.
* 3) by eliminating the always-true conditions given the constraints on the child's output.
*/
object PruneFilters extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// If the filter condition always evaluate to true, remove the filter.
case Filter(Literal(true, BooleanType), child) => child
// If the filter condition always evaluate to null or false,
// replace the input with an empty relation.
case Filter(Literal(null, _), child) => LocalRelation(child.output, data = Seq.empty)
case Filter(Literal(false, BooleanType), child) => LocalRelation(child.output, data = Seq.empty)
// If any deterministic condition is guaranteed to be true given the constraints on the child's
// output, remove the condition
case f @ Filter(fc, p: LogicalPlan) =>
val (prunedPredicates, remainingPredicates) =
splitConjunctivePredicates(fc).partition { cond =>
cond.deterministic && p.constraints.contains(cond)
}
if (prunedPredicates.isEmpty) {
f
} else if (remainingPredicates.isEmpty) {
p
} else {
val newCond = remainingPredicates.reduce(And)
Filter(newCond, p)
}
}
}
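/*
 * Rough examples of the PruneFilters rewrites (illustrative plan notation):
 * {{{
 *   Filter(true)  <- relation                          ==>  relation
 *   Filter(false) <- relation                          ==>  LocalRelation(relation.output, Nil)
 *   Filter(a > 1) <- Filter(a > 1 AND b > 0) <- relation
 *                                                      ==>  Filter(a > 1 AND b > 0) <- relation
 * }}}
 * The last case relies on the child's constraints already guaranteeing a > 1.
 */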
/**
* Pushes [[Filter]] operators through many operators iff:
* 1) the operator is deterministic
 * 2) the predicate is deterministic and the operator will not change any rows.
*
* This heuristic is valid assuming the expression evaluation cost is minimal.
*/
object PushDownPredicate extends Rule[LogicalPlan] with PredicateHelper {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// SPARK-13473: We can't push the predicate down when the underlying projection output non-
// deterministic field(s). Non-deterministic expressions are essentially stateful. This
// implies that, for a given input row, the output are determined by the expression's initial
    // state and all the input rows processed before. In other words, the order of input rows
// matters for non-deterministic expressions, while pushing down predicates changes the order.
// This also applies to Aggregate.
case Filter(condition, project @ Project(fields, grandChild))
if fields.forall(_.deterministic) && canPushThroughCondition(grandChild, condition) =>
// Create a map of Aliases to their values from the child projection.
// e.g., 'SELECT a + b AS c, d ...' produces Map(c -> a + b).
val aliasMap = AttributeMap(fields.collect {
case a: Alias => (a.toAttribute, a.child)
})
project.copy(child = Filter(replaceAlias(condition, aliasMap), grandChild))
case filter @ Filter(condition, aggregate: Aggregate)
if aggregate.aggregateExpressions.forall(_.deterministic) =>
// Find all the aliased expressions in the aggregate list that don't include any actual
// AggregateExpression, and create a map from the alias to the expression
val aliasMap = AttributeMap(aggregate.aggregateExpressions.collect {
case a: Alias if a.child.find(_.isInstanceOf[AggregateExpression]).isEmpty =>
(a.toAttribute, a.child)
})
// For each filter, expand the alias and check if the filter can be evaluated using
// attributes produced by the aggregate operator's child operator.
val (candidates, containingNonDeterministic) =
splitConjunctivePredicates(condition).span(_.deterministic)
val (pushDown, rest) = candidates.partition { cond =>
val replaced = replaceAlias(cond, aliasMap)
cond.references.nonEmpty && replaced.references.subsetOf(aggregate.child.outputSet)
}
val stayUp = rest ++ containingNonDeterministic
if (pushDown.nonEmpty) {
val pushDownPredicate = pushDown.reduce(And)
val replaced = replaceAlias(pushDownPredicate, aliasMap)
val newAggregate = aggregate.copy(child = Filter(replaced, aggregate.child))
// If there is no more filter to stay up, just eliminate the filter.
// Otherwise, create "Filter(stayUp) <- Aggregate <- Filter(pushDownPredicate)".
if (stayUp.isEmpty) newAggregate else Filter(stayUp.reduce(And), newAggregate)
} else {
filter
}
// Push [[Filter]] operators through [[Window]] operators. Parts of the predicate that can be
// pushed beneath must satisfy the following conditions:
// 1. All the expressions are part of window partitioning key. The expressions can be compound.
// 2. Deterministic.
// 3. Placed before any non-deterministic predicates.
case filter @ Filter(condition, w: Window)
if w.partitionSpec.forall(_.isInstanceOf[AttributeReference]) =>
val partitionAttrs = AttributeSet(w.partitionSpec.flatMap(_.references))
val (candidates, containingNonDeterministic) =
splitConjunctivePredicates(condition).span(_.deterministic)
val (pushDown, rest) = candidates.partition { cond =>
cond.references.subsetOf(partitionAttrs)
}
val stayUp = rest ++ containingNonDeterministic
if (pushDown.nonEmpty) {
val pushDownPredicate = pushDown.reduce(And)
val newWindow = w.copy(child = Filter(pushDownPredicate, w.child))
if (stayUp.isEmpty) newWindow else Filter(stayUp.reduce(And), newWindow)
} else {
filter
}
case filter @ Filter(condition, union: Union) =>
// Union could change the rows, so non-deterministic predicate can't be pushed down
val (pushDown, stayUp) = splitConjunctivePredicates(condition).span(_.deterministic)
if (pushDown.nonEmpty) {
val pushDownCond = pushDown.reduceLeft(And)
val output = union.output
val newGrandChildren = union.children.map { grandchild =>
val newCond = pushDownCond transform {
case e if output.exists(_.semanticEquals(e)) =>
grandchild.output(output.indexWhere(_.semanticEquals(e)))
}
assert(newCond.references.subsetOf(grandchild.outputSet))
Filter(newCond, grandchild)
}
val newUnion = union.withNewChildren(newGrandChildren)
if (stayUp.nonEmpty) {
Filter(stayUp.reduceLeft(And), newUnion)
} else {
newUnion
}
} else {
filter
}
case filter @ Filter(_, u: UnaryNode)
if canPushThrough(u) && u.expressions.forall(_.deterministic) =>
pushDownPredicate(filter, u.child) { predicate =>
u.withNewChildren(Seq(Filter(predicate, u.child)))
}
}
private def canPushThrough(p: UnaryNode): Boolean = p match {
// Note that some operators (e.g. project, aggregate, union) are being handled separately
// (earlier in this rule).
case _: AppendColumns => true
case _: ResolvedHint => true
case _: Distinct => true
case _: Generate => true
case _: Pivot => true
case _: RepartitionByExpression => true
case _: Repartition => true
case _: ScriptTransformation => true
case _: Sort => true
case _ => false
}
private def pushDownPredicate(
filter: Filter,
grandchild: LogicalPlan)(insertFilter: Expression => LogicalPlan): LogicalPlan = {
// Only push down the predicates that is deterministic and all the referenced attributes
// come from grandchild.
// TODO: non-deterministic predicates could be pushed through some operators that do not change
// the rows.
val (candidates, containingNonDeterministic) =
splitConjunctivePredicates(filter.condition).span(_.deterministic)
val (pushDown, rest) = candidates.partition { cond =>
cond.references.subsetOf(grandchild.outputSet)
}
val stayUp = rest ++ containingNonDeterministic
if (pushDown.nonEmpty) {
val newChild = insertFilter(pushDown.reduceLeft(And))
if (stayUp.nonEmpty) {
Filter(stayUp.reduceLeft(And), newChild)
} else {
newChild
}
} else {
filter
}
}
/**
* Check if we can safely push a filter through a projection, by making sure that predicate
* subqueries in the condition do not contain the same attributes as the plan they are moved
* into. This can happen when the plan and predicate subquery have the same source.
*/
private def canPushThroughCondition(plan: LogicalPlan, condition: Expression): Boolean = {
val attributes = plan.outputSet
val matched = condition.find {
case s: SubqueryExpression => s.plan.outputSet.intersect(attributes).nonEmpty
case _ => false
}
matched.isEmpty
}
}
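/*
 * A rough sketch of the PushDownPredicate rewrite through a Project (illustrative plan
 * notation): aliases introduced by the projection are substituted before the predicate moves
 * below it.
 * {{{
 *   Filter(c > 0) <- Project(a + b AS c) <- relation
 *   ==>  Project(a + b AS c) <- Filter(a + b > 0) <- relation
 * }}}
 */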
/**
* Pushes down [[Filter]] operators where the `condition` can be
* evaluated using only the attributes of the left or right side of a join. Other
* [[Filter]] conditions are moved into the `condition` of the [[Join]].
*
* And also pushes down the join filter, where the `condition` can be evaluated using only the
* attributes of the left or right side of sub query when applicable.
*
* Check https://cwiki.apache.org/confluence/display/Hive/OuterJoinBehavior for more details
*/
object PushPredicateThroughJoin extends Rule[LogicalPlan] with PredicateHelper {
/**
* Splits join condition expressions or filter predicates (on a given join's output) into three
* categories based on the attributes required to evaluate them. Note that we explicitly exclude
   * non-deterministic (i.e., stateful) condition expressions in canEvaluateInLeft or
* canEvaluateInRight to prevent pushing these predicates on either side of the join.
*
* @return (canEvaluateInLeft, canEvaluateInRight, haveToEvaluateInBoth)
*/
private def split(condition: Seq[Expression], left: LogicalPlan, right: LogicalPlan) = {
// Note: In order to ensure correctness, it's important to not change the relative ordering of
// any deterministic expression that follows a non-deterministic expression. To achieve this,
// we only consider pushing down those expressions that precede the first non-deterministic
// expression in the condition.
val (pushDownCandidates, containingNonDeterministic) = condition.span(_.deterministic)
val (leftEvaluateCondition, rest) =
pushDownCandidates.partition(_.references.subsetOf(left.outputSet))
val (rightEvaluateCondition, commonCondition) =
rest.partition(expr => expr.references.subsetOf(right.outputSet))
(leftEvaluateCondition, rightEvaluateCondition, commonCondition ++ containingNonDeterministic)
}
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
// push the where condition down into join filter
case f @ Filter(filterCondition, Join(left, right, joinType, joinCondition)) =>
val (leftFilterConditions, rightFilterConditions, commonFilterCondition) =
split(splitConjunctivePredicates(filterCondition), left, right)
joinType match {
case _: InnerLike =>
// push down the single side `where` condition into respective sides
val newLeft = leftFilterConditions.
reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
val newRight = rightFilterConditions.
reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
val (newJoinConditions, others) =
commonFilterCondition.partition(canEvaluateWithinJoin)
val newJoinCond = (newJoinConditions ++ joinCondition).reduceLeftOption(And)
val join = Join(newLeft, newRight, joinType, newJoinCond)
if (others.nonEmpty) {
Filter(others.reduceLeft(And), join)
} else {
join
}
case RightOuter =>
// push down the right side only `where` condition
val newLeft = left
val newRight = rightFilterConditions.
reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
val newJoinCond = joinCondition
val newJoin = Join(newLeft, newRight, RightOuter, newJoinCond)
(leftFilterConditions ++ commonFilterCondition).
reduceLeftOption(And).map(Filter(_, newJoin)).getOrElse(newJoin)
case LeftOuter | LeftExistence(_) =>
// push down the left side only `where` condition
val newLeft = leftFilterConditions.
reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
val newRight = right
val newJoinCond = joinCondition
val newJoin = Join(newLeft, newRight, joinType, newJoinCond)
(rightFilterConditions ++ commonFilterCondition).
reduceLeftOption(And).map(Filter(_, newJoin)).getOrElse(newJoin)
case FullOuter => f // DO Nothing for Full Outer Join
case NaturalJoin(_) => sys.error("Untransformed NaturalJoin node")
case UsingJoin(_, _) => sys.error("Untransformed Using join node")
}
// push down the join filter into sub query scanning if applicable
case j @ Join(left, right, joinType, joinCondition) =>
val (leftJoinConditions, rightJoinConditions, commonJoinCondition) =
split(joinCondition.map(splitConjunctivePredicates).getOrElse(Nil), left, right)
joinType match {
case _: InnerLike | LeftSemi =>
// push down the single side only join filter for both sides sub queries
val newLeft = leftJoinConditions.
reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
val newRight = rightJoinConditions.
reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
val newJoinCond = commonJoinCondition.reduceLeftOption(And)
Join(newLeft, newRight, joinType, newJoinCond)
case RightOuter =>
// push down the left side only join filter for left side sub query
val newLeft = leftJoinConditions.
reduceLeftOption(And).map(Filter(_, left)).getOrElse(left)
val newRight = right
val newJoinCond = (rightJoinConditions ++ commonJoinCondition).reduceLeftOption(And)
Join(newLeft, newRight, RightOuter, newJoinCond)
case LeftOuter | LeftAnti | ExistenceJoin(_) =>
// push down the right side only join filter for right sub query
val newLeft = left
val newRight = rightJoinConditions.
reduceLeftOption(And).map(Filter(_, right)).getOrElse(right)
val newJoinCond = (leftJoinConditions ++ commonJoinCondition).reduceLeftOption(And)
Join(newLeft, newRight, joinType, newJoinCond)
case FullOuter => j
case NaturalJoin(_) => sys.error("Untransformed NaturalJoin node")
case UsingJoin(_, _) => sys.error("Untransformed Using join node")
}
}
}
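/*
 * A rough sketch of the PushPredicateThroughJoin rewrite for an inner join (illustrative plan
 * notation): single-side conditions move below the join, the rest becomes the join condition.
 * {{{
 *   Filter(a.x > 1 AND b.y < 5 AND a.id = b.id) <- Join(a, b, Inner, no condition)
 *   ==>  Join(Filter(x > 1) <- a, Filter(y < 5) <- b, Inner, a.id = b.id)
 * }}}
 */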
/**
* Combines two adjacent [[Limit]] operators into one, merging the
* expressions into one single expression.
*/
object CombineLimits extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case GlobalLimit(le, GlobalLimit(ne, grandChild)) =>
GlobalLimit(Least(Seq(ne, le)), grandChild)
case LocalLimit(le, LocalLimit(ne, grandChild)) =>
LocalLimit(Least(Seq(ne, le)), grandChild)
case Limit(le, Limit(ne, grandChild)) =>
Limit(Least(Seq(ne, le)), grandChild)
}
}
/**
 * Check if there are any cartesian products between joins of any type in the optimized plan tree.
* Throw an error if a cartesian product is found without an explicit cross join specified.
* This rule is effectively disabled if the CROSS_JOINS_ENABLED flag is true.
*
* This rule must be run AFTER the ReorderJoin rule since the join conditions for each join must be
* collected before checking if it is a cartesian product. If you have
* SELECT * from R, S where R.r = S.s,
* the join between R and S is not a cartesian product and therefore should be allowed.
* The predicate R.r = S.s is not recognized as a join condition until the ReorderJoin rule.
*/
object CheckCartesianProducts extends Rule[LogicalPlan] with PredicateHelper {
/**
* Check if a join is a cartesian product. Returns true if
* there are no join conditions involving references from both left and right.
*/
def isCartesianProduct(join: Join): Boolean = {
val conditions = join.condition.map(splitConjunctivePredicates).getOrElse(Nil)
!conditions.map(_.references).exists(refs => refs.exists(join.left.outputSet.contains)
&& refs.exists(join.right.outputSet.contains))
}
def apply(plan: LogicalPlan): LogicalPlan =
if (SQLConf.get.crossJoinEnabled) {
plan
} else plan transform {
case j @ Join(left, right, Inner | LeftOuter | RightOuter | FullOuter, condition)
if isCartesianProduct(j) =>
throw new AnalysisException(
s"""Detected cartesian product for ${j.joinType.sql} join between logical plans
|${left.treeString(false).trim}
|and
|${right.treeString(false).trim}
|Join condition is missing or trivial.
|Use the CROSS JOIN syntax to allow cartesian products between these relations."""
.stripMargin)
}
}
/**
* Speeds up aggregates on fixed-precision decimals by executing them on unscaled Long values.
*
* This uses the same rules for increasing the precision and scale of the output as
* [[org.apache.spark.sql.catalyst.analysis.DecimalPrecision]].
*/
object DecimalAggregates extends Rule[LogicalPlan] {
import Decimal.MAX_LONG_DIGITS
/** Maximum number of decimal digits representable precisely in a Double */
private val MAX_DOUBLE_DIGITS = 15
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case q: LogicalPlan => q transformExpressionsDown {
case we @ WindowExpression(ae @ AggregateExpression(af, _, _, _), _) => af match {
case Sum(e @ DecimalType.Expression(prec, scale)) if prec + 10 <= MAX_LONG_DIGITS =>
MakeDecimal(we.copy(windowFunction = ae.copy(aggregateFunction = Sum(UnscaledValue(e)))),
prec + 10, scale)
case Average(e @ DecimalType.Expression(prec, scale)) if prec + 4 <= MAX_DOUBLE_DIGITS =>
val newAggExpr =
we.copy(windowFunction = ae.copy(aggregateFunction = Average(UnscaledValue(e))))
Cast(
Divide(newAggExpr, Literal.create(math.pow(10.0, scale), DoubleType)),
DecimalType(prec + 4, scale + 4), Option(SQLConf.get.sessionLocalTimeZone))
case _ => we
}
case ae @ AggregateExpression(af, _, _, _) => af match {
case Sum(e @ DecimalType.Expression(prec, scale)) if prec + 10 <= MAX_LONG_DIGITS =>
MakeDecimal(ae.copy(aggregateFunction = Sum(UnscaledValue(e))), prec + 10, scale)
case Average(e @ DecimalType.Expression(prec, scale)) if prec + 4 <= MAX_DOUBLE_DIGITS =>
val newAggExpr = ae.copy(aggregateFunction = Average(UnscaledValue(e)))
Cast(
Divide(newAggExpr, Literal.create(math.pow(10.0, scale), DoubleType)),
DecimalType(prec + 4, scale + 4), Option(SQLConf.get.sessionLocalTimeZone))
case _ => ae
}
}
}
}
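/*
 * A rough sketch of the DecimalAggregates rewrite for a column c of DecimalType(8, 2):
 * {{{
 *   sum(c)  ==>  MakeDecimal(sum(UnscaledValue(c)), 18, 2)
 * }}}
 * The sum runs on unscaled Long values, which is safe here because 8 + 10 digits still fit in
 * a Long.
 */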
/**
* Converts local operations (i.e. ones that don't require data exchange) on LocalRelation to
* another LocalRelation.
*
* This is relatively simple as it currently handles only a single case: Project.
*/
object ConvertToLocalRelation extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Project(projectList, LocalRelation(output, data))
if !projectList.exists(hasUnevaluableExpr) =>
val projection = new InterpretedProjection(projectList, output)
projection.initialize(0)
LocalRelation(projectList.map(_.toAttribute), data.map(projection))
}
private def hasUnevaluableExpr(expr: Expression): Boolean = {
expr.find(e => e.isInstanceOf[Unevaluable] && !e.isInstanceOf[AttributeReference]).isDefined
}
}
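/*
 * A rough sketch of the ConvertToLocalRelation rewrite: the projection is evaluated eagerly
 * with an InterpretedProjection (illustrative data shown inline):
 * {{{
 *   Project(a + 1 AS b) <- LocalRelation(a, [[1], [2]])
 *   ==>  LocalRelation(b, [[2], [3]])
 * }}}
 */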
/**
* Replaces logical [[Distinct]] operator with an [[Aggregate]] operator.
* {{{
* SELECT DISTINCT f1, f2 FROM t ==> SELECT f1, f2 FROM t GROUP BY f1, f2
* }}}
*/
object ReplaceDistinctWithAggregate extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Distinct(child) => Aggregate(child.output, child.output, child)
}
}
/**
* Replaces logical [[Deduplicate]] operator with an [[Aggregate]] operator.
*/
object ReplaceDeduplicateWithAggregate extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Deduplicate(keys, child, streaming) if !streaming =>
val keyExprIds = keys.map(_.exprId)
val aggCols = child.output.map { attr =>
if (keyExprIds.contains(attr.exprId)) {
attr
} else {
Alias(new First(attr).toAggregateExpression(), attr.name)(attr.exprId)
}
}
Aggregate(keys, aggCols, child)
}
}
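/*
 * A rough sketch of the ReplaceDeduplicateWithAggregate rewrite for a batch plan over t(a, b):
 * {{{
 *   Deduplicate(keys = [a], t)
 *   ==>  Aggregate(groupBy = [a], output = [a, first(b) AS b], t)
 * }}}
 */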
/**
* Replaces logical [[Intersect]] operator with a left-semi [[Join]] operator.
* {{{
* SELECT a1, a2 FROM Tab1 INTERSECT SELECT b1, b2 FROM Tab2
* ==> SELECT DISTINCT a1, a2 FROM Tab1 LEFT SEMI JOIN Tab2 ON a1<=>b1 AND a2<=>b2
* }}}
*
* Note:
* 1. This rule is only applicable to INTERSECT DISTINCT. Do not use it for INTERSECT ALL.
* 2. This rule has to be done after de-duplicating the attributes; otherwise, the generated
* join conditions will be incorrect.
*/
object ReplaceIntersectWithSemiJoin extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Intersect(left, right) =>
assert(left.output.size == right.output.size)
val joinCond = left.output.zip(right.output).map { case (l, r) => EqualNullSafe(l, r) }
Distinct(Join(left, right, LeftSemi, joinCond.reduceLeftOption(And)))
}
}
/**
* Replaces logical [[Except]] operator with a left-anti [[Join]] operator.
* {{{
* SELECT a1, a2 FROM Tab1 EXCEPT SELECT b1, b2 FROM Tab2
* ==> SELECT DISTINCT a1, a2 FROM Tab1 LEFT ANTI JOIN Tab2 ON a1<=>b1 AND a2<=>b2
* }}}
*
* Note:
* 1. This rule is only applicable to EXCEPT DISTINCT. Do not use it for EXCEPT ALL.
* 2. This rule has to be done after de-duplicating the attributes; otherwise, the generated
* join conditions will be incorrect.
*/
object ReplaceExceptWithAntiJoin extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case Except(left, right) =>
assert(left.output.size == right.output.size)
val joinCond = left.output.zip(right.output).map { case (l, r) => EqualNullSafe(l, r) }
Distinct(Join(left, right, LeftAnti, joinCond.reduceLeftOption(And)))
}
}
/**
 * Removes literals from group expressions in [[Aggregate]], as they have no effect on the result
 * and only make the grouping key bigger.
*/
object RemoveLiteralFromGroupExpressions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case a @ Aggregate(grouping, _, _) if grouping.nonEmpty =>
val newGrouping = grouping.filter(!_.foldable)
if (newGrouping.nonEmpty) {
a.copy(groupingExpressions = newGrouping)
} else {
// All grouping expressions are literals. We should not drop them all, because this can
// change the return semantics when the input of the Aggregate is empty (SPARK-17114). We
        // instead replace them with a single, easy to hash/sort, literal expression.
a.copy(groupingExpressions = Seq(Literal(0, IntegerType)))
}
}
}
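/*
 * A rough example of the RemoveLiteralFromGroupExpressions rewrite:
 * {{{
 *   SELECT sum(b) FROM t GROUP BY a, 'x', 1   ==>  SELECT sum(b) FROM t GROUP BY a
 * }}}
 * If every grouping expression is a literal, they are replaced by a single Literal(0) rather
 * than dropped, so the semantics on an empty input are preserved (SPARK-17114).
 */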
/**
 * Removes repeated expressions from group expressions in [[Aggregate]], as they have no effect
 * on the result and only make the grouping key bigger.
*/
object RemoveRepetitionFromGroupExpressions extends Rule[LogicalPlan] {
def apply(plan: LogicalPlan): LogicalPlan = plan transform {
case a @ Aggregate(grouping, _, _) =>
val newGrouping = ExpressionSet(grouping).toSeq
a.copy(groupingExpressions = newGrouping)
}
}
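/*
 * A rough example of the RemoveRepetitionFromGroupExpressions rewrite:
 * {{{
 *   SELECT sum(c) FROM t GROUP BY a, a, b   ==>  SELECT sum(c) FROM t GROUP BY a, b
 * }}}
 */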
| mike0sv/spark | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/optimizer/Optimizer.scala | Scala | apache-2.0 | 54,172 |
package com.boldradius.cqrs
import akka.actor.ActorRef
import akka.pattern.ask
import akka.util.Timeout
import com.boldradius.cqrs.AuctionCommandQueryProtocol._
import com.boldradius.util.LLogging
import org.joda.time.format.DateTimeFormat
import spray.routing._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
import scala.util.{Failure, Success}
final case class PlaceBidDto(auctionId:String, buyer:String, bidPrice:Double )
final case class StartAuctionDto(auctionId:String, start:String, end:String, initialPrice: Double, prodId: String)
final case class BidDto(price:Double, buyer:String, timeStamp:String)
final case class AuctionError(auctionId:String,msg:String,response:String = "AuctionError")
final case class AuctionStartedDto(auctionId:String,response:String = "AuctionStartedDto")
final case class AuctionNotStartedDto(auctionId:String,response:String = "AuctionNotStartedDto")
final case class SuccessfulBidDto(auctionId:String, bidPrice: Double, timeStamp:String,response:String = "SuccessfulBidDto")
final case class RejectedBidDto(auctionId:String, bidPrice: Double, currentBid:Double,response:String = "RejectedBidDto")
final case class FailedBidDto(auctionId:String, bidPrice: Double, currentBid:Double,response:String = "FailedBidDto")
final case class WinningBidDto(auctionId:String,bidPrice: Double,response:String = "WinningBidDto")
final case class BidHistoryDto(auctionId:String,bids: List[BidDto],response:String = "BidHistoryDto")
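/*
 * Example request payloads for the routes below (the concrete values are made up; field names
 * follow the DTOs above, and the date strings must match the "yyyy-MM-dd-HH:mm" pattern used
 * by `fmt`):
 * {{{
 *   POST /startAuction  {"auctionId":"a1","start":"2015-01-20-16:25","end":"2015-01-20-18:25","initialPrice":2.0,"prodId":"p1"}
 *   POST /bid           {"auctionId":"a1","buyer":"bob","bidPrice":3.5}
 * }}}
 */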
trait HttpAuctionServiceRoute extends HttpService with LLogging{
implicit val ec: ExecutionContext
import com.boldradius.util.MarshallingSupport._
implicit val timeout = Timeout(30 seconds)
lazy val fmt = DateTimeFormat.forPattern("yyyy-MM-dd-HH:mm")
def route(command: ActorRef, query:ActorRef) = {
post {
path("startAuction") {
extract(_.request) { e =>
entity(as[StartAuctionDto]) {
auction => onComplete(
(command ? StartAuctionCmd(auction.auctionId,
fmt.parseDateTime(auction.start).getMillis,
fmt.parseDateTime(auction.end).getMillis,
auction.initialPrice, auction.prodId)).mapTo[AuctionAck]) {
case Success(ack) => ack match {
case StartedAuctionAck(id) =>
complete(AuctionStartedDto(id))
case InvalidAuctionAck(id, msg) =>
                    complete(AuctionError(id, msg))
case other =>
                    complete(AuctionError(ack.auctionId, ack.toString))
}
case Failure(t) =>
t.printStackTrace()
                  complete(AuctionError(auction.auctionId, t.getMessage))
}
}
}
} ~
path("bid") {
detach(ec) {
extract(_.request) { e =>
entity(as[PlaceBidDto]) {
bid => onComplete(
(command ? PlaceBidCmd(bid.auctionId, bid.buyer, bid.bidPrice)).mapTo[AuctionAck]) {
case Success(ack) => ack.logInfo(s"PlaceBidCmd bid.bidPrice ${bid.bidPrice} id:" + _.auctionId.toString) match {
case PlacedBidAck(id, buyer, bidPrice, timeStamp) =>
complete(SuccessfulBidDto(id, bidPrice, fmt.print(timeStamp)))
case RefusedBidAck(id, buyer, bidPrice, winningBid) =>
complete(RejectedBidDto(id, bidPrice, winningBid))
case other =>
                    complete(AuctionError(bid.auctionId, other.toString))
}
case Failure(t) =>
                  complete(AuctionError(bid.auctionId, t.getMessage))
}
}
}
}
}
} ~
get {
path("winningBid" / Rest) { auctionId =>
detach(ec) {
onComplete((query ? WinningBidPriceQuery(auctionId)).mapTo[BidQueryResponse]) {
case Success(s) => s match {
case WinningBidPriceResponse(id, price) =>
complete(WinningBidDto(id, price))
case AuctionNotStarted(id) =>
complete(AuctionNotStartedDto(id))
case _ =>
                complete(AuctionError(auctionId, ""))
}
case Failure(t) =>
t.getMessage.logError("WinningBidPriceQuery error: " + _)
              complete(AuctionError(auctionId, t.getMessage))
}
}
} ~
path("bidHistory" / Rest) { auctionId =>
onComplete((query ? GetBidHistoryQuery(auctionId)).mapTo[BidQueryResponse]) {
case Success(s) => s match {
case BidHistoryResponse(id, bids) =>
complete(BidHistoryDto(id, bids.map(b =>
BidDto(b.price, b.buyer, fmt.print(b.timeStamp)))))
case AuctionNotStarted(id) =>
complete(AuctionNotStartedDto(id))
case _ =>
              complete(AuctionError(auctionId, ""))
}
case Failure(t) =>
            complete(AuctionError(auctionId, t.getMessage))
}
}
}
}
}
| ceecer1/akka-dddd-template | src/main/scala/com/boldradius/cqrs/HttpAuctionServiceRoute.scala | Scala | apache-2.0 | 5,332 |
package asobu.distributed.service
import asobu.distributed.protocol.DRequest
import asobu.dsl.Extractor
package object extractors {
type DRequestExtractor[T] = Extractor[DRequest, T]
}
| iheartradio/asobu | distributed/src/main/scala/asobu/distributed/service/extractors/package.scala | Scala | apache-2.0 | 189 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.classification
import scala.util.Random
import breeze.linalg.{DenseMatrix => BDM, DenseVector => BDV, Vector => BV}
import breeze.stats.distributions.{Multinomial => BrzMultinomial}
import org.scalatest.exceptions.TestFailedException
import org.apache.spark.{SparkException, SparkFunSuite}
import org.apache.spark.mllib.linalg.{Vector, Vectors}
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.{LocalClusterSparkContext, MLlibTestSparkContext}
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.util.Utils
object NaiveBayesSuite {
import NaiveBayes.{Multinomial, Bernoulli}
private def calcLabel(p: Double, pi: Array[Double]): Int = {
var sum = 0.0
for (j <- 0 until pi.length) {
sum += pi(j)
if (p < sum) return j
}
-1
}
// Generate input of the form Y = (theta * x).argmax()
def generateNaiveBayesInput(
pi: Array[Double], // 1XC
theta: Array[Array[Double]], // CXD
nPoints: Int,
seed: Int,
modelType: String = Multinomial,
sample: Int = 10): Seq[LabeledPoint] = {
val D = theta(0).length
val rnd = new Random(seed)
val _pi = pi.map(math.pow(math.E, _))
val _theta = theta.map(row => row.map(math.pow(math.E, _)))
for (i <- 0 until nPoints) yield {
val y = calcLabel(rnd.nextDouble(), _pi)
val xi = modelType match {
case Bernoulli => Array.tabulate[Double] (D) { j =>
if (rnd.nextDouble () < _theta(y)(j) ) 1 else 0
}
case Multinomial =>
val mult = BrzMultinomial(BDV(_theta(y)))
val emptyMap = (0 until D).map(x => (x, 0.0)).toMap
val counts = emptyMap ++ mult.sample(sample).groupBy(x => x).map {
case (index, reps) => (index, reps.size.toDouble)
}
counts.toArray.sortBy(_._1).map(_._2)
case _ =>
// This should never happen.
throw new UnknownError(s"Invalid modelType: $modelType.")
}
LabeledPoint(y, Vectors.dense(xi))
}
}
/** Bernoulli NaiveBayes with binary labels, 3 features */
private val binaryBernoulliModel = new NaiveBayesModel(labels = Array(0.0, 1.0),
pi = Array(0.2, 0.8), theta = Array(Array(0.1, 0.3, 0.6), Array(0.2, 0.4, 0.4)), Bernoulli)
/** Multinomial NaiveBayes with binary labels, 3 features */
private val binaryMultinomialModel = new NaiveBayesModel(labels = Array(0.0, 1.0),
pi = Array(0.2, 0.8), theta = Array(Array(0.1, 0.3, 0.6), Array(0.2, 0.4, 0.4)), Multinomial)
}
class NaiveBayesSuite extends SparkFunSuite with MLlibTestSparkContext {
import NaiveBayes.{Multinomial, Bernoulli}
def validatePrediction(predictions: Seq[Double], input: Seq[LabeledPoint]) {
val numOfPredictions = predictions.zip(input).count {
case (prediction, expected) =>
prediction != expected.label
}
    // At least 80% of the predictions should be correct.
assert(numOfPredictions < input.length / 5)
}
def validateModelFit(
piData: Array[Double],
thetaData: Array[Array[Double]],
model: NaiveBayesModel): Unit = {
val modelIndex = piData.indices.zip(model.labels.map(_.toInt))
try {
for (i <- modelIndex) {
assert(math.exp(piData(i._2)) ~== math.exp(model.pi(i._1)) absTol 0.05)
for (j <- thetaData(i._2).indices) {
assert(math.exp(thetaData(i._2)(j)) ~== math.exp(model.theta(i._1)(j)) absTol 0.05)
}
}
} catch {
case e: TestFailedException =>
def arr2str(a: Array[Double]): String = a.mkString("[", ", ", "]")
def msg(orig: String): String = orig + "\nvalidateModelFit:\n" +
" piData: " + arr2str(piData) + "\n" +
" thetaData: " + thetaData.map(arr2str).mkString("\n") + "\n" +
" model.labels: " + arr2str(model.labels) + "\n" +
" model.pi: " + arr2str(model.pi) + "\n" +
" model.theta: " + model.theta.map(arr2str).mkString("\n")
throw e.modifyMessage(_.map(msg))
}
}
test("model types") {
assert(Multinomial === "multinomial")
assert(Bernoulli === "bernoulli")
}
test("get, set params") {
val nb = new NaiveBayes()
nb.setLambda(2.0)
assert(nb.getLambda === 2.0)
nb.setLambda(3.0)
assert(nb.getLambda === 3.0)
}
test("Naive Bayes Multinomial") {
val nPoints = 1000
val pi = Array(0.5, 0.1, 0.4).map(math.log)
val theta = Array(
Array(0.70, 0.10, 0.10, 0.10), // label 0
Array(0.10, 0.70, 0.10, 0.10), // label 1
Array(0.10, 0.10, 0.70, 0.10) // label 2
).map(_.map(math.log))
val testData = NaiveBayesSuite.generateNaiveBayesInput(pi, theta, nPoints, 42, Multinomial)
val testRDD = sc.parallelize(testData, 2)
testRDD.cache()
val model = NaiveBayes.train(testRDD, 1.0, Multinomial)
validateModelFit(pi, theta, model)
val validationData = NaiveBayesSuite.generateNaiveBayesInput(
pi, theta, nPoints, 17, Multinomial)
val validationRDD = sc.parallelize(validationData, 2)
// Test prediction on RDD.
validatePrediction(model.predict(validationRDD.map(_.features)).collect(), validationData)
// Test prediction on Array.
validatePrediction(validationData.map(row => model.predict(row.features)), validationData)
// Test posteriors
validationData.map(_.features).foreach { features =>
val predicted = model.predictProbabilities(features).toArray
assert(predicted.sum ~== 1.0 relTol 1.0e-10)
val expected = expectedMultinomialProbabilities(model, features)
expected.zip(predicted).foreach { case (e, p) => assert(e ~== p relTol 1.0e-10) }
}
}
/**
* @param model Multinomial Naive Bayes model
* @param testData input to compute posterior probabilities for
* @return posterior class probabilities (in order of labels) for input
*/
private def expectedMultinomialProbabilities(model: NaiveBayesModel, testData: Vector) = {
val piVector = new BDV(model.pi)
// model.theta is row-major; treat it as col-major representation of transpose, and transpose:
val thetaMatrix = new BDM(model.theta(0).length, model.theta.length, model.theta.flatten).t
val logClassProbs: BV[Double] = piVector + (thetaMatrix * testData.asBreeze)
val classProbs = logClassProbs.toArray.map(math.exp)
val classProbsSum = classProbs.sum
classProbs.map(_ / classProbsSum)
}
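  /*
   * Sketch of the math checked above: for multinomial naive Bayes the log class score is
   * log(pi_c) + sum_j x_j * log(theta_{c,j}); the posterior is the normalized exponential
   * (softmax) of these scores, which the breeze computation reproduces given that model.pi and
   * model.theta are already stored in log space.
   */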
test("Naive Bayes Bernoulli") {
val nPoints = 10000
val pi = Array(0.5, 0.3, 0.2).map(math.log)
val theta = Array(
Array(0.50, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.40), // label 0
Array(0.02, 0.70, 0.10, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02), // label 1
Array(0.02, 0.02, 0.60, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.30) // label 2
).map(_.map(math.log))
val testData = NaiveBayesSuite.generateNaiveBayesInput(
pi, theta, nPoints, 45, Bernoulli)
val testRDD = sc.parallelize(testData, 2)
testRDD.cache()
val model = NaiveBayes.train(testRDD, 1.0, Bernoulli)
validateModelFit(pi, theta, model)
val validationData = NaiveBayesSuite.generateNaiveBayesInput(
pi, theta, nPoints, 20, Bernoulli)
val validationRDD = sc.parallelize(validationData, 2)
// Test prediction on RDD.
validatePrediction(model.predict(validationRDD.map(_.features)).collect(), validationData)
// Test prediction on Array.
validatePrediction(validationData.map(row => model.predict(row.features)), validationData)
// Test posteriors
validationData.map(_.features).foreach { features =>
val predicted = model.predictProbabilities(features).toArray
assert(predicted.sum ~== 1.0 relTol 1.0e-10)
val expected = expectedBernoulliProbabilities(model, features)
expected.zip(predicted).foreach { case (e, p) => assert(e ~== p relTol 1.0e-10) }
}
}
/**
* @param model Bernoulli Naive Bayes model
* @param testData input to compute posterior probabilities for
* @return posterior class probabilities (in order of labels) for input
*/
private def expectedBernoulliProbabilities(model: NaiveBayesModel, testData: Vector) = {
val piVector = new BDV(model.pi)
val thetaMatrix = new BDM(model.theta(0).length, model.theta.length, model.theta.flatten).t
val negThetaMatrix = new BDM(model.theta(0).length, model.theta.length,
model.theta.flatten.map(v => math.log(1.0 - math.exp(v)))).t
val testBreeze = testData.asBreeze
val negTestBreeze = new BDV(Array.fill(testBreeze.size)(1.0)) - testBreeze
val piTheta: BV[Double] = piVector + (thetaMatrix * testBreeze)
val logClassProbs: BV[Double] = piTheta + (negThetaMatrix * negTestBreeze)
val classProbs = logClassProbs.toArray.map(math.exp)
val classProbsSum = classProbs.sum
classProbs.map(_ / classProbsSum)
}
test("detect negative values") {
val dense = Seq(
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(-1.0)),
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(1.0, Vectors.dense(0.0)))
intercept[SparkException] {
NaiveBayes.train(sc.makeRDD(dense, 2))
}
val sparse = Seq(
LabeledPoint(1.0, Vectors.sparse(1, Array(0), Array(1.0))),
LabeledPoint(0.0, Vectors.sparse(1, Array(0), Array(-1.0))),
LabeledPoint(1.0, Vectors.sparse(1, Array(0), Array(1.0))),
LabeledPoint(1.0, Vectors.sparse(1, Array.empty, Array.empty)))
intercept[SparkException] {
NaiveBayes.train(sc.makeRDD(sparse, 2))
}
val nan = Seq(
LabeledPoint(1.0, Vectors.sparse(1, Array(0), Array(1.0))),
LabeledPoint(0.0, Vectors.sparse(1, Array(0), Array(Double.NaN))),
LabeledPoint(1.0, Vectors.sparse(1, Array(0), Array(1.0))),
LabeledPoint(1.0, Vectors.sparse(1, Array.empty, Array.empty)))
intercept[SparkException] {
NaiveBayes.train(sc.makeRDD(nan, 2))
}
}
test("detect non zero or one values in Bernoulli") {
val badTrain = Seq(
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(2.0)),
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(1.0, Vectors.dense(0.0)))
intercept[SparkException] {
NaiveBayes.train(sc.makeRDD(badTrain, 2), 1.0, Bernoulli)
}
val okTrain = Seq(
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(0.0)),
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(0.0, Vectors.dense(0.0)),
LabeledPoint(1.0, Vectors.dense(1.0)),
LabeledPoint(1.0, Vectors.dense(1.0))
)
val badPredict = Seq(
Vectors.dense(1.0),
Vectors.dense(2.0),
Vectors.dense(1.0),
Vectors.dense(0.0))
val model = NaiveBayes.train(sc.makeRDD(okTrain, 2), 1.0, Bernoulli)
intercept[SparkException] {
model.predict(sc.makeRDD(badPredict, 2)).collect()
}
}
test("model save/load: 2.0 to 2.0") {
val tempDir = Utils.createTempDir()
val path = tempDir.toURI.toString
Seq(NaiveBayesSuite.binaryBernoulliModel, NaiveBayesSuite.binaryMultinomialModel).foreach {
model =>
// Save model, load it back, and compare.
try {
model.save(sc, path)
val sameModel = NaiveBayesModel.load(sc, path)
assert(model.labels === sameModel.labels)
assert(model.pi === sameModel.pi)
assert(model.theta === sameModel.theta)
assert(model.modelType === sameModel.modelType)
} finally {
Utils.deleteRecursively(tempDir)
}
}
}
test("model save/load: 1.0 to 2.0") {
val model = NaiveBayesSuite.binaryMultinomialModel
val tempDir = Utils.createTempDir()
val path = tempDir.toURI.toString
// Save model as version 1.0, load it back, and compare.
try {
val data = NaiveBayesModel.SaveLoadV1_0.Data(model.labels, model.pi, model.theta)
NaiveBayesModel.SaveLoadV1_0.save(sc, path, data)
val sameModel = NaiveBayesModel.load(sc, path)
assert(model.labels === sameModel.labels)
assert(model.pi === sameModel.pi)
assert(model.theta === sameModel.theta)
assert(model.modelType === Multinomial)
} finally {
Utils.deleteRecursively(tempDir)
}
}
}
class NaiveBayesClusterSuite extends SparkFunSuite with LocalClusterSparkContext {
test("task size should be small in both training and prediction") {
val m = 10
val n = 200000
val examples = sc.parallelize(0 until m, 2).mapPartitionsWithIndex { (idx, iter) =>
val random = new Random(idx)
iter.map { i =>
LabeledPoint(random.nextInt(2), Vectors.dense(Array.fill(n)(random.nextDouble())))
}
}
// If we serialize data directly in the task closure, the size of the serialized task
// would be greater than 1MB and hence Spark would throw an error.
val model = NaiveBayes.train(examples)
val predictions = model.predict(examples.map(_.features))
}
}
| wangyixiaohuihui/spark2-annotation | mllib/src/test/scala/org/apache/spark/mllib/classification/NaiveBayesSuite.scala | Scala | apache-2.0 | 14,225 |
/**
* Swaggy Jenkins
* Jenkins API clients generated from Swagger / Open API specification
*
* The version of the OpenAPI document: 1.1.2-pre.0
* Contact: [email protected]
*
* NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
* https://openapi-generator.tech
* Do not edit the class manually.
*/
package io.swagger.client.model
import play.api.libs.json._
case class ExtensionClassContainerImpl1 (
`class`: Option[String],
links: Option[ExtensionClassContainerImpl1links],
map: Option[ExtensionClassContainerImpl1map]
)
object ExtensionClassContainerImpl1 {
implicit val format: Format[ExtensionClassContainerImpl1] = Json.format
}
| cliffano/swaggy-jenkins | clients/scala-lagom-server/generated/src/main/scala/io/swagger/client/model/ExtensionClassContainerImpl1.scala | Scala | mit | 741 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package base
package patterns
import com.intellij.lang.ASTNode
import com.intellij.psi._
import com.intellij.psi.scope.PsiScopeProcessor
import org.jetbrains.plugins.scala.extensions.{PsiTypeExt, ifReadAllowed}
import org.jetbrains.plugins.scala.lang.parser.ScalaElementType
import org.jetbrains.plugins.scala.lang.psi.api.base.patterns._
import org.jetbrains.plugins.scala.lang.psi.api.statements.params.ScTypeParam
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef.ScTypeDefinition
import org.jetbrains.plugins.scala.lang.psi.stubs.ScBindingPatternStub
import org.jetbrains.plugins.scala.lang.psi.types.api.{Any, Nothing, ParameterizedType}
import org.jetbrains.plugins.scala.lang.psi.types.result._
import org.jetbrains.plugins.scala.lang.psi.types.{ScExistentialType, api, _}
/**
* @author Alexander Podkhalyuzin
* Date: 28.02.2008
*/
class ScTypedPatternImpl private(stub: ScBindingPatternStub[ScTypedPattern], node: ASTNode)
extends ScalaStubBasedElementImpl(stub, ScalaElementType.TYPED_PATTERN, node)
with ScPatternImpl
with ScTypedPattern
with TypedPatternLikeImpl
{
def this(node: ASTNode) = this(null, node)
def this(stub: ScBindingPatternStub[ScTypedPattern]) = this(stub, null)
override def nameId: PsiElement = findChildByType[PsiElement](TokenSets.ID_SET)
override def typePattern: Option[ScTypePattern] = findChild[ScTypePattern]
override def toString: String = "TypedPattern: " + ifReadAllowed(name)("")
override def `type`(): TypeResult = {
typePattern match {
case Some(tp) =>
if (tp.typeElement == null) return Failure(ScalaBundle.message("no.type.element.for.type.pattern"))
val typeElementType: TypeResult =
tp.typeElement.`type`().map {
case tp: ScExistentialType =>
val skolem = tp.quantified
skolem.extractClassType match { //todo: type aliases?
case Some((clazz: ScTypeDefinition, subst)) =>
val typeParams = clazz.typeParameters
skolem match {
case ParameterizedType(des, typeArgs) if typeArgs.length == typeParams.length =>
ScParameterizedType(des, typeArgs.zip(typeParams).map {
case (arg: ScExistentialArgument, param: ScTypeParam) =>
val lowerBound =
if (arg.lower.equiv(Nothing)) subst(param.lowerBound.getOrNothing)
else arg.lower //todo: lub?
val upperBound =
if (arg.upper.equiv(Any)) subst(param.upperBound.getOrAny)
else arg.upper //todo: glb?
ScExistentialArgument(arg.name, arg.typeParameters, lowerBound, upperBound)
case (tp: ScType, _: ScTypeParam) => tp
}).unpackedType
case _ => tp
}
case Some((clazz: PsiClass, subst)) =>
val typeParams: Array[PsiTypeParameter] = clazz.getTypeParameters
skolem match {
case ParameterizedType(des, typeArgs) if typeArgs.length == typeParams.length =>
ScParameterizedType(des, typeArgs.zip(typeParams).map {
case (arg: ScExistentialArgument, param: PsiTypeParameter) =>
val lowerBound = arg.lower
val upperBound =
if (arg.upper.equiv(api.Any)) {
val listTypes: Array[PsiClassType] = param.getExtendsListTypes
if (listTypes.isEmpty) api.Any
else subst(listTypes.toSeq.map(_.toScType()).glb(checkWeak = true))
} else arg.upper //todo: glb?
ScExistentialArgument(arg.name, arg.typeParameters, lowerBound, upperBound)
case (tp: ScType, _) => tp
}).unpackedType
case _ => tp
}
case _ => tp
}
case tp: ScType => tp
}
this.expectedType match {
case Some(expectedType) =>
typeElementType.map(resType => expectedType.glb(resType))
case _ => typeElementType
}
case None => Failure(ScalaBundle.message("no.type.pattern"))
}
}
override def processDeclarations(processor: PsiScopeProcessor, state: ResolveState, lastParent: PsiElement,
place: PsiElement): Boolean = {
ScalaPsiUtil.processImportLastParent(processor, state, place, lastParent, `type`())
}
override def getOriginalElement: PsiElement = super[ScTypedPattern].getOriginalElement
}
| JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/base/patterns/ScTypedPatternImpl.scala | Scala | apache-2.0 | 4,884 |
package com.eevolution.context.dictionary.infrastructure.repository
import java.util.UUID
import com.eevolution.context.dictionary.domain._
import com.eevolution.context.dictionary.domain.model.WorkflowNodeNext
import com.eevolution.context.dictionary.infrastructure.db.DbContext._
import com.eevolution.utils.PaginatedSequence
import com.lightbend.lagom.scaladsl.persistence.jdbc.JdbcSession
import scala.concurrent.{ExecutionContext, Future}
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/e-Evolution
* Created by [email protected] , www.e-evolution.com
*/
/**
* Workflow Node Next Repository
* @param session
* @param executionContext
*/
class WorkflowNodeNextRepository (session: JdbcSession)(implicit executionContext: ExecutionContext)
extends api.repository.WorkflowNodeNextRepository[WorkflowNodeNext , Int]
with WorkflowNodeNextMapping {
def getById(id: Int): Future[WorkflowNodeNext] = {
Future(run(queryWorkflowNodeNext.filter(_.workflowNodeNextId == lift(id))).headOption.get)
}
def getByUUID(uuid: UUID): Future[WorkflowNodeNext] = {
Future(run(queryWorkflowNodeNext.filter(_.uuid == lift(uuid.toString))).headOption.get)
}
  def getByWorkflowNodeNextId(id : Int) : Future[List[WorkflowNodeNext]] = {
    Future(run(queryWorkflowNodeNext.filter(_.workflowNodeNextId == lift(id))))
  }
def getAll() : Future[List[WorkflowNodeNext]] = {
Future(run(queryWorkflowNodeNext))
}
def getAllByPage(page: Int, pageSize: Int): Future[PaginatedSequence[WorkflowNodeNext]] = {
val offset = page * pageSize
val limit = (page + 1) * pageSize
for {
count <- countWorkflowNodeNext()
elements <- if (offset > count) Future.successful(Nil)
else selectWorkflowNodeNext(offset, limit)
} yield {
PaginatedSequence(elements, page, pageSize, count)
}
}
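  // Worked example (illustrative, values assumed): for page = 2 and pageSize = 25 the call
  // computes offset = 50 and limit = 75, so selectWorkflowNodeNext(50, 75) returns rows
  // 50 to 74 of the full result set and getAllByPage wraps them in a PaginatedSequence.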
private def countWorkflowNodeNext() = {
Future(run(queryWorkflowNodeNext.size).toInt)
}
  private def selectWorkflowNodeNext(offset: Int, limit: Int): Future[Seq[WorkflowNodeNext]] = {
    // limit is an absolute end index (exclusive), so slice(offset, limit) yields exactly one page
    Future(run(queryWorkflowNodeNext).slice(offset, limit).toSeq)
  }
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/repository/WorkflowNodeNextRepository.scala | Scala | gpl-3.0 | 2,871 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.manager.utils.two40
import java.nio.ByteBuffer
import org.apache.kafka.clients.admin.{ConsumerGroupDescription, MemberDescription}
import org.apache.kafka.clients.consumer.internals.ConsumerProtocol
import org.apache.kafka.common.requests.DescribeGroupsResponse
import org.apache.kafka.common.utils.Utils
object MemberMetadata {
import collection.JavaConverters._
def from(groupId: String, groupSummary: ConsumerGroupDescription, memberSummary: MemberDescription) : MemberMetadata = {
val assignment = memberSummary.assignment().topicPartitions().asScala.map(tp => tp.topic() -> tp.partition()).toSet
MemberMetadata(
memberSummary.consumerId()
, groupId
, None
, memberSummary.clientId
, memberSummary.host()
, "(n/a on backfill)"
, List.empty
, assignment
)
}
}
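// Illustrative usage sketch (not part of the original file; `desc` and `groupId` are assumed
// names): given a ConsumerGroupDescription fetched via the AdminClient and its group id, one
// MemberMetadata can be built per member of the group.
//
//   import scala.collection.JavaConverters._
//   val members: List[MemberMetadata] =
//     desc.members().asScala.toList.map(m => MemberMetadata.from(groupId, desc, m))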
/**
* Member metadata contains the following metadata:
*
* Heartbeat metadata:
* 1. negotiated heartbeat session timeout
* 2. timestamp of the latest heartbeat
*
* Protocol metadata:
* 1. the list of supported protocols (ordered by preference)
* 2. the metadata associated with each protocol
*
* In addition, it also contains the following state information:
*
* 1. Awaiting rebalance callback: when the group is in the prepare-rebalance state,
* its rebalance callback will be kept in the metadata if the
* member has sent the join group request
* 2. Awaiting sync callback: when the group is in the awaiting-sync state, its sync callback
* is kept in metadata until the leader provides the group assignment
* and the group transitions to stable
*/
case class MemberMetadata(memberId: String,
groupId: String,
groupInstanceId: Option[String],
clientId: String,
clientHost: String,
protocolType: String,
supportedProtocols: List[(String, Set[String])],
assignment: Set[(String, Int)]
) {
def protocols = supportedProtocols.map(_._1).toSet
def metadata(protocol: String): Set[String] = {
supportedProtocols.find(_._1 == protocol) match {
case Some((_, metadata)) => metadata
case None =>
throw new IllegalArgumentException("Member does not support protocol")
}
}
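  // e.g. metadata("range") returns the set recorded for the "range" protocol if this member
  // advertised it, and throws IllegalArgumentException otherwise ("range" is just an example name).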
override def toString: String = {
"MemberMetadata(" +
s"memberId=$memberId, " +
s"groupInstanceId=$groupInstanceId, " +
s"clientId=$clientId, " +
s"clientHost=$clientHost, " +
s"supportedProtocols=${supportedProtocols.map(_._1)}, " +
")"
}
}
| yahoo/kafka-manager | app/kafka/manager/utils/two40/MemberMetadata.scala | Scala | apache-2.0 | 3,610 |
package com.github.log0ymxm.mapper
import java.io.{ BufferedWriter, File, FileWriter }
import breeze.linalg.DenseMatrix
import breeze.linalg
import org.apache.spark.mllib.linalg.distributed.{ CoordinateMatrix, IndexedRow, IndexedRowMatrix }
import org.apache.spark.rdd.RDD
import org.apache.spark.SparkContext
import org.apache.spark.graphx.{ Edge, Graph }
import com.github.log0ymxm.mapper.clustering.{ SingleLinkage, Cutoff }
object Mapper {
/**
   * Computes a 1-dimensional simplicial complex from a dataset represented by
   * its pairwise distances and a filtration.
*
* @param sc A SparkContext
   * @param distances An n x n upper triangular matrix of pairwise distances.
* @param filtrationValues An n x k matrix, representing the k filtration
* functions that have been applied to the original
* data points. Indices should match up with the
* coordinates found in the distances matrix.
* @return GraphX structure representing the reduced dimension simplicial complex
*/
def mapper(sc: SparkContext, distances: CoordinateMatrix, filtrationValues: IndexedRowMatrix, coverIntervals: Int = 10, coverOverlapRatio: Double = 0.5): Graph[(String, Seq[Double], Int), Int] = {
val cover = new Cover(filtrationValues, coverIntervals, coverOverlapRatio)
// combine rows and columns of distance matrix since we only have it in upper triangular form.
// This has size n x n since for all elements n, we have the n pairwise distances
//
// It'd be great to avoid this.
// Might be doable with some kind of nearest neighbors structure, or lsh...
val pairwiseDistances: RDD[(DataKey, PointDistance)] = distances.entries
.union(distances.transpose().entries)
.map((entry) => { (DataKey(entry.i), PointDistance(DataKey(entry.j), entry.value)) })
// filtration indices match up with the keys in our distances
// RDD. We cogroup them and flatten to join each filtration value
// with it's corresponding set of distances.
val filtrationDistances: RDD[(DataKey, (IndexedRow, Iterable[PointDistance]))] = filtrationValues
.rows
.map({ idxRow => (DataKey(idxRow.index), idxRow) })
.cogroup(pairwiseDistances)
.flatMapValues({ case (f, d) => f.map({ x => (x, d) }) })
// for each data point, we only return the cover element keys
    // that this data point belongs to, i.e. the data point
// filtration values are within the cover bounding box for all
// k dimensions
val dataDistancesAndCoverAssignment: RDD[(DataKey, (IndexedRow, Iterable[PointDistance], Seq[CoverSegmentKey]))] = filtrationDistances
.map({
case (dataKey, (filtrationRow, rowDistances)) =>
val coverAssignment: Seq[CoverSegmentKey] = cover.coverAssignment(filtrationRow.vector)
(dataKey, (filtrationRow, rowDistances, coverAssignment))
})
// key each element in the data by which patch it should be in,
// duplicates rows if they're in multiple patches. This is
// longer than the initial dataset, since rows are duplicated
// anywhere we have a point in multiple cover segments.
val flattenedDataWithCoverKey: RDD[(CoverSegmentKey, (DataKey, IndexedRow, Iterable[PointDistance]))] = dataDistancesAndCoverAssignment
.flatMap({
case (dataKey, (filtrationRow, distances, coverAssignment)) =>
coverAssignment.map({ assignment => (assignment, (dataKey, filtrationRow, distances)) })
})
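    // For example, a point whose filtration values fall in two overlapping cover segments is
    // emitted twice here, once keyed by each segment, so it is clustered in both patches.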
val partitionedData: RDD[(CoverSegmentKey, (DataKey, IndexedRow, Iterable[PointDistance]))] = flattenedDataWithCoverKey
.partitionBy(new CoverAssignmentPartitioner(cover.numCoverSegments))
val clusters: RDD[(DataKey, (String, Seq[Double], Int))] = partitionedData.mapPartitions({
case (patch: Iterator[(CoverSegmentKey, (DataKey, IndexedRow, Iterable[PointDistance]))]) =>
val (keys, elements) = patch.toList.unzip
val n = elements.length
// seems like a gross way to handle this, I'd expect the partition not to exist in this case
// TODO should filter these out well before this point
if (n == 0) {
Iterator.empty
} else {
val segmentKey = keys(0)
val (indexKeys, filtrationValues, _) = elements.unzip3
            // number of filtration dimensions (length of one filtration vector)
            val k = filtrationValues.head.vector.size
val filtrationAverages: Seq[Double] = filtrationValues.foldLeft(Array.fill(k)(0.0))({
case (totals, row) => (totals, row.vector.toArray).zipped.map(_ + _)
}).map({ x => x / n }).toSeq
val localDistances: DenseMatrix[Double] = new DenseMatrix(n, n, elements.flatMap({
case (currentIndex, _, pointDistances) =>
indexKeys collect {
case i if i == currentIndex => 0
case i if i != currentIndex =>
pointDistances.filter({ d => d.coordinate == i }).head.distance
}
}).toArray)
val diameter = linalg.max(localDistances)
val linkage = SingleLinkage(localDistances)
val numClusters = Cutoff.firstGap(linkage, diameter)
val clusters = SingleLinkage.fcluster(linkage, numClusters)
val clusterNames = clusters.map(x => s"${segmentKey.id}-$x")
indexKeys.zip(clusterNames)
.map({ case (key, name) => (key, (name, filtrationAverages, n)) })
.toIterator
}
})
val vertices: RDD[(Long, (String, Seq[Double], Int))] = clusters
.map(_._2)
.distinct
.zipWithIndex
.map(_.swap)
val idLookup = sc.broadcast(
vertices
.map({ case (id, (name, avgs, size)) => (name, id) })
.collect()
.toMap
)
val assignments: RDD[(DataKey, CoverSegmentKey)] = dataDistancesAndCoverAssignment.flatMapValues(_._3)
val edges: RDD[Edge[Int]] = clusters.cogroup(assignments)
.flatMap({
case (key: DataKey, (vertices, segments: Seq[CoverSegmentKey])) =>
vertices.map(_._1).toSeq.combinations(2).map({ x =>
val node1 = idLookup.value(x(0))
val node2 = idLookup.value(x(1))
((node1, node2), 1)
})
})
.reduceByKey({ case (x: Int, y: Int) => x + y })
.map({ case ((n1, n2), w) => Edge(n1, n2, w) })
val graph: Graph[(String, Seq[Double], Int), Int] = Graph[(String, Seq[Double], Int), Int](vertices, edges)
    graph
}
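  // Illustrative usage sketch (not from the original project; all value names are assumed):
  // given a SparkContext `sc`, pairwise distances as an upper-triangular CoordinateMatrix
  // `distances` and filtration values as an IndexedRowMatrix `filtration`, the 1-dimensional
  // simplicial complex can be computed and serialized for visualization like this:
  //
  //   val complex = Mapper.mapper(sc, distances, filtration,
  //     coverIntervals = 20, coverOverlapRatio = 0.25)
  //   Mapper.writeAsJson(complex, "/tmp/mapper-graph.json")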
/**
* If you expect your resultant graph structure to fit in memory, this will
* serialize your simplicial complex into a json structure suitable for
* visualization.
*
* @param graph Simplicial complex result from mapper algorithm
* @param graphPath Location where json file should be written
*/
def writeAsJson(graph: Graph[(String, Seq[Double], Int), Int], graphPath: String) = {
val vertices = graph.vertices.map({
case (id, (name, avgs, size)) => Map(
"id" -> id,
"name" -> name,
"filtration_values" -> avgs,
"cluster_size" -> size
)
}).collect()
val edges = graph.edges.map({
case Edge(src, dst, weight) =>
Map(
"src" -> src,
"dst" -> dst,
"weight" -> weight
)
}).collect()
val json = JsonUtil.toJson(Map(
"vertices" -> vertices,
"edges" -> edges
))
val bw = new BufferedWriter(new FileWriter(new File(graphPath)))
bw.write(json)
bw.close()
}
}
| log0ymxm/spark-mapper | src/main/scala/com/github/log0ymxm/mapper/Mapper.scala | Scala | apache-2.0 | 7,485 |
package com.meteorcode.pathway.graphics
/**
* ==Pathway Graphics Context==
*
* This object provides the Pathway Graphics API to JavaScript.
*
* Created by hawk on 9/21/15.
*/
class GraphicsContext {
// TODO: Implement me
}
| MeteorCode/Pathway | src/main/scala/com/meteorcode/pathway/graphics/GraphicsContext.scala | Scala | mit | 234 |
package com.atomist.tree.pathexpression
import com.atomist.rug.BadRugSyntaxException
import org.scalatest.{FlatSpec, Matchers}
class PathExpressionParserTest extends FlatSpec with Matchers {
val pep = PathExpressionParser
it should "parse a bare root node" in {
val pe = "/"
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.isEmpty === true)
}
  it should "fail to parse an unanchored path expression" in {
val pe = "big/lebowski"
an[BadRugSyntaxException] should be thrownBy pep.parsePathExpression(pe)
}
it should "parse a child axis" in {
val pe = "/child::src"
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 1)
val ls = parsed.locationSteps.head
assert(ls.axis === Child)
assert(ls.predicateToEvaluate === TruePredicate)
ls.test match {
case nnt: NamedNodeTest => assert(nnt.name === "src")
case x => fail(s"node test is not a NamedNodeTest: $x")
}
}
it should "parse an abbreviated child axis with node name" in {
val pe = "/src"
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 1)
val ls = parsed.locationSteps.head
assert(ls.axis === Child)
assert(ls.predicateToEvaluate === TruePredicate)
ls.test match {
case nnt: NamedNodeTest => assert(nnt.name === "src")
case x => fail(s"node test is not a NamedNodeTest: $x")
}
}
  it should "treat an abbreviated child axis as equivalent to an explicit child axis" in {
val pe = "/child::src"
val parsed = pep.parsePathExpression(pe)
val pe1 = "/src"
val parsed1 = pep.parsePathExpression(pe1)
assert(parsed.locationSteps.size === 1)
val ls = parsed.locationSteps.head
assert(parsed1.locationSteps.size === 1)
    val ls1 = parsed1.locationSteps.head
assert(ls1.axis === ls.axis)
assert(ls1.predicateToEvaluate === ls.predicateToEvaluate)
assert(ls1.test === ls.test)
}
it should "parse a descendant axis" in {
val pe = "/descendant::src"
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 1)
val ls = parsed.locationSteps.head
assert(ls.axis === Descendant)
assert(ls.predicateToEvaluate === TruePredicate)
ls.test match {
case nnt: NamedNodeTest => assert(nnt.name === "src")
case x => fail(s"node test is not a NamedNodeTest: $x")
}
}
it should "parse an abbreviated descendant axis with node name" in {
val pe = "//src"
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 1)
val ls = parsed.locationSteps.head
assert(ls.axis === Descendant)
assert(ls.predicateToEvaluate === TruePredicate)
ls.test match {
case nnt: NamedNodeTest => assert(nnt.name === "src")
case x => fail(s"node test is not a NamedNodeTest: $x")
}
}
  it should "treat an abbreviated descendant axis as equivalent to an explicit descendant axis" in {
val pe = "/descendant::src"
val parsed = pep.parsePathExpression(pe)
val pe1 = "//src"
val parsed1 = pep.parsePathExpression(pe1)
assert(parsed.locationSteps.size === 1)
val ls = parsed.locationSteps.head
assert(parsed1.locationSteps.size === 1)
    val ls1 = parsed1.locationSteps.head
assert(ls1.axis === ls.axis)
assert(ls1.predicateToEvaluate === ls.predicateToEvaluate)
assert(ls1.test === ls.test)
}
it should "parse a node object type" in {
val pe = "/Issue()"
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 1)
val ls = parsed.locationSteps.head
assert(ls.axis === Child)
assert(ls.predicateToEvaluate === TruePredicate)
assert(ls.test === NodesWithTag("Issue"))
}
it should "parse an index predicate" in {
val pe = "/dude[4]"
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 1)
val ls = parsed.locationSteps.head
assert(ls.axis === Child)
ls.predicateToEvaluate match {
case p@IndexPredicate(4) =>
case x => fail(s"predicate did not match expected type: $x")
}
ls.test match {
case nnt: NamedNodeTest => assert(nnt.name === "dude")
case x => fail(s"node test is not a NamedNodeTest: $x")
}
}
it should "parse a simple predicate" in {
val pe = "/dude[@size='large']"
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 1)
val ls = parsed.locationSteps.head
assert(ls.axis === Child)
ls.predicateToEvaluate match {
case np: PropertyValuePredicate =>
assert(np.property === "size")
assert(np.expectedValue === "large")
case x => fail(s"predicate did not match expected type: $x")
}
ls.test match {
case nnt: NamedNodeTest => assert(nnt.name === "dude")
case x => fail(s"node test is not a NamedNodeTest: $x")
}
}
it should "parse into json" in {
val pe = "/*[@name='elm-package.json']/Json()/summary"
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 3)
}
it should "parse a property name axis specifier" in {
val pe = """/Issue()[@state='open']/belongsTo::Repo()[@name='rug-cli']"""
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 2)
parsed.locationSteps(1).axis match {
case NavigationAxis("belongsTo") =>
}
}
it should "parse a property name axis specifier using double quoted strings" in {
val pe = """/Issue()[@state="open"]/belongsTo::Repo()[@name="rug-cli"]"""
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 2)
parsed.locationSteps(1).axis match {
case NavigationAxis("belongsTo") =>
}
}
it should "parse predicate to understandable repo" in {
val pe = """/Issue()[@state='open']/belongsTo::Repo()[@name='rug-cli']"""
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 2)
parsed.locationSteps.head.predicates.head match {
case PropertyValuePredicate("state", "open") =>
}
}
it should "parse nested predicate" in {
val pe = """/Issue()[@state='open'][/belongsTo::Repo()[@name='rug-cli']]"""
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 1)
parsed.locationSteps.head.predicates(1) match {
case _: NestedPathExpressionPredicate =>
}
}
it should "parse an optional predicate" in {
val pe = """/Push()[contains::Commit()]?"""
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 1)
assert(parsed.locationSteps.head.predicates.size === 1)
parsed.locationSteps.head.predicates.head match {
case _: OptionalPredicate =>
}
}
it should "parse required and optional predicates" in {
val pe = """/Push()[@name='brainiac'][contains::Commit()]?"""
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 1)
assert(parsed.locationSteps.head.predicates.size === 2)
parsed.locationSteps.head.predicates.count {
case _: OptionalPredicate => true
case _ => false
} should be (1)
}
it should "parse predicate substring function" in {
val pe = """/Push()[@name='brainiac'][contains(name, "foobar")]"""
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 1)
assert(parsed.locationSteps.head.predicates.size === 2)
}
it should "parse optional and required predicates" in {
val pe = """/Push()[contains::Commit()]?[@name='brainiac']"""
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 1)
assert(parsed.locationSteps.head.predicates.size === 2)
parsed.locationSteps.head.predicates.count {
case _: OptionalPredicate => true
case _ => false
} should be (1)
}
it should "parse a nested optional predicate" in {
val pe = """/Push()[contains::Commit()[belongsTo::Repo()]?]"""
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 1)
assert(parsed.locationSteps.head.predicates.size === 1)
parsed.locationSteps.head.predicates.head match {
case npep: NestedPathExpressionPredicate =>
val npepSteps = npep.expression.locationSteps
assert(npepSteps.size === 1)
assert(npepSteps.head.predicates.size === 1)
npepSteps.head.predicates.head match {
case _: OptionalPredicate =>
}
}
}
it should "parse a complicated path expression" in {
val pe = """/Build()
|[on::Repo()/channel::ChatChannel()]
|[triggeredBy::Push()/contains::Commit()/author::GitHubId()/hasGithubIdentity::Person()/hasChatIdentity::ChatId()]
|/hasBuild::Commit()/author::GitHubId()/hasGithubIdentity::Person()/hasChatIdentity::ChatId()""".stripMargin
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 5)
assert(parsed.locationSteps.head.predicates.size === 2)
parsed.locationSteps.head.predicates.count {
case _: NestedPathExpressionPredicate => true
case _ => false
} should be (2)
}
it should "parse a complicated path expression with optional predicates" in {
val pe = """/Build()
|[on::Repo()/channel::ChatChannel()]?
|[triggeredBy::Push()/contains::Commit()/author::GitHubId()/hasGithubIdentity::Person()/hasChatIdentity::ChatId()]?
|/hasBuild::Commit()/author::GitHubId()/hasGithubIdentity::Person()/hasChatIdentity::ChatId()""".stripMargin
val parsed = pep.parsePathExpression(pe)
assert(parsed.locationSteps.size === 5)
assert(parsed.locationSteps.head.predicates.size === 2)
parsed.locationSteps.head.predicates.count {
case _: OptionalPredicate => true
case _ => false
} should be (2)
}
}
| atomist/rug | src/test/scala/com/atomist/tree/pathexpression/PathExpressionParserTest.scala | Scala | gpl-3.0 | 9,891 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.controller
import java.util
import java.util.concurrent.TimeUnit
import kafka.admin.AdminOperationException
import kafka.api._
import kafka.common._
import kafka.controller.KafkaController.AlterIsrCallback
import kafka.cluster.Broker
import kafka.controller.KafkaController.{AlterReassignmentsCallback, ElectLeadersCallback, ListReassignmentsCallback, UpdateFeaturesCallback}
import kafka.coordinator.transaction.ZkProducerIdManager
import kafka.metrics.{KafkaMetricsGroup, KafkaTimer}
import kafka.server._
import kafka.utils._
import kafka.utils.Implicits._
import kafka.zk.KafkaZkClient.UpdateLeaderAndIsrResult
import kafka.zk.TopicZNode.TopicIdReplicaAssignment
import kafka.zk.{FeatureZNodeStatus, _}
import kafka.zookeeper.{StateChangeHandler, ZNodeChangeHandler, ZNodeChildChangeHandler}
import org.apache.kafka.common.ElectionType
import org.apache.kafka.common.KafkaException
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.errors.{BrokerNotAvailableException, ControllerMovedException, StaleBrokerEpochException}
import org.apache.kafka.common.message.{AllocateProducerIdsRequestData, AllocateProducerIdsResponseData, AlterIsrRequestData, AlterIsrResponseData, UpdateFeaturesRequestData}
import org.apache.kafka.common.feature.{Features, FinalizedVersionRange}
import org.apache.kafka.common.metrics.Metrics
import org.apache.kafka.common.protocol.Errors
import org.apache.kafka.common.requests.{AbstractControlRequest, ApiError, LeaderAndIsrResponse, UpdateFeaturesRequest, UpdateMetadataResponse}
import org.apache.kafka.common.utils.{Time, Utils}
import org.apache.kafka.server.common.ProducerIdsBlock
import org.apache.zookeeper.KeeperException
import org.apache.zookeeper.KeeperException.Code
import scala.collection.{Map, Seq, Set, immutable, mutable}
import scala.collection.mutable.ArrayBuffer
import scala.jdk.CollectionConverters._
import scala.util.{Failure, Success, Try}
sealed trait ElectionTrigger
final case object AutoTriggered extends ElectionTrigger
final case object ZkTriggered extends ElectionTrigger
final case object AdminClientTriggered extends ElectionTrigger
object KafkaController extends Logging {
val InitialControllerEpoch = 0
val InitialControllerEpochZkVersion = 0
type ElectLeadersCallback = Map[TopicPartition, Either[ApiError, Int]] => Unit
type ListReassignmentsCallback = Either[Map[TopicPartition, ReplicaAssignment], ApiError] => Unit
type AlterReassignmentsCallback = Either[Map[TopicPartition, ApiError], ApiError] => Unit
type AlterIsrCallback = Either[Map[TopicPartition, Either[Errors, LeaderAndIsr]], Errors] => Unit
type UpdateFeaturesCallback = Either[ApiError, Map[String, ApiError]] => Unit
}
class KafkaController(val config: KafkaConfig,
zkClient: KafkaZkClient,
time: Time,
metrics: Metrics,
initialBrokerInfo: BrokerInfo,
initialBrokerEpoch: Long,
tokenManager: DelegationTokenManager,
brokerFeatures: BrokerFeatures,
featureCache: FinalizedFeatureCache,
threadNamePrefix: Option[String] = None)
extends ControllerEventProcessor with Logging with KafkaMetricsGroup {
this.logIdent = s"[Controller id=${config.brokerId}] "
@volatile private var brokerInfo = initialBrokerInfo
@volatile private var _brokerEpoch = initialBrokerEpoch
private val isAlterIsrEnabled = config.interBrokerProtocolVersion.isAlterIsrSupported
private val stateChangeLogger = new StateChangeLogger(config.brokerId, inControllerContext = true, None)
val controllerContext = new ControllerContext
var controllerChannelManager = new ControllerChannelManager(controllerContext, config, time, metrics,
stateChangeLogger, threadNamePrefix)
// have a separate scheduler for the controller to be able to start and stop independently of the kafka server
// visible for testing
private[controller] val kafkaScheduler = new KafkaScheduler(1)
// visible for testing
private[controller] val eventManager = new ControllerEventManager(config.brokerId, this, time,
controllerContext.stats.rateAndTimeMetrics)
private val brokerRequestBatch = new ControllerBrokerRequestBatch(config, controllerChannelManager,
eventManager, controllerContext, stateChangeLogger)
val replicaStateMachine: ReplicaStateMachine = new ZkReplicaStateMachine(config, stateChangeLogger, controllerContext, zkClient,
new ControllerBrokerRequestBatch(config, controllerChannelManager, eventManager, controllerContext, stateChangeLogger))
val partitionStateMachine: PartitionStateMachine = new ZkPartitionStateMachine(config, stateChangeLogger, controllerContext, zkClient,
new ControllerBrokerRequestBatch(config, controllerChannelManager, eventManager, controllerContext, stateChangeLogger))
val topicDeletionManager = new TopicDeletionManager(config, controllerContext, replicaStateMachine,
partitionStateMachine, new ControllerDeletionClient(this, zkClient))
private val controllerChangeHandler = new ControllerChangeHandler(eventManager)
private val brokerChangeHandler = new BrokerChangeHandler(eventManager)
private val brokerModificationsHandlers: mutable.Map[Int, BrokerModificationsHandler] = mutable.Map.empty
private val topicChangeHandler = new TopicChangeHandler(eventManager)
private val topicDeletionHandler = new TopicDeletionHandler(eventManager)
private val partitionModificationsHandlers: mutable.Map[String, PartitionModificationsHandler] = mutable.Map.empty
private val partitionReassignmentHandler = new PartitionReassignmentHandler(eventManager)
private val preferredReplicaElectionHandler = new PreferredReplicaElectionHandler(eventManager)
private val isrChangeNotificationHandler = new IsrChangeNotificationHandler(eventManager)
private val logDirEventNotificationHandler = new LogDirEventNotificationHandler(eventManager)
@volatile private var activeControllerId = -1
@volatile private var offlinePartitionCount = 0
@volatile private var preferredReplicaImbalanceCount = 0
@volatile private var globalTopicCount = 0
@volatile private var globalPartitionCount = 0
@volatile private var topicsToDeleteCount = 0
@volatile private var replicasToDeleteCount = 0
@volatile private var ineligibleTopicsToDeleteCount = 0
@volatile private var ineligibleReplicasToDeleteCount = 0
/* single-thread scheduler to clean expired tokens */
private val tokenCleanScheduler = new KafkaScheduler(threads = 1, threadNamePrefix = "delegation-token-cleaner")
newGauge("ActiveControllerCount", () => if (isActive) 1 else 0)
newGauge("OfflinePartitionsCount", () => offlinePartitionCount)
newGauge("PreferredReplicaImbalanceCount", () => preferredReplicaImbalanceCount)
newGauge("ControllerState", () => state.value)
newGauge("GlobalTopicCount", () => globalTopicCount)
newGauge("GlobalPartitionCount", () => globalPartitionCount)
newGauge("TopicsToDeleteCount", () => topicsToDeleteCount)
newGauge("ReplicasToDeleteCount", () => replicasToDeleteCount)
newGauge("TopicsIneligibleToDeleteCount", () => ineligibleTopicsToDeleteCount)
newGauge("ReplicasIneligibleToDeleteCount", () => ineligibleReplicasToDeleteCount)
/**
* Returns true if this broker is the current controller.
*/
def isActive: Boolean = activeControllerId == config.brokerId
def brokerEpoch: Long = _brokerEpoch
def epoch: Int = controllerContext.epoch
/**
* Invoked when the controller module of a Kafka server is started up. This does not assume that the current broker
* is the controller. It merely registers the session expiration listener and starts the controller leader
* elector
*/
def startup() = {
zkClient.registerStateChangeHandler(new StateChangeHandler {
override val name: String = StateChangeHandlers.ControllerHandler
override def afterInitializingSession(): Unit = {
eventManager.put(RegisterBrokerAndReelect)
}
override def beforeInitializingSession(): Unit = {
val queuedEvent = eventManager.clearAndPut(Expire)
// Block initialization of the new session until the expiration event is being handled,
// which ensures that all pending events have been processed before creating the new session
queuedEvent.awaitProcessing()
}
})
eventManager.put(Startup)
eventManager.start()
}
/**
* Invoked when the controller module of a Kafka server is shutting down. If the broker was the current controller,
* it shuts down the partition and replica state machines. If not, those are a no-op. In addition to that, it also
* shuts down the controller channel manager, if one exists (i.e. if it was the current controller)
*/
def shutdown(): Unit = {
eventManager.close()
onControllerResignation()
}
/**
* On controlled shutdown, the controller first determines the partitions that the
* shutting down broker leads, and moves leadership of those partitions to another broker
* that is in that partition's ISR.
*
   * @param id Id of the broker to shut down.
   * @param brokerEpoch The broker epoch in the controlled shutdown request
   * @param controlledShutdownCallback Invoked with the set of partitions the broker still leads,
   *                                   or with a failure if the controlled shutdown could not be processed.
*/
def controlledShutdown(id: Int, brokerEpoch: Long, controlledShutdownCallback: Try[Set[TopicPartition]] => Unit): Unit = {
val controlledShutdownEvent = ControlledShutdown(id, brokerEpoch, controlledShutdownCallback)
eventManager.put(controlledShutdownEvent)
}
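  // Illustrative caller-side sketch (names assumed): the callback receives either the set of
  // partitions the broker still leads or the failure that aborted the controlled shutdown.
  //
  //   controller.controlledShutdown(brokerId, brokerEpoch, {
  //     case Success(partitionsRemaining) => info(s"Partitions still led: $partitionsRemaining")
  //     case Failure(e) => warn("Controlled shutdown could not complete", e)
  //   })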
private[kafka] def updateBrokerInfo(newBrokerInfo: BrokerInfo): Unit = {
this.brokerInfo = newBrokerInfo
zkClient.updateBrokerInfo(newBrokerInfo)
}
private[kafka] def enableDefaultUncleanLeaderElection(): Unit = {
eventManager.put(UncleanLeaderElectionEnable)
}
private[kafka] def enableTopicUncleanLeaderElection(topic: String): Unit = {
if (isActive) {
eventManager.put(TopicUncleanLeaderElectionEnable(topic))
}
}
private def state: ControllerState = eventManager.state
/**
* This callback is invoked by the zookeeper leader elector on electing the current broker as the new controller.
* It does the following things on the become-controller state change -
* 1. Initializes the controller's context object that holds cache objects for current topics, live brokers and
* leaders for all existing partitions.
* 2. Starts the controller's channel manager
* 3. Starts the replica state machine
* 4. Starts the partition state machine
* If it encounters any unexpected exception/error while becoming controller, it resigns as the current controller.
* This ensures another controller election will be triggered and there will always be an actively serving controller
*/
private def onControllerFailover(): Unit = {
maybeSetupFeatureVersioning()
info("Registering handlers")
// before reading source of truth from zookeeper, register the listeners to get broker/topic callbacks
val childChangeHandlers = Seq(brokerChangeHandler, topicChangeHandler, topicDeletionHandler, logDirEventNotificationHandler,
isrChangeNotificationHandler)
childChangeHandlers.foreach(zkClient.registerZNodeChildChangeHandler)
val nodeChangeHandlers = Seq(preferredReplicaElectionHandler, partitionReassignmentHandler)
nodeChangeHandlers.foreach(zkClient.registerZNodeChangeHandlerAndCheckExistence)
info("Deleting log dir event notifications")
zkClient.deleteLogDirEventNotifications(controllerContext.epochZkVersion)
info("Deleting isr change notifications")
zkClient.deleteIsrChangeNotifications(controllerContext.epochZkVersion)
info("Initializing controller context")
initializeControllerContext()
info("Fetching topic deletions in progress")
val (topicsToBeDeleted, topicsIneligibleForDeletion) = fetchTopicDeletionsInProgress()
info("Initializing topic deletion manager")
topicDeletionManager.init(topicsToBeDeleted, topicsIneligibleForDeletion)
// We need to send UpdateMetadataRequest after the controller context is initialized and before the state machines
    // are started. This is because brokers need to receive the list of live brokers from UpdateMetadataRequest before
// they can process the LeaderAndIsrRequests that are generated by replicaStateMachine.startup() and
// partitionStateMachine.startup().
info("Sending update metadata request")
sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq, Set.empty)
replicaStateMachine.startup()
partitionStateMachine.startup()
info(s"Ready to serve as the new controller with epoch $epoch")
initializePartitionReassignments()
topicDeletionManager.tryTopicDeletion()
val pendingPreferredReplicaElections = fetchPendingPreferredReplicaElections()
onReplicaElection(pendingPreferredReplicaElections, ElectionType.PREFERRED, ZkTriggered)
info("Starting the controller scheduler")
kafkaScheduler.startup()
if (config.autoLeaderRebalanceEnable) {
scheduleAutoLeaderRebalanceTask(delay = 5, unit = TimeUnit.SECONDS)
}
if (config.tokenAuthEnabled) {
info("starting the token expiry check scheduler")
tokenCleanScheduler.startup()
tokenCleanScheduler.schedule(name = "delete-expired-tokens",
fun = () => tokenManager.expireTokens(),
period = config.delegationTokenExpiryCheckIntervalMs,
unit = TimeUnit.MILLISECONDS)
}
}
private def createFeatureZNode(newNode: FeatureZNode): Int = {
info(s"Creating FeatureZNode at path: ${FeatureZNode.path} with contents: $newNode")
zkClient.createFeatureZNode(newNode)
val (_, newVersion) = zkClient.getDataAndVersion(FeatureZNode.path)
newVersion
}
private def updateFeatureZNode(updatedNode: FeatureZNode): Int = {
info(s"Updating FeatureZNode at path: ${FeatureZNode.path} with contents: $updatedNode")
zkClient.updateFeatureZNode(updatedNode)
}
/**
* This method enables the feature versioning system (KIP-584).
*
* Development in Kafka (from a high level) is organized into features. Each feature is tracked by
* a name and a range of version numbers. A feature can be of two types:
*
* 1. Supported feature:
* A supported feature is represented by a name (string) and a range of versions (defined by a
* SupportedVersionRange). It refers to a feature that a particular broker advertises support for.
* Each broker advertises the version ranges of its own supported features in its own
* BrokerIdZNode. The contents of the advertisement are specific to the particular broker and
* do not represent any guarantee of a cluster-wide availability of the feature for any particular
* range of versions.
*
* 2. Finalized feature:
* A finalized feature is represented by a name (string) and a range of version levels (defined
* by a FinalizedVersionRange). Whenever the feature versioning system (KIP-584) is
* enabled, the finalized features are stored in the cluster-wide common FeatureZNode.
* In comparison to a supported feature, the key difference is that a finalized feature exists
* in ZK only when it is guaranteed to be supported by any random broker in the cluster for a
* specified range of version levels. Also, the controller is the only entity modifying the
* information about finalized features.
*
* This method sets up the FeatureZNode with enabled status, which means that the finalized
* features stored in the FeatureZNode are active. The enabled status should be written by the
* controller to the FeatureZNode only when the broker IBP config is greater than or equal to
* KAFKA_2_7_IV0.
*
* There are multiple cases handled here:
*
* 1. New cluster bootstrap:
* A new Kafka cluster (i.e. it is deployed first time) is almost always started with IBP config
* setting greater than or equal to KAFKA_2_7_IV0. We would like to start the cluster with all
* the possible supported features finalized immediately. Assuming this is the case, the
* controller will start up and notice that the FeatureZNode is absent in the new cluster,
* it will then create a FeatureZNode (with enabled status) containing the entire list of
* supported features as its finalized features.
*
* 2. Broker binary upgraded, but IBP config set to lower than KAFKA_2_7_IV0:
* Imagine there was an existing Kafka cluster with IBP config less than KAFKA_2_7_IV0, and the
* broker binary has now been upgraded to a newer version that supports the feature versioning
* system (KIP-584). But the IBP config is still set to lower than KAFKA_2_7_IV0, and may be
* set to a higher value later. In this case, we want to start with no finalized features and
* allow the user to finalize them whenever they are ready i.e. in the future whenever the
* user sets IBP config to be greater than or equal to KAFKA_2_7_IV0, then the user could start
* finalizing the features. This process ensures we do not enable all the possible features
* immediately after an upgrade, which could be harmful to Kafka.
* This is how we handle such a case:
* - Before the IBP config upgrade (i.e. IBP config set to less than KAFKA_2_7_IV0), the
* controller will start up and check if the FeatureZNode is absent.
* - If the node is absent, it will react by creating a FeatureZNode with disabled status
* and empty finalized features.
* - Otherwise, if a node already exists in enabled status then the controller will just
* flip the status to disabled and clear the finalized features.
* - After the IBP config upgrade (i.e. IBP config set to greater than or equal to
* KAFKA_2_7_IV0), when the controller starts up it will check if the FeatureZNode exists
* and whether it is disabled.
* - If the node is in disabled status, the controller won’t upgrade all features immediately.
* Instead it will just switch the FeatureZNode status to enabled status. This lets the
* user finalize the features later.
* - Otherwise, if a node already exists in enabled status then the controller will leave
   *          the node unmodified.
*
* 3. Broker binary upgraded, with existing cluster IBP config >= KAFKA_2_7_IV0:
* Imagine there was an existing Kafka cluster with IBP config >= KAFKA_2_7_IV0, and the broker
* binary has just been upgraded to a newer version (that supports IBP config KAFKA_2_7_IV0 and
* higher). The controller will start up and find that a FeatureZNode is already present with
* enabled status and existing finalized features. In such a case, the controller leaves the node
* unmodified.
*
* 4. Broker downgrade:
* Imagine that a Kafka cluster exists already and the IBP config is greater than or equal to
* KAFKA_2_7_IV0. Then, the user decided to downgrade the cluster by setting IBP config to a
* value less than KAFKA_2_7_IV0. This means the user is also disabling the feature versioning
* system (KIP-584). In this case, when the controller starts up with the lower IBP config, it
* will switch the FeatureZNode status to disabled with empty features.
*/
private def enableFeatureVersioning(): Unit = {
val (mayBeFeatureZNodeBytes, version) = zkClient.getDataAndVersion(FeatureZNode.path)
if (version == ZkVersion.UnknownVersion) {
val newVersion = createFeatureZNode(new FeatureZNode(FeatureZNodeStatus.Enabled,
brokerFeatures.defaultFinalizedFeatures))
featureCache.waitUntilEpochOrThrow(newVersion, config.zkConnectionTimeoutMs)
} else {
val existingFeatureZNode = FeatureZNode.decode(mayBeFeatureZNodeBytes.get)
val newFeatures = existingFeatureZNode.status match {
case FeatureZNodeStatus.Enabled => existingFeatureZNode.features
case FeatureZNodeStatus.Disabled =>
if (!existingFeatureZNode.features.empty()) {
warn(s"FeatureZNode at path: ${FeatureZNode.path} with disabled status" +
s" contains non-empty features: ${existingFeatureZNode.features}")
}
Features.emptyFinalizedFeatures
}
val newFeatureZNode = new FeatureZNode(FeatureZNodeStatus.Enabled, newFeatures)
if (!newFeatureZNode.equals(existingFeatureZNode)) {
val newVersion = updateFeatureZNode(newFeatureZNode)
featureCache.waitUntilEpochOrThrow(newVersion, config.zkConnectionTimeoutMs)
}
}
}
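  // Worked example (illustrative): on a brand new cluster with IBP >= KAFKA_2_7_IV0 (case 1 in
  // the comment above), the FeatureZNode is absent, so enableFeatureVersioning() creates it with
  // Enabled status and brokerFeatures.defaultFinalizedFeatures as the finalized features, then
  // blocks in featureCache.waitUntilEpochOrThrow(newVersion, config.zkConnectionTimeoutMs) until
  // the local finalized-feature cache has caught up with the newly written ZK version.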
/**
* Disables the feature versioning system (KIP-584).
*
* Sets up the FeatureZNode with disabled status. This status means the feature versioning system
* (KIP-584) is disabled, and, the finalized features stored in the FeatureZNode are not relevant.
* This status should be written by the controller to the FeatureZNode only when the broker
* IBP config is less than KAFKA_2_7_IV0.
*
* NOTE:
* 1. When this method returns, existing finalized features (if any) will be cleared from the
* FeatureZNode.
* 2. This method, unlike enableFeatureVersioning() need not wait for the FinalizedFeatureCache
* to be updated, because, such updates to the cache (via FinalizedFeatureChangeListener)
* are disabled when IBP config is < than KAFKA_2_7_IV0.
*/
private def disableFeatureVersioning(): Unit = {
val newNode = FeatureZNode(FeatureZNodeStatus.Disabled, Features.emptyFinalizedFeatures())
val (mayBeFeatureZNodeBytes, version) = zkClient.getDataAndVersion(FeatureZNode.path)
if (version == ZkVersion.UnknownVersion) {
createFeatureZNode(newNode)
} else {
val existingFeatureZNode = FeatureZNode.decode(mayBeFeatureZNodeBytes.get)
if (existingFeatureZNode.status == FeatureZNodeStatus.Disabled &&
!existingFeatureZNode.features.empty()) {
warn(s"FeatureZNode at path: ${FeatureZNode.path} with disabled status" +
s" contains non-empty features: ${existingFeatureZNode.features}")
}
if (!newNode.equals(existingFeatureZNode)) {
updateFeatureZNode(newNode)
}
}
}
private def maybeSetupFeatureVersioning(): Unit = {
if (config.isFeatureVersioningSupported) {
enableFeatureVersioning()
} else {
disableFeatureVersioning()
}
}
private def scheduleAutoLeaderRebalanceTask(delay: Long, unit: TimeUnit): Unit = {
kafkaScheduler.schedule("auto-leader-rebalance-task", () => eventManager.put(AutoPreferredReplicaLeaderElection),
delay = delay, unit = unit)
}
/**
* This callback is invoked by the zookeeper leader elector when the current broker resigns as the controller. This is
* required to clean up internal controller data structures
*/
private def onControllerResignation(): Unit = {
debug("Resigning")
// de-register listeners
zkClient.unregisterZNodeChildChangeHandler(isrChangeNotificationHandler.path)
zkClient.unregisterZNodeChangeHandler(partitionReassignmentHandler.path)
zkClient.unregisterZNodeChangeHandler(preferredReplicaElectionHandler.path)
zkClient.unregisterZNodeChildChangeHandler(logDirEventNotificationHandler.path)
unregisterBrokerModificationsHandler(brokerModificationsHandlers.keySet)
// shutdown leader rebalance scheduler
kafkaScheduler.shutdown()
offlinePartitionCount = 0
preferredReplicaImbalanceCount = 0
globalTopicCount = 0
globalPartitionCount = 0
topicsToDeleteCount = 0
replicasToDeleteCount = 0
ineligibleTopicsToDeleteCount = 0
ineligibleReplicasToDeleteCount = 0
// stop token expiry check scheduler
if (tokenCleanScheduler.isStarted)
tokenCleanScheduler.shutdown()
// de-register partition ISR listener for on-going partition reassignment task
unregisterPartitionReassignmentIsrChangeHandlers()
// shutdown partition state machine
partitionStateMachine.shutdown()
zkClient.unregisterZNodeChildChangeHandler(topicChangeHandler.path)
unregisterPartitionModificationsHandlers(partitionModificationsHandlers.keys.toSeq)
zkClient.unregisterZNodeChildChangeHandler(topicDeletionHandler.path)
// shutdown replica state machine
replicaStateMachine.shutdown()
zkClient.unregisterZNodeChildChangeHandler(brokerChangeHandler.path)
controllerChannelManager.shutdown()
controllerContext.resetContext()
info("Resigned")
}
/*
* This callback is invoked by the controller's LogDirEventNotificationListener with the list of broker ids who
* have experienced new log directory failures. In response the controller should send LeaderAndIsrRequest
* to all these brokers to query the state of their replicas. Replicas with an offline log directory respond with
* KAFKA_STORAGE_ERROR, which will be handled by the LeaderAndIsrResponseReceived event.
*/
private def onBrokerLogDirFailure(brokerIds: Seq[Int]): Unit = {
// send LeaderAndIsrRequest for all replicas on those brokers to see if they are still online.
info(s"Handling log directory failure for brokers ${brokerIds.mkString(",")}")
val replicasOnBrokers = controllerContext.replicasOnBrokers(brokerIds.toSet)
replicaStateMachine.handleStateChanges(replicasOnBrokers.toSeq, OnlineReplica)
}
/**
* This callback is invoked by the replica state machine's broker change listener, with the list of newly started
* brokers as input. It does the following -
* 1. Sends update metadata request to all live and shutting down brokers
* 2. Triggers the OnlinePartition state change for all new/offline partitions
* 3. It checks whether there are reassigned replicas assigned to any newly started brokers. If
* so, it performs the reassignment logic for each topic/partition.
*
* Note that we don't need to refresh the leader/isr cache for all topic/partitions at this point for two reasons:
* 1. The partition state machine, when triggering online state change, will refresh leader and ISR for only those
* partitions currently new or offline (rather than every partition this controller is aware of)
* 2. Even if we do refresh the cache, there is no guarantee that by the time the leader and ISR request reaches
* every broker that it is still valid. Brokers check the leader epoch to determine validity of the request.
*/
private def onBrokerStartup(newBrokers: Seq[Int]): Unit = {
info(s"New broker startup callback for ${newBrokers.mkString(",")}")
newBrokers.foreach(controllerContext.replicasOnOfflineDirs.remove)
val newBrokersSet = newBrokers.toSet
val existingBrokers = controllerContext.liveOrShuttingDownBrokerIds.diff(newBrokersSet)
// Send update metadata request to all the existing brokers in the cluster so that they know about the new brokers
// via this update. No need to include any partition states in the request since there are no partition state changes.
sendUpdateMetadataRequest(existingBrokers.toSeq, Set.empty)
// Send update metadata request to all the new brokers in the cluster with a full set of partition states for initialization.
// In cases of controlled shutdown leaders will not be elected when a new broker comes up. So at least in the
// common controlled shutdown case, the metadata will reach the new brokers faster.
sendUpdateMetadataRequest(newBrokers, controllerContext.partitionsWithLeaders)
// the very first thing to do when a new broker comes up is send it the entire list of partitions that it is
// supposed to host. Based on that the broker starts the high watermark threads for the input list of partitions
val allReplicasOnNewBrokers = controllerContext.replicasOnBrokers(newBrokersSet)
replicaStateMachine.handleStateChanges(allReplicasOnNewBrokers.toSeq, OnlineReplica)
// when a new broker comes up, the controller needs to trigger leader election for all new and offline partitions
// to see if these brokers can become leaders for some/all of those
partitionStateMachine.triggerOnlinePartitionStateChange()
// check if reassignment of some partitions need to be restarted
maybeResumeReassignments { (_, assignment) =>
assignment.targetReplicas.exists(newBrokersSet.contains)
}
// check if topic deletion needs to be resumed. If at least one replica that belongs to the topic being deleted exists
// on the newly restarted brokers, there is a chance that topic deletion can resume
val replicasForTopicsToBeDeleted = allReplicasOnNewBrokers.filter(p => topicDeletionManager.isTopicQueuedUpForDeletion(p.topic))
if (replicasForTopicsToBeDeleted.nonEmpty) {
info(s"Some replicas ${replicasForTopicsToBeDeleted.mkString(",")} for topics scheduled for deletion " +
s"${controllerContext.topicsToBeDeleted.mkString(",")} are on the newly restarted brokers " +
s"${newBrokers.mkString(",")}. Signaling restart of topic deletion for these topics")
topicDeletionManager.resumeDeletionForTopics(replicasForTopicsToBeDeleted.map(_.topic))
}
registerBrokerModificationsHandler(newBrokers)
}
private def maybeResumeReassignments(shouldResume: (TopicPartition, ReplicaAssignment) => Boolean): Unit = {
controllerContext.partitionsBeingReassigned.foreach { tp =>
val currentAssignment = controllerContext.partitionFullReplicaAssignment(tp)
if (shouldResume(tp, currentAssignment))
onPartitionReassignment(tp, currentAssignment)
}
}
private def registerBrokerModificationsHandler(brokerIds: Iterable[Int]): Unit = {
debug(s"Register BrokerModifications handler for $brokerIds")
brokerIds.foreach { brokerId =>
val brokerModificationsHandler = new BrokerModificationsHandler(eventManager, brokerId)
zkClient.registerZNodeChangeHandlerAndCheckExistence(brokerModificationsHandler)
brokerModificationsHandlers.put(brokerId, brokerModificationsHandler)
}
}
private def unregisterBrokerModificationsHandler(brokerIds: Iterable[Int]): Unit = {
debug(s"Unregister BrokerModifications handler for $brokerIds")
brokerIds.foreach { brokerId =>
brokerModificationsHandlers.remove(brokerId).foreach(handler => zkClient.unregisterZNodeChangeHandler(handler.path))
}
}
/*
* This callback is invoked by the replica state machine's broker change listener with the list of failed brokers
   * as input. It will call onReplicasBecomeOffline(...) with the list of replicas on those failed brokers as input.
*/
private def onBrokerFailure(deadBrokers: Seq[Int]): Unit = {
info(s"Broker failure callback for ${deadBrokers.mkString(",")}")
deadBrokers.foreach(controllerContext.replicasOnOfflineDirs.remove)
val deadBrokersThatWereShuttingDown =
deadBrokers.filter(id => controllerContext.shuttingDownBrokerIds.remove(id))
if (deadBrokersThatWereShuttingDown.nonEmpty)
info(s"Removed ${deadBrokersThatWereShuttingDown.mkString(",")} from list of shutting down brokers.")
val allReplicasOnDeadBrokers = controllerContext.replicasOnBrokers(deadBrokers.toSet)
onReplicasBecomeOffline(allReplicasOnDeadBrokers)
unregisterBrokerModificationsHandler(deadBrokers)
}
private def onBrokerUpdate(updatedBrokerId: Int): Unit = {
info(s"Broker info update callback for $updatedBrokerId")
sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq, Set.empty)
}
/**
* This method marks the given replicas as offline. It does the following -
* 1. Marks the given partitions as offline
* 2. Triggers the OnlinePartition state change for all new/offline partitions
* 3. Invokes the OfflineReplica state change on the input list of newly offline replicas
* 4. If no partitions are affected then send UpdateMetadataRequest to live or shutting down brokers
*
* Note that we don't need to refresh the leader/isr cache for all topic/partitions at this point. This is because
* the partition state machine will refresh our cache for us when performing leader election for all new/offline
* partitions coming online.
*/
private def onReplicasBecomeOffline(newOfflineReplicas: Set[PartitionAndReplica]): Unit = {
val (newOfflineReplicasForDeletion, newOfflineReplicasNotForDeletion) =
newOfflineReplicas.partition(p => topicDeletionManager.isTopicQueuedUpForDeletion(p.topic))
val partitionsWithOfflineLeader = controllerContext.partitionsWithOfflineLeader
// trigger OfflinePartition state for all partitions whose current leader is one amongst the newOfflineReplicas
partitionStateMachine.handleStateChanges(partitionsWithOfflineLeader.toSeq, OfflinePartition)
// trigger OnlinePartition state changes for offline or new partitions
partitionStateMachine.triggerOnlinePartitionStateChange()
// trigger OfflineReplica state change for those newly offline replicas
replicaStateMachine.handleStateChanges(newOfflineReplicasNotForDeletion.toSeq, OfflineReplica)
// fail deletion of topics that are affected by the offline replicas
if (newOfflineReplicasForDeletion.nonEmpty) {
// it is required to mark the respective replicas in TopicDeletionFailed state since the replica cannot be
// deleted when its log directory is offline. This will prevent the replica from being in TopicDeletionStarted state indefinitely
// since topic deletion cannot be retried until at least one replica is in TopicDeletionStarted state
topicDeletionManager.failReplicaDeletion(newOfflineReplicasForDeletion)
}
// If replica failure did not require leader re-election, inform brokers of the offline brokers
// Note that during leader re-election, brokers update their metadata
if (partitionsWithOfflineLeader.isEmpty) {
sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq, Set.empty)
}
}
/**
   * This callback is invoked by the topic change callback with the set of newly created partitions as input.
* It does the following -
* 1. Move the newly created partitions to the NewPartition state
* 2. Move the newly created partitions from NewPartition->OnlinePartition state
*/
private def onNewPartitionCreation(newPartitions: Set[TopicPartition]): Unit = {
info(s"New partition creation callback for ${newPartitions.mkString(",")}")
partitionStateMachine.handleStateChanges(newPartitions.toSeq, NewPartition)
replicaStateMachine.handleStateChanges(controllerContext.replicasForPartition(newPartitions).toSeq, NewReplica)
partitionStateMachine.handleStateChanges(
newPartitions.toSeq,
OnlinePartition,
Some(OfflinePartitionLeaderElectionStrategy(false))
)
replicaStateMachine.handleStateChanges(controllerContext.replicasForPartition(newPartitions).toSeq, OnlineReplica)
}
/**
* This callback is invoked:
* 1. By the AlterPartitionReassignments API
* 2. By the reassigned partitions listener which is triggered when the /admin/reassign/partitions znode is created
* 3. When an ongoing reassignment finishes - this is detected by a change in the partition's ISR znode
* 4. Whenever a new broker comes up which is part of an ongoing reassignment
* 5. On controller startup/failover
*
* Reassigning replicas for a partition goes through a few steps listed in the code.
* RS = current assigned replica set
* ORS = Original replica set for partition
* TRS = Reassigned (target) replica set
* AR = The replicas we are adding as part of this reassignment
* RR = The replicas we are removing as part of this reassignment
*
* A reassignment may have up to three phases, each with its own steps:
   * Phase U (Assignment update): Regardless of the trigger, the first step in the reassignment process
* is to update the existing assignment state. We always update the state in Zookeeper before
* we update memory so that it can be resumed upon controller fail-over.
*
* U1. Update ZK with RS = ORS + TRS, AR = TRS - ORS, RR = ORS - TRS.
* U2. Update memory with RS = ORS + TRS, AR = TRS - ORS and RR = ORS - TRS
* U3. If we are cancelling or replacing an existing reassignment, send StopReplica to all members
* of AR in the original reassignment if they are not in TRS from the new assignment
*
* To complete the reassignment, we need to bring the new replicas into sync, so depending on the state
* of the ISR, we will execute one of the following steps.
*
* Phase A (when TRS != ISR): The reassignment is not yet complete
*
* A1. Bump the leader epoch for the partition and send LeaderAndIsr updates to RS.
* A2. Start new replicas AR by moving replicas in AR to NewReplica state.
*
* Phase B (when TRS = ISR): The reassignment is complete
*
* B1. Move all replicas in AR to OnlineReplica state.
* B2. Set RS = TRS, AR = [], RR = [] in memory.
* B3. Send a LeaderAndIsr request with RS = TRS. This will prevent the leader from adding any replica in TRS - ORS back in the isr.
* If the current leader is not in TRS or isn't alive, we move the leader to a new replica in TRS.
* We may send the LeaderAndIsr to more than the TRS replicas due to the
* way the partition state machine works (it reads replicas from ZK)
* B4. Move all replicas in RR to OfflineReplica state. As part of OfflineReplica state change, we shrink the
* isr to remove RR in ZooKeeper and send a LeaderAndIsr ONLY to the Leader to notify it of the shrunk isr.
* After that, we send a StopReplica (delete = false) to the replicas in RR.
* B5. Move all replicas in RR to NonExistentReplica state. This will send a StopReplica (delete = true) to
* the replicas in RR to physically delete the replicas on disk.
* B6. Update ZK with RS=TRS, AR=[], RR=[].
* B7. Remove the ISR reassign listener and maybe update the /admin/reassign_partitions path in ZK to remove this partition from it if present.
* B8. After electing leader, the replicas and isr information changes. So resend the update metadata request to every broker.
*
* In general, there are two goals we want to aim for:
* 1. Every replica present in the replica set of a LeaderAndIsrRequest gets the request sent to it
* 2. Replicas that are removed from a partition's assignment get StopReplica sent to them
*
* For example, if ORS = {1,2,3} and TRS = {4,5,6}, the values in the topic and leader/isr paths in ZK
* may go through the following transitions.
* RS AR RR leader isr
* {1,2,3} {} {} 1 {1,2,3} (initial state)
* {4,5,6,1,2,3} {4,5,6} {1,2,3} 1 {1,2,3} (step A2)
* {4,5,6,1,2,3} {4,5,6} {1,2,3} 1 {1,2,3,4,5,6} (phase B)
* {4,5,6,1,2,3} {4,5,6} {1,2,3} 4 {1,2,3,4,5,6} (step B3)
* {4,5,6,1,2,3} {4,5,6} {1,2,3} 4 {4,5,6} (step B4)
* {4,5,6} {} {} 4 {4,5,6} (step B6)
*
* Note that we have to update RS in ZK with TRS last since it's the only place where we store ORS persistently.
* This way, if the controller crashes before that step, we can still recover.
*/
private def onPartitionReassignment(topicPartition: TopicPartition, reassignment: ReplicaAssignment): Unit = {
// While a reassignment is in progress, deletion is not allowed
topicDeletionManager.markTopicIneligibleForDeletion(Set(topicPartition.topic), reason = "topic reassignment in progress")
updateCurrentReassignment(topicPartition, reassignment)
val addingReplicas = reassignment.addingReplicas
val removingReplicas = reassignment.removingReplicas
if (!isReassignmentComplete(topicPartition, reassignment)) {
// A1. Send LeaderAndIsr request to every replica in ORS + TRS (with the new RS, AR and RR).
updateLeaderEpochAndSendRequest(topicPartition, reassignment)
// A2. replicas in AR -> NewReplica
startNewReplicasForReassignedPartition(topicPartition, addingReplicas)
} else {
// B1. replicas in AR -> OnlineReplica
replicaStateMachine.handleStateChanges(addingReplicas.map(PartitionAndReplica(topicPartition, _)), OnlineReplica)
// B2. Set RS = TRS, AR = [], RR = [] in memory.
val completedReassignment = ReplicaAssignment(reassignment.targetReplicas)
controllerContext.updatePartitionFullReplicaAssignment(topicPartition, completedReassignment)
// B3. Send LeaderAndIsr request with a potential new leader (if current leader not in TRS) and
// a new RS (using TRS) and same isr to every broker in ORS + TRS or TRS
moveReassignedPartitionLeaderIfRequired(topicPartition, completedReassignment)
// B4. replicas in RR -> Offline (force those replicas out of isr)
// B5. replicas in RR -> NonExistentReplica (force those replicas to be deleted)
stopRemovedReplicasOfReassignedPartition(topicPartition, removingReplicas)
// B6. Update ZK with RS = TRS, AR = [], RR = [].
updateReplicaAssignmentForPartition(topicPartition, completedReassignment)
// B7. Remove the ISR reassign listener and maybe update the /admin/reassign_partitions path in ZK to remove this partition from it.
removePartitionFromReassigningPartitions(topicPartition, completedReassignment)
// B8. After electing a leader in B3, the replicas and isr information changes, so resend the update metadata request to every broker
sendUpdateMetadataRequest(controllerContext.liveOrShuttingDownBrokerIds.toSeq, Set(topicPartition))
// signal delete topic thread if reassignment for some partitions belonging to topics being deleted just completed
topicDeletionManager.resumeDeletionForTopics(Set(topicPartition.topic))
}
}
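// --- Illustrative sketch (not part of the controller logic) ---
// A minimal, self-contained example of the set arithmetic described in the scaladoc of
// onPartitionReassignment, using plain Ints as broker ids. The method name and the
// Seq-based signature are hypothetical; the controller itself derives these values
// through ReplicaAssignment.
private def exampleReassignmentSets(ors: Seq[Int], trs: Seq[Int]): (Seq[Int], Seq[Int], Seq[Int]) = {
  val rs = (trs ++ ors).distinct // RS = ORS + TRS, with the target replicas listed first
  val ar = trs.diff(ors)         // AR = TRS - ORS, the replicas being added
  val rr = ors.diff(trs)         // RR = ORS - TRS, the replicas being removed
  (rs, ar, rr)
}
// For ORS = Seq(1, 2, 3) and TRS = Seq(4, 5, 6) this yields RS = Seq(4, 5, 6, 1, 2, 3),
// AR = Seq(4, 5, 6) and RR = Seq(1, 2, 3), matching the ZK transition table in the scaladoc.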
/**
* Update the current assignment state in Zookeeper and in memory. If a reassignment is already in
* progress, then the new reassignment will supplant it and some replicas will be shut down.
*
* Note that due to the way we compute the original replica set, we cannot guarantee that a
* cancellation will restore the original replica order. Target replicas are always listed
* first in the replica set in the desired order, which means we have no way to get to the
* original order if the reassignment overlaps with the current assignment. For example,
* with an initial assignment of [1, 2, 3] and a reassignment of [3, 4, 2], then the replicas
* will be encoded as [3, 4, 2, 1] while the reassignment is in progress. If the reassignment
* is cancelled, there is no way to restore the original order.
*
* @param topicPartition The reassigning partition
* @param reassignment The new reassignment
*/
private def updateCurrentReassignment(topicPartition: TopicPartition, reassignment: ReplicaAssignment): Unit = {
val currentAssignment = controllerContext.partitionFullReplicaAssignment(topicPartition)
if (currentAssignment != reassignment) {
debug(s"Updating assignment of partition $topicPartition from $currentAssignment to $reassignment")
// U1. Update assignment state in zookeeper
updateReplicaAssignmentForPartition(topicPartition, reassignment)
// U2. Update assignment state in memory
controllerContext.updatePartitionFullReplicaAssignment(topicPartition, reassignment)
// If there is a reassignment already in progress, then some of the currently adding replicas
// may be eligible for immediate removal, in which case we need to stop the replicas.
val unneededReplicas = currentAssignment.replicas.diff(reassignment.replicas)
if (unneededReplicas.nonEmpty)
stopRemovedReplicasOfReassignedPartition(topicPartition, unneededReplicas)
}
if (!isAlterIsrEnabled) {
val reassignIsrChangeHandler = new PartitionReassignmentIsrChangeHandler(eventManager, topicPartition)
zkClient.registerZNodeChangeHandler(reassignIsrChangeHandler)
}
controllerContext.partitionsBeingReassigned.add(topicPartition)
}
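// --- Illustrative sketch (not part of the controller logic) ---
// The ordering caveat in the scaladoc above made concrete, reusing the hypothetical helper
// defined after onPartitionReassignment: starting from [1, 2, 3] and reassigning to
// [3, 4, 2], the replica set recorded while the reassignment is in progress is
// [3, 4, 2, 1], so the original order [1, 2, 3] cannot be recovered on cancellation.
private def exampleOverlappingReassignmentEncoding(): Seq[Int] = {
  val (rs, _, _) = exampleReassignmentSets(ors = Seq(1, 2, 3), trs = Seq(3, 4, 2))
  rs // Seq(3, 4, 2, 1)
}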
/**
* Trigger a partition reassignment provided that the topic exists and is not being deleted.
*
* This is called when a reassignment is initially received either through Zookeeper or through the
* AlterPartitionReassignments API
*
* The `partitionsBeingReassigned` field in the controller context will be updated by this
* call after the reassignment completes validation and is successfully stored in the topic
* assignment zNode.
*
* @param reassignments The reassignments to begin processing
* @return A map of any errors in the reassignment. If the error is NONE for a given partition,
* then the reassignment was submitted successfully.
*/
private def maybeTriggerPartitionReassignment(reassignments: Map[TopicPartition, ReplicaAssignment]): Map[TopicPartition, ApiError] = {
reassignments.map { case (tp, reassignment) =>
val topic = tp.topic
val apiError = if (topicDeletionManager.isTopicQueuedUpForDeletion(topic)) {
info(s"Skipping reassignment of $tp since the topic is currently being deleted")
new ApiError(Errors.UNKNOWN_TOPIC_OR_PARTITION, "The partition does not exist.")
} else {
val assignedReplicas = controllerContext.partitionReplicaAssignment(tp)
if (assignedReplicas.nonEmpty) {
try {
onPartitionReassignment(tp, reassignment)
ApiError.NONE
} catch {
case e: ControllerMovedException =>
info(s"Failed completing reassignment of partition $tp because controller has moved to another broker")
throw e
case e: Throwable =>
error(s"Error completing reassignment of partition $tp", e)
new ApiError(Errors.UNKNOWN_SERVER_ERROR)
}
} else {
new ApiError(Errors.UNKNOWN_TOPIC_OR_PARTITION, "The partition does not exist.")
}
}
tp -> apiError
}
}
/**
* Attempt to elect a replica as leader for each of the given partitions.
* @param partitions The partitions to have a new leader elected
* @param electionType The type of election to perform
* @param electionTrigger The reason for triggering this election
* @return A map of failed and successful elections. The keys are the topic partitions and the corresponding values are
* either the exception that was thrown or the new leader and ISR.
*/
private[this] def onReplicaElection(
partitions: Set[TopicPartition],
electionType: ElectionType,
electionTrigger: ElectionTrigger
): Map[TopicPartition, Either[Throwable, LeaderAndIsr]] = {
info(s"Starting replica leader election ($electionType) for partitions ${partitions.mkString(",")} triggered by $electionTrigger")
try {
val strategy = electionType match {
case ElectionType.PREFERRED => PreferredReplicaPartitionLeaderElectionStrategy
case ElectionType.UNCLEAN =>
/* Let's be conservative and only trigger unclean election if the election type is unclean and it was
* triggered by the admin client
*/
OfflinePartitionLeaderElectionStrategy(allowUnclean = electionTrigger == AdminClientTriggered)
}
val results = partitionStateMachine.handleStateChanges(
partitions.toSeq,
OnlinePartition,
Some(strategy)
)
if (electionTrigger != AdminClientTriggered) {
results.foreach {
case (tp, Left(throwable)) =>
if (throwable.isInstanceOf[ControllerMovedException]) {
info(s"Error completing replica leader election ($electionType) for partition $tp because controller has moved to another broker.", throwable)
throw throwable
} else {
error(s"Error completing replica leader election ($electionType) for partition $tp", throwable)
}
case (_, Right(_)) => // Ignored; No need to log or throw exception for the success cases
}
}
results
} finally {
if (electionTrigger != AdminClientTriggered) {
removePartitionsFromPreferredReplicaElection(partitions, electionTrigger == AutoTriggered)
}
}
}
private def initializeControllerContext(): Unit = {
// update controller cache with delete topic information
val curBrokerAndEpochs = zkClient.getAllBrokerAndEpochsInCluster
val (compatibleBrokerAndEpochs, incompatibleBrokerAndEpochs) = partitionOnFeatureCompatibility(curBrokerAndEpochs)
if (!incompatibleBrokerAndEpochs.isEmpty) {
warn("Ignoring registration of new brokers due to incompatibilities with finalized features: " +
incompatibleBrokerAndEpochs.map { case (broker, _) => broker.id }.toSeq.sorted.mkString(","))
}
controllerContext.setLiveBrokers(compatibleBrokerAndEpochs)
info(s"Initialized broker epochs cache: ${controllerContext.liveBrokerIdAndEpochs}")
controllerContext.setAllTopics(zkClient.getAllTopicsInCluster(true))
registerPartitionModificationsHandlers(controllerContext.allTopics.toSeq)
val replicaAssignmentAndTopicIds = zkClient.getReplicaAssignmentAndTopicIdForTopics(controllerContext.allTopics.toSet)
processTopicIds(replicaAssignmentAndTopicIds)
replicaAssignmentAndTopicIds.foreach { case TopicIdReplicaAssignment(_, _, assignments) =>
assignments.foreach { case (topicPartition, replicaAssignment) =>
controllerContext.updatePartitionFullReplicaAssignment(topicPartition, replicaAssignment)
if (replicaAssignment.isBeingReassigned)
controllerContext.partitionsBeingReassigned.add(topicPartition)
}
}
controllerContext.clearPartitionLeadershipInfo()
controllerContext.shuttingDownBrokerIds.clear()
// register broker modifications handlers
registerBrokerModificationsHandler(controllerContext.liveOrShuttingDownBrokerIds)
// update the leader and isr cache for all existing partitions from Zookeeper
updateLeaderAndIsrCache()
// start the channel manager
controllerChannelManager.startup()
info(s"Currently active brokers in the cluster: ${controllerContext.liveBrokerIds}")
info(s"Currently shutting brokers in the cluster: ${controllerContext.shuttingDownBrokerIds}")
info(s"Current list of topics in the cluster: ${controllerContext.allTopics}")
}
private def fetchPendingPreferredReplicaElections(): Set[TopicPartition] = {
val partitionsUndergoingPreferredReplicaElection = zkClient.getPreferredReplicaElection
// check if they are already completed or topic was deleted
val partitionsThatCompletedPreferredReplicaElection = partitionsUndergoingPreferredReplicaElection.filter { partition =>
val replicas = controllerContext.partitionReplicaAssignment(partition)
val topicDeleted = replicas.isEmpty
val successful =
if (!topicDeleted) controllerContext.partitionLeadershipInfo(partition).get.leaderAndIsr.leader == replicas.head else false
successful || topicDeleted
}
val pendingPreferredReplicaElectionsIgnoringTopicDeletion = partitionsUndergoingPreferredReplicaElection -- partitionsThatCompletedPreferredReplicaElection
val pendingPreferredReplicaElectionsSkippedFromTopicDeletion = pendingPreferredReplicaElectionsIgnoringTopicDeletion.filter(partition => topicDeletionManager.isTopicQueuedUpForDeletion(partition.topic))
val pendingPreferredReplicaElections = pendingPreferredReplicaElectionsIgnoringTopicDeletion -- pendingPreferredReplicaElectionsSkippedFromTopicDeletion
info(s"Partitions undergoing preferred replica election: ${partitionsUndergoingPreferredReplicaElection.mkString(",")}")
info(s"Partitions that completed preferred replica election: ${partitionsThatCompletedPreferredReplicaElection.mkString(",")}")
info(s"Skipping preferred replica election for partitions due to topic deletion: ${pendingPreferredReplicaElectionsSkippedFromTopicDeletion.mkString(",")}")
info(s"Resuming preferred replica election for partitions: ${pendingPreferredReplicaElections.mkString(",")}")
pendingPreferredReplicaElections
}
/**
* Initialize pending reassignments. This includes reassignments sent through /admin/reassign_partitions,
* which will supplant any API reassignments already in progress.
*/
private def initializePartitionReassignments(): Unit = {
// New reassignments may have been submitted through Zookeeper while the controller was failing over
val zkPartitionsResumed = processZkPartitionReassignment()
// We may also have some API-based reassignments that need to be restarted
maybeResumeReassignments { (tp, _) =>
!zkPartitionsResumed.contains(tp)
}
}
private def fetchTopicDeletionsInProgress(): (Set[String], Set[String]) = {
val topicsToBeDeleted = zkClient.getTopicDeletions.toSet
val topicsWithOfflineReplicas = controllerContext.allTopics.filter { topic => {
val replicasForTopic = controllerContext.replicasForTopic(topic)
replicasForTopic.exists(r => !controllerContext.isReplicaOnline(r.replica, r.topicPartition))
}}
val topicsForWhichPartitionReassignmentIsInProgress = controllerContext.partitionsBeingReassigned.map(_.topic)
val topicsIneligibleForDeletion = topicsWithOfflineReplicas | topicsForWhichPartitionReassignmentIsInProgress
info(s"List of topics to be deleted: ${topicsToBeDeleted.mkString(",")}")
info(s"List of topics ineligible for deletion: ${topicsIneligibleForDeletion.mkString(",")}")
(topicsToBeDeleted, topicsIneligibleForDeletion)
}
private def updateLeaderAndIsrCache(partitions: Seq[TopicPartition] = controllerContext.allPartitions.toSeq): Unit = {
val leaderIsrAndControllerEpochs = zkClient.getTopicPartitionStates(partitions)
leaderIsrAndControllerEpochs.forKeyValue { (partition, leaderIsrAndControllerEpoch) =>
controllerContext.putPartitionLeadershipInfo(partition, leaderIsrAndControllerEpoch)
}
}
private def isReassignmentComplete(partition: TopicPartition, assignment: ReplicaAssignment): Boolean = {
if (!assignment.isBeingReassigned) {
true
} else {
zkClient.getTopicPartitionStates(Seq(partition)).get(partition).exists { leaderIsrAndControllerEpoch =>
val isr = leaderIsrAndControllerEpoch.leaderAndIsr.isr.toSet
val targetReplicas = assignment.targetReplicas.toSet
targetReplicas.subsetOf(isr)
}
}
}
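// --- Illustrative sketch (not part of the controller logic) ---
// The completion condition above reduces to a subset check: the reassignment is complete
// once every target replica has joined the ISR. Hypothetical helper over plain broker-id sets.
private def exampleTargetReplicasCaughtUp(targetReplicas: Set[Int], isr: Set[Int]): Boolean =
  targetReplicas.subsetOf(isr)
// exampleTargetReplicasCaughtUp(Set(4, 5, 6), Set(1, 2, 3, 4, 5, 6)) == true
// exampleTargetReplicasCaughtUp(Set(4, 5, 6), Set(1, 2, 3, 4)) == false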
private def moveReassignedPartitionLeaderIfRequired(topicPartition: TopicPartition,
newAssignment: ReplicaAssignment): Unit = {
val reassignedReplicas = newAssignment.replicas
val currentLeader = controllerContext.partitionLeadershipInfo(topicPartition).get.leaderAndIsr.leader
if (!reassignedReplicas.contains(currentLeader)) {
info(s"Leader $currentLeader for partition $topicPartition being reassigned, " +
s"is not in the new list of replicas ${reassignedReplicas.mkString(",")}. Re-electing leader")
// move the leader to one of the alive and caught up new replicas
partitionStateMachine.handleStateChanges(Seq(topicPartition), OnlinePartition, Some(ReassignPartitionLeaderElectionStrategy))
} else if (controllerContext.isReplicaOnline(currentLeader, topicPartition)) {
info(s"Leader $currentLeader for partition $topicPartition being reassigned, " +
s"is already in the new list of replicas ${reassignedReplicas.mkString(",")} and is alive")
// shrink replication factor and update the leader epoch in zookeeper to use on the next LeaderAndIsrRequest
updateLeaderEpochAndSendRequest(topicPartition, newAssignment)
} else {
info(s"Leader $currentLeader for partition $topicPartition being reassigned, " +
s"is already in the new list of replicas ${reassignedReplicas.mkString(",")} but is dead")
partitionStateMachine.handleStateChanges(Seq(topicPartition), OnlinePartition, Some(ReassignPartitionLeaderElectionStrategy))
}
}
private def stopRemovedReplicasOfReassignedPartition(topicPartition: TopicPartition,
removedReplicas: Seq[Int]): Unit = {
// first move the replica to offline state (the controller removes it from the ISR)
val replicasToBeDeleted = removedReplicas.map(PartitionAndReplica(topicPartition, _))
replicaStateMachine.handleStateChanges(replicasToBeDeleted, OfflineReplica)
// send stop replica command to the old replicas
replicaStateMachine.handleStateChanges(replicasToBeDeleted, ReplicaDeletionStarted)
// TODO: Eventually partition reassignment could use a callback that does retries if deletion failed
replicaStateMachine.handleStateChanges(replicasToBeDeleted, ReplicaDeletionSuccessful)
replicaStateMachine.handleStateChanges(replicasToBeDeleted, NonExistentReplica)
}
private def updateReplicaAssignmentForPartition(topicPartition: TopicPartition, assignment: ReplicaAssignment): Unit = {
val topicAssignment = mutable.Map() ++=
controllerContext.partitionFullReplicaAssignmentForTopic(topicPartition.topic) +=
(topicPartition -> assignment)
val setDataResponse = zkClient.setTopicAssignmentRaw(topicPartition.topic,
controllerContext.topicIds.get(topicPartition.topic),
topicAssignment, controllerContext.epochZkVersion)
setDataResponse.resultCode match {
case Code.OK =>
info(s"Successfully updated assignment of partition $topicPartition to $assignment")
case Code.NONODE =>
throw new IllegalStateException(s"Failed to update assignment for $topicPartition since the topic " +
"has no current assignment")
case _ => throw new KafkaException(setDataResponse.resultException.get)
}
}
private def startNewReplicasForReassignedPartition(topicPartition: TopicPartition, newReplicas: Seq[Int]): Unit = {
// send the start replica request to the brokers in the reassigned replicas list that are not in the assigned
// replicas list
newReplicas.foreach { replica =>
replicaStateMachine.handleStateChanges(Seq(PartitionAndReplica(topicPartition, replica)), NewReplica)
}
}
private def updateLeaderEpochAndSendRequest(topicPartition: TopicPartition,
assignment: ReplicaAssignment): Unit = {
val stateChangeLog = stateChangeLogger.withControllerEpoch(controllerContext.epoch)
updateLeaderEpoch(topicPartition) match {
case Some(updatedLeaderIsrAndControllerEpoch) =>
try {
brokerRequestBatch.newBatch()
// the isNew flag, when set to true, makes sure that when a replica possibly resided
// in a logDir that is offline, we refrain from just creating a new replica in a good
// logDir. This is exactly the behavior we want for the original replicas, but not
// for the replicas we add in this reassignment. For new replicas, we want to be able
// to assign them to one of the good logDirs.
brokerRequestBatch.addLeaderAndIsrRequestForBrokers(assignment.originReplicas, topicPartition,
updatedLeaderIsrAndControllerEpoch, assignment, isNew = false)
brokerRequestBatch.addLeaderAndIsrRequestForBrokers(assignment.addingReplicas, topicPartition,
updatedLeaderIsrAndControllerEpoch, assignment, isNew = true)
brokerRequestBatch.sendRequestsToBrokers(controllerContext.epoch)
} catch {
case e: IllegalStateException =>
handleIllegalState(e)
}
stateChangeLog.info(s"Sent LeaderAndIsr request $updatedLeaderIsrAndControllerEpoch with " +
s"new replica assignment $assignment to leader ${updatedLeaderIsrAndControllerEpoch.leaderAndIsr.leader} " +
s"for partition being reassigned $topicPartition")
case None => // fail the reassignment
stateChangeLog.error(s"Failed to send LeaderAndIsr request with new replica assignment " +
s"$assignment to leader for partition being reassigned $topicPartition")
}
}
private def registerPartitionModificationsHandlers(topics: Seq[String]) = {
topics.foreach { topic =>
val partitionModificationsHandler = new PartitionModificationsHandler(eventManager, topic)
partitionModificationsHandlers.put(topic, partitionModificationsHandler)
}
partitionModificationsHandlers.values.foreach(zkClient.registerZNodeChangeHandler)
}
private[controller] def unregisterPartitionModificationsHandlers(topics: Seq[String]) = {
topics.foreach { topic =>
partitionModificationsHandlers.remove(topic).foreach(handler => zkClient.unregisterZNodeChangeHandler(handler.path))
}
}
private def unregisterPartitionReassignmentIsrChangeHandlers(): Unit = {
if (!isAlterIsrEnabled) {
controllerContext.partitionsBeingReassigned.foreach { tp =>
val path = TopicPartitionStateZNode.path(tp)
zkClient.unregisterZNodeChangeHandler(path)
}
}
}
private def removePartitionFromReassigningPartitions(topicPartition: TopicPartition,
assignment: ReplicaAssignment): Unit = {
if (controllerContext.partitionsBeingReassigned.contains(topicPartition)) {
if (!isAlterIsrEnabled) {
val path = TopicPartitionStateZNode.path(topicPartition)
zkClient.unregisterZNodeChangeHandler(path)
}
maybeRemoveFromZkReassignment((tp, replicas) => tp == topicPartition && replicas == assignment.replicas)
controllerContext.partitionsBeingReassigned.remove(topicPartition)
} else {
throw new IllegalStateException("Cannot remove a reassigning partition because it is not present in memory")
}
}
/**
* Remove partitions from an active zk-based reassignment (if one exists).
*
* @param shouldRemoveReassignment Predicate indicating which partition reassignments should be removed
*/
private def maybeRemoveFromZkReassignment(shouldRemoveReassignment: (TopicPartition, Seq[Int]) => Boolean): Unit = {
if (!zkClient.reassignPartitionsInProgress)
return
val reassigningPartitions = zkClient.getPartitionReassignment
val (removingPartitions, updatedPartitionsBeingReassigned) = reassigningPartitions.partition { case (tp, replicas) =>
shouldRemoveReassignment(tp, replicas)
}
info(s"Removing partitions $removingPartitions from the list of reassigned partitions in zookeeper")
// write the new list to zookeeper
if (updatedPartitionsBeingReassigned.isEmpty) {
info(s"No more partitions need to be reassigned. Deleting zk path ${ReassignPartitionsZNode.path}")
zkClient.deletePartitionReassignment(controllerContext.epochZkVersion)
// Ensure we detect future reassignments
eventManager.put(ZkPartitionReassignment)
} else {
try {
zkClient.setOrCreatePartitionReassignment(updatedPartitionsBeingReassigned, controllerContext.epochZkVersion)
} catch {
case e: KeeperException => throw new AdminOperationException(e)
}
}
}
private def removePartitionsFromPreferredReplicaElection(partitionsToBeRemoved: Set[TopicPartition],
isTriggeredByAutoRebalance: Boolean): Unit = {
for (partition <- partitionsToBeRemoved) {
// check whether the preferred replica ended up as the leader
val currentLeader = controllerContext.partitionLeadershipInfo(partition).get.leaderAndIsr.leader
val preferredReplica = controllerContext.partitionReplicaAssignment(partition).head
if (currentLeader == preferredReplica) {
info(s"Partition $partition completed preferred replica leader election. New leader is $preferredReplica")
} else {
warn(s"Partition $partition failed to complete preferred replica leader election to $preferredReplica. " +
s"Leader is still $currentLeader")
}
}
if (!isTriggeredByAutoRebalance) {
zkClient.deletePreferredReplicaElection(controllerContext.epochZkVersion)
// Ensure we detect future preferred replica leader elections
eventManager.put(ReplicaLeaderElection(None, ElectionType.PREFERRED, ZkTriggered))
}
}
/**
* Send the leader information for selected partitions to selected brokers so that they can correctly respond to
* metadata requests
*
* @param brokers The brokers that the update metadata request should be sent to
*/
private[controller] def sendUpdateMetadataRequest(brokers: Seq[Int], partitions: Set[TopicPartition]): Unit = {
try {
brokerRequestBatch.newBatch()
brokerRequestBatch.addUpdateMetadataRequestForBrokers(brokers, partitions)
brokerRequestBatch.sendRequestsToBrokers(epoch)
} catch {
case e: IllegalStateException =>
handleIllegalState(e)
}
}
/**
* Does not change leader or isr, but just increments the leader epoch
*
* @param partition partition
* @return the new leaderAndIsr with an incremented leader epoch, or None if leaderAndIsr is empty.
*/
private def updateLeaderEpoch(partition: TopicPartition): Option[LeaderIsrAndControllerEpoch] = {
debug(s"Updating leader epoch for partition $partition")
var finalLeaderIsrAndControllerEpoch: Option[LeaderIsrAndControllerEpoch] = None
var zkWriteCompleteOrUnnecessary = false
while (!zkWriteCompleteOrUnnecessary) {
// refresh leader and isr from zookeeper again
zkWriteCompleteOrUnnecessary = zkClient.getTopicPartitionStates(Seq(partition)).get(partition) match {
case Some(leaderIsrAndControllerEpoch) =>
val leaderAndIsr = leaderIsrAndControllerEpoch.leaderAndIsr
val controllerEpoch = leaderIsrAndControllerEpoch.controllerEpoch
if (controllerEpoch > epoch)
throw new StateChangeFailedException("Leader and isr path written by another controller. This probably " +
s"means the current controller with epoch $epoch went through a soft failure and another " +
s"controller was elected with epoch $controllerEpoch. Aborting state change by this controller")
// increment the leader epoch even if there are no leader or isr changes to allow the leader to cache the expanded
// assigned replica list
val newLeaderAndIsr = leaderAndIsr.newEpochAndZkVersion
// update the new leadership decision in zookeeper or retry
val UpdateLeaderAndIsrResult(finishedUpdates, _) =
zkClient.updateLeaderAndIsr(immutable.Map(partition -> newLeaderAndIsr), epoch, controllerContext.epochZkVersion)
finishedUpdates.get(partition) match {
case Some(Right(leaderAndIsr)) =>
val leaderIsrAndControllerEpoch = LeaderIsrAndControllerEpoch(leaderAndIsr, epoch)
controllerContext.putPartitionLeadershipInfo(partition, leaderIsrAndControllerEpoch)
finalLeaderIsrAndControllerEpoch = Some(leaderIsrAndControllerEpoch)
info(s"Updated leader epoch for partition $partition to ${leaderAndIsr.leaderEpoch}")
true
case Some(Left(e)) => throw e
case None => false
}
case None =>
throw new IllegalStateException(s"Cannot update leader epoch for partition $partition as " +
"leaderAndIsr path is empty. This could mean we somehow tried to reassign a partition that doesn't exist")
}
}
finalLeaderIsrAndControllerEpoch
}
private def checkAndTriggerAutoLeaderRebalance(): Unit = {
trace("Checking need to trigger auto leader balancing")
val preferredReplicasForTopicsByBrokers: Map[Int, Map[TopicPartition, Seq[Int]]] =
controllerContext.allPartitions.filterNot {
tp => topicDeletionManager.isTopicQueuedUpForDeletion(tp.topic)
}.map { tp =>
(tp, controllerContext.partitionReplicaAssignment(tp))
}.toMap.groupBy { case (_, assignedReplicas) => assignedReplicas.head }
// for each broker, check if a preferred replica election needs to be triggered
preferredReplicasForTopicsByBrokers.forKeyValue { (leaderBroker, topicPartitionsForBroker) =>
val topicsNotInPreferredReplica = topicPartitionsForBroker.filter { case (topicPartition, _) =>
val leadershipInfo = controllerContext.partitionLeadershipInfo(topicPartition)
leadershipInfo.exists(_.leaderAndIsr.leader != leaderBroker)
}
debug(s"Topics not in preferred replica for broker $leaderBroker $topicsNotInPreferredReplica")
val imbalanceRatio = topicsNotInPreferredReplica.size.toDouble / topicPartitionsForBroker.size
trace(s"Leader imbalance ratio for broker $leaderBroker is $imbalanceRatio")
// check ratio and if greater than desired ratio, trigger a rebalance for the topic partitions
// that need to be on this broker
if (imbalanceRatio > (config.leaderImbalancePerBrokerPercentage.toDouble / 100)) {
// do this check only if the broker is live and there are no partitions being reassigned currently
// and preferred replica election is not in progress
val candidatePartitions = topicsNotInPreferredReplica.keys.filter(tp =>
controllerContext.partitionsBeingReassigned.isEmpty &&
!topicDeletionManager.isTopicQueuedUpForDeletion(tp.topic) &&
controllerContext.allTopics.contains(tp.topic) &&
canPreferredReplicaBeLeader(tp)
)
onReplicaElection(candidatePartitions.toSet, ElectionType.PREFERRED, AutoTriggered)
}
}
}
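// --- Illustrative sketch (not part of the controller logic) ---
// The per-broker trigger above boils down to a ratio check. A hypothetical, standalone
// version of that check, assuming the two counts have already been computed:
private def exampleShouldRebalance(partitionsNotLedByPreferredReplica: Int,
                                   partitionsWithPreferredReplicaOnBroker: Int,
                                   imbalancePercentage: Int): Boolean = {
  val imbalanceRatio = partitionsNotLedByPreferredReplica.toDouble / partitionsWithPreferredReplicaOnBroker
  imbalanceRatio > imbalancePercentage.toDouble / 100
}
// With 3 of 10 partitions led away from their preferred replica and the default
// leader.imbalance.per.broker.percentage of 10, the ratio 0.3 exceeds 0.1 and an
// election is triggered for the affected partitions.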
private def canPreferredReplicaBeLeader(tp: TopicPartition): Boolean = {
val assignment = controllerContext.partitionReplicaAssignment(tp)
val liveReplicas = assignment.filter(replica => controllerContext.isReplicaOnline(replica, tp))
val isr = controllerContext.partitionLeadershipInfo(tp).get.leaderAndIsr.isr
PartitionLeaderElectionAlgorithms
.preferredReplicaPartitionLeaderElection(assignment, isr, liveReplicas.toSet)
.nonEmpty
}
private def processAutoPreferredReplicaLeaderElection(): Unit = {
if (!isActive) return
try {
info("Processing automatic preferred replica leader election")
checkAndTriggerAutoLeaderRebalance()
} finally {
scheduleAutoLeaderRebalanceTask(delay = config.leaderImbalanceCheckIntervalSeconds, unit = TimeUnit.SECONDS)
}
}
private def processUncleanLeaderElectionEnable(): Unit = {
if (!isActive) return
info("Unclean leader election has been enabled by default")
partitionStateMachine.triggerOnlinePartitionStateChange()
}
private def processTopicUncleanLeaderElectionEnable(topic: String): Unit = {
if (!isActive) return
info(s"Unclean leader election has been enabled for topic $topic")
partitionStateMachine.triggerOnlinePartitionStateChange(topic)
}
private def processControlledShutdown(id: Int, brokerEpoch: Long, controlledShutdownCallback: Try[Set[TopicPartition]] => Unit): Unit = {
val controlledShutdownResult = Try { doControlledShutdown(id, brokerEpoch) }
controlledShutdownCallback(controlledShutdownResult)
}
private def doControlledShutdown(id: Int, brokerEpoch: Long): Set[TopicPartition] = {
if (!isActive) {
throw new ControllerMovedException("Controller moved to another broker. Aborting controlled shutdown")
}
// broker epoch in the request is unknown if the controller hasn't been upgraded to use KIP-380
// so we keep the previous behavior and do not reject the request
if (brokerEpoch != AbstractControlRequest.UNKNOWN_BROKER_EPOCH) {
val cachedBrokerEpoch = controllerContext.liveBrokerIdAndEpochs(id)
if (brokerEpoch < cachedBrokerEpoch) {
val stateBrokerEpochErrorMessage = "Received controlled shutdown request from an old broker epoch " +
s"$brokerEpoch for broker $id. Current broker epoch is $cachedBrokerEpoch."
info(stateBrokerEpochErrorMessage)
throw new StaleBrokerEpochException(stateBrokerEpochErrorMessage)
}
}
info(s"Shutting down broker $id")
if (!controllerContext.liveOrShuttingDownBrokerIds.contains(id))
throw new BrokerNotAvailableException(s"Broker id $id does not exist.")
controllerContext.shuttingDownBrokerIds.add(id)
debug(s"All shutting down brokers: ${controllerContext.shuttingDownBrokerIds.mkString(",")}")
debug(s"Live brokers: ${controllerContext.liveBrokerIds.mkString(",")}")
val partitionsToActOn = controllerContext.partitionsOnBroker(id).filter { partition =>
controllerContext.partitionReplicaAssignment(partition).size > 1 &&
controllerContext.partitionLeadershipInfo(partition).isDefined &&
!topicDeletionManager.isTopicQueuedUpForDeletion(partition.topic)
}
val (partitionsLedByBroker, partitionsFollowedByBroker) = partitionsToActOn.partition { partition =>
controllerContext.partitionLeadershipInfo(partition).get.leaderAndIsr.leader == id
}
partitionStateMachine.handleStateChanges(partitionsLedByBroker.toSeq, OnlinePartition, Some(ControlledShutdownPartitionLeaderElectionStrategy))
try {
brokerRequestBatch.newBatch()
partitionsFollowedByBroker.foreach { partition =>
brokerRequestBatch.addStopReplicaRequestForBrokers(Seq(id), partition, deletePartition = false)
}
brokerRequestBatch.sendRequestsToBrokers(epoch)
} catch {
case e: IllegalStateException =>
handleIllegalState(e)
}
// If the broker is a follower, update the ISR in ZK and notify the current leader
replicaStateMachine.handleStateChanges(partitionsFollowedByBroker.map(partition =>
PartitionAndReplica(partition, id)).toSeq, OfflineReplica)
trace(s"All leaders = ${controllerContext.partitionsLeadershipInfo.mkString(",")}")
controllerContext.partitionLeadersOnBroker(id)
}
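// --- Illustrative sketch (not part of the controller logic) ---
// The staleness check above in isolation: a controlled shutdown request is rejected only
// when it carries a known broker epoch that is older than the epoch cached for that broker.
// Hypothetical helper; UNKNOWN_BROKER_EPOCH is the sentinel sent by pre-KIP-380 brokers.
private def exampleIsStaleShutdownRequest(requestBrokerEpoch: Long, cachedBrokerEpoch: Long): Boolean =
  requestBrokerEpoch != AbstractControlRequest.UNKNOWN_BROKER_EPOCH && requestBrokerEpoch < cachedBrokerEpoch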
private def processUpdateMetadataResponseReceived(updateMetadataResponse: UpdateMetadataResponse, brokerId: Int): Unit = {
if (!isActive) return
if (updateMetadataResponse.error != Errors.NONE) {
stateChangeLogger.error(s"Received error ${updateMetadataResponse.error} in UpdateMetadata " +
s"response $updateMetadataResponse from broker $brokerId")
}
}
private def processLeaderAndIsrResponseReceived(leaderAndIsrResponse: LeaderAndIsrResponse, brokerId: Int): Unit = {
if (!isActive) return
if (leaderAndIsrResponse.error != Errors.NONE) {
stateChangeLogger.error(s"Received error ${leaderAndIsrResponse.error} in LeaderAndIsr " +
s"response $leaderAndIsrResponse from broker $brokerId")
return
}
val offlineReplicas = new ArrayBuffer[TopicPartition]()
val onlineReplicas = new ArrayBuffer[TopicPartition]()
leaderAndIsrResponse.partitionErrors(controllerContext.topicNames.asJava).forEach { case (tp, error) =>
if (error.code() == Errors.KAFKA_STORAGE_ERROR.code)
offlineReplicas += tp
else if (error.code() == Errors.NONE.code)
onlineReplicas += tp
}
val previousOfflineReplicas = controllerContext.replicasOnOfflineDirs.getOrElse(brokerId, Set.empty[TopicPartition])
val currentOfflineReplicas = mutable.Set() ++= previousOfflineReplicas --= onlineReplicas ++= offlineReplicas
controllerContext.replicasOnOfflineDirs.put(brokerId, currentOfflineReplicas)
val newOfflineReplicas = currentOfflineReplicas.diff(previousOfflineReplicas)
if (newOfflineReplicas.nonEmpty) {
stateChangeLogger.info(s"Mark replicas ${newOfflineReplicas.mkString(",")} on broker $brokerId as offline")
onReplicasBecomeOffline(newOfflineReplicas.map(PartitionAndReplica(_, brokerId)))
}
}
private def processTopicDeletionStopReplicaResponseReceived(replicaId: Int,
requestError: Errors,
partitionErrors: Map[TopicPartition, Errors]): Unit = {
if (!isActive) return
debug(s"Delete topic callback invoked on StopReplica response received from broker $replicaId: " +
s"request error = $requestError, partition errors = $partitionErrors")
val partitionsInError = if (requestError != Errors.NONE)
partitionErrors.keySet
else
partitionErrors.filter { case (_, error) => error != Errors.NONE }.keySet
val replicasInError = partitionsInError.map(PartitionAndReplica(_, replicaId))
// move all the failed replicas to ReplicaDeletionIneligible
topicDeletionManager.failReplicaDeletion(replicasInError)
if (replicasInError.size != partitionErrors.size) {
// some replicas could have been successfully deleted
val deletedReplicas = partitionErrors.keySet.diff(partitionsInError)
topicDeletionManager.completeReplicaDeletion(deletedReplicas.map(PartitionAndReplica(_, replicaId)))
}
}
private def processStartup(): Unit = {
zkClient.registerZNodeChangeHandlerAndCheckExistence(controllerChangeHandler)
elect()
}
private def updateMetrics(): Unit = {
offlinePartitionCount =
if (!isActive) {
0
} else {
controllerContext.offlinePartitionCount
}
preferredReplicaImbalanceCount =
if (!isActive) {
0
} else {
controllerContext.preferredReplicaImbalanceCount
}
globalTopicCount = if (!isActive) 0 else controllerContext.allTopics.size
globalPartitionCount = if (!isActive) 0 else controllerContext.partitionWithLeadersCount
topicsToDeleteCount = if (!isActive) 0 else controllerContext.topicsToBeDeleted.size
replicasToDeleteCount = if (!isActive) 0 else controllerContext.topicsToBeDeleted.map { topic =>
// For each enqueued topic, count the number of replicas that are not yet deleted
controllerContext.replicasForTopic(topic).count { replica =>
controllerContext.replicaState(replica) != ReplicaDeletionSuccessful
}
}.sum
ineligibleTopicsToDeleteCount = if (!isActive) 0 else controllerContext.topicsIneligibleForDeletion.size
ineligibleReplicasToDeleteCount = if (!isActive) 0 else controllerContext.topicsToBeDeleted.map { topic =>
// For each enqueued topic, count the number of replicas that are ineligible
controllerContext.replicasForTopic(topic).count { replica =>
controllerContext.replicaState(replica) == ReplicaDeletionIneligible
}
}.sum
}
// visible for testing
private[controller] def handleIllegalState(e: IllegalStateException): Nothing = {
// Resign if the controller is in an illegal state
error("Forcing the controller to resign")
brokerRequestBatch.clear()
triggerControllerMove()
throw e
}
private def triggerControllerMove(): Unit = {
activeControllerId = zkClient.getControllerId.getOrElse(-1)
if (!isActive) {
warn("Controller has already moved when trying to trigger controller movement")
return
}
try {
val expectedControllerEpochZkVersion = controllerContext.epochZkVersion
activeControllerId = -1
onControllerResignation()
zkClient.deleteController(expectedControllerEpochZkVersion)
} catch {
case _: ControllerMovedException =>
warn("Controller has already moved when trying to trigger controller movement")
}
}
private def maybeResign(): Unit = {
val wasActiveBeforeChange = isActive
zkClient.registerZNodeChangeHandlerAndCheckExistence(controllerChangeHandler)
activeControllerId = zkClient.getControllerId.getOrElse(-1)
if (wasActiveBeforeChange && !isActive) {
onControllerResignation()
}
}
private def elect(): Unit = {
activeControllerId = zkClient.getControllerId.getOrElse(-1)
/*
* We can get here during the initial startup and the handleDeleted ZK callback. Because of the potential race condition,
* it's possible that the controller has already been elected when we get here. This check will prevent the following
* createEphemeralPath method from getting into an infinite loop if this broker is already the controller.
*/
if (activeControllerId != -1) {
debug(s"Broker $activeControllerId has been elected as the controller, so stopping the election process.")
return
}
try {
val (epoch, epochZkVersion) = zkClient.registerControllerAndIncrementControllerEpoch(config.brokerId)
controllerContext.epoch = epoch
controllerContext.epochZkVersion = epochZkVersion
activeControllerId = config.brokerId
info(s"${config.brokerId} successfully elected as the controller. Epoch incremented to ${controllerContext.epoch} " +
s"and epoch zk version is now ${controllerContext.epochZkVersion}")
onControllerFailover()
} catch {
case e: ControllerMovedException =>
maybeResign()
if (activeControllerId != -1)
debug(s"Broker $activeControllerId was elected as controller instead of broker ${config.brokerId}", e)
else
warn("A controller has been elected but just resigned, this will result in another round of election", e)
case t: Throwable =>
error(s"Error while electing or becoming controller on broker ${config.brokerId}. " +
s"Trigger controller movement immediately", t)
triggerControllerMove()
}
}
/**
* Partitions the provided map of brokers and epochs into 2 new maps:
* - The first map contains only those brokers whose features were found to be compatible with
* the existing finalized features.
* - The second map contains only those brokers whose features were found to be incompatible with
* the existing finalized features.
*
* @param brokersAndEpochs the map to be partitioned
* @return two maps: first contains compatible brokers and second contains
* incompatible brokers as explained above
*/
private def partitionOnFeatureCompatibility(brokersAndEpochs: Map[Broker, Long]): (Map[Broker, Long], Map[Broker, Long]) = {
// There cannot be any feature incompatibilities when the feature versioning system is disabled
// or when the finalized feature cache is empty. Otherwise, we check if the non-empty contents
// of the cache are compatible with the supported features of each broker.
brokersAndEpochs.partition {
case (broker, _) =>
!config.isFeatureVersioningSupported ||
!featureCache.get.exists(
latestFinalizedFeatures =>
BrokerFeatures.hasIncompatibleFeatures(broker.features, latestFinalizedFeatures.features))
}
}
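// --- Illustrative sketch (not part of the controller logic) ---
// The method above is a plain Map.partition by a compatibility predicate. A toy version
// with broker ids standing in for Broker objects and an arbitrary predicate in place of
// the feature-compatibility check:
private def examplePartitionByCompatibility(brokerEpochs: Map[Int, Long],
                                            isCompatible: Int => Boolean): (Map[Int, Long], Map[Int, Long]) =
  brokerEpochs.partition { case (brokerId, _) => isCompatible(brokerId) }
// examplePartitionByCompatibility(Map(1 -> 10L, 2 -> 11L), _ != 2) == (Map(1 -> 10L), Map(2 -> 11L))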
private def processBrokerChange(): Unit = {
if (!isActive) return
val curBrokerAndEpochs = zkClient.getAllBrokerAndEpochsInCluster
val curBrokerIdAndEpochs = curBrokerAndEpochs map { case (broker, epoch) => (broker.id, epoch) }
val curBrokerIds = curBrokerIdAndEpochs.keySet
val liveOrShuttingDownBrokerIds = controllerContext.liveOrShuttingDownBrokerIds
val newBrokerIds = curBrokerIds.diff(liveOrShuttingDownBrokerIds)
val deadBrokerIds = liveOrShuttingDownBrokerIds.diff(curBrokerIds)
val bouncedBrokerIds = (curBrokerIds & liveOrShuttingDownBrokerIds)
.filter(brokerId => curBrokerIdAndEpochs(brokerId) > controllerContext.liveBrokerIdAndEpochs(brokerId))
val newBrokerAndEpochs = curBrokerAndEpochs.filter { case (broker, _) => newBrokerIds.contains(broker.id) }
val bouncedBrokerAndEpochs = curBrokerAndEpochs.filter { case (broker, _) => bouncedBrokerIds.contains(broker.id) }
val newBrokerIdsSorted = newBrokerIds.toSeq.sorted
val deadBrokerIdsSorted = deadBrokerIds.toSeq.sorted
val liveBrokerIdsSorted = curBrokerIds.toSeq.sorted
val bouncedBrokerIdsSorted = bouncedBrokerIds.toSeq.sorted
info(s"Newly added brokers: ${newBrokerIdsSorted.mkString(",")}, " +
s"deleted brokers: ${deadBrokerIdsSorted.mkString(",")}, " +
s"bounced brokers: ${bouncedBrokerIdsSorted.mkString(",")}, " +
s"all live brokers: ${liveBrokerIdsSorted.mkString(",")}")
newBrokerAndEpochs.keySet.foreach(controllerChannelManager.addBroker)
bouncedBrokerIds.foreach(controllerChannelManager.removeBroker)
bouncedBrokerAndEpochs.keySet.foreach(controllerChannelManager.addBroker)
deadBrokerIds.foreach(controllerChannelManager.removeBroker)
if (newBrokerIds.nonEmpty) {
val (newCompatibleBrokerAndEpochs, newIncompatibleBrokerAndEpochs) =
partitionOnFeatureCompatibility(newBrokerAndEpochs)
if (!newIncompatibleBrokerAndEpochs.isEmpty) {
warn("Ignoring registration of new brokers due to incompatibilities with finalized features: " +
newIncompatibleBrokerAndEpochs.map { case (broker, _) => broker.id }.toSeq.sorted.mkString(","))
}
controllerContext.addLiveBrokers(newCompatibleBrokerAndEpochs)
onBrokerStartup(newBrokerIdsSorted)
}
if (bouncedBrokerIds.nonEmpty) {
controllerContext.removeLiveBrokers(bouncedBrokerIds)
onBrokerFailure(bouncedBrokerIdsSorted)
val (bouncedCompatibleBrokerAndEpochs, bouncedIncompatibleBrokerAndEpochs) =
partitionOnFeatureCompatibility(bouncedBrokerAndEpochs)
if (!bouncedIncompatibleBrokerAndEpochs.isEmpty) {
warn("Ignoring registration of bounced brokers due to incompatibilities with finalized features: " +
bouncedIncompatibleBrokerAndEpochs.map { case (broker, _) => broker.id }.toSeq.sorted.mkString(","))
}
controllerContext.addLiveBrokers(bouncedCompatibleBrokerAndEpochs)
onBrokerStartup(bouncedBrokerIdsSorted)
}
if (deadBrokerIds.nonEmpty) {
controllerContext.removeLiveBrokers(deadBrokerIds)
onBrokerFailure(deadBrokerIdsSorted)
}
if (newBrokerIds.nonEmpty || deadBrokerIds.nonEmpty || bouncedBrokerIds.nonEmpty) {
info(s"Updated broker epochs cache: ${controllerContext.liveBrokerIdAndEpochs}")
}
}
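// --- Illustrative sketch (not part of the controller logic) ---
// The broker-change classification above in miniature: new brokers appear only in the
// current ZK snapshot, dead brokers only in the cached view, and bounced brokers appear
// in both but with a higher epoch in the snapshot. Hypothetical helper over plain
// id -> epoch maps.
private def exampleClassifyBrokers(current: Map[Int, Long], cached: Map[Int, Long]): (Set[Int], Set[Int], Set[Int]) = {
  val newIds = current.keySet.diff(cached.keySet)
  val deadIds = cached.keySet.diff(current.keySet)
  val bouncedIds = (current.keySet & cached.keySet).filter(id => current(id) > cached(id))
  (newIds, deadIds, bouncedIds)
}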
private def processBrokerModification(brokerId: Int): Unit = {
if (!isActive) return
val newMetadataOpt = zkClient.getBroker(brokerId)
val oldMetadataOpt = controllerContext.liveOrShuttingDownBroker(brokerId)
if (newMetadataOpt.nonEmpty && oldMetadataOpt.nonEmpty) {
val oldMetadata = oldMetadataOpt.get
val newMetadata = newMetadataOpt.get
if (newMetadata.endPoints != oldMetadata.endPoints || !oldMetadata.features.equals(newMetadata.features)) {
info(s"Updated broker metadata: $oldMetadata -> $newMetadata")
controllerContext.updateBrokerMetadata(oldMetadata, newMetadata)
onBrokerUpdate(brokerId)
}
}
}
private def processTopicChange(): Unit = {
if (!isActive) return
val topics = zkClient.getAllTopicsInCluster(true)
val newTopics = topics -- controllerContext.allTopics
val deletedTopics = controllerContext.allTopics.diff(topics)
controllerContext.setAllTopics(topics)
registerPartitionModificationsHandlers(newTopics.toSeq)
val addedPartitionReplicaAssignment = zkClient.getReplicaAssignmentAndTopicIdForTopics(newTopics)
deletedTopics.foreach(controllerContext.removeTopic)
processTopicIds(addedPartitionReplicaAssignment)
addedPartitionReplicaAssignment.foreach { case TopicIdReplicaAssignment(_, _, newAssignments) =>
newAssignments.foreach { case (topicAndPartition, newReplicaAssignment) =>
controllerContext.updatePartitionFullReplicaAssignment(topicAndPartition, newReplicaAssignment)
}
}
info(s"New topics: [$newTopics], deleted topics: [$deletedTopics], new partition replica assignment " +
s"[$addedPartitionReplicaAssignment]")
if (addedPartitionReplicaAssignment.nonEmpty) {
val partitionAssignments = addedPartitionReplicaAssignment
.map { case TopicIdReplicaAssignment(_, _, partitionsReplicas) => partitionsReplicas.keySet }
.reduce((s1, s2) => s1.union(s2))
onNewPartitionCreation(partitionAssignments)
}
}
private def processTopicIds(topicIdAssignments: Set[TopicIdReplicaAssignment]): Unit = {
// Create topic IDs for topics missing them if we are using topic IDs
// Otherwise, maintain what we have in the topicZNode
val updatedTopicIdAssignments = if (config.usesTopicId) {
val (withTopicIds, withoutTopicIds) = topicIdAssignments.partition(_.topicId.isDefined)
withTopicIds ++ zkClient.setTopicIds(withoutTopicIds, controllerContext.epochZkVersion)
} else {
topicIdAssignments
}
// Add topic IDs to controller context
// If we don't have IBP 2.8, but are running 2.8 code, put any topic IDs from the ZNode in controller context
// This is to avoid losing topic IDs during operations like partition reassignments while the cluster is in a mixed state
updatedTopicIdAssignments.foreach { topicIdAssignment =>
topicIdAssignment.topicId.foreach { topicId =>
controllerContext.addTopicId(topicIdAssignment.topic, topicId)
}
}
}
private def processLogDirEventNotification(): Unit = {
if (!isActive) return
val sequenceNumbers = zkClient.getAllLogDirEventNotifications
try {
val brokerIds = zkClient.getBrokerIdsFromLogDirEvents(sequenceNumbers)
onBrokerLogDirFailure(brokerIds)
} finally {
// delete processed children
zkClient.deleteLogDirEventNotifications(sequenceNumbers, controllerContext.epochZkVersion)
}
}
private def processPartitionModifications(topic: String): Unit = {
def restorePartitionReplicaAssignment(
topic: String,
newPartitionReplicaAssignment: Map[TopicPartition, ReplicaAssignment]
): Unit = {
info("Restoring the partition replica assignment for topic %s".format(topic))
val existingPartitions = zkClient.getChildren(TopicPartitionsZNode.path(topic))
val existingPartitionReplicaAssignment = newPartitionReplicaAssignment
.filter(p => existingPartitions.contains(p._1.partition.toString))
.map { case (tp, _) =>
tp -> controllerContext.partitionFullReplicaAssignment(tp)
}.toMap
zkClient.setTopicAssignment(topic,
controllerContext.topicIds.get(topic),
existingPartitionReplicaAssignment,
controllerContext.epochZkVersion)
}
if (!isActive) return
val partitionReplicaAssignment = zkClient.getFullReplicaAssignmentForTopics(immutable.Set(topic))
val partitionsToBeAdded = partitionReplicaAssignment.filter { case (topicPartition, _) =>
controllerContext.partitionReplicaAssignment(topicPartition).isEmpty
}
if (topicDeletionManager.isTopicQueuedUpForDeletion(topic)) {
if (partitionsToBeAdded.nonEmpty) {
warn("Skipping adding partitions %s for topic %s since it is currently being deleted"
.format(partitionsToBeAdded.map(_._1.partition).mkString(","), topic))
restorePartitionReplicaAssignment(topic, partitionReplicaAssignment)
} else {
// This can happen if the existing partition replica assignments are restored to prevent the partition count from increasing during topic deletion
info("Ignoring partition change during topic deletion as no new partitions are added")
}
} else if (partitionsToBeAdded.nonEmpty) {
info(s"New partitions to be added $partitionsToBeAdded")
partitionsToBeAdded.forKeyValue { (topicPartition, assignedReplicas) =>
controllerContext.updatePartitionFullReplicaAssignment(topicPartition, assignedReplicas)
}
onNewPartitionCreation(partitionsToBeAdded.keySet)
}
}
private def processTopicDeletion(): Unit = {
if (!isActive) return
var topicsToBeDeleted = zkClient.getTopicDeletions.toSet
debug(s"Delete topics listener fired for topics ${topicsToBeDeleted.mkString(",")} to be deleted")
val nonExistentTopics = topicsToBeDeleted -- controllerContext.allTopics
if (nonExistentTopics.nonEmpty) {
warn(s"Ignoring request to delete non-existing topics ${nonExistentTopics.mkString(",")}")
zkClient.deleteTopicDeletions(nonExistentTopics.toSeq, controllerContext.epochZkVersion)
}
topicsToBeDeleted --= nonExistentTopics
if (config.deleteTopicEnable) {
if (topicsToBeDeleted.nonEmpty) {
info(s"Starting topic deletion for topics ${topicsToBeDeleted.mkString(",")}")
// mark topic ineligible for deletion if other state changes are in progress
topicsToBeDeleted.foreach { topic =>
val partitionReassignmentInProgress =
controllerContext.partitionsBeingReassigned.map(_.topic).contains(topic)
if (partitionReassignmentInProgress)
topicDeletionManager.markTopicIneligibleForDeletion(Set(topic),
reason = "topic reassignment in progress")
}
// add topic to deletion list
topicDeletionManager.enqueueTopicsForDeletion(topicsToBeDeleted)
}
} else {
// If delete topic is disabled, remove entries under the zookeeper path /admin/delete_topics
info(s"Removing $topicsToBeDeleted since delete topic is disabled")
zkClient.deleteTopicDeletions(topicsToBeDeleted.toSeq, controllerContext.epochZkVersion)
}
}
private def processZkPartitionReassignment(): Set[TopicPartition] = {
// We need to register the watcher if the path doesn't exist in order to detect future
// reassignments and we get the `path exists` check for free
if (isActive && zkClient.registerZNodeChangeHandlerAndCheckExistence(partitionReassignmentHandler)) {
val reassignmentResults = mutable.Map.empty[TopicPartition, ApiError]
val partitionsToReassign = mutable.Map.empty[TopicPartition, ReplicaAssignment]
zkClient.getPartitionReassignment.forKeyValue { (tp, targetReplicas) =>
maybeBuildReassignment(tp, Some(targetReplicas)) match {
case Some(context) => partitionsToReassign.put(tp, context)
case None => reassignmentResults.put(tp, new ApiError(Errors.NO_REASSIGNMENT_IN_PROGRESS))
}
}
reassignmentResults ++= maybeTriggerPartitionReassignment(partitionsToReassign)
val (partitionsReassigned, partitionsFailed) = reassignmentResults.partition(_._2.error == Errors.NONE)
if (partitionsFailed.nonEmpty) {
warn(s"Failed reassignment through zk with the following errors: $partitionsFailed")
maybeRemoveFromZkReassignment((tp, _) => partitionsFailed.contains(tp))
}
partitionsReassigned.keySet
} else {
Set.empty
}
}
/**
* Process a partition reassignment from the AlterPartitionReassignment API. If there is an
* existing reassignment through zookeeper for any of the requested partitions, they will be
* cancelled prior to beginning the new reassignment. Any zk-based reassignment for partitions
* which are NOT included in this call will not be affected.
*
* @param reassignments Map of reassignments passed through the AlterReassignments API. A null value
* means that we should cancel an in-progress reassignment.
* @param callback Callback to send AlterReassignments response
*/
private def processApiPartitionReassignment(reassignments: Map[TopicPartition, Option[Seq[Int]]],
callback: AlterReassignmentsCallback): Unit = {
if (!isActive) {
callback(Right(new ApiError(Errors.NOT_CONTROLLER)))
} else {
val reassignmentResults = mutable.Map.empty[TopicPartition, ApiError]
val partitionsToReassign = mutable.Map.empty[TopicPartition, ReplicaAssignment]
reassignments.forKeyValue { (tp, targetReplicas) =>
val maybeApiError = targetReplicas.flatMap(validateReplicas(tp, _))
maybeApiError match {
case None =>
maybeBuildReassignment(tp, targetReplicas) match {
case Some(context) => partitionsToReassign.put(tp, context)
case None => reassignmentResults.put(tp, new ApiError(Errors.NO_REASSIGNMENT_IN_PROGRESS))
}
case Some(err) =>
reassignmentResults.put(tp, err)
}
}
// The latest reassignment (whether by API or through zk) always takes precedence,
// so remove from active zk reassignment (if one exists)
maybeRemoveFromZkReassignment((tp, _) => partitionsToReassign.contains(tp))
reassignmentResults ++= maybeTriggerPartitionReassignment(partitionsToReassign)
callback(Left(reassignmentResults))
}
}
private def validateReplicas(topicPartition: TopicPartition, replicas: Seq[Int]): Option[ApiError] = {
val replicaSet = replicas.toSet
if (replicas.isEmpty)
Some(new ApiError(Errors.INVALID_REPLICA_ASSIGNMENT,
s"Empty replica list specified in partition reassignment."))
else if (replicas.size != replicaSet.size) {
Some(new ApiError(Errors.INVALID_REPLICA_ASSIGNMENT,
s"Duplicate replica ids in partition reassignment replica list: $replicas"))
} else if (replicas.exists(_ < 0))
Some(new ApiError(Errors.INVALID_REPLICA_ASSIGNMENT,
s"Invalid broker id in replica list: $replicas"))
else {
// Ensure that any new replicas are among the live brokers
val currentAssignment = controllerContext.partitionFullReplicaAssignment(topicPartition)
val newAssignment = currentAssignment.reassignTo(replicas)
val areNewReplicasAlive = newAssignment.addingReplicas.toSet.subsetOf(controllerContext.liveBrokerIds)
if (!areNewReplicasAlive)
Some(new ApiError(Errors.INVALID_REPLICA_ASSIGNMENT,
s"Replica assignment has brokers that are not alive. Replica list: " +
s"${newAssignment.addingReplicas}, live broker list: ${controllerContext.liveBrokerIds}"))
else None
}
}
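// --- Illustrative sketch (not part of the controller logic) ---
// The three purely structural checks performed above, extracted into a hypothetical
// standalone helper (the liveness check is omitted because it needs the controller context):
private def exampleReplicaListProblem(replicas: Seq[Int]): Option[String] = {
  if (replicas.isEmpty) Some("empty replica list")
  else if (replicas.distinct.size != replicas.size) Some(s"duplicate replica ids in $replicas")
  else if (replicas.exists(_ < 0)) Some(s"negative broker id in $replicas")
  else None
}
// exampleReplicaListProblem(Seq(1, 2, 2)) == Some("duplicate replica ids in List(1, 2, 2)")
// exampleReplicaListProblem(Seq(1, 2, 3)) == None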
private def maybeBuildReassignment(topicPartition: TopicPartition,
targetReplicasOpt: Option[Seq[Int]]): Option[ReplicaAssignment] = {
val replicaAssignment = controllerContext.partitionFullReplicaAssignment(topicPartition)
if (replicaAssignment.isBeingReassigned) {
val targetReplicas = targetReplicasOpt.getOrElse(replicaAssignment.originReplicas)
Some(replicaAssignment.reassignTo(targetReplicas))
} else {
targetReplicasOpt.map { targetReplicas =>
replicaAssignment.reassignTo(targetReplicas)
}
}
}
private def processPartitionReassignmentIsrChange(topicPartition: TopicPartition): Unit = {
if (!isActive) return
if (controllerContext.partitionsBeingReassigned.contains(topicPartition)) {
maybeCompleteReassignment(topicPartition)
}
}
private def maybeCompleteReassignment(topicPartition: TopicPartition): Unit = {
val reassignment = controllerContext.partitionFullReplicaAssignment(topicPartition)
if (isReassignmentComplete(topicPartition, reassignment)) {
// resume the partition reassignment process
info(s"Target replicas ${reassignment.targetReplicas} have all caught up with the leader for " +
s"reassigning partition $topicPartition")
onPartitionReassignment(topicPartition, reassignment)
}
}
private def processListPartitionReassignments(partitionsOpt: Option[Set[TopicPartition]], callback: ListReassignmentsCallback): Unit = {
if (!isActive) {
callback(Right(new ApiError(Errors.NOT_CONTROLLER)))
} else {
val results: mutable.Map[TopicPartition, ReplicaAssignment] = mutable.Map.empty
val partitionsToList = partitionsOpt match {
case Some(partitions) => partitions
case None => controllerContext.partitionsBeingReassigned
}
partitionsToList.foreach { tp =>
val assignment = controllerContext.partitionFullReplicaAssignment(tp)
if (assignment.isBeingReassigned) {
results += tp -> assignment
}
}
callback(Left(results))
}
}
/**
* Returns the new FinalizedVersionRange for the feature, if there are no feature
* incompatibilities seen with all known brokers for the provided feature update.
* Otherwise returns an ApiError object containing Errors.INVALID_REQUEST.
*
* @param update the feature update to be processed (this must not be a request to delete the feature)
*
* @return the new FinalizedVersionRange or error, as described above.
*/
private def newFinalizedVersionRangeOrIncompatibilityError(update: UpdateFeaturesRequestData.FeatureUpdateKey): Either[FinalizedVersionRange, ApiError] = {
if (UpdateFeaturesRequest.isDeleteRequest(update)) {
throw new IllegalArgumentException(s"Provided feature update can not be meant to delete the feature: $update")
}
val supportedVersionRange = brokerFeatures.supportedFeatures.get(update.feature)
if (supportedVersionRange == null) {
Right(new ApiError(Errors.INVALID_REQUEST,
"Could not apply finalized feature update because the provided feature" +
" is not supported."))
} else {
var newVersionRange: FinalizedVersionRange = null
try {
newVersionRange = new FinalizedVersionRange(supportedVersionRange.min, update.maxVersionLevel)
} catch {
case _: IllegalArgumentException => {
// This exception means the provided maxVersionLevel is invalid. It is handled below
// outside of this catch clause.
}
}
if (newVersionRange == null) {
Right(new ApiError(Errors.INVALID_REQUEST,
"Could not apply finalized feature update because the provided" +
s" maxVersionLevel:${update.maxVersionLevel} is lower than the" +
s" supported minVersion:${supportedVersionRange.min}."))
} else {
val newFinalizedFeature =
Features.finalizedFeatures(Utils.mkMap(Utils.mkEntry(update.feature, newVersionRange)))
val numIncompatibleBrokers = controllerContext.liveOrShuttingDownBrokers.count(broker => {
BrokerFeatures.hasIncompatibleFeatures(broker.features, newFinalizedFeature)
})
if (numIncompatibleBrokers == 0) {
Left(newVersionRange)
} else {
Right(new ApiError(Errors.INVALID_REQUEST,
"Could not apply finalized feature update because" +
" brokers were found to have incompatible versions for the feature."))
}
}
}
}
/**
* Validates a feature update on an existing FinalizedVersionRange.
* If the validation succeeds, then, the return value contains:
* 1. the new FinalizedVersionRange for the feature, if the feature update was not meant to delete the feature.
* 2. Option.empty, if the feature update was meant to delete the feature.
*
   * If the validation fails, then the returned value contains a suitable ApiError.
*
* @param update the feature update to be processed.
* @param existingVersionRange the existing FinalizedVersionRange which can be empty when no
* FinalizedVersionRange exists for the associated feature
*
* @return the new FinalizedVersionRange to be updated into ZK or error
* as described above.
*/
private def validateFeatureUpdate(update: UpdateFeaturesRequestData.FeatureUpdateKey,
existingVersionRange: Option[FinalizedVersionRange]): Either[Option[FinalizedVersionRange], ApiError] = {
def newVersionRangeOrError(update: UpdateFeaturesRequestData.FeatureUpdateKey): Either[Option[FinalizedVersionRange], ApiError] = {
newFinalizedVersionRangeOrIncompatibilityError(update)
.fold(versionRange => Left(Some(versionRange)), error => Right(error))
}
if (update.feature.isEmpty) {
// Check that the feature name is not empty.
Right(new ApiError(Errors.INVALID_REQUEST, "Feature name can not be empty."))
} else {
// We handle deletion requests separately from non-deletion requests.
if (UpdateFeaturesRequest.isDeleteRequest(update)) {
if (existingVersionRange.isEmpty) {
// Disallow deletion of a non-existing finalized feature.
Right(new ApiError(Errors.INVALID_REQUEST,
"Can not delete non-existing finalized feature."))
} else {
Left(Option.empty)
}
} else if (update.maxVersionLevel() < 1) {
// Disallow deletion of a finalized feature without allowDowngrade flag set.
Right(new ApiError(Errors.INVALID_REQUEST,
s"Can not provide maxVersionLevel: ${update.maxVersionLevel} less" +
s" than 1 without setting the allowDowngrade flag to true in the request."))
} else {
existingVersionRange.map(existing =>
if (update.maxVersionLevel == existing.max) {
// Disallow a case where target maxVersionLevel matches existing maxVersionLevel.
Right(new ApiError(Errors.INVALID_REQUEST,
s"Can not ${if (update.allowDowngrade) "downgrade" else "upgrade"}" +
s" a finalized feature from existing maxVersionLevel:${existing.max}" +
" to the same value."))
} else if (update.maxVersionLevel < existing.max && !update.allowDowngrade) {
// Disallow downgrade of a finalized feature without the allowDowngrade flag set.
Right(new ApiError(Errors.INVALID_REQUEST,
s"Can not downgrade finalized feature from existing" +
s" maxVersionLevel:${existing.max} to provided" +
s" maxVersionLevel:${update.maxVersionLevel} without setting the" +
" allowDowngrade flag in the request."))
} else if (update.allowDowngrade && update.maxVersionLevel > existing.max) {
// Disallow a request that sets allowDowngrade flag without specifying a
// maxVersionLevel that's lower than the existing maxVersionLevel.
Right(new ApiError(Errors.INVALID_REQUEST,
s"When the allowDowngrade flag set in the request, the provided" +
s" maxVersionLevel:${update.maxVersionLevel} can not be greater than" +
s" existing maxVersionLevel:${existing.max}."))
} else if (update.maxVersionLevel < existing.min) {
// Disallow downgrade of a finalized feature below the existing finalized
// minVersionLevel.
Right(new ApiError(Errors.INVALID_REQUEST,
s"Can not downgrade finalized feature to maxVersionLevel:${update.maxVersionLevel}" +
s" because it's lower than the existing minVersionLevel:${existing.min}."))
} else {
newVersionRangeOrError(update)
}
).getOrElse(newVersionRangeOrError(update))
}
}
}
private def processFeatureUpdates(request: UpdateFeaturesRequest,
callback: UpdateFeaturesCallback): Unit = {
if (isActive) {
processFeatureUpdatesWithActiveController(request, callback)
} else {
callback(Left(new ApiError(Errors.NOT_CONTROLLER)))
}
}
private def processFeatureUpdatesWithActiveController(request: UpdateFeaturesRequest,
callback: UpdateFeaturesCallback): Unit = {
val updates = request.data.featureUpdates
val existingFeatures = featureCache.get
.map(featuresAndEpoch => featuresAndEpoch.features.features().asScala)
.getOrElse(Map[String, FinalizedVersionRange]())
// A map with key being feature name and value being FinalizedVersionRange.
// This contains the target features to be eventually written to FeatureZNode.
val targetFeatures = scala.collection.mutable.Map[String, FinalizedVersionRange]() ++ existingFeatures
// A map with key being feature name and value being error encountered when the FeatureUpdate
// was applied.
val errors = scala.collection.mutable.Map[String, ApiError]()
// Below we process each FeatureUpdate using the following logic:
// - If a FeatureUpdate is found to be valid, then:
// - The corresponding entry in errors map would be updated to contain Errors.NONE.
// - If the FeatureUpdate is an add or update request, then the targetFeatures map is updated
// to contain the new FinalizedVersionRange for the feature.
// - Otherwise if the FeatureUpdate is a delete request, then the feature is removed from the
// targetFeatures map.
// - Otherwise if a FeatureUpdate is found to be invalid, then:
// - The corresponding entry in errors map would be updated with the appropriate ApiError.
// - The entry in targetFeatures map is left untouched.
updates.asScala.iterator.foreach { update =>
validateFeatureUpdate(update, existingFeatures.get(update.feature())) match {
case Left(newVersionRangeOrNone) =>
newVersionRangeOrNone match {
case Some(newVersionRange) => targetFeatures += (update.feature() -> newVersionRange)
case None => targetFeatures -= update.feature()
}
errors += (update.feature() -> new ApiError(Errors.NONE))
case Right(featureUpdateFailureReason) =>
errors += (update.feature() -> featureUpdateFailureReason)
}
}
// If the existing and target features are the same, then, we skip the update to the
// FeatureZNode as no changes to the node are required. Otherwise, we replace the contents
// of the FeatureZNode with the new features. This may result in partial or full modification
// of the existing finalized features in ZK.
try {
if (!existingFeatures.equals(targetFeatures)) {
val newNode = new FeatureZNode(FeatureZNodeStatus.Enabled, Features.finalizedFeatures(targetFeatures.asJava))
val newVersion = updateFeatureZNode(newNode)
featureCache.waitUntilEpochOrThrow(newVersion, request.data().timeoutMs())
}
} catch {
// For all features that correspond to valid FeatureUpdate (i.e. error is Errors.NONE),
// we set the error as Errors.FEATURE_UPDATE_FAILED since the FeatureZNode update has failed
// for these. For the rest, the existing error is left untouched.
case e: Exception =>
warn(s"Processing of feature updates: $request failed due to error: $e")
errors.foreach { case (feature, apiError) =>
if (apiError.error() == Errors.NONE) {
errors(feature) = new ApiError(Errors.FEATURE_UPDATE_FAILED)
}
}
} finally {
callback(Right(errors))
}
}
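  /**
   * Handles batched ISR change notifications from ZooKeeper: refreshes the leader/ISR cache for the
   * affected partitions, propagates the updated metadata to all live brokers, completes any pending
   * reassignments that are now satisfied, and finally deletes the processed notification znodes.
   */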
private def processIsrChangeNotification(): Unit = {
def processUpdateNotifications(partitions: Seq[TopicPartition]): Unit = {
val liveBrokers: Seq[Int] = controllerContext.liveOrShuttingDownBrokerIds.toSeq
debug(s"Sending MetadataRequest to Brokers: $liveBrokers for TopicPartitions: $partitions")
sendUpdateMetadataRequest(liveBrokers, partitions.toSet)
}
if (!isActive) return
val sequenceNumbers = zkClient.getAllIsrChangeNotifications
try {
val partitions = zkClient.getPartitionsFromIsrChangeNotifications(sequenceNumbers)
if (partitions.nonEmpty) {
updateLeaderAndIsrCache(partitions)
processUpdateNotifications(partitions)
// During a partial upgrade, the controller may be on an IBP which assumes
// ISR changes through the `AlterIsr` API while some brokers are on an older
// IBP which assumes notification through Zookeeper. In this case, since the
// controller will not have registered watches for reassigning partitions, we
// can still rely on the batch ISR change notification path in order to
// complete the reassignment.
partitions.filter(controllerContext.partitionsBeingReassigned.contains).foreach { topicPartition =>
maybeCompleteReassignment(topicPartition)
}
}
} finally {
// delete the notifications
zkClient.deleteIsrChangeNotifications(sequenceNumbers, controllerContext.epochZkVersion)
}
}
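  /**
   * Enqueues an admin-client-triggered replica leader election of the given type for the given
   * partitions. The per-partition results are delivered asynchronously through the callback.
   */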
def electLeaders(
partitions: Set[TopicPartition],
electionType: ElectionType,
callback: ElectLeadersCallback
): Unit = {
eventManager.put(ReplicaLeaderElection(Some(partitions), electionType, AdminClientTriggered, callback))
}
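  /**
   * Enqueues a request to list the partition reassignments currently in progress, optionally
   * restricted to the given partitions. The result is delivered asynchronously through the callback.
   */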
def listPartitionReassignments(partitions: Option[Set[TopicPartition]],
callback: ListReassignmentsCallback): Unit = {
eventManager.put(ListPartitionReassignments(partitions, callback))
}
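  /**
   * Enqueues an UpdateFeatures request for processing on the controller event thread. The
   * per-feature results (or a top-level error) are delivered asynchronously through the callback.
   */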
def updateFeatures(request: UpdateFeaturesRequest,
callback: UpdateFeaturesCallback): Unit = {
eventManager.put(UpdateFeatures(request, callback))
}
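  /**
   * Enqueues an API-triggered partition reassignment. A value of None for a partition reverts it to
   * its original replicas, cancelling any reassignment in progress. Results arrive via the callback.
   */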
def alterPartitionReassignments(partitions: Map[TopicPartition, Option[Seq[Int]]],
callback: AlterReassignmentsCallback): Unit = {
eventManager.put(ApiPartitionReassignment(partitions, callback))
}
private def processReplicaLeaderElection(
partitionsFromAdminClientOpt: Option[Set[TopicPartition]],
electionType: ElectionType,
electionTrigger: ElectionTrigger,
callback: ElectLeadersCallback
): Unit = {
if (!isActive) {
callback(partitionsFromAdminClientOpt.fold(Map.empty[TopicPartition, Either[ApiError, Int]]) { partitions =>
partitions.iterator.map(partition => partition -> Left(new ApiError(Errors.NOT_CONTROLLER, null))).toMap
})
} else {
      // Register the watcher if the path doesn't exist so that future preferred replica leader
      // elections are detected; registering it also gives us the `path exists` check for free.
if (electionTrigger == AdminClientTriggered || zkClient.registerZNodeChangeHandlerAndCheckExistence(preferredReplicaElectionHandler)) {
val partitions = partitionsFromAdminClientOpt match {
case Some(partitions) => partitions
case None => zkClient.getPreferredReplicaElection
}
val allPartitions = controllerContext.allPartitions
val (knownPartitions, unknownPartitions) = partitions.partition(tp => allPartitions.contains(tp))
unknownPartitions.foreach { p =>
info(s"Skipping replica leader election ($electionType) for partition $p by $electionTrigger since it doesn't exist.")
}
val (partitionsBeingDeleted, livePartitions) = knownPartitions.partition(partition =>
topicDeletionManager.isTopicQueuedUpForDeletion(partition.topic))
if (partitionsBeingDeleted.nonEmpty) {
warn(s"Skipping replica leader election ($electionType) for partitions $partitionsBeingDeleted " +
s"by $electionTrigger since the respective topics are being deleted")
}
        // Split into partitions that need a leader election and those that already have a valid leader
val (electablePartitions, alreadyValidLeader) = livePartitions.partition { partition =>
electionType match {
case ElectionType.PREFERRED =>
val assignedReplicas = controllerContext.partitionReplicaAssignment(partition)
val preferredReplica = assignedReplicas.head
val currentLeader = controllerContext.partitionLeadershipInfo(partition).get.leaderAndIsr.leader
currentLeader != preferredReplica
case ElectionType.UNCLEAN =>
val currentLeader = controllerContext.partitionLeadershipInfo(partition).get.leaderAndIsr.leader
currentLeader == LeaderAndIsr.NoLeader || !controllerContext.liveBrokerIds.contains(currentLeader)
}
}
val results = onReplicaElection(electablePartitions, electionType, electionTrigger).map {
case (k, Left(ex)) =>
if (ex.isInstanceOf[StateChangeFailedException]) {
val error = if (electionType == ElectionType.PREFERRED) {
Errors.PREFERRED_LEADER_NOT_AVAILABLE
} else {
Errors.ELIGIBLE_LEADERS_NOT_AVAILABLE
}
k -> Left(new ApiError(error, ex.getMessage))
} else {
k -> Left(ApiError.fromThrowable(ex))
}
case (k, Right(leaderAndIsr)) => k -> Right(leaderAndIsr.leader)
} ++
alreadyValidLeader.map(_ -> Left(new ApiError(Errors.ELECTION_NOT_NEEDED))) ++
partitionsBeingDeleted.map(
_ -> Left(new ApiError(Errors.INVALID_TOPIC_EXCEPTION, "The topic is being deleted"))
) ++
unknownPartitions.map(
_ -> Left(new ApiError(Errors.UNKNOWN_TOPIC_OR_PARTITION, "The partition does not exist."))
)
debug(s"Waiting for any successful result for election type ($electionType) by $electionTrigger for partitions: $results")
callback(results)
}
}
}
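  /**
   * Translates an AlterIsr request into per-partition LeaderAndIsr updates and enqueues them for
   * processing on the controller event thread. The per-partition results (or a top-level error) are
   * converted into an AlterIsrResponseData and passed to the callback.
   */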
def alterIsrs(alterIsrRequest: AlterIsrRequestData, callback: AlterIsrResponseData => Unit): Unit = {
val isrsToAlter = mutable.Map[TopicPartition, LeaderAndIsr]()
alterIsrRequest.topics.forEach { topicReq =>
topicReq.partitions.forEach { partitionReq =>
val tp = new TopicPartition(topicReq.name, partitionReq.partitionIndex)
val newIsr = partitionReq.newIsr().asScala.toList.map(_.toInt)
isrsToAlter.put(tp, new LeaderAndIsr(alterIsrRequest.brokerId, partitionReq.leaderEpoch, newIsr, partitionReq.currentIsrVersion))
}
}
def responseCallback(results: Either[Map[TopicPartition, Either[Errors, LeaderAndIsr]], Errors]): Unit = {
val resp = new AlterIsrResponseData()
results match {
case Right(error) =>
resp.setErrorCode(error.code)
case Left(partitionResults) =>
resp.setTopics(new util.ArrayList())
partitionResults
.groupBy { case (tp, _) => tp.topic } // Group by topic
.foreach { case (topic, partitions) =>
// Add each topic part to the response
val topicResp = new AlterIsrResponseData.TopicData()
.setName(topic)
.setPartitions(new util.ArrayList())
resp.topics.add(topicResp)
partitions.foreach { case (tp, errorOrIsr) =>
// Add each partition part to the response (new ISR or error)
errorOrIsr match {
case Left(error) => topicResp.partitions.add(
new AlterIsrResponseData.PartitionData()
.setPartitionIndex(tp.partition)
.setErrorCode(error.code))
case Right(leaderAndIsr) => topicResp.partitions.add(
new AlterIsrResponseData.PartitionData()
.setPartitionIndex(tp.partition)
.setLeaderId(leaderAndIsr.leader)
.setLeaderEpoch(leaderAndIsr.leaderEpoch)
.setIsr(leaderAndIsr.isr.map(Integer.valueOf).asJava)
.setCurrentIsrVersion(leaderAndIsr.zkVersion))
}
}
}
}
callback.apply(resp)
}
eventManager.put(AlterIsrReceived(alterIsrRequest.brokerId, alterIsrRequest.brokerEpoch, isrsToAlter, responseCallback))
}
private def processAlterIsr(brokerId: Int, brokerEpoch: Long,
isrsToAlter: Map[TopicPartition, LeaderAndIsr],
callback: AlterIsrCallback): Unit = {
// Handle a few short-circuits
if (!isActive) {
callback.apply(Right(Errors.NOT_CONTROLLER))
return
}
val brokerEpochOpt = controllerContext.liveBrokerIdAndEpochs.get(brokerId)
if (brokerEpochOpt.isEmpty) {
info(s"Ignoring AlterIsr due to unknown broker $brokerId")
callback.apply(Right(Errors.STALE_BROKER_EPOCH))
return
}
if (!brokerEpochOpt.contains(brokerEpoch)) {
info(s"Ignoring AlterIsr due to stale broker epoch $brokerEpoch and local broker epoch $brokerEpochOpt for broker $brokerId")
callback.apply(Right(Errors.STALE_BROKER_EPOCH))
return
}
val response = try {
val partitionResponses = mutable.HashMap[TopicPartition, Either[Errors, LeaderAndIsr]]()
// Determine which partitions we will accept the new ISR for
val adjustedIsrs: Map[TopicPartition, LeaderAndIsr] = isrsToAlter.flatMap {
case (tp: TopicPartition, newLeaderAndIsr: LeaderAndIsr) =>
controllerContext.partitionLeadershipInfo(tp) match {
case Some(leaderIsrAndControllerEpoch) =>
val currentLeaderAndIsr = leaderIsrAndControllerEpoch.leaderAndIsr
if (newLeaderAndIsr.leaderEpoch < currentLeaderAndIsr.leaderEpoch) {
partitionResponses(tp) = Left(Errors.FENCED_LEADER_EPOCH)
None
} else if (newLeaderAndIsr.equalsIgnoreZk(currentLeaderAndIsr)) {
// If a partition is already in the desired state, just return it
partitionResponses(tp) = Right(currentLeaderAndIsr)
None
} else {
Some(tp -> newLeaderAndIsr)
}
case None =>
partitionResponses(tp) = Left(Errors.UNKNOWN_TOPIC_OR_PARTITION)
None
}
}
// Do the updates in ZK
debug(s"Updating ISRs for partitions: ${adjustedIsrs.keySet}.")
val UpdateLeaderAndIsrResult(finishedUpdates, badVersionUpdates) = zkClient.updateLeaderAndIsr(
adjustedIsrs, controllerContext.epoch, controllerContext.epochZkVersion)
val successfulUpdates: Map[TopicPartition, LeaderAndIsr] = finishedUpdates.flatMap {
case (partition: TopicPartition, isrOrError: Either[Throwable, LeaderAndIsr]) =>
isrOrError match {
case Right(updatedIsr) =>
debug(s"ISR for partition $partition updated to [${updatedIsr.isr.mkString(",")}] and zkVersion updated to [${updatedIsr.zkVersion}]")
partitionResponses(partition) = Right(updatedIsr)
Some(partition -> updatedIsr)
case Left(error) =>
warn(s"Failed to update ISR for partition $partition", error)
partitionResponses(partition) = Left(Errors.forException(error))
None
}
}
badVersionUpdates.foreach(partition => {
debug(s"Failed to update ISR for partition $partition, bad ZK version")
partitionResponses(partition) = Left(Errors.INVALID_UPDATE_VERSION)
})
def processUpdateNotifications(partitions: Seq[TopicPartition]): Unit = {
val liveBrokers: Seq[Int] = controllerContext.liveOrShuttingDownBrokerIds.toSeq
sendUpdateMetadataRequest(liveBrokers, partitions.toSet)
}
// Update our cache and send out metadata updates
updateLeaderAndIsrCache(successfulUpdates.keys.toSeq)
processUpdateNotifications(isrsToAlter.keys.toSeq)
Left(partitionResponses)
} catch {
case e: Throwable =>
error(s"Error when processing AlterIsr for partitions: ${isrsToAlter.keys.toSeq}", e)
Right(Errors.UNKNOWN_SERVER_ERROR)
}
callback.apply(response)
// After we have returned the result of the `AlterIsr` request, we should check whether
// there are any reassignments which can be completed by a successful ISR expansion.
response.left.foreach { alterIsrResponses =>
alterIsrResponses.forKeyValue { (topicPartition, partitionResponse) =>
if (controllerContext.partitionsBeingReassigned.contains(topicPartition)) {
val isSuccessfulUpdate = partitionResponse.isRight
if (isSuccessfulUpdate) {
maybeCompleteReassignment(topicPartition)
}
}
}
}
}
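  /**
   * Enqueues a request to allocate a new block of producer ids for the given broker. The resulting
   * block (or error) is converted into an AllocateProducerIdsResponseData and passed to the callback.
   */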
def allocateProducerIds(allocateProducerIdsRequest: AllocateProducerIdsRequestData,
callback: AllocateProducerIdsResponseData => Unit): Unit = {
def eventManagerCallback(results: Either[Errors, ProducerIdsBlock]): Unit = {
results match {
case Left(error) => callback.apply(new AllocateProducerIdsResponseData().setErrorCode(error.code))
case Right(pidBlock) => callback.apply(
new AllocateProducerIdsResponseData()
.setProducerIdStart(pidBlock.producerIdStart())
.setProducerIdLen(pidBlock.producerIdLen()))
}
}
eventManager.put(AllocateProducerIds(allocateProducerIdsRequest.brokerId,
allocateProducerIdsRequest.brokerEpoch, eventManagerCallback))
}
def processAllocateProducerIds(brokerId: Int, brokerEpoch: Long, callback: Either[Errors, ProducerIdsBlock] => Unit): Unit = {
// Handle a few short-circuits
if (!isActive) {
callback.apply(Left(Errors.NOT_CONTROLLER))
return
}
val brokerEpochOpt = controllerContext.liveBrokerIdAndEpochs.get(brokerId)
if (brokerEpochOpt.isEmpty) {
warn(s"Ignoring AllocateProducerIds due to unknown broker $brokerId")
callback.apply(Left(Errors.BROKER_ID_NOT_REGISTERED))
return
}
if (!brokerEpochOpt.contains(brokerEpoch)) {
warn(s"Ignoring AllocateProducerIds due to stale broker epoch $brokerEpoch for broker $brokerId")
callback.apply(Left(Errors.STALE_BROKER_EPOCH))
return
}
val maybeNewProducerIdsBlock = try {
Try(ZkProducerIdManager.getNewProducerIdBlock(brokerId, zkClient, this))
} catch {
case ke: KafkaException => Failure(ke)
}
maybeNewProducerIdsBlock match {
case Failure(exception) => callback.apply(Left(Errors.forException(exception)))
case Success(newProducerIdBlock) => callback.apply(Right(newProducerIdBlock))
}
}
private def processControllerChange(): Unit = {
maybeResign()
}
private def processReelect(): Unit = {
maybeResign()
elect()
}
private def processRegisterBrokerAndReelect(): Unit = {
_brokerEpoch = zkClient.registerBroker(brokerInfo)
processReelect()
}
private def processExpire(): Unit = {
activeControllerId = -1
onControllerResignation()
}
override def process(event: ControllerEvent): Unit = {
try {
event match {
case event: MockEvent =>
// Used only in test cases
event.process()
case ShutdownEventThread =>
          error("Received a ShutdownEventThread event. This type of event is supposed to be handled by the ControllerEventThread")
case AutoPreferredReplicaLeaderElection =>
processAutoPreferredReplicaLeaderElection()
case ReplicaLeaderElection(partitions, electionType, electionTrigger, callback) =>
processReplicaLeaderElection(partitions, electionType, electionTrigger, callback)
case UncleanLeaderElectionEnable =>
processUncleanLeaderElectionEnable()
case TopicUncleanLeaderElectionEnable(topic) =>
processTopicUncleanLeaderElectionEnable(topic)
case ControlledShutdown(id, brokerEpoch, callback) =>
processControlledShutdown(id, brokerEpoch, callback)
case LeaderAndIsrResponseReceived(response, brokerId) =>
processLeaderAndIsrResponseReceived(response, brokerId)
case UpdateMetadataResponseReceived(response, brokerId) =>
processUpdateMetadataResponseReceived(response, brokerId)
case TopicDeletionStopReplicaResponseReceived(replicaId, requestError, partitionErrors) =>
processTopicDeletionStopReplicaResponseReceived(replicaId, requestError, partitionErrors)
case BrokerChange =>
processBrokerChange()
case BrokerModifications(brokerId) =>
processBrokerModification(brokerId)
case ControllerChange =>
processControllerChange()
case Reelect =>
processReelect()
case RegisterBrokerAndReelect =>
processRegisterBrokerAndReelect()
case Expire =>
processExpire()
case TopicChange =>
processTopicChange()
case LogDirEventNotification =>
processLogDirEventNotification()
case PartitionModifications(topic) =>
processPartitionModifications(topic)
case TopicDeletion =>
processTopicDeletion()
case ApiPartitionReassignment(reassignments, callback) =>
processApiPartitionReassignment(reassignments, callback)
case ZkPartitionReassignment =>
processZkPartitionReassignment()
case ListPartitionReassignments(partitions, callback) =>
processListPartitionReassignments(partitions, callback)
case UpdateFeatures(request, callback) =>
processFeatureUpdates(request, callback)
case PartitionReassignmentIsrChange(partition) =>
processPartitionReassignmentIsrChange(partition)
case IsrChangeNotification =>
processIsrChangeNotification()
case AlterIsrReceived(brokerId, brokerEpoch, isrsToAlter, callback) =>
processAlterIsr(brokerId, brokerEpoch, isrsToAlter, callback)
case AllocateProducerIds(brokerId, brokerEpoch, callback) =>
processAllocateProducerIds(brokerId, brokerEpoch, callback)
case Startup =>
processStartup()
}
} catch {
case e: ControllerMovedException =>
info(s"Controller moved to another broker when processing $event.", e)
maybeResign()
case e: Throwable =>
error(s"Error processing event $event", e)
} finally {
updateMetrics()
}
}
override def preempt(event: ControllerEvent): Unit = {
event.preempt()
}
}
class BrokerChangeHandler(eventManager: ControllerEventManager) extends ZNodeChildChangeHandler {
override val path: String = BrokerIdsZNode.path
override def handleChildChange(): Unit = {
eventManager.put(BrokerChange)
}
}
class BrokerModificationsHandler(eventManager: ControllerEventManager, brokerId: Int) extends ZNodeChangeHandler {
override val path: String = BrokerIdZNode.path(brokerId)
override def handleDataChange(): Unit = {
eventManager.put(BrokerModifications(brokerId))
}
}
class TopicChangeHandler(eventManager: ControllerEventManager) extends ZNodeChildChangeHandler {
override val path: String = TopicsZNode.path
override def handleChildChange(): Unit = eventManager.put(TopicChange)
}
class LogDirEventNotificationHandler(eventManager: ControllerEventManager) extends ZNodeChildChangeHandler {
override val path: String = LogDirEventNotificationZNode.path
override def handleChildChange(): Unit = eventManager.put(LogDirEventNotification)
}
object LogDirEventNotificationHandler {
val Version: Long = 1L
}
class PartitionModificationsHandler(eventManager: ControllerEventManager, topic: String) extends ZNodeChangeHandler {
override val path: String = TopicZNode.path(topic)
override def handleDataChange(): Unit = eventManager.put(PartitionModifications(topic))
}
class TopicDeletionHandler(eventManager: ControllerEventManager) extends ZNodeChildChangeHandler {
override val path: String = DeleteTopicsZNode.path
override def handleChildChange(): Unit = eventManager.put(TopicDeletion)
}
class PartitionReassignmentHandler(eventManager: ControllerEventManager) extends ZNodeChangeHandler {
override val path: String = ReassignPartitionsZNode.path
// Note that the event is also enqueued when the znode is deleted, but we do it explicitly instead of relying on
// handleDeletion(). This approach is more robust as it doesn't depend on the watcher being re-registered after
// it's consumed during data changes (we ensure re-registration when the znode is deleted).
override def handleCreation(): Unit = eventManager.put(ZkPartitionReassignment)
}
class PartitionReassignmentIsrChangeHandler(eventManager: ControllerEventManager, partition: TopicPartition) extends ZNodeChangeHandler {
override val path: String = TopicPartitionStateZNode.path(partition)
override def handleDataChange(): Unit = eventManager.put(PartitionReassignmentIsrChange(partition))
}
class IsrChangeNotificationHandler(eventManager: ControllerEventManager) extends ZNodeChildChangeHandler {
override val path: String = IsrChangeNotificationZNode.path
override def handleChildChange(): Unit = eventManager.put(IsrChangeNotification)
}
object IsrChangeNotificationHandler {
val Version: Long = 1L
}
class PreferredReplicaElectionHandler(eventManager: ControllerEventManager) extends ZNodeChangeHandler {
override val path: String = PreferredReplicaElectionZNode.path
override def handleCreation(): Unit = eventManager.put(ReplicaLeaderElection(None, ElectionType.PREFERRED, ZkTriggered))
}
class ControllerChangeHandler(eventManager: ControllerEventManager) extends ZNodeChangeHandler {
override val path: String = ControllerZNode.path
override def handleCreation(): Unit = eventManager.put(ControllerChange)
override def handleDeletion(): Unit = eventManager.put(Reelect)
override def handleDataChange(): Unit = eventManager.put(ControllerChange)
}
case class PartitionAndReplica(topicPartition: TopicPartition, replica: Int) {
def topic: String = topicPartition.topic
def partition: Int = topicPartition.partition
override def toString: String = {
s"[Topic=$topic,Partition=$partition,Replica=$replica]"
}
}
case class LeaderIsrAndControllerEpoch(leaderAndIsr: LeaderAndIsr, controllerEpoch: Int) {
override def toString: String = {
val leaderAndIsrInfo = new StringBuilder
leaderAndIsrInfo.append("(Leader:" + leaderAndIsr.leader)
leaderAndIsrInfo.append(",ISR:" + leaderAndIsr.isr.mkString(","))
leaderAndIsrInfo.append(",LeaderEpoch:" + leaderAndIsr.leaderEpoch)
leaderAndIsrInfo.append(",ControllerEpoch:" + controllerEpoch + ")")
leaderAndIsrInfo.toString()
}
}
private[controller] class ControllerStats extends KafkaMetricsGroup {
val uncleanLeaderElectionRate = newMeter("UncleanLeaderElectionsPerSec", "elections", TimeUnit.SECONDS)
val rateAndTimeMetrics: Map[ControllerState, KafkaTimer] = ControllerState.values.flatMap { state =>
state.rateAndTimeMetricName.map { metricName =>
state -> new KafkaTimer(newTimer(metricName, TimeUnit.MILLISECONDS, TimeUnit.SECONDS))
}
}.toMap
}
sealed trait ControllerEvent {
def state: ControllerState
// preempt() is not executed by `ControllerEventThread` but by the main thread.
def preempt(): Unit
}
case object ControllerChange extends ControllerEvent {
override def state: ControllerState = ControllerState.ControllerChange
override def preempt(): Unit = {}
}
case object Reelect extends ControllerEvent {
override def state: ControllerState = ControllerState.ControllerChange
override def preempt(): Unit = {}
}
case object RegisterBrokerAndReelect extends ControllerEvent {
override def state: ControllerState = ControllerState.ControllerChange
override def preempt(): Unit = {}
}
case object Expire extends ControllerEvent {
override def state: ControllerState = ControllerState.ControllerChange
override def preempt(): Unit = {}
}
case object ShutdownEventThread extends ControllerEvent {
override def state: ControllerState = ControllerState.ControllerShutdown
override def preempt(): Unit = {}
}
case object AutoPreferredReplicaLeaderElection extends ControllerEvent {
override def state: ControllerState = ControllerState.AutoLeaderBalance
override def preempt(): Unit = {}
}
case object UncleanLeaderElectionEnable extends ControllerEvent {
override def state: ControllerState = ControllerState.UncleanLeaderElectionEnable
override def preempt(): Unit = {}
}
case class TopicUncleanLeaderElectionEnable(topic: String) extends ControllerEvent {
override def state: ControllerState = ControllerState.TopicUncleanLeaderElectionEnable
override def preempt(): Unit = {}
}
case class ControlledShutdown(id: Int, brokerEpoch: Long, controlledShutdownCallback: Try[Set[TopicPartition]] => Unit) extends ControllerEvent {
override def state: ControllerState = ControllerState.ControlledShutdown
override def preempt(): Unit = controlledShutdownCallback(Failure(new ControllerMovedException("Controller moved to another broker")))
}
case class LeaderAndIsrResponseReceived(leaderAndIsrResponse: LeaderAndIsrResponse, brokerId: Int) extends ControllerEvent {
override def state: ControllerState = ControllerState.LeaderAndIsrResponseReceived
override def preempt(): Unit = {}
}
case class UpdateMetadataResponseReceived(updateMetadataResponse: UpdateMetadataResponse, brokerId: Int) extends ControllerEvent {
override def state: ControllerState = ControllerState.UpdateMetadataResponseReceived
override def preempt(): Unit = {}
}
case class TopicDeletionStopReplicaResponseReceived(replicaId: Int,
requestError: Errors,
partitionErrors: Map[TopicPartition, Errors]) extends ControllerEvent {
override def state: ControllerState = ControllerState.TopicDeletion
override def preempt(): Unit = {}
}
case object Startup extends ControllerEvent {
override def state: ControllerState = ControllerState.ControllerChange
override def preempt(): Unit = {}
}
case object BrokerChange extends ControllerEvent {
override def state: ControllerState = ControllerState.BrokerChange
override def preempt(): Unit = {}
}
case class BrokerModifications(brokerId: Int) extends ControllerEvent {
override def state: ControllerState = ControllerState.BrokerChange
override def preempt(): Unit = {}
}
case object TopicChange extends ControllerEvent {
override def state: ControllerState = ControllerState.TopicChange
override def preempt(): Unit = {}
}
case object LogDirEventNotification extends ControllerEvent {
override def state: ControllerState = ControllerState.LogDirChange
override def preempt(): Unit = {}
}
case class PartitionModifications(topic: String) extends ControllerEvent {
override def state: ControllerState = ControllerState.TopicChange
override def preempt(): Unit = {}
}
case object TopicDeletion extends ControllerEvent {
override def state: ControllerState = ControllerState.TopicDeletion
override def preempt(): Unit = {}
}
case object ZkPartitionReassignment extends ControllerEvent {
override def state: ControllerState = ControllerState.AlterPartitionReassignment
override def preempt(): Unit = {}
}
case class ApiPartitionReassignment(reassignments: Map[TopicPartition, Option[Seq[Int]]],
callback: AlterReassignmentsCallback) extends ControllerEvent {
override def state: ControllerState = ControllerState.AlterPartitionReassignment
override def preempt(): Unit = callback(Right(new ApiError(Errors.NOT_CONTROLLER)))
}
case class PartitionReassignmentIsrChange(partition: TopicPartition) extends ControllerEvent {
override def state: ControllerState = ControllerState.AlterPartitionReassignment
override def preempt(): Unit = {}
}
case object IsrChangeNotification extends ControllerEvent {
override def state: ControllerState = ControllerState.IsrChange
override def preempt(): Unit = {}
}
case class AlterIsrReceived(brokerId: Int, brokerEpoch: Long, isrsToAlter: Map[TopicPartition, LeaderAndIsr],
callback: AlterIsrCallback) extends ControllerEvent {
override def state: ControllerState = ControllerState.IsrChange
override def preempt(): Unit = {}
}
case class ReplicaLeaderElection(
partitionsFromAdminClientOpt: Option[Set[TopicPartition]],
electionType: ElectionType,
electionTrigger: ElectionTrigger,
callback: ElectLeadersCallback = _ => {}
) extends ControllerEvent {
override def state: ControllerState = ControllerState.ManualLeaderBalance
override def preempt(): Unit = callback(
partitionsFromAdminClientOpt.fold(Map.empty[TopicPartition, Either[ApiError, Int]]) { partitions =>
partitions.iterator.map(partition => partition -> Left(new ApiError(Errors.NOT_CONTROLLER, null))).toMap
}
)
}
/**
 * @param partitionsOpt an optional set of partitions; if not present, all partitions currently being reassigned are listed
*/
case class ListPartitionReassignments(partitionsOpt: Option[Set[TopicPartition]],
callback: ListReassignmentsCallback) extends ControllerEvent {
override def state: ControllerState = ControllerState.ListPartitionReassignment
override def preempt(): Unit = callback(Right(new ApiError(Errors.NOT_CONTROLLER, null)))
}
case class UpdateFeatures(request: UpdateFeaturesRequest,
callback: UpdateFeaturesCallback) extends ControllerEvent {
override def state: ControllerState = ControllerState.UpdateFeatures
override def preempt(): Unit = {}
}
case class AllocateProducerIds(brokerId: Int, brokerEpoch: Long, callback: Either[Errors, ProducerIdsBlock] => Unit)
extends ControllerEvent {
override def state: ControllerState = ControllerState.Idle
override def preempt(): Unit = {}
}
// Used only in test cases
abstract class MockEvent(val state: ControllerState) extends ControllerEvent {
def process(): Unit
def preempt(): Unit
}
| lindong28/kafka | core/src/main/scala/kafka/controller/KafkaController.scala | Scala | apache-2.0 | 142,541 |
package dk.bayes.math.linear
import breeze.linalg.DenseMatrix
import breeze.linalg.inv
object invchol {
  /**
   * Computes the inverse of A given a factor of its Cholesky decomposition.
   *
   * @param R a factor such that R' * R = A (e.g. the upper-triangular Cholesky factor)
   * @return inv(A), computed as inv(R) * inv(R)'
   */
def apply(R:DenseMatrix[Double]):DenseMatrix[Double] = {
val Rinv = inv(R)
Rinv*Rinv.t
}
} | danielkorzekwa/bayes-scala | src/main/scala/dk/bayes/math/linear/invchol.scala | Scala | bsd-2-clause | 284 |
import sbt._
object Dependencies {
val akkaVersion = "2.3.9"
val sprayVersion = "1.3.2"
val osgiVersion = "5.0.0"
val typesafeConfig = "com.typesafe" % "config" % "1.2.1"
val osgiCore = "org.osgi" % "org.osgi.core" % osgiVersion
val osgiEnterprise = "org.osgi" % "org.osgi.enterprise" % osgiVersion
val akkaActor = "com.typesafe.akka" %% "akka-actor" % akkaVersion
val akkaOsgi = "com.typesafe.akka" %% "akka-osgi" % akkaVersion
val sprayCan = "io.spray" %% "spray-can" % sprayVersion
val sprayRouting = "io.spray" %% "spray-routing-shapeless2" % sprayVersion
val jacksonDatabind = "com.fasterxml.jackson.core" % "jackson-databind" % "2.4.2"
def compileDep (deps: ModuleID*): Seq[ModuleID] = deps map (_ % "compile")
def testDep (deps: ModuleID*): Seq[ModuleID] = deps map (_ % "test")
}
| rkrzewski/spray-osgi | project/Dependencies.scala | Scala | apache-2.0 | 1,046 |
package org.workcraft.plugins.fsm
import java.awt.geom.Point2D
import org.workcraft.dom.visual.connections.StaticVisualConnectionData
import scalaz.NonEmptyList
sealed trait Node
class State extends Node
class Arc(val from: State, val to: State) extends Node
case class FSM(states: NonEmptyList[State], arcs: List[Arc], finalStates: Set[State], initialState: State, labels: Map[State, String], arcLabels: Map[Arc, String]) {
lazy val names = labels.map(_.swap).toMap
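  // Outgoing transitions: for each state, the list of (target state, arc) pairs (empty by default).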
lazy val postset: Map[State, List[(State, Arc)]] =
arcs.foldLeft((Map[State, List[(State, Arc)]]().withDefault(_ => List()))) {
case (map, arc) => (map + (arc.from -> ((arc.to, arc) :: map(arc.from))))
}
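  // Incoming transitions: for each state, the list of (source state, arc) pairs (empty by default).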
lazy val preset: Map[State, List[(State, Arc)]] =
arcs.foldLeft((Map[State, List[(State, Arc)]]().withDefault(_ => List()))) {
case (map, arc) => (map + (arc.to -> ((arc.from, arc) :: map(arc.to))))
}
}
object FSM {
def Minimal = {
val st = new State
FSM(NonEmptyList(st), List(), Set(), st, Map(st -> "s0"), Map())
}
}
case class VisualFSM(fsm: FSM, layout: Map[State, Point2D.Double], visualArcs: Map[Arc, StaticVisualConnectionData])
object VisualFSM {
def Minimal = {
val fsm = FSM.Minimal
VisualFSM(fsm, Map(fsm.states.head -> new Point2D.Double(0, 0)), Map())
}
}
| mechkg/workcraft | FSMPlugin/src/main/scala/org/workcraft/plugins/fsm/FSM.scala | Scala | gpl-3.0 | 1,348 |
package com.cloudray.scalapress.util.mvc.interceptor
import org.springframework.web.servlet.handler.HandlerInterceptorAdapter
import javax.servlet.http.{HttpServletResponse, HttpServletRequest}
import org.springframework.web.servlet.ModelAndView
import com.cloudray.scalapress.framework.{ScalapressRequest, ScalapressContext}
/** @author Stephen Samuel */
class SiteInterceptor(context: ScalapressContext) extends HandlerInterceptorAdapter {
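  // After the handler has run, expose the current installation to the view under both the
  // "installation" and "site" model keys (skipped when no ModelAndView was produced).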
override def postHandle(request: HttpServletRequest,
response: HttpServletResponse,
handler: Any,
modelAndView: ModelAndView) {
if (modelAndView != null) {
val installation = ScalapressRequest(request, context).installation
modelAndView.getModelMap.put("installation", installation)
modelAndView.getModelMap.put("site", installation)
}
}
}
| vidyacraghav/scalapress | src/main/scala/com/cloudray/scalapress/util/mvc/interceptor/SiteInterceptor.scala | Scala | apache-2.0 | 892 |
package pl.touk.nussknacker.engine.component
import pl.touk.nussknacker.engine.api.component.ComponentType
import pl.touk.nussknacker.engine.api.component.NodeComponentInfo
import pl.touk.nussknacker.engine.compiledgraph.node._
import pl.touk.nussknacker.engine.graph.node.{NodeData, WithComponent}
// TODO this logic should be in one place with DefaultComponentIdProvider
object NodeComponentInfoExtractor {
def fromNode(node: Node): NodeComponentInfo = {
// warning: this logic should be kept synchronized with DefaultComponentIdProvider
    // TODO: Maybe compiledgraph.node should have a componentId field or a WithComponent trait, just like NodeData?
node match {
case source: Source => NodeComponentInfo(node.id, source.ref.getOrElse("source"), ComponentType.Source)
case sink: Sink => NodeComponentInfo(node.id, sink.ref, ComponentType.Sink)
case _: Filter => NodeComponentInfo.forBaseNode(node.id, ComponentType.Filter)
case _: SplitNode => NodeComponentInfo.forBaseNode(node.id, ComponentType.Split)
case _: Switch => NodeComponentInfo.forBaseNode(node.id, ComponentType.Switch)
case _: VariableBuilder => NodeComponentInfo.forBaseNode(node.id, ComponentType.Variable)
case CustomNode(_, _) | EndingCustomNode(_) => NodeComponentInfo(node.id, "customNode", ComponentType.CustomNode)
case enricher: Enricher => NodeComponentInfo(node.id, enricher.service.id, ComponentType.Enricher)
case processor: Processor => NodeComponentInfo(node.id, processor.service.id, ComponentType.Processor)
case endingProcessor: EndingProcessor => NodeComponentInfo(node.id, endingProcessor.service.id, ComponentType.Processor)
case _: SubprocessStart => NodeComponentInfo.forBaseNode(node.id, ComponentType.FragmentInput)
case _: SubprocessEnd => NodeComponentInfo.forBaseNode(node.id, ComponentType.FragmentOutput)
case _: BranchEnd => NodeComponentInfo(node.id, None)
}
}
def fromNodeData(nodeData: NodeData): NodeComponentInfo = {
val maybeComponentType = ComponentUtil.fromNodeData(nodeData)
(nodeData, maybeComponentType) match {
case (withComponent: WithComponent, Some(componentType)) => NodeComponentInfo(nodeData.id, withComponent.componentId, componentType)
case (_, Some(componentType)) if ComponentType.isBaseComponent(componentType) => NodeComponentInfo.forBaseNode(nodeData.id, componentType)
case _ => NodeComponentInfo(nodeData.id, None)
}
}
}
| TouK/nussknacker | interpreter/src/main/scala/pl/touk/nussknacker/engine/component/NodeComponentInfoExtractor.scala | Scala | apache-2.0 | 2,475 |
package net.codejitsu.tasks.dsl
import org.scalatest.{FlatSpec, Matchers}
class PipeToTest extends FlatSpec with Matchers {
import net.codejitsu.tasks._
import net.codejitsu.tasks.dsl.Tasks._
import scala.concurrent.duration._
implicit val timeout = 30 seconds
implicit val stage = new Dev
"pipeTo" should "feed the next task with output from current task" in {
val pathSh = getClass.getResource("/program-param.sh").getPath
val task = ShellScript(Localhost, pathSh, List("test")) pipeTo Echo(Localhost)
val taskResult = task.run()
taskResult.res.isSuccess should be (true)
taskResult.err should be (empty)
taskResult.out should be (List("start test program with param: test"))
}
}
| codejitsu/tasks | tasks-dsl/src/test/scala/PipeToTest.scala | Scala | apache-2.0 | 728 |
package com.zenaptix.dsl
case class Cqsf602w (cqnf602wStlnCommonDtls:Cqnf602wStlnCommonDtls, cqnf602StudentLoanDtl:Cqnf602StudentLoanDtl)
case class Cqnf602wStlnCommonDtls (cqnf602wKey:Cqnf602wKey, cqnf602wCifk:Cqnf602wCifk)
case class Cqnf602wKey (cqnf602wAcctNo:Long)
case class Cqnf602wCifk (cqnf602wCifkey:String)
case class Cqnf602StudentLoanDtl (stnof602:String, crsef602:String, normf602:Long, instf602:Long, acyrf602:Long, rpdtf602:Long, failf602:Long, compf602:String, repyf602:String, adfnf602:String, fultf602:String, pgrdf602:String, finlf602:String, spare:String)
| zenaptix-lab/copybookStreams | src/main/scala/com/zenaptix/dsl/Cqsf602w.scala | Scala | apache-2.0 | 577 |
package com.wavesplatform.utils.doc
import com.wavesplatform.lang.v1.compiler.Types._
import scala.jdk.CollectionConverters._
abstract class TypeDoc {
val name: String
val isUnion: Boolean = false
val isObj: Boolean = false
val isNative: Boolean = false
val haveParam: Boolean = false
val isComplex: Boolean = false
val needLink: Boolean = false
def noLink: Boolean = !needLink
val verticalLineSymbol = "|"
lazy val mdName: String = name.replace("|", verticalLineSymbol)
}
object TypeDoc {
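  // Converts a compiler TYPE into its documentation representation: unions with UNIT become
  // OptionOf, parameterized lists become ListOf, other parameterized unions become UnionDoc,
  // and anything unrecognised is rendered as an opaque complex type.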
def apply(t: TYPE): TypeDoc = t match {
case t: FINAL => typeRepr(t)()
case TYPEPARAM(char: Byte) => new TypeDoc { val name: String = char.toChar.toString; override val isComplex: Boolean = true }
case PARAMETERIZEDLIST(l) => ListOf(apply(l))
case PARAMETERIZEDUNION(Seq(UNIT, l)) => OptionOf(apply(l))
case PARAMETERIZEDUNION(Seq(l, UNIT)) => OptionOf(apply(l))
case PARAMETERIZEDUNION(l) => UnionDoc(t.toString, l.map(apply).asJava)
case t => new TypeDoc { val name: String = t.toString; override val isComplex: Boolean = true }
}
def typeRepr(t: FINAL)(name: String = t.name): TypeDoc = t match {
case UNION(Seq(UNIT, l), _) => OptionOf(typeRepr(l)())
case UNION(Seq(l, UNIT), _) => OptionOf(typeRepr(l)())
case UNION(l, _) => UnionDoc(name, l.map(t => typeRepr(t)()).asJava)
case CASETYPEREF(_, fields, _) =>
objDoc(name, fields.map(f => Field(f._1, typeRepr(f._2)())).asJava)
case LIST(t) => ListOf(typeRepr(t)())
case t => nativeTypeDoc(t.toString)
}
}
case class Field(name: String, `type`: TypeDoc)
case class UnionDoc(override val name: String, union: java.util.List[TypeDoc]) extends TypeDoc {
override val isUnion: Boolean = true
}
case class objDoc(override val name: String, fields: java.util.List[Field]) extends TypeDoc {
override val isObj: Boolean = true
override val needLink: Boolean = true
}
case class ListOf(param: TypeDoc)   extends TypeDoc { override val name: String = s"List[${param.name}]"; override val haveParam: Boolean = true }
case class OptionOf(param: TypeDoc) extends TypeDoc { override val name: String = s"${param.name}|Unit"; override val haveParam: Boolean = true }
case class nativeTypeDoc(override val name: String) extends TypeDoc {
override val isNative: Boolean = true
override val needLink: Boolean = true
}
case class VarDoc(private val nameRaw: String, `type`: TypeDoc, doc: String) {
val name = nameRaw.replace("@", "")
}
case class FuncDoc(name: String, `type`: TypeDoc, doc: String, params: java.util.List[VarDoc], cost: String)
case class TransactionDoc(name: String, fields: java.util.List[TransactionField])
case class TransactionField(absend: Boolean, `type`: java.util.List[TypeDoc])
case class FieldTypes(name: String, types: java.util.List[TransactionField])
case class CaseDoc(types: java.util.List[TransactionDoc], fields: java.util.List[FieldTypes])
case class Special(`class`: String, descr: CaseDoc)
case class Doc(
types: java.util.List[TypeDoc],
vars: java.util.List[VarDoc],
funcs: java.util.List[FuncDoc],
transactionDoc: java.util.List[TransactionDoc],
transactionFields: java.util.List[FieldTypes],
commonFields: CaseDoc,
specials: java.util.List[Special]
)
object Doc {
def apply(d: (Seq[TransactionDoc], Seq[FieldTypes])) = CaseDoc(d._1.asJava, d._2.asJava)
}
case class FuncDocV3(funcDoc: FuncDoc, index: Int) {
val name = funcDoc.name
val `type` = funcDoc.`type`
val doc = funcDoc.doc
val params = funcDoc.params
val cost = funcDoc.cost
val paramTypes = params.asScala.map(p => p.`type`.mdName).mkString(", ")
val paramArgTypes = params.asScala.map(p => s"${p.name}: ${p.`type`.name}").mkString(", ")
val anchor = name + params.asScala.map(_.`type`.mdName).mkString
}
case class CategorizedFuncsDoc(funcs: java.util.List[FuncDocV3], category: String)
| wavesplatform/Waves | lang/doc/src/main/scala/com/wavesplatform/utils/doc/MustacheDoc.scala | Scala | mit | 4,025 |
package com.github.sorhus.hmmongo.scalatra
import org.eclipse.jetty.server.Server
import org.eclipse.jetty.servlet.DefaultServlet
import org.eclipse.jetty.webapp.WebAppContext
import org.scalatra.servlet.ScalatraListener
/**
* args == {pi, A, B, T}
*/
object Main extends App {
val port = 8080
val server = new Server(port)
val context = new WebAppContext()
context.setContextPath("/")
context.setResourceBase("src/main/webapp")
context.addEventListener(new ScalatraListener)
context.addServlet(classOf[DefaultServlet], "/")
context.setAttribute("pi",args(0))
context.setAttribute("A",args(1))
context.setAttribute("B",args(2))
context.setAttribute("T",args(3))
server.setHandler(context)
server.start()
server.join()
}
| sorhus/hmmongo | scalatra/src/main/scala/com/github/sorhus/hmmongo/scalatra/Main.scala | Scala | gpl-2.0 | 752 |
val grid = List(
List(8, 2, 22, 97, 38, 15, 0, 40, 0, 75, 4, 5, 7, 78, 52, 12, 50, 77, 91, 8),
List(49, 49, 99, 40, 17, 81, 18, 57, 60, 87, 17, 40, 98, 43, 69, 48, 4, 56, 62, 0),
List(81, 49, 31, 73, 55, 79, 14, 29, 93, 71, 40, 67, 53, 88, 30, 3, 49, 13, 36, 65),
List(52, 70, 95, 23, 4, 60, 11, 42, 69, 24, 68, 56, 1, 32, 56, 71, 37, 2, 36, 91),
List(22, 31, 16, 71, 51, 67, 63, 89, 41, 92, 36, 54, 22, 40, 40, 28, 66, 33, 13, 80),
List(24, 47, 32, 60, 99, 3, 45, 2, 44, 75, 33, 53, 78, 36, 84, 20, 35, 17, 12, 50),
List(32, 98, 81, 28, 64, 23, 67, 10, 26, 38, 40, 67, 59, 54, 70, 66, 18, 38, 64, 70),
List(67, 26, 20, 68, 2, 62, 12, 20, 95, 63, 94, 39, 63, 8, 40, 91, 66, 49, 94, 21),
List(24, 55, 58, 5, 66, 73, 99, 26, 97, 17, 78, 78, 96, 83, 14, 88, 34, 89, 63, 72),
List(21, 36, 23, 9, 75, 0, 76, 44, 20, 45, 35, 14, 0, 61, 33, 97, 34, 31, 33, 95),
List(78, 17, 53, 28, 22, 75, 31, 67, 15, 94, 3, 80, 4, 62, 16, 14, 9, 53, 56, 92),
List(16, 39, 5, 42, 96, 35, 31, 47, 55, 58, 88, 24, 0, 17, 54, 24, 36, 29, 85, 57),
List(86, 56, 0, 48, 35, 71, 89, 7, 5, 44, 44, 37, 44, 60, 21, 58, 51, 54, 17, 58),
List(19, 80, 81, 68, 5, 94, 47, 69, 28, 73, 92, 13, 86, 52, 17, 77, 4, 89, 55, 40),
List(4, 52, 8, 83, 97, 35, 99, 16, 7, 97, 57, 32, 16, 26, 26, 79, 33, 27, 98, 66),
List(88, 36, 68, 87, 57, 62, 20, 72, 3, 46, 33, 67, 46, 55, 12, 32, 63, 93, 53, 69),
List(4, 42, 16, 73, 38, 25, 39, 11, 24, 94, 72, 18, 8, 46, 29, 32, 40, 62, 76, 36),
List(20, 69, 36, 41, 72, 30, 23, 88, 34, 62, 99, 69, 82, 67, 59, 85, 74, 4, 36, 16),
List(20, 73, 35, 29, 78, 31, 90, 1, 74, 31, 49, 71, 48, 86, 81, 16, 23, 57, 5, 54),
List(1, 70, 54, 71, 83, 51, 54, 69, 16, 92, 33, 48, 61, 43, 52, 1, 89, 19, 67, 48)
)
// This does some duplicate work and accesses list entries by index; it's ugly...
//  but it gets the job done.
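// From each cell with both indices in 3..16 (so every +/-3 offset stays inside the 20x20 grid),
// build the four-cell runs in all eight directions (vertical, horizontal and both diagonals, in each orientation).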
val indexes = (3 to 16).flatMap(i => (3 to 16).flatMap(j => {
List(List((i, j), (i+1, j), (i+2, j), (i+3, j)),
List((i, j), (i-1, j), (i-2, j), (i-3, j)),
List((i, j), (i, j+1), (i, j+2), (i, j+3)),
List((i, j), (i, j-1), (i, j-2), (i, j-3)),
List((i, j), (i+1, j+1), (i+2, j+2), (i+3, j+3)),
List((i, j), (i-1, j+1), (i-2, j+2), (i-3, j+3)),
List((i, j), (i+1, j-1), (i+2, j-2), (i+3, j-3)),
List((i, j), (i-1, j-1), (i-2, j-2), (i-3, j-3)))
}))
def gridPoint(x: Int, y: Int) = grid(x)(y)
val results = indexes.map(_.map(Function.tupled(gridPoint)).product).max
println(results)
| brandonhorst/project-euler-scala | 011.scala | Scala | mit | 2,452 |
package com.socrata.datacoordinator.truth.metadata
import com.socrata.datacoordinator.id._
import com.socrata.soql.environment.{ColumnName, ResourceName}
import scala.concurrent.duration.Duration
trait DatasetMapBase[CT] extends `-impl`.BaseDatasetMapReader[CT] {
}
trait DatasetMapReader[CT] extends DatasetMapBase[CT] {
/** Looks up a dataset record by its system ID. */
def datasetInfo(datasetId: DatasetId, repeatableRead: Boolean = false): Option[DatasetInfo]
/** Looks up a dataset record by its resource name. */
def datasetInfoByResourceName(resourceName: ResourceName, repeatableRead: Boolean = false): Option[DatasetInfo]
/** Find all datasets with snapshots */
def snapshottedDatasets(): Seq[DatasetInfo]
}
class CopyInWrongStateForDropException(val copyInfo: CopyInfo, val acceptableStates: Set[LifecycleStage]) extends Exception
class CannotDropInitialWorkingCopyException(val copyInfo: CopyInfo) extends Exception
trait DatasetMapWriter[CT] extends DatasetMapBase[CT] with `-impl`.BaseDatasetMapWriter[CT] {
/** Looks up a dataset record by its system ID.
* @param timeout Amount of time to block before throwing.
* @param semiExclusive A hint that this will not actually be doing writes to this row.
* @note An implementation should make a "best effort" to honor the timeout, but
* is permitted to wait less or more. In particular, the postgresql implementation
* will only wait up to `Int.MaxValue` milliseconds unless the timeout is
* actually non-finite.
* @throws DatasetIdInUseByWriterException if some other writer has been used to look up this dataset. */
def datasetInfo(datasetId: DatasetId, timeout: Duration, semiExclusive: Boolean = false): Option[DatasetInfo]
/** Creates a new dataset in the truthstore.
* @note Does not actually create any tables; this just updates the bookkeeping.
* @return A `CopyInfo` that refers to an unpublished copy. */
def create(localeName: String, resourceName: Option[String]): CopyInfo
/** Ensures that an "unpublished" table exists, creating it if necessary.
* @note Does not copy the actual tables; this just updates the bookkeeping.
* @note This also updates the bookkeeping for columns.
* @note None of the new columns will be marked as being a primary key.
* @return Either the `CopyInfo` of an existing copy, or a pair of CopyInfos
* for the copy that was duplicated and the new copy it was copied to. */
def ensureUnpublishedCopy(datasetInfo: DatasetInfo): Either[CopyInfo, CopyPair[CopyInfo]]
/** Promotes the current "published" table record (if it exists) to a "snapshot" one, and promotes the
* current "unpublished" table record to "published".
* @throws IllegalArgumentException if `copyInfo` does not name an unpublished copy.
* @return The copy info for the newly-published dataset, and the copy info for the new snapshot if
* there was one. */
def publish(copyInfo: CopyInfo): (CopyInfo, Option[CopyInfo])
/** Adds a column to this table-copy.
* @note Does not change the actual table; this just updates the bookkeeping.
* @return The new column
* @throws ColumnAlreadyExistsException if the column already exists */
def addColumn(copyInfo: CopyInfo, userColumnId: UserColumnId, fieldName: Option[ColumnName], typ: CT, physicalColumnBaseBase: String, computationStrategyInfo: Option[ComputationStrategyInfo]): ColumnInfo[CT]
}
trait BackupDatasetMap[CT] extends DatasetMapWriter[CT] with `-impl`.BaseDatasetMapWriter[CT] {
/** Creates a new dataset in the truthstore.
* @note Does not actually create any tables; this just updates the bookkeeping.
* @throws DatasetSystemIdAlreadyInUse if `systemId` is already in use.
* @return A `CopyInfo` that refers to an unpublished copy with system id `systemId`. */
def createWithId(systemId: DatasetId, initialCopySystemId: CopyId, localeName: String, obfuscationKey: Array[Byte], resourceName: Option[String]): CopyInfo
/** Ensures that an "unpublished" table exists, creating it if necessary.
* @note Does not copy the actual tables; this just updates the bookkeeping.
* @note This does NOT copy the schema, because those updates are sent separately.
* @throws CopySystemIdAlreadyInUse if `systemId` is already in use.
* @return A pair of copy infos for the copy that was copied and the copy it was copied to. */
def createUnpublishedCopyWithId(datasetInfo: DatasetInfo, systemId: CopyId): CopyPair[CopyInfo]
/** Promotes the current "published" table record (if it exists) to a "snapshot" one, and promotes the
* current "unpublished" table record to "published".
* @throws IllegalArgumentException if `copyInfo` does not name an unpublished copy.
* @return The copy info for the newly-published dataset. */
def publish(copyInfo: CopyInfo): (CopyInfo, Option[CopyInfo])
/** Adds a column to this table-copy.
* @note Does not change the actual table; this just updates the bookkeeping.
* @return The new column
* @throws ColumnAlreadyExistsException if the column already exists
* @throws ColumnSystemIdAlreadyInUse if `systemId` already names a column on this copy of the table. */
def addColumnWithId(systemId: ColumnId, copyInfo: CopyInfo, userColumnId: UserColumnId, fieldName: Option[ColumnName], typ: CT, physicalColumnBaseBase: String, computationStrategyInfo: Option[ComputationStrategyInfo]): ColumnInfo[CT]
/** Creates a dataset with the specified attributes
* @note Using this carelessly can get you into trouble. In particular, this
* newly created dataset will have NO copies attached. */
def unsafeCreateDataset(systemId: DatasetId,
nextCounterValue: Long,
latestDataVersion: Long,
localeName: String,
obfuscationKey: Array[Byte],
resourceName: Option[String]): DatasetInfo
/** Creates a dataset with the specified attributes
* @note Using this carelessly can get you into trouble. In particular, this
* newly created dataset will have NO copies attached. */
def unsafeCreateDatasetAllocatingSystemId(localeName: String,
obfuscationKey: Array[Byte],
resourceName: Option[String]): DatasetInfo
/** Reloads a dataset with the specified attributes, including CLEARING ALL COPIES.
* @note Using this carelessly can get you into trouble. It is intended to be used
* for resyncing only. The resulting dataset object will have NO copies. */
def unsafeReloadDataset(datasetInfo: DatasetInfo,
nextCounterValue: Long,
latestDataVersion: Long,
localeName: String,
obfuscationKey: Array[Byte],
resourceName: Option[String]): DatasetInfo
/** Creates a copy with the specified attributes.
* @note Using this carelessly can get you into trouble. It is intended to be used
* for resyncing only. */
def unsafeCreateCopy(datasetInfo: DatasetInfo,
systemId: CopyId,
copyNumber: Long,
lifecycleStage: LifecycleStage,
dataVersion: Long,
dataShapeVersion: Long): CopyInfo
}
case class CopyPair[A <: CopyInfo](oldCopyInfo: A, newCopyInfo: A)
| socrata-platform/data-coordinator | coordinatorlib/src/main/scala/com/socrata/datacoordinator/truth/metadata/DatasetMap.scala | Scala | apache-2.0 | 7,531 |
package org.scalamu.core.coverage
import org.scalamu.common.position.Position
import scoverage.Location
/**
* A single source level statement instrumented by scoverage plugin.
*
* @param id scoverage statement id
* @param pos statement's offset in source
*/
final case class Statement(id: StatementId, location: Location, pos: Position)
| sugakandrey/scalamu | core/src/main/scala/org/scalamu/core/coverage/Statement.scala | Scala | gpl-3.0 | 344 |
package com.crobox.clickhouse.dsl.language
import com.crobox.clickhouse.DslTestSpec
import com.crobox.clickhouse.dsl._
class TypeCaseFunctionTokenizerTest extends DslTestSpec {
it should "succeed for UUID functions" in {
toSQL(select(toUUID(const("00000000-0000-0000-0000-000000000000")))) shouldBe "SELECT toUUID('00000000-0000-0000-0000-000000000000')"
toSQL(select(toUUIDOrZero(const("123")))) shouldBe "SELECT toUUIDOrZero('123')"
toSQL(select(toUUIDOrNull(const("123")))) shouldBe "SELECT toUUIDOrNull('123')"
}
}
| crobox/clickhouse-scala-client | dsl/src/test/scala/com/crobox/clickhouse/dsl/language/TypeCaseFunctionTokenizerTest.scala | Scala | lgpl-3.0 | 538 |
package org.jetbrains.plugins.scala
package lang
package psi
package impl
package statements
import com.intellij.lang.ASTNode
import com.intellij.openapi.progress.ProgressManager
import com.intellij.openapi.util.Key
import com.intellij.psi._
import com.intellij.psi.tree.IElementType
import org.jetbrains.plugins.scala.JavaArrayFactoryUtil.ScFunctionDefinitionFactory
import org.jetbrains.plugins.scala.extensions.{StubBasedExt, ifReadAllowed}
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.parser.ScalaElementType.FUNCTION_DEFINITION
import org.jetbrains.plugins.scala.lang.psi.api.{ScBegin, ScalaElementVisitor}
import org.jetbrains.plugins.scala.lang.psi.api.base.types.ScTypeElement
import org.jetbrains.plugins.scala.lang.psi.api.expr._
import org.jetbrains.plugins.scala.lang.psi.api.statements._
import org.jetbrains.plugins.scala.lang.psi.api.toplevel.ScNamedElement
import org.jetbrains.plugins.scala.lang.psi.impl.statements.ScFunctionDefinitionImpl.{importantOrderFunction, isCalculatingFor, returnTypeInner}
import org.jetbrains.plugins.scala.lang.psi.stubs.ScFunctionStub
import org.jetbrains.plugins.scala.lang.psi.stubs.elements.ScFunctionElementType
import org.jetbrains.plugins.scala.lang.psi.types.result._
import org.jetbrains.plugins.scala.lang.psi.types.{ScLiteralType, api}
/**
* @author Alexander Podkhalyuzin
* Date: 22.02.2008
*/
class ScFunctionDefinitionImpl[S <: ScFunctionDefinition](stub: ScFunctionStub[S],
nodeType: ScFunctionElementType[S],
node: ASTNode)
extends ScFunctionImpl(stub, nodeType, node)
with ScFunctionDefinition with ScBegin {
override protected def shouldProcessParameters(lastParent: PsiElement): Boolean =
super.shouldProcessParameters(lastParent) || body.contains(lastParent)
override def toString: String = "ScFunctionDefinition: " + ifReadAllowed(name)("")
//types of implicit definitions without explicit type should be computed in the right order
override def returnType: TypeResult = {
if (importantOrderFunction(this)) {
val parent = getParent
val isCalculating = isCalculatingFor(parent)
if (isCalculating.get()) returnTypeInner(this)
else {
isCalculating.set(true)
try {
val children = parent.stubOrPsiChildren(FUNCTION_DEFINITION, ScFunctionDefinitionFactory).iterator
while (children.hasNext) {
val nextFun = children.next()
if (importantOrderFunction(nextFun)) {
ProgressManager.checkCanceled()
val nextReturnType = returnTypeInner(nextFun)
//stop at current function to avoid recursion
//if we are currently computing type in some implicit function body below
if (nextFun == this) {
return nextReturnType
}
}
}
returnTypeInner(this)
}
finally {
isCalculating.set(false)
}
}
} else returnTypeInner(this)
}
override def body: Option[ScExpression] = byPsiOrStub(findChild[ScExpression])(_.bodyExpression)
override def hasAssign: Boolean = byStubOrPsi(_.hasAssign)(assignment.isDefined)
override def getBody: FakePsiCodeBlock = body match {
case Some(b) => new FakePsiCodeBlock(b) // Needed so that LineBreakpoint.canAddLineBreakpoint allows line breakpoints on one-line method definitions
case None => null
}
override protected def acceptScala(visitor: ScalaElementVisitor): Unit =
visitor.visitFunctionDefinition(this)
override protected def keywordTokenType: IElementType = ScalaTokenTypes.kDEF
override def namedTag: Option[ScNamedElement] = declaredElements.headOption
}
private object ScFunctionDefinitionImpl {
import org.jetbrains.plugins.scala.project.UserDataHolderExt
private val calculatingBlockKey: Key[ThreadLocal[Boolean]] = Key.create("calculating.function.returns.block")
private def isCalculatingFor(e: PsiElement) = e.getOrUpdateUserData(
calculatingBlockKey,
ThreadLocal.withInitial[Boolean](() => false)
)
private def importantOrderFunction(function: ScFunction): Boolean = function match {
case funDef: ScFunctionDefinition => funDef.hasModifierProperty("implicit") && !funDef.hasExplicitType
case _ => false
}
private def returnTypeInner(fun: ScFunctionDefinition): TypeResult = {
import fun.projectContext
fun.returnTypeElement match {
case None if !fun.hasAssign => Right(api.Unit)
case None =>
fun.body match {
case Some(b) => b.`type`().map(ScLiteralType.widenRecursive)
case _ => Right(api.Unit)
}
case Some(rte: ScTypeElement) => rte.`type`()
}
}
} | JetBrains/intellij-scala | scala/scala-impl/src/org/jetbrains/plugins/scala/lang/psi/impl/statements/ScFunctionDefinitionImpl.scala | Scala | apache-2.0 | 4,861 |
package net.totietje.evaluator
/** An `Evaluator` is a string parser and evaluator.
*
* This class is flexible, allowing a user to define their own syntax. An example use might be to parse maths
* expressions, such as `(1 + 2) ^ 3`.
*
* @see [[net.totietje.complex.ComplexEvaluator ComplexEvaluator]]
* @tparam R
* The type that the string input should be evaluated to
*/
abstract class Evaluator[+R] {
/** Evaluates a string.
*
* @throws net.totietje.evaluator.EvaluationException
* If there is a syntax error in the expression
* @param in
* The input string to parse
* @return
* The result of parsing the input
*/
def evaluate(in: String): R
} | totietje/Evaluator | src/main/scala/net/totietje/evaluator/Evaluator.scala | Scala | apache-2.0 | 788 |
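// Illustrative usage sketch (not part of the repository record above): a minimal concrete
// Evaluator. The object name `SumEvaluator` and its '+'-splitting grammar are hypothetical;
// only the `Evaluator[R]` contract (`evaluate(in: String): R`) comes from the class above.
// Throwing EvaluationException on malformed input is omitted because its constructor is not shown here.
object SumEvaluator extends net.totietje.evaluator.Evaluator[Int] {
  // Parses expressions such as "1 + 2 + 3" and returns their sum.
  override def evaluate(in: String): Int = in.split('+').map(_.trim.toInt).sum
}
// SumEvaluator.evaluate("1 + 2 + 3") evaluates to 6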
package almanac.spark
import almanac.model.Metric.Key
import almanac.model.TimeSpan.EVER
import almanac.model._
import almanac.spark.MetricsAggregator._
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming._
import org.apache.spark.streaming.dstream.DStream
object SparkMetricsAggregator {
implicit class RDDMetricsExtension(val source: RDD[Metric]) extends MetricsAggregator[RDD[Metric]] {
override def aggregateMetrics(func: KeyMapper) =
source map (m => func(m.key) -> m.value) reduceByKey (_+_) map (t => Metric(t._1, t._2))
}
implicit class DStreamMetricsExtension(val source: DStream[Metric]) extends MetricsAggregator[DStream[Metric]] {
override def aggregateMetrics(func: KeyMapper): DStream[Metric] = {
source map (m => func(m.key) -> m.value) reduceByKey (_ + _) map (t => Metric(t._1, t._2))
}
def stats(interval: Duration) = {
source
.map(m => m.bucket -> m.count)
.reduceByKeyAndWindow((a: Int, b: Int) => a + b, Seconds(10), Seconds(10))
.print()
source
}
/**
     * Saves the aggregated metrics stream under the given geo precision and time span.
     *
     * @param precision geo precision the metrics are saved under
     * @param span time span the metrics are saved under
     * @return the source stream, unchanged, to allow further chaining
*/
def saveMetrics(precision: Int, span: TimeSpan)(implicit repo: MetricRDDRepository): DStream[Metric] = {
println(s"save aggregated metrics of precision: $precision, span: $span")
repo.save(precision, span, source)
source
}
def saveKeys(precision: Int, span: TimeSpan)(implicit repo: MetricRDDRepository): DStream[Key] = {
// TODO: configuration of window span
val keyStream = source window(Minutes(1), Minutes(1)) aggregateMetrics by(EVER) map (_.key)
println(s"save keys of precision: $precision")
repo.saveKeys(keyStream)
keyStream
}
/**
     * Aggregates the first time span onto the initial stream,
     * then aggregates on each level of timeSpans and geoPrecisions as below:
     *
     * Seq(HOUR, DAY, EVER)    Seq(8, 4, GLOBAL)
     *
     * (12, RAW) -> (12, HOUR) -> (8, HOUR) -> (8, DAY) -> (8, EVER)
     *                                |
     *                                v
     *                            (4, HOUR) -> (4, DAY) -> (4, EVER)
     *                                |
     *                                v
     *              GLOBAL / (0, HOUR) -> (0, DAY) -> (0, EVER)
     *
     * The return value is the last aggregated stream, in the above case: GLOBAL / EVER
     *
     * @param schedules the geo precisions and time spans to aggregate by
     * @param repo the repository the aggregated metrics and keys are saved to
     * @return the last aggregated stream
*/
def aggregateWithSchedule(schedules: AggregationSchedules = defaultSchedules)(implicit repo: MetricRDDRepository) = {
val spans = schedules.timeSpans.sorted
val precisions = schedules.geoPrecisions.sorted.reverse
val firstStream =
source aggregateMetrics by(spans.head)
(firstStream /: precisions) { (previousGeoStream, precision) =>
val nextStream = previousGeoStream aggregateMetrics by(precision) saveMetrics(precision, spans.head)
nextStream saveKeys (precision, spans.last)
(nextStream /: spans.tail) { (previousSpanStream, span) =>
previousSpanStream aggregateMetrics by(span) saveMetrics(precision, span)
}
nextStream
}
}
}
val defaultSchedules = AggregationSchedules(List(GeoHash.GLOBAL), List(TimeSpan.EVER))
} | adcade/almanac-oss | src/main/scala/almanac/spark/SparkAggregator.scala | Scala | mit | 3,512 |
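// Illustrative sketch (not part of the repository record above): the nested fold performed by
// aggregateWithSchedule, replayed on plain strings instead of DStreams so the HOUR -> DAY -> EVER /
// 8 -> 4 -> GLOBAL cascade in the scaladoc can be traced. All names and values are hypothetical
// stand-ins for the real streams and repository calls.
object ScheduleCascadeSketch extends App {
  val spans      = List("HOUR", "DAY", "EVER") // schedules.timeSpans, sorted
  val precisions = List(8, 4, 0)               // schedules.geoPrecisions, sorted descending (0 = GLOBAL)

  // Stand-in for `source aggregateMetrics by(spans.head)`.
  val firstStream = s"(12, RAW) -> (12, ${spans.head})"

  precisions.foldLeft(firstStream) { (previousGeoStream, precision) =>
    // Stand-in for `previousGeoStream aggregateMetrics by(precision) saveMetrics(precision, spans.head)`.
    val nextStream = s"$previousGeoStream -> ($precision, ${spans.head})"
    // The inner fold mirrors the time-span cascade; its result is discarded, as in the original code.
    val fullChain = spans.tail.foldLeft(nextStream) { (previousSpanStream, span) =>
      s"$previousSpanStream -> ($precision, $span)"
    }
    println(fullChain)
    nextStream // the next precision level folds again from the span-head stream
  }
}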
package com.github.timgilbert.hexmap
import org.scalatra.test.specs2._
class HelloWorldMutableServletSpec extends MutableScalatraSpec {
addServlet(classOf[HexMapServlet], "/*")
"GET / on HexMapServlet" should {
"return status 200" in {
get("/") {
status must_== 200
}
}
}
}
| timgilbert/scala-hexmap | src/test/scala/ServletTests.scala | Scala | mit | 313 |
package springboard
import java.net.URL
import java.util.concurrent.ExecutorService
import akka.actor.{Actor, ActorSystem, Props}
import akka.http.scaladsl.model.HttpHeader.ParsingResult.Ok
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Keep, RunnableGraph, Sink, Source, SourceQueue}
import akka.util.Timeout
import akka.stream.OverflowStrategy.fail
import play.api.Logger
import play.api.libs.EventSource
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.concurrent.duration._
case class Tick()
class TickActor(queue: SourceQueue[String]) extends Actor {
def receive = {
case Tick => {
println("ticking")
queue.offer("tack")
}
}
}
object Application2 {
implicit val timeout: Timeout = 15.minute
val baseUrl = "http://wiprodigital.com"
def peekMatValue[T, M](src: Source[T, M]): (Source[T, M], Future[M]) = {
val p = Promise[M]
val s = src.mapMaterializedValue(
{ m =>
p.trySuccess(m)
m
})
(s, p.future)
}
def main(args: Array[String]) {
implicit val actorSystem = ActorSystem()
implicit val materializer = ActorMaterializer()
implicit val ec = actorSystem.dispatchers.defaultGlobalDispatcher
val (queueSource, futureQueue) = peekMatValue(Source.queue[String](10, fail))
futureQueue.map
{ queue =>
val tickActor = actorSystem.actorOf(Props(new TickActor(queue)))
val tickSchedule =
actorSystem.scheduler.schedule(0 milliseconds,
1 second,
tickActor,
Tick)
queue.watchCompletion().map
{ done =>
println("Client disconnected")
tickSchedule.cancel
println("Scheduler canceled")
}
}
    // Materialize the source so the queue promise completes and queued messages are printed
    queueSource.runForeach { m =>
      println(m)
    }
}
}
| defpearlpilot/webcrawler | app/springboard/Application2.scala | Scala | gpl-3.0 | 1,880 |
package pl.touk.nussknacker.engine.requestresponse
import io.circe.Json
import pl.touk.nussknacker.engine.requestresponse.api.ResponseEncoder
import pl.touk.nussknacker.engine.util.json.BestEffortJsonEncoder
object DefaultResponseEncoder extends ResponseEncoder[Any] {
private val bestEffortEncoder = BestEffortJsonEncoder(failOnUnkown = true, getClass.getClassLoader)
override def toJsonResponse(input: Any, result: List[Any]): Json = bestEffortEncoder.encode(result)
}
| TouK/nussknacker | engine/lite/request-response/runtime/src/main/scala/pl/touk/nussknacker/engine/requestresponse/DefaultResponseEncoder.scala | Scala | apache-2.0 | 480 |
package com.yetu.oauth2provider.utils
import com.yetu.oauth2provider.base.BaseSpec
class StringUtilsSpec extends BaseSpec {
"When isEmpty method receives a value " must {
"return false when string contains no value" in {
StringUtils.isFull(Some("")) mustBe false
}
"return false when string contains no value but empty spaces" in {
StringUtils.isFull(Some(" ")) mustBe false
}
"return false when is None" in {
StringUtils.isFull(None) mustBe false
}
"return true when there is a character" in {
StringUtils.isFull(Some("a")) mustBe true
}
"return true when there are n character" in {
StringUtils.isFull(Some(" actual value ")) mustBe true
}
}
"When areAllEmptyItems method receives values " must {
"return false when one of the strings contains no value" in {
StringUtils.isAnyEmpty(Some("")) mustBe true
StringUtils.isAnyEmpty(Some(""), Some(""), Some("")) mustBe true
StringUtils.isAnyEmpty(Some(""), Some("value"), Some("second value")) mustBe true
}
"return true when all of the strings contains values" in {
StringUtils.isAnyEmpty(Some("first value "), Some("second value"), Some("third value")) mustBe false
}
}
"to toHashmark method" must {
"generate valid hash url" in {
val queryString = Map(
"access_token" -> Seq("someString"),
"expires_in" -> Seq("someNumber")
)
StringUtils.toHashmark("https://auth.yetu.me/", queryString) mustBe "https://auth.yetu.me/#access_token=someString&expires_in=someNumber"
StringUtils.toHashmark("https://auth.yetu.me/#value=1", queryString) mustBe "https://auth.yetu.me/#value=1&access_token=someString&expires_in=someNumber"
}
}
"the subdomain method" must {
"return the string itself if the host doesn't have subdomain" in {
val requestHost1 = "localhost"
val requestHost2 = "localhost:9000"
val requestHost3 = "localhost.com:9000"
StringUtils.subdomain(requestHost1) mustBe requestHost1
StringUtils.subdomain(requestHost2) mustBe requestHost2
StringUtils.subdomain(requestHost3) mustBe requestHost3
}
"return the proper subdomain url from the given host with port" in {
val requestHost = "auth.localhost.com:9000"
StringUtils.subdomain(requestHost) mustBe ".localhost.com"
}
"return the proper subdomain url from the given host without port" in {
val requestHost = "auth-dev.yetu.me"
StringUtils.subdomain(requestHost) mustBe ".yetu.me"
}
"return the proper subdomain url to 4 level subdomain" in {
val requestHost = "auth.prod.yetu.me"
StringUtils.subdomain(requestHost) mustBe ".yetu.me"
}
}
}
| yetu/oauth2-provider | test/com/yetu/oauth2provider/utils/StringUtilsSpec.scala | Scala | mit | 2,734 |
def main (args: Array[String]) {
lazy val a = "Hello, World!"
val s = a _
/*start*/s/*end*/
}
//() => String | LPTK/intellij-scala | testdata/typeInference/expected/param/LazyValUnderscore.scala | Scala | apache-2.0 | 114 |
import java.util.concurrent.atomic.AtomicInteger
import sbt._
import sbt.Keys._
import sbtjooq.codegen.JooqCodegenKeys._
import sbtjooq.codegen.JooqCodegenPlugin
object CheckCalled extends AutoPlugin {
override def requires: Plugins = JooqCodegenPlugin
override def trigger: PluginTrigger = allRequirements
object autoImport {
val checkCalled = inputKey[Unit]("")
}
import autoImport._
private val counter = new AtomicInteger(0)
override def projectSettings: Seq[Setting[_]] = Seq(
JooqCodegen / run := {
counter.incrementAndGet()
(JooqCodegen / run).evaluated
},
checkCalled := {
import sbt.complete.DefaultParsers._
val n = (Space ~> IntBasic).parsed
val c = counter.getAndSet(0)
      require(c == n, s"Expected to be called $n times, but was called $c times")
},
)
}
| kxbmap/sbt-jooq | codegen/src/sbt-test/jooq-codegen/features/src/sbt-test/codegen/skip/project/CheckCalled.scala | Scala | apache-2.0 | 837 |
package dk.gp.hgpr
import breeze.linalg.DenseMatrix
import breeze.linalg.DenseVector
import breeze.numerics.exp
import dk.bayes.math.gaussian.canonical.DenseCanonicalGaussian
import dk.gp.hgpr.util.HgprFactorGraph
import dk.gp.gp.GPPredictSingle
import dk.bayes.math.gaussian.Gaussian
import dk.bayes.math.gaussian.MultivariateGaussian
import dk.gp.gp.GPPredictSingle
import dk.gp.util.calcUniqueRowsMatrix
/**
* Hierarchical Gaussian Process regression. Multiple Gaussian Processes for n tasks with a single shared parent GP.
*/
object hgprPredict {
def apply(xTest: DenseMatrix[Double], model: HgprModel): DenseVector[Gaussian] = {
val gpModelsByTaskId: Map[Int, GPPredictSingle] = createTaskPosteriorByTaskId(xTest, model)
val predictedArray = (0 until xTest.rows).par.map { rowIndex =>
val xRow = xTest(rowIndex, ::).t
val taskId = xRow(0).toInt
val taskGPModel = gpModelsByTaskId(taskId)
val xTestPrior = taskGPModel.predictSingle(xRow.toDenseMatrix)
Gaussian(xTestPrior.m(0), xTestPrior.v(0, 0))
}.toArray
DenseVector(predictedArray)
}
private def createTaskPosteriorByTaskId(xTest: DenseMatrix[Double], model: HgprModel): Map[Int, GPPredictSingle] = {
val hgpFactorGraph = HgprFactorGraph(model.x, model.y, model.u, model.covFunc, model.covFuncParams, model.likNoiseLogStdDev)
val uPosterior = hgpFactorGraph.calcUPosterior()
val taskIds = xTest(::, 0).toArray.distinct
val gpModelsByTaskId: Map[Int, GPPredictSingle] = taskIds.map { taskId =>
val idx = model.x(::, 0).findAll { x => x == taskId }
val taskX = model.x(idx, ::).toDenseMatrix
val taskY = model.y(idx).toDenseVector
val taskGpModel = if (taskY.size == 0) GPPredictSingle(MultivariateGaussian(uPosterior.mean, uPosterior.variance), model.u, model.covFunc, model.covFuncParams)
else {
val taskXTestIdx = xTest(::, 0).findAll(x => x == taskId)
val taskXTest = calcUniqueRowsMatrix(xTest(taskXTestIdx, ::).toDenseMatrix)
val taskXX = DenseMatrix.vertcat(taskX, taskXTest)
val xPrior = GPPredictSingle(MultivariateGaussian(uPosterior.mean, uPosterior.variance), model.u, model.covFunc, model.covFuncParams).predictSingle(taskXX)
val xPriorVariable = dk.bayes.dsl.variable.gaussian.multivariate.MultivariateGaussian(xPrior.m, xPrior.v)
val A = DenseMatrix.horzcat(DenseMatrix.eye[Double](taskX.rows), DenseMatrix.zeros[Double](taskX.rows, taskXTest.rows))
val yVar = DenseMatrix.eye[Double](taskY.size) * exp(2d * model.likNoiseLogStdDev)
val yVariable = dk.bayes.dsl.variable.Gaussian(A, xPriorVariable, b = DenseVector.zeros[Double](taskX.rows), yVar, yValue = taskY) //y variable
val xPosterior = dk.bayes.dsl.infer(xPriorVariable)
GPPredictSingle(MultivariateGaussian(xPosterior.m, xPosterior.v), taskXX, model.covFunc, model.covFuncParams)
}
// @TODO Simple impl if xTest is in testX - use it in this situation
// val (xPriorMean, cPriorVar) = inferXPrior(testX, model.u, uPosterior, model.covFunc, model.covFuncParams, model.likNoiseLogStdDev)
// val xPriorVariable = dk.bayes.dsl.variable.gaussian.multivariate.MultivariateGaussian(xPriorMean, cPriorVar)
//
// val yVar = DenseMatrix.eye[Double](testY.size) * exp(2d * model.likNoiseLogStdDev)
// val yVariable = Gaussian(xPriorVariable, yVar, yValue = testY) //y variable
//
// val xPosterior = dk.bayes.dsl.infer(xPriorVariable)
//
// val xTestPrior = inferXPrior(xRow.toDenseMatrix, testX, DenseCanonicalGaussian(xPosterior.m, xPosterior.v), model.covFunc, model.covFuncParams, model.likNoiseLogStdDev)
// UnivariateGaussian(xTestPrior._1(0), xTestPrior._2(0, 0))
taskId.toInt -> taskGpModel
}.toList.toMap
gpModelsByTaskId
}
} | danielkorzekwa/bayes-scala-gp | src/main/scala/dk/gp/hgpr/hgprPredict.scala | Scala | bsd-2-clause | 3,871 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.model
import com.waz.utils.Identifiable
import org.threeten.bp.Instant
/**
  * @param stageStartTime instant at which the push notification entered the given stage
  * @param stage the stage the notification was in at `stageStartTime`
*/
case class FCMNotification(override val id: Uid, stage: String, stageStartTime: Instant) extends Identifiable[Uid]
object FCMNotification {
val Pushed = "pushed"
val Fetched = "fetched"
val StartedPipeline = "startedPipeline"
val FinishedPipeline = "finishedPipeline"
val everyStage: Seq[String] = Seq(Pushed, Fetched, StartedPipeline, FinishedPipeline)
def prevStage(stage: String): Option[String] = stage match {
case FinishedPipeline => Some(StartedPipeline)
case StartedPipeline => Some(Fetched)
case Fetched => Some(Pushed)
case _ => None
}
}
| wireapp/wire-android-sync-engine | zmessaging/src/main/scala/com/waz/model/FCMNotification.scala | Scala | gpl-3.0 | 1,600 |
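// Illustrative sketch (not part of the repository record above): walking a stage back to the start
// of the pipeline with prevStage. The object and method names below are hypothetical; only the
// FCMNotification companion API defined above is used.
object FCMStageWalk extends App {
  import com.waz.model.FCMNotification._

  // Returns the given stage followed by all of its predecessors, e.g.
  // stagesBackFrom(FinishedPipeline) == List(FinishedPipeline, StartedPipeline, Fetched, Pushed)
  def stagesBackFrom(stage: String): List[String] =
    stage :: prevStage(stage).map(stagesBackFrom).getOrElse(Nil)

  println(stagesBackFrom(FinishedPipeline))
}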
/*
* Licensed to Intel Corporation under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* Intel Corporation licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.dataset.image
import com.intel.analytics.bigdl.dataset.Transformer
import com.intel.analytics.bigdl.utils.RandomGenerator
import scala.collection.Iterator
object HFlip {
def apply(threshold: Double = 0.0): HFlip = {
new HFlip(threshold)
}
}
/**
 * Flips an image horizontally with a probability. The higher the threshold, the less likely the image is flipped.
 * @param threshold flip threshold in [0, 1): the image is flipped when a uniform random draw is >= the threshold
*/
class HFlip(threshold: Double) extends Transformer[LabeledBGRImage, LabeledBGRImage] {
override def apply(prev: Iterator[LabeledBGRImage]): Iterator[LabeledBGRImage] = {
prev.map(img => {
if (RandomGenerator.RNG.uniform(0, 1) >= threshold) {
img.hflip()
} else {
img
}
})
}
}
| SeaOfOcean/BigDL | dl/src/main/scala/com/intel/analytics/bigdl/dataset/image/HFlip.scala | Scala | apache-2.0 | 1,524 |
/*
* Copyright © 2015 Reactific Software LLC. All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
package rxmongo.client
import rxmongo.bson.BSONObject
import rxmongo.messages.replies.WriteResult
import scala.concurrent.Await
import scala.concurrent.duration.FiniteDuration
/** Test Suite For Collection Statistics */
class CollStatsSpec extends RxMongoTest("rxmongo", "collstats") {
val objs = Seq(
BSONObject("a" → 21.0, "b" → 21L, "c" → 21),
BSONObject("a" → 28.0, "b" → 28L, "c" → 28),
BSONObject("a" → 35.0, "b" → 35L, "c" → 35),
BSONObject("a" → 42.0, "b" → 42L, "c" → 42),
BSONObject("a" → 49.0, "b" → 49L, "c" → 49),
BSONObject("a" → 56.0, "b" → 56L, "c" → 56)
)
"Collection" should {
"drop collection before populating" in mongoTest { () ⇒
val result = Await.result(collection.drop(), FiniteDuration(1, "seconds"))
success
}
"populate collection before testing" in mongoTest { () ⇒
val future = collection.insert(objs)
val result = Await.result(future, FiniteDuration(1, "seconds"))
result.ok must beEqualTo(1)
result.n must beEqualTo(6)
}
"permit collection stats retrieval" in mongoTest { () ⇒
val stats = collection.stats
stats.count must beEqualTo(6)
collection.count must beEqualTo(stats.count)
}
"implmenet avgObjSize" in mongoTest { () ⇒
val size = collection.avgObjSize
size must beGreaterThan(26)
}
"implement capped" in mongoTest { () ⇒
val capped = collection.capped
capped must beFalse
}
"implement storageSize" in mongoTest { () ⇒
val size = collection.storageSize
size must beGreaterThan(26 * 6L)
}
"implement indexSize" in mongoTest { () ⇒
val size = collection.indexSize
size must beEqualTo(7168L)
}
"implement numIndexes" in mongoTest { () ⇒
val size = collection.numIndexes
size must beEqualTo(1)
}
"implement numExtents" in mongoTest { () ⇒
val size = collection.numExtents
size must beEqualTo(1)
}
}
}
| reactific/RxMongo | client/src/test/scala/rxmongo/client/CollStatsSpec.scala | Scala | mit | 3,182 |
/***********************************************************************
* Copyright (c) 2013-2022 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.index.filters
import org.geotools.filter.text.ecql.ECQL
import org.junit.runner.RunWith
import org.locationtech.geomesa.index.api.ShardStrategy.NoShardStrategy
import org.locationtech.geomesa.index.index.z3.Z3IndexKeySpace
import org.locationtech.geomesa.index.utils.ExplainNull
import org.locationtech.geomesa.utils.geotools.SimpleFeatureTypes
import org.specs2.matcher.MatchResult
import org.specs2.mutable.Specification
import org.specs2.runner.JUnitRunner
@RunWith(classOf[JUnitRunner])
class Z3FilterTest extends Specification {
val sft = SimpleFeatureTypes.createType("z3FilterTest", "dtg:Date,*geom:Point:srid=4326")
val keySpace = new Z3IndexKeySpace(sft, NoShardStrategy, "geom", "dtg")
val filters = Seq(
"bbox(geom,38,48,52,62) and dtg DURING 2014-01-01T00:00:00.000Z/2014-01-08T12:00:00.000Z",
"bbox(geom,38,48,52,62) and dtg DURING 2013-12-15T00:00:00.000Z/2014-01-15T00:00:00.000Z",
"dtg DURING 2014-01-01T00:00:00.000Z/2014-01-08T12:00:00.000Z"
).map(ECQL.toFilter)
val values = filters.map(keySpace.getIndexValues(_, ExplainNull))
def compare(actual: Z3Filter, expected: Z3Filter): MatchResult[Boolean] = {
val left = Array[AnyRef](actual.xy, actual.t, Short.box(actual.minEpoch), Short.box(actual.maxEpoch))
val right = Array[AnyRef](expected.xy, expected.t, Short.box(expected.minEpoch), Short.box(expected.maxEpoch))
java.util.Arrays.deepEquals(left, right) must beTrue
}
"Z3Filter" should {
"serialize to and from bytes" in {
forall(values) { value =>
val filter = Z3Filter(value)
val result = Z3Filter.deserializeFromBytes(Z3Filter.serializeToBytes(filter))
compare(result, filter)
}
}
"serialize to and from strings" in {
forall(values) { value =>
val filter = Z3Filter(value)
val result = Z3Filter.deserializeFromStrings(Z3Filter.serializeToStrings(filter))
compare(result, filter)
}
}
}
}
| locationtech/geomesa | geomesa-index-api/src/test/scala/org/locationtech/geomesa/index/filters/Z3FilterTest.scala | Scala | apache-2.0 | 2,449 |
package au.id.cxd.math.function.moments
/*
the empirical mean of a sequence assuming normality
*/
class Mean {
/**
* compute the empirical mean of a sequence
*
* @param series
* @return
*/
def op(series: Seq[Double]): Double = {
val n = series.length
val total = series.reduce(_ + _)
total / n
}
}
object Mean {
def apply(series:Seq[Double]):Double = new Mean().op(series)
}
| cxd/scala-au.id.cxd.math | math/src/main/scala/au/id/cxd/math/function/moments/Mean.scala | Scala | mit | 419 |
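// Illustrative sketch (not part of the repository record above): computing a sample mean through
// the companion object's apply. The values are arbitrary example data.
object MeanExample extends App {
  import au.id.cxd.math.function.moments.Mean
  println(Mean(Seq(1.0, 2.0, 3.0, 4.0))) // 2.5
}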
package army
sealed trait Soldier
case class Robot(battery: Int) extends Soldier
case class Human(life: Int,
maybeRobot: Option[Robot] = None) extends Soldier
object Army extends App {
import implicits._
val army = 3.soldiers.recruit
printArmy(army)
// only robots
// 5.robots.recruit
// only humans
// 10.soldiers.recruit
// humans by default
// 3.recruit
def printArmy(army: List[Soldier]) = army.foreach(println)
}
| rabbitonweb/scala_implicits_exercise | src/main/scala/army/Army.scala | Scala | gpl-3.0 | 467 |
/**
* For copyright information see the LICENSE document.
*/
import entice.server._
import entice.server.scripting._
import entice.server.utils._
import entice.server.world._
import entice.protocol._
import akka.actor._
import shapeless._
import scala.util._
class Buddy extends Command {
var buddies: List[ActorRef] = Nil
def info = CommandInfo(
command = "buddy",
argsHelp =
" - grouping : Invites all possible entities, and will join any group that invites it." ::
" - kill : Kills all buddy instances." ::
Nil,
generalInfo = "Spawns a random character with a specified behaviour (1st arg).",
usageInfo = "Example: '/buddy grouping' To remove all buddies: '/buddy kill'")
def run(args: List[String], ctx: CommandContext): Option[String] = {
if (args == Nil) return Some("No behaviour argument given.")
args(0) match {
case "grouping" =>
buddies = ctx.actorSystem.actorOf(Props(new GroupingActor(ctx.sender.entity.get, buddies.length))) :: buddies
None
case "kill" =>
buddies.foreach { ctx.actorSystem.stop(_) }
None
case _ => Some(s"Unknown behaviour '${args(0)}'.")
}
}
/**
* Helping actors that implement the buddy's behaviour:
*/
class GroupingActor(
parent: RichEntity,
num: Int) extends Actor with Subscriber with Clients {
val subscriptions = classOf[GroupInvite] :: Nil
var entity: RichEntity = _
override def preStart {
register
// set our entity
entity = parent.world.create(
new TypedSet[Component]()
.add(Name(s"Buddy ${num.toString}"))
.add(Appearance())
.add(parent[Position].copy())
.add(GroupLeader()))
// invite all other teamleaders
parent.world.dump.keySet
.map { e => parent.world.getRich(e).get }
.filter { e => entity != e }
        .filter { e => e.get[GroupLeader] != None }
.foreach { e => publish(GroupInvite(entity, e)) }
}
override def postStop {
// clean up
parent.world.remove(entity)
}
def receive = {
// if someone invites us, accept it instantly
case MessageEvent(_, GroupInvite(e1, e2)) if e2 == entity =>
publish(GroupAccept(entity, e1))
// ignore all other messages
case _ =>
}
}
}
new Buddy()
| entice/old-server | scripts/commands/Buddy.scala | Scala | bsd-3-clause | 2,693 |
package blended.launcher.jvmrunner
import java.io.{IOException, InputStream, OutputStream}
private[jvmrunner]
class RunningProcess(process: Process, errorsIntoOutput: Boolean, interactive: Boolean) {
private[this] val errThread = asyncCopy(process.getErrorStream, if (errorsIntoOutput) Console.out else Console.err)
private[this] val inThread = asyncCopy(process.getInputStream, Console.out, interactive)
private[this] val in = System.in
private[this] val out = process.getOutputStream
private[this] val outThread = new Thread("StreamCopyThread") {
setDaemon(true)
override def run() {
try {
while (true) {
if (in.available > 0) {
in.read match {
case -1 =>
case read =>
out.write(read)
out.flush()
}
} else {
Thread.sleep(50)
}
}
} catch {
case e: IOException => // ignore
case e: InterruptedException => // this is ok
}
}
}
if (interactive) outThread.start()
def waitFor(): Int = {
try {
process.waitFor
} finally {
process.getOutputStream().close()
outThread.interrupt()
process.getErrorStream().close()
process.getInputStream().close()
}
}
def stop(): Int = {
if (interactive) {
outThread.interrupt()
} else {
out.write("stop 0\\n".getBytes())
}
out.flush()
out.close()
waitFor()
}
/**
   * Starts a new thread which copies an InputStream into an OutputStream. Does not close the streams.
*/
private def asyncCopy(in: InputStream, out: OutputStream, immediately: Boolean = false): Thread =
new Thread("StreamCopyThread") {
setDaemon(true)
override def run() {
try {
copy(in, out, immediately)
} catch {
case e: IOException => // ignore
case e: InterruptedException => // ok
}
out.flush()
}
start()
}
/**
* Copies an InputStream into an OutputStream. Does not close the streams.
*/
private def copy(in: InputStream, out: OutputStream, immediately: Boolean = false): Unit = {
if (immediately) {
while (true) {
if (in.available > 0) {
in.read match {
case -1 =>
case read =>
out.write(read)
out.flush()
}
} else {
Thread.sleep(50)
}
}
} else {
val buf = new Array[Byte](1024)
var len = 0
while ({
len = in.read(buf)
len > 0
}) {
out.write(buf, 0, len)
}
}
}
}
| lefou/blended | blended.launcher/src/main/scala/blended/launcher/jvmrunner/RunningProcess.scala | Scala | apache-2.0 | 2,665 |
package fly.play.aws.auth
import org.specs2.mutable.Before
import org.specs2.mutable.Specification
import play.api.Play.current
import play.api.test.FakeApplication
import testUtils.RunningFakePlayApplication
object AwsCredentialsSpec extends Specification with RunningFakePlayApplication {
"AwsCredentials" should {
"retrieve from configuration" in {
AwsCredentials.fromConfiguration must_== AwsCredentials("testKey", "testSecret")
}
"implement apply" in {
AwsCredentials("key", "secret")
ok
}
"implement unapply" >> {
val AwsCredentials(a, b, Some(t)) = AwsCredentials("key", "secret", Some("token"))
a must_== "key"
b must_== "secret"
t must_== "token"
}
def checkImplicit()(implicit c: AwsCredentials) = c
"provide an implicit value" in {
checkImplicit must not beNull
}
"override the implicit" in {
checkImplicit()(AwsCredentials("test", "test")) must_== AwsCredentials("test", "test")
}
}
} | fooblahblah/play-aws-utils | src/test/scala/fly/play/aws/auth/AwsCredentialsSpec.scala | Scala | mit | 1,004 |
// Copyright (C) 2009 The Android Open Source Project
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package com.google.gimd
import scala.util.Sorting
import scala.collection.mutable.ArrayBuffer
class MessageBuffer {
private class FieldStorage {
private var fields = new ArrayBuffer[Field]
private var exported = false
private var isSorted = true
private def beforeModify() {
if (exported) {
val t = new ArrayBuffer[Field]
fields copyToBuffer t
fields = t
exported = false
}
}
def +=(elem: Field) {
beforeModify()
if (isSorted && !fields.isEmpty && lt(elem, fields.last))
isSorted = false
fields += elem
}
def unsorted = fields.toSeq
def sorted = {
beforeModify()
if (isSorted) {
val t = new ArrayBuffer[Field]
Sorting.stableSort(fields) copyToBuffer t
fields = t
isSorted = true
}
exported = true
fields.toSeq
}
def lt(a: Field, b: Field) = a.name.compareTo(b.name) < 0
}
private val fields = new FieldStorage
protected def sortedFields = fields.sorted
def add(name: String, value: Int): MessageBuffer = add(IntField(name, value))
def add(name: String, value: Long): MessageBuffer = add(LongField(name, value))
def add(name: String, value: String): MessageBuffer =
if (value != null)
add(StringField(name, value))
else
this
def add(name: String, value: Timestamp): MessageBuffer =
if (value != null)
add(TimestampField(name, value))
else
this
def add(name: String, value: java.math.BigInteger): MessageBuffer =
if (value != null)
add(name, new BigInt(value))
else
this
def add(name: String, value: BigInt): MessageBuffer =
if (value != null)
add(BigIntField(name, value))
else
this
def add(name: String, value: java.math.BigDecimal): MessageBuffer =
if (value != null)
add(name, new BigDecimal(value))
else
this
def add(name: String, value: BigDecimal): MessageBuffer =
if (value != null)
add(BigDecimalField(name, value))
else
this
def add(name: String, value: Message): MessageBuffer =
if (value != null && !value.isEmpty)
add(MessageField(name, value))
else
this
def add(field: Field) = this += field
def +=(field: Field) = {
fields += field
this
}
def ++=(iter: Iterable[Field]) {
for(f <- iter)
add(f)
this
}
def ++(iter: Iterable[Field]) = {
val buffer = new MessageBuffer
buffer ++= fields.unsorted
buffer ++= iter
buffer
}
def readOnly = Message(fields.sorted: _*)
}
| gkossakowski/gimd | src/main/scala/com/google/gimd/MessageBuffer.scala | Scala | apache-2.0 | 3,204 |
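// Illustrative sketch (not part of the repository record above): building an immutable Message
// with MessageBuffer's fluent add(...) overloads. Field names and values are arbitrary examples;
// only methods defined in the class above are used.
object MessageBufferExample {
  import com.google.gimd.{Message, MessageBuffer}

  def example(): Message =
    new MessageBuffer()
      .add("name", "gimd")   // StringField overload
      .add("stars", 42)      // IntField overload
      .add("created", 1234L) // LongField overload
      .readOnly              // sorted, read-only Message
}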
/* Copyright 2015 Matt Silbernagel
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package models
import play.api.db.slick.Config.driver.simple._
import scala.slick.lifted.Tag
case class QuestionUpload(id: Option[Long], questionId: Long, uploadId: Long)
class QuestionUploads(tag: Tag) extends Table[QuestionUpload](tag, "question_uploads") {
def id = column[Long]("id", O.PrimaryKey, O.AutoInc)
def questionId = column[Long]("question_id")
def uploadId = column[Long]("upload_id")
def * = (id.?, questionId, uploadId) <> (QuestionUpload.tupled, QuestionUpload.unapply _)
def question = foreignKey("questionupload_question_fk", questionId, Questions.questions)(_.id)
def upload = foreignKey("questionupload_upload_fk", uploadId, Uploads.uploads)(_.id)
}
object QuestionUploads {
lazy val questionUploads = TableQuery[QuestionUploads]
def create(q: QuestionUpload)(implicit s: Session): QuestionUpload =
(questionUploads returning questionUploads.map(_.id) into ((upload, id) => upload.copy(Some(id)))) += q
def delete(q: QuestionUpload)(implicit s: Session): Int =
questionUploads.filter(_.id === q.id.get).delete
def all(implicit s: Session): List[QuestionUpload] =
questionUploads.list
def find(q: Long)(implicit s: Session) =
questionUploads.filter(_.id === q).firstOption
def findByQuestion(q: Long)(implicit s: Session) = {
val query = for {
upload <- questionUploads if upload.questionId === q
question <- upload.question
} yield upload
query.firstOption
}
def findUploadByQuestion(q: Long)(implicit s: Session) = {
val query = for {
upload <- questionUploads if upload.questionId === q
pic <- upload.upload
} yield pic
query.firstOption
}
}
| silbermm/proximal | app/models/QuestionUpload.scala | Scala | apache-2.0 | 2,269 |
package scalariform.formatter
import scalariform.lexer.Tokens._
import scalariform.lexer._
import scalariform.parser._
import scalariform.utils._
import scalariform.formatter.preferences._
import PartialFunction._
import scalariform.ScalaVersions
trait HasHiddenTokenInfo {
def isInferredNewline(token: Token): Boolean
def inferredNewlines(token: Token): HiddenTokens
def hiddenPredecessors(token: Token): HiddenTokens
def newlineBefore(token: Token): Boolean = hiddenPredecessors(token).containsNewline
def newlineBefore(node: AstNode): Boolean = newlineBefore(node.firstToken)
}
abstract class ScalaFormatter extends HasFormattingPreferences with TypeFormatter with AnnotationFormatter with ExprFormatter with HasHiddenTokenInfo with TemplateFormatter with XmlFormatter with CaseClauseFormatter with CommentFormatter {
val newlineSequence: String
def getSource(astNode: AstNode): String = {
val sb = new StringBuilder
for (token ← astNode.tokens) {
if (token != astNode.tokens.head)
sb.append(hiddenPredecessors(token).rawText)
sb.append(token.rawText)
}
sb.toString
}
def format(compilationUnit: CompilationUnit)(implicit formatterState: FormatterState = FormatterState()): FormatResult = {
val topStats = compilationUnit.topStats
var result = format(topStats)
for (firstStat ← topStats.firstStatOpt)
result = result.before(firstStat.firstToken, EnsureNewlineAndIndent(0))
if (formattingPreferences(NewlineAtEndOfFile)) {
result.before(compilationUnit.eofToken, EnsureNewlineAndIndent(0))
} else {
result
}
}
/**
* Converts an AstNode into what it should look like in text after Scalariform has run.
* Useful for calculating the actual length of an [[scalariform.parser.AstNode]] after formatting.
*
* @param ast The AST to format and render as a string
* @param astFormatResult Should run formatting actions for 'ast'
* @return Formatted string representation of what the AstNode should look like after Scalariform
* has run
*/
protected def formattedAstNode(ast: AstNode)(astFormatResult: ⇒ FormatResult): String = {
val source = getSource(ast)
val formatResult = astFormatResult
val offset = ast.firstToken.offset
val edits = writeTokens(source, ast.tokens, formatResult, offset)
TextEditProcessor.runEdits(source, edits)
}
private def alterSuspendFormatting(text: String): Option[Boolean] =
if (text contains "format: OFF")
Some(true)
else if (text contains "format: ON")
Some(false)
else
None
private def replaceEdit(token: Token, replacement: String): TextEdit = TextEdit(token.offset, token.length, replacement)
def writeTokens(s: String, tokens: List[Token], formatResult: FormatResult, offset: Int = 0): List[TextEdit] = {
val FormatResult(predecessorFormatting, inferredNewlineFormatting, xmlRewrites) = formatResult
val builder = new StringBuilder
var tokenIndentMap: Map[Token, Int] = Map()
var suspendFormatting = false
var edits: List[TextEdit] = Nil // Stored in reverse
def printableFormattingInstruction(previousTokenOpt: Option[Token], token: Token) =
predecessorFormatting.get(token) orElse
previousTokenOpt.map(defaultFormattingInstruction(_, token)) getOrElse
(if (token.tokenType == EOF) EnsureNewlineAndIndent(0) /* <-- to allow formatting of files with just a scaladoc comment */ else Compact)
for ((previousTokenOption, token, nextTokenOption) ← Utils.withPreviousAndNext(tokens)) {
val previousTokenIsPrintable = previousTokenOption exists { !isInferredNewline(_) }
if (isInferredNewline(token)) {
alterSuspendFormatting(token.text) foreach { suspendFormatting = _ }
if (suspendFormatting)
builder.append(token.rawText)
else {
val basicFormattingInstruction = inferredNewlineFormatting.get(token) getOrElse
defaultNewlineFormattingInstruction(previousTokenOption, token, nextTokenOption)
val formattingInstruction =
if (nextTokenOption.exists { _.tokenType == EOF } && basicFormattingInstruction.isInstanceOf[EnsureNewlineAndIndent])
EnsureNewlineAndIndent(0) // Adjustment for end of input when using non-zero initial indent
else
basicFormattingInstruction
val nextTokenUnindents = nextTokenOption exists { _.tokenType == RBRACE }
val includeBufferBeforeNextToken = nextTokenOption exists { nextToken ⇒
!printableFormattingInstruction(Some(token), nextToken).isInstanceOf[EnsureNewlineAndIndent]
}
edits :::= writeHiddenTokens(builder, inferredNewlines(token), formattingInstruction, nextTokenUnindents,
includeBufferBeforeNextToken, previousTokenIsPrintable, tokenIndentMap).toList
}
} else {
alterSuspendFormatting(hiddenPredecessors(token).text) foreach { suspendFormatting = _ }
if (suspendFormatting) {
builder.append(hiddenPredecessors(token).rawText)
tokenIndentMap += (token -> builder.currentColumn)
builder.append(token.rawText)
} else {
val formattingInstruction = printableFormattingInstruction(previousTokenOption, token)
val nextTokenUnindents = token.tokenType == RBRACE
val includeBufferBeforeNextToken = true // <-- i.e. current token
val hiddenTokens = hiddenPredecessors(token)
val positionHintOption = if (hiddenTokens.isEmpty) Some(token.offset) else None
edits :::= writeHiddenTokens(builder, hiddenTokens, formattingInstruction, nextTokenUnindents, includeBufferBeforeNextToken,
previousTokenIsPrintable, tokenIndentMap, positionHintOption).toList
tokenIndentMap += (token -> builder.currentColumn)
val newTokenTextOpt: Option[String] = if (xmlRewrites contains token) Some(xmlRewrites(token)) else None
edits :::= builder.write(token, newTokenTextOpt).toList
}
}
}
edits
.reverse
.flatMap { edit ⇒ if (edit.position >= offset) Some(edit.shift(-offset)) else None }
.filter { case TextEdit(position, length, replacement) ⇒ s.substring(position, position + length) != replacement }
.distinct
}
private def writeHiddenTokens(
builder: StringBuilder,
hiddenTokens: HiddenTokens,
instruction: IntertokenFormatInstruction,
nextTokenUnindents: Boolean,
includeBufferBeforeNextToken: Boolean,
previousTokenIsPrintable: Boolean,
tokenIndentMap: Map[Token, Int],
positionHintOption: Option[Int] = None
): Option[TextEdit] = {
def writeIntertokenCompact() {
val comments = hiddenTokens.comments
for ((previousCommentOption, comment, nextCommentOption) ← Utils.withPreviousAndNext(comments)) {
val needGapBetweenThisAndPrevious = cond(previousCommentOption) {
case Some(MultiLineComment(_)) | Some(ScalaDocComment(_)) ⇒ true
case _ if comment == comments.head && previousTokenIsPrintable ⇒ true
}
if (needGapBetweenThisAndPrevious)
builder.append(" ")
val extraIndent = comment match {
case SingleLineComment(_) if nextCommentOption.isDefined || includeBufferBeforeNextToken ⇒ builder.currentIndent
case _ ⇒ ""
}
builder.write(comment.token)
builder.append(extraIndent)
}
val needGapBetweenThisAndFollowing = cond(comments.lastOption) {
case Some(MultiLineComment(_)) if includeBufferBeforeNextToken ⇒ true
case Some(ScalaDocComment(_)) if includeBufferBeforeNextToken ⇒ true
}
if (needGapBetweenThisAndFollowing)
builder.append(" ")
}
val startPos = builder.length
val allWhitespace = hiddenTokens forall { _.isInstanceOf[Whitespace] }
instruction match {
case Compact ⇒ writeIntertokenCompact()
case CompactEnsuringGap ⇒
if (allWhitespace)
builder.append(" ")
else
writeIntertokenCompact()
case CompactPreservingGap ⇒
if (allWhitespace && !hiddenTokens.isEmpty)
builder.append(" ")
else
writeIntertokenCompact()
case PlaceAtColumn(indentLevel, spaces, relativeTo) ⇒
require(!formattingPreferences(IndentWithTabs))
writeIntertokenCompact()
val relativeIndent = relativeTo flatMap tokenIndentMap.get getOrElse 0
val indentLength = Spaces(formattingPreferences(IndentSpaces)).length(indentLevel)
builder.append(" " * (indentLength + relativeIndent + spaces - builder.currentColumn))
case EnsureNewlineAndIndent(indentLevel, relativeTo) ⇒
require(!(formattingPreferences(IndentWithTabs) && relativeTo.isDefined))
val baseIndentOption = relativeTo flatMap tokenIndentMap.get
if (hiddenTokens.isEmpty) {
builder.ensureAtBeginningOfLine()
builder.indent(indentLevel, baseIndentOption)
} else {
val commentIndentLevel = if (nextTokenUnindents) indentLevel + 1 else indentLevel
for ((previousOpt, hiddenToken, nextOpt) ← Utils.withPreviousAndNext(hiddenTokens)) {
hiddenToken match {
case ScalaDocComment(_) ⇒
builder.ensureAtBeginningOfLine()
builder.indent(commentIndentLevel, baseIndentOption)
builder.append(formatScaladocComment(hiddenToken, commentIndentLevel))
case SingleLineComment(_) | MultiLineComment(_) ⇒
if (builder.atBeginningOfLine)
builder.indent(commentIndentLevel, baseIndentOption)
else if (builder.atVisibleCharacter) // Separation from previous visible token
builder.append(" ")
builder.append(formatNonScaladocComment(hiddenToken, commentIndentLevel))
case Whitespace(token) ⇒
val newlineCount = token.text.count(_ == '\\n')
val newlinesToWrite = previousOpt match {
case Some(SingleLineComment(_)) ⇒ math.min(1, newlineCount)
case _ ⇒ math.min(2, newlineCount)
}
for (i ← 1 to newlinesToWrite)
builder.newline()
}
if (nextOpt.isEmpty) {
hiddenToken match {
case ScalaDocComment(_) ⇒
builder.newline()
builder.indent(indentLevel, baseIndentOption)
case SingleLineComment(_) ⇒
builder.indent(indentLevel, baseIndentOption)
case MultiLineComment(_) ⇒
builder.append(" ")
case Whitespace(token) ⇒
if (previousOpt.exists(_.isInstanceOf[MultiLineComment]) && !token.text.contains('\\n'))
builder.append(" ")
else {
builder.ensureAtBeginningOfLine()
builder.indent(indentLevel, baseIndentOption)
}
}
}
}
}
}
val replacement = builder.substring(startPos)
positionHintOption match {
case Some(positionHint) if hiddenTokens.isEmpty ⇒
Some(TextEdit(positionHint, length = 0, replacement = replacement))
case _ ⇒
for {
firstToken ← hiddenTokens.firstTokenOption
lastToken ← hiddenTokens.lastTokenOption
start = firstToken.token.offset
end = lastToken.token.lastCharacterOffset
length = end - start + 1
} yield TextEdit(start, length, replacement)
}
}
class StringBuilderExtra(builder: StringBuilder) {
def indent(indentLevel: Int, baseIndentOption: Option[Int] = None) = {
for {
baseIndent ← baseIndentOption
n ← 1 to baseIndent
} builder.append(" ")
val indentChars = formattingPreferences.indentStyle.indent(indentLevel)
builder.append(indentChars)
builder
}
def write(token: Token, replacementOption: Option[String] = None): Option[TextEdit] = {
val rewriteArrows = formattingPreferences(RewriteArrowSymbols)
val actualReplacementOption = replacementOption orElse (condOpt(token.tokenType) {
case ARROW if rewriteArrows ⇒ "⇒"
case LARROW if rewriteArrows ⇒ "←"
case RARROW if rewriteArrows ⇒ "→"
case EOF ⇒ ""
})
builder.append(actualReplacementOption getOrElse token.rawText)
actualReplacementOption map { replaceEdit(token, _) }
}
def write(hiddenToken: HiddenToken) = {
builder.append(hiddenToken.token.rawText)
builder
}
def newline() = {
builder.append(newlineSequence)
builder
}
def atBeginningOfLine = builder.isEmpty || lastChar == '\\n'
private def lastChar = builder(builder.length - 1)
def currentColumn = {
var pos = builder.length - 1
while (pos >= 0 && builder(pos) != '\\n')
pos -= 1
builder.length - pos - 1
}
def currentIndent = {
val lineStart = builder.length - currentColumn
var pos = lineStart
while (pos < builder.length && builder(pos).isWhitespace)
pos += 1
builder.substring(lineStart, pos)
}
def lastCharacter = if (builder.length == 0) None else Some(lastChar)
def ensureAtBeginningOfLine() = {
if (!atBeginningOfLine)
newline()
builder
}
def atVisibleCharacter = builder.length > 0 && !Character.isWhitespace(lastChar)
}
implicit def stringBuilder2stringBuilderExtra(builder: StringBuilder): StringBuilderExtra = new StringBuilderExtra(builder)
private def defaultNewlineFormattingInstruction(previousTokenOption: Option[Token], token: Token, nextTokenOption: Option[Token]): IntertokenFormatInstruction = {
val previousTypeOption = previousTokenOption map { _.tokenType }
val nextTypeOption = nextTokenOption map { _.tokenType }
val result =
if (previousTypeOption == Some(TYPE))
CompactEnsuringGap
else if (previousTypeOption == Some(RBRACKET) && nextTypeOption.exists(Set(CASE, CLASS, TRAIT, OBJECT, DEF, VAL, VAR, TYPE, ABSTRACT, FINAL, SEALED, OVERRIDE, IMPLICIT, LAZY)))
CompactEnsuringGap
else if (nextTypeOption == Some(LBRACE))
CompactEnsuringGap
else
Compact
// println("defaultNewlineFormattingInstruction(" + previousTokenOption + ", " + token + ", " + nextTokenOption + ") = " + result)
result
}
private def defaultFormattingInstruction(token1: Token, token2: Token): IntertokenFormatInstruction = {
val result = actualDefaultFormattingInstruction(token1, token2)
// println("defaultFormattingInstruction(" + token1 + ", " + token2 + ") = " + result)
result
}
private def actualDefaultFormattingInstruction(token1: Token, token2: Token): IntertokenFormatInstruction = {
import ScalaFormatter._
import scalariform.lexer.Chars.isOperatorPart
val type1 = token1.tokenType
val type2 = token2.tokenType
if (type2 == EOF)
return Compact
if (type1 == LPAREN && type2 != RPAREN && formattingPreferences(SpaceInsideParentheses))
return CompactEnsuringGap
if (type1 != LPAREN && type2 == RPAREN && formattingPreferences(SpaceInsideParentheses))
return CompactEnsuringGap
if (type1 == LBRACKET && type2 != RBRACKET && formattingPreferences(SpaceInsideBrackets))
return CompactEnsuringGap
if (type1 != LBRACKET && type2 == RBRACKET && formattingPreferences(SpaceInsideBrackets))
return CompactEnsuringGap
val xmlPreviousExceptions = Set(LBRACE, LPAREN, NEWLINE, NEWLINES)
if (type1 == TYPE && type2.isId)
return CompactEnsuringGap
if (type2 == XML_START_OPEN && !(xmlPreviousExceptions.contains(type1) || type1.isXml))
return CompactEnsuringGap
if (type1 == USCORE && type2.isId && type2 != STAR)
return CompactEnsuringGap
if (type2 == USCORE && type1.isId)
return CompactEnsuringGap
if ((type1 == RPAREN || type1 == RBRACKET) && type2 == LBRACE)
return CompactEnsuringGap
if (type1 == MINUS && (type2 == INTEGER_LITERAL || type2 == FLOATING_POINT_LITERAL))
return Compact
if (Set(IMPLICIT, VAL, VAR, PRIVATE, PROTECTED, OVERRIDE).contains(type2) && type1 == LPAREN)
return Compact
if ((type1 == PROTECTED || type1 == PRIVATE) && type2 == LBRACKET)
return Compact
if (type1 == NEWLINE || type2 == NEWLINE || type1 == NEWLINES || type2 == NEWLINES)
return Compact
if (type1.isId && type2 == LBRACE)
return CompactEnsuringGap
if (type1 == LBRACE && type2 == RBRACE)
return Compact //CompactEnsuringGap
if (type1 == RBRACE && type2 == LBRACE)
return CompactEnsuringGap
if (type1 == RPAREN && type2.isLiteral)
return CompactEnsuringGap
if (type1 == RPAREN && type2.isId)
return CompactEnsuringGap
if (type1.isLiteral && type2.isId)
return CompactEnsuringGap
if (type1.isId && type2.isLiteral)
return CompactEnsuringGap
if (ENSURE_SPACE_AFTER(type1))
return CompactEnsuringGap
if (ENSURE_SPACE_BEFORE(type2))
return CompactEnsuringGap
if (type1.isId && type2.isId)
return CompactEnsuringGap
val firstCharOfToken2 = token2.text.head
if (formattingPreferences(SpacesWithinPatternBinders) && type1.isId && type2 == AT)
return CompactEnsuringGap
if (formattingPreferences(SpacesWithinPatternBinders) && type1 == AT)
return CompactEnsuringGap
if (Set(HASH, AT).contains(type1) && isOperatorPart(firstCharOfToken2))
return CompactEnsuringGap
val lastCharOfToken1 = token1.text.last
val firstIsIdEndingWithOpChar = type1.isId && (lastCharOfToken1 == '_' || isOperatorPart(lastCharOfToken1))
if (Set(HASH, COLON, AT).contains(type2) && firstIsIdEndingWithOpChar)
return CompactEnsuringGap
if (type2 == COLON && formattingPreferences(SpaceBeforeColon))
return CompactEnsuringGap
type1 match {
case ARROW if type2 != RPAREN ⇒ return CompactEnsuringGap // TODO: Redundant? no test fails.
case COMMA ⇒ return CompactEnsuringGap
case _ ⇒
}
type2 match {
case IF if type1 != LPAREN ⇒ return CompactEnsuringGap
case ARROW if type1 != LPAREN ⇒ return CompactEnsuringGap
case AT if type2.isId ⇒ return CompactEnsuringGap
case _ ⇒
}
Compact
}
protected def containsNewline(tokens: List[Token]): Boolean =
tokens exists { token ⇒
require(token != null)
require(token.text != null, token)
token != tokens.head && hiddenPredecessors(token).containsNewline ||
token.text.contains("\\n") ||
isInferredNewline(token) && inferredNewlines(token).containsNewline // TODO: Why would an inferred newline not contain newline?
}
protected def containsNewline(astNode: AstNode): Boolean = containsNewline(astNode.tokens)
}
object ScalaFormatter {
// format: OFF
val ENSURE_SPACE_AFTER = Set(
ABSTRACT, CASE, CATCH, CLASS, DEF,
DO, ELSE, EXTENDS, FINAL,
FINALLY, FOR, FORSOME, IF, IMPLICIT,
IMPORT, LAZY, MATCH, NEW,
OBJECT, OVERRIDE, PACKAGE, PRIVATE, PROTECTED,
RETURN, SEALED, /* SUPER, THIS, */
THROW, TRAIT, TRY, /* TYPE ,*/
VAL, VAR, WHILE, WITH, YIELD,
/* USCORE, */ COLON, EQUALS, ARROW, LARROW, RARROW, SUBTYPE, VIEWBOUND, SUPERTYPE, /* HASH, AT */
LBRACE, SEMI)
val ENSURE_SPACE_BEFORE = Set(
ABSTRACT, CASE, CATCH, CLASS, DEF,
/* DO, */ ELSE, EXTENDS, FINAL,
FINALLY, /* FOR, */ FORSOME, /* IF, */ IMPLICIT,
/* IMPORT, */ LAZY, MATCH, /* NEW, */
OBJECT, OVERRIDE, /* PACKAGE, */ PRIVATE, PROTECTED,
/* RETURN, */ SEALED, /* SUPER, THIS, */
/* THROW, */ TRAIT, /* TRY, TYPE, */
VAL, VAR, /* WHILE, */ WITH, YIELD,
/* USCORE, COLON, */ EQUALS, /* ARROW, */ LARROW, RARROW, SUBTYPE, VIEWBOUND, SUPERTYPE, /*, HASH, AT, */
RBRACE)
// format: ON
@throws(classOf[ScalaParserException])
def format(source: String, formattingPreferences: IFormattingPreferences = FormattingPreferences(), lineDelimiter: Option[String] = None,
initialIndentLevel: Int = 0, scalaVersion: String = ScalaVersions.DEFAULT_VERSION): String = {
val edits = formatAsEdits(source, formattingPreferences, lineDelimiter, initialIndentLevel, scalaVersion)
TextEditProcessor.runEdits(source, edits)
}
@throws(classOf[ScalaParserException])
def formatAsEdits(source: String, formattingPreferences: IFormattingPreferences = FormattingPreferences(), lineDelimiter: Option[String] = None,
initialIndentLevel: Int = 0, scalaVersion: String = ScalaVersions.DEFAULT_VERSION): List[TextEdit] = {
val specificFormatter = new SpecificFormatter {
type Result = CompilationUnit
def parse(parser: ScalaParser) = parser.compilationUnitOrScript()
def format(formatter: ScalaFormatter, result: Result) = formatter.format(result)(FormatterState(indentLevel = initialIndentLevel))
}
val (edits, _) = specificFormatter.fullFormat(source, scalaVersion = scalaVersion)(formattingPreferences)
edits
}
}
| jkinkead/scalariform | scalariform/src/main/scala/scalariform/formatter/ScalaFormatter.scala | Scala | mit | 21,504 |
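// Illustrative sketch (not part of the repository record above): formatting a source string through
// the ScalaFormatter companion's format(...) entry point with its default preferences. The input
// snippet is an arbitrary example; invalid Scala would raise a ScalaParserException instead.
object ScalaFormatterExample extends App {
  import scalariform.formatter.ScalaFormatter

  val source = "class A{def x=1}"
  println(ScalaFormatter.format(source)) // uses FormattingPreferences() and an initial indent of 0
}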
class foo(a: String) extends annotation.StaticAnnotation
object o {
implicit def i2s(i: Int) = ""
@foo(1: String) def blerg { }
}
| loskutov/intellij-scala | testdata/scalacTests/pos/t5892.scala | Scala | apache-2.0 | 134 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.utils.iterators
import scala.collection.Iterator.empty
object InfiniteIterator {
implicit class InfiniteIterator[A](val iter: Iterator[A]) {
/** @param p the predicate defining which element to stop after
*
* @return a [[Iterator]] which will delegate to the underlying iterator and return all elements up to
* and including the first element ``a`` for which p(a) == true after which no more calls will
* be made to the underlying iterator
*/
def stopAfter(p: A => Boolean): Iterator[A] = new Iterator[A] {
private var done = false
override def hasNext = !done && iter.hasNext
override def next() = {
if (!done) {
val next = iter.next()
done = p(next)
next
} else {
empty.next()
}
}
}
}
}
| elahrvivaz/geomesa | geomesa-utils/src/main/scala/org/locationtech/geomesa/utils/iterators/InfiniteIterator.scala | Scala | apache-2.0 | 1,343 |
object Test {
def main(args: Array[String]): Unit = {
println(rewrite("foo"))
println(rewrite("foo" + "foo"))
rewrite {
println("apply")
}
rewrite {
println("block")
println("block")
}
val b: Boolean = true
rewrite {
if b then println("then")
else println("else")
}
rewrite {
if !b then println("then")
else println("else")
}
rewrite {
val s: String = "val"
println(s)
}
rewrite {
val s: "vals" = "vals"
println(s) // prints "foo" not "oof"
}
rewrite {
def s: String = "def"
println(s)
}
rewrite {
def s: "defs" = "defs"
println(s) // prints "foo" not "oof"
}
rewrite {
def s(x: String): String = x
println(s("def"))
}
rewrite {
var s: String = "var"
s = "bar"
println(s)
}
rewrite {
try println("try")
finally println("finally")
}
rewrite {
try throw new Exception()
catch case x: Exception => println("catch")
}
rewrite {
var x = true
while (x) {
println("while")
x = false
}
}
rewrite {
val t = new Tuple1("new")
println(t._1)
}
rewrite {
println("typed": String)
println("typed": Any)
}
rewrite {
val f = new Foo(foo = "namedArg")
println(f.foo)
}
rewrite {
println("qual".reverse)
}
rewrite {
val f = () => "lambda"
println(f())
}
rewrite {
def f(args: String*): String = args.mkString
println(f("var", "args"))
}
rewrite {
"match" match {
case "match" => println("match")
case x => println("x")
}
}
// FIXME should print fed
rewrite {
def s: String = return "def"
println(s)
}
rewrite {
class Foo {
println("new Foo")
}
new Foo
}
}
}
class Foo(val foo: String)
| som-snytt/dotty | tests/run-macros/expr-map-1/Test_2.scala | Scala | apache-2.0 | 2,000 |
package com.github.pedrovgs.kuronometer.free.interpreter.formatter
import com.github.pedrovgs.kuronometer.generators.BuildExecutionGenerators._
import org.scalatest.prop.PropertyChecks
import org.scalatest.{FlatSpec, Matchers}
import scala.concurrent.duration._
class DurationFormatterSpec
extends FlatSpec
with Matchers
with PropertyChecks {
private val formatter = DurationFormatter.NanosecondsFormat
"DurationFormatter" should "show 0 nanoseconds as 0 ns" in {
formatter.format(0) shouldBe "0 ns"
}
it should "show 1 ns as 1 ns" in {
formatter.format(1) shouldBe "1 ns"
}
it should "show 1000 ns as 1 μs" in {
formatter.format(1000) shouldBe "1 μs"
}
it should "show values greater than 1000 ns as the composition of μs plus ns" in {
formatter.format(1001) shouldBe "1 μs 1 ns"
}
it should "show values greater than 10000000 ns as the composition of ms plus μs plus ns" in {
formatter.format(1000001) shouldBe "1 ms 1 ns"
}
it should "show values composed by ms μs and ns" in {
formatter.format(1001001) shouldBe "1 ms 1 μs 1 ns"
}
it should "show values composed by secs ms μs and ns" in {
formatter.format(2001001001) shouldBe "2 secs 1 ms 1 μs 1 ns"
}
it should "show values composed by minutes secs ms μs and ns" in {
formatter.format(62001001001L) shouldBe "1 min 2 secs 1 ms 1 μs 1 ns"
}
it should "show values composed by hours minutes secs ms μs and ns" in {
formatter.format(3662001001001L) shouldBe "1 hour 1 min 2 secs 1 ms 1 μs 1 ns"
}
it should "shows 1 day" in {
val duration: Duration = 1.days
formatter.format(duration.toNanos) shouldBe "1 day"
}
it should "show more than 1 day in plural" in {
val duration: Duration = 2.days
formatter.format(duration.toNanos) shouldBe "2 days"
}
it should "keep symmetry property" in {
forAll(buildExecutionTime) { (executionTime: Long) =>
val formattedString = formatter.format(executionTime)
formatter.parse(formattedString) shouldBe executionTime
}
}
}
| pedrovgs/Kuronometer | kuronometer-core/src/test/scala/com/github/pedrovgs/kuronometer/free/interpreter/formatter/DurationFormatterSpec.scala | Scala | apache-2.0 | 2,074 |
package com.twitter.finatra.http
import com.fasterxml.jackson.databind.JsonNode
import com.google.common.net.{HttpHeaders => CommonHttpHeaders, MediaType}
import com.google.inject.Stage
import com.twitter.finagle.http.{Method, Status, _}
import com.twitter.finatra.json.{FinatraObjectMapper, JsonDiff}
import com.twitter.inject.server.PortUtils.{ephemeralLoopback, loopbackAddressForPort}
import com.twitter.inject.server.{EmbeddedTwitterServer, PortUtils, Ports}
import com.twitter.util.Try
/**
*
* EmbeddedHttpServer allows a [[com.twitter.server.TwitterServer]] serving http endpoints to be started
 * locally (on ephemeral ports), and tested through its http interfaces.
*
* @param twitterServer The [[com.twitter.server.TwitterServer]] to be started for testing.
* @param flags Command line flags (e.g. "foo"->"bar" is translated into -foo=bar). See: [[com.twitter.app.Flag]].
* @param args Extra command line arguments.
* @param waitForWarmup Once the server is started, wait for server warmup to be completed
* @param stage [[com.google.inject.Stage]] used to create the server's injector. Since EmbeddedHttpServer is used for testing,
* we default to Stage.DEVELOPMENT. This makes it possible to only mock objects that are used in a given test,
* at the expense of not checking that the entire object graph is valid. As such, you should always have at
* least one Stage.PRODUCTION test for your service (which eagerly creates all classes at startup)
* @param useSocksProxy Use a tunneled socks proxy for external service discovery/calls (useful for manually run external
* integration tests that connect to external services).
* @param defaultRequestHeaders Headers to always send to the embedded server.
* @param defaultHttpSecure Default all requests to the server to be HTTPS.
 * @param mapperOverride [[com.twitter.finatra.json.FinatraObjectMapper]] to use instead of the mapper configured by
* the embedded server.
* @param httpPortFlag Name of the flag that defines the external http port for the server.
* @param streamResponse Toggle to not unwrap response content body to allow caller to stream response.
* @param verbose Enable verbose logging during test runs.
* @param disableTestLogging Disable all logging emitted from the test infrastructure.
* @param maxStartupTimeSeconds Maximum seconds to wait for embedded server to start. If exceeded a
* [[com.twitter.inject.app.StartupTimeoutException]] is thrown.
*/
class EmbeddedHttpServer(
val twitterServer: Ports,
flags: Map[String, String] = Map(),
args: Seq[String] = Seq(),
waitForWarmup: Boolean = true,
stage: Stage = Stage.DEVELOPMENT,
useSocksProxy: Boolean = false,
defaultRequestHeaders: Map[String, String] = Map(),
defaultHttpSecure: Boolean = false,
mapperOverride: Option[FinatraObjectMapper] = None,
httpPortFlag: String = "http.port",
streamResponse: Boolean = false,
verbose: Boolean = false,
disableTestLogging: Boolean = false,
maxStartupTimeSeconds: Int = 60)
extends EmbeddedTwitterServer(
twitterServer = twitterServer,
flags = flags + (httpPortFlag -> ephemeralLoopback),
args = args,
waitForWarmup = waitForWarmup,
stage = stage,
useSocksProxy = useSocksProxy,
defaultRequestHeaders = defaultRequestHeaders,
streamResponse = streamResponse,
verbose = verbose,
disableTestLogging = disableTestLogging,
maxStartupTimeSeconds = maxStartupTimeSeconds) {
/* Additional Constructors */
def this(twitterServer: Ports) = {
this(twitterServer, flags = Map())
}
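  // Usage sketch (illustrative only; `ExampleHttpServer` is an assumed HttpServer subclass, not part of this file):
  //   val server = new EmbeddedHttpServer(twitterServer = new ExampleHttpServer)
  //   server.httpGet("/health", andExpect = Status.Ok)
  //   server.close()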
/* Overrides */
override protected def logStartup() {
super.logStartup()
info(s"ExternalHttp -> http://$externalHttpHostAndPort")
}
override protected def printNonEmptyResponseBody(response: Response): Unit = {
try {
info(mapper.writePrettyString(
response.getContentString()))
} catch {
case e: Exception =>
info(response.contentString)
}
info("")
}
override protected def prettyRequestBody(request: Request): String = {
    val printableBody = request.contentString.replaceAll("[\\p{Cntrl}&&[^\n\t\r]]", "?") //replace non-printable characters
Try {
mapper.writePrettyString(printableBody)
} getOrElse {
printableBody
}
}
override def close() {
if (!closed) {
super.close()
if (twitterServer.httpExternalPort.isDefined) {
httpClient.close()
}
if (twitterServer.httpsExternalPort.isDefined) {
httpsClient.close()
}
closed = true
}
}
override def bind[T : Manifest](instance: T): EmbeddedHttpServer = {
bindInstance[T](instance)
this
}
/* Public */
lazy val httpClient = {
createHttpClient(
"httpClient",
httpExternalPort)
}
lazy val httpsClient = {
createHttpClient(
"httpsClient",
httpsExternalPort,
secure = true)
}
lazy val mapper = mapperOverride getOrElse injector.instance[FinatraObjectMapper]
lazy val httpExternalPort = {
start()
twitterServer.httpExternalPort.getOrElse(throw new Exception("External HTTP port not bound"))
}
lazy val httpsExternalPort = {
start()
twitterServer.httpsExternalPort.getOrElse(throw new Exception("External HTTPs port not bound"))
}
lazy val externalHttpHostAndPort = PortUtils.loopbackAddressForPort(httpExternalPort)
lazy val externalHttpsHostAndPort = PortUtils.loopbackAddressForPort(httpsExternalPort)
/**
* Performs a GET request against the embedded server.
*
* @param path - URI of the request
* @param accept - add request Accept header with the given [[com.google.common.net.MediaType]]
* @param headers - additional headers that should be passed with the request
* @param suppress - suppress http client logging
* @param andExpect - expected [[com.twitter.finagle.http.Status]] value
* @param withLocation - expected response Location header value
* @param withBody - expected body as a String
* @param withJsonBody - expected body as JSON
* @param withJsonBodyNormalizer - normalizer to use in conjunction with withJsonBody
* @param withErrors - expected errors
* @param routeToAdminServer - force the request to the admin interface of the embedded server, false by default
* @param secure - use the https port to address the embedded server, default = None
* @return a [[com.twitter.finagle.http.Response]] on success otherwise an exception
* if any of the assertions defined by andExpect or withXXXX fail
*/
def httpGet(
path: String,
accept: MediaType = null,
headers: Map[String, String] = Map(),
suppress: Boolean = false,
andExpect: Status = Status.Ok,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
withErrors: Seq[String] = null,
routeToAdminServer: Boolean = false,
secure: Option[Boolean] = None): Response = {
val request = createApiRequest(path, Method.Get)
jsonAwareHttpExecute(request, addAcceptHeader(accept, headers), suppress, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure = secure.getOrElse(defaultHttpSecure))
}
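  // Example (hedged sketch on an EmbeddedHttpServer instance `server`; the path and JSON are made up):
  //   server.httpGet(
  //     path = "/users/123",
  //     andExpect = Status.Ok,
  //     withJsonBody = """{"id": 123, "name": "alice"}""")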
/**
* Performs a GET request against the embedded server serializing the normalized
* response#contentString into an instance of type [[ResponseType]].
*
* @see [[com.twitter.finatra.json.FinatraObjectMapper]]#parse[T: Manifest](string: String)
* @param path - URI of the request
* @param accept - add request Accept header with the given [[com.google.common.net.MediaType]]
* @param headers - additional headers that should be passed with the request
* @param suppress - suppress http client logging
* @param andExpect - expected [[com.twitter.finagle.http.Status]] value
* @param withLocation - expected response Location header value
* @param withBody - expected body as a String
* @param withJsonBody - expected body as JSON
* @param withJsonBodyNormalizer - normalizer to use in conjunction with withJsonBody.
* @param normalizeJsonParsedReturnValue - if the normalizer SHOULD be applied on the parsing of the
   *                                       response#contentString into type [[ResponseType]], default = true.
* @param withErrors - expected errors
* @param routeToAdminServer - force the request to the admin interface of the embedded server, false by default.
* @param secure - use the https port to address the embedded server, default = None
* @tparam ResponseType - parse the response#contentString into type [[ResponseType]]
   * @return instance of type [[ResponseType]] serialized from the response#contentString.
*/
def httpGetJson[ResponseType: Manifest](
path: String,
accept: MediaType = null,
headers: Map[String, String] = Map(),
suppress: Boolean = false,
andExpect: Status = Status.Ok,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
normalizeJsonParsedReturnValue: Boolean = true,
withErrors: Seq[String] = null,
routeToAdminServer: Boolean = false,
secure: Option[Boolean] = None): ResponseType = {
assert(manifest[ResponseType] != manifest[Nothing], "httpGetJson requires a type-param to parse the JSON response into, e.g. http<Method>Json[MyCaseClass] or http<Method>Json[JsonNode]")
val response =
httpGet(path, accept = MediaType.JSON_UTF_8, headers = headers, suppress = suppress,
andExpect = andExpect, withLocation = withLocation,
withJsonBody = withJsonBody, withJsonBodyNormalizer = withJsonBodyNormalizer)
jsonParseWithNormalizer(response, withJsonBodyNormalizer, normalizeJsonParsedReturnValue)
}
/**
* Performs a POST request against the embedded server.
*
* @param path - URI of the request
* @param postBody - body of the POST request
* @param accept - add request Accept header with the given [[com.google.common.net.MediaType]]
* @param suppress - suppress http client logging
* @param contentType - request Content-Type header value, application/json by default
* @param headers - additional headers that should be passed with the request
* @param andExpect - expected [[com.twitter.finagle.http.Status]] value
* @param withLocation - expected response Location header value
* @param withBody - expected body as a String
* @param withJsonBody - expected body as JSON
* @param withJsonBodyNormalizer - normalizer to use in conjunction with withJsonBody
* @param withErrors - expected errors
* @param routeToAdminServer - force the request to the admin interface of the embedded server, false by default
* @param secure - use the https port to address the embedded server, default = None
* @return a [[com.twitter.finagle.http.Response]] on success otherwise an exception
* if any of the assertions defined by andExpect or withXXXX fail
*/
def httpPost(
path: String,
postBody: String,
accept: MediaType = null,
suppress: Boolean = false,
contentType: String = Message.ContentTypeJson,
headers: Map[String, String] = Map(),
andExpect: Status = null,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
withErrors: Seq[String] = null,
routeToAdminServer: Boolean = false,
secure: Option[Boolean] = None): Response = {
val request = createApiRequest(path, Method.Post)
request.setContentString(postBody)
request.headerMap.set(CommonHttpHeaders.CONTENT_LENGTH, request.content.length.toString)
request.headerMap.set(CommonHttpHeaders.CONTENT_TYPE, contentType)
jsonAwareHttpExecute(request, addAcceptHeader(accept, headers), suppress, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure = secure.getOrElse(defaultHttpSecure))
}
/**
* Performs a POST request against the embedded server serializing the normalized
* response#contentString into an instance of type [[ResponseType]].
*
* @see [[com.twitter.finatra.json.FinatraObjectMapper]]#parse[T: Manifest](string: String)
* @param path - URI of the request
* @param postBody - body of the POST request
* @param suppress - suppress http client logging
* @param headers - additional headers that should be passed with the request
* @param andExpect - expected [[com.twitter.finagle.http.Status]] value
* @param withLocation - expected response Location header value
* @param withBody - expected body as a String
* @param withJsonBody - expected body as JSON
* @param withJsonBodyNormalizer - normalizer to use in conjunction with withJsonBody.
* @param normalizeJsonParsedReturnValue - if the normalizer SHOULD be applied on the parsing of the
* response#contentString into type [[ResponseType]], default = false.
* @param withErrors - expected errors
* @param routeToAdminServer - force the request to the admin interface of the embedded server, false by default.
* @param secure - use the https port to address the embedded server, default = None
* @tparam ResponseType - parse the response#contentString into type [[ResponseType]]
   * @return instance of type [[ResponseType]] serialized from the response#contentString.
*/
def httpPostJson[ResponseType: Manifest](
path: String,
postBody: String,
suppress: Boolean = false,
headers: Map[String, String] = Map(),
andExpect: Status = Status.Ok,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
normalizeJsonParsedReturnValue: Boolean = false,
withErrors: Seq[String] = null,
routeToAdminServer: Boolean = false,
secure: Option[Boolean] = None): ResponseType = {
assert(manifest[ResponseType] != manifest[Nothing], "httpPostJson requires a type-param to parse the JSON response into, e.g. http<Method>Json[MyCaseClass] or http<Method>Json[JsonNode]")
val response = httpPost(path, postBody, MediaType.JSON_UTF_8, suppress, Message.ContentTypeJson, headers, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure)
jsonParseWithNormalizer(response, withJsonBodyNormalizer, normalizeJsonParsedReturnValue)
}
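  // Example (hedged sketch; `CreateUserResponse` is an assumed case class, not defined here):
  //   val created = server.httpPostJson[CreateUserResponse](
  //     path = "/users",
  //     postBody = """{"name": "alice"}""",
  //     andExpect = Status.Created)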
/**
* Performs a PUT request against the embedded server.
*
* @param path - URI of the request
* @param putBody - the body of the PUT request
* @param accept - add request Accept header with the given [[com.google.common.net.MediaType]]
* @param suppress - suppress http client logging
* @param contentType - request Content-Type header value, application/json by default
* @param headers - additional headers that should be passed with the request
* @param andExpect - expected [[com.twitter.finagle.http.Status]] value
* @param withLocation - expected response Location header value
* @param withBody - expected body as a String
* @param withJsonBody - expected body as JSON
* @param withJsonBodyNormalizer - normalizer to use in conjunction with withJsonBody.
* @param withErrors - expected errors
* @param routeToAdminServer - force the request to the admin interface of the embedded server, false by default.
* @param secure - use the https port to address the embedded server, default = None
* @return a [[com.twitter.finagle.http.Response]] on success otherwise an exception
* if any of the assertions defined by andExpect or withXXXX fail
*/
def httpPut(
path: String,
putBody: String,
accept: MediaType = null,
suppress: Boolean = false,
contentType: String = Message.ContentTypeJson,
headers: Map[String, String] = Map(),
andExpect: Status = null,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
withErrors: Seq[String] = null,
routeToAdminServer: Boolean = false,
secure: Option[Boolean] = None): Response = {
val request = createApiRequest(path, Method.Put)
request.setContentString(putBody)
request.headerMap.set(CommonHttpHeaders.CONTENT_LENGTH, request.content.length.toString)
request.headerMap.set(CommonHttpHeaders.CONTENT_TYPE, contentType)
jsonAwareHttpExecute(request, addAcceptHeader(accept, headers), suppress, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure = secure.getOrElse(defaultHttpSecure))
}
/**
* Performs a PUT request against the embedded server serializing the normalized
* response#contentString into an instance of type [[ResponseType]].
*
* @see [[com.twitter.finatra.json.FinatraObjectMapper]]#parse[T: Manifest](string: String)
* @param path - URI of the request
* @param putBody - the body of the PUT request
* @param suppress - suppress http client logging
* @param headers - additional headers that should be passed with the request
* @param andExpect - expected [[com.twitter.finagle.http.Status]] value
* @param withLocation - expected response Location header value
* @param withBody - expected body as a String
* @param withJsonBody - expected body as JSON
* @param withJsonBodyNormalizer - normalizer to use in conjunction with withJsonBody.
* @param normalizeJsonParsedReturnValue - if the normalizer SHOULD be applied on the parsing of the
* response#contentString into type [[ResponseType]], default = false.
* @param withErrors - expected errors
* @param routeToAdminServer - force the request to the admin interface of the embedded server, false by default.
* @param secure - use the https port to address the embedded server, default = None
* @tparam ResponseType - parse the response#contentString into type [[ResponseType]]
   * @return instance of type [[ResponseType]] serialized from the response#contentString.
*/
def httpPutJson[ResponseType: Manifest](
path: String,
putBody: String,
suppress: Boolean = false,
headers: Map[String, String] = Map(),
andExpect: Status = Status.Ok,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
normalizeJsonParsedReturnValue: Boolean = false,
withErrors: Seq[String] = null,
routeToAdminServer: Boolean = false,
secure: Option[Boolean] = None): ResponseType = {
assert(manifest[ResponseType] != manifest[Nothing], "httpPutJson requires a type-param to parse the JSON response into, e.g. httpPutJson[MyCaseClass] or httpPutJson[JsonNode]")
val response = httpPut(path, putBody, MediaType.JSON_UTF_8, suppress, Message.ContentTypeJson, headers, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure)
jsonParseWithNormalizer(response, withJsonBodyNormalizer, normalizeJsonParsedReturnValue)
}
/**
* Performs a DELETE request against the embedded server.
*
* @param path - URI of the request
* @param deleteBody - the body of the DELETE request
* @param accept - add request Accept header with the given [[com.google.common.net.MediaType]]
* @param suppress - suppress http client logging
* @param headers - additional headers that should be passed with the request
* @param andExpect - expected [[com.twitter.finagle.http.Status]] value
* @param withLocation - expected response Location header value
* @param withBody - expected body as a String
* @param withJsonBody - expected body as JSON
* @param withJsonBodyNormalizer - normalizer to use in conjunction with withJsonBody.
* @param withErrors - expected errors
* @param routeToAdminServer - force the request to the admin interface of the embedded server, false by default.
* @param secure - use the https port to address the embedded server, default = None
* @return a [[com.twitter.finagle.http.Response]] on success otherwise an exception
* if any of the assertions defined by andExpect or withXXXX fail
*/
def httpDelete(
path: String,
deleteBody: String = null,
accept: MediaType = null,
suppress: Boolean = false,
headers: Map[String, String] = Map(),
andExpect: Status = null,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
withErrors: Seq[String] = null,
routeToAdminServer: Boolean = false,
secure: Option[Boolean] = None): Response = {
val request = createApiRequest(path, Method.Delete)
if (deleteBody != null) {
request.setContentString(deleteBody)
}
jsonAwareHttpExecute(
request,
addAcceptHeader(accept, headers),
suppress,
andExpect,
withLocation,
withBody,
withJsonBody,
withJsonBodyNormalizer,
withErrors,
routeToAdminServer,
secure = secure.getOrElse(defaultHttpSecure))
}
/**
* Performs a DELETE request against the embedded server serializing the normalized
* response#contentString into an instance of type [[ResponseType]].
*
* @see [[com.twitter.finatra.json.FinatraObjectMapper]]#parse[T: Manifest](string: String)
* @param path - URI of the request
* @param deleteBody - the body of the DELETE request
* @param suppress - suppress http client logging
* @param headers - additional headers that should be passed with the request
* @param andExpect - expected [[com.twitter.finagle.http.Status]] value
* @param withLocation - expected response Location header value
* @param withBody - expected body as a String
* @param withJsonBody - expected body as JSON
* @param withJsonBodyNormalizer - normalizer to use in conjunction with withJsonBody.
* @param normalizeJsonParsedReturnValue - if the normalizer SHOULD be applied on the parsing of the
* response#contentString into type [[ResponseType]], default = false.
* @param withErrors - expected errors
* @param routeToAdminServer - force the request to the admin interface of the embedded server, false by default.
* @param secure - use the https port to address the embedded server, default = None
* @tparam ResponseType - parse the response#contentString into type [[ResponseType]]
   * @return instance of type [[ResponseType]] serialized from the response#contentString.
*/
def httpDeleteJson[ResponseType: Manifest](
path: String,
deleteBody: String,
suppress: Boolean = false,
headers: Map[String, String] = Map(),
andExpect: Status = Status.Ok,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
normalizeJsonParsedReturnValue: Boolean = false,
withErrors: Seq[String] = null,
routeToAdminServer: Boolean = false,
secure: Option[Boolean] = None): ResponseType = {
assert(manifest[ResponseType] != manifest[Nothing], "httpDeleteJson requires a type-param to parse the JSON response into, e.g. http<Method>Json[MyCaseClass] or http<Method>Json[JsonNode]")
val response = httpDelete(path, deleteBody, MediaType.JSON_UTF_8, suppress, headers, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure)
jsonParseWithNormalizer(response, withJsonBodyNormalizer, normalizeJsonParsedReturnValue)
}
/**
* Performs a OPTIONS request against the embedded server.
*
* @param path - URI of the request
* @param accept - add request Accept header with the given [[com.google.common.net.MediaType]]
* @param headers - additional headers that should be passed with the request
* @param suppress - suppress http client logging
* @param andExpect - expected [[com.twitter.finagle.http.Status]] value
* @param withLocation - expected response Location header value
* @param withBody - expected body as a String
* @param withJsonBody - expected body as JSON
* @param withJsonBodyNormalizer - normalizer to use in conjunction with withJsonBody
* @param withErrors - expected errors
* @param routeToAdminServer - force the request to the admin interface of the embedded server, false by default
* @param secure - use the https port to address the embedded server, default = None
* @return a [[com.twitter.finagle.http.Response]] on success otherwise an exception
* if any of the assertions defined by andExpect or withXXXX fail
*/
def httpOptions(
path: String,
accept: MediaType = null,
headers: Map[String, String] = Map(),
suppress: Boolean = false,
andExpect: Status = Status.Ok,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
withErrors: Seq[String] = null,
routeToAdminServer: Boolean = false,
secure: Option[Boolean] = None): Response = {
val request = createApiRequest(path, Method.Options)
jsonAwareHttpExecute(request, addAcceptHeader(accept, headers), suppress, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure = secure.getOrElse(defaultHttpSecure))
}
/**
* Performs a PATCH request against the embedded server.
*
* @param path - URI of the request
* @param patchBody - the body of the PATCH request
* @param accept - add request Accept header with the given [[com.google.common.net.MediaType]]
* @param suppress - suppress http client logging
* @param headers - additional headers that should be passed with the request
* @param andExpect - expected [[com.twitter.finagle.http.Status]] value
* @param withLocation - expected response Location header value
* @param withBody - expected body as a String
* @param withJsonBody - expected body as JSON
* @param withJsonBodyNormalizer - normalizer to use in conjunction with withJsonBody
* @param withErrors - expected errors
* @param routeToAdminServer - force the request to the admin interface of the embedded server, false by default
* @param secure - use the https port to address the embedded server, default = None
* @return a [[com.twitter.finagle.http.Response]] on success otherwise an exception
* if any of the assertions defined by andExpect or withXXXX fail
*/
def httpPatch(
path: String,
patchBody: String,
accept: MediaType = null,
suppress: Boolean = false,
headers: Map[String, String] = Map(),
andExpect: Status = Status.Ok,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
withErrors: Seq[String] = null,
routeToAdminServer: Boolean = false,
secure: Option[Boolean] = None): Response = {
val request = createApiRequest(path, Method.Patch)
request.setContentString(patchBody)
request.headerMap.set(CommonHttpHeaders.CONTENT_LENGTH, request.content.length.toString)
jsonAwareHttpExecute(request, addAcceptHeader(accept, headers), suppress, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure = secure.getOrElse(defaultHttpSecure))
}
/**
* Performs a PATCH request against the embedded server serializing the normalized
* response#contentString into an instance of type [[ResponseType]].
*
* @see [[com.twitter.finatra.json.FinatraObjectMapper]]#parse[T: Manifest](string: String)
* @param path - URI of the request
* @param patchBody - the body of the PATCH request
* @param suppress - suppress http client logging
* @param headers - additional headers that should be passed with the request
* @param andExpect - expected [[com.twitter.finagle.http.Status]] value
* @param withLocation - expected response Location header value
* @param withBody - expected body as a String
* @param withJsonBody - expected body as JSON
* @param withJsonBodyNormalizer - normalizer to use in conjunction with withJsonBody.
* @param normalizeJsonParsedReturnValue - if the normalizer SHOULD be applied on the parsing of the
* response#contentString into type [[ResponseType]], default = false
* @param withErrors - expected errors
* @param routeToAdminServer - force the request to the admin interface of the embedded server, false by default
* @param secure - use the https port to address the embedded server, default = None
* @tparam ResponseType - parse the response#contentString into type [[ResponseType]]
   * @return instance of type [[ResponseType]] serialized from the response#contentString.
*/
def httpPatchJson[ResponseType: Manifest](
path: String,
patchBody: String,
suppress: Boolean = false,
headers: Map[String, String] = Map(),
andExpect: Status = Status.Ok,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
normalizeJsonParsedReturnValue: Boolean = false,
withErrors: Seq[String] = null,
routeToAdminServer: Boolean = false,
secure: Option[Boolean] = None): ResponseType = {
assert(manifest[ResponseType] != manifest[Nothing], "httpPatchJson requires a type-param to parse the JSON response into, e.g. http<Method>Json[MyCaseClass] or http<Method>Json[JsonNode]")
val response = httpPatch(path, patchBody, MediaType.JSON_UTF_8, suppress, headers, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure)
jsonParseWithNormalizer(response, withJsonBodyNormalizer, normalizeJsonParsedReturnValue)
}
/**
* Performs a HEAD request against the embedded server.
*
* @param path - URI of the request
* @param accept - add request Accept header with the given [[com.google.common.net.MediaType]]
* @param headers - additional headers that should be passed with the request
* @param suppress - suppress http client logging
* @param andExpect - expected [[com.twitter.finagle.http.Status]] value
* @param withLocation - expected response Location header value
* @param withBody - expected body as a String
* @param withJsonBody - expected body as JSON
* @param withJsonBodyNormalizer - normalizer to use in conjunction with withJsonBody
* @param withErrors - expected errors
* @param routeToAdminServer - force the request to the admin interface of the embedded server, false by default
* @param secure - use the https port to address the embedded server, default = None
* @return a [[com.twitter.finagle.http.Response]] on success otherwise an exception
* if any of the assertions defined by andExpect or withXXXX fail
*/
def httpHead(
path: String,
accept: MediaType = null,
headers: Map[String, String] = Map(),
suppress: Boolean = false,
andExpect: Status = Status.Ok,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
withErrors: Seq[String] = null,
routeToAdminServer: Boolean = false,
secure: Option[Boolean] = None): Response = {
val request = createApiRequest(path, Method.Head)
jsonAwareHttpExecute(request, addAcceptHeader(accept, headers), suppress, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure = secure.getOrElse(defaultHttpSecure))
}
/**
* Performs a form POST request against the embedded server.
*
* @param path - URI of the request
* @param params - a Map[String,String] of form params to send in the request
* @param multipart - if this form post is a multi-part request, false by default
* @param routeToAdminServer - force the request to the admin interface of the embedded server, false by default
* @param headers - additional headers that should be passed with the request
* @param andExpect - expected [[com.twitter.finagle.http.Status]] value
* @param withBody - expected body as a String
* @param withJsonBody - expected body as JSON
* @param secure - use the https port to address the embedded server, default = None
* @return a [[com.twitter.finagle.http.Response]] on success otherwise an exception
* if any of the assertions defined by andExpect or withXXXX fail
*/
def httpFormPost(
path: String,
params: Map[String, String],
multipart: Boolean = false,
routeToAdminServer: Boolean = false,
headers: Map[String, String] = Map.empty,
andExpect: Status = Status.Ok,
withBody: String = null,
withJsonBody: String = null,
secure: Option[Boolean] = None): Response = {
formPost(
path = path,
params = paramsToElements(params),
multipart = multipart,
routeToAdminServer = routeToAdminServer,
headers = headers,
andExpect = andExpect,
withBody = withBody,
withJsonBody = withJsonBody,
secure = secure)
}
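  // Example (hedged sketch; the path and params are illustrative):
  //   server.httpFormPost(
  //     path = "/login",
  //     params = Map("username" -> "alice", "password" -> "secret"),
  //     andExpect = Status.Ok)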
/**
* Performs a multi-part form POST request against the embedded server.
*
* @param path - URI of the request
* @param params - a Seq of [[com.twitter.finagle.http.FormElement]] to send in the request
* @param routeToAdminServer - force the request to the admin interface of the embedded server, false by default.
* @param headers - additional headers that should be passed with the request
* @param andExpect - expected [[com.twitter.finagle.http.Status]] value
* @param withBody - expected body as a String
* @param withJsonBody - expected body as JSON
* @param secure - use the https port to address the embedded server, default = None
* @return a [[com.twitter.finagle.http.Response]] on success otherwise an exception
* if any of the assertions defined by andExpect or withXXXX fail
*/
def httpMultipartFormPost(
path: String,
params: Seq[FormElement],
routeToAdminServer: Boolean = false,
headers: Map[String, String] = Map.empty,
andExpect: Status = Status.Ok,
withBody: String = null,
withJsonBody: String = null,
secure: Option[Boolean] = None): Response = {
formPost(
path = path,
params = params,
multipart = true,
routeToAdminServer = routeToAdminServer,
headers = headers,
andExpect = andExpect,
withBody = withBody,
withJsonBody = withJsonBody,
secure = secure)
}
/**
* Sends the given [[com.twitter.finagle.http.Request]] against the embedded server.
*
* @param request - built [[com.twitter.finagle.http.Request]] to send to the embedded server
* @param suppress - suppress http client logging
* @param andExpect - expected [[com.twitter.finagle.http.Status]] value
* @param withLocation - expected response Location header value
* @param withBody - expected body as a String
* @param withJsonBody - expected body as JSON
* @param withJsonBodyNormalizer - normalizer to use in conjunction with withJsonBody
* @param withErrors - expected errors
* @param routeToAdminServer - force the request to the admin interface of the embedded server, false by default
* @param secure - use the https port to address the embedded server, default = None
* @return a [[com.twitter.finagle.http.Response]] on success otherwise an exception
* if any of the assertions defined by andExpect or withXXXX fail
*/
def httpRequest(
request: Request,
suppress: Boolean = false,
andExpect: Status = null,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
withErrors: Seq[String] = null,
routeToAdminServer: Boolean = false,
secure: Option[Boolean] = None): Response = {
jsonAwareHttpExecute(request, request.headerMap.toMap, suppress, andExpect, withLocation, withBody, withJsonBody, withJsonBodyNormalizer, withErrors, routeToAdminServer, secure = secure.getOrElse(defaultHttpSecure))
}
// Note: Added to support tests from Java code which would need to manually set all arguments with default values
def httpRequest(
request: Request): Response = {
httpRequest(request, suppress = false)
}
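  // Example (hedged sketch): send a hand-built Request when the helpers above are not flexible enough.
  //   val request = Request(Method.Get, "/users")
  //   server.httpRequest(request, andExpect = Status.Ok)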
/* Private */
private def formPost(
path: String,
params: Seq[FormElement],
multipart: Boolean,
routeToAdminServer: Boolean,
headers: Map[String, String],
andExpect: Status,
withBody: String,
withJsonBody: String,
secure: Option[Boolean]): Response = {
val request = RequestBuilder().
url(normalizeURL(path)).
addHeaders(headers).
add(params).
buildFormPost(multipart = multipart)
jsonAwareHttpExecute(
request,
routeToAdminServer = routeToAdminServer,
andExpect = andExpect,
withBody = withBody,
withJsonBody = withJsonBody,
secure = secure.getOrElse(defaultHttpSecure))
}
private def jsonAwareHttpExecute(
request: Request,
headers: Map[String, String] = Map(),
suppress: Boolean = false,
andExpect: Status = Status.Ok,
withLocation: String = null,
withBody: String = null,
withJsonBody: String = null,
withJsonBodyNormalizer: JsonNode => JsonNode = null,
withErrors: Seq[String] = null, //TODO: Deprecate
routeToAdminServer: Boolean = false,
secure: Boolean): Response = {
val (client, port) = chooseHttpClient(request.path, routeToAdminServer, secure)
request.headerMap.set("Host", loopbackAddressForPort(port))
val response = httpExecute(client, request, headers, suppress, andExpect, withLocation, withBody)
if (withJsonBody != null) {
if (!withJsonBody.isEmpty)
JsonDiff.jsonDiff(response.contentString, withJsonBody, withJsonBodyNormalizer, verbose = false)
else
response.contentString should equal("")
}
if (withErrors != null) {
JsonDiff.jsonDiff(response.contentString, Map("errors" -> withErrors), withJsonBodyNormalizer)
}
response
}
private def normalizeURL(path: String) = {
if (path.startsWith("http://"))
path
else
"http://localhost:8080%s".format(path)
}
private def paramsToElements(params: Map[String, String]): Seq[SimpleElement] = {
(params map { case (key, value) =>
SimpleElement(key, value)
}).toSeq
}
private def chooseHttpClient(path: String, forceAdmin: Boolean, secure: Boolean) = {
if (path.startsWith("/admin") || forceAdmin)
(httpAdminClient, httpAdminPort)
else if (secure)
(httpsClient, twitterServer.httpsExternalPort.get)
else
(httpClient, twitterServer.httpExternalPort.get)
}
private def addAcceptHeader(
accept: MediaType,
headers: Map[String, String]): Map[String, String] = {
if (accept != null)
headers + (CommonHttpHeaders.ACCEPT -> accept.toString)
else
headers
}
private def jsonParseWithNormalizer[T: Manifest](
response: Response,
normalizer: JsonNode => JsonNode,
normalizeParsedJsonNode: Boolean) = {
val jsonNode = {
val parsedJsonNode = mapper.parse[JsonNode](response.contentString)
if (normalizer != null && normalizeParsedJsonNode)
normalizer(parsedJsonNode)
else
parsedJsonNode
}
try {
mapper.parse[T](jsonNode)
} catch {
case e: Exception =>
println(s"Json parsing error $e trying to parse response $response with body " + response.contentString)
throw e
}
}
}
| syamantm/finatra | http/src/test/scala/com/twitter/finatra/http/EmbeddedHttpServer.scala | Scala | apache-2.0 | 40,401 |
package worker
import scala.concurrent.duration._
import com.typesafe.config.ConfigFactory
import akka.actor.ActorSystem
import akka.actor.PoisonPill
import akka.actor.Props
import akka.actor.RootActorPath
import akka.cluster.client.{ClusterClientReceptionist, ClusterClientSettings, ClusterClient}
import akka.cluster.singleton.{ClusterSingletonManagerSettings, ClusterSingletonManager}
import akka.japi.Util.immutableSeq
import akka.actor.AddressFromURIString
import akka.actor.ActorPath
import akka.persistence.journal.leveldb.SharedLeveldbStore
import akka.persistence.journal.leveldb.SharedLeveldbJournal
import akka.util.Timeout
import akka.pattern.ask
import akka.actor.Identify
import akka.actor.ActorIdentity
object Main {
def main(args: Array[String]): Unit = {
if (args.isEmpty) {
startBackend(2551, "backend")
Thread.sleep(5000)
startBackend(2552, "backend")
startWorker(0)
Thread.sleep(5000)
startFrontend(0)
} else {
val port = args(0).toInt
if (2000 <= port && port <= 2999)
startBackend(port, "backend")
else if (3000 <= port && port <= 3999)
startFrontend(port)
else
startWorker(port)
}
}
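  // Run sketch (assuming sbt and the port conventions below): "sbt 'runMain worker.Main'" starts a small
  // self-contained cluster; "sbt 'runMain worker.Main 2551'" starts only a backend node, 3001 a frontend,
  // and any other port (e.g. 0) a worker.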
def workTimeout = 10.seconds
def startBackend(port: Int, role: String): Unit = {
val conf = ConfigFactory.parseString(s"akka.cluster.roles=[$role]").
withFallback(ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port)).
withFallback(ConfigFactory.load())
val system = ActorSystem("ClusterSystem", conf)
startupSharedJournal(system, startStore = (port == 2551), path =
ActorPath.fromString("akka.tcp://[email protected]:2551/user/store"))
system.actorOf(
ClusterSingletonManager.props(
Master.props(workTimeout),
PoisonPill,
ClusterSingletonManagerSettings(system).withRole(role)
),
"master")
}
def startFrontend(port: Int): Unit = {
val conf = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port).
withFallback(ConfigFactory.load())
val system = ActorSystem("ClusterSystem", conf)
val frontend = system.actorOf(Props[Frontend], "frontend")
system.actorOf(Props(classOf[WorkProducer], frontend), "producer")
system.actorOf(Props[WorkResultConsumer], "consumer")
}
def startWorker(port: Int): Unit = {
// load worker.conf
val conf = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port).
withFallback(ConfigFactory.load("worker"))
val system = ActorSystem("WorkerSystem", conf)
val initialContacts = immutableSeq(conf.getStringList("contact-points")).map {
case AddressFromURIString(addr) ⇒ RootActorPath(addr) / "system" / "receptionist"
}.toSet
val clusterClient = system.actorOf(
ClusterClient.props(
ClusterClientSettings(system)
.withInitialContacts(initialContacts)),
"clusterClient")
system.actorOf(Worker.props(clusterClient, Props[WorkExecutor]), "worker")
}
def startupSharedJournal(system: ActorSystem, startStore: Boolean, path: ActorPath): Unit = {
    // Start the shared journal on one node (don't crash this SPOF)
// This will not be needed with a distributed journal
if (startStore)
system.actorOf(Props[SharedLeveldbStore], "store")
// register the shared journal
import system.dispatcher
implicit val timeout = Timeout(15.seconds)
val f = (system.actorSelection(path) ? Identify(None))
f.onSuccess {
case ActorIdentity(_, Some(ref)) => SharedLeveldbJournal.setStore(ref, system)
case _ =>
system.log.error("Shared journal not started at {}", path)
system.terminate()
}
f.onFailure {
case _ =>
system.log.error("Lookup of shared journal at {} timed out", path)
system.terminate()
}
}
}
| typesafehub/activator-akka-distributed-workers | src/main/scala/worker/Main.scala | Scala | cc0-1.0 | 3,847 |
import scala.quoted._
def test(using QuoteContext) = {
given QuoteContext = ???
implicit def IntIsLiftable: Liftable[Int] = new {
def toExpr(n: Int) = n match {
case Int.MinValue => '{Int.MinValue}
      case _ if n < 0 => '{- ${toExpr(-n)}}
case 0 => '{0}
case _ if n % 2 == 0 => '{ ${toExpr(n / 2)} * 2 }
case _ => '{ ${toExpr(n / 2)} * 2 + 1 }
}
}
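  // The Int liftable above builds the expression by binary decomposition; e.g. lifting 5
  // yields (roughly) '{ ((0 * 2 + 1) * 2) * 2 + 1 } rather than embedding the literal directly (illustrative).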
implicit def BooleanIsLiftable: Liftable[Boolean] = new {
implicit def toExpr(b: Boolean) =
if (b) '{true} else '{false}
}
implicit def ListIsLiftable[T: Liftable: Type]: Liftable[List[T]] = new {
def toExpr(xs: List[T]) = xs match {
case x :: xs1 => '{ ${ Expr(x) } :: ${ toExpr(xs1) } }
case Nil => '{Nil: List[T]}
}
}
Expr(true)
Expr(1)
Expr('a')
Expr(1)
Expr(1)
Expr(1L)
Expr(1.0f)
Expr(1.0)
Expr("abc")
val xs: Expr[List[Int]] = Expr(1 :: 2 :: 3 :: Nil)
}
| som-snytt/dotty | tests/pos/quote-liftable.scala | Scala | apache-2.0 | 950 |
package com.bowlingx.websocket.hazelcast
import com.hazelcast.core._
import org.atmosphere.util.AbstractBroadcasterProxy
import org.atmosphere.cpr._
import java.net.URI
import com.hazelcast.core.Hazelcast
import com.hazelcast.core.ITopic
import com.hazelcast.core.IMap
import com.hazelcast.core.MessageListener
import com.hazelcast.core.Message
import org.fusesource.scalate.util.Logging
import java.security.MessageDigest
import java.math.BigInteger
object HazelcastInstance {
lazy val hazelcast = Hazelcast.newHazelcastInstance(null)
}
/**
* Serializable Hazelcast Message
* Wraps a Message and Global Cluster ID
* @param msg
* @param clusterIdent
*/
class HazelcastMessage[T](var msg: T, var clusterIdent: String) extends Serializable
object HazelcastBroadcaster extends Logging {
lazy val broadcastTopicIdentifier = classOf[HazelcastBroadcaster].getName
}
/**
* Hazelcast Broadcasting for Atmosphere
*/
class HazelcastBroadcaster(id: String, config: AtmosphereConfig)
extends AbstractBroadcasterProxy(id, URI.create("http://localhost"), config) with Logging {
import HazelcastInstance._
import HazelcastBroadcaster._
private var topic: ITopic[HazelcastMessage[AnyRef]] = _
// A Map to keep track of Messages
lazy val map: IMap[String, String] = hazelcast.getMap(broadcastTopicIdentifier)
  // A cluster-wide unique id used to tag messages published by this broadcaster
  private var uniqueBroadcasterId: Long = _
private var msgListener: MessageListener[HazelcastMessage[AnyRef]] = _
def setup() {
// Subscribe to Topic
topic = getTopic
// Generate Cluster wide unique id to track Message
uniqueBroadcasterId = hazelcast.getIdGenerator(broadcastTopicIdentifier).newId()
msgListener = new MessageListener[HazelcastMessage[AnyRef]] {
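      // When a topic message arrives: register a per-resource listener that clears the delivery-tracking
      // entry once the message has actually been broadcast (or the resource resumes), then fan the
      // message out to all suspended Atmosphere resources.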
def onMessage(message: Message[HazelcastMessage[AnyRef]]) {
import collection.JavaConversions._
getAtmosphereResources foreach { r =>
r.addEventListener(new AtmosphereResourceEventListener() {
def onPreSuspend(event:AtmosphereResourceEvent) {}
def onThrowable(event: AtmosphereResourceEvent) {}
def onBroadcast(event: AtmosphereResourceEvent) {
map.remove(message.getMessageObject.clusterIdent)
event.getResource.removeEventListener(this)
}
def onDisconnect(event: AtmosphereResourceEvent) {}
def onResume(event: AtmosphereResourceEvent) {
map.remove(message.getMessageObject.clusterIdent)
event.getResource.removeEventListener(this)
}
def onSuspend(event: AtmosphereResourceEvent) {}
})
}
// Broadcast message to all atmosphere resources
broadcastReceivedMessage(message.getMessageObject.msg)
}
}
topic.addMessageListener(msgListener)
}
override def setID(id: String) {
super.setID(id)
setup()
}
override def destroy() {
this.synchronized {
topic.removeMessageListener(msgListener)
super.destroy()
}
}
def getTopic = hazelcast.getTopic[HazelcastMessage[AnyRef]](getID)
/**
   * Important: call this method with a delay (to ensure messages could have been delivered).
   * Checks whether the message was successfully delivered.
   * If it was not, the topic can be deleted because there are no listeners.
   * @return true if the message is no longer tracked, i.e. a listener received it
*/
def didReceivedMessage(message:AnyRef) = !map.containsKey(calcMessageHash(message))
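  // Intended usage (sketch, not from the original source): publish via outgoingBroadcast, then after a
  // short delay call didReceivedMessage(msg); if it returns false no listener consumed the message and
  // the topic can be cleaned up.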
def incomingBroadcast() {}
private def calcMessageHash(message:AnyRef) : String = {
val v = "%s@%s" format (uniqueBroadcasterId.toString, message.toString) getBytes "UTF-8"
val dig = MessageDigest.getInstance("MD5")
dig.update(v, 0, v.length)
new BigInteger(1, dig.digest()).toString(16)
}
def outgoingBroadcast(message: AnyRef) {
// Track IDs:
map.put(calcMessageHash(message), getID)
val hcMessage = new HazelcastMessage[AnyRef](message, calcMessageHash(message))
topic.publish(hcMessage)
}
/**
* Broadcast Hazelcast Message (contains original Message and a global Cluster ID)
* @param message
*/
def broadcastReceivedMessage(message: HazelcastMessage[AnyRef]) {
try {
val newMsg = message
newMsg.msg = filter(newMsg.msg)
val future = new HazelcastBroadcastFuture(newMsg, this)
      push(new Entry(newMsg.msg, future, message.msg))
} catch {
case e: Exception => log.error("failed to push message: " + message, e)
}
}
}
/**
* A Hazelcast Future
*
* @param hsl
* @param listeners
* @param b
* @tparam T
*/
class HazelcastBroadcastFuture[T]
(hsl: HazelcastMessage[T], b: HazelcastBroadcaster)
extends BroadcasterFuture[T](hsl.msg, b.asInstanceOf[Broadcaster]) {
}
| BowlingX/scalatra-websockets | src/main/scala/com/bowlingx/websocket/hazelcast/HazelcastBroadcaster.scala | Scala | mit | 4,677 |
package be.cmpg.walk.fungus
import be.cmpg.graph.interaction.NetworkManager
import be.cmpg.graph.interaction.NodeCostNetworkManager
import be.cmpg.graph.Network
import be.cmpg.graph.Interaction
import be.cmpg.graph.Gene
import java.util.concurrent.Callable
import scala.collection.Set
import be.cmpg.expression.ExpressionNetworkManager
import be.cmpg.graph.interaction.WalkerResult
object FungusTestFixme {
val interactions = Set(
Interaction(Gene("1"), Gene("2"), probability = 1),
Interaction(Gene("2"), Gene("3"), probability = 1),
Interaction(Gene("3"), Gene("4"), probability = 1),
Interaction(Gene("4"), Gene("5"), probability = 1),
Interaction(Gene("4"), Gene("6"), probability = 1),
Interaction(Gene("4"), Gene("7"), probability = 1),
Interaction(Gene("6"), Gene("8"), probability = 1),
Interaction(Gene("7"), Gene("8"), probability = 1),
Interaction(Gene("7"), Gene("10"), probability = 1),
Interaction(Gene("8"), Gene("9"), probability = 1),
Interaction(Gene("10"), Gene("11"), probability = 1),
Interaction(Gene("11"), Gene("12"), probability = 1),
Interaction(Gene("11"), Gene("14"), probability = 1),
Interaction(Gene("12"), Gene("14"), probability = 1),
Interaction(Gene("12"), Gene("13"), probability = 1),
Interaction(Gene("13"), Gene("18"), probability = 1),
Interaction(Gene("14"), Gene("15"), probability = 1),
Interaction(Gene("15"), Gene("16"), probability = 1),
Interaction(Gene("16"), Gene("17"), probability = 1),
Interaction(Gene("17"), Gene("18"), probability = 1))
val network = new Network(interactions)
val networkManager = new ExpressionNetworkManager(network = network)
val notWantedScore = 10;
val wantedScore = 3;
network.getNode(Gene("1")).score = notWantedScore
network.getNode(Gene("2")).score = notWantedScore
network.getNode(Gene("3")).score = wantedScore
network.getNode(Gene("4")).score = wantedScore
network.getNode(Gene("5")).score = notWantedScore
network.getNode(Gene("6")).score = wantedScore
network.getNode(Gene("7")).score = notWantedScore
network.getNode(Gene("8")).score = wantedScore
network.getNode(Gene("9")).score = wantedScore
network.getNode(Gene("10")).score = notWantedScore
network.getNode(Gene("11")).score = notWantedScore
network.getNode(Gene("12")).score = wantedScore
network.getNode(Gene("13")).score = wantedScore
network.getNode(Gene("14")).score = wantedScore
network.getNode(Gene("15")).score = wantedScore
network.getNode(Gene("16")).score = notWantedScore
network.getNode(Gene("17")).score = notWantedScore
network.getNode(Gene("18")).score = notWantedScore
def generateFungus(network: NetworkManager[Gene], score: Double) = {
(0 to 0).map(_ => networkManager.getGenes.map(gene => {
new Fungus(
startGene = gene,
endGenes = Set(),
_geneNumberVariable = score,
network = network)
})).flatten
}
def main(args: Array[String]) {
//val threadpool = java.util.concurrent.Executors.newFixedThreadPool(Runtime.getRuntime().availableProcessors())
val threadpool = java.util.concurrent.Executors.newSingleThreadExecutor()
val numberOfSteps = 1000
for (i <- 0 to numberOfSteps) {
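      // Each round: grow a fungus from every gene, score the sub-networks they select,
      // feed the scores back into the network manager and evaporate before the next round.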
val callables = generateFungus(networkManager, 19).map(walker => {
new Callable[Option[WalkerResult]] {
override def call(): Option[WalkerResult] = {
val subnetwork = walker.selectSubNetwork()
if (subnetwork.isDefined)
Some(WalkerResult(walker, subnetwork.get, networkManager.scoreSubnetwork(subnetwork.get)))
else
return None
}
}
})
val previous = System.nanoTime()
val futures = callables.map(threadpool.submit(_))
val after = System.nanoTime()
//futures.map(x => println(x.get))
val paths = futures.map(_.get.get)
networkManager.updateScores(paths)
if (i != numberOfSteps)
networkManager.evaporate()
if (i % 100 == 0) {
println("Finished pathfinding for round: " + i + " in: " + (after.toDouble - previous.toDouble) / 1000d + " ms.")
}
}
threadpool.shutdown()
networkManager.printResults(System.out)
}
} | spulido99/SSA | src/test/scala/be/cmpg/walk/fungus/FungusTestFixme.scala | Scala | gpl-2.0 | 4,271 |
package controllers.s_care_you_provide
import controllers.mappings.AddressMappings._
import controllers.mappings.{AddressMappings, Mappings}
import controllers.s_your_partner.GYourPartnerPersonalDetails._
import models.yesNo.YesNoMandWithAddress
import play.api.Play._
import play.api.data.validation.{Valid, ValidationError, Invalid, Constraint}
import gov.dwp.carers.xml.validation.CommonValidation
import language.reflectiveCalls
import play.api.data.{FormError, Form}
import play.api.data.Forms._
import play.api.mvc.{AnyContent, Request, Controller}
import controllers.mappings.Mappings._
import models.view.{Navigable, CachedClaim}
import utils.helpers.CarersForm._
import models.domain._
import controllers.CarersForms._
import models.{NationalInsuranceNumber, DayMonthYear}
import controllers.mappings.NINOMappings._
import play.api.i18n._
object GTheirPersonalDetails extends Controller with CachedClaim with Navigable with I18nSupport {
override val messagesApi: MessagesApi = current.injector.instanceOf[MMessages]
val addressMapping = "theirAddress"->mapping(
"answer" -> nonEmptyText.verifying(validYesNo),
"address" -> optional(address(AddressMappings.CAREE)),
"postCode" -> optional(text verifying(restrictedPostCodeAddressStringText, validPostcode))
)(YesNoMandWithAddress.apply)(YesNoMandWithAddress.unapply)
def form(implicit request: Request[AnyContent]) = Form(mapping(
"title" -> carersNonEmptyText(maxLength = Mappings.twenty),
"firstName" -> nonEmptyText(maxLength = CommonValidation.FIRSTNAME_MAX_LENGTH).verifying(YourDetails.validName),
"middleName" -> optional(text(maxLength = CommonValidation.MIDDLENAME_MAX_LENGTH).verifying(YourDetails.validName)),
"surname" -> nonEmptyText(maxLength = CommonValidation.SURNAME_MAX_LENGTH).verifying(YourDetails.validName),
"nationalInsuranceNumber" -> optional(nino.verifying(stopOnFirstFail (validNino, isSameNinoAsDPOrPartner))),
"dateOfBirth" -> dayMonthYear.verifying(validDateOfBirth),
"relationship" -> carersNonEmptyText(maxLength = 35),
addressMapping
)(TheirPersonalDetails.apply)(TheirPersonalDetails.unapply)
.verifying("theirAddress.address", validateSameAddressAnswer _)
)
private def validateSameAddressAnswer(form: TheirPersonalDetails) = form.theirAddress.answer match {
case `no` => form.theirAddress.address.isDefined
case _ => true
}
def present = claimingWithCheck { implicit claim => implicit request => implicit request2lang =>
val isPartnerPersonYouCareFor = YourPartner.visible &&
claim.questionGroup[YourPartnerPersonalDetails].exists(_.isPartnerPersonYouCareFor.getOrElse("") == "yes")
val currentForm = if (isPartnerPersonYouCareFor) {
claim.questionGroup(YourPartnerPersonalDetails) match {
case Some(t: YourPartnerPersonalDetails) =>
val theirPersonalDetails = claim.questionGroup(TheirPersonalDetails).getOrElse(TheirPersonalDetails()).asInstanceOf[TheirPersonalDetails]
form.fill(TheirPersonalDetails(relationship = theirPersonalDetails.relationship,
title = t.title.getOrElse(""),
firstName = t.firstName.getOrElse(""),
middleName = t.middleName,
surname = t.surname.getOrElse(""),
nationalInsuranceNumber = t.nationalInsuranceNumber,
dateOfBirth = t.dateOfBirth.getOrElse(DayMonthYear(None, None, None)),
theirAddress = theirPersonalDetails.theirAddress
)) // Pre-populate form with values from YourPartnerPersonalDetails - this is for the case that the Caree is your partner
case _ => form // Blank form (user can only get here if they skip sections by manually typing URL).
}
} else {
      form.fill(TheirPersonalDetails()) // Blank details when the person cared for is not the partner.
}
track(TheirPersonalDetails) { implicit claim => Ok(views.html.s_care_you_provide.g_theirPersonalDetails(currentForm)) }
}
def submit = claimingWithCheck { implicit claim => implicit request => implicit request2lang =>
form.bindEncrypted.fold(
formWithErrors => {
val updatedFormWithErrors = formWithErrors
.replaceError("","theirAddress.address", FormError("theirAddress.address", "error.careeaddress.lines.required"))
BadRequest(views.html.s_care_you_provide.g_theirPersonalDetails(updatedFormWithErrors))
},
theirPersonalDetails => {
val liveAtSameAddress = theirPersonalDetails.theirAddress.answer == yes
//copy the address from the carer
val updatedTheirPersonalDetails = if(liveAtSameAddress){
claim.questionGroup[ContactDetails].map{ cd =>
theirPersonalDetails.copy(theirAddress = YesNoMandWithAddress(answer = yes, address= Some(cd.address), postCode = cd.postcode))
}.getOrElse(theirPersonalDetails)
}else{
theirPersonalDetails
}
claim.update(formatPostCodes(updatedTheirPersonalDetails)) -> Redirect(routes.GMoreAboutTheCare.present())
})
} withPreview()
private def formatPostCodes(theirPersonalDetails : TheirPersonalDetails): TheirPersonalDetails = {
theirPersonalDetails.copy(
theirAddress = theirPersonalDetails.theirAddress.copy(
postCode = Some(formatPostCode(theirPersonalDetails.theirAddress.postCode.getOrElse("")))))
}
private def isSameNinoAsDPOrPartner(implicit request: Request[AnyContent]): Constraint[NationalInsuranceNumber] = Constraint[NationalInsuranceNumber]("constraint.nino") {
case nino@NationalInsuranceNumber(Some(_)) => checkSameValues(nino.nino.get.toUpperCase.replace(" ", ""), request)
case _ => Invalid(ValidationError("error.nationalInsuranceNumber"))
}
private def checkSameValues(nino: String, request: Request[AnyContent]) = {
val claim = fromCache(request).getOrElse(new Claim("xxxx"))
val partnerDetails = claim.questionGroup[YourPartnerPersonalDetails].getOrElse(YourPartnerPersonalDetails())
val yourDetails = claim.questionGroup[YourDetails].getOrElse(YourDetails())
if (yourNINO(yourDetails) == nino) Invalid(ValidationError("error.you.and.dp.nationalInsuranceNumber", yourName(yourDetails), pageName(request)))
else if (partnerNINO(partnerDetails) == nino && partnerDetails.isPartnerPersonYouCareFor.getOrElse("no") == Mappings.no) Invalid(ValidationError("error.partner.and.dp.nationalInsuranceNumber", partnerName(partnerDetails), pageName(request)))
else Valid
}
}
| Department-for-Work-and-Pensions/ClaimCapture | c3/app/controllers/s_care_you_provide/GTheirPersonalDetails.scala | Scala | mit | 6,423 |
package org.sstudio.bulldozer.dsl
object Assert {
  // Last values read from the shared Result holder, kept for later inspection by the DSL.
  private[this] var _code: Int = _
  private[this] var _body: String = _
  /** Reads the status code of the most recent result and caches it. */
  def code: Int = {
    _code = Result.code
    _code
  }
  /** Reads the body of the most recent result and caches it. */
  def body: String = {
    _body = Result.body
    _body
  }
  /** Currently a pass-through: the asserted expression is evaluated and returned unchanged. */
  def apply(expr: Long): Long = {
    expr
  }
} | avril23/bulldozer | bulldozer/src/main/scala/org/sstudio/bulldozer/dsl/Assert.scala | Scala | bsd-3-clause | 307 |
package dao
import java.sql.{Date, Time, Timestamp}
import javax.inject.Inject
import com.google.inject.Singleton
import models.Training.Training
import play.api.db.slick.{DatabaseConfigProvider, HasDatabaseConfigProvider}
import play.db.NamedDatabase
import slick.driver.JdbcProfile
import slick.driver.PostgresDriver.api._
import slick.lifted.TableQuery
import scala.concurrent.Future
class TrainingTable(tag: Tag) extends Table[Training](tag, "training") {
def trainingid = column[Int]("trainingid", O.PrimaryKey, O.AutoInc)
def street = column[String]("street")
def zipcode = column[String]("zipcode")
def city = column[String]("city")
def date = column[Date]("date")
def begintime = column[Time]("begintime")
def endtime = column[Time]("endtime")
def gettogethertime = column[Time]("gettogethertime")
def * = (trainingid, street, zipcode, city, date, begintime, endtime, gettogethertime) <> (Training.tupled, Training.unapply _)
}
@Singleton()
class TrainingDAO @Inject()(protected val dbConfigProvider: DatabaseConfigProvider) extends HasDatabaseConfigProvider[JdbcProfile] {
private val trainings = TableQuery[TrainingTable]
def all(): Future[Seq[Training]] = db.run(trainings.result)
def getTraining(trainingId: Int): Future[Option[Training]] = db.run(trainings.filter(_.trainingid === trainingId).result.headOption)
def deleteTraining(trainingId: Int): Future[Int] = db.run(trainings.filter(_.trainingid === trainingId).delete)
def createTraining(training: Training): Future[Int] = {
val query = (trainings returning trainings.map(_.trainingid)) += training
db.run(query)
}
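  // Because of the `returning` projection above, the Future completes with the
  // database-generated trainingid rather than the affected-row count.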
def updateTraining(trainingId: Int, training: Training): Future[Int] = {
val trainingToUpdate: Training = training.copy(trainingId)
db.run(trainings.filter(_.trainingid === trainingId).update(trainingToUpdate))
}
}
| magura42/KickAppServer | app/dao/TrainingDAO.scala | Scala | mit | 1,863 |
/*
* Copyright (C) 2014 Romain Reuillon
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.openmole.plugin.task
package scala {
trait ScalaPackage <: jvm.JVMPackage
}
package object scala extends ScalaPackage
| openmole/openmole | openmole/plugins/org.openmole.plugin.task.scala/src/main/scala/org/openmole/plugin/task/scala/package.scala | Scala | agpl-3.0 | 836 |
/* NSC -- new Scala compiler
* Copyright 2006-2013 LAMP/EPFL
* @author Martin Odersky
*/
package scala.tools.nsc
package util
import io.{ AbstractFile, Directory, File, Jar }
import java.net.MalformedURLException
import java.net.URL
import java.util.regex.PatternSyntaxException
import scala.collection.{ mutable, immutable }
import scala.reflect.internal.FatalError
import scala.reflect.internal.util.StringOps.splitWhere
import scala.tools.nsc.classpath.FileUtils
import File.pathSeparator
import FileUtils.endsClass
import FileUtils.endsScalaOrJava
import Jar.isJarOrZip
/** <p>
* This module provides star expansion of '-classpath' option arguments, behaves the same as
* java, see [http://java.sun.com/javase/6/docs/technotes/tools/windows/classpath.html]
* </p>
*
* @author Stepan Koltsov
*/
object ClassPath {
import scala.language.postfixOps
/** Expand single path entry */
private def expandS(pattern: String): List[String] = {
val wildSuffix = File.separator + "*"
/* Get all subdirectories, jars, zips out of a directory. */
def lsDir(dir: Directory, filt: String => Boolean = _ => true) =
dir.list filter (x => filt(x.name) && (x.isDirectory || isJarOrZip(x))) map (_.path) toList
if (pattern == "*") lsDir(Directory("."))
else if (pattern endsWith wildSuffix) lsDir(Directory(pattern dropRight 2))
else if (pattern contains '*') {
try {
val regexp = ("^" + pattern.replaceAllLiterally("""\\*""", """.*""") + "$").r
lsDir(Directory(pattern).parent, regexp findFirstIn _ isDefined)
}
catch { case _: PatternSyntaxException => List(pattern) }
}
else List(pattern)
}
/** Split classpath using platform-dependent path separator */
def split(path: String): List[String] = (path split pathSeparator).toList filterNot (_ == "") distinct
/** Join classpath using platform-dependent path separator */
def join(paths: String*): String = paths filterNot (_ == "") mkString pathSeparator
/** Split the classpath, apply a transformation function, and reassemble it. */
def map(cp: String, f: String => String): String = join(split(cp) map f: _*)
/** Expand path and possibly expanding stars */
def expandPath(path: String, expandStar: Boolean = true): List[String] =
if (expandStar) split(path) flatMap expandS
else split(path)
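  // Illustration only (paths and the ':' separator are hypothetical, Unix-style):
  //   split("a.jar:b.jar:a.jar") == List("a.jar", "b.jar")   // empties and duplicates dropped
  //   join("a.jar", "", "b.jar") == "a.jar:b.jar"
  //   expandPath("lib/*")        // every jar/zip/subdirectory found under lib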
/** Expand dir out to contents, a la extdir */
def expandDir(extdir: String): List[String] = {
AbstractFile getDirectory extdir match {
case null => Nil
case dir => dir filter (_.isClassContainer) map (x => new java.io.File(dir.file, x.name) getPath) toList
}
}
/** Expand manifest jar classpath entries: these are either urls, or paths
* relative to the location of the jar.
*/
def expandManifestPath(jarPath: String): List[URL] = {
val file = File(jarPath)
if (!file.isFile) return Nil
val baseDir = file.parent
new Jar(file).classPathElements map (elem =>
specToURL(elem) getOrElse (baseDir / elem).toURL
)
}
def specToURL(spec: String): Option[URL] =
try Some(new URL(spec))
catch { case _: MalformedURLException => None }
/** A class modeling aspects of a ClassPath which should be
* propagated to any classpaths it creates.
*/
abstract class ClassPathContext[T] extends classpath.ClassPathFactory[ClassPath[T]] {
/** A filter which can be used to exclude entities from the classpath
* based on their name.
*/
def isValidName(name: String): Boolean = true
/** Filters for assessing validity of various entities.
*/
def validClassFile(name: String) = endsClass(name) && isValidName(name)
def validPackage(name: String) = (name != "META-INF") && (name != "") && (name.charAt(0) != '.')
def validSourceFile(name: String) = endsScalaOrJava(name)
/** From the representation to its identifier.
*/
def toBinaryName(rep: T): String
def sourcesInPath(path: String): List[ClassPath[T]] =
for (file <- expandPath(path, expandStar = false) ; dir <- Option(AbstractFile getDirectory file)) yield
new SourcePath[T](dir, this)
}
def manifests: List[java.net.URL] = {
import scala.collection.convert.WrapAsScala.enumerationAsScalaIterator
Thread.currentThread().getContextClassLoader()
.getResources("META-INF/MANIFEST.MF")
.filter(_.getProtocol == "jar").toList
}
class JavaContext extends ClassPathContext[AbstractFile] {
def toBinaryName(rep: AbstractFile) = {
val name = rep.name
assert(endsClass(name), name)
FileUtils.stripClassExtension(name)
}
def newClassPath(dir: AbstractFile) = new DirectoryClassPath(dir, this)
}
object DefaultJavaContext extends JavaContext
/** From the source file to its identifier.
*/
def toSourceName(f: AbstractFile): String = FileUtils.stripSourceExtension(f.name)
}
import ClassPath._
/**
* Represents a package which contains classes and other packages
*/
abstract class ClassPath[T] extends ClassFileLookup[T] {
/**
* The short name of the package (without prefix)
*/
def name: String
/**
* A String representing the origin of this classpath element, if known.
* For example, the path of the directory or jar.
*/
def origin: Option[String] = None
/** Info which should be propagated to any sub-classpaths.
*/
def context: ClassPathContext[T]
/** Lists of entities.
*/
def classes: IndexedSeq[ClassRepresentation[T]]
def packages: IndexedSeq[ClassPath[T]]
def sourcepaths: IndexedSeq[AbstractFile]
/** The entries this classpath is composed of. In class `ClassPath` it's just the singleton list containing `this`.
* Subclasses such as `MergedClassPath` typically return lists with more elements.
*/
def entries: IndexedSeq[ClassPath[T]] = IndexedSeq(this)
/** Merge classpath of `platform` and `urls` into merged classpath */
def mergeUrlsIntoClassPath(urls: URL*): MergedClassPath[T] = {
// Collect our new jars/directories and add them to the existing set of classpaths
val allEntries =
(entries ++
urls.map(url => context.newClassPath(io.AbstractFile.getURL(url)))
).distinct
// Combine all of our classpaths (old and new) into one merged classpath
new MergedClassPath(allEntries, context)
}
/**
* Represents classes which can be loaded with a ClassfileLoader and/or SourcefileLoader.
*/
case class ClassRep(binary: Option[T], source: Option[AbstractFile]) extends ClassRepresentation[T] {
def name: String = binary match {
case Some(x) => context.toBinaryName(x)
case _ =>
assert(source.isDefined)
toSourceName(source.get)
}
}
/** Filters for assessing validity of various entities.
*/
def validClassFile(name: String) = context.validClassFile(name)
def validPackage(name: String) = context.validPackage(name)
def validSourceFile(name: String) = context.validSourceFile(name)
/**
* Find a ClassRep given a class name of the form "package.subpackage.ClassName".
* Does not support nested classes on .NET
*/
override def findClass(name: String): Option[ClassRepresentation[T]] =
splitWhere(name, _ == '.', doDropIndex = true) match {
case Some((pkg, rest)) =>
val rep = packages find (_.name == pkg) flatMap (_ findClass rest)
rep map {
case x: ClassRepresentation[T] => x
case x => throw new FatalError("Unexpected ClassRep '%s' found searching for name '%s'".format(x, name))
}
case _ =>
classes find (_.name == name)
}
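  // e.g. findClass("scala.collection.Seq") peels off the leading "scala", recurses into that
  // package with "collection.Seq", and finally looks up "Seq" among the classes of the
  // "collection" sub-package.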
override def findClassFile(name: String): Option[AbstractFile] =
findClass(name) match {
case Some(ClassRepresentation(Some(x: AbstractFile), _)) => Some(x)
case _ => None
}
override def asSourcePathString: String = sourcepaths.mkString(pathSeparator)
def sortString = join(split(asClassPathString).sorted: _*)
override def equals(that: Any) = that match {
case x: ClassPath[_] => this.sortString == x.sortString
case _ => false
}
override def hashCode = sortString.hashCode()
}
/**
* A Classpath containing source files
*/
class SourcePath[T](dir: AbstractFile, val context: ClassPathContext[T]) extends ClassPath[T] {
import FileUtils.AbstractFileOps
def name = dir.name
override def origin = dir.underlyingSource map (_.path)
def asURLs = dir.toURLs()
def asClassPathString = dir.path
val sourcepaths: IndexedSeq[AbstractFile] = IndexedSeq(dir)
private def traverse() = {
val classBuf = immutable.Vector.newBuilder[ClassRep]
val packageBuf = immutable.Vector.newBuilder[SourcePath[T]]
dir foreach { f =>
if (!f.isDirectory && validSourceFile(f.name))
classBuf += ClassRep(None, Some(f))
else if (f.isDirectory && validPackage(f.name))
packageBuf += new SourcePath[T](f, context)
}
(packageBuf.result(), classBuf.result())
}
lazy val (packages, classes) = traverse()
override def toString() = "sourcepath: "+ dir.toString()
}
/**
* A directory (or a .jar file) containing classfiles and packages
*/
class DirectoryClassPath(val dir: AbstractFile, val context: ClassPathContext[AbstractFile]) extends ClassPath[AbstractFile] {
import FileUtils.AbstractFileOps
def name = dir.name
override def origin = dir.underlyingSource map (_.path)
def asURLs = dir.toURLs(default = Seq(new URL(name)))
def asClassPathString = dir.path
val sourcepaths: IndexedSeq[AbstractFile] = IndexedSeq()
// calculates (packages, classes) in one traversal.
private def traverse() = {
val classBuf = immutable.Vector.newBuilder[ClassRep]
val packageBuf = immutable.Vector.newBuilder[DirectoryClassPath]
dir foreach {
f =>
// Optimization: We assume the file was not changed since `dir` called
// `Path.apply` and categorized existent files as `Directory`
// or `File`.
val isDirectory = f match {
case pf: io.PlainFile => pf.givenPath match {
case _: io.Directory => true
case _: io.File => false
case _ => f.isDirectory
}
case _ =>
f.isDirectory
}
if (!isDirectory && validClassFile(f.name))
classBuf += ClassRep(Some(f), None)
else if (isDirectory && validPackage(f.name))
packageBuf += new DirectoryClassPath(f, context)
}
(packageBuf.result(), classBuf.result())
}
lazy val (packages, classes) = traverse()
override def toString() = "directory classpath: "+ origin.getOrElse("?")
}
class DeltaClassPath[T](original: MergedClassPath[T], subst: Map[ClassPath[T], ClassPath[T]])
extends MergedClassPath[T](original.entries map (e => subst getOrElse (e, e)), original.context) {
// not sure we should require that here. Commented out for now.
// require(subst.keySet subsetOf original.entries.toSet)
// We might add specialized operations for computing classes packages here. Not sure it's worth it.
}
/**
* A classpath unifying multiple class- and sourcepath entries.
*/
class MergedClassPath[T](
override val entries: IndexedSeq[ClassPath[T]],
val context: ClassPathContext[T])
extends ClassPath[T] {
def this(entries: TraversableOnce[ClassPath[T]], context: ClassPathContext[T]) =
this(entries.toIndexedSeq, context)
def name = entries.head.name
def asURLs = (entries flatMap (_.asURLs)).toList
lazy val sourcepaths: IndexedSeq[AbstractFile] = entries flatMap (_.sourcepaths)
override def origin = Some(entries map (x => x.origin getOrElse x.name) mkString ("Merged(", ", ", ")"))
override def asClassPathString: String = join(entries map (_.asClassPathString) : _*)
lazy val classes: IndexedSeq[ClassRepresentation[T]] = {
var count = 0
val indices = mutable.HashMap[String, Int]()
val cls = new mutable.ArrayBuffer[ClassRepresentation[T]](1024)
for (e <- entries; c <- e.classes) {
val name = c.name
if (indices contains name) {
val idx = indices(name)
val existing = cls(idx)
if (existing.binary.isEmpty && c.binary.isDefined)
cls(idx) = ClassRep(binary = c.binary, source = existing.source)
if (existing.source.isEmpty && c.source.isDefined)
cls(idx) = ClassRep(binary = existing.binary, source = c.source)
}
else {
indices(name) = count
cls += c
count += 1
}
}
cls.toIndexedSeq
}
lazy val packages: IndexedSeq[ClassPath[T]] = {
var count = 0
val indices = mutable.HashMap[String, Int]()
val pkg = new mutable.ArrayBuffer[ClassPath[T]](256)
for (e <- entries; p <- e.packages) {
val name = p.name
if (indices contains name) {
val idx = indices(name)
pkg(idx) = addPackage(pkg(idx), p)
}
else {
indices(name) = count
pkg += p
count += 1
}
}
pkg.toIndexedSeq
}
private def addPackage(to: ClassPath[T], pkg: ClassPath[T]) = {
val newEntries: IndexedSeq[ClassPath[T]] = to match {
case cp: MergedClassPath[_] => cp.entries :+ pkg
case _ => IndexedSeq(to, pkg)
}
new MergedClassPath[T](newEntries, context)
}
def show() {
println("ClassPath %s has %d entries and results in:\\n".format(name, entries.size))
asClassPathString split ':' foreach (x => println(" " + x))
}
override def toString() = "merged classpath "+ entries.mkString("(", "\\n", ")")
}
/**
* The classpath when compiling with target:jvm. Binary files (classfiles) are represented
* as AbstractFile. nsc.io.ZipArchive is used to view zip/jar archives as directories.
*/
class JavaClassPath(
containers: IndexedSeq[ClassPath[AbstractFile]],
context: JavaContext)
extends MergedClassPath[AbstractFile](containers, context) { }
| twitter/finatra-misc | scalap-compiler-deps/2.12.1/src/main/scala/scala/tools/nsc/util/ClassPath.scala | Scala | apache-2.0 | 13,961 |
package boardGame
import scala.collection.mutable.ArrayBuffer
class SimpleDeck[T] {
private val cards = new ArrayBuffer[T];
def pushTop(card : T) = { card +=: cards}
def pushBottom(card : T) = { cards += card }
def rest = cards.length;
def draw : T = {
val c = cards.head;
cards.trimStart(1);
c
}
def clear = cards.clear()
def canDraw : Boolean = cards.length > 0
//Fisher-Yates
def shuffle = {
var i = cards.length;
while (i > 0) {
val j = (Math.random * i).toInt
i -= 1
val tmp = cards(i)
cards(i) = cards(j)
cards(j) = tmp
}
}
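  // Usage sketch (illustrative only):
  //   val deck = new SimpleDeck[Int]
  //   (1 to 52).foreach(deck.pushBottom)
  //   deck.shuffle
  //   val hand = if (deck.rest >= 5) List.fill(5)(deck.draw) else Nil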
} | ksk9687/Aquire | src/main/scala/boardGame/Deck.scala | Scala | mit | 686 |
package cromwell.util.docker
import cromwell.CromwellSpec.IntegrationTest
import cromwell.util.DockerConfiguration
import cromwell.util.google.{GoogleCredentialFactory, GoogleCredentialFactorySpec}
import org.scalatest.prop.TableDrivenPropertyChecks._
import org.scalatest.prop.Tables.Table
import org.scalatest.{FlatSpec, Matchers}
class DockerIdentifierParserSpec extends FlatSpec with Matchers {
behavior of "DockerIdentifierParser"
it should "parse docker tagged identifiers" in {
val parser = DockerIdentifierParser.Default
val identifiers = Table(
("identifier", "name", "tag", "namespace"),
("team/imageName", "team/imageName", "latest", "docker.io"),
("team/imageName:tag", "team/imageName", "tag", "docker.io"),
("imageName", "library/imageName", "latest", "docker.io"),
("imageName:", "library/imageName:", "latest", "docker.io"),
("imageName:tag", "library/imageName", "tag", "docker.io"),
// TODO: We should be able to handle other registries _correctly_.
// TODO: For now, assume the host is actually a user/team.
("quay.io/namespace/repository", "quay.io/namespace/repository", "latest", "docker.io"),
("quay.io/namespace/repository:tag", "quay.io/namespace/repository", "tag", "docker.io"))
forAll(identifiers) { (identifier, name, tag, namespace) =>
val parsed = parser.parse(identifier)
parsed shouldBe a[DockerTagIdentifier]
val tagged = parsed.asInstanceOf[DockerTagIdentifier]
tagged.name should be(name)
tagged.tag should be(tag)
tagged.registry.namespace should be(namespace)
}
}
it should "parse gcr.io tagged identifiers" taggedAs IntegrationTest in {
GoogleCredentialFactorySpec.assumeAccountConfigExists()
val googleCredentials = new GoogleCredentialFactory {
override val GoogleConf = GoogleCredentialFactorySpec.GoogleAccountConfig
}.fromCromwellAuthScheme
val dockerConf = DockerConfiguration.build(GoogleCredentialFactorySpec.AccountConfig)
val parser = new DockerIdentifierParser(dockerConf, Option(googleCredentials))
val identifiers = Table(
("identifier", "name", "tag", "namespace"),
("gcr.io/google-project/imageName", "google-project/imageName", "latest", "gcr.io"),
("gcr.io/google-project/imageName:tag", "google-project/imageName", "tag", "gcr.io"),
("us.gcr.io/google-project/imageName", "google-project/imageName", "latest", "us.gcr.io"),
("eu.gcr.io/google-project/imageName", "google-project/imageName", "latest", "eu.gcr.io"),
("asia.gcr.io/google-project/imageName", "google-project/imageName", "latest", "asia.gcr.io"),
("b.gcr.io/google-bucket/imageName", "google-bucket/imageName", "latest", "b.gcr.io"))
forAll(identifiers) { (identifier, name, tag, namespace) =>
val parsed = parser.parse(identifier)
parsed shouldBe a[DockerTagIdentifier]
val tagged = parsed.asInstanceOf[DockerTagIdentifier]
tagged.name should be(name)
tagged.tag should be(tag)
tagged.registry.namespace should be(namespace)
}
}
}
| dgtester/cromwell | src/test/scala/cromwell/util/docker/DockerIdentifierParserSpec.scala | Scala | bsd-3-clause | 3,102 |
import scala.language.{ implicitConversions }
import runtime.ScalaRunTime
object Test {
val p = new Pattern { }
import p._
implicit object IntOps extends NumericOps[Int] {
def zero = 0
def one = 1
def add(a: Int, b: Int): Int = a + b
def sub(a: Int, b: Int): Int = a - b
def mul(a: Int, b: Int): Int = a * b
def mul(a: Int, b: Double): Int = (a * b).toInt
def div(a: Int, b: Int): Int = a / b
def div(a: Int, b: Double): Int = (a / b).toInt
def similar(a: Int, b: Int): Boolean = a == b
def abs(a: Int): Double = math.abs(a).toDouble
def sqr(a: Int): Int = a * a
def sqrt(a: Int): Int = math.sqrt(a).toInt
def log(a: Int): Int = math.log(a).toInt
def exp(a: Int): Int = math.exp(a).toInt
def sin(a: Int): Int = math.sin(a).toInt
def cos(a: Int): Int = math.cos(a).toInt
def fromDouble(a: Double): Int = a.toInt
def fromInt(a: Int): Int = a
}
def main(args: Array[String]): Unit = {
println((5: Expr[Int]) + 10 + 15 * 20)
}
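  // Extra, self-contained illustration of the symbolic API defined below; it is not called
  // from main, so the test's checked output is unchanged. The exact printed form of the
  // derivative depends on the rewrite rules in Expr#simplify.
  def derivativeDemo(): Unit = {
    val x = NamedVar[Int]("x")
    val f = x * x + x * 3
    val df = f.derivative(x).simplify
    println("d/d" + x + " of " + f + " = " + df)
  }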
}
trait Pattern {
// For trying out 2.7.7
//
// type Numeric[T]
// import java.io.Serializable
//
// implicit def compat27a[T](x: Iterable[T]) = new {
// def iterator: Iterator[T] = x.elements
// def sum: Int = 5
// def collect[U](pf: PartialFunction[T, U]): Iterable[U] = x map pf
// }
/** Function that returns object of the same type it was passed */
trait EndoFunction[-A] {
def apply[B <: A](x: B): B
}
/** Allows for smart construction of EndoFunction from an ordinary function */
object EndoFunction {
def apply[A](f: A => A): EndoFunction[A] = new EndoFunction[A] {
def apply[B <: A](x: B): B = f(x).asInstanceOf[B]
}
}
trait NumericOps[T] extends Serializable {
def zero: T
def one: T
def two = add(one, one)
def three = add(two, one)
def add(a: T, b: T): T
def add(a: T, b: T, c: T): T = add(a, add(b, c))
def sub(a: T, b: T): T
def mul(a: T, b: T): T
def mul(a: T, b: Double): T
def div(a: T, b: T): T
def div(a: T, b: Double): T
def similar(a: T, b: T): Boolean
def neg(a: T) = sub(zero, a)
def abs(a: T): Double
def sqr(a: T): T
def sqrt(a: T): T
def log(a: T): T
def exp(a: T): T
def sin(a: T): T
def cos(a: T): T
def tan(a: T): T = div(sin(a), cos(a))
def fromDouble(a: Double): T
def fromInt(a: Int): T
def sum(terms: Iterable[T]) = terms.foldLeft(zero)(add)
def sum(terms: Iterator[T]) = terms.foldLeft(zero)(add)
def product(terms: Iterable[T]) = terms.foldLeft(one)(mul)
def product(terms: Iterator[T]) = terms.foldLeft(one)(mul)
def similar(a: Iterable[T], b: Iterable[T]): Boolean = {
val i1 = a.iterator
val i2 = b.iterator
while (i1.hasNext && i2.hasNext)
if (!similar(i1.next, i2.next))
return false;
true;
}
}
/**
* Simple expression interpreter with some basic symbolic manipulation.
* Able to evaluate derivatives.
*/
trait Expr[T] {
import Expr._
/** Evaluates value of the expression. */
def eval(context: Any => Any): T
/** Symbolically calculates derivative of this expression. Does not simplify it. */
def derivative(variable: Var[T]): Expr[T]
/** Returns arguments of this operator */
def args: Iterable[Expr[_]]
/** Transforms arguments of this operator by applying given function. */
def mapArgs(f: EndoFunction[Expr[_]]): Expr[T]
/** Transforms this operator and its arguments by applying given function */
def map(f: EndoFunction[Expr[_]]): Expr[T] =
f(mapArgs(EndoFunction[Expr[_]](x => x.map(f))))
/** Folds all subexpressions in this expression in depth-first order */
def fold[A](v: A)(f: (A, Expr[_]) => A): A =
f(args.foldLeft(v) { (a, b) => b.fold(a)(f) }, this)
/** Replaces all occurrences of one subexpression with another one */
def replace(from: Expr[_], to: Expr[_]): Expr[T] =
map(EndoFunction[Expr[_]](x => if (x == from) to else x))
/** Returns true if this expression contains given subexpression */
def contains(s: Expr[_]): Boolean =
this == s || args.exists(_ contains s)
/** Counts number of occurrences of the given subexpression. */
def count(condition: Expr[_] => Boolean): Int =
(if (condition(this)) 1 else 0) + args.map(_.count(condition)).sum
/** Executes some code for every subexpression in the depth-first order */
def foreach[U](block: Expr[_] => U): Unit = {
args.foreach(_.foreach(block))
block(this)
}
/** Collects subexpressions successfully transformed by the given partial function, in depth-first order. */
def collect[U](f: PartialFunction[Expr[_], U]): List[U] = {
val a = args.flatMap(_.collect(f)).toList
if (f.isDefinedAt(this)) (f(this) :: a) else a
}
def leaves: List[Leaf[T]] = collect { case l: Leaf[T] => l }
def + (other: Expr[T])(implicit n: NumericOps[T]) = Add(List(this, other))
def - (other: Expr[T])(implicit n: NumericOps[T]) = Sub(this, other)
def * (other: Expr[T])(implicit n: NumericOps[T]) = Mul(this, other)
def / (other: Expr[T])(implicit n: NumericOps[T]) = Div(this, other)
def unary_- (implicit n: NumericOps[T]) = Neg(this)
def sqr(implicit n: NumericOps[T]) = Sqr(this)
def < (other: Expr[T])(implicit n: NumericOps[T], o: Ordering[T]) = LT(this, other)
def <= (other: Expr[T])(implicit n: NumericOps[T], o: Ordering[T]) = LE(this, other)
def > (other: Expr[T])(implicit n: NumericOps[T], o: Ordering[T]) = GT(this, other)
def >= (other: Expr[T])(implicit n: NumericOps[T], o: Ordering[T]) = GE(this, other)
private def generalize(implicit num: NumericOps[T]): Expr[T] = {
this match {
case Add2(a, b) => Add(a :: b :: Nil)
case Add3(a, b, c) => Add(a :: b :: c :: Nil)
case Sub(a, b) => Add(a :: Neg(b) :: Nil)
case Add(x) => Add(x flatMap {
case Neg(Add(y)) => y.map(Neg(_))
case Add(y) => y
case y => y :: Nil
})
case x => x
}
}
private def specialize(implicit num: NumericOps[T]): Expr[T] = {
this match {
case Add(Seq(a, b)) => Add2(a, b)
case Add(Seq(a, b, c)) => Add3(a, b, c)
case x => x
}
}
    /** Eliminates common negated components of a sum, e.g. the component list of a + b + (-a) reduces to just b. */
private def reduceComponents(components: List[Expr[T]])(implicit num: NumericOps[T]): List[Expr[T]] = {
val pairs =
for (a <- components; b <- components if Neg(a) == b || a == Neg(b))
yield (a, b)
pairs.foldLeft(components) { (c, pair) =>
if (c.contains(pair._1) && c.contains(pair._2))
c.diff(pair._1 :: pair._2 :: Nil)
else
c
}
}
/** Simplifies this expression to make evaluation faster and more accurate.
* Performs only one pass. */
private def reduce(implicit num: NumericOps[T]): Expr[T] = {
this match {
case Add(Seq(Neg(x), Neg(y), Neg(z))) => Neg(Add(List(x, y, z)))
case Add(Seq(Mul(x, y), z)) if (x == z) => Mul(x, Add(List(y, One[T])))
case Add(Seq(Mul(x, y), z)) if (y == z) => Mul(y, Add(List(z, One[T])))
case Add(Seq(Mul(x, y), Mul(u, w))) if (x == u) => Mul(x, Add(List(y, w)))
case Add(Seq(Mul(x, y), Mul(u, w))) if (y == w) => Mul(y, Add(List(x, u)))
case Add(Seq(Add(x), Add(y))) => Add(x.toList ::: y.toList).simplify
case Add(Seq(Add(x), y)) => Add(y :: x.toList).simplify
case Add(Seq(x, Add(y))) => Add(x :: y.toList).simplify
case Add(x) => {
val noZeros = x.filter(_ != Zero[T])
val noOnes = noZeros.map { case y: One[_] => Const(num.one); case y => y }
val constant = num.sum(noOnes.collect { case c: Const[T] => c.value })
val rest = noOnes.filter(x => !x.isInstanceOf[Const[_]]).toList
val reduced = reduceComponents(rest)
val args = if (num.similar(constant, num.zero)) reduced else reduced ::: Const(constant) :: Nil
args.size match {
case 0 => Zero[T]
case 1 => args.head
case 2 => Add2(args(0), args(1))
case 3 => Add3(args(0), args(1), args(2))
case _ => Add(args)
}
}
case Sub(x: Zero[_], y) => Neg(y)
case Sub(x, y: Zero[_]) => x
case Sub(x, y) if x == y => Zero[T]
case Sub(Mul(x, y), z) if (x == z) => Mul(x, Sub(y, One[T]))
case Sub(Mul(x, y), z) if (y == z) => Mul(y, Sub(z, One[T]))
case Sub(Mul(x, y), Mul(u, w)) if (x == u) => Mul(x, Sub(y, w))
case Sub(Mul(x, y), Mul(u, w)) if (y == w) => Mul(y, Sub(x, u))
case Mul(x: Zero[_], y) => Zero[T]
case Mul(x, y: Zero[_]) => Zero[T]
case Mul(x: One[_], y) => y
case Mul(x, y: One[_]) => x
case Mul(Neg(x: One[_]), y) => Neg(y)
case Mul(x, Neg(y: One[_])) => Neg(x)
case Mul(x, y) if (x == y) => Sqr(x)
case Div(x: Zero[_], y) => Zero[T] // warning: possibly extends domain
case Div(x, y: One[_]) => x
case Div(Sqr(x), y) if x == y => x
case Div(Mul(x, y), z) if (x == z) => y
case Div(Mul(x, y), z) if (y == z) => y
case Div(Mul(Mul(x, y), z), w) if (x == w) => Mul(y, z)
case Div(Mul(Mul(x, y), z), w) if (y == w) => Mul(x, z)
case Div(Mul(z, Mul(x, y)), w) if (x == w) => Mul(y, z)
case Div(Mul(z, Mul(x, y)), w) if (y == w) => Mul(x, z)
case Div(Mul(x, y), Mul(u, w)) if (x == u) => Div(y, w)
case Div(Mul(x, y), Mul(u, w)) if (y == w) => Div(x, u)
case Div(x: One[_], y) => Inv(y)
case Div(x, Sqr(y)) if x == y => Inv(y)
case Div(Mul(x, y), Sqr(Mul(u, w))) if x == u && y == w => Inv(Mul(x, y))
case Div(x, y) if x == y => One[T]
case Mul(Neg(a), Neg(b)) => Mul(a, b)
case Div(Neg(a), Neg(b)) => Div(a, b)
case Neg(x: Zero[_]) => Zero[T]
case Neg(x: One[_]) => Const(num.neg(num.one))
case Sub(Const(x), Const(y)) => const(num.sub(x, y))
case Mul(Const(x), Const(y)) => const(num.mul(x, y))
case Div(Const(x), Const(y)) => const(num.div(x, y))
case Neg(Const(x)) => const(num.neg(x))
case Sqr(Const(x)) => const(num.sqr(x))
case Mul(Const(x), Mul(Const(y), z)) => Mul(const(num.mul(x, y)), z)
case Mul(Const(x), Mul(y, Const(z))) => Mul(const(num.mul(x, z)), y)
case Mul(Mul(Const(y), z), Const(x)) => Mul(const(num.mul(x, y)), z)
case Mul(Mul(y, Const(z)), Const(x)) => Mul(const(num.mul(x, z)), y)
case Const(x) if x == num.one => One[T]
case Const(x) if x == num.zero => Zero[T]
case Sub(x, Neg(y)) => Add(List(x, y))
case Sub(Neg(x), y) => Neg(Add(List(x, y)))
case Neg(Neg(x)) => x
case Neg(Mul(a: Const[T], x)) => Mul(const(num.neg(a.value)), x)
case Neg(Mul(x, a: Const[T])) => Mul(const(num.neg(a.value)), x)
case Neg(Div(Neg(a), b)) => Div(a, b)
case Neg(Div(a, Neg(b))) => Div(a, b)
case Neg(Mul(Neg(a), b)) => Mul(a, b)
case Neg(Mul(a, Neg(b))) => Mul(a, b)
case Log(Exp(x)) => x
case x => x
}
}
private def optimizeWith(f: Expr[T] => Expr[T]): Expr[T] = {
f(mapArgs(EndoFunction[Expr[_]](
a => a match { case x: Expr[T] => x.optimizeWith(f) }
)))
}
/** Simplifies this expression to make evaluation faster and more accurate.*/
def simplify(implicit num: NumericOps[T]): Expr[T] = {
val a1 = optimizeWith(_.generalize)
val a2 = a1.optimizeWith(_.generalize)
val b = a2.optimizeWith(_.reduce)
val c = b.optimizeWith(_.reduce)
val d = c.optimizeWith(_.specialize)
d
}
}
trait Leaf[T] extends Expr[T] {
val args = List[Expr[T]]()
def mapArgs(f: EndoFunction[Expr[_]]) = this
}
trait OneArg[T] extends Expr[T] {
val expr: Expr[T]
val args = List(expr)
}
trait TwoArg[T] extends Expr[T] {
val left: Expr[T]
val right: Expr[T]
val args = List(left, right)
}
trait ManyArg[T] extends Expr[T]
/** Marker trait for specifying that you can safely divide by this */
trait NonZero[T] extends Expr[T]
case class Const[T](value: T)(implicit num: NumericOps[T]) extends Leaf[T] with NonZero[T] {
def derivative(variable: Var[T]) = Zero[T]
def eval(f: Any => Any) = value
override def toString = value.toString
}
case class Zero[T]()(implicit num: NumericOps[T]) extends Leaf[T] {
def derivative(variable: Var[T]) = Zero[T]
def eval(f: Any => Any) = num.zero
override def toString = "0"
}
case class One[T]()(implicit num: NumericOps[T]) extends Leaf[T] {
def derivative(variable: Var[T]) = Zero[T]
def eval(f: Any => Any) = num.one
override def toString = "1"
}
abstract class Var[T](implicit num: NumericOps[T]) extends Leaf[T] {
def derivative(variable: Var[T]) = if (variable == this) One[T] else Zero[T]
def eval(f: Any => Any) = f(this).asInstanceOf[T]
}
case class NamedVar[T](name: String)(implicit num: NumericOps[T]) extends Var[T] {
override lazy val hashCode = ScalaRunTime._hashCode(this)
override def toString = name
}
case class Add[T](args: Iterable[Expr[T]])(implicit num: NumericOps[T]) extends ManyArg[T] {
def eval(f: Any => Any) = num.sum(for (i <- args.iterator) yield i.eval(f))
def derivative(v: Var[T]) = Add(args.map(_.derivative(v)))
def mapArgs(f: EndoFunction[Expr[_]]) = Add(args map (x => f(x)))
override def toString = "(" + args.mkString(" + ") + ")"
override lazy val hashCode = ScalaRunTime._hashCode(this);
}
case class Add2[T](left: Expr[T], right: Expr[T])
(implicit num: NumericOps[T]) extends TwoArg[T] {
def eval(f: Any => Any) = num.add(left.eval(f), right.eval(f))
def derivative(v: Var[T]) = Add2(left.derivative(v), right.derivative(v))
def mapArgs(f: EndoFunction[Expr[_]]) = Add2(f(left), f(right))
override def toString = "(" + left + " + " + right + ")"
override lazy val hashCode = ScalaRunTime._hashCode(this);
}
case class Add3[T](a1: Expr[T], a2: Expr[T], a3: Expr[T])
(implicit num: NumericOps[T]) extends ManyArg[T] {
val args = List(a1, a2, a3)
def eval(f: Any => Any) = num.add(a1.eval(f), a2.eval(f), a3.eval(f))
def derivative(v: Var[T]) = Add3(a1.derivative(v), a2.derivative(v), a3.derivative(v))
def mapArgs(f: EndoFunction[Expr[_]]) = Add3(f(a1), f(a2), f(a3))
override def toString = "(" + a1 + " + " + a2 + " + " + a3 + ")"
override lazy val hashCode = ScalaRunTime._hashCode(this);
}
case class Sub[T](left: Expr[T], right: Expr[T])
(implicit num: NumericOps[T]) extends TwoArg[T] {
def derivative(v: Var[T]) = Sub(left.derivative(v), right.derivative(v))
def eval(f: Any => Any) = num.sub(left.eval(f), right.eval(f))
def mapArgs(f: EndoFunction[Expr[_]]) = Sub(f(left), f(right))
override def toString = "(" + left + " - " + right + ")"
override lazy val hashCode = ScalaRunTime._hashCode(this);
}
case class Neg[T](expr: Expr[T])
(implicit num: NumericOps[T]) extends OneArg[T] {
def derivative(v: Var[T]) = Neg(expr.derivative(v))
def eval(f: Any => Any) = num.neg(expr.eval(f))
def mapArgs(f: EndoFunction[Expr[_]]) = Neg(f(expr))
override def toString = "(-" + expr + ")"
override lazy val hashCode = ScalaRunTime._hashCode(this);
}
case class Mul[T](left: Expr[T], right: Expr[T])
(implicit num: NumericOps[T]) extends TwoArg[T] {
def derivative(v: Var[T]) =
Add(List(
Mul(left, right.derivative(v)),
Mul(right, left.derivative(v))))
def eval(f: Any => Any) = num.mul(left.eval(f), right.eval(f))
def mapArgs(f: EndoFunction[Expr[_]]) = Mul(f(left), f(right))
override def toString = "(" + left + " * " + right + ")"
override lazy val hashCode = ScalaRunTime._hashCode(this);
}
case class Div[T](left: Expr[T], right: Expr[T])
(implicit num: NumericOps[T]) extends TwoArg[T] {
// [f(x) / g(x)]' = [f(x) * 1 / g(x)]' = f'(x) * 1 / g(x) + f(x) * [1 / g(x)]' =
// f'(x) / g(x) + f(x) * [-1 / g(x) ^ 2] * g'(x) = (f'(x) * g(x) - f(x) * g'(x)) / g(x)^2
def derivative(v: Var[T]) =
Div(
Sub(
Mul(left.derivative(v), right),
Mul(left, right.derivative(v))),
Sqr(right)
)
def eval(f: Any => Any) = num.div(left.eval(f), right.eval(f))
def mapArgs(f: EndoFunction[Expr[_]]) =
Div(f(left), f(right))
override def toString = "(" + left + " / " + right + ")"
override lazy val hashCode = ScalaRunTime._hashCode(this);
}
case class Inv[T](expr: Expr[T])(implicit num: NumericOps[T]) extends OneArg[T] {
// [1 / f(x)]' = - f'(x) / f(x) ^ 2
def derivative(v: Var[T]) = Neg(Div(expr.derivative(v), Sqr(expr)))
def eval(f: Any => Any) = num.div(num.one, expr.eval(f))
def mapArgs(f: EndoFunction[Expr[_]]) = Inv(f(expr))
override def toString = "(1 / " + expr + ")"
override lazy val hashCode = ScalaRunTime._hashCode(this);
}
case class Sqr[T](expr: Expr[T])(implicit num: NumericOps[T]) extends OneArg[T] {
// [f(x) ^ 2]' = 2 * f(x) * f'(x)
def derivative(v: Var[T]) = Mul(Mul(Const(num.two), expr), expr.derivative(v))
def eval(f: Any => Any) = num.sqr(expr.eval(f))
def mapArgs(f: EndoFunction[Expr[_]]) = Sqr(f(expr))
override def toString = expr + " ^ 2"
override lazy val hashCode = ScalaRunTime._hashCode(this);
}
case class Log[T](expr: Expr[T])(implicit num: NumericOps[T]) extends OneArg[T] {
def derivative(v: Var[T]) = Div(expr.derivative(v), expr)
def eval(f: Any => Any) = num.log(expr.eval(f))
def mapArgs(f: EndoFunction[Expr[_]]) = Log(f(expr))
override def toString = "log(" + expr + ")"
override lazy val hashCode = ScalaRunTime._hashCode(this);
}
case class Exp[T](expr: Expr[T])(implicit num: NumericOps[T]) extends OneArg[T] {
def derivative(v: Var[T]) = Mul(expr.derivative(v), Exp(expr))
def eval(f: Any => Any) = num.exp(expr.eval(f))
def mapArgs(f: EndoFunction[Expr[_]]) = Exp(f(expr))
override def toString = "exp(" + expr + ")"
override lazy val hashCode = ScalaRunTime._hashCode(this);
}
case class Sqrt[T](expr: Expr[T])(implicit num: NumericOps[T]) extends OneArg[T] {
def derivative(v: Var[T]) = Neg(Div(expr.derivative(v), Sqrt(expr)))
def eval(f: Any => Any) = num.sqrt(expr.eval(f))
def mapArgs(f: EndoFunction[Expr[_]]) = Sqrt(f(expr))
override def toString = "sqrt(" + expr + ")"
override lazy val hashCode = ScalaRunTime._hashCode(this);
}
case class Sin[T](expr: Expr[T])(implicit num: NumericOps[T]) extends OneArg[T] {
def derivative(v: Var[T]) = Mul(expr.derivative(v), Cos(expr))
def eval(f: Any => Any) = num.sin(expr.eval(f))
def mapArgs(f: EndoFunction[Expr[_]]) = Sin(f(expr))
override def toString = "sin(" + expr + ")"
override lazy val hashCode = ScalaRunTime._hashCode(this);
}
case class Cos[T](expr: Expr[T])(implicit num: NumericOps[T]) extends OneArg[T] {
def derivative(v: Var[T]) = Neg(Mul(expr.derivative(v), Sin(expr)))
def eval(f: Any => Any) = num.cos(expr.eval(f))
def mapArgs(f: EndoFunction[Expr[_]]) = Cos(f(expr))
override def toString = "cos(" + expr + ")"
override lazy val hashCode = ScalaRunTime._hashCode(this);
}
abstract class Compare[T](left: Expr[T], right: Expr[T], cmp: (T, T) => Boolean)(implicit num: NumericOps[T])
extends Expr[Boolean] {
def derivative(v: Var[Boolean]) = throw new IllegalStateException("Derivative of Boolean not allowed")
def eval(f: Any => Any) = cmp(left.eval(f), right.eval(f))
val args = List(left, right)
}
case class LE[T](left: Expr[T], right: Expr[T])(implicit num: NumericOps[T], ord: Ordering[T])
extends Compare[T](left, right, ord.compare(_, _) <= 0) {
def mapArgs(f: EndoFunction[Expr[_]]) = LE(
f(left), f(right))
override def toString = left.toString + " <= " + right.toString
}
case class LT[T](left: Expr[T], right: Expr[T])(implicit num: NumericOps[T], ord: Ordering[T])
extends Compare[T](left, right, ord.compare(_, _) < 0) {
def mapArgs(f: EndoFunction[Expr[_]]) = LT(
f(left), f(right))
override def toString = left.toString + " < " + right.toString
}
case class GE[T](left: Expr[T], right: Expr[T])(implicit num: NumericOps[T], ord: Ordering[T])
extends Compare[T](left, right, ord.compare(_, _) >= 0) {
def mapArgs(f: EndoFunction[Expr[_]]) = GE(
f(left), f(right))
override def toString = left.toString + " >= " + right.toString
}
case class GT[T](left: Expr[T], right: Expr[T])(implicit num: NumericOps[T], ord: Ordering[T])
extends Compare[T](left, right, ord.compare(_, _) > 0) {
def mapArgs(f: EndoFunction[Expr[_]]) = GT(
f(left), f(right))
override def toString = left.toString + " > " + right.toString
}
case class IfElse[T <: Numeric[T]]
(condition: Expr[Boolean], left: Expr[T], right: Expr[T])(implicit num: NumericOps[T]) extends Expr[T] {
val args = List(condition, left, right)
def derivative(v: Var[T]) = IfElse(condition, left.derivative(v), right.derivative(v))
def eval(f: Any => Any) = if (condition.eval(f)) left.eval(f) else right.eval(f)
def mapArgs(f: EndoFunction[Expr[_]]) = IfElse(
f(condition).asInstanceOf[Expr[Boolean]],
f(left),
f(right))
override def toString = "if (" + condition + ")(" + left + ") else (" + right + ")"
override lazy val hashCode = ScalaRunTime._hashCode(this);
}
object Expr {
/** Creates a constant expression */
def const[T](value: T)(implicit num: NumericOps[T]): Leaf[T] =
if (num.zero == value) Zero[T]
else Const(value)
implicit def double2Constant[T](d: Double)(implicit num: NumericOps[T]): Leaf[T] =
const(num.fromDouble(d))
implicit def float2Constant[T](f: Float)(implicit num: NumericOps[T]): Leaf[T] =
const(num.fromDouble(f.toDouble))
implicit def int2Constant[T](i: Int)(implicit num: NumericOps[T]): Leaf[T] =
const(num.fromDouble(i.toDouble))
implicit def long2Constant[T](l: Long)(implicit num: NumericOps[T]): Leaf[T] =
const(num.fromDouble(l.toDouble))
}
}
| shimib/scala | test/files/run/patmat-exprs.scala | Scala | bsd-3-clause | 22,570 |
/**
* Licensed to Big Data Genomics (BDG) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The BDG licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.bdgenomics.adam.apis.java
import org.apache.spark.SparkContext
import org.apache.spark.api.java.JavaSparkContext
import org.bdgenomics.adam.models.{ RecordGroupDictionary, SequenceDictionary }
import org.bdgenomics.adam.rdd.ADAMContext
import org.bdgenomics.adam.rdd.ADAMContext._
import org.bdgenomics.formats.avro._
import scala.collection.JavaConversions._
object JavaADAMContext {
// convert to and from java/scala implementations
implicit def fromADAMContext(ac: ADAMContext): JavaADAMContext = new JavaADAMContext(ac)
implicit def toADAMContext(jac: JavaADAMContext): ADAMContext = jac.ac
}
class JavaADAMContext(val ac: ADAMContext) extends Serializable {
/**
* @return Returns the Java Spark Context associated with this Java ADAM Context.
*/
def getSparkContext: JavaSparkContext = new JavaSparkContext(ac.sc)
/**
* Builds this Java ADAM Context using an existing Java Spark Context.
*
* @param jsc Java Spark Context to use to build this ADAM Context.
* @return A new Java ADAM Context.
*/
def this(jsc: JavaSparkContext) = this(new ADAMContext(jsc))
/**
* Builds this Java ADAM Context using an existing Spark Context.
*
* @param sc Spark Context to use to build this ADAM Context.
* @return A new Java ADAM Context.
*/
def this(sc: SparkContext) = this(new ADAMContext(sc))
/**
* Loads in an ADAM read file. This method can load SAM, BAM, and ADAM files.
*
* @param filePath Path to load the file from.
* @return Returns a read RDD.
*/
def loadAlignments(filePath: java.lang.String): JavaAlignmentRecordRDD = {
val aRdd = ac.loadAlignments(filePath)
new JavaAlignmentRecordRDD(aRdd.rdd.toJavaRDD(),
aRdd.sequences,
aRdd.recordGroups)
}
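  // Illustrative Java-side call (the file name is hypothetical):
  //   JavaADAMContext jac = new JavaADAMContext(jsc);
  //   JavaAlignmentRecordRDD reads = jac.loadAlignments("/data/sample.adam");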
}
| erictu/adam | adam-apis/src/main/scala/org/bdgenomics/adam/apis/java/JavaADAMContext.scala | Scala | apache-2.0 | 2,558 |
package org.jetbrains.jps.incremental.scala
import java.io.File
import org.jetbrains.jps.incremental.messages.BuildMessage.Kind
import org.jetbrains.jps.incremental.scala.remote.{CompileServerMeteringInfo, CompileServerMetrics}
class DummyClient extends Client {
override def message(msg: Client.ClientMsg): Unit = ()
override def deleted(module: File): Unit = ()
override def progress(text: String, done: Option[Float]): Unit = ()
override def isCanceled: Boolean = false
override def internalInfo(text: String): Unit = ()
override def internalDebug(text: String): Unit = ()
override def internalTrace(text: String): Unit = ()
override def trace(exception: Throwable): Unit = ()
override def generated(source: File, module: File, name: String): Unit = ()
override def worksheetOutput(text: String): Unit = ()
override def compilationStart(): Unit = ()
override def compilationPhase(name: String): Unit = ()
override def compilationUnit(path: String): Unit = ()
override def compilationEnd(sources: Set[File]): Unit = ()
override def processingEnd(): Unit = ()
override def sourceStarted(source: String): Unit = ()
override def meteringInfo(info: CompileServerMeteringInfo): Unit = ()
override def metrics(value: CompileServerMetrics): Unit = ()
}
object DummyClient {
val Instance: DummyClient = new DummyClient
} | JetBrains/intellij-scala | scala/compiler-shared/src/org/jetbrains/jps/incremental/scala/DummyClient.scala | Scala | apache-2.0 | 1,356 |
package solution1
object Factorial extends App {
assert(args.size == 1, "Usage: Factorial <n>")
val n = args(0).toInt
assert(n >= 0, "<n> needs to be >= 0")
println(fac(n))
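  // Iterative factorial; note that Int overflows for n > 12, so larger inputs would need BigInt.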
private def fac(n: Int): Int = {
var f = 1
for(i <- 1 to n) {
f = f * i
}
f
}
}
| keshwans/scala-1-day | 220.immutability/src/main/scala/solution1/Factorial.scala | Scala | mit | 290 |
/*
* Copyright 2016 Lightcopy
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql
import scala.collection.mutable.{HashMap => MutableHashMap}
import org.apache.spark.sql.execution.datasources.{CatalogTableSource, IndexedDataSource, Metastore}
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.StructType
/**
* Entrypoint for working with index functionality, e.g. reading indexed table, creating index
* for provided file path, or deleting index for table.
*/
class DataFrameIndexManager(sparkSession: SparkSession) {
private var source: String = IndexedDataSource.parquet
private var extraOptions = new MutableHashMap[String, String]()
/** File format for table */
def format(source: String): DataFrameIndexManager = {
this.source = source
this
}
/** Add key-value to options */
def option(key: String, value: String): DataFrameIndexManager = {
this.extraOptions += (key -> value)
this
}
/** Add boolean value to options, for compatibility with Spark */
def option(key: String, value: Boolean): DataFrameIndexManager = {
option(key, value.toString)
}
/** Add long value to options, for compatibility with Spark */
def option(key: String, value: Long): DataFrameIndexManager = {
option(key, value.toString)
}
/** Add double value to options, for compatibility with Spark */
def option(key: String, value: Double): DataFrameIndexManager = {
option(key, value.toString)
}
/** Add options from external map */
def options(options: scala.collection.Map[String, String]): DataFrameIndexManager = {
this.extraOptions ++= options
this
}
/**
* Load indexed table as DataFrame.
* @param path filepath to the table (directory)
*/
def load(path: String): DataFrame = {
option("path", path)
sparkSession.baseRelationToDataFrame(
IndexedDataSource(
Metastore.getOrCreate(sparkSession),
className = source,
options = extraOptions.toMap).resolveRelation())
}
/**
* Load indexed DataFrame from persistent table.
* @param tableName table name in catalog
*/
def table(tableName: String): DataFrame = {
sparkSession.baseRelationToDataFrame(
CatalogTableSource(
Metastore.getOrCreate(sparkSession),
tableName = tableName,
options = extraOptions.toMap).asDataSource.resolveRelation())
}
/**
* Load indexed DataFrame from Parquet table.
* @param path filepath to the Parquet table (directory)
*/
def parquet(path: String): DataFrame = {
format(IndexedDataSource.parquet).load(path)
}
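  // Usage sketch (illustrative only; assumes the manager is exposed on SparkSession via an
  // implicit defined elsewhere in the project, shown here as `spark.index`):
  //   spark.index.create.mode("overwrite").indexBy(col("id")).parquet("/tmp/table")
  //   val df = spark.index.parquet("/tmp/table").filter(col("id") === 1)
  //   if (spark.index.exists.parquet("/tmp/table")) spark.index.delete.parquet("/tmp/table")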
/** DDL command to create index for provided source with options */
def create: CreateIndexCommand = {
CreateIndexCommand(
sparkSession = sparkSession,
source = source,
options = extraOptions)
}
/** DDL command to check if index exists in metastore */
def exists: ExistsIndexCommand = {
ExistsIndexCommand(
sparkSession = sparkSession,
source = source,
options = extraOptions)
}
/** DDL command to delete index for provided source */
def delete: DeleteIndexCommand = {
DeleteIndexCommand(
sparkSession = sparkSession,
source = source,
options = extraOptions)
}
/** Get currently set source, for testing only */
private[sql] def getSource(): String = this.source
/** Get currently set options, for testing only */
private[sql] def getOptions(): Map[String, String] = this.extraOptions.toMap
}
/**
* [[CreateIndexCommand]] provides functionality to create index for a table. Requires index
* columns and valid table path. Also allows to specify different mode for creating index, similar
* to writing DataFrame.
*/
private[sql] case class CreateIndexCommand(
@transient val sparkSession: SparkSession,
private var source: String,
private var options: MutableHashMap[String, String]) {
private var mode: SaveMode = SaveMode.ErrorIfExists
private var columns: Seq[Column] = Nil
/**
* Provide mode for creating index.
* @param mode save mode
*/
def mode(mode: SaveMode): CreateIndexCommand = {
this.mode = mode
this
}
/**
* Provide string-like mode to create index.
* @param mode string value for save mode
*/
def mode(mode: String): CreateIndexCommand = {
val typedMode = mode.toLowerCase match {
case "append" => SaveMode.Append
case "overwrite" => SaveMode.Overwrite
case "error" => SaveMode.ErrorIfExists
case "ignore" => SaveMode.Ignore
case other => throw new UnsupportedOperationException(
s"Unsupported mode $mode, must be one of ${SaveMode.Append}, ${SaveMode.Overwrite}, " +
s"${SaveMode.ErrorIfExists}, ${SaveMode.Ignore}")
}
this.mode = typedMode
this
}
/** Specify columns to index, at least one column is required */
def indexBy(column: Column, columns: Column*): CreateIndexCommand = {
this.columns = column +: columns
this
}
/** Specify column names to index, at least one column is required */
def indexBy(columnName: String, columnNames: String*): CreateIndexCommand = {
indexBy(col(columnName), columnNames.map(col): _*)
}
/**
* Java-friendly API to index by columns.
* For Scala it is recommended to use other more convenient API methods.
*/
def indexBy(columns: Array[Column]): CreateIndexCommand = {
require(columns.nonEmpty, "At least one column is required, " +
"use 'indexByAll()' method to infer all columns that can be indexed")
this.columns = columns.toSeq
this
}
/**
* Java-friendly API to index by column names.
* For Scala it is recommended to use other more convenient API methods.
*/
def indexBy(columnNames: Array[String]): CreateIndexCommand = {
indexBy(columnNames.map(col))
}
/**
* Java-friendly API to index by column names. Also used in Python API.
* For Scala it is recommended to use other more convenient API methods.
*/
def indexBy(columnNames: java.util.List[String]): CreateIndexCommand = {
val cols = new Array[String](columnNames.size())
for (i <- 0 until cols.length) {
cols(i) = columnNames.get(i)
}
indexBy(cols)
}
/** Use all available columns that can be indexed */
def indexByAll(): CreateIndexCommand = {
// assign empty list, will infer all columns, see `MetastoreSupport` API for more info
this.columns = Nil
this
}
/** Public for Python API */
def createIndex(path: String): Unit = {
this.options += "path" -> path
IndexedDataSource(
Metastore.getOrCreate(sparkSession),
className = source,
mode = mode,
options = this.options.toMap).createIndex(this.columns)
}
/** Create index for Spark persistent table */
def table(tableName: String): Unit = {
CatalogTableSource(
Metastore.getOrCreate(sparkSession),
tableName = tableName,
options = this.options.toMap,
mode = mode).asDataSource.createIndex(this.columns)
}
/** Create index for Parquet table as datasource */
def parquet(path: String): Unit = {
this.source = IndexedDataSource.parquet
createIndex(path)
}
/** Get currently set source, for testing only */
private[sql] def getSource(): String = this.source
/** Get currently set options, for testing only */
private[sql] def getOptions(): Map[String, String] = this.options.toMap
/** Get currently set mode, for testing only */
private[sql] def getMode(): SaveMode = this.mode
/** Get currently set columns, for testing only */
private[sql] def getColumns(): Seq[Column] = this.columns.toList
}
/**
* [[ExistsIndexCommand]] reports whether or not given table path is indexed.
*/
private[sql] case class ExistsIndexCommand(
@transient val sparkSession: SparkSession,
private var source: String,
private val options: MutableHashMap[String, String]) {
/** Public for Python API */
def existsIndex(path: String): Boolean = {
this.options += "path" -> path
IndexedDataSource(
Metastore.getOrCreate(sparkSession),
className = source,
options = this.options.toMap).existsIndex()
}
/** Check index for Spark persistent table */
def table(tableName: String): Boolean = {
CatalogTableSource(
Metastore.getOrCreate(sparkSession),
tableName = tableName,
options = this.options.toMap).asDataSource.existsIndex()
}
/** Check index for Parquet table as datasource */
def parquet(path: String): Boolean = {
this.source = IndexedDataSource.parquet
existsIndex(path)
}
/** Get currently set source, for testing only */
private[sql] def getSource(): String = this.source
/** Get currently set options, for testing only */
private[sql] def getOptions(): Map[String, String] = this.options.toMap
}
/**
* [[DeleteIndexCommand]] provides functionality to delete existing index. Current behaviour is
* no-op when deleting non-existent index.
*/
private[sql] case class DeleteIndexCommand(
@transient val sparkSession: SparkSession,
private var source: String,
private val options: MutableHashMap[String, String]) {
/** Public for Python API */
def deleteIndex(path: String): Unit = {
this.options += "path" -> path
IndexedDataSource(
Metastore.getOrCreate(sparkSession),
className = source,
options = this.options.toMap).deleteIndex()
}
/** Delete index for Spark persistent table */
def table(tableName: String): Unit = {
CatalogTableSource(
Metastore.getOrCreate(sparkSession),
tableName = tableName,
options = this.options.toMap).asDataSource.deleteIndex()
}
/** Delete index for Parquet table as datasource */
def parquet(path: String): Unit = {
this.source = IndexedDataSource.parquet
deleteIndex(path)
}
/** Get currently set source, for testing only */
private[sql] def getSource(): String = this.source
/** Get currently set options, for testing only */
private[sql] def getOptions(): Map[String, String] = this.options.toMap
}
| lightcopy/parquet-index | src/main/scala/org/apache/spark/sql/DataFrameIndexManager.scala | Scala | apache-2.0 | 10,580 |
package streams
/**
* This component implements a parser to define terrains from a
* graphical ASCII representation.
*
* When mixing in that component, a level can be defined by
* defining the field `level` in the following form:
*
* val level =
* """------
* |--ST--
* |--oo--
* |--oo--
* |------""".stripMargin
*
* - The `-` character denotes parts which are outside the terrain
* - `o` denotes fields which are part of the terrain
* - `S` denotes the start position of the block (which is also considered
* inside the terrain)
* - `T` denotes the final position of the block (which is also considered
* inside the terrain)
*
* In this example, the first and last lines could be omitted, and
* also the columns that consist of `-` characters only.
*/
trait StringParserTerrain extends GameDef {
/**
   * An ASCII representation of the terrain. This field should remain
* abstract here.
*/
val level: String
/**
   * This method returns a terrain function that represents the terrain
   * in `levelVector`. The vector contains a parsed version of the `level`
* string. For example, the following level
*
* val level =
* """ST
* |oo
* |oo""".stripMargin
*
* is represented as
*
* Vector(Vector('S', 'T'), Vector('o', 'o'), Vector('o', 'o'))
*
* The resulting function should return `true` if the position `pos` is
* a valid position (not a '-' character) inside the terrain described
* by `levelVector`.
*/
val validChars = List('S', 'T', 'o')
def terrainFunction(levelVector: Vector[Vector[Char]]): Pos => Boolean =
(pos: Pos) => {
val row = pos.x
val col = pos.y
if (row >= 0 && row < levelVector.length && col >= 0 && col < levelVector(row).length)
        validChars contains levelVector(row)(col)
else
false
}
/**
* This function should return the position of character `c` in the
* terrain described by `levelVector`. You can assume that the `c`
* appears exactly once in the terrain.
*
* Hint: you can use the functions `indexWhere` and / or `indexOf` of the
* `Vector` class
*/
def findChar(c: Char, levelVector: Vector[Vector[Char]]): Pos = {
val x: Int = levelVector indexWhere (_ contains c)
val y: Int = levelVector(x) indexOf c
Pos(x, y)
}
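  // For example (illustrative): with levelVector = Vector(Vector('S', 'T'), Vector('o', 'o')),
  // findChar('S', levelVector) == Pos(0, 0) and findChar('T', levelVector) == Pos(0, 1).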
private lazy val vector: Vector[Vector[Char]] =
Vector(level.split("\\n").map(str => Vector(str: _*)): _*)
lazy val terrain: Terrain = terrainFunction(vector)
lazy val startPos: Pos = findChar('S', vector)
lazy val goal: Pos = findChar('T', vector)
}
| kevllino/scala-specialization | 01-ProgFun2/streams/src/main/scala/streams/StringParserTerrain.scala | Scala | mit | 2,657 |
/*
* Copyright 2017 PayPal
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.squbs.unicomplex
import java.beans.ConstructorProperties
import java.lang.management.ManagementFactory
import java.util
import java.util.Date
import javax.management.{MXBean, ObjectName}
import akka.actor._
import akka.stream.StreamSubscriptionTimeoutTerminationMode.{CancelTermination, NoopTermination, WarnTermination}
import akka.stream.ActorMaterializer
import com.typesafe.config.Config
import scala.beans.BeanProperty
import scala.collection.concurrent.TrieMap
import scala.language.{implicitConversions, postfixOps}
object JMX {
val prefixConfig = "prefix-jmx-name"
val systemStateName = "org.squbs.unicomplex:type=SystemState"
val cubesName = "org.squbs.unicomplex:type=Cubes"
val extensionsName = "org.squbs.unicomplex:type=Extensions"
val cubeStateName = "org.squbs.unicomplex:type=CubeState,name="
val listenersName = "org.squbs.unicomplex:type=Listeners"
val listenerStateName = "org.squbs.unicomplex:type=ListenerState"
val serverStats = "org.squbs.unicomplex:type=ServerStats,listener="
val systemSettingName = "org.squbs.unicomplex:type=SystemSetting"
val forkJoinStatsName = "org.squbs.unicomplex:type=ForkJoinPool,name="
val materializerName = "org.squbs.unicomplex:type=Materializer,name="
implicit def string2objectName(name:String):ObjectName = new ObjectName(name)
private val prefixes = TrieMap.empty[ActorSystem, String]
/**
   * Gets the prefix used for prefixing JMX names. If a single ActorSystem is used, this function returns an empty
   * string unless explicitly configured with squbs.prefix-jmx-name = true. If multiple actor systems are detected,
   * the first (which could be non-deterministic) will use no prefix. Subsequent JMX registration of the same
   * component will be prefixed with the ActorSystem name.<br/>
*
* Note: prefix derivation may not be reliable on concurrent access. If intending to use multiple ActorSystems,
* it is more reliable to set configuration squbs.prefix-jmx-name = true
*
* @param system The caller's ActorSystem
* @return The ActorSystem's name or empty string dependent on configuration and conflict.
*/
def prefix(system: ActorSystem): String = {
(prefixes.get(system) orElse Option {
import org.squbs.util.ConfigUtil._
val p =
if (Unicomplex(system).config.get[Boolean](prefixConfig, false) || isRegistered(systemStateName))
system.name + '.'
else ""
prefixes += system -> p
p
}).get
}
def prefix(implicit context: ActorContext): String = prefix(context.system)
def register(ob: AnyRef, objName: ObjectName) = ManagementFactory.getPlatformMBeanServer.registerMBean(ob, objName)
def unregister(objName: ObjectName) = ManagementFactory.getPlatformMBeanServer.unregisterMBean(objName)
def isRegistered(objName: ObjectName) = ManagementFactory.getPlatformMBeanServer.isRegistered(objName)
def get(objName: ObjectName, attr: String) = ManagementFactory.getPlatformMBeanServer.getAttribute(objName, attr)
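  // A minimal usage sketch (illustrative only, not exercised by this file): register and later unregister
  // an MXBean under a prefixed name, assuming `bean` implements one of the MXBean traits below and `system`
  // is the running ActorSystem. The implicit string2objectName conversion supplies the ObjectName.
  //   JMX.register(bean, JMX.prefix(system) + JMX.cubesName)
  //   JMX.unregister(JMX.prefix(system) + JMX.cubesName)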
}
// $COVERAGE-OFF$
case class CubeInfo @ConstructorProperties(Array("name", "fullName", "version", "supervisor"))(
@BeanProperty name: String,
@BeanProperty fullName: String,
@BeanProperty version: String,
@BeanProperty supervisor: String)
case class ListenerState @ConstructorProperties(Array("listener", "state", "error"))(
@BeanProperty listener: String,
@BeanProperty state: String,
@BeanProperty error: String)
case class ListenerInfo @ConstructorProperties(Array("listener", "context", "actor"))(
@BeanProperty listener: String,
@BeanProperty context: String,
@BeanProperty actorPath: String)
case class SystemSetting @ConstructorProperties(Array("key", "value"))(
@BeanProperty key: String,
@BeanProperty value: String)
case class ExtensionInfo @ConstructorProperties(Array("cube", "sequence", "phase", "error"))(
@BeanProperty cube: String,
@BeanProperty sequence: Int,
@BeanProperty phase: String,
@BeanProperty error: String)
case class ActorErrorState @ConstructorProperties(Array("actorPath", "errorCount", "latestException"))(
@BeanProperty actorPath: String,
@BeanProperty errorCount: Int,
@BeanProperty latestException: String)
// $COVERAGE-ON$
@MXBean
trait SystemStateMXBean {
def getSystemState: String
def getStartTime : Date
def getInitMillis: Int
def getActivationMillis: Int
}
@MXBean
trait CubesMXBean {
def getCubes: util.List[CubeInfo]
}
@MXBean
trait ExtensionsMXBean {
def getExtensions: util.List[ExtensionInfo]
}
@MXBean
trait ActorMXBean {
def getActor: String
def getClassName: String
def getRouteConfig : String
def getParent: String
def getChildren: String
def getDispatcher : String
def getMailBoxSize : String
}
@MXBean
trait CubeStateMXBean {
def getName: String
def getCubeState: String
def getWellKnownActors : String
def getActorErrorStates: util.List[ActorErrorState]
}
@MXBean
trait ListenerStateMXBean {
def getListenerStates: java.util.List[ListenerState]
}
@MXBean
trait ListenerMXBean {
def getListeners: java.util.List[ListenerInfo]
}
@MXBean
trait ServerStatsMXBean {
def getListenerName: String
def getUptime: String
def getTotalRequests: Long
def getOpenRequests: Long
def getMaxOpenRequests: Long
def getTotalConnections: Long
def getOpenConnections: Long
def getMaxOpenConnections: Long
def getRequestsTimedOut: Long
}
@MXBean
trait SystemSettingMXBean {
def getSystemSetting: util.List[SystemSetting]
}
@MXBean
trait ForkJoinPoolMXBean {
def getPoolSize: Int
def getActiveThreadCount: Int
def getParallelism: Int
def getStealCount: Long
def getMode: String
def getQueuedSubmissionCount: Int
def getQueuedTaskCount: Long
def getRunningThreadCount: Int
def isQuiescent: Boolean
}
class SystemSettingBean(config: Config) extends SystemSettingMXBean {
lazy val settings:util.List[SystemSetting] = {
import scala.collection.JavaConversions._
def iterateMap(prefix: String, map: util.Map[String, AnyRef]): util.Map[String, String] = {
val result = new util.TreeMap[String, String]()
map.foreach {
case (key, v: util.List[_]) =>
val value = v.asInstanceOf[util.List[AnyRef]]
result.putAll(iterateList(s"$prefix$key", value))
case (key, v: util.Map[_, _]) =>
val value = v.asInstanceOf[util.Map[String, AnyRef]]
result.putAll(iterateMap(s"$prefix$key.", value))
case (key, value) => result.put(s"$prefix$key", String.valueOf(value))
}
result
}
def iterateList(prefix: String, list: util.List[AnyRef]): util.Map[String, String] = {
val result = new util.TreeMap[String, String]()
list.zipWithIndex.foreach{
case (v: util.List[_], i) =>
val value = v.asInstanceOf[util.List[AnyRef]]
result.putAll(iterateList(s"$prefix[$i]", value))
case (v: util.Map[_, _], i) =>
val value = v.asInstanceOf[util.Map[String, AnyRef]]
result.putAll(iterateMap(s"$prefix[$i].", value))
case (value, i) => result.put(s"$prefix[$i]", String.valueOf(value))
}
result
}
iterateMap("", config.root.unwrapped()).toList.map{case (k:String, v:String) => {
SystemSetting(k, v)
}}
}
override def getSystemSetting: util.List[SystemSetting] = settings
}
@MXBean
trait ActorMaterializerMXBean {
def getName: String
def getFactoryClass: String
def getActorSystemName: String
def getShutdown: String
def getInitialInputBufferSize: Int
def getMaxInputBufferSize: Int
def getDispatcher: String
def getStreamSubscriptionTimeoutTerminationMode: String
def getSubscriptionTimeout: String
def getDebugLogging: String
def getOutputBurstLimit: Int
def getFuzzingMode: String
def getAutoFusing: String
def getMaxFixedBufferSize: Int
def getSyncProcessingLimit: Int
}
class ActorMaterializerBean(name: String, className: String, materializer: ActorMaterializer)
extends ActorMaterializerMXBean {
val settings = materializer.settings
override def getName: String = name
override def getFactoryClass: String = className
override def getActorSystemName: String = materializer.system.name
override def getShutdown: String = materializer.isShutdown match {
case true => "yes"
case false => "no"
}
override def getInitialInputBufferSize: Int = settings.initialInputBufferSize
override def getMaxInputBufferSize: Int = settings.maxInputBufferSize
override def getDispatcher: String = settings.dispatcher
override def getStreamSubscriptionTimeoutTerminationMode: String = settings.subscriptionTimeoutSettings.mode match {
case NoopTermination => "noop"
case WarnTermination => "warn"
case CancelTermination => "cancel"
}
override def getSubscriptionTimeout: String = settings.subscriptionTimeoutSettings.timeout.toString
override def getDebugLogging: String = settings.debugLogging match {
case true => "on"
case false => "off"
}
override def getOutputBurstLimit: Int = settings.outputBurstLimit
override def getFuzzingMode: String = settings.fuzzingMode match {
case true => "on"
case false => "off"
}
override def getAutoFusing: String = settings.autoFusing match {
case true => "on"
case false => "off"
}
override def getMaxFixedBufferSize: Int = settings.maxFixedBufferSize
override def getSyncProcessingLimit: Int = settings.syncProcessingLimit
} | az-qbradley/squbs | squbs-unicomplex/src/main/scala/org/squbs/unicomplex/JMX.scala | Scala | apache-2.0 | 10,814 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.mllib.linalg
import java.util.Random
import scala.collection.mutable.{Map => MutableMap}
import scala.reflect.ClassTag
import breeze.linalg.{CSCMatrix, Matrix => BM}
import org.mockito.Mockito.when
import org.scalatest.mockito.MockitoSugar._
import org.apache.spark.{SparkConf, SparkFunSuite}
import org.apache.spark.ml.{linalg => newlinalg}
import org.apache.spark.mllib.util.TestingUtils._
import org.apache.spark.serializer.KryoSerializer
class MatricesSuite extends SparkFunSuite {
test("kryo class register") {
val conf = new SparkConf(false)
conf.set("spark.kryo.registrationRequired", "true")
val ser = new KryoSerializer(conf).newInstance()
def check[T: ClassTag](t: T) {
assert(ser.deserialize[T](ser.serialize(t)) === t)
}
val m = 3
val n = 2
val denseValues = Array(0.0, 1.0, 2.0, 3.0, 4.0, 5.0)
val denseMat = Matrices.dense(m, n, denseValues).asInstanceOf[DenseMatrix]
val sparseValues = Array(1.0, 2.0, 4.0, 5.0)
val colPtrs = Array(0, 2, 4)
val rowIndices = Array(1, 2, 1, 2)
val sparseMat =
Matrices.sparse(m, n, colPtrs, rowIndices, sparseValues).asInstanceOf[SparseMatrix]
check(denseMat)
check(sparseMat)
}
test("dense matrix construction") {
val m = 3
val n = 2
val values = Array(0.0, 1.0, 2.0, 3.0, 4.0, 5.0)
val mat = Matrices.dense(m, n, values).asInstanceOf[DenseMatrix]
assert(mat.numRows === m)
assert(mat.numCols === n)
assert(mat.values.eq(values), "should not copy data")
}
test("dense matrix construction with wrong dimension") {
intercept[RuntimeException] {
Matrices.dense(3, 2, Array(0.0, 1.0, 2.0))
}
}
test("sparse matrix construction") {
val m = 3
val n = 4
val values = Array(1.0, 2.0, 4.0, 5.0)
val colPtrs = Array(0, 2, 2, 4, 4)
val rowIndices = Array(1, 2, 1, 2)
val mat = Matrices.sparse(m, n, colPtrs, rowIndices, values).asInstanceOf[SparseMatrix]
assert(mat.numRows === m)
assert(mat.numCols === n)
assert(mat.values.eq(values), "should not copy data")
assert(mat.colPtrs.eq(colPtrs), "should not copy data")
assert(mat.rowIndices.eq(rowIndices), "should not copy data")
val entries: Array[(Int, Int, Double)] = Array((2, 2, 3.0), (1, 0, 1.0), (2, 0, 2.0),
(1, 2, 2.0), (2, 2, 2.0), (1, 2, 2.0), (0, 0, 0.0))
val mat2 = SparseMatrix.fromCOO(m, n, entries)
assert(mat.asBreeze === mat2.asBreeze)
assert(mat2.values.length == 4)
}
test("sparse matrix construction with wrong number of elements") {
intercept[IllegalArgumentException] {
Matrices.sparse(3, 2, Array(0, 1), Array(1, 2, 1), Array(0.0, 1.0, 2.0))
}
intercept[IllegalArgumentException] {
Matrices.sparse(3, 2, Array(0, 1, 2), Array(1, 2), Array(0.0, 1.0, 2.0))
}
}
test("index in matrices incorrect input") {
val sm = Matrices.sparse(3, 2, Array(0, 2, 3), Array(1, 2, 1), Array(0.0, 1.0, 2.0))
val dm = Matrices.dense(3, 2, Array(0.0, 2.3, 1.4, 3.2, 1.0, 9.1))
Array(sm, dm).foreach { mat =>
intercept[IllegalArgumentException] { mat.index(4, 1) }
intercept[IllegalArgumentException] { mat.index(1, 4) }
intercept[IllegalArgumentException] { mat.index(-1, 2) }
intercept[IllegalArgumentException] { mat.index(1, -2) }
}
}
test("equals") {
val dm1 = Matrices.dense(2, 2, Array(0.0, 1.0, 2.0, 3.0))
assert(dm1 === dm1)
assert(dm1 !== dm1.transpose)
val dm2 = Matrices.dense(2, 2, Array(0.0, 2.0, 1.0, 3.0))
assert(dm1 === dm2.transpose)
val sm1 = dm1.asInstanceOf[DenseMatrix].toSparse
assert(sm1 === sm1)
assert(sm1 === dm1)
assert(sm1 !== sm1.transpose)
val sm2 = dm2.asInstanceOf[DenseMatrix].toSparse
assert(sm1 === sm2.transpose)
assert(sm1 === dm2.transpose)
}
test("matrix copies are deep copies") {
val m = 3
val n = 2
val denseMat = Matrices.dense(m, n, Array(0.0, 1.0, 2.0, 3.0, 4.0, 5.0))
val denseCopy = denseMat.copy
assert(!denseMat.toArray.eq(denseCopy.toArray))
val values = Array(1.0, 2.0, 4.0, 5.0)
val colPtrs = Array(0, 2, 4)
val rowIndices = Array(1, 2, 1, 2)
val sparseMat = Matrices.sparse(m, n, colPtrs, rowIndices, values)
val sparseCopy = sparseMat.copy
assert(!sparseMat.toArray.eq(sparseCopy.toArray))
}
test("matrix indexing and updating") {
val m = 3
val n = 2
val allValues = Array(0.0, 1.0, 2.0, 3.0, 4.0, 0.0)
val denseMat = new DenseMatrix(m, n, allValues)
assert(denseMat(0, 1) === 3.0)
assert(denseMat(0, 1) === denseMat.values(3))
assert(denseMat(0, 1) === denseMat(3))
assert(denseMat(0, 0) === 0.0)
denseMat.update(0, 0, 10.0)
assert(denseMat(0, 0) === 10.0)
assert(denseMat.values(0) === 10.0)
val sparseValues = Array(1.0, 2.0, 3.0, 4.0)
val colPtrs = Array(0, 2, 4)
val rowIndices = Array(1, 2, 0, 1)
val sparseMat = new SparseMatrix(m, n, colPtrs, rowIndices, sparseValues)
assert(sparseMat(0, 1) === 3.0)
assert(sparseMat(0, 1) === sparseMat.values(2))
assert(sparseMat(0, 0) === 0.0)
intercept[NoSuchElementException] {
sparseMat.update(0, 0, 10.0)
}
intercept[NoSuchElementException] {
sparseMat.update(2, 1, 10.0)
}
sparseMat.update(0, 1, 10.0)
assert(sparseMat(0, 1) === 10.0)
assert(sparseMat.values(2) === 10.0)
}
test("toSparse, toDense") {
val m = 3
val n = 2
val values = Array(1.0, 2.0, 4.0, 5.0)
val allValues = Array(1.0, 2.0, 0.0, 0.0, 4.0, 5.0)
val colPtrs = Array(0, 2, 4)
val rowIndices = Array(0, 1, 1, 2)
val spMat1 = new SparseMatrix(m, n, colPtrs, rowIndices, values)
val deMat1 = new DenseMatrix(m, n, allValues)
val spMat2 = deMat1.toSparse
val deMat2 = spMat1.toDense
assert(spMat1.asBreeze === spMat2.asBreeze)
assert(deMat1.asBreeze === deMat2.asBreeze)
}
test("map, update") {
val m = 3
val n = 2
val values = Array(1.0, 2.0, 4.0, 5.0)
val allValues = Array(1.0, 2.0, 0.0, 0.0, 4.0, 5.0)
val colPtrs = Array(0, 2, 4)
val rowIndices = Array(0, 1, 1, 2)
val spMat1 = new SparseMatrix(m, n, colPtrs, rowIndices, values)
val deMat1 = new DenseMatrix(m, n, allValues)
val deMat2 = deMat1.map(_ * 2)
val spMat2 = spMat1.map(_ * 2)
deMat1.update(_ * 2)
spMat1.update(_ * 2)
assert(spMat1.toArray === spMat2.toArray)
assert(deMat1.toArray === deMat2.toArray)
}
test("transpose") {
val dA =
new DenseMatrix(4, 3, Array(0.0, 1.0, 0.0, 0.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 0.0, 3.0))
val sA = new SparseMatrix(4, 3, Array(0, 1, 3, 4), Array(1, 0, 2, 3), Array(1.0, 2.0, 1.0, 3.0))
val dAT = dA.transpose.asInstanceOf[DenseMatrix]
val sAT = sA.transpose.asInstanceOf[SparseMatrix]
val dATexpected =
new DenseMatrix(3, 4, Array(0.0, 2.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 3.0))
val sATexpected =
new SparseMatrix(3, 4, Array(0, 1, 2, 3, 4), Array(1, 0, 1, 2), Array(2.0, 1.0, 1.0, 3.0))
assert(dAT.asBreeze === dATexpected.asBreeze)
assert(sAT.asBreeze === sATexpected.asBreeze)
assert(dA(1, 0) === dAT(0, 1))
assert(dA(2, 1) === dAT(1, 2))
assert(sA(1, 0) === sAT(0, 1))
assert(sA(2, 1) === sAT(1, 2))
assert(!dA.toArray.eq(dAT.toArray), "has to have a new array")
assert(dA.values.eq(dAT.transpose.asInstanceOf[DenseMatrix].values), "should not copy array")
assert(dAT.toSparse.asBreeze === sATexpected.asBreeze)
assert(sAT.toDense.asBreeze === dATexpected.asBreeze)
}
test("foreachActive") {
val m = 3
val n = 2
val values = Array(1.0, 2.0, 4.0, 5.0)
val allValues = Array(1.0, 2.0, 0.0, 0.0, 4.0, 5.0)
val colPtrs = Array(0, 2, 4)
val rowIndices = Array(0, 1, 1, 2)
val sp = new SparseMatrix(m, n, colPtrs, rowIndices, values)
val dn = new DenseMatrix(m, n, allValues)
val dnMap = MutableMap[(Int, Int), Double]()
dn.foreachActive { (i, j, value) =>
dnMap.put((i, j), value)
}
assert(dnMap.size === 6)
assert(dnMap((0, 0)) === 1.0)
assert(dnMap((1, 0)) === 2.0)
assert(dnMap((2, 0)) === 0.0)
assert(dnMap((0, 1)) === 0.0)
assert(dnMap((1, 1)) === 4.0)
assert(dnMap((2, 1)) === 5.0)
val spMap = MutableMap[(Int, Int), Double]()
sp.foreachActive { (i, j, value) =>
spMap.put((i, j), value)
}
assert(spMap.size === 4)
assert(spMap((0, 0)) === 1.0)
assert(spMap((1, 0)) === 2.0)
assert(spMap((1, 1)) === 4.0)
assert(spMap((2, 1)) === 5.0)
}
test("horzcat, vertcat, eye, speye") {
val m = 3
val n = 2
val values = Array(1.0, 2.0, 4.0, 5.0)
val allValues = Array(1.0, 2.0, 0.0, 0.0, 4.0, 5.0)
val colPtrs = Array(0, 2, 4)
val rowIndices = Array(0, 1, 1, 2)
// transposed versions
val allValuesT = Array(1.0, 0.0, 2.0, 4.0, 0.0, 5.0)
val colPtrsT = Array(0, 1, 3, 4)
val rowIndicesT = Array(0, 0, 1, 1)
val spMat1 = new SparseMatrix(m, n, colPtrs, rowIndices, values)
val deMat1 = new DenseMatrix(m, n, allValues)
val spMat1T = new SparseMatrix(n, m, colPtrsT, rowIndicesT, values)
val deMat1T = new DenseMatrix(n, m, allValuesT)
// should equal spMat1 & deMat1 respectively
val spMat1TT = spMat1T.transpose
val deMat1TT = deMat1T.transpose
val deMat2 = Matrices.eye(3)
val spMat2 = Matrices.speye(3)
val deMat3 = Matrices.eye(2)
val spMat3 = Matrices.speye(2)
val spHorz = Matrices.horzcat(Array(spMat1, spMat2))
val spHorz2 = Matrices.horzcat(Array(spMat1, deMat2))
val spHorz3 = Matrices.horzcat(Array(deMat1, spMat2))
val deHorz1 = Matrices.horzcat(Array(deMat1, deMat2))
val deHorz2 = Matrices.horzcat(Array.empty[Matrix])
assert(deHorz1.numRows === 3)
assert(spHorz2.numRows === 3)
assert(spHorz3.numRows === 3)
assert(spHorz.numRows === 3)
assert(deHorz1.numCols === 5)
assert(spHorz2.numCols === 5)
assert(spHorz3.numCols === 5)
assert(spHorz.numCols === 5)
assert(deHorz2.numRows === 0)
assert(deHorz2.numCols === 0)
assert(deHorz2.toArray.length === 0)
assert(deHorz1 ~== spHorz2.asInstanceOf[SparseMatrix].toDense absTol 1e-15)
assert(spHorz2 ~== spHorz3 absTol 1e-15)
assert(spHorz(0, 0) === 1.0)
assert(spHorz(2, 1) === 5.0)
assert(spHorz(0, 2) === 1.0)
assert(spHorz(1, 2) === 0.0)
assert(spHorz(1, 3) === 1.0)
assert(spHorz(2, 4) === 1.0)
assert(spHorz(1, 4) === 0.0)
assert(deHorz1(0, 0) === 1.0)
assert(deHorz1(2, 1) === 5.0)
assert(deHorz1(0, 2) === 1.0)
assert(deHorz1(1, 2) == 0.0)
assert(deHorz1(1, 3) === 1.0)
assert(deHorz1(2, 4) === 1.0)
assert(deHorz1(1, 4) === 0.0)
// containing transposed matrices
val spHorzT = Matrices.horzcat(Array(spMat1TT, spMat2))
val spHorz2T = Matrices.horzcat(Array(spMat1TT, deMat2))
val spHorz3T = Matrices.horzcat(Array(deMat1TT, spMat2))
val deHorz1T = Matrices.horzcat(Array(deMat1TT, deMat2))
assert(deHorz1T ~== deHorz1 absTol 1e-15)
assert(spHorzT ~== spHorz absTol 1e-15)
assert(spHorz2T ~== spHorz2 absTol 1e-15)
assert(spHorz3T ~== spHorz3 absTol 1e-15)
intercept[IllegalArgumentException] {
Matrices.horzcat(Array(spMat1, spMat3))
}
intercept[IllegalArgumentException] {
Matrices.horzcat(Array(deMat1, spMat3))
}
val spVert = Matrices.vertcat(Array(spMat1, spMat3))
val deVert1 = Matrices.vertcat(Array(deMat1, deMat3))
val spVert2 = Matrices.vertcat(Array(spMat1, deMat3))
val spVert3 = Matrices.vertcat(Array(deMat1, spMat3))
val deVert2 = Matrices.vertcat(Array.empty[Matrix])
assert(deVert1.numRows === 5)
assert(spVert2.numRows === 5)
assert(spVert3.numRows === 5)
assert(spVert.numRows === 5)
assert(deVert1.numCols === 2)
assert(spVert2.numCols === 2)
assert(spVert3.numCols === 2)
assert(spVert.numCols === 2)
assert(deVert2.numRows === 0)
assert(deVert2.numCols === 0)
assert(deVert2.toArray.length === 0)
assert(deVert1 ~== spVert2.asInstanceOf[SparseMatrix].toDense absTol 1e-15)
assert(spVert2 ~== spVert3 absTol 1e-15)
assert(spVert(0, 0) === 1.0)
assert(spVert(2, 1) === 5.0)
assert(spVert(3, 0) === 1.0)
assert(spVert(3, 1) === 0.0)
assert(spVert(4, 1) === 1.0)
assert(deVert1(0, 0) === 1.0)
assert(deVert1(2, 1) === 5.0)
assert(deVert1(3, 0) === 1.0)
assert(deVert1(3, 1) === 0.0)
assert(deVert1(4, 1) === 1.0)
// containing transposed matrices
val spVertT = Matrices.vertcat(Array(spMat1TT, spMat3))
val deVert1T = Matrices.vertcat(Array(deMat1TT, deMat3))
val spVert2T = Matrices.vertcat(Array(spMat1TT, deMat3))
val spVert3T = Matrices.vertcat(Array(deMat1TT, spMat3))
assert(deVert1T ~== deVert1 absTol 1e-15)
assert(spVertT ~== spVert absTol 1e-15)
assert(spVert2T ~== spVert2 absTol 1e-15)
assert(spVert3T ~== spVert3 absTol 1e-15)
intercept[IllegalArgumentException] {
Matrices.vertcat(Array(spMat1, spMat2))
}
intercept[IllegalArgumentException] {
Matrices.vertcat(Array(deMat1, spMat2))
}
}
test("zeros") {
val mat = Matrices.zeros(2, 3).asInstanceOf[DenseMatrix]
assert(mat.numRows === 2)
assert(mat.numCols === 3)
assert(mat.values.forall(_ == 0.0))
}
test("ones") {
val mat = Matrices.ones(2, 3).asInstanceOf[DenseMatrix]
assert(mat.numRows === 2)
assert(mat.numCols === 3)
assert(mat.values.forall(_ == 1.0))
}
test("eye") {
val mat = Matrices.eye(2).asInstanceOf[DenseMatrix]
assert(mat.numCols === 2)
assert(mat.numCols === 2)
assert(mat.values.toSeq === Seq(1.0, 0.0, 0.0, 1.0))
}
test("rand") {
val rng = mock[Random]
when(rng.nextDouble()).thenReturn(1.0, 2.0, 3.0, 4.0)
val mat = Matrices.rand(2, 2, rng).asInstanceOf[DenseMatrix]
assert(mat.numRows === 2)
assert(mat.numCols === 2)
assert(mat.values.toSeq === Seq(1.0, 2.0, 3.0, 4.0))
}
test("randn") {
val rng = mock[Random]
when(rng.nextGaussian()).thenReturn(1.0, 2.0, 3.0, 4.0)
val mat = Matrices.randn(2, 2, rng).asInstanceOf[DenseMatrix]
assert(mat.numRows === 2)
assert(mat.numCols === 2)
assert(mat.values.toSeq === Seq(1.0, 2.0, 3.0, 4.0))
}
test("diag") {
val mat = Matrices.diag(Vectors.dense(1.0, 2.0)).asInstanceOf[DenseMatrix]
assert(mat.numRows === 2)
assert(mat.numCols === 2)
assert(mat.values.toSeq === Seq(1.0, 0.0, 0.0, 2.0))
}
test("sprand") {
val rng = mock[Random]
when(rng.nextInt(4)).thenReturn(0, 1, 1, 3, 2, 2, 0, 1, 3, 0)
when(rng.nextDouble()).thenReturn(1.0, 2.0, 3.0, 4.0, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0)
val mat = SparseMatrix.sprand(4, 4, 0.25, rng)
assert(mat.numRows === 4)
assert(mat.numCols === 4)
assert(mat.rowIndices.toSeq === Seq(3, 0, 2, 1))
assert(mat.values.toSeq === Seq(1.0, 2.0, 3.0, 4.0))
val mat2 = SparseMatrix.sprand(2, 3, 1.0, rng)
assert(mat2.rowIndices.toSeq === Seq(0, 1, 0, 1, 0, 1))
assert(mat2.colPtrs.toSeq === Seq(0, 2, 4, 6))
}
test("sprandn") {
val rng = mock[Random]
when(rng.nextInt(4)).thenReturn(0, 1, 1, 3, 2, 2, 0, 1, 3, 0)
when(rng.nextGaussian()).thenReturn(1.0, 2.0, 3.0, 4.0)
val mat = SparseMatrix.sprandn(4, 4, 0.25, rng)
assert(mat.numRows === 4)
assert(mat.numCols === 4)
assert(mat.rowIndices.toSeq === Seq(3, 0, 2, 1))
assert(mat.values.toSeq === Seq(1.0, 2.0, 3.0, 4.0))
}
test("MatrixUDT") {
val dm1 = new DenseMatrix(2, 2, Array(0.9, 1.2, 2.3, 9.8))
val dm2 = new DenseMatrix(3, 2, Array(0.0, 1.21, 2.3, 9.8, 9.0, 0.0))
val dm3 = new DenseMatrix(0, 0, Array())
val sm1 = dm1.toSparse
val sm2 = dm2.toSparse
val sm3 = dm3.toSparse
val mUDT = new MatrixUDT()
Seq(dm1, dm2, dm3, sm1, sm2, sm3).foreach {
mat => assert(mat.toArray === mUDT.deserialize(mUDT.serialize(mat)).toArray)
}
assert(mUDT.typeName == "matrix")
assert(mUDT.simpleString == "matrix")
}
test("toString") {
val empty = Matrices.ones(0, 0)
empty.toString(0, 0)
val mat = Matrices.rand(5, 10, new Random())
mat.toString(-1, -5)
mat.toString(0, 0)
mat.toString(Int.MinValue, Int.MinValue)
mat.toString(Int.MaxValue, Int.MaxValue)
var lines = mat.toString(6, 50).lines.toArray
assert(lines.size == 5 && lines.forall(_.size <= 50))
lines = mat.toString(5, 100).lines.toArray
assert(lines.size == 5 && lines.forall(_.size <= 100))
}
test("numNonzeros and numActives") {
val dm1 = Matrices.dense(3, 2, Array(0, 0, -1, 1, 0, 1))
assert(dm1.numNonzeros === 3)
assert(dm1.numActives === 6)
val sm1 = Matrices.sparse(3, 2, Array(0, 2, 3), Array(0, 2, 1), Array(0.0, -1.2, 0.0))
assert(sm1.numNonzeros === 1)
assert(sm1.numActives === 3)
}
test("fromBreeze with sparse matrix") {
    // colPtrs.last does NOT always equal values.length in a Breeze CSCMatrix, and
    // invocation of compact() may be necessary. Refer to SPARK-11507
val bm1: BM[Double] = new CSCMatrix[Double](
Array(1.0, 1, 1), 3, 3, Array(0, 1, 2, 3), Array(0, 1, 2))
val bm2: BM[Double] = new CSCMatrix[Double](
Array(1.0, 2, 2, 4), 3, 3, Array(0, 0, 2, 4), Array(1, 2, 1, 2))
val sum = bm1 + bm2
Matrices.fromBreeze(sum)
}
test("Test FromBreeze when Breeze.CSCMatrix.rowIndices has trailing zeros. - SPARK-20687") {
// (2, 0, 0)
// (2, 0, 0)
val mat1Brz = Matrices.sparse(2, 3, Array(0, 2, 2, 2), Array(0, 1), Array(2, 2)).asBreeze
// (2, 1E-15, 1E-15)
// (2, 1E-15, 1E-15)
val mat2Brz = Matrices.sparse(2, 3,
Array(0, 2, 4, 6),
Array(0, 0, 0, 1, 1, 1),
Array(2, 1E-15, 1E-15, 2, 1E-15, 1E-15)).asBreeze
val t1Brz = mat1Brz - mat2Brz
val t2Brz = mat2Brz - mat1Brz
// The following operations raise exceptions on un-patch Matrices.fromBreeze
val t1 = Matrices.fromBreeze(t1Brz)
val t2 = Matrices.fromBreeze(t2Brz)
// t1 == t1Brz && t2 == t2Brz
assert((t1.asBreeze - t1Brz).iterator.map((x) => math.abs(x._2)).sum < 1E-15)
assert((t2.asBreeze - t2Brz).iterator.map((x) => math.abs(x._2)).sum < 1E-15)
}
test("row/col iterator") {
val dm = new DenseMatrix(3, 2, Array(0, 1, 2, 3, 4, 0))
val sm = dm.toSparse
val rows = Seq(Vectors.dense(0, 3), Vectors.dense(1, 4), Vectors.dense(2, 0))
val cols = Seq(Vectors.dense(0, 1, 2), Vectors.dense(3, 4, 0))
for (m <- Seq(dm, sm)) {
assert(m.rowIter.toSeq === rows)
assert(m.colIter.toSeq === cols)
assert(m.transpose.rowIter.toSeq === cols)
assert(m.transpose.colIter.toSeq === rows)
}
}
test("conversions between new local linalg and mllib linalg") {
val dm: DenseMatrix = new DenseMatrix(3, 2, Array(0.0, 0.0, 1.0, 0.0, 2.0, 3.5))
val sm: SparseMatrix = dm.toSparse
val sm0: Matrix = sm.asInstanceOf[Matrix]
val dm0: Matrix = dm.asInstanceOf[Matrix]
def compare(oldM: Matrix, newM: newlinalg.Matrix): Unit = {
assert(oldM.toArray === newM.toArray)
assert(oldM.numCols === newM.numCols)
assert(oldM.numRows === newM.numRows)
}
val newSM: newlinalg.SparseMatrix = sm.asML
val newDM: newlinalg.DenseMatrix = dm.asML
val newSM0: newlinalg.Matrix = sm0.asML
val newDM0: newlinalg.Matrix = dm0.asML
assert(newSM0.isInstanceOf[newlinalg.SparseMatrix])
assert(newDM0.isInstanceOf[newlinalg.DenseMatrix])
compare(sm, newSM)
compare(dm, newDM)
compare(sm0, newSM0)
compare(dm0, newDM0)
val oldSM: SparseMatrix = SparseMatrix.fromML(newSM)
val oldDM: DenseMatrix = DenseMatrix.fromML(newDM)
val oldSM0: Matrix = Matrices.fromML(newSM0)
val oldDM0: Matrix = Matrices.fromML(newDM0)
assert(oldSM0.isInstanceOf[SparseMatrix])
assert(oldDM0.isInstanceOf[DenseMatrix])
compare(oldSM, newSM)
compare(oldDM, newDM)
compare(oldSM0, newSM0)
compare(oldDM0, newDM0)
}
test("implicit conversions between new local linalg and mllib linalg") {
def mllibMatrixToTriple(m: Matrix): (Array[Double], Int, Int) =
(m.toArray, m.numCols, m.numRows)
def mllibDenseMatrixToTriple(m: DenseMatrix): (Array[Double], Int, Int) =
(m.toArray, m.numCols, m.numRows)
def mllibSparseMatrixToTriple(m: SparseMatrix): (Array[Double], Int, Int) =
(m.toArray, m.numCols, m.numRows)
def mlMatrixToTriple(m: newlinalg.Matrix): (Array[Double], Int, Int) =
(m.toArray, m.numCols, m.numRows)
def mlDenseMatrixToTriple(m: newlinalg.DenseMatrix): (Array[Double], Int, Int) =
(m.toArray, m.numCols, m.numRows)
def mlSparseMatrixToTriple(m: newlinalg.SparseMatrix): (Array[Double], Int, Int) =
(m.toArray, m.numCols, m.numRows)
def compare(m1: (Array[Double], Int, Int), m2: (Array[Double], Int, Int)): Unit = {
assert(m1._1 === m2._1)
assert(m1._2 === m2._2)
assert(m1._3 === m2._3)
}
val dm: DenseMatrix = new DenseMatrix(3, 2, Array(0.0, 0.0, 1.0, 0.0, 2.0, 3.5))
val sm: SparseMatrix = dm.toSparse
val sm0: Matrix = sm.asInstanceOf[Matrix]
val dm0: Matrix = dm.asInstanceOf[Matrix]
val newSM: newlinalg.SparseMatrix = sm.asML
val newDM: newlinalg.DenseMatrix = dm.asML
val newSM0: newlinalg.Matrix = sm0.asML
val newDM0: newlinalg.Matrix = dm0.asML
import org.apache.spark.mllib.linalg.MatrixImplicits._
compare(mllibMatrixToTriple(dm0), mllibMatrixToTriple(newDM0))
compare(mllibMatrixToTriple(sm0), mllibMatrixToTriple(newSM0))
compare(mllibDenseMatrixToTriple(dm), mllibDenseMatrixToTriple(newDM))
compare(mllibSparseMatrixToTriple(sm), mllibSparseMatrixToTriple(newSM))
compare(mlMatrixToTriple(dm0), mlMatrixToTriple(newDM))
compare(mlMatrixToTriple(sm0), mlMatrixToTriple(newSM0))
compare(mlDenseMatrixToTriple(dm), mlDenseMatrixToTriple(newDM))
compare(mlSparseMatrixToTriple(sm), mlSparseMatrixToTriple(newSM))
}
}
| lvdongr/spark | mllib/src/test/scala/org/apache/spark/mllib/linalg/MatricesSuite.scala | Scala | apache-2.0 | 22,827 |
/* Copyright 2009-2016 EPFL, Lausanne */
package leon
package evaluators
import purescala.Common._
import purescala.Definitions._
import purescala.Expressions._
import codegen.CompilationUnit
import codegen.CompiledExpression
import codegen.CodeGenParams
import leon.codegen.runtime.LeonCodeGenRuntimeException
import leon.codegen.runtime.LeonCodeGenEvaluationException
class CodeGenEvaluator(ctx: LeonContext, val unit: CompilationUnit) extends Evaluator(ctx, unit.program) with DeterministicEvaluator {
val name = "codegen-eval"
val description = "Evaluator for PureScala expressions based on compilation to JVM"
val bank = unit.bank
/** Another constructor to make it look more like other `Evaluator`s. */
def this(ctx: LeonContext, prog: Program, bank: EvaluationBank = new EvaluationBank, params: CodeGenParams = CodeGenParams.default) {
this(ctx, new CompilationUnit(ctx, prog, bank, params))
}
private def compileExpr(expression: Expr, args: Seq[Identifier]): Option[CompiledExpression] = {
ctx.timers.evaluators.codegen.compilation.start()
try {
Some(unit.compileExpression(expression, args)(ctx))
} catch {
case t: Throwable =>
ctx.reporter.warning(expression.getPos, "Error while compiling expression: "+t.getMessage)
None
} finally {
ctx.timers.evaluators.codegen.compilation.stop()
}
}
def eval(expression: Expr, model: solvers.Model) : EvaluationResult = {
compile(expression, model.toSeq.map(_._1)).map { e =>
ctx.timers.evaluators.codegen.runtime.start()
val res = e(model)
ctx.timers.evaluators.codegen.runtime.stop()
res
}.getOrElse(EvaluationResults.EvaluatorError(s"Couldn't compile expression $expression"))
}
override def compile(expression: Expr, args: Seq[Identifier]) : Option[solvers.Model=>EvaluationResult] = {
compileExpr(expression, args).map(ce => (model: solvers.Model) => {
if (args.exists(arg => !model.isDefinedAt(arg))) {
EvaluationResults.EvaluatorError("Model undefined for free arguments")
} else try {
EvaluationResults.Successful(ce.eval(model))
} catch {
case e : ArithmeticException =>
EvaluationResults.RuntimeError(e.getMessage)
case e : ArrayIndexOutOfBoundsException =>
EvaluationResults.RuntimeError(e.getMessage)
case e : LeonCodeGenRuntimeException =>
EvaluationResults.RuntimeError(e.getMessage)
case e : LeonCodeGenEvaluationException =>
EvaluationResults.EvaluatorError(e.getMessage)
case e : java.lang.ExceptionInInitializerError =>
EvaluationResults.RuntimeError(e.getException.getMessage)
case so : java.lang.StackOverflowError =>
EvaluationResults.RuntimeError("Stack overflow")
}
})
}
}
| regb/leon | src/main/scala/leon/evaluators/CodeGenEvaluator.scala | Scala | gpl-3.0 | 2,872 |
// Databricks notebook source exported at Sun, 19 Jun 2016 11:29:39 UTC
// MAGIC %md
// MAGIC
// MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
// MAGIC
// MAGIC
// MAGIC ### prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand)
// MAGIC
// MAGIC *supported by* [](https://databricks.com/)
// MAGIC and
// MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome)
// COMMAND ----------
// MAGIC %md
// MAGIC This is an elaboration of the [Apache Spark 1.6 mllib-programming-guide on mllib-data-types](http://spark.apache.org/docs/latest/mllib-data-types.html).
// MAGIC
// MAGIC # [Overview](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/000_MLlibProgGuide)
// MAGIC
// MAGIC ## [Data Types - MLlib Programming Guide](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/000_dataTypesProgGuide)
// MAGIC
// MAGIC - [Local vector](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/001_LocalVector) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#local-vector)
// MAGIC - [Labeled point](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/002_LabeledPoint) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#labeled-point)
// MAGIC - [Local matrix](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/003_LocalMatrix) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#local-matrix)
// MAGIC - [Distributed matrix](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/004_DistributedMatrix) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#distributed-matrix)
// MAGIC - [RowMatrix](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/005_RowMatrix) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#rowmatrix)
// MAGIC - [IndexedRowMatrix](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/006_IndexedRowMatrix) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#indexedrowmatrix)
// MAGIC - [CoordinateMatrix](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/007_CoordinateMatrix) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#coordinatematrix)
// MAGIC - [BlockMatrix](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/008_BlockMatrix) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#blockmatrix)
// MAGIC
// MAGIC MLlib supports local vectors and matrices stored on a single machine, as
// MAGIC well as distributed matrices backed by one or more RDDs. Local vectors
// MAGIC and local matrices are simple data models that serve as public
// MAGIC interfaces. The underlying linear algebra operations are provided by
// MAGIC [Breeze](http://www.scalanlp.org/) and [jblas](http://jblas.org/). A
// MAGIC training example used in supervised learning is called a “labeled point”
// MAGIC in MLlib.
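// COMMAND ----------
// MAGIC %md
// MAGIC As a minimal sketch (assuming the standard Spark 1.6 `org.apache.spark.mllib` packages are attached to the cluster), the local data types described above can be constructed as follows; the values are arbitrary illustrative numbers.
// COMMAND ----------
import org.apache.spark.mllib.linalg.{Matrices, Vectors}
import org.apache.spark.mllib.regression.LabeledPoint
// dense and sparse local vectors holding the same values
val dv = Vectors.dense(1.0, 0.0, 3.0)
val sv = Vectors.sparse(3, Array(0, 2), Array(1.0, 3.0))
// a labeled training example: label 1.0 with the sparse feature vector above
val pos = LabeledPoint(1.0, sv)
// a 3 x 2 dense local matrix, stored in column-major order
val dm = Matrices.dense(3, 2, Array(1.0, 3.0, 5.0, 2.0, 4.0, 6.0))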
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
// MAGIC
// MAGIC
// MAGIC ### prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand)
// MAGIC
// MAGIC *supported by* [](https://databricks.com/)
// MAGIC and
// MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome) | lamastex/scalable-data-science | db/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/000_dataTypesProgGuide.scala | Scala | unlicense | 4,336 |
package com.gravity.gdk.placement
import com.fasterxml.jackson.core.JsonParseException
import com.gravity.gdk.config.DefaultSettings
import com.gravity.gdk.reco.{RecoContext, RecoResult}
import scala.collection.mutable
import scala.concurrent._
import scalaj.http.{BaseHttp, Http}
/*
___...---''
___...---'\\'___
'' _.-'' _`'.______\\\\.
/_.) )..- __..--'\\\\
( __..--''
'-''\\@
Ⓐ Ⓐ Ⓐ Ⓐ Ⓐ Ⓐ Ⓐ Ⓐ Ⓐ Ⓐ Ⓐ
*/
/**
* Article recommendations are generated against a placement. Placements at this time are configured for you by a
* Gravity Account Manager.
*/
case class Placement(key: PlacementKey) {
/**
* @param limit Limit number of article recommendations. The default is the number of articles configured for your
* placement. The limit you specify must be <= the number of articles
* configured for your placement.
* @param bucketNumber Optional bucket number override; if you don't know what this is or haven't received
* instructions from Gravity to use this parameter, then it is safe to ignore.
* @param logImpression If FALSE, you *must* log your impression using [[com.gravity.gdk.impression.Impression.log]].
*/
def getRecos(
limit: Int = 0,
bucketNumber: Int = 0,
logImpression: Boolean = true
)(implicit recoCtx: RecoContext, http: BaseHttp = Http, ec: ExecutionContext): Future[RecoResult] = Future {
val params = mutable.HashMap(
"placement" -> key.placementId.toString,
"userguid" -> recoCtx.user.userGuid,
"url" -> recoCtx.currentUrl,
"_" -> System.currentTimeMillis().toString,
"pageViewGuid" -> recoCtx.pageViewGuid.guid
)
if(limit > 0)
params += "limit" -> limit.toString
if(recoCtx.imageWidth > 0)
params += "imageWidth" -> recoCtx.imageWidth.toString
if(recoCtx.imageHeight > 0)
params += "imageHeight" -> recoCtx.imageHeight.toString
if(bucketNumber > 0)
params += "bucket" -> bucketNumber.toString
if(!logImpression)
params += "logResult" -> "0"
val response = http(DefaultSettings.apiRecosUrl(key.placementId)).params(params.toMap).asString
if(response.isSuccess) {
try {
RecoResult.fromApiResponse(response) match {
case Left(errors) =>
throw PlacementGetRecosResponseFormatException(errors)
case Right(recoResult) =>
recoResult
}
}
catch {
case ex: JsonParseException => throw PlacementGetRecosResponseJsonParseException(ex)
}
}
else
throw PlacementGetRecosBadHttpStatusException(response.code)
}
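  // Minimal usage sketch (illustrative): `placement`, the implicit RecoContext and the ExecutionContext
  // are assumed to be supplied by the caller.
  //   placement.getRecos(limit = 4).foreach { recoResult => /* render the recommendations */ }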
} | GravityLabs/gdk-scala | src/main/scala/com/gravity/gdk/placement/Placement.scala | Scala | apache-2.0 | 2,721 |
/***********************************************************************
* Copyright (c) 2013-2018 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.process.knn
import com.typesafe.scalalogging.LazyLogging
import com.vividsolutions.jts.geom.Point
import org.geotools.data.Query
import org.geotools.data.simple.{SimpleFeatureCollection, SimpleFeatureSource}
import org.geotools.feature.DefaultFeatureCollection
import org.geotools.feature.visitor.{AbstractCalcResult, CalcResult}
import org.geotools.process.factory.{DescribeParameter, DescribeProcess, DescribeResult}
import org.geotools.util.NullProgressListener
import org.locationtech.geomesa.process.{GeoMesaProcess, GeoMesaProcessVisitor}
import org.locationtech.geomesa.utils.collection.SelfClosingIterator
import org.opengis.feature.Feature
import scala.collection.JavaConverters._
@DescribeProcess(
title = "Geomesa-enabled K Nearest Neighbor Search",
description = "Performs a K Nearest Neighbor search on a Geomesa feature collection using another feature collection as input"
)
class KNearestNeighborSearchProcess extends GeoMesaProcess with LazyLogging {
@DescribeResult(description = "Output feature collection")
def execute(
@DescribeParameter(
name = "inputFeatures",
description = "Input feature collection that defines the KNN search")
inputFeatures: SimpleFeatureCollection,
@DescribeParameter(
name = "dataFeatures",
description = "The data set to query for matching features")
dataFeatures: SimpleFeatureCollection,
@DescribeParameter(
name = "numDesired",
description = "K: number of nearest neighbors to return")
numDesired: java.lang.Integer,
@DescribeParameter(
name = "estimatedDistance",
description = "Estimate of Search Distance in meters for K neighbors---used to set the granularity of the search")
estimatedDistance: java.lang.Double,
@DescribeParameter(
name = "maxSearchDistance",
description = "Maximum search distance in meters---used to prevent runaway queries of the entire table")
maxSearchDistance: java.lang.Double
): SimpleFeatureCollection = {
logger.debug("Attempting Geomesa K-Nearest Neighbor Search on collection type " + dataFeatures.getClass.getName)
val visitor = new KNNVisitor(inputFeatures, dataFeatures, numDesired, estimatedDistance, maxSearchDistance)
dataFeatures.accepts(visitor, new NullProgressListener)
visitor.getResult.asInstanceOf[KNNResult].results
}
}
/**
* The main visitor class for the KNN search process
*/
class KNNVisitor( inputFeatures: SimpleFeatureCollection,
dataFeatures: SimpleFeatureCollection,
numDesired: java.lang.Integer,
estimatedDistance: java.lang.Double,
maxSearchDistance: java.lang.Double
) extends GeoMesaProcessVisitor with LazyLogging {
val manualVisitResults = new DefaultFeatureCollection(null, dataFeatures.getSchema)
  // called for non-AccumuloFeatureCollections
// FIXME Implement as detailed in GEOMESA-284
def visit(feature: Feature): Unit = {}
var resultCalc: KNNResult = KNNResult(manualVisitResults)
override def getResult: CalcResult = resultCalc
/** The KNN-Search interface for the WPS process.
*
   *  Takes as input a Query and SimpleFeatureSource, in addition to inputFeatures,
   *  which define one or more SimpleFeatures for each of which the K nearest neighbors are found.
*
* Note that the results are NOT de-duplicated!
*
*/
override def execute(source: SimpleFeatureSource, query: Query): Unit = {
logger.debug("Running Geomesa K-Nearest Neighbor Search on source type " + source.getClass.getName)
// create a new Feature collection to hold the results of the KNN search around each point
val resultCollection = new DefaultFeatureCollection
val searchFeatureIterator = SelfClosingIterator(inputFeatures.features())
// for each entry in the inputFeatures collection:
while (searchFeatureIterator.hasNext) {
val aFeatureForSearch = searchFeatureIterator.next()
aFeatureForSearch.getDefaultGeometry match {
case geo: Point =>
val knnResults = KNNQuery.runNewKNNQuery(source, query, numDesired, estimatedDistance, maxSearchDistance, aFeatureForSearch)
// extract the SimpleFeatures and convert to a Collection. Ordering will not be preserved.
val sfList = knnResults.getK.map {_.sf}.asJavaCollection
resultCollection.addAll(sfList)
case _ => logger.warn("K Nearest Neighbor Search not implemented for non-point geometries, skipping this Feature")
}
}
resultCalc = KNNResult(resultCollection)
}
}
case class KNNResult(results: SimpleFeatureCollection) extends AbstractCalcResult
| ddseapy/geomesa | geomesa-process/geomesa-process-vector/src/main/scala/org/locationtech/geomesa/process/knn/KNearestNeighborSearchProcess.scala | Scala | apache-2.0 | 5,396 |
package scalan.util
import java.io.{InputStreamReader, BufferedReader, File}
import scala.collection.mutable
object ProcessUtil {
def launch(workingDir: File, command: String*): Array[String] = {
val absoluteWorkingDir = workingDir.getAbsoluteFile
val builder = new ProcessBuilder(command: _*).
directory(absoluteWorkingDir).
redirectErrorStream(true)
val proc = builder.start()
val reader = new BufferedReader(new InputStreamReader(proc.getInputStream))
val ar = mutable.ArrayBuffer[String]()
var notDone = true
while (notDone) {
notDone = reader.readLine() match {
case null => false
case s2: String =>
ar += s2
true
}
}
reader.close()
val exitCode = proc.waitFor()
exitCode match {
case 0 => ar.toArray
case _ => throw new RuntimeException(s"Executing '${command.mkString(" ")}' in directory $absoluteWorkingDir returned exit code $exitCode with following output:\n${ar.mkString("\n")}")
}
}
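  // Usage sketch (illustrative): run `git status` in the current working directory and collect its
  // combined stdout/stderr lines; a non-zero exit code results in a RuntimeException.
  //   val lines = ProcessUtil.launch(new File("."), "git", "status")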
}
| PCMNN/scalan-ce | common/src/main/scala/scalan/util/ProcessUtil.scala | Scala | apache-2.0 | 1,024 |
package org.apache.mesos.chronos.scheduler.api
import java.util.logging.{Level, Logger}
import javax.ws.rs._
import javax.ws.rs.core.Response.Status
import javax.ws.rs.core.{MediaType, Response}
import org.apache.mesos.chronos.scheduler.config.{CassandraConfiguration, SchedulerConfiguration}
import org.apache.mesos.chronos.scheduler.graph.JobGraph
import org.apache.mesos.chronos.scheduler.jobs._
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.databind.module.SimpleModule
import com.codahale.metrics.annotation.Timed
import com.google.inject.Inject
import org.apache.mesos.chronos.scheduler.jobs.stats.JobStats
import org.joda.time.{DateTime, DateTimeZone}
import scala.collection.mutable.ListBuffer
/**
* The REST API for managing jobs.
* @author Florian Leibert ([email protected])
*/
//TODO(FL): Create a case class that removes epsilon from the dependent.
@Path(PathConstants.jobBasePath)
@Produces(Array(MediaType.APPLICATION_JSON))
@Consumes(Array(MediaType.APPLICATION_JSON))
class JobManagementResource @Inject()(val jobScheduler: JobScheduler,
val jobGraph: JobGraph,
val configuration: SchedulerConfiguration,
val cassandraConfig: CassandraConfiguration,
val jobStats: JobStats,
val jobMetrics: JobMetrics) {
private[this] val log = Logger.getLogger(getClass.getName)
private val objectMapper = new ObjectMapper
private val mod = new SimpleModule("JobManagementResourceModule")
mod.addSerializer(classOf[JobStatWrapper], new JobStatWrapperSerializer)
objectMapper.registerModule(mod)
@Path(PathConstants.jobPatternPath)
@DELETE
@Timed
def delete(@PathParam("jobName") jobName: String): Response = {
try {
require(jobGraph.lookupVertex(jobName).nonEmpty, "Job '%s' not found".format(jobName))
val job = jobGraph.lookupVertex(jobName).get
val children = jobGraph.getChildren(jobName)
if (children.nonEmpty) {
job match {
case j: DependencyBasedJob =>
val parents = jobGraph.parentJobs(j)
children.foreach {
child =>
val childJob = jobGraph.lookupVertex(child).get.asInstanceOf[DependencyBasedJob]
val newParents = childJob.parents.filter { name => name != job.name} ++ j.parents
val newChild = childJob.copy(parents = newParents)
jobScheduler.replaceJob(childJob, newChild)
parents.foreach { p =>
jobGraph.removeDependency(p.name, job.name)
jobGraph.addDependency(p.name, newChild.name)
}
}
case j: ScheduleBasedJob =>
children.foreach {
child =>
jobGraph.lookupVertex(child).get match {
case childJob: DependencyBasedJob =>
val newChild = new ScheduleBasedJob(
schedule = j.schedule,
scheduleTimeZone = j.scheduleTimeZone,
name = childJob.name,
command = childJob.command,
epsilon = childJob.epsilon,
successCount = childJob.successCount,
errorCount = childJob.errorCount,
executor = childJob.executor,
executorFlags = childJob.executorFlags,
taskInfoData = childJob.taskInfoData,
retries = childJob.retries,
owner = childJob.owner,
lastError = childJob.lastError,
lastSuccess = childJob.lastSuccess,
async = childJob.async,
cpus = childJob.cpus,
disk = childJob.disk,
mem = childJob.mem,
disabled = childJob.disabled,
softError = childJob.softError,
uris = childJob.uris,
fetch = childJob.fetch,
highPriority = childJob.highPriority
)
jobScheduler.updateJob(childJob, newChild)
case _ =>
}
}
}
}
// No need to send notifications here, the jobScheduler.deregisterJob will do it
jobScheduler.deregisterJob(job, persist = true)
Response.noContent().build
} catch {
case ex: IllegalArgumentException => {
log.log(Level.INFO, "Bad Request", ex)
Response.status(Response.Status.BAD_REQUEST).entity(ex.getMessage)
.build()
}
case ex: Exception => {
log.log(Level.WARNING, "Exception while serving request", ex)
Response.serverError().build
}
}
}
@GET
@Path(PathConstants.jobStatsPatternPath)
def getStat(@PathParam("jobName") jobName: String): Response = {
try {
val jobOpt = jobGraph.lookupVertex(jobName)
require(jobOpt.nonEmpty, "Job '%s' not found".format(jobName))
val histoStats = jobMetrics.getJobHistogramStats(jobName)
val jobStatsList: List[TaskStat] = jobStats.getMostRecentTaskStatsByJob(jobOpt.get, cassandraConfig.jobHistoryLimit())
val jobStatsWrapper = new JobStatWrapper(jobStatsList, histoStats)
val wrapperStr = objectMapper.writeValueAsString(jobStatsWrapper)
Response.ok(wrapperStr).build()
} catch {
case ex: IllegalArgumentException =>
log.log(Level.INFO, "Bad Request", ex)
Response.status(Response.Status.BAD_REQUEST).entity(ex.getMessage)
.build()
case ex: Exception =>
log.log(Level.WARNING, "Exception while serving request", ex)
Response.serverError().build
}
}
@Path(PathConstants.jobPatternPath)
@PUT
@Timed
def trigger(@PathParam("jobName") jobName: String,
@QueryParam("arguments") arguments: String
): Response = {
try {
require(jobGraph.lookupVertex(jobName).isDefined, "Job '%s' not found".format(jobName))
val job = jobGraph.getJobForName(jobName).get
log.info("Manually triggering job:" + jobName)
jobScheduler.taskManager.enqueue(TaskUtils.getTaskId(job, DateTime.now(DateTimeZone.UTC), 0, Option(arguments).filter(_.trim.nonEmpty))
, job.highPriority)
Response.noContent().build
} catch {
case ex: IllegalArgumentException =>
log.log(Level.INFO, "Bad Request", ex)
Response.status(Response.Status.BAD_REQUEST).entity(ex.getMessage)
.build()
case ex: Exception =>
log.log(Level.WARNING, "Exception while serving request", ex)
Response.serverError().build
}
}
/**
* Mark Job successful
*/
@Path(PathConstants.jobSuccessPath)
@PUT
@Timed
def markJobSuccessful(@PathParam("jobName") jobName: String): Response = {
try {
val success = jobScheduler.markJobSuccessAndFireOffDependencies(jobName)
Response.ok("marked job %s as successful: %b".format(jobName, success)).build()
} catch {
case ex: IllegalArgumentException =>
log.log(Level.INFO, "Bad Request", ex)
Response.status(Response.Status.BAD_REQUEST).entity(ex.getMessage)
.build()
case ex: Exception =>
log.log(Level.WARNING, "Exception while serving request", ex)
Response.serverError().build
}
}
/**
   * Allows a user to update the elements processed count for a job that
* supports data tracking. The processed count has to be non-negative.
*/
@POST
@Path(PathConstants.jobTaskProgressPath)
def updateTaskProgress(@PathParam("jobName") jobName: String,
@PathParam("taskId") taskId: String,
taskStat: TaskStat) : Response = {
try {
val jobOpt = jobGraph.lookupVertex(jobName)
require(jobOpt.nonEmpty, "Job '%s' not found".format(jobName))
require(TaskUtils.isValidVersion(taskId), "Invalid task id format %s".format(taskId))
require(jobOpt.get.dataProcessingJobType, "Job '%s' is not enabled to track data".format(jobName))
taskStat.numAdditionalElementsProcessed.foreach {
num =>
//NOTE: 0 is a valid value
require(num >= 0,
"numAdditionalElementsProcessed (%d) is not positive".format(num))
jobStats.updateTaskProgress(jobOpt.get, taskId, num)
}
Response.noContent().build
} catch {
case ex: IllegalArgumentException =>
log.log(Level.INFO, "Bad Request", ex)
Response.status(Response.Status.BAD_REQUEST).entity(ex.getMessage).build
case ex: Exception =>
log.log(Level.WARNING, "Exception while serving request", ex)
Response.serverError().build
}
}
@Path(PathConstants.allJobsPath)
@GET
@Timed
def list(): Response = {
try {
import scala.collection.JavaConversions._
val jobs = jobGraph.dag.vertexSet()
.map { jobGraph.getJobForName }
.flatten
        .map { // copies fetch into uris or uris into fetch (only one can be set), __only__ in the REST GET, for compatibility
case j : ScheduleBasedJob =>
if(j.fetch.isEmpty) j.copy(fetch = j.uris.map { Fetch(_) })
else j.copy(uris = j.fetch.map { _.uri })
case j : DependencyBasedJob =>
if(j.fetch.isEmpty) j.copy(fetch = j.uris.map { Fetch(_) })
else j.copy(uris = j.fetch.map { _.uri })
}
Response.ok(jobs).build
} catch {
case ex: Exception =>
log.log(Level.WARNING, "Exception while serving request", ex)
throw new WebApplicationException(Status.INTERNAL_SERVER_ERROR)
}
}
@GET
@Path(PathConstants.jobSearchPath)
@Timed
def search(@QueryParam("name") name: String,
@QueryParam("command") command: String,
@QueryParam("any") any: String,
@QueryParam("limit") limit: Integer,
@QueryParam("offset") offset: Integer
) = {
try {
val jobs = ListBuffer[BaseJob]()
import scala.collection.JavaConversions._
jobGraph.dag.vertexSet().map({
job =>
jobs += jobGraph.getJobForName(job).get
})
val _limit: Integer = limit match {
case x: Integer =>
x
case _ =>
10
}
val _offset: Integer = offset match {
case x: Integer =>
x
case _ =>
0
}
val filteredJobs = jobs.filter {
x =>
var valid = true
if (name != null && !name.isEmpty && !x.name.toLowerCase.contains(name.toLowerCase)) {
valid = false
}
if (command != null && !command.isEmpty && !x.command.toLowerCase.contains(command.toLowerCase)) {
valid = false
}
if (!valid && any != null && !any.isEmpty &&
(x.name.toLowerCase.contains(any.toLowerCase) || x.command.toLowerCase.contains(any.toLowerCase))) {
valid = true
}
// Maybe add some other query parameters?
valid
}.toList.slice(_offset, _offset + _limit)
Response.ok(filteredJobs).build
} catch {
case ex: Exception =>
log.log(Level.WARNING, "Exception while serving request", ex)
throw new WebApplicationException(Status.INTERNAL_SERVER_ERROR)
}
}
}
| mikkokupsu/chronos | src/main/scala/org/apache/mesos/chronos/scheduler/api/JobManagementResource.scala | Scala | apache-2.0 | 11,510 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.TypeCheckResult
import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode}
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util.TypeUtils
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
object InterpretedPredicate {
def create(expression: Expression, inputSchema: Seq[Attribute]): (InternalRow => Boolean) =
create(BindReferences.bindReference(expression, inputSchema))
def create(expression: Expression): (InternalRow => Boolean) = {
(r: InternalRow) => expression.eval(r).asInstanceOf[Boolean]
}
}
/**
* An [[Expression]] that returns a boolean value.
*/
trait Predicate extends Expression {
override def dataType: DataType = BooleanType
}
trait PredicateHelper {
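  // Splits a predicate on `And` into its conjunctive parts,
  // e.g. (a AND b) AND c  =>  Seq(a, b, c).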
protected def splitConjunctivePredicates(condition: Expression): Seq[Expression] = {
condition match {
case And(cond1, cond2) =>
splitConjunctivePredicates(cond1) ++ splitConjunctivePredicates(cond2)
case other => other :: Nil
}
}
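  // Dual of the above: splits a predicate on `Or` into its disjunctive parts,
  // e.g. (a OR b) OR c  =>  Seq(a, b, c).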
protected def splitDisjunctivePredicates(condition: Expression): Seq[Expression] = {
condition match {
case Or(cond1, cond2) =>
splitDisjunctivePredicates(cond1) ++ splitDisjunctivePredicates(cond2)
case other => other :: Nil
}
}
// Substitute any known alias from a map.
protected def replaceAlias(
condition: Expression,
aliases: AttributeMap[Expression]): Expression = {
// Use transformUp to prevent infinite recursion when the replacement expression
    // redefines the same ExprId.
condition.transformUp {
case a: Attribute =>
aliases.getOrElse(a, a)
}
}
/**
* Returns true if `expr` can be evaluated using only the output of `plan`. This method
* can be used to determine when it is acceptable to move expression evaluation within a query
* plan.
*
* For example consider a join between two relations R(a, b) and S(c, d).
*
* - `canEvaluate(EqualTo(a,b), R)` returns `true`
* - `canEvaluate(EqualTo(a,c), R)` returns `false`
* - `canEvaluate(Literal(1), R)` returns `true` as literals CAN be evaluated on any plan
*/
protected def canEvaluate(expr: Expression, plan: LogicalPlan): Boolean =
expr.references.subsetOf(plan.outputSet)
}
@ExpressionDescription(
usage = "_FUNC_ expr - Logical not.")
case class Not(child: Expression)
extends UnaryExpression with Predicate with ImplicitCastInputTypes with NullIntolerant {
override def toString: String = s"NOT $child"
override def inputTypes: Seq[DataType] = Seq(BooleanType)
protected override def nullSafeEval(input: Any): Any = !input.asInstanceOf[Boolean]
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, c => s"!($c)")
}
override def sql: String = s"(NOT ${child.sql})"
}
/**
* Evaluates to `true` if `list` contains `value`.
*/
@ExpressionDescription(
usage = "expr1 _FUNC_(expr2, expr3, ...) - Returns true if `expr` equals to any valN.")
case class In(value: Expression, list: Seq[Expression]) extends Predicate
with ImplicitCastInputTypes {
require(list != null, "list should not be null")
override def inputTypes: Seq[AbstractDataType] = value.dataType +: list.map(_.dataType)
override def checkInputDataTypes(): TypeCheckResult = {
if (list.exists(l => l.dataType != value.dataType)) {
TypeCheckResult.TypeCheckFailure(
"Arguments must be same type")
} else {
TypeCheckResult.TypeCheckSuccess
}
}
override def children: Seq[Expression] = value +: list
lazy val inSetConvertible = list.forall(_.isInstanceOf[Literal])
override def nullable: Boolean = children.exists(_.nullable)
override def foldable: Boolean = children.forall(_.foldable)
override def toString: String = s"$value IN ${list.mkString("(", ",", ")")}"
override def eval(input: InternalRow): Any = {
val evaluatedValue = value.eval(input)
if (evaluatedValue == null) {
null
} else {
var hasNull = false
list.foreach { e =>
val v = e.eval(input)
if (v == evaluatedValue) {
return true
} else if (v == null) {
hasNull = true
}
}
if (hasNull) {
null
} else {
false
}
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val valueGen = value.genCode(ctx)
val listGen = list.map(_.genCode(ctx))
val listCode = listGen.map(x =>
s"""
if (!${ev.value}) {
${x.code}
if (${x.isNull}) {
${ev.isNull} = true;
} else if (${ctx.genEqual(value.dataType, valueGen.value, x.value)}) {
${ev.isNull} = false;
${ev.value} = true;
}
}
""").mkString("\n")
ev.copy(code = s"""
${valueGen.code}
boolean ${ev.value} = false;
boolean ${ev.isNull} = ${valueGen.isNull};
if (!${ev.isNull}) {
$listCode
}
""")
}
override def sql: String = {
val childrenSQL = children.map(_.sql)
val valueSQL = childrenSQL.head
val listSQL = childrenSQL.tail.mkString(", ")
s"($valueSQL IN ($listSQL))"
}
}
/**
* Optimized version of In clause, when all filter values of In clause are
* static.
*/
case class InSet(child: Expression, hset: Set[Any]) extends UnaryExpression with Predicate {
require(hset != null, "hset could not be null")
override def toString: String = s"$child INSET ${hset.mkString("(", ",", ")")}"
@transient private[this] lazy val hasNull: Boolean = hset.contains(null)
override def nullable: Boolean = child.nullable || hasNull
protected override def nullSafeEval(value: Any): Any = {
if (hset.contains(value)) {
true
} else if (hasNull) {
null
} else {
false
}
}
def getHSet(): Set[Any] = hset
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val setName = classOf[Set[Any]].getName
val InSetName = classOf[InSet].getName
val childGen = child.genCode(ctx)
ctx.references += this
val hsetTerm = ctx.freshName("hset")
val hasNullTerm = ctx.freshName("hasNull")
ctx.addMutableState(setName, hsetTerm,
s"$hsetTerm = (($InSetName)references[${ctx.references.size - 1}]).getHSet();")
ctx.addMutableState("boolean", hasNullTerm, s"$hasNullTerm = $hsetTerm.contains(null);")
ev.copy(code = s"""
${childGen.code}
boolean ${ev.isNull} = ${childGen.isNull};
boolean ${ev.value} = false;
if (!${ev.isNull}) {
${ev.value} = $hsetTerm.contains(${childGen.value});
if (!${ev.value} && $hasNullTerm) {
${ev.isNull} = true;
}
}
""")
}
override def sql: String = {
val valueSQL = child.sql
val listSQL = hset.toSeq.map(Literal(_).sql).mkString(", ")
s"($valueSQL IN ($listSQL))"
}
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Logical AND.")
case class And(left: Expression, right: Expression) extends BinaryOperator with Predicate {
override def inputType: AbstractDataType = BooleanType
override def symbol: String = "&&"
override def sqlOperator: String = "AND"
override def eval(input: InternalRow): Any = {
val input1 = left.eval(input)
if (input1 == false) {
false
} else {
val input2 = right.eval(input)
if (input2 == false) {
false
} else {
if (input1 != null && input2 != null) {
true
} else {
null
}
}
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val eval1 = left.genCode(ctx)
val eval2 = right.genCode(ctx)
    // The result should be `false` if either input is `false`, regardless of whether the other is null.
if (!left.nullable && !right.nullable) {
ev.copy(code = s"""
${eval1.code}
boolean ${ev.value} = false;
if (${eval1.value}) {
${eval2.code}
${ev.value} = ${eval2.value};
}""", isNull = "false")
} else {
ev.copy(code = s"""
${eval1.code}
boolean ${ev.isNull} = false;
boolean ${ev.value} = false;
if (!${eval1.isNull} && !${eval1.value}) {
} else {
${eval2.code}
if (!${eval2.isNull} && !${eval2.value}) {
} else if (!${eval1.isNull} && !${eval2.isNull}) {
${ev.value} = true;
} else {
${ev.isNull} = true;
}
}
""")
}
}
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Logical OR.")
case class Or(left: Expression, right: Expression) extends BinaryOperator with Predicate {
override def inputType: AbstractDataType = BooleanType
override def symbol: String = "||"
override def sqlOperator: String = "OR"
override def eval(input: InternalRow): Any = {
val input1 = left.eval(input)
if (input1 == true) {
true
} else {
val input2 = right.eval(input)
if (input2 == true) {
true
} else {
if (input1 != null && input2 != null) {
false
} else {
null
}
}
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val eval1 = left.genCode(ctx)
val eval2 = right.genCode(ctx)
    // The result should be `true` if either input is `true`, regardless of whether the other is null.
if (!left.nullable && !right.nullable) {
ev.isNull = "false"
ev.copy(code = s"""
${eval1.code}
boolean ${ev.value} = true;
if (!${eval1.value}) {
${eval2.code}
${ev.value} = ${eval2.value};
}""", isNull = "false")
} else {
ev.copy(code = s"""
${eval1.code}
boolean ${ev.isNull} = false;
boolean ${ev.value} = true;
if (!${eval1.isNull} && ${eval1.value}) {
} else {
${eval2.code}
if (!${eval2.isNull} && ${eval2.value}) {
} else if (!${eval1.isNull} && !${eval2.isNull}) {
${ev.value} = false;
} else {
${ev.isNull} = true;
}
}
""")
}
}
}
abstract class BinaryComparison extends BinaryOperator with Predicate {
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
if (ctx.isPrimitiveType(left.dataType)
&& left.dataType != BooleanType // java boolean doesn't support > or < operator
&& left.dataType != FloatType
&& left.dataType != DoubleType) {
// faster version
defineCodeGen(ctx, ev, (c1, c2) => s"$c1 $symbol $c2")
} else {
defineCodeGen(ctx, ev, (c1, c2) => s"${ctx.genComp(left.dataType, c1, c2)} $symbol 0")
}
}
protected lazy val ordering = TypeUtils.getInterpretedOrdering(left.dataType)
}
object BinaryComparison {
def unapply(e: BinaryComparison): Option[(Expression, Expression)] = Some((e.left, e.right))
}
/** An extractor that matches both standard 3VL equality and null-safe equality. */
object Equality {
def unapply(e: BinaryComparison): Option[(Expression, Expression)] = e match {
case EqualTo(l, r) => Some((l, r))
case EqualNullSafe(l, r) => Some((l, r))
case _ => None
}
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` equals `expr2`, or false otherwise.")
case class EqualTo(left: Expression, right: Expression)
extends BinaryComparison with NullIntolerant {
override def inputType: AbstractDataType = AnyDataType
override def checkInputDataTypes(): TypeCheckResult = {
super.checkInputDataTypes() match {
case TypeCheckResult.TypeCheckSuccess =>
// TODO: although map type is not orderable, technically map type should be able to be used
// in equality comparison, remove this type check once we support it.
if (left.dataType.existsRecursively(_.isInstanceOf[MapType])) {
TypeCheckResult.TypeCheckFailure("Cannot use map type in EqualTo, but the actual " +
s"input type is ${left.dataType.catalogString}.")
} else {
TypeCheckResult.TypeCheckSuccess
}
case failure => failure
}
}
override def symbol: String = "="
protected override def nullSafeEval(left: Any, right: Any): Any = ordering.equiv(left, right)
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
defineCodeGen(ctx, ev, (c1, c2) => ctx.genEqual(left.dataType, c1, c2))
}
}
@ExpressionDescription(
usage = """
expr1 _FUNC_ expr2 - Returns same result as the EQUAL(=) operator for non-null operands,
    but returns true if both are null, false if one of them is null.
""")
case class EqualNullSafe(left: Expression, right: Expression) extends BinaryComparison {
override def inputType: AbstractDataType = AnyDataType
override def checkInputDataTypes(): TypeCheckResult = {
super.checkInputDataTypes() match {
case TypeCheckResult.TypeCheckSuccess =>
// TODO: although map type is not orderable, technically map type should be able to be used
// in equality comparison, remove this type check once we support it.
if (left.dataType.existsRecursively(_.isInstanceOf[MapType])) {
TypeCheckResult.TypeCheckFailure("Cannot use map type in EqualNullSafe, but the actual " +
s"input type is ${left.dataType.catalogString}.")
} else {
TypeCheckResult.TypeCheckSuccess
}
case failure => failure
}
}
override def symbol: String = "<=>"
override def nullable: Boolean = false
override def eval(input: InternalRow): Any = {
val input1 = left.eval(input)
val input2 = right.eval(input)
if (input1 == null && input2 == null) {
true
} else if (input1 == null || input2 == null) {
false
} else {
ordering.equiv(input1, input2)
}
}
override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = {
val eval1 = left.genCode(ctx)
val eval2 = right.genCode(ctx)
val equalCode = ctx.genEqual(left.dataType, eval1.value, eval2.value)
ev.copy(code = eval1.code + eval2.code + s"""
boolean ${ev.value} = (${eval1.isNull} && ${eval2.isNull}) ||
(!${eval1.isNull} && !${eval2.isNull} && $equalCode);""", isNull = "false")
}
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is less than `expr2`.")
case class LessThan(left: Expression, right: Expression)
extends BinaryComparison with NullIntolerant {
override def inputType: AbstractDataType = TypeCollection.Ordered
override def symbol: String = "<"
protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.lt(input1, input2)
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is less than or equal to `expr2`.")
case class LessThanOrEqual(left: Expression, right: Expression)
extends BinaryComparison with NullIntolerant {
override def inputType: AbstractDataType = TypeCollection.Ordered
override def symbol: String = "<="
protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.lteq(input1, input2)
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is greater than `expr2`.")
case class GreaterThan(left: Expression, right: Expression)
extends BinaryComparison with NullIntolerant {
override def inputType: AbstractDataType = TypeCollection.Ordered
override def symbol: String = ">"
protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.gt(input1, input2)
}
@ExpressionDescription(
usage = "expr1 _FUNC_ expr2 - Returns true if `expr1` is greater than or equal to `expr2`.")
case class GreaterThanOrEqual(left: Expression, right: Expression)
extends BinaryComparison with NullIntolerant {
override def inputType: AbstractDataType = TypeCollection.Ordered
override def symbol: String = ">="
protected override def nullSafeEval(input1: Any, input2: Any): Any = ordering.gteq(input1, input2)
}
| javalovelinux/SparkGroovyScript | sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/predicates.scala | Scala | apache-2.0 | 17,044 |
package vultura.factor
import org.specs2._
class NormalDTest extends Specification {
override def is =
"log expectation with zeros" ! (NormalD.logExpectation(Array(0d,1), Array(0d,1)) === 0d)
}
| ziggystar/vultura-factor | src/test/scala/vultura/factor/NormalDTest.scala | Scala | mit | 202 |
package io.flow.event.v2
import java.util.concurrent.ConcurrentLinkedQueue
import io.flow.event.Record
import io.flow.log.RollbarLogger
import io.flow.play.util.Config
import io.flow.util.{FlowEnvironment, StreamNames}
import javax.inject.{Inject, Singleton}
import scala.concurrent.duration._
import scala.reflect.runtime.universe._
trait DynamoStreamQueue extends Queue
class DefaultDynamoStreamQueue @Inject() (
config: Config,
creds: AWSCreds,
endpoints: AWSEndpoints,
logger: RollbarLogger
) extends DynamoStreamQueue with StreamUsage {
import scala.jdk.CollectionConverters._
private[this] val consumers = new ConcurrentLinkedQueue[DynamoStreamConsumer]()
override def appName: String = config.requiredString("name")
override def producer[T: TypeTag](numberShards: Int = 0): Producer[T] = sys.error("Not supported for Dynamo DB streams")
override def consume[T: TypeTag](
f: Seq[Record] => Unit,
pollTime: FiniteDuration = 5.seconds
): Unit = {
markConsumesStream(streamName[T], typeOf[T])
val consumer = DynamoStreamConsumer(
streamConfig[T],
creds.awsSDKv1Creds,
f,
logger,
)
consumer.start()
consumers.add(consumer)
()
}
override def shutdownConsumers(): Unit = {
// synchronized to avoid a consumer being registered "in between" shutdown and clear
synchronized {
consumers.asScala.foreach(_.shutdown())
consumers.clear()
}
}
override def shutdown(): Unit = shutdownConsumers()
private[v2] def streamConfig[T: TypeTag] = {
val tn = tableName[T]
DynamoStreamConfig(
appName = appName,
dynamoTableName = tn,
eventClass = typeOf[T],
maxRecords = config.optionalInt(s"$tn.maxRecords"),
idleMillisBetweenCalls = config.optionalLong(s"$tn.idleMillisBetweenCalls"),
idleTimeBetweenReadsInMillis = config.optionalLong(s"$tn.idleTimeBetweenReadsMs"),
maxLeasesForWorker = config.optionalInt(s"$tn.maxLeasesForWorker"),
maxLeasesToStealAtOneTime = config.optionalInt(s"$tn.maxLeasesToStealAtOneTime"),
endpoints = endpoints
)
}
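  // e.g. (illustrative, not from the original source) a type named `OrderUpserted`
  // yields "<env>.order_upserteds", where <env> is FlowEnvironment.Current rendered
  // as a string and the type name is snake_cased and suffixed with "s" below.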
private[v2] def tableName[T: TypeTag] = s"${FlowEnvironment.Current}.${tableNameFromType[T]}s"
private def tableNameFromType[T: TypeTag]: String = {
val typ = typeOf[T].typeSymbol.name.toString
StreamNames.toSnakeCase(typ)
}
}
@Singleton
class MockDynamoStreamQueue @Inject()(logger: RollbarLogger) extends MockQueue(logger) with DynamoStreamQueue
| flowcommerce/lib-event | app/io/flow/event/v2/DynamoStreamQueue.scala | Scala | mit | 2,482 |
package wtf.shekels.alice.als.command
import sx.blah.discord.handle.obj.IMessage
import wtf.shekels.alice.als.listeners.CommandListener
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.util.{Failure, Success}
/**
* @author alice
* @since 10/2/17.
*/
abstract class Command(val name: String, val description: String) extends ICommand {
def parse(message: IMessage): CommandInfo = {
val msg = message.getContent
val pattern = """@.*#[0-9]{4}""".r
val name = msg.tail.split(" ").head
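    // Everything after the leading command token becomes the argument list; the mention
    // pattern above is matched but each mention is substituted with itself (left as-is),
    // and a message without spaces yields no arguments.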
val getArgs = (str: String) => {
str match {
case buf if buf.contains(" ") =>
pattern.replaceAllIn(str, x => x.toString())
.split(" ")
.map(_.trim)
.filter(_.nonEmpty)
.tail
case _ => Array.empty[String]
}
}
val command = CommandListener.commands.find(c => c.name.equalsIgnoreCase(name))
val use = if (command.isEmpty) null else command.get
new CommandInfo(use, getArgs(msg), message.getContent)
}
def run(msg: CommandInfo): Unit = {
val result: Future[Unit] = Future {
val time = System.currentTimeMillis()
execute(msg)
println(s"Command $msg executed in ~${System.currentTimeMillis() - time}ms")
}
result onComplete {
case Success(_) =>
case Failure(x) =>
try {
throw CommandExecutionException(x.getMessage)
} catch {
case c: CommandExecutionException =>
c.printStackTrace()
}
}
}
case class CommandExecutionException(private val message: String = "",
private val cause: Throwable = None.orNull) extends Exception(message, cause)
}
| antflga/AL-S | src/main/scala/wtf.shekels.alice.als/command/Command.scala | Scala | gpl-3.0 | 1,634 |
package com.sparcedge.turbine.query.pipeline
import com.sparcedge.turbine.query._
import com.sparcedge.turbine.data.SegmentValueHolder
object MatchPipelineElement {
def apply(mtch: Match): MatchPipelineElement = new MatchPipelineElement(mtch)
}
class MatchPipelineElement(mtch: Match) extends QueryPipelineElement {
val iMatch = mtch.copy()
var satisfied = false
val segment = iMatch.segment
override def shouldContinue(): Boolean = satisfied
def apply(segmentValues: Iterable[SegmentValueHolder]) {
segmentValues foreach { placeholder =>
iMatch(placeholder)
}
}
def evaluate() {
satisfied = iMatch.evaluate()
}
def reset() {
satisfied = false
}
} | sparcedge/turbinedb | src/main/scala/com/sparcedge/turbine/query/pipeline/MatchPipelineElement.scala | Scala | gpl-3.0 | 744 |
package io.finch
import com.twitter.finagle.http.Status
trait Outputs {
// See https://gist.github.com/vkostyukov/32c84c0c01789425c29a to understand how this list is assembled.
// 2xx
def Ok[A](a: A): Output.Payload[A] = Output.Payload(a, Status.Ok) // 200
def Created[A](a: A): Output.Payload[A] = Output.Payload(a, Status.Created) // 201
def Accepted[A](a: A): Output.Payload[A] = Output.Payload(a, Status.Accepted) // 202
def NoContent[A](a: A): Output.Payload[A] = Output.Payload(a, Status.NoContent) // 204
// 3xx
def MovedPermanently(cause: Exception): Output.Failure = Output.Failure(cause, Status.MovedPermanently) //301
def Found(cause: Exception): Output.Failure = Output.Failure(cause, Status.Found) //302
def SeeOther(cause: Exception): Output.Failure = Output.Failure(cause, Status.SeeOther) //303
def NotModified(cause: Exception): Output.Failure = Output.Failure(cause, Status.NotModified) //304
def TemporaryRedirect(cause: Exception): Output.Failure = Output.Failure(cause, Status.TemporaryRedirect) //307
def PermanentRedirect(cause: Exception): Output.Failure = Output.Failure(cause, Status(308)) //308
// 4xx
def BadRequest(cause: Exception): Output.Failure = Output.Failure(cause, Status.BadRequest) //400
def Unauthorized(cause: Exception): Output.Failure = Output.Failure(cause, Status.Unauthorized) //401
def PaymentRequired(cause: Exception): Output.Failure = Output.Failure(cause, Status.PaymentRequired) //402
def Forbidden(cause: Exception): Output.Failure = Output.Failure(cause, Status.Forbidden) //403
def NotFound(cause: Exception): Output.Failure = Output.Failure(cause, Status.NotFound) //404
def MethodNotAllowed(cause: Exception): Output.Failure = Output.Failure(cause, Status.MethodNotAllowed) //405
def NotAcceptable(cause: Exception): Output.Failure = Output.Failure(cause, Status.NotAcceptable) //406
def RequestTimeout(cause: Exception): Output.Failure = Output.Failure(cause, Status.RequestTimeout) //408
def Conflict(cause: Exception): Output.Failure = Output.Failure(cause, Status.Conflict) //409
def Gone(cause: Exception): Output.Failure = Output.Failure(cause, Status.Gone) //410
def LengthRequired(cause: Exception): Output.Failure = Output.Failure(cause, Status.LengthRequired) //411
def PreconditionFailed(cause: Exception): Output.Failure = Output.Failure(cause, Status.PreconditionFailed) //412
def RequestEntityTooLarge(cause: Exception): Output.Failure = Output.Failure(cause, Status.RequestEntityTooLarge)//413
def RequestedRangeNotSatisfiable(cause: Exception): Output.Failure = Output.Failure(
cause, Status.RequestedRangeNotSatisfiable) //416
def EnhanceYourCalm(cause: Exception): Output.Failure = Output.Failure(cause, Status.EnhanceYourCalm) //420
def UnprocessableEntity(cause: Exception): Output.Failure = Output.Failure(cause, Status.UnprocessableEntity) //422
def TooManyRequests(cause: Exception): Output.Failure = Output.Failure(cause, Status.TooManyRequests) //429
// 5xx
def InternalServerError(cause: Exception): Output.Failure = Output.Failure(cause, Status.InternalServerError) //500
def NotImplemented(cause: Exception): Output.Failure = Output.Failure(cause, Status.NotImplemented) //501
def BadGateway(cause: Exception): Output.Failure = Output.Failure(cause, Status.BadGateway) //502
def ServiceUnavailable(cause: Exception): Output.Failure = Output.Failure(cause, Status.ServiceUnavailable) //503
def GatewayTimeout(cause: Exception): Output.Failure = Output.Failure(cause, Status.GatewayTimeout) //504
def InsufficientStorage(cause: Exception): Output.Failure = Output.Failure(cause, Status.InsufficientStorage) //507
}
| BenWhitehead/finch | core/src/main/scala/io/finch/Outputs.scala | Scala | apache-2.0 | 4,304 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar
import slamdata.Predef._
import quasar.fp._
import matryoshka._
import matryoshka.data._
import matryoshka.implicits._
import scalaz._, Scalaz._
import simulacrum.typeclass
import iotaz.{CopK, TListK}
@typeclass trait RenderTree[A] {
def render(a: A): RenderedTree
}
@SuppressWarnings(Array("org.wartremover.warts.ImplicitConversion"))
object RenderTree extends RenderTreeInstances {
import ops._
def contramap[A, B: RenderTree](f: A => B): RenderTree[A] =
new RenderTree[A] { def render(v: A) = RenderTree[B].render(f(v)) }
def make[A](f: A => RenderedTree): RenderTree[A] =
new RenderTree[A] { def render(v: A) = f(v) }
/** Always a Terminal, with a fixed type and computed label. */
def simple[A](nodeType: List[String], f: A => Option[String]): RenderTree[A] =
new RenderTree[A] { def render(v: A) = Terminal(nodeType, f(v)) }
/** Derive an instance from `Show[A]`, with a static type; e.g. `Shape(Circle(5))`. */
def fromShow[A: Show](simpleType: String): RenderTree[A] =
make[A](v => Terminal(List(simpleType), Some(v.shows)))
/** Derive an instance from `Show[A]`, where the result is one of a few choices,
* and suitable as the node's type; e.g. `LeftSide`. Note that the `parentType`
* is not shown in the usual text rendering. */
def fromShowAsType[A: Show](parentType: String): RenderTree[A] =
make[A](v => Terminal(List(v.shows, parentType), None))
/** Derive a `Show[A]` where RenderTree is defined. */
def toShow[A: RenderTree]: Show[A] = Show.show(_.render.show)
def delayFromShow[F[_]: Functor: Foldable](implicit F: Delay[Show, F]) =
new Delay[RenderTree, F] {
def apply[A](a: RenderTree[A]) = new RenderTree[F[A]] {
def render(v: F[A]) =
NonTerminal(List(v.void.shows), None, v.toList.map(a.render))
}
}
/** For use with `<|`, mostly. */
def print[A: RenderTree](label: String, a: A): Unit =
    println(label + ":\n" + a.render.shows)
def recursive[T, F[_]](implicit T: Recursive.Aux[T, F], FD: Delay[RenderTree, F], FF: Functor[F]): RenderTree[T] =
make(_.cata(FD(RenderTree[RenderedTree]).render))
}
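// A hedged usage sketch (illustrative only, not part of the original source); the
// `Person` type and its instance are assumptions showing how the combinators above
// fit together:
//
//   final case class Person(name: String)
//   implicit val personRenderTree: RenderTree[Person] =
//     RenderTree.simple(List("Person"), p => Some(p.name))
//   RenderTree.print("person", Person("Ada"))   // renders the value and prints the tree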
sealed abstract class RenderTreeInstances extends RenderTreeInstances0 {
import RenderTree.make
import RenderTree.ops._
implicit def const[A: RenderTree]: Delay[RenderTree, Const[A, ?]] =
Delay.fromNT(λ[RenderTree ~> DelayedA[A]#RenderTree](_ =>
make(_.getConst.render)))
implicit def delay[F[_], A: RenderTree](implicit F: Delay[RenderTree, F]): RenderTree[F[A]] =
F(RenderTree[A])
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
implicit def free[F[_]: Functor](implicit F: Delay[RenderTree, F]): Delay[RenderTree, Free[F, ?]] =
Delay.fromNT(λ[RenderTree ~> (RenderTree ∘ Free[F, ?])#λ](rt =>
make(_.resume.fold(F(free[F].apply(rt)).render, rt.render))))
@SuppressWarnings(Array("org.wartremover.warts.Recursion"))
implicit def cofree[F[_]](implicit F: Delay[RenderTree, F]): Delay[RenderTree, Cofree[F, ?]] =
Delay.fromNT(λ[RenderTree ~> (RenderTree ∘ Cofree[F, ?])#λ](rt =>
make(t => NonTerminal(List("Cofree"), None, List(rt.render(t.head), F(cofree(F)(rt)).render(t.tail))))))
  implicit def these[A: RenderTree, B: RenderTree]: RenderTree[A \&/ B] =
    make {
      case \&/.Both(a, b) => NonTerminal(List("\\&/"), "Both".some, List(a.render, b.render))
      case \&/.This(a) => NonTerminal(List("\\&/"), "This".some, List(a.render))
      case \&/.That(b) => NonTerminal(List("\\&/"), "That".some, List(b.render))
    }
implicit def coproduct[F[_], G[_], A](implicit RF: RenderTree[F[A]], RG: RenderTree[G[A]]): RenderTree[Coproduct[F, G, A]] =
make(_.run.fold(RF.render, RG.render))
implicit lazy val unit: RenderTree[Unit] =
make(_ => Terminal(List("()", "Unit"), None))
implicit def renderTreeT[T[_[_]], F[_]: Functor](implicit T: RenderTreeT[T], F: Delay[RenderTree, F]): RenderTree[T[F]] =
T.renderTree(F)
implicit def copKRenderTree[LL <: TListK](implicit M: RenderTreeKMaterializer[LL]): Delay[RenderTree, CopK[LL, ?]] = M.materialize(offset = 0)
implicit def coproductDelay[F[_], G[_]](implicit RF: Delay[RenderTree, F], RG: Delay[RenderTree, G]): Delay[RenderTree, Coproduct[F, G, ?]] =
Delay.fromNT(λ[RenderTree ~> DelayedFG[F, G]#RenderTree](ra =>
make(_.run.fold(RF(ra).render, RG(ra).render))))
  implicit def eitherRenderTree[A, B](implicit RA: RenderTree[A], RB: RenderTree[B]): RenderTree[A \/ B] =
    make {
      case -\/ (a) => NonTerminal("-\\/" :: Nil, None, RA.render(a) :: Nil)
      case \/- (b) => NonTerminal("\\/-" :: Nil, None, RB.render(b) :: Nil)
    }
implicit def optionRenderTree[A](implicit RA: RenderTree[A]): RenderTree[Option[A]] =
make {
case Some(a) => RA.render(a)
case None => Terminal("None" :: "Option" :: Nil, None)
}
implicit def listRenderTree[A](implicit RA: RenderTree[A]): RenderTree[List[A]] =
make(v => NonTerminal(List("List"), None, v.map(RA.render)))
implicit def listMapRenderTree[K: Show, V](implicit RV: RenderTree[V]): RenderTree[ListMap[K, V]] =
make(RenderTree[Map[K, V]].render(_))
implicit def vectorRenderTree[A](implicit RA: RenderTree[A]): RenderTree[Vector[A]] =
make(v => NonTerminal(List("Vector"), None, v.map(RA.render).toList))
implicit lazy val booleanRenderTree: RenderTree[Boolean] =
RenderTree.fromShow[Boolean]("Boolean")
implicit lazy val intRenderTree: RenderTree[Int] =
RenderTree.fromShow[Int]("Int")
implicit lazy val doubleRenderTree: RenderTree[Double] =
RenderTree.fromShow[Double]("Double")
implicit lazy val stringRenderTree: RenderTree[String] =
RenderTree.fromShow[String]("String")
implicit lazy val symbolRenderTree: RenderTree[Symbol] =
RenderTree.fromShow[Symbol]("Symbol")
implicit def pathRenderTree[B,T,S]: RenderTree[pathy.Path[B,T,S]] =
// NB: the implicit Show instance in scope here ends up being a circular
// call, so an explicit reference to pathy's Show is needed.
make(p => Terminal(List("Path"), pathy.Path.pathShow.shows(p).some))
implicit def leftTuple4RenderTree[A, B, C, D](implicit RA: RenderTree[A], RB: RenderTree[B], RC: RenderTree[C], RD: RenderTree[D]):
RenderTree[(((A, B), C), D)] =
new RenderTree[(((A, B), C), D)] {
def render(t: (((A, B), C), D)) =
NonTerminal("tuple" :: Nil, None,
RA.render(t._1._1._1) ::
RB.render(t._1._1._2) ::
RC.render(t._1._2) ::
RD.render(t._2) ::
Nil)
}
}
sealed abstract class RenderTreeInstances0 extends RenderTreeInstances1 {
implicit def leftTuple3RenderTree[A, B, C](
implicit RA: RenderTree[A], RB: RenderTree[B], RC: RenderTree[C]
): RenderTree[((A, B), C)] =
new RenderTree[((A, B), C)] {
def render(t: ((A, B), C)) =
NonTerminal("tuple" :: Nil, None,
RA.render(t._1._1) ::
RB.render(t._1._2) ::
RC.render(t._2) ::
Nil)
}
implicit def mapRenderTree[K: Show, V](implicit RV: RenderTree[V]): RenderTree[Map[K, V]] =
RenderTree.make(v => NonTerminal("Map" :: Nil, None,
v.toList.map { case (k, v) =>
NonTerminal("Key" :: "Map" :: Nil, Some(k.shows), RV.render(v) :: Nil)
}))
implicit def fix[F[_]: Functor](implicit F: Delay[RenderTree, F]): RenderTree[Fix[F]] =
RenderTree.recursive
implicit def mu[F[_]: Functor](implicit F: Delay[RenderTree, F]): RenderTree[Mu[F]] =
RenderTree.recursive
implicit def nu[F[_]: Functor](implicit F: Delay[RenderTree, F]): RenderTree[Nu[F]] =
RenderTree.recursive
}
sealed abstract class RenderTreeInstances1 extends RenderTreeInstances2 {
import RenderTree.make
implicit def tuple2RenderTree[A, B](
implicit RA: RenderTree[A], RB: RenderTree[B]
): RenderTree[(A, B)] =
make(t => NonTerminal("tuple" :: Nil, None,
RA.render(t._1) ::
RB.render(t._2) ::
Nil))
}
sealed abstract class RenderTreeInstances2 {
import cats.{~>, Functor}
import cats.data.Const
import iota.{CopK, TListK}
import higherkindness.droste.{Algebra, Basis, Delay, scheme}
import RenderTree.ops._
implicit def catsConst[A: RenderTree]: Delay[RenderTree, Const[A, ?]] =
λ[RenderTree ~> λ[α => RenderTree[Const[A, α]]]](_ =>
RenderTree.make(_.getConst.render))
implicit def catsDelay[F[_], A: RenderTree](implicit F: Delay[RenderTree, F]): RenderTree[F[A]] =
F(RenderTree[A])
implicit def catsCopKRenderTree[LL <: TListK](
implicit M: DelayRenderTreeMaterlializer[LL])
: Delay[RenderTree, CopK[LL, ?]] = M.materialize(offset = 0)
implicit def basisRenderTree[F[_], U: Basis[F, ?]](implicit FD: Delay[RenderTree, F], FF: Functor[F]): RenderTree[U] =
RenderTree.make(scheme.cata(Algebra(FD(RenderTree[RenderedTree]).render)))
}
| djspiewak/quasar | foundation/src/main/scala/quasar/RenderTree.scala | Scala | apache-2.0 | 9,460 |
package com.github.truerss.rollbar
import entities.{Trace, Frame, RollBarException}
trait ToTrace {
def convert(thr: Throwable): Trace
}
object DefaultImplicits {
implicit val ToTraceDefault: ToTrace = new ToTrace {
override def convert(thr: Throwable): Trace = {
val fs = thr.getStackTrace.map { x =>
Frame(
fileName = x.getFileName,
lineNumber = Option(x.getLineNumber),
colNumber = None,
method = Option(x.getMethodName),
code = None,
className = Option(x.getClassName),
context = None,
argSpec = Vector.empty,
varargSpec = None,
keyWordSpec = None
)
}.toVector
Trace(frames = fs, exception = RollBarException(thr))
}
}
} | truerss/rollbar-scala | src/main/scala/com/github/truerss/rollbar/ToTrace.scala | Scala | mit | 776 |
/*
There are a number of variations on `Option` and `Either`. If we want to accumulate multiple errors, a simple approach is a new data type that lets us keep a list of errors in the data constructor that represents failures:
trait Partial[+A,+B]
case class Errors[+A](get: Seq[A]) extends Partial[A,Nothing]
case class Success[+B](get: B) extends Partial[Nothing,B]
There is a type very similar to this called `Validation` in the Scalaz library. You can implement `map`, `map2`, `sequence`, and so on for this type in such a way that errors are accumulated when possible (`flatMap` is unable to accumulate errors--can you see why?). This idea can even be generalized further--we don't need to accumulate failing values into a list; we can accumulate values using any user-supplied binary function.
It's also possible to use `Either[List[E],_]` directly to accumulate errors, using different implementations of helper functions like `map2` and `sequence`.
*/
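// A minimal, hedged sketch (added for illustration; not part of the original answer)
// of a `map2` that accumulates errors for the `Partial` type described above. The
// constructor names mirror the ones given in the comment.
trait Partial[+A,+B]
case class Errors[+A](get: Seq[A]) extends Partial[A,Nothing]
case class Success[+B](get: B) extends Partial[Nothing,B]
def map2[E,A,B,C](a: Partial[E,A], b: Partial[E,B])(f: (A,B) => C): Partial[E,C] =
  (a, b) match {
    case (Errors(es1), Errors(es2)) => Errors(es1 ++ es2) // both failed: combine the error lists
    case (Errors(es), _)            => Errors(es)
    case (_, Errors(es))            => Errors(es)
    case (Success(x), Success(y))   => Success(f(x, y))
  }
// `flatMap` cannot accumulate: the second computation is only produced from a
// successful first result, so there is never a second error list to combine.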
| lhohan/fpscala | answerkey/errorhandling/08.answer.scala | Scala | mit | 963 |