code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 5–1M | stringlengths 5–109 | stringlengths 6–208 | stringclasses 1 value | stringclasses 15 values | int64 5–1M
package scala
import scala.language.higherKinds
package object collection {
@deprecated("Use Iterable instead of Traversable", "2.13.0")
type Traversable[+X] = Iterable[X]
@deprecated("Use Iterable instead of Traversable", "2.13.0")
val Traversable = Iterable
@deprecated("Use IterableOnce instead of TraversableOnce", "2.13.0")
type TraversableOnce[+X] = IterableOnce[X]
@deprecated("Use IterableOnce instead of TraversableOnce", "2.13.0")
val TraversableOnce = IterableOnce
@deprecated("Use SeqOps instead of SeqLike", "2.13.0")
type SeqLike[A, T] = SeqOps[A, Seq, T]
@deprecated("Use SeqOps (for the methods) or IndexedSeqOps (for fast indexed access) instead of ArrayLike", "2.13.0")
type ArrayLike[A] = SeqOps[A, Seq, Seq[A]]
@deprecated("Gen* collection types have been removed", "2.13.0")
type GenTraversableOnce[+X] = IterableOnce[X]
@deprecated("Gen* collection types have been removed", "2.13.0")
val GenTraversableOnce = IterableOnce
@deprecated("Gen* collection types have been removed", "2.13.0")
type GenTraversable[+X] = Iterable[X]
@deprecated("Gen* collection types have been removed", "2.13.0")
val GenTraversable = Iterable
@deprecated("Gen* collection types have been removed", "2.13.0")
type GenIterable[+X] = Iterable[X]
@deprecated("Gen* collection types have been removed", "2.13.0")
val GenIterable = Iterable
@deprecated("Gen* collection types have been removed", "2.13.0")
type GenSeq[+X] = Seq[X]
@deprecated("Gen* collection types have been removed", "2.13.0")
val GenSeq = Seq
@deprecated("Gen* collection types have been removed", "2.13.0")
type GenSet[X] = Set[X]
@deprecated("Gen* collection types have been removed", "2.13.0")
val GenSet = Set
@deprecated("Gen* collection types have been removed", "2.13.0")
type GenMap[K, +V] = Map[K, V]
@deprecated("Gen* collection types have been removed", "2.13.0")
val GenMap = Map
/** Needed to circumvent a difficulty between dotty and scalac concerning
* the right top type for a type parameter of kind * -> *.
* In Scalac, we can provide `Any`, as `Any` is kind-polymorphic. In dotty this is not allowed.
* In dotty, we can provide `[X] => Any`. But Scalac does not know lambda syntax.
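* For example (an illustrative sketch, not part of the original source), it can serve as an
* unconstrained upper bound for a type constructor parameter that both compilers accept:
* {{{
*   def render[CC[X] <: AnyConstr[X], A](xs: CC[A] with Iterable[A]): String = xs.mkString(", ")
* }}}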
*/
type AnyConstr[X] = Any
/** Collection internal utility functions.
*/
private[collection] object DebugUtils {
def unsupported(msg: String) = throw new UnsupportedOperationException(msg)
def noSuchElement(msg: String) = throw new NoSuchElementException(msg)
def indexOutOfBounds(index: Int) = throw new IndexOutOfBoundsException(index.toString)
def illegalArgument(msg: String) = throw new IllegalArgumentException(msg)
def buildString(closure: (Any => Unit) => Unit): String = {
val output = new collection.mutable.StringBuilder
closure { any =>
output ++= any.toString
output += '\n'
}
output.result()
}
def arrayString[T](array: Array[T], from: Int, until: Int): String = {
array.slice(from, until) map ({
case null => "n/a"
case x => "" + x
}: scala.PartialFunction[T, String]) mkString " | "
}
}
/** An extractor used to head/tail deconstruct sequences. */
object +: {
/** Splits a sequence into head +: tail.
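* For example (an illustrative sketch, not part of the original source):
* {{{
*   Seq(1, 2, 3) match {
*     case head +: tail => (head, tail)   // (1, Seq(2, 3))
*     case _            => (0, Nil)       // empty sequence
*   }
* }}}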
* @return Some((head, tail)) if sequence is non-empty. None otherwise.
*/
def unapply[A, CC[_] <: Seq[_], C <: SeqOps[A, CC, C]](t: C with SeqOps[A, CC, C]): Option[(A, C)] =
if(t.isEmpty) None
else Some(t.head -> t.tail)
}
/** An extractor used to init/last deconstruct sequences. */
object :+ {
/** Splits a sequence into init :+ last.
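* For example (an illustrative sketch, not part of the original source):
* {{{
*   Seq(1, 2, 3) match {
*     case init :+ last => (init, last)   // (Seq(1, 2), 3)
*     case _            => (Nil, 0)       // empty sequence
*   }
* }}}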
* @return Some((init, last)) if sequence is non-empty. None otherwise.
*/
def unapply[A, CC[_] <: Seq[_], C <: SeqOps[A, CC, C]](t: C with SeqOps[A, CC, C]): Option[(C, A)] =
if(t.isEmpty) None
else Some(t.init -> t.last)
}
}
| rorygraves/perf_tester | corpus/scala-library/src/main/scala/collection/package.scala | Scala | apache-2.0 | 3,941 |
/*
* Shadowsocks - A shadowsocks client for Android
* Copyright (C) 2014 <[email protected]>
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
*                            ___====-_  _-====___
*                      _--^^^#####//      \\#####^^^--_
*                   _-^##########// (    ) \\##########^-_
*                  -############//  |\^^/|  \\############-
*                _/############//   (@::@)   \\############\_
*               /#############((     \\//     ))#############\
*              -###############\\    (oo)    //###############-
*             -#################\\  / VV \  //#################-
*            -###################\\/      \//###################-
*           _#/|##########/\######(   /\   )######/\##########|\#_
*           |/ |#/\#/\#/\/  \#/\##\  |  |  /##/\#/  \/\#/\#/\#| \|
*           `  |/  V  V  `   V  \#\| |  | |/#/  V   '  V  V  \|  '
*              `   `  `      `   / | |  | | \   '      '  '   '
*                                (  | |  | |  )
*                               __\ | |  | | /__
*                              (vvv(VVV)(VVV)vvv)
*
* HERE BE DRAGONS
*
*/
package com.github.shadowsocks.database
import com.j256.ormlite.field.{DataType, DatabaseField}
class Profile {
@DatabaseField(generatedId = true)
var id: Int = 0
@DatabaseField
var name: String = "Untitled"
@DatabaseField
var host: String = ""
@DatabaseField
var localPort: Int = 1080
@DatabaseField
var remotePort: Int = 8338
@DatabaseField
var password: String = ""
@DatabaseField
var method: String = "rc4"
@DatabaseField
var route: String = "all"
@DatabaseField
var proxyApps: Boolean = false
@DatabaseField
var bypass: Boolean = false
@DatabaseField
var udpdns: Boolean = false
@DatabaseField
var auth: Boolean = false
@DatabaseField
var ipv6: Boolean = false
@DatabaseField(dataType = DataType.LONG_STRING)
var individual: String = ""
@DatabaseField
var tx: Long = 0
@DatabaseField
var rx: Long = 0
@DatabaseField
val date: java.util.Date = new java.util.Date()
}
| ticonci/shadowsocks-android | src/main/scala/com/github/shadowsocks/database/Profile.scala | Scala | gpl-3.0 | 2,737 |
/*
* Copyright 2016 Coursera Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.coursera.common.concurrent
import scala.concurrent.ExecutionContext
import scala.concurrent.Future
import scala.concurrent.Promise
import scala.util.Failure
import scala.util.Success
import scala.util.Try
import scala.util.control.NonFatal
object Futures extends FutureExtractors {
/**
* Executes `f` immediately without an [[ExecutionContext]].
*
* Returns a successful future if `f` completes or a failed one if `f` throws an exception.
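*
* A minimal usage sketch (illustrative, not part of the original source):
* {{{
*   val ok: Future[Int] = Futures.immediate(40 + 2)                          // successful Future(42)
*   val ko: Future[Int] = Futures.immediate(throw new RuntimeException("x")) // failed future
* }}}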
*/
def immediate[T](f: => T): Future[T] = {
try {
Future.successful(f)
} catch {
case NonFatal(e) =>
Future.failed(e)
}
}
/**
* Executes `f` immediately. Returns `f`'s future (either successful or not) if `f` completes
* or a failed one if `f` throws an exception.
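*
* A minimal usage sketch (illustrative, not part of the original source):
* {{{
*   def flaky(): Future[Int] = throw new IllegalStateException("not ready")
*   val f: Future[Int] = Futures.safelyCall(flaky())  // a failed future rather than a thrown exception
* }}}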
*/
def safelyCall[T](f: => Future[T]): Future[T] = {
Try(f).recover {
case e: Throwable => Future.failed(e)
}.get
}
/**
* Variant of [[Future.find]] that applies a [[PartialFunction]], if it's defined, rather than
* checking a predicate. This allows type safe transformation of matched values.
*
* For example, if you wanted to find the first string that represented a non-negative integer:
* {{{
* val nonNegativeIntRegex = """[0-9]+""".r
* val futures: immutable.Seq[Future[String]] = startComputation()
*
* val matchOptionFuture: Future[Option[Int]] = Futures.findMatch(futures) {
* case nonNegativeIntRegex(s) => s.toInt
* }
* }}}
* (Types added for clarity where they would normally be inferred.)
*
* With [[Future.find]], you'd need to use something more verbose instead:
* {{{
* val matchOptionFuture: Future[Option[Int]] = Future.find(futures) {
* case nonNegativeIntRegex(s) => true
* case _ => false
* }.map(_.toInt)
* }}}
*/
def findMatch[T, U](
futures: TraversableOnce[Future[T]])
(pf: PartialFunction[T, U])
(implicit ec: ExecutionContext): Future[Option[U]] = {
Future.find(futures)(pf.isDefinedAt).map(_.map(pf))
}
def option[T](option: Option[Future[T]])(implicit ec: ExecutionContext): Future[Option[T]] =
option.map(_.map(Some(_))).getOrElse(Future.successful(None))
def map[K, V](m: Map[K, Future[V]])(implicit ec: ExecutionContext): Future[Map[K, V]] = {
val elementFutures = m.map { case (key, valueFuture) =>
valueFuture.map(key -> _)
}
Future.sequence(elementFutures).map(_.toMap)
}
object Implicits {
implicit class FutureOps[T](future: Future[T]) {
def toTry(implicit ec: ExecutionContext): Future[Try[T]] = {
future
.map(Success.apply)
.recover(PartialFunction(Failure.apply))
}
}
}
}
| coursera/courscala | courscala/src/main/scala/org/coursera/common/concurrent/Futures.scala | Scala | apache-2.0 | 3,318 |
/*
* Copyright (C) 2005 - 2019 Schlichtherle IT Services.
* All rights reserved. Use is subject to license terms.
*/
package global.namespace.truelicense.tests.core
import global.namespace.fun.io.bios.BIOS.memory
trait LicenseConsumerPerformance {
this: TestContext =>
final def run(): Unit = new State {
{
val tempStore = memory
vendorManager generateKeyFrom licenseBean saveTo tempStore
for (i <- 1 to 5) {
consumerManager install tempStore
val num = 1000 * 1000
val start = System.nanoTime
for (j <- 1 to num) {
consumerManager.verify()
}
val time = System.nanoTime - start
printf("Iteration %d verified the installed license key %,d times per second.\\n", i, num * 1000L * 1000L * 1000L / time)
}
}
}
}
| christian-schlichtherle/truelicense | tests/src/test/scala/global/namespace/truelicense/tests/core/LicenseConsumerPerformance.scala | Scala | apache-2.0 | 816 |
package com.ergodicity.marketdb.loader
import org.slf4j.LoggerFactory
import org.joda.time.Interval
import com.ergodicity.marketdb.loader.util.Implicits._
import com.twitter.ostrich.admin.{RuntimeEnvironment, Service}
import com.twitter.ostrich.stats.Stats
import com.ergodicity.marketdb.model.TradePayload
import scalaz.IterV
object Loader {
val log = LoggerFactory.getLogger(getClass.getName)
var loader: Loader[_] = null
var runtime: RuntimeEnvironment = null
def main(args: Array[String]) {
try {
runtime = RuntimeEnvironment(this, args)
loader = runtime.loadRuntimeConfig[Loader[_]]()
loader.start()
} catch {
case e =>
log.error("Exception during startup; exiting!", e)
System.exit(1)
}
}
}
class Loader[E](interval: Interval, loader: TradeLoader, i: IterV[TradePayload, E]) extends Service {
val log = LoggerFactory.getLogger(classOf[Loader[_]])
if (loader == null) {
throw new IllegalStateException("Loader not defined")
}
def start() {
log.info("Start marketDB loader")
log.info("Loader: " + loader)
log.info("I: " + i)
log.info("Date interval: " + interval)
for (day <- interval.toDays) {
log.info("Load data for: " + day)
val count = Stats.time("trades_enumeration") {
loader.enumTrades(day, i)
}
log.info("Loader report for day: " + day + "; Report: " + count)
}
}
def shutdown() {
}
}
| ezhulenev/marketdb | marketdb-loader/src/main/scala/com/ergodicity/marketdb/loader/Loader.scala | Scala | mit | 1,442 |
package com.twitter.finagle.http
import com.twitter.io.{Reader => BufReader, Writer => BufWriter}
import com.twitter.finagle.netty3.ChannelBufferBuf
import com.twitter.util.{Await, Duration, Closable}
import java.io.{InputStream, InputStreamReader, OutputStream, OutputStreamWriter, Reader, Writer}
import java.util.{Iterator => JIterator}
import java.nio.charset.Charset
import java.util.{Date, TimeZone}
import org.apache.commons.lang.StringUtils
import org.apache.commons.lang.time.FastDateFormat
import org.jboss.netty.buffer._
import org.jboss.netty.handler.codec.http.{HttpMessage, HttpHeaders, HttpMethod, HttpVersion}
import scala.collection.JavaConverters._
/**
* Rich HttpMessage
*
* Base class for Request and Response. There are both input and output
* methods, though only one set of methods should be used.
*/
abstract class Message extends HttpMessage {
private[this] val readerWriter = BufReader.writable()
/**
* A read-only handle to the internal stream of bytes, representing the
* message body. See [[com.twitter.io.Reader]] for more information.
**/
def reader: BufReader = readerWriter
/**
* A write-only handle to the internal stream of bytes, representing the
* message body. See [[com.twitter.io.Writer]] for more information.
**/
def writer: BufWriter with Closable = readerWriter
def isRequest: Boolean
def isResponse = !isRequest
def content: ChannelBuffer = getContent()
def content_=(content: ChannelBuffer) { setContent(content) }
def version: HttpVersion = getProtocolVersion()
def version_=(version: HttpVersion) { setProtocolVersion(version) }
lazy val headerMap: HeaderMap = new MessageHeaderMap(this)
// Java users: use Netty HttpHeaders interface for headers
/**
* Cookies. In a request, this uses the Cookie headers.
* In a response, it uses the Set-Cookie headers.
*/
lazy val cookies = new CookieMap(this)
// Java users: use the interface below for cookies
/** Get iterator over Cookies */
def getCookies(): JIterator[Cookie] = cookies.valuesIterator.asJava
/** Add a cookie */
def addCookie(cookie: Cookie) {
cookies += cookie
}
/** Remove a cookie */
def removeCookie(name: String) {
cookies -= name
}
/** Accept header */
def accept: Seq[String] =
Option(headers.get(HttpHeaders.Names.ACCEPT)) match {
case Some(s) => s.split(",").map(_.trim).filter(_.nonEmpty)
case None => Seq()
}
/** Set Accept header */
def accept_=(value: String) { headers.set(HttpHeaders.Names.ACCEPT, value) }
/** Set Accept header with list of values */
def accept_=(values: Iterable[String]) { accept = values.mkString(", ") }
/** Accept header media types (normalized, no parameters) */
def acceptMediaTypes: Seq[String] =
accept.map {
_.split(";", 2).headOption
.map(_.trim.toLowerCase) // media types are case-insensitive
.filter(_.nonEmpty) // skip blanks
}.flatten
/** Allow header */
def allow: Option[String] = Option(headers.get(HttpHeaders.Names.ALLOW))
/** Set Allow header */
def allow_=(value: String) { headers.set(HttpHeaders.Names.ALLOW, value) }
/** Set Allow header with a list of methods */
def allow_=(values: Iterable[HttpMethod]) { allow = values.mkString(",") }
/** Get Authorization header */
def authorization: Option[String] = Option(headers.get(HttpHeaders.Names.AUTHORIZATION))
/** Set Authorization header */
def authorization_=(value: String) { headers.set(HttpHeaders.Names.AUTHORIZATION, value) }
/** Get Cache-Control header */
def cacheControl: Option[String] = Option(headers.get(HttpHeaders.Names.CACHE_CONTROL))
/** Set Cache-Control header */
def cacheControl_=(value: String) { headers.set(HttpHeaders.Names.CACHE_CONTROL, value) }
/** Set Cache-Control header with a max-age (and must-revalidate). */
def cacheControl_=(maxAge: Duration) {
cacheControl = "max-age=" + maxAge.inSeconds.toString + ", must-revalidate"
}
/** Get charset from Content-Type header */
def charset: Option[String] = {
contentType.foreach { contentType =>
val parts = StringUtils.split(contentType, ';')
1.to(parts.length - 1) foreach { i =>
val part = parts(i).trim
if (part.startsWith("charset=")) {
val equalsIndex = part.indexOf('=')
val charset = part.substring(equalsIndex + 1)
return Some(charset)
}
}
}
None
}
/** Set charset in Content-Type header. This does not change the content. */
def charset_=(value: String) {
val contentType = this.contentType.getOrElse("")
val parts = StringUtils.split(contentType, ';')
if (parts.isEmpty) {
this.contentType = ";charset=" + value // malformed
return
}
val builder = new StringBuilder(parts(0))
if (!(parts.exists { _.trim.startsWith("charset=") })) {
// No charset parameter exist, add charset after media type
builder.append(";charset=")
builder.append(value)
// Copy other parameters
1.to(parts.length - 1) foreach { i =>
builder.append(";")
builder.append(parts(i))
}
} else {
// Replace charset= parameter(s)
1.to(parts.length - 1) foreach { i =>
val part = parts(i)
if (part.trim.startsWith("charset=")) {
builder.append(";charset=")
builder.append(value)
} else {
builder.append(";")
builder.append(part)
}
}
}
this.contentType = builder.toString
}
/** Get Content-Length header. Use length to get the length of actual content. */
def contentLength: Option[Long] =
Option(headers.get(HttpHeaders.Names.CONTENT_LENGTH)).map { _.toLong }
/** Set Content-Length header. Normally, this is automatically set by the
* Codec, but this method allows you to override that. */
def contentLength_=(value: Long) {
headers.set(HttpHeaders.Names.CONTENT_LENGTH, value.toString)
}
/** Get Content-Type header */
def contentType: Option[String] = Option(headers.get(HttpHeaders.Names.CONTENT_TYPE))
/** Set Content-Type header */
def contentType_=(value: String) { headers.set(HttpHeaders.Names.CONTENT_TYPE, value) }
/** Set Content-Type header by media-type and charset */
def setContentType(mediaType: String, charset: String = "utf-8") {
headers.set(HttpHeaders.Names.CONTENT_TYPE, mediaType + ";charset=" + charset)
}
/** Set Content-Type header to application/json;charset=utf-8 */
def setContentTypeJson() { headers.set(HttpHeaders.Names.CONTENT_TYPE, Message.ContentTypeJson) }
/** Get Date header */
def date: Option[String] = Option(headers.get(HttpHeaders.Names.DATE))
/** Set Date header */
def date_=(value: String) { headers.set(HttpHeaders.Names.DATE, value) }
/** Set Date header by Date */
def date_=(value: Date) { date = Message.httpDateFormat(value) }
/** Get Expires header */
def expires: Option[String] = Option(headers.get(HttpHeaders.Names.EXPIRES))
/** Set Expires header */
def expires_=(value: String) { headers.set(HttpHeaders.Names.EXPIRES, value) }
/** Set Expires header by Date */
def expires_=(value: Date) { expires = Message.httpDateFormat(value) }
/** Get Host header */
def host: Option[String] = Option(headers.get(HttpHeaders.Names.HOST))
/** Set Host header */
def host_=(value: String) { headers.set(HttpHeaders.Names.HOST, value) }
/** Get Last-Modified header */
def lastModified: Option[String] = Option(headers.get(HttpHeaders.Names.LAST_MODIFIED))
/** Set Last-Modified header */
def lastModified_=(value: String) { headers.set(HttpHeaders.Names.LAST_MODIFIED, value) }
/** Set Last-Modified header by Date */
def lastModified_=(value: Date) { lastModified = Message.httpDateFormat(value) }
/** Get Location header */
def location: Option[String] = Option(headers.get(HttpHeaders.Names.LOCATION))
/** Set Location header */
def location_=(value: String) { headers.set(HttpHeaders.Names.LOCATION, value) }
/** Get media-type from Content-Type header */
def mediaType: Option[String] =
contentType.flatMap { contentType =>
val beforeSemi =
contentType.indexOf(";") match {
case -1 => contentType
case n => contentType.substring(0, n)
}
val mediaType = beforeSemi.trim
if (mediaType.nonEmpty)
Some(mediaType.toLowerCase)
else
None
}
/**
* Set media-type in Content-Type header. Charset and parameter values are
* preserved, though may not be appropriate for the new media type.
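* For example (illustrative): with a Content-Type of `text/html;charset=utf-8`, setting the
* media type to `application/json` yields `application/json;charset=utf-8`.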
*/
def mediaType_=(value: String) {
contentType match {
case Some(contentType) =>
val parts = StringUtils.split(contentType, ";", 2)
if (parts.length == 2) {
this.contentType = value + ";" + parts(1)
} else {
this.contentType = value
}
case None =>
this.contentType = value
}
}
/** Get Referer [sic] header */
def referer: Option[String] = Option(headers.get(HttpHeaders.Names.REFERER))
/** Set Referer [sic] header */
def referer_=(value: String) { headers.set(HttpHeaders.Names.REFERER, value) }
/** Get Retry-After header */
def retryAfter: Option[String] = Option(headers.get(HttpHeaders.Names.RETRY_AFTER))
/** Set Retry-After header */
def retryAfter_=(value: String) { headers.set(HttpHeaders.Names.RETRY_AFTER, value) }
/** Set Retry-After header by seconds */
def retryAfter_=(value: Long) { retryAfter = value.toString }
/** Get Server header */
def server: Option[String] = Option(headers.get(HttpHeaders.Names.SERVER))
/** Set Server header */
def server_=(value: String) { headers.set(HttpHeaders.Names.SERVER, value) }
/** Get User-Agent header */
def userAgent: Option[String] = Option(headers.get(HttpHeaders.Names.USER_AGENT))
/** Set User-Agent header */
def userAgent_=(value: String) { headers.set(HttpHeaders.Names.USER_AGENT, value) }
/** Get WWW-Authenticate header */
def wwwAuthenticate: Option[String] = Option(headers.get(HttpHeaders.Names.WWW_AUTHENTICATE))
/** Set WWW-Authenticate header */
def wwwAuthenticate_=(value: String) { headers.set(HttpHeaders.Names.WWW_AUTHENTICATE, value) }
/** Get X-Forwarded-For header */
def xForwardedFor: Option[String] = Option(headers.get("X-Forwarded-For"))
/** Set X-Forwarded-For header */
def xForwardedFor_=(value: String) { headers.set("X-Forwarded-For", value) }
/**
* Check if X-Requested-With contains XMLHttpRequest, usually signalling a
* request from a JavaScript AJAX library. Some servers treat these
* requests specially. For example, an endpoint might render JSON or XML
* instead of HTML if it's an XmlHttpRequest. (Tip: don't do this - it's gross.)
*/
def isXmlHttpRequest = {
Option(headers.get("X-Requested-With")) exists { _.toLowerCase.contains("xmlhttprequest") }
}
/** Get length of content. */
def length: Int = getContent.readableBytes
def getLength(): Int = length
/** Get the content as a string. */
def contentString: String = {
val encoding = try {
Charset.forName(charset getOrElse "UTF-8")
} catch {
case _: Throwable => Message.Utf8
}
getContent.toString(encoding)
}
def getContentString(): String = contentString
/** Set the content as a string. */
def contentString_=(value: String) {
if (value != "")
setContent(ChannelBuffers.wrappedBuffer(value.getBytes("UTF-8")))
else
setContent(ChannelBuffers.EMPTY_BUFFER)
}
def setContentString(value: String) { contentString = value }
/**
* Use content as InputStream. The underlying channel buffer's reader
* index is advanced. (Scala interface. Java users can use getInputStream().)
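*
* A minimal usage sketch (illustrative; `request` stands for any concrete Request or Response):
* {{{
*   val body: String = request.withInputStream { is =>
*     scala.io.Source.fromInputStream(is).mkString
*   }
* }}}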
*/
def withInputStream[T](f: InputStream => T): T = {
val inputStream = getInputStream()
val result = f(inputStream) // throws
inputStream.close()
result
}
/**
* Get InputStream for content. Caller must close. (Java interface. Scala
* users should use withInputStream.)
*/
def getInputStream(): InputStream =
new ChannelBufferInputStream(getContent)
/** Use content as Reader. (Scala interface. Java users can use getReader().) */
def withReader[T](f: Reader => T): T = {
withInputStream { inputStream =>
val reader = new InputStreamReader(inputStream)
f(reader)
}
}
/** Get Reader for content. (Java interface. Scala users should use withReader.) */
def getReader(): Reader =
new InputStreamReader(getInputStream())
/** Append string to content. */
def write(string: String) {
write(string.getBytes("UTF-8"))
}
/** Append bytes to content. */
def write(bytes: Array[Byte]) {
getContent match {
case buffer: DynamicChannelBuffer =>
buffer.writeBytes(bytes)
case _ =>
val buffer = ChannelBuffers.wrappedBuffer(bytes)
write(buffer)
}
}
/** Append ChannelBuffer to content.
*
* If `isChunked` then multiple writes must be composed using `writer` and
* `flatMap` to have the appropriate backpressure semantics.
*
* Attempting to `write` after calling `close` will result in a thrown
* [[com.twitter.io.Reader.ReaderDiscarded]].
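*
* A minimal sketch of composing chunked writes (illustrative; `chunk1` and `chunk2` are
* assumed to be com.twitter.io.Buf values already in scope):
* {{{
*   writer.write(chunk1) flatMap { _ => writer.write(chunk2) } flatMap { _ => writer.close() }
* }}}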
*/
@throws(classOf[BufReader.ReaderDiscarded])
@throws(classOf[IllegalStateException])
def write(buffer: ChannelBuffer) {
if (isChunked) writeChunk(buffer) else {
getContent match {
case ChannelBuffers.EMPTY_BUFFER =>
setContent(buffer)
case content =>
setContent(ChannelBuffers.wrappedBuffer(content, buffer))
}
}
}
/**
* Use content as OutputStream. Content is replaced with stream contents.
* (Java users can use this with a Function, or use Netty's ChannelBufferOutputStream
* and then call setContent() with the underlying buffer.)
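*
* A minimal usage sketch (illustrative; `response` stands for any concrete Request or Response):
* {{{
*   response.withOutputStream { os =>
*     os.write("hello".getBytes("UTF-8"))
*   }
* }}}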
*/
def withOutputStream[T](f: OutputStream => T): T = {
// Use buffer size of 1024. Netty default is 256, which seems too small.
// Netty doubles buffers on resize.
val outputStream = new ChannelBufferOutputStream(ChannelBuffers.dynamicBuffer(1024))
val result = f(outputStream) // throws
outputStream.close()
write(outputStream.buffer)
result
}
/** Use as a Writer. Content is replaced with writer contents. */
def withWriter[T](f: Writer => T): T = {
withOutputStream { outputStream =>
val writer = new OutputStreamWriter(outputStream, Message.Utf8)
val result = f(writer)
writer.close()
// withOutputStream will write()
result
}
}
/** Clear content (set to ""). */
def clearContent() {
setContent(ChannelBuffers.EMPTY_BUFFER)
}
/** End the response stream. */
def close() = writer.close()
private[this] def writeChunk(buf: ChannelBuffer) {
if (buf.readable) {
val future = writer.write(new ChannelBufferBuf(buf))
// Unwraps the future in the Return case, or throws exception in the Throw case.
if (future.isDefined) Await.result(future)
}
}
}
object Message {
private[http] val Utf8 = Charset.forName("UTF-8")
@deprecated("Use MediaType.Json", "6.1.5")
val MediaTypeJson = "application/json"
@deprecated("Use MediaType.Javascript", "6.1.5")
val MediaTypeJavascript = "application/javascript"
@deprecated("Use MediaType.WwwForm", "6.1.5")
val MediaTypeWwwForm = "application/x-www-form-urlencoded"
val CharsetUtf8 = "charset=utf-8"
val ContentTypeJson = MediaType.Json + ";" + CharsetUtf8
val ContentTypeJavascript = MediaType.Javascript + ";" + CharsetUtf8
val ContentTypeWwwFrom = MediaType.WwwForm + ";" + CharsetUtf8
private val HttpDateFormat = FastDateFormat.getInstance("EEE, dd MMM yyyy HH:mm:ss",
TimeZone.getTimeZone("GMT"))
def httpDateFormat(date: Date): String =
HttpDateFormat.format(date) + " GMT"
}
| lysu/finagle | finagle-http/src/main/scala/com/twitter/finagle/http/Message.scala | Scala | apache-2.0 | 15,984 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution.atomic
/** For applying padding to atomic references, in order to reduce
* cache contention. JEP 142 should reduce the need for this along
* with the `@Contended` annotation, however that might have
* security restrictions, the runtime might not act on it since it's
* just a recommendation, plus it's nice to provide backwards
* compatibility.
*
* See: [[http://mail.openjdk.java.net/pipermail/hotspot-dev/2012-November/007309.html]]
*
* The default strategy is [[PaddingStrategy.NoPadding NoPadding]].
* In order to apply padding:
* {{{
* import monix.execution.atomic.Atomic
* import monix.execution.atomic.PaddingStrategy.Right64
*
* val paddedAtomic = Atomic.withPadding(10, Right64)
* }}}
*
* @see [[PaddingStrategy.NoPadding]]
* @see [[PaddingStrategy.Left64]]
* @see [[PaddingStrategy.Right64]]
* @see [[PaddingStrategy.LeftRight128]]
* @see [[PaddingStrategy.Left128]]
* @see [[PaddingStrategy.Right128]]
* @see [[PaddingStrategy.LeftRight256]]
*/
sealed abstract class PaddingStrategy
object PaddingStrategy {
/** A [[PaddingStrategy]] that specifies no padding should be applied.
* This is the default.
*/
case object NoPadding extends PaddingStrategy
/** A [[PaddingStrategy]] that applies padding to the left of our
* atomic value for a total cache line of 64 bytes (8 longs).
*
* Note the actual padding applied will be less, like 48 or 52 bytes,
* because we take into account the object's header and the stored value.
*/
case object Left64 extends PaddingStrategy
/** A [[PaddingStrategy]] that applies padding to the right of our
* atomic value for a total cache line of 64 bytes (8 longs).
*
* Note the actual padding applied will be less, like 48 or 52 bytes,
* because we take into account the object's header and the stored value.
*/
case object Right64 extends PaddingStrategy
/** A [[PaddingStrategy]] that applies padding both to the left
* and to the right of our atomic value for a total cache
* line of 128 bytes (16 longs).
*
* Note the actual padding applied will be less, like 112 or 116 bytes,
* because we take into account the object's header and the stored value.
*/
case object LeftRight128 extends PaddingStrategy
/** A [[PaddingStrategy]] that applies padding to the left of our
* atomic value for a total cache line of 128 bytes (16 longs).
*
* Note the actual padding applied will be less, like 112 bytes,
* because we take into account the object's header and the stored value.
*/
case object Left128 extends PaddingStrategy
/** A [[PaddingStrategy]] that applies padding to the right of our
* atomic value for a total cache line of 128 bytes (16 longs).
*
* Note the actual padding applied will be less, like 112 bytes,
* because we take into account the object's header and the stored value.
*/
case object Right128 extends PaddingStrategy
/** A [[PaddingStrategy]] that applies padding both to the left
* and to the right of our atomic value for a total cache
* line of 256 bytes (32 longs).
*
* Note the actual padding applied will be less, like 232/240 bytes,
* because we take into account the object's header and the stored value.
*/
case object LeftRight256 extends PaddingStrategy
}
| alexandru/monifu | monix-execution/shared/src/main/scala/monix/execution/atomic/PaddingStrategy.scala | Scala | apache-2.0 | 4,095 |
/*
Stratagem is a model checker for transition systems described using rewriting
rules and strategies.
Copyright (C) 2013 - SMV@Geneva University.
Program written by Edmundo Lopez Bobeda <edmundo [at] lopezbobeda.net>.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
package ch.unige.cui.smv.stratagem.transformers
import java.io.File
import ch.unige.cui.smv.stratagem.ts.TransitionSystem
/**
* All classes inheriting from this one transform some model into a transition system.
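* For example (an illustrative sketch; `myLoader` stands for a hypothetical concrete subclass):
* {{{
*   val myLoader: Model2TransitionSystem = ???
*   val ts: TransitionSystem = myLoader(new File("model1.xml"), new File("model2.xml"))
* }}}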
* @author mundacho
*
*/
abstract class Model2TransitionSystem {
type ModelType
type PreprocessedModelType
val file2Model: (File*) => ModelType
val modelPreprocessor: ModelType => PreprocessedModelType
val preprocessedModel2TransitionSystem: (PreprocessedModelType) => TransitionSystem
def apply(files: File*): TransitionSystem = {
val model = file2Model(files: _*)
val preprocessedModel = modelPreprocessor(model)
preprocessedModel2TransitionSystem(preprocessedModel)
}
}
| didierbuchs/oldstratagem | src/main/scala/ch/unige/cui/smv/stratagem/transformers/Model2TransitionSystem.scala | Scala | gpl-2.0 | 1,618 |
/*
* Copyright 2018 Analytics Zoo Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.zoo.pipeline.api.keras.layers
import com.intel.analytics.bigdl.nn.abstractnn.AbstractModule
import com.intel.analytics.bigdl.tensor.Tensor
import com.intel.analytics.bigdl.utils.Shape
import com.intel.analytics.zoo.pipeline.api.keras.models.Sequential
import com.intel.analytics.zoo.pipeline.api.keras.serializer.ModuleSerializationTest
class GlobalMaxPooling1DSpec extends KerasBaseSpec{
"GlobalMaxPooling1D" should "be the same as Keras" in {
val kerasCode =
"""
|input_tensor = Input(shape=[3, 24])
|input = np.random.random([2, 3, 24])
|output_tensor = GlobalMaxPooling1D()(input_tensor)
|model = Model(input=input_tensor, output=output_tensor)
""".stripMargin
val seq = Sequential[Float]()
val layer = GlobalMaxPooling1D[Float](inputShape = Shape(3, 24))
seq.add(layer)
seq.getOutputShape().toSingle().toArray should be (Array(-1, 24))
checkOutputAndGrad(seq.asInstanceOf[AbstractModule[Tensor[Float], Tensor[Float], Float]],
kerasCode)
}
}
class GlobalMaxPooling1DSerialTest extends ModuleSerializationTest {
override def test(): Unit = {
val layer = GlobalMaxPooling1D[Float](inputShape = Shape(12, 24))
layer.build(Shape(2, 12, 24))
val input = Tensor[Float](2, 12, 24).rand()
runSerializationTest(layer, input)
}
}
| intel-analytics/analytics-zoo | zoo/src/test/scala/com/intel/analytics/zoo/pipeline/api/keras/layers/GlobalMaxPooling1DSpec.scala | Scala | apache-2.0 | 1,963 |
package controllers
import play.api.libs.functional.syntax._
import play.api.libs.json.Reads._
import play.api.libs.json._
import models.entities.{Attachment => att}
/**
* Created by artem on 29.11.14.
*/
object AttachmentController extends JsonSerializerController with Secured {
/**
* Generate new attachment
* uuid of repository
* uuid of entity
* @return
*/
def gen = Auth.async() { implicit user => implicit request => !>>(((__ \ "name").read[String] ~
(__ \ "url").read[String] ~ (__ \ "entity").read[String] ~ (__ \ "repo").read[String])((name: String, url: String, entity: String, repo: String) => att gen(name, url, entity, repo)))
}
/**
* List of attachments
* @param repo - uuid of repository
* @return
*/
def list(repo: String) = Auth.async(parse.anyContent) { implicit user => implicit request => >>!(att list repo)}
/**
* List entities attachments
* @param entity - uuid of entity
* @return
*/
def byEntity(entity: String) = Auth.async(parse.anyContent) { implicit user => implicit request => >>!(att byEntity entity)}
/**
* Update attachment by uuid
* @return
*/
def update = Auth.async() { implicit user => implicit request => !>>(((__ \ "uuid").read[String] ~ (__ \ "name").read[String] ~
(__ \ "url").read[String])((uuid: String, name: String, url: String) => att update(uuid, name, url)))
}
/**
* Delete attachment by uuid
* @param uuid - uuid of attachment
* @return
*/
def del(uuid: String) = Auth.async() { implicit user => implicit request => !>>(att del uuid)}
}
| arakcheev/wbox | app/controllers/AttachmentController.scala | Scala | apache-2.0 | 1,589 |
package io.plasmap.model
sealed trait OsmRole
case object OsmRoleInner extends OsmRole
case object OsmRoleOuter extends OsmRole
case object OsmRoleEmpty extends OsmRole
case class OsmRoleOther(value:String) extends OsmRole
| plasmap/geow | src/main/scala/io/plasmap/model/OsmRole.scala | Scala | apache-2.0 | 223 |
package io.iohk.ethereum.blockchain.sync
import akka.actor.{ActorRef, ActorSystem, Props}
import akka.testkit.TestActor.AutoPilot
import akka.testkit.{TestActorRef, TestProbe}
import akka.util.ByteString
import io.iohk.ethereum.blockchain.sync.fast.FastSync
import io.iohk.ethereum.blockchain.sync.fast.FastSync.SyncState
import io.iohk.ethereum.consensus.TestConsensus
import io.iohk.ethereum.consensus.blocks.CheckpointBlockGenerator
import io.iohk.ethereum.consensus.validators.BlockHeaderError.HeaderPoWError
import io.iohk.ethereum.consensus.validators.{BlockHeaderValid, BlockHeaderValidator, Validators}
import io.iohk.ethereum.domain.{Account, BlockBody, BlockHeader, ChainWeight, Receipt}
import io.iohk.ethereum.ledger.Ledger
import io.iohk.ethereum.ledger.Ledger.VMImpl
import io.iohk.ethereum.network.EtcPeerManagerActor
import io.iohk.ethereum.network.EtcPeerManagerActor.{HandshakedPeers, SendMessage}
import io.iohk.ethereum.network.PeerEventBusActor.PeerEvent.MessageFromPeer
import io.iohk.ethereum.network.p2p.messages.PV62.GetBlockBodies.GetBlockBodiesEnc
import io.iohk.ethereum.network.p2p.messages.PV62.GetBlockHeaders.GetBlockHeadersEnc
import io.iohk.ethereum.network.p2p.messages.PV62._
import io.iohk.ethereum.network.p2p.messages.PV63.GetNodeData.GetNodeDataEnc
import io.iohk.ethereum.network.p2p.messages.PV63.GetReceipts.GetReceiptsEnc
import io.iohk.ethereum.network.p2p.messages.PV63.{NodeData, Receipts}
import io.iohk.ethereum.utils.Config.SyncConfig
import io.iohk.ethereum.{Fixtures, Mocks}
import org.bouncycastle.util.encoders.Hex
import org.scalamock.scalatest.MockFactory
import org.scalatest.BeforeAndAfter
import org.scalatest.concurrent.Eventually
import org.scalatest.concurrent.PatienceConfiguration.Timeout
import org.scalatest.flatspec.AnyFlatSpec
import org.scalatest.matchers.should.Matchers
import org.scalatest.time.{Seconds, Span}
import scala.concurrent.Await
import scala.concurrent.duration._
// scalastyle:off file.size.limit
class SyncControllerSpec extends AnyFlatSpec with Matchers with BeforeAndAfter with MockFactory with Eventually {
implicit var system: ActorSystem = _
before {
system = ActorSystem("SyncControllerSpec_System")
}
after {
Await.result(system.terminate(), 10.seconds)
}
"SyncController" should "download pivot block and request block headers" in new TestSetup() {
syncController ! SyncProtocol.Start
val handshakedPeers = HandshakedPeers(twoAcceptedPeers)
setupAutoPilot(etcPeerManager, handshakedPeers, defaultPivotBlockHeader, BlockchainData(Seq()))
eventually(timeout = eventuallyTimeOut) {
val syncState = storagesInstance.storages.fastSyncStateStorage.getSyncState().get
syncState.bestBlockHeaderNumber shouldBe 0
syncState.pivotBlock == defaultPivotBlockHeader
}
}
it should "download better pivot block, request state, blocks and finish when downloaded" in new TestSetup() {
startWithState(defaultStateBeforeNodeRestart)
syncController ! SyncProtocol.Start
val handshakedPeers = HandshakedPeers(singlePeer)
val newBlocks =
getHeaders(defaultStateBeforeNodeRestart.bestBlockHeaderNumber + 1, syncConfig.blockHeadersPerRequest)
setupAutoPilot(etcPeerManager, handshakedPeers, defaultPivotBlockHeader, BlockchainData(newBlocks))
val watcher = TestProbe()
watcher.watch(syncController)
eventually(timeout = eventuallyTimeOut) {
//switch to regular download
val children = syncController.children
assert(storagesInstance.storages.appStateStorage.isFastSyncDone())
assert(children.exists(ref => ref.path.name == "regular-sync"))
assert(blockchain.getBestBlockNumber() == defaultPivotBlockHeader.number)
}
}
it should "gracefully handle receiving empty receipts while syncing" in new TestSetup() {
startWithState(defaultStateBeforeNodeRestart)
syncController ! SyncProtocol.Start
val handshakedPeers = HandshakedPeers(singlePeer)
val watcher = TestProbe()
watcher.watch(syncController)
val newBlocks =
getHeaders(defaultStateBeforeNodeRestart.bestBlockHeaderNumber + 1, syncConfig.blockHeadersPerRequest)
setupAutoPilot(
etcPeerManager,
handshakedPeers,
defaultPivotBlockHeader,
BlockchainData(newBlocks),
failedReceiptsTries = 1
)
eventually(timeout = eventuallyTimeOut) {
assert(storagesInstance.storages.appStateStorage.isFastSyncDone())
//switch to regular download
val children = syncController.children
assert(children.exists(ref => ref.path.name == "regular-sync"))
assert(blockchain.getBestBlockNumber() == defaultPivotBlockHeader.number)
}
}
it should "handle blocks that fail validation" in new TestSetup(
_validators = new Mocks.MockValidatorsAlwaysSucceed {
override val blockHeaderValidator: BlockHeaderValidator = { (blockHeader, getBlockHeaderByHash) =>
Left(HeaderPoWError)
}
}
) {
startWithState(
defaultStateBeforeNodeRestart.copy(nextBlockToFullyValidate =
defaultStateBeforeNodeRestart.bestBlockHeaderNumber + 1
)
)
syncController ! SyncProtocol.Start
val handshakedPeers = HandshakedPeers(singlePeer)
val newBlocks =
getHeaders(defaultStateBeforeNodeRestart.bestBlockHeaderNumber + 1, syncConfig.blockHeadersPerRequest)
setupAutoPilot(etcPeerManager, handshakedPeers, defaultPivotBlockHeader, BlockchainData(newBlocks), 0, 0)
val watcher = TestProbe()
watcher.watch(syncController)
eventually(timeout = eventuallyTimeOut) {
val syncState = storagesInstance.storages.fastSyncStateStorage.getSyncState().get
syncState.bestBlockHeaderNumber shouldBe (defaultStateBeforeNodeRestart.bestBlockHeaderNumber - syncConfig.fastSyncBlockValidationN)
syncState.nextBlockToFullyValidate shouldBe (defaultStateBeforeNodeRestart.bestBlockHeaderNumber - syncConfig.fastSyncBlockValidationN + 1)
syncState.blockBodiesQueue.isEmpty shouldBe true
syncState.receiptsQueue.isEmpty shouldBe true
}
}
it should "rewind fast-sync state if received header have no known parent" in new TestSetup() {
startWithState(defaultStateBeforeNodeRestart)
syncController ! SyncProtocol.Start
val handshakedPeers = HandshakedPeers(singlePeer)
val newBlocks = Seq(
defaultPivotBlockHeader.copy(
number = defaultStateBeforeNodeRestart.bestBlockHeaderNumber + 1,
parentHash = ByteString(1, 2, 3)
)
)
setupAutoPilot(etcPeerManager, handshakedPeers, defaultPivotBlockHeader, BlockchainData(newBlocks))
val watcher = TestProbe()
watcher.watch(syncController)
eventually(timeout = eventuallyTimeOut) {
val syncState = storagesInstance.storages.fastSyncStateStorage.getSyncState().get
syncState.bestBlockHeaderNumber shouldBe (defaultStateBeforeNodeRestart.bestBlockHeaderNumber - syncConfig.fastSyncBlockValidationN)
syncState.nextBlockToFullyValidate shouldBe (defaultStateBeforeNodeRestart.bestBlockHeaderNumber - syncConfig.fastSyncBlockValidationN + 1)
syncState.blockBodiesQueue.isEmpty shouldBe true
syncState.receiptsQueue.isEmpty shouldBe true
}
}
it should "not change best block after receiving faraway block" in new TestSetup {
startWithState(defaultStateBeforeNodeRestart)
syncController ! SyncProtocol.Start
val handshakedPeers = HandshakedPeers(singlePeer)
val watcher = TestProbe()
watcher.watch(syncController)
val newBlocks =
getHeaders(defaultStateBeforeNodeRestart.bestBlockHeaderNumber + 1, syncConfig.blockHeadersPerRequest)
setupAutoPilot(etcPeerManager, handshakedPeers, defaultPivotBlockHeader, BlockchainData(newBlocks))
val fast = syncController.getSingleChild("fast-sync")
// Send block that is way forward, we should ignore that block and blacklist that peer
val futureHeaders = Seq(defaultPivotBlockHeader.copy(number = defaultPivotBlockHeader.number + 20))
val futureHeadersMessage = PeerRequestHandler.ResponseReceived(peer1, BlockHeaders(futureHeaders), 2L)
implicit val ec = system.dispatcher
system.scheduler.scheduleAtFixedRate(0.seconds, 0.1.second, fast, futureHeadersMessage)
eventually(timeout = eventuallyTimeOut) {
storagesInstance.storages.fastSyncStateStorage.getSyncState().get.pivotBlock shouldBe defaultPivotBlockHeader
}
// even though we receive this future headers fast sync should finish
eventually(timeout = eventuallyTimeOut) {
assert(storagesInstance.storages.appStateStorage.isFastSyncDone())
}
}
it should "update pivot block if pivot fail" in new TestSetup(_validators = new Mocks.MockValidatorsAlwaysSucceed {
override val blockHeaderValidator: BlockHeaderValidator = { (blockHeader, getBlockHeaderByHash) =>
{
if (blockHeader.number != 399500 + 10) {
Right(BlockHeaderValid)
} else {
Left(HeaderPoWError)
}
}
}
}) {
startWithState(defaultStateBeforeNodeRestart)
syncController ! SyncProtocol.Start
val handshakedPeers = HandshakedPeers(singlePeer)
val newPivot = defaultPivotBlockHeader.copy(number = defaultPivotBlockHeader.number + 20)
val peerWithNewPivot = defaultPeer1Info.copy(maxBlockNumber = bestBlock + 20)
val newHanshaked = HandshakedPeers(Map(peer1 -> peerWithNewPivot))
val newBest = 399500 + 9
val newBlocks =
getHeaders(defaultStateBeforeNodeRestart.bestBlockHeaderNumber + 1, syncConfig.blockHeadersPerRequest)
val autopilot = setupAutoPilot(etcPeerManager, handshakedPeers, defaultPivotBlockHeader, BlockchainData(newBlocks))
eventually(timeout = eventuallyTimeOut) {
storagesInstance.storages.fastSyncStateStorage.getSyncState().get.pivotBlock shouldBe defaultPivotBlockHeader
}
autopilot.updateAutoPilot(newHanshaked, newPivot, BlockchainData(newBlocks))
val watcher = TestProbe()
watcher.watch(syncController)
eventually(timeout = eventuallyTimeOut) {
val syncState = storagesInstance.storages.fastSyncStateStorage.getSyncState().get
syncState.pivotBlock shouldBe newPivot
syncState.safeDownloadTarget shouldEqual newPivot.number + syncConfig.fastSyncBlockValidationX
syncState.blockBodiesQueue.isEmpty shouldBe true
syncState.receiptsQueue.isEmpty shouldBe true
syncState.bestBlockHeaderNumber shouldBe (newBest - syncConfig.fastSyncBlockValidationN)
}
}
it should "not process, out of date new pivot block" in new TestSetup() {
startWithState(defaultStateBeforeNodeRestart)
syncController ! SyncProtocol.Start
val staleNewPeer1Info = defaultPeer1Info.copy(maxBlockNumber = bestBlock - 2)
val staleHeader = defaultPivotBlockHeader.copy(number = defaultPivotBlockHeader.number - 2)
val staleHandshakedPeers = HandshakedPeers(Map(peer1 -> staleNewPeer1Info))
val freshHeader = defaultPivotBlockHeader
val freshPeerInfo1 = defaultPeer1Info
val freshHandshakedPeers = HandshakedPeers(Map(peer1 -> freshPeerInfo1))
val watcher = TestProbe()
watcher.watch(syncController)
val newBlocks =
getHeaders(defaultStateBeforeNodeRestart.bestBlockHeaderNumber + 1, syncConfig.blockHeadersPerRequest)
val pilot =
setupAutoPilot(etcPeerManager, staleHandshakedPeers, staleHeader, BlockchainData(newBlocks), onlyPivot = true)
eventually(timeout = eventuallyTimeOut) {
storagesInstance.storages.fastSyncStateStorage.getSyncState().get.pivotBlockUpdateFailures shouldBe 1
}
pilot.updateAutoPilot(freshHandshakedPeers, freshHeader, BlockchainData(newBlocks), onlyPivot = true)
eventually(timeout = eventuallyTimeOut) {
storagesInstance.storages.fastSyncStateStorage.getSyncState().get.pivotBlock shouldBe defaultPivotBlockHeader
}
}
it should "start state download only when pivot block is fresh enough" in new TestSetup() {
startWithState(defaultStateBeforeNodeRestart)
syncController ! SyncProtocol.Start
val freshHeader = defaultPivotBlockHeader.copy(number = defaultPivotBlockHeader.number + 9)
val freshPeerInfo1 = defaultPeer1Info.copy(maxBlockNumber = bestBlock + 9)
val freshHandshakedPeers = HandshakedPeers(Map(peer1 -> freshPeerInfo1))
val watcher = TestProbe()
watcher.watch(syncController)
val newBlocks = getHeaders(defaultStateBeforeNodeRestart.bestBlockHeaderNumber + 1, 50)
val pilot = setupAutoPilot(etcPeerManager, freshHandshakedPeers, freshHeader, BlockchainData(newBlocks))
eventually(timeout = longeventuallyTimeOut) {
storagesInstance.storages.fastSyncStateStorage
.getSyncState()
.get
.bestBlockHeaderNumber shouldBe freshHeader.number + syncConfig.fastSyncBlockValidationX
}
val freshHeader1 = defaultPivotBlockHeader.copy(number = defaultPivotBlockHeader.number + 19)
val freshPeerInfo1a = defaultPeer1Info.copy(maxBlockNumber = bestBlock + 19)
val freshHandshakedPeers1 = HandshakedPeers(Map(peer1 -> freshPeerInfo1a))
// set up new received header previously received header will need update
pilot.updateAutoPilot(freshHandshakedPeers1, freshHeader1, BlockchainData(newBlocks))
eventually(timeout = longeventuallyTimeOut) {
storagesInstance.storages.fastSyncStateStorage
.getSyncState()
.get
.bestBlockHeaderNumber shouldBe freshHeader1.number + syncConfig.fastSyncBlockValidationX
}
eventually(timeout = longeventuallyTimeOut) {
assert(storagesInstance.storages.appStateStorage.isFastSyncDone())
//switch to regular download
val children = syncController.children
assert(children.exists(ref => ref.path.name == "regular-sync"))
assert(blockchain.getBestBlockNumber() == freshHeader1.number)
}
}
it should "re-enqueue block bodies when empty response is received" in new TestSetup {
startWithState(defaultStateBeforeNodeRestart)
syncController ! SyncProtocol.Start
val handshakedPeers = HandshakedPeers(singlePeer)
val watcher = TestProbe()
watcher.watch(syncController)
val newBlocks =
getHeaders(defaultStateBeforeNodeRestart.bestBlockHeaderNumber + 1, syncConfig.blockHeadersPerRequest)
setupAutoPilot(
etcPeerManager,
handshakedPeers,
defaultPivotBlockHeader,
BlockchainData(newBlocks),
failedBodiesTries = 1
)
eventually(timeout = eventuallyTimeOut) {
assert(storagesInstance.storages.appStateStorage.isFastSyncDone())
//switch to regular download
val children = syncController.children
assert(children.exists(ref => ref.path.name == "regular-sync"))
assert(blockchain.getBestBlockNumber() == defaultPivotBlockHeader.number)
}
}
it should "update pivot block during state sync if it goes stale" in new TestSetup() {
startWithState(defaultStateBeforeNodeRestart)
syncController ! SyncProtocol.Start
val handshakedPeers = HandshakedPeers(singlePeer)
val newBlocks =
getHeaders(defaultStateBeforeNodeRestart.bestBlockHeaderNumber + 1, 50)
val pilot = setupAutoPilot(
etcPeerManager,
handshakedPeers,
defaultPivotBlockHeader,
BlockchainData(newBlocks),
failedNodeRequest = true
)
// choose first pivot and as it is fresh enough start state sync
eventually(timeout = eventuallyTimeOut) {
val syncState = storagesInstance.storages.fastSyncStateStorage.getSyncState().get
syncState.isBlockchainWorkFinished shouldBe true
syncState.updatingPivotBlock shouldBe false
stateDownloadStarted shouldBe true
}
val peerWithBetterBlock = defaultPeer1Info.copy(maxBlockNumber = bestBlock + syncConfig.maxPivotBlockAge)
val newHandshakedPeers = HandshakedPeers(Map(peer1 -> peerWithBetterBlock))
val newPivot = defaultPivotBlockHeader.copy(number = defaultPivotBlockHeader.number + syncConfig.maxPivotBlockAge)
pilot.updateAutoPilot(
newHandshakedPeers,
newPivot,
BlockchainData(newBlocks),
failedNodeRequest = true
)
// sync to new pivot
eventually(timeout = eventuallyTimeOut) {
val syncState = storagesInstance.storages.fastSyncStateStorage.getSyncState().get
syncState.pivotBlock shouldBe newPivot
}
// enable peer to respond with mpt nodes
pilot.updateAutoPilot(newHandshakedPeers, newPivot, BlockchainData(newBlocks))
val watcher = TestProbe()
watcher.watch(syncController)
eventually(timeout = longeventuallyTimeOut) {
//switch to regular download
val children = syncController.children
assert(storagesInstance.storages.appStateStorage.isFastSyncDone())
assert(children.exists(ref => ref.path.name == "regular-sync"))
assert(blockchain.getBestBlockNumber() == newPivot.number)
}
}
class TestSetup(
blocksForWhichLedgerFails: Seq[BigInt] = Nil,
_validators: Validators = new Mocks.MockValidatorsAlwaysSucceed
) extends EphemBlockchainTestSetup
with TestSyncPeers
with TestSyncConfig {
@volatile
var stateDownloadStarted = false
val eventuallyTimeOut: Timeout = Timeout(Span(10, Seconds))
val longeventuallyTimeOut = Timeout(Span(30, Seconds))
//+ cake overrides
override implicit lazy val system: ActorSystem = SyncControllerSpec.this.system
override lazy val vm: VMImpl = new VMImpl
override lazy val validators: Validators = _validators
override lazy val consensus: TestConsensus = buildTestConsensus().withValidators(validators)
override lazy val ledger: Ledger = mock[Ledger]
//+ cake overrides
val etcPeerManager = TestProbe()
val peerMessageBus = TestProbe()
val pendingTransactionsManager = TestProbe()
val checkpointBlockGenerator = new CheckpointBlockGenerator()
val ommersPool = TestProbe()
override def defaultSyncConfig: SyncConfig = super.defaultSyncConfig.copy(
doFastSync = true,
branchResolutionRequestSize = 20,
checkForNewBlockInterval = 1.second,
blockHeadersPerRequest = 10,
blockBodiesPerRequest = 10,
minPeersToChoosePivotBlock = 1,
peersScanInterval = 1.second,
redownloadMissingStateNodes = false,
fastSyncBlockValidationX = 10,
blacklistDuration = 1.second,
peerResponseTimeout = 2.seconds,
persistStateSnapshotInterval = 0.1.seconds,
fastSyncThrottle = 10.milliseconds,
maxPivotBlockAge = 30
)
lazy val syncController = TestActorRef(
Props(
new SyncController(
storagesInstance.storages.appStateStorage,
blockchain,
storagesInstance.storages.fastSyncStateStorage,
ledger,
validators,
peerMessageBus.ref,
pendingTransactionsManager.ref,
checkpointBlockGenerator,
ommersPool.ref,
etcPeerManager.ref,
syncConfig,
externalSchedulerOpt = Some(system.scheduler)
)
)
)
val EmptyTrieRootHash: ByteString = Account.EmptyStorageRootHash
val baseBlockHeader = Fixtures.Blocks.Genesis.header
blockchain.storeChainWeight(baseBlockHeader.parentHash, ChainWeight.zero).commit()
val startDelayMillis = 200
case class BlockchainData(
headers: Map[BigInt, BlockHeader],
bodies: Map[ByteString, BlockBody],
receipts: Map[ByteString, Seq[Receipt]]
)
object BlockchainData {
def apply(headers: Seq[BlockHeader]): BlockchainData = {
// assumes headers are correct chain
headers.foldLeft(new BlockchainData(Map.empty, Map.empty, Map.empty)) { (state, header) =>
state.copy(
headers = state.headers + (header.number -> header),
bodies = state.bodies + (header.hash -> BlockBody.empty),
receipts = state.receipts + (header.hash -> Seq.empty)
)
}
}
}
// scalastyle:off method.length
case class SyncStateAutoPilot(
handshakedPeers: HandshakedPeers,
pivotHeader: BlockHeader,
blockchainData: BlockchainData,
failedReceiptsTries: Int,
failedBodiesTries: Int,
onlyPivot: Boolean,
failedNodeRequest: Boolean,
autoPilotProbeRef: ActorRef
) extends AutoPilot {
override def run(sender: ActorRef, msg: Any): AutoPilot = {
msg match {
case EtcPeerManagerActor.GetHandshakedPeers =>
sender ! handshakedPeers
this
case SendMessage(msg: GetBlockHeadersEnc, peer) =>
val underlyingMessage = msg.underlyingMsg
if (underlyingMessage.maxHeaders == 1) {
// pivot block
sender ! MessageFromPeer(BlockHeaders(Seq(pivotHeader)), peer)
} else {
if (!onlyPivot) {
val start = msg.underlyingMsg.block.left.get
val stop = start + msg.underlyingMsg.maxHeaders
val headers = (start until stop).flatMap(i => blockchainData.headers.get(i))
sender ! MessageFromPeer(BlockHeaders(headers), peer)
}
}
this
case SendMessage(msg: GetReceiptsEnc, peer) if !onlyPivot =>
val underlyingMessage = msg.underlyingMsg
if (failedReceiptsTries > 0) {
sender ! MessageFromPeer(Receipts(Seq()), peer)
this.copy(failedReceiptsTries = failedReceiptsTries - 1)
} else {
val rec = msg.underlyingMsg.blockHashes.flatMap(h => blockchainData.receipts.get(h))
sender ! MessageFromPeer(Receipts(rec), peer)
this
}
case SendMessage(msg: GetBlockBodiesEnc, peer) if !onlyPivot =>
val underlyingMessage = msg.underlyingMsg
if (failedBodiesTries > 0) {
sender ! MessageFromPeer(BlockBodies(Seq()), peer)
this.copy(failedBodiesTries = failedBodiesTries - 1)
} else {
val bod = msg.underlyingMsg.hashes.flatMap(h => blockchainData.bodies.get(h))
sender ! MessageFromPeer(BlockBodies(bod), peer)
this
}
case SendMessage(msg: GetNodeDataEnc, peer) if !onlyPivot =>
stateDownloadStarted = true
val underlyingMessage = msg.underlyingMsg
if (!failedNodeRequest) {
sender ! MessageFromPeer(NodeData(Seq(defaultStateMptLeafWithAccount)), peer)
}
this
case AutoPilotUpdateData(peers, pivot, data, failedReceipts, failedBodies, onlyPivot, failedNode) =>
sender ! DataUpdated
this.copy(peers, pivot, data, failedReceipts, failedBodies, onlyPivot, failedNode)
}
}
def updateAutoPilot(
handshakedPeers: HandshakedPeers,
pivotHeader: BlockHeader,
blockchainData: BlockchainData,
failedReceiptsTries: Int = 0,
failedBodiesTries: Int = 0,
onlyPivot: Boolean = false,
failedNodeRequest: Boolean = false
): Unit = {
val sender = TestProbe()
autoPilotProbeRef.tell(
AutoPilotUpdateData(
handshakedPeers,
pivotHeader,
blockchainData,
failedReceiptsTries,
failedBodiesTries,
onlyPivot,
failedNodeRequest
),
sender.ref
)
sender.expectMsg(DataUpdated)
}
}
// scalastyle:off method.length parameter.number
def setupAutoPilot(
testProbe: TestProbe,
handshakedPeers: HandshakedPeers,
pivotHeader: BlockHeader,
blockchainData: BlockchainData,
failedReceiptsTries: Int = 0,
failedBodiesTries: Int = 0,
onlyPivot: Boolean = false,
failedNodeRequest: Boolean = false
): SyncStateAutoPilot = {
val autopilot = SyncStateAutoPilot(
handshakedPeers,
pivotHeader,
blockchainData,
failedReceiptsTries,
failedBodiesTries,
onlyPivot,
failedNodeRequest,
testProbe.ref
)
testProbe.setAutoPilot(autopilot)
autopilot
}
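    // Illustrative wiring (a sketch, not a test case from this spec): install the autopilot on the
    // EtcPeerManager probe and later swap in updated data. The probe name `etcPeerManagerProbe` is
    // hypothetical; the remaining values are assumed to be provided by this fixture.
    //   val pilot = setupAutoPilot(etcPeerManagerProbe, handshakedPeers, defaultPivotBlockHeader,
    //     BlockchainData(getHeaders(1, 10)))
    //   pilot.updateAutoPilot(handshakedPeers, defaultPivotBlockHeader,
    //     BlockchainData(getHeaders(1, 10)), onlyPivot = true)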
case class AutoPilotUpdateData(
handshakedPeers: HandshakedPeers,
pivotHeader: BlockHeader,
blockchainData: BlockchainData,
failedReceiptsTries: Int = 0,
failedBodiesTries: Int = 0,
onlyPivot: Boolean = false,
failedNodeRequest: Boolean = false
)
case object DataUpdated
val defaultExpectedPivotBlock = 399500
val defaultSafeDownloadTarget = defaultExpectedPivotBlock
val defaultBestBlock = defaultExpectedPivotBlock - 1
val defaultStateRoot = "deae1dfad5ec8dcef15915811e1f044d2543674fd648f94345231da9fc2646cc"
val defaultPivotBlockHeader =
baseBlockHeader.copy(number = defaultExpectedPivotBlock, stateRoot = ByteString(Hex.decode(defaultStateRoot)))
val defaultState =
SyncState(
defaultPivotBlockHeader,
safeDownloadTarget = defaultSafeDownloadTarget,
bestBlockHeaderNumber = defaultBestBlock
)
val defaultStateMptLeafWithAccount =
ByteString(
Hex.decode(
"f86d9e328415c225a782bb339b22acad1c739e42277bc7ef34de3623114997ce78b84cf84a0186cb7d8738d800a056e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421a0c5d2460186f7233c927e7db2dcc703c0e500b653ca82273b7bfad8045d85a470"
)
)
val beforeRestartPivot = defaultPivotBlockHeader.copy(number = defaultExpectedPivotBlock - 1)
val defaultStateBeforeNodeRestart = defaultState.copy(
pivotBlock = beforeRestartPivot,
bestBlockHeaderNumber = defaultExpectedPivotBlock,
nextBlockToFullyValidate = beforeRestartPivot.number + syncConfig.fastSyncBlockValidationX
)
def getHeaders(from: BigInt, number: BigInt): Seq[BlockHeader] = {
val headers = (from until from + number).toSeq.map { nr =>
defaultPivotBlockHeader.copy(number = nr)
}
      def genChain(
        parentHash: ByteString,
        headers: Seq[BlockHeader],
        result: Seq[BlockHeader] = Seq.empty
      ): Seq[BlockHeader] = {
        if (headers.isEmpty)
          result
        else {
          val header = headers.head
          val newHeader = header.copy(parentHash = parentHash)
          val newHash = newHeader.hash
          genChain(newHash, headers.tail, result :+ newHeader)
        }
      }
val first = headers.head
first +: genChain(first.hash, headers.tail)
}
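    // A minimal sketch (not part of the original spec): build the BlockchainData that the autopilot
    // serves from a freshly generated header chain. The helper name is hypothetical.
    def getBlockchainData(from: BigInt, number: BigInt): BlockchainData =
      BlockchainData(getHeaders(from, number))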
def startWithState(state: SyncState): Unit = {
storagesInstance.storages.fastSyncStateStorage.putSyncState(state)
}
def persistState(): Unit = {
Thread.sleep(300)
syncController.getSingleChild("fast-sync") ! FastSync.PersistSyncState
Thread.sleep(300)
}
}
}
| input-output-hk/etc-client | src/test/scala/io/iohk/ethereum/blockchain/sync/SyncControllerSpec.scala | Scala | mit | 26,886 |
package org.gark87.yajom.macros
import scala.reflect.macros.Universe
class PredicateToFactory() {
def process[T: y.c.WeakTypeTag](y : YajomContext)(expr: y.c.Expr[T => Boolean], objectFactoryType: y.c.Type): y.c.Tree = {
import y.c.universe._
val creator = y.creator
val onNull = y.createOnNull
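    // Rewrites a single `getter == value` (or `.equals`) comparison into the corresponding setter
    // call on the object under construction, chaining `last` as the result of the generated block.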
def convertEquals(tree: Tree, last: Tree): Tree = {
tree match {
case Apply(Select(qualifier, name), List(arg)) => {
val decoded = name.decoded
if (decoded == "equals" || decoded == "==") {
qualifier match {
case Apply(Select(gQualifier, gName), List()) => {
onNull.findSetter(y)(gQualifier, gName, List(), (s) => {
y.reporter.error("PredicateToFactory works with chained getters only: " + tree)
}, (setterName, returnType) => {
Block(Apply(Select(onNull.process(y)(y.c.Expr[Any](gQualifier), objectFactoryType), newTermName(setterName)), List(arg)), last)
})
}
case _ => y.reporter.error("PredicateToFactory last call before equals should be getter, not: " + qualifier)
}
} else {
y.reporter.error("PredicateToFactory works with && of == or equals: " + tree)
}
}
case _ => {
y.reporter.error("PredicateToFactory works with && of == or equals: " + tree)
}
}
}
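    // Splits the predicate on `&&` and folds every equality into a setter call, threading `last`
    // through as the final expression of the generated block.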
def convertAnd(tree: Tree, last: Tree): Tree = {
tree match {
case Apply(Select(qualifier, name), List(arg)) => {
val decoded = name.decoded
if (decoded == "&&") {
convertAnd(qualifier, convertAnd(arg, last))
} else {
convertEquals(tree, last)
}
}
case _ => y.reporter.error("PredicateToFactory works with one-parameter function only(&& and ==): " + tree)
}
}
val tree: Tree = expr.tree
tree match {
case Function(List(ValDef(mods, name, tpt, rhs)), body) => {
val valDef = ValDef(Modifiers(), name, tpt, creator.createDefaultObject[T](y)(tpt.tpe, objectFactoryType).asInstanceOf[y.c.Tree])
        val converted = convertAnd(body, Ident(name))
        Function(List(), Block(valDef, converted))
}
case _ => y.reporter.error("Unexpected (waiting for function def with one param): " + tree)
}
}
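  // Illustrative shape of an accepted predicate (a sketch; the bean types are hypothetical):
  //   (p: Person) => p.getAddress.getCity == "Oslo" && p.getAge == 42
  // is rewritten into a zero-argument factory that creates the default object, applies the matching
  // setters (`setCity`, `setAge`) along the getter chains (creating intermediate objects where
  // needed), and returns it.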
}
| gark87/yajom | yajom-macros/src/main/scala/org/gark87/yajom/macros/PredicateToFactory.scala | Scala | mit | 2,368 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.streaming
import java.io.File
import java.net.URI
import scala.util.Random
import org.apache.hadoop.fs.{FileStatus, Path, RawLocalFileSystem}
import org.scalatest.PrivateMethodTester
import org.scalatest.concurrent.Eventually._
import org.scalatest.time.SpanSugar._
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.execution.streaming._
import org.apache.spark.sql.execution.streaming.FileStreamSource.{FileEntry, SeenFilesMap}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.streaming.ExistsThrowsExceptionFileSystem._
import org.apache.spark.sql.test.SharedSQLContext
import org.apache.spark.sql.types._
import org.apache.spark.util.Utils
abstract class FileStreamSourceTest
extends StreamTest with SharedSQLContext with PrivateMethodTester {
import testImplicits._
/**
* A subclass [[AddData]] for adding data to files. This is meant to use the
* [[FileStreamSource]] actually being used in the execution.
*/
abstract class AddFileData extends AddData {
override def addData(query: Option[StreamExecution]): (Source, Offset) = {
require(
query.nonEmpty,
"Cannot add data when there is no query for finding the active file stream source")
val sources = query.get.logicalPlan.collect {
case StreamingExecutionRelation(source, _) if source.isInstanceOf[FileStreamSource] =>
source.asInstanceOf[FileStreamSource]
}
if (sources.isEmpty) {
throw new Exception(
"Could not find file source in the StreamExecution logical plan to add data to")
} else if (sources.size > 1) {
        throw new Exception(
          "Could not select the file source in the StreamExecution logical plan as there " +
            "are multiple file sources:\\n\\t" + sources.mkString("\\n\\t"))
}
val source = sources.head
val newOffset = source.withBatchingLocked {
addData(source)
new FileStreamSourceOffset(source.currentLogOffset + 1)
}
logInfo(s"Added file to $source at offset $newOffset")
(source, newOffset)
}
protected def addData(source: FileStreamSource): Unit
}
case class AddTextFileData(content: String, src: File, tmp: File)
extends AddFileData {
override def addData(source: FileStreamSource): Unit = {
val tempFile = Utils.tempFileWith(new File(tmp, "text"))
val finalFile = new File(src, tempFile.getName)
src.mkdirs()
require(stringToFile(tempFile, content).renameTo(finalFile))
logInfo(s"Written text '$content' to file $finalFile")
}
}
case class AddParquetFileData(data: DataFrame, src: File, tmp: File) extends AddFileData {
override def addData(source: FileStreamSource): Unit = {
AddParquetFileData.writeToFile(data, src, tmp)
}
}
object AddParquetFileData {
def apply(seq: Seq[String], src: File, tmp: File): AddParquetFileData = {
AddParquetFileData(seq.toDS().toDF(), src, tmp)
}
/** Write parquet files in a temp dir, and move the individual files to the 'src' dir */
def writeToFile(df: DataFrame, src: File, tmp: File): Unit = {
val tmpDir = Utils.tempFileWith(new File(tmp, "parquet"))
df.write.parquet(tmpDir.getCanonicalPath)
src.mkdirs()
tmpDir.listFiles().foreach { f =>
f.renameTo(new File(src, s"${f.getName}"))
}
}
}
/** Use `format` and `path` to create FileStreamSource via DataFrameReader */
def createFileStream(
format: String,
path: String,
schema: Option[StructType] = None,
options: Map[String, String] = Map.empty): DataFrame = {
val reader =
if (schema.isDefined) {
spark.readStream.format(format).schema(schema.get).options(options)
} else {
spark.readStream.format(format).options(options)
}
reader.load(path)
}
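  // Example call (illustrative only; the path is hypothetical): the tests below build streaming
  // DataFrames with this helper, e.g.
  //   createFileStream("text", "/tmp/stream-src", options = Map("maxFileAge" -> "5ms"))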
protected def getSourceFromFileStream(df: DataFrame): FileStreamSource = {
val checkpointLocation = Utils.createTempDir(namePrefix = "streaming.metadata").getCanonicalPath
df.queryExecution.analyzed
.collect { case StreamingRelation(dataSource, _, _) =>
// There is only one source in our tests so just set sourceId to 0
dataSource.createSource(s"$checkpointLocation/sources/0").asInstanceOf[FileStreamSource]
}.head
}
protected def withTempDirs(body: (File, File) => Unit) {
val src = Utils.createTempDir(namePrefix = "streaming.src")
val tmp = Utils.createTempDir(namePrefix = "streaming.tmp")
try {
body(src, tmp)
} finally {
Utils.deleteRecursively(src)
Utils.deleteRecursively(tmp)
}
}
val valueSchema = new StructType().add("value", StringType)
}
class FileStreamSourceSuite extends FileStreamSourceTest {
import testImplicits._
override val streamingTimeout = 20.seconds
/** Use `format` and `path` to create FileStreamSource via DataFrameReader */
private def createFileStreamSource(
format: String,
path: String,
schema: Option[StructType] = None): FileStreamSource = {
getSourceFromFileStream(createFileStream(format, path, schema))
}
private def createFileStreamSourceAndGetSchema(
format: Option[String],
path: Option[String],
schema: Option[StructType] = None): StructType = {
val reader = spark.readStream
format.foreach(reader.format)
schema.foreach(reader.schema)
val df =
if (path.isDefined) {
reader.load(path.get)
} else {
reader.load()
}
df.queryExecution.analyzed
.collect { case s @ StreamingRelation(dataSource, _, _) => s.schema }.head
}
// ============= Basic parameter exists tests ================
test("FileStreamSource schema: no path") {
def testError(): Unit = {
val e = intercept[IllegalArgumentException] {
createFileStreamSourceAndGetSchema(format = None, path = None, schema = None)
}
assert(e.getMessage.contains("path")) // reason is path, not schema
}
withSQLConf(SQLConf.STREAMING_SCHEMA_INFERENCE.key -> "false") { testError() }
withSQLConf(SQLConf.STREAMING_SCHEMA_INFERENCE.key -> "true") { testError() }
}
test("FileStreamSource schema: path doesn't exist (without schema) should throw exception") {
withTempDir { dir =>
intercept[AnalysisException] {
val userSchema = new StructType().add(new StructField("value", IntegerType))
val schema = createFileStreamSourceAndGetSchema(
format = None, path = Some(new File(dir, "1").getAbsolutePath), schema = None)
}
}
}
test("FileStreamSource schema: path doesn't exist (with schema) should throw exception") {
withTempDir { dir =>
intercept[AnalysisException] {
val userSchema = new StructType().add(new StructField("value", IntegerType))
val schema = createFileStreamSourceAndGetSchema(
format = None, path = Some(new File(dir, "1").getAbsolutePath), schema = Some(userSchema))
}
}
}
// =============== Text file stream schema tests ================
test("FileStreamSource schema: text, no existing files, no schema") {
withTempDir { src =>
val schema = createFileStreamSourceAndGetSchema(
format = Some("text"), path = Some(src.getCanonicalPath), schema = None)
assert(schema === new StructType().add("value", StringType))
}
}
test("FileStreamSource schema: text, existing files, no schema") {
withTempDir { src =>
stringToFile(new File(src, "1"), "a\\nb\\nc")
val schema = createFileStreamSourceAndGetSchema(
format = Some("text"), path = Some(src.getCanonicalPath), schema = None)
assert(schema === new StructType().add("value", StringType))
}
}
test("FileStreamSource schema: text, existing files, schema") {
withTempDir { src =>
stringToFile(new File(src, "1"), "a\\nb\\nc")
val userSchema = new StructType().add("userColumn", StringType)
val schema = createFileStreamSourceAndGetSchema(
format = Some("text"), path = Some(src.getCanonicalPath), schema = Some(userSchema))
assert(schema === userSchema)
}
}
// =============== Parquet file stream schema tests ================
test("FileStreamSource schema: parquet, existing files, no schema") {
withTempDir { src =>
Seq("a", "b", "c").toDS().as("userColumn").toDF().write
.mode(org.apache.spark.sql.SaveMode.Overwrite)
.parquet(src.getCanonicalPath)
// Without schema inference, should throw error
withSQLConf(SQLConf.STREAMING_SCHEMA_INFERENCE.key -> "false") {
intercept[IllegalArgumentException] {
createFileStreamSourceAndGetSchema(
format = Some("parquet"), path = Some(src.getCanonicalPath), schema = None)
}
}
// With schema inference, should infer correct schema
withSQLConf(SQLConf.STREAMING_SCHEMA_INFERENCE.key -> "true") {
val schema = createFileStreamSourceAndGetSchema(
format = Some("parquet"), path = Some(src.getCanonicalPath), schema = None)
assert(schema === new StructType().add("value", StringType))
}
}
}
test("FileStreamSource schema: parquet, existing files, schema") {
withTempPath { src =>
Seq("a", "b", "c").toDS().as("oldUserColumn").toDF()
.write.parquet(new File(src, "1").getCanonicalPath)
val userSchema = new StructType().add("userColumn", StringType)
val schema = createFileStreamSourceAndGetSchema(
format = Some("parquet"), path = Some(src.getCanonicalPath), schema = Some(userSchema))
assert(schema === userSchema)
}
}
// =============== JSON file stream schema tests ================
test("FileStreamSource schema: json, no existing files, no schema") {
withTempDir { src =>
withSQLConf(SQLConf.STREAMING_SCHEMA_INFERENCE.key -> "true") {
val e = intercept[AnalysisException] {
createFileStreamSourceAndGetSchema(
format = Some("json"), path = Some(src.getCanonicalPath), schema = None)
}
assert("Unable to infer schema for JSON. It must be specified manually.;" === e.getMessage)
}
}
}
test("FileStreamSource schema: json, existing files, no schema") {
withTempDir { src =>
// Without schema inference, should throw error
withSQLConf(SQLConf.STREAMING_SCHEMA_INFERENCE.key -> "false") {
intercept[IllegalArgumentException] {
createFileStreamSourceAndGetSchema(
format = Some("json"), path = Some(src.getCanonicalPath), schema = None)
}
}
// With schema inference, should infer correct schema
withSQLConf(SQLConf.STREAMING_SCHEMA_INFERENCE.key -> "true") {
stringToFile(new File(src, "1"), "{'c': '1'}\\n{'c': '2'}\\n{'c': '3'}")
val schema = createFileStreamSourceAndGetSchema(
format = Some("json"), path = Some(src.getCanonicalPath), schema = None)
assert(schema === new StructType().add("c", StringType))
}
}
}
test("FileStreamSource schema: json, existing files, schema") {
withTempDir { src =>
stringToFile(new File(src, "1"), "{'c': '1'}\\n{'c': '2'}\\n{'c', '3'}")
val userSchema = new StructType().add("userColumn", StringType)
val schema = createFileStreamSourceAndGetSchema(
format = Some("json"), path = Some(src.getCanonicalPath), schema = Some(userSchema))
assert(schema === userSchema)
}
}
// =============== Text file stream tests ================
test("read from text files") {
withTempDirs { case (src, tmp) =>
val textStream = createFileStream("text", src.getCanonicalPath)
val filtered = textStream.filter($"value" contains "keep")
testStream(filtered)(
AddTextFileData("drop1\\nkeep2\\nkeep3", src, tmp),
CheckAnswer("keep2", "keep3"),
StopStream,
AddTextFileData("drop4\\nkeep5\\nkeep6", src, tmp),
StartStream(),
CheckAnswer("keep2", "keep3", "keep5", "keep6"),
AddTextFileData("drop7\\nkeep8\\nkeep9", src, tmp),
CheckAnswer("keep2", "keep3", "keep5", "keep6", "keep8", "keep9")
)
}
}
test("read from textfile") {
withTempDirs { case (src, tmp) =>
val textStream = spark.readStream.textFile(src.getCanonicalPath)
val filtered = textStream.filter(_.contains("keep"))
testStream(filtered)(
AddTextFileData("drop1\\nkeep2\\nkeep3", src, tmp),
CheckAnswer("keep2", "keep3"),
StopStream,
AddTextFileData("drop4\\nkeep5\\nkeep6", src, tmp),
StartStream(),
CheckAnswer("keep2", "keep3", "keep5", "keep6"),
AddTextFileData("drop7\\nkeep8\\nkeep9", src, tmp),
CheckAnswer("keep2", "keep3", "keep5", "keep6", "keep8", "keep9")
)
}
}
test("SPARK-17165 should not track the list of seen files indefinitely") {
// This test works by:
// 1. Create a file
// 2. Get it processed
    // 3. Sleep for a short amount of time (larger than maxFileAge)
    // 4. Add another file (at this point the original file should have been purged)
// 5. Test the size of the seenFiles internal data structure
// Note that if we change maxFileAge to a very large number, the last step should fail.
withTempDirs { case (src, tmp) =>
val textStream: DataFrame =
createFileStream("text", src.getCanonicalPath, options = Map("maxFileAge" -> "5ms"))
testStream(textStream)(
AddTextFileData("a\\nb", src, tmp),
CheckAnswer("a", "b"),
        // Sleeps longer than 5ms (maxFileAge)
        // Unfortunately, since many file systems do not have modification time granularity
        // finer than 1 sec, we need to use 1 sec here.
AssertOnQuery { _ => Thread.sleep(1000); true },
AddTextFileData("c\\nd", src, tmp),
CheckAnswer("a", "b", "c", "d"),
AssertOnQuery("seen files should contain only one entry") { streamExecution =>
val source = streamExecution.logicalPlan.collect { case e: StreamingExecutionRelation =>
e.source.asInstanceOf[FileStreamSource]
}.head
assert(source.seenFiles.size == 1)
true
}
)
}
}
// =============== JSON file stream tests ================
test("read from json files") {
withTempDirs { case (src, tmp) =>
val fileStream = createFileStream("json", src.getCanonicalPath, Some(valueSchema))
val filtered = fileStream.filter($"value" contains "keep")
testStream(filtered)(
AddTextFileData(
"{'value': 'drop1'}\\n{'value': 'keep2'}\\n{'value': 'keep3'}",
src,
tmp),
CheckAnswer("keep2", "keep3"),
StopStream,
AddTextFileData(
"{'value': 'drop4'}\\n{'value': 'keep5'}\\n{'value': 'keep6'}",
src,
tmp),
StartStream(),
CheckAnswer("keep2", "keep3", "keep5", "keep6"),
AddTextFileData(
"{'value': 'drop7'}\\n{'value': 'keep8'}\\n{'value': 'keep9'}",
src,
tmp),
CheckAnswer("keep2", "keep3", "keep5", "keep6", "keep8", "keep9")
)
}
}
test("read from json files with inferring schema") {
withTempDirs { case (src, tmp) =>
withSQLConf(SQLConf.STREAMING_SCHEMA_INFERENCE.key -> "true") {
// Add a file so that we can infer its schema
stringToFile(new File(src, "existing"), "{'c': 'drop1'}\\n{'c': 'keep2'}\\n{'c': 'keep3'}")
val fileStream = createFileStream("json", src.getCanonicalPath)
assert(fileStream.schema === StructType(Seq(StructField("c", StringType))))
// FileStreamSource should infer the column "c"
val filtered = fileStream.filter($"c" contains "keep")
testStream(filtered)(
AddTextFileData("{'c': 'drop4'}\\n{'c': 'keep5'}\\n{'c': 'keep6'}", src, tmp),
CheckAnswer("keep2", "keep3", "keep5", "keep6")
)
}
}
}
test("reading from json files inside partitioned directory") {
withTempDirs { case (baseSrc, tmp) =>
withSQLConf(SQLConf.STREAMING_SCHEMA_INFERENCE.key -> "true") {
val src = new File(baseSrc, "type=X")
src.mkdirs()
// Add a file so that we can infer its schema
stringToFile(new File(src, "existing"), "{'c': 'drop1'}\\n{'c': 'keep2'}\\n{'c': 'keep3'}")
val fileStream = createFileStream("json", src.getCanonicalPath)
// FileStreamSource should infer the column "c"
val filtered = fileStream.filter($"c" contains "keep")
testStream(filtered)(
AddTextFileData("{'c': 'drop4'}\\n{'c': 'keep5'}\\n{'c': 'keep6'}", src, tmp),
CheckAnswer("keep2", "keep3", "keep5", "keep6")
)
}
}
}
test("reading from json files with changing schema") {
withTempDirs { case (src, tmp) =>
withSQLConf(SQLConf.STREAMING_SCHEMA_INFERENCE.key -> "true") {
// Add a file so that we can infer its schema
stringToFile(new File(src, "existing"), "{'k': 'value0'}")
val fileStream = createFileStream("json", src.getCanonicalPath)
// FileStreamSource should infer the column "k"
assert(fileStream.schema === StructType(Seq(StructField("k", StringType))))
// After creating DF and before starting stream, add data with different schema
// Should not affect the inferred schema any more
stringToFile(new File(src, "existing2"), "{'k': 'value1', 'v': 'new'}")
testStream(fileStream)(
// Should not pick up column v in the file added before start
AddTextFileData("{'k': 'value2'}", src, tmp),
CheckAnswer("value0", "value1", "value2"),
// Should read data in column k, and ignore v
AddTextFileData("{'k': 'value3', 'v': 'new'}", src, tmp),
CheckAnswer("value0", "value1", "value2", "value3"),
// Should ignore rows that do not have the necessary k column
AddTextFileData("{'v': 'value4'}", src, tmp),
CheckAnswer("value0", "value1", "value2", "value3", null))
}
}
}
// =============== Parquet file stream tests ================
test("read from parquet files") {
withTempDirs { case (src, tmp) =>
val fileStream = createFileStream("parquet", src.getCanonicalPath, Some(valueSchema))
val filtered = fileStream.filter($"value" contains "keep")
testStream(filtered)(
AddParquetFileData(Seq("drop1", "keep2", "keep3"), src, tmp),
CheckAnswer("keep2", "keep3"),
StopStream,
AddParquetFileData(Seq("drop4", "keep5", "keep6"), src, tmp),
StartStream(),
CheckAnswer("keep2", "keep3", "keep5", "keep6"),
AddParquetFileData(Seq("drop7", "keep8", "keep9"), src, tmp),
CheckAnswer("keep2", "keep3", "keep5", "keep6", "keep8", "keep9")
)
}
}
test("read from parquet files with changing schema") {
withTempDirs { case (src, tmp) =>
withSQLConf(SQLConf.STREAMING_SCHEMA_INFERENCE.key -> "true") {
// Add a file so that we can infer its schema
AddParquetFileData.writeToFile(Seq("value0").toDF("k"), src, tmp)
val fileStream = createFileStream("parquet", src.getCanonicalPath)
// FileStreamSource should infer the column "k"
assert(fileStream.schema === StructType(Seq(StructField("k", StringType))))
// After creating DF and before starting stream, add data with different schema
// Should not affect the inferred schema any more
AddParquetFileData.writeToFile(Seq(("value1", 0)).toDF("k", "v"), src, tmp)
testStream(fileStream)(
// Should not pick up column v in the file added before start
AddParquetFileData(Seq("value2").toDF("k"), src, tmp),
CheckAnswer("value0", "value1", "value2"),
// Should read data in column k, and ignore v
AddParquetFileData(Seq(("value3", 1)).toDF("k", "v"), src, tmp),
CheckAnswer("value0", "value1", "value2", "value3"),
// Should ignore rows that do not have the necessary k column
AddParquetFileData(Seq("value5").toDF("v"), src, tmp),
CheckAnswer("value0", "value1", "value2", "value3", null)
)
}
}
}
// =============== file stream globbing tests ================
test("read new files in nested directories with globbing") {
withTempDirs { case (dir, tmp) =>
      // src/*/* should consider all the files and directories that match that glob.
      // So any files that match the glob as well as any files in directories that match
      // this glob should be read.
val fileStream = createFileStream("text", s"${dir.getCanonicalPath}/*/*")
val filtered = fileStream.filter($"value" contains "keep")
val subDir = new File(dir, "subdir")
val subSubDir = new File(subDir, "subsubdir")
val subSubSubDir = new File(subSubDir, "subsubsubdir")
require(!subDir.exists())
require(!subSubDir.exists())
testStream(filtered)(
// Create new dir/subdir and write to it, should read
AddTextFileData("drop1\\nkeep2", subDir, tmp),
CheckAnswer("keep2"),
// Add files to dir/subdir, should read
AddTextFileData("keep3", subDir, tmp),
CheckAnswer("keep2", "keep3"),
// Create new dir/subdir/subsubdir and write to it, should read
AddTextFileData("keep4", subSubDir, tmp),
CheckAnswer("keep2", "keep3", "keep4"),
// Add files to dir/subdir/subsubdir, should read
AddTextFileData("keep5", subSubDir, tmp),
CheckAnswer("keep2", "keep3", "keep4", "keep5"),
// 1. Add file to src dir, should not read as globbing src/*/* does not capture files in
// dir, only captures files in dir/subdir/
// 2. Add files to dir/subDir/subsubdir/subsubsubdir, should not read as src/*/* should
// not capture those files
AddTextFileData("keep6", dir, tmp),
AddTextFileData("keep7", subSubSubDir, tmp),
AddTextFileData("keep8", subDir, tmp), // needed to make query detect new data
CheckAnswer("keep2", "keep3", "keep4", "keep5", "keep8")
)
}
}
test("read new files in partitioned table with globbing, should not read partition data") {
withTempDirs { case (dir, tmp) =>
val partitionFooSubDir = new File(dir, "partition=foo")
val partitionBarSubDir = new File(dir, "partition=bar")
val schema = new StructType().add("value", StringType).add("partition", StringType)
val fileStream = createFileStream("json", s"${dir.getCanonicalPath}/*/*", Some(schema))
val filtered = fileStream.filter($"value" contains "keep")
val nullStr = null.asInstanceOf[String]
testStream(filtered)(
// Create new partition=foo sub dir and write to it, should read only value, not partition
AddTextFileData("{'value': 'drop1'}\\n{'value': 'keep2'}", partitionFooSubDir, tmp),
CheckAnswer(("keep2", nullStr)),
        // Append to same partition=foo sub dir, should read only value, not partition
AddTextFileData("{'value': 'keep3'}", partitionFooSubDir, tmp),
CheckAnswer(("keep2", nullStr), ("keep3", nullStr)),
// Create new partition sub dir and write to it, should read only value, not partition
AddTextFileData("{'value': 'keep4'}", partitionBarSubDir, tmp),
CheckAnswer(("keep2", nullStr), ("keep3", nullStr), ("keep4", nullStr)),
        // Append to same partition=bar sub dir, should read only value, not partition
AddTextFileData("{'value': 'keep5'}", partitionBarSubDir, tmp),
CheckAnswer(("keep2", nullStr), ("keep3", nullStr), ("keep4", nullStr), ("keep5", nullStr))
)
}
}
// =============== other tests ================
test("read new files in partitioned table without globbing, should read partition data") {
withTempDirs { case (dir, tmp) =>
val partitionFooSubDir = new File(dir, "partition=foo")
val partitionBarSubDir = new File(dir, "partition=bar")
val schema = new StructType().add("value", StringType).add("partition", StringType)
val fileStream = createFileStream("json", s"${dir.getCanonicalPath}", Some(schema))
val filtered = fileStream.filter($"value" contains "keep")
testStream(filtered)(
// Create new partition=foo sub dir and write to it
AddTextFileData("{'value': 'drop1'}\\n{'value': 'keep2'}", partitionFooSubDir, tmp),
CheckAnswer(("keep2", "foo")),
// Append to same partition=foo sub dir
AddTextFileData("{'value': 'keep3'}", partitionFooSubDir, tmp),
CheckAnswer(("keep2", "foo"), ("keep3", "foo")),
// Create new partition sub dir and write to it
AddTextFileData("{'value': 'keep4'}", partitionBarSubDir, tmp),
CheckAnswer(("keep2", "foo"), ("keep3", "foo"), ("keep4", "bar")),
// Append to same partition=bar sub dir
AddTextFileData("{'value': 'keep5'}", partitionBarSubDir, tmp),
CheckAnswer(("keep2", "foo"), ("keep3", "foo"), ("keep4", "bar"), ("keep5", "bar"))
)
}
}
test("when schema inference is turned on, should read partition data") {
def createFile(content: String, src: File, tmp: File): Unit = {
val tempFile = Utils.tempFileWith(new File(tmp, "text"))
val finalFile = new File(src, tempFile.getName)
require(!src.exists(), s"$src exists, dir: ${src.isDirectory}, file: ${src.isFile}")
require(src.mkdirs(), s"Cannot create $src")
require(src.isDirectory(), s"$src is not a directory")
require(stringToFile(tempFile, content).renameTo(finalFile))
}
withSQLConf(SQLConf.STREAMING_SCHEMA_INFERENCE.key -> "true") {
withTempDirs { case (dir, tmp) =>
val partitionFooSubDir = new File(dir, "partition=foo")
val partitionBarSubDir = new File(dir, "partition=bar")
// Create file in partition, so we can infer the schema.
createFile("{'value': 'drop0'}", partitionFooSubDir, tmp)
val fileStream = createFileStream("json", s"${dir.getCanonicalPath}")
val filtered = fileStream.filter($"value" contains "keep")
testStream(filtered)(
// Append to same partition=foo sub dir
AddTextFileData("{'value': 'drop1'}\\n{'value': 'keep2'}", partitionFooSubDir, tmp),
CheckAnswer(("keep2", "foo")),
// Append to same partition=foo sub dir
AddTextFileData("{'value': 'keep3'}", partitionFooSubDir, tmp),
CheckAnswer(("keep2", "foo"), ("keep3", "foo")),
// Create new partition sub dir and write to it
AddTextFileData("{'value': 'keep4'}", partitionBarSubDir, tmp),
CheckAnswer(("keep2", "foo"), ("keep3", "foo"), ("keep4", "bar")),
// Append to same partition=bar sub dir
AddTextFileData("{'value': 'keep5'}", partitionBarSubDir, tmp),
CheckAnswer(("keep2", "foo"), ("keep3", "foo"), ("keep4", "bar"), ("keep5", "bar")),
AddTextFileData("{'value': 'keep6'}", partitionBarSubDir, tmp),
CheckAnswer(("keep2", "foo"), ("keep3", "foo"), ("keep4", "bar"), ("keep5", "bar"),
("keep6", "bar"))
)
}
}
}
test("fault tolerance") {
withTempDirs { case (src, tmp) =>
val fileStream = createFileStream("text", src.getCanonicalPath)
val filtered = fileStream.filter($"value" contains "keep")
testStream(filtered)(
AddTextFileData("drop1\\nkeep2\\nkeep3", src, tmp),
CheckAnswer("keep2", "keep3"),
StopStream,
AddTextFileData("drop4\\nkeep5\\nkeep6", src, tmp),
StartStream(),
CheckAnswer("keep2", "keep3", "keep5", "keep6"),
AddTextFileData("drop7\\nkeep8\\nkeep9", src, tmp),
CheckAnswer("keep2", "keep3", "keep5", "keep6", "keep8", "keep9")
)
}
}
test("max files per trigger") {
withTempDir { case src =>
var lastFileModTime: Option[Long] = None
/** Create a text file with a single data item */
def createFile(data: Int): File = {
val file = stringToFile(new File(src, s"$data.txt"), data.toString)
if (lastFileModTime.nonEmpty) file.setLastModified(lastFileModTime.get + 1000)
lastFileModTime = Some(file.lastModified)
file
}
createFile(1)
createFile(2)
createFile(3)
// Set up a query to read text files 2 at a time
val df = spark
.readStream
.option("maxFilesPerTrigger", 2)
.text(src.getCanonicalPath)
val q = df
.writeStream
.format("memory")
.queryName("file_data")
.start()
.asInstanceOf[StreamingQueryWrapper]
.streamingQuery
q.processAllAvailable()
val memorySink = q.sink.asInstanceOf[MemorySink]
val fileSource = q.logicalPlan.collect {
case StreamingExecutionRelation(source, _) if source.isInstanceOf[FileStreamSource] =>
source.asInstanceOf[FileStreamSource]
}.head
/** Check the data read in the last batch */
def checkLastBatchData(data: Int*): Unit = {
val schema = StructType(Seq(StructField("value", StringType)))
val df = spark.createDataFrame(
spark.sparkContext.makeRDD(memorySink.latestBatchData), schema)
checkAnswer(df, data.map(_.toString).toDF("value"))
}
def checkAllData(data: Seq[Int]): Unit = {
val schema = StructType(Seq(StructField("value", StringType)))
val df = spark.createDataFrame(
spark.sparkContext.makeRDD(memorySink.allData), schema)
checkAnswer(df, data.map(_.toString).toDF("value"))
}
/** Check how many batches have executed since the last time this check was made */
var lastBatchId = -1L
def checkNumBatchesSinceLastCheck(numBatches: Int): Unit = {
require(lastBatchId >= 0)
assert(memorySink.latestBatchId.get === lastBatchId + numBatches)
lastBatchId = memorySink.latestBatchId.get
}
checkLastBatchData(3) // (1 and 2) should be in batch 1, (3) should be in batch 2 (last)
checkAllData(1 to 3)
lastBatchId = memorySink.latestBatchId.get
fileSource.withBatchingLocked {
createFile(4)
createFile(5) // 4 and 5 should be in a batch
createFile(6)
createFile(7) // 6 and 7 should be in the last batch
}
q.processAllAvailable()
checkNumBatchesSinceLastCheck(2)
checkLastBatchData(6, 7)
checkAllData(1 to 7)
fileSource.withBatchingLocked {
createFile(8)
createFile(9) // 8 and 9 should be in a batch
createFile(10)
createFile(11) // 10 and 11 should be in a batch
createFile(12) // 12 should be in the last batch
}
q.processAllAvailable()
checkNumBatchesSinceLastCheck(3)
checkLastBatchData(12)
checkAllData(1 to 12)
q.stop()
}
}
test("max files per trigger - incorrect values") {
val testTable = "maxFilesPerTrigger_test"
withTable(testTable) {
withTempDir { case src =>
def testMaxFilePerTriggerValue(value: String): Unit = {
val df = spark.readStream.option("maxFilesPerTrigger", value).text(src.getCanonicalPath)
val e = intercept[StreamingQueryException] {
// Note: `maxFilesPerTrigger` is checked in the stream thread when creating the source
val q = df.writeStream.format("memory").queryName(testTable).start()
try {
q.processAllAvailable()
} finally {
q.stop()
}
}
assert(e.getCause.isInstanceOf[IllegalArgumentException])
Seq("maxFilesPerTrigger", value, "positive integer").foreach { s =>
assert(e.getMessage.contains(s))
}
}
testMaxFilePerTriggerValue("not-a-integer")
testMaxFilePerTriggerValue("-1")
testMaxFilePerTriggerValue("0")
testMaxFilePerTriggerValue("10.1")
}
}
}
test("explain") {
withTempDirs { case (src, tmp) =>
src.mkdirs()
val df = spark.readStream.format("text").load(src.getCanonicalPath).map(_ + "-x")
// Test `explain` not throwing errors
df.explain()
val q = df.writeStream.queryName("file_explain").format("memory").start()
.asInstanceOf[StreamingQueryWrapper]
.streamingQuery
try {
assert("No physical plan. Waiting for data." === q.explainInternal(false))
assert("No physical plan. Waiting for data." === q.explainInternal(true))
val tempFile = Utils.tempFileWith(new File(tmp, "text"))
val finalFile = new File(src, tempFile.getName)
require(stringToFile(tempFile, "foo").renameTo(finalFile))
q.processAllAvailable()
val explainWithoutExtended = q.explainInternal(false)
// `extended = false` only displays the physical plan.
assert("Relation.*text".r.findAllMatchIn(explainWithoutExtended).size === 0)
assert(": Text".r.findAllMatchIn(explainWithoutExtended).size === 1)
val explainWithExtended = q.explainInternal(true)
      // `extended = true` displays 3 logical plans (Parsed/Analyzed/Optimized) and 1 physical
// plan.
assert("Relation.*text".r.findAllMatchIn(explainWithExtended).size === 3)
assert(": Text".r.findAllMatchIn(explainWithExtended).size === 1)
} finally {
q.stop()
}
}
}
test("SPARK-17372 - write file names to WAL as Array[String]") {
    // Note: If this test takes longer than the timeout, then it's likely that this is actually
// running a Spark job with 10000 tasks. This test tries to avoid that by
    // 1. Setting the threshold for parallel file listing to a very high value
// 2. Using a query that should use constant folding to eliminate reading of the files
val numFiles = 10000
    // This is to avoid running a Spark job to list the files in parallel
    // by the InMemoryFileIndex.
spark.sessionState.conf.setConf(SQLConf.PARALLEL_PARTITION_DISCOVERY_THRESHOLD, numFiles * 2)
withTempDirs { case (root, tmp) =>
val src = new File(root, "a=1")
src.mkdirs()
(1 to numFiles).map { _.toString }.foreach { i =>
val tempFile = Utils.tempFileWith(new File(tmp, "text"))
val finalFile = new File(src, tempFile.getName)
stringToFile(finalFile, i)
}
assert(src.listFiles().size === numFiles)
val files = spark.readStream.text(root.getCanonicalPath).as[(String, Int)]
// Note this query will use constant folding to eliminate the file scan.
// This is to avoid actually running a Spark job with 10000 tasks
val df = files.filter("1 == 0").groupBy().count()
testStream(df, OutputMode.Complete)(
AddTextFileData("0", src, tmp),
CheckAnswer(0)
)
}
}
test("compact interval metadata log") {
val _sources = PrivateMethod[Seq[Source]]('sources)
val _metadataLog = PrivateMethod[FileStreamSourceLog]('metadataLog)
def verify(
execution: StreamExecution,
batchId: Long,
expectedBatches: Int,
expectedCompactInterval: Int): Boolean = {
import CompactibleFileStreamLog._
val fileSource = (execution invokePrivate _sources()).head.asInstanceOf[FileStreamSource]
val metadataLog = fileSource invokePrivate _metadataLog()
if (isCompactionBatch(batchId, expectedCompactInterval)) {
val path = metadataLog.batchIdToPath(batchId)
        // Assert that the path name ends with the compact suffix.
assert(path.getName.endsWith(COMPACT_FILE_SUFFIX),
"path does not end with compact file suffix")
        // A compacted batch should include all entries from the start.
val entries = metadataLog.get(batchId)
assert(entries.isDefined, "Entries not defined")
assert(entries.get.length === metadataLog.allFiles().length, "clean up check")
assert(metadataLog.get(None, Some(batchId)).flatMap(_._2).length ===
entries.get.length, "Length check")
}
assert(metadataLog.allFiles().sortBy(_.batchId) ===
metadataLog.get(None, Some(batchId)).flatMap(_._2).sortBy(_.batchId),
"Batch id mismatch")
metadataLog.get(None, Some(batchId)).flatMap(_._2).length === expectedBatches
}
withTempDirs { case (src, tmp) =>
withSQLConf(
SQLConf.FILE_SOURCE_LOG_COMPACT_INTERVAL.key -> "2"
) {
val fileStream = createFileStream("text", src.getCanonicalPath)
val filtered = fileStream.filter($"value" contains "keep")
val updateConf = Map(SQLConf.FILE_SOURCE_LOG_COMPACT_INTERVAL.key -> "5")
testStream(filtered)(
AddTextFileData("drop1\\nkeep2\\nkeep3", src, tmp),
CheckAnswer("keep2", "keep3"),
AssertOnQuery(verify(_, 0L, 1, 2)),
AddTextFileData("drop4\\nkeep5\\nkeep6", src, tmp),
CheckAnswer("keep2", "keep3", "keep5", "keep6"),
AssertOnQuery(verify(_, 1L, 2, 2)),
AddTextFileData("drop7\\nkeep8\\nkeep9", src, tmp),
CheckAnswer("keep2", "keep3", "keep5", "keep6", "keep8", "keep9"),
AssertOnQuery(verify(_, 2L, 3, 2)),
StopStream,
StartStream(additionalConfs = updateConf),
AssertOnQuery(verify(_, 2L, 3, 2)),
AddTextFileData("drop10\\nkeep11", src, tmp),
CheckAnswer("keep2", "keep3", "keep5", "keep6", "keep8", "keep9", "keep11"),
AssertOnQuery(verify(_, 3L, 4, 2)),
AddTextFileData("drop12\\nkeep13", src, tmp),
CheckAnswer("keep2", "keep3", "keep5", "keep6", "keep8", "keep9", "keep11", "keep13"),
AssertOnQuery(verify(_, 4L, 5, 2))
)
}
}
}
test("get arbitrary batch from FileStreamSource") {
withTempDirs { case (src, tmp) =>
withSQLConf(
SQLConf.FILE_SOURCE_LOG_COMPACT_INTERVAL.key -> "2",
// Force deleting the old logs
SQLConf.FILE_SOURCE_LOG_CLEANUP_DELAY.key -> "1"
) {
val fileStream = createFileStream("text", src.getCanonicalPath)
val filtered = fileStream.filter($"value" contains "keep")
testStream(filtered)(
AddTextFileData("keep1", src, tmp),
CheckAnswer("keep1"),
AddTextFileData("keep2", src, tmp),
CheckAnswer("keep1", "keep2"),
AddTextFileData("keep3", src, tmp),
CheckAnswer("keep1", "keep2", "keep3"),
AssertOnQuery("check getBatch") { execution: StreamExecution =>
val _sources = PrivateMethod[Seq[Source]]('sources)
val fileSource =
(execution invokePrivate _sources()).head.asInstanceOf[FileStreamSource]
def verify(startId: Option[Int], endId: Int, expected: String*): Unit = {
val start = startId.map(new FileStreamSourceOffset(_))
val end = FileStreamSourceOffset(endId)
assert(fileSource.getBatch(start, end).as[String].collect().toSeq === expected)
}
verify(startId = None, endId = 2, "keep1", "keep2", "keep3")
verify(startId = Some(0), endId = 1, "keep2")
verify(startId = Some(0), endId = 2, "keep2", "keep3")
verify(startId = Some(1), endId = 2, "keep3")
true
}
)
}
}
}
test("input row metrics") {
withTempDirs { case (src, tmp) =>
val input = spark.readStream.format("text").load(src.getCanonicalPath)
testStream(input)(
AddTextFileData("100", src, tmp),
CheckAnswer("100"),
AssertOnQuery { query =>
val actualProgress = query.recentProgress
.find(_.numInputRows > 0)
.getOrElse(sys.error("Could not find records with data."))
assert(actualProgress.numInputRows === 1)
assert(actualProgress.sources(0).processedRowsPerSecond > 0.0)
true
}
)
}
}
test("SPARK-18433: Improve DataSource option keys to be more case-insensitive") {
val options = new FileStreamOptions(Map("maxfilespertrigger" -> "1"))
assert(options.maxFilesPerTrigger == Some(1))
}
test("FileStreamSource offset - read Spark 2.1.0 offset json format") {
val offset = readOffsetFromResource("file-source-offset-version-2.1.0-json.txt")
assert(FileStreamSourceOffset(offset) === FileStreamSourceOffset(345))
}
test("FileStreamSource offset - read Spark 2.1.0 offset long format") {
val offset = readOffsetFromResource("file-source-offset-version-2.1.0-long.txt")
assert(FileStreamSourceOffset(offset) === FileStreamSourceOffset(345))
}
test("FileStreamSourceLog - read Spark 2.1.0 log format") {
assert(readLogFromResource("file-source-log-version-2.1.0") === Seq(
FileEntry("/a/b/0", 1480730949000L, 0L),
FileEntry("/a/b/1", 1480730950000L, 1L),
FileEntry("/a/b/2", 1480730950000L, 2L),
FileEntry("/a/b/3", 1480730950000L, 3L),
FileEntry("/a/b/4", 1480730951000L, 4L)
))
}
private def readLogFromResource(dir: String): Seq[FileEntry] = {
val input = getClass.getResource(s"/structured-streaming/$dir")
val log = new FileStreamSourceLog(FileStreamSourceLog.VERSION, spark, input.toString)
log.allFiles()
}
private def readOffsetFromResource(file: String): SerializedOffset = {
import scala.io.Source
val str = Source.fromFile(getClass.getResource(s"/structured-streaming/$file").toURI).mkString
SerializedOffset(str.trim)
}
private def runTwoBatchesAndVerifyResults(
src: File,
latestFirst: Boolean,
firstBatch: String,
secondBatch: String,
maxFileAge: Option[String] = None): Unit = {
val srcOptions = Map("latestFirst" -> latestFirst.toString, "maxFilesPerTrigger" -> "1") ++
maxFileAge.map("maxFileAge" -> _)
val fileStream = createFileStream(
"text",
src.getCanonicalPath,
options = srcOptions)
val clock = new StreamManualClock()
testStream(fileStream)(
StartStream(trigger = ProcessingTime(10), triggerClock = clock),
AssertOnQuery { _ =>
// Block until the first batch finishes.
eventually(timeout(streamingTimeout)) {
assert(clock.isStreamWaitingAt(0))
}
true
},
CheckLastBatch(firstBatch),
AdvanceManualClock(10),
AssertOnQuery { _ =>
// Block until the second batch finishes.
eventually(timeout(streamingTimeout)) {
assert(clock.isStreamWaitingAt(10))
}
true
},
CheckLastBatch(secondBatch)
)
}
test("FileStreamSource - latestFirst") {
withTempDir { src =>
// Prepare two files: 1.txt, 2.txt, and make sure they have different modified time.
val f1 = stringToFile(new File(src, "1.txt"), "1")
val f2 = stringToFile(new File(src, "2.txt"), "2")
f2.setLastModified(f1.lastModified + 1000)
// Read oldest files first, so the first batch is "1", and the second batch is "2".
runTwoBatchesAndVerifyResults(src, latestFirst = false, firstBatch = "1", secondBatch = "2")
// Read latest files first, so the first batch is "2", and the second batch is "1".
runTwoBatchesAndVerifyResults(src, latestFirst = true, firstBatch = "2", secondBatch = "1")
}
}
test("SPARK-19813: Ignore maxFileAge when maxFilesPerTrigger and latestFirst is used") {
withTempDir { src =>
// Prepare two files: 1.txt, 2.txt, and make sure they have different modified time.
val f1 = stringToFile(new File(src, "1.txt"), "1")
val f2 = stringToFile(new File(src, "2.txt"), "2")
f2.setLastModified(f1.lastModified + 3600 * 1000 /* 1 hour later */)
runTwoBatchesAndVerifyResults(src, latestFirst = true, firstBatch = "2", secondBatch = "1",
maxFileAge = Some("1m") /* 1 minute */)
}
}
test("SeenFilesMap") {
val map = new SeenFilesMap(maxAgeMs = 10)
map.add("a", 5)
assert(map.size == 1)
map.purge()
assert(map.size == 1)
    // Add a new entry and purge should be a no-op, since the gap is exactly 10 ms.
map.add("b", 15)
assert(map.size == 2)
map.purge()
assert(map.size == 2)
    // Add a new entry that's more than 10 ms newer than the first entry.
    // We should be able to purge now.
map.add("c", 16)
assert(map.size == 3)
map.purge()
assert(map.size == 2)
    // Overriding an existing entry shouldn't change the size
map.add("c", 25)
assert(map.size == 2)
// Not a new file because we have seen c before
assert(!map.isNewFile("c", 20))
// Not a new file because timestamp is too old
assert(!map.isNewFile("d", 5))
// Finally a new file: never seen and not too old
assert(map.isNewFile("e", 20))
}
test("SeenFilesMap should only consider a file old if it is earlier than last purge time") {
val map = new SeenFilesMap(maxAgeMs = 10)
map.add("a", 20)
assert(map.size == 1)
    // Timestamps 9 and 10 should still be considered new files because the purge time is still 0
assert(map.isNewFile("b", 9))
assert(map.isNewFile("b", 10))
    // Once purged, the purge time should be 10, and then b would be an old file
    // if its timestamp is less than 10.
map.purge()
assert(!map.isNewFile("b", 9))
assert(map.isNewFile("b", 10))
}
testWithUninterruptibleThread("do not recheck that files exist during getBatch") {
withTempDir { temp =>
spark.conf.set(
s"fs.$scheme.impl",
classOf[ExistsThrowsExceptionFileSystem].getName)
// add the metadata entries as a pre-req
      val dir = new File(temp, "dir") // use a non-existent directory to test whether the log creates the dir
val metadataLog =
new FileStreamSourceLog(FileStreamSourceLog.VERSION, spark, dir.getAbsolutePath)
assert(metadataLog.add(0, Array(FileEntry(s"$scheme:///file1", 100L, 0))))
val newSource = new FileStreamSource(spark, s"$scheme:///", "parquet", StructType(Nil), Nil,
dir.getAbsolutePath, Map.empty)
// this method should throw an exception if `fs.exists` is called during resolveRelation
newSource.getBatch(None, FileStreamSourceOffset(1))
}
}
}
class FileStreamSourceStressTestSuite extends FileStreamSourceTest {
import testImplicits._
test("file source stress test") {
val src = Utils.createTempDir(namePrefix = "streaming.src")
val tmp = Utils.createTempDir(namePrefix = "streaming.tmp")
val fileStream = createFileStream("text", src.getCanonicalPath)
val ds = fileStream.as[String].map(_.toInt + 1)
runStressTest(ds, data => {
AddTextFileData(data.mkString("\\n"), src, tmp)
})
Utils.deleteRecursively(src)
Utils.deleteRecursively(tmp)
}
}
/**
* Fake FileSystem to test whether the method `fs.exists` is called during
* `DataSource.resolveRelation`.
*/
class ExistsThrowsExceptionFileSystem extends RawLocalFileSystem {
override def getUri: URI = {
URI.create(s"$scheme:///")
}
override def exists(f: Path): Boolean = {
throw new IllegalArgumentException("Exists shouldn't have been called!")
}
/** Simply return an empty file for now. */
override def listStatus(file: Path): Array[FileStatus] = {
val emptyFile = new FileStatus()
emptyFile.setPath(file)
Array(emptyFile)
}
}
object ExistsThrowsExceptionFileSystem {
val scheme = s"FileStreamSourceSuite${math.abs(Random.nextInt)}fs"
}
| u2009cf/spark-radar | sql/core/src/test/scala/org/apache/spark/sql/streaming/FileStreamSourceSuite.scala | Scala | apache-2.0 | 48,339 |
import sbt._
import Keys._
import play.Project._
object ApplicationBuild extends Build {
val appName = "scalatest"
val appVersion = "1.0-SNAPSHOT"
val appDependencies = Seq(
// Add your project dependencies here,
"org.scalatestplus" % "play_2.10" % "1.0-RC1" % "test"
)
val main = play.Project(appName, appVersion, appDependencies).settings(
// Add your own project settings here
)
}
| jedesah/scalatest-website | project/Build.scala | Scala | apache-2.0 | 450 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.plan.nodes.physical.batch
import org.apache.flink.table.plan.nodes.calcite.Expand
import org.apache.flink.table.plan.util.RelExplainUtil
import org.apache.calcite.plan.{RelOptCluster, RelTraitSet}
import org.apache.calcite.rel.`type`.RelDataType
import org.apache.calcite.rel.{RelNode, RelWriter}
import org.apache.calcite.rex.RexNode
import java.util
/**
* Batch physical RelNode for [[Expand]].
*/
class BatchExecExpand(
cluster: RelOptCluster,
traitSet: RelTraitSet,
input: RelNode,
outputRowType: RelDataType,
projects: util.List[util.List[RexNode]],
expandIdIndex: Int)
extends Expand(cluster, traitSet, input, outputRowType, projects, expandIdIndex)
with BatchPhysicalRel {
override def copy(traitSet: RelTraitSet, inputs: util.List[RelNode]): RelNode = {
new BatchExecExpand(
cluster,
traitSet,
inputs.get(0),
outputRowType,
projects,
expandIdIndex
)
}
override def explainTerms(pw: RelWriter): RelWriter = {
super.explainTerms(pw)
.item("projects", RelExplainUtil.projectsToString(projects, input.getRowType, getRowType))
}
}
| ueshin/apache-flink | flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/plan/nodes/physical/batch/BatchExecExpand.scala | Scala | apache-2.0 | 1,972 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log4j
import kafka.consumer.SimpleConsumer
import kafka.server.{KafkaConfig, KafkaServer}
import kafka.utils.{TestUtils, Utils, Logging}
import kafka.api.FetchRequestBuilder
import kafka.producer.async.MissingConfigException
import kafka.serializer.Encoder
import kafka.zk.ZooKeeperTestHarness
import java.util.Properties
import java.io.File
import org.apache.log4j.spi.LoggingEvent
import org.apache.log4j.{PropertyConfigurator, Logger}
import org.junit.{After, Before, Test}
import org.scalatest.junit.JUnit3Suite
import junit.framework.Assert._
class KafkaLog4jAppenderTest extends JUnit3Suite with ZooKeeperTestHarness with Logging {
var logDirZk: File = null
var config: KafkaConfig = null
var server: KafkaServer = null
var simpleConsumerZk: SimpleConsumer = null
val tLogger = Logger.getLogger(getClass())
private val brokerZk = 0
private val ports = TestUtils.choosePorts(2)
private val portZk = ports(0)
@Before
override def setUp() {
super.setUp()
val propsZk = TestUtils.createBrokerConfig(brokerZk, portZk)
val logDirZkPath = propsZk.getProperty("log.dir")
logDirZk = new File(logDirZkPath)
config = KafkaConfig.fromProps(propsZk)
server = TestUtils.createServer(config)
simpleConsumerZk = new SimpleConsumer("localhost", portZk, 1000000, 64 * 1024, "")
}
@After
override def tearDown() {
simpleConsumerZk.close
server.shutdown
Utils.rm(logDirZk)
super.tearDown()
}
@Test
def testKafkaLog4jConfigs() {
    // broker list missing
var props = new Properties()
props.put("log4j.rootLogger", "INFO")
props.put("log4j.appender.KAFKA", "kafka.producer.KafkaLog4jAppender")
props.put("log4j.appender.KAFKA.layout", "org.apache.log4j.PatternLayout")
props.put("log4j.appender.KAFKA.layout.ConversionPattern", "%-5p: %c - %m%n")
props.put("log4j.appender.KAFKA.Topic", "test-topic")
props.put("log4j.logger.kafka.log4j", "INFO, KAFKA")
try {
PropertyConfigurator.configure(props)
fail("Missing properties exception was expected !")
} catch {
case e: MissingConfigException =>
}
// topic missing
props = new Properties()
props.put("log4j.rootLogger", "INFO")
props.put("log4j.appender.KAFKA", "kafka.producer.KafkaLog4jAppender")
props.put("log4j.appender.KAFKA.layout", "org.apache.log4j.PatternLayout")
props.put("log4j.appender.KAFKA.layout.ConversionPattern", "%-5p: %c - %m%n")
props.put("log4j.appender.KAFKA.brokerList", TestUtils.getBrokerListStrFromConfigs(Seq(config)))
props.put("log4j.logger.kafka.log4j", "INFO, KAFKA")
try {
PropertyConfigurator.configure(props)
fail("Missing properties exception was expected !")
} catch {
case e: MissingConfigException =>
}
}
@Test
def testLog4jAppends() {
PropertyConfigurator.configure(getLog4jConfig)
for(i <- 1 to 5)
info("test")
val response = simpleConsumerZk.fetch(new FetchRequestBuilder().addFetch("test-topic", 0, 0L, 1024*1024).build())
val fetchMessage = response.messageSet("test-topic", 0)
var count = 0
for(message <- fetchMessage) {
count = count + 1
}
assertEquals(5, count)
}
private def getLog4jConfig: Properties = {
val props = new Properties()
props.put("log4j.rootLogger", "INFO")
props.put("log4j.appender.KAFKA", "kafka.producer.KafkaLog4jAppender")
props.put("log4j.appender.KAFKA.layout", "org.apache.log4j.PatternLayout")
props.put("log4j.appender.KAFKA.layout.ConversionPattern", "%-5p: %c - %m%n")
props.put("log4j.appender.KAFKA.BrokerList", TestUtils.getBrokerListStrFromConfigs(Seq(config)))
props.put("log4j.appender.KAFKA.Topic", "test-topic")
props.put("log4j.appender.KAFKA.RequiredNumAcks", "1")
props.put("log4j.appender.KAFKA.SyncSend", "true")
props.put("log4j.logger.kafka.log4j", "INFO, KAFKA")
props
}
}
class AppenderStringEncoder(encoding: String = "UTF-8") extends Encoder[LoggingEvent] {
def toBytes(event: LoggingEvent): Array[Byte] = {
event.getMessage.toString.getBytes(encoding)
}
}
| WillCh/cs286A | dataMover/kafka/core/src/test/scala/unit/kafka/log4j/KafkaLog4jAppenderTest.scala | Scala | bsd-2-clause | 4,924 |
// Copyright: 2010 - 2018 https://github.com/ensime/ensime-server/graphs
// License: http://www.gnu.org/licenses/lgpl-3.0.en.html
package spray.json
import org.scalatest._
class JsonParserSpec extends WordSpec {
"The JsonParser" should {
"parse 'null' to JsNull" in {
JsonParser("null") === JsNull
}
"parse 'true' to JsTrue" in {
JsonParser("true") === JsTrue
}
"parse 'false' to JsFalse" in {
JsonParser("false") === JsFalse
}
"parse '0' to JsNumber" in {
JsonParser("0") === JsNumber(0)
}
"parse '1.23' to JsNumber" in {
JsonParser("1.23") === JsNumber(1.23)
}
"parse '-1E10' to JsNumber" in {
JsonParser("-1E10") === JsNumber("-1E+10")
}
"parse '12.34e-10' to JsNumber" in {
JsonParser("12.34e-10") === JsNumber("1.234E-9")
}
"parse \\"xyz\\" to JsString" in {
JsonParser("\\"xyz\\"") === JsString("xyz")
}
"parse escapes in a JsString" in {
JsonParser(""""\\"\\\\/\\b\\f\\n\\r\\t"""") === JsString("\\"\\\\/\\b\\f\\n\\r\\t")
JsonParser("\\"L\\\\" + "u00e4nder\\"") === JsString("Länder")
}
"parse all representations of the slash (SOLIDUS) character in a JsString" in {
JsonParser("\\"" + "/\\\\/\\\\u002f" + "\\"") === JsString("///")
}
"parse a simple JsObject" in (
JsonParser(""" { "key" :42, "key2": "value" }""") ===
JsObject("key" -> JsNumber(42), "key2" -> JsString("value"))
)
"parse a simple JsArray" in (
JsonParser("""[null, 1.23 ,{"key":true } ] """) ===
JsArray(JsNull, JsNumber(1.23), JsObject("key" -> JsTrue))
)
"parse directly from UTF-8 encoded bytes" in {
val json = JsObject(
"7-bit" -> JsString("This is regular 7-bit ASCII text."),
"2-bytes" -> JsString("2-byte UTF-8 chars like £, æ or Ö"),
"3-bytes" -> JsString("3-byte UTF-8 chars like ヨ, ᄅ or ᐁ."),
"4-bytes" -> JsString(
"4-byte UTF-8 chars like \\uD801\\uDC37, \\uD852\\uDF62 or \\uD83D\\uDE01."
)
)
JsonParser(json.prettyPrint.getBytes("UTF-8")) === json
}
"parse directly from UTF-8 encoded bytes when string starts with a multi-byte character" in {
val json = JsString("£0.99")
JsonParser(json.prettyPrint.getBytes("UTF-8")) === json
}
"be reentrant" in {
val largeJsonSource = scala.io.Source
.fromInputStream(getClass.getResourceAsStream("/test.json"))
.mkString
import scala.collection.parallel.immutable.ParSeq
ParSeq.fill(20)(largeJsonSource).map(JsonParser(_)).toList.map {
_.asInstanceOf[JsObject]
.fields("questions")
.asInstanceOf[JsArray]
.elements
.size
} === List.fill(20)(100)
}
"produce proper error messages" in {
def errorMessage(input: String) =
try JsonParser(input)
catch { case e: JsonParser.ParsingException => e.getMessage }
errorMessage("""[null, 1.23 {"key":true } ]""") ===
"""Unexpected character '{' at input index 12 (line 1, position 13), expected ']':
|[null, 1.23 {"key":true } ]
| ^
|""".stripMargin
errorMessage("""[null, 1.23, { key":true } ]""") ===
"""Unexpected character 'k' at input index 16 (line 1, position 17), expected '"':
|[null, 1.23, { key":true } ]
| ^
|""".stripMargin
errorMessage("""{"a}""") ===
"""Unexpected end-of-input at input index 4 (line 1, position 5), expected '"':
|{"a}
| ^
|""".stripMargin
errorMessage("""{}x""") ===
"""Unexpected character 'x' at input index 2 (line 1, position 3), expected end-of-input:
|{}x
| ^
|""".stripMargin
}
}
}
| yyadavalli/ensime-server | json/src/test/scala/spray/json/JsonParserSpec.scala | Scala | gpl-3.0 | 3,800 |
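As context for the spec above, here is a small sketch of how the vendored spray-json parser is typically consumed; the sample JSON document is made up.

package spray.json

object JsonParserUsageSketch extends App {
  val parsed = JsonParser("""{ "name": "ensime", "stars": 42 }""")
  val obj    = parsed.asJsObject

  // fields is a Map[String, JsValue]; match on the concrete JsValue subtypes
  val name = obj.fields("name") match {
    case JsString(s) => s
    case other       => sys.error(s"unexpected: $other")
  }
  val stars = obj.fields("stars") match {
    case JsNumber(n) => n.toInt
    case other       => sys.error(s"unexpected: $other")
  }
  println(s"$name has $stars stars")
}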
package chrome.webNavigation.bindings
import scala.scalajs.js
import scala.scalajs.js._
@js.native
trait OnDOMContentLoadedDetails extends js.Object {
/**
* The ID of the tab in which the navigation occurs.
*/
val tabId: Int = native
val url: String = native
/**
* The ID of the process that runs the renderer for this frame.
*
* Missing in firefox https://bugzilla.mozilla.org/show_bug.cgi?id=1248426
*/
val processId: js.UndefOr[Int] = native
/**
* 0 indicates the navigation happens in the tab content window; a positive value indicates navigation in a subframe.
* Frame IDs are unique within a tab.
*/
val frameId: Int = native
/**
* The time when the page's DOM was fully constructed, in milliseconds since the epoch.
*/
val timeStamp: Double = native
}
| lucidd/scala-js-chrome | bindings/src/main/scala/chrome/webNavigation/bindings/OnDOMContentLoadedDetails.scala | Scala | mit | 823 |
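A hedged sketch of code that could consume the facade above once chrome.webNavigation.onDOMContentLoaded fires; the listener registration itself is omitted (it lives outside this file), and the handler name is hypothetical.

package chrome.webNavigation.bindings

object OnDOMContentLoadedSketch {
  def logNavigation(details: OnDOMContentLoadedDetails): Unit = {
    val frame = if (details.frameId == 0) "top frame" else s"subframe ${details.frameId}"
    println(s"tab ${details.tabId} loaded ${details.url} in $frame at ${details.timeStamp}")
    details.processId.foreach(pid => println(s"renderer process: $pid")) // undefined on Firefox
  }
}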
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.sysml.api.ml.serving
import java.util.concurrent.CountDownLatch
import java.util.concurrent.atomic.LongAdder
import scala.concurrent.Future
import scala.concurrent.duration.Duration
object NonBatchingScheduler extends Scheduler {
override def start(numCores: Int, cpuMemoryBudgetInBytes: Long, gpus: String): Unit = {
LOG.info(s"Starting Non Batching Scheduler with: ${numCores} CPUs and ${gpus} GPUs")
super.start(numCores, cpuMemoryBudgetInBytes, gpus)
}
override def schedule(executor: JmlcExecutor): Array[SchedulingRequest] = {
var ret = Array[SchedulingRequest]()
dummyResponse.synchronized {
if (requestQueue.size() > 0) {
val request = requestQueue.poll()
ret :+= request
}
}
ret
}
var requestNum = new LongAdder
/**
* Enqueues a request for processing. The scheduler will read from these queues to determine which
* models to execute next
* @param request A PredictionRequest object containing the data for which a prediction is desired
* @param model The model object for which prediction is desired
   * @return a Future that completes with the PredictionResponse for this request, or a dummy response if the wait times out
*/
override private[serving] def enqueue(request: PredictionRequest, model: Model): Future[PredictionResponse] = Future {
val statistics = if (_statistics) RequestStatistics() else null
val schedulingRequest = SchedulingRequest(
request, model, new CountDownLatch(1), System.nanoTime(), null, statistics)
if (_statistics) statistics.queueSize = requestQueue.size()
requestQueue.add(schedulingRequest)
counter += 1
try {
schedulingRequest.latch.await(timeout.length, timeout.unit)
schedulingRequest.response
} catch {
case e : scala.concurrent.TimeoutException => dummyResponse
}
}
override def onCompleteCallback(model: String, latency: Double, batchSize: Int, execType: String, execTime: Long): Unit = {}
} | niketanpansare/systemml | src/main/scala/org/apache/sysml/api/ml/serving/NonBatchingScheduler.scala | Scala | apache-2.0 | 2,849 |
import java.io.File
import testgen.TestSuiteBuilder._
import testgen._
object SublistTestGenerator {
def main(args: Array[String]): Unit = {
val file = new File("src/main/resources/sublist.json")
def toString(expected: CanonicalDataParser.Expected): String = {
expected match {
case Right("equal") => s"Sublist.Equal"
case Right("sublist") => s"Sublist.Sublist"
case Right("superlist") => s"Sublist.Superlist"
case Right("unequal") => s"Sublist.Unequal"
case _ => throw new IllegalStateException()
}
}
def fromLabeledTestFromInput(argNames: String*): ToTestCaseData =
withLabeledTest { sut =>
labeledTest =>
val args = sutArgsFromInput(labeledTest.result, argNames: _*)
val property = labeledTest.property
val sutCall =
s"$sut.$property($args)"
val expected = toString(labeledTest.expected)
TestCaseData(labeledTest.description, sutCall, expected)
}
val code =
TestSuiteBuilder.build(file, fromLabeledTestFromInput("listOne", "listTwo"))
println(s"-------------")
println(code)
println(s"-------------")
}
}
| ricemery/xscala | testgen/src/main/scala/SublistTestGenerator.scala | Scala | mit | 1,190 |
package com.nulabinc.backlog.migration.common.utils
import java.util.Locale
import com.osinka.i18n.Lang
import org.slf4j.{Logger, LoggerFactory}
/**
* @author
* uchida
*/
trait Logging {
implicit val userLang =
if (Locale.getDefault.equals(Locale.JAPAN)) Lang("ja") else Lang("en")
val logger: Logger = LoggerFactory.getLogger(getClass)
}
| nulab/backlog-migration-common | core/src/main/scala/com/nulabinc/backlog/migration/common/utils/Logging.scala | Scala | mit | 358 |
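Mixing the trait in gives a component an slf4j logger plus the implicit language used for i18n lookups; a minimal illustrative example follows (the class name is hypothetical).

package com.nulabinc.backlog.migration.common.utils

class ProjectImporter extends Logging {            // hypothetical component
  def run(projectKey: String): Unit = {
    logger.info(s"importing project $projectKey")  // slf4j logger provided by the trait
    // userLang is in implicit scope here for com.osinka.i18n message lookups
  }
}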
package com.github.ligangty.scala.jsoup.helper
import com.github.ligangty.scala.jsoup.helper.Strings._
import org.scalatest.FunSuite
/**
* Test for Strings
*/
class StringsTest extends FunSuite {
test("join Iterable") {
assertResult("")(join(List(""), " "))
assertResult("one")(join(List("one"), " "))
assertResult("one two three")(join(List("one", "two", "three"), " "))
}
test("join Array") {
assertResult("")(join(Array(""), " "))
assertResult("one")(join(Array("one"), " "))
assertResult("one two three")(join(Array("one", "two", "three"), " "))
}
test("padding") {
assertResult("")(padding(0))
assertResult(" ")(padding(1))
assertResult(" ")(padding(2))
assertResult(" ")(padding(15))
}
test("isWhitespace") {
    assert(isWhitespace('\t'))
    assert(isWhitespace('\n'))
    assert(isWhitespace('\r'))
    assert(isWhitespace('\f'))
    assert(isWhitespace(' '))
    assert(!isWhitespace('\u00a0'))
    assert(!isWhitespace('\u2000'))
    assert(!isWhitespace('\u3000'))
}
test("isBlank") {
assert(isBlank(null))
assert(isBlank(""))
assert(isBlank(" "))
assert(isBlank(" \\r\\n "))
assert(!isBlank("hello"))
assert(!isBlank(" hello "))
}
test("isNumeric") {
assert(!isNumeric(null))
assert(!isNumeric(" "))
assert(!isNumeric("123 546"))
assert(!isNumeric("hello"))
assert(!isNumeric("123.334"))
assert(isNumeric("1"))
assert(isNumeric("1234"))
}
test("normaliseWhiteSpace") {
assertResult(" ")(normaliseWhitespace(" \\r \\n \\r\\n"))
assertResult(" hello there ")(normaliseWhitespace(" hello \\r \\n there \\n"))
assertResult("hello")(normaliseWhitespace("hello"))
assertResult("hello there")(normaliseWhitespace("hello\\nthere"))
}
test("normaliseWhiteSpaceHandlesHighSurrogates") {
val test71540chars = "\\ud869\\udeb2\\u304b\\u309a 1"
val test71540charsExpectedSingleWhitespace = "\\ud869\\udeb2\\u304b\\u309a 1"
assertResult(test71540charsExpectedSingleWhitespace)(normaliseWhitespace(test71540chars))
// val extractedText = Jsoup.parse(test71540chars).text()
// assertResult(test71540charsExpectedSingleWhitespace)(extractedText)
}
test("in"){
assert("a"!=null)
assert(in("a","a","b","c"))
assert(in("a","a","b",null))
assert(in("ab","ab","bc","cd"))
assert(!in("a","b","c","d"))
assert(!in("ab","abc","bcd","cde"))
}
}
| ligangty/scalajsoup | src/test/scala/com/github/ligangty/scala/jsoup/helper/StringsTest.scala | Scala | mit | 2,453 |
package nl.svanwouw.trending.types
/**
* Syntactic sugar for a frequency integer.
* Represents topic frequency.
* @param v The value with run time value type.
*/
class Frequency(val v: Int) extends AnyVal with Serializable {
def +(o: Frequency): Frequency = new Frequency(v + o.v)
override def toString = v.toString
}
| stefanvanwouw/spark-based-trending-topics-extraction | src/main/scala/nl/svanwouw/trending/types/Frequency.scala | Scala | mit | 329 |
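A short illustration of the value class above; `+` simply delegates to the wrapped Int.

package nl.svanwouw.trending.types

object FrequencySketch extends App {
  val counts = Seq(new Frequency(3), new Frequency(5), new Frequency(1))
  val total  = counts.reduce(_ + _)
  println(total) // prints 9
}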
package scorex.network
import java.net.{InetAddress, InetSocketAddress}
import akka.actor.Props
import akka.io.Tcp.{Close, Connected}
import akka.testkit.TestProbe
import scorex.ActorTestingCommons
import scorex.network.peer.PeerManager
import scala.concurrent.duration._
class NetworkListenerSpecification extends ActorTestingCommons {
private val localhost = InetAddress.getByName("127.0.0.1")
private val testBindAddress = new InetSocketAddress(localhost, 12300)
private val testPeerManager = TestProbe("peerManager")
private val testTcp = TestProbe()
private val timeout = 6.seconds
private val address1 = new InetSocketAddress(InetAddress.getByName("1.1.1.1"), 12301)
private val address2 = new InetSocketAddress(InetAddress.getByName("2.2.2.2"), 12302)
private val address3 = new InetSocketAddress(InetAddress.getByName("3.3.3.3"), 12303)
protected override val actorRef = system.actorOf(Props(classOf[NetworkListener], networkController.ref,
testPeerManager.ref, testBindAddress))
testSafely {
"NetworkListener" - {
"should initialize blacklist" in {
actorRef ! PeerManager.ExistedBlacklist(Seq("1.1.1.1", "2.2.2.2"))
networkController.expectMsg(NetworkController.ReadyToListen)
testTcp.send(actorRef, Connected(address1, testBindAddress))
testTcp.expectMsg(timeout, Close)
networkController.expectNoMsg(timeout)
testTcp.send(actorRef, Connected(address2, testBindAddress))
        testTcp.expectMsg(timeout, Close)
networkController.expectNoMsg(timeout)
testTcp.send(actorRef, Connected(address3, testBindAddress))
testTcp.expectNoMsg(timeout)
networkController.expectMsgType[NetworkController.InboundConnection]
}
"should update initialized blacklist" in {
actorRef ! PeerManager.ExistedBlacklist(Seq("1.1.1.1", "2.2.2.2"))
networkController.expectMsg(NetworkController.ReadyToListen)
actorRef ! PeerManager.BlackListUpdated("3.3.3.3")
testTcp.send(actorRef, Connected(address1, testBindAddress))
        testTcp.expectMsg(timeout, Close)
networkController.expectNoMsg(timeout)
testTcp.send(actorRef, Connected(address2, testBindAddress))
testTcp.expectMsg(timeout, Close)
networkController.expectNoMsg(timeout)
testTcp.send(actorRef, Connected(address3, testBindAddress))
testTcp.expectMsg(timeout, Close)
networkController.expectNoMsg(timeout)
}
"should reset blacklist" in {
actorRef ! PeerManager.ExistedBlacklist(Seq("1.1.1.1", "2.2.2.2"))
networkController.expectMsg(NetworkController.ReadyToListen)
testTcp.send(actorRef, Connected(address1, testBindAddress))
testTcp.expectMsg(timeout, Close)
networkController.expectNoMsg(timeout)
testTcp.send(actorRef, Connected(address2, testBindAddress))
testTcp.expectMsg(timeout, Close)
networkController.expectNoMsg(timeout)
testTcp.send(actorRef, Connected(address3, testBindAddress))
testTcp.expectNoMsg(timeout)
networkController.expectMsgType[NetworkController.InboundConnection](timeout)
actorRef ! PeerManager.ExistedBlacklist(Seq("2.2.2.2", "3.3.3.3"))
testTcp.send(actorRef, Connected(address1, testBindAddress))
testTcp.expectNoMsg(timeout)
networkController.expectMsgType[NetworkController.InboundConnection]
testTcp.send(actorRef, Connected(address2, testBindAddress))
testTcp.expectMsg(timeout, Close)
networkController.expectNoMsg(timeout)
testTcp.send(actorRef, Connected(address3, testBindAddress))
testTcp.expectMsg(timeout, Close)
networkController.expectNoMsg(timeout)
}
}
}
}
| B83YPoj/Waves | src/test/scala/scorex/network/NetworkListenerSpecification.scala | Scala | apache-2.0 | 3,773 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.benchmark
import org.apache.spark.benchmark.Benchmark
import org.apache.spark.sql.SparkSession
import org.apache.spark.sql.hive.HiveUtils
import org.apache.spark.sql.hive.test.TestHive
/**
* Benchmark to measure hive table write performance.
* To run this benchmark:
* {{{
* 1. without sbt: bin/spark-submit --class <this class>
* --jars <spark catalyst test jar>,<spark core test jar>,<spark hive jar>
* --packages org.spark-project.hive:hive-exec:1.2.1.spark2
* <spark hive test jar>
* 2. build/sbt "hive/test:runMain <this class>" -Phive-1.2 or
* build/sbt "hive/test:runMain <this class>" -Phive-2.3
* 3. generate result:
* SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "hive/test:runMain <this class>"
* Results will be written to "benchmarks/InsertIntoHiveTableBenchmark-hive2.3-results.txt".
* 4. -Phive-1.2 does not work for JDK 11
* }}}
*/
object InsertIntoHiveTableBenchmark extends SqlBasedBenchmark {
override def getSparkSession: SparkSession = TestHive.sparkSession
val tempView = "temp"
val numRows = 1024 * 10
val sql = spark.sql _
// scalastyle:off hadoopconfiguration
private val hadoopConf = spark.sparkContext.hadoopConfiguration
// scalastyle:on hadoopconfiguration
hadoopConf.set("hive.exec.dynamic.partition", "true")
hadoopConf.set("hive.exec.dynamic.partition.mode", "nonstrict")
hadoopConf.set("hive.exec.max.dynamic.partitions", numRows.toString)
def withTable(tableNames: String*)(f: => Unit): Unit = {
tableNames.foreach { name =>
sql(s"CREATE TABLE $name(a INT) STORED AS TEXTFILE PARTITIONED BY (b INT, c INT)")
}
try f finally {
tableNames.foreach { name =>
spark.sql(s"DROP TABLE IF EXISTS $name")
}
}
}
def insertOverwriteDynamic(table: String, benchmark: Benchmark): Unit = {
benchmark.addCase("INSERT OVERWRITE DYNAMIC") { _ =>
sql(s"INSERT OVERWRITE TABLE $table SELECT CAST(id AS INT) AS a," +
s" CAST(id % 10 AS INT) AS b, CAST(id % 100 AS INT) AS c FROM $tempView")
}
}
def insertOverwriteHybrid(table: String, benchmark: Benchmark): Unit = {
benchmark.addCase("INSERT OVERWRITE HYBRID") { _ =>
sql(s"INSERT OVERWRITE TABLE $table partition(b=1, c) SELECT CAST(id AS INT) AS a," +
s" CAST(id % 10 AS INT) AS c FROM $tempView")
}
}
def insertOverwriteStatic(table: String, benchmark: Benchmark): Unit = {
benchmark.addCase("INSERT OVERWRITE STATIC") { _ =>
sql(s"INSERT OVERWRITE TABLE $table partition(b=1, c=10) SELECT CAST(id AS INT) AS a" +
s" FROM $tempView")
}
}
def insertIntoDynamic(table: String, benchmark: Benchmark): Unit = {
benchmark.addCase("INSERT INTO DYNAMIC") { _ =>
sql(s"INSERT INTO TABLE $table SELECT CAST(id AS INT) AS a," +
s" CAST(id % 10 AS INT) AS b, CAST(id % 100 AS INT) AS c FROM $tempView")
}
}
def insertIntoHybrid(table: String, benchmark: Benchmark): Unit = {
benchmark.addCase("INSERT INTO HYBRID") { _ =>
sql(s"INSERT INTO TABLE $table partition(b=1, c) SELECT CAST(id AS INT) AS a," +
s" CAST(id % 10 AS INT) AS c FROM $tempView")
}
}
def insertIntoStatic(table: String, benchmark: Benchmark): Unit = {
benchmark.addCase("INSERT INTO STATIC") { _ =>
sql(s"INSERT INTO TABLE $table partition(b=1, c=10) SELECT CAST(id AS INT) AS a" +
s" FROM $tempView")
}
}
override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
spark.range(numRows).createOrReplaceTempView(tempView)
try {
val t1 = "t1"
val t2 = "t2"
val t3 = "t3"
val t4 = "t4"
val t5 = "t5"
val t6 = "t6"
val benchmark = new Benchmark(s"insert hive table benchmark", numRows, output = output)
withTable(t1, t2, t3, t4, t5, t6) {
insertIntoDynamic(t1, benchmark)
insertIntoHybrid(t2, benchmark)
insertIntoStatic(t3, benchmark)
insertOverwriteDynamic(t4, benchmark)
insertOverwriteHybrid(t5, benchmark)
insertOverwriteStatic(t6, benchmark)
benchmark.run()
}
} finally {
spark.catalog.dropTempView(tempView)
}
}
override def suffix: String = if (HiveUtils.isHive23) "-hive2.3" else "-hive1.2"
}
| dbtsai/spark | sql/hive/src/test/scala/org/apache/spark/sql/execution/benchmark/InsertIntoHiveTableBenchmark.scala | Scala | apache-2.0 | 5,106 |
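The benchmark is built on the addCase/run pattern of Spark's Benchmark test utility. A stripped-down sketch of that pattern with a placeholder CPU workload instead of a Hive write (no SparkSession required):

import org.apache.spark.benchmark.Benchmark

object BenchmarkPatternSketch {
  def main(args: Array[String]): Unit = {
    val numRows   = 1024 * 10
    val benchmark = new Benchmark("placeholder workload", numRows)
    benchmark.addCase("sum of squares") { _ =>
      var acc = 0L
      var i = 1
      while (i <= numRows) { acc += i.toLong * i; i += 1 }
    }
    benchmark.run() // prints timing statistics for every registered case
  }
}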
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.computations
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.computations.nir.NorthernIrelandRateValidation
import uk.gov.hmrc.ct.computations.retriever.ComputationsBoxRetriever
case class CP283d(value: Option[Int])
extends CtBoxIdentifier("Main Stream Losses brought forward from on or after 01/04/2017 used against trading profit")
with CtOptionalInteger
with Input
with ValidatableBox[ComputationsBoxRetriever] with NorthernIrelandRateValidation {
override def validate(boxRetriever: ComputationsBoxRetriever): Set[CtValidation] = {
collectErrors(
requiredErrorIf(
boxRetriever.cp283b().isPositive &&
mayHaveNirLosses(boxRetriever) &&
!hasValue),
validateIntegerRange("CP283d", this, 0, boxRetriever.cp283b().orZero)
)
}
}
object CP283d {
def apply(int: Int): CP283d = CP283d(Some(int))
}
| hmrc/ct-calculations | src/main/scala/uk/gov/hmrc/ct/computations/CP283d.scala | Scala | apache-2.0 | 1,497 |
package org.jetbrains.plugins.scala.lang.psi.stubs.elements
import com.intellij.lang.Language
import com.intellij.psi.stubs.{IndexSink, StubElement, StubOutputStream, StubSerializer}
/**
* @author adkozlov
*/
trait DefaultStubSerializer[S <: StubElement[_]] extends StubSerializer[S] {
def getLanguage: Language
def debugName: String
override def getExternalId: String =
s"${getLanguage.toString.toLowerCase}.$debugName"
override def serialize(stub: S, dataStream: StubOutputStream): Unit = {}
override def indexStub(stub: S, sink: IndexSink): Unit = {}
}
| loskutov/intellij-scala | src/org/jetbrains/plugins/scala/lang/psi/stubs/elements/DefaultStubSerializer.scala | Scala | apache-2.0 | 580 |
package mesosphere.marathon
package state
import java.util.UUID
import org.apache.mesos.{Protos => mesos}
object TaskFailureTestHelper {
val taskFailureId = s"failedtask.${UUID.randomUUID()}"
lazy val taskFailure = TaskFailure(
appId = AbsolutePathId("/group/app"),
taskId = mesos.TaskID.newBuilder.setValue(taskFailureId).build,
state = mesos.TaskState.TASK_FAILED,
message = "Process exited with status [1]",
host = "slave5.mega.co",
version = Timestamp(1000),
timestamp = Timestamp(2000)
)
}
| mesosphere/marathon | src/test/scala/mesosphere/marathon/state/TaskFailureTestHelper.scala | Scala | apache-2.0 | 532 |
package io.youi.event
sealed trait DeltaMode
object DeltaMode {
case object Pixel extends DeltaMode
case object Line extends DeltaMode
case object Page extends DeltaMode
} | outr/youi | gui/src/main/scala/io/youi/event/DeltaMode.scala | Scala | mit | 179 |
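An illustrative exhaustive match over the ADT above, e.g. to normalise wheel deltas to pixels; the conversion factors are made-up placeholders.

package io.youi.event

object DeltaModeSketch {
  def toPixels(delta: Double, mode: DeltaMode): Double = mode match {
    case DeltaMode.Pixel => delta
    case DeltaMode.Line  => delta * 16.0   // assumed line height in pixels
    case DeltaMode.Page  => delta * 800.0  // assumed page height in pixels
  }
}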
package fi.pyppe.ircbot.slave
import twitter4j.TwitterFactory
import scala.concurrent.{ExecutionContext, Future}
object Tweets {
def statusText(id: Long)(implicit ec: ExecutionContext): Future[String] = Future {
val status = TwitterFactory.getSingleton.showStatus(id)
val user = s"@${status.getUser.getScreenName} (${status.getUser.getName})"
    val text = removeUrls(status.getText.replace("\n", " "))
s"$user: $text"
}
private def removeUrls(str: String) = str.replaceAll(
"https?://\\\\S+\\\\b",
""
).trim
}
| Pyppe/akka-ircbot | slave/src/main/scala/fi/pyppe/ircbot/slave/Tweets.scala | Scala | mit | 542 |
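A usage sketch for the helper above; it assumes twitter4j credentials are already configured for the default TwitterFactory singleton, and the status id is made up.

import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{Failure, Success}
import fi.pyppe.ircbot.slave.Tweets

object TweetsSketch extends App {
  Tweets.statusText(123456789012345678L).onComplete {
    case Success(line) => println(line)  // "@user (Name): tweet text without urls"
    case Failure(err)  => println(s"lookup failed: ${err.getMessage}")
  }
  Thread.sleep(5000) // crude wait for the async lookup in this throwaway sketch
}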
package lila
package object plan extends PackageObject with WithPlay {
private[plan] def logger = lila.log("plan")
}
| clarkerubber/lila | modules/plan/src/main/package.scala | Scala | agpl-3.0 | 121 |
/*
* Copyright 2014 http4s.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.http4s.blaze.http.http2
import java.nio.charset.StandardCharsets
import java.nio.ByteBuffer
import org.http4s.blaze.http.http2.Http2Settings.Setting
import org.http4s.blaze.http.http2.mocks.MockFrameListener
private[http2] object ProtocolFrameDecoder {
private[this] class Listener(inHeaderSequence: Boolean)
extends MockFrameListener(inHeaderSequence) {
var frame: ProtocolFrame = ProtocolFrame.Empty
override def onGoAwayFrame(lastStream: Int, errorCode: Long, debugData: Array[Byte]): Result = {
val ex = new Http2SessionException(errorCode, new String(debugData, StandardCharsets.UTF_8))
frame = ProtocolFrame.GoAway(lastStream, ex)
Continue
}
override def onPingFrame(ack: Boolean, data: Array[Byte]): Result = {
frame = ProtocolFrame.Ping(ack, data)
Continue
}
override def onSettingsFrame(settings: Option[Seq[Setting]]): Result = {
frame = ProtocolFrame.Settings(settings)
Continue
}
}
def decode(
buffer: ByteBuffer,
inHeadersSequence: Boolean = false,
maxFrameSize: Int = Http2Settings.default.maxFrameSize
): ProtocolFrame = {
val listener = new Listener(inHeadersSequence)
val decoder = new FrameDecoder(toSettings(maxFrameSize), listener)
decoder.decodeBuffer(buffer) match {
case Continue => listener.frame
case BufferUnderflow => ProtocolFrame.Empty
case Error(ex) => throw ex
}
}
private[this] def toSettings(maxFrameSize: Int): Http2Settings =
if (maxFrameSize == Http2Settings.default.maxFrameSize) Http2Settings.default
else Http2Settings.default.copy(maxFrameSize = maxFrameSize)
}
| http4s/blaze | http/src/test/scala/org/http4s/blaze/http/http2/ProtocolFrameDecoder.scala | Scala | apache-2.0 | 2,266 |
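A usage sketch for the test helper above, matching on the frame shapes constructed by the listener (Ping, Settings, GoAway, Empty); it assumes those are case classes/objects with the constructor shapes used above, and building a syntactically valid frame buffer is out of scope, so the input is taken as given.

package org.http4s.blaze.http.http2

import java.nio.ByteBuffer

object ProtocolFrameDecoderSketch {
  def describe(rawFrame: ByteBuffer): String =
    ProtocolFrameDecoder.decode(rawFrame) match {
      case ProtocolFrame.Ping(ack, _)      => s"PING (ack=$ack)"
      case ProtocolFrame.Settings(s)       => s"SETTINGS (${s.fold(0)(_.size)} settings)"
      case ProtocolFrame.GoAway(last, err) => s"GOAWAY (lastStream=$last, ${err.getMessage})"
      case ProtocolFrame.Empty             => "incomplete frame (buffer underflow)"
      case other                           => other.toString
    }
}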
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.rdd
import java.util.Random
import scala.collection.{mutable, Map}
import scala.collection.mutable.ArrayBuffer
import scala.io.Codec
import scala.language.implicitConversions
import scala.reflect.{classTag, ClassTag}
import com.clearspring.analytics.stream.cardinality.HyperLogLogPlus
import org.apache.hadoop.io.{BytesWritable, NullWritable, Text}
import org.apache.hadoop.io.compress.CompressionCodec
import org.apache.hadoop.mapred.TextOutputFormat
import org.apache.spark._
import org.apache.spark.Partitioner._
import org.apache.spark.annotation.{DeveloperApi, Since}
import org.apache.spark.api.java.JavaRDD
import org.apache.spark.internal.Logging
import org.apache.spark.partial.BoundedDouble
import org.apache.spark.partial.CountEvaluator
import org.apache.spark.partial.GroupedCountEvaluator
import org.apache.spark.partial.PartialResult
import org.apache.spark.storage.{RDDBlockId, StorageLevel}
import org.apache.spark.util.{BoundedPriorityQueue, Utils}
import org.apache.spark.util.collection.{OpenHashMap, Utils => collectionUtils}
import org.apache.spark.util.random.{BernoulliCellSampler, BernoulliSampler, PoissonSampler,
SamplingUtils}
/**
* A Resilient Distributed Dataset (RDD), the basic abstraction in Spark. Represents an immutable,
* partitioned collection of elements that can be operated on in parallel. This class contains the
* basic operations available on all RDDs, such as `map`, `filter`, and `persist`. In addition,
* [[org.apache.spark.rdd.PairRDDFunctions]] contains operations available only on RDDs of key-value
* pairs, such as `groupByKey` and `join`;
* [[org.apache.spark.rdd.DoubleRDDFunctions]] contains operations available only on RDDs of
* Doubles; and
* [[org.apache.spark.rdd.SequenceFileRDDFunctions]] contains operations available on RDDs that
* can be saved as SequenceFiles.
 * All operations are automatically available on any RDD of the right type (e.g. RDD[(Int, Int)])
 * through implicit conversions.
*
* Internally, each RDD is characterized by five main properties:
*
* - A list of partitions
* - A function for computing each split
* - A list of dependencies on other RDDs
* - Optionally, a Partitioner for key-value RDDs (e.g. to say that the RDD is hash-partitioned)
* - Optionally, a list of preferred locations to compute each split on (e.g. block locations for
* an HDFS file)
*
* All of the scheduling and execution in Spark is done based on these methods, allowing each RDD
* to implement its own way of computing itself. Indeed, users can implement custom RDDs (e.g. for
* reading data from a new storage system) by overriding these functions. Please refer to the
* <a href="http://people.csail.mit.edu/matei/papers/2012/nsdi_spark.pdf">Spark paper</a>
* for more details on RDD internals.
*/
abstract class RDD[T: ClassTag](
@transient private var _sc: SparkContext,
@transient private var deps: Seq[Dependency[_]]
) extends Serializable with Logging {
if (classOf[RDD[_]].isAssignableFrom(elementClassTag.runtimeClass)) {
// This is a warning instead of an exception in order to avoid breaking user programs that
// might have defined nested RDDs without running jobs with them.
logWarning("Spark does not support nested RDDs (see SPARK-5063)")
}
private def sc: SparkContext = {
if (_sc == null) {
throw new SparkException(
"This RDD lacks a SparkContext. It could happen in the following cases: \\n(1) RDD " +
"transformations and actions are NOT invoked by the driver, but inside of other " +
"transformations; for example, rdd1.map(x => rdd2.values.count() * x) is invalid " +
"because the values transformation and count action cannot be performed inside of the " +
"rdd1.map transformation. For more information, see SPARK-5063.\\n(2) When a Spark " +
"Streaming job recovers from checkpoint, this exception will be hit if a reference to " +
"an RDD not defined by the streaming job is used in DStream operations. For more " +
"information, See SPARK-13758.")
}
_sc
}
/** Construct an RDD with just a one-to-one dependency on one parent */
def this(@transient oneParent: RDD[_]) =
this(oneParent.context, List(new OneToOneDependency(oneParent)))
private[spark] def conf = sc.conf
// =======================================================================
// Methods that should be implemented by subclasses of RDD
// =======================================================================
/**
* :: DeveloperApi ::
* Implemented by subclasses to compute a given partition.
*/
@DeveloperApi
def compute(split: Partition, context: TaskContext): Iterator[T]
/**
* Implemented by subclasses to return the set of partitions in this RDD. This method will only
* be called once, so it is safe to implement a time-consuming computation in it.
*
* The partitions in this array must satisfy the following property:
* `rdd.partitions.zipWithIndex.forall { case (partition, index) => partition.index == index }`
*/
protected def getPartitions: Array[Partition]
/**
* Implemented by subclasses to return how this RDD depends on parent RDDs. This method will only
* be called once, so it is safe to implement a time-consuming computation in it.
*/
protected def getDependencies: Seq[Dependency[_]] = deps
/**
* Optionally overridden by subclasses to specify placement preferences.
*/
protected def getPreferredLocations(split: Partition): Seq[String] = Nil
/** Optionally overridden by subclasses to specify how they are partitioned. */
@transient val partitioner: Option[Partitioner] = None
// =======================================================================
// Methods and fields available on all RDDs
// =======================================================================
/** The SparkContext that created this RDD. */
def sparkContext: SparkContext = sc
/** A unique ID for this RDD (within its SparkContext). */
val id: Int = sc.newRddId()
/** A friendly name for this RDD */
@transient var name: String = null
/** Assign a name to this RDD */
def setName(_name: String): this.type = {
name = _name
this
}
/**
* Mark this RDD for persisting using the specified level.
*
* @param newLevel the target storage level
* @param allowOverride whether to override any existing level with the new one
*/
private def persist(newLevel: StorageLevel, allowOverride: Boolean): this.type = {
// TODO: Handle changes of StorageLevel
if (storageLevel != StorageLevel.NONE && newLevel != storageLevel && !allowOverride) {
throw new UnsupportedOperationException(
"Cannot change storage level of an RDD after it was already assigned a level")
}
// If this is the first time this RDD is marked for persisting, register it
// with the SparkContext for cleanups and accounting. Do this only once.
if (storageLevel == StorageLevel.NONE) {
sc.cleaner.foreach(_.registerRDDForCleanup(this))
sc.persistRDD(this)
}
storageLevel = newLevel
this
}
/**
* Set this RDD's storage level to persist its values across operations after the first time
* it is computed. This can only be used to assign a new storage level if the RDD does not
* have a storage level set yet. Local checkpointing is an exception.
*/
def persist(newLevel: StorageLevel): this.type = {
if (isLocallyCheckpointed) {
// This means the user previously called localCheckpoint(), which should have already
// marked this RDD for persisting. Here we should override the old storage level with
// one that is explicitly requested by the user (after adapting it to use disk).
persist(LocalRDDCheckpointData.transformStorageLevel(newLevel), allowOverride = true)
} else {
persist(newLevel, allowOverride = false)
}
}
/**
* Persist this RDD with the default storage level (`MEMORY_ONLY`).
*/
def persist(): this.type = persist(StorageLevel.MEMORY_ONLY)
/**
* Persist this RDD with the default storage level (`MEMORY_ONLY`).
*/
def cache(): this.type = persist()
/**
* Mark the RDD as non-persistent, and remove all blocks for it from memory and disk.
*
* @param blocking Whether to block until all blocks are deleted.
* @return This RDD.
*/
def unpersist(blocking: Boolean = true): this.type = {
logInfo("Removing RDD " + id + " from persistence list")
sc.unpersistRDD(id, blocking)
storageLevel = StorageLevel.NONE
this
}
/** Get the RDD's current storage level, or StorageLevel.NONE if none is set. */
def getStorageLevel: StorageLevel = storageLevel
// Our dependencies and partitions will be gotten by calling subclass's methods below, and will
// be overwritten when we're checkpointed
private var dependencies_ : Seq[Dependency[_]] = null
@transient private var partitions_ : Array[Partition] = null
/** An Option holding our checkpoint RDD, if we are checkpointed */
private def checkpointRDD: Option[CheckpointRDD[T]] = checkpointData.flatMap(_.checkpointRDD)
/**
* Get the list of dependencies of this RDD, taking into account whether the
* RDD is checkpointed or not.
*/
final def dependencies: Seq[Dependency[_]] = {
checkpointRDD.map(r => List(new OneToOneDependency(r))).getOrElse {
if (dependencies_ == null) {
dependencies_ = getDependencies
}
dependencies_
}
}
/**
* Get the array of partitions of this RDD, taking into account whether the
* RDD is checkpointed or not.
*/
final def partitions: Array[Partition] = {
checkpointRDD.map(_.partitions).getOrElse {
if (partitions_ == null) {
partitions_ = getPartitions
partitions_.zipWithIndex.foreach { case (partition, index) =>
require(partition.index == index,
s"partitions($index).partition == ${partition.index}, but it should equal $index")
}
}
partitions_
}
}
/**
* Returns the number of partitions of this RDD.
*/
@Since("1.6.0")
final def getNumPartitions: Int = partitions.length
/**
* Get the preferred locations of a partition, taking into account whether the
* RDD is checkpointed.
*/
final def preferredLocations(split: Partition): Seq[String] = {
checkpointRDD.map(_.getPreferredLocations(split)).getOrElse {
getPreferredLocations(split)
}
}
/**
* Internal method to this RDD; will read from cache if applicable, or otherwise compute it.
* This should ''not'' be called by users directly, but is available for implementors of custom
* subclasses of RDD.
*/
final def iterator(split: Partition, context: TaskContext): Iterator[T] = {
if (storageLevel != StorageLevel.NONE) {
getOrCompute(split, context)
} else {
computeOrReadCheckpoint(split, context)
}
}
/**
* Return the ancestors of the given RDD that are related to it only through a sequence of
* narrow dependencies. This traverses the given RDD's dependency tree using DFS, but maintains
* no ordering on the RDDs returned.
*/
private[spark] def getNarrowAncestors: Seq[RDD[_]] = {
val ancestors = new mutable.HashSet[RDD[_]]
def visit(rdd: RDD[_]) {
val narrowDependencies = rdd.dependencies.filter(_.isInstanceOf[NarrowDependency[_]])
val narrowParents = narrowDependencies.map(_.rdd)
val narrowParentsNotVisited = narrowParents.filterNot(ancestors.contains)
narrowParentsNotVisited.foreach { parent =>
ancestors.add(parent)
visit(parent)
}
}
visit(this)
// In case there is a cycle, do not include the root itself
ancestors.filterNot(_ == this).toSeq
}
/**
* Compute an RDD partition or read it from a checkpoint if the RDD is checkpointing.
*/
private[spark] def computeOrReadCheckpoint(split: Partition, context: TaskContext): Iterator[T] =
{
if (isCheckpointedAndMaterialized) {
firstParent[T].iterator(split, context)
} else {
compute(split, context)
}
}
/**
* Gets or computes an RDD partition. Used by RDD.iterator() when an RDD is cached.
*/
private[spark] def getOrCompute(partition: Partition, context: TaskContext): Iterator[T] = {
    // Use the RDD id and the partition index to derive the id of the block to read or write
val blockId = RDDBlockId(id, partition.index)
var readCachedBlock = true
// This method is called on executors, so we need call SparkEnv.get instead of sc.env.
    // getOrElseUpdate first tries to read the block by its id and otherwise computes and
    // stores it -- this is the entry point for writing cached partition data.
SparkEnv.get.blockManager.getOrElseUpdate(blockId, storageLevel, elementClassTag, () => {
      // If the data is not already in memory, try reading a checkpointed result; otherwise compute the partition
readCachedBlock = false
computeOrReadCheckpoint(partition, context)
}) match {
      // Handle the value returned by getOrElseUpdate and record metrics about the result
      case Left(blockResult) => // the block was read from or stored in the block manager (cache hit)
if (readCachedBlock) {
          // record input metrics
val existingMetrics = context.taskMetrics().inputMetrics
existingMetrics.incBytesRead(blockResult.bytes)
new InterruptibleIterator[T](context, blockResult.data.asInstanceOf[Iterator[T]]) {
override def next(): T = {
existingMetrics.incRecordsRead(1)
delegate.next()
}
}
} else {
new InterruptibleIterator(context, blockResult.data.asInstanceOf[Iterator[T]])
}
      // The block could not be stored (e.g. it was too large for memory and could not be
      // spilled to disk), so the computed iterator is returned directly to the caller
case Right(iter) =>
new InterruptibleIterator(context, iter.asInstanceOf[Iterator[T]])
}
}
/**
* Execute a block of code in a scope such that all new RDDs created in this body will
* be part of the same scope. For more detail, see {{org.apache.spark.rdd.RDDOperationScope}}.
*
* Note: Return statements are NOT allowed in the given body.
*/
private[spark] def withScope[U](body: => U): U = RDDOperationScope.withScope[U](sc)(body)
// Transformations (return a new RDD)
/**
* Return a new RDD by applying a function to all elements of this RDD.
*/
def map[U: ClassTag](f: T => U): RDD[U] = withScope {
val cleanF = sc.clean(f)
new MapPartitionsRDD[U, T](this, (context, pid, iter) => iter.map(cleanF))
}
/**
* Return a new RDD by first applying a function to all elements of this
* RDD, and then flattening the results.
*/
def flatMap[U: ClassTag](f: T => TraversableOnce[U]): RDD[U] = withScope {
val cleanF = sc.clean(f)
new MapPartitionsRDD[U, T](this, (context, pid, iter) => iter.flatMap(cleanF))
}
/**
* Return a new RDD containing only the elements that satisfy a predicate.
*/
def filter(f: T => Boolean): RDD[T] = withScope {
val cleanF = sc.clean(f)
new MapPartitionsRDD[T, T](
this,
(context, pid, iter) => iter.filter(cleanF),
preservesPartitioning = true)
}
/**
* Return a new RDD containing the distinct elements in this RDD.
*/
def distinct(numPartitions: Int)(implicit ord: Ordering[T] = null): RDD[T] = withScope {
map(x => (x, null)).reduceByKey((x, y) => x, numPartitions).map(_._1)
}
/**
* Return a new RDD containing the distinct elements in this RDD.
*/
def distinct(): RDD[T] = withScope {
distinct(partitions.length)
}
/**
* Return a new RDD that has exactly numPartitions partitions.
*
* Can increase or decrease the level of parallelism in this RDD. Internally, this uses
* a shuffle to redistribute data.
*
* If you are decreasing the number of partitions in this RDD, consider using `coalesce`,
* which can avoid performing a shuffle.
*/
def repartition(numPartitions: Int)(implicit ord: Ordering[T] = null): RDD[T] = withScope {
coalesce(numPartitions, shuffle = true)
}
/**
* Return a new RDD that is reduced into `numPartitions` partitions.
*
* This results in a narrow dependency, e.g. if you go from 1000 partitions
* to 100 partitions, there will not be a shuffle, instead each of the 100
* new partitions will claim 10 of the current partitions. If a larger number
* of partitions is requested, it will stay at the current number of partitions.
*
* However, if you're doing a drastic coalesce, e.g. to numPartitions = 1,
* this may result in your computation taking place on fewer nodes than
* you like (e.g. one node in the case of numPartitions = 1). To avoid this,
* you can pass shuffle = true. This will add a shuffle step, but means the
* current upstream partitions will be executed in parallel (per whatever
* the current partitioning is).
*
* @note With shuffle = true, you can actually coalesce to a larger number
* of partitions. This is useful if you have a small number of partitions,
* say 100, potentially with a few partitions being abnormally large. Calling
* coalesce(1000, shuffle = true) will result in 1000 partitions with the
* data distributed using a hash partitioner. The optional partition coalescer
* passed in must be serializable.
*/
def coalesce(numPartitions: Int, shuffle: Boolean = false,
partitionCoalescer: Option[PartitionCoalescer] = Option.empty)
(implicit ord: Ordering[T] = null)
: RDD[T] = withScope {
require(numPartitions > 0, s"Number of partitions ($numPartitions) must be positive.")
if (shuffle) {
/** Distributes elements evenly across output partitions, starting from a random partition. */
val distributePartition = (index: Int, items: Iterator[T]) => {
var position = (new Random(index)).nextInt(numPartitions)
items.map { t =>
// Note that the hash code of the key will just be the key itself. The HashPartitioner
// will mod it with the number of total partitions.
position = position + 1
(position, t)
}
} : Iterator[(Int, T)]
// include a shuffle step so that our upstream tasks are still distributed
new CoalescedRDD(
new ShuffledRDD[Int, T, T](mapPartitionsWithIndex(distributePartition),
new HashPartitioner(numPartitions)),
numPartitions,
partitionCoalescer).values
} else {
new CoalescedRDD(this, numPartitions, partitionCoalescer)
}
}
/**
* Return a sampled subset of this RDD.
*
* @param withReplacement can elements be sampled multiple times (replaced when sampled out)
* @param fraction expected size of the sample as a fraction of this RDD's size
* without replacement: probability that each element is chosen; fraction must be [0, 1]
* with replacement: expected number of times each element is chosen; fraction must be greater
* than or equal to 0
* @param seed seed for the random number generator
*
* @note This is NOT guaranteed to provide exactly the fraction of the count
* of the given [[RDD]].
*/
def sample(
withReplacement: Boolean,
fraction: Double,
seed: Long = Utils.random.nextLong): RDD[T] = {
require(fraction >= 0,
s"Fraction must be nonnegative, but got ${fraction}")
withScope {
require(fraction >= 0.0, "Negative fraction value: " + fraction)
if (withReplacement) {
new PartitionwiseSampledRDD[T, T](this, new PoissonSampler[T](fraction), true, seed)
} else {
new PartitionwiseSampledRDD[T, T](this, new BernoulliSampler[T](fraction), true, seed)
}
}
}
/**
* Randomly splits this RDD with the provided weights.
*
* @param weights weights for splits, will be normalized if they don't sum to 1
* @param seed random seed
*
* @return split RDDs in an array
*/
def randomSplit(
weights: Array[Double],
seed: Long = Utils.random.nextLong): Array[RDD[T]] = {
require(weights.forall(_ >= 0),
s"Weights must be nonnegative, but got ${weights.mkString("[", ",", "]")}")
require(weights.sum > 0,
s"Sum of weights must be positive, but got ${weights.mkString("[", ",", "]")}")
withScope {
val sum = weights.sum
val normalizedCumWeights = weights.map(_ / sum).scanLeft(0.0d)(_ + _)
normalizedCumWeights.sliding(2).map { x =>
randomSampleWithRange(x(0), x(1), seed)
}.toArray
}
}
/**
* Internal method exposed for Random Splits in DataFrames. Samples an RDD given a probability
* range.
* @param lb lower bound to use for the Bernoulli sampler
* @param ub upper bound to use for the Bernoulli sampler
* @param seed the seed for the Random number generator
* @return A random sub-sample of the RDD without replacement.
*/
private[spark] def randomSampleWithRange(lb: Double, ub: Double, seed: Long): RDD[T] = {
this.mapPartitionsWithIndex( { (index, partition) =>
val sampler = new BernoulliCellSampler[T](lb, ub)
sampler.setSeed(seed + index)
sampler.sample(partition)
}, preservesPartitioning = true)
}
/**
* Return a fixed-size sampled subset of this RDD in an array
*
* @param withReplacement whether sampling is done with replacement
* @param num size of the returned sample
* @param seed seed for the random number generator
* @return sample of specified size in an array
*
* @note this method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*/
def takeSample(
withReplacement: Boolean,
num: Int,
seed: Long = Utils.random.nextLong): Array[T] = withScope {
val numStDev = 10.0
require(num >= 0, "Negative number of elements requested")
require(num <= (Int.MaxValue - (numStDev * math.sqrt(Int.MaxValue)).toInt),
"Cannot support a sample size > Int.MaxValue - " +
s"$numStDev * math.sqrt(Int.MaxValue)")
if (num == 0) {
new Array[T](0)
} else {
val initialCount = this.count()
if (initialCount == 0) {
new Array[T](0)
} else {
val rand = new Random(seed)
if (!withReplacement && num >= initialCount) {
Utils.randomizeInPlace(this.collect(), rand)
} else {
val fraction = SamplingUtils.computeFractionForSampleSize(num, initialCount,
withReplacement)
var samples = this.sample(withReplacement, fraction, rand.nextInt()).collect()
// If the first sample didn't turn out large enough, keep trying to take samples;
// this shouldn't happen often because we use a big multiplier for the initial size
var numIters = 0
while (samples.length < num) {
logWarning(s"Needed to re-sample due to insufficient sample size. Repeat #$numIters")
samples = this.sample(withReplacement, fraction, rand.nextInt()).collect()
numIters += 1
}
Utils.randomizeInPlace(samples, rand).take(num)
}
}
}
}
/**
* Return the union of this RDD and another one. Any identical elements will appear multiple
* times (use `.distinct()` to eliminate them).
*/
def union(other: RDD[T]): RDD[T] = withScope {
sc.union(this, other)
}
/**
* Return the union of this RDD and another one. Any identical elements will appear multiple
* times (use `.distinct()` to eliminate them).
*/
def ++(other: RDD[T]): RDD[T] = withScope {
this.union(other)
}
/**
* Return this RDD sorted by the given key function.
*/
def sortBy[K](
f: (T) => K,
ascending: Boolean = true,
numPartitions: Int = this.partitions.length)
(implicit ord: Ordering[K], ctag: ClassTag[K]): RDD[T] = withScope {
this.keyBy[K](f)
.sortByKey(ascending, numPartitions)
.values
}
/**
* Return the intersection of this RDD and another one. The output will not contain any duplicate
* elements, even if the input RDDs did.
*
* @note This method performs a shuffle internally.
*/
def intersection(other: RDD[T]): RDD[T] = withScope {
this.map(v => (v, null)).cogroup(other.map(v => (v, null)))
.filter { case (_, (leftGroup, rightGroup)) => leftGroup.nonEmpty && rightGroup.nonEmpty }
.keys
}
/**
* Return the intersection of this RDD and another one. The output will not contain any duplicate
* elements, even if the input RDDs did.
*
* @note This method performs a shuffle internally.
*
* @param partitioner Partitioner to use for the resulting RDD
*/
def intersection(
other: RDD[T],
partitioner: Partitioner)(implicit ord: Ordering[T] = null): RDD[T] = withScope {
this.map(v => (v, null)).cogroup(other.map(v => (v, null)), partitioner)
.filter { case (_, (leftGroup, rightGroup)) => leftGroup.nonEmpty && rightGroup.nonEmpty }
.keys
}
/**
* Return the intersection of this RDD and another one. The output will not contain any duplicate
* elements, even if the input RDDs did. Performs a hash partition across the cluster
*
* @note This method performs a shuffle internally.
*
* @param numPartitions How many partitions to use in the resulting RDD
*/
def intersection(other: RDD[T], numPartitions: Int): RDD[T] = withScope {
intersection(other, new HashPartitioner(numPartitions))
}
/**
* Return an RDD created by coalescing all elements within each partition into an array.
*/
def glom(): RDD[Array[T]] = withScope {
new MapPartitionsRDD[Array[T], T](this, (context, pid, iter) => Iterator(iter.toArray))
}
/**
* Return the Cartesian product of this RDD and another one, that is, the RDD of all pairs of
* elements (a, b) where a is in `this` and b is in `other`.
*/
def cartesian[U: ClassTag](other: RDD[U]): RDD[(T, U)] = withScope {
new CartesianRDD(sc, this, other)
}
/**
* Return an RDD of grouped items. Each group consists of a key and a sequence of elements
* mapping to that key. The ordering of elements within each group is not guaranteed, and
* may even differ each time the resulting RDD is evaluated.
*
* @note This operation may be very expensive. If you are grouping in order to perform an
* aggregation (such as a sum or average) over each key, using `PairRDDFunctions.aggregateByKey`
* or `PairRDDFunctions.reduceByKey` will provide much better performance.
*/
def groupBy[K](f: T => K)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])] = withScope {
groupBy[K](f, defaultPartitioner(this))
}
/**
* Return an RDD of grouped elements. Each group consists of a key and a sequence of elements
* mapping to that key. The ordering of elements within each group is not guaranteed, and
* may even differ each time the resulting RDD is evaluated.
*
* @note This operation may be very expensive. If you are grouping in order to perform an
* aggregation (such as a sum or average) over each key, using `PairRDDFunctions.aggregateByKey`
* or `PairRDDFunctions.reduceByKey` will provide much better performance.
*/
def groupBy[K](
f: T => K,
numPartitions: Int)(implicit kt: ClassTag[K]): RDD[(K, Iterable[T])] = withScope {
groupBy(f, new HashPartitioner(numPartitions))
}
/**
* Return an RDD of grouped items. Each group consists of a key and a sequence of elements
* mapping to that key. The ordering of elements within each group is not guaranteed, and
* may even differ each time the resulting RDD is evaluated.
*
* @note This operation may be very expensive. If you are grouping in order to perform an
* aggregation (such as a sum or average) over each key, using `PairRDDFunctions.aggregateByKey`
* or `PairRDDFunctions.reduceByKey` will provide much better performance.
*/
def groupBy[K](f: T => K, p: Partitioner)(implicit kt: ClassTag[K], ord: Ordering[K] = null)
: RDD[(K, Iterable[T])] = withScope {
val cleanF = sc.clean(f)
this.map(t => (cleanF(t), t)).groupByKey(p)
}
/**
* Return an RDD created by piping elements to a forked external process.
*/
def pipe(command: String): RDD[String] = withScope {
// Similar to Runtime.exec(), if we are given a single string, split it into words
// using a standard StringTokenizer (i.e. by spaces)
pipe(PipedRDD.tokenize(command))
}
/**
* Return an RDD created by piping elements to a forked external process.
*/
def pipe(command: String, env: Map[String, String]): RDD[String] = withScope {
// Similar to Runtime.exec(), if we are given a single string, split it into words
// using a standard StringTokenizer (i.e. by spaces)
pipe(PipedRDD.tokenize(command), env)
}
/**
* Return an RDD created by piping elements to a forked external process. The resulting RDD
* is computed by executing the given process once per partition. All elements
* of each input partition are written to a process's stdin as lines of input separated
* by a newline. The resulting partition consists of the process's stdout output, with
* each line of stdout resulting in one element of the output partition. A process is invoked
* even for empty partitions.
*
* The print behavior can be customized by providing two functions.
*
* @param command command to run in forked process.
* @param env environment variables to set.
* @param printPipeContext Before piping elements, this function is called as an opportunity
* to pipe context data. Print line function (like out.println) will be
* passed as printPipeContext's parameter.
* @param printRDDElement Use this function to customize how to pipe elements. This function
* will be called with each RDD element as the 1st parameter, and the
* print line function (like out.println()) as the 2nd parameter.
* An example of pipe the RDD data of groupBy() in a streaming way,
* instead of constructing a huge String to concat all the elements:
* {{{
* def printRDDElement(record:(String, Seq[String]), f:String=>Unit) =
* for (e <- record._2) {f(e)}
* }}}
* @param separateWorkingDir Use separate working directories for each task.
* @param bufferSize Buffer size for the stdin writer for the piped process.
* @param encoding Char encoding used for interacting (via stdin, stdout and stderr) with
* the piped process
* @return the result RDD
*/
def pipe(
command: Seq[String],
env: Map[String, String] = Map(),
printPipeContext: (String => Unit) => Unit = null,
printRDDElement: (T, String => Unit) => Unit = null,
separateWorkingDir: Boolean = false,
bufferSize: Int = 8192,
encoding: String = Codec.defaultCharsetCodec.name): RDD[String] = withScope {
new PipedRDD(this, command, env,
if (printPipeContext ne null) sc.clean(printPipeContext) else null,
if (printRDDElement ne null) sc.clean(printRDDElement) else null,
separateWorkingDir,
bufferSize,
encoding)
}
/**
* Return a new RDD by applying a function to each partition of this RDD.
*
* `preservesPartitioning` indicates whether the input function preserves the partitioner, which
* should be `false` unless this is a pair RDD and the input function doesn't modify the keys.
*/
def mapPartitions[U: ClassTag](
f: Iterator[T] => Iterator[U],
preservesPartitioning: Boolean = false): RDD[U] = withScope {
val cleanedF = sc.clean(f)
new MapPartitionsRDD(
this,
(context: TaskContext, index: Int, iter: Iterator[T]) => cleanedF(iter),
preservesPartitioning)
}
/**
* [performance] Spark's internal mapPartitionsWithIndex method that skips closure cleaning.
* It is a performance API to be used carefully only if we are sure that the RDD elements are
* serializable and don't require closure cleaning.
*
* @param preservesPartitioning indicates whether the input function preserves the partitioner,
* which should be `false` unless this is a pair RDD and the input function doesn't modify
* the keys.
*/
private[spark] def mapPartitionsWithIndexInternal[U: ClassTag](
f: (Int, Iterator[T]) => Iterator[U],
preservesPartitioning: Boolean = false): RDD[U] = withScope {
new MapPartitionsRDD(
this,
(context: TaskContext, index: Int, iter: Iterator[T]) => f(index, iter),
preservesPartitioning)
}
/**
* [performance] Spark's internal mapPartitions method that skips closure cleaning.
*/
private[spark] def mapPartitionsInternal[U: ClassTag](
f: Iterator[T] => Iterator[U],
preservesPartitioning: Boolean = false): RDD[U] = withScope {
new MapPartitionsRDD(
this,
(context: TaskContext, index: Int, iter: Iterator[T]) => f(iter),
preservesPartitioning)
}
/**
* Return a new RDD by applying a function to each partition of this RDD, while tracking the index
* of the original partition.
*
* `preservesPartitioning` indicates whether the input function preserves the partitioner, which
* should be `false` unless this is a pair RDD and the input function doesn't modify the keys.
*/
def mapPartitionsWithIndex[U: ClassTag](
f: (Int, Iterator[T]) => Iterator[U],
preservesPartitioning: Boolean = false): RDD[U] = withScope {
val cleanedF = sc.clean(f)
new MapPartitionsRDD(
this,
(context: TaskContext, index: Int, iter: Iterator[T]) => cleanedF(index, iter),
preservesPartitioning)
}
/**
* Zips this RDD with another one, returning key-value pairs with the first element in each RDD,
* second element in each RDD, etc. Assumes that the two RDDs have the *same number of
* partitions* and the *same number of elements in each partition* (e.g. one was made through
* a map on the other).
*/
def zip[U: ClassTag](other: RDD[U]): RDD[(T, U)] = withScope {
zipPartitions(other, preservesPartitioning = false) { (thisIter, otherIter) =>
new Iterator[(T, U)] {
def hasNext: Boolean = (thisIter.hasNext, otherIter.hasNext) match {
case (true, true) => true
case (false, false) => false
case _ => throw new SparkException("Can only zip RDDs with " +
"same number of elements in each partition")
}
def next(): (T, U) = (thisIter.next(), otherIter.next())
}
}
}
/**
* Zip this RDD's partitions with one (or more) RDD(s) and return a new RDD by
* applying a function to the zipped partitions. Assumes that all the RDDs have the
* *same number of partitions*, but does *not* require them to have the same number
* of elements in each partition.
*/
def zipPartitions[B: ClassTag, V: ClassTag]
(rdd2: RDD[B], preservesPartitioning: Boolean)
(f: (Iterator[T], Iterator[B]) => Iterator[V]): RDD[V] = withScope {
new ZippedPartitionsRDD2(sc, sc.clean(f), this, rdd2, preservesPartitioning)
}
def zipPartitions[B: ClassTag, V: ClassTag]
(rdd2: RDD[B])
(f: (Iterator[T], Iterator[B]) => Iterator[V]): RDD[V] = withScope {
zipPartitions(rdd2, preservesPartitioning = false)(f)
}
def zipPartitions[B: ClassTag, C: ClassTag, V: ClassTag]
(rdd2: RDD[B], rdd3: RDD[C], preservesPartitioning: Boolean)
(f: (Iterator[T], Iterator[B], Iterator[C]) => Iterator[V]): RDD[V] = withScope {
new ZippedPartitionsRDD3(sc, sc.clean(f), this, rdd2, rdd3, preservesPartitioning)
}
def zipPartitions[B: ClassTag, C: ClassTag, V: ClassTag]
(rdd2: RDD[B], rdd3: RDD[C])
(f: (Iterator[T], Iterator[B], Iterator[C]) => Iterator[V]): RDD[V] = withScope {
zipPartitions(rdd2, rdd3, preservesPartitioning = false)(f)
}
def zipPartitions[B: ClassTag, C: ClassTag, D: ClassTag, V: ClassTag]
(rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D], preservesPartitioning: Boolean)
(f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V]): RDD[V] = withScope {
new ZippedPartitionsRDD4(sc, sc.clean(f), this, rdd2, rdd3, rdd4, preservesPartitioning)
}
def zipPartitions[B: ClassTag, C: ClassTag, D: ClassTag, V: ClassTag]
(rdd2: RDD[B], rdd3: RDD[C], rdd4: RDD[D])
(f: (Iterator[T], Iterator[B], Iterator[C], Iterator[D]) => Iterator[V]): RDD[V] = withScope {
zipPartitions(rdd2, rdd3, rdd4, preservesPartitioning = false)(f)
}
// Actions (launch a job to return a value to the user program)
/**
* Applies a function f to all elements of this RDD.
*/
def foreach(f: T => Unit): Unit = withScope {
val cleanF = sc.clean(f)
sc.runJob(this, (iter: Iterator[T]) => iter.foreach(cleanF))
}
/**
* Applies a function f to each partition of this RDD.
*/
def foreachPartition(f: Iterator[T] => Unit): Unit = withScope {
val cleanF = sc.clean(f)
sc.runJob(this, (iter: Iterator[T]) => cleanF(iter))
}
/**
* Return an array that contains all of the elements in this RDD.
*
* @note This method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*/
def collect(): Array[T] = withScope {
val results = sc.runJob(this, (iter: Iterator[T]) => iter.toArray)
Array.concat(results: _*)
}
/**
* Return an iterator that contains all of the elements in this RDD.
*
* The iterator will consume as much memory as the largest partition in this RDD.
*
* @note This results in multiple Spark jobs, and if the input RDD is the result
   * of a wide transformation (e.g. join with different partitioners), the input RDD should be
   * cached first to avoid recomputing it.
*/
def toLocalIterator: Iterator[T] = withScope {
def collectPartition(p: Int): Array[T] = {
sc.runJob(this, (iter: Iterator[T]) => iter.toArray, Seq(p)).head
}
(0 until partitions.length).iterator.flatMap(i => collectPartition(i))
}
/**
* Return an RDD that contains all matching values by applying `f`.
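   *
   * For example (illustrative, assuming a `SparkContext` named `sc`):
   * {{{
   *   sc.parallelize(Seq[Any](1, "two", 3)).collect { case i: Int => i * 10 }.collect()
   *   // returns Array(10, 30)
   * }}}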
*/
def collect[U: ClassTag](f: PartialFunction[T, U]): RDD[U] = withScope {
val cleanF = sc.clean(f)
filter(cleanF.isDefinedAt).map(cleanF)
}
/**
* Return an RDD with the elements from `this` that are not in `other`.
*
   * Uses `this` partitioner/partition size, because even if `other` is huge, the resulting
   * RDD will be no larger than `this`.
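   *
   * For example (illustrative, assuming a `SparkContext` named `sc`):
   * {{{
   *   sc.parallelize(1 to 5).subtract(sc.parallelize(Seq(2, 4))).collect()
   *   // returns the elements 1, 3 and 5, in no particular order
   * }}}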
*/
def subtract(other: RDD[T]): RDD[T] = withScope {
subtract(other, partitioner.getOrElse(new HashPartitioner(partitions.length)))
}
/**
* Return an RDD with the elements from `this` that are not in `other`.
*/
def subtract(other: RDD[T], numPartitions: Int): RDD[T] = withScope {
subtract(other, new HashPartitioner(numPartitions))
}
/**
* Return an RDD with the elements from `this` that are not in `other`.
*/
def subtract(
other: RDD[T],
p: Partitioner)(implicit ord: Ordering[T] = null): RDD[T] = withScope {
if (partitioner == Some(p)) {
// Our partitioner knows how to handle T (which, since we have a partitioner, is
// really (K, V)) so make a new Partitioner that will de-tuple our fake tuples
val p2 = new Partitioner() {
override def numPartitions: Int = p.numPartitions
override def getPartition(k: Any): Int = p.getPartition(k.asInstanceOf[(Any, _)]._1)
}
// Unfortunately, since we're making a new p2, we'll get ShuffleDependencies
// anyway, and when calling .keys, will not have a partitioner set, even though
// the SubtractedRDD will, thanks to p2's de-tupled partitioning, already be
// partitioned by the right/real keys (e.g. p).
this.map(x => (x, null)).subtractByKey(other.map((_, null)), p2).keys
} else {
this.map(x => (x, null)).subtractByKey(other.map((_, null)), p).keys
}
}
/**
* Reduces the elements of this RDD using the specified commutative and
* associative binary operator.
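   *
   * For example (illustrative, assuming a `SparkContext` named `sc`):
   * {{{
   *   sc.parallelize(1 to 4).reduce(_ + _)
   *   // returns 10
   * }}}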
*/
def reduce(f: (T, T) => T): T = withScope {
val cleanF = sc.clean(f)
val reducePartition: Iterator[T] => Option[T] = iter => {
if (iter.hasNext) {
Some(iter.reduceLeft(cleanF))
} else {
None
}
}
var jobResult: Option[T] = None
val mergeResult = (index: Int, taskResult: Option[T]) => {
if (taskResult.isDefined) {
jobResult = jobResult match {
case Some(value) => Some(f(value, taskResult.get))
case None => taskResult
}
}
}
sc.runJob(this, reducePartition, mergeResult)
// Get the final result out of our Option, or throw an exception if the RDD was empty
jobResult.getOrElse(throw new UnsupportedOperationException("empty collection"))
}
/**
* Reduces the elements of this RDD in a multi-level tree pattern.
*
* @param depth suggested depth of the tree (default: 2)
* @see [[org.apache.spark.rdd.RDD#reduce]]
*/
def treeReduce(f: (T, T) => T, depth: Int = 2): T = withScope {
require(depth >= 1, s"Depth must be greater than or equal to 1 but got $depth.")
val cleanF = context.clean(f)
val reducePartition: Iterator[T] => Option[T] = iter => {
if (iter.hasNext) {
Some(iter.reduceLeft(cleanF))
} else {
None
}
}
val partiallyReduced = mapPartitions(it => Iterator(reducePartition(it)))
val op: (Option[T], Option[T]) => Option[T] = (c, x) => {
if (c.isDefined && x.isDefined) {
Some(cleanF(c.get, x.get))
} else if (c.isDefined) {
c
} else if (x.isDefined) {
x
} else {
None
}
}
partiallyReduced.treeAggregate(Option.empty[T])(op, op, depth)
.getOrElse(throw new UnsupportedOperationException("empty collection"))
}
/**
* Aggregate the elements of each partition, and then the results for all the partitions, using a
* given associative function and a neutral "zero value". The function
* op(t1, t2) is allowed to modify t1 and return it as its result value to avoid object
* allocation; however, it should not modify t2.
*
* This behaves somewhat differently from fold operations implemented for non-distributed
   * collections in functional languages like Scala. This fold operation may be applied to
   * partitions individually, and those partial results are then folded into the final result,
   * rather than applying the fold to each element sequentially in some defined ordering. For functions
* that are not commutative, the result may differ from that of a fold applied to a
* non-distributed collection.
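   *
   * For example (illustrative, assuming a `SparkContext` named `sc`):
   * {{{
   *   sc.parallelize(Seq(1, 2, 3, 4), 2).fold(0)(_ + _)
   *   // returns 10; the zero value is applied once per partition and once when merging,
   *   // so it must be neutral for `op`
   * }}}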
*
* @param zeroValue the initial value for the accumulated result of each partition for the `op`
* operator, and also the initial value for the combine results from different
* partitions for the `op` operator - this will typically be the neutral
* element (e.g. `Nil` for list concatenation or `0` for summation)
* @param op an operator used to both accumulate results within a partition and combine results
* from different partitions
*/
def fold(zeroValue: T)(op: (T, T) => T): T = withScope {
// Clone the zero value since we will also be serializing it as part of tasks
var jobResult = Utils.clone(zeroValue, sc.env.closureSerializer.newInstance())
val cleanOp = sc.clean(op)
val foldPartition = (iter: Iterator[T]) => iter.fold(zeroValue)(cleanOp)
val mergeResult = (index: Int, taskResult: T) => jobResult = op(jobResult, taskResult)
sc.runJob(this, foldPartition, mergeResult)
jobResult
}
/**
* Aggregate the elements of each partition, and then the results for all the partitions, using
* given combine functions and a neutral "zero value". This function can return a different result
* type, U, than the type of this RDD, T. Thus, we need one operation for merging a T into an U
* and one operation for merging two U's, as in scala.TraversableOnce. Both of these functions are
* allowed to modify and return their first argument instead of creating a new U to avoid memory
* allocation.
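   *
   * For example, computing a sum and a count in one pass (illustrative, assuming a
   * `SparkContext` named `sc`):
   * {{{
   *   sc.parallelize(Seq(1, 2, 3, 4)).aggregate((0, 0))(
   *     (acc, x) => (acc._1 + x, acc._2 + 1),
   *     (a, b) => (a._1 + b._1, a._2 + b._2))
   *   // returns (10, 4)
   * }}}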
*
* @param zeroValue the initial value for the accumulated result of each partition for the
* `seqOp` operator, and also the initial value for the combine results from
* different partitions for the `combOp` operator - this will typically be the
* neutral element (e.g. `Nil` for list concatenation or `0` for summation)
* @param seqOp an operator used to accumulate results within a partition
* @param combOp an associative operator used to combine results from different partitions
*/
def aggregate[U: ClassTag](zeroValue: U)(seqOp: (U, T) => U, combOp: (U, U) => U): U = withScope {
// Clone the zero value since we will also be serializing it as part of tasks
var jobResult = Utils.clone(zeroValue, sc.env.serializer.newInstance())
val cleanSeqOp = sc.clean(seqOp)
val cleanCombOp = sc.clean(combOp)
val aggregatePartition = (it: Iterator[T]) => it.aggregate(zeroValue)(cleanSeqOp, cleanCombOp)
val mergeResult = (index: Int, taskResult: U) => jobResult = combOp(jobResult, taskResult)
sc.runJob(this, aggregatePartition, mergeResult)
jobResult
}
/**
* Aggregates the elements of this RDD in a multi-level tree pattern.
*
* @param depth suggested depth of the tree (default: 2)
* @see [[org.apache.spark.rdd.RDD#aggregate]]
*/
def treeAggregate[U: ClassTag](zeroValue: U)(
seqOp: (U, T) => U,
combOp: (U, U) => U,
depth: Int = 2): U = withScope {
require(depth >= 1, s"Depth must be greater than or equal to 1 but got $depth.")
if (partitions.length == 0) {
Utils.clone(zeroValue, context.env.closureSerializer.newInstance())
} else {
val cleanSeqOp = context.clean(seqOp)
val cleanCombOp = context.clean(combOp)
val aggregatePartition =
(it: Iterator[T]) => it.aggregate(zeroValue)(cleanSeqOp, cleanCombOp)
var partiallyAggregated = mapPartitions(it => Iterator(aggregatePartition(it)))
var numPartitions = partiallyAggregated.partitions.length
val scale = math.max(math.ceil(math.pow(numPartitions, 1.0 / depth)).toInt, 2)
// If creating an extra level doesn't help reduce
// the wall-clock time, we stop tree aggregation.
// Don't trigger TreeAggregation when it doesn't save wall-clock time
while (numPartitions > scale + math.ceil(numPartitions.toDouble / scale)) {
numPartitions /= scale
val curNumPartitions = numPartitions
partiallyAggregated = partiallyAggregated.mapPartitionsWithIndex {
(i, iter) => iter.map((i % curNumPartitions, _))
}.reduceByKey(new HashPartitioner(curNumPartitions), cleanCombOp).values
}
partiallyAggregated.reduce(cleanCombOp)
}
}
/**
* Return the number of elements in the RDD.
*/
def count(): Long = sc.runJob(this, Utils.getIteratorSize _).sum
/**
* Approximate version of count() that returns a potentially incomplete result
* within a timeout, even if not all tasks have finished.
*
* The confidence is the probability that the error bounds of the result will
* contain the true value. That is, if countApprox were called repeatedly
* with confidence 0.9, we would expect 90% of the results to contain the
* true count. The confidence must be in the range [0,1] or an exception will
* be thrown.
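   *
   * For example (illustrative, assuming a `SparkContext` named `sc`):
   * {{{
   *   sc.parallelize(1 to 1000000, 100).countApprox(timeout = 1000L, confidence = 0.9)
   *   // returns a PartialResult whose bounds should contain 1000000 with ~90% probability
   * }}}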
*
* @param timeout maximum time to wait for the job, in milliseconds
* @param confidence the desired statistical confidence in the result
* @return a potentially incomplete result, with error bounds
*/
def countApprox(
timeout: Long,
confidence: Double = 0.95): PartialResult[BoundedDouble] = withScope {
require(0.0 <= confidence && confidence <= 1.0, s"confidence ($confidence) must be in [0,1]")
val countElements: (TaskContext, Iterator[T]) => Long = { (ctx, iter) =>
var result = 0L
while (iter.hasNext) {
result += 1L
iter.next()
}
result
}
val evaluator = new CountEvaluator(partitions.length, confidence)
sc.runApproximateJob(this, countElements, evaluator, timeout)
}
/**
* Return the count of each unique value in this RDD as a local map of (value, count) pairs.
*
* @note This method should only be used if the resulting map is expected to be small, as
* the whole thing is loaded into the driver's memory.
* To handle very large results, consider using
*
* {{{
* rdd.map(x => (x, 1L)).reduceByKey(_ + _)
* }}}
*
   * which returns an RDD[(T, Long)] instead of a map.
*/
def countByValue()(implicit ord: Ordering[T] = null): Map[T, Long] = withScope {
map(value => (value, null)).countByKey()
}
/**
* Approximate version of countByValue().
*
* @param timeout maximum time to wait for the job, in milliseconds
* @param confidence the desired statistical confidence in the result
* @return a potentially incomplete result, with error bounds
*/
def countByValueApprox(timeout: Long, confidence: Double = 0.95)
(implicit ord: Ordering[T] = null)
: PartialResult[Map[T, BoundedDouble]] = withScope {
require(0.0 <= confidence && confidence <= 1.0, s"confidence ($confidence) must be in [0,1]")
if (elementClassTag.runtimeClass.isArray) {
throw new SparkException("countByValueApprox() does not support arrays")
}
val countPartition: (TaskContext, Iterator[T]) => OpenHashMap[T, Long] = { (ctx, iter) =>
val map = new OpenHashMap[T, Long]
iter.foreach {
t => map.changeValue(t, 1L, _ + 1L)
}
map
}
val evaluator = new GroupedCountEvaluator[T](partitions.length, confidence)
sc.runApproximateJob(this, countPartition, evaluator, timeout)
}
/**
* Return approximate number of distinct elements in the RDD.
*
* The algorithm used is based on streamlib's implementation of "HyperLogLog in Practice:
* Algorithmic Engineering of a State of The Art Cardinality Estimation Algorithm", available
* <a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
*
   * The relative accuracy is approximately `1.054 / sqrt(2^p)`. Setting a nonzero `sp` (greater
   * than `p`) triggers the sparse representation of registers, which may reduce the memory
   * consumption and increase accuracy when the cardinality is small.
*
* @param p The precision value for the normal set.
* `p` must be a value between 4 and `sp` if `sp` is not zero (32 max).
* @param sp The precision value for the sparse set, between 0 and 32.
* If `sp` equals 0, the sparse representation is skipped.
*/
def countApproxDistinct(p: Int, sp: Int): Long = withScope {
require(p >= 4, s"p ($p) must be >= 4")
require(sp <= 32, s"sp ($sp) must be <= 32")
require(sp == 0 || p <= sp, s"p ($p) cannot be greater than sp ($sp)")
val zeroCounter = new HyperLogLogPlus(p, sp)
aggregate(zeroCounter)(
(hll: HyperLogLogPlus, v: T) => {
hll.offer(v)
hll
},
(h1: HyperLogLogPlus, h2: HyperLogLogPlus) => {
h1.addAll(h2)
h1
}).cardinality()
}
/**
* Return approximate number of distinct elements in the RDD.
*
* The algorithm used is based on streamlib's implementation of "HyperLogLog in Practice:
* Algorithmic Engineering of a State of The Art Cardinality Estimation Algorithm", available
* <a href="http://dx.doi.org/10.1145/2452376.2452456">here</a>.
*
* @param relativeSD Relative accuracy. Smaller values create counters that require more space.
* It must be greater than 0.000017.
*/
def countApproxDistinct(relativeSD: Double = 0.05): Long = withScope {
require(relativeSD > 0.000017, s"accuracy ($relativeSD) must be greater than 0.000017")
val p = math.ceil(2.0 * math.log(1.054 / relativeSD) / math.log(2)).toInt
countApproxDistinct(if (p < 4) 4 else p, 0)
}
/**
* Zips this RDD with its element indices. The ordering is first based on the partition index
* and then the ordering of items within each partition. So the first item in the first
* partition gets index 0, and the last item in the last partition receives the largest index.
*
   * This is similar to Scala's zipWithIndex but it uses Long instead of Int as the index type.
   * This method needs to trigger a spark job when this RDD contains more than one partition.
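   *
   * For example (illustrative, assuming a `SparkContext` named `sc`):
   * {{{
   *   sc.parallelize(Seq("a", "b", "c", "d"), 2).zipWithIndex().collect()
   *   // returns Array(("a", 0), ("b", 1), ("c", 2), ("d", 3))
   * }}}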
*
* @note Some RDDs, such as those returned by groupBy(), do not guarantee order of
* elements in a partition. The index assigned to each element is therefore not guaranteed,
* and may even change if the RDD is reevaluated. If a fixed ordering is required to guarantee
* the same index assignments, you should sort the RDD with sortByKey() or save it to a file.
*/
def zipWithIndex(): RDD[(T, Long)] = withScope {
new ZippedWithIndexRDD(this)
}
/**
* Zips this RDD with generated unique Long ids. Items in the kth partition will get ids k, n+k,
* 2*n+k, ..., where n is the number of partitions. So there may exist gaps, but this method
* won't trigger a spark job, which is different from [[org.apache.spark.rdd.RDD#zipWithIndex]].
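   *
   * For example, with the default slicing of `parallelize` (illustrative, assuming a
   * `SparkContext` named `sc`):
   * {{{
   *   sc.parallelize(Seq("a", "b", "c", "d"), 2).zipWithUniqueId().collect()
   *   // returns Array(("a", 0), ("b", 2), ("c", 1), ("d", 3))
   * }}}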
*
* @note Some RDDs, such as those returned by groupBy(), do not guarantee order of
* elements in a partition. The unique ID assigned to each element is therefore not guaranteed,
* and may even change if the RDD is reevaluated. If a fixed ordering is required to guarantee
* the same index assignments, you should sort the RDD with sortByKey() or save it to a file.
*/
def zipWithUniqueId(): RDD[(T, Long)] = withScope {
val n = this.partitions.length.toLong
this.mapPartitionsWithIndex { case (k, iter) =>
Utils.getIteratorZipWithIndex(iter, 0L).map { case (item, i) =>
(item, i * n + k)
}
}
}
/**
   * Take the first num elements of the RDD. It works by first scanning one partition, and using
   * the results from that partition to estimate the number of additional partitions needed to
   * satisfy the limit.
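   *
   * For example (illustrative, assuming a `SparkContext` named `sc`):
   * {{{
   *   sc.parallelize(1 to 100, 10).take(3)
   *   // returns Array(1, 2, 3)
   * }}}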
*
* @note This method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*
* @note Due to complications in the internal implementation, this method will raise
* an exception if called on an RDD of `Nothing` or `Null`.
*/
def take(num: Int): Array[T] = withScope {
val scaleUpFactor = Math.max(conf.getInt("spark.rdd.limit.scaleUpFactor", 4), 2)
if (num == 0) {
new Array[T](0)
} else {
val buf = new ArrayBuffer[T]
val totalParts = this.partitions.length
var partsScanned = 0
while (buf.size < num && partsScanned < totalParts) {
// The number of partitions to try in this iteration. It is ok for this number to be
// greater than totalParts because we actually cap it at totalParts in runJob.
var numPartsToTry = 1L
if (partsScanned > 0) {
// If we didn't find any rows after the previous iteration, quadruple and retry.
// Otherwise, interpolate the number of partitions we need to try, but overestimate
// it by 50%. We also cap the estimation in the end.
if (buf.isEmpty) {
numPartsToTry = partsScanned * scaleUpFactor
} else {
// the left side of max is >=1 whenever partsScanned >= 2
numPartsToTry = Math.max((1.5 * num * partsScanned / buf.size).toInt - partsScanned, 1)
numPartsToTry = Math.min(numPartsToTry, partsScanned * scaleUpFactor)
}
}
val left = num - buf.size
val p = partsScanned.until(math.min(partsScanned + numPartsToTry, totalParts).toInt)
val res = sc.runJob(this, (it: Iterator[T]) => it.take(left).toArray, p)
res.foreach(buf ++= _.take(num - buf.size))
partsScanned += p.size
}
buf.toArray
}
}
/**
* Return the first element in this RDD.
*/
def first(): T = withScope {
take(1) match {
case Array(t) => t
case _ => throw new UnsupportedOperationException("empty collection")
}
}
/**
* Returns the top k (largest) elements from this RDD as defined by the specified
* implicit Ordering[T] and maintains the ordering. This does the opposite of
* [[takeOrdered]]. For example:
* {{{
* sc.parallelize(Seq(10, 4, 2, 12, 3)).top(1)
* // returns Array(12)
*
* sc.parallelize(Seq(2, 3, 4, 5, 6)).top(2)
* // returns Array(6, 5)
* }}}
*
* @note This method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*
* @param num k, the number of top elements to return
* @param ord the implicit ordering for T
* @return an array of top elements
*/
def top(num: Int)(implicit ord: Ordering[T]): Array[T] = withScope {
takeOrdered(num)(ord.reverse)
}
/**
* Returns the first k (smallest) elements from this RDD as defined by the specified
* implicit Ordering[T] and maintains the ordering. This does the opposite of [[top]].
* For example:
* {{{
* sc.parallelize(Seq(10, 4, 2, 12, 3)).takeOrdered(1)
* // returns Array(2)
*
* sc.parallelize(Seq(2, 3, 4, 5, 6)).takeOrdered(2)
* // returns Array(2, 3)
* }}}
*
* @note This method should only be used if the resulting array is expected to be small, as
* all the data is loaded into the driver's memory.
*
* @param num k, the number of elements to return
* @param ord the implicit ordering for T
* @return an array of top elements
*/
def takeOrdered(num: Int)(implicit ord: Ordering[T]): Array[T] = withScope {
if (num == 0) {
Array.empty
} else {
val mapRDDs = mapPartitions { items =>
// Priority keeps the largest elements, so let's reverse the ordering.
val queue = new BoundedPriorityQueue[T](num)(ord.reverse)
queue ++= collectionUtils.takeOrdered(items, num)(ord)
Iterator.single(queue)
}
if (mapRDDs.partitions.length == 0) {
Array.empty
} else {
mapRDDs.reduce { (queue1, queue2) =>
queue1 ++= queue2
queue1
}.toArray.sorted(ord)
}
}
}
/**
* Returns the max of this RDD as defined by the implicit Ordering[T].
* @return the maximum element of the RDD
* */
def max()(implicit ord: Ordering[T]): T = withScope {
this.reduce(ord.max)
}
/**
* Returns the min of this RDD as defined by the implicit Ordering[T].
* @return the minimum element of the RDD
* */
def min()(implicit ord: Ordering[T]): T = withScope {
this.reduce(ord.min)
}
/**
* @note Due to complications in the internal implementation, this method will raise an
   * exception if called on an RDD of `Nothing` or `Null`. This may come up in practice
* because, for example, the type of `parallelize(Seq())` is `RDD[Nothing]`.
* (`parallelize(Seq())` should be avoided anyway in favor of `parallelize(Seq[T]())`.)
* @return true if and only if the RDD contains no elements at all. Note that an RDD
* may be empty even when it has at least 1 partition.
*/
def isEmpty(): Boolean = withScope {
partitions.length == 0 || take(1).length == 0
}
/**
* Save this RDD as a text file, using string representations of elements.
*/
def saveAsTextFile(path: String): Unit = withScope {
// https://issues.apache.org/jira/browse/SPARK-2075
//
// NullWritable is a `Comparable` in Hadoop 1.+, so the compiler cannot find an implicit
// Ordering for it and will use the default `null`. However, it's a `Comparable[NullWritable]`
// in Hadoop 2.+, so the compiler will call the implicit `Ordering.ordered` method to create an
// Ordering for `NullWritable`. That's why the compiler will generate different anonymous
// classes for `saveAsTextFile` in Hadoop 1.+ and Hadoop 2.+.
//
   * Therefore, here we provide an explicit Ordering `null` to make sure the compiler generates
   * the same bytecode for `saveAsTextFile`.
val nullWritableClassTag = implicitly[ClassTag[NullWritable]]
val textClassTag = implicitly[ClassTag[Text]]
val r = this.mapPartitions { iter =>
val text = new Text()
iter.map { x =>
text.set(x.toString)
(NullWritable.get(), text)
}
}
RDD.rddToPairRDDFunctions(r)(nullWritableClassTag, textClassTag, null)
.saveAsHadoopFile[TextOutputFormat[NullWritable, Text]](path)
}
/**
* Save this RDD as a compressed text file, using string representations of elements.
*/
def saveAsTextFile(path: String, codec: Class[_ <: CompressionCodec]): Unit = withScope {
// https://issues.apache.org/jira/browse/SPARK-2075
val nullWritableClassTag = implicitly[ClassTag[NullWritable]]
val textClassTag = implicitly[ClassTag[Text]]
val r = this.mapPartitions { iter =>
val text = new Text()
iter.map { x =>
text.set(x.toString)
(NullWritable.get(), text)
}
}
RDD.rddToPairRDDFunctions(r)(nullWritableClassTag, textClassTag, null)
.saveAsHadoopFile[TextOutputFormat[NullWritable, Text]](path, codec)
}
/**
* Save this RDD as a SequenceFile of serialized objects.
*/
def saveAsObjectFile(path: String): Unit = withScope {
this.mapPartitions(iter => iter.grouped(10).map(_.toArray))
.map(x => (NullWritable.get(), new BytesWritable(Utils.serialize(x))))
.saveAsSequenceFile(path)
}
/**
* Creates tuples of the elements in this RDD by applying `f`.
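   *
   * For example (illustrative, assuming a `SparkContext` named `sc`):
   * {{{
   *   sc.parallelize(Seq("apple", "banana", "cherry")).keyBy(_.length).collect()
   *   // returns Array((5, "apple"), (6, "banana"), (6, "cherry"))
   * }}}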
*/
def keyBy[K](f: T => K): RDD[(K, T)] = withScope {
val cleanedF = sc.clean(f)
map(x => (cleanedF(x), x))
}
/** A private method for tests, to look at the contents of each partition */
private[spark] def collectPartitions(): Array[Array[T]] = withScope {
sc.runJob(this, (iter: Iterator[T]) => iter.toArray)
}
/**
* Mark this RDD for checkpointing. It will be saved to a file inside the checkpoint
* directory set with `SparkContext#setCheckpointDir` and all references to its parent
* RDDs will be removed. This function must be called before any job has been
* executed on this RDD. It is strongly recommended that this RDD is persisted in
* memory, otherwise saving it on a file will require recomputation.
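   *
   * For example (illustrative, assuming a `SparkContext` named `sc` and a writable directory):
   * {{{
   *   sc.setCheckpointDir("/tmp/checkpoints")
   *   val rdd = sc.parallelize(1 to 100).map(_ * 2).persist()
   *   rdd.checkpoint()
   *   rdd.count()  // the first job materializes and checkpoints the RDD
   * }}}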
*/
def checkpoint(): Unit = RDDCheckpointData.synchronized {
// NOTE: we use a global lock here due to complexities downstream with ensuring
// children RDD partitions point to the correct parent partitions. In the future
// we should revisit this consideration.
if (context.checkpointDir.isEmpty) {
throw new SparkException("Checkpoint directory has not been set in the SparkContext")
} else if (checkpointData.isEmpty) {
checkpointData = Some(new ReliableRDDCheckpointData(this))
}
}
/**
* Mark this RDD for local checkpointing using Spark's existing caching layer.
*
* This method is for users who wish to truncate RDD lineages while skipping the expensive
* step of replicating the materialized data in a reliable distributed file system. This is
* useful for RDDs with long lineages that need to be truncated periodically (e.g. GraphX).
*
* Local checkpointing sacrifices fault-tolerance for performance. In particular, checkpointed
* data is written to ephemeral local storage in the executors instead of to a reliable,
* fault-tolerant storage. The effect is that if an executor fails during the computation,
* the checkpointed data may no longer be accessible, causing an irrecoverable job failure.
*
* This is NOT safe to use with dynamic allocation, which removes executors along
* with their cached blocks. If you must use both features, you are advised to set
* `spark.dynamicAllocation.cachedExecutorIdleTimeout` to a high value.
*
* The checkpoint directory set through `SparkContext#setCheckpointDir` is not used.
*/
def localCheckpoint(): this.type = RDDCheckpointData.synchronized {
if (conf.getBoolean("spark.dynamicAllocation.enabled", false) &&
conf.contains("spark.dynamicAllocation.cachedExecutorIdleTimeout")) {
logWarning("Local checkpointing is NOT safe to use with dynamic allocation, " +
"which removes executors along with their cached blocks. If you must use both " +
"features, you are advised to set `spark.dynamicAllocation.cachedExecutorIdleTimeout` " +
"to a high value. E.g. If you plan to use the RDD for 1 hour, set the timeout to " +
"at least 1 hour.")
}
// Note: At this point we do not actually know whether the user will call persist() on
// this RDD later, so we must explicitly call it here ourselves to ensure the cached
// blocks are registered for cleanup later in the SparkContext.
//
// If, however, the user has already called persist() on this RDD, then we must adapt
// the storage level he/she specified to one that is appropriate for local checkpointing
// (i.e. uses disk) to guarantee correctness.
if (storageLevel == StorageLevel.NONE) {
persist(LocalRDDCheckpointData.DEFAULT_STORAGE_LEVEL)
} else {
persist(LocalRDDCheckpointData.transformStorageLevel(storageLevel), allowOverride = true)
}
// If this RDD is already checkpointed and materialized, its lineage is already truncated.
// We must not override our `checkpointData` in this case because it is needed to recover
   * the checkpointed data. If it is overridden, the next materialization of this RDD will
   * cause an error.
if (isCheckpointedAndMaterialized) {
logWarning("Not marking RDD for local checkpoint because it was already " +
"checkpointed and materialized")
} else {
// Lineage is not truncated yet, so just override any existing checkpoint data with ours
checkpointData match {
case Some(_: ReliableRDDCheckpointData[_]) => logWarning(
"RDD was already marked for reliable checkpointing: overriding with local checkpoint.")
case _ =>
}
checkpointData = Some(new LocalRDDCheckpointData(this))
}
this
}
/**
* Return whether this RDD is checkpointed and materialized, either reliably or locally.
*/
def isCheckpointed: Boolean = isCheckpointedAndMaterialized
/**
* Return whether this RDD is checkpointed and materialized, either reliably or locally.
* This is introduced as an alias for `isCheckpointed` to clarify the semantics of the
* return value. Exposed for testing.
*/
private[spark] def isCheckpointedAndMaterialized: Boolean =
checkpointData.exists(_.isCheckpointed)
/**
* Return whether this RDD is marked for local checkpointing.
* Exposed for testing.
*/
private[rdd] def isLocallyCheckpointed: Boolean = {
checkpointData match {
case Some(_: LocalRDDCheckpointData[T]) => true
case _ => false
}
}
/**
* Gets the name of the directory to which this RDD was checkpointed.
* This is not defined if the RDD is checkpointed locally.
*/
def getCheckpointFile: Option[String] = {
checkpointData match {
case Some(reliable: ReliableRDDCheckpointData[T]) => reliable.getCheckpointDir
case _ => None
}
}
// =======================================================================
// Other internal methods and fields
// =======================================================================
private var storageLevel: StorageLevel = StorageLevel.NONE
/** User code that created this RDD (e.g. `textFile`, `parallelize`). */
@transient private[spark] val creationSite = sc.getCallSite()
/**
* The scope associated with the operation that created this RDD.
*
* This is more flexible than the call site and can be defined hierarchically. For more
* detail, see the documentation of {{RDDOperationScope}}. This scope is not defined if the
* user instantiates this RDD himself without using any Spark operations.
*/
@transient private[spark] val scope: Option[RDDOperationScope] = {
Option(sc.getLocalProperty(SparkContext.RDD_SCOPE_KEY)).map(RDDOperationScope.fromJson)
}
private[spark] def getCreationSite: String = Option(creationSite).map(_.shortForm).getOrElse("")
private[spark] def elementClassTag: ClassTag[T] = classTag[T]
private[spark] var checkpointData: Option[RDDCheckpointData[T]] = None
// Whether to checkpoint all ancestor RDDs that are marked for checkpointing. By default,
// we stop as soon as we find the first such RDD, an optimization that allows us to write
// less data but is not safe for all workloads. E.g. in streaming we may checkpoint both
// an RDD and its parent in every batch, in which case the parent may never be checkpointed
// and its lineage never truncated, leading to OOMs in the long run (SPARK-6847).
private val checkpointAllMarkedAncestors =
Option(sc.getLocalProperty(RDD.CHECKPOINT_ALL_MARKED_ANCESTORS))
.map(_.toBoolean).getOrElse(false)
/** Returns the first parent RDD */
protected[spark] def firstParent[U: ClassTag]: RDD[U] = {
dependencies.head.rdd.asInstanceOf[RDD[U]]
}
/** Returns the jth parent RDD: e.g. rdd.parent[T](0) is equivalent to rdd.firstParent[T] */
protected[spark] def parent[U: ClassTag](j: Int) = {
dependencies(j).rdd.asInstanceOf[RDD[U]]
}
/** The [[org.apache.spark.SparkContext]] that this RDD was created on. */
def context: SparkContext = sc
/**
* Private API for changing an RDD's ClassTag.
* Used for internal Java-Scala API compatibility.
*/
private[spark] def retag(cls: Class[T]): RDD[T] = {
val classTag: ClassTag[T] = ClassTag.apply(cls)
this.retag(classTag)
}
/**
* Private API for changing an RDD's ClassTag.
* Used for internal Java-Scala API compatibility.
*/
private[spark] def retag(implicit classTag: ClassTag[T]): RDD[T] = {
this.mapPartitions(identity, preservesPartitioning = true)(classTag)
}
// Avoid handling doCheckpoint multiple times to prevent excessive recursion
@transient private var doCheckpointCalled = false
/**
* Performs the checkpointing of this RDD by saving this. It is called after a job using this RDD
* has completed (therefore the RDD has been materialized and potentially stored in memory).
* doCheckpoint() is called recursively on the parent RDDs.
*/
private[spark] def doCheckpoint(): Unit = {
RDDOperationScope.withScope(sc, "checkpoint", allowNesting = false, ignoreParent = true) {
if (!doCheckpointCalled) {
doCheckpointCalled = true
if (checkpointData.isDefined) {
if (checkpointAllMarkedAncestors) {
          // TODO We can collect all the RDDs that need to be checkpointed, and then checkpoint
// them in parallel.
// Checkpoint parents first because our lineage will be truncated after we
// checkpoint ourselves
dependencies.foreach(_.rdd.doCheckpoint())
}
checkpointData.get.checkpoint()
} else {
dependencies.foreach(_.rdd.doCheckpoint())
}
}
}
}
/**
* Changes the dependencies of this RDD from its original parents to a new RDD (`newRDD`)
   * created from the checkpoint file, and forgets its old dependencies and partitions.
*/
private[spark] def markCheckpointed(): Unit = {
clearDependencies()
partitions_ = null
deps = null // Forget the constructor argument for dependencies too
}
/**
* Clears the dependencies of this RDD. This method must ensure that all references
* to the original parent RDDs are removed to enable the parent RDDs to be garbage
* collected. Subclasses of RDD may override this method for implementing their own cleaning
* logic. See [[org.apache.spark.rdd.UnionRDD]] for an example.
*/
protected def clearDependencies() {
dependencies_ = null
}
/** A description of this RDD and its recursive dependencies for debugging. */
def toDebugString: String = {
// Get a debug description of an rdd without its children
def debugSelf(rdd: RDD[_]): Seq[String] = {
import Utils.bytesToString
val persistence = if (storageLevel != StorageLevel.NONE) storageLevel.description else ""
val storageInfo = rdd.context.getRDDStorageInfo(_.id == rdd.id).map(info =>
" CachedPartitions: %d; MemorySize: %s; ExternalBlockStoreSize: %s; DiskSize: %s".format(
info.numCachedPartitions, bytesToString(info.memSize),
bytesToString(info.externalBlockStoreSize), bytesToString(info.diskSize)))
s"$rdd [$persistence]" +: storageInfo
}
// Apply a different rule to the last child
def debugChildren(rdd: RDD[_], prefix: String): Seq[String] = {
val len = rdd.dependencies.length
len match {
case 0 => Seq.empty
case 1 =>
val d = rdd.dependencies.head
debugString(d.rdd, prefix, d.isInstanceOf[ShuffleDependency[_, _, _]], true)
case _ =>
val frontDeps = rdd.dependencies.take(len - 1)
val frontDepStrings = frontDeps.flatMap(
d => debugString(d.rdd, prefix, d.isInstanceOf[ShuffleDependency[_, _, _]]))
val lastDep = rdd.dependencies.last
val lastDepStrings =
debugString(lastDep.rdd, prefix, lastDep.isInstanceOf[ShuffleDependency[_, _, _]], true)
(frontDepStrings ++ lastDepStrings)
}
}
// The first RDD in the dependency stack has no parents, so no need for a +-
def firstDebugString(rdd: RDD[_]): Seq[String] = {
val partitionStr = "(" + rdd.partitions.length + ")"
val leftOffset = (partitionStr.length - 1) / 2
val nextPrefix = (" " * leftOffset) + "|" + (" " * (partitionStr.length - leftOffset))
debugSelf(rdd).zipWithIndex.map{
case (desc: String, 0) => s"$partitionStr $desc"
case (desc: String, _) => s"$nextPrefix $desc"
} ++ debugChildren(rdd, nextPrefix)
}
def shuffleDebugString(rdd: RDD[_], prefix: String = "", isLastChild: Boolean): Seq[String] = {
val partitionStr = "(" + rdd.partitions.length + ")"
val leftOffset = (partitionStr.length - 1) / 2
      val thisPrefix = prefix.replaceAll("\|\s+$", "")
val nextPrefix = (
thisPrefix
+ (if (isLastChild) " " else "| ")
+ (" " * leftOffset) + "|" + (" " * (partitionStr.length - leftOffset)))
debugSelf(rdd).zipWithIndex.map{
case (desc: String, 0) => s"$thisPrefix+-$partitionStr $desc"
case (desc: String, _) => s"$nextPrefix$desc"
} ++ debugChildren(rdd, nextPrefix)
}
def debugString(
rdd: RDD[_],
prefix: String = "",
isShuffle: Boolean = true,
isLastChild: Boolean = false): Seq[String] = {
if (isShuffle) {
shuffleDebugString(rdd, prefix, isLastChild)
} else {
debugSelf(rdd).map(prefix + _) ++ debugChildren(rdd, prefix)
}
}
    firstDebugString(this).mkString("\n")
}
override def toString: String = "%s%s[%d] at %s".format(
Option(name).map(_ + " ").getOrElse(""), getClass.getSimpleName, id, getCreationSite)
def toJavaRDD() : JavaRDD[T] = {
new JavaRDD(this)(elementClassTag)
}
}
/**
* Defines implicit functions that provide extra functionalities on RDDs of specific types.
*
 * For example, [[RDD.rddToPairRDDFunctions]] converts an RDD into a [[PairRDDFunctions]] for
 * key-value-pair RDDs, enabling extra functionality such as `PairRDDFunctions.reduceByKey`.
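 *
 * For example, the following works without any explicit import because the conversion is
 * picked up from this companion object (illustrative, assuming a `SparkContext` named `sc`):
 * {{{
 *   sc.parallelize(Seq(("a", 1), ("a", 2), ("b", 3))).reduceByKey(_ + _).collect()
 *   // returns Array(("a", 3), ("b", 3))
 * }}}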
*/
object RDD {
private[spark] val CHECKPOINT_ALL_MARKED_ANCESTORS =
"spark.checkpoint.checkpointAllMarkedAncestors"
// The following implicit functions were in SparkContext before 1.3 and users had to
// `import SparkContext._` to enable them. Now we move them here to make the compiler find
// them automatically. However, we still keep the old functions in SparkContext for backward
// compatibility and forward to the following functions directly.
implicit def rddToPairRDDFunctions[K, V](rdd: RDD[(K, V)])
(implicit kt: ClassTag[K], vt: ClassTag[V], ord: Ordering[K] = null): PairRDDFunctions[K, V] = {
new PairRDDFunctions(rdd)
}
implicit def rddToAsyncRDDActions[T: ClassTag](rdd: RDD[T]): AsyncRDDActions[T] = {
new AsyncRDDActions(rdd)
}
implicit def rddToSequenceFileRDDFunctions[K, V](rdd: RDD[(K, V)])
(implicit kt: ClassTag[K], vt: ClassTag[V],
keyWritableFactory: WritableFactory[K],
valueWritableFactory: WritableFactory[V])
: SequenceFileRDDFunctions[K, V] = {
implicit val keyConverter = keyWritableFactory.convert
implicit val valueConverter = valueWritableFactory.convert
new SequenceFileRDDFunctions(rdd,
keyWritableFactory.writableClass(kt), valueWritableFactory.writableClass(vt))
}
implicit def rddToOrderedRDDFunctions[K : Ordering : ClassTag, V: ClassTag](rdd: RDD[(K, V)])
: OrderedRDDFunctions[K, V, (K, V)] = {
new OrderedRDDFunctions[K, V, (K, V)](rdd)
}
implicit def doubleRDDToDoubleRDDFunctions(rdd: RDD[Double]): DoubleRDDFunctions = {
new DoubleRDDFunctions(rdd)
}
implicit def numericRDDToDoubleRDDFunctions[T](rdd: RDD[T])(implicit num: Numeric[T])
: DoubleRDDFunctions = {
new DoubleRDDFunctions(rdd.map(x => num.toDouble(x)))
}
}
| wangyixiaohuihui/spark2-annotation | core/src/main/scala/org/apache/spark/rdd/RDD.scala | Scala | apache-2.0 | 80,696 |
import org.scalatest._
import petrovich._
class PetrovichSpec extends FlatSpec with Matchers {
"Petrovich" should "detect female" in {
val person = FirstName("Светлана") :: MiddleName("Андреевна")
person.gender shouldEqual Gender.Female
}
it should "detect male" in {
val person = FirstName("Лев") :: MiddleName("Алексеевич")
person.gender shouldEqual Gender.Male
}
it should "convert simple names to genitive case" in {
val personN = LastName("Фомкин") :: FirstName("Алексей") :: MiddleName("Юрьевич")
val personG = LastName("Фомкина") :: FirstName("Алексея") :: MiddleName("Юрьевича")
assert(petrovich(personN, Case.Genitive).intersect(personG) == personG)
}
it should "convert complex names to genitive case" in {
val personN = LastName("Ткач") :: FirstName("Антон") :: MiddleName("Вячеславович")
val personD = LastName("Ткача") :: FirstName("Антона") :: MiddleName("Вячеславовича")
assert(petrovich(personN, Case.Genitive).intersect(personD) == personD)
}
it should "convert complex names to dative case" in {
val personN = LastName("Бонч-Бруевич") :: FirstName("Виктор") :: MiddleName("Леопольдович")
val personD = LastName("Бонч-Бруевичу") :: FirstName("Виктору") :: MiddleName("Леопольдовичу")
assert(petrovich(personN, Case.Dative).intersect(personD) == personD)
}
it should "convert name via alternative syntax" in {
val expr = petrovich.
male.
first("Лев").
last("Щаранский").
prepositional.
firstLast
assert(expr == "Льве Щаранском")
}
}
| fomkin/petrovich-scala | petrovich-scala/src/test/scala/PetrovichSpec.scala | Scala | apache-2.0 | 1,774 |
package solve
object Problem16 extends App {
  // Project Euler 16: "1" followed by 1000 zeros, read in base 2, is 2^1000;
  // sum the decimal digits of that number.
  println(
    BigInt.apply("1" + "0" * 1000, 2).toString()
      .foldLeft(0) { _ + _.toString().toInt })
} | knaou/project-euler-scala | src/solve/Problem16.scala | Scala | mit | 154 |
package main.classifier
import breeze.linalg.SparseVector
import breeze.numerics.exp
import main.factory.InputLRData
import main.linalg.AlgUtil
import main.optimizer.FTRLProximal
import org.apache.spark.mllib.linalg.Vector
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.rdd.RDD
import scala.collection.mutable.ArrayBuffer
/**
* Created by zhy on 2015/8/2 0002.
*/
/**
 * Logistic Regression model
*/
final class LRWithFTRL(val numFeatures: Int)
extends RegressionModel with InputLRData with Serializable {
  // initialize the weight vector
private var weights: SparseVector[Double] = SparseVector.zeros(numFeatures)
  // set the optimization algorithm
override val optimizer = new FTRLProximal(D = numFeatures)
train(trainData)
predictAccuracy(testData)
def train(data: LabeledPoint): Unit = {
weights = optimizer.optimize(data, weights)
optimizer.printV
}
  // train the model parameters
  // TODO parallelize the training and testing process
override def train(trainData: RDD[LabeledPoint]): Unit = {
val localTrainData = trainData.toLocalIterator
localTrainData.foreach(data => train(data))
}
/**
   * Classification prediction accuracy
   * @param testData the test data set
   * @return the accuracy
*/
def predictAccuracy(testData: RDD[LabeledPoint]): Unit = {
var predictions = new ArrayBuffer[Tuple2[Double,Double]]()
testData.toLocalIterator.foreach{ data =>
val prediction = (data.label, predict(data.features))
train(data)
predictions += prediction
}
val numData:Int = predictions.toArray.length
val numCorrect:Int = predictions.toArray.filter{data=>
data._1 == data._2
}.length
println("正确预测的数量: " + numCorrect +
"\n所有数量: " + numData )
RMSE = numCorrect * 1.0 / numData
}
/**
   * Predict a single sample with the hypothesis function
   * @param testData the test sample
   * @return the predicted class: 1 or 0
*/
def predict(testData: Vector): Double = {
val x: Double = weights.dot(AlgUtil.VtoB(testData))
val prob: Double = sigmod(x)
if (prob > 0.5) return 1.0
else return 0.0
}
override def getRMSE =
println("使用FTRL-Proximal的逻辑回归在测试集上的预测准确率为" + RMSE + "\n----------测试完毕----------")
  // sigmoid function
private def sigmod(x: Double): Double = 1.0 / (1 + exp(-x))
}
| zhanghaoyu1993/MachineLearning_in_Spark | sparkML/src/main/Classifier/LRWithFTRL.scala | Scala | mit | 2,366 |
/*
* Copyright 2014 Intelix Pty Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package hq.flows.core
import akka.actor.{Actor, ActorRef, Props}
import akka.stream.actor.ActorSubscriberMessage.OnNext
import akka.stream.actor.{MaxInFlightRequestStrategy, RequestStrategy}
import common.ToolExt.configHelper
import common.actors._
import common.{BecomeActive, Fail, JsonFrame}
import hq.flows.core.Builder.SinkActorPropsType
import play.api.libs.json.{JsValue, Json}
import scalaz.Scalaz._
import scalaz.{Scalaz, \\/}
private[core] object GateSinkBuilder extends BuilderFromConfig[SinkActorPropsType] {
val configId = "gate"
override def build(props: JsValue): \\/[Fail, SinkActorPropsType] =
for (
name <- props ~> 'name \\/> Fail(s"Invalid gate sink configuration. Missing 'name' value. Contents: ${Json.stringify(props)}")
) yield GateSinkActor.props(name)
}
private object GateSinkActor {
def props(gate: String) = Props(new GateSinkActor(gate))
}
private class GateSinkActor(gate: String)
extends PipelineWithStatesActor
with ShutdownableSubscriberActor
with ReconnectingActor
with AtLeastOnceDeliveryActor[JsonFrame]
with ActorWithGateStateMonitoring {
override def commonBehavior: Receive = handleOnNext orElse super.commonBehavior
  override def connectionEndpoint: String = "/user/gates/" + gate // TODO do properly
override def preStart(): Unit = {
self ! BecomeActive() // TODO !>>>> remove!!!
super.preStart()
}
override def onConnectedToEndpoint(): Unit = {
super.onConnectedToEndpoint()
logger.info("In connected state")
startGateStateMonitoring()
}
override def onDisconnectedFromEndpoint(): Unit = {
super.onDisconnectedFromEndpoint()
logger.info("In disconnected state")
stopGateStateMonitoring()
if (isPipelineActive) initiateReconnect()
}
override def becomeActive(): Unit = {
logger.info(s"Sink becoming active")
initiateReconnect()
}
override def becomePassive(): Unit = {
logger.info(s"Sink becoming passive")
stopGateStateMonitoring()
disconnect()
}
override def canDeliverDownstreamRightNow = isPipelineActive && connected && isGateOpen
override def getSetOfActiveEndpoints: Set[ActorRef] = remoteActorRef.map(Set(_)).getOrElse(Set())
override def fullyAcknowledged(correlationId: Long, msg: JsonFrame): Unit = {
logger.info(s"Fully achnowledged $correlationId")
context.parent ! Acknowledged(correlationId, msg)
}
override protected def requestStrategy: RequestStrategy = new MaxInFlightRequestStrategy(96) {
override def inFlightInternally: Int = inFlightCount
}
private def handleOnNext: Actor.Receive = {
case OnNext(x) => x match {
case m : JsonFrame => deliverMessage(m)
case _ => logger.warn(s"Unrecognised message at gate sink: $x")
}
}
}
| mglukh/ehub | modules/core/src/main/scala/hq/flows/core/GateSinkBuilder.scala | Scala | apache-2.0 | 3,363 |
package nyaya.gen
import scala.collection.immutable.ArraySeq
import scala.collection.Factory
import scala.reflect.ClassTag
object ScalaVerSpecific {
implicit object SetLikeForLazyList extends SetLike.CastFromAny[LazyList]({
type F[A] = LazyList[A]
type A = Any
new SetLike[F, A] {
override def empty = LazyList.empty
override def contains(h: F[A], a: A) = h contains a
override def add (h: F[A], a: A) = a #:: h
override def addAll (h: F[A], i: F[A]) = i #::: h
}
})
implicit object SetLikeForArraySeq extends SetLike.ByClassTag[ArraySeq] {
override def apply[A: ClassTag]: SetLike[ArraySeq, A] =
new SetLike[ArraySeq, A] {
override def empty = ArraySeq.empty
override def contains(h: ArraySeq[A], a: A) = h contains a
override def add (h: ArraySeq[A], a: A) = h :+ a
override def addAll (h: ArraySeq[A], i: ArraySeq[A]) = h ++ i
}
}
trait SetLikeImplicits {
implicit def NyayaSetLikeForLazyList: SetLike.CastFromAny[LazyList] =
ScalaVerSpecific.SetLikeForLazyList
implicit def NyayaSetLikeForArraySeq: SetLike.ByClassTag[ArraySeq] =
ScalaVerSpecific.SetLikeForArraySeq
}
}
| japgolly/nyaya | gen/shared/src/main/scala-3/nyaya/gen/Scala3Specific.scala | Scala | lgpl-2.1 | 1,293 |
package commons.mapper.utils
import java.lang.reflect.{ Method, Modifier }
import java.sql.Date
import java.util.Date
import scala.reflect.runtime.universe
import scala.reflect.runtime.universe.{ Mirror, TermName, runtimeMirror, termNames, typeOf }
import commons.mapper.ArgWithDefault
/**
* @author Kai Han
*/
private[mapper] object TypeUtils {
private val dateParsePatterns = Array(
"yyyy-MM-dd", "yyyy-MM-dd HH:mm:ss", "yyyy-MM-dd'T'HH:mm:ss",
"yyyy/MM/dd", "yyyy/MM/dd HH:mm:ss", "yyyy/MM/dd'T'HH:mm:ss",
"HH:mm:ss"
)
val dateTimeClass = try { Class.forName("org.joda.time.DateTime") } catch { case t : Throwable => null }
lazy val dateTimeClassConstructor = dateTimeClass.getConstructor(classOf[Long])
def fieldName(methodName : String) : String = {
if (methodName.endsWith("_$eq")) methodName.substring(0, methodName.length() - 4)
else if (methodName.startsWith("set")) methodName.substring(3, 4).toLowerCase() + methodName.substring(4)
else methodName
}
def convertType(clazz : Class[_], value : Any) : Any = {
if (clazz == null || value == null) {
return null
}
if (clazz.isInstance(value)) {
return value
}
if (clazz == classOf[Int] || clazz == classOf[java.lang.Integer]) {
if (value.isInstanceOf[java.lang.Integer] || value.isInstanceOf[Int]) return value
else if (value.isInstanceOf[String]) return java.lang.Integer.parseInt(value.asInstanceOf[String])
} else if (clazz == classOf[Long] || clazz == classOf[java.lang.Long]) {
if (value.isInstanceOf[java.lang.Long] || value.isInstanceOf[Long]) return value
else if (value.isInstanceOf[String]) return java.lang.Long.parseLong(value.asInstanceOf[String])
} else if (clazz == classOf[Float] || clazz == classOf[java.lang.Float]) {
if (value.isInstanceOf[java.lang.Float] || value.isInstanceOf[Float]) return value
else if (value.isInstanceOf[String]) return java.lang.Float.parseFloat(value.asInstanceOf[String])
} else if (clazz == classOf[Double] || clazz == classOf[java.lang.Double]) {
if (value.isInstanceOf[java.lang.Double] || value.isInstanceOf[Double]) return value
else if (value.isInstanceOf[String]) return java.lang.Double.parseDouble(value.asInstanceOf[String])
} else if (clazz == classOf[Boolean] || clazz == classOf[java.lang.Boolean]) {
if (value.isInstanceOf[java.lang.Boolean] || value.isInstanceOf[Boolean]) return value
else if (value.isInstanceOf[String]) return java.lang.Boolean.parseBoolean(value.asInstanceOf[String])
} else if (clazz == classOf[Byte] || clazz == classOf[java.lang.Byte]) {
if (value.isInstanceOf[java.lang.Byte] || value.isInstanceOf[Byte]) return value
else if (value.isInstanceOf[String]) return java.lang.Byte.parseByte(value.asInstanceOf[String])
} else if (clazz == classOf[Short] || clazz == classOf[java.lang.Short]) {
if (value.isInstanceOf[java.lang.Short] || value.isInstanceOf[Short]) return value
else if (value.isInstanceOf[String]) return java.lang.Short.parseShort(value.asInstanceOf[String])
} else if (clazz == classOf[Char] || clazz == classOf[java.lang.Character]) {
if (value.isInstanceOf[java.lang.Character] || value.isInstanceOf[Char]) return value
else if (value.isInstanceOf[String]) return java.lang.Integer.parseInt(value.asInstanceOf[String]).asInstanceOf[Char]
} else if (clazz == classOf[java.util.Date] || clazz == classOf[java.sql.Date] || clazz == dateTimeClass) {
if (value.isInstanceOf[java.util.Date] && clazz == classOf[java.sql.Date]) {
return new java.sql.Date(value.asInstanceOf[java.util.Date].getTime)
} else if (value.isInstanceOf[java.util.Date] && clazz == dateTimeClass) {
return dateTimeClassConstructor.newInstance(Long.box(value.asInstanceOf[java.util.Date].getTime)).asInstanceOf[Object]
} else if (value.isInstanceOf[java.sql.Date] && clazz == classOf[java.util.Date]) {
return value
} else if (value.isInstanceOf[java.sql.Date] && clazz == dateTimeClass) {
return dateTimeClassConstructor.newInstance(Long.box(value.asInstanceOf[java.util.Date].getTime)).asInstanceOf[Object]
} else if (value.isInstanceOf[String]) {
val date = DateUtils.parseDateWithLeniency(value.asInstanceOf[String], dateParsePatterns, false)
if (clazz == classOf[java.sql.Date]) {
return new java.sql.Date(date.getTime)
}
if (clazz == classOf[java.util.Date]) {
return date
}
if (clazz == dateTimeClass) {
val obj = Long.box(date.getTime)
return dateTimeClassConstructor.newInstance(obj).asInstanceOf[Object]
}
}
}
		throw new ClassCastException(value.getClass().getName + " cannot be cast to " + clazz.getName)
}
def extractTypeInfo[T](clazz : Class[T]) : (List[ArgWithDefault], List[Method]) = {
val rm : Mirror = runtimeMirror(clazz.getClassLoader)
val classSymbol = rm.classSymbol(clazz)
if (classSymbol.isJava) {
extractJavaTypeInfo(clazz)
} else {
extractScalaTypeInfo(clazz)
}
}
private def extractJavaTypeInfo[T](clazz : Class[T]) : (List[ArgWithDefault], List[Method]) = {
val constructors = clazz.getConstructors
.filter(m => Modifier.isPublic(m.getModifiers))
.toList
if (constructors.isEmpty) {
throw new RuntimeException(clazz + " has no public constructor")
}
val constructor = constructors.minBy(_.getParameterTypes.length)
val args = constructor.getParameterTypes.map(clazz => {
val default = clazz match {
case t if t == classOf[Boolean] => Boolean.box(false)
case t if t == classOf[Byte] => Byte.box(0.toByte)
case t if t == classOf[Short] => Short.box(0.toShort)
case t if t == classOf[Char] => Char.box(0.toChar)
case t if t == classOf[Int] => Int.box(0)
case t if t == classOf[Long] => Long.box(0L)
case t if t == classOf[Float] => Float.box(0F)
case t if t == classOf[Double] => Double.box(0D)
case _ => null
}
new ArgWithDefault(clazz, null, default)
}).toList
val setMethods = clazz.getDeclaredMethods
.filter(method => Modifier.isPublic(method.getModifiers))
.filterNot(method => Modifier.isStatic(method.getModifiers))
.filter(_.getName.startsWith("set"))
.filter(_.getName.length > 3)
.filter(_.getParameterTypes.length == 1)
.toList
(args, setMethods)
}
private def extractScalaTypeInfo[T](clazz : Class[T]) : (List[ArgWithDefault], List[Method]) = {
val rm : Mirror = runtimeMirror(clazz.getClassLoader)
val classSymbol = rm.classSymbol(clazz)
val classMirror = rm.reflectClass(classSymbol)
val alternatives = classSymbol.typeSignature.decl(termNames.CONSTRUCTOR).alternatives
val constructorSymbol = alternatives.find(_.asMethod.isPrimaryConstructor).getOrElse(null)
if (constructorSymbol == null) {
throw new RuntimeException(clazz + " has no PrimaryConstructor")
}
val constructor = constructorSymbol.asMethod
val constructorMirror = classMirror.reflectConstructor(constructor)
lazy val module = classSymbol.companion.asModule
lazy val companion = rm.reflectModule(module).instance
lazy val companionMirror = rm.reflect(companion)
val args = constructor.paramLists.flatten.zipWithIndex.map(paramWithIndex => {
val param = paramWithIndex._1
val paramName = param.name.toString
val default = if (param.asTerm.isParamWithDefault) {
val index = paramWithIndex._2 + 1
val defaultTermName = TermName("$lessinit$greater$default$" + index)
val methodSymbol = classSymbol.typeSignature.companion.decl(defaultTermName).asMethod
companionMirror.reflectMethod(methodSymbol)()
} else {
param.typeSignature match {
case t if t =:= typeOf[Boolean] => Boolean.box(false)
case t if t =:= typeOf[Byte] => Byte.box(0.toByte)
case t if t =:= typeOf[Short] => Short.box(0.toShort)
case t if t =:= typeOf[Char] => Char.box(0.toChar)
case t if t =:= typeOf[Int] => Int.box(0)
case t if t =:= typeOf[Long] => Long.box(0L)
case t if t =:= typeOf[Float] => Float.box(0F)
case t if t =:= typeOf[Double] => Double.box(0D)
case t if t =:= typeOf[Object] => null.asInstanceOf[AnyRef]
case _ => null.asInstanceOf[AnyRef]
}
}
ArgWithDefault(rm.runtimeClass(param.typeSignature.erasure), paramName, default)
}).toList
val setMethodSet = args.map(_.paramName + "_$eq").toSet
val setMethods = clazz.getDeclaredMethods
.filter(method => Modifier.isPublic(method.getModifiers))
.filterNot(method => Modifier.isStatic(method.getModifiers))
.filter(_.getName.endsWith("_$eq"))
.filter(_.getName.length > 4)
.filter(_.getParameterTypes.length == 1)
.filter(method => !setMethodSet.contains(method.getName))
.toList
(args, setMethods)
}
} | hank-whu/common4s | src/main/scala/commons/mapper/utils/TypeUtils.scala | Scala | apache-2.0 | 8,636 |
package com.yannick_cw.elastic_indexer4s.elasticsearch.elasic_config
import com.sksamuel.elastic4s.analyzers.AnalyzerDefinition
import com.sksamuel.elastic4s.mappings.MappingDefinition
import io.circe.{Json, ParsingFailure}
import io.circe.parser.parse
sealed trait MappingSetting {
def fold[A](typed: TypedMappingSetting => A, unsafe: StringMappingSetting => A): A
}
case class TypedMappingSetting(
analyzer: List[AnalyzerDefinition] = List.empty,
mappings: List[MappingDefinition] = List.empty,
shards: Option[Int] = None,
replicas: Option[Int] = None
) extends MappingSetting {
def fold[A](typed: (TypedMappingSetting) => A, unsafe: (StringMappingSetting) => A): A = typed(this)
}
sealed abstract case class StringMappingSetting(source: Json) extends MappingSetting {
def fold[A](typed: (TypedMappingSetting) => A, unsafe: (StringMappingSetting) => A): A = unsafe(this)
}
object StringMappingSetting {
def unsafeString(source: String): Either[ParsingFailure, MappingSetting] =
parse(source).map(json => new StringMappingSetting(json) {})
}
| yannick-cw/elastic-indexer4s | src/main/scala/com/yannick_cw/elastic_indexer4s/elasticsearch/elasic_config/MappingSetting.scala | Scala | mit | 1,074 |
package ignition.core.jobs
import scala.util.Try
object ExecutionRetry extends ExecutionRetry
trait ExecutionRetry {
def executeRetrying[T](code: => T, maxExecutions: Int = 3): T = {
assert(maxExecutions > 0) // we will execute at least once
// TODO: log retries
def _executeRetrying(retriesLeft: Int): Try[T] = {
val tryResult = Try { code }
if (tryResult.isFailure && retriesLeft > 0) {
_executeRetrying(retriesLeft - 1)
} else {
tryResult
}
}
_executeRetrying(maxExecutions - 1).get
}
}
| chaordic/ignition-core | src/main/scala/ignition/core/jobs/ExecutionRetry.scala | Scala | mit | 561 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark
import org.apache.spark.Partitioner
import org.apache.carbondata.core.metadata.schema.PartitionInfo
import org.apache.carbondata.core.metadata.schema.partition.PartitionType
import org.apache.carbondata.core.scan.partition.{HashPartitioner => JavaHashPartitioner, ListPartitioner => JavaListPartitioner, RangePartitioner => JavaRangePartitioner}
import org.apache.carbondata.processing.loading.exception.CarbonDataLoadingException
object PartitionFactory {
def getPartitioner(partitionInfo: PartitionInfo): Partitioner = {
partitionInfo.getPartitionType match {
case PartitionType.HASH => new HashPartitioner(partitionInfo.getNumPartitions)
case PartitionType.LIST => new ListPartitioner(partitionInfo)
case PartitionType.RANGE => new RangePartitioner(partitionInfo)
case partitionType =>
throw new CarbonDataLoadingException(s"Unsupported partition type: $partitionType")
}
}
}
class HashPartitioner(partitions: Int) extends Partitioner {
private val partitioner = new JavaHashPartitioner(partitions)
override def numPartitions: Int = partitioner.numPartitions()
override def getPartition(key: Any): Int = partitioner.getPartition(key)
}
class ListPartitioner(partitionInfo: PartitionInfo) extends Partitioner {
private val partitioner = new JavaListPartitioner(partitionInfo)
override def numPartitions: Int = partitioner.numPartitions()
override def getPartition(key: Any): Int = partitioner.getPartition(key)
}
class RangePartitioner(partitionInfo: PartitionInfo) extends Partitioner {
private val partitioner = new JavaRangePartitioner(partitionInfo)
override def numPartitions: Int = partitioner.numPartitions()
override def getPartition(key: Any): Int = partitioner.getPartition(key)
}
| manishgupta88/carbondata | integration/spark-common/src/main/scala/org/apache/carbondata/spark/PartitionFactory.scala | Scala | apache-2.0 | 2,609 |
package systems.opalia.commons.control
package object units {
type Percent = Double
type Meter = Double
type Radian = Double
type MeterPerSecond = Double
type RadianPerSecond = Double
type MeterPerSecondSq = Double
type RadianPerSecondSq = Double
}
| OpaliaSystems/commons | src/main/scala/systems/opalia/commons/control/units/package.scala | Scala | apache-2.0 | 268 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.api
import java.util.Properties
import org.apache.kafka.clients.producer.{ProducerConfig, KafkaProducer}
import org.apache.kafka.common.config.ConfigException
import org.apache.kafka.common.serialization.ByteArraySerializer
import org.junit.Test
class PlaintextProducerSendTest extends BaseProducerSendTest {
@Test
def testSerializerConstructors() {
try {
createNewProducerWithNoSerializer(brokerList)
fail("Instantiating a producer without specifying a serializer should cause a ConfigException")
} catch {
case ce : ConfigException => // this is ok
}
// create a producer with explicit serializers should succeed
createNewProducerWithExplicitSerializer(brokerList)
}
private def createNewProducerWithNoSerializer(brokerList: String) : KafkaProducer[Array[Byte],Array[Byte]] = {
val producerProps = new Properties()
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
return new KafkaProducer[Array[Byte],Array[Byte]](producerProps)
}
private def createNewProducerWithExplicitSerializer(brokerList: String) : KafkaProducer[Array[Byte],Array[Byte]] = {
val producerProps = new Properties()
producerProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList)
return new KafkaProducer[Array[Byte],Array[Byte]](producerProps, new ByteArraySerializer, new ByteArraySerializer)
}
}
| Zhiqiang-He/kafka-0914-edit | core/src/test/scala/integration/kafka/api/PlaintextProducerSendTest.scala | Scala | apache-2.0 | 2,215 |
/**
* Original work: SecureSocial (https://github.com/jaliss/securesocial)
* Copyright 2013 Jorge Aliss (jaliss at gmail dot com) - twitter: @jaliss
*
* Derivative work: Silhouette (https://github.com/mohiva/play-silhouette)
* Modifications Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl.util
import java.security.SecureRandom
import com.mohiva.play.silhouette.api.util.IDGenerator
import play.api.libs.Codecs
import play.api.libs.concurrent.Execution.Implicits._
import scala.concurrent.Future
/**
* A generator which uses SecureRandom to generate cryptographically strong IDs.
*
 * @param idSizeInBytes The size of the generated ID in bytes.
*/
class SecureRandomIDGenerator(idSizeInBytes: Int = 128) extends IDGenerator {
/**
* Generates a new ID using SecureRandom.
*
* @return The generated ID.
*/
def generate: Future[String] = {
val randomValue = new Array[Byte](idSizeInBytes)
Future(SecureRandomIDGenerator.random.nextBytes(randomValue)).map { _ =>
Codecs.toHexString(randomValue)
}
}
}
/**
* The companion object.
*/
object SecureRandomIDGenerator {
/**
* A cryptographically strong random number generator (RNG).
*
 * There is a cost to creating a secure random instance, due to its initial seeding, so it's recommended
 * to use a singleton style and create only one instance for all of your usage going forward.
*
* On Linux systems SecureRandom uses /dev/random and it can block waiting for sufficient entropy to build up.
*/
lazy val random = new SecureRandom()
}
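/* Illustrative usage sketch (not part of the original file): the generator is typically
 * created once and reused, per the seeding note above; `generate` yields a hex string
 * encoding `idSizeInBytes` random bytes.
 *
 *   val generator = new SecureRandomIDGenerator(idSizeInBytes = 16)
 *   generator.generate.foreach(id => println(id)) // e.g. a 32-character hex string
 */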
| rfranco/play-silhouette | silhouette/app/com/mohiva/play/silhouette/impl/util/SecureRandomIDGenerator.scala | Scala | apache-2.0 | 2,160 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.test
import java.io.File
import java.net.URI
import java.nio.file.Files
import java.util.{Locale, UUID}
import scala.concurrent.duration._
import scala.language.implicitConversions
import scala.util.control.NonFatal
import org.apache.hadoop.fs.Path
import org.scalactic.source.Position
import org.scalatest.{BeforeAndAfterAll, Suite, Tag}
import org.scalatest.concurrent.Eventually
import org.apache.spark.SparkFunSuite
import org.apache.spark.sql._
import org.apache.spark.sql.catalyst.FunctionIdentifier
import org.apache.spark.sql.catalyst.analysis.NoSuchTableException
import org.apache.spark.sql.catalyst.catalog.SessionCatalog.DEFAULT_DATABASE
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.PlanTestBase
import org.apache.spark.sql.catalyst.plans.logical.LogicalPlan
import org.apache.spark.sql.catalyst.util._
import org.apache.spark.sql.execution.FilterExec
import org.apache.spark.sql.execution.adaptive.DisableAdaptiveExecution
import org.apache.spark.sql.execution.datasources.DataSourceUtils
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.util.UninterruptibleThread
import org.apache.spark.util.Utils
/**
* Helper trait that should be extended by all SQL test suites within the Spark
* code base.
*
* This allows subclasses to plugin a custom `SQLContext`. It comes with test data
* prepared in advance as well as all implicit conversions used extensively by dataframes.
* To use implicit methods, import `testImplicits._` instead of through the `SQLContext`.
*
* Subclasses should *not* create `SQLContext`s in the test suite constructor, which is
* prone to leaving multiple overlapping [[org.apache.spark.SparkContext]]s in the same JVM.
*/
private[sql] trait SQLTestUtils extends SparkFunSuite with SQLTestUtilsBase with PlanTest {
// Whether to materialize all test data before the first test is run
private var loadTestDataBeforeTests = false
protected override def beforeAll(): Unit = {
super.beforeAll()
if (loadTestDataBeforeTests) {
loadTestData()
}
}
/**
* Creates a temporary directory, which is then passed to `f` and will be deleted after `f`
* returns.
*/
protected override def withTempDir(f: File => Unit): Unit = {
super.withTempDir { dir =>
f(dir)
waitForTasksToFinish()
}
}
/**
* A helper function for turning off/on codegen.
*/
protected def testWithWholeStageCodegenOnAndOff(testName: String)(f: String => Unit): Unit = {
Seq("false", "true").foreach { codegenEnabled =>
val isTurnOn = if (codegenEnabled == "true") "on" else "off"
test(s"$testName (whole-stage-codegen ${isTurnOn})") {
withSQLConf(SQLConf.WHOLESTAGE_CODEGEN_ENABLED.key -> codegenEnabled) {
f(codegenEnabled)
}
}
}
}
/**
* Materialize the test data immediately after the `SQLContext` is set up.
* This is necessary if the data is accessed by name but not through direct reference.
*/
protected def setupTestData(): Unit = {
loadTestDataBeforeTests = true
}
/**
* Disable stdout and stderr when running the test. To not output the logs to the console,
* ConsoleAppender's `follow` should be set to `true` so that it will honor reassignments of
* System.out or System.err. Otherwise, ConsoleAppender will still output to the console even if
* we change System.out and System.err.
*/
protected def testQuietly(name: String)(f: => Unit): Unit = {
test(name) {
quietly {
f
}
}
}
override protected def test(testName: String, testTags: Tag*)(testFun: => Any)
(implicit pos: Position): Unit = {
if (testTags.exists(_.isInstanceOf[DisableAdaptiveExecution])) {
super.test(testName, testTags: _*) {
withSQLConf(SQLConf.ADAPTIVE_EXECUTION_ENABLED.key -> "false") {
testFun
}
}
} else {
super.test(testName, testTags: _*)(testFun)
}
}
/**
* Run a test on a separate `UninterruptibleThread`.
*/
protected def testWithUninterruptibleThread(name: String, quietly: Boolean = false)
(body: => Unit): Unit = {
val timeoutMillis = 10000
@transient var ex: Throwable = null
def runOnThread(): Unit = {
val thread = new UninterruptibleThread(s"Testing thread for test $name") {
override def run(): Unit = {
try {
body
} catch {
case NonFatal(e) =>
ex = e
}
}
}
thread.setDaemon(true)
thread.start()
thread.join(timeoutMillis)
if (thread.isAlive) {
thread.interrupt()
// If this interrupt does not work, then this thread is most likely running something that
// is not interruptible. There is not much point to wait for the thread to terminate, and
// we rather let the JVM terminate the thread on exit.
fail(
s"Test '$name' running on o.a.s.util.UninterruptibleThread timed out after" +
s" $timeoutMillis ms")
} else if (ex != null) {
throw ex
}
}
if (quietly) {
testQuietly(name) { runOnThread() }
} else {
test(name) { runOnThread() }
}
}
/**
* Copy file in jar's resource to a temp file, then pass it to `f`.
   * This function is used so that `f` can use the path of a temp file (e.g. file:/), instead of
   * the path of the jar's resource, which starts with 'jar:file:/'.
*/
protected def withResourceTempPath(resourcePath: String)(f: File => Unit): Unit = {
val inputStream =
Thread.currentThread().getContextClassLoader.getResourceAsStream(resourcePath)
withTempDir { dir =>
val tmpFile = new File(dir, "tmp")
Files.copy(inputStream, tmpFile.toPath)
f(tmpFile)
}
}
/**
* Waits for all tasks on all executors to be finished.
*/
protected def waitForTasksToFinish(): Unit = {
eventually(timeout(10.seconds)) {
assert(spark.sparkContext.statusTracker
.getExecutorInfos.map(_.numRunningTasks()).sum == 0)
}
}
/**
   * Creates the specified number of temporary directories, which are then passed to `f` and will be
* deleted after `f` returns.
*/
protected def withTempPaths(numPaths: Int)(f: Seq[File] => Unit): Unit = {
val files = Array.fill[File](numPaths)(Utils.createTempDir().getCanonicalFile)
try f(files) finally {
// wait for all tasks to finish before deleting files
waitForTasksToFinish()
files.foreach(Utils.deleteRecursively)
}
}
}
/**
* Helper trait that can be extended by all external SQL test suites.
*
* This allows subclasses to plugin a custom `SQLContext`.
* To use implicit methods, import `testImplicits._` instead of through the `SQLContext`.
*
* Subclasses should *not* create `SQLContext`s in the test suite constructor, which is
* prone to leaving multiple overlapping [[org.apache.spark.SparkContext]]s in the same JVM.
*/
private[sql] trait SQLTestUtilsBase
extends Eventually
with BeforeAndAfterAll
with SQLTestData
with PlanTestBase { self: Suite =>
protected def sparkContext = spark.sparkContext
// Shorthand for running a query using our SQLContext
protected lazy val sql = spark.sql _
/**
* A helper object for importing SQL implicits.
*
* Note that the alternative of importing `spark.implicits._` is not possible here.
* This is because we create the `SQLContext` immediately before the first test is run,
* but the implicits import is needed in the constructor.
*/
protected object testImplicits extends SQLImplicits {
protected override def _sqlContext: SQLContext = self.spark.sqlContext
}
protected override def withSQLConf(pairs: (String, String)*)(f: => Unit): Unit = {
SparkSession.setActiveSession(spark)
super.withSQLConf(pairs: _*)(f)
}
/**
* Drops functions after calling `f`. A function is represented by (functionName, isTemporary).
*/
protected def withUserDefinedFunction(functions: (String, Boolean)*)(f: => Unit): Unit = {
try {
f
} catch {
case cause: Throwable => throw cause
} finally {
// If the test failed part way, we don't want to mask the failure by failing to remove
// temp tables that never got created.
functions.foreach { case (functionName, isTemporary) =>
val withTemporary = if (isTemporary) "TEMPORARY" else ""
spark.sql(s"DROP $withTemporary FUNCTION IF EXISTS $functionName")
assert(
!spark.sessionState.catalog.functionExists(FunctionIdentifier(functionName)),
s"Function $functionName should have been dropped. But, it still exists.")
}
}
}
/**
* Drops temporary view `viewNames` after calling `f`.
*/
protected def withTempView(viewNames: String*)(f: => Unit): Unit = {
Utils.tryWithSafeFinally(f) {
viewNames.foreach { viewName =>
try spark.catalog.dropTempView(viewName) catch {
// If the test failed part way, we don't want to mask the failure by failing to remove
// temp views that never got created.
case _: NoSuchTableException =>
}
}
}
}
/**
* Drops global temporary view `viewNames` after calling `f`.
*/
protected def withGlobalTempView(viewNames: String*)(f: => Unit): Unit = {
Utils.tryWithSafeFinally(f) {
viewNames.foreach { viewName =>
try spark.catalog.dropGlobalTempView(viewName) catch {
// If the test failed part way, we don't want to mask the failure by failing to remove
// global temp views that never got created.
case _: NoSuchTableException =>
}
}
}
}
/**
* Drops table `tableName` after calling `f`.
*/
protected def withTable(tableNames: String*)(f: => Unit): Unit = {
Utils.tryWithSafeFinally(f) {
tableNames.foreach { name =>
spark.sql(s"DROP TABLE IF EXISTS $name")
}
}
}
/**
* Drops view `viewName` after calling `f`.
*/
protected def withView(viewNames: String*)(f: => Unit): Unit = {
Utils.tryWithSafeFinally(f)(
viewNames.foreach { name =>
spark.sql(s"DROP VIEW IF EXISTS $name")
}
)
}
/**
* Drops cache `cacheName` after calling `f`.
*/
protected def withCache(cacheNames: String*)(f: => Unit): Unit = {
Utils.tryWithSafeFinally(f) {
cacheNames.foreach { cacheName =>
try uncacheTable(cacheName) catch {
case _: AnalysisException =>
}
}
}
}
// Blocking uncache table for tests
protected def uncacheTable(tableName: String): Unit = {
val tableIdent = spark.sessionState.sqlParser.parseTableIdentifier(tableName)
val cascade = !spark.sessionState.catalog.isTemporaryTable(tableIdent)
spark.sharedState.cacheManager.uncacheQuery(
spark,
spark.table(tableName).logicalPlan,
cascade = cascade,
blocking = true)
}
/**
   * Creates a temporary database whose name is passed to `f`. The database is
   * dropped after `f` returns.
   *
   * Note that this method doesn't switch the current database before executing `f`.
*/
protected def withTempDatabase(f: String => Unit): Unit = {
val dbName = s"db_${UUID.randomUUID().toString.replace('-', '_')}"
try {
spark.sql(s"CREATE DATABASE $dbName")
} catch { case cause: Throwable =>
fail("Failed to create temporary database", cause)
}
try f(dbName) finally {
if (spark.catalog.currentDatabase == dbName) {
spark.sql(s"USE $DEFAULT_DATABASE")
}
spark.sql(s"DROP DATABASE $dbName CASCADE")
}
}
/**
* Drops database `dbName` after calling `f`.
*/
protected def withDatabase(dbNames: String*)(f: => Unit): Unit = {
Utils.tryWithSafeFinally(f) {
dbNames.foreach { name =>
spark.sql(s"DROP DATABASE IF EXISTS $name CASCADE")
}
spark.sql(s"USE $DEFAULT_DATABASE")
}
}
/**
* Drops namespace `namespace` after calling `f`.
*
* Note that, if you switch current catalog/namespace in `f`, you should switch it back manually.
*/
protected def withNamespace(namespaces: String*)(f: => Unit): Unit = {
Utils.tryWithSafeFinally(f) {
namespaces.foreach { name =>
spark.sql(s"DROP NAMESPACE IF EXISTS $name CASCADE")
}
}
}
/**
   * Sets the default Locale to `language` before executing `f`, then restores the JVM's original default locale
* after `f` returns.
*/
protected def withLocale(language: String)(f: => Unit): Unit = {
val originalLocale = Locale.getDefault
try {
// Add Locale setting
Locale.setDefault(new Locale(language))
f
} finally {
Locale.setDefault(originalLocale)
}
}
/**
* Activates database `db` before executing `f`, then switches back to `default` database after
* `f` returns.
*/
protected def activateDatabase(db: String)(f: => Unit): Unit = {
spark.sessionState.catalog.setCurrentDatabase(db)
Utils.tryWithSafeFinally(f)(spark.sessionState.catalog.setCurrentDatabase("default"))
}
/**
* Strip Spark-side filtering in order to check if a datasource filters rows correctly.
*/
protected def stripSparkFilter(df: DataFrame): DataFrame = {
val schema = df.schema
val withoutFilters = df.queryExecution.executedPlan.transform {
case FilterExec(_, child) => child
}
spark.internalCreateDataFrame(withoutFilters.execute(), schema)
}
/**
* Turn a logical plan into a `DataFrame`. This should be removed once we have an easier
* way to construct `DataFrame` directly out of local data without relying on implicits.
*/
protected implicit def logicalPlanToSparkQuery(plan: LogicalPlan): DataFrame = {
Dataset.ofRows(spark, plan)
}
/**
   * This method is used to make the given path qualified. When a path
   * does not contain a scheme, the returned qualified path will not change even if the
   * default FileSystem changes.
*/
def makeQualifiedPath(path: String): URI = {
val hadoopPath = new Path(path)
val fs = hadoopPath.getFileSystem(spark.sessionState.newHadoopConf())
fs.makeQualified(hadoopPath).toUri
}
/**
   * Returns the full path to the given file in the resource folder.
*/
protected def testFile(fileName: String): String = {
Thread.currentThread().getContextClassLoader.getResource(fileName).toString
}
/**
* Returns the size of the local directory except the metadata file and the temporary file.
*/
def getLocalDirSize(file: File): Long = {
assert(file.isDirectory)
file.listFiles.filter(f => DataSourceUtils.isDataFile(f.getName)).map(_.length).sum
}
}
private[sql] object SQLTestUtils {
def compareAnswers(
sparkAnswer: Seq[Row],
expectedAnswer: Seq[Row],
sort: Boolean): Option[String] = {
def prepareAnswer(answer: Seq[Row]): Seq[Row] = {
// Converts data to types that we can do equality comparison using Scala collections.
// For BigDecimal type, the Scala type has a better definition of equality test (similar to
// Java's java.math.BigDecimal.compareTo).
      // For binary arrays, we convert them to Seq to avoid calling java.util.Arrays.equals for
// equality test.
// This function is copied from Catalyst's QueryTest
val converted: Seq[Row] = answer.map { s =>
Row.fromSeq(s.toSeq.map {
case d: java.math.BigDecimal => BigDecimal(d)
case b: Array[Byte] => b.toSeq
case o => o
})
}
if (sort) {
converted.sortBy(_.toString())
} else {
converted
}
}
if (prepareAnswer(expectedAnswer) != prepareAnswer(sparkAnswer)) {
val errorMessage =
s"""
| == Results ==
| ${sideBySide(
s"== Expected Answer - ${expectedAnswer.size} ==" +:
prepareAnswer(expectedAnswer).map(_.toString()),
s"== Actual Answer - ${sparkAnswer.size} ==" +:
prepareAnswer(sparkAnswer).map(_.toString())).mkString("\\n")}
""".stripMargin
Some(errorMessage)
} else {
None
}
}
}
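/* Illustrative usage sketch (not part of the original file): `compareAnswers` returns
 * None when the two row sets agree and Some(errorMessage) otherwise.
 *
 *   SQLTestUtils.compareAnswers(Seq(Row(1)), Seq(Row(1)), sort = true) // None
 *   SQLTestUtils.compareAnswers(Seq(Row(1)), Seq(Row(2)), sort = true) // Some(error message)
 */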
| dbtsai/spark | sql/core/src/test/scala/org/apache/spark/sql/test/SQLTestUtils.scala | Scala | apache-2.0 | 17,002 |
// scalac: -Xfatal-warnings
//
sealed abstract class Base
object Test {
case object Up extends Base
def foo(d1: Base) =
d1 match {
case Up =>
}
// Sealed subtype: ModuleTypeRef <empty>.this.Test.Up.type
// Pattern: UniqueThisType Test.this.type
}
object Test1 {
sealed abstract class Base
object Base {
case object Down extends Base {
}
case object Up extends Base {
}
def f = (d1: Base, d2: Base) =>
(d1, d2) match {
case (Up, Up) | (Down, Down) => false
case (Down, Up) => true
case (Up, Down) => false
}
}
}
object Test2 {
sealed abstract class Base
object Base {
case object Down extends Base {
}
case object Up extends Base {
}
def f = (d1: Base, d2: Base) =>
(d1) match {
case Up | Down => false
}
}
}
object Test3 {
sealed abstract class Base
object Base {
case object Down extends Base
def f = (d1: Base, d2: Base) =>
(d1, d2) match {
case (Down, Down) => false
}
}
}
object Test4 {
sealed abstract class Base
object Base {
case object Down extends Base {
}
case object Up extends Base {
}
}
import Test4.Base._
def f = (d1: Base, d2: Base) =>
(d1, d2) match {
case (Up, Up) | (Down, Down) => false
case (Down, Test4.Base.Up) => true
case (Up, Down) => false
}
}
| scala/scala | test/files/pos/t7285a.scala | Scala | apache-2.0 | 1,462 |
object Test {
def main(args: Array[String]) {
trait T1 { def a: Int; def c: Int }
trait T2 { def a: Int; def b: Int }
class Bar(val x: Int)
class Foo(val a: Int, val b: Int, val c: Int) extends Bar(a + b + c) with T1 with T2
import scala.reflect.runtime.{currentMirror => cm}
val members = cm.classSymbol(classOf[Foo]).info.members
members.sorted.toList.filter(!_.isMethod) foreach System.out.println
}
}
| felixmulder/scala | test/files/run/reflection-sorted-members.scala | Scala | bsd-3-clause | 438 |
package sorm.reflection
import reflect.runtime.universe._
import reflect.runtime.{currentMirror => mirror}
import sext._, embrace._
object ScalaApi {
implicit class TypeApi ( t : Type ) {
def s : Symbol = t.typeSymbol
private def members : Stream[Symbol] = t.members.toStream
def properties
= members.filter(_.isTerm).map(_.asTerm).filter(_.isAccessor)
def constructors
= members.view
.collect{
case m : MethodSymbol
if m.isConstructor && m.owner == s
=> m
}
.reverse
def javaClass = mirror.runtimeClass(t)
}
implicit class SymbolApi ( s : Symbol ) {
def t : Type = s.typeSignature
def decodedName = s.name.toString.trim
def instantiate
( constructor : MethodSymbol,
args : Traversable[Any] = Nil )
: Any
= s match {
case s : ClassSymbol =>
mirror.reflectClass(s)
.reflectConstructor(constructor)(args.toSeq : _*)
}
def ancestors
= s.unfold{ s =>
if( s.owner == NoSymbol || s.decodedName == "<root>" ) None
else Some(s -> s.owner)
}
}
}
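/* Illustrative usage sketch (not part of the original file; `Person` is a made-up
 * case class): the implicit wrappers expose a type's accessors and constructors
 * through the runtime mirror.
 *
 *   import ScalaApi._
 *   case class Person(name: String, age: Int)
 *   typeOf[Person].properties.map(_.decodedName) // the accessor names, e.g. "name", "age"
 *   typeOf[Person].constructors.nonEmpty         // true: the primary constructor is found
 */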
| sorm/sorm | src/main/scala/sorm/reflection/ScalaApi.scala | Scala | mit | 1,172 |
package debop4s.core.jodatime
import org.joda.time.LocalDate.Property
import org.joda.time._
class JodaRichLocalDate(val self: LocalDate) extends AnyVal with Ordered[LocalDate] {
def -(period: ReadablePeriod): LocalDate = self.minus(period)
def -(builder: DurationBuilder): LocalDate = self.minus(builder.underlying)
def +(period: ReadablePeriod): LocalDate = self.plus(period)
def +(builder: DurationBuilder): LocalDate = self.plus(builder.underlying)
def day: Property = self.dayOfMonth()
def week: Property = self.weekOfWeekyear()
def weekyear: Property = self.weekyear()
def month: Property = self.monthOfYear()
def year: Property = self.yearOfCentury()
def century: Property = self.centuryOfEra()
def era: Property = self.era
def withDay(day: Int): LocalDate = self.withDayOfMonth(day)
def withWeek(week: Int): LocalDate = self.withWeekOfWeekyear(week)
def withWeekyear(weekyear: Int): LocalDate = self.withWeekyear(weekyear)
def withMonth(month: Int): LocalDate = self.withMonthOfYear(month)
def withYear(year: Int): LocalDate = self.withYear(year)
def withCentury(century: Int): LocalDate = self.withCenturyOfEra(century)
def withEra(era: Int): LocalDate = self.withEra(era)
def compare(that: LocalDate): Int = self.compareTo(that)
def interval: Interval = self.toInterval
def interval(zone: DateTimeZone): Interval = self.toInterval(zone)
}
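/* Illustrative usage sketch (not part of the original file): the wrapper is normally
 * reached through the library's implicit conversion, but it can also be applied explicitly.
 *
 *   val date = new LocalDate(2014, 1, 15)
 *   new JodaRichLocalDate(date) + Period.months(1) // LocalDate 2014-02-15
 *   new JodaRichLocalDate(date).withDay(1)         // LocalDate 2014-01-01
 */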
| debop/debop4s | debop4s-core/src/main/scala/debop4s/core/jodatime/JodaRichLocalDate.scala | Scala | apache-2.0 | 1,414 |
/*
* Copyright (c) 2012-2016 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.schemaguru
// json4s
import org.json4s._
import org.json4s.jackson.Serialization
// This library
import Common.{ Schema, SchemaDescription, SchemaVer }
/**
* Module containing json4s decoders/encoders for standard Schema/Iglu datatypes
*/
object SchemaCodecs {
  private implicit val defaultFormats: Formats = Serialization.formats(NoTypeHints) + SchemaVerSerializer
  // Public formats. Import it
  lazy val formats: Formats = defaultFormats + SchemaSerializer
/**
* Extract SchemaVer (*-*-*) from JValue
*/
object SchemaVerSerializer extends CustomSerializer[SchemaVer](_ => (
{
case JString(version) => SchemaVer.parse(version) match {
case Some(schemaVer) => schemaVer
case None => throw new MappingException("Can't convert " + version + " to SchemaVer")
}
case x => throw new MappingException("Can't convert " + x + " to SchemaVer")
},
{
case x: SchemaVer => JString(s"${x.model}-${x.revision}-${x.addition}")
}
))
/**
* Extract Schema with self-description and data out of JValue
*/
object SchemaSerializer extends CustomSerializer[Schema](_ => (
{
case fullSchema: JObject =>
(fullSchema \\ "self").extractOpt[SchemaDescription] match {
case Some(desc) =>
val cleanSchema = fullSchema.obj.filterNot {
case ("self", _) => true
case _ => false
}
Schema(desc, JObject(cleanSchema))
case None => throw new MappingException("JSON isn't self-describing")
}
case _ => throw new MappingException("Not an JSON object")
},
{
case x: Schema => JObject(JField("self", Extraction.decompose(x.self)) :: x.data.obj)
}
))
}
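/* Illustrative usage sketch (not part of the original file; assumes
 * org.json4s.jackson.JsonMethods.parse is in scope and that SchemaVer's fields are
 * (model, revision, addition)): with `SchemaCodecs.formats` in implicit scope, a
 * "model-revision-addition" string extracts into a SchemaVer.
 *
 *   implicit val fmts: Formats = SchemaCodecs.formats
 *   parse("\"1-0-0\"").extract[SchemaVer] // SchemaVer(1, 0, 0)
 */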
| snowplow/schema-guru | src/main/scala/com.snowplowanalytics/schemaguru/SchemaCodecs.scala | Scala | apache-2.0 | 2,474 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.runtime.batch.table
import org.apache.flink.table.api._
import org.apache.flink.table.planner.factories.TestValuesTableFactory
import org.apache.flink.table.planner.runtime.utils.BatchTestBase
import org.apache.flink.table.planner.runtime.utils.TestData._
import org.apache.flink.util.ExceptionUtils
import org.junit.Assert.{assertEquals, assertTrue, fail}
import org.junit.Test
import scala.collection.JavaConversions._
class TableSinkITCase extends BatchTestBase {
@Test
def testDecimalOnOutputFormatTableSink(): Unit = {
tEnv.executeSql(
s"""
|CREATE TABLE sink (
| `c` VARCHAR(5),
| `b` DECIMAL(10, 0),
| `d` CHAR(5)
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'true',
| 'runtime-sink' = 'OutputFormat'
|)
|""".stripMargin)
registerCollection("MyTable", data3, type3, "a, b, c", nullablesOfData3)
val table = tEnv.from("MyTable")
.where('a > 20)
.select("12345", 55.cast(DataTypes.DECIMAL(10, 0)), "12345".cast(DataTypes.CHAR(5)))
table.executeInsert("sink").await()
val result = TestValuesTableFactory.getResults("sink")
val expected = Seq("12345,55,12345")
assertEquals(expected.sorted, result.sorted)
}
@Test
def testDecimalOnSinkFunctionTableSink(): Unit = {
tEnv.executeSql(
s"""
|CREATE TABLE sink (
| `c` VARCHAR(5),
| `b` DECIMAL(10, 0),
| `d` CHAR(5)
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'true'
|)
|""".stripMargin)
registerCollection("MyTable", data3, type3, "a, b, c", nullablesOfData3)
val table = tEnv.from("MyTable")
.where('a > 20)
.select("12345", 55.cast(DataTypes.DECIMAL(10, 0)), "12345".cast(DataTypes.CHAR(5)))
table.executeInsert("sink").await()
val result = TestValuesTableFactory.getResults("sink")
val expected = Seq("12345,55,12345")
assertEquals(expected.sorted, result.sorted)
}
@Test
def testSinkWithKey(): Unit = {
tEnv.executeSql(
s"""
|CREATE TABLE testSink (
| `a` INT,
| `b` DOUBLE,
| PRIMARY KEY (a) NOT ENFORCED
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'true'
|)
|""".stripMargin)
registerCollection("MyTable", simpleData2, simpleType2, "a, b", nullableOfSimpleData2)
val table = tEnv.from("MyTable")
.groupBy('a)
.select('a, 'b.sum())
table.executeInsert("testSink").await()
val result = TestValuesTableFactory.getResults("testSink")
val expected = List(
"1,0.1",
"2,0.4",
"3,1.0",
"4,2.2",
"5,3.9")
assertEquals(expected.sorted, result.sorted)
}
@Test
def testSinkWithoutKey(): Unit = {
tEnv.executeSql(
s"""
|CREATE TABLE testSink (
| `a` INT,
| `b` DOUBLE
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'true'
|)
|""".stripMargin)
registerCollection("MyTable", simpleData2, simpleType2, "a, b", nullableOfSimpleData2)
val table = tEnv.from("MyTable")
.groupBy('a)
.select('a, 'b.sum())
table.executeInsert("testSink").await()
val result = TestValuesTableFactory.getResults("testSink")
val expected = List(
"1,0.1",
"2,0.4",
"3,1.0",
"4,2.2",
"5,3.9")
assertEquals(expected.sorted, result.sorted)
}
@Test
def testNotNullEnforcer(): Unit = {
val dataId = TestValuesTableFactory.registerData(nullData4)
tEnv.executeSql(
s"""
|CREATE TABLE nullable_src (
| category STRING,
| shopId INT,
| num INT
|) WITH (
| 'connector' = 'values',
| 'data-id' = '$dataId',
| 'bounded' = 'true'
|)
|""".stripMargin)
tEnv.executeSql(
s"""
|CREATE TABLE not_null_sink (
| category STRING,
| shopId INT,
| num INT NOT NULL
|) WITH (
| 'connector' = 'values',
| 'sink-insert-only' = 'true'
|)
|""".stripMargin)
    // by default this should fail, because there are null values in the source
try {
tEnv.executeSql("INSERT INTO not_null_sink SELECT * FROM nullable_src").await()
fail("Execution should fail.")
} catch {
case t: Throwable =>
val exception = ExceptionUtils.findThrowableWithMessage(
t,
"Column 'num' is NOT NULL, however, a null value is being written into it. " +
"You can set job configuration 'table.exec.sink.not-null-enforcer'='drop' " +
"to suppress this exception and drop such records silently.")
assertTrue(exception.isPresent)
}
    // enable the drop enforcer so that the query can run
tEnv.getConfig.getConfiguration.setString("table.exec.sink.not-null-enforcer", "drop")
tEnv.executeSql("INSERT INTO not_null_sink SELECT * FROM nullable_src").await()
val result = TestValuesTableFactory.getResults("not_null_sink")
val expected = List("book,1,12", "book,4,11", "fruit,3,44")
assertEquals(expected.sorted, result.sorted)
}
}
| darionyaphet/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/runtime/batch/table/TableSinkITCase.scala | Scala | apache-2.0 | 6,130 |
package com.lyrx.html
import java.io.File
import com.lyrx.text._
import HTMLTypes._
import scalatags.Text.all._
trait ParBasedEbook extends ParBasedHTMLGenerator with BookData{
override def wordCount(): Int = ???
override def convertToString()(implicit coll:Collector[HTMLTag],ctx:Context):String=generateTags().render
override def generateTags()(implicit coll:Collector[HTMLTag],ctx:Context): HTMLTag = html(
head(
attr("profile") := "http://dublincore.org/documents/dcq-html/",
meta(charset := "utf-8"),
link(rel := "schema.DC", href := "http://purl.org/dc/elements/1.1/"),
link(rel := "schema.DCTERMS", href := "http://purl.org/dc/terms/"),
scalatags.Text.tags2.title(title),
meta(name := "title", content := title),
meta(name := "author", content := author),
meta(name := "DC.title", content := title),
meta(name := "DC.Creator", content := author),
meta(name := "DC.publisher", content := publisher),
raw(s"\\n<!--TITLE=${title} -->\\n"),
raw(s"<!--AUTHOR=${author} -->\\n"),
raw(s"<!--PUBLISHER=${publisher} -->\\n")),
body(
div(SimpleStylesheet.centered,
h1(title),
h2(subtitle),
h3("von ", author)),
contents()
))
override def writeOut()(implicit ctx: Context, coll:Collector[HTMLTag]): Either[File, String] = {
//TODO: Write out epub format here!
super.writeOut();
}
}
| lyrx/lyrxgenerator | src/main/scala/com/lyrx/html/ParBasedEbook.scala | Scala | gpl-3.0 | 1,490 |
package de.fuberlin.wiwiss.silk.linkagerule
import de.fuberlin.wiwiss.silk.util.{ValidatingXMLReader, DPair}
import de.fuberlin.wiwiss.silk.entity.{Path, EntityDescription, Entity}
import de.fuberlin.wiwiss.silk.config.Prefixes
import de.fuberlin.wiwiss.silk.runtime.resource.ResourceLoader
import scala.xml.Node
import de.fuberlin.wiwiss.silk.linkagerule.input.{TransformInput, PathInput, Input}
/**
* A transform rule.
*/
case class TransformRule(operator: Option[Input] = None, targetProperty: String = "http://silk.wbsg.de/transformed") {
/**
* Generates the transformed values.
*
* @param entity The source entity.
*
* @return The transformed values.
*/
def apply(entity: Entity): Set[String] = {
operator match {
case Some(op) => op(DPair.fill(entity))
case None => Set.empty
}
}
/**
* Collects all paths in this rule.
*/
def paths: Set[Path] = {
def collectPaths(param: Input): Set[Path] = param match {
case p: PathInput => Set(p.path)
case p: TransformInput => p.inputs.flatMap(collectPaths).toSet
}
operator match {
case Some(op) => collectPaths(op)
case None => Set[Path]()
}
}
/**
* Serializes this transform rule as XML.
*/
def toXML(implicit prefixes: Prefixes = Prefixes.empty) = {
<TransformRule targetProperty={targetProperty}>
{operator.toList.map(_.toXML)}
</TransformRule>
}
}
/**
* Creates new transform rules.
*/
object TransformRule {
/**
* Creates a new transform rule with one root operator.
*/
def apply(operator: Input, targetProperty: String): TransformRule = TransformRule(Some(operator), targetProperty)
def load(resourceLoader: ResourceLoader)(implicit prefixes: Prefixes) = {
new ValidatingXMLReader(node => fromXML(node, resourceLoader)(prefixes, None), "de/fuberlin/wiwiss/silk/LinkSpecificationLanguage.xsd")
}
/**
* Reads a transform rule from xml.
*/
def fromXML(node: Node, resourceLoader: ResourceLoader)(implicit prefixes: Prefixes, globalThreshold: Option[Double]) = {
TransformRule(
operator = Input.fromXML(node.child, resourceLoader).headOption,
targetProperty = (node \\ "@targetProperty").text
)
}
}
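/* Illustrative usage sketch (not part of the original file; the property URI is made up):
 * a rule without an operator produces no values and references no paths.
 *
 *   val rule = TransformRule(operator = None, targetProperty = "http://example.org/prop")
 *   // rule(entity) == Set.empty and rule.paths == Set.empty
 */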
| fusepoolP3/p3-silk | silk-core/src/main/scala/de/fuberlin/wiwiss/silk/linkagerule/TransformRule.scala | Scala | apache-2.0 | 2,229 |
package dotty.tools.dotc
package transform
import core._
import MegaPhase._
import dotty.tools.dotc.ast.tpd._
import dotty.tools.dotc.core.Contexts.Context
import dotty.tools.dotc.core.StdNames._
import ast._
import Trees._
import Flags._
import SymUtils._
import Symbols._
import Decorators._
import DenotTransformers._
import Constants.Constant
import collection.mutable
object Constructors {
val name: String = "constructors"
}
/** This transform
* - moves initializers from body to constructor.
* - makes all supercalls explicit
* - also moves private fields that are accessed only from constructor
* into the constructor if possible.
*/
class Constructors extends MiniPhase with IdentityDenotTransformer { thisPhase =>
import tpd._
override def phaseName: String = Constructors.name
override def runsAfter: Set[String] = Set(HoistSuperArgs.name)
override def runsAfterGroupsOf: Set[String] = Set(Memoize.name)
  // Memoize needs to be finished because we depend on the owner chain after Memoize
// when checking whether an ident is an access in a constructor or outside it.
// This test is done in the right-hand side of a value definition. If Memoize
// was in the same group as Constructors, the test on the rhs ident would be
// performed before the rhs undergoes the owner change. This would lead
// to more symbols being retained as parameters. Test case in run/capturing.scala.
/** The private vals that are known to be retained as class fields */
private val retainedPrivateVals = mutable.Set[Symbol]()
/** The private vals whose definition comes before the current focus */
private val seenPrivateVals = mutable.Set[Symbol]()
// Collect all private parameter accessors and value definitions that need
// to be retained. There are several reasons why a parameter accessor or
// definition might need to be retained:
// 1. It is accessed after the constructor has finished
// 2. It is accessed before it is defined
// 3. It is accessed on an object other than `this`
// 4. It is a mutable parameter accessor
  // 5. It has a wildcard initializer `_`
private def markUsedPrivateSymbols(tree: RefTree)(implicit ctx: Context): Unit = {
val sym = tree.symbol
def retain() = retainedPrivateVals.add(sym)
if (sym.exists && sym.owner.isClass && mightBeDropped(sym)) {
val owner = sym.owner.asClass
tree match {
case Ident(_) | Select(This(_), _) =>
def inConstructor = {
val method = ctx.owner.enclosingMethod
method.isPrimaryConstructor && ctx.owner.enclosingClass == owner
}
if (inConstructor &&
(sym.is(ParamAccessor) || seenPrivateVals.contains(sym))) {
// used inside constructor, accessed on this,
// could use constructor argument instead, no need to retain field
}
else retain()
case _ => retain()
}
}
}
override def transformIdent(tree: tpd.Ident)(implicit ctx: Context): tpd.Tree = {
markUsedPrivateSymbols(tree)
tree
}
override def transformSelect(tree: tpd.Select)(implicit ctx: Context): tpd.Tree = {
markUsedPrivateSymbols(tree)
tree
}
override def transformValDef(tree: tpd.ValDef)(implicit ctx: Context): tpd.Tree = {
if (mightBeDropped(tree.symbol)) seenPrivateVals += tree.symbol
tree
}
/** All initializers for non-lazy fields should be moved into constructor.
* All non-abstract methods should be implemented (this is assured for constructors
* in this phase and for other methods in memoize).
*/
override def checkPostCondition(tree: tpd.Tree)(implicit ctx: Context): Unit = {
def emptyRhsOK(sym: Symbol) =
sym.isOneOf(DeferredOrLazy) || sym.isConstructor && sym.owner.isAllOf(NoInitsTrait)
tree match {
case tree: ValDef if tree.symbol.exists && tree.symbol.owner.isClass && !tree.symbol.is(Lazy) && !tree.symbol.hasAnnotation(defn.ScalaStaticAnnot) =>
assert(tree.rhs.isEmpty, i"$tree: initializer should be moved to constructors")
case tree: DefDef if !emptyRhsOK(tree.symbol) =>
assert(!tree.rhs.isEmpty, i"unimplemented: $tree")
case _ =>
}
}
/** @return true if after ExplicitOuter, all references from this tree go via an
* outer link, so no parameter accessors need to be rewired to parameters
*/
private def noDirectRefsFrom(tree: Tree)(implicit ctx: Context) =
tree.isDef && tree.symbol.isClass
/** Class members that can be eliminated if referenced only from their own
* constructor.
*/
private def mightBeDropped(sym: Symbol)(implicit ctx: Context) =
sym.is(Private, butNot = MethodOrLazy) && !sym.isAllOf(MutableParamAccessor)
private final val MutableParamAccessor = Mutable | ParamAccessor
override def transformTemplate(tree: Template)(implicit ctx: Context): Tree = {
val cls = ctx.owner.asClass
val constr @ DefDef(nme.CONSTRUCTOR, Nil, vparams :: Nil, _, EmptyTree) = tree.constr
// Produce aligned accessors and constructor parameters. We have to adjust
// for any outer parameters, which are last in the sequence of original
// parameter accessors but come first in the constructor parameter list.
val accessors = cls.paramAccessors.filterNot(x => x.isSetter)
val vparamsWithOuterLast = vparams match {
case vparam :: rest if vparam.name == nme.OUTER => rest ::: vparam :: Nil
case _ => vparams
}
val paramSyms = vparamsWithOuterLast map (_.symbol)
// Adjustments performed when moving code into the constructor:
// (1) Replace references to param accessors by constructor parameters
// except possibly references to mutable variables, if `excluded = Mutable`.
// (Mutable parameters should be replaced only during the super call)
// (2) If the parameter accessor reference was to an alias getter,
// drop the () when replacing by the parameter.
object intoConstr extends TreeMap {
override def transform(tree: Tree)(implicit ctx: Context): Tree = tree match {
case Ident(_) | Select(This(_), _) =>
var sym = tree.symbol
if (sym.is(ParamAccessor, butNot = Mutable)) sym = sym.subst(accessors, paramSyms)
if (sym.owner.isConstructor) ref(sym).withSpan(tree.span) else tree
case Apply(fn, Nil) =>
val fn1 = transform(fn)
if ((fn1 ne fn) && fn1.symbol.is(Param) && fn1.symbol.owner.isPrimaryConstructor)
fn1 // in this case, fn1.symbol was an alias for a parameter in a superclass
else cpy.Apply(tree)(fn1, Nil)
case _ =>
if (noDirectRefsFrom(tree)) tree else super.transform(tree)
}
def apply(tree: Tree, prevOwner: Symbol)(implicit ctx: Context): Tree =
transform(tree).changeOwnerAfter(prevOwner, constr.symbol, thisPhase)
}
def isRetained(acc: Symbol) =
!mightBeDropped(acc) || retainedPrivateVals(acc)
val constrStats, clsStats = new mutable.ListBuffer[Tree]
/** Map outer getters $outer and outer accessors $A$B$$$outer to the given outer parameter. */
def mapOuter(outerParam: Symbol) = new TreeMap {
override def transform(tree: Tree)(implicit ctx: Context) = tree match {
case Apply(fn, Nil)
if (fn.symbol.is(OuterAccessor)
|| fn.symbol.isGetter && fn.symbol.name == nme.OUTER
) &&
fn.symbol.info.resultType.classSymbol == outerParam.info.classSymbol =>
ref(outerParam)
case tree: RefTree if tree.symbol.is(ParamAccessor) && tree.symbol.name == nme.OUTER =>
ref(outerParam)
case _ =>
super.transform(tree)
}
}
val dropped = mutable.Set[Symbol]()
// Split class body into statements that go into constructor and
// definitions that are kept as members of the class.
def splitStats(stats: List[Tree]): Unit = stats match {
case stat :: stats1 =>
stat match {
case stat @ ValDef(name, tpt, _) if !stat.symbol.is(Lazy) && !stat.symbol.hasAnnotation(defn.ScalaStaticAnnot) =>
val sym = stat.symbol
if (isRetained(sym)) {
if (!stat.rhs.isEmpty && !isWildcardArg(stat.rhs))
constrStats += Assign(ref(sym), intoConstr(stat.rhs, sym)).withSpan(stat.span)
clsStats += cpy.ValDef(stat)(rhs = EmptyTree)
}
else if (!stat.rhs.isEmpty) {
dropped += sym
sym.copySymDenotation(
initFlags = sym.flags &~ Private,
owner = constr.symbol).installAfter(thisPhase)
constrStats += intoConstr(stat, sym)
}
case DefDef(nme.CONSTRUCTOR, _, ((outerParam @ ValDef(nme.OUTER, _, _)) :: _) :: Nil, _, _) =>
clsStats += mapOuter(outerParam.symbol).transform(stat)
case _: DefTree =>
clsStats += stat
case _ =>
constrStats += intoConstr(stat, tree.symbol)
}
splitStats(stats1)
case Nil =>
}
splitStats(tree.body)
    // The initializers for the retained accessors
val copyParams = accessors flatMap { acc =>
if (!isRetained(acc)) {
dropped += acc
Nil
}
      else if (!isRetained(acc.field)) { // This may happen for unit fields; see tests/run/i6987.scala
dropped += acc.field
Nil
}
else {
val param = acc.subst(accessors, paramSyms)
if (param.hasAnnotation(defn.ConstructorOnlyAnnot))
ctx.error(em"${acc.name} is marked `@constructorOnly` but it is retained as a field in ${acc.owner}", acc.sourcePos)
val target = if (acc.is(Method)) acc.field else acc
if (!target.exists) Nil // this case arises when the parameter accessor is an alias
else {
val assigns = Assign(ref(target), ref(param)).withSpan(tree.span) :: Nil
if (acc.name != nme.OUTER) assigns
else {
// insert test: if ($outer eq null) throw new NullPointerException
val nullTest =
If(ref(param).select(defn.Object_eq).appliedTo(nullLiteral),
Throw(New(defn.NullPointerExceptionClass.typeRef, Nil)),
unitLiteral)
nullTest :: assigns
}
}
}
}
// Drop accessors that are not retained from class scope
if (dropped.nonEmpty) {
val clsInfo = cls.classInfo
cls.copy(
info = clsInfo.derivedClassInfo(
decls = clsInfo.decls.filteredScope(!dropped.contains(_))))
// TODO: this happens to work only because Constructors is the last phase in group
}
val (superCalls, followConstrStats) = constrStats.toList match {
case (sc: Apply) :: rest if sc.symbol.isConstructor => (sc :: Nil, rest)
case stats => (Nil, stats)
}
val mappedSuperCalls = vparams match {
case (outerParam @ ValDef(nme.OUTER, _, _)) :: _ =>
superCalls.map(mapOuter(outerParam.symbol).transform)
case _ => superCalls
}
// Lazy Vals may decide to create an eager val instead of a lazy val
// this val should be assigned before constructor body code starts running
val (lazyAssignments, stats) = followConstrStats.partition {
case Assign(l, r) if l.symbol.name.is(NameKinds.LazyLocalName) => true
case _ => false
}
val finalConstrStats = copyParams ::: mappedSuperCalls ::: lazyAssignments ::: stats
val expandedConstr =
if (cls.isAllOf(NoInitsTrait)) {
assert(finalConstrStats.isEmpty)
constr
}
else cpy.DefDef(constr)(rhs = Block(finalConstrStats, unitLiteral))
cpy.Template(tree)(constr = expandedConstr, body = clsStats.toList)
}
}
| som-snytt/dotty | compiler/src/dotty/tools/dotc/transform/Constructors.scala | Scala | apache-2.0 | 11,827 |
/*
*************************************************************************************
* Copyright 2011 Normation SAS
*************************************************************************************
*
* This file is part of Rudder.
*
* Rudder is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* In accordance with the terms of section 7 (7. Additional Terms.) of
* the GNU General Public License version 3, the copyright holders add
* the following Additional permissions:
* Notwithstanding to the terms of section 5 (5. Conveying Modified Source
* Versions) and 6 (6. Conveying Non-Source Forms.) of the GNU General
* Public License version 3, when you create a Related Module, this
* Related Module is not considered as a part of the work and may be
* distributed under the license agreement of your choice.
* A "Related Module" means a set of sources files including their
* documentation that, without modification of the Source Code, enables
* supplementary functions or services in addition to those offered by
* the Software.
*
* Rudder is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with Rudder. If not, see <http://www.gnu.org/licenses/>.
*
*************************************************************************************
*/
package com.normation.rudder.web.snippet
//lift std import
import scala.xml._
import net.liftweb.common._
import net.liftweb.http._
import net.liftweb.util._
import Helpers._
import net.liftweb.http.js._
import JsCmds._
import com.normation.inventory.ldap.core.InventoryDit
import com.normation.ldap.sdk.LDAPConnectionProvider
import com.normation.ldap.sdk.BuildFilter._
import com.normation.rudder.domain.NodeDit
import com.normation.rudder.domain.RudderLDAPConstants._
import com.normation.rudder.repository.RoRuleRepository
import JE._
import net.liftweb.http.SHtml._
import com.normation.ldap.sdk.RoLDAPConnection
import bootstrap.liftweb.RudderConfig
import com.normation.ldap.sdk.FALSE
import com.normation.rudder.domain.reports.ComplianceLevel
import com.normation.inventory.domain.AcceptedInventory
import com.normation.rudder.domain.logger.TimingDebugLogger
import com.normation.inventory.domain.NodeId
import com.normation.inventory.domain.InventoryStatus
import com.normation.inventory.domain.Software
import com.normation.inventory.domain.Version
import com.normation.rudder.domain.nodes.NodeInfo
import com.normation.utils.Control.sequence
import com.unboundid.ldap.sdk.SearchRequest
import com.unboundid.ldap.sdk.controls.MatchedValuesRequestControl
import com.unboundid.ldap.sdk.controls.MatchedValuesFilter
import com.normation.inventory.domain.VirtualMachineType
import com.normation.inventory.domain.PhysicalMachineType
sealed trait ComplianceLevelPieChart{
def color : String
def label : String
def value : Int
def jsValue = {
JsArray(label, value)
}
def jsColor = {
(label -> Str(color))
}
}
case class DisabledChart (value : Int) extends ComplianceLevelPieChart{
val label = "Reports Disabled"
val color = "#b4b4b4"
}
case class GreenChart (value : Int) extends ComplianceLevelPieChart{
val label = "Perfect (100%)"
val color = "#5cb85c"
}
case class BlueChart (value : Int) extends ComplianceLevelPieChart{
val label = "Good (> 75%)"
val color = "#9bc832"
}
case class OrangeChart (value : Int) extends ComplianceLevelPieChart{
val label = "Average (> 50%)"
val color = "#f0ad4e"
}
case class RedChart (value : Int) extends ComplianceLevelPieChart{
val label = "Poor (< 50%)"
val color = "#c9302c"
}
case class PendingChart (value : Int) extends ComplianceLevelPieChart{
val label = "Applying"
val color = "#5bc0de"
}
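/* Illustrative sketch (not part of the original file): each slice serialises to a
 * [label, value] JsArray plus a label -> colour pair for the chart rendering.
 *
 *   GreenChart(42).jsValue // JsArray("Perfect (100%)", 42)
 *   GreenChart(42).jsColor // ("Perfect (100%)", Str("#5cb85c"))
 */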
object HomePage {
private val nodeInfosService = RudderConfig.nodeInfoService
object boxNodeInfos extends RequestVar[Box[Map[NodeId, NodeInfo]]](initNodeInfos) {
override def doSync[F](f: => F): F = this.synchronized(f)
}
def initNodeInfos(): Box[Map[NodeId, NodeInfo]] = {
TimingDebugLogger.debug(s"Start timing homepage")
val n1 = System.currentTimeMillis
val n = nodeInfosService.getAll
val n2 = System.currentTimeMillis
TimingDebugLogger.debug(s"Getting node infos: ${n2 - n1}ms")
n
}
}
class HomePage extends Loggable {
private[this] val ldap = RudderConfig.roLDAPConnectionProvider
private[this] val pendingNodesDit = RudderConfig.pendingNodesDit
private[this] val acceptedNodesDit = RudderConfig.acceptedNodesDit
private[this] val nodeDit = RudderConfig.nodeDit
private[this] val rudderDit = RudderConfig.rudderDit
private[this] val reportingService = RudderConfig.reportingService
private[this] val softwareService = RudderConfig.readOnlySoftwareDAO
private[this] val mapper = RudderConfig.ldapInventoryMapper
private[this] val roRuleRepo = RudderConfig.roRuleRepository
def pendingNodes(html : NodeSeq) : NodeSeq = {
displayCount(countPendingNodes, "pending nodes")
}
def acceptedNodes(html : NodeSeq) : NodeSeq = {
displayCount(countAcceptedNodes, "accepted nodes")
}
def rules(html : NodeSeq) : NodeSeq = {
displayCount(countAllRules, "rules")
}
def directives(html : NodeSeq) : NodeSeq = {
displayCount(countAllDirectives,"directives")
}
def groups(html : NodeSeq) : NodeSeq = {
displayCount(countAllGroups,"groups")
}
def techniques(html : NodeSeq) : NodeSeq = {
displayCount(countAllTechniques,"techniques")
}
def getAllCompliance = {
trait ChartType
case object PendingChartType extends ChartType
case object DisabledChartType extends ChartType
case class ColoredChartType(value: Double) extends ChartType
( for {
nodeInfos <- HomePage.boxNodeInfos.get
n2 = System.currentTimeMillis
userRules <- roRuleRepo.getIds()
n3 = System.currentTimeMillis
_ = TimingDebugLogger.trace(s"Get rules: ${n3 - n2}ms")
reports <- reportingService.findRuleNodeStatusReports(nodeInfos.keySet, userRules)
n4 = System.currentTimeMillis
_ = TimingDebugLogger.trace(s"Compute Rule Node status reports for all nodes: ${n4 - n3}ms")
_ = TimingDebugLogger.debug(s"Compute compliance: ${n4 - n2}ms")
} yield {
val reportsByNode = reports.mapValues { status => ComplianceLevel.sum(status.report.reports.map(_.compliance)) }
/*
* Here, for the compliance by node, we want to distinguish (but NOT ignore, like in globalCompliance) the
* case where the node is pending.
*
       * We are using a coarse grain here: if a node has even ONE report not pending, we compute its compliance.
* Else, if the node's reports are ALL pending, we use a special pending case.
*
* Note: node without reports are also put in "pending".
*/
val complianceByNode : List[ChartType] = reportsByNode.values.map { r =>
if(r.pending == r.total) { PendingChartType }
else if(r.reportsDisabled == r.total) { DisabledChartType }
else { ColoredChartType(r.complianceWithoutPending) }
}.toList
val complianceDiagram : List[ComplianceLevelPieChart] = (complianceByNode.groupBy{compliance => compliance match {
case PendingChartType => PendingChart
case DisabledChartType => DisabledChart
case ColoredChartType(100) => GreenChart
case ColoredChartType(x) if x >= 75 => BlueChart
case ColoredChartType(x) if x >= 50 => OrangeChart
case ColoredChartType(_) => RedChart
} }.map {
case (PendingChart , compliance) => PendingChart(compliance.size)
case (DisabledChart, compliance) => DisabledChart(compliance.size)
case (GreenChart , compliance) => GreenChart(compliance.size)
case (BlueChart , compliance) => BlueChart(compliance.size)
case (OrangeChart , compliance) => OrangeChart(compliance.size)
case (RedChart , compliance) => RedChart(compliance.size)
case (_ , compliance) => RedChart(compliance.size)
}).toList
val sorted = complianceDiagram.sortWith{
case (_:PendingChart ,_) => true
case (_:DisabledChart ,_) => true
case (_:GreenChart ,_) => true
case (_:BlueChart ,_:GreenChart) => false
case (_:BlueChart ,_) => true
case (_:OrangeChart , ( _:GreenChart| _:BlueChart)) => false
case (_:OrangeChart ,_) => true
case (_:RedChart ,_) => false
}
val numberOfNodes = complianceByNode.size
val pendingNodes = complianceDiagram.collectFirst{
case p : PendingChart => p.value
} match {
case None =>
JsObj (
"pending" -> JsNull
, "active" -> numberOfNodes
)
case Some(pending) =>
JsObj (
"pending" ->
JsObj (
"nodes" -> pending
, "percent" -> (pending * 100.0 / numberOfNodes).round
)
, "active" -> (numberOfNodes - pending)
)
}
val diagramData = JsArray(sorted.map(_.jsValue):_*)
val diagramColor = JsObj(sorted.map(_.jsColor):_*)
// Data used for compliance bar, compliance without pending
val compliance = ComplianceLevel.sum(reports.flatMap( _._2.report.reports.toSeq.map( _.compliance))).copy(pending = 0)
import com.normation.rudder.domain.reports.ComplianceLevelSerialisation._
val complianceBar = compliance.toJsArray
val globalCompliance = compliance.complianceWithoutPending.round
val n4 = System.currentTimeMillis
TimingDebugLogger.debug(s"Compute compliance for HomePage: ${n4 - n2}ms")
Script(OnLoad(JsRaw(s"""
homePage(
${complianceBar.toJsCmd}
, ${globalCompliance}
, ${diagramData.toJsCmd}
, ${diagramColor.toJsCmd}
, ${pendingNodes.toJsCmd}
)""")))
} ) match {
case Full(homePageCompliance) => homePageCompliance
case eb : EmptyBox =>
logger.error(eb)
NodeSeq.Empty
}
}
def inventoryInfo() = {
( for {
nodeInfos <- HomePage.boxNodeInfos.get
} yield {
val machines = nodeInfos.values.map { _.machine.map(_.machineType) match {
case Some(_: VirtualMachineType) => "Virtual"
case Some(PhysicalMachineType) => "Physical"
case _ => "No Machine Inventory"
} }.groupBy(identity).mapValues(_.size).map{case (a,b) => JsArray(a, b)}
val machinesArray = JsArray(machines.toList)
val os = nodeInfos.values.groupBy(_.osDetails.os.name).mapValues(_.size).map{case (a,b) => JsArray(a, b)}
val osArray = JsArray(os.toList)
Script(OnLoad(JsRaw(s"""
homePageInventory(
${machinesArray.toJsCmd}
, ${osArray.toJsCmd}
)""")))
} ) match {
case Full(inventory) => inventory
case _ => NodeSeq.Empty
}
}
def rudderAgentVersion() = {
val n4 = System.currentTimeMillis
val agents = getRudderAgentVersion match {
case Full(x) => x
case eb: EmptyBox =>
val e = eb ?~! "Error when getting installed agent version on nodes"
logger.debug(e.messageChain)
e.rootExceptionCause.foreach { ex =>
logger.debug("Root exception was:", ex)
}
Map("Unknown" -> 1)
}
TimingDebugLogger.debug(s"Get software: ${System.currentTimeMillis-n4}ms")
val agentsValue = agents.map{case (a,b) => JsArray(a, b)}
    val agentsData = JsArray(agentsValue.toList)
Script(OnLoad(JsRaw(s"""
homePageSoftware(
${agentsData.toJsCmd}
)""")))
}
/**
* Get the count of agent version name -> size for accepted nodes
*/
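  // Illustrative result (versions and counts are made up): Full(Map("3.0.6" -> 120, "4.1.0" -> 30, "Unknown" -> 2))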
private[this] def getRudderAgentVersion() : Box[Map[String, Int]] = {
import com.normation.ldap.sdk._
import com.normation.ldap.sdk.BuildFilter.{EQ,OR}
import com.normation.inventory.ldap.core.LDAPConstants.{A_NAME, A_SOFTWARE_UUID, A_NODE_UUID, A_SOFTWARE_DN}
import com.unboundid.ldap.sdk.DN
val unknown = new Version("Unknown")
val n1 = System.currentTimeMillis
for {
con <- ldap
nodeInfos <- HomePage.boxNodeInfos.get
n2 = System.currentTimeMillis
agentSoftEntries = con.searchOne(acceptedNodesDit.SOFTWARE.dn, EQ(A_NAME, "rudder-agent"))
agentSoftDn = agentSoftEntries.map(_.dn.toString).toSet
agentSoft <- sequence(agentSoftEntries){ entry =>
mapper.softwareFromEntry(entry) ?~! "Error when mapping LDAP entry %s to a software".format(entry)
}
n3 = System.currentTimeMillis
_ = TimingDebugLogger.debug(s"Get agent software entries: ${n3-n2}ms")
nodeEntries = {
val sr = new SearchRequest(
acceptedNodesDit.NODES.dn.toString
, One
, OR(agentSoft.map(x => EQ(A_SOFTWARE_DN, acceptedNodesDit.SOFTWARE.SOFT.dn(x.id).toString)):_*)
, A_NODE_UUID, A_SOFTWARE_DN
)
                        // Skip if there are no rudder-agent packages in software DN
if (! agentSoftDn.isEmpty) {
                          //only get the interesting entries via the MatchedValues control - that makes a huge difference in perf
sr.addControl(new MatchedValuesRequestControl(agentSoftDn.map(dn => MatchedValuesFilter.createEqualityFilter(A_SOFTWARE_DN, dn)).toSeq:_*))
con.search(sr)
} else {
Seq()
}
}
n4 = System.currentTimeMillis
_ = TimingDebugLogger.debug(s"Get nodes for agent: ${n4-n3}ms")
} yield {
val agentMap = agentSoft.map(x => (x.id.value, x)).toMap
val agents = agentMap.keySet
val agentVersionByNodeEntries = nodeEntries.map { e =>
(
NodeId(e.value_!(A_NODE_UUID))
, e.valuesFor(A_SOFTWARE_DN).intersect(agentSoftDn).flatMap { x =>
acceptedNodesDit.SOFTWARE.SOFT.idFromDN(new DN(x)).flatMap(s => agentMap(s.value).version)
}
)
}.toMap.mapValues{_.maxBy(_.value)} // Get the max version available
// take back the initial set of nodes to be sure to have one agent for each
val allAgents = nodeInfos.keySet.toSeq.map(nodeId => agentVersionByNodeEntries.getOrElse(nodeId, unknown) )
// Format different version naming type into one
        def formatVersion (version : String) : String = {
          // All that is before '.release' (rpm releases, like 3.0.6.release), OR all until the first dash (debian releases, like 3.0.6~wheezy)
val versionRegexp = "(.+)(?=\\\\.release)|([^-]+)".r
versionRegexp.
findFirstIn(version).
getOrElse(version).
// Replace all '~' by '.' to normalize alpha/beta/rc releases and nightlies
replace("~", ".")
}
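        // Illustrative behaviour of formatVersion, derived from the regexp above:
        //   formatVersion("3.0.6.release") == "3.0.6"      (rpm-style '.release' suffix stripped)
        //   formatVersion("3.0.6-1.el7")   == "3.0.6"      (everything after the first dash dropped)
        //   formatVersion("3.0.7~rc1")     == "3.0.7.rc1"  ('~' normalized to '.')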
val res = allAgents.groupBy(ag => formatVersion(ag.value)).mapValues( _.size)
TimingDebugLogger.debug(s"=> group and count agents: ${System.currentTimeMillis-n4}ms")
res
}
}
private[this] def countPendingNodes() : Box[Int] = {
ldap.map { con =>
con.searchOne(pendingNodesDit.NODES.dn, ALL, "1.1")
}.map(x => x.size)
}
private[this] def countAcceptedNodes() : Box[Int] = {
ldap.map { con =>
con.searchOne(nodeDit.NODES.dn, ALL, "1.1")
}.map(x => x.size)
}
private[this] def countAllRules() : Box[Int] = {
roRuleRepo.getIds().map(_.size)
}
private[this] def countAllDirectives() : Box[Int] = {
ldap.map { con =>
con.searchSub(rudderDit.ACTIVE_TECHNIQUES_LIB.dn, AND(IS(OC_DIRECTIVE), EQ(A_IS_SYSTEM, FALSE.toLDAPString)), "1.1")
}.map(x => x.size)
}
private[this] def countAllTechniques() : Box[Int] = {
ldap.map { con =>
con.searchSub(rudderDit.ACTIVE_TECHNIQUES_LIB.dn, AND(IS(OC_ACTIVE_TECHNIQUE), EQ(A_IS_SYSTEM, FALSE.toLDAPString)), "1.1")
}.map(x => x.size)
}
private[this] def countAllGroups() : Box[Int] = {
ldap.map { con =>
con.searchSub(rudderDit.GROUP.dn, AND(IS(OC_RUDDER_NODE_GROUP), EQ(A_IS_SYSTEM, FALSE.toLDAPString)), "1.1")
}.map(x => x.size)
}
private[this] def displayCount( count : () => Box[Int], name : String) ={
Text((count() match {
case Empty => 0
case m:Failure =>
logger.error(s"Could not fetch the number of ${name}. reason : ${m.messageChain}")
0
case Full(x) => x
}).toString)
}
}
| armeniaca/rudder | rudder-web/src/main/scala/com/normation/rudder/web/snippet/HomePage.scala | Scala | gpl-3.0 | 17,364 |
package org.maproulette.cache
import org.joda.time.DateTime
import org.maproulette.Config
import org.maproulette.data.{ItemType, ProjectType}
import org.maproulette.models.BaseObject
import org.scalatestplus.play.PlaySpec
import play.api.Configuration
import play.api.libs.json.{DefaultWrites, JodaReads, JodaWrites, Json, Reads, Writes}
/**
* @author cuthbertm
*/
case class TestBaseObject(override val id: Long,
override val name: String,
override val created: DateTime = DateTime.now(),
override val modified: DateTime = DateTime.now()) extends BaseObject[Long] {
override val itemType: ItemType = ProjectType()
}
class CacheSpec extends PlaySpec with JodaWrites with JodaReads {
implicit val groupWrites: Writes[TestBaseObject] = Json.writes[TestBaseObject]
implicit val groupReads: Reads[TestBaseObject] = Json.reads[TestBaseObject]
implicit val configuration = Configuration.from(Map(Config.KEY_CACHING_CACHE_LIMIT -> 6, Config.KEY_CACHING_CACHE_EXPIRY -> 5))
implicit val manager = new CacheManager[Long, TestBaseObject](new Config())
val theCache = manager.cache
"CacheManager" should {
"cache element withOptionCaching" in {
theCache.clear()
cacheObject(25, "test")
val cachedObj = theCache.get(25)
cachedObj.isDefined mustEqual true
cachedObj.get.name mustEqual "test"
cachedObj.get.id mustEqual 25
}
"delete cached element withCacheIDDeletion" in {
theCache.clear()
cacheObject(1, "name1")
implicit val ids = List(1L)
manager.withCacheIDDeletion { () =>
val cachedObj = theCache.get(1)
cachedObj.isEmpty mustEqual true
}
}
"delete cached elements withCacheIDDeletion" in {
theCache.clear()
cacheObject(1, "name1")
cacheObject(2, "name2")
cacheObject(3, "name3")
implicit val ids = List(1L, 2L)
manager.withCacheIDDeletion { () =>
theCache.get(1).isEmpty mustEqual true
theCache.get(2).isEmpty mustEqual true
theCache.get(3).isDefined mustEqual true
}
}
"delete cached elements withCacheNameDeletion" in {
theCache.clear()
cacheObject(1, "name1")
cacheObject(2, "name2")
cacheObject(3, "name3")
implicit val names = List("name1", "name2")
manager.withCacheNameDeletion { () =>
theCache.find("name1").isEmpty mustEqual true
theCache.find("name2").isEmpty mustEqual true
theCache.find("name3").isDefined mustEqual true
}
}
"cache multiple elements withIDListCaching" in {
theCache.clear()
implicit val cacheList = List(1L, 2L, 3L)
manager.withIDListCaching { implicit uncachedList =>
uncachedList.size mustEqual 3
uncachedList.map(id => TestBaseObject(id, s"name$id"))
}
theCache.get(1).isDefined mustEqual true
theCache.get(2).isDefined mustEqual true
theCache.get(3).isDefined mustEqual true
}
"cache multiple elements withNameListCaching" in {
theCache.clear()
implicit val cacheNames = List("name1", "name2", "name3")
manager.withNameListCaching { implicit uncachedList =>
uncachedList.size mustEqual 3
uncachedList.map(name => TestBaseObject(name.charAt(4).toString.toInt, name))
}
theCache.get(1).isDefined mustEqual true
theCache.get(2).isDefined mustEqual true
theCache.get(3).isDefined mustEqual true
}
"caching must be able to be disabled" in {
theCache.clear()
implicit val caching = false
manager.withOptionCaching { () => Some(TestBaseObject(1, "name1")) }
theCache.size mustEqual 0
}
"cache only the elements that are not already cached withIDListCaching" in {
theCache.clear()
cacheObject(1, "name1")
cacheObject(2, "name2")
cacheObject(3, "name3")
implicit val ids = List(2L, 3L, 5L, 6L)
manager.withIDListCaching { implicit uncachedList =>
uncachedList.size mustEqual 2
uncachedList.map(id => TestBaseObject(id, s"name$id"))
}
theCache.size mustEqual 5
theCache.get(5).isDefined mustEqual true
theCache.get(6).isDefined mustEqual true
}
"cache updated changes for element" in {
implicit val id = 1L
theCache.clear()
cacheObject(id, "name1")
manager.withUpdatingCache(fakeRetrieve) { implicit cached =>
Some(TestBaseObject(1, "name2"))
}
val cachedObj = theCache.get(1)
cachedObj.isDefined mustEqual true
cachedObj.get.name mustEqual "name2"
}
"cache updated changes for element and retrieve from supplied function" in {
implicit val id = 3L
theCache.clear()
manager.withUpdatingCache(fakeRetrieve) { implicit cached =>
cached.name mustEqual "testObject"
Some(TestBaseObject(3, "updated"))
}
val cachedObj = theCache.get(3)
cachedObj.isDefined mustEqual true
cachedObj.get.name mustEqual "updated"
}
"cache must handle size limits correctly" in {
theCache.clear()
cacheObject(25L, "test1")
Thread.sleep(100)
cacheObject(26L, "test2")
Thread.sleep(100)
cacheObject(27L, "test3")
Thread.sleep(100)
cacheObject(28L, "test4")
Thread.sleep(100)
cacheObject(29L, "test5")
Thread.sleep(100)
cacheObject(30L, "test6")
// at this point we should be at the cache limit, the next one should add new and remove test1
Thread.sleep(100)
cacheObject(31L, "test7")
theCache.size mustEqual 6
theCache.get(25L).isEmpty mustEqual true
// by getting cache object 26L we should renew access time and so next entry would remove test3 instead of test2
theCache.get(26L)
Thread.sleep(100)
cacheObject(32L, "test8")
theCache.size mustEqual 6
theCache.get(26L).isDefined mustEqual true
theCache.get(27L).isEmpty mustEqual true
}
"cache must expire values correctly" in {
theCache.clear()
cacheObject(1L, "test1")
theCache.addObject(TestBaseObject(2L, "test2"), Some(1))
Thread.sleep(2000)
theCache.trueSize mustEqual 1
Thread.sleep(5000)
theCache.trueSize mustEqual 0
}
}
private def fakeRetrieve(id: Long): Option[TestBaseObject] = Some(TestBaseObject(3, "testObject"))
private def cacheObject(id: Long, name: String) =
manager.withOptionCaching { () => Some(TestBaseObject(id, name)) }
}
| mvexel/maproulette2 | test/org/maproulette/cache/CacheSpec.scala | Scala | apache-2.0 | 6,537 |
/**
* Created on: Feb 2, 2014
*/
package com.iteamsolutions.angular.services
package atom.actor
import scalaz.{
Failure => _,
Success => _,
_
}
import com.iteamsolutions.angular.models.atom.Feed
/**
* The '''AvailableFeedsRequest''' type serves as the parent for ''all''
* [[com.iteamsolutions.angular.services.Request]]s understood by the
* [[com.iteamsolutions.angular.services.atom.actor.AvailableFeeds]] Akka
* `Actor`.
*
* @author svickers
*
*/
sealed trait AvailableFeedsRequest[A <: AvailableFeedsResponse[A]]
extends Request[A]
{
}
sealed trait AvailableFeedsResponse[A <: AvailableFeedsResponse[A]]
extends Response[A]
{
}
case object CurrentlyAvailableFeedsRequest
extends AvailableFeedsRequest[CurrentlyAvailableFeedsResponse]
case class CurrentlyAvailableFeedsResponse (
val result : Throwable \\/ List[Feed]
)
extends AvailableFeedsResponse[CurrentlyAvailableFeedsResponse]
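/* A client-side sketch (hypothetical, not part of this file): given an ActorRef for the
 * AvailableFeeds actor, an implicit akka.util.Timeout and Akka's ask pattern, a caller could do:
 *
 *   import akka.pattern.ask
 *   (availableFeedsRef ? CurrentlyAvailableFeedsRequest)
 *     .mapTo[CurrentlyAvailableFeedsResponse]
 *     .map(_.result.fold(_ => List.empty[Feed], identity))
 */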
| osxhacker/angular-codegen | src/main/scala/com/iteamsolutions/angular/services/atom/actor/AvailableFeedsMessages.scala | Scala | bsd-2-clause | 918 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.utils
import com.intel.analytics.bigdl.nn.Module
import com.intel.analytics.bigdl.utils.caffe.CaffeLoader
import com.intel.analytics.bigdl.numeric.NumericFloat
import scopt.OptionParser
object ConvertModel {
case class ConverterParam(
from: String = "",
to: String = "",
input: String = "",
output: String = "",
prototxt: String = "",
tf_inputs: String = "",
tf_outputs: String = "",
quantize: Boolean = false
)
val fromSupports = Set("bigdl", "caffe", "torch", "tensorflow")
val toSupports = Set("bigdl", "caffe", "torch")
val converterParser = new OptionParser[ConverterParam](
"Convert models between different dl frameworks") {
opt[String]("from")
.text(s"What's the type origin model ${fromSupports.mkString(",")}?")
.action((x, c) => c.copy(from = x))
.validate(x =>
if (fromSupports.contains(x.toLowerCase)) {
success
} else {
failure(s"Only support ${fromSupports.mkString(",")}")
})
.required()
opt[String]("to")
.text(s"What's the type of model you want ${toSupports.mkString(",")}?")
.action((x, c) => c.copy(to = x))
.validate(x =>
if (toSupports.contains(x.toLowerCase)) {
success
} else {
failure(s"Only support ${toSupports.mkString(",")}")
})
.required()
opt[String]("input")
.text("Where's the origin model file?")
.action((x, c) => c.copy(input = x))
.required()
opt[String]("output")
.text("Where's the bigdl model file to save?")
.action((x, c) => c.copy(output = x))
.required()
opt[String]("prototxt")
.text("Where's the caffe deploy prototxt?")
.action((x, c) => c.copy(prototxt = x))
opt[Boolean]("quantize")
.text("Do you want to quantize the model? Only works when \\"--to\\" is bigdl;" +
"you can only perform inference using the new quantized model.")
.action((x, c) => c.copy(quantize = x))
opt[String]("tf_inputs")
.text("Inputs for Tensorflow")
.action((x, c) => c.copy(tf_inputs = x))
opt[String]("tf_outputs")
.text("Outputs for Tensorflow")
.action((x, c) => c.copy(tf_outputs = x))
checkConfig(c =>
if (c.from.toLowerCase == "caffe" && c.prototxt.isEmpty) {
failure(s"If model is converted from caffe, the prototxt should be given with --prototxt.")
} else if (c.from.toLowerCase == "tensorflow" &&
(c.tf_inputs.isEmpty || c.tf_outputs.isEmpty)) {
failure(s"If model is converted from tensorflow, inputs and outputs should be given")
} else if (c.quantize == true && c.to.toLowerCase != "bigdl") {
failure(s"Only support quantizing models to BigDL model now.")
} else {
success
}
)
}
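  // Illustrative invocations (file paths are made up):
  //   --from caffe      --to bigdl --input lenet.caffemodel --prototxt deploy.prototxt --output lenet.bigdl
  //   --from tensorflow --to bigdl --input model.pb --tf_inputs input --tf_outputs output --output model.bigdl --quantize true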
def main(args: Array[String]): Unit = {
converterParser.parse(args, ConverterParam()).foreach { param =>
val input = param.input
val output = param.output
val ifs = ","
      val loadedModel = param.from.toLowerCase match {
case "bigdl" =>
Module.loadModule(input)
case "torch" =>
Module.loadTorch(input)
case "caffe" =>
CaffeLoader.loadCaffe(param.prototxt, input)._1
case "tensorflow" =>
val inputs = param.tf_inputs.split(ifs)
val outputs = param.tf_outputs.split(ifs)
Module.loadTF(input, inputs, outputs)
}
val model = if (param.quantize) {
loadedModel.quantize()
} else {
loadedModel
}
param.to.toLowerCase match {
case "bigdl" =>
model.saveModule(output, overWrite = true)
case "torch" =>
model.saveTorch(output)
case "caffe" =>
model.saveCaffe(param.prototxt, output)
}
}
}
}
| jenniew/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/utils/ConvertModel.scala | Scala | apache-2.0 | 4,462 |
/**
* Created by zyst on 1/12/2016.
*/
object Procedures {
def main(args: Array[String]) {
// A procedure is called only for its effect, and it returns no value.
def box(s: String) {
val border = "-" * s.length + "--\\n"
println(border + "|" + s + "|\\n" + border)
}
// -----------------------
// |Allow me to interject|
// -----------------------
box("Allow me to interject")
// some people prefer to use an explicit return type
def explicitBox(s: String): Unit = {
// Some stuff
}
}
}
| Zyst/SCALA-Fundamentals | Scala for the Impatient/L1/src/Procedures.scala | Scala | mit | 564 |
trait x0[T] { self: x0 => } // error
trait x1[T] { self: (=> String) => } // error
trait x2[T] { self: ([X] =>> X) => } // error
| som-snytt/dotty | tests/neg/bad-selftype.scala | Scala | apache-2.0 | 145 |
package com.morenware.tvcrawler.config
import java.io.File
import com.typesafe.scalalogging.Logger
import net.liftweb.json.DefaultFormats
import io._
import net.liftweb.json._
import org.slf4j.LoggerFactory
case class Config(crawlerConf: File = new File("."))
/**
* Created by david on 20/04/2016.
*/
object Configuration {
val log = Logger(LoggerFactory.getLogger("crawlers"))
def readConfigFromFile(): CrawlersConfig = {
val source = Source.fromFile("config.json")
val lines = try source.mkString finally source.close()
log.debug(s"**** Configuration is: $lines")
implicit val formats = DefaultFormats
val json = parse(lines)
val config: CrawlersConfig = json.extract[CrawlersConfig]
// Log some values
config.crawlers.foreach( crawler => {
log.debug(s"***** Base url: ${crawler.baseUrl}" )
log.debug(s"***** Name: ${crawler.name}" )
log.debug(s"***** Site ID: ${crawler.siteId}" )
log.debug(s"***** Section 1 link type: ${crawler.sections.head.torrentLinkType}" )
})
// Return this config
config
}
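  // For illustration only - the authoritative schema is the CrawlersConfig case class; the field
  // names below are inferred from the accesses above and the values are made up:
  //
  //   { "crawlers": [ { "name": "SomeSite", "siteId": "some-site", "baseUrl": "http://example.com",
  //                     "sections": [ { "torrentLinkType": "magnet" } ] } ] }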
def readCommandLineArguments(args: Array[String]): Unit = {
val parser = new scopt.OptionParser[Config]("scopt") {
head("tvCrawler", "0.1")
opt[File]('c', "conf") required() valueName "<confFile.json>" action {
(x, c) =>
c.copy(crawlerConf = x) } text "conf requires a json configuration file"
}
parser.parse(args, Config()) match {
case Some(config) =>
println("Read config: ", config.crawlerConf)
val f = config.crawlerConf.getName
val source = Source.fromFile(f)
val lines = try source.mkString finally source.close()
println("Lines:\\n" + lines);
case None =>
println("Bad Arguments")
// arguments are bad, error message will have been displayed
}
}
// Try / catch example
// try {
// throw new NullPointerException()
// } catch {
//
// // Could use wildcard _, but that way cannot be used afterwards like I am doing here
// case any: Throwable => log.error(s"Exception occurred: ${any.getMessage}")
// }
}
| dfernandezm/tv-crawler-scala | src/main/scala/com/morenware/tvcrawler/config/Configuration.scala | Scala | mit | 2,137 |
def get[S]: State[S, S] =
State(s => (s, s))
def set[S](s: S): State[S, Unit] =
  State(_ => ((), s))
 | galarragas/FpInScala | answerkey/state/12.answer.scala | Scala | mit | 104 |
/* Example of case class */
abstract class Term
case class Var(name: String) extends Term
case class Fun(arg: String, body: Term) extends Term
case class App(f: Term, v: Term) extends Term
object TermTest extends Application {
def printTerm(term: Term) {
term match {
case Var(n) =>
print(n)
case Fun(x, b) =>
print("^" + x + ".")
printTerm(b)
case App(f, v) =>
Console.print("(")
printTerm(f)
print(" ")
printTerm(v)
print(")")
}
}
def isIdentityFun(term: Term): Boolean = term match {
case Fun(x, Var(y)) if x == y => true
case _ => false
}
val id = Fun("x", Var("x"))
val t = Fun("x", Fun("y", App(Var("x"), Var("y"))))
printTerm(t)
println
println(isIdentityFun(id))
println(isIdentityFun(t))
}
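/* For reference, running this object should print:
 *   ^x.^y.(x y)
 *   true
 *   false
 */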
| uthcode/learntosolveit | languages/scala/TermTest.scala | Scala | bsd-3-clause | 938 |
package de.hska.wifl1011.seminararbeit
import scala.concurrent.{ future, promise }
import scala.concurrent.ExecutionContext.Implicits.global
import scala.util.{ Failure, Success }
object Main {
def main(args: Array[String]) {
val myPromise = promise[Integer]
val myFuture = myPromise.future
val apprentice = future {
val toolbox: Integer = Helper.getToolbox()
myPromise.success(toolbox) // complete the promise
Helper.cleanConstructionSite()
}
val craftsman = future {
Helper.takeMeasurement()
myFuture.onSuccess {
case toolbox: Integer =>
Helper.pickTheRightTool(toolbox)
}
}
    // Doing this would cause an exception because the promise
// has already been completed.
// Thread.sleep(1000)
// myPromise.success(23)
// keep the jvm running
Thread.sleep(3000)
}
}
| fwilhe/hska-seminararbeit | code/constructionsite/Main.scala | Scala | mit | 883 |
package hammock
package circe
import cats.implicits._
import io.circe.{Encoder => CirceEncoder, Decoder => CirceDecoder}
import io.circe.parser.{decode => circeDecode}
import io.circe.syntax._
class HammockEncoderForCirce[A: CirceEncoder] extends Encoder[A] {
override def encode(a: A): Entity =
Entity.StringEntity(a.asJson.noSpaces, ContentType.`application/json`)
}
class HammockDecoderForCirce[A: CirceDecoder] extends Decoder[A] {
override def decode(entity: Entity): Either[CodecException, A] = entity match {
case Entity.StringEntity(str, _) =>
circeDecode[A](str).left.map(err => CodecException.withMessageAndException(err.getMessage, err))
case _: Entity.ByteArrayEntity =>
CodecException.withMessage("unable to decode a ByteArrayEntity. Only StringEntity is supported").asLeft
case Entity.EmptyEntity =>
CodecException.withMessage("unable to decode an EmptyEntity. Only StringEntity is supported").asLeft
}
}
class HammockCodecForCirce[A: CirceEncoder: CirceDecoder] extends Codec[A] {
override def encode(a: A): Entity =
new HammockEncoderForCirce[A].encode(a)
override def decode(entity: Entity): Either[CodecException, A] =
new HammockDecoderForCirce[A].decode(entity)
}
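/* A minimal usage sketch (hypothetical model type; assumes io.circe.generic.semiauto for the
 * circe instances, which is not imported by this file):
 *
 *   import io.circe.generic.semiauto._
 *   case class User(name: String)
 *   implicit val userEnc: CirceEncoder[User] = deriveEncoder
 *   implicit val userDec: CirceDecoder[User] = deriveDecoder
 *
 *   val entity = new HammockCodecForCirce[User].encode(User("ada"))
 *   // Entity.StringEntity("""{"name":"ada"}""", ContentType.`application/json`)
 *   val back   = new HammockCodecForCirce[User].decode(entity)   // Right(User("ada"))
 */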
| pepegar/hammock | hammock-circe/src/main/scala/hammock/circe/CirceCodec.scala | Scala | mit | 1,240 |
/**
* Copyright 2011-2012 eBusiness Information, Groupe Excilys (www.excilys.com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.excilys.ebi.gatling.http.request.builder
import com.excilys.ebi.gatling.core.session.EvaluatableString
object GetHttpRequestBuilder {
def apply(requestName: EvaluatableString, url: EvaluatableString) = new GetHttpRequestBuilder(HttpAttributes(requestName, "GET", url, Nil, Map.empty, None, Nil))
}
/**
 * This class defines an HTTP request built with the word GET in the DSL
*/
class GetHttpRequestBuilder(httpAttributes: HttpAttributes) extends AbstractHttpRequestBuilder[GetHttpRequestBuilder](httpAttributes) {
private[http] def newInstance(httpAttributes: HttpAttributes) = new GetHttpRequestBuilder(httpAttributes)
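	// Usage sketch (hypothetical; relies on the DSL entry point defined elsewhere in this module):
	//   http("home_page").get("http://example.com/") is expected to yield a GetHttpRequestBuilder.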
}
 | Tjoene/thesis | Case_Programs/gatling-1.4.0/gatling-http/src/main/scala/com/excilys/ebi/gatling/http/request/builder/GetHttpRequestBuilder.scala | Scala | gpl-2.0 | 1,274 |
package ch.ninecode.cim
import org.apache.spark.graphx.VertexId
/**
* Vertex data for topological processing.
*
* @param island the minimum (hash code) of all connected ConnectivityNode (single topological island)
* @param island_label a user friendly label for the island
* @param node the minimum (hash code) of equivalent ConnectivityNode (a single topological node)
* @param node_label a user friendly label for the node
* @param voltage the nominal voltage of the node
* @param container the node container
*/
case class CIMVertexData (
island: VertexId = Long.MaxValue,
island_label: String = "",
node: VertexId = Long.MaxValue,
node_label: String = "",
voltage: String = null,
container: String = null)
{
/**
* Generate an appropriate name for the topological node based on the node label.
*
* @return The best guess as to what the topological node should be called.
*/
def name: String =
{
if (node_label.endsWith("_node_fuse"))
s"${node_label.substring(0, node_label.length - "_node_fuse".length)}_fuse_topo"
else
if (node_label.endsWith("_fuse_node"))
s"${node_label.substring(0, node_label.length - "_fuse_node".length)}_fuse_topo"
else
if (node_label.endsWith("_node"))
s"${node_label.substring(0, node_label.length - "_node".length)}_topo"
else
s"${node_label}_topo"
}
}
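// Illustrative behaviour of `name` (labels are made up):
//   CIMVertexData(node_label = "PIN123_node").name      == "PIN123_topo"
//   CIMVertexData(node_label = "PIN123_node_fuse").name == "PIN123_fuse_topo"
//   CIMVertexData(node_label = "PIN123").name           == "PIN123_topo"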
| derrickoswald/CIMScala | CIMReader/src/main/scala/ch/ninecode/cim/CIMVertexData.scala | Scala | mit | 1,519 |
/*
Deduction Tactics
Copyright (C) 2012-2015 Raymond Dodge
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.rayrobdod.deductionTactics
import Elements.Element
import Weaponkinds.Weaponkind
import Statuses.Status
import BodyTypes.BodyType
import Directions.Direction
import scala.collection.immutable.{Seq, Map}
/**
* A description of the attributes of a unit.
*
* @version next
*
* @constructor
* @param name A class's name
* @param body A class's bodytype.
* @param atkElement The element a unit attacks with. Also determines it's defenses against elements.
* @param atkWeapon The weapon a unit attacks with.
* @param atkStatus The status a unit attacks with.
* @param isSpy Whether the unit can perform the "Spy" action
* @param range How far away from itself a unit can attack.
* @param speed How far a unit can move in one turn.
* @param weakDirection When a unit is attacked from this direction, the attack is strongest
 * @param weakWeapon The weaknesses when a unit is attacked with each kind of weapon
* @param weakStatus When a unit is attacked while suffering this status, the attack is strongest
*/
final case class TokenClass(
val name:String,
val body:BodyType,
val atkElement:Element,
val atkWeapon:Weaponkind,
val atkStatus:Status,
val isSpy:Boolean,
val range:Int,
val speed:Int,
val weakDirection:Direction,
val weakWeapon:Map[Weaponkind,Float],
val weakStatus:Status
)
/**
* Loads token classes as a service.
*
* @version next
*/
object TokenClass
{
private[this] final class WeaponWeaknessMap(blade:Float, blunt:Float, spear:Float, whip:Float, powder:Float) extends Map[Weaponkind, Float] {
override def apply(key:Weaponkind):Float = key match {
case Weaponkinds.Bladekind => this.blade
case Weaponkinds.Bluntkind => this.blunt
case Weaponkinds.Spearkind => this.spear
case Weaponkinds.Whipkind => this.whip
case Weaponkinds.Powderkind => this.powder
}
def get(key:Weaponkind):Option[Float] = Option(this.apply(key))
def iterator:Iterator[(Weaponkind, Float)] = Weaponkinds.values.map{x => ((x, this(x)))}.iterator
def +[B1 >: Float](kv:(Weaponkind, B1)):Map[Weaponkind, B1] = this.iterator.toMap + kv
def -(k:Weaponkind):Map[Weaponkind, Float] = this.iterator.toMap - k
}
private[this] def basicTokens:Seq[TokenClass] = {
val elems:Seq[(Element, String, Status)] = Seq(
(Elements.Light, "Shining", Statuses.Blind),
(Elements.Electric, "Static", Statuses.Neuro),
(Elements.Fire, "Flaming", Statuses.Burn),
(Elements.Frost, "Frosty", Statuses.Sleep),
(Elements.Sound, "Sonic", Statuses.Confuse)
)
val weapons:Seq[(Weaponkind, String, Status, WeaponWeaknessMap)] = Seq(
(Weaponkinds.Bladekind, "Swordsman", Statuses.Sleep, new WeaponWeaknessMap(blade = .5f, blunt = .75f, spear = 1.5f, whip = 2f, powder = 1f)),
(Weaponkinds.Bluntkind, "Clubsman", Statuses.Burn, new WeaponWeaknessMap(blade = 1.5f, blunt = .5f, spear = 2f, whip = 1f, powder = .75f)),
(Weaponkinds.Spearkind, "Pikeman", Statuses.Blind, new WeaponWeaknessMap(blade = 2f, blunt = 1f, spear = .5f, whip = .75f, powder = 1.5f)),
(Weaponkinds.Whipkind, "Whipman", Statuses.Confuse, new WeaponWeaknessMap(blade = 1f, blunt = 1.5f, spear = .75f, whip = .5f, powder = 2f)),
(Weaponkinds.Powderkind, "Powderman", Statuses.Neuro, new WeaponWeaknessMap(blade = .75f, blunt = 2f, spear = 1f, whip = 1.5f, powder = .5f))
)
for (
(atkElement, elemNamePart, atkStatus) <- elems;
(atkWeapon, weaponNamePart, weakStatus, weakWeapon) <- weapons
) yield {
TokenClass(
name = s"$elemNamePart $weaponNamePart",
atkElement = atkElement,
atkWeapon = atkWeapon,
atkStatus = atkStatus,
weakDirection = Directions.Left,
weakWeapon = weakWeapon,
weakStatus = weakStatus,
body = BodyTypes.Humanoid,
isSpy = false,
range = 1,
speed = 3
)
}
}
private[this] def birdTokens:Seq[TokenClass] = Seq(
TokenClass(
name = "Eagle",
atkElement = Elements.Fire,
atkWeapon = Weaponkinds.Spearkind,
atkStatus = Statuses.Blind,
range = 1,
speed = 4,
body = BodyTypes.Avian,
weakWeapon = new WeaponWeaknessMap(blade = 0.75f, blunt = 2f, spear = 1f, whip = 1.5f, powder = 0.5f),
weakStatus = Statuses.Sleep,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Falcon",
atkElement = Elements.Electric,
atkWeapon = Weaponkinds.Spearkind,
atkStatus = Statuses.Blind,
range = 1,
speed = 5,
body = BodyTypes.Avian,
weakWeapon = new WeaponWeaknessMap(blade = 1f, blunt = 2f, spear = 0.75f, whip = 1.5f, powder = 0.5f),
weakStatus = Statuses.Blind,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Penguin",
atkElement = Elements.Frost,
atkWeapon = Weaponkinds.Spearkind,
atkStatus = Statuses.Blind,
range = 1,
speed = 3,
body = BodyTypes.Avian,
// It can't fly, but the game will act like it can?,
weakWeapon = new WeaponWeaknessMap(blade = 1f, blunt = 1.5f, spear = 0.75f, whip = 2f, powder = 0.5f),
weakStatus = Statuses.Confuse,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Turkey",
atkElement = Elements.Light,
atkWeapon = Weaponkinds.Spearkind,
atkStatus = Statuses.Blind,
range = 1,
speed = 3,
body = BodyTypes.Avian,
weakWeapon = new WeaponWeaknessMap(blade = 1f, blunt = 2f, spear = 0.75f, whip = 1.5f, powder = 0.5f),
weakStatus = Statuses.Burn,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Bluebird",
atkElement = Elements.Sound,
// songbird, yes?,
atkWeapon = Weaponkinds.Powderkind,
atkStatus = Statuses.Sleep,
range = 1,
speed = 3,
body = BodyTypes.Avian,
weakWeapon = new WeaponWeaknessMap(blade = 1f, blunt = 1.5f, spear = 0.75f, whip = 2f, powder = 0.5f),
weakStatus = Statuses.Confuse,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Cardinal",
atkElement = Elements.Sound,
atkWeapon = Weaponkinds.Powderkind,
atkStatus = Statuses.Confuse,
// colors confuse predators trying to go for the eggs,
range = 1,
speed = 3,
body = BodyTypes.Avian,
weakWeapon = new WeaponWeaknessMap(blade = 1f, blunt = 2f, spear = 0.75f, whip = 1.5f, powder = 0.5f),
weakStatus = Statuses.Sleep,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Crow",
// and/or raven,
atkElement = Elements.Light,
atkWeapon = Weaponkinds.Bladekind,
atkStatus = Statuses.Confuse,
range = 1,
speed = 3,
body = BodyTypes.Avian,
weakWeapon = new WeaponWeaknessMap(blade = 1.5f, blunt = 2f, spear = 1f, whip = 0.75f, powder = 0.5f),
weakStatus = Statuses.Neuro,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Parrot",
atkElement = Elements.Sound,
atkWeapon = Weaponkinds.Powderkind,
atkStatus = Statuses.Sleep,
range = 2,
speed = 3,
body = BodyTypes.Avian,
weakWeapon = new WeaponWeaknessMap(blade = 1.5f, blunt = 2f, spear = 1f, whip = 1.5f, powder = 0.5f),
weakStatus = Statuses.Confuse,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Thunderbird",
// Fictional: Native American mythology,
atkElement = Elements.Electric,
atkWeapon = Weaponkinds.Powderkind,
atkStatus = Statuses.Neuro,
range = 1,
speed = 3,
body = BodyTypes.Avian,
weakWeapon = new WeaponWeaknessMap(blade = 0.75f, blunt = 1.5f, spear = 2f, whip = 1f, powder = 0.5f),
weakStatus = Statuses.Blind,
// Thunderbolts come from eyes, apparently,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Pigeon",
atkElement = Elements.Fire,
// According to Wikipedia, Passenger Pigeons were apparently a major cause of Forest Fires
// Also, they commonly suffered from migrating too early
// Also also, in this game, Fire << Frost
atkWeapon = Weaponkinds.Powderkind,
atkStatus = Statuses.Confuse,
range = 1,
speed = 4,
body = BodyTypes.Avian,
weakWeapon = new WeaponWeaknessMap(blade = 1.5f, blunt = 0.5f, spear = 0.75f, whip = 1f, powder = 2f),
weakStatus = Statuses.Sleep,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Owl",
atkElement = Elements.Sound,
atkWeapon = Weaponkinds.Spearkind,
atkStatus = Statuses.Sleep,
range = 1,
speed = 3,
body = BodyTypes.Avian,
weakWeapon = new WeaponWeaknessMap(blade = 1f, blunt = 1.5f, spear = 0.5f, whip = 0.75f, powder = 2f),
weakStatus = Statuses.Sleep,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Phoenix",
atkElement = Elements.Fire,
atkWeapon = Weaponkinds.Spearkind,
atkStatus = Statuses.Blind,
// Nope. No Harry Potter reference here.
range = 1,
speed = 3,
body = BodyTypes.Avian,
weakWeapon = new WeaponWeaknessMap(blade = 1.5f, blunt = 1f, spear = 0.75f, whip = 2f, powder = 0.5f),
weakStatus = Statuses.Sleep,
weakDirection = Directions.Left,
isSpy = false
)
)
private[this] def sportTokens:Seq[TokenClass] = Seq(
TokenClass(
name = "Bowler",
atkElement = Elements.Frost,
atkWeapon = Weaponkinds.Bluntkind,
atkStatus = Statuses.Burn,
range = 3,
speed = 2,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 0.75f, blunt = 1f, spear = 2f, whip = 0.5f, powder = 1.5f),
weakStatus = Statuses.Blind,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Baseball Pitcher",
atkElement = Elements.Light,
atkWeapon = Weaponkinds.Bluntkind,
atkStatus = Statuses.Confuse,
range = 2,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 1f, blunt = 2f, spear = 0.5f, whip = 1.5f, powder = 0.75f),
weakStatus = Statuses.Blind,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Baseball Batter",
atkElement = Elements.Sound,
atkWeapon = Weaponkinds.Bluntkind,
atkStatus = Statuses.Neuro,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 2f, blunt = 0.5f, spear = 1.5f, whip = 1f, powder = 1.25f),
weakStatus = Statuses.Burn,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
      // not sure how important direction is - it seems like the first thing someone would find out, after attacking elements
name = "Lefty Batter",
atkElement = Elements.Sound,
atkWeapon = Weaponkinds.Bluntkind,
atkStatus = Statuses.Neuro,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 2f, blunt = 0.5f, spear = 1.5f, whip = 1f, powder = 1.25f),
weakStatus = Statuses.Burn,
weakDirection = Directions.Right,
isSpy = false
),
TokenClass(
name = "Soccer Striker",
atkElement = Elements.Electric,
atkWeapon = Weaponkinds.Bluntkind,
atkStatus = Statuses.Snake,
range = 2,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 1.25f, blunt = 1.5f, spear = 2f, whip = 1f, powder = 0.5f),
weakStatus = Statuses.Snake,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Pigskin Quarterback",
atkElement = Elements.Fire,
atkWeapon = Weaponkinds.Spearkind,
atkStatus = Statuses.Confuse,
range = 3,
speed = 2,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 0.75f, blunt = 2f, spear = 1.5f, whip = 0.5f, powder = 1f),
weakStatus = Statuses.Blind,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Pigskin Lineback",
atkElement = Elements.Fire,
atkWeapon = Weaponkinds.Bluntkind,
atkStatus = Statuses.Neuro,
// flattening is close enough
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 0.75f, blunt = 0.5f, spear = 1.5f, whip = 1f, powder = 2f),
weakStatus = Statuses.Confuse,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Hockey Player",
atkElement = Elements.Frost,
atkWeapon = Weaponkinds.Bladekind,
atkStatus = Statuses.Confuse,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 1.5f, blunt = 0.75f, spear = 2f, whip = 1f, powder = 0.5f),
weakStatus = Statuses.Sleep,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Lacrosse Sticksman",
atkElement = Elements.Light,
atkWeapon = Weaponkinds.Whipkind,
atkStatus = Statuses.Burn,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 2f, blunt = 0.5f, spear = 1f, whip = 0.75f, powder = 1.5f),
weakStatus = Statuses.Snake,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Tennis Racketman",
atkElement = Elements.Sound,
atkWeapon = Weaponkinds.Whipkind,
atkStatus = Statuses.Burn,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 0.75f, blunt = 1f, spear = 1.5f, whip = 0.5f, powder = 2f),
weakStatus = Statuses.Blind,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Golfer",
atkElement = Elements.Sound,
atkWeapon = Weaponkinds.Bluntkind,
atkStatus = Statuses.Snake,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 0.5f, blunt = 1f, spear = 2f, whip = 1.5f, powder = 0.75f),
weakStatus = Statuses.Blind,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Runner",
// More details about the attack? tvtropes:Main/CrashIntoHello
atkElement = Elements.Electric,
atkWeapon = Weaponkinds.Bluntkind,
atkStatus = Statuses.Sleep,
range = 1,
speed = 5,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 1.25f, blunt = 2f, spear = 1f, whip = 0.5f, powder = 1.5f),
weakStatus = Statuses.Snake,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Volleyball hitter",
atkElement = Elements.Sound,
atkWeapon = Weaponkinds.Bluntkind,
atkStatus = Statuses.Confuse,
range = 2,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 1.25f, blunt = 2f, spear = 1f, whip = 0.5f, powder = 1.5f),
weakStatus = Statuses.Blind,
// All lighting comes from the east side, so clearly the sun is that way
weakDirection = Directions.Right,
isSpy = false
)
)
private[this] def miscTokens:Seq[TokenClass] = Seq(
TokenClass(
name = "Ninja",
atkElement = Elements.Sound,
atkWeapon = Weaponkinds.Bladekind,
atkStatus = Statuses.Confuse,
range = 1,
speed = 5,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 1f, blunt = 0.5f, spear = 1.75f, whip = 2f, powder = 1f),
weakStatus = Statuses.Neuro,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Farmer",
atkElement = Elements.Light,
atkWeapon = Weaponkinds.Spearkind,
atkStatus = Statuses.Snake,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 1.5f ,blunt = 0.5f, spear = 0.75f ,whip = 1f, powder = 2f),
weakStatus = Statuses.Blind,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Umbreallaswordsman",
atkElement = Elements.Frost,
atkWeapon = Weaponkinds.Bladekind,
atkStatus = Statuses.Blind,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 1.5f, blunt = 1f, spear = 2f ,whip = 0.75f, powder = 0.5f),
weakStatus = Statuses.Blind,
weakDirection = Directions.Down,
isSpy = false
),
TokenClass(
name = "Umbreallagunsman",
atkElement = Elements.Frost,
atkWeapon = Weaponkinds.Spearkind,
atkStatus = Statuses.Confuse,
range = 2,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 1.5f, blunt = 1f, spear = 2f ,whip = 1f, powder = 0.75f),
weakStatus = Statuses.Blind,
weakDirection = Directions.Down,
isSpy = false
),
TokenClass(
name = "Pyro",
atkElement = Elements.Fire,
atkWeapon = Weaponkinds.Powderkind,
atkStatus = Statuses.Burn,
range = 2,
speed = 2,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 2f, blunt = 1.5f, spear = 0.5f ,whip = 0.5f, powder = 0.75f),
weakStatus = Statuses.Neuro,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Torchbearer",
atkElement = Elements.Fire,
atkWeapon = Weaponkinds.Bluntkind,
atkStatus = Statuses.Burn,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 0.75f, blunt = 0.5f, spear = 1.5f ,whip = 2f, powder = 1f),
weakStatus = Statuses.Sleep,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Yoyoer",
atkElement = Elements.Light,
atkWeapon = Weaponkinds.Whipkind,
atkStatus = Statuses.Confuse,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 2f, blunt = 0.5f, spear = 1.5f, whip = 1f, powder = 0.75f),
weakStatus = Statuses.Confuse,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Flashlightman",
atkElement = Elements.Light,
atkWeapon = Weaponkinds.Powderkind,
atkStatus = Statuses.Blind,
range = 2,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 1f, blunt = 1.5f, spear = 2f, whip = 0.75f, powder = 0.5f),
weakStatus = Statuses.Confuse,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Medic",
atkElement = Elements.Light,
atkWeapon = Weaponkinds.Bluntkind,
atkStatus = Statuses.Heal,
      // It's not possible to attack your own units, so this is very counterproductive
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 0.5f, blunt = 0.75f, spear = 2f, whip = 1f, powder = 1.5f),
weakStatus = Statuses.Confuse,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Haunted Hoodie",
atkElement = Elements.Sound,
atkWeapon = Weaponkinds.Whipkind,
// Silence shouldn't become an option, but if it did…
atkStatus = Statuses.Confuse,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 2f, blunt = 0.5f, spear = 1.5f, whip = 0.75f, powder = 1f),
// Silence shouldn't become an option, but if it did…
weakStatus = Statuses.Sleep,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
// This one isn't decorated; a more shocking one later?
name = "Tiny Evergreen Tree",
atkElement = Elements.Electric,
atkWeapon = Weaponkinds.Spearkind,
atkStatus = Statuses.Blind,
range = 3,
speed = 2,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 2f, blunt = 0.75f, spear = 1f, whip = 0.75f, powder = 0.5f),
weakStatus = Statuses.Burn,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Snowman",
atkElement = Elements.Frost,
atkWeapon = Weaponkinds.Powderkind,
atkStatus = Statuses.Sleep,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 1f, blunt = 1.5f, spear = 0.75f, whip = 0.5f, powder = 2f),
weakStatus = Statuses.Burn,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
// Holmes
name = "Detective",
atkElement = Elements.Light,
atkWeapon = Weaponkinds.Powderkind,
atkStatus = Statuses.Sleep,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 2f, blunt = 1f, spear = 1.5f, whip = 0.5f, powder = 0.75f),
weakStatus = Statuses.Snake,
weakDirection = Directions.Left,
isSpy = true
),
TokenClass(
// Ms. Vitch, TF2 Spy
// I'm debating whether spies should be allowed to attack for damage; if not this specific class should be able to stance change between spy and not-spy
name = "Infiltrator",
atkElement = Elements.Frost,
atkWeapon = Weaponkinds.Bladekind,
atkStatus = Statuses.Confuse,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 0.75f, blunt = 1.5f, spear = 1f, whip = 2f, powder = 0.5f),
weakStatus = Statuses.Neuro,
weakDirection = Directions.Left,
isSpy = true
),
TokenClass(
name = "Eavesdropper",
atkElement = Elements.Sound,
atkWeapon = Weaponkinds.Powderkind,
atkStatus = Statuses.Blind,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 1f, blunt = 1.5f, spear = 0.75f, whip = 0.5f, powder = 2f),
weakStatus = Statuses.Confuse,
weakDirection = Directions.Left,
isSpy = true
),
TokenClass(
// I actually think I'll make the format the other one, where both stances are in the same JsonObject
// The intention is to make both Rangers alternate stances
name = "Ranger (sword)",
atkElement = Elements.Electric,
atkWeapon = Weaponkinds.Bladekind,
atkStatus = Statuses.Blind,
range = 1,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 1f, blunt = 0.75f, spear = 2f, whip = 1.5f, powder = 0.5f),
weakStatus = Statuses.Neuro,
weakDirection = Directions.Down,
isSpy = false
),
TokenClass(
name = "Ranger (arrow)",
atkElement = Elements.Electric,
atkWeapon = Weaponkinds.Spearkind,
atkStatus = Statuses.Blind,
range = 2,
speed = 3,
body = BodyTypes.Humanoid,
weakWeapon = new WeaponWeaknessMap(blade = 1f, blunt = 0.75f, spear = 2f, whip = 1.5f, powder = 0.5f),
weakStatus = Statuses.Neuro,
weakDirection = Directions.Down,
isSpy = false
),
TokenClass(
name = "Magenta Lion",
// Entei; Qilin; Magma Lion
atkElement = Elements.Fire,
atkWeapon = Weaponkinds.Powderkind,
atkStatus = Statuses.Burn,
range = 1,
speed = 5,
body = BodyTypes.Gerbil,
weakWeapon = new WeaponWeaknessMap(blade = 1.25f, blunt = 0.5f, spear = 1.5f, whip = 1f, powder = 2f),
weakStatus = Statuses.Blind,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Yellow Tiger",
// Raikou; Raiju; ???
atkElement = Elements.Electric,
atkWeapon = Weaponkinds.Powderkind,
atkStatus = Statuses.Neuro,
range = 1,
speed = 5,
body = BodyTypes.Gerbil,
weakWeapon = new WeaponWeaknessMap(blade = 1.25f, blunt = 0.5f, spear = 1.5f, whip = 1f, powder = 2f),
weakStatus = Statuses.Blind,
weakDirection = Directions.Left,
isSpy = false
),
TokenClass(
name = "Cyan Leopard",
// Suicune; ???; Snow Leopard
atkElement = Elements.Frost,
atkWeapon = Weaponkinds.Powderkind,
atkStatus = Statuses.Sleep,
range = 1,
speed = 5,
body = BodyTypes.Gerbil,
weakWeapon = new WeaponWeaknessMap(blade = 1.25f, blunt = 0.5f, spear = 1.5f, whip = 1f, powder = 2f),
weakStatus = Statuses.Blind,
weakDirection = Directions.Left,
isSpy = false
)
)
val allKnown:Seq[TokenClass] = {
basicTokens ++
birdTokens ++
sportTokens ++
miscTokens
}
}
| rayrobdod/deductionTactics | src/main/scala/com/rayrobdod/deductionTactics/TokenClass.scala | Scala | gpl-3.0 | 23,955 |
// generated by tscfg 0.9.4 on Sat Jun 15 02:49:06 IST 2019
// source: src/main/resources/beam-template.conf
package beam.sim.config
case class BeamConfig(
beam: BeamConfig.Beam,
matsim: BeamConfig.Matsim
)
object BeamConfig {
case class Beam(
agentsim: BeamConfig.Beam.Agentsim,
calibration: BeamConfig.Beam.Calibration,
cluster: BeamConfig.Beam.Cluster,
debug: BeamConfig.Beam.Debug,
exchange: BeamConfig.Beam.Exchange,
experimental: BeamConfig.Beam.Experimental,
inputDirectory: java.lang.String,
logger: BeamConfig.Beam.Logger,
metrics: BeamConfig.Beam.Metrics,
outputs: BeamConfig.Beam.Outputs,
physsim: BeamConfig.Beam.Physsim,
replanning: BeamConfig.Beam.Replanning,
routing: BeamConfig.Beam.Routing,
spatial: BeamConfig.Beam.Spatial,
useLocalWorker: scala.Boolean,
warmStart: BeamConfig.Beam.WarmStart
)
object Beam {
case class Agentsim(
agentSampleSizeAsFractionOfPopulation: scala.Double,
agents: BeamConfig.Beam.Agentsim.Agents,
endTime: java.lang.String,
firstIteration: scala.Int,
lastIteration: scala.Int,
numAgents: scala.Int,
populationAdjustment: java.lang.String,
scenarios: BeamConfig.Beam.Agentsim.Scenarios,
scheduleMonitorTask: BeamConfig.Beam.Agentsim.ScheduleMonitorTask,
schedulerParallelismWindow: scala.Int,
simulationName: java.lang.String,
startTime: java.lang.String,
taz: BeamConfig.Beam.Agentsim.Taz,
thresholdForMakingParkingChoiceInMeters: scala.Int,
thresholdForWalkingInMeters: scala.Int,
timeBinSize: scala.Int,
toll: BeamConfig.Beam.Agentsim.Toll,
tuning: BeamConfig.Beam.Agentsim.Tuning
)
object Agentsim {
case class Agents(
bodyType: java.lang.String,
households: BeamConfig.Beam.Agentsim.Agents.Households,
modalBehaviors: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors,
modeIncentive: BeamConfig.Beam.Agentsim.Agents.ModeIncentive,
plans: BeamConfig.Beam.Agentsim.Agents.Plans,
population: BeamConfig.Beam.Agentsim.Agents.Population,
ptFare: BeamConfig.Beam.Agentsim.Agents.PtFare,
rideHail: BeamConfig.Beam.Agentsim.Agents.RideHail,
rideHailTransit: BeamConfig.Beam.Agentsim.Agents.RideHailTransit,
vehicles: BeamConfig.Beam.Agentsim.Agents.Vehicles
)
object Agents {
case class Households(
inputFilePath: java.lang.String,
inputHouseholdAttributesFilePath: java.lang.String
)
object Households {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.Households = {
BeamConfig.Beam.Agentsim.Agents.Households(
inputFilePath =
if (c.hasPathOrNull("inputFilePath")) c.getString("inputFilePath")
else "/test/input/beamville/households.xml.gz",
inputHouseholdAttributesFilePath =
if (c.hasPathOrNull("inputHouseholdAttributesFilePath")) c.getString("inputHouseholdAttributesFilePath")
else "/test/input/beamville/householdAttributes.xml.gz"
)
}
}
case class ModalBehaviors(
defaultValueOfTime: scala.Double,
highTimeSensitivity: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity,
lccm: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.Lccm,
lowTimeSensitivity: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity,
maximumNumberOfReplanningAttempts: scala.Int,
modeChoiceClass: java.lang.String,
modeVotMultiplier: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.ModeVotMultiplier,
mulitnomialLogit: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.MulitnomialLogit,
overrideAutomationForVOTT: scala.Boolean,
overrideAutomationLevel: scala.Int,
poolingMultiplier: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.PoolingMultiplier
)
object ModalBehaviors {
case class HighTimeSensitivity(
highCongestion: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.HighCongestion,
lowCongestion: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.LowCongestion
)
object HighTimeSensitivity {
case class HighCongestion(
highwayFactor: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.HighCongestion.HighwayFactor,
nonHighwayFactor: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.HighCongestion.NonHighwayFactor
)
object HighCongestion {
case class HighwayFactor(
Level3: scala.Double,
Level4: scala.Double,
Level5: scala.Double,
LevelLE2: scala.Double
)
object HighwayFactor {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.HighCongestion.HighwayFactor = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.HighCongestion.HighwayFactor(
Level3 = if (c.hasPathOrNull("Level3")) c.getDouble("Level3") else 1.0,
Level4 = if (c.hasPathOrNull("Level4")) c.getDouble("Level4") else 1.0,
Level5 = if (c.hasPathOrNull("Level5")) c.getDouble("Level5") else 1.0,
LevelLE2 = if (c.hasPathOrNull("LevelLE2")) c.getDouble("LevelLE2") else 1.0
)
}
}
case class NonHighwayFactor(
Level3: scala.Double,
Level4: scala.Double,
Level5: scala.Double,
LevelLE2: scala.Double
)
object NonHighwayFactor {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.HighCongestion.NonHighwayFactor = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.HighCongestion.NonHighwayFactor(
Level3 = if (c.hasPathOrNull("Level3")) c.getDouble("Level3") else 1.0,
Level4 = if (c.hasPathOrNull("Level4")) c.getDouble("Level4") else 1.0,
Level5 = if (c.hasPathOrNull("Level5")) c.getDouble("Level5") else 1.0,
LevelLE2 = if (c.hasPathOrNull("LevelLE2")) c.getDouble("LevelLE2") else 1.0
)
}
}
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.HighCongestion = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.HighCongestion(
highwayFactor =
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.HighCongestion.HighwayFactor(
if (c.hasPathOrNull("highwayFactor")) c.getConfig("highwayFactor")
else com.typesafe.config.ConfigFactory.parseString("highwayFactor{}")
),
nonHighwayFactor =
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.HighCongestion.NonHighwayFactor(
if (c.hasPathOrNull("nonHighwayFactor")) c.getConfig("nonHighwayFactor")
else com.typesafe.config.ConfigFactory.parseString("nonHighwayFactor{}")
)
)
}
}
case class LowCongestion(
highwayFactor: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.LowCongestion.HighwayFactor,
nonHighwayFactor: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.LowCongestion.NonHighwayFactor
)
object LowCongestion {
case class HighwayFactor(
Level3: scala.Double,
Level4: scala.Double,
Level5: scala.Double,
LevelLE2: scala.Double
)
object HighwayFactor {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.LowCongestion.HighwayFactor = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.LowCongestion.HighwayFactor(
Level3 = if (c.hasPathOrNull("Level3")) c.getDouble("Level3") else 1.0,
Level4 = if (c.hasPathOrNull("Level4")) c.getDouble("Level4") else 1.0,
Level5 = if (c.hasPathOrNull("Level5")) c.getDouble("Level5") else 1.0,
LevelLE2 = if (c.hasPathOrNull("LevelLE2")) c.getDouble("LevelLE2") else 1.0
)
}
}
case class NonHighwayFactor(
Level3: scala.Double,
Level4: scala.Double,
Level5: scala.Double,
LevelLE2: scala.Double
)
object NonHighwayFactor {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.LowCongestion.NonHighwayFactor = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.LowCongestion.NonHighwayFactor(
Level3 = if (c.hasPathOrNull("Level3")) c.getDouble("Level3") else 1.0,
Level4 = if (c.hasPathOrNull("Level4")) c.getDouble("Level4") else 1.0,
Level5 = if (c.hasPathOrNull("Level5")) c.getDouble("Level5") else 1.0,
LevelLE2 = if (c.hasPathOrNull("LevelLE2")) c.getDouble("LevelLE2") else 1.0
)
}
}
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.LowCongestion = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.LowCongestion(
highwayFactor =
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.LowCongestion.HighwayFactor(
if (c.hasPathOrNull("highwayFactor")) c.getConfig("highwayFactor")
else com.typesafe.config.ConfigFactory.parseString("highwayFactor{}")
),
nonHighwayFactor =
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.LowCongestion.NonHighwayFactor(
if (c.hasPathOrNull("nonHighwayFactor")) c.getConfig("nonHighwayFactor")
else com.typesafe.config.ConfigFactory.parseString("nonHighwayFactor{}")
)
)
}
}
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity(
highCongestion = BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.HighCongestion(
if (c.hasPathOrNull("highCongestion")) c.getConfig("highCongestion")
else com.typesafe.config.ConfigFactory.parseString("highCongestion{}")
),
lowCongestion = BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity.LowCongestion(
if (c.hasPathOrNull("lowCongestion")) c.getConfig("lowCongestion")
else com.typesafe.config.ConfigFactory.parseString("lowCongestion{}")
)
)
}
}
case class Lccm(
filePath: java.lang.String
)
object Lccm {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.Lccm = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.Lccm(
filePath =
if (c.hasPathOrNull("filePath")) c.getString("filePath") else "/test/input/beamville/lccm-long.csv"
)
}
}
case class LowTimeSensitivity(
highCongestion: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.HighCongestion,
lowCongestion: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.LowCongestion
)
object LowTimeSensitivity {
case class HighCongestion(
highwayFactor: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.HighCongestion.HighwayFactor,
nonHighwayFactor: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.HighCongestion.NonHighwayFactor
)
object HighCongestion {
case class HighwayFactor(
Level3: scala.Double,
Level4: scala.Double,
Level5: scala.Double,
LevelLE2: scala.Double
)
object HighwayFactor {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.HighCongestion.HighwayFactor = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.HighCongestion.HighwayFactor(
Level3 = if (c.hasPathOrNull("Level3")) c.getDouble("Level3") else 1.0,
Level4 = if (c.hasPathOrNull("Level4")) c.getDouble("Level4") else 1.0,
Level5 = if (c.hasPathOrNull("Level5")) c.getDouble("Level5") else 1.0,
LevelLE2 = if (c.hasPathOrNull("LevelLE2")) c.getDouble("LevelLE2") else 1.0
)
}
}
case class NonHighwayFactor(
Level3: scala.Double,
Level4: scala.Double,
Level5: scala.Double,
LevelLE2: scala.Double
)
object NonHighwayFactor {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.HighCongestion.NonHighwayFactor = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.HighCongestion.NonHighwayFactor(
Level3 = if (c.hasPathOrNull("Level3")) c.getDouble("Level3") else 1.0,
Level4 = if (c.hasPathOrNull("Level4")) c.getDouble("Level4") else 1.0,
Level5 = if (c.hasPathOrNull("Level5")) c.getDouble("Level5") else 1.0,
LevelLE2 = if (c.hasPathOrNull("LevelLE2")) c.getDouble("LevelLE2") else 1.0
)
}
}
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.HighCongestion = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.HighCongestion(
highwayFactor =
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.HighCongestion.HighwayFactor(
if (c.hasPathOrNull("highwayFactor")) c.getConfig("highwayFactor")
else com.typesafe.config.ConfigFactory.parseString("highwayFactor{}")
),
nonHighwayFactor =
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.HighCongestion.NonHighwayFactor(
if (c.hasPathOrNull("nonHighwayFactor")) c.getConfig("nonHighwayFactor")
else com.typesafe.config.ConfigFactory.parseString("nonHighwayFactor{}")
)
)
}
}
case class LowCongestion(
highwayFactor: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.LowCongestion.HighwayFactor,
nonHighwayFactor: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.LowCongestion.NonHighwayFactor
)
object LowCongestion {
case class HighwayFactor(
Level3: scala.Double,
Level4: scala.Double,
Level5: scala.Double,
LevelLE2: scala.Double
)
object HighwayFactor {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.LowCongestion.HighwayFactor = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.LowCongestion.HighwayFactor(
Level3 = if (c.hasPathOrNull("Level3")) c.getDouble("Level3") else 1.0,
Level4 = if (c.hasPathOrNull("Level4")) c.getDouble("Level4") else 1.0,
Level5 = if (c.hasPathOrNull("Level5")) c.getDouble("Level5") else 1.0,
LevelLE2 = if (c.hasPathOrNull("LevelLE2")) c.getDouble("LevelLE2") else 1.0
)
}
}
case class NonHighwayFactor(
Level3: scala.Double,
Level4: scala.Double,
Level5: scala.Double,
LevelLE2: scala.Double
)
object NonHighwayFactor {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.LowCongestion.NonHighwayFactor = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.LowCongestion.NonHighwayFactor(
Level3 = if (c.hasPathOrNull("Level3")) c.getDouble("Level3") else 1.0,
Level4 = if (c.hasPathOrNull("Level4")) c.getDouble("Level4") else 1.0,
Level5 = if (c.hasPathOrNull("Level5")) c.getDouble("Level5") else 1.0,
LevelLE2 = if (c.hasPathOrNull("LevelLE2")) c.getDouble("LevelLE2") else 1.0
)
}
}
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.LowCongestion = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.LowCongestion(
highwayFactor =
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.LowCongestion.HighwayFactor(
if (c.hasPathOrNull("highwayFactor")) c.getConfig("highwayFactor")
else com.typesafe.config.ConfigFactory.parseString("highwayFactor{}")
),
nonHighwayFactor =
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.LowCongestion.NonHighwayFactor(
if (c.hasPathOrNull("nonHighwayFactor")) c.getConfig("nonHighwayFactor")
else com.typesafe.config.ConfigFactory.parseString("nonHighwayFactor{}")
)
)
}
}
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity(
highCongestion = BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.HighCongestion(
if (c.hasPathOrNull("highCongestion")) c.getConfig("highCongestion")
else com.typesafe.config.ConfigFactory.parseString("highCongestion{}")
),
lowCongestion = BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity.LowCongestion(
if (c.hasPathOrNull("lowCongestion")) c.getConfig("lowCongestion")
else com.typesafe.config.ConfigFactory.parseString("lowCongestion{}")
)
)
}
}
case class ModeVotMultiplier(
CAV: scala.Double,
bike: scala.Double,
drive: scala.Double,
rideHail: scala.Double,
rideHailPooled: scala.Double,
rideHailTransit: scala.Double,
transit: scala.Double,
waiting: scala.Double,
walk: scala.Double
)
object ModeVotMultiplier {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.ModeVotMultiplier = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.ModeVotMultiplier(
CAV = if (c.hasPathOrNull("CAV")) c.getDouble("CAV") else 1.0,
bike = if (c.hasPathOrNull("bike")) c.getDouble("bike") else 1.0,
drive = if (c.hasPathOrNull("drive")) c.getDouble("drive") else 1.0,
rideHail = if (c.hasPathOrNull("rideHail")) c.getDouble("rideHail") else 1.0,
rideHailPooled = if (c.hasPathOrNull("rideHailPooled")) c.getDouble("rideHailPooled") else 1.0,
rideHailTransit = if (c.hasPathOrNull("rideHailTransit")) c.getDouble("rideHailTransit") else 1.0,
transit = if (c.hasPathOrNull("transit")) c.getDouble("transit") else 1.0,
waiting = if (c.hasPathOrNull("waiting")) c.getDouble("waiting") else 1.0,
walk = if (c.hasPathOrNull("walk")) c.getDouble("walk") else 1.0
)
}
}
case class MulitnomialLogit(
params: BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.MulitnomialLogit.Params
)
object MulitnomialLogit {
case class Params(
bike_intercept: scala.Double,
car_intercept: scala.Double,
cav_intercept: scala.Double,
drive_transit_intercept: scala.Double,
ride_hail_intercept: scala.Double,
ride_hail_pooled_intercept: scala.Double,
ride_hail_transit_intercept: scala.Double,
transfer: scala.Double,
walk_intercept: scala.Double,
walk_transit_intercept: scala.Double
)
object Params {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.MulitnomialLogit.Params = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.MulitnomialLogit.Params(
bike_intercept = if (c.hasPathOrNull("bike_intercept")) c.getDouble("bike_intercept") else 0.0,
car_intercept = if (c.hasPathOrNull("car_intercept")) c.getDouble("car_intercept") else 0.0,
cav_intercept = if (c.hasPathOrNull("cav_intercept")) c.getDouble("cav_intercept") else 0.0,
drive_transit_intercept =
if (c.hasPathOrNull("drive_transit_intercept")) c.getDouble("drive_transit_intercept") else 0.0,
ride_hail_intercept =
if (c.hasPathOrNull("ride_hail_intercept")) c.getDouble("ride_hail_intercept") else 0.0,
ride_hail_pooled_intercept =
if (c.hasPathOrNull("ride_hail_pooled_intercept")) c.getDouble("ride_hail_pooled_intercept")
else 0.0,
ride_hail_transit_intercept =
if (c.hasPathOrNull("ride_hail_transit_intercept")) c.getDouble("ride_hail_transit_intercept")
else 0.0,
transfer = if (c.hasPathOrNull("transfer")) c.getDouble("transfer") else -1.4,
walk_intercept = if (c.hasPathOrNull("walk_intercept")) c.getDouble("walk_intercept") else 0.0,
walk_transit_intercept =
if (c.hasPathOrNull("walk_transit_intercept")) c.getDouble("walk_transit_intercept") else 0.0
)
}
}
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.MulitnomialLogit = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.MulitnomialLogit(
params = BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.MulitnomialLogit.Params(
if (c.hasPathOrNull("params")) c.getConfig("params")
else com.typesafe.config.ConfigFactory.parseString("params{}")
)
)
}
}
case class PoolingMultiplier(
Level3: scala.Double,
Level4: scala.Double,
Level5: scala.Double,
LevelLE2: scala.Double
)
object PoolingMultiplier {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.PoolingMultiplier = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.PoolingMultiplier(
Level3 = if (c.hasPathOrNull("Level3")) c.getDouble("Level3") else 1.0,
Level4 = if (c.hasPathOrNull("Level4")) c.getDouble("Level4") else 1.0,
Level5 = if (c.hasPathOrNull("Level5")) c.getDouble("Level5") else 1.0,
LevelLE2 = if (c.hasPathOrNull("LevelLE2")) c.getDouble("LevelLE2") else 1.0
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.ModalBehaviors = {
BeamConfig.Beam.Agentsim.Agents.ModalBehaviors(
defaultValueOfTime =
if (c.hasPathOrNull("defaultValueOfTime")) c.getDouble("defaultValueOfTime") else 8.0,
highTimeSensitivity = BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.HighTimeSensitivity(
if (c.hasPathOrNull("highTimeSensitivity")) c.getConfig("highTimeSensitivity")
else com.typesafe.config.ConfigFactory.parseString("highTimeSensitivity{}")
),
lccm = BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.Lccm(
if (c.hasPathOrNull("lccm")) c.getConfig("lccm")
else com.typesafe.config.ConfigFactory.parseString("lccm{}")
),
lowTimeSensitivity = BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.LowTimeSensitivity(
if (c.hasPathOrNull("lowTimeSensitivity")) c.getConfig("lowTimeSensitivity")
else com.typesafe.config.ConfigFactory.parseString("lowTimeSensitivity{}")
),
maximumNumberOfReplanningAttempts =
if (c.hasPathOrNull("maximumNumberOfReplanningAttempts")) c.getInt("maximumNumberOfReplanningAttempts")
else 3,
modeChoiceClass =
if (c.hasPathOrNull("modeChoiceClass")) c.getString("modeChoiceClass")
else "ModeChoiceMultinomialLogit",
modeVotMultiplier = BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.ModeVotMultiplier(
if (c.hasPathOrNull("modeVotMultiplier")) c.getConfig("modeVotMultiplier")
else com.typesafe.config.ConfigFactory.parseString("modeVotMultiplier{}")
),
mulitnomialLogit = BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.MulitnomialLogit(
if (c.hasPathOrNull("mulitnomialLogit")) c.getConfig("mulitnomialLogit")
else com.typesafe.config.ConfigFactory.parseString("mulitnomialLogit{}")
),
overrideAutomationForVOTT = c.hasPathOrNull("overrideAutomationForVOTT") && c.getBoolean(
"overrideAutomationForVOTT"
),
overrideAutomationLevel =
if (c.hasPathOrNull("overrideAutomationLevel")) c.getInt("overrideAutomationLevel") else 1,
poolingMultiplier = BeamConfig.Beam.Agentsim.Agents.ModalBehaviors.PoolingMultiplier(
if (c.hasPathOrNull("poolingMultiplier")) c.getConfig("poolingMultiplier")
else com.typesafe.config.ConfigFactory.parseString("poolingMultiplier{}")
)
)
}
}
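        // Illustrative usage (not part of the generated code): each field falls back to its default when
        // the corresponding path is absent, so an empty "modalBehaviors{}" block yields the defaults
        // (defaultValueOfTime 8.0, modeChoiceClass "ModeChoiceMultinomialLogit").
        //   val mb = BeamConfig.Beam.Agentsim.Agents.ModalBehaviors(
        //     com.typesafe.config.ConfigFactory.parseString("modalBehaviors{}").getConfig("modalBehaviors")
        //   )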
case class ModeIncentive(
filePath: java.lang.String
)
object ModeIncentive {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.ModeIncentive = {
BeamConfig.Beam.Agentsim.Agents.ModeIncentive(
filePath = if (c.hasPathOrNull("filePath")) c.getString("filePath") else ""
)
}
}
case class Plans(
inputPersonAttributesFilePath: java.lang.String,
inputPlansFilePath: java.lang.String
)
object Plans {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.Plans = {
BeamConfig.Beam.Agentsim.Agents.Plans(
inputPersonAttributesFilePath =
if (c.hasPathOrNull("inputPersonAttributesFilePath")) c.getString("inputPersonAttributesFilePath")
else "/test/input/beamville/populationAttributes.xml.gz",
inputPlansFilePath =
if (c.hasPathOrNull("inputPlansFilePath")) c.getString("inputPlansFilePath")
else "/test/input/beamville/population.xml.gz"
)
}
}
case class Population(
useVehicleSampling: scala.Boolean
)
object Population {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.Population = {
BeamConfig.Beam.Agentsim.Agents.Population(
useVehicleSampling = c.hasPathOrNull("useVehicleSampling") && c.getBoolean("useVehicleSampling")
)
}
}
case class PtFare(
filePath: java.lang.String
)
object PtFare {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.PtFare = {
BeamConfig.Beam.Agentsim.Agents.PtFare(
filePath = if (c.hasPathOrNull("filePath")) c.getString("filePath") else ""
)
}
}
case class RideHail(
allocationManager: BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager,
defaultBaseCost: scala.Double,
defaultCostPerMile: scala.Double,
defaultCostPerMinute: scala.Double,
initialization: BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization,
iterationStats: BeamConfig.Beam.Agentsim.Agents.RideHail.IterationStats,
pooledBaseCost: scala.Double,
pooledCostPerMile: scala.Double,
pooledCostPerMinute: scala.Double,
pooledToRegularRideCostRatio: scala.Double,
refuelLocationType: java.lang.String,
refuelThresholdInMeters: scala.Double,
rideHailManager: BeamConfig.Beam.Agentsim.Agents.RideHail.RideHailManager,
surgePricing: BeamConfig.Beam.Agentsim.Agents.RideHail.SurgePricing
)
object RideHail {
case class AllocationManager(
alonsoMora: BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager.AlonsoMora,
name: java.lang.String,
randomRepositioning: BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager.RandomRepositioning,
repositionLowWaitingTimes: BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager.RepositionLowWaitingTimes,
repositionTimeoutInSeconds: scala.Int,
requestBufferTimeoutInSeconds: scala.Int
)
object AllocationManager {
case class AlonsoMora(
dropoffTimeWindowInSec: scala.Int,
maxRequestsPerVehicle: scala.Int,
pickupTimeWindowInSec: scala.Int
)
object AlonsoMora {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager.AlonsoMora = {
BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager.AlonsoMora(
dropoffTimeWindowInSec =
if (c.hasPathOrNull("dropoffTimeWindowInSec")) c.getInt("dropoffTimeWindowInSec") else 600,
maxRequestsPerVehicle =
if (c.hasPathOrNull("maxRequestsPerVehicle")) c.getInt("maxRequestsPerVehicle") else 5,
pickupTimeWindowInSec =
if (c.hasPathOrNull("pickupTimeWindowInSec")) c.getInt("pickupTimeWindowInSec") else 360
)
}
}
case class RandomRepositioning(
repositioningShare: scala.Double
)
object RandomRepositioning {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager.RandomRepositioning = {
BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager.RandomRepositioning(
repositioningShare =
if (c.hasPathOrNull("repositioningShare")) c.getDouble("repositioningShare") else 0.1
)
}
}
case class RepositionLowWaitingTimes(
allowIncreasingRadiusIfDemandInRadiusLow: scala.Boolean,
demandWeight: scala.Double,
distanceWeight: scala.Double,
keepMaxTopNScores: scala.Int,
minDemandPercentageInRadius: scala.Double,
minScoreThresholdForRepositioning: scala.Double,
minimumNumberOfIdlingVehiclesThresholdForRepositioning: scala.Int,
percentageOfVehiclesToReposition: scala.Double,
produceDebugImages: scala.Boolean,
repositionCircleRadiusInMeters: scala.Double,
repositioningMethod: java.lang.String,
timeWindowSizeInSecForDecidingAboutRepositioning: scala.Double,
waitingTimeWeight: scala.Double
)
object RepositionLowWaitingTimes {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager.RepositionLowWaitingTimes = {
BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager.RepositionLowWaitingTimes(
allowIncreasingRadiusIfDemandInRadiusLow = !c.hasPathOrNull(
"allowIncreasingRadiusIfDemandInRadiusLow"
) || c.getBoolean("allowIncreasingRadiusIfDemandInRadiusLow"),
demandWeight = if (c.hasPathOrNull("demandWeight")) c.getDouble("demandWeight") else 4.0,
distanceWeight = if (c.hasPathOrNull("distanceWeight")) c.getDouble("distanceWeight") else 0.01,
keepMaxTopNScores = if (c.hasPathOrNull("keepMaxTopNScores")) c.getInt("keepMaxTopNScores") else 1,
minDemandPercentageInRadius =
if (c.hasPathOrNull("minDemandPercentageInRadius")) c.getDouble("minDemandPercentageInRadius")
else 0.1,
minScoreThresholdForRepositioning =
if (c.hasPathOrNull("minScoreThresholdForRepositioning"))
c.getDouble("minScoreThresholdForRepositioning")
else 0.1,
minimumNumberOfIdlingVehiclesThresholdForRepositioning =
if (c.hasPathOrNull("minimumNumberOfIdlingVehiclesThresholdForRepositioning"))
c.getInt("minimumNumberOfIdlingVehiclesThresholdForRepositioning")
else 1,
percentageOfVehiclesToReposition =
if (c.hasPathOrNull("percentageOfVehiclesToReposition"))
c.getDouble("percentageOfVehiclesToReposition")
else 0.01,
produceDebugImages = !c.hasPathOrNull("produceDebugImages") || c.getBoolean("produceDebugImages"),
repositionCircleRadiusInMeters =
if (c.hasPathOrNull("repositionCircleRadiusInMeters")) c.getDouble("repositionCircleRadiusInMeters")
else 3000,
repositioningMethod =
if (c.hasPathOrNull("repositioningMethod")) c.getString("repositioningMethod") else "TOP_SCORES",
timeWindowSizeInSecForDecidingAboutRepositioning =
if (c.hasPathOrNull("timeWindowSizeInSecForDecidingAboutRepositioning"))
c.getDouble("timeWindowSizeInSecForDecidingAboutRepositioning")
else 1200,
waitingTimeWeight =
if (c.hasPathOrNull("waitingTimeWeight")) c.getDouble("waitingTimeWeight") else 4.0
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager = {
BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager(
alonsoMora = BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager.AlonsoMora(
if (c.hasPathOrNull("alonsoMora")) c.getConfig("alonsoMora")
else com.typesafe.config.ConfigFactory.parseString("alonsoMora{}")
),
name = if (c.hasPathOrNull("name")) c.getString("name") else "DEFAULT_MANAGER",
randomRepositioning = BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager.RandomRepositioning(
if (c.hasPathOrNull("randomRepositioning")) c.getConfig("randomRepositioning")
else com.typesafe.config.ConfigFactory.parseString("randomRepositioning{}")
),
repositionLowWaitingTimes =
BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager.RepositionLowWaitingTimes(
if (c.hasPathOrNull("repositionLowWaitingTimes")) c.getConfig("repositionLowWaitingTimes")
else com.typesafe.config.ConfigFactory.parseString("repositionLowWaitingTimes{}")
),
repositionTimeoutInSeconds =
if (c.hasPathOrNull("repositionTimeoutInSeconds")) c.getInt("repositionTimeoutInSeconds") else 0,
requestBufferTimeoutInSeconds =
if (c.hasPathOrNull("requestBufferTimeoutInSeconds")) c.getInt("requestBufferTimeoutInSeconds") else 0
)
}
}
case class Initialization(
filePath: java.lang.String,
initType: java.lang.String,
procedural: BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization.Procedural
)
object Initialization {
case class Procedural(
fractionOfInitialVehicleFleet: scala.Double,
initialLocation: BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization.Procedural.InitialLocation,
vehicleTypeId: java.lang.String,
vehicleTypePrefix: java.lang.String
)
object Procedural {
case class InitialLocation(
home: BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization.Procedural.InitialLocation.Home,
name: java.lang.String
)
object InitialLocation {
case class Home(
radiusInMeters: scala.Double
)
object Home {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization.Procedural.InitialLocation.Home = {
BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization.Procedural.InitialLocation.Home(
radiusInMeters = if (c.hasPathOrNull("radiusInMeters")) c.getDouble("radiusInMeters") else 10000
)
}
}
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization.Procedural.InitialLocation = {
BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization.Procedural.InitialLocation(
home = BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization.Procedural.InitialLocation.Home(
if (c.hasPathOrNull("home")) c.getConfig("home")
else com.typesafe.config.ConfigFactory.parseString("home{}")
),
name = if (c.hasPathOrNull("name")) c.getString("name") else "HOME"
)
}
}
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization.Procedural = {
BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization.Procedural(
fractionOfInitialVehicleFleet =
if (c.hasPathOrNull("fractionOfInitialVehicleFleet")) c.getDouble("fractionOfInitialVehicleFleet")
else 0.1,
initialLocation = BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization.Procedural.InitialLocation(
if (c.hasPathOrNull("initialLocation")) c.getConfig("initialLocation")
else com.typesafe.config.ConfigFactory.parseString("initialLocation{}")
),
vehicleTypeId = if (c.hasPathOrNull("vehicleTypeId")) c.getString("vehicleTypeId") else "Car",
vehicleTypePrefix =
if (c.hasPathOrNull("vehicleTypePrefix")) c.getString("vehicleTypePrefix") else "RH"
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization = {
BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization(
filePath =
if (c.hasPathOrNull("filePath")) c.getString("filePath")
else "/test/input/beamville/ride-hail-fleet.csv",
initType = if (c.hasPathOrNull("initType")) c.getString("initType") else "PROCEDURAL",
procedural = BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization.Procedural(
if (c.hasPathOrNull("procedural")) c.getConfig("procedural")
else com.typesafe.config.ConfigFactory.parseString("procedural{}")
)
)
}
}
case class IterationStats(
timeBinSizeInSec: scala.Double
)
object IterationStats {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.RideHail.IterationStats = {
BeamConfig.Beam.Agentsim.Agents.RideHail.IterationStats(
timeBinSizeInSec = if (c.hasPathOrNull("timeBinSizeInSec")) c.getDouble("timeBinSizeInSec") else 3600.0
)
}
}
case class RideHailManager(
radiusInMeters: scala.Double
)
object RideHailManager {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.RideHail.RideHailManager = {
BeamConfig.Beam.Agentsim.Agents.RideHail.RideHailManager(
radiusInMeters = if (c.hasPathOrNull("radiusInMeters")) c.getDouble("radiusInMeters") else 5000
)
}
}
case class SurgePricing(
minimumSurgeLevel: scala.Double,
numberOfCategories: scala.Int,
priceAdjustmentStrategy: java.lang.String,
surgeLevelAdaptionStep: scala.Double
)
object SurgePricing {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.RideHail.SurgePricing = {
BeamConfig.Beam.Agentsim.Agents.RideHail.SurgePricing(
minimumSurgeLevel = if (c.hasPathOrNull("minimumSurgeLevel")) c.getDouble("minimumSurgeLevel") else 0.1,
numberOfCategories = if (c.hasPathOrNull("numberOfCategories")) c.getInt("numberOfCategories") else 6,
priceAdjustmentStrategy =
if (c.hasPathOrNull("priceAdjustmentStrategy")) c.getString("priceAdjustmentStrategy")
else "KEEP_PRICE_LEVEL_FIXED_AT_ONE",
surgeLevelAdaptionStep =
if (c.hasPathOrNull("surgeLevelAdaptionStep")) c.getDouble("surgeLevelAdaptionStep") else 0.1
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.RideHail = {
BeamConfig.Beam.Agentsim.Agents.RideHail(
allocationManager = BeamConfig.Beam.Agentsim.Agents.RideHail.AllocationManager(
if (c.hasPathOrNull("allocationManager")) c.getConfig("allocationManager")
else com.typesafe.config.ConfigFactory.parseString("allocationManager{}")
),
defaultBaseCost = if (c.hasPathOrNull("defaultBaseCost")) c.getDouble("defaultBaseCost") else 1.8,
defaultCostPerMile =
if (c.hasPathOrNull("defaultCostPerMile")) c.getDouble("defaultCostPerMile") else 0.91,
defaultCostPerMinute =
if (c.hasPathOrNull("defaultCostPerMinute")) c.getDouble("defaultCostPerMinute") else 0.28,
initialization = BeamConfig.Beam.Agentsim.Agents.RideHail.Initialization(
if (c.hasPathOrNull("initialization")) c.getConfig("initialization")
else com.typesafe.config.ConfigFactory.parseString("initialization{}")
),
iterationStats = BeamConfig.Beam.Agentsim.Agents.RideHail.IterationStats(
if (c.hasPathOrNull("iterationStats")) c.getConfig("iterationStats")
else com.typesafe.config.ConfigFactory.parseString("iterationStats{}")
),
pooledBaseCost = if (c.hasPathOrNull("pooledBaseCost")) c.getDouble("pooledBaseCost") else 1.89,
pooledCostPerMile = if (c.hasPathOrNull("pooledCostPerMile")) c.getDouble("pooledCostPerMile") else 1.11,
pooledCostPerMinute =
if (c.hasPathOrNull("pooledCostPerMinute")) c.getDouble("pooledCostPerMinute") else 0.07,
pooledToRegularRideCostRatio =
if (c.hasPathOrNull("pooledToRegularRideCostRatio")) c.getDouble("pooledToRegularRideCostRatio")
else 0.6,
refuelLocationType =
if (c.hasPathOrNull("refuelLocationType")) c.getString("refuelLocationType") else "AtTAZCenter",
refuelThresholdInMeters =
if (c.hasPathOrNull("refuelThresholdInMeters")) c.getDouble("refuelThresholdInMeters") else 5000.0,
rideHailManager = BeamConfig.Beam.Agentsim.Agents.RideHail.RideHailManager(
if (c.hasPathOrNull("rideHailManager")) c.getConfig("rideHailManager")
else com.typesafe.config.ConfigFactory.parseString("rideHailManager{}")
),
surgePricing = BeamConfig.Beam.Agentsim.Agents.RideHail.SurgePricing(
if (c.hasPathOrNull("surgePricing")) c.getConfig("surgePricing")
else com.typesafe.config.ConfigFactory.parseString("surgePricing{}")
)
)
}
}
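        // Illustrative usage (not part of the generated code): nested sections fall back level by level,
        // so a bare "rideHail{}" block resolves entirely to the defaults above (e.g. defaultBaseCost 1.8,
        // pooledToRegularRideCostRatio 0.6, initialization.initType "PROCEDURAL").
        //   val rh = BeamConfig.Beam.Agentsim.Agents.RideHail(
        //     com.typesafe.config.ConfigFactory.parseString("rideHail{}").getConfig("rideHail")
        //   )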
case class RideHailTransit(
modesToConsider: java.lang.String
)
object RideHailTransit {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.RideHailTransit = {
BeamConfig.Beam.Agentsim.Agents.RideHailTransit(
modesToConsider = if (c.hasPathOrNull("modesToConsider")) c.getString("modesToConsider") else "MASS"
)
}
}
case class Vehicles(
downsamplingMethod: java.lang.String,
fractionOfInitialVehicleFleet: scala.Double,
fuelTypesFilePath: java.lang.String,
linkToGradePercentFilePath: java.lang.String,
sharedFleets: scala.List[BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm],
transitVehicleTypesByRouteFile: java.lang.String,
vehicleTypesFilePath: java.lang.String,
vehiclesFilePath: java.lang.String
)
object Vehicles {
case class SharedFleets$Elm(
fixed_non_reserving: scala.Option[
BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.FixedNonReserving
],
fixed_non_reserving_fleet_by_taz: scala.Option[
BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.FixedNonReservingFleetByTaz
],
inexhaustible_reserving: scala.Option[
BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.InexhaustibleReserving
],
managerType: java.lang.String,
name: java.lang.String,
reposition: scala.Option[BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.Reposition]
)
object SharedFleets$Elm {
case class FixedNonReserving(
maxWalkingDistance: scala.Int,
vehicleTypeId: java.lang.String
)
object FixedNonReserving {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.FixedNonReserving = {
BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.FixedNonReserving(
maxWalkingDistance =
if (c.hasPathOrNull("maxWalkingDistance")) c.getInt("maxWalkingDistance") else 500,
vehicleTypeId = if (c.hasPathOrNull("vehicleTypeId")) c.getString("vehicleTypeId") else "sharedCar"
)
}
}
case class FixedNonReservingFleetByTaz(
fleetSize: scala.Int,
maxWalkingDistance: scala.Int,
vehicleTypeId: java.lang.String,
vehiclesSharePerTAZFromCSV: scala.Option[java.lang.String]
)
object FixedNonReservingFleetByTaz {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.FixedNonReservingFleetByTaz = {
BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.FixedNonReservingFleetByTaz(
fleetSize = if (c.hasPathOrNull("fleetSize")) c.getInt("fleetSize") else 10,
maxWalkingDistance =
if (c.hasPathOrNull("maxWalkingDistance")) c.getInt("maxWalkingDistance") else 500,
vehicleTypeId = if (c.hasPathOrNull("vehicleTypeId")) c.getString("vehicleTypeId") else "sharedCar",
vehiclesSharePerTAZFromCSV =
if (c.hasPathOrNull("vehiclesSharePerTAZFromCSV")) Some(c.getString("vehiclesSharePerTAZFromCSV"))
else None
)
}
}
case class InexhaustibleReserving(
vehicleTypeId: java.lang.String
)
object InexhaustibleReserving {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.InexhaustibleReserving = {
BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.InexhaustibleReserving(
vehicleTypeId = if (c.hasPathOrNull("vehicleTypeId")) c.getString("vehicleTypeId") else "sharedCar"
)
}
}
case class Reposition(
min_availability_undersupply_algorithm: scala.Option[
BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.Reposition.MinAvailabilityUndersupplyAlgorithm
],
name: java.lang.String,
repositionTimeBin: scala.Int,
statTimeBin: scala.Int
)
object Reposition {
case class MinAvailabilityUndersupplyAlgorithm(
matchLimit: scala.Int
)
object MinAvailabilityUndersupplyAlgorithm {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.Reposition.MinAvailabilityUndersupplyAlgorithm = {
BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.Reposition
.MinAvailabilityUndersupplyAlgorithm(
matchLimit = if (c.hasPathOrNull("matchLimit")) c.getInt("matchLimit") else 99999
)
}
}
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.Reposition = {
BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.Reposition(
min_availability_undersupply_algorithm =
if (c.hasPathOrNull("min-availability-undersupply-algorithm"))
scala.Some(
BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.Reposition
.MinAvailabilityUndersupplyAlgorithm(c.getConfig("min-availability-undersupply-algorithm"))
)
else None,
name = if (c.hasPathOrNull("name")) c.getString("name") else "my-reposition-algorithm",
repositionTimeBin = if (c.hasPathOrNull("repositionTimeBin")) c.getInt("repositionTimeBin") else 3600,
statTimeBin = if (c.hasPathOrNull("statTimeBin")) c.getInt("statTimeBin") else 300
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm = {
BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm(
fixed_non_reserving =
if (c.hasPathOrNull("fixed-non-reserving"))
scala.Some(
BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm
.FixedNonReserving(c.getConfig("fixed-non-reserving"))
)
else None,
fixed_non_reserving_fleet_by_taz =
if (c.hasPathOrNull("fixed-non-reserving-fleet-by-taz"))
scala.Some(
BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm
.FixedNonReservingFleetByTaz(c.getConfig("fixed-non-reserving-fleet-by-taz"))
)
else None,
inexhaustible_reserving =
if (c.hasPathOrNull("inexhaustible-reserving"))
scala.Some(
BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm
.InexhaustibleReserving(c.getConfig("inexhaustible-reserving"))
)
else None,
managerType = if (c.hasPathOrNull("managerType")) c.getString("managerType") else "fixed-non-reserving",
name = if (c.hasPathOrNull("name")) c.getString("name") else "my-fixed-non-reserving-fleet",
reposition =
if (c.hasPathOrNull("reposition"))
scala.Some(
BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm.Reposition(c.getConfig("reposition"))
)
else None
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents.Vehicles = {
BeamConfig.Beam.Agentsim.Agents.Vehicles(
downsamplingMethod =
if (c.hasPathOrNull("downsamplingMethod")) c.getString("downsamplingMethod")
else "SECONDARY_VEHICLES_FIRST",
fractionOfInitialVehicleFleet =
if (c.hasPathOrNull("fractionOfInitialVehicleFleet")) c.getDouble("fractionOfInitialVehicleFleet")
else 1.0,
fuelTypesFilePath =
if (c.hasPathOrNull("fuelTypesFilePath")) c.getString("fuelTypesFilePath")
else "/test/input/beamville/beamFuelTypes.csv",
linkToGradePercentFilePath =
if (c.hasPathOrNull("linkToGradePercentFilePath")) c.getString("linkToGradePercentFilePath") else "",
sharedFleets = $_LBeamConfig_Beam_Agentsim_Agents_Vehicles_SharedFleets$Elm(c.getList("sharedFleets")),
transitVehicleTypesByRouteFile =
if (c.hasPathOrNull("transitVehicleTypesByRouteFile")) c.getString("transitVehicleTypesByRouteFile")
else "",
vehicleTypesFilePath =
if (c.hasPathOrNull("vehicleTypesFilePath")) c.getString("vehicleTypesFilePath")
else "/test/input/beamville/vehicleTypes.csv",
vehiclesFilePath =
if (c.hasPathOrNull("vehiclesFilePath")) c.getString("vehiclesFilePath")
else "/test/input/beamville/vehicles.csv"
)
}
private def $_LBeamConfig_Beam_Agentsim_Agents_Vehicles_SharedFleets$Elm(
cl: com.typesafe.config.ConfigList
): scala.List[BeamConfig.Beam.Agentsim.Agents.Vehicles.SharedFleets$Elm] = {
import scala.collection.JavaConverters._
cl.asScala
.map(
cv =>
BeamConfig.Beam.Agentsim.Agents.Vehicles
.SharedFleets$Elm(cv.asInstanceOf[com.typesafe.config.ConfigObject].toConfig)
)
.toList
}
}
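        // Note (illustrative, not part of the generated code): unlike the scalar fields, sharedFleets is
        // read unconditionally via c.getList("sharedFleets") and mapped element by element into
        // SharedFleets$Elm, so the config passed to Vehicles.apply must contain a (possibly empty) list:
        //   val v = BeamConfig.Beam.Agentsim.Agents.Vehicles(
        //     com.typesafe.config.ConfigFactory.parseString("sharedFleets = []")
        //   )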
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Agents = {
BeamConfig.Beam.Agentsim.Agents(
bodyType = if (c.hasPathOrNull("bodyType")) c.getString("bodyType") else "BODY-TYPE-DEFAULT",
households = BeamConfig.Beam.Agentsim.Agents.Households(
if (c.hasPathOrNull("households")) c.getConfig("households")
else com.typesafe.config.ConfigFactory.parseString("households{}")
),
modalBehaviors = BeamConfig.Beam.Agentsim.Agents.ModalBehaviors(
if (c.hasPathOrNull("modalBehaviors")) c.getConfig("modalBehaviors")
else com.typesafe.config.ConfigFactory.parseString("modalBehaviors{}")
),
modeIncentive = BeamConfig.Beam.Agentsim.Agents.ModeIncentive(
if (c.hasPathOrNull("modeIncentive")) c.getConfig("modeIncentive")
else com.typesafe.config.ConfigFactory.parseString("modeIncentive{}")
),
plans = BeamConfig.Beam.Agentsim.Agents.Plans(
if (c.hasPathOrNull("plans")) c.getConfig("plans")
else com.typesafe.config.ConfigFactory.parseString("plans{}")
),
population = BeamConfig.Beam.Agentsim.Agents.Population(
if (c.hasPathOrNull("population")) c.getConfig("population")
else com.typesafe.config.ConfigFactory.parseString("population{}")
),
ptFare = BeamConfig.Beam.Agentsim.Agents.PtFare(
if (c.hasPathOrNull("ptFare")) c.getConfig("ptFare")
else com.typesafe.config.ConfigFactory.parseString("ptFare{}")
),
rideHail = BeamConfig.Beam.Agentsim.Agents.RideHail(
if (c.hasPathOrNull("rideHail")) c.getConfig("rideHail")
else com.typesafe.config.ConfigFactory.parseString("rideHail{}")
),
rideHailTransit = BeamConfig.Beam.Agentsim.Agents.RideHailTransit(
if (c.hasPathOrNull("rideHailTransit")) c.getConfig("rideHailTransit")
else com.typesafe.config.ConfigFactory.parseString("rideHailTransit{}")
),
vehicles = BeamConfig.Beam.Agentsim.Agents.Vehicles(
if (c.hasPathOrNull("vehicles")) c.getConfig("vehicles")
else com.typesafe.config.ConfigFactory.parseString("vehicles{}")
)
)
}
}
case class Scenarios(
frequencyAdjustmentFile: java.lang.String
)
object Scenarios {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Scenarios = {
BeamConfig.Beam.Agentsim.Scenarios(
frequencyAdjustmentFile =
if (c.hasPathOrNull("frequencyAdjustmentFile")) c.getString("frequencyAdjustmentFile")
else "/test/input/beamville/r5/FrequencyAdjustment.csv"
)
}
}
case class ScheduleMonitorTask(
initialDelay: scala.Int,
interval: scala.Int
)
object ScheduleMonitorTask {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.ScheduleMonitorTask = {
BeamConfig.Beam.Agentsim.ScheduleMonitorTask(
initialDelay = if (c.hasPathOrNull("initialDelay")) c.getInt("initialDelay") else 1,
interval = if (c.hasPathOrNull("interval")) c.getInt("interval") else 30
)
}
}
case class Taz(
filePath: java.lang.String,
parkingFilePath: java.lang.String
)
object Taz {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Taz = {
BeamConfig.Beam.Agentsim.Taz(
filePath =
if (c.hasPathOrNull("filePath")) c.getString("filePath") else "/test/input/beamville/taz-centers.csv",
parkingFilePath =
if (c.hasPathOrNull("parkingFilePath")) c.getString("parkingFilePath")
else "/test/input/beamville/taz-parking.csv"
)
}
}
case class Toll(
filePath: java.lang.String
)
object Toll {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Toll = {
BeamConfig.Beam.Agentsim.Toll(
filePath =
if (c.hasPathOrNull("filePath")) c.getString("filePath") else "/test/input/beamville/toll-prices.csv"
)
}
}
case class Tuning(
fuelCapacityInJoules: scala.Double,
rideHailPrice: scala.Double,
tollPrice: scala.Double,
transitCapacity: scala.Option[scala.Double],
transitPrice: scala.Double
)
object Tuning {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim.Tuning = {
BeamConfig.Beam.Agentsim.Tuning(
fuelCapacityInJoules =
if (c.hasPathOrNull("fuelCapacityInJoules")) c.getDouble("fuelCapacityInJoules") else 86400000,
rideHailPrice = if (c.hasPathOrNull("rideHailPrice")) c.getDouble("rideHailPrice") else 1.0,
tollPrice = if (c.hasPathOrNull("tollPrice")) c.getDouble("tollPrice") else 1.0,
transitCapacity = if (c.hasPathOrNull("transitCapacity")) Some(c.getDouble("transitCapacity")) else None,
transitPrice = if (c.hasPathOrNull("transitPrice")) c.getDouble("transitPrice") else 1.0
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Agentsim = {
BeamConfig.Beam.Agentsim(
agentSampleSizeAsFractionOfPopulation =
if (c.hasPathOrNull("agentSampleSizeAsFractionOfPopulation"))
c.getDouble("agentSampleSizeAsFractionOfPopulation")
else 1.0,
agents = BeamConfig.Beam.Agentsim.Agents(
if (c.hasPathOrNull("agents")) c.getConfig("agents")
else com.typesafe.config.ConfigFactory.parseString("agents{}")
),
endTime = if (c.hasPathOrNull("endTime")) c.getString("endTime") else "30:00:00",
firstIteration = if (c.hasPathOrNull("firstIteration")) c.getInt("firstIteration") else 0,
lastIteration = if (c.hasPathOrNull("lastIteration")) c.getInt("lastIteration") else 0,
numAgents = if (c.hasPathOrNull("numAgents")) c.getInt("numAgents") else 100,
populationAdjustment =
if (c.hasPathOrNull("populationAdjustment")) c.getString("populationAdjustment") else "DEFAULT_ADJUSTMENT",
scenarios = BeamConfig.Beam.Agentsim.Scenarios(
if (c.hasPathOrNull("scenarios")) c.getConfig("scenarios")
else com.typesafe.config.ConfigFactory.parseString("scenarios{}")
),
scheduleMonitorTask = BeamConfig.Beam.Agentsim.ScheduleMonitorTask(
if (c.hasPathOrNull("scheduleMonitorTask")) c.getConfig("scheduleMonitorTask")
else com.typesafe.config.ConfigFactory.parseString("scheduleMonitorTask{}")
),
schedulerParallelismWindow =
if (c.hasPathOrNull("schedulerParallelismWindow")) c.getInt("schedulerParallelismWindow") else 30,
simulationName = if (c.hasPathOrNull("simulationName")) c.getString("simulationName") else "beamville",
startTime = if (c.hasPathOrNull("startTime")) c.getString("startTime") else "00:00:00",
taz = BeamConfig.Beam.Agentsim.Taz(
if (c.hasPathOrNull("taz")) c.getConfig("taz") else com.typesafe.config.ConfigFactory.parseString("taz{}")
),
thresholdForMakingParkingChoiceInMeters =
if (c.hasPathOrNull("thresholdForMakingParkingChoiceInMeters"))
c.getInt("thresholdForMakingParkingChoiceInMeters")
else 100,
thresholdForWalkingInMeters =
if (c.hasPathOrNull("thresholdForWalkingInMeters")) c.getInt("thresholdForWalkingInMeters") else 100,
timeBinSize = if (c.hasPathOrNull("timeBinSize")) c.getInt("timeBinSize") else 3600,
toll = BeamConfig.Beam.Agentsim.Toll(
if (c.hasPathOrNull("toll")) c.getConfig("toll")
else com.typesafe.config.ConfigFactory.parseString("toll{}")
),
tuning = BeamConfig.Beam.Agentsim.Tuning(
if (c.hasPathOrNull("tuning")) c.getConfig("tuning")
else com.typesafe.config.ConfigFactory.parseString("tuning{}")
)
)
}
}
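    // Illustrative usage (not part of the generated code): an empty "agentsim{}" block resolves every
    // nested section to its defaults (simulationName "beamville", timeBinSize 3600, numAgents 100).
    //   val sim = BeamConfig.Beam.Agentsim(
    //     com.typesafe.config.ConfigFactory.parseString("agentsim{}").getConfig("agentsim")
    //   )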
case class Calibration(
counts: BeamConfig.Beam.Calibration.Counts,
meanToCountsWeightRatio: scala.Double,
mode: BeamConfig.Beam.Calibration.Mode,
objectiveFunction: java.lang.String,
roadNetwork: BeamConfig.Beam.Calibration.RoadNetwork
)
object Calibration {
case class Counts(
averageCountsOverIterations: scala.Int,
countsScaleFactor: scala.Int,
inputCountsFile: java.lang.String,
writeCountsInterval: scala.Int
)
object Counts {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Calibration.Counts = {
BeamConfig.Beam.Calibration.Counts(
averageCountsOverIterations =
if (c.hasPathOrNull("averageCountsOverIterations")) c.getInt("averageCountsOverIterations") else 1,
countsScaleFactor = if (c.hasPathOrNull("countsScaleFactor")) c.getInt("countsScaleFactor") else 10,
inputCountsFile =
if (c.hasPathOrNull("inputCountsFile")) c.getString("inputCountsFile")
else "/test/input/beamville/counts.xml",
writeCountsInterval = if (c.hasPathOrNull("writeCountsInterval")) c.getInt("writeCountsInterval") else 1
)
}
}
case class Mode(
benchmarkFilePath: java.lang.String
)
object Mode {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Calibration.Mode = {
BeamConfig.Beam.Calibration.Mode(
benchmarkFilePath = if (c.hasPathOrNull("benchmarkFilePath")) c.getString("benchmarkFilePath") else ""
)
}
}
case class RoadNetwork(
travelTimes: BeamConfig.Beam.Calibration.RoadNetwork.TravelTimes
)
object RoadNetwork {
case class TravelTimes(
zoneBoundariesFilePath: java.lang.String,
zoneODTravelTimesFilePath: java.lang.String
)
object TravelTimes {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Calibration.RoadNetwork.TravelTimes = {
BeamConfig.Beam.Calibration.RoadNetwork.TravelTimes(
zoneBoundariesFilePath =
if (c.hasPathOrNull("zoneBoundariesFilePath")) c.getString("zoneBoundariesFilePath") else "",
zoneODTravelTimesFilePath =
if (c.hasPathOrNull("zoneODTravelTimesFilePath")) c.getString("zoneODTravelTimesFilePath") else ""
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Calibration.RoadNetwork = {
BeamConfig.Beam.Calibration.RoadNetwork(
travelTimes = BeamConfig.Beam.Calibration.RoadNetwork.TravelTimes(
if (c.hasPathOrNull("travelTimes")) c.getConfig("travelTimes")
else com.typesafe.config.ConfigFactory.parseString("travelTimes{}")
)
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Calibration = {
BeamConfig.Beam.Calibration(
counts = BeamConfig.Beam.Calibration.Counts(
if (c.hasPathOrNull("counts")) c.getConfig("counts")
else com.typesafe.config.ConfigFactory.parseString("counts{}")
),
meanToCountsWeightRatio =
if (c.hasPathOrNull("meanToCountsWeightRatio")) c.getDouble("meanToCountsWeightRatio") else 0.5,
mode = BeamConfig.Beam.Calibration.Mode(
if (c.hasPathOrNull("mode")) c.getConfig("mode")
else com.typesafe.config.ConfigFactory.parseString("mode{}")
),
objectiveFunction =
if (c.hasPathOrNull("objectiveFunction")) c.getString("objectiveFunction")
else "ModeChoiceObjectiveFunction",
roadNetwork = BeamConfig.Beam.Calibration.RoadNetwork(
if (c.hasPathOrNull("roadNetwork")) c.getConfig("roadNetwork")
else com.typesafe.config.ConfigFactory.parseString("roadNetwork{}")
)
)
}
}
case class Cluster(
clusterType: scala.Option[java.lang.String],
enabled: scala.Boolean
)
object Cluster {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Cluster = {
BeamConfig.Beam.Cluster(
clusterType = if (c.hasPathOrNull("clusterType")) Some(c.getString("clusterType")) else None,
enabled = c.hasPathOrNull("enabled") && c.getBoolean("enabled")
)
}
}
case class Debug(
actor: BeamConfig.Beam.Debug.Actor,
agentTripScoresInterval: scala.Int,
clearRoutedOutstandingWorkEnabled: scala.Boolean,
debugActorTimerIntervalInSec: scala.Int,
debugEnabled: scala.Boolean,
memoryConsumptionDisplayTimeoutInSec: scala.Int,
secondsToWaitToClearRoutedOutstandingWork: scala.Int,
stuckAgentDetection: BeamConfig.Beam.Debug.StuckAgentDetection,
triggerMeasurer: BeamConfig.Beam.Debug.TriggerMeasurer
)
object Debug {
case class Actor(
logDepth: scala.Int
)
object Actor {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Debug.Actor = {
BeamConfig.Beam.Debug.Actor(
logDepth = if (c.hasPathOrNull("logDepth")) c.getInt("logDepth") else 0
)
}
}
case class StuckAgentDetection(
checkIntervalMs: scala.Long,
checkMaxNumberOfMessagesEnabled: scala.Boolean,
defaultTimeoutMs: scala.Long,
enabled: scala.Boolean,
overallSimulationTimeoutMs: scala.Long,
thresholds: scala.List[BeamConfig.Beam.Debug.StuckAgentDetection.Thresholds$Elm]
)
object StuckAgentDetection {
case class Thresholds$Elm(
actorTypeToMaxNumberOfMessages: BeamConfig.Beam.Debug.StuckAgentDetection.Thresholds$Elm.ActorTypeToMaxNumberOfMessages,
markAsStuckAfterMs: scala.Long,
triggerType: java.lang.String
)
object Thresholds$Elm {
case class ActorTypeToMaxNumberOfMessages(
population: scala.Option[scala.Int],
rideHailAgent: scala.Option[scala.Int],
rideHailManager: scala.Option[scala.Int],
transitDriverAgent: scala.Option[scala.Int]
)
object ActorTypeToMaxNumberOfMessages {
def apply(
c: com.typesafe.config.Config
): BeamConfig.Beam.Debug.StuckAgentDetection.Thresholds$Elm.ActorTypeToMaxNumberOfMessages = {
BeamConfig.Beam.Debug.StuckAgentDetection.Thresholds$Elm.ActorTypeToMaxNumberOfMessages(
population = if (c.hasPathOrNull("population")) Some(c.getInt("population")) else None,
rideHailAgent = if (c.hasPathOrNull("rideHailAgent")) Some(c.getInt("rideHailAgent")) else None,
rideHailManager = if (c.hasPathOrNull("rideHailManager")) Some(c.getInt("rideHailManager")) else None,
transitDriverAgent =
if (c.hasPathOrNull("transitDriverAgent")) Some(c.getInt("transitDriverAgent")) else None
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Debug.StuckAgentDetection.Thresholds$Elm = {
BeamConfig.Beam.Debug.StuckAgentDetection.Thresholds$Elm(
actorTypeToMaxNumberOfMessages =
BeamConfig.Beam.Debug.StuckAgentDetection.Thresholds$Elm.ActorTypeToMaxNumberOfMessages(
if (c.hasPathOrNull("actorTypeToMaxNumberOfMessages")) c.getConfig("actorTypeToMaxNumberOfMessages")
else com.typesafe.config.ConfigFactory.parseString("actorTypeToMaxNumberOfMessages{}")
),
markAsStuckAfterMs =
if (c.hasPathOrNull("markAsStuckAfterMs"))
c.getDuration("markAsStuckAfterMs", java.util.concurrent.TimeUnit.MILLISECONDS)
else 20000,
triggerType =
if (c.hasPathOrNull("triggerType")) c.getString("triggerType")
else "beam.agentsim.agents.PersonAgent$ActivityStartTrigger"
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Debug.StuckAgentDetection = {
BeamConfig.Beam.Debug.StuckAgentDetection(
checkIntervalMs =
if (c.hasPathOrNull("checkIntervalMs"))
c.getDuration("checkIntervalMs", java.util.concurrent.TimeUnit.MILLISECONDS)
else 200,
checkMaxNumberOfMessagesEnabled = !c.hasPathOrNull("checkMaxNumberOfMessagesEnabled") || c.getBoolean(
"checkMaxNumberOfMessagesEnabled"
),
defaultTimeoutMs =
if (c.hasPathOrNull("defaultTimeoutMs"))
c.getDuration("defaultTimeoutMs", java.util.concurrent.TimeUnit.MILLISECONDS)
else 60000,
enabled = c.hasPathOrNull("enabled") && c.getBoolean("enabled"),
overallSimulationTimeoutMs =
if (c.hasPathOrNull("overallSimulationTimeoutMs"))
c.getDuration("overallSimulationTimeoutMs", java.util.concurrent.TimeUnit.MILLISECONDS)
else 100000,
thresholds = $_LBeamConfig_Beam_Debug_StuckAgentDetection_Thresholds$Elm(c.getList("thresholds"))
)
}
private def $_LBeamConfig_Beam_Debug_StuckAgentDetection_Thresholds$Elm(
cl: com.typesafe.config.ConfigList
): scala.List[BeamConfig.Beam.Debug.StuckAgentDetection.Thresholds$Elm] = {
import scala.collection.JavaConverters._
cl.asScala
.map(
cv =>
BeamConfig.Beam.Debug.StuckAgentDetection
.Thresholds$Elm(cv.asInstanceOf[com.typesafe.config.ConfigObject].toConfig)
)
.toList
}
}
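      // Note (illustrative, not part of the generated code): the millisecond fields above are read with
      // c.getDuration(path, TimeUnit.MILLISECONDS), so the HOCON value may be a bare number (taken as
      // milliseconds) or a duration string, e.g.
      //   com.typesafe.config.ConfigFactory.parseString("checkIntervalMs = 2s")
      //     .getDuration("checkIntervalMs", java.util.concurrent.TimeUnit.MILLISECONDS)  // 2000L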
case class TriggerMeasurer(
enabled: scala.Boolean,
writeStuckAgentDetectionConfig: scala.Boolean
)
object TriggerMeasurer {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Debug.TriggerMeasurer = {
BeamConfig.Beam.Debug.TriggerMeasurer(
enabled = c.hasPathOrNull("enabled") && c.getBoolean("enabled"),
writeStuckAgentDetectionConfig = !c.hasPathOrNull("writeStuckAgentDetectionConfig") || c.getBoolean(
"writeStuckAgentDetectionConfig"
)
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Debug = {
BeamConfig.Beam.Debug(
actor = BeamConfig.Beam.Debug.Actor(
if (c.hasPathOrNull("actor")) c.getConfig("actor")
else com.typesafe.config.ConfigFactory.parseString("actor{}")
),
agentTripScoresInterval =
if (c.hasPathOrNull("agentTripScoresInterval")) c.getInt("agentTripScoresInterval") else 0,
clearRoutedOutstandingWorkEnabled = c.hasPathOrNull("clearRoutedOutstandingWorkEnabled") && c.getBoolean(
"clearRoutedOutstandingWorkEnabled"
),
debugActorTimerIntervalInSec =
if (c.hasPathOrNull("debugActorTimerIntervalInSec")) c.getInt("debugActorTimerIntervalInSec") else 0,
debugEnabled = c.hasPathOrNull("debugEnabled") && c.getBoolean("debugEnabled"),
memoryConsumptionDisplayTimeoutInSec =
if (c.hasPathOrNull("memoryConsumptionDisplayTimeoutInSec"))
c.getInt("memoryConsumptionDisplayTimeoutInSec")
else 0,
secondsToWaitToClearRoutedOutstandingWork =
if (c.hasPathOrNull("secondsToWaitToClearRoutedOutstandingWork"))
c.getInt("secondsToWaitToClearRoutedOutstandingWork")
else 60,
stuckAgentDetection = BeamConfig.Beam.Debug.StuckAgentDetection(
if (c.hasPathOrNull("stuckAgentDetection")) c.getConfig("stuckAgentDetection")
else com.typesafe.config.ConfigFactory.parseString("stuckAgentDetection{}")
),
triggerMeasurer = BeamConfig.Beam.Debug.TriggerMeasurer(
if (c.hasPathOrNull("triggerMeasurer")) c.getConfig("triggerMeasurer")
else com.typesafe.config.ConfigFactory.parseString("triggerMeasurer{}")
)
)
}
}
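  // Note (illustrative, not part of the generated code): the two boolean idioms used throughout encode
  // the default value —
  //   c.hasPathOrNull("x") && c.getBoolean("x")    // false when "x" is absent (e.g. debugEnabled)
  //   !c.hasPathOrNull("x") || c.getBoolean("x")   // true when "x" is absent (e.g. checkMaxNumberOfMessagesEnabled)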
case class Exchange(
scenario: BeamConfig.Beam.Exchange.Scenario
)
object Exchange {
case class Scenario(
convertWgs2Utm: scala.Boolean,
fileFormat: java.lang.String,
folder: java.lang.String,
source: java.lang.String
)
object Scenario {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Exchange.Scenario = {
BeamConfig.Beam.Exchange.Scenario(
convertWgs2Utm = c.hasPathOrNull("convertWgs2Utm") && c.getBoolean("convertWgs2Utm"),
fileFormat = if (c.hasPathOrNull("fileFormat")) c.getString("fileFormat") else "xml",
folder = if (c.hasPathOrNull("folder")) c.getString("folder") else "",
source = if (c.hasPathOrNull("source")) c.getString("source") else "Beam"
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Exchange = {
BeamConfig.Beam.Exchange(
scenario = BeamConfig.Beam.Exchange.Scenario(
if (c.hasPathOrNull("scenario")) c.getConfig("scenario")
else com.typesafe.config.ConfigFactory.parseString("scenario{}")
)
)
}
}
case class Experimental(
optimizer: BeamConfig.Beam.Experimental.Optimizer
)
object Experimental {
case class Optimizer(
enabled: scala.Boolean
)
object Optimizer {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Experimental.Optimizer = {
BeamConfig.Beam.Experimental.Optimizer(
enabled = c.hasPathOrNull("enabled") && c.getBoolean("enabled")
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Experimental = {
BeamConfig.Beam.Experimental(
optimizer = BeamConfig.Beam.Experimental.Optimizer(
if (c.hasPathOrNull("optimizer")) c.getConfig("optimizer")
else com.typesafe.config.ConfigFactory.parseString("optimizer{}")
)
)
}
}
case class Logger(
keepConsoleAppenderOn: scala.Boolean
)
object Logger {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Logger = {
BeamConfig.Beam.Logger(
keepConsoleAppenderOn = !c.hasPathOrNull("keepConsoleAppenderOn") || c.getBoolean("keepConsoleAppenderOn")
)
}
}
case class Metrics(
level: java.lang.String
)
object Metrics {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Metrics = {
BeamConfig.Beam.Metrics(
level = if (c.hasPathOrNull("level")) c.getString("level") else "verbose"
)
}
}
case class Outputs(
addTimestampToOutputDirectory: scala.Boolean,
baseOutputDirectory: java.lang.String,
defaultWriteInterval: scala.Int,
displayPerformanceTimings: scala.Boolean,
events: BeamConfig.Beam.Outputs.Events,
generalizedLinkStats: BeamConfig.Beam.Outputs.GeneralizedLinkStats,
generalizedLinkStatsInterval: scala.Int,
stats: BeamConfig.Beam.Outputs.Stats,
writeEventsInterval: scala.Int,
writeGraphs: scala.Boolean,
writeLinkTraversalInterval: scala.Int,
writePlansInterval: scala.Int,
writeSkimsInterval: scala.Int
)
object Outputs {
case class Events(
eventsToWrite: java.lang.String,
fileOutputFormats: java.lang.String
)
object Events {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Outputs.Events = {
BeamConfig.Beam.Outputs.Events(
eventsToWrite =
if (c.hasPathOrNull("eventsToWrite")) c.getString("eventsToWrite")
else
"ActivityEndEvent,ActivityStartEvent,PersonEntersVehicleEvent,PersonLeavesVehicleEvent,ModeChoiceEvent,PathTraversalEvent,ReserveRideHailEvent,ReplanningEvent,RefuelEvent,ParkEvent,LeavingParkingEvent",
fileOutputFormats = if (c.hasPathOrNull("fileOutputFormats")) c.getString("fileOutputFormats") else "csv"
)
}
}
case class GeneralizedLinkStats(
endTime: scala.Int,
startTime: scala.Int
)
object GeneralizedLinkStats {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Outputs.GeneralizedLinkStats = {
BeamConfig.Beam.Outputs.GeneralizedLinkStats(
endTime = if (c.hasPathOrNull("endTime")) c.getInt("endTime") else 32400,
startTime = if (c.hasPathOrNull("startTime")) c.getInt("startTime") else 25200
)
}
}
case class Stats(
binSize: scala.Int
)
object Stats {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Outputs.Stats = {
BeamConfig.Beam.Outputs.Stats(
binSize = if (c.hasPathOrNull("binSize")) c.getInt("binSize") else 3600
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Outputs = {
BeamConfig.Beam.Outputs(
addTimestampToOutputDirectory = !c.hasPathOrNull("addTimestampToOutputDirectory") || c.getBoolean(
"addTimestampToOutputDirectory"
),
baseOutputDirectory =
if (c.hasPathOrNull("baseOutputDirectory")) c.getString("baseOutputDirectory") else "output",
defaultWriteInterval = if (c.hasPathOrNull("defaultWriteInterval")) c.getInt("defaultWriteInterval") else 1,
displayPerformanceTimings = c.hasPathOrNull("displayPerformanceTimings") && c.getBoolean(
"displayPerformanceTimings"
),
events = BeamConfig.Beam.Outputs.Events(
if (c.hasPathOrNull("events")) c.getConfig("events")
else com.typesafe.config.ConfigFactory.parseString("events{}")
),
generalizedLinkStats = BeamConfig.Beam.Outputs.GeneralizedLinkStats(
if (c.hasPathOrNull("generalizedLinkStats")) c.getConfig("generalizedLinkStats")
else com.typesafe.config.ConfigFactory.parseString("generalizedLinkStats{}")
),
generalizedLinkStatsInterval =
if (c.hasPathOrNull("generalizedLinkStatsInterval")) c.getInt("generalizedLinkStatsInterval") else 0,
stats = BeamConfig.Beam.Outputs.Stats(
if (c.hasPathOrNull("stats")) c.getConfig("stats")
else com.typesafe.config.ConfigFactory.parseString("stats{}")
),
writeEventsInterval = if (c.hasPathOrNull("writeEventsInterval")) c.getInt("writeEventsInterval") else 1,
writeGraphs = !c.hasPathOrNull("writeGraphs") || c.getBoolean("writeGraphs"),
writeLinkTraversalInterval =
if (c.hasPathOrNull("writeLinkTraversalInterval")) c.getInt("writeLinkTraversalInterval") else 0,
writePlansInterval = if (c.hasPathOrNull("writePlansInterval")) c.getInt("writePlansInterval") else 0,
writeSkimsInterval = if (c.hasPathOrNull("writeSkimsInterval")) c.getInt("writeSkimsInterval") else 0
)
}
}
case class Physsim(
eventsForFullVersionOfVia: scala.Boolean,
eventsSampling: scala.Double,
flowCapacityFactor: scala.Double,
initializeRouterWithFreeFlowTimes: scala.Boolean,
inputNetworkFilePath: java.lang.String,
jdeqsim: BeamConfig.Beam.Physsim.Jdeqsim,
linkStatsBinSize: scala.Int,
linkStatsWriteInterval: scala.Int,
ptSampleSize: scala.Double,
quick_fix_minCarSpeedInMetersPerSecond: scala.Double,
skipPhysSim: scala.Boolean,
storageCapacityFactor: scala.Double,
writeEventsInterval: scala.Int,
writeMATSimNetwork: scala.Boolean,
writePlansInterval: scala.Int
)
object Physsim {
case class Jdeqsim(
agentSimPhysSimInterfaceDebugger: BeamConfig.Beam.Physsim.Jdeqsim.AgentSimPhysSimInterfaceDebugger,
cacc: BeamConfig.Beam.Physsim.Jdeqsim.Cacc
)
object Jdeqsim {
case class AgentSimPhysSimInterfaceDebugger(
enabled: scala.Boolean
)
object AgentSimPhysSimInterfaceDebugger {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Physsim.Jdeqsim.AgentSimPhysSimInterfaceDebugger = {
BeamConfig.Beam.Physsim.Jdeqsim.AgentSimPhysSimInterfaceDebugger(
enabled = c.hasPathOrNull("enabled") && c.getBoolean("enabled")
)
}
}
case class Cacc(
capacityPlansWriteInterval: scala.Int,
enabled: scala.Boolean,
minRoadCapacity: scala.Int,
minSpeedMetersPerSec: scala.Int,
speedAdjustmentFactor: scala.Double
)
object Cacc {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Physsim.Jdeqsim.Cacc = {
BeamConfig.Beam.Physsim.Jdeqsim.Cacc(
capacityPlansWriteInterval =
if (c.hasPathOrNull("capacityPlansWriteInterval")) c.getInt("capacityPlansWriteInterval") else 0,
enabled = c.hasPathOrNull("enabled") && c.getBoolean("enabled"),
minRoadCapacity = if (c.hasPathOrNull("minRoadCapacity")) c.getInt("minRoadCapacity") else 2000,
minSpeedMetersPerSec =
if (c.hasPathOrNull("minSpeedMetersPerSec")) c.getInt("minSpeedMetersPerSec") else 20,
speedAdjustmentFactor =
if (c.hasPathOrNull("speedAdjustmentFactor")) c.getDouble("speedAdjustmentFactor") else 1.0
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Physsim.Jdeqsim = {
BeamConfig.Beam.Physsim.Jdeqsim(
agentSimPhysSimInterfaceDebugger = BeamConfig.Beam.Physsim.Jdeqsim.AgentSimPhysSimInterfaceDebugger(
if (c.hasPathOrNull("agentSimPhysSimInterfaceDebugger")) c.getConfig("agentSimPhysSimInterfaceDebugger")
else com.typesafe.config.ConfigFactory.parseString("agentSimPhysSimInterfaceDebugger{}")
),
cacc = BeamConfig.Beam.Physsim.Jdeqsim.Cacc(
if (c.hasPathOrNull("cacc")) c.getConfig("cacc")
else com.typesafe.config.ConfigFactory.parseString("cacc{}")
)
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Physsim = {
BeamConfig.Beam.Physsim(
eventsForFullVersionOfVia = !c.hasPathOrNull("eventsForFullVersionOfVia") || c.getBoolean(
"eventsForFullVersionOfVia"
),
eventsSampling = if (c.hasPathOrNull("eventsSampling")) c.getDouble("eventsSampling") else 1.0,
flowCapacityFactor = if (c.hasPathOrNull("flowCapacityFactor")) c.getDouble("flowCapacityFactor") else 1.0,
initializeRouterWithFreeFlowTimes = !c.hasPathOrNull("initializeRouterWithFreeFlowTimes") || c.getBoolean(
"initializeRouterWithFreeFlowTimes"
),
inputNetworkFilePath =
if (c.hasPathOrNull("inputNetworkFilePath")) c.getString("inputNetworkFilePath")
else "/test/input/beamville/r5/physsim-network.xml",
jdeqsim = BeamConfig.Beam.Physsim.Jdeqsim(
if (c.hasPathOrNull("jdeqsim")) c.getConfig("jdeqsim")
else com.typesafe.config.ConfigFactory.parseString("jdeqsim{}")
),
linkStatsBinSize = if (c.hasPathOrNull("linkStatsBinSize")) c.getInt("linkStatsBinSize") else 3600,
linkStatsWriteInterval =
if (c.hasPathOrNull("linkStatsWriteInterval")) c.getInt("linkStatsWriteInterval") else 0,
ptSampleSize = if (c.hasPathOrNull("ptSampleSize")) c.getDouble("ptSampleSize") else 1.0,
quick_fix_minCarSpeedInMetersPerSecond =
if (c.hasPathOrNull("quick_fix_minCarSpeedInMetersPerSecond"))
c.getDouble("quick_fix_minCarSpeedInMetersPerSecond")
else 0.5,
skipPhysSim = c.hasPathOrNull("skipPhysSim") && c.getBoolean("skipPhysSim"),
storageCapacityFactor =
if (c.hasPathOrNull("storageCapacityFactor")) c.getDouble("storageCapacityFactor") else 1.0,
writeEventsInterval = if (c.hasPathOrNull("writeEventsInterval")) c.getInt("writeEventsInterval") else 0,
writeMATSimNetwork = !c.hasPathOrNull("writeMATSimNetwork") || c.getBoolean("writeMATSimNetwork"),
writePlansInterval = if (c.hasPathOrNull("writePlansInterval")) c.getInt("writePlansInterval") else 0
)
}
}
case class Replanning(
ModuleProbability_1: scala.Double,
ModuleProbability_2: scala.Double,
ModuleProbability_3: scala.Double,
ModuleProbability_4: scala.Int,
Module_1: java.lang.String,
Module_2: java.lang.String,
Module_3: java.lang.String,
Module_4: java.lang.String,
fractionOfIterationsToDisableInnovation: scala.Double,
maxAgentPlanMemorySize: scala.Int
)
object Replanning {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Replanning = {
BeamConfig.Beam.Replanning(
ModuleProbability_1 = if (c.hasPathOrNull("ModuleProbability_1")) c.getDouble("ModuleProbability_1") else 0.8,
ModuleProbability_2 = if (c.hasPathOrNull("ModuleProbability_2")) c.getDouble("ModuleProbability_2") else 0.1,
ModuleProbability_3 = if (c.hasPathOrNull("ModuleProbability_3")) c.getDouble("ModuleProbability_3") else 0.1,
ModuleProbability_4 = if (c.hasPathOrNull("ModuleProbability_4")) c.getInt("ModuleProbability_4") else 0,
Module_1 = if (c.hasPathOrNull("Module_1")) c.getString("Module_1") else "SelectExpBeta",
Module_2 = if (c.hasPathOrNull("Module_2")) c.getString("Module_2") else "ClearRoutes",
Module_3 = if (c.hasPathOrNull("Module_3")) c.getString("Module_3") else "ClearModes",
Module_4 = if (c.hasPathOrNull("Module_4")) c.getString("Module_4") else "TimeMutator",
fractionOfIterationsToDisableInnovation =
if (c.hasPathOrNull("fractionOfIterationsToDisableInnovation"))
c.getDouble("fractionOfIterationsToDisableInnovation")
else Double.PositiveInfinity,
maxAgentPlanMemorySize =
if (c.hasPathOrNull("maxAgentPlanMemorySize")) c.getInt("maxAgentPlanMemorySize") else 5
)
}
}
case class Routing(
baseDate: java.lang.String,
r5: BeamConfig.Beam.Routing.R5,
startingIterationForTravelTimesMSA: scala.Int,
transitOnStreetNetwork: scala.Boolean
)
object Routing {
case class R5(
departureWindow: scala.Double,
directory: java.lang.String,
mNetBuilder: BeamConfig.Beam.Routing.R5.MNetBuilder,
numberOfSamples: scala.Int,
osmFile: java.lang.String,
osmMapdbFile: java.lang.String
)
object R5 {
case class MNetBuilder(
fromCRS: java.lang.String,
toCRS: java.lang.String
)
object MNetBuilder {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Routing.R5.MNetBuilder = {
BeamConfig.Beam.Routing.R5.MNetBuilder(
fromCRS = if (c.hasPathOrNull("fromCRS")) c.getString("fromCRS") else "EPSG:4326",
toCRS = if (c.hasPathOrNull("toCRS")) c.getString("toCRS") else "EPSG:26910"
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Routing.R5 = {
BeamConfig.Beam.Routing.R5(
departureWindow = if (c.hasPathOrNull("departureWindow")) c.getDouble("departureWindow") else 15.0,
directory = if (c.hasPathOrNull("directory")) c.getString("directory") else "/test/input/beamville/r5",
mNetBuilder = BeamConfig.Beam.Routing.R5.MNetBuilder(
if (c.hasPathOrNull("mNetBuilder")) c.getConfig("mNetBuilder")
else com.typesafe.config.ConfigFactory.parseString("mNetBuilder{}")
),
numberOfSamples = if (c.hasPathOrNull("numberOfSamples")) c.getInt("numberOfSamples") else 1,
osmFile =
if (c.hasPathOrNull("osmFile")) c.getString("osmFile") else "/test/input/beamville/r5/beamville.osm.pbf",
osmMapdbFile =
if (c.hasPathOrNull("osmMapdbFile")) c.getString("osmMapdbFile") else "/test/input/beamville/r5/osm.mapdb"
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Routing = {
BeamConfig.Beam.Routing(
baseDate = if (c.hasPathOrNull("baseDate")) c.getString("baseDate") else "2016-10-17T00:00:00-07:00",
r5 = BeamConfig.Beam.Routing.R5(
if (c.hasPathOrNull("r5")) c.getConfig("r5") else com.typesafe.config.ConfigFactory.parseString("r5{}")
),
startingIterationForTravelTimesMSA =
if (c.hasPathOrNull("startingIterationForTravelTimesMSA")) c.getInt("startingIterationForTravelTimesMSA")
else 0,
transitOnStreetNetwork = !c.hasPathOrNull("transitOnStreetNetwork") || c.getBoolean("transitOnStreetNetwork")
)
}
}
case class Spatial(
boundingBoxBuffer: scala.Int,
localCRS: java.lang.String
)
object Spatial {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.Spatial = {
BeamConfig.Beam.Spatial(
boundingBoxBuffer = if (c.hasPathOrNull("boundingBoxBuffer")) c.getInt("boundingBoxBuffer") else 5000,
localCRS = if (c.hasPathOrNull("localCRS")) c.getString("localCRS") else "epsg:32631"
)
}
}
case class WarmStart(
enabled: scala.Boolean,
path: java.lang.String
)
object WarmStart {
def apply(c: com.typesafe.config.Config): BeamConfig.Beam.WarmStart = {
BeamConfig.Beam.WarmStart(
enabled = c.hasPathOrNull("enabled") && c.getBoolean("enabled"),
path = if (c.hasPathOrNull("path")) c.getString("path") else ""
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Beam = {
BeamConfig.Beam(
agentsim = BeamConfig.Beam.Agentsim(
if (c.hasPathOrNull("agentsim")) c.getConfig("agentsim")
else com.typesafe.config.ConfigFactory.parseString("agentsim{}")
),
calibration = BeamConfig.Beam.Calibration(
if (c.hasPathOrNull("calibration")) c.getConfig("calibration")
else com.typesafe.config.ConfigFactory.parseString("calibration{}")
),
cluster = BeamConfig.Beam.Cluster(
if (c.hasPathOrNull("cluster")) c.getConfig("cluster")
else com.typesafe.config.ConfigFactory.parseString("cluster{}")
),
debug = BeamConfig.Beam.Debug(
if (c.hasPathOrNull("debug")) c.getConfig("debug")
else com.typesafe.config.ConfigFactory.parseString("debug{}")
),
exchange = BeamConfig.Beam.Exchange(
if (c.hasPathOrNull("exchange")) c.getConfig("exchange")
else com.typesafe.config.ConfigFactory.parseString("exchange{}")
),
experimental = BeamConfig.Beam.Experimental(
if (c.hasPathOrNull("experimental")) c.getConfig("experimental")
else com.typesafe.config.ConfigFactory.parseString("experimental{}")
),
inputDirectory =
if (c.hasPathOrNull("inputDirectory")) c.getString("inputDirectory") else "/test/input/beamville",
logger = BeamConfig.Beam.Logger(
if (c.hasPathOrNull("logger")) c.getConfig("logger")
else com.typesafe.config.ConfigFactory.parseString("logger{}")
),
metrics = BeamConfig.Beam.Metrics(
if (c.hasPathOrNull("metrics")) c.getConfig("metrics")
else com.typesafe.config.ConfigFactory.parseString("metrics{}")
),
outputs = BeamConfig.Beam.Outputs(
if (c.hasPathOrNull("outputs")) c.getConfig("outputs")
else com.typesafe.config.ConfigFactory.parseString("outputs{}")
),
physsim = BeamConfig.Beam.Physsim(
if (c.hasPathOrNull("physsim")) c.getConfig("physsim")
else com.typesafe.config.ConfigFactory.parseString("physsim{}")
),
replanning = BeamConfig.Beam.Replanning(
if (c.hasPathOrNull("replanning")) c.getConfig("replanning")
else com.typesafe.config.ConfigFactory.parseString("replanning{}")
),
routing = BeamConfig.Beam.Routing(
if (c.hasPathOrNull("routing")) c.getConfig("routing")
else com.typesafe.config.ConfigFactory.parseString("routing{}")
),
spatial = BeamConfig.Beam.Spatial(
if (c.hasPathOrNull("spatial")) c.getConfig("spatial")
else com.typesafe.config.ConfigFactory.parseString("spatial{}")
),
useLocalWorker = !c.hasPathOrNull("useLocalWorker") || c.getBoolean("useLocalWorker"),
warmStart = BeamConfig.Beam.WarmStart(
if (c.hasPathOrNull("warmStart")) c.getConfig("warmStart")
else com.typesafe.config.ConfigFactory.parseString("warmStart{}")
)
)
}
}
case class Matsim(
conversion: BeamConfig.Matsim.Conversion,
modules: BeamConfig.Matsim.Modules
)
object Matsim {
case class Conversion(
defaultHouseholdIncome: BeamConfig.Matsim.Conversion.DefaultHouseholdIncome,
generateVehicles: scala.Boolean,
matsimNetworkFile: java.lang.String,
osmFile: java.lang.String,
populationFile: java.lang.String,
scenarioDirectory: java.lang.String,
shapeConfig: BeamConfig.Matsim.Conversion.ShapeConfig,
vehiclesFile: java.lang.String
)
object Conversion {
case class DefaultHouseholdIncome(
currency: java.lang.String,
period: java.lang.String,
value: scala.Int
)
object DefaultHouseholdIncome {
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Conversion.DefaultHouseholdIncome = {
BeamConfig.Matsim.Conversion.DefaultHouseholdIncome(
currency = if (c.hasPathOrNull("currency")) c.getString("currency") else "usd",
period = if (c.hasPathOrNull("period")) c.getString("period") else "year",
value = if (c.hasPathOrNull("value")) c.getInt("value") else 50000
)
}
}
case class ShapeConfig(
shapeFile: java.lang.String,
tazIdFieldName: java.lang.String
)
object ShapeConfig {
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Conversion.ShapeConfig = {
BeamConfig.Matsim.Conversion.ShapeConfig(
shapeFile = if (c.hasPathOrNull("shapeFile")) c.getString("shapeFile") else "tz46_d00.shp",
tazIdFieldName = if (c.hasPathOrNull("tazIdFieldName")) c.getString("tazIdFieldName") else "TZ46_D00_I"
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Conversion = {
BeamConfig.Matsim.Conversion(
defaultHouseholdIncome = BeamConfig.Matsim.Conversion.DefaultHouseholdIncome(
if (c.hasPathOrNull("defaultHouseholdIncome")) c.getConfig("defaultHouseholdIncome")
else com.typesafe.config.ConfigFactory.parseString("defaultHouseholdIncome{}")
),
generateVehicles = !c.hasPathOrNull("generateVehicles") || c.getBoolean("generateVehicles"),
matsimNetworkFile =
if (c.hasPathOrNull("matsimNetworkFile")) c.getString("matsimNetworkFile") else "Siouxfalls_network_PT.xml",
osmFile = if (c.hasPathOrNull("osmFile")) c.getString("osmFile") else "south-dakota-latest.osm.pbf",
populationFile =
if (c.hasPathOrNull("populationFile")) c.getString("populationFile") else "Siouxfalls_population.xml",
scenarioDirectory =
if (c.hasPathOrNull("scenarioDirectory")) c.getString("scenarioDirectory")
else "/path/to/scenario/directory",
shapeConfig = BeamConfig.Matsim.Conversion.ShapeConfig(
if (c.hasPathOrNull("shapeConfig")) c.getConfig("shapeConfig")
else com.typesafe.config.ConfigFactory.parseString("shapeConfig{}")
),
vehiclesFile = if (c.hasPathOrNull("vehiclesFile")) c.getString("vehiclesFile") else "Siouxfalls_vehicles.xml"
)
}
}
case class Modules(
changeMode: BeamConfig.Matsim.Modules.ChangeMode,
controler: BeamConfig.Matsim.Modules.Controler,
counts: BeamConfig.Matsim.Modules.Counts,
global: BeamConfig.Matsim.Modules.Global,
households: BeamConfig.Matsim.Modules.Households,
network: BeamConfig.Matsim.Modules.Network,
parallelEventHandling: BeamConfig.Matsim.Modules.ParallelEventHandling,
planCalcScore: BeamConfig.Matsim.Modules.PlanCalcScore,
plans: BeamConfig.Matsim.Modules.Plans,
qsim: BeamConfig.Matsim.Modules.Qsim,
strategy: BeamConfig.Matsim.Modules.Strategy,
transit: BeamConfig.Matsim.Modules.Transit,
vehicles: BeamConfig.Matsim.Modules.Vehicles
)
object Modules {
case class ChangeMode(
modes: java.lang.String
)
object ChangeMode {
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules.ChangeMode = {
BeamConfig.Matsim.Modules.ChangeMode(
modes = if (c.hasPathOrNull("modes")) c.getString("modes") else "car,pt"
)
}
}
case class Controler(
eventsFileFormat: java.lang.String,
firstIteration: scala.Int,
lastIteration: scala.Int,
mobsim: java.lang.String,
outputDirectory: java.lang.String,
overwriteFiles: java.lang.String
)
object Controler {
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules.Controler = {
BeamConfig.Matsim.Modules.Controler(
eventsFileFormat = if (c.hasPathOrNull("eventsFileFormat")) c.getString("eventsFileFormat") else "xml",
firstIteration = if (c.hasPathOrNull("firstIteration")) c.getInt("firstIteration") else 0,
lastIteration = if (c.hasPathOrNull("lastIteration")) c.getInt("lastIteration") else 0,
mobsim = if (c.hasPathOrNull("mobsim")) c.getString("mobsim") else "metasim",
outputDirectory = if (c.hasPathOrNull("outputDirectory")) c.getString("outputDirectory") else "",
overwriteFiles =
if (c.hasPathOrNull("overwriteFiles")) c.getString("overwriteFiles") else "overwriteExistingFiles"
)
}
}
case class Counts(
averageCountsOverIterations: scala.Int,
countsScaleFactor: scala.Double,
inputCountsFile: java.lang.String,
outputformat: java.lang.String,
writeCountsInterval: scala.Int
)
object Counts {
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules.Counts = {
BeamConfig.Matsim.Modules.Counts(
averageCountsOverIterations =
if (c.hasPathOrNull("averageCountsOverIterations")) c.getInt("averageCountsOverIterations") else 0,
countsScaleFactor = if (c.hasPathOrNull("countsScaleFactor")) c.getDouble("countsScaleFactor") else 10.355,
inputCountsFile = if (c.hasPathOrNull("inputCountsFile")) c.getString("inputCountsFile") else "",
outputformat = if (c.hasPathOrNull("outputformat")) c.getString("outputformat") else "all",
writeCountsInterval = if (c.hasPathOrNull("writeCountsInterval")) c.getInt("writeCountsInterval") else 0
)
}
}
case class Global(
coordinateSystem: java.lang.String,
randomSeed: scala.Int
)
object Global {
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules.Global = {
BeamConfig.Matsim.Modules.Global(
coordinateSystem = if (c.hasPathOrNull("coordinateSystem")) c.getString("coordinateSystem") else "Atlantis",
randomSeed = if (c.hasPathOrNull("randomSeed")) c.getInt("randomSeed") else 4711
)
}
}
case class Households(
inputFile: java.lang.String,
inputHouseholdAttributesFile: java.lang.String
)
object Households {
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules.Households = {
BeamConfig.Matsim.Modules.Households(
inputFile =
if (c.hasPathOrNull("inputFile")) c.getString("inputFile") else "/test/input/beamville/households.xml",
inputHouseholdAttributesFile =
if (c.hasPathOrNull("inputHouseholdAttributesFile")) c.getString("inputHouseholdAttributesFile")
else "/test/input/beamville/householdAttributes.xml"
)
}
}
case class Network(
inputNetworkFile: java.lang.String
)
object Network {
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules.Network = {
BeamConfig.Matsim.Modules.Network(
inputNetworkFile =
if (c.hasPathOrNull("inputNetworkFile")) c.getString("inputNetworkFile")
else "/test/input/beamville/physsim-network.xml"
)
}
}
case class ParallelEventHandling(
estimatedNumberOfEvents: scala.Int,
numberOfThreads: scala.Int,
oneThreadPerHandler: scala.Boolean,
synchronizeOnSimSteps: scala.Boolean
)
object ParallelEventHandling {
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules.ParallelEventHandling = {
BeamConfig.Matsim.Modules.ParallelEventHandling(
estimatedNumberOfEvents =
if (c.hasPathOrNull("estimatedNumberOfEvents")) c.getInt("estimatedNumberOfEvents") else 1000000000,
numberOfThreads = if (c.hasPathOrNull("numberOfThreads")) c.getInt("numberOfThreads") else 1,
oneThreadPerHandler = c.hasPathOrNull("oneThreadPerHandler") && c.getBoolean("oneThreadPerHandler"),
synchronizeOnSimSteps = c.hasPathOrNull("synchronizeOnSimSteps") && c.getBoolean("synchronizeOnSimSteps")
)
}
}
case class PlanCalcScore(
BrainExpBeta: scala.Long,
earlyDeparture: scala.Long,
lateArrival: scala.Long,
learningRate: scala.Long,
parameterset: scala.List[BeamConfig.Matsim.Modules.PlanCalcScore.Parameterset$Elm],
performing: scala.Long,
traveling: scala.Long,
waiting: scala.Long,
writeExperiencedPlans: scala.Boolean
)
object PlanCalcScore {
case class Parameterset$Elm(
activityType: java.lang.String,
priority: scala.Int,
scoringThisActivityAtAll: scala.Boolean,
`type`: java.lang.String,
typicalDuration: java.lang.String,
typicalDurationScoreComputation: java.lang.String
)
object Parameterset$Elm {
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules.PlanCalcScore.Parameterset$Elm = {
BeamConfig.Matsim.Modules.PlanCalcScore.Parameterset$Elm(
activityType = if (c.hasPathOrNull("activityType")) c.getString("activityType") else "Home",
priority = if (c.hasPathOrNull("priority")) c.getInt("priority") else 1,
scoringThisActivityAtAll = !c.hasPathOrNull("scoringThisActivityAtAll") || c.getBoolean(
"scoringThisActivityAtAll"
),
`type` = if (c.hasPathOrNull("type")) c.getString("type") else "activityParams",
typicalDuration = if (c.hasPathOrNull("typicalDuration")) c.getString("typicalDuration") else "01:00:00",
typicalDurationScoreComputation =
if (c.hasPathOrNull("typicalDurationScoreComputation")) c.getString("typicalDurationScoreComputation")
else "uniform"
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules.PlanCalcScore = {
BeamConfig.Matsim.Modules.PlanCalcScore(
BrainExpBeta =
if (c.hasPathOrNull("BrainExpBeta"))
c.getDuration("BrainExpBeta", java.util.concurrent.TimeUnit.MILLISECONDS)
else 2,
earlyDeparture =
if (c.hasPathOrNull("earlyDeparture"))
c.getDuration("earlyDeparture", java.util.concurrent.TimeUnit.MILLISECONDS)
else 0,
lateArrival =
if (c.hasPathOrNull("lateArrival"))
c.getDuration("lateArrival", java.util.concurrent.TimeUnit.MILLISECONDS)
else -18,
learningRate =
if (c.hasPathOrNull("learningRate"))
c.getDuration("learningRate", java.util.concurrent.TimeUnit.MILLISECONDS)
else 1,
parameterset = $_LBeamConfig_Matsim_Modules_PlanCalcScore_Parameterset$Elm(c.getList("parameterset")),
performing =
if (c.hasPathOrNull("performing")) c.getDuration("performing", java.util.concurrent.TimeUnit.MILLISECONDS)
else 6,
traveling =
if (c.hasPathOrNull("traveling")) c.getDuration("traveling", java.util.concurrent.TimeUnit.MILLISECONDS)
else -6,
waiting =
if (c.hasPathOrNull("waiting")) c.getDuration("waiting", java.util.concurrent.TimeUnit.MILLISECONDS)
else 0,
writeExperiencedPlans = !c.hasPathOrNull("writeExperiencedPlans") || c.getBoolean("writeExperiencedPlans")
)
}
private def $_LBeamConfig_Matsim_Modules_PlanCalcScore_Parameterset$Elm(
cl: com.typesafe.config.ConfigList
): scala.List[BeamConfig.Matsim.Modules.PlanCalcScore.Parameterset$Elm] = {
import scala.collection.JavaConverters._
cl.asScala
.map(
cv =>
BeamConfig.Matsim.Modules.PlanCalcScore
.Parameterset$Elm(cv.asInstanceOf[com.typesafe.config.ConfigObject].toConfig)
)
.toList
}
}
case class Plans(
inputPersonAttributesFile: java.lang.String,
inputPlansFile: java.lang.String
)
object Plans {
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules.Plans = {
BeamConfig.Matsim.Modules.Plans(
inputPersonAttributesFile =
if (c.hasPathOrNull("inputPersonAttributesFile")) c.getString("inputPersonAttributesFile")
else "/test/input/beamville/populationAttributes.xml",
inputPlansFile =
if (c.hasPathOrNull("inputPlansFile")) c.getString("inputPlansFile")
else "/test/input/beamville/population.xml"
)
}
}
case class Qsim(
endTime: java.lang.String,
snapshotperiod: java.lang.String,
startTime: java.lang.String
)
object Qsim {
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules.Qsim = {
BeamConfig.Matsim.Modules.Qsim(
endTime = if (c.hasPathOrNull("endTime")) c.getString("endTime") else "30:00:00",
snapshotperiod = if (c.hasPathOrNull("snapshotperiod")) c.getString("snapshotperiod") else "00:00:00",
startTime = if (c.hasPathOrNull("startTime")) c.getString("startTime") else "00:00:00"
)
}
}
case class Strategy(
ModuleProbability_1: scala.Int,
ModuleProbability_2: scala.Int,
ModuleProbability_3: scala.Int,
ModuleProbability_4: scala.Int,
Module_1: java.lang.String,
Module_2: java.lang.String,
Module_3: java.lang.String,
Module_4: java.lang.String,
fractionOfIterationsToDisableInnovation: scala.Int,
maxAgentPlanMemorySize: scala.Int,
parameterset: scala.List[BeamConfig.Matsim.Modules.Strategy.Parameterset$Elm],
planSelectorForRemoval: java.lang.String
)
object Strategy {
case class Parameterset$Elm(
disableAfterIteration: scala.Int,
strategyName: java.lang.String,
`type`: java.lang.String,
weight: scala.Int
)
object Parameterset$Elm {
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules.Strategy.Parameterset$Elm = {
BeamConfig.Matsim.Modules.Strategy.Parameterset$Elm(
disableAfterIteration =
if (c.hasPathOrNull("disableAfterIteration")) c.getInt("disableAfterIteration") else -1,
strategyName = if (c.hasPathOrNull("strategyName")) c.getString("strategyName") else "",
`type` = if (c.hasPathOrNull("type")) c.getString("type") else "strategysettings",
weight = if (c.hasPathOrNull("weight")) c.getInt("weight") else 0
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules.Strategy = {
BeamConfig.Matsim.Modules.Strategy(
ModuleProbability_1 = if (c.hasPathOrNull("ModuleProbability_1")) c.getInt("ModuleProbability_1") else 0,
ModuleProbability_2 = if (c.hasPathOrNull("ModuleProbability_2")) c.getInt("ModuleProbability_2") else 0,
ModuleProbability_3 = if (c.hasPathOrNull("ModuleProbability_3")) c.getInt("ModuleProbability_3") else 0,
ModuleProbability_4 = if (c.hasPathOrNull("ModuleProbability_4")) c.getInt("ModuleProbability_4") else 0,
Module_1 = if (c.hasPathOrNull("Module_1")) c.getString("Module_1") else "",
Module_2 = if (c.hasPathOrNull("Module_2")) c.getString("Module_2") else "",
Module_3 = if (c.hasPathOrNull("Module_3")) c.getString("Module_3") else "",
Module_4 = if (c.hasPathOrNull("Module_4")) c.getString("Module_4") else "",
fractionOfIterationsToDisableInnovation =
if (c.hasPathOrNull("fractionOfIterationsToDisableInnovation"))
c.getInt("fractionOfIterationsToDisableInnovation")
else 999999,
maxAgentPlanMemorySize =
if (c.hasPathOrNull("maxAgentPlanMemorySize")) c.getInt("maxAgentPlanMemorySize") else 5,
parameterset = $_LBeamConfig_Matsim_Modules_Strategy_Parameterset$Elm(c.getList("parameterset")),
planSelectorForRemoval =
if (c.hasPathOrNull("planSelectorForRemoval")) c.getString("planSelectorForRemoval")
else "WorstPlanForRemovalSelector"
)
}
private def $_LBeamConfig_Matsim_Modules_Strategy_Parameterset$Elm(
cl: com.typesafe.config.ConfigList
): scala.List[BeamConfig.Matsim.Modules.Strategy.Parameterset$Elm] = {
import scala.collection.JavaConverters._
cl.asScala
.map(
cv =>
BeamConfig.Matsim.Modules.Strategy
.Parameterset$Elm(cv.asInstanceOf[com.typesafe.config.ConfigObject].toConfig)
)
.toList
}
}
case class Transit(
transitModes: java.lang.String,
useTransit: scala.Boolean,
vehiclesFile: java.lang.String
)
object Transit {
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules.Transit = {
BeamConfig.Matsim.Modules.Transit(
transitModes = if (c.hasPathOrNull("transitModes")) c.getString("transitModes") else "pt",
useTransit = c.hasPathOrNull("useTransit") && c.getBoolean("useTransit"),
vehiclesFile = if (c.hasPathOrNull("vehiclesFile")) c.getString("vehiclesFile") else ""
)
}
}
case class Vehicles(
vehiclesFile: java.lang.String
)
object Vehicles {
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules.Vehicles = {
BeamConfig.Matsim.Modules.Vehicles(
vehiclesFile = if (c.hasPathOrNull("vehiclesFile")) c.getString("vehiclesFile") else ""
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim.Modules = {
BeamConfig.Matsim.Modules(
changeMode = BeamConfig.Matsim.Modules.ChangeMode(
if (c.hasPathOrNull("changeMode")) c.getConfig("changeMode")
else com.typesafe.config.ConfigFactory.parseString("changeMode{}")
),
controler = BeamConfig.Matsim.Modules.Controler(
if (c.hasPathOrNull("controler")) c.getConfig("controler")
else com.typesafe.config.ConfigFactory.parseString("controler{}")
),
counts = BeamConfig.Matsim.Modules.Counts(
if (c.hasPathOrNull("counts")) c.getConfig("counts")
else com.typesafe.config.ConfigFactory.parseString("counts{}")
),
global = BeamConfig.Matsim.Modules.Global(
if (c.hasPathOrNull("global")) c.getConfig("global")
else com.typesafe.config.ConfigFactory.parseString("global{}")
),
households = BeamConfig.Matsim.Modules.Households(
if (c.hasPathOrNull("households")) c.getConfig("households")
else com.typesafe.config.ConfigFactory.parseString("households{}")
),
network = BeamConfig.Matsim.Modules.Network(
if (c.hasPathOrNull("network")) c.getConfig("network")
else com.typesafe.config.ConfigFactory.parseString("network{}")
),
parallelEventHandling = BeamConfig.Matsim.Modules.ParallelEventHandling(
if (c.hasPathOrNull("parallelEventHandling")) c.getConfig("parallelEventHandling")
else com.typesafe.config.ConfigFactory.parseString("parallelEventHandling{}")
),
planCalcScore = BeamConfig.Matsim.Modules.PlanCalcScore(
if (c.hasPathOrNull("planCalcScore")) c.getConfig("planCalcScore")
else com.typesafe.config.ConfigFactory.parseString("planCalcScore{}")
),
plans = BeamConfig.Matsim.Modules.Plans(
if (c.hasPathOrNull("plans")) c.getConfig("plans")
else com.typesafe.config.ConfigFactory.parseString("plans{}")
),
qsim = BeamConfig.Matsim.Modules.Qsim(
if (c.hasPathOrNull("qsim")) c.getConfig("qsim")
else com.typesafe.config.ConfigFactory.parseString("qsim{}")
),
strategy = BeamConfig.Matsim.Modules.Strategy(
if (c.hasPathOrNull("strategy")) c.getConfig("strategy")
else com.typesafe.config.ConfigFactory.parseString("strategy{}")
),
transit = BeamConfig.Matsim.Modules.Transit(
if (c.hasPathOrNull("transit")) c.getConfig("transit")
else com.typesafe.config.ConfigFactory.parseString("transit{}")
),
vehicles = BeamConfig.Matsim.Modules.Vehicles(
if (c.hasPathOrNull("vehicles")) c.getConfig("vehicles")
else com.typesafe.config.ConfigFactory.parseString("vehicles{}")
)
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig.Matsim = {
BeamConfig.Matsim(
conversion = BeamConfig.Matsim.Conversion(
if (c.hasPathOrNull("conversion")) c.getConfig("conversion")
else com.typesafe.config.ConfigFactory.parseString("conversion{}")
),
modules = BeamConfig.Matsim.Modules(
if (c.hasPathOrNull("modules")) c.getConfig("modules")
else com.typesafe.config.ConfigFactory.parseString("modules{}")
)
)
}
}
def apply(c: com.typesafe.config.Config): BeamConfig = {
BeamConfig(
beam = BeamConfig.Beam(
if (c.hasPathOrNull("beam")) c.getConfig("beam") else com.typesafe.config.ConfigFactory.parseString("beam{}")
),
matsim = BeamConfig.Matsim(
if (c.hasPathOrNull("matsim")) c.getConfig("matsim")
else com.typesafe.config.ConfigFactory.parseString("matsim{}")
)
)
}
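  // Every nested apply in this file follows the same pattern: read the sub-config when the path is
  // present, otherwise parse an empty block ("foo{}") so the hard-coded defaults above take effect.
  // A minimal construction sketch (using ConfigFactory.load() is an assumption about how the caller
  // obtains its Typesafe Config):
  //
  //   val beamConfig = BeamConfig(com.typesafe.config.ConfigFactory.load())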
}
| colinsheppard/beam | src/main/scala/beam/sim/config/BeamConfig.scala | Scala | gpl-3.0 | 122,487 |
package com.tpl.lib.gui
import net.bdew.lib.gui.{Texture, WidgetContainer, Rect, Color}
import net.bdew.lib.gui.widgets.WidgetSubcontainer
class WidgetPlayerInventory(x: Int, y: Int, texture: Texture, invColor: Color = Color.white, hotbarColor: Color = Color.white, hotbarSpace: Int = 4, cellSize: Int = 18)
extends WidgetSubcontainer(new Rect(x,y,(3*cellSize)+hotbarSpace+18,9*cellSize)) with WidgetContainer {
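  // Main inventory: 3 rows of 9 slot widgets, spaced cellSize apart.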
(0 to 2).foreach(row => {
(0 to 8).foreach(col => {
add(new WidgetInventorySlot((col*cellSize), (row*cellSize), texture, invColor))
})
})
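  // Hotbar: a single row of 9 slots, hotbarSpace pixels below the main grid.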
val hotbarY = (3*cellSize)+hotbarSpace
(0 to 8).foreach(col => {
add(new WidgetInventorySlot((col*cellSize), hotbarY, texture, hotbarColor))
})
}
| piotrb/hamcraft | src/main/scala/com/tpl/lib/gui/WidgetPlayerInventory.scala | Scala | bsd-2-clause | 730 |
/*
* Copyright 2011 Goldman Sachs.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.gs.collections.impl
import factory.Lists
class EmptyListScalaTest extends EmptyIterableTestTrait
{
val classUnderTest = Lists.fixedSize.of[String]
}
| jlz27/gs-collections | scala-unit-tests/src/test/scala/com/gs/collections/impl/EmptyListScalaTest.scala | Scala | apache-2.0 | 766 |
// Example: Hello CRON scheduling!
// This a minimal cuttle project demonstrating the CRON scheduler
package com.criteo.cuttle.examples
// The main package contains everything needed to create
// a cuttle project.
import com.criteo.cuttle._
import com.criteo.cuttle.cron.Implicits._
import com.criteo.cuttle.cron.{CronProject, CronScheduler, CronScheduling, CronWorkload}
import scala.io.Source
// The local platform allows to locally fork some processes
// (_here we will just fork shell scripts_).
import com.criteo.cuttle.platforms.local._
import scala.concurrent.Future
object HelloCronScheduling {
// A cuttle project is just embedded into any Scala application.
def main(args: Array[String]): Unit = {
// We are going to:
    // 1. Every 10 seconds, call https://api.coinmarketcap.com to get the latest available price of Bitcoin
    //    and save this price to a file on the file system.
    // 2. Every 10 seconds, compute the average of the last 3 saved prices.
    //    If we have fewer than 3 prices, the job is going to fail.
val fileName = "price.log"
// __Now let's define our first cron job!__
val tickerJob = Job(
id = "ticker_job",
      // We define our schedule with a simple cron expression, parsed by the cron4s library.
      // For more documentation see https://github.com/alonsodomin/cron4s.
// 1 corresponds to the maximum number of retries that we allow for this job
scheduling = CronScheduling(1),
name = "Ticker Job",
description = "Get ticker for Bitcoin price from CoinMarketCap"
) {
      // The side-effect function takes the execution as a parameter. The execution
      // contains useful metadata as well as the __context__, which is basically the
      // input data for our execution.
      // In our case the context contains the scheduling date and the retry number.
implicit e =>
// We can read execution parameters from the context.
val timestamp = e.context.instant
        // We can output information to the execution streams, which are persisted
        // in the state DB.
e.streams.info(s"Launching the job ${e.job.id} at $timestamp")
      // Now do some real work in BASH by calling the CoinMarketCap API and processing the result with Python.
exec"""
bash -c 'curl https://api.coinmarketcap.com/v2/ticker/1/ | python -c "import sys, json; print(json.load(sys.stdin)[\\"data\\"][\\"quotes\\"][\\"USD\\"][\\"price\\"])" >> $fileName'
""" ()
}
    // __Let's compute the average of the last 3 Bitcoin prices; if we have fewer than 3 entries, this job will fail.__
val avgJob =
Job(id = "avg_job", scheduling = CronScheduling(10), description = "Average Bitcoin price for last 3 value") {
implicit e =>
Future {
            // We use the plain old Scala API to interact with the file system.
val lines = Source.fromFile(fileName).getLines.toList
val last3Lines = lines.drop(lines.length - 3)
if (last3Lines.length < 3)
// Just throw an exception if you want to fail.
throw new UnsupportedOperationException("We have less than 3 values to compute the average!")
else {
              // We compute the average; it can fail in some locales.
val avgPrice = last3Lines.map(_.toDouble).sum / 3
// We output some ASCII art just to make our message visible in the logs :)
e.streams.debug("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
e.streams.info(s"The average of last 3 Bitcoin prices is $avgPrice")
e.streams.debug("$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$$")
Completed
}
}
}
// Jobs are grouped in workload.
val workload = CronWorkload(Set(tickerJob.every("0-59/10 * * ? * *"), avgJob.every("0-59/10 * * ? * *")))
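    // A quick reading of the expression above, assuming cron4s's six-field, seconds-first format
    // (seconds, minutes, hours, day-of-month, month, day-of-week):
    //   "0-59/10 * * ? * *"  ->  fire on every 10th second of every minute, i.e. every 10 seconds.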
    // Instantiate the Cron scheduler with a default stdout logger, which will be passed implicitly to the Cron project.
implicit val scheduler = CronScheduler(logger)
    // Project instantiation; it takes the implicit scheduler that we've just defined!
val project = CronProject(
name = "Hello Cron Scheduling Example",
version = "0.0.1",
description = "My first Cron with Cuttle project"
)(workload)
// __Finally start it!__
project.start()
}
}
| criteo/cuttle | examples/src/main/scala/com/criteo/cuttle/examples/HelloCronScheduling.scala | Scala | apache-2.0 | 4,368 |
package org.apache.toree
/**
* A wrapper used to workaround
* <a href="http://docs.scala-lang.org/overviews/reflection/thread-safety.html">thread safety issues</a> in Scala 2.10 reflection
* facilities.
* <p>Any interaction with scala.reflect must be performed only via the [[ReflectionAccessor.useReflection()]] method.</p>
* <p>As the issue is Scala 2.10-specific, in Scala 2.11 environment, this class has no "real" functionality.</p>
*/
object ReflectionAccessor {
/**
* Executes the specified code without any additional actions.
*
* @param code the code to be executed
*
* @return what the specified code returns
*/
def useReflection[T](code: => T): T = {
code
}
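  // A minimal usage sketch (the `typeOf` call and the imported universe are illustrative only):
  //
  //   import scala.reflect.runtime.universe._
  //   val listType = ReflectionAccessor.useReflection {
  //     typeOf[List[Int]]
  //   }
  //
  // In this Scala 2.11 source the wrapper simply evaluates the block; a Scala 2.10 counterpart
  // would be expected to serialize access to scala.reflect to work around its thread-safety issues.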
}
| Myllyenko/incubator-toree | plugins/src/main/scala-2.11/org/apache/toree/ReflectionAccessor.scala | Scala | apache-2.0 | 712 |
package info.mukel.telegrambot4s.api
import java.util.concurrent.Executors
import akka.actor.ActorSystem
import akka.http.scaladsl.Http
import akka.http.scaladsl.marshalling.Marshal
import akka.http.scaladsl.model._
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.QueueOfferResult.Enqueued
import akka.stream.scaladsl.{Keep, Sink, Source}
import akka.stream.{Materializer, OverflowStrategy}
import com.typesafe.scalalogging.StrictLogging
import info.mukel.telegrambot4s.marshalling.HttpMarshalling
import info.mukel.telegrambot4s.methods.{ApiRequest, ApiResponse}
import scala.concurrent.{ExecutionContext, Future, Promise}
import scala.util.{Failure, Success, Try}
class HttpClientQueued(token: String)(implicit system: ActorSystem, materializer: Materializer) extends RequestHandler with StrictLogging {
import HttpMarshalling._
private implicit val ec = ExecutionContext.fromExecutor(Executors.newFixedThreadPool(2))
private lazy val pool = Http().cachedHostConnectionPoolHttps[Promise[HttpResponse]](host = "api.telegram.org")
private lazy val queue = Source.queue[(ApiRequest[_], Promise[HttpResponse])](1000, OverflowStrategy.dropNew)
.mapAsync(4){ case (r, p) => toHttpRequest(r) map { (_ -> p)} }
.via(pool)
.toMat(Sink.foreach[(Try[HttpResponse], Promise[HttpResponse])]({
case ((Success(resp), p)) => p.success(resp)
case ((Failure(e), p)) => p.failure(e)
}))(Keep.left)
.run()
private def toHttpRequest[R](r: ApiRequest[R]): Future[HttpRequest] = {
Marshal(r).to[RequestEntity]
.map {
re =>
HttpRequest(HttpMethods.POST, Uri(s"/bot$token/" + r.methodName), entity = re)
}
}
private def toApiResponse[R: Manifest](httpResponse: HttpResponse): Future[ApiResponse[R]] = {
Unmarshal(httpResponse.entity).to[ApiResponse[R]]
}
/** Spawns a type-safe request.
*
* @param request
* @tparam R Request's expected result type
* @return The request result wrapped in a Future (async)
*/
override def apply[R: Manifest](request: ApiRequest[R]): Future[R] = {
val promise = Promise[HttpResponse]
    val rp = request -> promise
    val response = queue.offer(rp).flatMap {
case Enqueued => promise.future.flatMap(r => toApiResponse[R](r))
case _ => Future.failed(new RuntimeException("Failed to send request, pending queue is full."))
}
response flatMap {
case ApiResponse(true, Some(result), _, _, _) =>
Future.successful(result)
case ApiResponse(false, _, description, Some(errorCode), parameters) =>
val e = TelegramApiException(description.getOrElse("Unexpected/invalid/empty response"), errorCode, None, parameters)
logger.error("Telegram API exception", e)
Future.failed(e)
case _ =>
val msg = "Error on request response"
logger.error(msg)
Future.failed(new RuntimeException(msg))
}
}
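  // Rough usage sketch (SendMessage, chatId and the token value are placeholders, not part of this file):
  //
  //   implicit val system = ActorSystem()
  //   implicit val materializer = ActorMaterializer()
  //   val handler = new HttpClientQueued("123456:TOKEN")
  //   handler(SendMessage(chatId, "hello")).onComplete(println)
  //
  // All requests funnel through the bounded queue above; once 1000 requests are pending, further
  // offers are dropped (OverflowStrategy.dropNew) and the returned Future fails instead of
  // overloading the cached connection pool.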
} | hugemane/telegrambot4s | src/main/scala/info/mukel/telegrambot4s/api/HttpClientQueued.scala | Scala | apache-2.0 | 2,955 |
/*
* Copyright 2016 The BigDL Authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.intel.analytics.bigdl.parameters
import java.nio.ByteBuffer
import com.intel.analytics.bigdl.tensor.Tensor
import scala.reflect._
class FP16SplitsCompressedTensor[T: ClassTag](buffers: Array[Array[Byte]], size: Int)
extends CompressedTensor[T] {
def this(tensor: Tensor[T], splitsNum: Int) {
this(new Array[Array[Byte]](splitsNum), tensor.nElement())
compress(tensor)
}
def this(length: Int, splitsNum: Int) {
this(new Array[Array[Byte]](splitsNum), length)
}
@inline
private def overlap(splitOffset: Int, splitLength: Int, offset: Int,
length: Int): Option[(Int, Int)] = {
    if (splitOffset > offset + length || splitOffset + splitLength < offset) {
      None
    } else {
      Some((math.max(offset - splitOffset, 0),
        math.min(splitOffset + splitLength, offset + length) - math.max(splitOffset, offset)))
    }
}
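  // Worked example of the split bookkeeping (illustrative numbers only): with size = 10 and
  // 3 buffers, splitSize = 3 and extraSize = 1, so the splits cover [0, 4), [4, 7) and [7, 10).
  // A compress(offset = 2, ..., length = 6) call then gets Some((2, 2)) from overlap() for the
  // first split, Some((0, 3)) for the second and Some((0, 1)) for the third.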
override def compress(offset: Int, src: Tensor[T], srcOffset: Int,
length: Int): FP16SplitsCompressedTensor.this.type = {
require(src.isContiguous() && offset >= 0 && srcOffset >= 0 &&
srcOffset + length <= src.nElement())
require(offset + length <= size)
val tOffset = src.storageOffset() - 1 + srcOffset
val splitSize = size / buffers.length
val extraSize = size % buffers.length
var i = 0
while (i < buffers.length) {
val start = splitSize * i + math.min(extraSize, i)
val curLength = splitSize + (if (i < extraSize) 1 else 0)
overlap(start, curLength, offset, length) match {
case Some((splitOffset, overlapLength)) =>
if (buffers(i) == null) {
buffers(i) = new Array[Byte](curLength * 2)
}
if (classTag[T] == classTag[Double]) {
FP16CompressedTensor.toFP16(src.storage().array().asInstanceOf[Array[Double]],
tOffset + start, buffers(i), splitOffset, overlapLength)
} else if (classTag[T] == classTag[Float]) {
FP16CompressedTensor.toFP16(src.storage().array().asInstanceOf[Array[Float]],
tOffset + start, buffers(i), splitOffset, overlapLength)
} else {
throw new IllegalArgumentException
}
case _ =>
}
i += 1
}
this
}
override def compress(tensor: Tensor[T]): FP16SplitsCompressedTensor.this.type =
compress(0, tensor, 0, tensor.nElement())
override def deCompress(srcOffset: Int, tensor: Tensor[T], tgtOffset: Int, length: Int): Unit = {
require(srcOffset >= 0 && length > 0 && srcOffset + length <= size &&
tgtOffset >= 0 && tgtOffset + length <= tensor.nElement())
require(tensor.isContiguous())
val splitSize = size / buffers.length
val extraSize = size % buffers.length
var i = 0
while (i < buffers.length) {
val start = splitSize * i + math.min(extraSize, i)
val curLength = splitSize + (if (i < extraSize) 1 else 0)
overlap(start, curLength, srcOffset, length) match {
case Some((splitOffset, overlapLength)) =>
if (classTag[T] == classTag[Double]) {
val tdata = tensor.storage().array().asInstanceOf[Array[Double]]
val toffset = tensor.storageOffset() - 1 + tgtOffset
FP16CompressedTensor.fromFP16(buffers(i), splitOffset * 2, overlapLength * 2,
tdata, toffset + start)
} else if (classTag[T] == classTag[Float]) {
val tdata = tensor.storage().array().asInstanceOf[Array[Float]]
val toffset = tensor.storageOffset() - 1 + tgtOffset
FP16CompressedTensor.fromFP16(buffers(i), splitOffset * 2, overlapLength * 2,
tdata, toffset + start)
} else {
throw new IllegalArgumentException
}
case _ =>
}
i += 1
}
}
override def deCompress(tensor: Tensor[T]): Unit = deCompress(0, tensor, 0, tensor.nElement())
override def bytes(offset: Int, length: Int): ByteBuffer = {
val splitSize = size / buffers.length
val extraSize = size % buffers.length
var i = 0
while (i < buffers.length) {
val start = splitSize * i + math.min(extraSize, i)
val curLength = splitSize + (if (i < extraSize) 1 else 0)
if (start == offset && curLength == length) {
require(buffers(i) != null, "split has not been inited")
return ByteBuffer.wrap(buffers(i))
}
i += 1
}
throw new IllegalArgumentException("Offset and length not match")
}
override def bytes(): ByteBuffer = bytes(0, size)
// scalastyle:off
override def add(data: ByteBuffer, offset: Int,
length: Int): FP16SplitsCompressedTensor.this.type = ???
override def add(data: ByteBuffer): FP16SplitsCompressedTensor.this.type = ???
override def parAdd(data: ByteBuffer, offset: Int,
length: Int): FP16SplitsCompressedTensor.this.type = ???
override def parAdd(data: ByteBuffer): FP16SplitsCompressedTensor.this.type = ???
// scalastyle:on
}
| yiheng/BigDL | spark/dl/src/main/scala/com/intel/analytics/bigdl/parameters/FP16SplitsCompressedTensor.scala | Scala | apache-2.0 | 5,560 |
package scala.meta
import scala.meta.dialects._
import scala.meta.internal.dialects._
import scala.collection.immutable.TreeMap
/**
* A dialect is used to configure what Scala syntax is allowed during tokenization and parsing.
*/
final class Dialect private (
// Are `&` intersection types supported by this dialect?
val allowAndTypes: Boolean,
// Are extractor varargs specified using ats, i.e. is `case Extractor(xs @ _*)` legal or not?
val allowAtForExtractorVarargs: Boolean,
// Can case classes be declared without a parameter list?
// Deprecated in 2.10, not supported in 2.11 and newer.
val allowCaseClassWithoutParameterList: Boolean,
// Are extractor varargs specified using colons, i.e. is `case Extractor(xs: _*)` legal or not?
val allowColonForExtractorVarargs: Boolean,
// Are enums allowed?
// They are in Dotty, but not in Scala 2.12 or older.
val allowEnums: Boolean,
// Are implicit by name parameters supported?
// They are in Dotty, but not in Scala 2.12 or older.
val allowImplicitByNameParameters: Boolean,
// Are `inline` identifiers supported by this dialect?
val allowInlineIdents: Boolean,
// Are inline vals and defs supported by this dialect?
val allowInlineMods: Boolean,
// Are literal types allowed, i.e. is `val a : 42 = 42` legal or not?
val allowLiteralTypes: Boolean,
// Are multiline programs allowed?
// Some quasiquotes only support single-line snippets.
val allowMultilinePrograms: Boolean,
// Are `|` (union types) supported by this dialect?
val allowOrTypes: Boolean,
// Are unquotes ($x) and splices (..$xs, ...$xss) allowed?
// If yes, they will be parsed as patterns.
val allowPatUnquotes: Boolean,
// Are naked underscores allowed after $ in pattern interpolators, i.e. is `case q"$_ + $_" =>` legal or not?
val allowSpliceUnderscores: Boolean,
// Are unquotes ($x) and splices (..$xs, ...$xss) allowed?
// If yes, they will be parsed as terms.
val allowTermUnquotes: Boolean,
// Are terms on the top level supported by this dialect?
// Necessary to support popular script-like DSLs.
val allowToplevelTerms: Boolean,
// Are trailing commas allowed? SIP-27.
val allowTrailingCommas: Boolean,
// Are trait allowed to have parameters?
// They are in Dotty, but not in Scala 2.12 or older.
val allowTraitParameters: Boolean,
// Are type lambdas allowed, i.e. is `[T] => (T, T)` legal or not?
val allowTypeLambdas: Boolean,
// Are view bounds supported by this dialect?
// def f[A <% Int](a: A)
// Removed in Dotty.
val allowViewBounds: Boolean,
// Are XML literals supported by this dialect?
// We plan to deprecate XML literal syntax, and some dialects
// might go ahead and drop support completely.
val allowXmlLiterals: Boolean,
@deprecated("toplevelSeparator has never been used", ">4.4.35")
val toplevelSeparator: String,
// Are numeric literal underscore separators, i.e. `1_000_000` legal or not?
val allowNumericLiteralUnderscoreSeparators: Boolean,
  // Can a try body contain any expression? (2.13.1, https://github.com/scala/scala/pull/8071)
val allowTryWithAnyExpr: Boolean,
// Given/using introduced in dotty
val allowGivenUsing: Boolean,
// Extension methods introduced in dotty
val allowExtensionMethods: Boolean,
// Open modifier for classes introduced in dotty
val allowOpenClass: Boolean,
// Top level statements introduced in dotty.
// differs from ToplevelTerms because here you can define packages
val allowToplevelStatements: Boolean,
// Opaque types introduced in dotty
val allowOpaqueTypes: Boolean,
// Export selected members of an object introduced in dotty
val allowExportClause: Boolean,
// Extended classes separated by ',' introduced in dotty
val allowCommaSeparatedExtend: Boolean,
// end marker introduced in dotty
val allowEndMarker: Boolean,
// Support for escaping `"` in interpolated strings using $ - "$""
val allowInterpolationDolarQuoteEscape: Boolean,
  // Significant indentation introduced in dotty
val allowSignificantIndentation: Boolean,
// Dotty changed placeholder for types from `_` to `?`
val allowQuestionMarkPlaceholder: Boolean,
  // Dotty rejects a placeholder (`_`) as a type parameter
val allowTypeParamUnderscore: Boolean,
// Dotty allows by-name repeated parameters
val allowByNameRepeatedParameters: Boolean,
  // Dotty allows abstract lazy vals
val allowLazyValAbstractValues: Boolean,
// Dotty allows capital pattern vars in `case A @ _ =>`
val allowUpperCasePatternVarBinding: Boolean,
  // Dotty allows using `derives` to automatically generate given instances for type classes
val allowDerives: Boolean,
  // Dotty allows declaring `type T` inside blocks
val allowTypeInBlock: Boolean,
  // Dotty allows defining functions like `[T] => (ts: List[T]) => ts.headOption`
val allowPolymorphicFunctions: Boolean,
// Dotty allows `.match` expressions and chaining matches
val allowMatchAsOperator: Boolean,
// Dotty allows `match` on type
val allowTypeMatch: Boolean,
  // Dotty allows defining types and methods with an `infix` soft keyword modifier
val allowInfixMods: Boolean,
// Scala 3 splices/quotes
val allowSpliceAndQuote: Boolean,
// Scala 3 disallowed symbol literals
val allowSymbolLiterals: Boolean,
  // Scala 3 allows dependent function types
val allowDependentFunctionTypes: Boolean,
  /* Scala 3 added the possibility to use simpler splices such as:
* val arr = Array(1, 2, 3)
* val lst = List(0, arr*) // vararg splice argument
* lst match
* case List(0, 1, xs*) => println(xs) // binds xs to Seq(2, 3)
* case List(1, _*) => // wildcard pattern
*/
val allowPostfixStarVarargSplices: Boolean,
/* Scala 3 allows us to specify:
* `case tp @ OrNull(tp1): OrType`
* the last section after : was not allowed previously.
*/
val allowAllTypedPatterns: Boolean,
  // Scala 3 import renames can use the `as` soft keyword: `import a.b.C as D`
val allowAsForImportRename: Boolean,
// Scala 3 wildcard imports can be specified as `import a.b.*`
val allowStarWildcardImport: Boolean,
// Scala 3 no longer allows def hello(){} - `=` is always needed
val allowProcedureSyntax: Boolean,
// Scala 3 no longer allows `do {...} while(...)`
val allowDoWhile: Boolean,
  /* Kind-projector support
   * works under the -Xsource:3 flag
   * https://github.com/scala/scala/pull/9605
   */
val allowPlusMinusUnderscoreAsIdent: Boolean,
  /* The same as the previous flag but for Scala 3
* works under -Ykind-projector:underscores
*/
val allowPlusMinusUnderscoreAsPlaceholder: Boolean,
  // import a.b.c.{ given, _ } used for -Xsource:3
val allowGivenImports: Boolean
) extends Product with Serializable {
// NOTE(olafur) checklist for adding a new dialect field in a binary compatible way:
// - add new field to primary constructor.
// - add new parameter to `privateCopy()` method.
// - add new `withX()` method to allow overriding that specific field.
// - add new `x = FIELD_DEFAULT_VALUE` in the `def this()` constructor below.
def this(
allowAndTypes: Boolean,
allowAtForExtractorVarargs: Boolean,
allowCaseClassWithoutParameterList: Boolean,
allowColonForExtractorVarargs: Boolean,
allowEnums: Boolean,
allowImplicitByNameParameters: Boolean,
allowInlineIdents: Boolean,
allowInlineMods: Boolean,
allowLiteralTypes: Boolean,
allowMultilinePrograms: Boolean,
allowOrTypes: Boolean,
allowPatUnquotes: Boolean,
allowSpliceUnderscores: Boolean,
allowTermUnquotes: Boolean,
allowToplevelTerms: Boolean,
allowTrailingCommas: Boolean,
allowTraitParameters: Boolean,
allowTypeLambdas: Boolean,
allowViewBounds: Boolean,
allowXmlLiterals: Boolean,
toplevelSeparator: String
) = {
this(
allowAndTypes = allowAndTypes,
allowAtForExtractorVarargs = allowAtForExtractorVarargs,
allowCaseClassWithoutParameterList = allowCaseClassWithoutParameterList,
allowColonForExtractorVarargs = allowColonForExtractorVarargs,
allowEnums = allowEnums,
allowImplicitByNameParameters = allowImplicitByNameParameters,
allowInlineIdents = allowInlineIdents,
allowInlineMods = allowInlineMods,
allowLiteralTypes = allowLiteralTypes,
allowMultilinePrograms = allowMultilinePrograms,
allowOrTypes = allowOrTypes,
allowPatUnquotes = allowPatUnquotes,
allowSpliceUnderscores = allowSpliceUnderscores,
allowTermUnquotes = allowTermUnquotes,
allowToplevelTerms = allowToplevelTerms,
allowTrailingCommas = allowTrailingCommas,
allowTraitParameters = allowTraitParameters,
allowTypeLambdas = allowTypeLambdas,
allowViewBounds = allowViewBounds,
allowXmlLiterals = allowXmlLiterals,
toplevelSeparator = toplevelSeparator,
allowNumericLiteralUnderscoreSeparators = false,
allowTryWithAnyExpr = false,
allowGivenUsing = false,
allowExtensionMethods = false,
allowOpenClass = false,
allowToplevelStatements = false,
allowOpaqueTypes = false,
allowExportClause = false,
allowCommaSeparatedExtend = false,
allowEndMarker = false,
allowInterpolationDolarQuoteEscape = false,
allowSignificantIndentation = false,
allowQuestionMarkPlaceholder = false,
allowTypeParamUnderscore = true,
allowByNameRepeatedParameters = false,
allowLazyValAbstractValues = false,
allowUpperCasePatternVarBinding = false,
allowDerives = false,
allowTypeInBlock = false,
allowPolymorphicFunctions = false,
allowMatchAsOperator = false,
allowTypeMatch = false,
allowInfixMods = false,
allowSpliceAndQuote = false,
allowSymbolLiterals = true,
allowDependentFunctionTypes = false,
allowPostfixStarVarargSplices = false,
allowAllTypedPatterns = false,
allowAsForImportRename = false,
allowStarWildcardImport = false,
allowProcedureSyntax = true,
allowDoWhile = true,
allowPlusMinusUnderscoreAsIdent = false,
allowPlusMinusUnderscoreAsPlaceholder = false,
allowGivenImports = false
// NOTE(olafur): declare the default value for new fields above this comment.
)
}
// Are unquotes ($x) and splices (..$xs, ...$xss) allowed?
def allowUnquotes: Boolean = allowTermUnquotes || allowPatUnquotes
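  // Typical usage is to start from one of the predefined dialects in `scala.meta.dialects` and
  // refine it with the `withX` copies below, e.g. (a sketch; both flags are fields of this class):
  //
  //   val scriptDialect = scala.meta.dialects.Scala213
  //     .withAllowToplevelTerms(true)
  //     .withAllowTrailingCommas(true)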
def withAllowAndTypes(newValue: Boolean): Dialect = {
privateCopy(allowAndTypes = newValue)
}
def withAllowAtForExtractorVarargs(newValue: Boolean): Dialect = {
privateCopy(allowAtForExtractorVarargs = newValue)
}
def withAllowCaseClassWithoutParameterList(
newValue: Boolean
): Dialect = {
privateCopy(allowCaseClassWithoutParameterList = newValue)
}
def withAllowColonForExtractorVarargs(newValue: Boolean): Dialect = {
privateCopy(allowColonForExtractorVarargs = newValue)
}
def withAllowEnums(newValue: Boolean): Dialect = {
privateCopy(allowEnums = newValue)
}
def withAllowImplicitByNameParameters(newValue: Boolean): Dialect = {
privateCopy(allowImplicitByNameParameters = newValue)
}
def withAllowInlineIdents(newValue: Boolean): Dialect = {
privateCopy(allowInlineIdents = newValue)
}
def withAllowInlineMods(newValue: Boolean): Dialect = {
privateCopy(allowInlineMods = newValue)
}
def withAllowLiteralTypes(newValue: Boolean): Dialect = {
privateCopy(allowLiteralTypes = newValue)
}
def withAllowMultilinePrograms(newValue: Boolean): Dialect = {
privateCopy(allowMultilinePrograms = newValue)
}
def withAllowOrTypes(newValue: Boolean): Dialect = {
privateCopy(allowOrTypes = newValue)
}
def withAllowPatUnquotes(newValue: Boolean): Dialect = {
privateCopy(allowPatUnquotes = newValue)
}
def withAllowSpliceUnderscores(newValue: Boolean): Dialect = {
privateCopy(allowSpliceUnderscores = newValue)
}
def withAllowTermUnquotes(newValue: Boolean): Dialect = {
privateCopy(allowTermUnquotes = newValue)
}
def withAllowToplevelTerms(newValue: Boolean): Dialect = {
privateCopy(allowToplevelTerms = newValue)
}
def withAllowTrailingCommas(newValue: Boolean): Dialect = {
privateCopy(allowTrailingCommas = newValue)
}
def withAllowTraitParameters(newValue: Boolean): Dialect = {
privateCopy(allowTraitParameters = newValue)
}
def withAllowTypeLambdas(newValue: Boolean): Dialect = {
privateCopy(allowTypeLambdas = newValue)
}
def withAllowViewBounds(newValue: Boolean): Dialect = {
privateCopy(allowViewBounds = newValue)
}
def withAllowXmlLiterals(newValue: Boolean): Dialect = {
privateCopy(allowXmlLiterals = newValue)
}
@deprecated("toplevelSeparator has never been used", ">4.4.35")
def withToplevelSeparator(newValue: String): Dialect = {
privateCopy(toplevelSeparator = newValue)
}
def withAllowNumericLiteralUnderscoreSeparators(newValue: Boolean): Dialect = {
privateCopy(allowNumericLiteralUnderscoreSeparators = newValue)
}
def withAllowTryWithAnyExpr(newValue: Boolean): Dialect = {
privateCopy(allowTryWithAnyExpr = newValue)
}
def withAllowGivenUsing(newValue: Boolean): Dialect = {
privateCopy(allowGivenUsing = newValue)
}
def withAllowExtensionMethods(newValue: Boolean): Dialect = {
privateCopy(allowExtensionMethods = newValue)
}
def withAllowOpenClass(newValue: Boolean): Dialect = {
privateCopy(allowOpenClass = newValue)
}
def withAllowToplevelStatements(newValue: Boolean): Dialect = {
privateCopy(allowToplevelStatements = newValue)
}
def withAllowOpaqueTypes(newValue: Boolean): Dialect = {
privateCopy(allowOpaqueTypes = newValue)
}
def withAllowInterpolationDolarQuoteEscape(newValue: Boolean): Dialect = {
privateCopy(allowInterpolationDolarQuoteEscape = newValue)
}
def withAllowExportClause(newValue: Boolean): Dialect = {
privateCopy(allowExportClause = newValue)
}
def withAllowCommaSeparatedExtend(newValue: Boolean): Dialect = {
privateCopy(allowCommaSeparatedExtend = newValue)
}
def withAllowEndMarker(newValue: Boolean): Dialect = {
privateCopy(allowEndMarker = newValue)
}
def withAllowSignificantIndentation(newValue: Boolean): Dialect = {
privateCopy(allowSignificantIndentation = newValue)
}
def withAllowQuestionMarkPlaceholder(newValue: Boolean): Dialect = {
privateCopy(allowQuestionMarkPlaceholder = newValue)
}
def withAllowTypeParamUnderscore(newValue: Boolean): Dialect = {
privateCopy(allowTypeParamUnderscore = newValue)
}
def withAllowByNameRepeatedParameters(newValue: Boolean): Dialect = {
privateCopy(allowByNameRepeatedParameters = newValue)
}
def withAllowLazyValAbstractValues(newValue: Boolean): Dialect = {
privateCopy(allowLazyValAbstractValues = newValue)
}
def withAllowMatchAsOperator(newValue: Boolean): Dialect = {
privateCopy(allowMatchAsOperator = newValue)
}
def withAllowUpperCasePatternVarBinding(newValue: Boolean): Dialect = {
privateCopy(allowUpperCasePatternVarBinding = newValue)
}
def withAllowDerives(newValue: Boolean): Dialect = {
privateCopy(allowDerives = newValue)
}
def withAllowTypeInBlock(newValue: Boolean): Dialect = {
privateCopy(allowTypeInBlock = newValue)
}
def withAllowPolymorphicFunctions(newValue: Boolean): Dialect = {
privateCopy(allowPolymorphicFunctions = newValue)
}
def withAllowTypeMatch(newValue: Boolean): Dialect = {
privateCopy(allowTypeMatch = newValue)
}
def withAllowInfixMods(newValue: Boolean): Dialect = {
privateCopy(allowInfixMods = newValue)
}
def withAllowSpliceAndQuote(newValue: Boolean): Dialect = {
privateCopy(allowSpliceAndQuote = newValue)
}
def withAllowSymbolLiterals(newValue: Boolean): Dialect = {
privateCopy(allowSymbolLiterals = newValue)
}
def withAllowDependentFunctionTypes(newValue: Boolean): Dialect = {
privateCopy(allowDependentFunctionTypes = newValue)
}
def withAllowPostfixStarVarargSplices(newValue: Boolean): Dialect = {
privateCopy(allowPostfixStarVarargSplices = newValue)
}
def withAllowAllTypedPatterns(newValue: Boolean): Dialect = {
privateCopy(allowAllTypedPatterns = newValue)
}
def withAllowAsForImportRename(newValue: Boolean): Dialect = {
privateCopy(allowAsRenames = newValue)
}
def withAllowStarWildcardImport(newValue: Boolean): Dialect = {
privateCopy(allowStarWildcardImport = newValue)
}
def withAllowProcedureSyntax(newValue: Boolean): Dialect = {
privateCopy(allowProcedureSyntax = newValue)
}
def withAllowDoWhile(newValue: Boolean): Dialect = {
privateCopy(allowDoWhile = newValue)
}
def withAllowPlusMinusUnderscoreAsIdent(newValue: Boolean): Dialect = {
privateCopy(allowPlusMinusUnderscoreAsIdent = newValue)
}
def withAllowPlusMinusUnderscoreAsPlaceholder(newValue: Boolean): Dialect = {
privateCopy(allowPlusMinusUnderscoreAsPlaceholder = newValue)
}
def withAllowGivenImports(newValue: Boolean): Dialect = {
privateCopy(allowGivenImports = newValue)
}
// NOTE(olafur): add the next `withX()` method above this comment. Please try
// to use consistent formatting, use `newValue` as the parameter name and wrap
// the body inside curly braces.
private[this] def privateCopy(
allowAndTypes: Boolean = this.allowAndTypes,
allowAtForExtractorVarargs: Boolean = this.allowAtForExtractorVarargs,
allowCaseClassWithoutParameterList: Boolean = this.allowCaseClassWithoutParameterList,
allowColonForExtractorVarargs: Boolean = this.allowColonForExtractorVarargs,
allowEnums: Boolean = this.allowEnums,
allowImplicitByNameParameters: Boolean = this.allowImplicitByNameParameters,
allowInlineIdents: Boolean = this.allowInlineIdents,
allowInlineMods: Boolean = this.allowInlineMods,
allowLiteralTypes: Boolean = this.allowLiteralTypes,
allowMultilinePrograms: Boolean = this.allowMultilinePrograms,
allowOrTypes: Boolean = this.allowOrTypes,
allowPatUnquotes: Boolean = this.allowPatUnquotes,
allowSpliceUnderscores: Boolean = this.allowSpliceUnderscores,
allowTermUnquotes: Boolean = this.allowTermUnquotes,
allowToplevelTerms: Boolean = this.allowToplevelTerms,
allowTrailingCommas: Boolean = this.allowTrailingCommas,
allowTraitParameters: Boolean = this.allowTraitParameters,
allowTypeLambdas: Boolean = this.allowTypeLambdas,
allowViewBounds: Boolean = this.allowViewBounds,
allowXmlLiterals: Boolean = this.allowXmlLiterals,
toplevelSeparator: String = this.toplevelSeparator,
allowNumericLiteralUnderscoreSeparators: Boolean =
this.allowNumericLiteralUnderscoreSeparators,
allowTryWithAnyExpr: Boolean = this.allowTryWithAnyExpr,
allowGivenUsing: Boolean = this.allowGivenUsing,
allowExtensionMethods: Boolean = this.allowExtensionMethods,
allowOpenClass: Boolean = this.allowOpenClass,
allowToplevelStatements: Boolean = this.allowToplevelStatements,
allowOpaqueTypes: Boolean = this.allowOpaqueTypes,
allowExportClause: Boolean = this.allowExportClause,
allowCommaSeparatedExtend: Boolean = this.allowCommaSeparatedExtend,
allowEndMarker: Boolean = this.allowEndMarker,
allowInterpolationDolarQuoteEscape: Boolean = this.allowInterpolationDolarQuoteEscape,
allowSignificantIndentation: Boolean = this.allowSignificantIndentation,
allowQuestionMarkPlaceholder: Boolean = this.allowQuestionMarkPlaceholder,
allowTypeParamUnderscore: Boolean = this.allowTypeParamUnderscore,
allowByNameRepeatedParameters: Boolean = this.allowByNameRepeatedParameters,
allowLazyValAbstractValues: Boolean = this.allowLazyValAbstractValues,
allowUpperCasePatternVarBinding: Boolean = this.allowUpperCasePatternVarBinding,
allowDerives: Boolean = this.allowDerives,
allowTypeInBlock: Boolean = this.allowTypeInBlock,
allowPolymorphicFunctions: Boolean = this.allowPolymorphicFunctions,
allowMatchAsOperator: Boolean = this.allowMatchAsOperator,
allowTypeMatch: Boolean = this.allowTypeMatch,
allowInfixMods: Boolean = this.allowInfixMods,
allowSpliceAndQuote: Boolean = this.allowSpliceAndQuote,
allowSymbolLiterals: Boolean = this.allowSymbolLiterals,
allowDependentFunctionTypes: Boolean = this.allowDependentFunctionTypes,
allowPostfixStarVarargSplices: Boolean = this.allowPostfixStarVarargSplices,
allowAllTypedPatterns: Boolean = this.allowAllTypedPatterns,
allowAsRenames: Boolean = this.allowAsForImportRename,
allowStarWildcardImport: Boolean = this.allowStarWildcardImport,
allowProcedureSyntax: Boolean = this.allowProcedureSyntax,
allowDoWhile: Boolean = this.allowDoWhile,
allowPlusMinusUnderscoreAsIdent: Boolean = this.allowPlusMinusUnderscoreAsIdent,
allowPlusMinusUnderscoreAsPlaceholder: Boolean = this.allowPlusMinusUnderscoreAsPlaceholder,
allowGivenImports: Boolean = this.allowGivenImports
// NOTE(olafur): add the next parameter above this comment.
): Dialect = {
new Dialect(
allowAndTypes,
allowAtForExtractorVarargs,
allowCaseClassWithoutParameterList,
allowColonForExtractorVarargs,
allowEnums,
allowImplicitByNameParameters,
allowInlineIdents,
allowInlineMods,
allowLiteralTypes,
allowMultilinePrograms,
allowOrTypes,
allowPatUnquotes,
allowSpliceUnderscores,
allowTermUnquotes,
allowToplevelTerms,
allowTrailingCommas,
allowTraitParameters,
allowTypeLambdas,
allowViewBounds,
allowXmlLiterals,
toplevelSeparator,
allowNumericLiteralUnderscoreSeparators,
allowTryWithAnyExpr,
allowGivenUsing,
allowExtensionMethods,
allowOpenClass,
allowToplevelStatements,
allowOpaqueTypes,
allowExportClause,
allowCommaSeparatedExtend,
allowEndMarker,
allowInterpolationDolarQuoteEscape,
allowSignificantIndentation,
allowQuestionMarkPlaceholder,
allowTypeParamUnderscore,
allowByNameRepeatedParameters,
allowLazyValAbstractValues,
allowUpperCasePatternVarBinding,
allowDerives,
allowTypeInBlock,
allowPolymorphicFunctions,
allowMatchAsOperator,
allowTypeMatch,
allowInfixMods,
allowSpliceAndQuote,
allowSymbolLiterals,
allowDependentFunctionTypes,
allowPostfixStarVarargSplices,
allowAllTypedPatterns,
allowAsRenames,
allowStarWildcardImport,
allowProcedureSyntax,
allowDoWhile,
allowPlusMinusUnderscoreAsIdent,
allowPlusMinusUnderscoreAsPlaceholder,
allowGivenImports
// NOTE(olafur): add the next argument above this comment.
)
}
// NOTE(olafur): Do not edit below here, these methods can remain unchanged.
@deprecated("Dialect should not be a Product", "4.3.11")
override def productPrefix: String = "Dialect"
@deprecated("Dialect should not be a Product", "4.3.11")
def productArity: Int = 0
@deprecated("Dialect should not be a Product", "4.3.11")
def productElement(n: Int): Any = throw new IndexOutOfBoundsException(n.toString)
// Dialects have reference equality semantics,
// because sometimes dialects representing distinct Scala versions
// can be structurally equal to each other.
override def canEqual(that: Any): Boolean = this eq that.asInstanceOf[AnyRef]
override def equals(other: Any): Boolean = this eq other.asInstanceOf[AnyRef]
override def hashCode: Int = System.identityHashCode(this)
// Smart prettyprinting that knows about standard dialects.
override def toString = {
Dialect.inverseStandards.getOrElse(this, "Dialect()")
}
@deprecated("Use withX method instead", "4.3.11")
def copy(
allowAndTypes: Boolean = this.allowAndTypes,
allowAtForExtractorVarargs: Boolean = this.allowAtForExtractorVarargs,
allowCaseClassWithoutParameterList: Boolean = this.allowCaseClassWithoutParameterList,
allowColonForExtractorVarargs: Boolean = this.allowColonForExtractorVarargs,
allowEnums: Boolean = this.allowEnums,
allowImplicitByNameParameters: Boolean = this.allowImplicitByNameParameters,
allowInlineIdents: Boolean = this.allowInlineIdents,
allowInlineMods: Boolean = this.allowInlineMods,
allowLiteralTypes: Boolean = this.allowLiteralTypes,
allowMultilinePrograms: Boolean = this.allowMultilinePrograms,
allowOrTypes: Boolean = this.allowOrTypes,
allowPatUnquotes: Boolean = this.allowPatUnquotes,
allowSpliceUnderscores: Boolean = this.allowSpliceUnderscores,
allowTermUnquotes: Boolean = this.allowTermUnquotes,
allowToplevelTerms: Boolean = this.allowToplevelTerms,
allowTrailingCommas: Boolean = this.allowTrailingCommas,
allowTraitParameters: Boolean = this.allowTraitParameters,
allowTypeLambdas: Boolean = this.allowTypeLambdas,
allowViewBounds: Boolean = this.allowViewBounds,
allowXmlLiterals: Boolean = this.allowXmlLiterals,
toplevelSeparator: String = this.toplevelSeparator
): Dialect = {
privateCopy(
allowAndTypes,
allowAtForExtractorVarargs,
allowCaseClassWithoutParameterList,
allowColonForExtractorVarargs,
allowEnums,
allowImplicitByNameParameters,
allowInlineIdents,
allowInlineMods,
allowLiteralTypes,
allowMultilinePrograms,
allowOrTypes,
allowPatUnquotes,
allowSpliceUnderscores,
allowTermUnquotes,
allowToplevelTerms,
allowTrailingCommas,
allowTraitParameters,
allowTypeLambdas,
allowViewBounds,
allowXmlLiterals,
toplevelSeparator
)
}
private[meta] def unquoteVariant(): Dialect = privateCopy(
allowTermUnquotes = false,
allowPatUnquotes = false,
allowMultilinePrograms = true
)
}
object Dialect extends InternalDialect {
def apply(
allowAndTypes: Boolean,
allowAtForExtractorVarargs: Boolean,
allowCaseClassWithoutParameterList: Boolean,
allowColonForExtractorVarargs: Boolean,
allowEnums: Boolean,
allowImplicitByNameParameters: Boolean,
allowInlineIdents: Boolean,
allowInlineMods: Boolean,
allowLiteralTypes: Boolean,
allowMethodTypes: Boolean,
allowMultilinePrograms: Boolean,
allowOrTypes: Boolean,
allowPatUnquotes: Boolean,
allowSpliceUnderscores: Boolean,
allowTermUnquotes: Boolean,
allowToplevelTerms: Boolean,
allowTrailingCommas: Boolean,
allowTraitParameters: Boolean,
allowTypeLambdas: Boolean,
allowViewBounds: Boolean,
allowXmlLiterals: Boolean,
@deprecated("toplevelSeparator has never been used", ">4.4.35")
toplevelSeparator: String
): Dialect = {
new Dialect(
allowAndTypes = allowAndTypes,
allowAtForExtractorVarargs = allowAtForExtractorVarargs,
allowCaseClassWithoutParameterList = allowCaseClassWithoutParameterList,
allowColonForExtractorVarargs = allowColonForExtractorVarargs,
allowEnums = allowEnums,
allowImplicitByNameParameters = allowImplicitByNameParameters,
allowInlineIdents = allowInlineIdents,
allowInlineMods = allowInlineMods,
allowLiteralTypes = allowLiteralTypes,
allowMultilinePrograms = allowMultilinePrograms,
allowOrTypes = allowOrTypes,
allowPatUnquotes = allowPatUnquotes,
allowSpliceUnderscores = allowSpliceUnderscores,
allowTermUnquotes = allowTermUnquotes,
allowToplevelTerms = allowToplevelTerms,
allowTrailingCommas = allowTrailingCommas,
allowTraitParameters = allowTraitParameters,
allowTypeLambdas = allowTypeLambdas,
allowViewBounds = allowViewBounds,
allowXmlLiterals = allowXmlLiterals,
toplevelSeparator = toplevelSeparator
)
}
private[meta] lazy val standards: Map[String, Dialect] = Seq[sourcecode.Text[Dialect]](
Dotty,
Scala3,
Paradise211,
Paradise212,
ParadiseTypelevel211,
ParadiseTypelevel212,
Sbt0136,
Sbt0137,
Sbt1,
Scala210,
Scala211,
Scala212,
Scala213,
Scala213Source3,
Scala212Source3,
Typelevel211,
Typelevel212
).map(x => x.source -> x.value).toMap
private[meta] lazy val inverseStandards: Map[Dialect, String] =
standards.iterator.map(_.swap).toMap
}
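// Editorial sketch (not part of the original source): since `copy` is deprecated and `Dialect`
// has reference-equality semantics, customized dialects are derived by chaining the `withX`
// methods above. The starting value `scala.meta.dialects.Scala213` is assumed to be one of the
// standard dialects listed in `standards`.
object DialectWithXSketch {
  lazy val custom: Dialect = scala.meta.dialects.Scala213
    .withAllowGivenUsing(true) // enable `given`/`using` clauses
    .withAllowSignificantIndentation(true) // enable indentation-based syntax
    .withAllowEndMarker(true) // enable `end` markers
  // Every `withX` call funnels through `privateCopy`, so all other flags keep their current values.
}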
| scalameta/scalameta | scalameta/dialects/shared/src/main/scala/scala/meta/Dialect.scala | Scala | bsd-3-clause | 28,841 |
package org.apache.datacommons.prepbuddy.normalizers
import org.apache.datacommons.prepbuddy.rdds.TransformableRDD
/**
 * A normalization strategy that rescales data to its standard score (z-score).
 * Formula for z-score normalization: (X - Mean) / Standard Deviation.
*/
class ZScoreNormalizer extends NormalizationStrategy {
private var standardDeviation: Double = 0
private var mean: Double = 0
override def prepare(transformableRDD: TransformableRDD, columnIndex: Int): Unit = {
val doubleRDD = transformableRDD.toDoubleRDD(columnIndex)
standardDeviation = doubleRDD.stdev
mean = doubleRDD.mean
}
override def normalize(rawValue: String): String = {
String.valueOf((rawValue.toDouble - mean) / standardDeviation)
}
}
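// Editorial sketch (not part of the original source): the same standard-score formula applied to
// a plain Scala collection, to make the math above concrete without needing a SparkContext.
// RDD#stdev is the population standard deviation, so the sketch divides by N as well.
object ZScoreFormulaSketch {
  def zScores(xs: Seq[Double]): Seq[Double] = {
    val mean = xs.sum / xs.size
    val stdDev = math.sqrt(xs.map(x => math.pow(x - mean, 2)).sum / xs.size)
    xs.map(x => (x - mean) / stdDev)
  }
  // zScores(Seq(2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0))
  //   == Seq(-1.5, -0.5, -0.5, -0.5, 0.0, 0.0, 1.0, 2.0)   (mean = 5.0, stdDev = 2.0)
}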
| blpabhishek/prep-buddy | src/main/scala/org/apache/datacommons/prepbuddy/normalizers/ZScoreNormalizer.scala | Scala | apache-2.0 | 782 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package ai.h2o.sparkling.backend.api
import javax.servlet.http.HttpServletRequest
trait ParameterBase {
protected def getParameterAsString(request: HttpServletRequest, parameterName: String): String = {
val result = request.getParameter(parameterName)
if (result == null) {
throw new IllegalArgumentException(s"Cannot find value for the parameter '$parameterName'")
}
result
}
}
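// Editorial sketch (not part of the original source): a hypothetical endpoint object mixing in
// ParameterBase; the parameter name "frame_id" is illustrative only.
object ParameterBaseSketch extends ParameterBase {
  def frameId(request: HttpServletRequest): String =
    getParameterAsString(request, "frame_id") // throws IllegalArgumentException when absent
}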
| h2oai/sparkling-water | core/src/main/scala/ai/h2o/sparkling/backend/api/ParameterBase.scala | Scala | apache-2.0 | 1,209 |
/*
* Copyright 2015-2016 IBM Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package whisk.core.entity
import java.nio.charset.StandardCharsets
import scala.language.postfixOps
import scala.util.matching.Regex
import spray.json._
import spray.json.DefaultJsonProtocol._
import whisk.core.entity.Attachments._
import whisk.core.entity.ExecManifest._
import whisk.core.entity.size.SizeInt
import whisk.core.entity.size.SizeOptionString
import whisk.core.entity.size.SizeString
/**
 * Exec encodes the executable details of an action. For a black
 * box container, an image name is required. For Javascript and Python
* actions, the code to execute is required.
* For Swift actions, the source code to execute the action is required.
* For Java actions, a base64-encoded string representing a jar file is
* required, as well as the name of the entrypoint class.
* An example exec looks like this:
* { kind : one of supported language runtimes,
* code : code to execute if kind is supported,
* image : container name when kind is "blackbox",
* binary: for some runtimes that allow binary attachments,
 *          main  : name of the entry point function, when using a non-default value (for Java, the name of the main class) }
*/
sealed abstract class Exec extends ByteSizeable {
override def toString = Exec.serdes.write(this).compactPrint
/** A type descriptor. */
val kind: String
/** When true exec may not be executed or updated. */
val deprecated: Boolean
}
/**
* A common super class for all action exec types that contain their executable
* code explicitly (i.e., any action other than a sequence).
*/
sealed abstract class CodeExec[+T <% SizeConversion] extends Exec {
/** An entrypoint (typically name of 'main' function). 'None' means a default value will be used. */
val entryPoint: Option[String]
/** The executable code. */
val code: T
/** Serialize code to a JSON value. */
def codeAsJson: JsValue
/** The runtime image (either built-in or a public image). */
val image: ImageName
/** Indicates if the action execution generates log markers to stdout/stderr once action activation completes. */
val sentinelledLogs: Boolean
/** Indicates if a container image is required from the registry to execute the action. */
val pull: Boolean
/**
* Indicates whether the code is stored in a text-readable or binary format.
   * The binary bit may be read from the database, but currently it is always computed.
   * Once the "code" is moved to an attachment, this may change to avoid recomputing
   * the binary property.
*/
val binary: Boolean
override def size = code.sizeInBytes + entryPoint.map(_.sizeInBytes).getOrElse(0.B)
}
protected[core] case class CodeExecAsString(
manifest: RuntimeManifest,
override val code: String,
override val entryPoint: Option[String])
extends CodeExec[String] {
override val kind = manifest.kind
override val image = manifest.image
override val sentinelledLogs = manifest.sentinelledLogs.getOrElse(true)
override val deprecated = manifest.deprecated.getOrElse(false)
override val pull = false
override lazy val binary = Exec.isBinaryCode(code)
override def codeAsJson = JsString(code)
}
protected[core] case class CodeExecAsAttachment(
manifest: RuntimeManifest,
override val code: Attachment[String],
override val entryPoint: Option[String])
extends CodeExec[Attachment[String]] {
override val kind = manifest.kind
override val image = manifest.image
override val sentinelledLogs = manifest.sentinelledLogs.getOrElse(true)
override val deprecated = manifest.deprecated.getOrElse(false)
override val pull = false
override lazy val binary = true
override def codeAsJson = code.toJson
def inline(bytes: Array[Byte]): CodeExecAsAttachment = {
val encoded = new String(bytes, StandardCharsets.UTF_8)
copy(code = Inline(encoded))
}
def attach: CodeExecAsAttachment = {
manifest.attached.map { a =>
copy(code = Attached(a.attachmentName, a.attachmentType))
} getOrElse this
}
}
/**
* @param image the image name
* @param code an optional script or zip archive (as base64 encoded) string
*/
protected[core] case class BlackBoxExec(
override val image: ImageName,
override val code: Option[String],
override val entryPoint: Option[String],
val native: Boolean)
extends CodeExec[Option[String]] {
override val kind = Exec.BLACKBOX
override val deprecated = false
override def codeAsJson = code.toJson
override lazy val binary = code map { Exec.isBinaryCode(_) } getOrElse false
override val sentinelledLogs = native
override val pull = !native
override def size = super.size + image.publicImageName.sizeInBytes
}
protected[core] case class SequenceExec(components: Vector[FullyQualifiedEntityName]) extends Exec {
override val kind = Exec.SEQUENCE
override val deprecated = false
override def size = components.map(_.size).reduceOption(_ + _).getOrElse(0.B)
}
protected[core] object Exec
extends ArgNormalizer[Exec]
with DefaultJsonProtocol {
val sizeLimit = 48 MB
// The possible values of the JSON 'kind' field for certain runtimes:
// - Sequence because it is an intrinsic
// - Black Box because it is a type marker
protected[core] val SEQUENCE = "sequence"
protected[core] val BLACKBOX = "blackbox"
private def execManifests = ExecManifest.runtimesManifest
override protected[core] implicit lazy val serdes = new RootJsonFormat[Exec] {
private def attFmt[T: JsonFormat] = Attachments.serdes[T]
private lazy val runtimes: Set[String] = execManifests.knownContainerRuntimes ++ Set(SEQUENCE, BLACKBOX)
override def write(e: Exec) = e match {
case c: CodeExecAsString =>
val base = Map("kind" -> JsString(c.kind), "code" -> JsString(c.code), "binary" -> JsBoolean(c.binary))
val main = c.entryPoint.map("main" -> JsString(_))
JsObject(base ++ main)
case a: CodeExecAsAttachment =>
val base = Map("kind" -> JsString(a.kind), "code" -> attFmt[String].write(a.code), "binary" -> JsBoolean(a.binary))
val main = a.entryPoint.map("main" -> JsString(_))
JsObject(base ++ main)
case s @ SequenceExec(comp) =>
JsObject("kind" -> JsString(s.kind), "components" -> comp.map(_.qualifiedNameWithLeadingSlash).toJson)
case b: BlackBoxExec =>
val base = Map("kind" -> JsString(b.kind), "image" -> JsString(b.image.publicImageName), "binary" -> JsBoolean(b.binary))
val code = b.code.filter(_.trim.nonEmpty).map("code" -> JsString(_))
val main = b.entryPoint.map("main" -> JsString(_))
JsObject(base ++ code ++ main)
}
override def read(v: JsValue) = {
require(v != null)
val obj = v.asJsObject
val kind = obj.fields.get("kind") match {
case Some(JsString(k)) => k.trim.toLowerCase
case _ => throw new DeserializationException("'kind' must be a string defined in 'exec'")
}
lazy val optMainField: Option[String] = obj.fields.get("main") match {
case Some(JsString(m)) => Some(m)
        case Some(_) => throw new DeserializationException(s"if defined, 'main' must be a string in 'exec' for '$kind' actions")
case None => None
}
kind match {
case Exec.SEQUENCE =>
val comp: Vector[FullyQualifiedEntityName] = obj.fields.get("components") match {
case Some(JsArray(components)) => components map (FullyQualifiedEntityName.serdes.read(_))
case Some(_) => throw new DeserializationException(s"'components' must be an array")
case None => throw new DeserializationException(s"'components' must be defined for sequence kind")
}
SequenceExec(comp)
case Exec.BLACKBOX =>
val image: ImageName = obj.fields.get("image") match {
case Some(JsString(i)) => ImageName.fromString(i).get // throws deserialization exception on failure
case _ => throw new DeserializationException(s"'image' must be a string defined in 'exec' for '${Exec.BLACKBOX}' actions")
}
val code: Option[String] = obj.fields.get("code") match {
case Some(JsString(i)) => if (i.trim.nonEmpty) Some(i) else None
          case Some(_) => throw new DeserializationException(s"if defined, 'code' must be a string defined in 'exec' for '${Exec.BLACKBOX}' actions")
case None => None
}
val native = execManifests.blackboxImages.contains(image)
BlackBoxExec(image, code, optMainField, native)
case _ =>
// map "default" virtual runtime versions to the currently blessed actual runtime version
val manifest = execManifests.resolveDefaultRuntime(kind) match {
case Some(k) => k
case None => throw new DeserializationException(s"kind '$kind' not in $runtimes")
}
manifest.attached.map { a =>
val jar: Attachment[String] = {
// java actions once stored the attachment in "jar" instead of "code"
obj.fields.get("code").orElse(obj.fields.get("jar"))
} map {
attFmt[String].read(_)
} getOrElse {
throw new DeserializationException(s"'code' must be a valid base64 string in 'exec' for '$kind' actions")
}
val main = optMainField.orElse {
if (manifest.requireMain.exists(identity)) {
throw new DeserializationException(s"'main' must be a string defined in 'exec' for '$kind' actions")
} else None
}
CodeExecAsAttachment(manifest, jar, main)
}.getOrElse {
val code: String = obj.fields.get("code") match {
case Some(JsString(c)) => c
case _ => throw new DeserializationException(s"'code' must be a string defined in 'exec' for '$kind' actions")
}
CodeExecAsString(manifest, code, optMainField)
}
}
}
}
val isBase64Pattern = new Regex("^([A-Za-z0-9+/]{4})*([A-Za-z0-9+/]{4}|[A-Za-z0-9+/]{3}=|[A-Za-z0-9+/]{2}==)$").pattern
def isBinaryCode(code: String): Boolean = {
if (code != null) {
val t = code.trim
(t.length > 0) && (t.length % 4 == 0) && isBase64Pattern.matcher(t).matches()
} else false
}
}
| xin-cai/openwhisk | common/scala/src/main/scala/whisk/core/entity/Exec.scala | Scala | apache-2.0 | 11,974 |
/*
* This file is part of AnyMime, a program to help you swap files
* wirelessly between mobile devices.
*
* Copyright (C) 2011 Timur Mehrvarz, timur.mehrvarz(a)gmail(.)com
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.timur.anymime
import android.content.Context
import android.util.Log
import android.view.View
import android.content.SharedPreferences
import android.widget.TextView
import android.widget.ImageView
import android.widget.ArrayAdapter
import android.widget.RadioButton
import android.view.LayoutInflater
import android.view.ViewGroup
import android.view.animation.AnimationUtils
class SlotListAdapter(context:Context, messageResourceId:Int, count:Int)
extends ArrayAdapter[String](context, messageResourceId) {
private val TAG = "SlotListAdapter"
private val D = Static.DBGLOG
var selected = -1
override def getView(position:Int, setView:View, parentViewGroup:ViewGroup) :View = {
var view = setView
if(view == null) {
if(D) Log.i(TAG, "getView position="+position+" inflate a new view")
val layoutInflater = context.getSystemService(Context.LAYOUT_INFLATER_SERVICE).asInstanceOf[LayoutInflater]
if(layoutInflater!=null) {
view = layoutInflater.inflate(messageResourceId, null)
}
}
if(view == null) {
Log.e(TAG, "getView view==null abort")
return null
}
val commaSeparatedString = getItem(position)
//if(D) Log.i(TAG, "getView("+position+") view!=null commaSeparatedString="+commaSeparatedString)
var slotName = ""
var fileList = "(empty)"
if(commaSeparatedString!=null && commaSeparatedString.length>0) {
val idxFirstComma = commaSeparatedString.indexOf(",")
if(idxFirstComma>=0) {
slotName = commaSeparatedString.substring(0,idxFirstComma).trim
fileList = commaSeparatedString.substring(idxFirstComma+1).trim
}
}
val visibleTextView = view.findViewById(R.id.visibleText).asInstanceOf[TextView]
if(visibleTextView != null) {
if(slotName!=null && slotName.length>0)
visibleTextView.setText("Slot "+(position+1)+": "+slotName)
else
visibleTextView.setText("Slot "+(position+1))
}
val visibleText2View = view.findViewById(R.id.visibleText2).asInstanceOf[TextView]
if(visibleText2View != null) {
visibleText2View.setText(fileList)
}
val iconView = view.findViewById(R.id.icon).asInstanceOf[ImageView]
if(iconView != null) {
if(position==selected)
iconView.setImageResource(R.drawable.checkmark)
else
iconView.setImageResource(R.drawable.empty)
}
return view
}
}
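// Editorial sketch (not part of the original source): getView() above expects every adapter item
// to be a comma-separated string whose first field is the slot name and whose remainder is the
// file list. A hypothetical helper building such an item:
object SlotListAdapterSketch {
  def slotEntry(slotName: String, files: Seq[String]): String =
    slotName + "," + files.mkString(", ")
  // slotEntry("Holiday", Seq("img1.jpg", "img2.jpg")) == "Holiday,img1.jpg, img2.jpg"
  // getView() then splits at the first comma: "Holiday" becomes the title, the rest the file list.
}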
| mehrvarz/Anymime | src/org/timur/anymime/SlotListAdapter.scala | Scala | gpl-3.0 | 3,248 |
package io.getquill.codegen.integration
import com.typesafe.scalalogging.Logger
import java.io.Closeable
import java.sql.Connection
import javax.sql.DataSource
import io.getquill.codegen.jdbc.DatabaseTypes._
import io.getquill.codegen.jdbc.gen.DefaultJdbcSchemaReader
import io.getquill.codegen.model.JdbcTableMeta
import io.getquill.codegen.util.ConfigPrefix
import io.getquill.codegen.util.OptionOps._
import io.getquill.codegen.util.SchemaConfig
import io.getquill.codegen.util.StringUtil._
import io.getquill.codegen.util.TryOps._
import io.getquill.util.Using.Manager
import org.slf4j.LoggerFactory
import scala.util.Try
object DbHelper {
private val logger = Logger(LoggerFactory.getLogger(classOf[DbHelper]))
private def getDatabaseType(ds: DataSource): DatabaseType = {
Manager { use =>
val conn = use(ds.getConnection)
val meta = conn.getMetaData
val productType = meta.getDatabaseProductName
DatabaseType.fromProductName(productType)
}.flatten.orThrow
}
def syncDbRun(rawSql: String, ds: DataSource): Try[Unit] = {
val databaseType = getDatabaseType(ds)
val createSchemas =
"""
|CREATE SCHEMA IF NOT EXISTS Alpha;
|CREATE SCHEMA IF NOT EXISTS Bravo;
""".stripMargin
val mysqlAnsiQuoteMode =
"SET sql_mode='STRICT_TRANS_TABLES,ANSI_QUOTES';"
val sql = databaseType match {
      // For MySQL we need to turn on ANSI-quoting mode so that quoted columns work correctly
case MySql =>
List(mysqlAnsiQuoteMode, createSchemas, rawSql).mkString("\\n")
// Oracle does not have built-in 32 or 64 bit integer types so the effect has to be simulated with digits.
case Oracle =>
rawSql
.replaceAll("\\\\bint\\\\b", "number(9,0)")
.replaceAll("\\\\bbigint\\\\b", "number(18,0)")
// Creating schemas is not possible in sqlite
case Sqlite =>
rawSql
case SqlServer =>
rawSql
.replaceAll("(?i)Alpha\\\\.Person", "Alpha.dbo.Person")
.replaceAll("(?i)Bravo\\\\.Person", "Bravo.dbo.Person")
case _ =>
List(createSchemas, rawSql).mkString("\\n")
}
if (sql.trim.isEmpty) throw new IllegalArgumentException("Cannot execute empty query")
val result = Manager { use =>
appendSequence(use(ds.getConnection), sql.split(";").toList.filter(!_.trim.isEmpty))
}
result.map(_ => ())
}
private def appendSequence(conn: Connection, actions: List[String]) = {
actions.map { actStr =>
logger.debug(s"Executing: ${actStr}")
Manager { use =>
val stmt = use(conn.prepareStatement(actStr))
stmt.execute()
}
}
}
def dropTables(ds: DataSource with Closeable) = {
val databaseType = getDatabaseType(ds)
val allTables = databaseType match {
// For Oracle, need to connect to other schemas to get info
case Oracle =>
new DefaultJdbcSchemaReader(databaseType).extractTables(() => ds.getConnection) ++
new DefaultJdbcSchemaReader(databaseType) { override def schemaPattern(schema: String) = "ALPHA" }.extractTables(() => ds.getConnection) ++
new DefaultJdbcSchemaReader(databaseType) { override def schemaPattern(schema: String) = "BRAVO" }.extractTables(() => ds.getConnection)
// For SQL Server need to run a manual query to get tables from alpha/bravo databases if they exist
case SqlServer => {
import io.getquill._
val ctx = new SqlServerJdbcContext[Literal](Literal, ds)
import ctx._
val tables =
ctx.run(
infix"""
(select table_catalog as _1, table_schema as _2, table_name as _3, table_type as _4 from codegen_test.information_schema.tables) UNION
(select table_catalog as _1, table_schema as _2, table_name as _3, table_type as _4 from alpha.information_schema.tables) UNION
(select table_catalog as _1, table_schema as _2, table_name as _3, table_type as _4 from bravo.information_schema.tables)
""".as[Query[(String, String, String, String)]]
)
tables.map { case (cat, schem, name, tpe) => JdbcTableMeta(Option(cat), Option(schem), name, Option(tpe)) }
}
case _ =>
new DefaultJdbcSchemaReader(databaseType).extractTables(() => ds.getConnection)
}
val getSchema: JdbcTableMeta => Option[String] = databaseType match {
case MySql => tm => tm.tableCat
case SqlServer => tm => tm.tableCat.flatMap(tc => tm.tableSchem.flatMap(ts => Some(s"${tc}.${ts}")))
case _ => tm => tm.tableSchem
}
val tables = allTables.filter { tm =>
databaseType match {
case MySql =>
tm.tableCat.existsInSetNocase("codegen_test", "alpha", "bravo")
case SqlServer =>
tm.tableCat.existsInSetNocase("codegen_test", "alpha", "bravo") && tm.tableSchem.exists(_.toLowerCase == "dbo")
case Oracle =>
tm.tableSchem.existsInSetNocase("codegen_test", "alpha", "bravo")
case Sqlite => // SQLite does not have individual schemas at all.
true
case Postgres =>
tm.tableSchem.existsInSetNocase("public", "alpha", "bravo")
case H2 =>
tm.tableCat.exists(_.toLowerCase == "codegen_test.h2") &&
tm.tableSchem.exists(_.toLowerCase != "information_schema")
}
}
val query = tables.map(t => s"drop table ${getSchema(t).map(_ + ".").getOrElse("") + s""""${t.tableName}""""};").mkString("\\n")
logger.info("Cleanup:\\n" + query)
Option(query).andNotEmpty.foreach(DbHelper.syncDbRun(_, ds).orThrow)
}
}
class DbHelper(config: SchemaConfig, dbPrefix: ConfigPrefix, ds: DataSource) {
def setup(): Unit = Option(config.content).andNotEmpty.foreach(setupScript =>
DbHelper.syncDbRun(setupScript, ds).orThrow(
new IllegalArgumentException(s"Database Setup Failed for ${dbPrefix}. Could not execute DB config ${config} command:\\n${config.content}", _)
))
}
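// Editorial sketch (not part of the original source): the typical lifecycle in an integration
// test, assuming the test harness already provides a SchemaConfig, a ConfigPrefix and a pooled
// DataSource for the database under test.
object DbHelperUsageSketch {
  def recreate(schemaConfig: SchemaConfig, prefix: ConfigPrefix, ds: DataSource with Closeable): Unit = {
    DbHelper.dropTables(ds) // drop whatever a previous run left behind
    new DbHelper(schemaConfig, prefix, ds).setup() // run the schema's setup script against `ds`
  }
}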
| getquill/quill | quill-codegen-jdbc/src/test/scala/io/getquill/codegen/integration/DbHelper.scala | Scala | apache-2.0 | 5,966 |
package com.codahale.jerkson.deser
import scala.collection.JavaConversions._
import scala.collection.mutable.ArrayBuffer
import com.codahale.jerkson.JsonSnakeCase
import com.codahale.jerkson.util._
import com.codahale.jerkson.Util._
import org.codehaus.jackson.{JsonNode, JsonToken, JsonParser}
import org.codehaus.jackson.map._
import org.codehaus.jackson.map.annotate.JsonCachable
import org.codehaus.jackson.node.{ObjectNode, NullNode, TreeTraversingParser}
import org.codehaus.jackson.`type`.JavaType
@JsonCachable
class CaseClassDeserializer(config: DeserializationConfig,
javaType: JavaType,
provider: DeserializerProvider,
classLoader: ClassLoader) extends JsonDeserializer[Object] {
private val isSnakeCase = javaType.getRawClass.isAnnotationPresent(classOf[JsonSnakeCase])
private val params = CaseClassSigParser.parse(javaType.getRawClass, config.getTypeFactory, classLoader).map {
case (name, jt) => (if (isSnakeCase) snakeCase(name) else name, jt)
}.toArray
private val paramTypes = params.map { _._2.getRawClass }.toList
private val constructor = javaType.getRawClass.getConstructors.find { c =>
val constructorTypes = c.getParameterTypes.toList.map { t =>
t.toString match {
case "byte" => classOf[java.lang.Byte]
case "short" => classOf[java.lang.Short]
case "int" => classOf[java.lang.Integer]
case "long" => classOf[java.lang.Long]
case "float" => classOf[java.lang.Float]
case "double" => classOf[java.lang.Double]
case "char" => classOf[java.lang.Character]
case "boolean" => classOf[java.lang.Boolean]
case _ => t
}
}
constructorTypes == paramTypes
}.getOrElse { throw new JsonMappingException("Unable to find a case accessor for " + javaType.getRawClass.getName) }
def deserialize(jp: JsonParser, ctxt: DeserializationContext): Object = {
if (jp.getCurrentToken == JsonToken.START_OBJECT) {
jp.nextToken()
}
if (jp.getCurrentToken != JsonToken.FIELD_NAME &&
jp.getCurrentToken != JsonToken.END_OBJECT) {
throw ctxt.mappingException(javaType.getRawClass)
}
val node = jp.readValueAsTree
val values = new ArrayBuffer[AnyRef]
for ((paramName, paramType) <- params) {
val field = node.get(paramName)
val tp = new TreeTraversingParser(if (field == null) NullNode.getInstance else field, jp.getCodec)
val value = if (paramType.getRawClass == classOf[Option[_]]) {
// thanks again for special-casing VALUE_NULL
Option(tp.getCodec.readValue[Object](tp, paramType.containedType(0)))
} else {
tp.getCodec.readValue[Object](tp, paramType)
}
if (field != null || value != null) {
values += value
}
if (values.size == params.size) {
return constructor.newInstance(values.toArray: _*).asInstanceOf[Object]
}
}
throw new JsonMappingException(errorMessage(node))
}
private def errorMessage(node: JsonNode) = {
val names = params.map { _._1 }.mkString("[", ", ", "]")
val existing = node match {
case obj: ObjectNode => obj.getFieldNames.mkString("[", ", ", "]")
case _: NullNode => "[]" // this is what Jackson deserializes the inside of an empty object to
case unknown => "a non-object"
}
"Invalid JSON. Needed %s, but found %s.".format(names, existing)
}
}
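// Editorial sketch (not part of the original source): the kind of case class this deserializer
// targets. The `Json.parse` entry point is assumed from Jerkson's public API.
object CaseClassDeserializerSketch {
  case class Person(id: Long, name: String, nickname: Option[String])
  // import com.codahale.jerkson.Json._
  // parse[Person]("""{"id":1,"name":"Coda"}""") == Person(1, "Coda", None)
  // A missing Option[_] field becomes None; any other missing field fails with
  // "Invalid JSON. Needed [id, name, nickname], but found ...".
}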
| cphylabs/jerkson-old | src/main/scala/com/codahale/jerkson/deser/CaseClassDeserializer.scala | Scala | mit | 3,461 |
package io.techcode.streamy.event
/**
* All monitor events.
*/
object MonitorEvent {
/**
* Represent a process monitor event.
* This event is fired when a monitoring check has been done.
*/
case class Process(
timestamp: Long,
openFileDescriptors: Long,
maxFileDescriptors: Long,
cpuPercent: Short,
cpuTotal: Long,
memTotalVirtual: Long
)
/**
* Represent a os monitor event.
* This event is fired when a monitoring check has been done.
*/
case class Os(
timestamp: Long,
cpuPercent: Short,
cpuLoadAverage: Array[Double],
memFree: Long,
memAvailable: Long,
memTotal: Long,
swapFree: Long,
swapTotal: Long
)
/**
* Represent a jvm monitor event.
* This event is fired when a monitoring check has been done.
*/
case class Jvm(
timestamp: Long,
uptime: Long,
memHeapUsed: Long,
memHeapCommitted: Long,
memHeapMax: Long,
memNonHeapCommitted: Long,
memNonHeapUsed: Long,
thread: Int,
threadPeak: Int,
classLoaded: Long,
classLoadedTotal: Long,
classUnloaded: Long,
bufferPools: Seq[Jvm.BufferPool],
garbageCollectors: Seq[Jvm.GarbageCollector]
)
/**
* Represent a garbage collector overhead monitor event.
* This event is fired when a monitoring check has been done.
*/
case class GarbageCollectorOverhead(
timestamp: Long,
time: Long,
elapsed: Long,
percent: Short
)
/**
* Jvm event extensions.
*/
object Jvm {
// Buffer poll data
case class BufferPool(
name: String,
count: Long,
totalCapacity: Long,
memUsed: Long
)
// Garbage collector data
case class GarbageCollector(
name: String,
collectionCount: Long,
collectionTime: Long
)
}
}
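// Editorial sketch (not part of the original source): building an Os sample event; the figures
// are illustrative, and cpuLoadAverage is presumably the 1, 5 and 15 minute load averages.
object MonitorEventSketch {
  val sampleOs: MonitorEvent.Os = MonitorEvent.Os(
    timestamp = System.currentTimeMillis(),
    cpuPercent = 12.toShort,
    cpuLoadAverage = Array(0.5, 0.7, 0.9),
    memFree = 2L << 30,
    memAvailable = 3L << 30,
    memTotal = 8L << 30,
    swapFree = 1L << 30,
    swapTotal = 2L << 30
  )
}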
| amannocci/streamy | core/src/main/scala/io/techcode/streamy/event/MonitorEvent.scala | Scala | mit | 1,825 |
import org.fusesource.scalate.support.TemplatePackage
import org.fusesource.scalate.TemplateSource
import org.fusesource.scalate.Binding
class ScalatePackage extends TemplatePackage {
def header(source: TemplateSource, bindings: List[Binding]) = """
"""
} | MomoPain/scalajs-react-crud | jvm/src/main/scala/ScalatePackage.scala | Scala | mit | 262 |
package org.saddle.framework
import org.saddle._
import org.specs2.matcher._
import scala.reflect.ClassTag
/**
* A matcher for two numeric Mats that must be equal to within
* a tolerance
*/
class BeCloseToMat[T: Numeric : ClassTag](m: Mat[T], delta: T) extends Matcher[Mat[T]] {
def apply[S <: Mat[T]](x: Expectable[S]) = {
val num = implicitly[Numeric[T]]
result(m.length == 0 || {
val res = m.contents.zipWithIndex map {
case (n, i) =>
num.lteq(num.minus(n, delta), x.value.contents(i)) &&
num.lteq(x.value.contents(i), num.plus(n, delta))
}
Vec(res: _*).all
},
" are close +/- " + delta, " are close +/- " + delta, x)
}
}
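// Editorial sketch (not part of the original source): inside a specs2 specification the matcher
// reads as
//
//   computed must BeCloseToMat(expected, 1e-9)
//
// i.e. every element of `computed` has to lie within +/- 1e-9 of the matching element of
// `expected`; an empty expected Mat matches anything (see the `m.length == 0` guard above).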
object BeCloseToMat {
def apply[T: Numeric : ClassTag](v: Mat[T], delta: T) = new BeCloseToMat[T](v, delta)
} | amaizing/amaizing-saddle | saddle-test-framework/src/main/scala/org/saddle/framework/BeCloseToMat.scala | Scala | apache-2.0 | 812 |
package eu.akkamo
import akka.actor.ActorSystem
import com.typesafe.config.{Config, ConfigFactory}
import scala.concurrent.Future
import scala.util.Try
/**
 * Registers one or more Akka actor systems.
* {{{
* configuration example:
* akkamo.akka = {
 *    // one akka configuration block; the same system may be registered under several aliases
* name1 = {
* aliases = ["alias1, "alias2"]
* // standard akka attributes for example:
* akka{
* loglevel = "DEBUG"
* debug {
* lifecycle = on
* }
* }
* // ....
* },
 *  name2 = { // no aliases - only one block allowed
* default = true
* ....
* }
* }
* }}}
 * When more than one akka configuration block exists, one must be denoted as `default`. <br/>
 * When the configuration is missing, a single default Akka system is created.
*/
class AkkaModule extends Module with Initializable with Disposable with Publisher {
/**
* pointer to array containing set of akka Actor System names in configuration
*/
val CfgKey = "akkamo.akka"
val default =
s"""
|$CfgKey = {
| system = {}
|}
""".stripMargin
/**
* Initializes the module into provided mutable context, blocking
*/
override def initialize(ctx: Context) = Try {
val cfg = ctx.get[Config]
val registered = Initializable.parseConfig[Config](CfgKey, cfg).getOrElse {
Initializable.parseConfig[Config](CfgKey, ConfigFactory.parseString(default)).get
    }.map { case (default, aliases, cfg) => (default, aliases, ActorSystem.apply(aliases.head, cfg)) }
ctx.register(Initializable.defaultReport(CfgKey, registered))
}
@throws[DisposableError]("If dispose execution fails")
override def dispose(ctx: Context) = {
import scala.concurrent.ExecutionContext.Implicits.global
val futures = ctx.registered[ActorSystem].map { case (s, _) => s.terminate() }
Future.sequence(futures).map { p => () }
}
override def dependencies(dependencies: TypeInfoChain): TypeInfoChain = dependencies.&&[Config]
override def publish(services: TypeInfoChain): TypeInfoChain = services.&&[ActorSystem]
}
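// Editorial sketch (not part of the original source): a downstream component fetching an actor
// system registered above. The (system -> aliases) pair shape of `registered` is assumed from
// its use in `dispose`.
object AkkaModuleUsageSketch {
  def firstActorSystem(ctx: Context): ActorSystem =
    ctx.registered[ActorSystem].head match { case (system, _) => system }
}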
| akkamo/akkamo | akkamoAkka/src/main/scala/eu/akkamo/AkkaModule.scala | Scala | unlicense | 2,224 |
package stealthnet.scala.ui.web
import com.typesafe.config.{Config, ConfigFactory}
import java.util.concurrent.TimeUnit
import stealthnet.scala.{BaseSettings, Settings => coreSettings}
/** ''StealthNet'' web UI settings companion object. */
object Settings {
/** Core settings. */
val core: coreSettings = coreSettings.core
/** Web UI settings. */
val ui = new Settings(ConfigFactory.load())
}
/** ''StealthNet'' web UI settings. */
class Settings(config: Config) extends BaseSettings(config) {
protected val confPath = "stealthnet.ui.web"
/** Web server port. */
val webServerPort: Int = config.getInt(optionPath("server.port"))
/** Shutdown grace period (ms). `2s` by default. */
val shutdownGracePeriod: Long =
config.getDuration(optionPath("server.shutdown.grace.period"), TimeUnit.MILLISECONDS)
}
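// Editorial sketch (not part of the original source): the HOCON shape these lookups expect,
// assuming `optionPath` (defined in BaseSettings, not shown here) prefixes option names with
// `confPath`; the port value is illustrative.
//
//   stealthnet.ui.web {
//     server.port = 8080
//     server.shutdown.grace.period = 2s
//   }
//
//   Settings.ui.webServerPort       // -> 8080
//   Settings.ui.shutdownGracePeriod // -> 2000 (milliseconds)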
| suiryc/StealthNet | ui-web-jsf/src/main/scala/stealthnet/scala/ui/web/Settings.scala | Scala | gpl-3.0 | 834 |
package com.metebalci
import org.scalatest._
import org.scalatest.Assertions._
class ModifySpec extends FunSuite {
test("modify") {
case class Foo(name:String, age:Int)
val foo = Foo("foo", 30)
val fooLens = Lens[Foo, String] (
f => f.name,
(n, f) => f.copy(name = n)
)
assert( fooLens.get(foo) == "foo" )
assert( fooLens.modify({_ => "bar"}, foo) == Foo("bar", 30) )
}
}
| metebalci/experiment-lenses-scala | src/test/scala/02-ModifySpec.scala | Scala | gpl-2.0 | 420 |
/*
* Copyright 2001-2014 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest
abstract class AsyncFlatSpec extends AsyncFlatSpecLike {
/**
   * Returns a user-friendly string for this suite, composed of the
   * simple name of the class (possibly simplified further by removing dollar signs if added by the Scala interpreter) and, if this suite
* contains nested suites, the result of invoking <code>toString</code> on each
* of the nested suites, separated by commas and surrounded by parentheses.
*
* @return a user-friendly string for this suite
*/
override def toString: String = Suite.suiteToString(None, this)
} | SRGOM/scalatest | scalatest/src/main/scala/org/scalatest/AsyncFlatSpec.scala | Scala | apache-2.0 | 1,182 |
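// Editorial sketch (not part of the original source), illustrating the AsyncFlatSpec defined
// above: test bodies return a Future assertion and run on the suite's implicit execution context.
//
//   class AdditionSpec extends org.scalatest.AsyncFlatSpec {
//     behavior of "addition"
//     it should "eventually compute a sum" in {
//       scala.concurrent.Future(1 + 1).map(sum => assert(sum == 2))
//     }
//   }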
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.util
import java.io._
import java.net.{InetAddress, Inet4Address, NetworkInterface, URI, URL}
import java.nio.ByteBuffer
import java.util.{Locale, Random, UUID}
import java.util.concurrent.{ConcurrentHashMap, Executors, ThreadPoolExecutor}
import scala.collection.JavaConversions._
import scala.collection.Map
import scala.collection.mutable.ArrayBuffer
import scala.io.Source
import scala.reflect.ClassTag
import com.google.common.io.Files
import com.google.common.util.concurrent.ThreadFactoryBuilder
import org.apache.hadoop.fs.{FileSystem, FileUtil, Path}
import org.apache.spark.{Logging, SparkConf, SparkException}
import org.apache.spark.serializer.{DeserializationStream, SerializationStream, SerializerInstance}
import org.apache.spark.deploy.SparkHadoopUtil
/**
* Various utility methods used by Spark.
*/
private[spark] object Utils extends Logging {
/** Serialize an object using Java serialization */
def serialize[T](o: T): Array[Byte] = {
val bos = new ByteArrayOutputStream()
val oos = new ObjectOutputStream(bos)
oos.writeObject(o)
oos.close()
bos.toByteArray
}
/** Deserialize an object using Java serialization */
def deserialize[T](bytes: Array[Byte]): T = {
val bis = new ByteArrayInputStream(bytes)
val ois = new ObjectInputStream(bis)
ois.readObject.asInstanceOf[T]
}
/** Deserialize an object using Java serialization and the given ClassLoader */
def deserialize[T](bytes: Array[Byte], loader: ClassLoader): T = {
val bis = new ByteArrayInputStream(bytes)
val ois = new ObjectInputStream(bis) {
override def resolveClass(desc: ObjectStreamClass) =
Class.forName(desc.getName, false, loader)
}
ois.readObject.asInstanceOf[T]
}
/** Deserialize a Long value (used for {@link org.apache.spark.api.python.PythonPartitioner}) */
def deserializeLongValue(bytes: Array[Byte]) : Long = {
// Note: we assume that we are given a Long value encoded in network (big-endian) byte order
var result = bytes(7) & 0xFFL
result = result + ((bytes(6) & 0xFFL) << 8)
result = result + ((bytes(5) & 0xFFL) << 16)
result = result + ((bytes(4) & 0xFFL) << 24)
result = result + ((bytes(3) & 0xFFL) << 32)
result = result + ((bytes(2) & 0xFFL) << 40)
result = result + ((bytes(1) & 0xFFL) << 48)
result + ((bytes(0) & 0xFFL) << 56)
}
/** Serialize via nested stream using specific serializer */
def serializeViaNestedStream(os: OutputStream, ser: SerializerInstance)(
f: SerializationStream => Unit) = {
val osWrapper = ser.serializeStream(new OutputStream {
def write(b: Int) = os.write(b)
override def write(b: Array[Byte], off: Int, len: Int) = os.write(b, off, len)
})
try {
f(osWrapper)
} finally {
osWrapper.close()
}
}
/** Deserialize via nested stream using specific serializer */
def deserializeViaNestedStream(is: InputStream, ser: SerializerInstance)(
f: DeserializationStream => Unit) = {
val isWrapper = ser.deserializeStream(new InputStream {
def read(): Int = is.read()
override def read(b: Array[Byte], off: Int, len: Int): Int = is.read(b, off, len)
})
try {
f(isWrapper)
} finally {
isWrapper.close()
}
}
/**
* Primitive often used when writing {@link java.nio.ByteBuffer} to {@link java.io.DataOutput}.
*/
def writeByteBuffer(bb: ByteBuffer, out: ObjectOutput) = {
if (bb.hasArray) {
out.write(bb.array(), bb.arrayOffset() + bb.position(), bb.remaining())
} else {
val bbval = new Array[Byte](bb.remaining())
bb.get(bbval)
out.write(bbval)
}
}
def isAlpha(c: Char): Boolean = {
(c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z')
}
/** Split a string into words at non-alphabetic characters */
def splitWords(s: String): Seq[String] = {
val buf = new ArrayBuffer[String]
var i = 0
while (i < s.length) {
var j = i
while (j < s.length && isAlpha(s.charAt(j))) {
j += 1
}
if (j > i) {
buf += s.substring(i, j)
}
i = j
while (i < s.length && !isAlpha(s.charAt(i))) {
i += 1
}
}
buf
}
private val shutdownDeletePaths = new scala.collection.mutable.HashSet[String]()
// Register the path to be deleted via shutdown hook
def registerShutdownDeleteDir(file: File) {
val absolutePath = file.getAbsolutePath()
shutdownDeletePaths.synchronized {
shutdownDeletePaths += absolutePath
}
}
// Is the path already registered to be deleted via a shutdown hook ?
def hasShutdownDeleteDir(file: File): Boolean = {
val absolutePath = file.getAbsolutePath()
shutdownDeletePaths.synchronized {
shutdownDeletePaths.contains(absolutePath)
}
}
  // Note: returns true if the file is a child of some registered path but not equal to it;
  // false otherwise. This ensures that two shutdown hooks do not try to delete each other's
  // paths, which would result in IOException and incomplete cleanup.
def hasRootAsShutdownDeleteDir(file: File): Boolean = {
val absolutePath = file.getAbsolutePath()
val retval = shutdownDeletePaths.synchronized {
shutdownDeletePaths.find { path =>
!absolutePath.equals(path) && absolutePath.startsWith(path)
}.isDefined
}
if (retval) {
logInfo("path = " + file + ", already present as root for deletion.")
}
retval
}
/** Create a temporary directory inside the given parent directory */
def createTempDir(root: String = System.getProperty("java.io.tmpdir")): File = {
var attempts = 0
val maxAttempts = 10
var dir: File = null
while (dir == null) {
attempts += 1
if (attempts > maxAttempts) {
throw new IOException("Failed to create a temp directory (under " + root + ") after " +
maxAttempts + " attempts!")
}
try {
dir = new File(root, "spark-" + UUID.randomUUID.toString)
if (dir.exists() || !dir.mkdirs()) {
dir = null
}
} catch { case e: IOException => ; }
}
registerShutdownDeleteDir(dir)
// Add a shutdown hook to delete the temp dir when the JVM exits
Runtime.getRuntime.addShutdownHook(new Thread("delete Spark temp dir " + dir) {
override def run() {
        // Attempt to delete only if no registered parent path is already set to delete this directory.
if (! hasRootAsShutdownDeleteDir(dir)) Utils.deleteRecursively(dir)
}
})
dir
}
/** Copy all data from an InputStream to an OutputStream */
def copyStream(in: InputStream,
out: OutputStream,
closeStreams: Boolean = false)
{
val buf = new Array[Byte](8192)
var n = 0
while (n != -1) {
n = in.read(buf)
if (n != -1) {
out.write(buf, 0, n)
}
}
if (closeStreams) {
in.close()
out.close()
}
}
/**
* Download a file requested by the executor. Supports fetching the file in a variety of ways,
* including HTTP, HDFS and files on a standard filesystem, based on the URL parameter.
*
* Throws SparkException if the target file already exists and has different contents than
* the requested file.
*/
def fetchFile(url: String, targetDir: File, conf: SparkConf) {
val filename = url.split("/").last
val tempDir = getLocalDir(conf)
val tempFile = File.createTempFile("fetchFileTemp", null, new File(tempDir))
val targetFile = new File(targetDir, filename)
val uri = new URI(url)
val fileOverwrite = conf.getBoolean("spark.files.overwrite", false)
uri.getScheme match {
case "http" | "https" | "ftp" =>
logInfo("Fetching " + url + " to " + tempFile)
val in = new URL(url).openStream()
val out = new FileOutputStream(tempFile)
Utils.copyStream(in, out, true)
if (targetFile.exists && !Files.equal(tempFile, targetFile)) {
if (fileOverwrite) {
targetFile.delete()
logInfo(("File %s exists and does not match contents of %s, " +
"replacing it with %s").format(targetFile, url, url))
} else {
tempFile.delete()
throw new SparkException(
"File " + targetFile + " exists and does not match contents of" + " " + url)
}
}
Files.move(tempFile, targetFile)
case "file" | null =>
// In the case of a local file, copy the local file to the target directory.
// Note the difference between uri vs url.
val sourceFile = if (uri.isAbsolute) new File(uri) else new File(url)
var shouldCopy = true
if (targetFile.exists) {
if (!Files.equal(sourceFile, targetFile)) {
if (fileOverwrite) {
targetFile.delete()
logInfo(("File %s exists and does not match contents of %s, " +
"replacing it with %s").format(targetFile, url, url))
} else {
throw new SparkException(
"File " + targetFile + " exists and does not match contents of" + " " + url)
}
} else {
// Do nothing if the file contents are the same, i.e. this file has been copied
// previously.
logInfo(sourceFile.getAbsolutePath + " has been previously copied to "
+ targetFile.getAbsolutePath)
shouldCopy = false
}
}
if (shouldCopy) {
// The file does not exist in the target directory. Copy it there.
logInfo("Copying " + sourceFile.getAbsolutePath + " to " + targetFile.getAbsolutePath)
Files.copy(sourceFile, targetFile)
}
case _ =>
// Use the Hadoop filesystem library, which supports file://, hdfs://, s3://, and others
val conf = SparkHadoopUtil.get.newConfiguration()
val fs = FileSystem.get(uri, conf)
val in = fs.open(new Path(uri))
val out = new FileOutputStream(tempFile)
Utils.copyStream(in, out, true)
if (targetFile.exists && !Files.equal(tempFile, targetFile)) {
if (fileOverwrite) {
targetFile.delete()
logInfo(("File %s exists and does not match contents of %s, " +
"replacing it with %s").format(targetFile, url, url))
} else {
tempFile.delete()
throw new SparkException(
"File " + targetFile + " exists and does not match contents of" + " " + url)
}
}
Files.move(tempFile, targetFile)
}
// Decompress the file if it's a .tar or .tar.gz
if (filename.endsWith(".tar.gz") || filename.endsWith(".tgz")) {
logInfo("Untarring " + filename)
Utils.execute(Seq("tar", "-xzf", filename), targetDir)
} else if (filename.endsWith(".tar")) {
logInfo("Untarring " + filename)
Utils.execute(Seq("tar", "-xf", filename), targetDir)
}
// Make the file executable - That's necessary for scripts
FileUtil.chmod(targetFile.getAbsolutePath, "a+x")
}
/**
* Get a temporary directory using Spark's spark.local.dir property, if set. This will always
* return a single directory, even though the spark.local.dir property might be a list of
* multiple paths.
*/
def getLocalDir(conf: SparkConf): String = {
conf.get("spark.local.dir", System.getProperty("java.io.tmpdir")).split(',')(0)
}
/**
* Shuffle the elements of a collection into a random order, returning the
* result in a new collection. Unlike scala.util.Random.shuffle, this method
* uses a local random number generator, avoiding inter-thread contention.
*/
def randomize[T: ClassTag](seq: TraversableOnce[T]): Seq[T] = {
randomizeInPlace(seq.toArray)
}
/**
* Shuffle the elements of an array into a random order, modifying the
* original array. Returns the original array.
*/
def randomizeInPlace[T](arr: Array[T], rand: Random = new Random): Array[T] = {
for (i <- (arr.length - 1) to 1 by -1) {
val j = rand.nextInt(i)
val tmp = arr(j)
arr(j) = arr(i)
arr(i) = tmp
}
arr
}
/**
* Get the local host's IP address in dotted-quad format (e.g. 1.2.3.4).
* Note, this is typically not used from within core spark.
*/
lazy val localIpAddress: String = findLocalIpAddress()
lazy val localIpAddressHostname: String = getAddressHostName(localIpAddress)
private def findLocalIpAddress(): String = {
val defaultIpOverride = System.getenv("SPARK_LOCAL_IP")
if (defaultIpOverride != null) {
defaultIpOverride
} else {
val address = InetAddress.getLocalHost
if (address.isLoopbackAddress) {
// Address resolves to something like 127.0.1.1, which happens on Debian; try to find
// a better address using the local network interfaces
for (ni <- NetworkInterface.getNetworkInterfaces) {
for (addr <- ni.getInetAddresses if !addr.isLinkLocalAddress &&
!addr.isLoopbackAddress && addr.isInstanceOf[Inet4Address]) {
// We've found an address that looks reasonable!
logWarning("Your hostname, " + InetAddress.getLocalHost.getHostName + " resolves to" +
" a loopback address: " + address.getHostAddress + "; using " + addr.getHostAddress +
" instead (on interface " + ni.getName + ")")
logWarning("Set SPARK_LOCAL_IP if you need to bind to another address")
return addr.getHostAddress
}
}
logWarning("Your hostname, " + InetAddress.getLocalHost.getHostName + " resolves to" +
" a loopback address: " + address.getHostAddress + ", but we couldn't find any" +
" external IP address!")
logWarning("Set SPARK_LOCAL_IP if you need to bind to another address")
}
address.getHostAddress
}
}
private var customHostname: Option[String] = None
/**
* Allow setting a custom host name because when we run on Mesos we need to use the same
* hostname it reports to the master.
*/
def setCustomHostname(hostname: String) {
// DEBUG code
Utils.checkHost(hostname)
customHostname = Some(hostname)
}
/**
* Get the local machine's hostname.
*/
def localHostName(): String = {
customHostname.getOrElse(localIpAddressHostname)
}
def getAddressHostName(address: String): String = {
InetAddress.getByName(address).getHostName
}
def checkHost(host: String, message: String = "") {
assert(host.indexOf(':') == -1, message)
}
def checkHostPort(hostPort: String, message: String = "") {
assert(hostPort.indexOf(':') != -1, message)
}
  // Typically, this will be on the order of the number of nodes in the cluster.
  // If not, we should change it to an LRU cache or something.
private val hostPortParseResults = new ConcurrentHashMap[String, (String, Int)]()
def parseHostPort(hostPort: String): (String, Int) = {
{
// Check cache first.
val cached = hostPortParseResults.get(hostPort)
if (cached != null) return cached
}
val indx: Int = hostPort.lastIndexOf(':')
// This is potentially broken - when dealing with ipv6 addresses for example, sigh ...
// but then hadoop does not support ipv6 right now.
    // For now, we assume that if a port exists, then it is valid - we do not check that it is an int > 0
if (-1 == indx) {
val retval = (hostPort, 0)
hostPortParseResults.put(hostPort, retval)
return retval
}
val retval = (hostPort.substring(0, indx).trim(), hostPort.substring(indx + 1).trim().toInt)
hostPortParseResults.putIfAbsent(hostPort, retval)
hostPortParseResults.get(hostPort)
}
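  // Editorial examples (not part of the original source), per the parsing rules above:
  //   parseHostPort("spark-host:7077") == ("spark-host", 7077)
  //   parseHostPort("spark-host")      == ("spark-host", 0)   // no port present, defaults to 0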
private val daemonThreadFactoryBuilder: ThreadFactoryBuilder =
new ThreadFactoryBuilder().setDaemon(true)
/**
* Wrapper over newCachedThreadPool. Thread names are formatted as prefix-ID, where ID is a
* unique, sequentially assigned integer.
*/
def newDaemonCachedThreadPool(prefix: String): ThreadPoolExecutor = {
val threadFactory = daemonThreadFactoryBuilder.setNameFormat(prefix + "-%d").build()
Executors.newCachedThreadPool(threadFactory).asInstanceOf[ThreadPoolExecutor]
}
/**
   * Return a string describing how much time has passed since startTimeMs, in milliseconds.
   * The startTimeMs parameter should be a timestamp in milliseconds.
*/
def getUsedTimeMs(startTimeMs: Long): String = {
return " " + (System.currentTimeMillis - startTimeMs) + " ms"
}
/**
* Wrapper over newFixedThreadPool. Thread names are formatted as prefix-ID, where ID is a
* unique, sequentially assigned integer.
*/
def newDaemonFixedThreadPool(nThreads: Int, prefix: String): ThreadPoolExecutor = {
val threadFactory = daemonThreadFactoryBuilder.setNameFormat(prefix + "-%d").build()
Executors.newFixedThreadPool(nThreads, threadFactory).asInstanceOf[ThreadPoolExecutor]
}
private def listFilesSafely(file: File): Seq[File] = {
val files = file.listFiles()
if (files == null) {
throw new IOException("Failed to list files for dir: " + file)
}
files
}
/**
* Delete a file or directory and its contents recursively.
*/
def deleteRecursively(file: File) {
if (file.isDirectory) {
for (child <- listFilesSafely(file)) {
deleteRecursively(child)
}
}
if (!file.delete()) {
throw new IOException("Failed to delete: " + file)
}
}
/**
* Convert a Java memory parameter passed to -Xmx (such as 300m or 1g) to a number of megabytes.
* This is used to figure out how much memory to claim from Mesos based on the SPARK_MEM
* environment variable.
*/
def memoryStringToMb(str: String): Int = {
val lower = str.toLowerCase
if (lower.endsWith("k")) {
(lower.substring(0, lower.length-1).toLong / 1024).toInt
} else if (lower.endsWith("m")) {
lower.substring(0, lower.length-1).toInt
} else if (lower.endsWith("g")) {
lower.substring(0, lower.length-1).toInt * 1024
} else if (lower.endsWith("t")) {
lower.substring(0, lower.length-1).toInt * 1024 * 1024
    } else { // no suffix, so it's just a number in bytes
(lower.toLong / 1024 / 1024).toInt
}
}
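  // Editorial sketch, not part of the original Spark source: illustrates the -Xmx-style
  // conversions documented above; all inputs are illustrative.
  private def memoryStringToMbExample(): Unit = {
    require(memoryStringToMb("300m") == 300)
    require(memoryStringToMb("2G") == 2048)     // suffixes are case-insensitive
    require(memoryStringToMb("1048576") == 1)   // a bare number is interpreted as bytes
  }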
/**
* Convert a quantity in bytes to a human-readable string such as "4.0 MB".
*/
def bytesToString(size: Long): String = {
val TB = 1L << 40
val GB = 1L << 30
val MB = 1L << 20
val KB = 1L << 10
val (value, unit) = {
if (size >= 2*TB) {
(size.asInstanceOf[Double] / TB, "TB")
} else if (size >= 2*GB) {
(size.asInstanceOf[Double] / GB, "GB")
} else if (size >= 2*MB) {
(size.asInstanceOf[Double] / MB, "MB")
} else if (size >= 2*KB) {
(size.asInstanceOf[Double] / KB, "KB")
} else {
(size.asInstanceOf[Double], "B")
}
}
"%.1f %s".formatLocal(Locale.US, value, unit)
}
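  // Editorial sketch, not part of the original Spark source: shows the unit boundaries chosen by
  // bytesToString (a value must reach twice a unit before that unit is used).
  private def bytesToStringExample(): Unit = {
    require(bytesToString(1500L) == "1500.0 B")          // below 2 KB stays in bytes
    require(bytesToString(5L * 1024 * 1024) == "5.0 MB")
    require(bytesToString(3L << 30) == "3.0 GB")
  }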
/**
* Returns a human-readable string representing a duration such as "35ms"
*/
def msDurationToString(ms: Long): String = {
val second = 1000
val minute = 60 * second
val hour = 60 * minute
ms match {
case t if t < second =>
"%d ms".format(t)
case t if t < minute =>
"%.1f s".format(t.toFloat / second)
case t if t < hour =>
"%.1f m".format(t.toFloat / minute)
case t =>
"%.2f h".format(t.toFloat / hour)
}
}
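  // Editorial sketch, not part of the original Spark source. Only the millisecond case is
  // asserted because the other branches format with the JVM's default locale, so the decimal
  // separator and digits can vary; the commented expectations assume an English locale.
  private def msDurationToStringExample(): Unit = {
    require(msDurationToString(999) == "999 ms")
    // msDurationToString(90 * 1000L)   would render as "1.5 m"
    // msDurationToString(5400 * 1000L) would render as "1.50 h"
  }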
/**
* Convert a quantity in megabytes to a human-readable string such as "4.0 MB".
*/
def megabytesToString(megabytes: Long): String = {
bytesToString(megabytes * 1024L * 1024L)
}
/**
* Execute a command in the given working directory, throwing an exception if it completes
* with an exit code other than 0.
*/
def execute(command: Seq[String], workingDir: File) {
val process = new ProcessBuilder(command: _*)
.directory(workingDir)
.redirectErrorStream(true)
.start()
new Thread("read stdout for " + command(0)) {
override def run() {
for (line <- Source.fromInputStream(process.getInputStream).getLines) {
System.err.println(line)
}
}
}.start()
val exitCode = process.waitFor()
if (exitCode != 0) {
throw new SparkException("Process " + command + " exited with code " + exitCode)
}
}
/**
* Execute a command in the current working directory, throwing an exception if it completes
* with an exit code other than 0.
*/
def execute(command: Seq[String]) {
execute(command, new File("."))
}
/**
* Execute a command and get its output, throwing an exception if it yields a code other than 0.
*/
def executeAndGetOutput(command: Seq[String], workingDir: File = new File("."),
extraEnvironment: Map[String, String] = Map.empty): String = {
val builder = new ProcessBuilder(command: _*)
.directory(workingDir)
val environment = builder.environment()
for ((key, value) <- extraEnvironment) {
environment.put(key, value)
}
val process = builder.start()
new Thread("read stderr for " + command(0)) {
override def run() {
for (line <- Source.fromInputStream(process.getErrorStream).getLines) {
System.err.println(line)
}
}
}.start()
val output = new StringBuffer
val stdoutThread = new Thread("read stdout for " + command(0)) {
override def run() {
for (line <- Source.fromInputStream(process.getInputStream).getLines) {
output.append(line)
}
}
}
stdoutThread.start()
val exitCode = process.waitFor()
stdoutThread.join() // Wait for it to finish reading output
if (exitCode != 0) {
throw new SparkException("Process " + command + " exited with code " + exitCode)
}
output.toString
}
/**
* A regular expression to match classes of the "core" Spark API that we want to skip when
* finding the call site of a method.
*/
private val SPARK_CLASS_REGEX = """^org\.apache\.spark(\.api\.java)?(\.util)?(\.rdd)?\.[A-Z]""".r
private[spark] class CallSiteInfo(val lastSparkMethod: String, val firstUserFile: String,
val firstUserLine: Int, val firstUserClass: String)
/**
* When called inside a class in the spark package, returns the name of the user code class
* (outside the spark package) that called into Spark, as well as which Spark method they called.
* This is used, for example, to tell users where in their code each RDD got created.
*/
def getCallSiteInfo: CallSiteInfo = {
val trace = Thread.currentThread.getStackTrace().filter( el =>
(!el.getMethodName.contains("getStackTrace")))
// Keep crawling up the stack trace until we find the first function not inside of the spark
// package. We track the last (shallowest) contiguous Spark method. This might be an RDD
// transformation, a SparkContext function (such as parallelize), or anything else that leads
// to instantiation of an RDD. We also track the first (deepest) user method, file, and line.
var lastSparkMethod = "<unknown>"
var firstUserFile = "<unknown>"
var firstUserLine = 0
var finished = false
var firstUserClass = "<unknown>"
for (el <- trace) {
if (!finished) {
if (SPARK_CLASS_REGEX.findFirstIn(el.getClassName).isDefined) {
lastSparkMethod = if (el.getMethodName == "<init>") {
// Spark method is a constructor; get its class name
el.getClassName.substring(el.getClassName.lastIndexOf('.') + 1)
} else {
el.getMethodName
}
}
else {
firstUserLine = el.getLineNumber
firstUserFile = el.getFileName
firstUserClass = el.getClassName
finished = true
}
}
}
new CallSiteInfo(lastSparkMethod, firstUserFile, firstUserLine, firstUserClass)
}
def formatSparkCallSite = {
val callSiteInfo = getCallSiteInfo
"%s at %s:%s".format(callSiteInfo.lastSparkMethod, callSiteInfo.firstUserFile,
callSiteInfo.firstUserLine)
}
/** Return a string containing part of a file from byte 'start' to 'end'. */
def offsetBytes(path: String, start: Long, end: Long): String = {
val file = new File(path)
val length = file.length()
val effectiveEnd = math.min(length, end)
val effectiveStart = math.max(0, start)
val buff = new Array[Byte]((effectiveEnd-effectiveStart).toInt)
val stream = new FileInputStream(file)
stream.skip(effectiveStart)
stream.read(buff)
stream.close()
Source.fromBytes(buff).mkString
}
/**
* Clone an object using a Spark serializer.
*/
def clone[T](value: T, serializer: SerializerInstance): T = {
serializer.deserialize[T](serializer.serialize(value))
}
/**
* Detect whether this thread might be executing a shutdown hook. Will always return true if
   * the current thread is running a shutdown hook but may spuriously return true otherwise (e.g.
* if System.exit was just called by a concurrent thread).
*
* Currently, this detects whether the JVM is shutting down by Runtime#addShutdownHook throwing
* an IllegalStateException.
*/
def inShutdown(): Boolean = {
try {
val hook = new Thread {
override def run() {}
}
Runtime.getRuntime.addShutdownHook(hook)
Runtime.getRuntime.removeShutdownHook(hook)
} catch {
case ise: IllegalStateException => return true
}
false
}
def isSpace(c: Char): Boolean = {
" \t\r\n".indexOf(c) != -1
}
/**
* Split a string of potentially quoted arguments from the command line the way that a shell
* would do it to determine arguments to a command. For example, if the string is 'a "b c" d',
* then it would be parsed as three arguments: 'a', 'b c' and 'd'.
*/
def splitCommandString(s: String): Seq[String] = {
val buf = new ArrayBuffer[String]
var inWord = false
var inSingleQuote = false
var inDoubleQuote = false
val curWord = new StringBuilder
def endWord() {
buf += curWord.toString
curWord.clear()
}
var i = 0
while (i < s.length) {
var nextChar = s.charAt(i)
if (inDoubleQuote) {
if (nextChar == '"') {
inDoubleQuote = false
} else if (nextChar == '\\') {
if (i < s.length - 1) {
// Append the next character directly, because only " and \ may be escaped in
// double quotes after the shell's own expansion
curWord.append(s.charAt(i + 1))
i += 1
}
} else {
curWord.append(nextChar)
}
} else if (inSingleQuote) {
if (nextChar == '\'') {
inSingleQuote = false
} else {
curWord.append(nextChar)
}
// Backslashes are not treated specially in single quotes
} else if (nextChar == '"') {
inWord = true
inDoubleQuote = true
} else if (nextChar == '\'') {
inWord = true
inSingleQuote = true
} else if (!isSpace(nextChar)) {
curWord.append(nextChar)
inWord = true
} else if (inWord && isSpace(nextChar)) {
endWord()
inWord = false
}
i += 1
}
if (inWord || inDoubleQuote || inSingleQuote) {
endWord()
}
buf
}
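  // Editorial sketch, not part of the original Spark source: a self-checking version of the
  // example given in the scaladoc above, plus a single-quote case.
  private def splitCommandStringExample(): Unit = {
    require(splitCommandString("""a "b c" d""") == Seq("a", "b c", "d"))
    require(splitCommandString("--conf 'x y'") == Seq("--conf", "x y"))
    require(splitCommandString("") == Seq.empty)
  }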
  /* Calculates 'x' modulo 'mod', taking into account the sign of x:
   * if 'x' is negative, then 'x' % 'mod' is negative too,
   * so the function returns (x % mod) + mod in that case.
   */
def nonNegativeMod(x: Int, mod: Int): Int = {
val rawMod = x % mod
rawMod + (if (rawMod < 0) mod else 0)
}
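  // Editorial sketch, not part of the original Spark source: the negative case is the one the
  // comment above addresses, since Java/Scala `%` keeps the sign of the dividend.
  private def nonNegativeModExample(): Unit = {
    require(nonNegativeMod(7, 3) == 1)
    require(nonNegativeMod(-7, 3) == 2) // -7 % 3 == -1, so mod is added back
    require(nonNegativeMod(-9, 3) == 0)
  }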
  // Handles idiosyncrasies with hash (add more as required)
def nonNegativeHash(obj: AnyRef): Int = {
// Required ?
if (obj eq null) return 0
val hash = obj.hashCode
// math.abs fails for Int.MinValue
val hashAbs = if (Int.MinValue != hash) math.abs(hash) else 0
// Nothing else to guard against ?
hashAbs
}
  /** Returns a copy of the system properties that is thread-safe to iterate over. */
def getSystemProperties(): Map[String, String] = {
System.getProperties.clone().asInstanceOf[java.util.Properties].toMap[String, String]
}
/**
   * Executes a task repeatedly for its side effects.
   * Unlike a for comprehension, the while loop used here permits JVM JIT optimization.
*/
def times(numIters: Int)(f: => Unit): Unit = {
var i = 0
while (i < numIters) {
f
i += 1
}
}
/**
* Timing method based on iterations that permit JVM JIT optimization.
* @param numIters number of iterations
* @param f function to be executed
*/
def timeIt(numIters: Int)(f: => Unit): Long = {
val start = System.currentTimeMillis
times(numIters)(f)
System.currentTimeMillis - start
}
/**
* Counts the number of elements of an iterator using a while loop rather than calling
* [[scala.collection.Iterator#size]] because it uses a for loop, which is slightly slower
* in the current version of Scala.
*/
def getIteratorSize[T](iterator: Iterator[T]): Long = {
var count = 0L
while (iterator.hasNext) {
count += 1L
iterator.next()
}
count
}
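  // Editorial sketch, not part of the original Spark source: exercises times, timeIt and
  // getIteratorSize together; the iteration counts are arbitrary.
  private def loopHelpersExample(): Unit = {
    var counter = 0
    times(1000) { counter += 1 }
    require(counter == 1000)
    val elapsedMs = timeIt(10) { counter += 1 }
    require(elapsedMs >= 0L && counter == 1010)
    require(getIteratorSize(Iterator(1, 2, 3)) == 3L)
  }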
}
| sryza/spark | core/src/main/scala/org/apache/spark/util/Utils.scala | Scala | apache-2.0 | 30,085 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.execution
import java.io.{DataInput, DataOutput, File, PrintWriter}
import java.util.{ArrayList, Arrays, Properties}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hive.ql.exec.UDF
import org.apache.hadoop.hive.ql.udf.{UDAFPercentile, UDFType}
import org.apache.hadoop.hive.ql.udf.generic._
import org.apache.hadoop.hive.ql.udf.generic.GenericUDF.DeferredObject
import org.apache.hadoop.hive.serde2.{AbstractSerDe, SerDeStats}
import org.apache.hadoop.hive.serde2.objectinspector.{ObjectInspector, ObjectInspectorFactory}
import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory
import org.apache.hadoop.io.{LongWritable, Writable}
import org.apache.spark.sql.{AnalysisException, QueryTest, Row}
import org.apache.spark.sql.catalyst.plans.logical.Project
import org.apache.spark.sql.execution.command.FunctionsCommand
import org.apache.spark.sql.functions.max
import org.apache.spark.sql.hive.test.TestHiveSingleton
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.test.SQLTestUtils
import org.apache.spark.util.Utils
case class Fields(f1: Int, f2: Int, f3: Int, f4: Int, f5: Int)
// Case classes for the custom UDF's.
case class IntegerCaseClass(i: Int)
case class ListListIntCaseClass(lli: Seq[(Int, Int, Int)])
case class StringCaseClass(s: String)
case class ListStringCaseClass(l: Seq[String])
/**
* A test suite for Hive custom UDFs.
*/
class HiveUDFSuite extends QueryTest with TestHiveSingleton with SQLTestUtils {
import spark.udf
import spark.implicits._
test("spark sql udf test that returns a struct") {
udf.register("getStruct", (_: Int) => Fields(1, 2, 3, 4, 5))
assert(sql(
"""
|SELECT getStruct(1).f1,
| getStruct(1).f2,
| getStruct(1).f3,
| getStruct(1).f4,
| getStruct(1).f5 FROM src LIMIT 1
""".stripMargin).head() === Row(1, 2, 3, 4, 5))
}
test("SPARK-4785 When called with arguments referring column fields, PMOD throws NPE") {
checkAnswer(
sql("SELECT PMOD(CAST(key as INT), 10) FROM src LIMIT 1"),
Row(8)
)
}
test("hive struct udf") {
withTable("hiveUDFTestTable") {
sql(
"""
|CREATE TABLE hiveUDFTestTable (
| pair STRUCT<id: INT, value: INT>
|)
|PARTITIONED BY (partition STRING)
|ROW FORMAT SERDE '%s'
|STORED AS SEQUENCEFILE
""".
stripMargin.format(classOf[PairSerDe].getName))
val location = Utils.getSparkClassLoader.getResource("data/files/testUDF").getFile
sql(s"""
ALTER TABLE hiveUDFTestTable
ADD IF NOT EXISTS PARTITION(partition='testUDF')
LOCATION '$location'""")
sql(s"CREATE TEMPORARY FUNCTION testUDF AS '${classOf[PairUDF].getName}'")
sql("SELECT testUDF(pair) FROM hiveUDFTestTable")
sql("DROP TEMPORARY FUNCTION IF EXISTS testUDF")
}
}
test("Max/Min on named_struct") {
checkAnswer(sql(
"""
|SELECT max(named_struct(
| "key", key,
| "value", value)).value FROM src
""".stripMargin), Seq(Row("val_498")))
checkAnswer(sql(
"""
|SELECT min(named_struct(
| "key", key,
| "value", value)).value FROM src
""".stripMargin), Seq(Row("val_0")))
// nested struct cases
checkAnswer(sql(
"""
|SELECT max(named_struct(
| "key", named_struct(
"key", key,
"value", value),
| "value", value)).value FROM src
""".stripMargin), Seq(Row("val_498")))
checkAnswer(sql(
"""
|SELECT min(named_struct(
| "key", named_struct(
"key", key,
"value", value),
| "value", value)).value FROM src
""".stripMargin), Seq(Row("val_0")))
}
test("SPARK-6409 UDAF Average test") {
sql(s"CREATE TEMPORARY FUNCTION test_avg AS '${classOf[GenericUDAFAverage].getName}'")
checkAnswer(
sql("SELECT test_avg(1), test_avg(substr(value,5)) FROM src"),
Seq(Row(1.0, 260.182)))
sql("DROP TEMPORARY FUNCTION IF EXISTS test_avg")
hiveContext.reset()
}
test("SPARK-2693 udaf aggregates test") {
checkAnswer(sql("SELECT percentile(key, 1) FROM src LIMIT 1"),
sql("SELECT max(key) FROM src").collect().toSeq)
checkAnswer(sql("SELECT percentile(key, array(1, 1)) FROM src LIMIT 1"),
sql("SELECT array(max(key), max(key)) FROM src").collect().toSeq)
}
test("Generic UDAF aggregates") {
checkAnswer(sql(
"""
|SELECT percentile_approx(2, 0.99999),
| sum(distinct 1),
| count(distinct 1,2,3,4) FROM src LIMIT 1
""".stripMargin), sql("SELECT 2, 1, 1 FROM src LIMIT 1").collect().toSeq)
checkAnswer(sql(
"""
|SELECT ceiling(percentile_approx(distinct key, 0.99999)),
| count(distinct key),
| sum(distinct key),
| count(distinct 1),
| sum(distinct 1),
| sum(1) FROM src LIMIT 1
""".stripMargin),
sql(
"""
|SELECT max(key),
| count(distinct key),
| sum(distinct key),
| 1, 1, sum(1) FROM src LIMIT 1
""".stripMargin).collect().toSeq)
checkAnswer(sql(
"""
|SELECT ceiling(percentile_approx(distinct key, 0.9 + 0.09999)),
| count(distinct key), sum(distinct key),
| count(distinct 1), sum(distinct 1),
| sum(1) FROM src LIMIT 1
""".stripMargin),
sql("SELECT max(key), count(distinct key), sum(distinct key), 1, 1, sum(1) FROM src LIMIT 1")
.collect().toSeq)
checkAnswer(sql("SELECT ceiling(percentile_approx(key, 0.99999D)) FROM src LIMIT 1"),
sql("SELECT max(key) FROM src LIMIT 1").collect().toSeq)
checkAnswer(sql("SELECT percentile_approx(100.0D, array(0.9D, 0.9D)) FROM src LIMIT 1"),
sql("SELECT array(100, 100) FROM src LIMIT 1").collect().toSeq)
}
test("UDFIntegerToString") {
val testData = spark.sparkContext.parallelize(
IntegerCaseClass(1) :: IntegerCaseClass(2) :: Nil).toDF()
testData.createOrReplaceTempView("integerTable")
val udfName = classOf[UDFIntegerToString].getName
sql(s"CREATE TEMPORARY FUNCTION testUDFIntegerToString AS '$udfName'")
checkAnswer(
sql("SELECT testUDFIntegerToString(i) FROM integerTable"),
Seq(Row("1"), Row("2")))
sql("DROP TEMPORARY FUNCTION IF EXISTS testUDFIntegerToString")
hiveContext.reset()
}
test("UDFToListString") {
val testData = spark.sparkContext.parallelize(StringCaseClass("") :: Nil).toDF()
testData.createOrReplaceTempView("inputTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFToListString AS '${classOf[UDFToListString].getName}'")
checkAnswer(
sql("SELECT testUDFToListString(s) FROM inputTable"),
Seq(Row(Seq("data1", "data2", "data3"))))
sql("DROP TEMPORARY FUNCTION IF EXISTS testUDFToListString")
hiveContext.reset()
}
test("UDFToListInt") {
val testData = spark.sparkContext.parallelize(StringCaseClass("") :: Nil).toDF()
testData.createOrReplaceTempView("inputTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFToListInt AS '${classOf[UDFToListInt].getName}'")
checkAnswer(
sql("SELECT testUDFToListInt(s) FROM inputTable"),
Seq(Row(Seq(1, 2, 3))))
sql("DROP TEMPORARY FUNCTION IF EXISTS testUDFToListInt")
hiveContext.reset()
}
test("UDFToStringIntMap") {
val testData = spark.sparkContext.parallelize(StringCaseClass("") :: Nil).toDF()
testData.createOrReplaceTempView("inputTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFToStringIntMap " +
s"AS '${classOf[UDFToStringIntMap].getName}'")
checkAnswer(
sql("SELECT testUDFToStringIntMap(s) FROM inputTable"),
Seq(Row(Map("key1" -> 1, "key2" -> 2, "key3" -> 3))))
sql("DROP TEMPORARY FUNCTION IF EXISTS testUDFToStringIntMap")
hiveContext.reset()
}
test("UDFToIntIntMap") {
val testData = spark.sparkContext.parallelize(StringCaseClass("") :: Nil).toDF()
testData.createOrReplaceTempView("inputTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFToIntIntMap " +
s"AS '${classOf[UDFToIntIntMap].getName}'")
checkAnswer(
sql("SELECT testUDFToIntIntMap(s) FROM inputTable"),
Seq(Row(Map(1 -> 1, 2 -> 1, 3 -> 1))))
sql("DROP TEMPORARY FUNCTION IF EXISTS testUDFToIntIntMap")
hiveContext.reset()
}
test("UDFToListMapStringListInt") {
val testData = spark.sparkContext.parallelize(StringCaseClass("") :: Nil).toDF()
testData.createOrReplaceTempView("inputTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFToListMapStringListInt " +
s"AS '${classOf[UDFToListMapStringListInt].getName}'")
checkAnswer(
sql("SELECT testUDFToListMapStringListInt(s) FROM inputTable"),
Seq(Row(Seq(Map("a" -> Seq(1, 2), "b" -> Seq(3, 4))))))
sql("DROP TEMPORARY FUNCTION IF EXISTS testUDFToListMapStringListInt")
hiveContext.reset()
}
test("UDFRawList") {
val testData = spark.sparkContext.parallelize(StringCaseClass("") :: Nil).toDF()
testData.createOrReplaceTempView("inputTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFRawList " +
s"AS '${classOf[UDFRawList].getName}'")
val err = intercept[AnalysisException](sql("SELECT testUDFRawList(s) FROM inputTable"))
assert(err.getMessage.contains(
"Raw list type in java is unsupported because Spark cannot infer the element type."))
sql("DROP TEMPORARY FUNCTION IF EXISTS testUDFRawList")
hiveContext.reset()
}
test("UDFRawMap") {
val testData = spark.sparkContext.parallelize(StringCaseClass("") :: Nil).toDF()
testData.createOrReplaceTempView("inputTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFRawMap " +
s"AS '${classOf[UDFRawMap].getName}'")
val err = intercept[AnalysisException](sql("SELECT testUDFRawMap(s) FROM inputTable"))
assert(err.getMessage.contains(
"Raw map type in java is unsupported because Spark cannot infer key and value types."))
sql("DROP TEMPORARY FUNCTION IF EXISTS testUDFRawMap")
hiveContext.reset()
}
test("UDFWildcardList") {
val testData = spark.sparkContext.parallelize(StringCaseClass("") :: Nil).toDF()
testData.createOrReplaceTempView("inputTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFWildcardList " +
s"AS '${classOf[UDFWildcardList].getName}'")
val err = intercept[AnalysisException](sql("SELECT testUDFWildcardList(s) FROM inputTable"))
assert(err.getMessage.contains(
"Collection types with wildcards (e.g. List<?> or Map<?, ?>) are unsupported " +
"because Spark cannot infer the data type for these type parameters."))
sql("DROP TEMPORARY FUNCTION IF EXISTS testUDFWildcardList")
hiveContext.reset()
}
test("UDFListListInt") {
val testData = spark.sparkContext.parallelize(
ListListIntCaseClass(Nil) ::
ListListIntCaseClass(Seq((1, 2, 3))) ::
ListListIntCaseClass(Seq((4, 5, 6), (7, 8, 9))) :: Nil).toDF()
testData.createOrReplaceTempView("listListIntTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFListListInt AS '${classOf[UDFListListInt].getName}'")
checkAnswer(
sql("SELECT testUDFListListInt(lli) FROM listListIntTable"),
Seq(Row(0), Row(2), Row(13)))
sql("DROP TEMPORARY FUNCTION IF EXISTS testUDFListListInt")
hiveContext.reset()
}
test("UDFListString") {
val testData = spark.sparkContext.parallelize(
ListStringCaseClass(Seq("a", "b", "c")) ::
ListStringCaseClass(Seq("d", "e")) :: Nil).toDF()
testData.createOrReplaceTempView("listStringTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFListString AS '${classOf[UDFListString].getName}'")
checkAnswer(
sql("SELECT testUDFListString(l) FROM listStringTable"),
Seq(Row("a,b,c"), Row("d,e")))
sql("DROP TEMPORARY FUNCTION IF EXISTS testUDFListString")
hiveContext.reset()
}
test("UDFStringString") {
val testData = spark.sparkContext.parallelize(
StringCaseClass("world") :: StringCaseClass("goodbye") :: Nil).toDF()
testData.createOrReplaceTempView("stringTable")
sql(s"CREATE TEMPORARY FUNCTION testStringStringUDF AS '${classOf[UDFStringString].getName}'")
checkAnswer(
sql("SELECT testStringStringUDF(\"hello\", s) FROM stringTable"),
Seq(Row("hello world"), Row("hello goodbye")))
checkAnswer(
sql("SELECT testStringStringUDF(\"\", testStringStringUDF(\"hello\", s)) FROM stringTable"),
Seq(Row(" hello world"), Row(" hello goodbye")))
sql("DROP TEMPORARY FUNCTION IF EXISTS testStringStringUDF")
hiveContext.reset()
}
test("UDFTwoListList") {
val testData = spark.sparkContext.parallelize(
ListListIntCaseClass(Nil) ::
ListListIntCaseClass(Seq((1, 2, 3))) ::
ListListIntCaseClass(Seq((4, 5, 6), (7, 8, 9))) ::
Nil).toDF()
testData.createOrReplaceTempView("TwoListTable")
sql(s"CREATE TEMPORARY FUNCTION testUDFTwoListList AS '${classOf[UDFTwoListList].getName}'")
checkAnswer(
sql("SELECT testUDFTwoListList(lli, lli) FROM TwoListTable"),
Seq(Row("0, 0"), Row("2, 2"), Row("13, 13")))
sql("DROP TEMPORARY FUNCTION IF EXISTS testUDFTwoListList")
hiveContext.reset()
}
test("non-deterministic children of UDF") {
withUserDefinedFunction("testStringStringUDF" -> true, "testGenericUDFHash" -> true) {
// HiveSimpleUDF
sql(s"CREATE TEMPORARY FUNCTION testStringStringUDF AS '${classOf[UDFStringString].getName}'")
val df1 = sql("SELECT testStringStringUDF(rand(), \"hello\")")
assert(!df1.logicalPlan.asInstanceOf[Project].projectList.forall(_.deterministic))
// HiveGenericUDF
sql(s"CREATE TEMPORARY FUNCTION testGenericUDFHash AS '${classOf[GenericUDFHash].getName}'")
val df2 = sql("SELECT testGenericUDFHash(rand())")
assert(!df2.logicalPlan.asInstanceOf[Project].projectList.forall(_.deterministic))
}
}
test("Hive UDFs with insufficient number of input arguments should trigger an analysis error") {
withTempView("testUDF") {
Seq((1, 2)).toDF("a", "b").createOrReplaceTempView("testUDF")
def testErrorMsgForFunc(funcName: String, className: String): Unit = {
withUserDefinedFunction(funcName -> true) {
sql(s"CREATE TEMPORARY FUNCTION $funcName AS '$className'")
val message = intercept[AnalysisException] {
sql(s"SELECT $funcName() FROM testUDF")
}.getMessage
assert(message.contains(s"No handler for UDF/UDAF/UDTF '$className'"))
}
}
// HiveSimpleUDF
testErrorMsgForFunc("testUDFTwoListList", classOf[UDFTwoListList].getName)
// HiveGenericUDF
testErrorMsgForFunc("testUDFAnd", classOf[GenericUDFOPAnd].getName)
// Hive UDAF
testErrorMsgForFunc("testUDAFPercentile", classOf[UDAFPercentile].getName)
// AbstractGenericUDAFResolver
testErrorMsgForFunc("testUDAFAverage", classOf[GenericUDAFAverage].getName)
// AbstractGenericUDAFResolver
testErrorMsgForFunc("testUDTFExplode", classOf[GenericUDTFExplode].getName)
}
}
test("Hive UDF in group by") {
withTempView("tab1") {
Seq(Tuple1(1451400761)).toDF("test_date").createOrReplaceTempView("tab1")
sql(s"CREATE TEMPORARY FUNCTION testUDFToDate AS '${classOf[GenericUDFToDate].getName}'")
val count = sql("select testUDFToDate(timestamp_seconds(test_date))" +
" from tab1 group by testUDFToDate(timestamp_seconds(test_date))").count()
sql("DROP TEMPORARY FUNCTION IF EXISTS testUDFToDate")
assert(count == 1)
}
}
test("SPARK-11522 select input_file_name from non-parquet table") {
withTempDir { tempDir =>
// EXTERNAL OpenCSVSerde table pointing to LOCATION
val file1 = new File(tempDir + "/data1")
Utils.tryWithResource(new PrintWriter(file1)) { writer =>
writer.write("1,2")
}
val file2 = new File(tempDir + "/data2")
Utils.tryWithResource(new PrintWriter(file2)) { writer =>
writer.write("1,2")
}
sql(
s"""CREATE EXTERNAL TABLE csv_table(page_id INT, impressions INT)
ROW FORMAT SERDE 'org.apache.hadoop.hive.serde2.OpenCSVSerde'
WITH SERDEPROPERTIES (
\"separatorChar\" = \",\",
\"quoteChar\" = \"\\\"\",
\"escapeChar\" = \"\\\\\")
LOCATION '${tempDir.toURI}'
""")
val answer1 =
sql("SELECT input_file_name() FROM csv_table").head().getString(0)
assert(answer1.contains("data1") || answer1.contains("data2"))
val count1 = sql("SELECT input_file_name() FROM csv_table").distinct().count()
assert(count1 == 2)
sql("DROP TABLE csv_table")
// EXTERNAL pointing to LOCATION
sql(
s"""CREATE EXTERNAL TABLE external_t5 (c1 int, c2 int)
ROW FORMAT DELIMITED FIELDS TERMINATED BY ','
LOCATION '${tempDir.toURI}'
""")
val answer2 =
sql("SELECT input_file_name() as file FROM external_t5").head().getString(0)
      assert(answer2.contains("data1") || answer2.contains("data2"))
val count2 = sql("SELECT input_file_name() as file FROM external_t5").distinct().count
assert(count2 == 2)
sql("DROP TABLE external_t5")
}
withTempDir { tempDir =>
// External parquet pointing to LOCATION
val parquetLocation = s"${tempDir.toURI}/external_parquet"
sql("SELECT 1, 2").write.parquet(parquetLocation)
sql(
s"""CREATE EXTERNAL TABLE external_parquet(c1 int, c2 int)
STORED AS PARQUET
LOCATION '$parquetLocation'
""")
val answer3 =
sql("SELECT input_file_name() as file FROM external_parquet").head().getString(0)
assert(answer3.contains("external_parquet"))
val count3 = sql("SELECT input_file_name() as file FROM external_parquet").distinct().count
assert(count3 == 1)
sql("DROP TABLE external_parquet")
}
// Non-External parquet pointing to /tmp/...
sql("CREATE TABLE parquet_tmp STORED AS parquet AS SELECT 1, 2")
val answer4 =
sql("SELECT input_file_name() as file FROM parquet_tmp").head().getString(0)
assert(answer4.contains("parquet_tmp"))
val count4 = sql("SELECT input_file_name() as file FROM parquet_tmp").distinct().count
assert(count4 == 1)
sql("DROP TABLE parquet_tmp")
}
test("Hive Stateful UDF") {
withUserDefinedFunction("statefulUDF" -> true, "statelessUDF" -> true) {
sql(s"CREATE TEMPORARY FUNCTION statefulUDF AS '${classOf[StatefulUDF].getName}'")
sql(s"CREATE TEMPORARY FUNCTION statelessUDF AS '${classOf[StatelessUDF].getName}'")
val testData = spark.range(10).repartition(1)
// Expected Max(s) is 10 as statefulUDF returns the sequence number starting from 1.
checkAnswer(testData.selectExpr("statefulUDF() as s").agg(max($"s")), Row(10))
// Expected Max(s) is 5 as statefulUDF returns the sequence number starting from 1,
// and the data is evenly distributed into 2 partitions.
checkAnswer(testData.repartition(2)
.selectExpr("statefulUDF() as s").agg(max($"s")), Row(5))
// Expected Max(s) is 1, as stateless UDF is deterministic and foldable and replaced
// by constant 1 by ConstantFolding optimizer.
checkAnswer(testData.selectExpr("statelessUDF() as s").agg(max($"s")), Row(1))
}
}
test("Show persistent functions") {
val testData = spark.sparkContext.parallelize(StringCaseClass("") :: Nil).toDF()
withTempView("inputTable") {
testData.createOrReplaceTempView("inputTable")
withUserDefinedFunction("testUDFToListInt" -> false) {
val numFunc = spark.catalog.listFunctions().count()
sql(s"CREATE FUNCTION testUDFToListInt AS '${classOf[UDFToListInt].getName}'")
assert(spark.catalog.listFunctions().count() == numFunc + 1)
checkAnswer(
sql("SELECT testUDFToListInt(s) FROM inputTable"),
Seq(Row(Seq(1, 2, 3))))
assert(sql("show functions").count() ==
numFunc + FunctionsCommand.virtualOperators.size + 1)
assert(spark.catalog.listFunctions().count() == numFunc + 1)
}
}
}
test("Temp function has dots in the names") {
withUserDefinedFunction("test_avg" -> false, "`default.test_avg`" -> true) {
sql(s"CREATE FUNCTION test_avg AS '${classOf[GenericUDAFAverage].getName}'")
checkAnswer(sql("SELECT test_avg(1)"), Row(1.0))
// temp function containing dots in the name
spark.udf.register("default.test_avg", () => { Math.random() + 2})
assert(sql("SELECT `default.test_avg`()").head().getDouble(0) >= 2.0)
checkAnswer(sql("SELECT test_avg(1)"), Row(1.0))
}
}
test("Call the function registered in the not-current database") {
Seq("true", "false").foreach { caseSensitive =>
withSQLConf(SQLConf.CASE_SENSITIVE.key -> caseSensitive) {
withDatabase("dAtABaSe1") {
sql("CREATE DATABASE dAtABaSe1")
withUserDefinedFunction("dAtABaSe1.test_avg" -> false) {
sql(s"CREATE FUNCTION dAtABaSe1.test_avg AS '${classOf[GenericUDAFAverage].getName}'")
checkAnswer(sql("SELECT dAtABaSe1.test_avg(1)"), Row(1.0))
}
val message = intercept[AnalysisException] {
sql("SELECT dAtABaSe1.unknownFunc(1)")
}.getMessage
assert(message.contains("Undefined function: 'unknownFunc'") &&
message.contains("nor a permanent function registered in the database 'dAtABaSe1'"))
}
}
}
}
test("UDTF") {
withUserDefinedFunction("udtf_count2" -> true) {
sql(s"ADD JAR ${hiveContext.getHiveFile("TestUDTF.jar").getCanonicalPath}")
// The function source code can be found at:
// https://cwiki.apache.org/confluence/display/Hive/DeveloperGuide+UDTF
sql(
"""
|CREATE TEMPORARY FUNCTION udtf_count2
|AS 'org.apache.spark.sql.hive.execution.GenericUDTFCount2'
""".stripMargin)
checkAnswer(
sql("SELECT key, cc FROM src LATERAL VIEW udtf_count2(value) dd AS cc"),
Row(97, 500) :: Row(97, 500) :: Nil)
checkAnswer(
sql("SELECT udtf_count2(a) FROM (SELECT 1 AS a FROM src LIMIT 3) t"),
Row(3) :: Row(3) :: Nil)
}
}
test("permanent UDTF") {
withUserDefinedFunction("udtf_count_temp" -> false) {
sql(
s"""
|CREATE FUNCTION udtf_count_temp
|AS 'org.apache.spark.sql.hive.execution.GenericUDTFCount2'
|USING JAR '${hiveContext.getHiveFile("TestUDTF.jar").toURI}'
""".stripMargin)
checkAnswer(
sql("SELECT key, cc FROM src LATERAL VIEW udtf_count_temp(value) dd AS cc"),
Row(97, 500) :: Row(97, 500) :: Nil)
checkAnswer(
sql("SELECT udtf_count_temp(a) FROM (SELECT 1 AS a FROM src LIMIT 3) t"),
Row(3) :: Row(3) :: Nil)
}
}
test("SPARK-25768 constant argument expecting Hive UDF") {
withTempView("inputTable") {
spark.range(10).createOrReplaceTempView("inputTable")
withUserDefinedFunction("testGenericUDAFPercentileApprox" -> false) {
val numFunc = spark.catalog.listFunctions().count()
sql(s"CREATE FUNCTION testGenericUDAFPercentileApprox AS '" +
s"${classOf[GenericUDAFPercentileApprox].getName}'")
checkAnswer(
sql("SELECT testGenericUDAFPercentileApprox(id, 0.5) FROM inputTable"),
Seq(Row(4.0)))
}
}
}
test("SPARK-28012 Hive UDF supports struct type foldable expression") {
withUserDefinedFunction("testUDFStructType" -> false) {
// Simulate a hive udf that supports struct parameters
sql("CREATE FUNCTION testUDFStructType AS '" +
s"${classOf[GenericUDFArray].getName}'")
checkAnswer(
sql("SELECT testUDFStructType(named_struct('name', 'xx', 'value', 1))[0].value"),
Seq(Row(1)))
}
}
test("SPARK-32877: add test for Hive UDF complex decimal type") {
withUserDefinedFunction("testArraySum" -> false) {
sql(s"CREATE FUNCTION testArraySum AS '${classOf[ArraySumUDF].getName}'")
checkAnswer(
sql("SELECT testArraySum(array(1, 1.1, 1.2))"),
Seq(Row(3.3)))
val msg = intercept[AnalysisException] {
sql("SELECT testArraySum(1)")
}.getMessage
assert(msg.contains(s"No handler for UDF/UDAF/UDTF '${classOf[ArraySumUDF].getName}'"))
val msg2 = intercept[AnalysisException] {
sql("SELECT testArraySum(1, 2)")
}.getMessage
assert(msg2.contains(s"No handler for UDF/UDAF/UDTF '${classOf[ArraySumUDF].getName}'"))
}
}
}
class TestPair(x: Int, y: Int) extends Writable with Serializable {
def this() = this(0, 0)
var entry: (Int, Int) = (x, y)
override def write(output: DataOutput): Unit = {
output.writeInt(entry._1)
output.writeInt(entry._2)
}
override def readFields(input: DataInput): Unit = {
val x = input.readInt()
val y = input.readInt()
entry = (x, y)
}
}
class PairSerDe extends AbstractSerDe {
override def initialize(p1: Configuration, p2: Properties): Unit = {}
override def getObjectInspector: ObjectInspector = {
ObjectInspectorFactory
.getStandardStructObjectInspector(
Arrays.asList("pair"),
Arrays.asList(ObjectInspectorFactory.getStandardStructObjectInspector(
Arrays.asList("id", "value"),
Arrays.asList(PrimitiveObjectInspectorFactory.javaIntObjectInspector,
PrimitiveObjectInspectorFactory.javaIntObjectInspector))
))
}
override def getSerializedClass: Class[_ <: Writable] = classOf[TestPair]
override def getSerDeStats: SerDeStats = null
override def serialize(p1: scala.Any, p2: ObjectInspector): Writable = null
override def deserialize(value: Writable): AnyRef = {
val pair = value.asInstanceOf[TestPair]
val row = new ArrayList[ArrayList[AnyRef]]
row.add(new ArrayList[AnyRef](2))
row.get(0).add(Integer.valueOf(pair.entry._1))
row.get(0).add(Integer.valueOf(pair.entry._2))
row
}
}
class PairUDF extends GenericUDF {
override def initialize(p1: Array[ObjectInspector]): ObjectInspector =
ObjectInspectorFactory.getStandardStructObjectInspector(
Arrays.asList("id", "value"),
Arrays.asList(PrimitiveObjectInspectorFactory.javaIntObjectInspector,
PrimitiveObjectInspectorFactory.javaIntObjectInspector)
)
override def evaluate(args: Array[DeferredObject]): AnyRef = {
Integer.valueOf(args(0).get.asInstanceOf[TestPair].entry._2)
}
override def getDisplayString(p1: Array[String]): String = ""
}
@UDFType(stateful = true)
class StatefulUDF extends UDF {
private val result = new LongWritable(0)
def evaluate(): LongWritable = {
result.set(result.get() + 1)
result
}
}
class StatelessUDF extends UDF {
private val result = new LongWritable(0)
def evaluate(): LongWritable = {
result.set(result.get() + 1)
result
}
}
class ArraySumUDF extends UDF {
import scala.collection.JavaConverters._
def evaluate(values: java.util.List[java.lang.Double]): java.lang.Double = {
var r = 0d
for (v <- values.asScala) {
r += v
}
r
}
}
| witgo/spark | sql/hive/src/test/scala/org/apache/spark/sql/hive/execution/HiveUDFSuite.scala | Scala | apache-2.0 | 28,344 |
package org.openapitools.server.model
/**
* @param `class` for example: ''null''
* @param links for example: ''null''
* @param classes for example: ''null''
*/
final case class ExtensionClassImpl (
`class`: Option[String],
links: Option[ExtensionClassImpllinks],
classes: Option[Seq[String]]
)
| cliffano/swaggy-jenkins | clients/scala-akka-http-server/generated/src/main/scala/org/openapitools/server/model/ExtensionClassImpl.scala | Scala | mit | 309 |
package gremlin.scala
import org.apache.commons.configuration2.Configuration
import org.apache.tinkerpop.gremlin.process.computer.GraphComputer
import org.apache.tinkerpop.gremlin.structure.Graph.Variables
import org.apache.tinkerpop.gremlin.structure.{T, Transaction}
import shapeless._
object ScalaGraph {
def apply(graph: Graph): ScalaGraph =
ScalaGraph(TraversalSource(graph))
}
case class ScalaGraph(traversalSource: TraversalSource) {
lazy val traversal = traversalSource
lazy val graph = traversalSource.graph
def configure(conf: TraversalSource => TraversalSource) =
ScalaGraph(conf(TraversalSource(graph)))
def addVertex(): Vertex =
traversalSource.underlying.addV().next
def addVertex(label: String): Vertex =
traversalSource.underlying.addV(label).next
def addVertex(properties: (String, Any)*): Vertex = {
val traversal = traversalSource.underlying.addV()
properties.foreach { case (key, value) => traversal.property(key, value) }
traversal.next
}
def addVertex(label: String, properties: (String, Any)*): Vertex = {
val traversal = traversalSource.underlying.addV(label)
properties.foreach { case (key, value) => traversal.property(key, value) }
traversal.next
}
def addVertex(label: String, properties: Map[String, Any]): Vertex =
addVertex(label, properties.toSeq: _*)
def addVertex(properties: Map[String, Any]): Vertex =
addVertex(properties.toSeq: _*)
/**
* Save an object's values as a new vertex
* Note: `@id` members cannot be set for all graphs (e.g. remote graphs), so it is ignored here generally
*/
def addVertex[CC <: Product: Marshallable](cc: CC): Vertex = {
val fromCC = implicitly[Marshallable[CC]].fromCC(cc)
addVertex(fromCC.label, fromCC.properties: _*)
}
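  /* Editorial sketch, not part of the original gremlin-scala source. `Person`, the TinkerGraph
   * factory and `asScala()` are assumptions here: they illustrate how a case class is typically
   * marshalled into a vertex via the implicit Marshallable derivation.
   *
   *   case class Person(name: String, age: Int)
   *   val g: ScalaGraph = TinkerGraph.open.asScala()
   *   val alice: Vertex = g + Person("alice", 30) // label "Person", properties "name" and "age"
   */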
def +[CC <: Product: Marshallable](cc: CC): Vertex = addVertex(cc)
def +(label: String): Vertex = addVertex(label)
def +(label: String, properties: KeyValue[_]*): Vertex =
addVertex(label, properties.map(v => (v.key.name, v.value)).toMap)
/** start a traversal with `addV` */
def addV(): GremlinScala.Aux[Vertex, HNil] =
traversalSource.addV()
/** start a traversal with `addV` */
def addV(label: String): GremlinScala.Aux[Vertex, HNil] =
traversalSource.addV(label)
  /** start a traversal with `addE` */
def addE(label: String): GremlinScala.Aux[Edge, HNil] =
traversalSource.addE(label)
  /** start a traversal with the given `starts` */
def inject[S](starts: S*): GremlinScala.Aux[S, HNil] =
traversalSource.inject(starts: _*)
/** start traversal with all vertices */
def V(): GremlinScala.Aux[Vertex, HNil] =
traversalSource.V()
/** start traversal with all edges */
def E(): GremlinScala.Aux[Edge, HNil] =
traversalSource.E()
/** start traversal with some vertices identified by given ids */
def V(vertexIds: Any*): GremlinScala.Aux[Vertex, HNil] =
traversalSource.V(vertexIds: _*)
/** start traversal with some edges identified by given ids */
def E(edgeIds: Any*): GremlinScala.Aux[Edge, HNil] =
traversalSource.E(edgeIds: _*)
def tx(): Transaction = graph.tx()
def variables(): Variables = graph.variables()
def configuration(): Configuration = graph.configuration()
def compute[C <: GraphComputer](graphComputerClass: Class[C]): C =
graph.compute(graphComputerClass)
def compute(): GraphComputer = graph.compute()
def close(): Unit = graph.close()
/* TODO: reimplement with createThreadedTx, if the underlying graph supports it */
// def transactional[R](work: Graph => R) = graph.tx.submit(work)
}
| mpollmeier/gremlin-scala | gremlin-scala/src/main/scala/gremlin/scala/ScalaGraph.scala | Scala | apache-2.0 | 3,612 |
/***********************************************************************
* Copyright (c) 2013-2019 Commonwealth Computer Research, Inc.
* All rights reserved. This program and the accompanying materials
* are made available under the terms of the Apache License, Version 2.0
* which accompanies this distribution and is available at
* http://www.opensource.org/licenses/apache2.0.php.
***********************************************************************/
package org.locationtech.geomesa.kudu.tools.stats
import com.beust.jcommander.Parameters
import org.locationtech.geomesa.kudu.data.KuduDataStore
import org.locationtech.geomesa.kudu.tools.KuduDataStoreCommand
import org.locationtech.geomesa.kudu.tools.KuduDataStoreCommand.KuduParams
import org.locationtech.geomesa.kudu.tools.stats.KuduStatsBoundsCommand.KuduStatsBoundsParams
import org.locationtech.geomesa.tools.stats.StatsBoundsCommand
import org.locationtech.geomesa.tools.stats.StatsBoundsCommand.StatsBoundsParams
class KuduStatsBoundsCommand extends StatsBoundsCommand[KuduDataStore] with KuduDataStoreCommand {
override val params = new KuduStatsBoundsParams
}
object KuduStatsBoundsCommand {
@Parameters(commandDescription = "View or calculate bounds on attributes in a GeoMesa feature type")
class KuduStatsBoundsParams extends StatsBoundsParams with KuduParams
}
| elahrvivaz/geomesa | geomesa-kudu/geomesa-kudu-tools/src/main/scala/org/locationtech/geomesa/kudu/tools/stats/KuduStatsBoundsCommand.scala | Scala | apache-2.0 | 1,350 |
/**
* Copyright (c) 2012 Alexey Aksenov [email protected]
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.digimead.digi.lib.ctrl.declaration
import org.digimead.digi.lib.log.Logging
import android.os.Parcelable
import android.os.Parcel
case class DConnection(
val connectionID: Int,
val processID: Int,
val key: Int) extends Parcelable {
var FD, localIP, localPort, remoteIP, remotePort, PID, UID, GID = -1
var timestamp: Long = -1
var cmd: String = ""
def this(in: Parcel) = this(connectionID = in.readInt,
processID = in.readInt,
key = in.readInt)
def writeToParcel(out: Parcel, flags: Int) {
if (DConnection.log.isTraceExtraEnabled)
DConnection.log.trace("writeToParcel DConnection with flags " + flags)
out.writeInt(connectionID)
out.writeInt(processID)
out.writeInt(key)
out.writeInt(FD)
    out.writeInt(localIP)
    out.writeInt(localPort) // localPort was missing from the original parceling and would be silently dropped
out.writeInt(remoteIP)
out.writeInt(remotePort)
out.writeInt(PID)
out.writeInt(UID)
out.writeInt(GID)
out.writeLong(timestamp)
out.writeString(cmd)
}
def describeContents() = 0
}
object DConnection extends Logging {
final val CREATOR: Parcelable.Creator[DConnection] = new Parcelable.Creator[DConnection]() {
def createFromParcel(in: Parcel): DConnection = try {
if (log.isTraceExtraEnabled)
log.trace("createFromParcel new DConnection")
val obj = new DConnection(in)
obj.FD = in.readInt()
      obj.localIP = in.readInt()
      obj.localPort = in.readInt() // kept in sync with writeToParcel above
obj.remoteIP = in.readInt()
obj.remotePort = in.readInt()
obj.PID = in.readInt()
obj.UID = in.readInt()
obj.GID = in.readInt()
obj.timestamp = in.readLong()
obj.cmd = in.readString()
obj
} catch {
case e =>
log.error(e.getMessage, e)
null
}
def newArray(size: Int): Array[DConnection] = new Array[DConnection](size)
}
}
| ezh/digi-lib-ctrl | src/main/scala/org/digimead/digi/lib/ctrl/declaration/DConnection.scala | Scala | apache-2.0 | 2,389 |
package com.github.ponkin.bloom.server
import com.twitter.finagle.Thrift
class BloomServer(val conf: BloomConfig) {
import KryoSerializer._
val storeImpl = new BloomFilterStoreImpl(DiskStoreManager(conf.storage))
def run = Thrift
.server
.withLabel("BloomServer")
.serveIface(s"${conf.server.host}:${conf.server.port}", storeImpl)
}
object BloomServer {
def apply(conf: BloomConfig) = new BloomServer(conf)
}
| ponkin/bloom | server/src/main/scala/com/github/ponkin/bloom/server/BloomServer.scala | Scala | apache-2.0 | 435 |
/*
* Copyright (c) 2013 Christos KK Loverdos
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.ckkloverdos.thrift3r
package codec
package struct
import com.ckkloverdos.thrift3r.descriptor.{FieldInfo, StructDescriptor}
import com.ckkloverdos.thrift3r.protocol.Protocol
import com.ckkloverdos.thrift3r.protocol.helper.ProtocolHelpers
import com.google.common.reflect.TypeToken
/**
*
* @author Christos KK Loverdos <[email protected]>
*/
case class StructCodec[T <: AnyRef](
thrifter: Thrift3r,
descriptor: StructDescriptor[T]
) extends Codec[T] with CodecToString with UnsupportedDirectStringTransformations[T] {
final val fieldInfoByID = for((fieldId, fieldDescr) ← descriptor.fields) yield {
val fieldType = fieldDescr.jvmType
val fieldCodec = thrifter.codecOfType(fieldType).asInstanceOf[Codec[Any]]
(fieldId, FieldInfo(fieldDescr, fieldCodec))
}
final val fieldInfoByName = fieldInfoByID.map { case (id, fi) ⇒ (fi.name, fi) }
final val arity: Short = descriptor.arity
/**
* The supported [[com.ckkloverdos.thrift3r.BinReprType]].
*/
final def binReprType = BinReprType.STRUCT
final def typeToken = descriptor.typeToken.asInstanceOf[TypeToken[T]]
final def encode(protocol: Protocol, value: T) {
ProtocolHelpers.writeStruct(protocol, descriptor, fieldInfoByID, value)
}
final def decode(protocol: Protocol) = {
val params = new Array[AnyRef](arity)
ProtocolHelpers.readStruct(protocol, fieldInfoByID, fieldInfoByName, params)
val struct = descriptor.construct(params)
struct.asInstanceOf[T]
}
override protected def extraToStringElements = List(descriptor)
}
| loverdos/thrift3r | src/main/scala/com/ckkloverdos/thrift3r/codec/struct/StructCodec.scala | Scala | apache-2.0 | 2,172 |
/*
* Copyright (C) 2015 Jason Mar
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.jasonmar
class BigBitMap (_length: Long, initialValue: Byte = 0, _children: Array[BitMap] = null) extends BigCollection {
override val length = _length
override val children: Array[BitMap] = {
if (_children != null) {
// Allow construction from preallocated sub-arrays
if (_children.length == nChildren) {
verifyChildSizes(_children.map{_.length.toInt})
_children
} else {
throw new IllegalArgumentException
}
} else {
val a = new Array[BitMap](nChildren)
if (length <= childSize) {
// Only need one BitMap
a(0) = new BitMap(length.toInt)
} else {
for (i <- 0 to nChildren - 2) {
// Allocate full size BitMap
a(i) = new BitMap(childSize.toInt)
}
// Last BitMap may be smaller
a(nChildren - 1) = new BitMap((length % childSize).toInt)
}
      a
}
}
def get(i: Long): Boolean = {
val id0: Int = getChildId(i)
val id1: Long = getChildInternalId(i)
children(id0).get(id1)
}
def set(i: Long): Unit = {
val id0: Int = getChildId(i)
val id1: Long = getChildInternalId(i)
children(id0).set(id1)
}
def unset(i: Long): Unit = {
val id0: Int = getChildId(i)
val id1: Long = getChildInternalId(i)
children(id0).unset(id1)
}
def scard: Long = {
var bitMapId = 0
var c: Long = 0L
while (bitMapId < nChildren) {
c += children(bitMapId).scard
bitMapId += 1
}
c
}
def flip(): Unit = {
var bitMapId = 0
while (bitMapId < nChildren) {
children(bitMapId).flip()
bitMapId += 1
}
}
def clear(): Unit = {
var bitMapId = 0
while (bitMapId < nChildren) {
children(bitMapId).clear()
bitMapId += 1
}
}
def &= (x: BigBitMap): Unit = {
compareSize(x)
var i = 0
while (i < nChildren) {
children(i) &= x.children(i)
i += 1
}
}
def ^= (x: BigBitMap): Unit = {
compareSize(x)
var i = 0
while (i < nChildren) {
children(i) ^= x.children(i)
i += 1
}
}
def |= (x: BigBitMap): Unit = {
compareSize(x)
var i = 0
while (i < nChildren) {
children(i) |= x.children(i)
i += 1
}
}
def & (x: BigBitMap): BigBitMap = {
compareSize(x)
val b = clone
var i = 0
while (i < nChildren) {
b.children(i) &= x.children(i)
i += 1
}
b
}
def ^ (x: BigBitMap): BigBitMap = {
compareSize(x)
val b = clone
var i = 0
while (i < nChildren) {
b.children(i) ^= x.children(i)
i += 1
}
b
}
def | (x: BigBitMap): BigBitMap = {
compareSize(x)
val b = clone
var i = 0
while (i < nChildren) {
b.children(i) |= x.children(i)
i += 1
}
b
}
override def clone: BigBitMap = {
val a = new Array[BitMap](nChildren)
for (i <- a.indices) {
a(i) = children(i).clone
}
new BigBitMap(length, _children = a)
}
}
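/* Editorial usage sketch, not part of the original library: BigBitMap addresses more bits than a
 * single Int-indexed array can hold by delegating to fixed-size BitMap children. The size below is
 * illustrative; a 3-billion-bit map allocates roughly 375 MB. */
object BigBitMapExample {
  def main(args: Array[String]): Unit = {
    val bits = new BigBitMap(3L * 1000 * 1000 * 1000)
    bits.set(2999999999L)
    require(bits.get(2999999999L) && !bits.get(0L))
    require(bits.scard == 1L)
    bits.unset(2999999999L)
    require(bits.scard == 0L)
  }
}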
| jasonmar/scala-bigcollections | src/main/scala/com/jasonmar/BigBitMap.scala | Scala | apache-2.0 | 3,621 |