Dataset columns:
  code        string   lengths 5 to 1M
  repo_name   string   lengths 5 to 109
  path        string   lengths 6 to 208
  language    string   1 distinct value
  license     string   15 distinct values
  size        int64    5 to 1M
/*
 * Copyright (c) 2014-2015 by its authors. Some rights reserved.
 * See the project homepage at: http://www.monifu.org
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package monifu.concurrent.atomic.padded

import monifu.concurrent.atomic

object Atomic {
  /**
   * Constructs an `Atomic[T]` reference. Based on the `initialValue`, it will return the best, most specific
   * type. E.g. you give it a number, it will return something inheriting from `AtomicNumber[T]`. That's why
   * it takes an `AtomicBuilder[T, R]` as an implicit parameter - but worry not about such details as it just works.
   *
   * @param initialValue is the initial value with which to initialize the Atomic reference
   * @param builder is the builder that helps us to build the best reference possible, based on our `initialValue`
   */
  def apply[T, R <: atomic.Atomic[T]](initialValue: T)(implicit builder: AtomicBuilder[T, R]): R =
    builder.buildInstance(initialValue)

  /**
   * Returns the builder that would be chosen to construct Atomic references
   * for the given `initialValue`.
   */
  def builderFor[T, R <: atomic.Atomic[T]](initialValue: T)(implicit builder: AtomicBuilder[T, R]): AtomicBuilder[T, R] =
    builder
}
sergius/monifu
core/js/src/main/scala/monifu/concurrent/atomic/padded/Atomic.scala
Scala
apache-2.0
1,735
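The padded Atomic object above only delegates to an implicit AtomicBuilder, so a short usage sketch may help. This is a hedged sketch, not part of the file: the increment and get calls are assumptions about the AtomicNumber API that the scaladoc mentions but this file does not define.

// Hedged usage sketch for the padded Atomic factory above.
// `increment` and `get` are assumed from the AtomicNumber API referenced
// in the scaladoc; only `Atomic.apply` is defined in this file.
import monifu.concurrent.atomic.padded.Atomic

object PaddedAtomicSketch extends App {
  val counter = Atomic(0)   // builder picks the most specific (numeric) reference
  counter.increment()       // assumed AtomicNumber operation
  println(counter.get)      // assumed accessor; expected to print 1
}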
package scala.meta.trees import com.intellij.psi.{PsiElement, PsiFile} import org.jetbrains.plugins.scala.lang.psi.api.base._ import org.jetbrains.plugins.scala.lang.psi.api.base.patterns._ import org.jetbrains.plugins.scala.lang.psi.api.base.types._ import org.jetbrains.plugins.scala.lang.psi.api.expr._ import org.jetbrains.plugins.scala.lang.psi.api.expr.xml._ import org.jetbrains.plugins.scala.lang.psi.api.statements._ import org.jetbrains.plugins.scala.lang.psi.api.statements.params._ import org.jetbrains.plugins.scala.lang.psi.api.toplevel.imports._ import org.jetbrains.plugins.scala.lang.psi.api.toplevel.typedef._ import org.jetbrains.plugins.scala.lang.psi.api.{ScalaPsiElement, ScalaRecursiveElementVisitor} import org.jetbrains.plugins.scala.lang.scaladoc.psi.api.{ScDocComment, ScDocInlinedTag, ScDocSyntaxElement, ScDocTag} import scala.collection.immutable.Seq import scala.language.postfixOps import scala.meta.Tree import scala.{meta => m, Seq => _} trait TreeConverterBuilder { self: TreeConverter => def convert(elem:PsiElement): Tree = { var ret: m.Tree = null val v: ScalaRecursiveElementVisitor = new ScalaRecursiveElementVisitor { override def visitTypeAliasDefinition(alias: ScTypeAliasDefinition): Unit = super.visitTypeAliasDefinition(alias) override def visitElement(element: PsiElement): Unit = { super.visitElement(element) } override def visitTypeAlias(alias: ScTypeAlias): Unit = super.visitTypeAlias(alias) override def visitTypeAliasDeclaration(alias: ScTypeAliasDeclaration): Unit = super.visitTypeAliasDeclaration(alias) override def visitParameters(parameters: ScParameters): Unit = super.visitParameters(parameters) override def visitModifierList(modifierList: ScModifierList): Unit = super.visitModifierList(modifierList) override def visitConstructorInvocation(constrInvocation: ScConstructorInvocation): Unit = super.visitConstructorInvocation(constrInvocation) override def visitFunctionDefinition(fun: ScFunctionDefinition): Unit = { fun.body.get.accept(this) val body = ret.asInstanceOf[m.Term] ret = m.Defn.Def(convertMods(fun), toTermName(fun), fun.typeParameters.map(toTypeParams).toList, fun.paramClauses.clauses.map(convertParamClause).toList, fun.definedReturnType.toOption.map(toType(_)), body ) } override def visitFunctionDeclaration(fun: ScFunctionDeclaration): Unit = super.visitFunctionDeclaration(fun) override def visitMacroDefinition(fun: ScMacroDefinition): Unit = super.visitMacroDefinition(fun) override def visitCatchBlock(c: ScCatchBlock): Unit = super.visitCatchBlock(c) override def visitFile(file: PsiFile): Unit = super.visitFile(file) //Override also visitReferenceExpression! and visitTypeProjection! 
override def visitReference(ref: ScReference): Unit = super.visitReference(ref) override def visitParameter(parameter: ScParameter): Unit = super.visitParameter(parameter) override def visitClassParameter(parameter: ScClassParameter): Unit = super.visitClassParameter(parameter) override def visitPatternDefinition(pat: ScPatternDefinition): Unit = super.visitPatternDefinition(pat) override def visitValueDeclaration(v: ScValueDeclaration): Unit = super.visitValueDeclaration(v) override def visitVariableDefinition(varr: ScVariableDefinition): Unit = super.visitVariableDefinition(varr) override def visitVariableDeclaration(varr: ScVariableDeclaration): Unit = super.visitVariableDeclaration(varr) override def visitVariable(varr: ScVariable): Unit = super.visitVariable(varr) override def visitValue(v: ScValue): Unit = super.visitValue(v) override def visitCaseClause(cc: ScCaseClause): Unit = super.visitCaseClause(cc) override def visitPattern(pat: ScPattern): Unit = super.visitPattern(pat) override def visitForBinding(forBinding: ScForBinding): Unit = super.visitForBinding(forBinding) override def visitGenerator(gen: ScGenerator): Unit = super.visitGenerator(gen) override def visitGuard(guard: ScGuard): Unit = super.visitGuard(guard) override def visitFunction(fun: ScFunction): Unit = super.visitFunction(fun) override def visitTypeDefinition(typedef: ScTypeDefinition): Unit = super.visitTypeDefinition(typedef) override def visitImportExpr(expr: ScImportExpr): Unit = super.visitImportExpr(expr) override def visitSelfInvocation(self: ScSelfInvocation): Unit = super.visitSelfInvocation(self) override def visitAnnotation(annotation: ScAnnotation): Unit = super.visitAnnotation(annotation) // Expressions override def visitExpression(expr: ScExpression): Unit = super.visitExpression(expr) override def visitThisReference(t: ScThisReference): Unit = super.visitThisReference(t) override def visitSuperReference(t: ScSuperReference): Unit = super.visitSuperReference(t) override def visitPostfixExpression(p: ScPostfixExpr): Unit = super.visitPostfixExpression(p) override def visitPrefixExpression(p: ScPrefixExpr): Unit = super.visitPrefixExpression(p) override def visitIf(stmt: ScIf): Unit = super.visitIf(stmt) override def visitLiteral(l: ScLiteral): Unit = super.visitLiteral(l) override def visitAssignment(stmt: ScAssignment): Unit = super.visitAssignment(stmt) override def visitMethodCallExpression(call: ScMethodCall): Unit = super.visitMethodCallExpression(call) override def visitGenericCallExpression(call: ScGenericCall): Unit = super.visitGenericCallExpression(call) override def visitInfixExpression(infix: ScInfixExpr): Unit = super.visitInfixExpression(infix) override def visitWhile(ws: ScWhile): Unit = super.visitWhile(ws) override def visitReturn(ret: ScReturn): Unit = super.visitReturn(ret) override def visitMatch(ms: ScMatch): Unit = super.visitMatch(ms) override def visitFor(expr: ScFor): Unit = super.visitFor(expr) override def visitDo(stmt: ScDo): Unit = super.visitDo(stmt) override def visitFunctionExpression(stmt: ScFunctionExpr): Unit = super.visitFunctionExpression(stmt) override def visitThrow(throwStmt: ScThrow): Unit = super.visitThrow(throwStmt) override def visitTry(tryStmt: ScTry): Unit = super.visitTry(tryStmt) override def visitParenthesisedExpr(expr: ScParenthesisedExpr): Unit = super.visitParenthesisedExpr(expr) override def visitNewTemplateDefinition(templ: ScNewTemplateDefinition): Unit = super.visitNewTemplateDefinition(templ) override def visitTypedExpr(stmt: 
ScTypedExpression): Unit = super.visitTypedExpr(stmt) override def visitTuple(tuple: ScTuple): Unit = super.visitTuple(tuple) override def visitBlockExpression(block: ScBlockExpr): Unit = super.visitBlockExpression(block) override def visitUnderscoreExpression(under: ScUnderscoreSection): Unit = super.visitUnderscoreExpression(under) override def visitConstrBlockExpr(constr: ScConstrBlockExpr): Unit = super.visitConstrBlockExpr(constr) //type elements override def visitTypeElement(te: ScTypeElement): Unit = super.visitTypeElement(te) override def visitSimpleTypeElement(simple: ScSimpleTypeElement): Unit = super.visitSimpleTypeElement(simple) override def visitWildcardTypeElement(wildcard: ScWildcardTypeElement): Unit = super.visitWildcardTypeElement(wildcard) override def visitTupleTypeElement(tuple: ScTupleTypeElement): Unit = super.visitTupleTypeElement(tuple) override def visitParenthesisedTypeElement(parenthesised: ScParenthesisedTypeElement): Unit = super.visitParenthesisedTypeElement(parenthesised) override def visitParameterizedTypeElement(parameterized: ScParameterizedTypeElement): Unit = super.visitParameterizedTypeElement(parameterized) override def visitInfixTypeElement(infix: ScInfixTypeElement): Unit = super.visitInfixTypeElement(infix) override def visitFunctionalTypeElement(fun: ScFunctionalTypeElement): Unit = super.visitFunctionalTypeElement(fun) override def visitExistentialTypeElement(exist: ScExistentialTypeElement): Unit = super.visitExistentialTypeElement(exist) override def visitCompoundTypeElement(compound: ScCompoundTypeElement): Unit = super.visitCompoundTypeElement(compound) override def visitAnnotTypeElement(annot: ScAnnotTypeElement): Unit = super.visitAnnotTypeElement(annot) override def visitTypeVariableTypeElement(tvar: ScTypeVariableTypeElement): Unit = super.visitTypeVariableTypeElement(tvar) //scaladoc override def visitDocComment(s: ScDocComment): Unit = super.visitDocComment(s) override def visitScaladocElement(s: ScalaPsiElement): Unit = super.visitScaladocElement(s) override def visitWikiSyntax(s: ScDocSyntaxElement): Unit = super.visitWikiSyntax(s) override def visitInlinedTag(s: ScDocInlinedTag): Unit = super.visitInlinedTag(s) override def visitTag(s: ScDocTag): Unit = super.visitTag(s) //xml override def visitXmlStartTag(s: ScXmlStartTag): Unit = super.visitXmlStartTag(s) override def visitXmlEndTag(s: ScXmlEndTag): Unit = super.visitXmlEndTag(s) } elem.accept(v) ret } }
JetBrains/intellij-scala
scala/scala-impl/src/scala/meta/trees/TreeConverterBuilder.scala
Scala
apache-2.0
9,353
/* Copyright 2012 Twitter, Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package com.twitter.scalding import java.io.{ InputStream, OutputStream } import java.util.{ UUID, Properties } import cascading.scheme.Scheme import cascading.scheme.local.{ TextLine => CLTextLine, TextDelimited => CLTextDelimited } import cascading.scheme.hadoop.{ TextLine => CHTextLine, TextDelimited => CHTextDelimited, SequenceFile => CHSequenceFile } import cascading.tap.hadoop.Hfs import cascading.tap.MultiSourceTap import cascading.tap.SinkMode import cascading.tap.Tap import cascading.tap.local.FileTap import cascading.tuple.Fields import com.etsy.cascading.tap.local.LocalTap import org.apache.hadoop.conf.Configuration import org.apache.hadoop.fs.{ FileStatus, PathFilter, Path } import org.apache.hadoop.mapred.JobConf import org.apache.hadoop.mapred.OutputCollector import org.apache.hadoop.mapred.RecordReader import scala.util.control.Exception.allCatch /** * A base class for sources that take a scheme trait. */ abstract class SchemedSource extends Source { /** The scheme to use if the source is local. */ def localScheme: Scheme[Properties, InputStream, OutputStream, _, _] = throw ModeException("Cascading local mode not supported for: " + toString) /** The scheme to use if the source is on hdfs. */ def hdfsScheme: Scheme[JobConf, RecordReader[_, _], OutputCollector[_, _], _, _] = throw ModeException("Cascading Hadoop mode not supported for: " + toString) // The mode to use for output taps determining how conflicts with existing output are handled. val sinkMode: SinkMode = SinkMode.REPLACE } /** * A trait which provides a method to create a local tap. */ trait LocalSourceOverride extends SchemedSource { /** A path to use for the local tap. */ def localPath: String /** * Creates a local tap. * * @param sinkMode The mode for handling output conflicts. * @returns A tap. 
*/ def createLocalTap(sinkMode: SinkMode): Tap[_, _, _] = new FileTap(localScheme, localPath, sinkMode) } object HiddenFileFilter extends PathFilter { def accept(p: Path) = { val name = p.getName !name.startsWith("_") && !name.startsWith(".") } } object SuccessFileFilter extends PathFilter { def accept(p: Path) = { p.getName == "_SUCCESS" } } object AcceptAllPathFilter extends PathFilter { def accept(p: Path) = true } object FileSource { def glob(glob: String, conf: Configuration, filter: PathFilter = AcceptAllPathFilter): Iterable[FileStatus] = { val path = new Path(glob) Option(path.getFileSystem(conf).globStatus(path, filter)).map { _.toIterable // convert java Array to scala Iterable } getOrElse { Iterable.empty } } /** * @return whether globPath contains non hidden files */ def globHasNonHiddenPaths(globPath: String, conf: Configuration): Boolean = { !glob(globPath, conf, HiddenFileFilter).isEmpty } /** * @return whether globPath contains a _SUCCESS file */ def globHasSuccessFile(globPath: String, conf: Configuration): Boolean = { !glob(globPath, conf, SuccessFileFilter).isEmpty } } /** * This is a base class for File-based sources */ abstract class FileSource extends SchemedSource with LocalSourceOverride { /** * Determines if a path is 'valid' for this source. In strict mode all paths must be valid. * In non-strict mode, all invalid paths will be filtered out. * * Subclasses can override this to validate paths. * * The default implementation is a quick sanity check to look for missing or empty directories. * It is necessary but not sufficient -- there are cases where this will return true but there is * in fact missing data. * * TODO: consider writing a more in-depth version of this method in [[TimePathedSource]] that looks for * TODO: missing days / hours etc. */ protected def pathIsGood(p: String, conf: Configuration) = FileSource.globHasNonHiddenPaths(p, conf) def hdfsPaths: Iterable[String] // By default, we write to the LAST path returned by hdfsPaths def hdfsWritePath = hdfsPaths.last override def createTap(readOrWrite: AccessMode)(implicit mode: Mode): Tap[_, _, _] = { mode match { // TODO support strict in Local case Local(_) => { createLocalTap(sinkMode) } case hdfsMode @ Hdfs(_, _) => readOrWrite match { case Read => createHdfsReadTap(hdfsMode) case Write => CastHfsTap(new Hfs(hdfsScheme, hdfsWritePath, sinkMode)) } case _ => { allCatch.opt( TestTapFactory(this, hdfsScheme, sinkMode)).map { _.createTap(readOrWrite) // these java types are invariant, so we cast here .asInstanceOf[Tap[Any, Any, Any]] } .orElse { allCatch.opt( TestTapFactory(this, localScheme.getSourceFields, sinkMode)).map { _.createTap(readOrWrite) .asInstanceOf[Tap[Any, Any, Any]] } }.getOrElse(sys.error("Failed to create a tap for: " + toString)) } } } // This is only called when Mode.sourceStrictness is true protected def hdfsReadPathsAreGood(conf: Configuration) = { hdfsPaths.forall { pathIsGood(_, conf) } } /* * This throws InvalidSourceException if: * 1) we are in sourceStrictness mode and all sources are not present. * 2) we are not in the above, but some source has no input whatsoever * TODO this only does something for HDFS now. 
Maybe we should do the same for LocalMode */ override def validateTaps(mode: Mode): Unit = { mode match { case Hdfs(strict, conf) => { if (strict && (!hdfsReadPathsAreGood(conf))) { throw new InvalidSourceException( "[" + this.toString + "] Data is missing from one or more paths in: " + hdfsPaths.toString) } else if (!hdfsPaths.exists { pathIsGood(_, conf) }) { //Check that there is at least one good path: throw new InvalidSourceException( "[" + this.toString + "] No good paths in: " + hdfsPaths.toString) } } case _ => () } } /* * Get all the set of valid paths based on source strictness. */ protected def goodHdfsPaths(hdfsMode: Hdfs) = { hdfsMode match { //we check later that all the paths are good case Hdfs(true, _) => hdfsPaths // If there are no matching paths, this is still an error, we need at least something: case Hdfs(false, conf) => hdfsPaths.filter{ pathIsGood(_, conf) } } } protected def createHdfsReadTap(hdfsMode: Hdfs): Tap[JobConf, _, _] = { val taps: List[Tap[JobConf, RecordReader[_, _], OutputCollector[_, _]]] = goodHdfsPaths(hdfsMode) .toList.map { path => CastHfsTap(new Hfs(hdfsScheme, path, sinkMode)) } taps.size match { case 0 => { // This case is going to result in an error, but we don't want to throw until // validateTaps, so we just put a dummy path to return something so the // Job constructor does not fail. CastHfsTap(new Hfs(hdfsScheme, hdfsPaths.head, sinkMode)) } case 1 => taps.head case _ => new ScaldingMultiSourceTap(taps) } } } class ScaldingMultiSourceTap(taps: Seq[Tap[JobConf, RecordReader[_, _], OutputCollector[_, _]]]) extends MultiSourceTap[Tap[JobConf, RecordReader[_, _], OutputCollector[_, _]], JobConf, RecordReader[_, _]](taps: _*) { private final val randomId = UUID.randomUUID.toString override def getIdentifier() = randomId } /** * The fields here are ('offset, 'line) */ trait TextSourceScheme extends SchemedSource { // The text-encoding to use when writing out the lines (default is UTF-8). val textEncoding: String = CHTextLine.DEFAULT_CHARSET override def localScheme = new CLTextLine(new Fields("offset", "line"), Fields.ALL, textEncoding) override def hdfsScheme = HadoopSchemeInstance(new CHTextLine(CHTextLine.DEFAULT_SOURCE_FIELDS, textEncoding)) } trait TextLineScheme extends TextSourceScheme with SingleMappable[String] { //In textline, 0 is the byte position, the actual text string is in column 1 override def sourceFields = Dsl.intFields(Seq(1)) } /** * Mix this in for delimited schemes such as TSV or one-separated values * By default, TSV is given */ trait DelimitedScheme extends SchemedSource { //override these as needed: val fields = Fields.ALL //This is passed directly to cascading where null is interpretted as string val types: Array[Class[_]] = null val separator = "\\t" val skipHeader = false val writeHeader = false val quote: String = null // Whether to throw an exception or not if the number of fields does not match an expected number. // If set to false, missing fields will be set to null. val strict = true // Whether to throw an exception if a field cannot be coerced to the right type. // If set to false, then fields that cannot be coerced will be set to null. 
val safe = true //These should not be changed: override def localScheme = new CLTextDelimited(fields, skipHeader, writeHeader, separator, strict, quote, types, safe) override def hdfsScheme = HadoopSchemeInstance(new CHTextDelimited(fields, null, skipHeader, writeHeader, separator, strict, quote, types, safe)) } trait SequenceFileScheme extends SchemedSource { //override these as needed: val fields = Fields.ALL // TODO Cascading doesn't support local mode yet override def hdfsScheme = { HadoopSchemeInstance(new CHSequenceFile(fields)) } } /** * Ensures that a _SUCCESS file is present in the Source path, which must be a glob, * as well as the requirements of [[FileSource.pathIsGood]] */ trait SuccessFileSource extends FileSource { override protected def pathIsGood(p: String, conf: Configuration) = { FileSource.globHasNonHiddenPaths(p, conf) && FileSource.globHasSuccessFile(p, conf) } } /** * Use this class to add support for Cascading local mode via the Hadoop tap. * Put another way, this runs a Hadoop tap outside of Hadoop in the Cascading local mode */ trait LocalTapSource extends LocalSourceOverride { override def createLocalTap(sinkMode: SinkMode) = new LocalTap(localPath, hdfsScheme, sinkMode).asInstanceOf[Tap[_, _, _]] } abstract class FixedPathSource(path: String*) extends FileSource { def localPath = { assert(path.size == 1, "Cannot use multiple input files on local mode"); path(0) } def hdfsPaths = path.toList override def toString = getClass.getName + path override def hashCode = toString.hashCode override def equals(that: Any): Boolean = (that != null) && (that.toString == toString) } /** * Tab separated value source */ case class Tsv(p: String, override val fields: Fields = Fields.ALL, override val skipHeader: Boolean = false, override val writeHeader: Boolean = false, override val sinkMode: SinkMode = SinkMode.REPLACE) extends FixedPathSource(p) with DelimitedScheme /** * Allows the use of multiple Tsv input paths. The Tsv files will * be process through your flow as if they are a single pipe. Tsv * files must have the same schema. * For more details on how multiple files are handled check the * cascading docs. 
*/ case class MultipleTsvFiles(p: Seq[String], override val fields: Fields = Fields.ALL, override val skipHeader: Boolean = false, override val writeHeader: Boolean = false) extends FixedPathSource(p: _*) with DelimitedScheme /** * Csv value source * separated by commas and quotes wrapping all fields */ case class Csv(p: String, override val separator: String = ",", override val fields: Fields = Fields.ALL, override val skipHeader: Boolean = false, override val writeHeader: Boolean = false, override val quote: String = "\\"", override val sinkMode: SinkMode = SinkMode.REPLACE) extends FixedPathSource(p) with DelimitedScheme /** * One separated value (commonly used by Pig) */ case class Osv(p: String, f: Fields = Fields.ALL, override val sinkMode: SinkMode = SinkMode.REPLACE) extends FixedPathSource(p) with DelimitedScheme { override val fields = f override val separator = "\\1" } object TextLine { // Default encoding is UTF-8 val defaultTextEncoding: String = CHTextLine.DEFAULT_CHARSET val defaultSinkMode: SinkMode = SinkMode.REPLACE def apply(p: String, sm: SinkMode = defaultSinkMode, textEncoding: String = defaultTextEncoding): TextLine = new TextLine(p, sm, textEncoding) } class TextLine(p: String, override val sinkMode: SinkMode, override val textEncoding: String) extends FixedPathSource(p) with TextLineScheme { // For some Java interop def this(p: String) = this(p, TextLine.defaultSinkMode, TextLine.defaultTextEncoding) } /** * Alternate typed TextLine source that keeps both 'offset and 'line fields. */ class OffsetTextLine(filepath: String, override val sinkMode: SinkMode, override val textEncoding: String) extends FixedPathSource(filepath) with TypedSource[(Long, String)] with TextSourceScheme { override def converter[U >: (Long, String)] = TupleConverter.asSuperConverter[(Long, String), U](TupleConverter.of[(Long, String)]) } /** * Alternate typed TextLine source that keeps both 'offset and 'line fields. */ object OffsetTextLine { // Default encoding is UTF-8 val defaultTextEncoding: String = CHTextLine.DEFAULT_CHARSET val defaultSinkMode: SinkMode = SinkMode.REPLACE def apply(p: String, sm: SinkMode = defaultSinkMode, textEncoding: String = defaultTextEncoding): OffsetTextLine = new OffsetTextLine(p, sm, textEncoding) } case class SequenceFile(p: String, f: Fields = Fields.ALL, override val sinkMode: SinkMode = SinkMode.REPLACE) extends FixedPathSource(p) with SequenceFileScheme with LocalTapSource { override val fields = f } case class MultipleSequenceFiles(p: String*) extends FixedPathSource(p: _*) with SequenceFileScheme with LocalTapSource case class MultipleTextLineFiles(p: String*) extends FixedPathSource(p: _*) with TextLineScheme /** * Delimited files source * allowing to override separator and quotation characters and header configuration */ case class MultipleDelimitedFiles(f: Fields, override val separator: String, override val quote: String, override val skipHeader: Boolean, override val writeHeader: Boolean, p: String*) extends FixedPathSource(p: _*) with DelimitedScheme { override val fields = f }
lucamilanesio/scalding
scalding-core/src/main/scala/com/twitter/scalding/FileSource.scala
Scala
apache-2.0
14,894
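The FileSource hierarchy above defines a family of concrete sources (Tsv, Csv, TextLine, and friends). A minimal construction sketch follows; the paths are placeholders, and actually reading them would require a scalding Job and a Mode, which are outside this file.

// Minimal sketch constructing a few of the sources defined above.
// Paths are placeholders; running a flow over them needs a scalding Job.
import com.twitter.scalding._

object SourceSketch {
  val tsv  = Tsv("data/input.tsv", skipHeader = true)   // tab-separated, header skipped
  val csv  = Csv("data/input.csv")                      // comma-separated, quoted fields
  val text = TextLine("data/raw.txt")                   // fields ('offset, 'line)
}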
package org.jetbrains.plugins.scala.projectHighlighting

import org.jetbrains.plugins.scala.HighlightingTests
import org.junit.experimental.categories.Category

/**
 * Nikolay.Tropin
 * 01-Aug-17
 */
@Category(Array(classOf[HighlightingTests]))
class BetterFilesProjectHighlighingTest extends GithubSbtAllProjectHighlightingTest {
  override def githubUsername = "pathikrit"

  override def githubRepoName = "better-files"

  //v.3.0.0
  override def revision = "eb7a357713c083534de9eeaee771750582c8ad31"
}
loskutov/intellij-scala
test/org/jetbrains/plugins/scala/projectHighlighting/BetterFilesProjectHighlighingTest.scala
Scala
apache-2.0
510
package handlers.server

import handlers.GameClient
import handlers.packets.{PacketWriter, ServerCodes}

import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future

/**
 * Created by franblas on 14/04/17.
 */
class NonHybridSpellLines(gameClient: GameClient) {
  def process(): Future[Array[Byte]] = {
    gameClient.player.map(_ => compute()).getOrElse(Future { Array.emptyByteArray })
  }

  private def compute(): Future[Array[Byte]] = {
    val writer = new PacketWriter(ServerCodes.nonHybridSpellLines)
    writer.writeByte(0x02)
    writer.writeByte(0x00)
    writer.writeByte(0x63)
    writer.writeByte(0x00)
    writer.toFinalFuture()
  }
}
franblas/NAOC
src/main/scala/handlers/server/NonHybridSpellLines.scala
Scala
mit
681
package io.criticality.cookbook.scala import java.io._ import org.apache.commons._ import org.apache.http._ import org.apache.http.client._ import org.apache.http.client.methods.HttpPost import org.apache.http.impl.client.DefaultHttpClient import java.util.ArrayList import org.apache.http.message.BasicNameValuePair import org.apache.http.client.entity.UrlEncodedFormEntity import org.apache.http.entity.StringEntity import scala.collection.immutable.HashMap import org.apache.http.client.methods.HttpPut import org.apache.http.client.methods.HttpEntityEnclosingRequestBase import org.apache.http.impl.client.HttpClients import org.apache.http.client.methods.HttpGet import org.apache.http.entity.FileEntity /** * * A simple http client that offers post and authentication for (primarily JSON) in scala * */ class SimpleHttpClient { def sendPost(url: String, payload: String, contentType: ContentType.Value, headers: Map[String, String]): Result = { val post = new HttpPost(url) send(payload, contentType, headers, post) } def sendPut(url: String, payload: String, contentType: ContentType.Value, headers: Map[String, String]): Result = { val put = new HttpPut(url) send(payload, contentType, headers, put) } private def send(payload: String, contentType: ContentType.Value, headers: Map[String, String], post: HttpEntityEnclosingRequestBase): Result = { headers.foreach { case (name, value) => post.addHeader(name, value) } val input = new StringEntity(payload); input.setContentType(contentType.toString); post.setEntity(input); val httpClient = HttpClients.createDefault() // send the post request val response = httpClient.execute(post) val br = new BufferedReader(new InputStreamReader((response.getEntity().getContent()))); val buf = new StringBuilder var line: String = br.readLine while (line != null) { buf.append(line); line = br.readLine } Result(response.getStatusLine().getStatusCode(), buf.toString()); } def get(url: String): Result = { val httpClient = HttpClients.createDefault() // send the post request val response = httpClient.execute(new HttpGet(url)) val br = new BufferedReader(new InputStreamReader((response.getEntity().getContent()))); val buf = new StringBuilder var line: String = br.readLine while (line != null) { buf.append(line); line = br.readLine } Result(response.getStatusLine().getStatusCode(), buf.toString()) } def sendFile(url: String, file: String, headers: Map[String, String]): Result = { val post = new HttpPost(url) headers.foreach { case (name, value) => post.addHeader(name, value) } val input = new FileEntity(new File(file), org.apache.http.entity.ContentType.DEFAULT_BINARY); input.setContentType(ContentType.BIN.toString); post.setEntity(input); // send the post request val response = HttpClients.createDefault().execute(post) val br = new BufferedReader(new InputStreamReader((response.getEntity().getContent()))); val buf = new StringBuilder var line: String = br.readLine while (line != null) { buf.append(line); line = br.readLine } Result(response.getStatusLine().getStatusCode(), buf.toString()); } def authenticate(url: String, auth: Auth, formatableJSon: String): String = { sendPost(url, (formatableJSon format (auth.username, auth.password)), ContentType.JSON, new HashMap).response } } case class Auth(username: String, password: String) case class Result(status: Int, response: String) object ContentType extends Enumeration { type ContentType = Value val JSON = Value("application/json") val XML = Value("application/xml") val BIN = Value("application/octet-stream") }
schmiegelow/scala-cookbook
src/main/scala/io/criticality/cookbook/scala/SimpleHttpClient.scala
Scala
gpl-2.0
3,830
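The SimpleHttpClient above exposes sendPost, sendPut, get, sendFile and authenticate, all returning a Result(status, response). A short calling sketch follows; the URL, payload and header are placeholders and not part of the original file.

// Sketch of how the client above might be called; endpoint and payload
// are hypothetical, only the API shown in the file is used.
import io.criticality.cookbook.scala.{ContentType, SimpleHttpClient}

object SimpleHttpClientSketch extends App {
  val client = new SimpleHttpClient
  val result = client.sendPost(
    "http://localhost:8080/api/things",        // hypothetical endpoint
    """{"name":"example"}""",
    ContentType.JSON,
    Map("Accept" -> "application/json"))
  println(s"status=${result.status} body=${result.response}")
}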
package test import edu.mit.csail.cap.query._ import util._ import org.scalatest._ import java.util.concurrent.Executors class IntervalSpec extends FunSuite { val POOL_SIZE = 4 // Increase N for a larger scope of model checking val N = 2 val intervals: Seq[Interval] = (for (i <- 0 to N; j <- i + 1 to N) yield Segment(i, j)) ++ (for (i <- 0 to N) yield AfterInt(i)) ++ (for (i <- 0 to N) yield BeforeInt(i)) ++ Seq(AfterInt(Integer.MAX_VALUE), BeforeInt(Integer.MAX_VALUE), AfterInt(Integer.MIN_VALUE), BeforeInt(Integer.MIN_VALUE), Empty, Forever) val inputs: Seq[Int] = Integer.MIN_VALUE +: (Integer.MIN_VALUE + 1) +: Integer.MAX_VALUE +: (Integer.MAX_VALUE - 1) +: (-1 to N + 1) private val service = Executors.newFixedThreadPool(POOL_SIZE) private def schedule(f: => Unit) = service.submit(new Runnable { override def run = f }) private def compare(a: Interval, b: Interval) { for (i <- inputs) assert(a.contains(i) == b.contains(i), "" + a + " and " + b + " don't match at " + i) assert(a.isEmpty == b.isEmpty) } private def compare(a: Interval, oracle: Int => Boolean) { for (i <- inputs) assert(a.contains(i) == oracle(i), "" + a + " disagrees with oracle at " + i) } test("normal form") { for (a <- intervals) compare(a, a.normalize) } test("empty, forever") { compare(Empty, _ => false) compare(Forever, t => t < Integer.MAX_VALUE) assert(Empty.isEmpty) assert(!Forever.isEmpty) compare(Forever.complement, Empty) compare(Empty.complement, Forever) compare(Intersection(Forever, Empty), Empty) compare(Union(Empty, Empty), Empty) compare(Intersection(Forever, Forever), Forever) compare(Intersection(Empty, Empty), Empty) compare(Union(Forever, Empty), Forever) compare(Union(Forever, Forever), Forever) } test("union, union normal form") { (for (a <- intervals; b <- intervals; c <- intervals) yield schedule { val ab = Union(a, b) compare(ab, ab.normalize) compare(ab, i => a.contains(i) || b.contains(i)) val abc = Union(c, ab) compare(abc, abc.normalize) compare(abc, i => a.contains(i) || b.contains(i) || c.contains(i)) }).toList.map(_.get) } test("intersection, intersection normal form") { (for (a <- intervals; b <- intervals; c <- intervals) yield schedule { val ab = Intersection(a, b) compare(ab, ab.normalize) compare(ab, i => a.contains(i) && b.contains(i)) val abc = Intersection(c, ab) compare(abc, abc.normalize) compare(abc, i => a.contains(i) && b.contains(i) && c.contains(i)) }).toList.map(_.get) } private def complementTest(a: Interval) { val b = a.complement compare(b, i => i < Integer.MAX_VALUE && !a.contains(i)) compare(a, i => i < Integer.MAX_VALUE && !b.contains(i)) compare(b, b.normalize) compare(b.complement, a) } test("complement, complement normal form") { for (a <- intervals) complementTest(a) for (a <- intervals; b <- intervals) { complementTest(a union b) complementTest(a intersect b) } } }
kyessenov/semeru
src/test/scala/test/IntervalSpec.scala
Scala
gpl-3.0
3,191
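The spec above model-checks an interval algebra (Segment, AfterInt, BeforeInt, Union, Intersection, complement, normalize). A hedged sketch of that algebra in plain use follows; it only touches constructors and methods visible in the test, mirrors the test's imports, and makes no claim about endpoint inclusivity, which the excerpt does not specify.

// Hedged sketch of the interval algebra exercised by IntervalSpec above.
import edu.mit.csail.cap.query._
import util._ // mirrors the spec's imports; util is a subpackage of the above

object IntervalSketch extends App {
  val a = Segment(0, 5)
  val b = AfterInt(3)
  println(Union(a, b).normalize)          // normal form of the union
  println(Intersection(a, b).contains(4)) // membership test from the spec's API
}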
package org.pico.twiddle.syntax

import org.pico.twiddle.FixedInt2FixedInt

package object fixedInt2FixedInt {
  implicit class FixedInt2FixedIntOps[A](val self: A) extends AnyVal {
    def fixAs[B](implicit ev: FixedInt2FixedInt[A, B]): B = ev.fixAs(self)
  }
}
newhoggy/pico-cuckoo-filter
pico-twiddle/src/main/scala/org/pico/twiddle/syntax/fixedInt2FixedInt/package.scala
Scala
bsd-3-clause
263
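The package object above adds a fixAs[B] extension driven by an implicit FixedInt2FixedInt[A, B]. The sketch below is an assumption-heavy illustration: the shape of FixedInt2FixedInt (a single fixAs(a: A): B member) is only inferred from the call ev.fixAs(self), and the Int-to-Long instance is invented for demonstration.

// Hedged sketch of the fixAs syntax above; the typeclass shape and the
// Int-to-Long instance are assumptions, not taken from the library.
import org.pico.twiddle.FixedInt2FixedInt
import org.pico.twiddle.syntax.fixedInt2FixedInt._

object FixAsSketch extends App {
  implicit val intToLong: FixedInt2FixedInt[Int, Long] =
    new FixedInt2FixedInt[Int, Long] {
      def fixAs(a: Int): Long = a.toLong
    }

  println(42.fixAs[Long]) // resolves via the implicit instance above
}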
package ml.combust.mleap.core.tree

/** Trait for a decision tree. */
trait DecisionTree extends Serializable {
  /** Root node of the decision tree.
   *
   * @return root node
   */
  def rootNode: Node
}
combust-ml/mleap
mleap-core/src/main/scala/ml/combust/mleap/core/tree/DecisionTree.scala
Scala
apache-2.0
212
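The DecisionTree trait above only requires a root Node. A hypothetical one-line implementation sketch follows; Node comes from the same package but is not shown in this excerpt, so nothing is assumed about its constructors.

// Hypothetical sketch: a concrete DecisionTree only needs to expose its root.
import ml.combust.mleap.core.tree.{DecisionTree, Node}

case class SimpleDecisionTree(rootNode: Node) extends DecisionTree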
/* * Copyright 2001-2008 Artima, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.scalatest.matchers import org.scalatest._ class ShouldPlusOrMinusSpec extends FunSpec with ShouldMatchers { describe("The be (X plusOrMinus Y) syntax") { val sevenDotOh = 7.0 val minusSevenDotOh = -7.0 val sevenDotOhFloat = 7.0f val minusSevenDotOhFloat = -7.0f val sevenLong = 7L val minusSevenLong = -7L val sevenInt = 7 val minusSevenInt = -7 val sevenShort: Short = 7 val minusSevenShort: Short = -7 val sevenByte: Byte = 7 val minusSevenByte: Byte = -7 /* I decided that for X plusOrMinus Y, Y can be any numeric type that's implicitly convertible to X. So if X is Double, Y could be Double, Float, Long, Int, Short, Byte. If X is Long, Y could be Long, Int, Short, Byte. If X is Short, Y could be Short or Byte. And if X is Byte, Y must be Byte. minusSevenDotOhFloat should be (-6.8f plusOrMinus 0.2d) */ it("should do nothing if the number is within the specified range") { // Double plusOrMinus Double sevenDotOh should be (7.1 plusOrMinus 0.2) sevenDotOh should be (6.9 plusOrMinus 0.2) sevenDotOh should be (7.0 plusOrMinus 0.2) sevenDotOh should be (7.2 plusOrMinus 0.2) sevenDotOh should be (6.8 plusOrMinus 0.2) minusSevenDotOh should be (-7.1 plusOrMinus 0.2) minusSevenDotOh should be (-6.9 plusOrMinus 0.2) minusSevenDotOh should be (-7.0 plusOrMinus 0.2) minusSevenDotOh should be (-7.2 plusOrMinus 0.2) minusSevenDotOh should be (-6.8 plusOrMinus 0.2) // Double plusOrMinus Float sevenDotOh should be (7.1 plusOrMinus 0.2f) sevenDotOh should be (6.9 plusOrMinus 0.2f) sevenDotOh should be (7.0 plusOrMinus 0.2f) sevenDotOh should be (7.2 plusOrMinus 0.2f) sevenDotOh should be (6.8 plusOrMinus 0.2f) minusSevenDotOh should be (-7.1 plusOrMinus 0.2f) minusSevenDotOh should be (-6.9 plusOrMinus 0.2f) minusSevenDotOh should be (-7.0 plusOrMinus 0.2f) minusSevenDotOh should be (-7.2 plusOrMinus 0.2f) minusSevenDotOh should be (-6.8 plusOrMinus 0.2f) // Double plusOrMinus Long sevenDotOh should be (7.1 plusOrMinus 2L) sevenDotOh should be (6.9 plusOrMinus 2L) sevenDotOh should be (7.0 plusOrMinus 2L) sevenDotOh should be (7.2 plusOrMinus 2L) sevenDotOh should be (6.8 plusOrMinus 2L) minusSevenDotOh should be (-7.1 plusOrMinus 2L) minusSevenDotOh should be (-6.9 plusOrMinus 2L) minusSevenDotOh should be (-7.0 plusOrMinus 2L) minusSevenDotOh should be (-7.2 plusOrMinus 2L) minusSevenDotOh should be (-6.8 plusOrMinus 2L) // Double plusOrMinus Int sevenDotOh should be (7.1 plusOrMinus 2) sevenDotOh should be (6.9 plusOrMinus 2) sevenDotOh should be (7.0 plusOrMinus 2) sevenDotOh should be (7.2 plusOrMinus 2) sevenDotOh should be (6.8 plusOrMinus 2) minusSevenDotOh should be (-7.1 plusOrMinus 2) minusSevenDotOh should be (-6.9 plusOrMinus 2) minusSevenDotOh should be (-7.0 plusOrMinus 2) minusSevenDotOh should be (-7.2 plusOrMinus 2) minusSevenDotOh should be (-6.8 plusOrMinus 2) // Double plusOrMinus Short sevenDotOh should be (7.1 plusOrMinus 2.toShort) sevenDotOh should be (6.9 plusOrMinus 2.toShort) sevenDotOh should be 
(7.0 plusOrMinus 2.toShort) sevenDotOh should be (7.2 plusOrMinus 2.toShort) sevenDotOh should be (6.8 plusOrMinus 2.toShort) minusSevenDotOh should be (-7.1 plusOrMinus 2.toShort) minusSevenDotOh should be (-6.9 plusOrMinus 2.toShort) minusSevenDotOh should be (-7.0 plusOrMinus 2.toShort) minusSevenDotOh should be (-7.2 plusOrMinus 2.toShort) minusSevenDotOh should be (-6.8 plusOrMinus 2.toShort) // Double plusOrMinus Byte sevenDotOh should be (7.1 plusOrMinus 2.toByte) sevenDotOh should be (6.9 plusOrMinus 2.toByte) sevenDotOh should be (7.0 plusOrMinus 2.toByte) sevenDotOh should be (7.2 plusOrMinus 2.toByte) sevenDotOh should be (6.8 plusOrMinus 2.toByte) minusSevenDotOh should be (-7.1 plusOrMinus 2.toByte) minusSevenDotOh should be (-6.9 plusOrMinus 2.toByte) minusSevenDotOh should be (-7.0 plusOrMinus 2.toByte) minusSevenDotOh should be (-7.2 plusOrMinus 2.toByte) minusSevenDotOh should be (-6.8 plusOrMinus 2.toByte) // Float plusOrMinus Float sevenDotOhFloat should be (7.1f plusOrMinus 0.2f) sevenDotOhFloat should be (6.9f plusOrMinus 0.2f) sevenDotOhFloat should be (7.0f plusOrMinus 0.2f) sevenDotOhFloat should be (7.2f plusOrMinus 0.2f) sevenDotOhFloat should be (6.8f plusOrMinus 0.2f) minusSevenDotOhFloat should be (-7.1f plusOrMinus 0.2f) minusSevenDotOhFloat should be (-6.9f plusOrMinus 0.2f) minusSevenDotOhFloat should be (-7.0f plusOrMinus 0.2f) minusSevenDotOhFloat should be (-7.2f plusOrMinus 0.2f) minusSevenDotOhFloat should be (-6.8f plusOrMinus 0.2f) // Float plusOrMinus Long sevenDotOhFloat should be (7.1f plusOrMinus 2L) sevenDotOhFloat should be (6.9f plusOrMinus 2L) sevenDotOhFloat should be (7.0f plusOrMinus 2L) sevenDotOhFloat should be (7.2f plusOrMinus 2L) sevenDotOhFloat should be (6.8f plusOrMinus 2L) minusSevenDotOhFloat should be (-7.1f plusOrMinus 2L) minusSevenDotOhFloat should be (-6.9f plusOrMinus 2L) minusSevenDotOhFloat should be (-7.0f plusOrMinus 2L) minusSevenDotOhFloat should be (-7.2f plusOrMinus 2L) minusSevenDotOhFloat should be (-6.8f plusOrMinus 2L) // Float plusOrMinus Int sevenDotOhFloat should be (7.1f plusOrMinus 2) sevenDotOhFloat should be (6.9f plusOrMinus 2) sevenDotOhFloat should be (7.0f plusOrMinus 2) sevenDotOhFloat should be (7.2f plusOrMinus 2) sevenDotOhFloat should be (6.8f plusOrMinus 2) minusSevenDotOhFloat should be (-7.1f plusOrMinus 2) minusSevenDotOhFloat should be (-6.9f plusOrMinus 2) minusSevenDotOhFloat should be (-7.0f plusOrMinus 2) minusSevenDotOhFloat should be (-7.2f plusOrMinus 2) minusSevenDotOhFloat should be (-6.8f plusOrMinus 2) // Float plusOrMinus Short sevenDotOhFloat should be (7.1f plusOrMinus 2.toShort) sevenDotOhFloat should be (6.9f plusOrMinus 2.toShort) sevenDotOhFloat should be (7.0f plusOrMinus 2.toShort) sevenDotOhFloat should be (7.2f plusOrMinus 2.toShort) sevenDotOhFloat should be (6.8f plusOrMinus 2.toShort) minusSevenDotOhFloat should be (-7.1f plusOrMinus 2.toShort) minusSevenDotOhFloat should be (-6.9f plusOrMinus 2.toShort) minusSevenDotOhFloat should be (-7.0f plusOrMinus 2.toShort) minusSevenDotOhFloat should be (-7.2f plusOrMinus 2.toShort) minusSevenDotOhFloat should be (-6.8f plusOrMinus 2.toShort) // Float plusOrMinus Byte sevenDotOhFloat should be (7.1f plusOrMinus 2.toByte) sevenDotOhFloat should be (6.9f plusOrMinus 2.toByte) sevenDotOhFloat should be (7.0f plusOrMinus 2.toByte) sevenDotOhFloat should be (7.2f plusOrMinus 2.toByte) sevenDotOhFloat should be (6.8f plusOrMinus 2.toByte) minusSevenDotOhFloat should be (-7.1f plusOrMinus 2.toByte) minusSevenDotOhFloat should be 
(-6.9f plusOrMinus 2.toByte) minusSevenDotOhFloat should be (-7.0f plusOrMinus 2.toByte) minusSevenDotOhFloat should be (-7.2f plusOrMinus 2.toByte) minusSevenDotOhFloat should be (-6.8f plusOrMinus 2.toByte) // Long plusOrMinus Long sevenLong should be (9L plusOrMinus 2L) sevenLong should be (8L plusOrMinus 2L) sevenLong should be (7L plusOrMinus 2L) sevenLong should be (6L plusOrMinus 2L) sevenLong should be (5L plusOrMinus 2L) minusSevenLong should be (-9L plusOrMinus 2L) minusSevenLong should be (-8L plusOrMinus 2L) minusSevenLong should be (-7L plusOrMinus 2L) minusSevenLong should be (-6L plusOrMinus 2L) minusSevenLong should be (-5L plusOrMinus 2L) // Long plusOrMinus Int sevenLong should be (9L plusOrMinus 2) sevenLong should be (8L plusOrMinus 2) sevenLong should be (7L plusOrMinus 2) sevenLong should be (6L plusOrMinus 2) sevenLong should be (5L plusOrMinus 2) minusSevenLong should be (-9L plusOrMinus 2) minusSevenLong should be (-8L plusOrMinus 2) minusSevenLong should be (-7L plusOrMinus 2) minusSevenLong should be (-6L plusOrMinus 2) minusSevenLong should be (-5L plusOrMinus 2) // Long plusOrMinus Short sevenLong should be (9L plusOrMinus 2.toShort) sevenLong should be (8L plusOrMinus 2.toShort) sevenLong should be (7L plusOrMinus 2.toShort) sevenLong should be (6L plusOrMinus 2.toShort) sevenLong should be (5L plusOrMinus 2.toShort) minusSevenLong should be (-9L plusOrMinus 2.toShort) minusSevenLong should be (-8L plusOrMinus 2.toShort) minusSevenLong should be (-7L plusOrMinus 2.toShort) minusSevenLong should be (-6L plusOrMinus 2.toShort) minusSevenLong should be (-5L plusOrMinus 2.toShort) // Long plusOrMinus Byte sevenLong should be (9L plusOrMinus 2.toByte) sevenLong should be (8L plusOrMinus 2.toByte) sevenLong should be (7L plusOrMinus 2.toByte) sevenLong should be (6L plusOrMinus 2.toByte) sevenLong should be (5L plusOrMinus 2.toByte) minusSevenLong should be (-9L plusOrMinus 2.toByte) minusSevenLong should be (-8L plusOrMinus 2.toByte) minusSevenLong should be (-7L plusOrMinus 2.toByte) minusSevenLong should be (-6L plusOrMinus 2.toByte) minusSevenLong should be (-5L plusOrMinus 2.toByte) // Int plusOrMinus Int sevenInt should be (9 plusOrMinus 2) sevenInt should be (8 plusOrMinus 2) sevenInt should be (7 plusOrMinus 2) sevenInt should be (6 plusOrMinus 2) sevenInt should be (5 plusOrMinus 2) minusSevenInt should be (-9 plusOrMinus 2) minusSevenInt should be (-8 plusOrMinus 2) minusSevenInt should be (-7 plusOrMinus 2) minusSevenInt should be (-6 plusOrMinus 2) minusSevenInt should be (-5 plusOrMinus 2) // Int plusOrMinus Short sevenInt should be (9 plusOrMinus 2.toShort) sevenInt should be (8 plusOrMinus 2.toShort) sevenInt should be (7 plusOrMinus 2.toShort) sevenInt should be (6 plusOrMinus 2.toShort) sevenInt should be (5 plusOrMinus 2.toShort) minusSevenInt should be (-9 plusOrMinus 2.toShort) minusSevenInt should be (-8 plusOrMinus 2.toShort) minusSevenInt should be (-7 plusOrMinus 2.toShort) minusSevenInt should be (-6 plusOrMinus 2.toShort) minusSevenInt should be (-5 plusOrMinus 2.toShort) // Int plusOrMinus Byte sevenInt should be (9 plusOrMinus 2.toByte) sevenInt should be (8 plusOrMinus 2.toByte) sevenInt should be (7 plusOrMinus 2.toByte) sevenInt should be (6 plusOrMinus 2.toByte) sevenInt should be (5 plusOrMinus 2.toByte) minusSevenInt should be (-9 plusOrMinus 2.toByte) minusSevenInt should be (-8 plusOrMinus 2.toByte) minusSevenInt should be (-7 plusOrMinus 2.toByte) minusSevenInt should be (-6 plusOrMinus 2.toByte) minusSevenInt should be (-5 
plusOrMinus 2.toByte) // Short plusOrMinus Short sevenShort should be (9.toShort plusOrMinus 2.toShort) sevenShort should be (8.toShort plusOrMinus 2.toShort) sevenShort should be (7.toShort plusOrMinus 2.toShort) sevenShort should be (6.toShort plusOrMinus 2.toShort) sevenShort should be (5.toShort plusOrMinus 2.toShort) minusSevenShort should be ((-9).toShort plusOrMinus 2.toShort) minusSevenShort should be ((-8).toShort plusOrMinus 2.toShort) minusSevenShort should be ((-7).toShort plusOrMinus 2.toShort) minusSevenShort should be ((-6).toShort plusOrMinus 2.toShort) minusSevenShort should be ((-5).toShort plusOrMinus 2.toShort) // Short plusOrMinus Byte sevenShort should be (9.toShort plusOrMinus 2.toByte) sevenShort should be (8.toShort plusOrMinus 2.toByte) sevenShort should be (7.toShort plusOrMinus 2.toByte) sevenShort should be (6.toShort plusOrMinus 2.toByte) sevenShort should be (5.toShort plusOrMinus 2.toByte) minusSevenShort should be ((-9).toShort plusOrMinus 2.toByte) minusSevenShort should be ((-8).toShort plusOrMinus 2.toByte) minusSevenShort should be ((-7).toShort plusOrMinus 2.toByte) minusSevenShort should be ((-6).toShort plusOrMinus 2.toByte) minusSevenShort should be ((-5).toShort plusOrMinus 2.toByte) // Byte plusOrMinus Byte sevenByte should be (9.toByte plusOrMinus 2.toByte) sevenByte should be (8.toByte plusOrMinus 2.toByte) sevenByte should be (7.toByte plusOrMinus 2.toByte) sevenByte should be (6.toByte plusOrMinus 2.toByte) sevenByte should be (5.toByte plusOrMinus 2.toByte) minusSevenByte should be ((-9).toByte plusOrMinus 2.toByte) minusSevenByte should be ((-8).toByte plusOrMinus 2.toByte) minusSevenByte should be ((-7).toByte plusOrMinus 2.toByte) minusSevenByte should be ((-6).toByte plusOrMinus 2.toByte) minusSevenByte should be ((-5).toByte plusOrMinus 2.toByte) } it("should do nothing if the number is within the specified range, when used with not") { // Double plusOrMinus Double sevenDotOh should not { be (7.5 plusOrMinus 0.2) } sevenDotOh should not be (7.5 plusOrMinus 0.2) sevenDotOh should not be (6.5 plusOrMinus 0.2) minusSevenDotOh should not { be (-7.5 plusOrMinus 0.2) } minusSevenDotOh should not be (-7.5 plusOrMinus 0.2) minusSevenDotOh should not be (-6.5 plusOrMinus 0.2) // Double plusOrMinus Float sevenDotOh should not { be (7.5 plusOrMinus 0.2f) } sevenDotOh should not be (7.5 plusOrMinus 0.2f) sevenDotOh should not be (6.5 plusOrMinus 0.2f) minusSevenDotOh should not { be (-7.5 plusOrMinus 0.2f) } minusSevenDotOh should not be (-7.5 plusOrMinus 0.2f) minusSevenDotOh should not be (-6.5 plusOrMinus 0.2f) // Double plusOrMinus Long sevenDotOh should not { be (10.0 plusOrMinus 2L) } sevenDotOh should not be (4.0 plusOrMinus 2L) sevenDotOh should not be (9.1 plusOrMinus 2L) minusSevenDotOh should not { be (-10.0 plusOrMinus 2L) } minusSevenDotOh should not be (-4.0 plusOrMinus 2L) minusSevenDotOh should not be (-9.1 plusOrMinus 2L) // Double plusOrMinus Int sevenDotOh should not { be (10.0 plusOrMinus 2) } sevenDotOh should not be (4.0 plusOrMinus 2) sevenDotOh should not be (9.1 plusOrMinus 2) minusSevenDotOh should not { be (-10.0 plusOrMinus 2) } minusSevenDotOh should not be (-4.0 plusOrMinus 2) minusSevenDotOh should not be (-9.1 plusOrMinus 2) // Double plusOrMinus Short sevenDotOh should not { be (10.0 plusOrMinus 2.toShort) } sevenDotOh should not be (4.0 plusOrMinus 2.toShort) sevenDotOh should not be (9.1 plusOrMinus 2.toShort) minusSevenDotOh should not { be (-10.0 plusOrMinus 2.toShort) } minusSevenDotOh should not be (-4.0 
plusOrMinus 2.toShort) minusSevenDotOh should not be (-9.1 plusOrMinus 2.toShort) // Double plusOrMinus Byte sevenDotOh should not { be (10.0 plusOrMinus 2.toByte) } sevenDotOh should not be (4.0 plusOrMinus 2.toByte) sevenDotOh should not be (9.1 plusOrMinus 2.toByte) minusSevenDotOh should not { be (-10.0 plusOrMinus 2.toByte) } minusSevenDotOh should not be (-4.0 plusOrMinus 2.toByte) minusSevenDotOh should not be (-9.1 plusOrMinus 2.toByte) // Float plusOrMinus Float sevenDotOhFloat should not { be (7.5f plusOrMinus 0.2f) } sevenDotOhFloat should not be (7.5f plusOrMinus 0.2f) sevenDotOhFloat should not be (6.5f plusOrMinus 0.2f) minusSevenDotOhFloat should not { be (-7.5f plusOrMinus 0.2f) } minusSevenDotOhFloat should not be (-7.5f plusOrMinus 0.2f) minusSevenDotOhFloat should not be (-6.5f plusOrMinus 0.2f) // Float plusOrMinus Long sevenDotOhFloat should not { be (10.0f plusOrMinus 2L) } sevenDotOhFloat should not be (4.0f plusOrMinus 2L) sevenDotOhFloat should not be (9.1f plusOrMinus 2L) minusSevenDotOhFloat should not { be (-10.0f plusOrMinus 2L) } minusSevenDotOhFloat should not be (-4.0f plusOrMinus 2L) minusSevenDotOhFloat should not be (-9.1f plusOrMinus 2L) // Float plusOrMinus Int sevenDotOhFloat should not { be (10.0f plusOrMinus 2) } sevenDotOhFloat should not be (4.0f plusOrMinus 2) sevenDotOhFloat should not be (9.1f plusOrMinus 2) minusSevenDotOhFloat should not { be (-10.0f plusOrMinus 2) } minusSevenDotOhFloat should not be (-4.0f plusOrMinus 2) minusSevenDotOhFloat should not be (-9.1f plusOrMinus 2) // Float plusOrMinus Short sevenDotOhFloat should not { be (10.0f plusOrMinus 2.toShort) } sevenDotOhFloat should not be (4.0f plusOrMinus 2.toShort) sevenDotOhFloat should not be (9.1f plusOrMinus 2.toShort) minusSevenDotOhFloat should not { be (-10.0f plusOrMinus 2.toShort) } minusSevenDotOhFloat should not be (-4.0f plusOrMinus 2.toShort) minusSevenDotOhFloat should not be (-9.1f plusOrMinus 2.toShort) // Float plusOrMinus Byte sevenDotOhFloat should not { be (10.0f plusOrMinus 2.toByte) } sevenDotOhFloat should not be (4.0f plusOrMinus 2.toByte) sevenDotOhFloat should not be (9.1f plusOrMinus 2.toByte) minusSevenDotOhFloat should not { be (-10.0f plusOrMinus 2.toByte) } minusSevenDotOhFloat should not be (-4.0f plusOrMinus 2.toByte) minusSevenDotOhFloat should not be (-9.1f plusOrMinus 2.toByte) // Long plusOrMinus Long sevenLong should not { be (10L plusOrMinus 2L) } sevenLong should not be (4L plusOrMinus 2L) sevenLong should not be (10L plusOrMinus 2L) minusSevenLong should not { be (-10L plusOrMinus 2L) } minusSevenLong should not be (-4L plusOrMinus 2L) minusSevenLong should not be (-10L plusOrMinus 2L) // Long plusOrMinus Int sevenLong should not { be (10L plusOrMinus 2) } sevenLong should not be (4L plusOrMinus 2) sevenLong should not be (10L plusOrMinus 2) minusSevenLong should not { be (-10L plusOrMinus 2) } minusSevenLong should not be (-4L plusOrMinus 2) minusSevenLong should not be (-10L plusOrMinus 2) // Long plusOrMinus Short sevenLong should not { be (10L plusOrMinus 2.toShort) } sevenLong should not be (4L plusOrMinus 2.toShort) sevenLong should not be (10L plusOrMinus 2.toShort) minusSevenLong should not { be (-10L plusOrMinus 2.toShort) } minusSevenLong should not be (-4L plusOrMinus 2.toShort) minusSevenLong should not be (-10L plusOrMinus 2.toShort) // Long plusOrMinus Byte sevenLong should not { be (10L plusOrMinus 2.toByte) } sevenLong should not be (4L plusOrMinus 2.toByte) sevenLong should not be (10L plusOrMinus 2.toByte) minusSevenLong 
should not { be (-10L plusOrMinus 2.toByte) } minusSevenLong should not be (-4L plusOrMinus 2.toByte) minusSevenLong should not be (-10L plusOrMinus 2.toByte) // Int plusOrMinus Int sevenInt should not { be (10 plusOrMinus 2) } sevenInt should not be (4 plusOrMinus 2) sevenInt should not be (10 plusOrMinus 2) minusSevenInt should not { be (-10 plusOrMinus 2) } minusSevenInt should not be (-4 plusOrMinus 2) minusSevenInt should not be (-10 plusOrMinus 2) // Int plusOrMinus Short sevenInt should not { be (10 plusOrMinus 2.toShort) } sevenInt should not be (4 plusOrMinus 2.toShort) sevenInt should not be (10 plusOrMinus 2.toShort) minusSevenInt should not { be (-10 plusOrMinus 2.toShort) } minusSevenInt should not be (-4 plusOrMinus 2.toShort) minusSevenInt should not be (-10 plusOrMinus 2.toShort) // Int plusOrMinus Byte sevenInt should not { be (10 plusOrMinus 2.toByte) } sevenInt should not be (4 plusOrMinus 2.toByte) sevenInt should not be (10 plusOrMinus 2.toByte) minusSevenInt should not { be (-10 plusOrMinus 2.toByte) } minusSevenInt should not be (-4 plusOrMinus 2.toByte) minusSevenInt should not be (-10 plusOrMinus 2.toByte) // Short plusOrMinus Short sevenShort should not { be (10.toShort plusOrMinus 2.toShort) } sevenShort should not be (4.toShort plusOrMinus 2.toShort) sevenShort should not be (10.toShort plusOrMinus 2.toShort) minusSevenShort should not { be ((-10).toShort plusOrMinus 2.toShort) } minusSevenShort should not be ((-4).toShort plusOrMinus 2.toShort) minusSevenShort should not be ((-10).toShort plusOrMinus 2.toShort) // Short plusOrMinus Byte sevenShort should not { be (10.toShort plusOrMinus 2.toByte) } sevenShort should not be (4.toShort plusOrMinus 2.toByte) sevenShort should not be (10.toShort plusOrMinus 2.toByte) minusSevenShort should not { be ((-10).toShort plusOrMinus 2.toByte) } minusSevenShort should not be ((-4).toShort plusOrMinus 2.toByte) minusSevenShort should not be ((-10).toShort plusOrMinus 2.toByte) // Byte plusOrMinus Byte sevenByte should not { be (10.toByte plusOrMinus 2.toByte) } sevenByte should not be (4.toByte plusOrMinus 2.toByte) sevenByte should not be (10.toByte plusOrMinus 2.toByte) minusSevenByte should not { be ((-10).toByte plusOrMinus 2.toByte) } minusSevenByte should not be ((-4).toByte plusOrMinus 2.toByte) minusSevenByte should not be ((-10).toByte plusOrMinus 2.toByte) } it("should do nothing if the number is within the specified range, when used in a logical-and expression") { // Double plusOrMinus Double sevenDotOh should ((be (7.1 plusOrMinus 0.2)) and (be (7.1 plusOrMinus 0.2))) sevenDotOh should (be (6.9 plusOrMinus 0.2) and (be (7.1 plusOrMinus 0.2))) sevenDotOh should (be (7.0 plusOrMinus 0.2) and be (7.0 plusOrMinus 0.2)) // Double plusOrMinus Float sevenDotOh should ((be (7.1 plusOrMinus 0.2f)) and (be (7.1 plusOrMinus 0.2f))) sevenDotOh should (be (6.9 plusOrMinus 0.2f) and (be (7.1 plusOrMinus 0.2f))) sevenDotOh should (be (7.0 plusOrMinus 0.2f) and be (7.0 plusOrMinus 0.2f)) // Double plusOrMinus Long sevenDotOh should ((be (7.1 plusOrMinus 2L)) and (be (7.1 plusOrMinus 2L))) sevenDotOh should (be (6.9 plusOrMinus 2L) and (be (7.1 plusOrMinus 2L))) sevenDotOh should (be (7.0 plusOrMinus 2L) and be (7.0 plusOrMinus 2L)) // Double plusOrMinus Int sevenDotOh should ((be (7.1 plusOrMinus 2)) and (be (7.1 plusOrMinus 2))) sevenDotOh should (be (6.9 plusOrMinus 2) and (be (7.1 plusOrMinus 2))) sevenDotOh should (be (7.0 plusOrMinus 2) and be (7.0 plusOrMinus 2)) // Double plusOrMinus Short sevenDotOh should ((be (7.1 
plusOrMinus 2.toShort)) and (be (7.1 plusOrMinus 2.toShort))) sevenDotOh should (be (6.9 plusOrMinus 2.toShort) and (be (7.1 plusOrMinus 2.toShort))) sevenDotOh should (be (7.0 plusOrMinus 2.toShort) and be (7.0 plusOrMinus 2.toShort)) // Double plusOrMinus Byte sevenDotOh should ((be (7.1 plusOrMinus 2.toByte)) and (be (7.1 plusOrMinus 2.toByte))) sevenDotOh should (be (6.9 plusOrMinus 2.toByte) and (be (7.1 plusOrMinus 2.toByte))) sevenDotOh should (be (7.0 plusOrMinus 2.toByte) and be (7.0 plusOrMinus 2.toByte)) // Float plusOrMinus Float sevenDotOhFloat should ((be (7.1f plusOrMinus 0.2f)) and (be (7.1f plusOrMinus 0.2f))) sevenDotOhFloat should (be (6.9f plusOrMinus 0.2f) and (be (7.1f plusOrMinus 0.2f))) sevenDotOhFloat should (be (7.0f plusOrMinus 0.2f) and be (7.0f plusOrMinus 0.2f)) // Float plusOrMinus Long sevenDotOhFloat should ((be (7.1f plusOrMinus 2L)) and (be (7.1f plusOrMinus 2L))) sevenDotOhFloat should (be (6.9f plusOrMinus 2L) and (be (7.1f plusOrMinus 2L))) sevenDotOhFloat should (be (7.0f plusOrMinus 2L) and be (7.0f plusOrMinus 2L)) // Float plusOrMinus Int sevenDotOhFloat should ((be (7.1f plusOrMinus 2)) and (be (7.1f plusOrMinus 2))) sevenDotOhFloat should (be (6.9f plusOrMinus 2) and (be (7.1f plusOrMinus 2))) sevenDotOhFloat should (be (7.0f plusOrMinus 2) and be (7.0f plusOrMinus 2)) // Float plusOrMinus Short sevenDotOhFloat should ((be (7.1f plusOrMinus 2.toShort)) and (be (7.1f plusOrMinus 2.toShort))) sevenDotOhFloat should (be (6.9f plusOrMinus 2.toShort) and (be (7.1f plusOrMinus 2.toShort))) sevenDotOhFloat should (be (7.0f plusOrMinus 2.toShort) and be (7.0f plusOrMinus 2.toShort)) // Float plusOrMinus Byte sevenDotOhFloat should ((be (7.1f plusOrMinus 2.toByte)) and (be (7.1f plusOrMinus 2.toByte))) sevenDotOhFloat should (be (6.9f plusOrMinus 2.toByte) and (be (7.1f plusOrMinus 2.toByte))) sevenDotOhFloat should (be (7.0f plusOrMinus 2.toByte) and be (7.0f plusOrMinus 2.toByte)) // Long plusOrMinus Long sevenLong should ((be (9L plusOrMinus 2L)) and (be (9L plusOrMinus 2L))) sevenLong should (be (8L plusOrMinus 2L) and (be (9L plusOrMinus 2L))) sevenLong should (be (7L plusOrMinus 2L) and be (7L plusOrMinus 2L)) // Long plusOrMinus Int sevenLong should ((be (9L plusOrMinus 2)) and (be (9L plusOrMinus 2))) sevenLong should (be (8L plusOrMinus 2) and (be (9L plusOrMinus 2))) sevenLong should (be (7L plusOrMinus 2) and be (7L plusOrMinus 2)) // Long plusOrMinus Short sevenLong should ((be (9L plusOrMinus 2.toShort)) and (be (9L plusOrMinus 2.toShort))) sevenLong should (be (8L plusOrMinus 2.toShort) and (be (9L plusOrMinus 2.toShort))) sevenLong should (be (7L plusOrMinus 2.toShort) and be (7L plusOrMinus 2.toShort)) // Long plusOrMinus Byte sevenLong should ((be (9L plusOrMinus 2.toByte)) and (be (9L plusOrMinus 2.toByte))) sevenLong should (be (8L plusOrMinus 2.toByte) and (be (9L plusOrMinus 2.toByte))) sevenLong should (be (7L plusOrMinus 2.toByte) and be (7L plusOrMinus 2.toByte)) // Int plusOrMinus Int sevenInt should ((be (9 plusOrMinus 2)) and (be (9 plusOrMinus 2))) sevenInt should (be (8 plusOrMinus 2) and (be (9 plusOrMinus 2))) sevenInt should (be (7 plusOrMinus 2) and be (7 plusOrMinus 2)) // Int plusOrMinus Short sevenInt should ((be (9 plusOrMinus 2.toShort)) and (be (9 plusOrMinus 2.toShort))) sevenInt should (be (8 plusOrMinus 2.toShort) and (be (9 plusOrMinus 2.toShort))) sevenInt should (be (7 plusOrMinus 2.toShort) and be (7 plusOrMinus 2.toShort)) // Int plusOrMinus Byte sevenInt should ((be (9 plusOrMinus 2.toByte)) and (be (9 
plusOrMinus 2.toByte))) sevenInt should (be (8 plusOrMinus 2.toByte) and (be (9 plusOrMinus 2.toByte))) sevenInt should (be (7 plusOrMinus 2.toByte) and be (7 plusOrMinus 2.toByte)) // Short plusOrMinus Short sevenShort should ((be (9.toShort plusOrMinus 2.toShort)) and (be (9.toShort plusOrMinus 2.toShort))) sevenShort should (be (8.toShort plusOrMinus 2.toShort) and (be (9.toShort plusOrMinus 2.toShort))) sevenShort should (be (7.toShort plusOrMinus 2.toShort) and be (7.toShort plusOrMinus 2.toShort)) // Short plusOrMinus Byte sevenShort should ((be (9.toShort plusOrMinus 2.toByte)) and (be (9.toShort plusOrMinus 2.toByte))) sevenShort should (be (8.toShort plusOrMinus 2.toByte) and (be (9.toShort plusOrMinus 2.toByte))) sevenShort should (be (7.toShort plusOrMinus 2.toByte) and be (7.toShort plusOrMinus 2.toByte)) // Byte plusOrMinus Byte sevenByte should ((be (9.toByte plusOrMinus 2.toByte)) and (be (9.toByte plusOrMinus 2.toByte))) sevenByte should (be (8.toByte plusOrMinus 2.toByte) and (be (9.toByte plusOrMinus 2.toByte))) sevenByte should (be (7.toByte plusOrMinus 2.toByte) and be (7.toByte plusOrMinus 2.toByte)) } it("should do nothing if the number is within the specified range, when used in a logical-or expression") { // Double plusOrMinus Double sevenDotOh should ((be (7.1 plusOrMinus 0.2)) or (be (7.1 plusOrMinus 0.2))) sevenDotOh should (be (6.9 plusOrMinus 0.2) or (be (7.1 plusOrMinus 0.2))) sevenDotOh should (be (7.0 plusOrMinus 0.2) or be (7.0 plusOrMinus 0.2)) // Double plusOrMinus Float sevenDotOh should ((be (7.1 plusOrMinus 0.2f)) or (be (7.1 plusOrMinus 0.2f))) sevenDotOh should (be (6.9 plusOrMinus 0.2f) or (be (7.1 plusOrMinus 0.2f))) sevenDotOh should (be (7.0 plusOrMinus 0.2f) or be (7.0 plusOrMinus 0.2f)) // Double plusOrMinus Long sevenDotOh should ((be (7.1 plusOrMinus 2L)) or (be (7.1 plusOrMinus 2L))) sevenDotOh should (be (6.9 plusOrMinus 2L) or (be (7.1 plusOrMinus 2L))) sevenDotOh should (be (7.0 plusOrMinus 2L) or be (7.0 plusOrMinus 2L)) // Double plusOrMinus Int sevenDotOh should ((be (7.1 plusOrMinus 2)) or (be (7.1 plusOrMinus 2))) sevenDotOh should (be (6.9 plusOrMinus 2) or (be (7.1 plusOrMinus 2))) sevenDotOh should (be (7.0 plusOrMinus 2) or be (7.0 plusOrMinus 2)) // Double plusOrMinus Short sevenDotOh should ((be (7.1 plusOrMinus 2.toShort)) or (be (7.1 plusOrMinus 2.toShort))) sevenDotOh should (be (6.9 plusOrMinus 2.toShort) or (be (7.1 plusOrMinus 2.toShort))) sevenDotOh should (be (7.0 plusOrMinus 2.toShort) or be (7.0 plusOrMinus 2.toShort)) // Double plusOrMinus Byte sevenDotOh should ((be (7.1 plusOrMinus 2.toByte)) or (be (7.1 plusOrMinus 2.toByte))) sevenDotOh should (be (6.9 plusOrMinus 2.toByte) or (be (7.1 plusOrMinus 2.toByte))) sevenDotOh should (be (7.0 plusOrMinus 2.toByte) or be (7.0 plusOrMinus 2.toByte)) // Float plusOrMinus Float sevenDotOhFloat should ((be (7.1f plusOrMinus 0.2f)) or (be (7.1f plusOrMinus 0.2f))) sevenDotOhFloat should (be (6.9f plusOrMinus 0.2f) or (be (7.1f plusOrMinus 0.2f))) sevenDotOhFloat should (be (7.0f plusOrMinus 0.2f) or be (7.0f plusOrMinus 0.2f)) // Float plusOrMinus Long sevenDotOhFloat should ((be (7.1f plusOrMinus 2L)) or (be (7.1f plusOrMinus 2L))) sevenDotOhFloat should (be (6.9f plusOrMinus 2L) or (be (7.1f plusOrMinus 2L))) sevenDotOhFloat should (be (7.0f plusOrMinus 2L) or be (7.0f plusOrMinus 2L)) // Float plusOrMinus Int sevenDotOhFloat should ((be (7.1f plusOrMinus 2)) or (be (7.1f plusOrMinus 2))) sevenDotOhFloat should (be (6.9f plusOrMinus 2) or (be (7.1f plusOrMinus 2))) 
sevenDotOhFloat should (be (7.0f plusOrMinus 2) or be (7.0f plusOrMinus 2)) // Float plusOrMinus Short sevenDotOhFloat should ((be (7.1f plusOrMinus 2.toShort)) or (be (7.1f plusOrMinus 2.toShort))) sevenDotOhFloat should (be (6.9f plusOrMinus 2.toShort) or (be (7.1f plusOrMinus 2.toShort))) sevenDotOhFloat should (be (7.0f plusOrMinus 2.toShort) or be (7.0f plusOrMinus 2.toShort)) // Float plusOrMinus Byte sevenDotOhFloat should ((be (7.1f plusOrMinus 2.toByte)) or (be (7.1f plusOrMinus 2.toByte))) sevenDotOhFloat should (be (6.9f plusOrMinus 2.toByte) or (be (7.1f plusOrMinus 2.toByte))) sevenDotOhFloat should (be (7.0f plusOrMinus 2.toByte) or be (7.0f plusOrMinus 2.toByte)) // Long plusOrMinus Long sevenLong should ((be (9L plusOrMinus 2L)) or (be (9L plusOrMinus 2L))) sevenLong should (be (8L plusOrMinus 2L) or (be (9L plusOrMinus 2L))) sevenLong should (be (7L plusOrMinus 2L) or be (7L plusOrMinus 2L)) // Long plusOrMinus Int sevenLong should ((be (9L plusOrMinus 2)) or (be (9L plusOrMinus 2))) sevenLong should (be (8L plusOrMinus 2) or (be (9L plusOrMinus 2))) sevenLong should (be (7L plusOrMinus 2) or be (7L plusOrMinus 2)) // Long plusOrMinus Short sevenLong should ((be (9L plusOrMinus 2.toShort)) or (be (9L plusOrMinus 2.toShort))) sevenLong should (be (8L plusOrMinus 2.toShort) or (be (9L plusOrMinus 2.toShort))) sevenLong should (be (7L plusOrMinus 2.toShort) or be (7L plusOrMinus 2.toShort)) // Long plusOrMinus Byte sevenLong should ((be (9L plusOrMinus 2.toByte)) or (be (9L plusOrMinus 2.toByte))) sevenLong should (be (8L plusOrMinus 2.toByte) or (be (9L plusOrMinus 2.toByte))) sevenLong should (be (7L plusOrMinus 2.toByte) or be (7L plusOrMinus 2.toByte)) // Int plusOrMinus Int sevenInt should ((be (9 plusOrMinus 2)) or (be (9 plusOrMinus 2))) sevenInt should (be (8 plusOrMinus 2) or (be (9 plusOrMinus 2))) sevenInt should (be (7 plusOrMinus 2) or be (7 plusOrMinus 2)) // Int plusOrMinus Short sevenInt should ((be (9 plusOrMinus 2.toShort)) or (be (9 plusOrMinus 2.toShort))) sevenInt should (be (8 plusOrMinus 2.toShort) or (be (9 plusOrMinus 2.toShort))) sevenInt should (be (7 plusOrMinus 2.toShort) or be (7 plusOrMinus 2.toShort)) // Int plusOrMinus Byte sevenInt should ((be (9 plusOrMinus 2.toByte)) or (be (9 plusOrMinus 2.toByte))) sevenInt should (be (8 plusOrMinus 2.toByte) or (be (9 plusOrMinus 2.toByte))) sevenInt should (be (7 plusOrMinus 2.toByte) or be (7 plusOrMinus 2.toByte)) // Short plusOrMinus Short sevenShort should ((be (9.toShort plusOrMinus 2.toShort)) or (be (9.toShort plusOrMinus 2.toShort))) sevenShort should (be (8.toShort plusOrMinus 2.toShort) or (be (9.toShort plusOrMinus 2.toShort))) sevenShort should (be (7.toShort plusOrMinus 2.toShort) or be (7.toShort plusOrMinus 2.toShort)) // Short plusOrMinus Byte sevenShort should ((be (9.toShort plusOrMinus 2.toByte)) or (be (9.toShort plusOrMinus 2.toByte))) sevenShort should (be (8.toShort plusOrMinus 2.toByte) or (be (9.toShort plusOrMinus 2.toByte))) sevenShort should (be (7.toShort plusOrMinus 2.toByte) or be (7.toShort plusOrMinus 2.toByte)) // Byte plusOrMinus Byte sevenByte should ((be (9.toByte plusOrMinus 2.toByte)) or (be (9.toByte plusOrMinus 2.toByte))) sevenByte should (be (8.toByte plusOrMinus 2.toByte) or (be (9.toByte plusOrMinus 2.toByte))) sevenByte should (be (7.toByte plusOrMinus 2.toByte) or be (7.toByte plusOrMinus 2.toByte)) } it("should do nothing if the number is not within the specified range, when used in a logical-and expression with not") { // Double plusOrMinus Double 
sevenDotOh should ((not be (17.1 plusOrMinus 0.2)) and (not be (17.1 plusOrMinus 0.2))) sevenDotOh should (not (be (16.9 plusOrMinus 0.2)) and not (be (17.1 plusOrMinus 0.2))) sevenDotOh should (not be (17.0 plusOrMinus 0.2) and not be (17.0 plusOrMinus 0.2)) // Double plusOrMinus Float sevenDotOh should ((not be (17.1 plusOrMinus 0.2f)) and (not be (17.1 plusOrMinus 0.2f))) sevenDotOh should (not (be (16.9 plusOrMinus 0.2f)) and not (be (17.1 plusOrMinus 0.2f))) sevenDotOh should (not be (17.0 plusOrMinus 0.2f) and not be (17.0 plusOrMinus 0.2f)) // Double plusOrMinus Long sevenDotOh should ((not be (17.1 plusOrMinus 2L)) and (not be (17.1 plusOrMinus 2L))) sevenDotOh should (not (be (16.9 plusOrMinus 2L)) and not (be (17.1 plusOrMinus 2L))) sevenDotOh should (not be (17.0 plusOrMinus 2L) and not be (17.0 plusOrMinus 2L)) // Double plusOrMinus Int sevenDotOh should ((not be (17.1 plusOrMinus 2)) and (not be (17.1 plusOrMinus 2))) sevenDotOh should (not (be (16.9 plusOrMinus 2)) and not (be (17.1 plusOrMinus 2))) sevenDotOh should (not be (17.0 plusOrMinus 2) and not be (17.0 plusOrMinus 2)) // Double plusOrMinus Short sevenDotOh should ((not be (17.1 plusOrMinus 2.toShort)) and (not be (17.1 plusOrMinus 2.toShort))) sevenDotOh should (not (be (16.9 plusOrMinus 2.toShort)) and not (be (17.1 plusOrMinus 2.toShort))) sevenDotOh should (not be (17.0 plusOrMinus 2.toShort) and not be (17.0 plusOrMinus 2.toShort)) // Double plusOrMinus Byte sevenDotOh should ((not be (17.1 plusOrMinus 2.toByte)) and (not be (17.1 plusOrMinus 2.toByte))) sevenDotOh should (not (be (16.9 plusOrMinus 2.toByte)) and not (be (17.1 plusOrMinus 2.toByte))) sevenDotOh should (not be (17.0 plusOrMinus 2.toByte) and not be (17.0 plusOrMinus 2.toByte)) // Float plusOrMinus Float sevenDotOhFloat should ((not be (17.1f plusOrMinus 0.2f)) and (not be (17.1f plusOrMinus 0.2f))) sevenDotOhFloat should (not (be (16.9f plusOrMinus 0.2f)) and not (be (17.1f plusOrMinus 0.2f))) sevenDotOhFloat should (not be (17.0f plusOrMinus 0.2f) and not be (17.0f plusOrMinus 0.2f)) // Float plusOrMinus Long sevenDotOhFloat should ((not be (17.1f plusOrMinus 2L)) and (not be (17.1f plusOrMinus 2L))) sevenDotOhFloat should (not (be (16.9f plusOrMinus 2L)) and not (be (17.1f plusOrMinus 2L))) sevenDotOhFloat should (not be (17.0f plusOrMinus 2L) and not be (17.0f plusOrMinus 2L)) // Float plusOrMinus Int sevenDotOhFloat should ((not be (17.1f plusOrMinus 2)) and (not be (17.1f plusOrMinus 2))) sevenDotOhFloat should (not (be (16.9f plusOrMinus 2)) and not (be (17.1f plusOrMinus 2))) sevenDotOhFloat should (not be (17.0f plusOrMinus 2) and not be (17.0f plusOrMinus 2)) // Float plusOrMinus Short sevenDotOhFloat should ((not be (17.1f plusOrMinus 2.toShort)) and (not be (17.1f plusOrMinus 2.toShort))) sevenDotOhFloat should (not (be (16.9f plusOrMinus 2.toShort)) and not (be (17.1f plusOrMinus 2.toShort))) sevenDotOhFloat should (not be (17.0f plusOrMinus 2.toShort) and not be (17.0f plusOrMinus 2.toShort)) // Float plusOrMinus Byte sevenDotOhFloat should ((not be (17.1f plusOrMinus 2.toByte)) and (not be (17.1f plusOrMinus 2.toByte))) sevenDotOhFloat should (not (be (16.9f plusOrMinus 2.toByte)) and not (be (17.1f plusOrMinus 2.toByte))) sevenDotOhFloat should (not be (17.0f plusOrMinus 2.toByte) and not be (17.0f plusOrMinus 2.toByte)) // Long plusOrMinus Long sevenLong should ((not be (19L plusOrMinus 2L)) and (not be (19L plusOrMinus 2L))) sevenLong should (not (be (18L plusOrMinus 2L)) and not (be (19L plusOrMinus 2L))) sevenLong should (not 
be (17L plusOrMinus 2L) and not be (17L plusOrMinus 2L)) // Long plusOrMinus Int sevenLong should ((not be (19L plusOrMinus 2)) and (not be (19L plusOrMinus 2))) sevenLong should (not (be (18L plusOrMinus 2)) and not (be (19L plusOrMinus 2))) sevenLong should (not be (17L plusOrMinus 2) and not be (17L plusOrMinus 2)) // Long plusOrMinus Short sevenLong should ((not be (19L plusOrMinus 2.toShort)) and (not be (19L plusOrMinus 2.toShort))) sevenLong should (not (be (18L plusOrMinus 2.toShort)) and not (be (19L plusOrMinus 2.toShort))) sevenLong should (not be (17L plusOrMinus 2.toShort) and not be (17L plusOrMinus 2.toShort)) // Long plusOrMinus Byte sevenLong should ((not be (19L plusOrMinus 2.toByte)) and (not be (19L plusOrMinus 2.toByte))) sevenLong should (not (be (18L plusOrMinus 2.toByte)) and not (be (19L plusOrMinus 2.toByte))) sevenLong should (not be (17L plusOrMinus 2.toByte) and not be (17L plusOrMinus 2.toByte)) // Int plusOrMinus Int sevenInt should ((not be (19 plusOrMinus 2)) and (not be (19 plusOrMinus 2))) sevenInt should (not (be (18 plusOrMinus 2)) and not (be (19 plusOrMinus 2))) sevenInt should (not be (17 plusOrMinus 2) and not be (17 plusOrMinus 2)) // Int plusOrMinus Short sevenInt should ((not be (19 plusOrMinus 2.toShort)) and (not be (19 plusOrMinus 2.toShort))) sevenInt should (not (be (18 plusOrMinus 2.toShort)) and not (be (19 plusOrMinus 2.toShort))) sevenInt should (not be (17 plusOrMinus 2.toShort) and not be (17 plusOrMinus 2.toShort)) // Int plusOrMinus Byte sevenInt should ((not be (19 plusOrMinus 2.toByte)) and (not be (19 plusOrMinus 2.toByte))) sevenInt should (not (be (18 plusOrMinus 2.toByte)) and not (be (19 plusOrMinus 2.toByte))) sevenInt should (not be (17 plusOrMinus 2.toByte) and not be (17 plusOrMinus 2.toByte)) // Short plusOrMinus Short sevenShort should ((not be (19.toShort plusOrMinus 2.toShort)) and (not be (19.toShort plusOrMinus 2.toShort))) sevenShort should (not (be (18.toShort plusOrMinus 2.toShort)) and not (be (19.toShort plusOrMinus 2.toShort))) sevenShort should (not be (17.toShort plusOrMinus 2.toShort) and not be (17.toShort plusOrMinus 2.toShort)) // Short plusOrMinus Byte sevenShort should ((not be (19.toShort plusOrMinus 2.toByte)) and (not be (19.toShort plusOrMinus 2.toByte))) sevenShort should (not (be (18.toShort plusOrMinus 2.toByte)) and not (be (19.toShort plusOrMinus 2.toByte))) sevenShort should (not be (17.toShort plusOrMinus 2.toByte) and not be (17.toShort plusOrMinus 2.toByte)) // Byte plusOrMinus Byte sevenByte should ((not be (19.toByte plusOrMinus 2.toByte)) and (not be (19.toByte plusOrMinus 2.toByte))) sevenByte should (not (be (18.toByte plusOrMinus 2.toByte)) and not (be (19.toByte plusOrMinus 2.toByte))) sevenByte should (not be (17.toByte plusOrMinus 2.toByte) and not be (17.toByte plusOrMinus 2.toByte)) } it("should do nothing if the number is not within the specified range, when used in a logical-or expression with not") { // Double plusOrMinus Double sevenDotOh should ((not be (17.1 plusOrMinus 0.2)) or (not be (17.1 plusOrMinus 0.2))) sevenDotOh should (not (be (16.9 plusOrMinus 0.2)) or not (be (17.1 plusOrMinus 0.2))) sevenDotOh should (not be (17.0 plusOrMinus 0.2) or not be (17.0 plusOrMinus 0.2)) // Double plusOrMinus Float sevenDotOh should ((not be (17.1 plusOrMinus 0.2f)) or (not be (17.1 plusOrMinus 0.2f))) sevenDotOh should (not (be (16.9 plusOrMinus 0.2f)) or not (be (17.1 plusOrMinus 0.2f))) sevenDotOh should (not be (17.0 plusOrMinus 0.2f) or not be (17.0 plusOrMinus 0.2f)) // Double 
plusOrMinus Long sevenDotOh should ((not be (17.1 plusOrMinus 2L)) or (not be (17.1 plusOrMinus 2L))) sevenDotOh should (not (be (16.9 plusOrMinus 2L)) or not (be (17.1 plusOrMinus 2L))) sevenDotOh should (not be (17.0 plusOrMinus 2L) or not be (17.0 plusOrMinus 2L)) // Double plusOrMinus Int sevenDotOh should ((not be (17.1 plusOrMinus 2)) or (not be (17.1 plusOrMinus 2))) sevenDotOh should (not (be (16.9 plusOrMinus 2)) or not (be (17.1 plusOrMinus 2))) sevenDotOh should (not be (17.0 plusOrMinus 2) or not be (17.0 plusOrMinus 2)) // Double plusOrMinus Short sevenDotOh should ((not be (17.1 plusOrMinus 2.toShort)) or (not be (17.1 plusOrMinus 2.toShort))) sevenDotOh should (not (be (16.9 plusOrMinus 2.toShort)) or not (be (17.1 plusOrMinus 2.toShort))) sevenDotOh should (not be (17.0 plusOrMinus 2.toShort) or not be (17.0 plusOrMinus 2.toShort)) // Double plusOrMinus Byte sevenDotOh should ((not be (17.1 plusOrMinus 2.toByte)) or (not be (17.1 plusOrMinus 2.toByte))) sevenDotOh should (not (be (16.9 plusOrMinus 2.toByte)) or not (be (17.1 plusOrMinus 2.toByte))) sevenDotOh should (not be (17.0 plusOrMinus 2.toByte) or not be (17.0 plusOrMinus 2.toByte)) // Float plusOrMinus Float sevenDotOhFloat should ((not be (17.1f plusOrMinus 0.2f)) or (not be (17.1f plusOrMinus 0.2f))) sevenDotOhFloat should (not (be (16.9f plusOrMinus 0.2f)) or not (be (17.1f plusOrMinus 0.2f))) sevenDotOhFloat should (not be (17.0f plusOrMinus 0.2f) or not be (17.0f plusOrMinus 0.2f)) // Float plusOrMinus Long sevenDotOhFloat should ((not be (17.1f plusOrMinus 2L)) or (not be (17.1f plusOrMinus 2L))) sevenDotOhFloat should (not (be (16.9f plusOrMinus 2L)) or not (be (17.1f plusOrMinus 2L))) sevenDotOhFloat should (not be (17.0f plusOrMinus 2L) or not be (17.0f plusOrMinus 2L)) // Float plusOrMinus Int sevenDotOhFloat should ((not be (17.1f plusOrMinus 2)) or (not be (17.1f plusOrMinus 2))) sevenDotOhFloat should (not (be (16.9f plusOrMinus 2)) or not (be (17.1f plusOrMinus 2))) sevenDotOhFloat should (not be (17.0f plusOrMinus 2) or not be (17.0f plusOrMinus 2)) // Float plusOrMinus Short sevenDotOhFloat should ((not be (17.1f plusOrMinus 2.toShort)) or (not be (17.1f plusOrMinus 2.toShort))) sevenDotOhFloat should (not (be (16.9f plusOrMinus 2.toShort)) or not (be (17.1f plusOrMinus 2.toShort))) sevenDotOhFloat should (not be (17.0f plusOrMinus 2.toShort) or not be (17.0f plusOrMinus 2.toShort)) // Float plusOrMinus Byte sevenDotOhFloat should ((not be (17.1f plusOrMinus 2.toByte)) or (not be (17.1f plusOrMinus 2.toByte))) sevenDotOhFloat should (not (be (16.9f plusOrMinus 2.toByte)) or not (be (17.1f plusOrMinus 2.toByte))) sevenDotOhFloat should (not be (17.0f plusOrMinus 2.toByte) or not be (17.0f plusOrMinus 2.toByte)) // Long plusOrMinus Long sevenLong should ((not be (19L plusOrMinus 2L)) or (not be (19L plusOrMinus 2L))) sevenLong should (not (be (18L plusOrMinus 2L)) or not (be (19L plusOrMinus 2L))) sevenLong should (not be (17L plusOrMinus 2L) or not be (17L plusOrMinus 2L)) // Long plusOrMinus Int sevenLong should ((not be (19L plusOrMinus 2)) or (not be (19L plusOrMinus 2))) sevenLong should (not (be (18L plusOrMinus 2)) or not (be (19L plusOrMinus 2))) sevenLong should (not be (17L plusOrMinus 2) or not be (17L plusOrMinus 2)) // Long plusOrMinus Short sevenLong should ((not be (19L plusOrMinus 2.toShort)) or (not be (19L plusOrMinus 2.toShort))) sevenLong should (not (be (18L plusOrMinus 2.toShort)) or not (be (19L plusOrMinus 2.toShort))) sevenLong should (not be (17L plusOrMinus 2.toShort) or not 
be (17L plusOrMinus 2.toShort)) // Long plusOrMinus Byte sevenLong should ((not be (19L plusOrMinus 2.toByte)) or (not be (19L plusOrMinus 2.toByte))) sevenLong should (not (be (18L plusOrMinus 2.toByte)) or not (be (19L plusOrMinus 2.toByte))) sevenLong should (not be (17L plusOrMinus 2.toByte) or not be (17L plusOrMinus 2.toByte)) // Int plusOrMinus Int sevenInt should ((not be (19 plusOrMinus 2)) or (not be (19 plusOrMinus 2))) sevenInt should (not (be (18 plusOrMinus 2)) or not (be (19 plusOrMinus 2))) sevenInt should (not be (17 plusOrMinus 2) or not be (17 plusOrMinus 2)) // Int plusOrMinus Short sevenInt should ((not be (19 plusOrMinus 2.toShort)) or (not be (19 plusOrMinus 2.toShort))) sevenInt should (not (be (18 plusOrMinus 2.toShort)) or not (be (19 plusOrMinus 2.toShort))) sevenInt should (not be (17 plusOrMinus 2.toShort) or not be (17 plusOrMinus 2.toShort)) // Int plusOrMinus Byte sevenInt should ((not be (19 plusOrMinus 2.toByte)) or (not be (19 plusOrMinus 2.toByte))) sevenInt should (not (be (18 plusOrMinus 2.toByte)) or not (be (19 plusOrMinus 2.toByte))) sevenInt should (not be (17 plusOrMinus 2.toByte) or not be (17 plusOrMinus 2.toByte)) // Short plusOrMinus Short sevenShort should ((not be (19.toShort plusOrMinus 2.toShort)) or (not be (19.toShort plusOrMinus 2.toShort))) sevenShort should (not (be (18.toShort plusOrMinus 2.toShort)) or not (be (19.toShort plusOrMinus 2.toShort))) sevenShort should (not be (17.toShort plusOrMinus 2.toShort) or not be (17.toShort plusOrMinus 2.toShort)) // Short plusOrMinus Byte sevenShort should ((not be (19.toShort plusOrMinus 2.toByte)) or (not be (19.toShort plusOrMinus 2.toByte))) sevenShort should (not (be (18.toShort plusOrMinus 2.toByte)) or not (be (19.toShort plusOrMinus 2.toByte))) sevenShort should (not be (17.toShort plusOrMinus 2.toByte) or not be (17.toShort plusOrMinus 2.toByte)) // Byte plusOrMinus Byte sevenByte should ((not be (19.toByte plusOrMinus 2.toByte)) or (not be (19.toByte plusOrMinus 2.toByte))) sevenByte should (not (be (18.toByte plusOrMinus 2.toByte)) or not (be (19.toByte plusOrMinus 2.toByte))) sevenByte should (not be (17.toByte plusOrMinus 2.toByte) or not be (17.toByte plusOrMinus 2.toByte)) } it("should throw TestFailedException if the number is not within the specified range") { // Double plusOrMinus Double val caught1 = intercept[TestFailedException] { sevenDotOh should be (17.1 plusOrMinus 0.2) } assert(caught1.getMessage === "7.0 was not 17.1 plus or minus 0.2") // Double plusOrMinus Float val caught2 = intercept[TestFailedException] { sevenDotOh should be (17.1 plusOrMinus 0.2f) } assert(caught2.getMessage === "7.0 was not 17.1 plus or minus 0.20000000298023224") // Double plusOrMinus Long val caught3 = intercept[TestFailedException] { sevenDotOh should be (17.1 plusOrMinus 2L) } assert(caught3.getMessage === "7.0 was not 17.1 plus or minus 2.0") // Double plusOrMinus Int val caught4 = intercept[TestFailedException] { sevenDotOh should be (17.1 plusOrMinus 2) } assert(caught4.getMessage === "7.0 was not 17.1 plus or minus 2.0") // Double plusOrMinus Short val caught5 = intercept[TestFailedException] { sevenDotOh should be (17.1 plusOrMinus 2.toShort) } assert(caught5.getMessage === "7.0 was not 17.1 plus or minus 2.0") // Double plusOrMinus Byte val caught6 = intercept[TestFailedException] { sevenDotOh should be (17.1 plusOrMinus 2.toByte) } assert(caught6.getMessage === "7.0 was not 17.1 plus or minus 2.0") // Float plusOrMinus Float val caught7 = intercept[TestFailedException] { 
sevenDotOhFloat should be (17.1f plusOrMinus 0.2f) } assert(caught7.getMessage === "7.0 was not 17.1 plus or minus 0.2") // Float plusOrMinus Long val caught8 = intercept[TestFailedException] { sevenDotOhFloat should be (17.1f plusOrMinus 2L) } assert(caught8.getMessage === "7.0 was not 17.1 plus or minus 2.0") // Float plusOrMinus Int val caught9 = intercept[TestFailedException] { sevenDotOhFloat should be (17.1f plusOrMinus 2) } assert(caught9.getMessage === "7.0 was not 17.1 plus or minus 2.0") // Float plusOrMinus Short val caught10 = intercept[TestFailedException] { sevenDotOhFloat should be (17.1f plusOrMinus 2.toShort) } assert(caught10.getMessage === "7.0 was not 17.1 plus or minus 2.0") // Float plusOrMinus Byte val caught11 = intercept[TestFailedException] { sevenDotOhFloat should be (17.1f plusOrMinus 2.toByte) } assert(caught11.getMessage === "7.0 was not 17.1 plus or minus 2.0") // Long plusOrMinus Long val caught12 = intercept[TestFailedException] { sevenLong should be (19L plusOrMinus 2L) } assert(caught12.getMessage === "7 was not 19 plus or minus 2") // Long plusOrMinus Int val caught13 = intercept[TestFailedException] { sevenLong should be (19L plusOrMinus 2) } assert(caught13.getMessage === "7 was not 19 plus or minus 2") // Long plusOrMinus Short val caught14 = intercept[TestFailedException] { sevenLong should be (19L plusOrMinus 2.toShort) } assert(caught14.getMessage === "7 was not 19 plus or minus 2") // Long plusOrMinus Byte val caught15 = intercept[TestFailedException] { sevenLong should be (19L plusOrMinus 2.toByte) } assert(caught15.getMessage === "7 was not 19 plus or minus 2") // Int plusOrMinus Int val caught16 = intercept[TestFailedException] { sevenInt should be (19 plusOrMinus 2) } assert(caught16.getMessage === "7 was not 19 plus or minus 2") // Int plusOrMinus Short val caught17 = intercept[TestFailedException] { sevenInt should be (19 plusOrMinus 2.toShort) } assert(caught17.getMessage === "7 was not 19 plus or minus 2") // Int plusOrMinus Byte val caught18 = intercept[TestFailedException] { sevenInt should be (19 plusOrMinus 2.toByte) } assert(caught18.getMessage === "7 was not 19 plus or minus 2") // Short plusOrMinus Short val caught19 = intercept[TestFailedException] { sevenShort should be (19.toShort plusOrMinus 2.toShort) } assert(caught19.getMessage === "7 was not 19 plus or minus 2") // Short plusOrMinus Byte val caught20 = intercept[TestFailedException] { sevenShort should be (19.toShort plusOrMinus 2.toByte) } assert(caught20.getMessage === "7 was not 19 plus or minus 2") // Byte plusOrMinus Byte val caught21 = intercept[TestFailedException] { sevenByte should be (19.toByte plusOrMinus 2.toByte) } assert(caught21.getMessage === "7 was not 19 plus or minus 2") } it("should throw TestFailedException if the number is within the specified range, when used with not") { // Double plusOrMinus Double val caught1 = intercept[TestFailedException] { sevenDotOh should not be (7.1 plusOrMinus 0.2) } assert(caught1.getMessage === "7.0 was 7.1 plus or minus 0.2") // Double plusOrMinus Float val caught2 = intercept[TestFailedException] { sevenDotOh should not be (7.1 plusOrMinus 0.2f) } assert(caught2.getMessage === "7.0 was 7.1 plus or minus 0.20000000298023224") // Double plusOrMinus Long val caught3 = intercept[TestFailedException] { sevenDotOh should not be (7.1 plusOrMinus 2L) } assert(caught3.getMessage === "7.0 was 7.1 plus or minus 2.0") // Double plusOrMinus Int val caught4 = intercept[TestFailedException] { sevenDotOh should not be (7.1 plusOrMinus 
2) } assert(caught4.getMessage === "7.0 was 7.1 plus or minus 2.0") // Double plusOrMinus Short val caught5 = intercept[TestFailedException] { sevenDotOh should not be (7.1 plusOrMinus 2.toShort) } assert(caught5.getMessage === "7.0 was 7.1 plus or minus 2.0") // Double plusOrMinus Byte val caught6 = intercept[TestFailedException] { sevenDotOh should not be (7.1 plusOrMinus 2.toByte) } assert(caught6.getMessage === "7.0 was 7.1 plus or minus 2.0") // Float plusOrMinus Float val caught7 = intercept[TestFailedException] { sevenDotOhFloat should not be (7.1f plusOrMinus 0.2f) } assert(caught7.getMessage === "7.0 was 7.1 plus or minus 0.2") // Float plusOrMinus Long val caught8 = intercept[TestFailedException] { sevenDotOhFloat should not be (7.1f plusOrMinus 2L) } assert(caught8.getMessage === "7.0 was 7.1 plus or minus 2.0") // Float plusOrMinus Int val caught9 = intercept[TestFailedException] { sevenDotOhFloat should not be (7.1f plusOrMinus 2) } assert(caught9.getMessage === "7.0 was 7.1 plus or minus 2.0") // Float plusOrMinus Short val caught10 = intercept[TestFailedException] { sevenDotOhFloat should not be (7.1f plusOrMinus 2.toShort) } assert(caught10.getMessage === "7.0 was 7.1 plus or minus 2.0") // Float plusOrMinus Byte val caught11 = intercept[TestFailedException] { sevenDotOhFloat should not be (7.1f plusOrMinus 2.toByte) } assert(caught11.getMessage === "7.0 was 7.1 plus or minus 2.0") // Long plusOrMinus Long val caught12 = intercept[TestFailedException] { sevenLong should not be (9L plusOrMinus 2L) } assert(caught12.getMessage === "7 was 9 plus or minus 2") // Long plusOrMinus Int val caught13 = intercept[TestFailedException] { sevenLong should not be (9L plusOrMinus 2) } assert(caught13.getMessage === "7 was 9 plus or minus 2") // Long plusOrMinus Short val caught14 = intercept[TestFailedException] { sevenLong should not be (9L plusOrMinus 2.toShort) } assert(caught14.getMessage === "7 was 9 plus or minus 2") // Long plusOrMinus Byte val caught15 = intercept[TestFailedException] { sevenLong should not be (9L plusOrMinus 2.toByte) } assert(caught15.getMessage === "7 was 9 plus or minus 2") // Int plusOrMinus Int val caught16 = intercept[TestFailedException] { sevenInt should not be (9 plusOrMinus 2) } assert(caught16.getMessage === "7 was 9 plus or minus 2") // Int plusOrMinus Short val caught17 = intercept[TestFailedException] { sevenInt should not be (9 plusOrMinus 2.toShort) } assert(caught17.getMessage === "7 was 9 plus or minus 2") // Int plusOrMinus Byte val caught18 = intercept[TestFailedException] { sevenInt should not be (9 plusOrMinus 2.toByte) } assert(caught18.getMessage === "7 was 9 plus or minus 2") // Short plusOrMinus Short val caught19 = intercept[TestFailedException] { sevenShort should not be (9.toShort plusOrMinus 2.toShort) } assert(caught19.getMessage === "7 was 9 plus or minus 2") // Short plusOrMinus Byte val caught20 = intercept[TestFailedException] { sevenShort should not be (9.toShort plusOrMinus 2.toByte) } assert(caught20.getMessage === "7 was 9 plus or minus 2") // Byte plusOrMinus Byte val caught21 = intercept[TestFailedException] { sevenByte should not be (9.toByte plusOrMinus 2.toByte) } assert(caught21.getMessage === "7 was 9 plus or minus 2") } it("should throw TestFailedException if the number is not within the specified range, when used in a logical-and expression") { // Double plusOrMinus Double val caught1 = intercept[TestFailedException] { sevenDotOh should ((be (17.1 plusOrMinus 0.2)) and (be (17.1 plusOrMinus 0.2))) } 
assert(caught1.getMessage === "7.0 was not 17.1 plus or minus 0.2") val caught2 = intercept[TestFailedException] { sevenDotOh should (be (6.9 plusOrMinus 0.2) and (be (17.1 plusOrMinus 0.2))) } assert(caught2.getMessage === "7.0 was 6.9 plus or minus 0.2, but 7.0 was not 17.1 plus or minus 0.2") val caught3 = intercept[TestFailedException] { sevenDotOh should (be (17.0 plusOrMinus 0.2) and be (7.0 plusOrMinus 0.2)) } assert(caught3.getMessage === "7.0 was not 17.0 plus or minus 0.2") // Double plusOrMinus Float val caught4 = intercept[TestFailedException] { sevenDotOh should ((be (17.1 plusOrMinus 0.2f)) and (be (17.1 plusOrMinus 0.2f))) } assert(caught4.getMessage === "7.0 was not 17.1 plus or minus 0.20000000298023224") val caught5 = intercept[TestFailedException] { sevenDotOh should (be (6.9 plusOrMinus 0.2f) and (be (17.1 plusOrMinus 0.2f))) } assert(caught5.getMessage === "7.0 was 6.9 plus or minus 0.20000000298023224, but 7.0 was not 17.1 plus or minus 0.20000000298023224") val caught6 = intercept[TestFailedException] { sevenDotOh should (be (17.0 plusOrMinus 0.2f) and be (7.0 plusOrMinus 0.2f)) } assert(caught6.getMessage === "7.0 was not 17.0 plus or minus 0.20000000298023224") // Double plusOrMinus Long val caught7 = intercept[TestFailedException] { sevenDotOh should ((be (17.1 plusOrMinus 2L)) and (be (17.1 plusOrMinus 2L))) } assert(caught7.getMessage === "7.0 was not 17.1 plus or minus 2.0") val caught8 = intercept[TestFailedException] { sevenDotOh should (be (6.9 plusOrMinus 2L) and (be (17.1 plusOrMinus 2L))) } assert(caught8.getMessage === "7.0 was 6.9 plus or minus 2.0, but 7.0 was not 17.1 plus or minus 2.0") val caught9 = intercept[TestFailedException] { sevenDotOh should (be (17.0 plusOrMinus 2L) and be (7.0 plusOrMinus 2L)) } assert(caught9.getMessage === "7.0 was not 17.0 plus or minus 2.0") // Double plusOrMinus Int val caught10 = intercept[TestFailedException] { sevenDotOh should ((be (17.1 plusOrMinus 2)) and (be (17.1 plusOrMinus 2))) } assert(caught10.getMessage === "7.0 was not 17.1 plus or minus 2.0") val caught11 = intercept[TestFailedException] { sevenDotOh should (be (6.9 plusOrMinus 2) and (be (17.1 plusOrMinus 2))) } assert(caught2.getMessage === "7.0 was 6.9 plus or minus 0.2, but 7.0 was not 17.1 plus or minus 0.2") val caught12 = intercept[TestFailedException] { sevenDotOh should (be (7.0 plusOrMinus 2) and be (17.0 plusOrMinus 2)) } assert(caught12.getMessage === "7.0 was 7.0 plus or minus 2.0, but 7.0 was not 17.0 plus or minus 2.0") // Double plusOrMinus Short val caught13 = intercept[TestFailedException] { sevenDotOh should ((be (17.1 plusOrMinus 2.toShort)) and (be (17.1 plusOrMinus 2.toShort))) } assert(caught13.getMessage === "7.0 was not 17.1 plus or minus 2.0") val caught14 = intercept[TestFailedException] { sevenDotOh should (be (6.9 plusOrMinus 2.toShort) and (be (17.1 plusOrMinus 2.toShort))) } assert(caught14.getMessage === "7.0 was 6.9 plus or minus 2.0, but 7.0 was not 17.1 plus or minus 2.0") val caught15 = intercept[TestFailedException] { sevenDotOh should (be (17.0 plusOrMinus 2.toShort) and be (7.0 plusOrMinus 2.toShort)) } assert(caught15.getMessage === "7.0 was not 17.0 plus or minus 2.0") // Double plusOrMinus Byte val caught16 = intercept[TestFailedException] { sevenDotOh should ((be (17.1 plusOrMinus 2.toByte)) and (be (17.1 plusOrMinus 2.toByte))) } assert(caught16.getMessage === "7.0 was not 17.1 plus or minus 2.0") val caught17 = intercept[TestFailedException] { sevenDotOh should (be (6.9 plusOrMinus 2.toByte) and (be (17.1 
plusOrMinus 2.toByte))) } assert(caught17.getMessage === "7.0 was 6.9 plus or minus 2.0, but 7.0 was not 17.1 plus or minus 2.0") val caught18 = intercept[TestFailedException] { sevenDotOh should (be (17.0 plusOrMinus 2.toByte) and be (7.0 plusOrMinus 2.toByte)) } assert(caught18.getMessage === "7.0 was not 17.0 plus or minus 2.0") // Float plusOrMinus Float val caught19 = intercept[TestFailedException] { sevenDotOhFloat should ((be (17.1f plusOrMinus 0.2f)) and (be (17.1f plusOrMinus 0.2f))) } assert(caught19.getMessage === "7.0 was not 17.1 plus or minus 0.2") val caught20 = intercept[TestFailedException] { sevenDotOhFloat should (be (6.9f plusOrMinus 0.2f) and (be (17.1f plusOrMinus 0.2f))) } assert(caught20.getMessage === "7.0 was 6.9 plus or minus 0.2, but 7.0 was not 17.1 plus or minus 0.2") val caught21 = intercept[TestFailedException] { sevenDotOhFloat should (be (17.0f plusOrMinus 0.2f) and be (7.0f plusOrMinus 0.2f)) } assert(caught21.getMessage === "7.0 was not 17.0 plus or minus 0.2") // Float plusOrMinus Long val caught22 = intercept[TestFailedException] { sevenDotOhFloat should ((be (17.1f plusOrMinus 2L)) and (be (17.1f plusOrMinus 2L))) } assert(caught22.getMessage === "7.0 was not 17.1 plus or minus 2.0") val caught23 = intercept[TestFailedException] { sevenDotOhFloat should (be (6.9f plusOrMinus 2L) and (be (17.1f plusOrMinus 2L))) } assert(caught23.getMessage === "7.0 was 6.9 plus or minus 2.0, but 7.0 was not 17.1 plus or minus 2.0") val caught24 = intercept[TestFailedException] { sevenDotOhFloat should (be (17.0f plusOrMinus 2L) and be (7.0f plusOrMinus 2L)) } assert(caught24.getMessage === "7.0 was not 17.0 plus or minus 2.0") // Float plusOrMinus Int val caught25 = intercept[TestFailedException] { sevenDotOhFloat should ((be (17.1f plusOrMinus 2)) and (be (17.1f plusOrMinus 2))) } assert(caught25.getMessage === "7.0 was not 17.1 plus or minus 2.0") val caught26 = intercept[TestFailedException] { sevenDotOhFloat should (be (6.9f plusOrMinus 2) and (be (17.1f plusOrMinus 2))) } assert(caught26.getMessage === "7.0 was 6.9 plus or minus 2.0, but 7.0 was not 17.1 plus or minus 2.0") val caught27 = intercept[TestFailedException] { sevenDotOhFloat should (be (17.0f plusOrMinus 2) and be (7.0f plusOrMinus 2)) } assert(caught27.getMessage === "7.0 was not 17.0 plus or minus 2.0") // Float plusOrMinus Short val caught28 = intercept[TestFailedException] { sevenDotOhFloat should ((be (17.1f plusOrMinus 2.toShort)) and (be (17.1f plusOrMinus 2.toShort))) } assert(caught28.getMessage === "7.0 was not 17.1 plus or minus 2.0") val caught29 = intercept[TestFailedException] { sevenDotOhFloat should (be (6.9f plusOrMinus 2.toShort) and (be (17.1f plusOrMinus 2.toShort))) } assert(caught29.getMessage === "7.0 was 6.9 plus or minus 2.0, but 7.0 was not 17.1 plus or minus 2.0") val caught30 = intercept[TestFailedException] { sevenDotOhFloat should (be (17.0f plusOrMinus 2.toShort) and be (7.0f plusOrMinus 2.toShort)) } assert(caught30.getMessage === "7.0 was not 17.0 plus or minus 2.0") // Float plusOrMinus Byte val caught31 = intercept[TestFailedException] { sevenDotOhFloat should ((be (17.1f plusOrMinus 2.toByte)) and (be (17.1f plusOrMinus 2.toByte))) } assert(caught31.getMessage === "7.0 was not 17.1 plus or minus 2.0") val caught32 = intercept[TestFailedException] { sevenDotOhFloat should (be (6.9f plusOrMinus 2.toByte) and (be (17.1f plusOrMinus 2.toByte))) } assert(caught32.getMessage === "7.0 was 6.9 plus or minus 2.0, but 7.0 was not 17.1 plus or minus 2.0") val caught33 = 
intercept[TestFailedException] { sevenDotOhFloat should (be (17.0f plusOrMinus 2.toByte) and be (7.0f plusOrMinus 2.toByte)) } assert(caught33.getMessage === "7.0 was not 17.0 plus or minus 2.0") // Long plusOrMinus Long val caught34 = intercept[TestFailedException] { sevenLong should ((be (19L plusOrMinus 2L)) and (be (9L plusOrMinus 2L))) } assert(caught34.getMessage === "7 was not 19 plus or minus 2") val caught35 = intercept[TestFailedException] { sevenLong should (be (18L plusOrMinus 2L) and (be (19L plusOrMinus 2L))) } assert(caught35.getMessage === "7 was not 18 plus or minus 2") val caught36 = intercept[TestFailedException] { sevenLong should (be (17L plusOrMinus 2L) and be (7L plusOrMinus 2L)) } assert(caught36.getMessage === "7 was not 17 plus or minus 2") // Long plusOrMinus Int val caught37 = intercept[TestFailedException] { sevenLong should ((be (19L plusOrMinus 2)) and (be (9L plusOrMinus 2))) } assert(caught37.getMessage === "7 was not 19 plus or minus 2") val caught38 = intercept[TestFailedException] { sevenLong should (be (8L plusOrMinus 2) and (be (19L plusOrMinus 2))) } assert(caught38.getMessage === "7 was 8 plus or minus 2, but 7 was not 19 plus or minus 2") val caught39 = intercept[TestFailedException] { sevenLong should (be (17L plusOrMinus 2) and be (7L plusOrMinus 2)) } assert(caught39.getMessage === "7 was not 17 plus or minus 2") // Long plusOrMinus Short val caught40 = intercept[TestFailedException] { sevenLong should ((be (19L plusOrMinus 2.toShort)) and (be (9L plusOrMinus 2.toShort))) } assert(caught40.getMessage === "7 was not 19 plus or minus 2") val caught41 = intercept[TestFailedException] { sevenLong should (be (8L plusOrMinus 2.toShort) and (be (19L plusOrMinus 2.toShort))) } assert(caught41.getMessage === "7 was 8 plus or minus 2, but 7 was not 19 plus or minus 2") val caught42 = intercept[TestFailedException] { sevenLong should (be (17L plusOrMinus 2.toShort) and be (7L plusOrMinus 2.toShort)) } assert(caught42.getMessage === "7 was not 17 plus or minus 2") // Long plusOrMinus Byte val caught43 = intercept[TestFailedException] { sevenLong should ((be (19L plusOrMinus 2.toByte)) and (be (9L plusOrMinus 2.toByte))) } assert(caught43.getMessage === "7 was not 19 plus or minus 2") val caught44 = intercept[TestFailedException] { sevenLong should (be (8L plusOrMinus 2.toByte) and (be (19L plusOrMinus 2.toByte))) } assert(caught44.getMessage === "7 was 8 plus or minus 2, but 7 was not 19 plus or minus 2") val caught45 = intercept[TestFailedException] { sevenLong should (be (17L plusOrMinus 2.toByte) and be (7L plusOrMinus 2.toByte)) } assert(caught45.getMessage === "7 was not 17 plus or minus 2") // Int plusOrMinus Int val caught46 = intercept[TestFailedException] { sevenInt should ((be (19 plusOrMinus 2)) and (be (9 plusOrMinus 2))) } assert(caught46.getMessage === "7 was not 19 plus or minus 2") val caught47 = intercept[TestFailedException] { sevenInt should (be (8 plusOrMinus 2) and (be (19 plusOrMinus 2))) } assert(caught47.getMessage === "7 was 8 plus or minus 2, but 7 was not 19 plus or minus 2") val caught48 = intercept[TestFailedException] { sevenInt should (be (17 plusOrMinus 2) and be (7 plusOrMinus 2)) } assert(caught48.getMessage === "7 was not 17 plus or minus 2") // Int plusOrMinus Short val caught49 = intercept[TestFailedException] { sevenInt should ((be (9 plusOrMinus 2.toShort)) and (be (19 plusOrMinus 2.toShort))) } assert(caught49.getMessage === "7 was 9 plus or minus 2, but 7 was not 19 plus or minus 2") val caught50 = 
intercept[TestFailedException] { sevenInt should (be (8 plusOrMinus 2.toShort) and (be (19 plusOrMinus 2.toShort))) } assert(caught50.getMessage === "7 was 8 plus or minus 2, but 7 was not 19 plus or minus 2") val caught51 = intercept[TestFailedException] { sevenInt should (be (17 plusOrMinus 2.toShort) and be (7 plusOrMinus 2.toShort)) } assert(caught51.getMessage === "7 was not 17 plus or minus 2") // Int plusOrMinus Byte val caught52 = intercept[TestFailedException] { sevenInt should ((be (19 plusOrMinus 2.toByte)) and (be (9 plusOrMinus 2.toByte))) } assert(caught52.getMessage === "7 was not 19 plus or minus 2") val caught53 = intercept[TestFailedException] { sevenInt should (be (8 plusOrMinus 2.toByte) and (be (19 plusOrMinus 2.toByte))) } assert(caught53.getMessage === "7 was 8 plus or minus 2, but 7 was not 19 plus or minus 2") val caught54 = intercept[TestFailedException] { sevenInt should (be (17 plusOrMinus 2.toByte) and be (7 plusOrMinus 2.toByte)) } assert(caught54.getMessage === "7 was not 17 plus or minus 2") // Short plusOrMinus Short val caught55 = intercept[TestFailedException] { sevenShort should ((be (19.toShort plusOrMinus 2.toShort)) and (be (9.toShort plusOrMinus 2.toShort))) } assert(caught55.getMessage === "7 was not 19 plus or minus 2") val caught56 = intercept[TestFailedException] { sevenShort should (be (8.toShort plusOrMinus 2.toShort) and (be (19.toShort plusOrMinus 2.toShort))) } assert(caught56.getMessage === "7 was 8 plus or minus 2, but 7 was not 19 plus or minus 2") val caught57 = intercept[TestFailedException] { sevenShort should (be (17.toShort plusOrMinus 2.toShort) and be (7.toShort plusOrMinus 2.toShort)) } assert(caught57.getMessage === "7 was not 17 plus or minus 2") // Short plusOrMinus Byte val caught58 = intercept[TestFailedException] { sevenShort should ((be (19.toShort plusOrMinus 2.toByte)) and (be (9.toShort plusOrMinus 2.toByte))) } assert(caught58.getMessage === "7 was not 19 plus or minus 2") val caught59 = intercept[TestFailedException] { sevenShort should (be (8.toShort plusOrMinus 2.toByte) and (be (19.toShort plusOrMinus 2.toByte))) } assert(caught59.getMessage === "7 was 8 plus or minus 2, but 7 was not 19 plus or minus 2") val caught60 = intercept[TestFailedException] { sevenShort should (be (17.toShort plusOrMinus 2.toByte) and be (7.toShort plusOrMinus 2.toByte)) } assert(caught60.getMessage === "7 was not 17 plus or minus 2") // Byte plusOrMinus Byte val caught61 = intercept[TestFailedException] { sevenByte should ((be (19.toByte plusOrMinus 2.toByte)) and (be (9.toByte plusOrMinus 2.toByte))) } assert(caught61.getMessage === "7 was not 19 plus or minus 2") val caught62 = intercept[TestFailedException] { sevenByte should (be (8.toByte plusOrMinus 2.toByte) and (be (19.toByte plusOrMinus 2.toByte))) } assert(caught62.getMessage === "7 was 8 plus or minus 2, but 7 was not 19 plus or minus 2") val caught63 = intercept[TestFailedException] { sevenByte should (be (17.toByte plusOrMinus 2.toByte) and be (7.toByte plusOrMinus 2.toByte)) } assert(caught63.getMessage === "7 was not 17 plus or minus 2") } it("should throw TestFailedException if the number is not within the specified range, when used in a logical-or expression") { // Double plusOrMinus Double val caught1 = intercept[TestFailedException] { sevenDotOh should ((be (17.1 plusOrMinus 0.2)) or (be (17.1 plusOrMinus 0.2))) } assert(caught1.getMessage === "7.0 was not 17.1 plus or minus 0.2, and 7.0 was not 17.1 plus or minus 0.2") val caught2 = intercept[TestFailedException] { 
sevenDotOh should (be (16.9 plusOrMinus 0.2) or (be (17.1 plusOrMinus 0.2))) } assert(caught2.getMessage === "7.0 was not 16.9 plus or minus 0.2, and 7.0 was not 17.1 plus or minus 0.2") val caught3 = intercept[TestFailedException] { sevenDotOh should (be (17.0 plusOrMinus 0.2) or be (97.0 plusOrMinus 0.2)) } assert(caught3.getMessage === "7.0 was not 17.0 plus or minus 0.2, and 7.0 was not 97.0 plus or minus 0.2") } it("should throw TestFailedException if the number is within the specified range, when used in a logical-and expression with not") { // Double plusOrMinus Double val caught1 = intercept[TestFailedException] { sevenDotOh should (not (be (17.1 plusOrMinus 0.2)) and not (be (7.1 plusOrMinus 0.2))) } assert(caught1.getMessage === "7.0 was not 17.1 plus or minus 0.2, but 7.0 was 7.1 plus or minus 0.2") val caught2 = intercept[TestFailedException] { sevenDotOh should (not be (16.9 plusOrMinus 0.2) and (not be (7.1 plusOrMinus 0.2))) } assert(caught2.getMessage === "7.0 was not 16.9 plus or minus 0.2, but 7.0 was 7.1 plus or minus 0.2") val caught3 = intercept[TestFailedException] { sevenDotOh should (not be (17.0 plusOrMinus 0.2) and not be (7.0 plusOrMinus 0.2)) } assert(caught3.getMessage === "7.0 was not 17.0 plus or minus 0.2, but 7.0 was 7.0 plus or minus 0.2") // Check that the error message "short circuits" val caught4 = intercept[TestFailedException] { sevenDotOh should (not (be (7.1 plusOrMinus 0.2)) and not (be (7.1 plusOrMinus 0.2))) } assert(caught4.getMessage === "7.0 was 7.1 plus or minus 0.2") } it("should throw TestFailedException if the number is within the specified range, when used in a logical-or expression with not") { // Double plusOrMinus Double val caught1 = intercept[TestFailedException] { sevenDotOh should (not (be (7.1 plusOrMinus 0.2)) or not (be (7.1 plusOrMinus 0.2))) } assert(caught1.getMessage === "7.0 was 7.1 plus or minus 0.2, and 7.0 was 7.1 plus or minus 0.2") val caught2 = intercept[TestFailedException] { sevenDotOh should ((not be (6.9 plusOrMinus 0.2)) or (not be (7.1 plusOrMinus 0.2))) } assert(caught2.getMessage === "7.0 was 6.9 plus or minus 0.2, and 7.0 was 7.1 plus or minus 0.2") val caught3 = intercept[TestFailedException] { sevenDotOh should (not be (7.0 plusOrMinus 0.2) or not be (7.0 plusOrMinus 0.2)) } assert(caught3.getMessage === "7.0 was 7.0 plus or minus 0.2, and 7.0 was 7.0 plus or minus 0.2") } it("should throw TestFailedException if the number passed as the range is 0 or negative") { // Double plusOrMinus Double val caught1 = intercept[TestFailedException] { sevenDotOh should be (7.1 plusOrMinus -0.2) } assert(caught1.getMessage === "Range (-0.2) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Double plusOrMinus Float val caught2 = intercept[TestFailedException] { sevenDotOh should be (7.1 plusOrMinus -0.2f) } assert(caught2.getMessage === "Range (-0.20000000298023224) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Double plusOrMinus Long val caught3 = intercept[TestFailedException] { sevenDotOh should be (7.1 plusOrMinus -2L) } assert(caught3.getMessage === "Range (-2.0) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Double plusOrMinus Int val caught4 = intercept[TestFailedException] { sevenDotOh should be (7.1 plusOrMinus -2) } assert(caught4.getMessage === "Range (-2.0) passed to plusOrMinus was zero or negative. 
Must be a positive non-zero number.") // Double plusOrMinus Short val caught5 = intercept[TestFailedException] { sevenDotOh should be (7.1 plusOrMinus (-2).toShort) } assert(caught5.getMessage === "Range (-2.0) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Double plusOrMinus Byte val caught6 = intercept[TestFailedException] { sevenDotOh should be (7.1 plusOrMinus (-2).toByte) } assert(caught6.getMessage === "Range (-2.0) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Float plusOrMinus Float val caught7 = intercept[TestFailedException] { sevenDotOhFloat should be (7.1f plusOrMinus -0.2f) } assert(caught7.getMessage === "Range (-0.2) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Float plusOrMinus Long val caught8 = intercept[TestFailedException] { sevenDotOhFloat should be (7.1f plusOrMinus -2L) } assert(caught8.getMessage === "Range (-2.0) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Float plusOrMinus Int val caught9 = intercept[TestFailedException] { sevenDotOhFloat should be (7.1f plusOrMinus -2) } assert(caught9.getMessage === "Range (-2.0) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Float plusOrMinus Short val caught10 = intercept[TestFailedException] { sevenDotOhFloat should be (7.1f plusOrMinus (-2).toShort) } assert(caught10.getMessage === "Range (-2.0) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Float plusOrMinus Byte val caught11 = intercept[TestFailedException] { sevenDotOhFloat should be (7.1f plusOrMinus (-2).toByte) } assert(caught11.getMessage === "Range (-2.0) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Long plusOrMinus Long val caught12 = intercept[TestFailedException] { sevenLong should be (9L plusOrMinus -2L) } assert(caught12.getMessage === "Range (-2) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Long plusOrMinus Int val caught13 = intercept[TestFailedException] { sevenLong should be (9L plusOrMinus -2) } assert(caught13.getMessage === "Range (-2) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Long plusOrMinus Short val caught14 = intercept[TestFailedException] { sevenLong should be (9L plusOrMinus (-2).toShort) } assert(caught14.getMessage === "Range (-2) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Long plusOrMinus Byte val caught15 = intercept[TestFailedException] { sevenLong should be (9L plusOrMinus (-2).toByte) } assert(caught15.getMessage === "Range (-2) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Int plusOrMinus Int val caught16 = intercept[TestFailedException] { sevenInt should be (9 plusOrMinus -2) } assert(caught16.getMessage === "Range (-2) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Int plusOrMinus Short val caught17 = intercept[TestFailedException] { sevenInt should be (9 plusOrMinus (-2).toShort) } assert(caught17.getMessage === "Range (-2) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Int plusOrMinus Byte val caught18 = intercept[TestFailedException] { sevenInt should be (9 plusOrMinus (-2).toByte) } assert(caught18.getMessage === "Range (-2) passed to plusOrMinus was zero or negative. 
Must be a positive non-zero number.") // Short plusOrMinus Short val caught19 = intercept[TestFailedException] { sevenShort should be (9.toShort plusOrMinus (-2).toShort) } assert(caught19.getMessage === "Range (-2) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Short plusOrMinus Byte val caught20 = intercept[TestFailedException] { sevenShort should be (9.toShort plusOrMinus (-2).toByte) } assert(caught20.getMessage === "Range (-2) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") // Byte plusOrMinus Byte val caught21 = intercept[TestFailedException] { sevenByte should be (9.toByte plusOrMinus (-2).toByte) } assert(caught21.getMessage === "Range (-2) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.") } } }
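The spec above exercises `plusOrMinus` against every numeric pairing. For orientation, here is a minimal standalone sketch of the tolerance check those assertions rely on; the object and method names are hypothetical, and this is not ScalaTest's internal implementation.

// Editorial sketch only, not part of yyuu/scalatest: a value matches
// `pivot plusOrMinus tolerance` when it lies inside [pivot - tolerance, pivot + tolerance],
// and a zero or negative tolerance is rejected, mirroring the messages asserted above.
object PlusOrMinusSketch {
  def withinTolerance(value: Double, pivot: Double, tolerance: Double): Boolean = {
    require(tolerance > 0,
      s"Range ($tolerance) passed to plusOrMinus was zero or negative. Must be a positive non-zero number.")
    value >= pivot - tolerance && value <= pivot + tolerance
  }

  def main(args: Array[String]): Unit = {
    println(withinTolerance(7.0, 6.9, 0.2))  // true: "sevenDotOh should be (6.9 plusOrMinus 0.2)" passes
    println(withinTolerance(7.0, 17.1, 0.2)) // false: fails with "7.0 was not 17.1 plus or minus 0.2"
  }
}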
yyuu/scalatest
src/test/scala/org/scalatest/matchers/ShouldPlusOrMinusSpec.scala
Scala
apache-2.0
83,973
package com.bwsw.tstreamstransactionserver.options

import com.bwsw.tstreamstransactionserver.netty.client.Client
import com.bwsw.tstreamstransactionserver.options.ClientOptions._
import com.bwsw.tstreamstransactionserver.options.CommonOptions.ZookeeperOptions
import org.apache.curator.framework.CuratorFramework

class ClientBuilder private(authOpts: AuthOptions,
                            zookeeperOpts: ZookeeperOptions,
                            connectionOpts: ConnectionOptions,
                            curatorOpt: Option[CuratorFramework]) {
  private val authOptions = authOpts
  private val zookeeperOptions = zookeeperOpts
  private val connectionOptions = connectionOpts
  private val curator: Option[CuratorFramework] = curatorOpt

  def this() = this(AuthOptions(), ZookeeperOptions(), ConnectionOptions(), None)

  def withAuthOptions(authOptions: AuthOptions) =
    new ClientBuilder(authOptions, zookeeperOptions, connectionOptions, curator)

  def withZookeeperOptions(zookeeperOptions: ZookeeperOptions) =
    new ClientBuilder(authOptions, zookeeperOptions, connectionOptions, curator)

  def withCuratorConnection(curator: CuratorFramework) =
    new ClientBuilder(authOptions, zookeeperOptions, connectionOptions, Some(curator))

  def withConnectionOptions(clientOptions: ConnectionOptions) =
    new ClientBuilder(authOptions, zookeeperOptions, clientOptions, curator)

  def build() = new Client(connectionOptions, authOptions, zookeeperOptions, curator)

  def getConnectionOptions = connectionOptions.copy()

  def getZookeeperOptions = zookeeperOptions.copy()

  def getAuthOptions = authOptions.copy()
}
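A brief usage sketch for the builder above; illustrative only, using the no-argument option defaults that `def this()` already relies on, not recommended settings.

// Each withXxx call returns a fresh, immutable ClientBuilder; build() creates the Client.
val client = new ClientBuilder()
  .withZookeeperOptions(ZookeeperOptions())
  .withAuthOptions(AuthOptions())
  .withConnectionOptions(ConnectionOptions())
  .build()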
bwsw/tstreams-transaction-server
src/main/scala/com/bwsw/tstreamstransactionserver/options/ClientBuilder.scala
Scala
apache-2.0
1,653
package org.receiver2d.engine.graphics

case class RGBA(r: Float, g: Float, b: Float, a: Float) {
  private val arr = Array(r, g, b, a)

  def apply(i: Int) = arr.apply(i)

  /* TODO: more useful color functions */
}

object RGBA {
  final def red = RGBA(1, 0, 0, 1)
  final def green = RGBA(0, 1, 0, 1)
  final def blue = RGBA(0, 0, 1, 1)
  final def white = RGBA(1, 1, 1, 1)
}
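The TODO above leaves room for color helpers; the following is a hypothetical example of one such function (linear interpolation) and is not part of the Receiver2D sources.

// Hypothetical helper of the kind the TODO hints at; it could live in `object RGBA`
// alongside the named colors above.
object RGBAExtras {
  /** Linearly interpolate between two colors, clamping t to [0, 1]. */
  def lerp(from: RGBA, to: RGBA, t: Float): RGBA = {
    val k = math.max(0f, math.min(1f, t))
    RGBA(
      from.r + (to.r - from.r) * k,
      from.g + (to.g - from.g) * k,
      from.b + (to.b - from.b) * k,
      from.a + (to.a - from.a) * k
    )
  }
}

// RGBAExtras.lerp(RGBA.red, RGBA.blue, 0.5f) == RGBA(0.5f, 0f, 0.5f, 1f)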
Prince781/Receiver2D
src/main/scala/org/receiver2d/engine/graphics/RGBA.scala
Scala
gpl-2.0
361
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.expressions import org.apache.spark.sql.catalyst.InternalRow import org.apache.spark.sql.catalyst.analysis.TypeCheckResult import org.apache.spark.sql.catalyst.expressions.codegen.{CodegenContext, ExprCode} import org.apache.spark.sql.catalyst.util.TypeUtils import org.apache.spark.sql.types._ /** * An expression that is evaluated to the first non-null input. * * {{{ * coalesce(1, 2) => 1 * coalesce(null, 1, 2) => 1 * coalesce(null, null, 2) => 2 * coalesce(null, null, null) => null * }}} */ // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(expr1, expr2, ...) - Returns the first non-null argument if exists. Otherwise, null.", examples = """ Examples: > SELECT _FUNC_(NULL, 1, NULL); 1 """) // scalastyle:on line.size.limit case class Coalesce(children: Seq[Expression]) extends Expression { /** Coalesce is nullable if all of its children are nullable, or if it has no children. */ override def nullable: Boolean = children.forall(_.nullable) // Coalesce is foldable if all children are foldable. override def foldable: Boolean = children.forall(_.foldable) override def checkInputDataTypes(): TypeCheckResult = { if (children.length < 1) { TypeCheckResult.TypeCheckFailure( s"input to function $prettyName requires at least one argument") } else { TypeUtils.checkForSameTypeInputExpr(children.map(_.dataType), s"function $prettyName") } } override def dataType: DataType = children.head.dataType override def eval(input: InternalRow): Any = { var result: Any = null val childIterator = children.iterator while (childIterator.hasNext && result == null) { result = childIterator.next().eval(input) } result } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val tmpIsNull = ctx.freshName("coalesceTmpIsNull") ctx.addMutableState(ctx.JAVA_BOOLEAN, tmpIsNull) // all the evals are meant to be in a do { ... 
} while (false); loop val evals = children.map { e => val eval = e.genCode(ctx) s""" |${eval.code} |if (!${eval.isNull}) { | $tmpIsNull = false; | ${ev.value} = ${eval.value}; | continue; |} """.stripMargin } val resultType = ctx.javaType(dataType) val codes = ctx.splitExpressionsWithCurrentInputs( expressions = evals, funcName = "coalesce", returnType = resultType, makeSplitFunction = func => s""" |$resultType ${ev.value} = ${ctx.defaultValue(dataType)}; |do { | $func |} while (false); |return ${ev.value}; """.stripMargin, foldFunctions = _.map { funcCall => s""" |${ev.value} = $funcCall; |if (!$tmpIsNull) { | continue; |} """.stripMargin }.mkString) ev.copy(code = s""" |$tmpIsNull = true; |$resultType ${ev.value} = ${ctx.defaultValue(dataType)}; |do { | $codes |} while (false); |final boolean ${ev.isNull} = $tmpIsNull; """.stripMargin) } } @ExpressionDescription( usage = "_FUNC_(expr1, expr2) - Returns `expr2` if `expr1` is null, or `expr1` otherwise.", examples = """ Examples: > SELECT _FUNC_(NULL, array('2')); ["2"] """) case class IfNull(left: Expression, right: Expression, child: Expression) extends RuntimeReplaceable { def this(left: Expression, right: Expression) = { this(left, right, Coalesce(Seq(left, right))) } override def flatArguments: Iterator[Any] = Iterator(left, right) override def sql: String = s"$prettyName(${left.sql}, ${right.sql})" } @ExpressionDescription( usage = "_FUNC_(expr1, expr2) - Returns null if `expr1` equals to `expr2`, or `expr1` otherwise.", examples = """ Examples: > SELECT _FUNC_(2, 2); NULL """) case class NullIf(left: Expression, right: Expression, child: Expression) extends RuntimeReplaceable { def this(left: Expression, right: Expression) = { this(left, right, If(EqualTo(left, right), Literal.create(null, left.dataType), left)) } override def flatArguments: Iterator[Any] = Iterator(left, right) override def sql: String = s"$prettyName(${left.sql}, ${right.sql})" } @ExpressionDescription( usage = "_FUNC_(expr1, expr2) - Returns `expr2` if `expr1` is null, or `expr1` otherwise.", examples = """ Examples: > SELECT _FUNC_(NULL, array('2')); ["2"] """) case class Nvl(left: Expression, right: Expression, child: Expression) extends RuntimeReplaceable { def this(left: Expression, right: Expression) = { this(left, right, Coalesce(Seq(left, right))) } override def flatArguments: Iterator[Any] = Iterator(left, right) override def sql: String = s"$prettyName(${left.sql}, ${right.sql})" } // scalastyle:off line.size.limit @ExpressionDescription( usage = "_FUNC_(expr1, expr2, expr3) - Returns `expr2` if `expr1` is not null, or `expr3` otherwise.", examples = """ Examples: > SELECT _FUNC_(NULL, 2, 1); 1 """) // scalastyle:on line.size.limit case class Nvl2(expr1: Expression, expr2: Expression, expr3: Expression, child: Expression) extends RuntimeReplaceable { def this(expr1: Expression, expr2: Expression, expr3: Expression) = { this(expr1, expr2, expr3, If(IsNotNull(expr1), expr2, expr3)) } override def flatArguments: Iterator[Any] = Iterator(expr1, expr2, expr3) override def sql: String = s"$prettyName(${expr1.sql}, ${expr2.sql}, ${expr3.sql})" } /** * Evaluates to `true` iff it's NaN. 
*/ @ExpressionDescription( usage = "_FUNC_(expr) - Returns true if `expr` is NaN, or false otherwise.", examples = """ Examples: > SELECT _FUNC_(cast('NaN' as double)); true """) case class IsNaN(child: Expression) extends UnaryExpression with Predicate with ImplicitCastInputTypes { override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(DoubleType, FloatType)) override def nullable: Boolean = false override def eval(input: InternalRow): Any = { val value = child.eval(input) if (value == null) { false } else { child.dataType match { case DoubleType => value.asInstanceOf[Double].isNaN case FloatType => value.asInstanceOf[Float].isNaN } } } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val eval = child.genCode(ctx) child.dataType match { case DoubleType | FloatType => ev.copy(code = s""" ${eval.code} ${ctx.javaType(dataType)} ${ev.value} = ${ctx.defaultValue(dataType)}; ${ev.value} = !${eval.isNull} && Double.isNaN(${eval.value});""", isNull = "false") } } } /** * An Expression evaluates to `left` iff it's not NaN, or evaluates to `right` otherwise. * This Expression is useful for mapping NaN values to null. */ @ExpressionDescription( usage = "_FUNC_(expr1, expr2) - Returns `expr1` if it's not NaN, or `expr2` otherwise.", examples = """ Examples: > SELECT _FUNC_(cast('NaN' as double), 123); 123.0 """) case class NaNvl(left: Expression, right: Expression) extends BinaryExpression with ImplicitCastInputTypes { override def dataType: DataType = left.dataType override def inputTypes: Seq[AbstractDataType] = Seq(TypeCollection(DoubleType, FloatType), TypeCollection(DoubleType, FloatType)) override def eval(input: InternalRow): Any = { val value = left.eval(input) if (value == null) { null } else { left.dataType match { case DoubleType => if (!value.asInstanceOf[Double].isNaN) value else right.eval(input) case FloatType => if (!value.asInstanceOf[Float].isNaN) value else right.eval(input) } } } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val leftGen = left.genCode(ctx) val rightGen = right.genCode(ctx) left.dataType match { case DoubleType | FloatType => ev.copy(code = s""" ${leftGen.code} boolean ${ev.isNull} = false; ${ctx.javaType(dataType)} ${ev.value} = ${ctx.defaultValue(dataType)}; if (${leftGen.isNull}) { ${ev.isNull} = true; } else { if (!Double.isNaN(${leftGen.value})) { ${ev.value} = ${leftGen.value}; } else { ${rightGen.code} if (${rightGen.isNull}) { ${ev.isNull} = true; } else { ${ev.value} = ${rightGen.value}; } } }""") } } } /** * An expression that is evaluated to true if the input is null. */ @ExpressionDescription( usage = "_FUNC_(expr) - Returns true if `expr` is null, or false otherwise.", examples = """ Examples: > SELECT _FUNC_(1); false """) case class IsNull(child: Expression) extends UnaryExpression with Predicate { override def nullable: Boolean = false override def eval(input: InternalRow): Any = { child.eval(input) == null } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val eval = child.genCode(ctx) ExprCode(code = eval.code, isNull = "false", value = eval.isNull) } override def sql: String = s"(${child.sql} IS NULL)" } /** * An expression that is evaluated to true if the input is not null. 
*/ @ExpressionDescription( usage = "_FUNC_(expr) - Returns true if `expr` is not null, or false otherwise.", examples = """ Examples: > SELECT _FUNC_(1); true """) case class IsNotNull(child: Expression) extends UnaryExpression with Predicate { override def nullable: Boolean = false override def eval(input: InternalRow): Any = { child.eval(input) != null } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val eval = child.genCode(ctx) ExprCode(code = eval.code, isNull = "false", value = s"(!(${eval.isNull}))") } override def sql: String = s"(${child.sql} IS NOT NULL)" } /** * A predicate that is evaluated to be true if there are at least `n` non-null and non-NaN values. */ case class AtLeastNNonNulls(n: Int, children: Seq[Expression]) extends Predicate { override def nullable: Boolean = false override def foldable: Boolean = children.forall(_.foldable) override def toString: String = s"AtLeastNNulls(n, ${children.mkString(",")})" private[this] val childrenArray = children.toArray override def eval(input: InternalRow): Boolean = { var numNonNulls = 0 var i = 0 while (i < childrenArray.length && numNonNulls < n) { val evalC = childrenArray(i).eval(input) if (evalC != null) { childrenArray(i).dataType match { case DoubleType => if (!evalC.asInstanceOf[Double].isNaN) numNonNulls += 1 case FloatType => if (!evalC.asInstanceOf[Float].isNaN) numNonNulls += 1 case _ => numNonNulls += 1 } } i += 1 } numNonNulls >= n } override def doGenCode(ctx: CodegenContext, ev: ExprCode): ExprCode = { val nonnull = ctx.freshName("nonnull") // all evals are meant to be inside a do { ... } while (false); loop val evals = children.map { e => val eval = e.genCode(ctx) e.dataType match { case DoubleType | FloatType => s""" |if ($nonnull < $n) { | ${eval.code} | if (!${eval.isNull} && !Double.isNaN(${eval.value})) { | $nonnull += 1; | } |} else { | continue; |} """.stripMargin case _ => s""" |if ($nonnull < $n) { | ${eval.code} | if (!${eval.isNull}) { | $nonnull += 1; | } |} else { | continue; |} """.stripMargin } } val codes = ctx.splitExpressionsWithCurrentInputs( expressions = evals, funcName = "atLeastNNonNulls", extraArguments = (ctx.JAVA_INT, nonnull) :: Nil, returnType = ctx.JAVA_INT, makeSplitFunction = body => s""" |do { | $body |} while (false); |return $nonnull; """.stripMargin, foldFunctions = _.map { funcCall => s""" |$nonnull = $funcCall; |if ($nonnull >= $n) { | continue; |} """.stripMargin }.mkString) ev.copy(code = s""" |${ctx.JAVA_INT} $nonnull = 0; |do { | $codes |} while (false); |${ctx.JAVA_BOOLEAN} ${ev.value} = $nonnull >= $n; """.stripMargin, isNull = "false") } }
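// Illustrative sketch (not part of Spark): the expressions above document coalesce as returning
// the first non-null input and AtLeastNNonNulls as counting values that are neither null nor NaN.
// A minimal plain-Scala analogue of those documented semantics, with null standing in for SQL NULL,
// could look like this:
object NullSemanticsSketch {
  /** First non-null value, or null when every input is null (mirrors coalesce(...)). */
  def coalesce(values: Seq[Any]): Any = {
    var result: Any = null
    val it = values.iterator
    while (it.hasNext && result == null) {
      result = it.next()
    }
    result
  }

  /** True when at least `n` inputs are non-null and, for doubles/floats, not NaN. */
  def atLeastNNonNulls(n: Int, values: Seq[Any]): Boolean =
    values.count {
      case null      => false
      case d: Double => !d.isNaN
      case f: Float  => !f.isNaN
      case _         => true
    } >= n

  def main(args: Array[String]): Unit = {
    assert(coalesce(Seq(null, 1, 2)) == 1)          // coalesce(null, 1, 2) => 1
    assert(coalesce(Seq(null, null, null)) == null) // coalesce(null, null, null) => null
    assert(atLeastNNonNulls(2, Seq(null, 1.0, Double.NaN, "x")))
  }
}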
ron8hu/spark
sql/catalyst/src/main/scala/org/apache/spark/sql/catalyst/expressions/nullExpressions.scala
Scala
apache-2.0
13,709
package de.htwg.zeta.server.model.modelValidator.generator.consistencyRules import scala.collection.immutable.Seq import de.htwg.zeta.common.models.project.concept.elements.MClass import de.htwg.zeta.common.models.project.concept.Concept import org.scalatest.flatspec.AnyFlatSpec import org.scalatest.matchers.should.Matchers class NoCyclicInheritanceTest extends AnyFlatSpec with Matchers { val nonCyclicClassOne = MClass( name = "nonCyclicClassOne", description = "", abstractness = false, superTypeNames = Seq(), inputReferenceNames = Seq(), outputReferenceNames = Seq(), attributes = Seq(), methods = Seq.empty ) val nonCyclicClassTwo = MClass( name = "nonCyclicClassTwo", description = "", abstractness = false, superTypeNames = Seq(nonCyclicClassOne.name), inputReferenceNames = Seq(), outputReferenceNames = Seq(), attributes = Seq(), methods = Seq.empty ) val nonCyclicClassThree = MClass( name = "nonCyclicClassThree", description = "", abstractness = false, superTypeNames = Seq(nonCyclicClassTwo.name), inputReferenceNames = Seq(), outputReferenceNames = Seq(), attributes = Seq(), methods = Seq.empty ) val nonCyclicClassFour = MClass( name = "nonCyclicClassFour", description = "", abstractness = false, superTypeNames = Seq(nonCyclicClassTwo.name), inputReferenceNames = Seq(), outputReferenceNames = Seq(), attributes = Seq(), methods = Seq.empty ) val nonCyclicMetaModel = Concept( classes = Seq(nonCyclicClassOne, nonCyclicClassTwo, nonCyclicClassThree, nonCyclicClassFour), references = Seq(), enums = Seq.empty, methods = Seq.empty, attributes = Seq.empty, uiState = "" ) val cyclicClassOne = MClass( name = "cyclicClassOne", description = "", abstractness = false, superTypeNames = Seq("cyclicClassFour"), inputReferenceNames = Seq(), outputReferenceNames = Seq(), attributes = Seq(), methods = Seq.empty ) val cyclicClassTwo = MClass( name = "cyclicClassTwo", description = "", abstractness = false, superTypeNames = Seq(cyclicClassOne.name), inputReferenceNames = Seq(), outputReferenceNames = Seq(), attributes = Seq(), methods = Seq.empty ) val cyclicClassThree = MClass( name = "cyclicClassThree", description = "", abstractness = false, superTypeNames = Seq(cyclicClassTwo.name), inputReferenceNames = Seq(), outputReferenceNames = Seq(), attributes = Seq(), methods = Seq.empty ) val cyclicClassFour = MClass( name = "cyclicClassFour", description = "", abstractness = false, superTypeNames = Seq(cyclicClassThree.name), inputReferenceNames = Seq(), outputReferenceNames = Seq(), attributes = Seq(), methods = Seq.empty ) val cyclicClassFive = MClass( name = "cyclicClassFive", description = "", abstractness = false, superTypeNames = Seq(cyclicClassThree.name), inputReferenceNames = Seq(), outputReferenceNames = Seq(), attributes = Seq(), methods = Seq.empty ) val cyclicClassSix = MClass( name = "cyclicClassSix", description = "", abstractness = false, superTypeNames = Seq(), inputReferenceNames = Seq(), outputReferenceNames = Seq(), attributes = Seq(), methods = Seq.empty ) val cyclicMetaModel = Concept( classes = Seq(cyclicClassOne, cyclicClassTwo, cyclicClassThree, cyclicClassFour, cyclicClassFive, cyclicClassSix), references = Seq(), enums = Seq.empty, methods = Seq.empty, attributes = Seq.empty, uiState = "" ) val rule = new NoCyclicInheritance "check" should "return true on non-cyclic meta models" in { rule.check(nonCyclicMetaModel) should be (true) } it should "return false on cyclic meta models" in { rule.check(cyclicMetaModel) should be (false) } }
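// Illustrative sketch (hypothetical; the project's actual NoCyclicInheritance rule is not part of
// this file): the fixtures above encode inheritance as superTypeNames edges, and the cyclic model
// closes the loop cyclicClassOne -> cyclicClassFour -> cyclicClassThree -> cyclicClassTwo ->
// cyclicClassOne. One way such a graph can be checked is a depth-first walk that tracks the names
// currently on the path:
object CyclicInheritanceSketch {
  def hasCycle(superTypes: Map[String, Seq[String]]): Boolean = {
    def visit(name: String, onPath: Set[String]): Boolean =
      onPath.contains(name) ||
        superTypes.getOrElse(name, Seq.empty).exists(visit(_, onPath + name))
    superTypes.keys.exists(visit(_, Set.empty))
  }

  def main(args: Array[String]): Unit = {
    val acyclic = Map("two" -> Seq("one"), "three" -> Seq("two"), "four" -> Seq("two"))
    val cyclic = Map("one" -> Seq("four"), "two" -> Seq("one"),
      "three" -> Seq("two"), "four" -> Seq("three"))
    assert(!hasCycle(acyclic))
    assert(hasCycle(cyclic))
  }
}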
Zeta-Project/zeta
api/server/test/de/htwg/zeta/server/model/modelValidator/generator/consistencyRules/NoCyclicInheritanceTest.scala
Scala
bsd-2-clause
3,955
package monocle.std import monocle.MonocleSuite import monocle.law.discipline.IsoTests import monocle.law.discipline.function._ import scalaz.Cofree import scalaz.std.option.optionInstance class CofreeSpec extends MonocleSuite { checkAll("cofreeToStream", IsoTests(cofreeToStream[Int])) checkAll("cofreeToTree", IsoTests(cofreeToTree[Int])) checkAll("cons1 cofree", Cons1Tests[Cofree[Option, Int], Int, Option[Cofree[Option, Int]]]) checkAll("each cofree", EachTests[Cofree[Option, Int], Int]) }
japgolly/Monocle
test/src/test/scala/monocle/std/CofreeSpec.scala
Scala
mit
512
/* * Copyright 2016 The BigDL Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.intel.analytics.bigdl.dllib.utils.tf.loaders import com.intel.analytics.bigdl.dllib.tensor.Tensor import com.intel.analytics.bigdl.dllib.utils.T import com.intel.analytics.bigdl.dllib.utils.tf.Tensorflow.typeAttr import com.intel.analytics.bigdl.dllib.utils.tf.TensorflowSpecHelper import org.tensorflow.framework.{DataType, NodeDef} class TruncateDivSpec extends TensorflowSpecHelper { "TruncateDiv" should "be correct for int" in { compare[Float]( NodeDef.newBuilder() .setName("truncateDiv_test") .putAttr("T", typeAttr(DataType.DT_INT32)) .setOp("TruncateDiv"), Seq(Tensor[Int](T(1, 6, 8, -1, -6, -8)), Tensor[Int](T(1, 5, 5, 1, 5, 5))), 0 ) } }
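// Worked example (plain Scala, not BigDL code): TruncateDiv is integer division that truncates
// toward zero, which is also what Scala's Int `/` does. For the inputs fed to the compare above,
// the element-wise results the TensorFlow op is expected to produce would be:
object TruncateDivWorkedExample {
  def main(args: Array[String]): Unit = {
    val numerators = Seq(1, 6, 8, -1, -6, -8)
    val denominators = Seq(1, 5, 5, 1, 5, 5)
    val quotients = numerators.zip(denominators).map { case (a, b) => a / b }
    // -6 / 5 truncates toward zero to -1 (floor division would give -2 instead).
    assert(quotients == Seq(1, 1, 1, -1, -1, -1))
  }
}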
intel-analytics/BigDL
scala/dllib/src/test/scala/com/intel/analytics/bigdl/dllib/utils/tf/loaders/TruncateDivSpec.scala
Scala
apache-2.0
1,329
package autolift.cats import autolift.{LiftMergeWith, LiftedMergeWith, LiftMergeWithSyntax, LiftMergeWithContext} import cats.{Functor, Apply} trait CatsLiftMergeWith[Obj1, Obj2, Fn] extends LiftMergeWith[Obj1, Obj2, Fn] with Serializable object CatsLiftMergeWith extends LowPriorityCatsLiftMergeWith{ def apply[Obj1, Obj2, Fn](implicit lift: CatsLiftMergeWith[Obj1, Obj2, Fn]): Aux[Obj1, Obj2, Fn, lift.Out] = lift implicit def base[F[_], G, H, G1 >: G, H1 >: H, Out0](implicit ap: Apply[F]): Aux[F[G], F[H], (G1, H1) => Out0, F[Out0]] = new CatsLiftMergeWith[F[G], F[H], (G1, H1) => Out0]{ type Out = F[Out0] def apply(fg: F[G], fh: F[H], f: (G1, H1) => Out0) = ap.ap{ ap.map(fh){ h: H => f(_: G, h) } }(fg) } } trait LowPriorityCatsLiftMergeWith{ type Aux[Obj1, Obj2, Fn, Out0] = CatsLiftMergeWith[Obj1, Obj2, Fn]{ type Out = Out0 } implicit def recur[F[_], G, H, Fn](implicit functor: Functor[F], lift: LiftMergeWith[G, H, Fn]): Aux[F[G], H, Fn, F[lift.Out]] = new CatsLiftMergeWith[F[G], H, Fn]{ type Out = F[lift.Out] def apply(fg: F[G], h: H, f: Fn) = functor.map(fg){ g: G => lift(g, h, f) } } } trait LiftMergeWithPackage extends LiftMergeWithSyntax with LiftMergeWithContext{ implicit def liftedMergeWithFunctor[A, B] = new Functor[LiftedMergeWith[A, B, ?]]{ def map[C, D](lm: LiftedMergeWith[A, B, C])(f: C => D) = lm map f } implicit def mkJw[Obj1, Obj2, Fn](implicit lift: CatsLiftMergeWith[Obj1, Obj2, Fn]): CatsLiftMergeWith.Aux[Obj1, Obj2, Fn, lift.Out] = lift }
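// Usage sketch (not part of the library sources; assumes cats' Option instances are available via
// cats.implicits): the `base` instance above lifts a binary function over two values that share an
// Apply context, so merging two Options applies the function only when both sides are defined.
object CatsLiftMergeWithUsageSketch {
  import cats.implicits._

  def main(args: Array[String]): Unit = {
    val lift = CatsLiftMergeWith[Option[Int], Option[Int], (Int, Int) => Int]
    assert(lift(Some(1), Some(2), _ + _) == Some(3))
    assert(lift(Some(1), None, _ + _) == None)
  }
}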
wheaties/AutoLifts
autolift-cats/src/main/scala/autolift/cats/LiftMergeWith.scala
Scala
apache-2.0
1,563
/* * Copyright 2014, by Vladimir Kostyukov and Contributors. * * This file is a part of a Finch library that may be found at * * https://github.com/finagle/finch * * Licensed under the Apache License, Version 2.0 (the "License"); * You may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. * * Contributor(s): * Ben Edwards */ package io.finch import com.twitter.finagle.Service import com.twitter.finagle.httpx.Request import com.twitter.util.{Await, Future} import io.finch.response.Ok import org.scalatest.{Matchers, FlatSpec} class ServiceOpsSpec extends FlatSpec with Matchers { val foo = Service.mk { (_: HttpRequest) => Future.value("foo") } val bar = Service.mk { (req: String) => { Future.value(Ok(req ++ "bar")) } } val combined = foo ! bar "ServiceOps" should "allow for chaining services" in { val req = Request("/") val content = combined(req) map { r => r.getContentString } Await.result(content) shouldBe "foobar" } }
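// Sketch of the composition exercised above (hypothetical; Finch's actual `!` operator is defined
// elsewhere, not in this file). The "foobar" expectation implies the first service's output feeds
// the second, which is a flatMap over the first result. This sketch reuses the Twitter Future and
// Await imports already present at the top of this file.
object ServiceChainingSketch {
  def chain[A, B, C](f: A => Future[B], g: B => Future[C]): A => Future[C] =
    a => f(a).flatMap(g)

  def main(args: Array[String]): Unit = {
    val fooFn: String => Future[String] = _ => Future.value("foo")
    val barFn: String => Future[String] = s => Future.value(s ++ "bar")
    val combined = chain(fooFn, barFn)
    assert(Await.result(combined("/")) == "foobar")
  }
}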
trane/finch
core/src/test/scala/io/finch/ServiceOpsSpec.scala
Scala
apache-2.0
1,422
/* * Copyright 2015 Dennis Vriend * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.github.dnvriend import com.github.dnvriend.activiti.ActivitiImplicits._ class HistoryServiceTest extends TestSpec { "HistoryService" should "show history" in { val deploymentOperation = repositoryService.createDeployment() .addClasspathResource("processes/simpletest.bpmn20.xml") .name("simpletest") .doDeploy deploymentOperation should be a 'success deploymentOperation.foreach { deployment ⇒ identityService.authenticateUserId("kermit") should be a 'success val startProcessOperation = runtimeService.startProcessByKey("simpletest") startProcessOperation should be a 'success startProcessOperation.foreach { processInstance ⇒ historyService.createHistoricProcessInstanceQuery().processInstanceId(processInstance.getId).asList should not be 'empty repositoryService.deleteDeploymentById(deployment.id, cascade = true) should be a 'success } } } }
dnvriend/activiti-test
helloworld/src/test/scala/com/github/dnvriend/HistoryServiceTest.scala
Scala
apache-2.0
1,551
package cakesolutions.kafka.akka import java.time.LocalDateTime import java.time.temporal.ChronoUnit import akka.actor._ import cakesolutions.kafka.KafkaConsumer import com.typesafe.config.Config import org.apache.kafka.clients.consumer.{CommitFailedException, KafkaConsumer => JKafkaConsumer} import org.apache.kafka.common.TopicPartition import org.apache.kafka.common.errors.WakeupException import org.apache.kafka.common.serialization.Deserializer import scala.collection.JavaConverters._ import scala.concurrent.duration._ import scala.language.implicitConversions import scala.reflect.runtime.universe.TypeTag import scala.util.{Failure, Success, Try} /** * An actor that wraps [[KafkaConsumer]]. * * The actor pulls batches of messages from Kafka for all subscribed partitions, * and forwards them to the supplied Akka actor reference in [[ConsumerRecords]] format. * * Before the actor continues pulling more data from Kafka, * the receiver of the data must confirm the batches by sending back a [[KafkaConsumerActor.Confirm]] message * that contains the offsets from the received batch. * This mechanism allows the receiver to control the maximum rate of messages it will receive. * By including the received offsets in the confirmation message, * we avoid accidentally confirming batches that have not been fully processed yet. * * Actor's Kafka subscriptions can be controlled via the Actor Messages: * [[KafkaConsumerActor.Subscribe]] and [[KafkaConsumerActor.Unsubscribe]]. * * For cases where there is Kafka or configuration issues, the Actor's supervisor strategy is applied. */ object KafkaConsumerActor { private[akka] sealed trait FullMessageApi /** * Actor API */ sealed trait MessageApi extends FullMessageApi /** * Actor API - Confirm receipt of previous records. * * The message should provide the offsets that are to be confirmed. * If the offsets don't match the offsets that were last sent, the confirmation is ignored. * Offsets can be committed to Kafka using optional commit flag. * * @param offsets the offsets that are to be confirmed * @param commit true to commit offsets */ final case class Confirm(offsets: Offsets, commit: Boolean = false) extends MessageApi /** * Sent when the actor is backing off delivery of messages due to suspected backpressure * caused by missed deliveries. * @param redeliveryCount the current redelivery count */ final case class BackingOff(redeliveryCount: Int) extends MessageApi /** * Actor API - Initiate consumption from Kafka or reset an already started stream. * * Subscription has four modes which provide a combination of either auto or manual partition assignment and either Kafka * managed or self managed commit offsets. * * - Auto partition: Kafka manages partition assignments between members of a consumer group. Offset commit points for each * partition are managed by Kafka. * * - Auto partition with manual offset: Kafka manages partition assignments between members of a consumer group. Offset commit * points are maintained by the client. * * - Manual partition: Topic and partitions are specified by the client. Offset commit points for each * partition are managed by Kafka. * * - Manual offset: Topic and partitions are specified by the client. Offset commit * points are maintained by the client. */ sealed trait Subscribe extends MessageApi object Subscribe { /** * Subscribe to topics in auto assigned partition mode, relying on Kafka to manage the commit point for each partition. 
* This is this simplest and most common subscription mode that provides a parallel streaming capability with at-least-once * semantics. * * In auto assigned partition mode, the consumer partitions are managed by Kafka. * This means that they can get automatically rebalanced with other consumers consuming from the same topic with the same group-id. * * The message consumption starting point will be decided by offset reset strategy in the consumer configuration. * * The client should ensure that received records are confirmed with 'commit = true' to ensure kafka tracks the commit point. * * @param topics the topics to subscribe to start consuming from * @param assignedListener Optionally provide a callback when partitions are assigned. Can be used if any initialisation is * required prior to receiving messages for the partition, such as to populate a cache. Default implementation * is to do nothing. * @param revokedListener Optionally provide a callback when partitions are revoked. Can be used if any cleanup is * required after a partition assignment is revoked. Default implementation * is to do nothing. */ final case class AutoPartition( topics: Iterable[String] = List(), assignedListener: List[TopicPartition] => Unit = _ => (), revokedListener: List[TopicPartition] => Unit = _ => () ) extends Subscribe /** * Subscribe to topics in auto assigned partition mode, relying on Kafka to manage the commit point for each partition. * This is this simplest and most common subscription mode that provides a parallel streaming capability with at-least-once * semantics. * * Unlike [[AutoPartition]], this mode relies exclusively on Kafka for commit offset management and without attempting any * optimizations for when same partitions are reassigned after a rebalance. * * In auto assigned partition mode, the consumer partitions are managed by Kafka. * This means that they can get automatically rebalanced with other consumers consuming from the same topic with the same group-id. * * The message consumption starting point will be decided by offset reset strategy in the consumer configuration. * * The client should ensure that received records are confirmed with 'commit = true' to ensure kafka tracks the commit point. * * @param topics the topics to subscribe to start consuming from * @param assignedListener Optionally provide a callback when partitions are assigned. Can be used if any initialisation is * required prior to receiving messages for the partition, such as to populate a cache. Default implementation * is to do nothing. * @param revokedListener Optionally provide a callback when partitions are revoked. Can be used if any cleanup is * required after a partition assignment is revoked. Default implementation * is to do nothing. */ final case class AutoPartitionBasic( topics: Iterable[String] = List(), assignedListener: List[TopicPartition] => Unit = _ => (), revokedListener: List[TopicPartition] => Unit = _ => () ) extends Subscribe /** * Subscribe to topics in auto assigned partition mode with client managed offset commit positions for each partition. * This subscription mode is typically used when performing some parallel stateful computation and storing the offset * position along with the state in some kind of persistent store. This allows for exactly-once state manipulation against * an at-least-once delivery stream. * * The client should provide callbacks to receive notifications of when partitions have been assigned or revoked. 
When * a partition has been assigned, the client should lookup the latest offsets for the given partitions from its store, and supply * those. The KafkaConsumerActor will seek to the specified positions. * * The client should ensure that received records are confirmed with 'commit = false' to ensure consumed records are * not committed back to kafka. * * @param topics the topics to subscribe to start consuming from * @param assignedListener a callback handler that should lookup the latest offsets for the provided topic/partitions. * @param revokedListener a callback to provide the oppurtunity to cleanup any in memory state for revoked partitions. */ final case class AutoPartitionWithManualOffset( topics: Iterable[String], assignedListener: List[TopicPartition] => Offsets, revokedListener: List[TopicPartition] => Unit ) extends Subscribe /** * Subscribe to topics in manually assigned partition mode, relying on Kafka to manage the commit point for each partition. * * In manually assigned partition mode, the consumer will specify the partitions directly, this means that Kafka will not be automatically * rebalance the partitions when new consumers appear in the consumer group. * * The message consumption starting point will be decided by offset reset strategy in the consumer configuration. * * The client should ensure that received records are confirmed with 'commit = true' to ensure kafka tracks the commit point. * * @param topicPartitions the topics with partitions to start consuming from */ final case class ManualPartition(topicPartitions: Iterable[TopicPartition]) extends Subscribe /** * Subscribe to topics in manually assigned partition mode, with client managed offset commit positions for each partition. * * In manually assigned partition mode, the consumer will specify the partitions directly, * This means that Kafka will not be automatically rebalance the partitions when new consumers appear in the consumer group. * * In addition to manually assigning the partitions, the partition offsets will be set to start from the given offsets. * * The client should ensure that received records are confirmed with 'commit = false' to ensure consumed records are * not committed back to kafka. * * @param offsets the topics with partitions and offsets to start consuming from */ final case class ManualOffset(offsets: Offsets) extends Subscribe /** * Subscribe to topics by providing a timestamp per partition denoting the point in time with the first offset that must be retrieved. * * In manually assigned partition mode, the consumer will specify the partitions directly, * This means that Kafka will not be automatically rebalance the partitions when new consumers appear in the consumer group. * * In addition to manually assigning the partitions, the partition offsets will be set to start from the given offsets. * * The client should ensure that received records are confirmed with 'commit = false' to ensure consumed records are * not committed back to kafka. * * @param offsets the topics with partitions and timestamps to start consuming from */ final case class ManualOffsetForTimes(offsets: Offsets) extends Subscribe } /** * Actor API - Unsubscribe from Kafka. */ case object Unsubscribe extends MessageApi private[akka] sealed trait InternalMessageApi extends FullMessageApi /** * Internal message indicating partitions have been revoked. */ private[akka] case object RevokeReset extends InternalMessageApi /** * Internal message indicating partitions have been reassigned. 
*/ private[akka] case object RevokeResume extends InternalMessageApi /** * Internal message used for triggering a [[ConsumerException]] */ private[akka] case object TriggerConsumerFailure extends InternalMessageApi /** * Exception type escalated through supervision to indicate an unrecoverable error. * * The last known subscription is included as part of the exception to support resubscription attempt on actor restart. */ final case class ConsumerException( lastSubscription: Option[Subscribe], message: String = "Exception thrown from Kafka consumer!", cause: Throwable = null ) extends Exception(message, cause) final case class KafkaConsumerInitFail( message: String = "Error occurred while initializing Kafka consumer!", cause: Throwable = null ) extends Exception(message, cause) /** * Utilities for creating configurations for the [[KafkaConsumerActor]]. */ object Conf { import scala.concurrent.duration.{MILLISECONDS => Millis} /** * Create configuration for [[KafkaConsumerActor]] from Typesafe config. * * Expected configuration values: * * - schedule.interval: poll latency (default 1 second) * - unconfirmed.timeout: Seconds before unconfirmed messages is considered for redelivery. To disable message redelivery provide a duration of 0. (default 3 seconds) */ def apply(config: Config): Conf = Conf().withConf(config) def durationFromConfig(config: Config, path: String) = Duration(config.getDuration(path, Millis), Millis) } /** * Configuration for [[KafkaConsumerActor]]. * * @param scheduleInterval Poll Latency. * @param unconfirmedTimeout Seconds before unconfirmed messages is considered for redelivery. * To disable message redelivery provide a duration of 0. * @param maxRedeliveries Maximum number of times an unconfirmed message will be redelivered downstream. * Redeliveries are only attempted if unconfirmedTimeout > 0. */ final case class Conf( scheduleInterval: FiniteDuration = 1000.millis, unconfirmedTimeout: FiniteDuration = 3.seconds, maxRedeliveries: Int = 3 ) { /** * Extend the config with additional Typesafe config. * The supplied config overrides existing properties. */ def withConf(config: Config): Conf = copy( scheduleInterval = if (config.hasPath("schedule.interval")) Conf.durationFromConfig(config, "schedule.interval") else scheduleInterval, unconfirmedTimeout = if (config.hasPath("unconfirmed.timeout")) Conf.durationFromConfig(config, "unconfirmed.timeout") else unconfirmedTimeout, maxRedeliveries= if (config.hasPath("max.redeliveries")) config.getInt("max.redeliveries") else maxRedeliveries ) } /** * Create Akka `Props` for [[KafkaConsumerActor]] from a Typesafe config. * * @param conf Typesafe config containing all the [[KafkaConsumer.Conf]] and [[KafkaConsumerActor.Conf]] related configurations. * @param keyDeserializer deserializer for the key * @param valueDeserializer deserializer for the value * @param downstreamActor the actor where all the consumed messages will be sent to * @tparam K key deserialiser type * @tparam V value deserialiser type */ def props[K: TypeTag, V: TypeTag]( conf: Config, keyDeserializer: Deserializer[K], valueDeserializer: Deserializer[V], downstreamActor: ActorRef ): Props = props( KafkaConsumer.Conf[K, V](conf, keyDeserializer, valueDeserializer), KafkaConsumerActor.Conf(conf), downstreamActor ) /** * Create Akka `Props` for [[KafkaConsumerActor]] from a Typesafe config. * * @param conf Typesafe config containing all the [[KafkaConsumer.Conf]] and [[KafkaConsumerActor.Conf]] related configurations. 
* @param keyDeserializer deserializer for the key * @param valueDeserializer deserializer for the value * @param downstreamActor the actor where all the consumed messages will be sent to * @param consumer Kafka consumer to inject (in which case `consumerConf` is ignored) * @tparam K key deserialiser type * @tparam V value deserialiser type */ def props[K: TypeTag, V: TypeTag]( conf: Config, keyDeserializer: Deserializer[K], valueDeserializer: Deserializer[V], downstreamActor: ActorRef, consumer: JKafkaConsumer[K, V] ): Props = props( KafkaConsumer.Conf[K, V](conf, keyDeserializer, valueDeserializer), KafkaConsumerActor.Conf(conf), downstreamActor, consumer ) /** * Create Akka `Props` for [[KafkaConsumerActor]]. * * @param consumerConf configurations for the [[KafkaConsumer]] * @param actorConf configurations for the [[KafkaConsumerActor]] * @param downstreamActor the actor where all the consumed messages will be sent to * @tparam K key deserialiser type * @tparam V value deserialiser type */ def props[K: TypeTag, V: TypeTag]( consumerConf: KafkaConsumer.Conf[K, V], actorConf: KafkaConsumerActor.Conf, downstreamActor: ActorRef ): Props = Props(new KafkaConsumerActorImpl[K, V](consumerConf, actorConf, downstreamActor)) /** * Create Akka `Props` for [[KafkaConsumerActor]]. * * @param consumerConf configurations for the [[KafkaConsumer]] * @param actorConf configurations for the [[KafkaConsumerActor]] * @param downstreamActor the actor where all the consumed messages will be sent to * @param consumer Kafka consumer to inject (in which case `consumerConf` is ignored) * @tparam K key deserialiser type * @tparam V value deserialiser type */ def props[K: TypeTag, V: TypeTag]( consumerConf: KafkaConsumer.Conf[K, V], actorConf: KafkaConsumerActor.Conf, downstreamActor: ActorRef, consumer: JKafkaConsumer[K, V] ): Props = Props(new KafkaConsumerActorImpl[K, V](consumerConf, actorConf, downstreamActor, Some(consumer))) /** * Create a [[KafkaConsumerActor]] from a Typesafe config. * * @param conf Typesafe config containing all the [[KafkaConsumer.Conf]] and [[KafkaConsumerActor.Conf]] related configurations. * @param keyDeserializer deserializer for the key * @param valueDeserializer deserializer for the value * @param downstreamActor the actor where all the consumed messages will be sent to * @tparam K key deserialiser type * @tparam V value deserialiser type * @param actorFactory the actor factory to create the actor with */ def apply[K: TypeTag, V: TypeTag]( conf: Config, keyDeserializer: Deserializer[K], valueDeserializer: Deserializer[V], downstreamActor: ActorRef )(implicit actorFactory: ActorRefFactory): KafkaConsumerActor = { val p = props(conf, keyDeserializer, valueDeserializer, downstreamActor) val ref = actorFactory.actorOf(p) fromActorRef(ref) } /** * Create a [[KafkaConsumerActor]] from a Typesafe config. * * @param conf Typesafe config containing all the [[KafkaConsumer.Conf]] and [[KafkaConsumerActor.Conf]] related configurations. 
* @param keyDeserializer deserializer for the key * @param valueDeserializer deserializer for the value * @param downstreamActor the actor where all the consumed messages will be sent to * @param consumer Kafka consumer to inject (in which case `consumerConf` is ignored) * @tparam K key deserialiser type * @tparam V value deserialiser type * @param actorFactory the actor factory to create the actor with */ def apply[K: TypeTag, V: TypeTag]( conf: Config, keyDeserializer: Deserializer[K], valueDeserializer: Deserializer[V], downstreamActor: ActorRef, consumer: JKafkaConsumer[K, V] )(implicit actorFactory: ActorRefFactory): KafkaConsumerActor = { val p = props(conf, keyDeserializer, valueDeserializer, downstreamActor, consumer) val ref = actorFactory.actorOf(p) fromActorRef(ref) } /** * Create a [[KafkaConsumerActor]]. * * @param consumerConf configurations for the [[KafkaConsumer]] * @param actorConf configurations for the [[KafkaConsumerActor]] * @param downstreamActor the actor where all the consumed messages will be sent to * @tparam K key deserialiser type * @tparam V value deserialiser type * @param actorFactory the actor factory to create the actor with */ def apply[K: TypeTag, V: TypeTag]( consumerConf: KafkaConsumer.Conf[K, V], actorConf: KafkaConsumerActor.Conf, downstreamActor: ActorRef )(implicit actorFactory: ActorRefFactory): KafkaConsumerActor = { val p = props(consumerConf, actorConf, downstreamActor) val ref = actorFactory.actorOf(p) fromActorRef(ref) } /** * Create a [[KafkaConsumerActor]]. * * @param consumerConf configurations for the [[KafkaConsumer]] * @param actorConf configurations for the [[KafkaConsumerActor]] * @param downstreamActor the actor where all the consumed messages will be sent to * @param consumer Kafka consumer to inject (in which case `consumerConf` is ignored) * @tparam K key deserialiser type * @tparam V value deserialiser type * @param actorFactory the actor factory to create the actor with */ def apply[K: TypeTag, V: TypeTag]( consumerConf: KafkaConsumer.Conf[K, V], actorConf: KafkaConsumerActor.Conf, downstreamActor: ActorRef, consumer: JKafkaConsumer[K, V] )(implicit actorFactory: ActorRefFactory): KafkaConsumerActor = { val p = props(consumerConf, actorConf, downstreamActor, consumer) val ref = actorFactory.actorOf(p) fromActorRef(ref) } /** * Create a [[KafkaConsumerActor]] wrapper from an existing ActorRef. */ def fromActorRef(ref: ActorRef): KafkaConsumerActor = new KafkaConsumerActor(ref) } /** * Classic, non-Akka API for interacting with [[KafkaConsumerActor]]. */ final class KafkaConsumerActor private (val ref: ActorRef) { import KafkaConsumerActor.{Confirm, Subscribe, Unsubscribe} /** * Initiate consumption from Kafka or reset an already started stream. * * @param subscription Either an AutoPartition, ManualPartition or ManualOffsets subscription. */ def subscribe(subscription: Subscribe): Unit = ref ! subscription /** * Unsubscribe from Kafka */ def unsubscribe(): Unit = ref ! Unsubscribe /** * Confirm receipt of previous records. * * The message should provide the offsets that are to be confirmed. * If the offsets don't match the offsets that were last sent, the confirmation is ignored. * Offsets can be committed to Kafka using optional commit flag. * * @param offsets the offsets that are to be confirmed * @param commit true to commit offsets */ def confirm(offsets: Offsets, commit: Boolean = false): Unit = ref ! 
Confirm(offsets, commit) } private final class KafkaConsumerActorImpl[K: TypeTag, V: TypeTag]( consumerConf: KafkaConsumer.Conf[K, V], actorConf: KafkaConsumerActor.Conf, downstreamActor: ActorRef, consumerOpt: Option[JKafkaConsumer[K, V]] = None ) extends Actor with ActorLogging with PollScheduling { import KafkaConsumerActor._ import PollScheduling.Poll import context.become /** * Implicit conversion to support calling the org.apache.kafka.clients.consumer.KafkaConsumer.offsetsForTimes method with a Map[TopicPartition, scala.Long]. */ implicit def toJavaOffsetQuery(offsetQuery: Map[TopicPartition, scala.Long]): java.util.Map[TopicPartition, java.lang.Long] = offsetQuery .map { case (tp, time) => tp -> new java.lang.Long(time) } .asJava type Records = ConsumerRecords[K, V] private val consumer = consumerOpt.getOrElse(KafkaConsumer[K, V](consumerConf)) // Handles partition reassignments in the kafka client private var trackPartitions:TrackPartitions = new EmptyTrackPartitions private val isTimeoutUsed = actorConf.unconfirmedTimeout.toMillis > 0 private val delayedPollTimeout = 200 // Receive states private sealed trait StateData { val subscription: Subscribe val lastConfirmedOffsets: Option[Offsets] def scheduleInterval: FiniteDuration = actorConf.scheduleInterval def toSubscribed: Subscribed = Subscribed(subscription, lastConfirmedOffsets) def advanceSubscription: Subscribe = { def advance(offsets: Offsets) = subscription match { case s: Subscribe.AutoPartition => s case s: Subscribe.AutoPartitionBasic => s case s: Subscribe.AutoPartitionWithManualOffset => Subscribe.AutoPartitionWithManualOffset(s.topics, s.assignedListener, s.revokedListener) case _: Subscribe.ManualPartition => Subscribe.ManualOffset(offsets) case _: Subscribe.ManualOffset => Subscribe.ManualOffset(offsets) case _: Subscribe.ManualOffsetForTimes => val timeOffsets = timeOffsets2regularOffsets(offsets) Subscribe.ManualOffset(timeOffsets) } lastConfirmedOffsets.map(advance).getOrElse(subscription) } } private def timeOffsets2regularOffsets(timeOffsets: Offsets) : Offsets = { import scala.collection.JavaConverters._ val javaOffsetsAndTimestamps = consumer.offsetsForTimes(timeOffsets.offsetsMap).asScala.toMap val offsets = javaOffsetsAndTimestamps.mapValues(_.offset()).toMap Offsets(offsets) } private case class Subscribed( subscription: Subscribe, lastConfirmedOffsets: Option[Offsets] ) extends StateData { def toUnconfirmed(unconfirmed: Records): Unconfirmed = Unconfirmed(subscription, lastConfirmedOffsets, unconfirmed) } private sealed trait UnconfirmedRecordsStateData extends StateData { val unconfirmed: Records /** * Number of attempts that have been made to deliver the unconfirmed records downstream */ def redeliveryCount: Int def noBackoffNeeded(): Boolean = redeliveryCount < actorConf.maxRedeliveries /** * Naive strategy to increment poll backoff when in redelivery. 
* @return */ override def scheduleInterval: FiniteDuration = redeliveryCount * super.scheduleInterval + super.scheduleInterval def isCurrentOffset(offsets: Offsets): Boolean = unconfirmed.offsets == offsets } private case class Unconfirmed( subscription: Subscribe, lastConfirmedOffsets: Option[Offsets], unconfirmed: Records, deliveryTime: LocalDateTime = LocalDateTime.now(), redeliveryCount: Int = 0 ) extends UnconfirmedRecordsStateData { def confirm(offsets: Offsets): Subscribed = Subscribed(subscription, Some(offsets)) def redelivered: Unconfirmed = copy(deliveryTime = LocalDateTime.now(), redeliveryCount = redeliveryCount + 1) def addToBuffer(buffered: Records): Buffered = Buffered(subscription, lastConfirmedOffsets, unconfirmed, deliveryTime, buffered, redeliveryCount) } private case class Buffered( subscription: Subscribe, lastConfirmedOffsets: Option[Offsets], unconfirmed: Records, deliveryTime: LocalDateTime = LocalDateTime.now(), buffered: Records, redeliveryCount: Int = 0 ) extends UnconfirmedRecordsStateData { def confirm(offsets: Offsets): Unconfirmed = Unconfirmed(subscription, Some(offsets), buffered, LocalDateTime.now()) def redelivered: Buffered = copy(deliveryTime = LocalDateTime.now(), redeliveryCount = redeliveryCount + 1) } override def receive: Receive = unsubscribed // Initial state private val unsubscribed: Receive = terminatedDownstreamReceive orElse { case Unsubscribe => log.debug("Already unsubscribed") case sub: Subscribe => subscribe(sub) log.debug("To Ready state") become(ready(Subscribed(sub, None))) pollImmediate(delayedPollTimeout) case Confirm(_, _) => log.warning("Attempted to confirm offsets while consumer wasn't subscribed") case _: Poll => // Do nothing } private def subscribedCommonReceive(state: StateData): Receive = { case Unsubscribe => log.info("Unsubscribing from Kafka") cancelPoll() unsubscribe() become(unsubscribed) case RevokeReset => log.info("Revoking Assignments - resetting state!") become(ready(state.toSubscribed)) case _: Subscribe => log.warning("Attempted to subscribe while consumer was already subscribed") case TriggerConsumerFailure => log.info("Triggering consumer failed!") throw consumerFailure(state) case RevokeResume => //Do nothing case poll: Poll if !isCurrentPoll(poll) => // Do nothing } private def unsubscribe(): Unit = { consumer.unsubscribe() trackPartitions.reset() } // No unconfirmed or buffered messages private def ready(state: Subscribed): Receive = subscribedCommonReceive(state) orElse terminatedDownstreamReceive orElse { case poll: Poll if isCurrentPoll(poll) => pollKafka(state, poll.timeout) match { case Some(records) => sendRecords(records) log.debug("To unconfirmed state") become(unconfirmed(state.toUnconfirmed(records))) pollImmediate() case None => schedulePoll(stateData = state) } case c: Confirm => log.info("Received a confirmation while nothing was unconfirmed. Offsets: {}", c.offsets) } // Unconfirmed message with client, buffer empty private def unconfirmed(state: Unconfirmed): Receive = unconfirmedCommonReceive(state) orElse { case poll: Poll if isCurrentPoll(poll) => if (isConfirmationTimeout(state.deliveryTime)) { log.debug("In unconfirmed: records timed out while waiting for a confirmation.") if (state.noBackoffNeeded()) { log.debug("In unconfirmed: redelivering.") sendRecords(state.unconfirmed) } else { log.debug("In unconfirmed: backing off.") downstreamActor ! 
BackingOff(state.redeliveryCount) } become(unconfirmed(state.redelivered)) } // If the last commit caused a partition revocation, // we don't poll to allow the unconfirmed to flush through, prior to the rebalance completion. if (trackPartitions.isRevoked) { log.debug("Partitions revoked. Not polling.") schedulePoll(stateData = state) } else { pollKafka(state, poll.timeout) match { case Some(records) => log.debug("To Buffer Full state") become(bufferFull(state.addToBuffer(records))) schedulePoll(stateData = state) case None => schedulePoll(stateData = state) } } case Confirm(offsets, commit) if state.isCurrentOffset(offsets) => log.debug("Records confirmed") val updatedState = state.confirm(offsets) val commitResult = if (commit) commitOffsets(updatedState, offsets) else Success({}) commitResult match { case Success(_) => log.debug("To Ready state") become(ready(updatedState)) // Immediate poll after confirm with block to reduce poll latency in case the is a backlog in Kafka but processing is fast. pollImmediate(delayedPollTimeout) case Failure(_) => log.debug("To RevokeAwait State") become(revokeAwait(updatedState, offsets)) schedulePoll(stateData = state) } } // Buffered message and unconfirmed message with the client. No need to poll until its confirmed, or timed out. private def bufferFull(state: Buffered): Receive = unconfirmedCommonReceive(state) orElse terminatedDownstreamReceive orElse { case poll: Poll if isCurrentPoll(poll) => // If an confirmation timeout is set and has expired, the message is redelivered if (isConfirmationTimeout(state.deliveryTime)) { log.debug("In bufferFull: records timed out while waiting for a confirmation.") if (state.noBackoffNeeded()) { log.debug("In bufferFull: redelivering.") sendRecords(state.unconfirmed) } else { log.debug("In bufferFull: backing off.") downstreamActor ! BackingOff(state.redeliveryCount) } become(bufferFull(state.redelivered)) } log.debug(s"Buffer is full. Not going to poll.") schedulePoll(stateData = state) // The next message can be sent immediately from the buffer. A poll to Kafka for new messages for the buffer also happens immediately. case Confirm(offsets, commit) if state.isCurrentOffset(offsets) => log.debug("Records confirmed") val updatedState = state.confirm(offsets) val commitResult = if (commit) commitOffsets(updatedState, offsets) else Success({}) commitResult match { case Success(_) => sendRecords(updatedState.unconfirmed) log.debug("To unconfirmed state") become(unconfirmed(updatedState)) pollImmediate() case Failure(_) => log.debug("To RevokeAwait State") become(revokeAwait(updatedState, offsets)) schedulePoll(stateData = state) } } /** * A state after a commit failure, awaiting confirmation of a rebalance to occur. We can either continue processing * if the rebalance completes and no existing partition assignments are removed, otherwise we clear down state are resume * from last committed offsets, which may result in some unavoidable redelivery. 
* @param offsets The offsets of the last delivered records that failed to commit to Kafka */ private def revokeAwait(state: StateData, offsets: Offsets): Receive = terminatedDownstreamReceive orElse { case RevokeResume => log.info("RevokeResume - Resuming processing post rebalance") state match { case u: Unconfirmed => sendRecords(u.unconfirmed) become(unconfirmed(u)) case b: Buffered => sendRecords(b.unconfirmed) become(bufferFull(b)) case s: Subscribed => become(ready(s)) } case RevokeReset => log.warning("RevokeReset - Resetting state to Committed offsets") become(ready(Subscribed(state.subscription, None))) case poll: Poll if isCurrentPoll(poll) => log.debug("Poll in Revoke") pollKafka(state, poll.timeout) match { case Some(records) => state match { case s: Subscribed => become(revokeAwait(s.toUnconfirmed(records), offsets)) case u: Unconfirmed => become(revokeAwait(u.addToBuffer(records), offsets)) case b: Buffered => throw consumerFailure(b) } schedulePoll(stateData = state) case None => schedulePoll(stateData = state) } case c: Confirm => log.info("Received a confirmation while waiting for rebalance to finish. Received offsets: {}", c.offsets) } private def subscribe(s: Subscribe): Unit = s match { case Subscribe.AutoPartition(topics, assignedListener, revokedListener) => log.info(s"Subscribing in auto partition assignment mode to topics [{}].", topics.mkString(",")) trackPartitions = new TrackPartitionsCommitMode(consumer, context.self, assignedListener, revokedListener) consumer.subscribe(topics.toList.asJava, trackPartitions) case Subscribe.AutoPartitionBasic(topics, assignedListener, revokedListener) => log.info(s"Subscribing in basic auto partition assignment mode to topics [{}].", topics.mkString(",")) trackPartitions = new TrackPartitionsCommitModeBasic(consumer, context.self, assignedListener, revokedListener) consumer.subscribe(topics.toList.asJava, trackPartitions) case Subscribe.AutoPartitionWithManualOffset(topics, assignedListener, revokedListener) => log.info(s"Subscribing in auto partition assignment with manual offset mode to topics [{}].", topics.mkString(",")) trackPartitions = new TrackPartitionsManualOffset(consumer, context.self, assignedListener, revokedListener) consumer.subscribe(topics.toList.asJava, trackPartitions) case Subscribe.ManualPartition(topicPartitions) => log.info("Subscribing in manual partition assignment mode to topic/partitions [{}].", topicPartitions.mkString(",")) consumer.assign(topicPartitions.toList.asJava) case Subscribe.ManualOffset(offsets) => log.info("Subscribing in manual partition assignment mode to partitions with offsets [{}]", offsets) consumer.assign(offsets.topicPartitions.toList.asJava) seekOffsets(offsets) case Subscribe.ManualOffsetForTimes(offsets) => log.info("Subscribing in manual partition assignment mode with timestamps to partitions with offsets [{}]", offsets) consumer.assign(offsets.topicPartitions.toList.asJava) val regularOffsets = timeOffsets2regularOffsets(offsets) seekOffsets(regularOffsets) } // The client is usually misusing the Consumer if incorrect Confirm offsets are provided private def unconfirmedCommonReceive(state: UnconfirmedRecordsStateData): Receive = subscribedCommonReceive(state) orElse { case Confirm(offsets, _) if !state.isCurrentOffset(offsets) => log.warning("Received confirmation for unexpected offsets: {}", offsets) } private def terminatedDownstreamReceive: Receive = { case Terminated(`downstreamActor`) => log.info("Downstream Actor terminated") context stop self } private def 
seekOffsets(offsets: Offsets): Unit = offsets.offsetsMap.foreach { case (key, value) => log.info(s"Seek to $key, $value") consumer.seek(key, value) } private def sendRecords(records: Records): Unit = { downstreamActor ! records } /** * Attempt to get new records from Kafka, * * @param timeout - specify a blocking poll timeout. Default 0 for non blocking poll. */ private def pollKafka(state: StateData, timeout: Int): Option[Records] = tryWithConsumer(state) { log.debug("Poll Kafka for {} milliseconds", timeout) val rs = consumer.poll(timeout) log.debug("Poll Complete!") if (rs.count() > 0) Some(ConsumerRecords(currentConsumerOffsets, rs)) else None } private def tryWithConsumer[T](state: StateData)(effect: => Option[T]): Option[T] = { try { effect } catch { case _: WakeupException => log.debug("Wakeup Exception, ignoring.") None case error: Exception => log.debug("Exception thrown from Kafka Consumer") throw consumerFailure(state, error) } } private def commitOffsets(state: StateData, offsets: Offsets): Try[Unit] = { log.debug("Committing offsets. {}", offsets) val currentOffsets = currentConsumerOffsets val currentPartitions = currentOffsets.topicPartitions val offsetsToCommit = offsets.keepOnly(currentPartitions) val nonCommittedOffsets = offsets.remove(currentPartitions) if (nonCommittedOffsets.nonEmpty) { log.warning(s"Cannot commit offsets for partitions the consumer is not subscribed to: {}", nonCommittedOffsets.topicPartitions.mkString(", ")) } tryCommit(offsetsToCommit, state) } private def tryCommit(offsetsToCommit: Offsets, state: StateData): Try[Unit] = { try { consumer.commitSync(offsetsToCommit.toCommitMap.asJava) Success({}) } catch { case _: WakeupException => log.debug("Wakeup Exception. Ignoring.") Success({}) case cfe: CommitFailedException => log.warning("Exception while committing {}", cfe.getMessage) Failure(cfe) case error: Exception => log.debug("Exception thrown from Kafka Consumer") throw consumerFailure(state, error) } } private def consumerFailure(state: StateData, cause: Exception = null) = ConsumerException(Some(state.advanceSubscription), cause = cause) private def schedulePoll(stateData: StateData): Unit = schedulePoll(stateData.scheduleInterval) private def currentConsumerOffsets: Offsets = { val offsetsMap = consumer.assignment().asScala .map(p => p -> consumer.position(p)) .toMap Offsets(offsetsMap) } override def postRestart(reason: Throwable): Unit = { super.postRestart(reason) recoverFromException(reason) } private def recoverFromException(ex: Throwable): Unit = ex match { case ConsumerException(lastSubscription, message, _) => log.warning(s"KafkaConsumerActor restarted: {}", message) lastSubscription.foreach { sub => log.info("Resubscribing: {}", sub) self ! sub } case _ => throw new RuntimeException("Unexpected exception thrown by KafkaConsumerActor", ex) } /** * True if records unconfirmed for longer than unconfirmedTimeoutSecs. */ private def isConfirmationTimeout(deliveryTime: LocalDateTime): Boolean = isTimeoutUsed && timeoutTime(deliveryTime).isBefore(LocalDateTime.now()) private def timeoutTime(deliveryTime: LocalDateTime) = deliveryTime.plus(actorConf.unconfirmedTimeout.toMillis, ChronoUnit.MILLIS) override def postStop(): Unit = { log.info("KafkaConsumerActor stopping") close() } override def preStart(): Unit = { context.watch(downstreamActor) } private def close(): Unit = try { consumer.close() } catch { case ex: Exception => log.error(ex, "Error occurred while closing consumer") } }
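// Usage sketch (assumptions: a reachable broker at localhost:9092, a topic named "topic1" and the
// consumer-side config keys below; none of these come from this file). It wires together the API
// documented above: build the actor with `props`, subscribe with `Subscribe.AutoPartition`, and
// have the downstream actor confirm each delivered batch with `Confirm(offsets, commit = true)`,
// as the AutoPartition docs recommend.
object KafkaConsumerActorUsageSketch {
  import akka.actor.{Actor, ActorLogging, ActorSystem, Props}
  import com.typesafe.config.ConfigFactory
  import org.apache.kafka.common.serialization.StringDeserializer

  class Receiver extends Actor with ActorLogging {
    override def receive: Receive = {
      case records: ConsumerRecords[_, _] =>
        // Process the batch here, then confirm it back to the consumer actor (the sender).
        log.info("Received batch with offsets {}", records.offsets)
        sender() ! KafkaConsumerActor.Confirm(records.offsets, commit = true)
    }
  }

  def main(args: Array[String]): Unit = {
    val system = ActorSystem("consumer-sketch")
    val receiver = system.actorOf(Props(new Receiver))

    // Hypothetical settings; both KafkaConsumer.Conf and KafkaConsumerActor.Conf read from here.
    val conf = ConfigFactory.parseString(
      """
        |bootstrap.servers = "localhost:9092"
        |group.id = "sketch-group"
        |auto.offset.reset = "earliest"
        |schedule.interval = 1 second
        |unconfirmed.timeout = 3 seconds
        |max.redeliveries = 3
      """.stripMargin)

    val consumer = system.actorOf(
      KafkaConsumerActor.props(conf, new StringDeserializer, new StringDeserializer, receiver))
    consumer ! KafkaConsumerActor.Subscribe.AutoPartition(List("topic1"))
  }
}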
simonsouter/scala-kafka-client
akka/src/main/scala/cakesolutions/kafka/akka/KafkaConsumerActor.scala
Scala
mit
41,215
import org.opencv.imgcodecs.Imgcodecs import org.opencv.features2d.DescriptorExtractor import org.opencv.features2d.Features2d import org.opencv.core.MatOfKeyPoint import org.opencv.core.Mat import org.opencv.features2d.FeatureDetector import org.opencv.features2d.DescriptorMatcher import org.opencv.core.MatOfDMatch import reflect._ /* * Finds corresponding points between a pair of images using local descriptors. * The correspondences are visualized in the image "scalaCorrespondences.png", * which is written to disk. */ object ScalaCorrespondenceMatchingDemo { def run() { println(s"\\nRunning ${classTag[this.type].toString.replace("$", "")}") // Detects keypoints and extracts descriptors in a given image of type Mat. def detectAndExtract(mat: Mat) = { // A special container class for KeyPoint. val keyPoints = new MatOfKeyPoint // We're using the ORB detector. val detector = FeatureDetector.create(FeatureDetector.ORB) detector.detect(mat, keyPoints) println(s"There were ${keyPoints.toArray.size} KeyPoints detected") // Let's just use the best KeyPoints. val sorted = keyPoints.toArray.sortBy(_.response).reverse.take(50) // There isn't a constructor that takes Array[KeyPoint], so we unpack // the array and use the constructor that can take any number of // arguments. val bestKeyPoints: MatOfKeyPoint = new MatOfKeyPoint(sorted: _*) // We're using the ORB descriptor. val extractor = DescriptorExtractor.create(DescriptorExtractor.ORB) val descriptors = new Mat extractor.compute(mat, bestKeyPoints, descriptors) println(s"${descriptors.rows} descriptors were extracted, each with dimension ${descriptors.cols}") (bestKeyPoints, descriptors) } // Load the images from the |resources| directory. val leftImage = Imgcodecs.imread(getClass.getResource("/img1.png").getPath) val rightImage = Imgcodecs.imread(getClass.getResource("/img2.png").getPath) // Detect KeyPoints and extract descriptors. val (leftKeyPoints, leftDescriptors) = detectAndExtract(leftImage) val (rightKeyPoints, rightDescriptors) = detectAndExtract(rightImage) // Match the descriptors. val matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE) // A special container class for DMatch. val dmatches = new MatOfDMatch // The backticks are because "match" is a keyword in Scala. matcher.`match`(leftDescriptors, rightDescriptors, dmatches) // Visualize the matches and save the visualization. val correspondenceImage = new Mat Features2d.drawMatches(leftImage, leftKeyPoints, rightImage, rightKeyPoints, dmatches, correspondenceImage) val filename = "scalaCorrespondences.png" println(s"Writing ${filename}") assert(Imgcodecs.imwrite(filename, correspondenceImage)) } }
DamianPilot382/Rubiks-Cube-Solver
opencv/sources/samples/java/sbt/src/main/scala/ScalaCorrespondenceMatchingDemo.scala
Scala
apache-2.0
2,879
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.codegen import java.util import org.apache.calcite.plan.RelOptPlanner import org.apache.calcite.rex.{RexBuilder, RexNode} import org.apache.calcite.sql.`type`.SqlTypeName import org.apache.commons.lang3.StringEscapeUtils import org.apache.flink.api.common.functions.MapFunction import org.apache.flink.api.common.typeinfo.BasicTypeInfo import org.apache.flink.api.java.typeutils.RowTypeInfo import org.apache.flink.table.api.TableConfig import org.apache.flink.table.calcite.FlinkTypeFactory import org.apache.flink.types.Row import scala.collection.JavaConverters._ /** * Evaluates constant expressions using Flink's [[FunctionCodeGenerator]]. */ class ExpressionReducer(config: TableConfig) extends RelOptPlanner.Executor with Compiler[MapFunction[Row, Row]] { private val EMPTY_ROW_INFO = new RowTypeInfo() private val EMPTY_ROW = new Row(0) override def reduce( rexBuilder: RexBuilder, constExprs: util.List[RexNode], reducedValues: util.List[RexNode]): Unit = { val typeFactory = rexBuilder.getTypeFactory.asInstanceOf[FlinkTypeFactory] val literals = constExprs.asScala.map(e => (e.getType.getSqlTypeName, e)).flatMap { // we need to cast here for RexBuilder.makeLiteral case (SqlTypeName.DATE, e) => Some( rexBuilder.makeCast( typeFactory.createTypeFromTypeInfo(BasicTypeInfo.INT_TYPE_INFO, e.getType.isNullable), e) ) case (SqlTypeName.TIME, e) => Some( rexBuilder.makeCast( typeFactory.createTypeFromTypeInfo(BasicTypeInfo.INT_TYPE_INFO, e.getType.isNullable), e) ) case (SqlTypeName.TIMESTAMP, e) => Some( rexBuilder.makeCast( typeFactory.createTypeFromTypeInfo(BasicTypeInfo.LONG_TYPE_INFO, e.getType.isNullable), e) ) // we don't support object literals yet, we skip those constant expressions case (SqlTypeName.ANY, _) | (SqlTypeName.ROW, _) | (SqlTypeName.ARRAY, _) | (SqlTypeName.MAP, _) | (SqlTypeName.MULTISET, _) => None case (_, e) => Some(e) } val literalTypes = literals.map(e => FlinkTypeFactory.toTypeInfo(e.getType)) val resultType = new RowTypeInfo(literalTypes: _*) // generate MapFunction val generator = new FunctionCodeGenerator(config, false, EMPTY_ROW_INFO) val result = generator.generateResultExpression( resultType, resultType.getFieldNames, literals) val generatedFunction = generator.generateFunction[MapFunction[Row, Row], Row]( "ExpressionReducer", classOf[MapFunction[Row, Row]], s""" |${result.code} |return ${result.resultTerm}; |""".stripMargin, resultType) val clazz = compile(getClass.getClassLoader, generatedFunction.name, generatedFunction.code) val function = clazz.newInstance() // execute val reduced = function.map(EMPTY_ROW) // add the reduced results or keep them unreduced var i = 0 var reducedIdx = 0 while (i < constExprs.size()) { val unreduced = constExprs.get(i) 
unreduced.getType.getSqlTypeName match { // we insert the original expression for object literals case SqlTypeName.ANY | SqlTypeName.ROW | SqlTypeName.ARRAY | SqlTypeName.MAP | SqlTypeName.MULTISET => reducedValues.add(unreduced) case _ => val reducedValue = reduced.getField(reducedIdx) // RexBuilder handle double literal incorrectly, convert it into BigDecimal manually val value = if (unreduced.getType.getSqlTypeName == SqlTypeName.DOUBLE) { new java.math.BigDecimal(reducedValue.asInstanceOf[Number].doubleValue()) } else { reducedValue } val literal = rexBuilder.makeLiteral( value, unreduced.getType, true) reducedValues.add(literal) reducedIdx += 1 } i += 1 } } }
yew1eb/flink
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/codegen/ExpressionReducer.scala
Scala
apache-2.0
4,903
package cmdreader.std

import cmdreader._
import types._
import util._
import java.math.BigInteger

class OAvg extends CommandOperator {
  override def getName(): String = "avg"
  override def getOpAlias() = "@"
  override def isValidArg0(n: Int): Boolean = n == 2
  override def apply(args: Array[Type]): Type = {
    MathUtil.idivide(MathUtil.add(args(0), args(1)), TMountain(2))
  }
  def getPrecedence() = PStandard.ADD_SUBT
  def isReversed() = false
  def hasAssignmentEquiv() = true
  def getDoubleBase() = None
}
bluebear94/bag
src/main/scala/cmdreader/std/OAvg.scala
Scala
gpl-3.0
520
/* Author: Matei Zaharia Developed as part of the SNAP project (http://snap.cs.berkeley.edu/) */ package siren import scala.collection.mutable.ArrayBuffer object Utils { /** Count how many bytes in the array are equal to a given value */ def count(bytes: Array[Byte], value: Byte): Int = { var i = 0 var count = 0 while (i < bytes.length) { if (bytes(i) == value) count += 1 i += 1 } count } /** Split a string around instances of a given delimiter */ def split(s: String, delimiter: Char): Seq[String] = { val buf = new ArrayBuffer[String] var i = 0 while (i < s.length) { var j = i while (j < s.length && s.charAt(j) != delimiter) { j += 1 } if (j > i) { buf += s.substring(i, j); } i = j while (i < s.length && s.charAt(i) == delimiter) { i += 1 } } return buf } /* // Check whether a SNP in a child could have been inherited from two parents def mendelOK(child: SNP, mom: SNP, dad: SNP): Boolean = { ((mom.contains(child.allele1) && dad.contains(child.allele2)) || (dad.contains(child.allele1) && mom.contains(child.allele2))) } // Check whether the differences between a child and parents are OK in terms of inheritance; // prints a report on the differences to stdout def scoreTrio( child: SimpleVC, mom: SimpleVC, dad: SimpleVC, start: Int = 0, end: Int = Int.MaxValue, onlyPrintBad: Boolean = false) { val locs = getDifferenceLocs(child, mom, dad, start, end) println("%15s %5s %5s".format("child", "mom", "dad")) var numOk = 0 var numUnsure = 0 var numBad = 0 for (loc <- locs) { val (childSnp, momSnp, dadSnp) = (child.snps(loc), mom.snps(loc), dad.snps(loc)) val ok = mendelOK(childSnp, momSnp, dadSnp) val oneUncalled = childSnp.uncalled || momSnp.uncalled || dadSnp.uncalled var mark = "-" if (ok) { mark = "-" numOk += 1 } else if (oneUncalled) { mark = "?" numUnsure += 1 } else { mark = "EEK" numBad += 1 } if (!onlyPrintBad || mark == "EEK") { println("%8d:%6s%6s%6s%6s".format(loc, childSnp, momSnp, dadSnp, mark)) } } printf("Total: %d, OK: %d, unsure: %d, bad: %d\\n", locs.size, numOk, numUnsure, numBad) } // Get the list of locations where any of the genomes in a trio differs from the reference def getDifferenceLocs( child: SimpleVC, mom: SimpleVC, dad: SimpleVC, start: Int = 0, end: Int = Int.MaxValue): Seq[Int] = { def locsInRange(vc: SimpleVC) = { vc.diffList.map(_._1).filter(x => x >= start && x < end) } (locsInRange(child) ++ locsInRange(mom) ++ locsInRange(dad)).toSet.toArray.sorted } def parsePhred(score: Char): Int = score - 33 def parsePhred(score: Byte): Int = score - 33 def scoreSNPs(vc: SimpleVC, trueSNPs: Seq[(Int, SNP)], start: Int = 0, end: Int = Int.MaxValue) { val calledDiffs = vc.diffList.filter(p => p._1 >= start && p._1 < end).toSet val trueDiffs = trueSNPs.filter(p => p._1 >= start && p._1 < end).toSet val calledDiffsMap = calledDiffs.toMap val trueDiffsMap = trueDiffs.toMap val falsePositiveLocs = calledDiffs.map(_._1) -- trueDiffs.map(_._1) // Set of locations we called wrong val falseNegativeLocs = trueDiffs.map(_._1) -- calledDiffs.map(_._1) // Set of locations we missed val wronglyCalled = calledDiffs.filter(p => trueDiffsMap.contains(p._1) && trueDiffsMap(p._1) != p._2) val falseNegativesSkipped = falseNegativeLocs.filter(l => vc.snps(l).uncalled).size val badFalseNegatives = falseNegativeLocs.size - falseNegativesSkipped println("False negatives:") falseNegativeLocs.toSeq.sorted.foreach(l => printf("%9d %5s %5s\\n", l, vc.snps(l), trueDiffsMap(l))) println("False negatives not skipped:") falseNegativeLocs.toSeq.sorted.filter(l => !vc.snps(l).uncalled) .foreach(l => 
printf("%9d %5s %5s\\n", l, vc.snps(l), trueDiffsMap(l))) println("False positives:") falsePositiveLocs.toSeq.sorted.foreach(l => printf("%9d %5s %5s\\n", l, calledDiffsMap(l), "-")) println("Wrongly called:") wronglyCalled.toSeq.sortBy(_._1).foreach(p => printf("%9d %5s %5s\\n", p._1, p._2, trueDiffsMap(p._1))) printf("Total: %d, false negatives: %d (%d non-skipped), false positives: %d, wrongly called: %d\\n", trueDiffs.size, falseNegativeLocs.size, badFalseNegatives, falsePositiveLocs.size, wronglyCalled.size) } */ }
fnothaft/siren-release
src/main/scala/siren/Utils.scala
Scala
bsd-2-clause
4,629
package org.functionalkoans.forscala

import org.functionalkoans.forscala.support.KoanFunSuite
import org.scalatest.Matchers

class AboutLazySequences extends KoanFunSuite with Matchers {

  koan("Creating a lazy collection from a strict collection") {
    val strictList = List(10, 20, 30)
    val lazyList = strictList.view
    lazyList.head should be(__)
  }

  koan("Strict collection always processes its elements but " +
    "lazy collection does it on demand") {
    var x = 0
    def inc = {x += 1; x}

    val strictList = List(inc _, inc _, inc _)
    strictList.map(f => f).head should be(__)
    x should be(__)
    strictList.map(f => f).head
    x should be(__)

    x = 0
    val lazyList = strictList.view
    lazyList.map(f => f).head should be(__)
    x should be(__)
    lazyList.map(f => f).head should be(__)
    x should be(__)
  }

  koan("Lazy collection sometimes avoids processing errors") {
    val lazyList = List(2, -2, 0, 4).view map { 2 / _ }
    lazyList.head should be(__)
    lazyList(1) should be(__)
    intercept[ArithmeticException] {
      lazyList(2)
    }
  }

  koan("Lazy collections could also be infinite") {
    val infinite = Stream.from(1)
    infinite.take(4).sum should be(__)
    Stream.continually(1).take(4).sum should be(__)
  }

  koan("Always remember tail of a lazy collection is never computed unless required") {
    def makeLazy(value: Int): Stream[Int] = {
      Stream.cons(value, makeLazy(value + 1))
    }
    val stream = makeLazy(1)
    stream.head should be(__)
    stream.tail.head should be(__)
  }
}
pharmpress/codingdojo
scala-koans/src/test/scala/org/functionalkoans/forscala/AboutLazySequences.scala
Scala
apache-2.0
1,574
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.streaming.ui import scala.xml.Node import org.apache.spark.ui.{UIUtils => SparkUIUtils} private[ui] abstract class BatchTableBase(tableId: String, batchInterval: Long) { protected def columns: Seq[Node] = { <th>Batch Time</th> <th>Records</th> <th>Scheduling Delay {SparkUIUtils.tooltip("Time taken by Streaming scheduler to submit jobs of a batch", "top")} </th> <th>Processing Time {SparkUIUtils.tooltip("Time taken to process all jobs of a batch", "top")}</th> } /** * Return the first failure reason if finding in the batches. */ protected def getFirstFailureReason(batches: Seq[BatchUIData]): Option[String] = { batches.flatMap(_.outputOperations.flatMap(_._2.failureReason)).headOption } protected def getFirstFailureTableCell(batch: BatchUIData): Seq[Node] = { val firstFailureReason = batch.outputOperations.flatMap(_._2.failureReason).headOption firstFailureReason.map { failureReason => val failureReasonForUI = UIUtils.createOutputOperationFailureForUI(failureReason) UIUtils.failureReasonCell( failureReasonForUI, rowspan = 1, includeFirstLineInExpandDetails = false) }.getOrElse(<td>-</td>) } protected def baseRow(batch: BatchUIData): Seq[Node] = { val batchTime = batch.batchTime.milliseconds val formattedBatchTime = SparkUIUtils.formatBatchTime(batchTime, batchInterval) val numRecords = batch.numRecords val schedulingDelay = batch.schedulingDelay val formattedSchedulingDelay = schedulingDelay.map(SparkUIUtils.formatDuration).getOrElse("-") val processingTime = batch.processingDelay val formattedProcessingTime = processingTime.map(SparkUIUtils.formatDuration).getOrElse("-") val batchTimeId = s"batch-$batchTime" <td id={batchTimeId} sorttable_customkey={batchTime.toString} isFailed={batch.isFailed.toString}> <a href={s"batch?id=$batchTime"}> {formattedBatchTime} </a> </td> <td sorttable_customkey={numRecords.toString}>{numRecords.toString} records</td> <td sorttable_customkey={schedulingDelay.getOrElse(Long.MaxValue).toString}> {formattedSchedulingDelay} </td> <td sorttable_customkey={processingTime.getOrElse(Long.MaxValue).toString}> {formattedProcessingTime} </td> } private def batchTable: Seq[Node] = { <table id={tableId} class="table table-bordered table-striped table-sm sortable"> <thead> {columns} </thead> <tbody> {renderRows} </tbody> </table> } def toNodeSeq: Seq[Node] = { batchTable } protected def createOutputOperationProgressBar(batch: BatchUIData): Seq[Node] = { <td class="progress-cell"> { SparkUIUtils.makeProgressBar( started = batch.numActiveOutputOp, completed = batch.numCompletedOutputOp, failed = batch.numFailedOutputOp, skipped = 0, reasonToNumKilled = Map.empty, total = batch.outputOperations.size) } </td> } /** * Return HTML for all rows of this table. 
*/ protected def renderRows: Seq[Node] } private[ui] class ActiveBatchTable( runningBatches: Seq[BatchUIData], waitingBatches: Seq[BatchUIData], batchInterval: Long) extends BatchTableBase("active-batches-table", batchInterval) { private val firstFailureReason = getFirstFailureReason(runningBatches) override protected def columns: Seq[Node] = super.columns ++ { <th>Output Ops: Succeeded/Total</th> <th>Status</th> ++ { if (firstFailureReason.nonEmpty) { <th>Error</th> } else { Nil } } } override protected def renderRows: Seq[Node] = { // The "batchTime"s of "waitingBatches" must be greater than "runningBatches"'s, so display // waiting batches before running batches waitingBatches.flatMap(batch => <tr>{waitingBatchRow(batch)}</tr>) ++ runningBatches.flatMap(batch => <tr>{runningBatchRow(batch)}</tr>) } private def runningBatchRow(batch: BatchUIData): Seq[Node] = { baseRow(batch) ++ createOutputOperationProgressBar(batch) ++ <td>processing</td> ++ { if (firstFailureReason.nonEmpty) { getFirstFailureTableCell(batch) } else { Nil } } } private def waitingBatchRow(batch: BatchUIData): Seq[Node] = { baseRow(batch) ++ createOutputOperationProgressBar(batch) ++ <td>queued</td>++ { if (firstFailureReason.nonEmpty) { // Waiting batches have not run yet, so must have no failure reasons. <td>-</td> } else { Nil } } } } private[ui] class CompletedBatchTable(batches: Seq[BatchUIData], batchInterval: Long) extends BatchTableBase("completed-batches-table", batchInterval) { private val firstFailureReason = getFirstFailureReason(batches) override protected def columns: Seq[Node] = super.columns ++ { <th>Total Delay {SparkUIUtils.tooltip("Total time taken to handle a batch", "top")}</th> <th>Output Ops: Succeeded/Total</th> ++ { if (firstFailureReason.nonEmpty) { <th>Error</th> } else { Nil } } } override protected def renderRows: Seq[Node] = { batches.flatMap(batch => <tr>{completedBatchRow(batch)}</tr>) } private def completedBatchRow(batch: BatchUIData): Seq[Node] = { val totalDelay = batch.totalDelay val formattedTotalDelay = totalDelay.map(SparkUIUtils.formatDuration).getOrElse("-") baseRow(batch) ++ { <td sorttable_customkey={totalDelay.getOrElse(Long.MaxValue).toString}> {formattedTotalDelay} </td> } ++ createOutputOperationProgressBar(batch)++ { if (firstFailureReason.nonEmpty) { getFirstFailureTableCell(batch) } else { Nil } } } }
matthewfranglen/spark
streaming/src/main/scala/org/apache/spark/streaming/ui/AllBatchesTable.scala
Scala
mit
6,627
package io.buoyant.linkerd

import com.twitter.finagle.service.Retries
import com.twitter.util.Duration
import io.buoyant.config.Parser
import io.buoyant.router.RetryBudgetConfig
import org.scalatest.FunSuite

class RetriesConfigTest extends FunSuite {

  def parse(yaml: String): RetriesConfig = {
    val mapper = Parser.objectMapper(yaml, Nil)
    mapper.readValue[RetriesConfig](yaml)
  }

  test("empty") {
    assert(parse("{}") == RetriesConfig(None, None))
  }

  test("constant backoff") {
    val yaml = s"""|backoff:
                   | kind: constant
                   | ms: 30
                   |""".stripMargin

    val config = ConstantBackoffConfig(30)
    assert(parse(yaml) == RetriesConfig(Some(config), None))
    assert(config.mk.isInstanceOf[Stream[Duration]])
  }

  test("jittered backoff") {
    val yaml = s"""|backoff:
                   | kind: jittered
                   | minMs: 30
                   | maxMs: 6000
                   |""".stripMargin

    val config = JitteredBackoffConfig(Some(30), Some(6000))
    assert(parse(yaml) == RetriesConfig(Some(config), None))
    assert(config.mk.isInstanceOf[Stream[Duration]])
  }

  test("jittered backoff: no min") {
    val yaml = s"""|backoff:
                   | kind: jittered
                   | maxMs: 6000
                   |""".stripMargin

    val config = JitteredBackoffConfig(None, Some(6000))
    assert(parse(yaml) == RetriesConfig(Some(config), None))
    val e = intercept[IllegalArgumentException] {
      assert(config.mk.isInstanceOf[Stream[Duration]])
    }
    assert(e.getMessage == "'minMs' must be specified")
  }

  test("budget") {
    val yaml = s"""|budget:
                   | ttlSecs: 12
                   | minRetriesPerSec: 20
                   | percentCanRetry: 0.33
                   |""".stripMargin

    assert(parse(yaml) == RetriesConfig(None, Some(RetryBudgetConfig(Some(12), Some(20), Some(0.33)))))
  }
}
denverwilliams/linkerd
linkerd/core/src/test/scala/io/buoyant/linkerd/RetriesConfigTest.scala
Scala
apache-2.0
1,851
package com.rasterfoundry.database import com.rasterfoundry.datamodel._ import com.rasterfoundry.common.Generators.Implicits._ import com.rasterfoundry.database.Implicits._ import doobie.implicits._ import cats.implicits._ import org.scalacheck.Prop.forAll import org.scalatest._ import org.scalatestplus.scalacheck.Checkers import com.rasterfoundry.datamodel.PageRequest import com.typesafe.scalalogging.LazyLogging import scala.util.Random class ProjectDaoSpec extends FunSuite with Matchers with Checkers with DBTestConfig with LazyLogging with PropTestHelpers { test("insert a project") { check { forAll { (user: User.Create, org: Organization.Create, project: Project.Create, platform: Platform) => { val projInsertIO = for { (_, _, _, dbProject) <- insertUserOrgPlatProject(user, org, platform, project) } yield dbProject val insertedProject = projInsertIO.transact(xa).unsafeRunSync insertedProject.name == project.name && insertedProject.description == project.description && insertedProject.visibility == project.visibility && insertedProject.tileVisibility == project.tileVisibility && insertedProject.isAOIProject == project.isAOIProject && insertedProject.aoiCadenceMillis == project.aoiCadenceMillis && insertedProject.tags == project.tags && insertedProject.isSingleBand == project.isSingleBand && insertedProject.singleBandOptions == project.singleBandOptions } } } } test("update a project") { check { forAll { (user: User.Create, org: Organization.Create, insertProject: Project.Create, updateProject: Project.Create, platform: Platform) => { val rowsAndUpdateIO = for { (dbUser, _, _, dbProject) <- insertUserOrgPlatProject( user, org, platform, insertProject) fixedUpUpdateProject = fixupProjectCreate(dbUser, updateProject) .toProject(dbUser, dbProject.defaultLayerId) affectedRows <- ProjectDao.updateProject(fixedUpUpdateProject, dbProject.id) fetched <- ProjectDao.unsafeGetProjectById(dbProject.id) } yield { (affectedRows, fetched) } val (affectedRows, updatedProject) = rowsAndUpdateIO.transact(xa).unsafeRunSync affectedRows == 1 && updatedProject.owner == user.id && updatedProject.name == updateProject.name && updatedProject.description == updateProject.description && updatedProject.visibility == updateProject.visibility && updatedProject.tileVisibility == updateProject.tileVisibility && updatedProject.isAOIProject == updateProject.isAOIProject && updatedProject.aoiCadenceMillis == updateProject.aoiCadenceMillis && updatedProject.tags == updateProject.tags && updatedProject.isSingleBand == updateProject.isSingleBand && updatedProject.singleBandOptions == updateProject.singleBandOptions } } } } test("delete a project") { check { forAll { (user: User.Create, org: Organization.Create, project: Project.Create, platform: Platform) => { val deleteIO = for { (_, _, _, dbProject) <- insertUserOrgPlatProject(user, org, platform, project) fetched <- ProjectDao.getProjectById(dbProject.id) _ <- ProjectDao.deleteProject(dbProject.id) fetched2 <- ProjectDao.getProjectById(dbProject.id) } yield { (fetched, fetched2) } val (fetched1, fetched2) = deleteIO.transact(xa).unsafeRunSync !fetched1.isEmpty && fetched2.isEmpty } } } } test("list projects") { check { forAll { (user: User.Create, org: Organization.Create, project: Project.Create, platform: Platform, pageRequest: PageRequest) => { val projectsListIO = for { (dbUser, _, _, _) <- insertUserOrgPlatProject(user, org, platform, project) listedProjects <- { ProjectDao .authQuery(dbUser, ObjectType.Project) .filter(dbUser) .page(pageRequest) 
.flatMap(ProjectDao.projectsToProjectsWithRelated) } } yield { listedProjects } val projectsWithRelatedPage = projectsListIO.transact(xa).unsafeRunSync.results.head.owner assert( projectsWithRelatedPage.id == user.id, "Listed project's owner should be the same as the creating user") true } } } } // addPermission and getPermissions test("add a permission to a project and get it back") { check { forAll { (userTeamOrgPlat: (User.Create, Team.Create, Organization.Create, Platform), acr: ObjectAccessControlRule, project: Project.Create, grantedUser: User.Create) => { val projectPermissionIO = for { projMiscInsert <- fixUpProjMiscInsert(userTeamOrgPlat, project) (projectInsert, dbUserTeamOrgPlat) = projMiscInsert dbGrantedUser <- UserDao.create(grantedUser) dbUserGrantedTeamOrgPlat = dbUserTeamOrgPlat.copy( _1 = dbGrantedUser) acrInsert = fixUpObjectAcr(acr, dbUserGrantedTeamOrgPlat) _ <- ProjectDao.addPermission(projectInsert.id, acrInsert) permissions <- ProjectDao.getPermissions(projectInsert.id) } yield { (permissions, acrInsert) } val (permissions, acrInsert) = projectPermissionIO.transact(xa).unsafeRunSync assert( permissions.flatten.headOption == Some(acrInsert), "Inserting a permission to a project and get it back should return the same granted permission.") true } } } } // addPermissionsMany and getPermissions test("add multiple permissions to a project and get them back") { check { forAll { (userTeamOrgPlat: (User.Create, Team.Create, Organization.Create, Platform), acrList: List[ObjectAccessControlRule], project: Project.Create, grantedUser: User.Create) => { val projectPermissionsIO = for { projMiscInsert <- fixUpProjMiscInsert(userTeamOrgPlat, project) (projectInsert, dbUserTeamOrgPlat) = projMiscInsert dbGrantedUser <- UserDao.create(grantedUser) dbUserGrantedTeamOrgPlat = dbUserTeamOrgPlat.copy( _1 = dbGrantedUser) acrListToInsert = acrList.map( fixUpObjectAcr(_, dbUserGrantedTeamOrgPlat)) permissionsInsert <- ProjectDao.addPermissionsMany( projectInsert.id, acrListToInsert) permissionsBack <- ProjectDao.getPermissions(projectInsert.id) } yield { (permissionsInsert, permissionsBack) } val (permissionsInsert, permissionsBack) = projectPermissionsIO.transact(xa).unsafeRunSync assert( permissionsInsert.diff(permissionsBack).length == 0, "Inserting a list of permissions and getting them back should be the same list of permissions.") true } } } } // addPermission, replacePermissions, and getPermissions test("add a permission to a project and replace it with many others") { check { forAll { (userTeamOrgPlat: (User.Create, Team.Create, Organization.Create, Platform), acr1: ObjectAccessControlRule, acrList: List[ObjectAccessControlRule], project: Project.Create, grantedUser: User.Create) => { val projectReplacePermissionsIO = for { projMiscInsert <- fixUpProjMiscInsert(userTeamOrgPlat, project) (projectInsert, dbUserTeamOrgPlat) = projMiscInsert dbGrantedUser <- UserDao.create(grantedUser) dbUserGrantedTeamOrgPlat = dbUserTeamOrgPlat.copy( _1 = dbGrantedUser) acrInsert1 = fixUpObjectAcr(acr1, dbUserGrantedTeamOrgPlat) permissions <- ProjectDao.addPermission(projectInsert.id, acrInsert1) _ = logger.trace(s"Access Control Rules Available: $permissions") acrListToInsert = acrList.map( fixUpObjectAcr(_, dbUserGrantedTeamOrgPlat)) permReplaced <- ProjectDao.replacePermissions(projectInsert.id, acrListToInsert) permissionsBack <- ProjectDao.getPermissions(projectInsert.id) } yield { (permReplaced, permissionsBack) } val (permReplaced, permissionsBack) = 
projectReplacePermissionsIO.transact(xa).unsafeRunSync assert( permReplaced.diff(permissionsBack).length == 0, "Replacing project permissions and get them back should return same permissions.") true } } } } // addPermissionsMany, deletePermissions, and getPermissions test("add permissions to a project and delete them") { check { forAll { (userTeamOrgPlat: (User.Create, Team.Create, Organization.Create, Platform), acrList: List[ObjectAccessControlRule], project: Project.Create, grantedUser: User.Create) => { val projectDeletePermissionsIO = for { projMiscInsert <- fixUpProjMiscInsert(userTeamOrgPlat, project) (projectInsert, dbUserTeamOrgPlat) = projMiscInsert dbGrantedUser <- UserDao.create(grantedUser) dbUserGrantedTeamOrgPlat = dbUserTeamOrgPlat.copy( _1 = dbGrantedUser) acrListToInsert = acrList.map( fixUpObjectAcr(_, dbUserGrantedTeamOrgPlat)) _ <- ProjectDao.addPermissionsMany(projectInsert.id, acrListToInsert) permsDeleted <- ProjectDao.deletePermissions(projectInsert.id) permissionsBack <- ProjectDao.getPermissions(projectInsert.id) } yield { (permsDeleted, permissionsBack) } val (permsDeleted, permissionsBack) = projectDeletePermissionsIO.transact(xa).unsafeRunSync assert( permsDeleted == 1, "Deleting project permissions should give number of rows updated.") assert( permissionsBack.length == 0, "Getting back permissions after deletion should return an empty list.") true } } } } // listUserActions test("add permissions to a project and list user actions") { check { forAll { (userTeamOrgPlat: (User.Create, Team.Create, Organization.Create, Platform), acrList: List[ObjectAccessControlRule], project: Project.Create, grantedUser: User.Create) => { val userActionsIO = for { projMiscInsert <- fixUpProjMiscInsert(userTeamOrgPlat, project) (projectInsert, dbUserTeamOrgPlat) = projMiscInsert (dbUser, dbTeam, dbOrg, dbPlatform) = dbUserTeamOrgPlat dbGrantedUser <- UserDao.create(grantedUser) dbUserGrantedTeamOrgPlat = dbUserTeamOrgPlat.copy( _1 = dbGrantedUser) ugr <- { UserGroupRoleDao.create( UserGroupRole .Create(dbGrantedUser.id, GroupType.Platform, dbPlatform.id, GroupRole.Member) .toUserGroupRole(dbUser, MembershipStatus.Approved)) >> UserGroupRoleDao.create( UserGroupRole .Create(dbGrantedUser.id, GroupType.Organization, dbOrg.id, GroupRole.Member) .toUserGroupRole(dbUser, MembershipStatus.Approved)) >> UserGroupRoleDao.create( UserGroupRole .Create(dbGrantedUser.id, GroupType.Team, dbTeam.id, GroupRole.Member) .toUserGroupRole(dbUser, MembershipStatus.Approved)) } _ = logger.trace(s"Created UGR: $ugr") acrListToInsert = acrList.map( fixUpObjectAcr(_, dbUserGrantedTeamOrgPlat)) _ <- ProjectDao.addPermissionsMany(projectInsert.id, acrListToInsert) actions <- ProjectDao.listUserActions(dbGrantedUser, projectInsert.id) permissionsBack <- ProjectDao.getPermissions(projectInsert.id) } yield { (actions, permissionsBack) } val (userActions, permissionsBack) = userActionsIO.transact(xa).unsafeRunSync val acrActionsDistinct = permissionsBack.flatten.map(_.actionType.toString).distinct assert( acrActionsDistinct.diff(userActions).length == 0, "Listing actions granted to a user on a project should return all action string") true } } } } test("list projects a user can view") { check { forAll { (userTeamOrgPlat: (User.Create, Team.Create, Organization.Create, Platform), acrList: List[ObjectAccessControlRule], project1: Project.Create, project2: Project.Create, grantedUser: User.Create, page: PageRequest) => { val listProjectsIO = for { projMiscInsert <- fixUpProjMiscInsert(userTeamOrgPlat, 
project1) (projectInsert1, dbUserTeamOrgPlat) = projMiscInsert (dbUser, dbTeam, dbOrg, dbPlatform) = dbUserTeamOrgPlat projectInsert2 <- ProjectDao.insertProject( fixupProjectCreate(dbUser, project2), dbUser) dbGrantedUserInsert <- UserDao.create(grantedUser) dbGrantedUser = dbGrantedUserInsert.copy(isSuperuser = false) dbUserGrantedTeamOrgPlat = dbUserTeamOrgPlat.copy( _1 = dbGrantedUser) ugr <- { UserGroupRoleDao.create( UserGroupRole .Create(dbGrantedUser.id, GroupType.Platform, dbPlatform.id, GroupRole.Member) .toUserGroupRole(dbUser, MembershipStatus.Approved)) >> UserGroupRoleDao.create( UserGroupRole .Create(dbGrantedUser.id, GroupType.Organization, dbOrg.id, GroupRole.Member) .toUserGroupRole(dbUser, MembershipStatus.Approved)) >> UserGroupRoleDao.create( UserGroupRole .Create(dbGrantedUser.id, GroupType.Team, dbTeam.id, GroupRole.Member) .toUserGroupRole(dbUser, MembershipStatus.Approved)) } _ = logger.trace(s"Created UGR: $ugr") acrListToInsert = acrList.map( fixUpObjectAcr(_, dbUserGrantedTeamOrgPlat)) _ <- ProjectDao.addPermissionsMany(projectInsert1.id, acrListToInsert) permissionsBack <- ProjectDao.getPermissions(projectInsert1.id) paginatedProjects <- ProjectDao .authQuery(dbGrantedUser, ObjectType.Project) .filter(fr"owner=${dbUser.id}") .page(page) } yield { (projectInsert1, projectInsert2, permissionsBack, paginatedProjects) } val (projectInsert1, projectInsert2, permissionsBack, paginatedProjects) = listProjectsIO.transact(xa).unsafeRunSync val hasViewPermission = permissionsBack.flatten.exists(_.actionType == ActionType.View) (hasViewPermission, projectInsert1.visibility, projectInsert2.visibility) match { case (true, _, Visibility.Public) => paginatedProjects.results .map(_.id) .diff(List(projectInsert1.id, projectInsert2.id)) .length == 0 case (true, _, _) => paginatedProjects.results .map(_.id) .diff(List(projectInsert1.id)) .length == 0 case (false, Visibility.Public, Visibility.Public) => paginatedProjects.results .map(_.id) .diff(List(projectInsert1.id, projectInsert2.id)) .length == 0 case (false, Visibility.Public, _) => paginatedProjects.results .map(_.id) .diff(List(projectInsert1.id)) .length == 0 case (false, _, Visibility.Public) => paginatedProjects.results .map(_.id) .diff(List(projectInsert2.id)) .length == 0 case _ => paginatedProjects.results.length == 0 } } } } } // authorized -- a function for checking a permission of a user on an object test("check if a user can have a certain action on a project") { check { forAll { (userTeamOrgPlat: (User.Create, Team.Create, Organization.Create, Platform), acrList: List[ObjectAccessControlRule], project1: Project.Create, project2: Project.Create, grantedUser: User.Create) => { val projectCreateIO = for { projMiscInsert <- fixUpProjMiscInsert(userTeamOrgPlat, project1) (projectInsert1, dbUserTeamOrgPlat) = projMiscInsert (dbUser, dbTeam, dbOrg, dbPlatform) = dbUserTeamOrgPlat projectInsert2 <- ProjectDao.insertProject( fixupProjectCreate(dbUser, project2), dbUser) dbGrantedUserInsert <- UserDao.create(grantedUser) dbGrantedUser = dbGrantedUserInsert.copy(isSuperuser = false) dbUserGrantedTeamOrgPlat = dbUserTeamOrgPlat.copy( _1 = dbGrantedUser) _ <- { UserGroupRoleDao.create( UserGroupRole .Create(dbGrantedUser.id, GroupType.Platform, dbPlatform.id, GroupRole.Member) .toUserGroupRole(dbUser, MembershipStatus.Approved)) >> UserGroupRoleDao.create( UserGroupRole .Create(dbGrantedUser.id, GroupType.Organization, dbOrg.id, GroupRole.Member) .toUserGroupRole(dbUser, MembershipStatus.Approved)) >> 
UserGroupRoleDao.create( UserGroupRole .Create(dbGrantedUser.id, GroupType.Team, dbTeam.id, GroupRole.Member) .toUserGroupRole(dbUser, MembershipStatus.Approved)) } } yield { (projectInsert1, projectInsert2, dbUserGrantedTeamOrgPlat, dbGrantedUser) } val isUserPermittedIO = for { projectCreate <- projectCreateIO (projectInsert1, projectInsert2, dbUserGrantedTeamOrgPlat, dbGrantedUser) = projectCreate acrListToInsert = acrList.map( fixUpObjectAcr(_, dbUserGrantedTeamOrgPlat)) acrs <- ProjectDao.addPermissionsMany(projectInsert1.id, acrListToInsert) _ = logger.info(s"ACRS Added: $acrs") action = Random.shuffle(acrListToInsert.map(_.actionType)).head isPermitted1 <- ProjectDao.authorized(dbGrantedUser, ObjectType.Project, projectInsert1.id, action) isPermitted2 <- ProjectDao.authorized(dbGrantedUser, ObjectType.Project, projectInsert2.id, action) } yield { (action, projectInsert2, isPermitted1.toBoolean, isPermitted2.toBoolean) } val (action, projectInsert2, isPermitted1, isPermitted2) = isUserPermittedIO.transact(xa).unsafeRunSync if (projectInsert2.visibility == Visibility.Public) { (action) match { case ActionType.View | ActionType.Export | ActionType.Annotate => isPermitted1 && isPermitted2 case _ => isPermitted1 && !isPermitted2 } } else { isPermitted1 } } } } } }
aaronxsu/raster-foundry
app-backend/db/src/test/scala/com/azavea/rf/database/ProjectDaoSpec.scala
Scala
apache-2.0
23,679
package org.sisioh.config

import com.typesafe.config.ConfigResolveOptions

object ConfigurationResolveOptions {

  private[config] def apply(
    useSystemEnvironment: Boolean): ConfigurationResolveOptions =
    new ConfigurationResolveOptionsImpl(useSystemEnvironment)

  def defaults: ConfigurationResolveOptions = apply(useSystemEnvironment = true)

  def noSystem: ConfigurationResolveOptions = apply(useSystemEnvironment = false)

}

trait ConfigurationResolveOptions {

  val underlying: ConfigResolveOptions

  def setUseSystemEnvironment(value: Boolean): ConfigurationResolveOptions

  def getUseSystemEnvironment: Boolean

}

private[config] case class ConfigurationResolveOptionsImpl(
  private val useSystemEnvironment: Boolean)
  extends ConfigurationResolveOptions {

  val underlying = ConfigResolveOptions.defaults.setUseSystemEnvironment(useSystemEnvironment)

  def setUseSystemEnvironment(value: Boolean): ConfigurationResolveOptions =
    ConfigurationResolveOptions(value)

  def getUseSystemEnvironment: Boolean = useSystemEnvironment

}
sisioh/sisioh-config
config/src/main/scala/org/sisioh/config/ConfigurationResolveOptions.scala
Scala
apache-2.0
1,074
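A minimal usage sketch for the wrapper above (illustrative only, not part of the sisioh-config repository; the entry point name is hypothetical and it relies only on the public API shown plus Typesafe Config's ConfigResolveOptions accessor):

import org.sisioh.config.ConfigurationResolveOptions

object ResolveOptionsExample extends App {
  // Start from the noSystem preset, then switch system-environment resolution back on.
  val opts = ConfigurationResolveOptions.noSystem.setUseSystemEnvironment(true)
  println(opts.getUseSystemEnvironment)            // true
  println(opts.underlying.getUseSystemEnvironment) // the wrapped Typesafe options agree
}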
object Test {
  def prettyPrintArray(x: Array[_]) =
    println("Array(" + x.mkString(", ") + ")")

  def main(args: Array[String]): Unit = {
    prettyPrintArray(Array(1,2,3) :+ 4)
    prettyPrintArray(1 +: Array(2,3,4))
    prettyPrintArray(Array() :+ 1)
    prettyPrintArray(1 +: Array())
  }
}
yusuke2255/dotty
tests/run/array-addition.scala
Scala
bsd-3-clause
295
/* * Copyright (C) FuseSource, Inc. * http://fusesource.com * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.fusesource.fabric.camel.dslio import java.io.PrintWriter import org.apache.camel.model.language.ExpressionDefinition import org.apache.camel.model._ class JavaDslWriter(out: PrintWriter) extends DslWriter(out) { def flush() = out.flush() import out._ var indentText = " " var indentLevel = 2 var startedStatement = false def doubleQuote(text: String): String = { "\\"" + text + "\\"" } override def write(from: FromDefinition) { val uri = from.getUri val args = if (uri != null) { List(doubleQuote(uri)) } else { List() } functionStatement("from", args) } override def write(to: ToDefinition) { val uri = to.getUri val args = if (uri != null) { List("\\"" + uri + "\\"") } else { List() } functionStatement("to", args) } def writeExpression(node: ExpressionDefinition): Unit = { print("." + node.getLanguage + "(\\"" + node.getExpression + "\\")") } override def write(node: FilterDefinition): Unit = { functionStatement("filter") writeExpression(node.getExpression) } override def write(route: RouteDefinition): Unit = { super.write(route) indentLevel -= 1 startedStatement = false println(";") } protected def functionStatement(name: String, parameters: List[String] = List()): Unit = { if (startedStatement) { println(".") } indent print(name + parametersText(parameters)) if (!startedStatement) { indentLevel += 1 startedStatement = true } } protected def parametersText(parameters: scala.List[String]): String = { parameters.mkString("(", ",", ")") } protected def indent: Unit = { for (i <- 0 to indentLevel) { print(indentText) } } }
janstey/fuse
fabric/fabric-camel-dslio/src/main/scala/org/fusesource/fabric/camel/dslio/JavaDslWriter.scala
Scala
apache-2.0
2,393
/* * Copyright 2014 JHC Systems Limited * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package sqlest.executor import org.joda.time.LocalDate import org.scalatest._ import org.scalatest.matchers._ import scala.concurrent.{ Await, Future } import scala.concurrent.duration._ import scala.util.Try import sqlest._ import sqlest.ast.{ LiteralColumn, Setter } import sqlest.extractor.TestResultSet import scala.concurrent.ExecutionContext.Implicits.global class ExecutorSpec extends FlatSpec with Matchers { import TestData._ implicit def testDatabase = TestDatabase(testResultSet) val selectStatement = select(TableOne.col1, TableOne.col2).from(TableOne) val updateStatement = update(TableOne).set(TableOne.col1 -> 123).where(TableOne.col2 === "12") val insertStatement = insert.into(TableOne).set(TableOne.col1 -> 123) val optionInsertStatement = insert.into(TableThree).values(TableThree.col3 -> Option[Int](1), TableThree.col4 -> Option[String](null)) val mappedOptionInsertStatement1 = { insert .into(TableSix) .columns(TableSix.trimmedString) .values(Setter(TableSix.trimmedString, LiteralColumn(Some(WrappedString("a")): Option[WrappedString]))) } val mappedOptionInsertStatement2 = { insert .into(TableSix) .columns(TableSix.trimmedString) .values(Setter(TableSix.trimmedString, LiteralColumn(None: Option[WrappedString]))) } val mappedOptionSelectStatement1 = { select(TableSix.zeroIsNoneLocalDate) .from(TableSix) .where(TableSix.zeroIsNoneLocalDate === Option.empty[LocalDate]) } val mappedOptionSelectStatement2 = { select(TableSix.zeroIsNoneLocalDate) .from(TableSix) .where(TableSix.zeroIsNoneLocalDate === Some(new LocalDate(2015, 1, 1))) } val deleteStatement = delete.from(TableOne).where(TableOne.col2 === "12") val extractor = extract[One]( a = TableOne.col1, b = TableOne.col2 ) "a select statement" should "be executable with an extractor" in { selectStatement.extractAll(extractor) } it should "be able to return all results" in { val actualExtracted = select.from(TableOne).extractAll(extractor) actualExtracted should equal(Seq( One(1, "a"), One(3, "c"), One(-1, "e") )) } it should "be able to return an option of the first result" in { val actual = selectStatement.extractHeadOption(extractor) actual should equal(Some(One(1, "a"))) } it should "be able to return the first result" in { val actual = selectStatement.extractHead(extractor) actual should equal(One(1, "a")) } it should "be able to return the types of the columns passed in" in { val head: Int = select(TableOne.col1).from(TableOne).fetchHead head should be(1) val headOption: Option[String] = select(TableOne.col2).from(TableOne).fetchHeadOption headOption should be(Some("a")) val all: List[(Int, String)] = select(TableOne.col1, TableOne.col2).from(TableOne).fetchAll all should equal(Seq( (1, "a"), (3, "c"), (-1, "e") )) } "a unioned select statement" should "be executable with an extractor with same number of columns to the select" in { val actualExtracted = select(TableOne.col1, TableOne.col2).from(TableOne) .union(select(TableOne.col1, 
TableOne.col2).from(TableOne)) .extractAll(extractor) actualExtracted should equal(Seq( One(1, "a"), One(3, "c"), One(-1, "e") )) } it should "not be executable with an extractor with a different number of columns to the select" in { intercept[AssertionError] { select(TableOne.col1).from(TableOne) .union(select(TableOne.col1).from(TableOne)) .extractAll(extractor) } } "withConnection" should "close the connection on completion" in { val database = TestDatabase(testResultSet) database.withConnection { connection => connection.createStatement.execute("values(0)") } database.lastConnection.get.closed shouldBe true } it should "close the connection when an exception is thrown" in { val database = TestDatabase(testResultSet) intercept[Exception] { database.withConnection { connection => throw new Exception("Catastrophic failure") } } database.lastConnection.get.closed shouldBe true } "an update" should "require a transaction to run" in { TestDatabase(testResultSet).withTransaction { implicit transaction => updateStatement.execute } } it should "close the connection on completion" in { val database = TestDatabase(testResultSet) database.withTransaction { implicit transaction => updateStatement.execute } database.lastConnection.get.closed shouldBe true } it should "commit the transaction on success" in { val database = TestDatabase(testResultSet) database.withTransaction { implicit transaction => updateStatement.execute } database.lastConnection.get.committed shouldBe true } it should "roll back the transaction and close the connection on rollback" in { val database = TestDatabase(testResultSet) database.withTransaction { implicit transaction => val result = updateStatement.execute transaction.rollback result } database.lastConnection.get.closed shouldBe true database.lastConnection.get.rolledBack shouldBe true database.lastConnection.get.committed shouldBe false } it should "roll back the transaction and close the connection when an exception is thrown" in { val database = TestDatabase(testResultSet) intercept[Exception] { database.withTransaction { implicit transaction => throw new Exception("Catastrophic error") } } database.lastConnection.get.closed shouldBe true database.lastConnection.get.rolledBack shouldBe true database.lastConnection.get.committed shouldBe false } "an asynchronous update" should "require a transaction to run" in { TestDatabase(testResultSet).withTransactionAsync { implicit transaction => Future(updateStatement.execute) } } it should "close the connection on completion" in { val database = TestDatabase(testResultSet) val transaction = database.withTransactionAsync { implicit transaction => Future(updateStatement.execute) } val result = Try(Await.result(transaction, 20.seconds)) result shouldBe 'success database.lastConnection.get.closed shouldBe true } it should "commit the transaction on success" in { val database = TestDatabase(testResultSet) val transaction = database.withTransactionAsync { implicit transaction => Future(updateStatement.execute) } val result = Try(Await.result(transaction, 20.seconds)) result shouldBe 'success database.lastConnection.get.committed shouldBe true } it should "roll back the transaction and close the connection on rollback" in { val database = TestDatabase(testResultSet) val transaction = database.withTransactionAsync { implicit transaction => Future { val result = updateStatement.execute transaction.rollback result } } val result = Try(Await.result(transaction, 20.seconds)) result shouldBe 'success database.lastConnection.get.closed 
shouldBe true database.lastConnection.get.rolledBack shouldBe true database.lastConnection.get.committed shouldBe false } it should "roll back the transaction and close the connection when an exception is thrown" in { val database = TestDatabase(testResultSet) val transaction = database.withTransactionAsync { implicit transaction => throw new Exception("Catastrophic error") } val result = Try(Await.result(transaction, 20.seconds)) result shouldBe 'failure database.lastConnection.get.closed shouldBe true database.lastConnection.get.rolledBack shouldBe true database.lastConnection.get.committed shouldBe false } it should "roll back the transaction and close the connection when the Future fails" in { val database = TestDatabase(testResultSet) val transaction = database.withTransactionAsync { implicit transaction => Future(throw new Exception("Catastrophic error")) } val result = Try(Await.result(transaction, 20.seconds)) result shouldBe 'failure database.lastConnection.get.closed shouldBe true database.lastConnection.get.rolledBack shouldBe true database.lastConnection.get.committed shouldBe false } "an insert" should "require a transaction to run" in { TestDatabase(testResultSet).withTransaction { implicit transaction => insertStatement.execute } } "an asynchronous insert" should "require a transaction to run" in { TestDatabase(testResultSet).withTransactionAsync { implicit transaction => Future(insertStatement.execute) } } "executeReturningKeys" should "return the generated String key" in { TestDatabase(testResultSet, Some(keyResultSet)).withTransaction { implicit transaction => val keys: List[String] = insertStatement.executeReturningKeys[String] keys should equal(List[String]("34")) } } it should "return the generated Integer key" in { TestDatabase(testResultSet, Some(keyResultSet)).withTransaction { implicit transaction => val keys: List[Int] = insertStatement.executeReturningKeys[Int] keys should equal(List[Int](46)) } } "a delete" should "require a transaction to run" in { TestDatabase(testResultSet).withTransaction { implicit transaction => deleteStatement.execute } } "an asynchronous delete" should "require a transaction to run" in { TestDatabase(testResultSet).withTransactionAsync { implicit transaction => Future(deleteStatement.execute) } } "an insert with optional values" should "execute correctly" in { TestDatabase(testResultSet).withTransaction { implicit transaction => optionInsertStatement.execute } } it should "generate raw SQL correctly" in { TestDatabase(testResultSet).statementBuilder.generateRawSql(optionInsertStatement) should equal( "insert into three (col3, col4) values (1, null)" ) } it should "set parameters correctly" in { val testDatabase = TestDatabase(testResultSet) testDatabase.withTransaction { implicit transaction => optionInsertStatement.execute } testDatabase.preparedStatement.get.sql shouldBe "insert into three (col3, col4) values (?, ?)" testDatabase.preparedStatement.get.parameters shouldBe Map(1 -> 1, 2 -> null) } "an insert with mapped optional values" should "execute correctly" in { TestDatabase(testResultSet).withTransaction { implicit transaction => mappedOptionInsertStatement1.execute } } it should "generate raw SQL correctly" in { TestDatabase(testResultSet).statementBuilder.generateRawSql(mappedOptionInsertStatement1) should equal( "insert into six (trimmedString) values ('a')" ) testDatabase.statementBuilder.generateRawSql(mappedOptionInsertStatement2) should equal( "insert into six (trimmedString) values ('')" ) 
testDatabase.statementBuilder.generateRawSql(mappedOptionSelectStatement1) should equal( "select six.zeroIsNoneDateTime as six_zeroIsNoneDateTime from six where (six.zeroIsNoneDateTime = 0)" ) testDatabase.statementBuilder.generateRawSql(mappedOptionSelectStatement2) should equal( "select six.zeroIsNoneDateTime as six_zeroIsNoneDateTime from six where (six.zeroIsNoneDateTime = 20150101)" ) } it should "set parameters correctly" in { val testDatabase = TestDatabase(testResultSet) testDatabase.withTransaction { implicit transaction => mappedOptionInsertStatement1.execute } testDatabase.preparedStatement.get.sql shouldBe "insert into six (trimmedString) values (?)" testDatabase.preparedStatement.get.parameters shouldBe Map(1 -> "a") testDatabase.withTransaction { implicit transaction => mappedOptionInsertStatement2.execute } testDatabase.preparedStatement.get.sql shouldBe "insert into six (trimmedString) values (?)" testDatabase.preparedStatement.get.parameters shouldBe Map(1 -> "") } it should "compile with both implicit database and transaction for a select" in { TestDatabase(testResultSet).withTransaction { implicit transaction => selectStatement.fetchAll insertStatement.execute } } it should "return verbose exception messages when configured to do so" in { val database = TestDatabase(testResultSet, Some(keyResultSet), shouldThrow = true, verboseExceptionMessages = true) val selectException = intercept[SqlestException] { database.withSession { implicit session => selectStatement.fetchAll } } assert(selectException.message.startsWith("Exception running sql")) selectException.cause shouldBe database.anException val insertException = intercept[SqlestException] { database.withTransaction { implicit transaction => insertStatement.execute } } assert(insertException.message.startsWith("Exception running sql")) insertException.cause shouldBe database.anException val insertReturningKeysException = intercept[SqlestException] { database.withTransaction { implicit transaction => insertStatement.executeReturningKeys[String] } } assert(insertReturningKeysException.message.startsWith("Exception running sql")) insertReturningKeysException.cause shouldBe database.anException val updateException = intercept[SqlestException] { database.withTransaction { implicit transaction => updateStatement.execute } } assert(updateException.message.startsWith("Exception running sql")) updateException.cause shouldBe database.anException } it should "return the underlying exception otherwise" in { val database = TestDatabase(testResultSet, Some(keyResultSet), shouldThrow = true, verboseExceptionMessages = false) val selectException = intercept[Exception] { database.withSession { implicit session => selectStatement.fetchAll } } selectException shouldBe database.anException val insertException = intercept[Exception] { database.withTransaction { implicit transaction => insertStatement.execute } } insertException shouldBe database.anException val insertReturningKeysException = intercept[Exception] { database.withTransaction { implicit transaction => insertStatement.executeReturningKeys[String] } } insertReturningKeysException shouldBe database.anException val updateException = intercept[Exception] { database.withTransaction { implicit transaction => updateStatement.execute } } updateException shouldBe database.anException } it should "return verbose exception messages on prepare when configured to do so" in { val database = TestDatabase(testResultSet, Some(keyResultSet), shouldThrow = true, verboseExceptionMessages = true, 
throwExceptionOnPrepare = true) val selectException = intercept[SqlestException] { database.withSession { implicit session => selectStatement.fetchAll } } assert(selectException.message.startsWith("Exception running sql")) selectException.cause shouldBe database.anException val insertException = intercept[SqlestException] { database.withTransaction { implicit transaction => insertStatement.execute } } assert(insertException.message.startsWith("Exception running sql")) insertException.cause shouldBe database.anException val insertReturningKeysException = intercept[SqlestException] { database.withTransaction { implicit transaction => insertStatement.executeReturningKeys[String] } } assert(insertReturningKeysException.message.startsWith("Exception running sql")) insertReturningKeysException.cause shouldBe database.anException val updateException = intercept[SqlestException] { database.withTransaction { implicit transaction => updateStatement.execute } } assert(updateException.message.startsWith("Exception running sql")) updateException.cause shouldBe database.anException } it should "return the underlying exception on prepare otherwise " in { val database = TestDatabase(testResultSet, Some(keyResultSet), shouldThrow = true, verboseExceptionMessages = false, throwExceptionOnPrepare = true) val selectException = intercept[Exception] { database.withSession { implicit session => selectStatement.fetchAll } } selectException shouldBe database.anException val insertException = intercept[Exception] { database.withTransaction { implicit transaction => insertStatement.execute } } insertException shouldBe database.anException val insertReturningKeysException = intercept[Exception] { database.withTransaction { implicit transaction => insertStatement.executeReturningKeys[String] } } insertReturningKeysException shouldBe database.anException val updateException = intercept[Exception] { database.withTransaction { implicit transaction => updateStatement.execute } } updateException shouldBe database.anException } }
jhc-systems/sqlest
sqlest/src/test/scala/sqlest/executor/ExecutorSpec.scala
Scala
apache-2.0
17,993
package cl.asa.yaml.cchart

class Cchart {
  var form: Array[String] = Array.empty
  var ctype: String = ""

  def setForm(list: Array[String]) {
    this.form = list
  }

  def setCtype(str: String) {
    this.ctype = str
  }
}
Takeuchi-Lab-LM/scala_asa3
ASA/src/main/scala/cl/asa/yaml/cchart/Cchart.scala
Scala
mit
218
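A short, hypothetical usage sketch for the mutable holder above (placeholder values; in the project the form and ctype contents are populated from YAML, and the object name below is invented for illustration):

import cl.asa.yaml.cchart.Cchart

object CchartExample extends App {
  val chart = new Cchart
  chart.setForm(Array("form-a", "form-b")) // placeholder forms
  chart.setCtype("sample-ctype")           // placeholder type label
  println(chart.form.mkString(", ") + " / " + chart.ctype)
}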
package mesosphere.marathon
package core.task.update.impl.steps

import akka.Done
import mesosphere.UnitTest
import mesosphere.marathon.core.instance.TestInstanceBuilder
import mesosphere.marathon.core.instance.update.{ InstanceChange, InstanceChangeHandler }
import mesosphere.marathon.core.task.bus.TaskStatusUpdateTestHelper
import mesosphere.marathon.state.PathId
import mesosphere.marathon.test.CaptureLogEvents

import scala.concurrent.Future

class ContinueOnErrorStepTest extends UnitTest {
  "ContinueOnErrorStep" should {
    "name uses nested name" in {
      object nested extends InstanceChangeHandler {
        override def name: String = "nested"
        override def process(update: InstanceChange): Future[Done] = {
          throw new scala.RuntimeException("not implemented")
        }
      }

      ContinueOnErrorStep(nested).name should equal("continueOnError(nested)")
    }

    "A successful step should not produce logging output" in {
      val f = new Fixture
      Given("a nested step that is always successful")
      f.nested.process(any) returns Future.successful(Done)
      val step = ContinueOnErrorStep(f.nested)

      When("executing the step")
      val logEvents = CaptureLogEvents.forBlock {
        val resultFuture = step.process(TaskStatusUpdateTestHelper.running(f.dummyInstanceBuilder.getInstance()).wrapped)
        resultFuture.futureValue
      }

      Then("it should execute the nested step")
      verify(f.nested, times(1)).process(any)
      And("not produce any logging output")
      logEvents.filter(_.getMessage.contains(s"[${f.dummyInstance.instanceId.idString}]")) should be(empty)
    }

    "A failing step should log the error but proceed" in {
      val f = new Fixture
      Given("a nested step that always fails")
      f.nested.name returns "nested"
      f.nested.process(any) returns Future.failed(new RuntimeException("error!"))
      val step = ContinueOnErrorStep(f.nested)

      When("executing the step")
      val logEvents = CaptureLogEvents.forBlock {
        val resultFuture = step.process(TaskStatusUpdateTestHelper.running(f.dummyInstanceBuilder.getInstance()).wrapped)
        resultFuture.futureValue
      }

      Then("it should execute the nested step")
      verify(f.nested, times(1)).process(any)
      And("produce an error message in the log")
      logEvents.map(_.toString) should contain(
        s"[ERROR] while executing step nested for [${f.dummyInstance.instanceId.idString}], continue with other steps"
      )
    }
  }

  class Fixture {
    private[this] val appId: PathId = PathId("/test")
    val dummyInstanceBuilder = TestInstanceBuilder.newBuilderWithLaunchedTask(appId)
    val dummyInstance = dummyInstanceBuilder.getInstance()
    val nested = mock[InstanceChangeHandler]
  }
}
guenter/marathon
src/test/scala/mesosphere/marathon/core/task/update/impl/steps/ContinueOnErrorStepTest.scala
Scala
apache-2.0
2,792
package rugloom.rug

/**
 * RugLoom - Explorative analysis pipeline prototype
 * Created by oliverr on 8/11/2015.
 */
object VariStats {
  def apply(vari: Variation, ns: Map[Int, Int]): VariStats =
    VariStats(vari, ns.getOrElse(0, 0), ns.getOrElse(1, 0), ns.getOrElse(2, 0))
}

case class VariStats(vari: Variation, n0: Int, n1: Int, n2: Int) {

  def n(zygosity: Int): Int = zygosity match {
    case 0 => n0
    case 1 => n1
    case 2 => n2
    case _ => 0
  }

  def total = n0 + n1 + n2

  def maxZygosity = vari.chr match {
    case Chrs.chrY => 1
    case _ => 2
  }

  def freq(zygosity: Int) = n(zygosity).toDouble / total

  def report: String =
    "" + vari + (0 to maxZygosity).map(z => "" + z + ":" + n(z)).mkString("(", ", ", ")")
}
curoli/rugloom-client
app/rugloom/rug/VariStats.scala
Scala
mit
748
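An illustrative sketch of how the zygosity counts above turn into frequencies and a report line (hypothetical: Variation and Chrs are defined elsewhere in the rugloom codebase, so a variant value v is assumed to be supplied rather than constructed here):

import rugloom.rug.{VariStats, Variation}

def summarize(v: Variation): String = {
  val stats = VariStats(v, Map(0 -> 10, 1 -> 5, 2 -> 2)) // zygosity -> observation count
  assert(stats.total == 17)
  assert(stats.freq(2) == 2.0 / 17)
  stats.report // the variant's toString followed by "(0:10, 1:5, 2:2)" for an autosomal variant
}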
package epic.sequences

import epic.framework.LossAugmentation
import epic.constraints.LabeledSpanConstraints
import breeze.util.{OptionIndex, Index}

/**
 * TODO
 *
 * @author dlwh
 **/
class HammingLossAugmentation[L, W](startSymbol: L,
                                    labelIndex: Index[L],
                                    precisionScale: Double = 1.0,
                                    recallScale: Double = 1.0) extends LossAugmentation[Segmentation[L, W], SemiCRF.Anchoring[L, W]] {

  def lossAugmentation(datum: Segmentation[L, W]): SemiCRF.Anchoring[L, W] = {
    val gt = GoldSegmentPolicy.goldSegmentForcing[L](datum.segments.map{ case (k,v) => (labelIndex(k), v)})
    new HammingLossAugmentation.Anchoring(startSymbol, new OptionIndex(labelIndex), datum.words, gt, precisionScale, recallScale)
  }

  object HammingLossAugmentation {
    case class Anchoring[L, W](startSymbol: L,
                               labelIndex: OptionIndex[L],
                               words: IndexedSeq[W],
                               gt: GoldSegmentPolicy[L],
                               precisionScale: Double,
                               recallScale: Double,
                               constraints: LabeledSpanConstraints[L] = LabeledSpanConstraints.noConstraints[L]) extends SemiCRF.Anchoring[L, W] {

      def scoreTransition(prev: Int, cur: Int, begin: Int, end: Int): Double = {
        if (gt.isGoldSegment(begin, end, cur)) -precisionScale else recallScale
      }

      override def ignoreTransitionModel: Boolean = true
    }
  }
}
langkilde/epic
src/main/scala/epic/sequences/HammingLossAugmentation.scala
Scala
apache-2.0
1,509
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark import java.util.concurrent.TimeUnit import scala.collection.mutable import scala.collection.mutable.ArrayBuffer import scala.util.control.{ControlThrowable, NonFatal} import com.codahale.metrics.{Gauge, MetricRegistry} import org.apache.spark.internal.{config, Logging} import org.apache.spark.internal.config._ import org.apache.spark.metrics.source.Source import org.apache.spark.scheduler._ import org.apache.spark.storage.BlockManagerMaster import org.apache.spark.util.{Clock, SystemClock, ThreadUtils, Utils} /** * An agent that dynamically allocates and removes executors based on the workload. * * The ExecutorAllocationManager maintains a moving target number of executors which is periodically * synced to the cluster manager. The target starts at a configured initial value and changes with * the number of pending and running tasks. * * Decreasing the target number of executors happens when the current target is more than needed to * handle the current load. The target number of executors is always truncated to the number of * executors that could run all current running and pending tasks at once. * * Increasing the target number of executors happens in response to backlogged tasks waiting to be * scheduled. If the scheduler queue is not drained in N seconds, then new executors are added. If * the queue persists for another M seconds, then more executors are added and so on. The number * added in each round increases exponentially from the previous round until an upper bound has been * reached. The upper bound is based both on a configured property and on the current number of * running and pending tasks, as described above. * * The rationale for the exponential increase is twofold: (1) Executors should be added slowly * in the beginning in case the number of extra executors needed turns out to be small. Otherwise, * we may add more executors than we need just to remove them later. (2) Executors should be added * quickly over time in case the maximum number of executors is very high. Otherwise, it will take * a long time to ramp up under heavy workloads. * * The remove policy is simpler: If an executor has been idle for K seconds, meaning it has not * been scheduled to run any tasks, then it is removed. * * There is no retry logic in either case because we make the assumption that the cluster manager * will eventually fulfill all requests it receives asynchronously. 
* * The relevant Spark properties include the following: * * spark.dynamicAllocation.enabled - Whether this feature is enabled * spark.dynamicAllocation.minExecutors - Lower bound on the number of executors * spark.dynamicAllocation.maxExecutors - Upper bound on the number of executors * spark.dynamicAllocation.initialExecutors - Number of executors to start with * * spark.dynamicAllocation.executorAllocationRatio - * This is used to reduce the parallelism of the dynamic allocation that can waste * resources when tasks are small * * spark.dynamicAllocation.schedulerBacklogTimeout (M) - * If there are backlogged tasks for this duration, add new executors * * spark.dynamicAllocation.sustainedSchedulerBacklogTimeout (N) - * If the backlog is sustained for this duration, add more executors * This is used only after the initial backlog timeout is exceeded * * spark.dynamicAllocation.executorIdleTimeout (K) - * If an executor has been idle for this duration, remove it */ private[spark] class ExecutorAllocationManager( client: ExecutorAllocationClient, listenerBus: LiveListenerBus, conf: SparkConf, blockManagerMaster: BlockManagerMaster) extends Logging { allocationManager => import ExecutorAllocationManager._ // Lower and upper bounds on the number of executors. private val minNumExecutors = conf.get(DYN_ALLOCATION_MIN_EXECUTORS) private val maxNumExecutors = conf.get(DYN_ALLOCATION_MAX_EXECUTORS) private val initialNumExecutors = Utils.getDynamicAllocationInitialExecutors(conf) // How long there must be backlogged tasks for before an addition is triggered (seconds) private val schedulerBacklogTimeoutS = conf.getTimeAsSeconds( "spark.dynamicAllocation.schedulerBacklogTimeout", "1s") // Same as above, but used only after `schedulerBacklogTimeoutS` is exceeded private val sustainedSchedulerBacklogTimeoutS = conf.getTimeAsSeconds( "spark.dynamicAllocation.sustainedSchedulerBacklogTimeout", s"${schedulerBacklogTimeoutS}s") // How long an executor must be idle for before it is removed (seconds) private val executorIdleTimeoutS = conf.getTimeAsSeconds( "spark.dynamicAllocation.executorIdleTimeout", "60s") private val cachedExecutorIdleTimeoutS = conf.getTimeAsSeconds( "spark.dynamicAllocation.cachedExecutorIdleTimeout", s"${Integer.MAX_VALUE}s") // During testing, the methods to actually kill and add executors are mocked out private val testing = conf.getBoolean("spark.dynamicAllocation.testing", false) // TODO: The default value of 1 for spark.executor.cores works right now because dynamic // allocation is only supported for YARN and the default number of cores per executor in YARN is // 1, but it might need to be attained differently for different cluster managers private val tasksPerExecutorForFullParallelism = conf.getInt("spark.executor.cores", 1) / conf.getInt("spark.task.cpus", 1) private val executorAllocationRatio = conf.get(DYN_ALLOCATION_EXECUTOR_ALLOCATION_RATIO) validateSettings() // Number of executors to add in the next round private var numExecutorsToAdd = 1 // The desired number of executors at this moment in time. If all our executors were to die, this // is the number of executors we would immediately want from the cluster manager. 
private var numExecutorsTarget = initialNumExecutors // Executors that have been requested to be removed but have not been killed yet private val executorsPendingToRemove = new mutable.HashSet[String] // All known executors private val executorIds = new mutable.HashSet[String] // A timestamp of when an addition should be triggered, or NOT_SET if it is not set // This is set when pending tasks are added but not scheduled yet private var addTime: Long = NOT_SET // A timestamp for each executor of when the executor should be removed, indexed by the ID // This is set when an executor is no longer running a task, or when it first registers private val removeTimes = new mutable.HashMap[String, Long] // Polling loop interval (ms) private val intervalMillis: Long = if (Utils.isTesting) { conf.getLong(TESTING_SCHEDULE_INTERVAL_KEY, 100) } else { 100 } // Clock used to schedule when executors should be added and removed private var clock: Clock = new SystemClock() // Listener for Spark events that impact the allocation policy val listener = new ExecutorAllocationListener // Executor that handles the scheduling task. private val executor = ThreadUtils.newDaemonSingleThreadScheduledExecutor("spark-dynamic-executor-allocation") // Metric source for ExecutorAllocationManager to expose internal status to MetricsSystem. val executorAllocationManagerSource = new ExecutorAllocationManagerSource // Whether we are still waiting for the initial set of executors to be allocated. // While this is true, we will not cancel outstanding executor requests. This is // set to false when: // (1) a stage is submitted, or // (2) an executor idle timeout has elapsed. @volatile private var initializing: Boolean = true // Number of locality aware tasks, used for executor placement. private var localityAwareTasks = 0 // Host to possible task running on it, used for executor placement. private var hostToLocalTaskCount: Map[String, Int] = Map.empty /** * Verify that the settings specified through the config are valid. * If not, throw an appropriate exception. */ private def validateSettings(): Unit = { if (minNumExecutors < 0 || maxNumExecutors < 0) { throw new SparkException("spark.dynamicAllocation.{min/max}Executors must be positive!") } if (maxNumExecutors == 0) { throw new SparkException("spark.dynamicAllocation.maxExecutors cannot be 0!") } if (minNumExecutors > maxNumExecutors) { throw new SparkException(s"spark.dynamicAllocation.minExecutors ($minNumExecutors) must " + s"be less than or equal to spark.dynamicAllocation.maxExecutors ($maxNumExecutors)!") } if (schedulerBacklogTimeoutS <= 0) { throw new SparkException("spark.dynamicAllocation.schedulerBacklogTimeout must be > 0!") } if (sustainedSchedulerBacklogTimeoutS <= 0) { throw new SparkException( "spark.dynamicAllocation.sustainedSchedulerBacklogTimeout must be > 0!") } if (executorIdleTimeoutS < 0) { throw new SparkException("spark.dynamicAllocation.executorIdleTimeout must be >= 0!") } if (cachedExecutorIdleTimeoutS < 0) { throw new SparkException("spark.dynamicAllocation.cachedExecutorIdleTimeout must be >= 0!") } // Require external shuffle service for dynamic allocation // Otherwise, we may lose shuffle files when killing executors if (!conf.get(config.SHUFFLE_SERVICE_ENABLED) && !testing) { throw new SparkException("Dynamic allocation of executors requires the external " + "shuffle service. 
You may enable this through spark.shuffle.service.enabled.") } if (tasksPerExecutorForFullParallelism == 0) { throw new SparkException("spark.executor.cores must not be < spark.task.cpus.") } if (executorAllocationRatio > 1.0 || executorAllocationRatio <= 0.0) { throw new SparkException( "spark.dynamicAllocation.executorAllocationRatio must be > 0 and <= 1.0") } } /** * Use a different clock for this allocation manager. This is mainly used for testing. */ def setClock(newClock: Clock): Unit = { clock = newClock } /** * Register for scheduler callbacks to decide when to add and remove executors, and start * the scheduling task. */ def start(): Unit = { listenerBus.addToManagementQueue(listener) val scheduleTask = new Runnable() { override def run(): Unit = { try { schedule() } catch { case ct: ControlThrowable => throw ct case t: Throwable => logWarning(s"Uncaught exception in thread ${Thread.currentThread().getName}", t) } } } executor.scheduleWithFixedDelay(scheduleTask, 0, intervalMillis, TimeUnit.MILLISECONDS) client.requestTotalExecutors(numExecutorsTarget, localityAwareTasks, hostToLocalTaskCount) } /** * Stop the allocation manager. */ def stop(): Unit = { executor.shutdown() executor.awaitTermination(10, TimeUnit.SECONDS) } /** * Reset the allocation manager when the cluster manager loses track of the driver's state. * This is currently only done in YARN client mode, when the AM is restarted. * * This method forgets about any state about existing executors, and forces the scheduler to * re-evaluate the number of needed executors the next time it's run. */ def reset(): Unit = synchronized { addTime = 0L numExecutorsTarget = initialNumExecutors executorsPendingToRemove.clear() removeTimes.clear() } /** * The maximum number of executors we would need under the current load to satisfy all running * and pending tasks, rounded up. */ private def maxNumExecutorsNeeded(): Int = { val numRunningOrPendingTasks = listener.totalPendingTasks + listener.totalRunningTasks math.ceil(numRunningOrPendingTasks * executorAllocationRatio / tasksPerExecutorForFullParallelism) .toInt } private def totalRunningTasks(): Int = synchronized { listener.totalRunningTasks } /** * This is called at a fixed interval to regulate the number of pending executor requests * and number of executors running. * * First, adjust our requested executors based on the add time and our current needs. * Then, if the remove time for an existing executor has expired, kill the executor. * * This is factored out into its own method for testing. */ private def schedule(): Unit = synchronized { val now = clock.getTimeMillis updateAndSyncNumExecutorsTarget(now) val executorIdsToBeRemoved = ArrayBuffer[String]() removeTimes.retain { case (executorId, expireTime) => val expired = now >= expireTime if (expired) { initializing = false executorIdsToBeRemoved += executorId } !expired } if (executorIdsToBeRemoved.nonEmpty) { removeExecutors(executorIdsToBeRemoved) } } /** * Updates our target number of executors and syncs the result with the cluster manager. * * Check to see whether our existing allocation and the requests we've made previously exceed our * current needs. If so, truncate our target and let the cluster manager know so that it can * cancel pending requests that are unneeded. * * If not, and the add time has expired, see if we can request new executors and refresh the add * time. * * @return the delta in the target number of executors. 
*/ private def updateAndSyncNumExecutorsTarget(now: Long): Int = synchronized { val maxNeeded = maxNumExecutorsNeeded if (initializing) { // Do not change our target while we are still initializing, // Otherwise the first job may have to ramp up unnecessarily 0 } else if (maxNeeded < numExecutorsTarget) { // The target number exceeds the number we actually need, so stop adding new // executors and inform the cluster manager to cancel the extra pending requests val oldNumExecutorsTarget = numExecutorsTarget numExecutorsTarget = math.max(maxNeeded, minNumExecutors) numExecutorsToAdd = 1 // If the new target has not changed, avoid sending a message to the cluster manager if (numExecutorsTarget < oldNumExecutorsTarget) { // We lower the target number of executors but don't actively kill any yet. Killing is // controlled separately by an idle timeout. It's still helpful to reduce the target number // in case an executor just happens to get lost (eg., bad hardware, or the cluster manager // preempts it) -- in that case, there is no point in trying to immediately get a new // executor, since we wouldn't even use it yet. client.requestTotalExecutors(numExecutorsTarget, localityAwareTasks, hostToLocalTaskCount) logDebug(s"Lowering target number of executors to $numExecutorsTarget (previously " + s"$oldNumExecutorsTarget) because not all requested executors are actually needed") } numExecutorsTarget - oldNumExecutorsTarget } else if (addTime != NOT_SET && now >= addTime) { val delta = addExecutors(maxNeeded) logDebug(s"Starting timer to add more executors (to " + s"expire in $sustainedSchedulerBacklogTimeoutS seconds)") addTime = now + (sustainedSchedulerBacklogTimeoutS * 1000) delta } else { 0 } } /** * Request a number of executors from the cluster manager. * If the cap on the number of executors is reached, give up and reset the * number of executors to add next round instead of continuing to double it. * * @param maxNumExecutorsNeeded the maximum number of executors all currently running or pending * tasks could fill * @return the number of additional executors actually requested. 
*/ private def addExecutors(maxNumExecutorsNeeded: Int): Int = { // Do not request more executors if it would put our target over the upper bound if (numExecutorsTarget >= maxNumExecutors) { logDebug(s"Not adding executors because our current target total " + s"is already $numExecutorsTarget (limit $maxNumExecutors)") numExecutorsToAdd = 1 return 0 } val oldNumExecutorsTarget = numExecutorsTarget // There's no point in wasting time ramping up to the number of executors we already have, so // make sure our target is at least as much as our current allocation: numExecutorsTarget = math.max(numExecutorsTarget, executorIds.size) // Boost our target with the number to add for this round: numExecutorsTarget += numExecutorsToAdd // Ensure that our target doesn't exceed what we need at the present moment: numExecutorsTarget = math.min(numExecutorsTarget, maxNumExecutorsNeeded) // Ensure that our target fits within configured bounds: numExecutorsTarget = math.max(math.min(numExecutorsTarget, maxNumExecutors), minNumExecutors) val delta = numExecutorsTarget - oldNumExecutorsTarget // If our target has not changed, do not send a message // to the cluster manager and reset our exponential growth if (delta == 0) { // Check if there is any speculative jobs pending if (listener.pendingTasks == 0 && listener.pendingSpeculativeTasks > 0) { numExecutorsTarget = math.max(math.min(maxNumExecutorsNeeded + 1, maxNumExecutors), minNumExecutors) } else { numExecutorsToAdd = 1 return 0 } } val addRequestAcknowledged = try { testing || client.requestTotalExecutors(numExecutorsTarget, localityAwareTasks, hostToLocalTaskCount) } catch { case NonFatal(e) => // Use INFO level so the error it doesn't show up by default in shells. Errors here are more // commonly caused by YARN AM restarts, which is a recoverable issue, and generate a lot of // noisy output. logInfo("Error reaching cluster manager.", e) false } if (addRequestAcknowledged) { val executorsString = "executor" + { if (delta > 1) "s" else "" } logInfo(s"Requesting $delta new $executorsString because tasks are backlogged" + s" (new desired total will be $numExecutorsTarget)") numExecutorsToAdd = if (delta == numExecutorsToAdd) { numExecutorsToAdd * 2 } else { 1 } delta } else { logWarning( s"Unable to reach the cluster manager to request $numExecutorsTarget total executors!") numExecutorsTarget = oldNumExecutorsTarget 0 } } /** * Request the cluster manager to remove the given executors. * Returns the list of executors which are removed. 
*/ private def removeExecutors(executors: Seq[String]): Seq[String] = synchronized { val executorIdsToBeRemoved = new ArrayBuffer[String] logInfo("Request to remove executorIds: " + executors.mkString(", ")) val numExistingExecutors = allocationManager.executorIds.size - executorsPendingToRemove.size var newExecutorTotal = numExistingExecutors executors.foreach { executorIdToBeRemoved => if (newExecutorTotal - 1 < minNumExecutors) { logDebug(s"Not removing idle executor $executorIdToBeRemoved because there are only " + s"$newExecutorTotal executor(s) left (minimum number of executor limit $minNumExecutors)") } else if (newExecutorTotal - 1 < numExecutorsTarget) { logDebug(s"Not removing idle executor $executorIdToBeRemoved because there are only " + s"$newExecutorTotal executor(s) left (number of executor target $numExecutorsTarget)") } else if (canBeKilled(executorIdToBeRemoved)) { executorIdsToBeRemoved += executorIdToBeRemoved newExecutorTotal -= 1 } } if (executorIdsToBeRemoved.isEmpty) { return Seq.empty[String] } // Send a request to the backend to kill this executor(s) val executorsRemoved = if (testing) { executorIdsToBeRemoved } else { // We don't want to change our target number of executors, because we already did that // when the task backlog decreased. client.killExecutors(executorIdsToBeRemoved, adjustTargetNumExecutors = false, countFailures = false, force = false) } // [SPARK-21834] killExecutors api reduces the target number of executors. // So we need to update the target with desired value. client.requestTotalExecutors(numExecutorsTarget, localityAwareTasks, hostToLocalTaskCount) // reset the newExecutorTotal to the existing number of executors newExecutorTotal = numExistingExecutors if (testing || executorsRemoved.nonEmpty) { executorsRemoved.foreach { removedExecutorId => // If it is a cached block, it uses cachedExecutorIdleTimeoutS for timeout val idleTimeout = if (blockManagerMaster.hasCachedBlocks(removedExecutorId)) { cachedExecutorIdleTimeoutS } else { executorIdleTimeoutS } newExecutorTotal -= 1 logInfo(s"Removing executor $removedExecutorId because it has been idle for " + s"$idleTimeout seconds (new desired total will be $newExecutorTotal)") executorsPendingToRemove.add(removedExecutorId) } executorsRemoved } else { logWarning(s"Unable to reach the cluster manager to kill executor/s " + s"${executorIdsToBeRemoved.mkString(",")} or no executor eligible to kill!") Seq.empty[String] } } /** * Request the cluster manager to remove the given executor. * Return whether the request is acknowledged. */ private def removeExecutor(executorId: String): Boolean = synchronized { val executorsRemoved = removeExecutors(Seq(executorId)) executorsRemoved.nonEmpty && executorsRemoved(0) == executorId } /** * Determine if the given executor can be killed. */ private def canBeKilled(executorId: String): Boolean = synchronized { // Do not kill the executor if we are not aware of it (should never happen) if (!executorIds.contains(executorId)) { logWarning(s"Attempted to remove unknown executor $executorId!") return false } // Do not kill the executor again if it is already pending to be killed (should never happen) if (executorsPendingToRemove.contains(executorId)) { logWarning(s"Attempted to remove executor $executorId " + s"when it is already pending to be removed!") return false } true } /** * Callback invoked when the specified executor has been added. 
*/ private def onExecutorAdded(executorId: String): Unit = synchronized { if (!executorIds.contains(executorId)) { executorIds.add(executorId) // If an executor (call this executor X) is not removed because the lower bound // has been reached, it will no longer be marked as idle. When new executors join, // however, we are no longer at the lower bound, and so we must mark executor X // as idle again so as not to forget that it is a candidate for removal. (see SPARK-4951) executorIds.filter(listener.isExecutorIdle).foreach(onExecutorIdle) logInfo(s"New executor $executorId has registered (new total is ${executorIds.size})") } else { logWarning(s"Duplicate executor $executorId has registered") } } /** * Callback invoked when the specified executor has been removed. */ private def onExecutorRemoved(executorId: String): Unit = synchronized { if (executorIds.contains(executorId)) { executorIds.remove(executorId) removeTimes.remove(executorId) logInfo(s"Existing executor $executorId has been removed (new total is ${executorIds.size})") if (executorsPendingToRemove.contains(executorId)) { executorsPendingToRemove.remove(executorId) logDebug(s"Executor $executorId is no longer pending to " + s"be removed (${executorsPendingToRemove.size} left)") } } else { logWarning(s"Unknown executor $executorId has been removed!") } } /** * Callback invoked when the scheduler receives new pending tasks. * This sets a time in the future that decides when executors should be added * if it is not already set. */ private def onSchedulerBacklogged(): Unit = synchronized { if (addTime == NOT_SET) { logDebug(s"Starting timer to add executors because pending tasks " + s"are building up (to expire in $schedulerBacklogTimeoutS seconds)") addTime = clock.getTimeMillis + schedulerBacklogTimeoutS * 1000 } } /** * Callback invoked when the scheduler queue is drained. * This resets all variables used for adding executors. */ private def onSchedulerQueueEmpty(): Unit = synchronized { logDebug("Clearing timer to add executors because there are no more pending tasks") addTime = NOT_SET numExecutorsToAdd = 1 } /** * Callback invoked when the specified executor is no longer running any tasks. * This sets a time in the future that decides when this executor should be removed if * the executor is not already marked as idle. */ private def onExecutorIdle(executorId: String): Unit = synchronized { if (executorIds.contains(executorId)) { if (!removeTimes.contains(executorId) && !executorsPendingToRemove.contains(executorId)) { // Note that it is not necessary to query the executors since all the cached // blocks we are concerned with are reported to the driver. Note that this // does not include broadcast blocks. val hasCachedBlocks = blockManagerMaster.hasCachedBlocks(executorId) val now = clock.getTimeMillis() val timeout = { if (hasCachedBlocks) { // Use a different timeout if the executor has cached blocks. now + cachedExecutorIdleTimeoutS * 1000 } else { now + executorIdleTimeoutS * 1000 } } val realTimeout = if (timeout <= 0) Long.MaxValue else timeout // overflow removeTimes(executorId) = realTimeout logDebug(s"Starting idle timer for $executorId because there are no more tasks " + s"scheduled to run on the executor (to expire in ${(realTimeout - now)/1000} seconds)") } } else { logWarning(s"Attempted to mark unknown executor $executorId idle") } } /** * Callback invoked when the specified executor is now running a task. * This resets all variables used for removing this executor. 
*/ private def onExecutorBusy(executorId: String): Unit = synchronized { logDebug(s"Clearing idle timer for $executorId because it is now running a task") removeTimes.remove(executorId) } /** * A listener that notifies the given allocation manager of when to add and remove executors. * * This class is intentionally conservative in its assumptions about the relative ordering * and consistency of events returned by the listener. */ private[spark] class ExecutorAllocationListener extends SparkListener { private val stageIdToNumTasks = new mutable.HashMap[Int, Int] // Number of running tasks per stage including speculative tasks. // Should be 0 when no stages are active. private val stageIdToNumRunningTask = new mutable.HashMap[Int, Int] private val stageIdToTaskIndices = new mutable.HashMap[Int, mutable.HashSet[Int]] private val executorIdToTaskIds = new mutable.HashMap[String, mutable.HashSet[Long]] // Number of speculative tasks to be scheduled in each stage private val stageIdToNumSpeculativeTasks = new mutable.HashMap[Int, Int] // The speculative tasks started in each stage private val stageIdToSpeculativeTaskIndices = new mutable.HashMap[Int, mutable.HashSet[Int]] // stageId to tuple (the number of task with locality preferences, a map where each pair is a // node and the number of tasks that would like to be scheduled on that node) map, // maintain the executor placement hints for each stage Id used by resource framework to better // place the executors. private val stageIdToExecutorPlacementHints = new mutable.HashMap[Int, (Int, Map[String, Int])] override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit = { initializing = false val stageId = stageSubmitted.stageInfo.stageId val numTasks = stageSubmitted.stageInfo.numTasks allocationManager.synchronized { stageIdToNumTasks(stageId) = numTasks stageIdToNumRunningTask(stageId) = 0 allocationManager.onSchedulerBacklogged() // Compute the number of tasks requested by the stage on each host var numTasksPending = 0 val hostToLocalTaskCountPerStage = new mutable.HashMap[String, Int]() stageSubmitted.stageInfo.taskLocalityPreferences.foreach { locality => if (!locality.isEmpty) { numTasksPending += 1 locality.foreach { location => val count = hostToLocalTaskCountPerStage.getOrElse(location.host, 0) + 1 hostToLocalTaskCountPerStage(location.host) = count } } } stageIdToExecutorPlacementHints.put(stageId, (numTasksPending, hostToLocalTaskCountPerStage.toMap)) // Update the executor placement hints updateExecutorPlacementHints() } } override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = { val stageId = stageCompleted.stageInfo.stageId allocationManager.synchronized { stageIdToNumTasks -= stageId stageIdToNumRunningTask -= stageId stageIdToNumSpeculativeTasks -= stageId stageIdToTaskIndices -= stageId stageIdToSpeculativeTaskIndices -= stageId stageIdToExecutorPlacementHints -= stageId // Update the executor placement hints updateExecutorPlacementHints() // If this is the last stage with pending tasks, mark the scheduler queue as empty // This is needed in case the stage is aborted for any reason if (stageIdToNumTasks.isEmpty && stageIdToNumSpeculativeTasks.isEmpty) { allocationManager.onSchedulerQueueEmpty() } } } override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { val stageId = taskStart.stageId val taskId = taskStart.taskInfo.taskId val taskIndex = taskStart.taskInfo.index val executorId = taskStart.taskInfo.executorId allocationManager.synchronized { if 
(stageIdToNumRunningTask.contains(stageId)) { stageIdToNumRunningTask(stageId) += 1 } // This guards against the race condition in which the `SparkListenerTaskStart` // event is posted before the `SparkListenerBlockManagerAdded` event, which is // possible because these events are posted in different threads. (see SPARK-4951) if (!allocationManager.executorIds.contains(executorId)) { allocationManager.onExecutorAdded(executorId) } // If this is the last pending task, mark the scheduler queue as empty if (taskStart.taskInfo.speculative) { stageIdToSpeculativeTaskIndices.getOrElseUpdate(stageId, new mutable.HashSet[Int]) += taskIndex } else { stageIdToTaskIndices.getOrElseUpdate(stageId, new mutable.HashSet[Int]) += taskIndex } if (totalPendingTasks() == 0) { allocationManager.onSchedulerQueueEmpty() } // Mark the executor on which this task is scheduled as busy executorIdToTaskIds.getOrElseUpdate(executorId, new mutable.HashSet[Long]) += taskId allocationManager.onExecutorBusy(executorId) } } override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = { val executorId = taskEnd.taskInfo.executorId val taskId = taskEnd.taskInfo.taskId val taskIndex = taskEnd.taskInfo.index val stageId = taskEnd.stageId allocationManager.synchronized { if (stageIdToNumRunningTask.contains(stageId)) { stageIdToNumRunningTask(stageId) -= 1 } // If the executor is no longer running any scheduled tasks, mark it as idle if (executorIdToTaskIds.contains(executorId)) { executorIdToTaskIds(executorId) -= taskId if (executorIdToTaskIds(executorId).isEmpty) { executorIdToTaskIds -= executorId allocationManager.onExecutorIdle(executorId) } } // If the task failed, we expect it to be resubmitted later. To ensure we have // enough resources to run the resubmitted task, we need to mark the scheduler // as backlogged again if it's not already marked as such (SPARK-8366) if (taskEnd.reason != Success) { if (totalPendingTasks() == 0) { allocationManager.onSchedulerBacklogged() } if (taskEnd.taskInfo.speculative) { stageIdToSpeculativeTaskIndices.get(stageId).foreach {_.remove(taskIndex)} } else { stageIdToTaskIndices.get(stageId).foreach {_.remove(taskIndex)} } } } } override def onExecutorAdded(executorAdded: SparkListenerExecutorAdded): Unit = { val executorId = executorAdded.executorId if (executorId != SparkContext.DRIVER_IDENTIFIER) { // This guards against the race condition in which the `SparkListenerTaskStart` // event is posted before the `SparkListenerBlockManagerAdded` event, which is // possible because these events are posted in different threads. (see SPARK-4951) if (!allocationManager.executorIds.contains(executorId)) { allocationManager.onExecutorAdded(executorId) } } } override def onExecutorRemoved(executorRemoved: SparkListenerExecutorRemoved): Unit = { allocationManager.onExecutorRemoved(executorRemoved.executorId) } override def onSpeculativeTaskSubmitted(speculativeTask: SparkListenerSpeculativeTaskSubmitted) : Unit = { val stageId = speculativeTask.stageId allocationManager.synchronized { stageIdToNumSpeculativeTasks(stageId) = stageIdToNumSpeculativeTasks.getOrElse(stageId, 0) + 1 allocationManager.onSchedulerBacklogged() } } /** * An estimate of the total number of pending tasks remaining for currently running stages. Does * not account for tasks which may have failed and been resubmitted. * * Note: This is not thread-safe without the caller owning the `allocationManager` lock. 
*/ def pendingTasks(): Int = { stageIdToNumTasks.map { case (stageId, numTasks) => numTasks - stageIdToTaskIndices.get(stageId).map(_.size).getOrElse(0) }.sum } def pendingSpeculativeTasks(): Int = { stageIdToNumSpeculativeTasks.map { case (stageId, numTasks) => numTasks - stageIdToSpeculativeTaskIndices.get(stageId).map(_.size).getOrElse(0) }.sum } def totalPendingTasks(): Int = { pendingTasks + pendingSpeculativeTasks } /** * The number of tasks currently running across all stages. */ def totalRunningTasks(): Int = { stageIdToNumRunningTask.values.sum } /** * Return true if an executor is not currently running a task, and false otherwise. * * Note: This is not thread-safe without the caller owning the `allocationManager` lock. */ def isExecutorIdle(executorId: String): Boolean = { !executorIdToTaskIds.contains(executorId) } /** * Update the Executor placement hints (the number of tasks with locality preferences, * a map where each pair is a node and the number of tasks that would like to be scheduled * on that node). * * These hints are updated when stages arrive and complete, so are not up-to-date at task * granularity within stages. */ def updateExecutorPlacementHints(): Unit = { var localityAwareTasks = 0 val localityToCount = new mutable.HashMap[String, Int]() stageIdToExecutorPlacementHints.values.foreach { case (numTasksPending, localities) => localityAwareTasks += numTasksPending localities.foreach { case (hostname, count) => val updatedCount = localityToCount.getOrElse(hostname, 0) + count localityToCount(hostname) = updatedCount } } allocationManager.localityAwareTasks = localityAwareTasks allocationManager.hostToLocalTaskCount = localityToCount.toMap } } /** * Metric source for ExecutorAllocationManager to expose its internal executor allocation * status to MetricsSystem. * Note: These metrics heavily rely on the internal implementation of * ExecutorAllocationManager, metrics or value of metrics will be changed when internal * implementation is changed, so these metrics are not stable across Spark version. */ private[spark] class ExecutorAllocationManagerSource extends Source { val sourceName = "ExecutorAllocationManager" val metricRegistry = new MetricRegistry() private def registerGauge[T](name: String, value: => T, defaultValue: T): Unit = { metricRegistry.register(MetricRegistry.name("executors", name), new Gauge[T] { override def getValue: T = synchronized { Option(value).getOrElse(defaultValue) } }) } registerGauge("numberExecutorsToAdd", numExecutorsToAdd, 0) registerGauge("numberExecutorsPendingToRemove", executorsPendingToRemove.size, 0) registerGauge("numberAllExecutors", executorIds.size, 0) registerGauge("numberTargetExecutors", numExecutorsTarget, 0) registerGauge("numberMaxNeededExecutors", maxNumExecutorsNeeded(), 0) } } private object ExecutorAllocationManager { val NOT_SET = Long.MaxValue val TESTING_SCHEDULE_INTERVAL_KEY = "spark.testing.dynamicAllocation.scheduleInterval" }
michalsenkyr/spark
core/src/main/scala/org/apache/spark/ExecutorAllocationManager.scala
Scala
apache-2.0
38,653
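The class comment above lists the spark.dynamicAllocation.* properties that drive this manager. A hedged configuration sketch, with placeholder values, showing how an application might enable the feature within the constraints that validateSettings enforces (external shuffle service on, min <= max, positive timeouts):

import org.apache.spark.SparkConf

// Illustrative configuration only; the values are placeholders, the property names are
// the ones documented and validated by ExecutorAllocationManager above.
val conf = new SparkConf()
  .setAppName("dynamic-allocation-example") // placeholder app name
  .set("spark.dynamicAllocation.enabled", "true")
  .set("spark.shuffle.service.enabled", "true") // required unless testing
  .set("spark.dynamicAllocation.minExecutors", "2")
  .set("spark.dynamicAllocation.maxExecutors", "50")
  .set("spark.dynamicAllocation.initialExecutors", "4")
  .set("spark.dynamicAllocation.schedulerBacklogTimeout", "1s")
  .set("spark.dynamicAllocation.sustainedSchedulerBacklogTimeout", "1s")
  .set("spark.dynamicAllocation.executorIdleTimeout", "60s")
  .set("spark.dynamicAllocation.executorAllocationRatio", "1.0")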
package com.outr.arango.api.model

import io.circe.Json

case class DeleteAPIAqlfunctionRc200(error: Boolean,
                                     code: Option[Long] = None,
                                     deletedCount: Option[Long] = None)
outr/arangodb-scala
api/src/main/scala/com/outr/arango/api/model/DeleteAPIAqlfunctionRc200.scala
Scala
mit
246
package org.clulab.twitter4food

import org.clulab.twitter4food.util._

object TestDownloadAccounts {
  def main(args: Array[String]): Unit = {
    val keyset = args(0).toInt
    val numWindows = args(1).toInt
    val (api, config) = Utils.init(keyset)
    val hlMap = Utils.loadHandles(s"${config.getString("classifier")}/overweight/ow_accounts.txt")
    val (subH, subL) = Utils.splitHandles(keyset, numWindows, hlMap)
    val accounts = Utils.fetchAccounts(api, subH, true, true, true)
    FileUtils.saveToFile(accounts, subL, config.getString("classifiers.overweight.opt") + keyset + ".txt")
  }
}
clulab/twitter4food
src/test/scala/org/clulab/twitter4food/TestDownloadAccounts.scala
Scala
apache-2.0
605
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.catalog import org.apache.flink.table.api.{DatabaseAlreadyExistException, DatabaseNotExistException, TableAlreadyExistException, TableNotExistException} import java.util.{List => JList} import scala.collection.mutable.HashMap import scala.collection.JavaConverters._ /** * This class is an in-memory implementation of [[ExternalCatalog]]. * * It could be used for testing or developing instead of used in production environment. */ class InMemoryExternalCatalog extends CrudExternalCatalog { private val databases = new HashMap[String, Database] @throws[DatabaseNotExistException] @throws[TableAlreadyExistException] override def createTable( table: ExternalCatalogTable, ignoreIfExists: Boolean): Unit = synchronized { val dbName = table.identifier.database val tables = getTables(dbName) val tableName = table.identifier.table if (tables.contains(tableName)) { if (!ignoreIfExists) { throw new TableAlreadyExistException(dbName, tableName) } } else { tables.put(tableName, table) } } @throws[DatabaseNotExistException] @throws[TableNotExistException] override def dropTable( dbName: String, tableName: String, ignoreIfNotExists: Boolean): Unit = synchronized { val tables = getTables(dbName) if (tables.remove(tableName).isEmpty && !ignoreIfNotExists) { throw new TableNotExistException(dbName, tableName) } } @throws[DatabaseNotExistException] @throws[TableNotExistException] override def alterTable( table: ExternalCatalogTable, ignoreIfNotExists: Boolean): Unit = synchronized { val dbName = table.identifier.database val tables = getTables(dbName) val tableName = table.identifier.table if (tables.contains(tableName)) { tables.put(tableName, table) } else if (!ignoreIfNotExists) { throw new TableNotExistException(dbName, tableName) } } @throws[DatabaseNotExistException] override def listTables(dbName: String): JList[String] = synchronized { val tables = getTables(dbName) tables.keys.toList.asJava } @throws[DatabaseNotExistException] @throws[TableNotExistException] override def getTable(dbName: String, tableName: String): ExternalCatalogTable = synchronized { val tables = getTables(dbName) tables.get(tableName) match { case Some(table) => table case None => throw new TableNotExistException(dbName, tableName) } } @throws[DatabaseAlreadyExistException] override def createDatabase( db: ExternalCatalogDatabase, ignoreIfExists: Boolean): Unit = synchronized { val dbName = db.dbName if (databases.contains(dbName)) { if (!ignoreIfExists) { throw new DatabaseAlreadyExistException(dbName) } } else { databases.put(dbName, new Database(db)) } } @throws[DatabaseNotExistException] override def alterDatabase( db: ExternalCatalogDatabase, ignoreIfNotExists: Boolean): Unit = synchronized { val dbName = db.dbName 
databases.get(dbName) match { case Some(database) => database.db = db case None => if (!ignoreIfNotExists) { throw new DatabaseNotExistException(dbName) } } } @throws[DatabaseNotExistException] override def dropDatabase( dbName: String, ignoreIfNotExists: Boolean): Unit = synchronized { if (databases.remove(dbName).isEmpty && !ignoreIfNotExists) { throw new DatabaseNotExistException(dbName) } } override def listDatabases(): JList[String] = synchronized { databases.keys.toList.asJava } @throws[DatabaseNotExistException] override def getDatabase(dbName: String): ExternalCatalogDatabase = synchronized { databases.get(dbName) match { case Some(database) => database.db case None => throw new DatabaseNotExistException(dbName) } } private def getTables(db: String): HashMap[String, ExternalCatalogTable] = databases.get(db) match { case Some(database) => database.tables case None => throw new DatabaseNotExistException(db) } private class Database(var db: ExternalCatalogDatabase) { val tables = new HashMap[String, ExternalCatalogTable] } }
DieBauer/flink
flink-libraries/flink-table/src/main/scala/org/apache/flink/table/catalog/InMemoryExternalCatalog.scala
Scala
apache-2.0
5,075
package scintuit.contrib.play.data.api

import play.api.libs.json._
import scintuit.data.api.institution._
import scintuit.contrib.play.data.raw

object institution {

  object InstitutionFormats extends InstitutionFormats

  trait InstitutionFormats {
    import raw.institution.{InstitutionFormats => RawInstitutionFormats}

    implicit val addressFormat: Format[Address] = RawInstitutionFormats.addressFormat

    implicit val keyFormat: Format[Key] =
      xmap(RawInstitutionFormats.keyFormat)(Key.apply, _.raw)

    implicit val institutionSummaryFormat: Format[InstitutionSummary] =
      xmap(RawInstitutionFormats.institutionFormat)(InstitutionSummary.apply, _.raw)

    implicit val institutionFormat: Format[Institution] =
      xmap(RawInstitutionFormats.institutionDetailsFormat)(Institution.apply, _.raw)

    private def xmap[A, B](format: Format[A])(fab: A => B, fba: B => A): Format[B] =
      Format(format map fab, Writes(b => format.writes(fba(b))))
  }
}
drbild/scintuit
contrib/play-json/src/main/scala/scintuit/contrib/play/data/api/institution.scala
Scala
apache-2.0
980
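The private xmap helper above is the whole trick: it lifts a Format for the raw type into a Format for the wrapper by mapping in both directions. A self-contained sketch of the same pattern, assuming only play-json; the UserId wrapper is hypothetical and not part of scintuit:

import play.api.libs.json._

// Hypothetical wrapper type, used only to illustrate the xmap pattern above.
final case class UserId(raw: String)

object UserId {
  private def xmap[A, B](format: Format[A])(fab: A => B, fba: B => A): Format[B] =
    Format(format map fab, Writes(b => format.writes(fba(b))))

  // Reuse play-json's built-in Format[String] to read/write the wrapper.
  implicit val userIdFormat: Format[UserId] = xmap(implicitly[Format[String]])(UserId.apply, _.raw)
}

// Json.toJson(UserId("abc"))        == JsString("abc")
// Json.parse("\"abc\"").as[UserId]  == UserId("abc")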
package com.github.wakfudecrypt.types.data

import com.github.wakfudecrypt._

@BinaryDecoder
case class HavenBagModelView(
  id: Int,
  restrictionWorld: Boolean,
  restrictionMarket: Boolean,
  backgroundMapId: Int,
  innate: Boolean,
  _5_float32: Float,
  _6_float32: Float,
  _7_float32: Float
)

object HavenBagModelView extends BinaryDataCompanion[HavenBagModelView] {
  override val dataId = 33
}
jac3km4/wakfudecrypt
types/src/main/scala/com/github/wakfudecrypt/types/data/HavenBagModelView.scala
Scala
mit
404
package com.github.ldaniels528.trifecta.util

import scala.util.{Failure, Success, Try}

/**
 * Parsing Helper
 * @author [email protected]
 */
object ParsingHelper {

  def deQuote(quotedString: String): String = {
    quotedString match {
      case s if s.startsWith("`") && s.endsWith("`") => s.drop(1).dropRight(1)
      case s if s.startsWith("\"") && s.endsWith("\"") => s.drop(1).dropRight(1)
      case s if s.startsWith("'") && s.endsWith("'") => s.drop(1).dropRight(1)
      case s if s.contains(",") && s.replaceAll(",", "").matches("\\d+") => s.replaceAll(",", "")
      case s => s
    }
  }

  /**
   * Indicates whether the given string is hexadecimal dot-notation
   * @param value the given string value
   * @return true, if the string is hexadecimal dot-notation (e.g. "de.ad.be.ef.ca.fe.ba.be")
   */
  def isDottedHex(value: String): Boolean = {
    value.split("[.]").forall(_.matches("""[0-9a-fA-F]{2}"""))
  }

  /**
   * Parses the given input value and returns its boolean equivalent
   * @param label the label of the value being parsed
   * @param value the given input value
   * @return the boolean equivalent value
   */
  def parseBoolean(label: String, value: String): Boolean = {
    value.toLowerCase match {
      case "true" | "yes" | "t" | "y" => true
      case "false" | "no" | "f" | "n" => false
      case invalid => throw new IllegalArgumentException(s"$label: Expected a boolean value (true/false, yes/no, y/n, t/f) found '$invalid'")
    }
  }

  /**
   * Parses the given delta value (e.g. "+5")
   * @param label the label of the value being parsed
   * @param value the given input value
   * @return the equivalent integer value
   */
  def parseDelta(label: String, value: String): Int = {
    value.head match {
      case '+' => parseInt(label, value.tail)
      case _ => parseInt(label, value)
    }
  }

  /**
   * Converts a binary string to a byte array
   * @param dottedHex the given binary string (e.g. "de.ad.be.ef.00")
   * @return a byte array
   */
  def parseDottedHex(dottedHex: String): Array[Byte] =
    dottedHex.split("[.]") map (Integer.parseInt(_, 16)) map (_.toByte)

  /**
   * Parses the given decimal value
   * @param label the label of the value being parsed
   * @param value the given input value
   * @return the equivalent double value
   */
  def parseDouble(label: String, value: String): Double = {
    Try(value.toDouble) match {
      case Success(v) => v
      case Failure(e) => throw new IllegalArgumentException(s"$label: Expected a decimal value, found '$value'")
    }
  }

  /**
   * Parses the given integer value
   * @param label the label of the value being parsed
   * @param value the given input value
   * @return the equivalent integer value
   */
  def parseInt(label: String, value: String): Int = {
    Try(value.toInt) match {
      case Success(v) => v
      case Failure(e) => throw new IllegalArgumentException(s"$label: Expected an integer value, found '$value'")
    }
  }

  /**
   * Parses the given long integer value
   * @param label the label of the value being parsed
   * @param value the given input value
   * @return the equivalent long integer value
   */
  def parseLong(label: String, value: String): Long = {
    Try(value.toLong) match {
      case Success(v) => v
      case Failure(e) => throw new IllegalArgumentException(s"$label: Expected a long integer value, found '$value'")
    }
  }

  /**
   * Parses the given partition string into an integer value
   * @param partition the given partition string value
   * @return the equivalent integer value
   */
  def parsePartition(partition: String): Int = parseInt("partition", partition)

  /**
   * Parses the given port string into an integer value
   * @param port the given port string value
   * @return the equivalent integer value
   */
  def parsePort(port: String): Int = parseInt("port", port)

  /**
   * Parses the given offset string into a long integer value
   * @param offset the given offset string value
   * @return the equivalent long integer value
   */
  def parseOffset(offset: String): Long = parseLong("offset", offset)
}
ldaniels528/trifecta
src/main/scala/com/github/ldaniels528/trifecta/util/ParsingHelper.scala
Scala
apache-2.0
4,177
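A short usage sketch for the helpers above; the labels and literal values are arbitrary, and the expected results follow directly from the implementations shown:

import com.github.ldaniels528.trifecta.util.ParsingHelper._

// A few illustrative calls with their expected results.
val enabled  = parseBoolean("enabled", "yes")   // true
val delta    = parseDelta("offset delta", "+5") // 5
val unquoted = deQuote("'hello'")               // hello
val port     = parsePort("8080")                // 8080
val dotted   = isDottedHex("de.ad.be.ef.ca.fe") // true
val bytes    = parseDottedHex("de.ad.be.ef")    // Array(0xde, 0xad, 0xbe, 0xef).map(_.toByte)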
package org.akoshterek.backgammon.agent.fa

import org.encog.neural.networks.BasicNetwork

abstract class AbsNeuralNetworkFA(val network: BasicNetwork) extends NeuralNetworkFA {
}
akoshterek/MultiGammonJava
multi-gammon-core/src/main/java/org/akoshterek/backgammon/agent/fa/AbsNeuralNetworkFA.scala
Scala
gpl-3.0
183
package pdi.jwt import scala.util.matching.Regex import java.time.Clock /** Test implementation of [[JwtCore]] using only Strings. Most of the time, you should use a lib * implementing JSON and shouldn't be using this object. But just in case you need pure Scala support, * here it is. * * To see a full list of samples, check the [[https://jwt-scala.github.io/jwt-scala/jwt-core-jwt.html online documentation]]. * * '''Warning''': since there is no JSON support in Scala, this object doesn't have any way to parse * a JSON string as an AST, so it only uses regex with all the limitations it implies. Try not to use * keys like `exp` and `nbf` in sub-objects of the claim. For example, if you try to use the following * claim: `{"user":{"exp":1},"exp":1300819380}`, it should be correct but it will fail because the regex * extracting the expiration will return `1` instead of `1300819380`. Sorry about that. */ object Jwt extends JwtCore[JwtHeader, JwtClaim] { def apply(clock: Clock): Jwt = new Jwt(clock) private val extractAlgorithmRegex = "\"alg\" *: *\"([a-zA-Z0-9]+)\"".r protected def extractAlgorithm(header: String): Option[JwtAlgorithm] = (extractAlgorithmRegex findFirstMatchIn header).map(_.group(1)).flatMap { case "none" => None case name: String => Some(JwtAlgorithm.fromString(name)) } private val extractIssuerRegex = "\"iss\" *: *\"([a-zA-Z0-9]*)\"".r protected def extractIssuer(claim: String): Option[String] = (extractIssuerRegex findFirstMatchIn claim).map(_.group(1)) private val extractSubjectRegex = "\"sub\" *: *\"([a-zA-Z0-9]*)\"".r protected def extractSubject(claim: String): Option[String] = (extractSubjectRegex findFirstMatchIn claim).map(_.group(1)) private val extractExpirationRegex = "\"exp\" *: *([0-9]+)".r protected def extractExpiration(claim: String): Option[Long] = (extractExpirationRegex findFirstMatchIn claim).map(_.group(1)).map(_.toLong) private val extractNotBeforeRegex = "\"nbf\" *: *([0-9]+)".r protected def extractNotBefore(claim: String): Option[Long] = (extractNotBeforeRegex findFirstMatchIn claim).map(_.group(1)).map(_.toLong) private val extractIssuedAtRegex = "\"iat\" *: *([0-9]+)".r protected def extractIssuedAt(claim: String): Option[Long] = (extractIssuedAtRegex findFirstMatchIn claim).map(_.group(1)).map(_.toLong) private val extractJwtIdRegex = "\"jti\" *: *\"([a-zA-Z0-9]*)\"".r protected def extractJwtId(claim: String): Option[String] = (extractJwtIdRegex findFirstMatchIn claim).map(_.group(1)) private val clearStartRegex = "\\{ *,".r protected def clearStart(json: String): String = clearStartRegex.replaceFirstIn(json, "{") private val clearMiddleRegex = ", *(?=,)".r protected def clearMiddle(json: String): String = clearMiddleRegex.replaceAllIn(json, "") private val clearEndRegex = ", *\\}".r protected def clearEnd(json: String): String = clearEndRegex.replaceFirstIn(json, "}") protected def clearRegex(json: String, regex: Regex): String = regex.replaceFirstIn(json, "") protected def clearAll(json: String): String = { val dirtyJson = List( extractIssuerRegex, extractSubjectRegex, extractExpirationRegex, extractNotBeforeRegex, extractIssuedAtRegex, extractJwtIdRegex ).foldLeft(json)(clearRegex) clearStart(clearEnd(clearMiddle(dirtyJson))) } protected def parseHeader(header: String): JwtHeader = JwtHeader(extractAlgorithm(header)) protected def parseClaim(claim: String): JwtClaim = JwtClaim( content = clearAll(claim), issuer = extractIssuer(claim), subject = extractSubject(claim), expiration = extractExpiration(claim), notBefore = extractNotBefore(claim), 
issuedAt = extractIssuedAt(claim), jwtId = extractJwtId(claim) ) protected def headerToJson(header: JwtHeader): String = header.toJson protected def claimToJson(claim: JwtClaim): String = claim.toJson protected def extractAlgorithm(header: JwtHeader): Option[JwtAlgorithm] = header.algorithm protected def extractExpiration(claim: JwtClaim): Option[Long] = claim.expiration protected def extractNotBefore(claim: JwtClaim): Option[Long] = claim.notBefore } class Jwt private (override val clock: Clock) extends JwtCore[JwtHeader, JwtClaim] { protected def parseHeader(header: String): JwtHeader = Jwt.parseHeader(header) protected def parseClaim(claim: String): JwtClaim = Jwt.parseClaim(claim) protected def extractAlgorithm(header: JwtHeader): Option[JwtAlgorithm] = header.algorithm protected def extractExpiration(claim: JwtClaim): Option[Long] = claim.expiration protected def extractNotBefore(claim: JwtClaim): Option[Long] = claim.notBefore protected def headerToJson(header: JwtHeader): String = header.toJson protected def claimToJson(claim: JwtClaim): String = claim.toJson }
pauldijou/jwt-scala
core/src/main/scala/Jwt.scala
Scala
apache-2.0
4,920
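The object above only supplies the regex-based header and claim parsing; signing and verification come from JwtCore. A round-trip sketch assuming the usual jwt-scala entry points (encode and decode with a shared secret); the secret and claim values are placeholders:

import pdi.jwt.{Jwt, JwtAlgorithm, JwtClaim}
import scala.util.Try

// Placeholder secret; assumes JwtCore exposes encode/decode as in jwt-scala's public API.
val secret = "change-me"

val token: String =
  Jwt.encode(JwtClaim(expiration = Some(1300819380L), issuer = Some("me")), secret, JwtAlgorithm.HS256)

// Decoding re-extracts the registered claims via the regexes defined above,
// hence the scaladoc caveat about nested "exp"/"nbf" keys inside the claim content.
val claim: Try[JwtClaim] = Jwt.decode(token, secret, Seq(JwtAlgorithm.HS256))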
package ionroller.deployment.eb import java.io.{ByteArrayInputStream, ByteArrayOutputStream} import java.util.zip.{ZipEntry, ZipOutputStream} import com.amazonaws.services.elasticbeanstalk.AWSElasticBeanstalk import com.amazonaws.services.elasticbeanstalk.model._ import com.amazonaws.services.s3.AmazonS3 import com.amazonaws.services.s3.model.{ObjectMetadata, PutObjectResult} import com.amazonaws.util.IOUtils import com.typesafe.scalalogging.LazyLogging import ionroller.aws.{AWSClientCache, ElasticBeanstalk} import ionroller.deployment.eb.v1.{DockerrunAWSJson, Dockerfile} import ionroller.{EBSSetup, ExternalElbSettings, TimelineName} import play.api.libs.json.{JsObject, Json} import scala.collection.JavaConverters._ import scala.io.Source import scalaz.Kleisli import scalaz.concurrent.Task import scalaz.std.option._ final case class DeploymentResult(applicationVersion: String, environment: CreateEnvironmentResult) class EBSDeployer(timeline: TimelineName, domain: String, externalElb: Option[ExternalElbSettings], deploymentConfig: EBSSetup) extends LazyLogging { val deploy: Kleisli[Task, AWSClientCache, DeploymentResult] = { Kleisli { cache => logger.debug(s"Deploying ${timeline.name} ${deploymentConfig.dockerImage}") for { config <- EBSDeploymentConfiguration(timeline, domain, externalElb, deploymentConfig).run(cache.s3) sourceBundle <- createSourceBundle(config).run(cache.s3) ver <- createApplicationVersion(config).run(cache) env <- createEnvironment(config).run(cache.elasticBeanstalk) } yield DeploymentResult(config.application.getApplicationName, env) } } def maybeCreateApplication(app: CreateApplicationVersionRequest, curAppVersion: Option[ApplicationVersionDescription]): Kleisli[Task, AWSClientCache, ApplicationVersionDescription] = { curAppVersion match { case None => ElasticBeanstalk.createApplicationVersion(app) case Some(a) => Kleisli { _ => Task.now(a) } } } def createApplicationVersion(config: EBSDeploymentConfiguration): Kleisli[Task, AWSClientCache, ApplicationVersionDescription] = { val app = config.application for { appVersionDescriptions <- ElasticBeanstalk.describeApplicationVersions(app.getApplicationName, app.getVersionLabel).mapK(_.attempt.map(_.fold(t => none, d => d.headOption))) newAppVersionDescription <- maybeCreateApplication(app, appVersionDescriptions) } yield newAppVersionDescription } def createEnvironment(config: EBSDeploymentConfiguration): Kleisli[Task, AWSElasticBeanstalk, CreateEnvironmentResult] = { Kleisli { client => Task.delay(client.createEnvironment(config.environment)) } } def createSourceBundle(config: EBSDeploymentConfiguration): Kleisli[Task, AmazonS3, PutObjectResult] = { Kleisli { client => val key = config.application.getSourceBundle.getS3Key val bucket = config.application.getSourceBundle.getS3Bucket def setupMetadata(bos: ByteArrayOutputStream): Task[ObjectMetadata] = { Task.delay { val metadata = new ObjectMetadata metadata.setContentType("application/zip") // we need to call new ByteArrayInputStream again, as checking the length reads the stream val contentBytes = IOUtils.toByteArray(new ByteArrayInputStream(bos.toByteArray)).length.toLong metadata.setContentLength(contentBytes) metadata } } def createZip(bos: ByteArrayOutputStream, dockerfile: Dockerfile, dockerrun: DockerrunAWSJson, customEnvSettings: Map[String, JsObject]) = { val zos = new ZipOutputStream(bos) Task.delay { zos.putNextEntry(new ZipEntry(dockerfile.getName)) zos.write(dockerfile.toString.getBytes) zos.closeEntry zos.putNextEntry(new ZipEntry(dockerrun.getName)) 
zos.write(Json.toJson(dockerrun).toString.getBytes) zos.closeEntry zos.putNextEntry(new ZipEntry(".ebextensions/01custom.config")) zos.write(play.api.libs.json.Json.toJson(customEnvSettings).toString().getBytes) zos.closeEntry } onFinish { _ => Task.delay(zos.close()) } } val bos = new ByteArrayOutputStream() for { _ <- createZip(bos, config.dockerfile, config.dockerrun, config.envCustomSettings) metadata <- setupMetadata(bos) inputStream = new ByteArrayInputStream(bos.toByteArray) } yield client.putObject(bucket, key, inputStream, metadata) } } def getSettings(config: EBSDeploymentConfiguration, env: CreateEnvironmentResult): Kleisli[Task, AWSClientCache, Seq[ConfigurationOptionSetting]] = { def getOptionSettings(configs: Seq[ConfigurationSettingsDescription]): Seq[ConfigurationOptionSetting] = { val optionSettings = for { desc <- configs.headOption.toSeq optionSetting <- desc.getOptionSettings.asScala } yield optionSetting optionSettings } val configurationSettings = ElasticBeanstalk.describeConfigurationSettings(config.application.getApplicationName, env.getEnvironmentName) val settings = configurationSettings.map(getOptionSettings) val configSetting = config.environment.getOptionSettings.asScala.toSeq settings.map(_.intersect(configSetting)) } } object EBSDeployer { def apply(timeline: TimelineName, domain: String, externalELB: Option[ExternalElbSettings], config: EBSSetup) = { new EBSDeployer(timeline, domain, externalELB, config) } }
browngeek666/ionroller
core/src/main/scala/ionroller/deployment/eb/EBSDeployer.scala
Scala
mit
5,512
package com.hindog.grid.repo

class ResourceNotAcceptedException(val resource: Resource, val repository: Repository, reason: String)
  extends RuntimeException(s"Resource $resource was rejected by repository $repository (reason: $reason)")

object ResourceNotAcceptedException {
  def readOnly(resource: Resource, repo: Repository): ResourceNotAcceptedException =
    new ResourceNotAcceptedException(resource, repo, s"Repository '$repo' is read-only")
}

class ResourceNotFoundException(val filename: String, val contentHash: String, val repository: Repository)
  extends RuntimeException(s"No resource matching filename '$filename' and content hash '$contentHash' was found in repository $repository")

class RepositoryException(val message: String, val cause: Throwable) extends RuntimeException(message, cause) {
  def this(message: String) = this(message, null)
}
hindog/grid-executor
grid-executor/src/main/scala/com/hindog/grid/repo/ResourceNotAcceptedException.scala
Scala
apache-2.0
864
package cgta.oscala
package extensions

//////////////////////////////////////////////////////////////
// Copyright (c) 2014 Ben Jackman
// All Rights Reserved
// please contact [email protected]
// for licensing inquiries
// Created by bjackman @ 8/11/14 3:27 AM
//////////////////////////////////////////////////////////////

class BooleanExtensions(val b: Boolean) extends AnyVal {
  @inline def ifElse[A](t: => A, f: => A): A = if (b) t else f
  @inline def trueToSome[A](t: => A): Option[A] = if (b) Some(t) else None
  @inline def falseToSome[A](f: => A): Option[A] = if (!b) Some(f) else None
}
cgta/open
oscala/shared/src/main/scala/cgta/oscala/extensions/BooleanExtensions.scala
Scala
mit
601
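A small usage sketch for the extension methods above; the library presumably wires in an implicit conversion from Boolean elsewhere, so the wrapper is constructed explicitly here to keep the snippet self-contained:

import cgta.oscala.extensions.BooleanExtensions

object BooleanExtensionsExample {
  def main(args: Array[String]): Unit = {
    // Explicit construction stands in for the implicit enrichment of Boolean.
    val hasArgs = new BooleanExtensions(args.nonEmpty)
    println(hasArgs.ifElse("got args", "no args"))
    println(hasArgs.trueToSome(args.length))   // Some(n) when args are present, else None
    println(hasArgs.falseToSome("defaulting")) // Some(...) only when args are absent
  }
}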
package ca.innovativemedicine.vcf.solr import org.apache.solr.common.SolrInputDocument import org.apache.solr.client.solrj.SolrServer import org.apache.solr.client.solrj.impl.ConcurrentUpdateSolrServer import scala.collection.mutable /** * Provides instances of `SolrServer`s that can be used to query and update * Solr. */ trait SolrServerProvider { /** * Given a Solr `core`, this gives `f` a `SolrServer` for that core. If * `core` is `None`, then the default core is used. Note that the SolrServer * is only valid inside `f`. * * TODO: It may be wise to split this up into 2 functions: one for use with * updates and one for reads, so different `SolrServer`s can be used. * However, I did check out the source, and `StreamingUpdateSolrServer` does * just fall back to a normal `SolrServer` for anything other than updates, * so I doubt there will be any gain in such a split for the time being. * * @param core The `Solr` core to use. * @param f The callback function that will use a `SolrServer`. */ def withSolrServer[A](core: Option[String] = None)(f: SolrServer => A): A } /** * Uses a Typesafe Config to determine the variables needed to connect to a * Solr server. */ trait ConfiguredSolrServerProvider extends SolrServerProvider { self: Configured => // Obviously the most important, the URL of the solr instance. private lazy val url = if (config.hasPath("solr.url")) { val u = config.getString("solr.url") if (u.endsWith("/")) u else (u + "/") } else "http://localhost:8080/solr/" // This is the batch size of the updates for Solr. private lazy val queueSize = if (config.hasPath("solr.update.queueSize")) { config.getInt("solr.update.queueSize") } else 1000 // This is the number of threads that will handle requests to Solr. private lazy val threads = if (config.hasPath("solr.update.threads")) { config.getInt("solr.update.threads") } else 2 def withSolrServer[A](core: Option[String] = None)(f: SolrServer => A): A = { val coreUrl = core map (url + _) getOrElse url val solr = new ConcurrentUpdateSolrServer(coreUrl, queueSize, threads) val result = f(solr) solr.commit() solr.blockUntilFinished() result } }
innovativemedicine/vcfimp
vcfimp-solr/src/main/scala/ca/innovativemedicine/vcf/solr/SolrServerProvider.scala
Scala
bsd-2-clause
2,261
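A hedged wiring sketch for the provider above. The Configured trait is not shown in this file, so it is assumed here to expose nothing more than a Typesafe Config named config; the core name and document fields are placeholders:

import com.typesafe.config.{Config, ConfigFactory}
import org.apache.solr.common.SolrInputDocument

// Assumption: `Configured` (not shown above) simply exposes a Typesafe Config as `config`.
object VariantsIndexer extends ConfiguredSolrServerProvider with Configured {
  val config: Config = ConfigFactory.load()

  def indexOne(): Unit = withSolrServer(Some("variants")) { solr => // "variants" core is a placeholder
    val doc = new SolrInputDocument()
    doc.addField("id", "chr1:12345") // placeholder fields
    doc.addField("ref", "A")
    solr.add(doc)
    // commit() and blockUntilFinished() are handled inside withSolrServer itself
  }
}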
package examples1

case class VarArgsCaseClass(args: String *)
pharmpress/codingdojo
scala-case/src/main/scala/examples1/VarArgsCaseClass.scala
Scala
apache-2.0
63
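A quick sketch of how the repeated parameter behaves; nothing beyond the case class above is assumed.

object VarArgsCaseClassDemo {
  import examples1.VarArgsCaseClass

  def main(args: Array[String]): Unit = {
    val vc = VarArgsCaseClass("a", "b", "c")
    println(vc.args.mkString(","))               // the repeated parameter is exposed as a Seq[String]: a,b,c
    println(VarArgsCaseClass(Seq("x", "y"): _*)) // an existing Seq can be spliced in with :_*
    vc match {                                   // and matched back out with a sequence wildcard
      case VarArgsCaseClass(first, rest @ _*) => println(s"$first then ${rest.size} more")
    }
  }
}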
package lila.video case class TagNb(_id: Tag, nb: Int) { def tag = _id def empty = nb == 0 def isNumeric = tag forall (_.isDigit) } case class Filter(tags: List[String]) { def toggle(tag: String) = copy( tags = if (tags contains tag) tags filter (tag !=) else tags :+ tag ) } case class UserControl( filter: Filter, tags: List[TagNb], query: Option[String], bot: Boolean ) { def toggleTag(tag: String) = copy( filter = filter toggle tag, query = none ) def queryString = List( filter.tags.nonEmpty option s"tags=${filter.tags.sorted mkString "/"}".replace(' ', '+'), query.map { q => s"q=$q" } ).flatten mkString "&" def queryStringUnlessBot = !bot ?? queryString }
luanlv/lila
modules/video/src/main/control.scala
Scala
mit
774
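Filter.toggle above is plain Scala, so its add/remove behaviour can be shown directly; only the Filter case class itself is used here.

object FilterToggleDemo {
  def main(args: Array[String]): Unit = {
    val f = lila.video.Filter(List("opening", "endgame"))
    println(f.toggle("endgame").tags)  // toggling an existing tag removes it: List(opening)
    println(f.toggle("tactics").tags)  // toggling a new tag appends it: List(opening, endgame, tactics)
  }
}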
/** * Copyright 2016 www.alaraph.com * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ /* * This is an example showing how to use Type-Parametric Segment Trees * to compute the avg() function * [email protected] */ package com.alaraph.util /** * @author mauromorelli */ object STAvg { case class Value(avg: Float, len:Integer) { override def toString = "(avg:%1$.3f, len=%d)".format(avg, len) } def intToValue(i: Int) = Value(i, 1) def fun(v1: Value, v2: Value): Value = { val len = (v1.len + v2.len).toFloat Value((v1.len/len)*v1.avg+(v2.len/len)*v2.avg, v1.len + v2.len) } import scala.io.StdIn.readLine def readInts: Vector[Int] = readLine.split(' ').map(_.toInt).toVector def main(args: Array[String]): Unit = { val Vector(nelem, nqueries) = readInts val sTree = SegmentTree.build(readInts, fun, intToValue) val queries = for (i <- 1 to nqueries) yield readInts queries.foreach(x => println("%1.2f".format(sTree.eval(x(0), x(1)).avg))) } }
maumorelli/alaraph
util/test/com/alaraph/util/ATAvg.scala
Scala
apache-2.0
1,571
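The merge function fun above is where the length-weighted averaging happens, and it can be checked in isolation without touching SegmentTree; the helpers below are exactly the ones defined on STAvg.

object STAvgFunDemo {
  import com.alaraph.util.STAvg.{fun, intToValue}

  def main(args: Array[String]): Unit = {
    val left  = fun(intToValue(1), intToValue(3)) // Value(avg = 2.0, len = 2)
    val right = intToValue(6)                     // Value(avg = 6.0, len = 1)
    // merging weights each side by its length: (2/3)*2 + (1/3)*6 = 3.333...
    println(fun(left, right))                     // (avg:3.333, len=3)
  }
}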
/* * Copyright 2017 helloscala.com * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package helloscala.common.data case class IdValue(id: Int, value: String)
helloscala/helloscala
hs-core/src/main/scala/helloscala/common/data/IdValue.scala
Scala
apache-2.0
674
/* sbt -- Simple Build Tool * Copyright 2009, 2010, 2011 Mark Harrah */ package xsbt.api import scala.util import xsbti.api._ import util.MurmurHash import TagTypeVariables.TypeVars import HashAPI.Hash object HashAPI { type Hash = Int def apply(a: SourceAPI): Hash = { /** de Bruijn levels for type parameters in source a and b*/ val tags = TagTypeVariables(a) (new HashAPI(tags, false, true)).hashAPI(a) } } final class HashAPI(tags: TypeVars, includePrivate: Boolean, includeParamNames: Boolean) { import scala.collection.mutable import MurmurHash._ private[this] val visitedStructures = visitedMap[Structure] private[this] val visitedClassLike = visitedMap[ClassLike] private[this] def visitedMap[T] = new mutable.HashMap[T, List[Hash]] private[this] def visit[T](map: mutable.Map[T, List[Hash]], t: T)(hashF: T => Unit) { map.put(t, hash :: map.getOrElse(t,Nil)) match { case Some(x :: _) => extend(x) case _ => hashF(t) for(hs <- map(t)) extend(hs) map.put(t, hash :: Nil) } } private[this] final val ValHash = 1 private[this] final val VarHash = 2 private[this] final val DefHash = 3 private[this] final val ClassHash = 4 private[this] final val TypeDeclHash = 5 private[this] final val TypeAliasHash = 6 private[this] final val PublicHash = 30 private[this] final val ProtectedHash = 31 private[this] final val PrivateHash = 32 private[this] final val UnqualifiedHash = 33 private[this] final val ThisQualifierHash = 34 private[this] final val IdQualifierHash = 35 private[this] final val IdPathHash = 20 private[this] final val SuperHash = 21 private[this] final val ThisPathHash = 22 private[this] final val ValueParamsHash = 40 private[this] final val ClassPendingHash = 41 private[this] final val StructurePendingHash = 42 private[this] final val EmptyTypeHash = 51 private[this] final val ParameterRefHash = 52 private[this] final val SingletonHash = 53 private[this] final val ProjectionHash = 54 private[this] final val ParameterizedHash = 55 private[this] final val AnnotatedHash = 56 private[this] final val PolymorphicHash = 57 private[this] final val ConstantHash = 58 private[this] final val ExistentialHash = 59 private[this] final val StructureHash = 60 private[this] final val TrueHash = 97 private[this] final val FalseHash = 98 private[this] var hash: Hash = startHash(0) private[this] var magicA: Hash = startMagicA private[this] var magicB: Hash = startMagicB @inline final def hashString(s: String): Unit = extend(stringHash(s)) @inline final def hashBoolean(b: Boolean): Unit = extend(if(b) TrueHash else FalseHash) @inline final def hashSeq[T](s: Seq[T], hashF: T => Unit) { extend(s.length) s foreach hashF } final def hashSymmetric[T](ts: TraversableOnce[T], hashF: T => Unit) { val current = hash val mA = magicA val mB = magicB val (hashes, mAs, mBs) = ts.toList.map { t => hash = startHash(1) magicA = startMagicA magicB = startMagicB hashF(t) (finalizeHash(hash), magicA, magicB) } unzip3; hash = current magicA = mA magicB = mB extend(symmetricHash(hashes, 0xb592f7ae)) // constant from MurmurHash3 } @inline final def extend(a: Hash) { hash = extendHash(hash, a, magicA, magicB) magicA = nextMagicA(magicA) magicB = nextMagicB(magicB) } def hashModifiers(m: Modifiers) = extend(m.raw) def hashAPI(s: SourceAPI): Hash = { hash = startHash(0) hashSymmetric(s.packages, hashPackage) hashDefinitions(s.definitions, true) finalizeHash(hash) } def hashPackage(p: Package) = hashString(p.name) def hashDefinitions(ds: Seq[Definition], topLevel: Boolean): Unit = { val defs = SameAPI.filterDefinitions(ds, topLevel, 
includePrivate) hashSymmetric(defs, hashDefinition) } def hashDefinition(d: Definition) { hashString(d.name) hashAnnotations(d.annotations) hashModifiers(d.modifiers) hashAccess(d.access) d match { case c: ClassLike => hashClass(c) case f: FieldLike => hashField(f) case d: Def => hashDef(d) case t: TypeDeclaration => hashTypeDeclaration(t) case t: TypeAlias => hashTypeAlias(t) } } final def hashClass(c: ClassLike): Unit = visit(visitedClassLike, c)(hashClass0) def hashClass0(c: ClassLike) { extend(ClassHash) hashParameterizedDefinition(c) hashType(c.selfType) hashStructure(c.structure) } def hashField(f: FieldLike) { f match { case v: Var => extend(VarHash) case v: Val => extend(ValHash) } hashType(f.tpe) } def hashDef(d: Def) { extend(DefHash) hashParameterizedDefinition(d) hashValueParameters(d.valueParameters) hashType(d.returnType) } def hashAccess(a: Access): Unit = a match { case pub: Public => extend(PublicHash) case qual: Qualified => hashQualified(qual) } def hashQualified(qual: Qualified): Unit = { qual match { case p: Protected => extend(ProtectedHash) case p: Private => extend(PrivateHash) } hashQualifier(qual.qualifier) } def hashQualifier(qual: Qualifier): Unit = qual match { case _: Unqualified => extend(UnqualifiedHash) case _: ThisQualifier => extend(ThisQualifierHash) case id: IdQualifier => extend(IdQualifierHash) hashString(id.value) } def hashValueParameters(valueParameters: Seq[ParameterList]) = hashSeq(valueParameters, hashValueParameterList) def hashValueParameterList(list: ParameterList) = { extend(ValueParamsHash) hashBoolean(list.isImplicit) hashSeq(list.parameters, hashValueParameter) } def hashValueParameter(parameter: MethodParameter) = { hashString(parameter.name) hashType(parameter.tpe) extend(parameter.modifier.ordinal) hashBoolean(parameter.hasDefault) } def hashParameterizedDefinition[T <: ParameterizedDefinition](d: T) { hashTypeParameters(d.typeParameters) } def hashTypeDeclaration(d: TypeDeclaration) { extend(TypeDeclHash) hashParameterizedDefinition(d) hashType(d.lowerBound) hashType(d.upperBound) } def hashTypeAlias(d: TypeAlias) { extend(TypeAliasHash) hashParameterizedDefinition(d) hashType(d.tpe) } def hashTypeParameters(parameters: Seq[TypeParameter]) = hashSeq(parameters, hashTypeParameter) def hashTypeParameter(parameter: TypeParameter) { extend(parameter.variance.ordinal) hashTypeParameters(parameter.typeParameters) hashType(parameter.lowerBound) hashType(parameter.upperBound) hashAnnotations(parameter.annotations) } def hashAnnotations(annotations: Seq[Annotation]) = hashSeq(annotations, hashAnnotation) def hashAnnotation(annotation: Annotation) = { hashType(annotation.base) hashAnnotationArguments(annotation.arguments) } def hashAnnotationArguments(args: Seq[AnnotationArgument]) = hashSeq(args, hashAnnotationArgument) def hashAnnotationArgument(arg: AnnotationArgument) { hashString(arg.name) hashString(arg.value) } def hashTypes(ts: Seq[Type]) = hashSeq(ts, hashType) def hashType(t: Type): Unit = t match { case s: Structure => hashStructure(s) case e: Existential => hashExistential(e) case c: Constant => hashConstant(c) case p: Polymorphic => hashPolymorphic(p) case a: Annotated => hashAnnotated(a) case p: Parameterized => hashParameterized(p) case p: Projection => hashProjection(p) case _: EmptyType => extend(EmptyTypeHash) case s: Singleton => hashSingleton(s) case pr: ParameterRef => hashParameterRef(pr) } def hashParameterRef(p: ParameterRef) { extend(ParameterRefHash) tags.get(p.id) match { case Some((a,b)) => extend(a); extend(b) case 
None => extend(-1) } } def hashSingleton(s: Singleton) { extend(SingletonHash) hashPath(s.path) } def hashPath(path: Path) = hashSeq(path.components, hashPathComponent) def hashPathComponent(pc: PathComponent) = pc match { case _: This => extend(ThisPathHash) case s: Super => hashSuperPath(s) case id: Id => hashIdPath(id) } def hashSuperPath(s: Super) { extend(SuperHash) hashPath(s.qualifier) } def hashIdPath(id: Id) { extend(IdPathHash) hashString(id.id) } def hashConstant(c: Constant) = { extend(ConstantHash) hashString(c.value) hashType(c.baseType) } def hashExistential(e: Existential) = { extend(ExistentialHash) hashParameters(e.clause, e.baseType) } def hashPolymorphic(p: Polymorphic) = { extend(PolymorphicHash) hashParameters(p.parameters, p.baseType) } def hashProjection(p: Projection) = { extend(ProjectionHash) hashString(p.id) hashType(p.prefix) } def hashParameterized(p: Parameterized) { extend(ParameterizedHash) hashType(p.baseType) hashTypes(p.typeArguments) } def hashAnnotated(a: Annotated) { extend(AnnotatedHash) hashType(a.baseType) hashAnnotations(a.annotations) } final def hashStructure(structure: Structure) = visit(visitedStructures, structure)(hashStructure0) def hashStructure0(structure: Structure) { extend(StructureHash) hashTypes(structure.parents) hashDefinitions(structure.declared, false) hashDefinitions(structure.inherited, false) } def hashParameters(parameters: Seq[TypeParameter], base: Type): Unit = { hashTypeParameters(parameters) hashType(base) } }
kuochaoyi/xsbt
compile/api/HashAPI.scala
Scala
bsd-3-clause
9,144
package wom.types import spray.json.{JsNumber, JsString} import wom.values.{WomFloat, WomString} import scala.util.{Success, Try} case object WomFloatType extends WomPrimitiveType { val toDisplayString: String = "Float" override protected def coercion = { case f: Float => WomFloat(f.toDouble) case d: Double => WomFloat(d) case n: JsNumber => WomFloat(n.value.doubleValue()) case f: WomFloat => f case s: String => WomFloat(s.toDouble) case s: JsString => WomFloat(s.value.toDouble) case s: WomString => WomFloat(s.value.toDouble) } private def binaryOperator(rhs: WomType, symbol: String): Try[WomType] = rhs match { case WomIntegerType => Success(WomFloatType) case WomFloatType => Success(WomFloatType) case WomOptionalType(memberType) => binaryOperator(memberType, symbol) case _ => invalid(s"$this $symbol $rhs") } private def comparisonOperator(rhs: WomType, symbol: String): Try[WomType] = rhs match { case WomIntegerType => Success(WomBooleanType) case WomFloatType => Success(WomBooleanType) case WomOptionalType(memberType) => comparisonOperator(memberType, symbol) case _ => invalid(s"$this $symbol $rhs") } override def add(rhs: WomType): Try[WomType] = rhs match { case WomStringType => Success(WomStringType) case WomOptionalType(memberType) => add(memberType) case t => binaryOperator(t, "+") } override def subtract(rhs: WomType): Try[WomType] = binaryOperator(rhs, "-") override def multiply(rhs: WomType): Try[WomType] = binaryOperator(rhs, "*") override def divide(rhs: WomType): Try[WomType] = binaryOperator(rhs, "/") override def mod(rhs: WomType): Try[WomType] = binaryOperator(rhs, "%") override def equals(rhs: WomType): Try[WomType] = comparisonOperator(rhs, "==") override def lessThan(rhs: WomType): Try[WomType] = comparisonOperator(rhs, "<") override def greaterThan(rhs: WomType): Try[WomType] = comparisonOperator(rhs, ">") override def unaryPlus: Try[WomType] = Success(WomFloatType) override def unaryMinus: Try[WomType] = Success(WomFloatType) }
ohsu-comp-bio/cromwell
wom/src/main/scala/wom/types/WomFloatType.scala
Scala
bsd-3-clause
2,098
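The public operators above encode the type-combination rules for Float, which a small sketch can make concrete; every type referenced here (WomIntegerType, WomStringType, WomBooleanType, WomOptionalType) already appears in the source above.

object WomFloatTypeDemo {
  import wom.types._

  def main(args: Array[String]): Unit = {
    println(WomFloatType.add(WomIntegerType))                      // Success(WomFloatType): Float + Int widens to Float
    println(WomFloatType.add(WomStringType))                       // Success(WomStringType): string concatenation special case
    println(WomFloatType.lessThan(WomOptionalType(WomFloatType)))  // Success(WomBooleanType): optionals are unwrapped first
    println(WomFloatType.add(WomBooleanType).isFailure)            // true: anything else is rejected
  }
}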
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.storage import java.nio.ByteBuffer import java.util.concurrent.LinkedBlockingQueue import scala.collection.mutable.ArrayBuffer import scala.collection.mutable.HashSet import scala.collection.mutable.Queue import io.netty.buffer.ByteBuf import org.apache.spark.Logging import org.apache.spark.SparkException import org.apache.spark.network.BufferMessage import org.apache.spark.network.ConnectionManagerId import org.apache.spark.network.netty.ShuffleCopier import org.apache.spark.serializer.Serializer import org.apache.spark.util.Utils /** * A block fetcher iterator interface. There are two implementations: * * BasicBlockFetcherIterator: uses a custom-built NIO communication layer. * NettyBlockFetcherIterator: uses Netty (OIO) as the communication layer. * * Eventually we would like the two to converge and use a single NIO-based communication layer, * but extensive tests show that under some circumstances (e.g. large shuffles with lots of cores), * NIO would perform poorly and thus the need for the Netty OIO one. */ private[storage] trait BlockFetcherIterator extends Iterator[(BlockId, Option[Iterator[Any]])] with Logging with BlockFetchTracker { def initialize() } private[storage] object BlockFetcherIterator { // A request to fetch one or more blocks, complete with their sizes class FetchRequest(val address: BlockManagerId, val blocks: Seq[(BlockId, Long)]) { val size = blocks.map(_._2).sum } // A result of a fetch. Includes the block ID, size in bytes, and a function to deserialize // the block (since we want all deserializaton to happen in the calling thread); can also // represent a fetch failure if size == -1. class FetchResult(val blockId: BlockId, val size: Long, val deserialize: () => Iterator[Any]) { def failed: Boolean = size == -1 } class BasicBlockFetcherIterator( private val blockManager: BlockManager, val blocksByAddress: Seq[(BlockManagerId, Seq[(BlockId, Long)])], serializer: Serializer) extends BlockFetcherIterator { import blockManager._ private var _remoteBytesRead = 0l private var _remoteFetchTime = 0l private var _fetchWaitTime = 0l if (blocksByAddress == null) { throw new IllegalArgumentException("BlocksByAddress is null") } // Total number blocks fetched (local + remote). Also number of FetchResults expected protected var _numBlocksToFetch = 0 protected var startTime = System.currentTimeMillis // This represents the number of local blocks, also counting zero-sized blocks private var numLocal = 0 // BlockIds for local blocks that need to be fetched. 
Excludes zero-sized blocks protected val localBlocksToFetch = new ArrayBuffer[BlockId]() // This represents the number of remote blocks, also counting zero-sized blocks private var numRemote = 0 // BlockIds for remote blocks that need to be fetched. Excludes zero-sized blocks protected val remoteBlocksToFetch = new HashSet[BlockId]() // A queue to hold our results. protected val results = new LinkedBlockingQueue[FetchResult] // Queue of fetch requests to issue; we'll pull requests off this gradually to make sure that // the number of bytes in flight is limited to maxBytesInFlight private val fetchRequests = new Queue[FetchRequest] // Current bytes in flight from our requests private var bytesInFlight = 0L protected def sendRequest(req: FetchRequest) { logDebug("Sending request for %d blocks (%s) from %s".format( req.blocks.size, Utils.bytesToString(req.size), req.address.hostPort)) val cmId = new ConnectionManagerId(req.address.host, req.address.port) val blockMessageArray = new BlockMessageArray(req.blocks.map { case (blockId, size) => BlockMessage.fromGetBlock(GetBlock(blockId)) }) bytesInFlight += req.size val sizeMap = req.blocks.toMap // so we can look up the size of each blockID val fetchStart = System.currentTimeMillis() val future = connectionManager.sendMessageReliably(cmId, blockMessageArray.toBufferMessage) future.onSuccess { case Some(message) => { val fetchDone = System.currentTimeMillis() _remoteFetchTime += fetchDone - fetchStart val bufferMessage = message.asInstanceOf[BufferMessage] val blockMessageArray = BlockMessageArray.fromBufferMessage(bufferMessage) for (blockMessage <- blockMessageArray) { if (blockMessage.getType != BlockMessage.TYPE_GOT_BLOCK) { throw new SparkException( "Unexpected message " + blockMessage.getType + " received from " + cmId) } val blockId = blockMessage.getId val networkSize = blockMessage.getData.limit() results.put(new FetchResult(blockId, sizeMap(blockId), () => dataDeserialize(blockId, blockMessage.getData, serializer))) _remoteBytesRead += networkSize logDebug("Got remote block " + blockId + " after " + Utils.getUsedTimeMs(startTime)) } } case None => { logError("Could not get block(s) from " + cmId) for ((blockId, size) <- req.blocks) { results.put(new FetchResult(blockId, -1, null)) } } } } protected def splitLocalRemoteBlocks(): ArrayBuffer[FetchRequest] = { // Split local and remote blocks. Remote blocks are further split into FetchRequests of size // at most maxBytesInFlight in order to limit the amount of data in flight. val remoteRequests = new ArrayBuffer[FetchRequest] for ((address, blockInfos) <- blocksByAddress) { if (address == blockManagerId) { numLocal = blockInfos.size // Filter out zero-sized blocks localBlocksToFetch ++= blockInfos.filter(_._2 != 0).map(_._1) _numBlocksToFetch += localBlocksToFetch.size } else { numRemote += blockInfos.size // Make our requests at least maxBytesInFlight / 5 in length; the reason to keep them // smaller than maxBytesInFlight is to allow multiple, parallel fetches from up to 5 // nodes, rather than blocking on reading output from one node. 
val minRequestSize = math.max(maxBytesInFlight / 5, 1L) logInfo("maxBytesInFlight: " + maxBytesInFlight + ", minRequest: " + minRequestSize) val iterator = blockInfos.iterator var curRequestSize = 0L var curBlocks = new ArrayBuffer[(BlockId, Long)] while (iterator.hasNext) { val (blockId, size) = iterator.next() // Skip empty blocks if (size > 0) { curBlocks += ((blockId, size)) remoteBlocksToFetch += blockId _numBlocksToFetch += 1 curRequestSize += size } else if (size < 0) { throw new BlockException(blockId, "Negative block size " + size) } if (curRequestSize >= minRequestSize) { // Add this FetchRequest remoteRequests += new FetchRequest(address, curBlocks) curRequestSize = 0 curBlocks = new ArrayBuffer[(BlockId, Long)] } } // Add in the final request if (!curBlocks.isEmpty) { remoteRequests += new FetchRequest(address, curBlocks) } } } logInfo("Getting " + _numBlocksToFetch + " non-zero-bytes blocks out of " + totalBlocks + " blocks") remoteRequests } protected def getLocalBlocks() { // Get the local blocks while remote blocks are being fetched. Note that it's okay to do // these all at once because they will just memory-map some files, so they won't consume // any memory that might exceed our maxBytesInFlight for (id <- localBlocksToFetch) { getLocalFromDisk(id, serializer) match { case Some(iter) => { // Pass 0 as size since it's not in flight results.put(new FetchResult(id, 0, () => iter)) logDebug("Got local block " + id) } case None => { throw new BlockException(id, "Could not get block " + id + " from local machine") } } } } override def initialize() { // Split local and remote blocks. val remoteRequests = splitLocalRemoteBlocks() // Add the remote requests into our queue in a random order fetchRequests ++= Utils.randomize(remoteRequests) // Send out initial requests for blocks, up to our maxBytesInFlight while (!fetchRequests.isEmpty && (bytesInFlight == 0 || bytesInFlight + fetchRequests.front.size <= maxBytesInFlight)) { sendRequest(fetchRequests.dequeue()) } val numGets = remoteRequests.size - fetchRequests.size logInfo("Started " + numGets + " remote gets in " + Utils.getUsedTimeMs(startTime)) // Get Local Blocks startTime = System.currentTimeMillis getLocalBlocks() logDebug("Got local blocks in " + Utils.getUsedTimeMs(startTime) + " ms") } //an iterator that will read fetched blocks off the queue as they arrive. @volatile protected var resultsGotten = 0 override def hasNext: Boolean = resultsGotten < _numBlocksToFetch override def next(): (BlockId, Option[Iterator[Any]]) = { resultsGotten += 1 val startFetchWait = System.currentTimeMillis() val result = results.take() val stopFetchWait = System.currentTimeMillis() _fetchWaitTime += (stopFetchWait - startFetchWait) if (! result.failed) bytesInFlight -= result.size while (!fetchRequests.isEmpty && (bytesInFlight == 0 || bytesInFlight + fetchRequests.front.size <= maxBytesInFlight)) { sendRequest(fetchRequests.dequeue()) } (result.blockId, if (result.failed) None else Some(result.deserialize())) } // Implementing BlockFetchTracker trait. 
override def totalBlocks: Int = numLocal + numRemote override def numLocalBlocks: Int = numLocal override def numRemoteBlocks: Int = numRemote override def remoteFetchTime: Long = _remoteFetchTime override def fetchWaitTime: Long = _fetchWaitTime override def remoteBytesRead: Long = _remoteBytesRead } // End of BasicBlockFetcherIterator class NettyBlockFetcherIterator( blockManager: BlockManager, blocksByAddress: Seq[(BlockManagerId, Seq[(BlockId, Long)])], serializer: Serializer) extends BasicBlockFetcherIterator(blockManager, blocksByAddress, serializer) { import blockManager._ val fetchRequestsSync = new LinkedBlockingQueue[FetchRequest] private def startCopiers(numCopiers: Int): List[_ <: Thread] = { (for ( i <- Range(0,numCopiers) ) yield { val copier = new Thread { override def run(){ try { while(!isInterrupted && !fetchRequestsSync.isEmpty) { sendRequest(fetchRequestsSync.take()) } } catch { case x: InterruptedException => logInfo("Copier Interrupted") //case _ => throw new SparkException("Exception Throw in Shuffle Copier") } } } copier.start copier }).toList } // keep this to interrupt the threads when necessary private def stopCopiers() { for (copier <- copiers) { copier.interrupt() } } override protected def sendRequest(req: FetchRequest) { def putResult(blockId: BlockId, blockSize: Long, blockData: ByteBuf) { val fetchResult = new FetchResult(blockId, blockSize, () => dataDeserialize(blockId, blockData.nioBuffer, serializer)) results.put(fetchResult) } logDebug("Sending request for %d blocks (%s) from %s".format( req.blocks.size, Utils.bytesToString(req.size), req.address.host)) val cmId = new ConnectionManagerId(req.address.host, req.address.nettyPort) val cpier = new ShuffleCopier(blockManager.conf) cpier.getBlocks(cmId, req.blocks, putResult) logDebug("Sent request for remote blocks " + req.blocks + " from " + req.address.host ) } private var copiers: List[_ <: Thread] = null override def initialize() { // Split Local Remote Blocks and set numBlocksToFetch val remoteRequests = splitLocalRemoteBlocks() // Add the remote requests into our queue in a random order for (request <- Utils.randomize(remoteRequests)) { fetchRequestsSync.put(request) } copiers = startCopiers(conf.getInt("spark.shuffle.copier.threads", 6)) logInfo("Started " + fetchRequestsSync.size + " remote gets in " + Utils.getUsedTimeMs(startTime)) // Get Local Blocks startTime = System.currentTimeMillis getLocalBlocks() logDebug("Got local blocks in " + Utils.getUsedTimeMs(startTime) + " ms") } override def next(): (BlockId, Option[Iterator[Any]]) = { resultsGotten += 1 val result = results.take() // If all the results has been retrieved, copiers will exit automatically (result.blockId, if (result.failed) None else Some(result.deserialize())) } } // End of NettyBlockFetcherIterator }
dotunolafunmiloye/spark
core/src/main/scala/org/apache/spark/storage/BlockFetcherIterator.scala
Scala
apache-2.0
14,163
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.planner.plan.rules.physical.batch import org.apache.flink.table.planner.plan.nodes.physical.batch.{BatchExecExchange, BatchExecExpand, BatchExecHashAggregate} import org.apache.calcite.plan.RelOptRule.{any, operand} import org.apache.calcite.plan.RelOptRuleCall /** * An [[EnforceLocalAggRuleBase]] that matches [[BatchExecHashAggregate]] * * for example: select count(*) from t group by rollup (a, b) * The physical plan * * {{{ * HashAggregate(isMerge=[false], groupBy=[a, b, $e], select=[a, b, $e, COUNT(*)]) * +- Exchange(distribution=[hash[a, b, $e]]) * +- Expand(projects=[{a=[$0], b=[$1], $e=[0]}, * {a=[$0], b=[null], $e=[1]}, * {a=[null], b=[null], $e=[3]}]) * }}} * * will be rewritten to * * {{{ * HashAggregate(isMerge=[true], groupBy=[a, b, $e], select=[a, b, $e, Final_COUNT(count1$0)]) * +- Exchange(distribution=[hash[a, b, $e]]) * +- LocalHashAggregate(groupBy=[a, b, $e], select=[a, b, $e, Partial_COUNT(*) AS count1$0] * +- Expand(projects=[{a=[$0], b=[$1], $e=[0]}, * {a=[$0], b=[null], $e=[1]}, * {a=[null], b=[null], $e=[3]}]) * }}} */ class EnforceLocalHashAggRule extends EnforceLocalAggRuleBase( operand(classOf[BatchExecHashAggregate], operand(classOf[BatchExecExchange], operand(classOf[BatchExecExpand], any))), "EnforceLocalHashAggRule") { override def matches(call: RelOptRuleCall): Boolean = { val agg: BatchExecHashAggregate = call.rel(0) val expand: BatchExecExpand = call.rel(2) val enableTwoPhaseAgg = isTwoPhaseAggEnabled(agg) val grouping = agg.getGrouping val constantShuffleKey = hasConstantShuffleKey(grouping, expand) grouping.nonEmpty && enableTwoPhaseAgg && constantShuffleKey } override def onMatch(call: RelOptRuleCall): Unit = { val agg: BatchExecHashAggregate = call.rel(0) val expand: BatchExecExpand = call.rel(2) val localAgg = createLocalAgg(agg, expand, call.builder) val exchange = createExchange(agg, localAgg) val globalAgg = createGlobalAgg(agg, exchange, call.builder) call.transformTo(globalAgg) } } object EnforceLocalHashAggRule { val INSTANCE = new EnforceLocalHashAggRule }
hequn8128/flink
flink-table/flink-table-planner-blink/src/main/scala/org/apache/flink/table/planner/plan/rules/physical/batch/EnforceLocalHashAggRule.scala
Scala
apache-2.0
3,115
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.table.api.stream.table.stringexpr import org.apache.flink.api.scala._ import org.apache.flink.table.api.scala._ import org.apache.flink.table.functions.aggfunctions.CountAggFunction import org.apache.flink.table.runtime.utils.JavaUserDefinedAggFunctions.{WeightedAvg, WeightedAvgWithMergeAndReset} import org.apache.flink.table.utils.TableTestBase import org.junit.Test class AggregateStringExpressionTest extends TableTestBase { @Test def testDistinctNonGroupedAggregate(): Unit = { val util = streamTestUtil() val t = util.addTable[(Int, Long, String)]("Table3") val t1 = t.select('_1.sum.distinct, '_1.count.distinct, '_1.avg.distinct) val t2 = t.select("_1.sum.distinct, _1.count.distinct, _1.avg.distinct") val t3 = t.select("sum.distinct(_1), count.distinct(_1), avg.distinct(_1)") verifyTableEquals(t1, t2) verifyTableEquals(t1, t3) } @Test def testDistinctGroupedAggregate(): Unit = { val util = streamTestUtil() val t = util.addTable[(Int, Long, String)]("Table3", 'a, 'b, 'c) val t1 = t.groupBy('b).select('b, 'a.sum.distinct, 'a.sum) val t2 = t.groupBy("b").select("b, a.sum.distinct, a.sum") val t3 = t.groupBy("b").select("b, sum.distinct(a), sum(a)") verifyTableEquals(t1, t2) verifyTableEquals(t1, t3) } @Test def testDistinctNonGroupAggregateWithUDAGG(): Unit = { val util = streamTestUtil() val t = util.addTable[(Int, Long, String)]("Table3", 'a, 'b, 'c) val myCnt = new CountAggFunction util.tableEnv.registerFunction("myCnt", myCnt) val myWeightedAvg = new WeightedAvgWithMergeAndReset util.tableEnv.registerFunction("myWeightedAvg", myWeightedAvg) val t1 = t.select(myCnt.distinct('a) as 'aCnt, myWeightedAvg.distinct('b, 'a) as 'wAvg) val t2 = t.select("myCnt.distinct(a) as aCnt, myWeightedAvg.distinct(b, a) as wAvg") verifyTableEquals(t1, t2) } @Test def testDistinctGroupedAggregateWithUDAGG(): Unit = { val util = streamTestUtil() val t = util.addTable[(Int, Long, String)]("Table3", 'a, 'b, 'c) val myCnt = new CountAggFunction util.tableEnv.registerFunction("myCnt", myCnt) val myWeightedAvg = new WeightedAvgWithMergeAndReset util.tableEnv.registerFunction("myWeightedAvg", myWeightedAvg) val t1 = t.groupBy('b) .select('b, myCnt.distinct('a) + 9 as 'aCnt, myWeightedAvg.distinct('b, 'a) * 2 as 'wAvg, myWeightedAvg.distinct('a, 'a) as 'distAgg, myWeightedAvg('a, 'a) as 'agg) val t2 = t.groupBy("b") .select("b, myCnt.distinct(a) + 9 as aCnt, myWeightedAvg.distinct(b, a) * 2 as wAvg, " + "myWeightedAvg.distinct(a, a) as distAgg, myWeightedAvg(a, a) as agg") verifyTableEquals(t1, t2) } @Test def testGroupedAggregate(): Unit = { val util = streamTestUtil() val t = util.addTable[(Int, Long, String)]('int, 'long, 'string) val weightAvgFun = new WeightedAvg util.tableEnv.registerFunction("weightAvgFun", 
weightAvgFun) // Expression / Scala API val resScala = t .groupBy('string) .select('int.count as 'cnt, weightAvgFun('long, 'int)) // String / Java API val resJava = t .groupBy("string") .select("int.count as cnt, weightAvgFun(long, int)") verifyTableEquals(resJava, resScala) } @Test def testNonGroupedAggregate(): Unit = { val util = streamTestUtil() val t = util.addTable[(Int, Long, String)]('int, 'long, 'string) // Expression / Scala API val resScala = t.select('int.count as 'cnt, 'long.sum) // String / Java API val resJava = t.select("int.count as cnt, long.sum") verifyTableEquals(resJava, resScala) } }
mylog00/flink
flink-libraries/flink-table/src/test/scala/org/apache/flink/table/api/stream/table/stringexpr/AggregateStringExpressionTest.scala
Scala
apache-2.0
4,513
/* * Copyright (c) 2014 Robert Conrad - All Rights Reserved. * Unauthorized copying of this file, via any medium is strictly prohibited. * This file is proprietary and confidential. * Last modified by rconrad, 12/24/14 6:06 PM */ package base.entity.auth.context /** * Parameters for setting auth context values * @author rconrad */ case class AuthContextParams() object AuthContextParams { }
robconrad/base-api
project-entity/src/main/scala/base/entity/auth/context/AuthContextParams.scala
Scala
mit
405
package gv.jleon package facade trait JLeonImports { type Mirror = domain.Mirror val Mirror = domain.Mirror type Storage = domain.Storage val Storage = domain.Storage type FetchManager = domain.FetchManager val FetchManager = domain.FetchManager type MirrorRepository = domain.MirrorRepository type FetchRepository = domain.FetchRepository type LockingStorage = domain.LockingStorage type JLeon = facade.JLeon val JLeon = facade.JLeon }
mouchtaris/jleon
src/main/scala-2.12/gv/jleon/facade/JLeonImports.scala
Scala
mit
466
package basic.bundles

import scala.reflect.macros.blackbox.Context

/**
 * Implementation of a macro bundle.
 *
 * http://docs.scala-lang.org/ja/overviews/macros/basic.bundles.html
 */
class BundlesSample01Impl(val c: Context) {
  def mono = c.literalUnit
  def poly[T: c.WeakTypeTag] = c.literal(c.weakTypeOf[T].toString)
}

object BundlesSample01 {
  // In IDEA 13 + Scala plugin 0.31.437 the BundlesSample01Impl references below are highlighted in red
  // with the error "Cannot resolve symbol BundlesSample01Impl".
  def mono = macro BundlesSample01Impl.mono
  def poly[T] = macro BundlesSample01Impl.poly[T]
}
thachi/scala-macro-sample
macro/src/main/scala/basic/bundles/BundlesSample01.scala
Scala
apache-2.0
603
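Because macro implementations must be compiled before their call sites, a usage sketch for the bundle above has to live in a separate compilation run and needs the macros language feature enabled (for example via -language:experimental.macros); the printed type string below is indicative only.

object BundlesSample01Demo {
  def main(args: Array[String]): Unit = {
    BundlesSample01.mono                     // expands to the unit literal ()
    println(BundlesSample01.poly[List[Int]]) // expands to a string literal naming the type, e.g. "List[Int]"
  }
}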
package wandou.math.timeseries

import akka.actor.Actor
import wandou.math.timeseries.descriptor.Content
import wandou.actors.Publisher

/**
 *
 * @author Caoyuan Deng
 */
trait Thing extends Actor with Publisher {
  def identifier: String
  def name: String
  def description: String
  def description_=(description: String)

  def serOf(freq: TFreq): Option[TSer]

  /**
   * The content of each symbol should be obtained automatically from PersistenceManager.restoreContent
   * and kept there without being reassigned to another one, so we only provide a getter without a setter.
   */
  def content: Content

  /**
   * A helper method that can be overridden to get another ser provider from an identifier.
   */
  def thingOf(identifier: String): Option[Thing] = None
}
wandoulabs/wandou-math
wandou-math/src/main/scala/wandou/math/timeseries/Thing.scala
Scala
apache-2.0
765
/*********************************************************************** * Copyright (c) 2013-2022 Commonwealth Computer Research, Inc. * All rights reserved. This program and the accompanying materials * are made available under the terms of the Apache License, Version 2.0 * which accompanies this distribution and is available at * http://www.opensource.org/licenses/apache2.0.php. ***********************************************************************/ package org.locationtech.geomesa.tools.status import com.beust.jcommander.Parameters import org.locationtech.geomesa.tools.Command import org.locationtech.geomesa.tools.status.VersionCommand.VersionParameters import org.locationtech.geomesa.utils.conf.GeoMesaProperties class VersionCommand extends Command { override val name = "version" override val params = new VersionParameters override def execute(): Unit = { import GeoMesaProperties._ Command.output.info(s"GeoMesa tools version: $ProjectVersion") Command.output.info(s"Commit ID: $GitCommit") Command.output.info(s"Branch: $GitBranch") Command.output.info(s"Build date: $BuildDate") } } object VersionCommand { @Parameters(commandDescription = "Display the GeoMesa version installed locally") class VersionParameters {} }
locationtech/geomesa
geomesa-tools/src/main/scala/org/locationtech/geomesa/tools/status/VersionCommand.scala
Scala
apache-2.0
1,283
package org.jetbrains.sbt.language.completion import com.intellij.codeInsight.completion._ import com.intellij.codeInsight.lookup.{LookupElement, LookupElementBuilder} import com.intellij.codeInsight.template._ import com.intellij.codeInsight.template.impl.ConstantNode import com.intellij.openapi.project.Project import com.intellij.openapi.util.TextRange import com.intellij.patterns.PlatformPatterns.psiElement import com.intellij.psi.PsiDocumentManager import com.intellij.util.ProcessingContext import org.jetbrains.plugins.scala.extensions.PsiElementExt import org.jetbrains.plugins.scala.lang.completion.{CaptureExt, PsiElementPatternExt, positionFromParameters} import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes import org.jetbrains.plugins.scala.lang.psi.api.base.literals.ScStringLiteral import org.jetbrains.plugins.scala.lang.psi.api.expr.ScReferenceExpression import org.jetbrains.plugins.scala.lang.psi.impl.ScalaPsiElementFactory import org.jetbrains.plugins.scala.project.ScalaLanguageLevel import org.jetbrains.sbt.language.completion.SbtScalacOptionsCompletionContributor._ import org.jetbrains.sbt.language.psi.SbtScalacOptionDocHolder import org.jetbrains.sbt.language.utils.SbtScalacOptionInfo.ArgType import org.jetbrains.sbt.language.utils.{SbtScalacOptionInfo, SbtScalacOptionUtils} import scala.jdk.CollectionConverters._ class SbtScalacOptionsCompletionContributor extends CompletionContributor { extend(CompletionType.BASIC, PATTERN, completionProvider) } object SbtScalacOptionsCompletionContributor { private val PATTERN = (SbtPsiElementPatterns.sbtFilePattern || SbtPsiElementPatterns.scalaFilePattern) && (psiElement.inside(SbtPsiElementPatterns.scalacOptionsReferencePattern).notAfterLeafSkippingWhitespaceComment(ScalaTokenTypes.tDOT) || psiElement.inside(SbtPsiElementPatterns.scalacOptionsStringLiteralPattern)) private val completionProvider = new CompletionProvider[CompletionParameters] { override def addCompletions(parameters: CompletionParameters, context: ProcessingContext, resultSet: CompletionResultSet): Unit = { val place = positionFromParameters(parameters) implicit val project: Project = place.getProject val cleanPrefix = place.getText .stripPrefix("\\"") .dropWhile(_ == '-') // remove '-', '--', etc. 
from search .stripSuffix("\\"") .replace(CompletionInitializationContext.DUMMY_IDENTIFIER_TRIMMED, "") val newResultSet = resultSet.withPrefixMatcher(cleanPrefix) val scalaVersions = SbtScalacOptionUtils .projectVersionsSorted(place.getProject, reverse = true) val elements = SbtScalacOptionUtils .getScalacOptions .flatMap(lookupElementMatchingVersions(_, scalaVersions)) .asJava newResultSet.addAllElements(elements) } } private def lookupElementMatchingVersions(option: SbtScalacOptionInfo, scalaVersions: List[ScalaLanguageLevel])(implicit project: Project): Option[LookupElement] = { val matchingVersions = scalaVersions.filter(option.scalaVersions) Option.when(matchingVersions.nonEmpty) { LookupElementBuilder.create(option, option.flag) .withPresentableText(option.getText) .withTailText(matchingVersions.map(_.getVersion).mkString(" (", ", ", ")")) .withInsertHandler(new ScalacOptionInsertHandler(option)) .withPsiElement(new SbtScalacOptionDocHolder(option)) .withCaseSensitivity(false) .bold() } } private class ScalacOptionInsertHandler(option: SbtScalacOptionInfo) extends InsertHandler[LookupElement] { override def handleInsert(ctx: InsertionContext, item: LookupElement): Unit = { implicit val context: InsertionContext = ctx context.commitDocument() val elem = context.getFile.findElementAt(context.getStartOffset) elem.getContext match { case _: ScReferenceExpression if option.flag.startsWith("-") => // rewrite `-flag`, `--flag` to "-flag" and "--flag" respectively // handle `-foo-bar-baz` and `--foo-bar-baz` cases as well doHandleInsert(context.getStartOffset, context.getTailOffset) case str: ScStringLiteral => // handle cases when string literal is invalid. E.g.: `"-flag` -> `"-flag"` doHandleInsert(str.startOffset, str.endOffset) case _ => } } private def doHandleInsert(startOffset: Int, endOffset: Int)(implicit context: InsertionContext): Unit = { val newStartOffset = insertOption(startOffset, endOffset) (option.argType, option.defaultValue) match { case (ArgType.No, _) => case (ArgType.OneAfterPrefix(prefix), _) => runPrefixedOptionArgumentsTemplate(newStartOffset, prefix) case (_, Some(defaultValue)) => runOptionArgumentsTemplate(newStartOffset, defaultValue) case _ => context.getEditor.getCaretModel.moveToOffset(newStartOffset + option.getText.length - 1) } } private def insertOption(startOffset: Int, endOffset: Int)(implicit context: InsertionContext): Int = { context.getDocument.replaceString(startOffset, endOffset, option.getText) option.argType match { case ArgType.OneSeparate => context.commitDocument() val element = context.getFile.findElementAt(startOffset) val parent = SbtScalacOptionUtils.getScalacOptionsSbtSettingParent(element) parent match { // simple case `scalacOptions +=/-= "option", "argument"` // rewrite to `scalacOptions ++=/--= Seq("option", "argument")` case Some(expr) if SbtScalacOptionUtils.SINGLE_OPS(expr.operation.refName) && expr.right.startOffset == element.startOffset => context.getDocument.replaceString(startOffset, startOffset + option.getText.length, s"Seq(${option.getText})") context.commitDocument() val op = expr.operation.refName expr.operation.replace(ScalaPsiElementFactory.createElementFromText(op.prepended(op.head))(expr.projectContext)) PsiDocumentManager.getInstance(context.getProject).doPostponedOperationsAndUnblockDocument(context.getDocument) startOffset + 5 // '+' or '-' in operation + 'Seq(' case _ => startOffset } case _ => startOffset } } private def runPrefixedOptionArgumentsTemplate(offset: Int, prefix: String)(implicit context: 
InsertionContext): Unit = runOptionArgumentsTemplate(offset) { (builder, _) => val argumentText = option.flag.substring(prefix.length) builder.replaceRange( TextRange.from(offset + prefix.length + 1, argumentText.length), new ConstantNode(argumentText) ) } private def runOptionArgumentsTemplate(offset: Int, defaultValue: String)(implicit context: InsertionContext): Unit = runOptionArgumentsTemplate(offset) { (builder, offsetBeforeClosingQuote) => builder.replaceRange( TextRange.from(offsetBeforeClosingQuote, 0), new ConstantNode(defaultValue) ) } private def runOptionArgumentsTemplate(offset: Int)(replaceRange: (TemplateBuilderImpl, Int) => Unit)(implicit context: InsertionContext): Unit = { context.commitDocument() val offsetBeforeClosingQuote = offset + option.getText.length - 1 val templateContainerElement = context.getFile.findElementAt(offsetBeforeClosingQuote) val builder = TemplateBuilderFactory.getInstance() .createTemplateBuilder(templateContainerElement) .asInstanceOf[TemplateBuilderImpl] replaceRange(builder, offsetBeforeClosingQuote) val template = builder.buildTemplate() context.getDocument.replaceString(templateContainerElement.startOffset, templateContainerElement.endOffset, "") context.getEditor.getCaretModel.moveToOffset(templateContainerElement.startOffset) TemplateManager.getInstance(context.getProject).startTemplate(context.getEditor, template) } } }
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/sbt/language/completion/SbtScalacOptionsCompletionContributor.scala
Scala
apache-2.0
8,078
package dao.alertwatcherpacs

import slick.lifted.TableQuery

import scala.concurrent.Future

/**
 * Defines the base DAO structure every DAO should have.
 */
trait BaseDao[T <: Any] {
  def toTable(): TableQuery[_]
  def findAll(): Future[Seq[T]]
  def remove(id: Long): Future[Int]
  def insert(o: T): Future[Unit]
  def update(o: T): Future[Unit]
  def findById(id: Long): Future[Option[T]]
}
tnddn/iv-web
portal/rest-portal/app/dao/alertwatcherpacs/BaseDao.scala
Scala
apache-2.0
391
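A hypothetical concrete DAO sketch showing one way the trait above could be satisfied with Slick 3; the Alert model, the H2 profile, the table and column names are all illustrative assumptions, not part of the original project.

import slick.jdbc.H2Profile.api._
import slick.lifted.TableQuery

import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

import dao.alertwatcherpacs.BaseDao

case class Alert(id: Long, message: String)

// illustrative table mapping
class AlertTable(tag: Tag) extends Table[Alert](tag, "alerts") {
  def id      = column[Long]("id", O.PrimaryKey, O.AutoInc)
  def message = column[String]("message")
  def *       = (id, message) <> (Alert.tupled, Alert.unapply)
}

class AlertDao(db: Database) extends BaseDao[Alert] {
  private val alerts = TableQuery[AlertTable]

  override def toTable(): TableQuery[_]                  = alerts
  override def findAll(): Future[Seq[Alert]]             = db.run(alerts.result)
  override def remove(id: Long): Future[Int]             = db.run(alerts.filter(_.id === id).delete)
  override def insert(o: Alert): Future[Unit]            = db.run(alerts += o).map(_ => ())
  override def update(o: Alert): Future[Unit]            = db.run(alerts.filter(_.id === o.id).update(o)).map(_ => ())
  override def findById(id: Long): Future[Option[Alert]] = db.run(alerts.filter(_.id === id).result.headOption)
}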
package amora.backend.indexer import java.io.ByteArrayInputStream import java.nio.charset.StandardCharsets import scala.concurrent.Await import scala.concurrent.Promise import scala.concurrent.duration.Duration import scala.util.Failure import scala.util.Success import org.apache.jena.query.QuerySolution import org.apache.jena.query.ResultSetFactory import org.junit.After import org.junit.ComparisonFailure import com.typesafe.config.ConfigFactory import akka.actor.Cancellable import akka.http.javadsl.model.headers.RawRequestURI import akka.http.scaladsl.Http import akka.http.scaladsl.model.ContentTypes import akka.http.scaladsl.model.HttpEntity import akka.http.scaladsl.model.HttpHeader import akka.http.scaladsl.model.HttpMethods import akka.http.scaladsl.model.HttpRequest import akka.http.scaladsl.model.RequestEntity import akka.http.scaladsl.model.StatusCodes import akka.http.scaladsl.model.Uri import akka.http.scaladsl.model.headers.Accept import akka.http.scaladsl.testkit.RouteTest import akka.http.scaladsl.testkit.RouteTestTimeout import akka.http.scaladsl.testkit.TestFrameworkInterface import amora.api._ import amora.backend.AkkaLogging import amora.backend.CustomContentTypes import amora.backend.Log4jRootLogging import amora.backend.PlatformConstants import amora.backend.WebService import amora.backend.schema.Schema import amora.converter.protocol._ trait RestApiTest extends TestFrameworkInterface with RouteTest with AkkaLogging with Log4jRootLogging { import amora.TestUtils._ override def failTest(msg: String): Nothing = { throw new RuntimeException(msg) } override def testConfigSource = s""" akka { loglevel = INFO # do not log anything on system startup or shutdown stdout-loglevel = OFF # We need to access the raw URIs because they are used as keys in the index. http.server.raw-request-uri-header = on } app { interface = "localhost" port = 7777 test-mode = true forward-internal-logger-to-akka-logger = true # If we want to be able to disable the logging of additional data, we can # do so with this config value. log-additional-debug-data-in-tests = true storage { location = "$binDir/amora" index-dataset = "$binDir/amora/dataset" artifact-repo = "$binDir/amora/repo" } } """ override def testConfig = { val c = super.testConfig // We want to have a slightly different configuration when the tests run in // the IDE or in the build tool. `test-application.conf` is generated in the // build tool in order to override the default configuration. 
val f = new java.io.File(s"$binDir/test-application.conf") if (f.exists()) ConfigFactory.parseFile(f).withFallback(c) else c } private def binDir = getClass.getClassLoader.getResource(".").getPath private def debugTests = system.settings.config.getBoolean("app.log-additional-debug-data-in-tests") implicit val timeout = { import scala.concurrent.duration._ // - Wait for the time that is available to the server + some more // - The API doesn't allow us to wait for forever in debug mode, therefore just a very long timeout RouteTestTimeout(if (PlatformConstants.runsInDebugMode) 24.hours else PlatformConstants.timeout.duration + 500.millis) } val service = new WebService val route = service.route val config = system.settings.config val interface = config.getString("app.interface") val port = config.getInt("app.port") val binding = Http().bindAndHandle(route, interface, port) Await.ready(binding, Duration.Inf).value.get match { case Success(binding) ⇒ val addr = binding.localAddress log.info(s"Server is listening on ${addr.getHostName}:${addr.getPort}") case Failure(e) ⇒ log.error(e, "Failed to start server") throw e } case class Data(varName: String, value: String) def resultSetAsData(rs: SparqlResultSet): Seq[Seq[Data]] = { transformResultSet(rs) { (v, q) ⇒ val res = q.get(v) val value = if (res == null) null else if (res.isLiteral()) res.asLiteral().getString else res.toString() Data(v, value) } } def resultSetAsList(rs: SparqlResultSet): Seq[String] = { import scala.collection.JavaConverters._ val r = rs.resultSet require(r.getResultVars.size == 1, "Result set can only be shown as a list if it contains only one variable.") val variable = r.getResultVars.get(0) r.asScala.map { q ⇒ val res = q.get(variable) require(res != null, s"The variable `$variable` does not exist in the result set.") if (res.isLiteral()) res.asLiteral().getString else res.toString() }.toList } def transformResultSet[A](rs: SparqlResultSet)(f: (String, QuerySolution) ⇒ A): Seq[Seq[A]] = { import scala.collection.JavaConverters._ val r = rs.resultSet val vars = r.getResultVars.asScala.toSeq for (q ← r.asScala.toSeq) yield for (v ← vars) yield f(v, q) } def scheduleReq(req: ⇒ HttpRequest)(f: ⇒ Boolean): Unit = { import scala.concurrent.duration._ val p = Promise[Unit] var cancellable: Cancellable = null try { cancellable = system.scheduler.schedule(100.millis, 100.millis) { req ~> route ~> check { val res = f if (res) p.success(()) } } Await.ready(p.future, Duration.Inf) } finally { cancellable.cancel() } } def testReq[A](req: ⇒ HttpRequest)(f: ⇒ A): A = { val r = req r ~> route ~> check { def isJsonResponse = r.header[Accept].flatMap(_.mediaRanges.headOption).exists { case m if m matches CustomContentTypes.`application/sparql-results+json` ⇒ true case _ ⇒ false } if (debugTests && status == StatusCodes.OK && isJsonResponse) { log.info("response as query result:\\n" + respAsResultSet.asStringTable) } f } } def respAsString: String = Await.result(response.entity.dataBytes.runFold("")(_ + _.utf8String), timeout.duration) def respAsResultSet: SparqlResultSet = { val in = new ByteArrayInputStream(respAsString.getBytes(StandardCharsets.UTF_8)) new SparqlResultSet(ResultSetFactory.makeRewindable(ResultSetFactory.fromJSON(in))) } def respAsModel: SparqlModel = turtleModel(respAsString) def post(uri: String, request: String, header: HttpHeader*): HttpRequest = { val u = Uri(uri) val e = HttpEntity(CustomContentTypes.`application/sparql-query(UTF-8)`, request) val r = HttpRequest(HttpMethods.POST, u, 
List(RawRequestURI.create(u.toRelative.toString)) ++ header, e) log.info(s"sending request: $r") r } def post(uri: String, request: RequestEntity, header: HttpHeader*): HttpRequest = { val u = Uri(uri) val r = HttpRequest(HttpMethods.POST, u, List(RawRequestURI.create(u.toRelative.toString)) ++ header, request) log.info(s"sending request: $r") r } def get(uri: String): HttpRequest = { val u = Uri(uri) val r = HttpRequest(HttpMethods.GET, u, List(RawRequestURI.create(u.toRelative.toString))) log.info(s"sending request: $r") r } def showAmoraIndexContent(entries: Int = 100, prefix: String = "http://amora.center/kb/(?!amora/Schema/)"): Unit = { require(entries > 0, "limit needs to be greater than zero") testReq(post("http://amora.center/sparql", s""" select * where { ?s ?p ?o . filter regex(str(?s), "^$prefix") } order by ?s ?p limit $entries """, header = Accept(CustomContentTypes.`application/sparql-results+json`))) { checkStatus() } } def indexData(origin: Schema, data: (String, String)*): Unit = { def mkPkg(pkgs: Seq[String]): Schema = pkgs match { case Nil ⇒ origin case pkg +: pkgs ⇒ Package(pkg, mkPkg(pkgs)) } def escaped(str: String) = str.replace("\\n", "\\\\n").replace("\\"", "\\\\\\"") val PkgFinder = """(?s).*?package ([\\w\\.]+).*?""".r val files = data map { case (fileName, src) ⇒ src match { case PkgFinder(name) ⇒ File(mkPkg(name.split('.').reverse), fileName) case _ ⇒ File(origin, fileName) } } val ttlString = Schema.mkTurtleString(files) serviceRequest(s""" @prefix service:<http://amora.center/kb/Schema/Service/> . @prefix registry:<http://amora.center/kb/Service/> . @prefix request:<http://amora.center/kb/ServiceRequest/> . @prefix cu:<http://amora.center/kb/Schema/CompilationUnit/> . <#this> a request: ; service:serviceId registry:ScalaSourceIndexer ; service:method [ service:name "run" ; service:param [ service:name "origin" ; service:value "${escaped(ttlString)}" ; ] ; service:param [ service:name "data" ; service:value [${ data.map { case (fileName, src) ⇒ s""" cu:fileName "$fileName" ; cu:source "${escaped(src)}" ; ], [""" }.mkString }]; ] ; ] ; . """) } def indexArtifacts(artifacts: Artifact*) = { val ttlString = Schema.mkTurtleString(artifacts) serviceRequest(sparql""" @prefix service:<http://amora.center/kb/Schema/Service/> . @prefix registry:<http://amora.center/kb/Service/> . @prefix request:<http://amora.center/kb/ServiceRequest/> . <#this> a request: ; service:serviceId registry:IndexArtifacts ; service:method [ service:name "run" ; service:param [ service:name "turtleReq" ; service:value "$ttlString" ; ] ; ] ; . """) } sealed trait Region extends Product with Serializable { def len: Int } case class Range(start: Int, end: Int, str: String) extends Region { override def len = "[[]]".length } case class Offset(offset: Int, str: String) extends Region { override def len = "[[!]]".length + str.length } /** * Runs a test against the indexer. * * `rawData` are tuples of the form `(filename, source)`. The sources will be * typechecked and indexed and once this is done the query will run against * the index. * * The sources can contain markers that start with `[[` and end with `]]`. * These markers are the start and end position of a range, which shall be * returned by the query. If the first character after the `[[` marker is a * exclamation mark, the range will become an offset region, whose start and * end position are the same. 
Offset regions need to be used when implicit * regions need to be tested for their existence (for example implicit return * types of methods). The sources are freed of the region markers before they * are passed to the typechecker to make it convenient to write tests. */ def indexRegionData(query: String, origin: Schema, rawData: (String, String)*): Unit = { def findRegions(src: String, prevStart: Int, prevEnd: Int, regions: IndexedSeq[Region]): IndexedSeq[Region] = { val start = src.indexOf("[[", prevEnd) if (start < 0) regions else { val end = src.indexOf("]]", start) val len = regions.map(_.len).sum val isOffset = src(start + 2) == '!' val range = if (isOffset) Offset(start - len, src.substring(start + 3, end)) else Range(start - len, end - len - "[[".length, src.substring(start + 2, end)) findRegions(src, start, end, regions :+ range) } } val dataWithRegions = rawData map { case (filename, rawSrc) ⇒ val regions = findRegions(rawSrc, 0, 0, Vector()) val src = rawSrc.replaceAll("""\\[\\[!.*?\\]\\]|\\[\\[|\\]\\]""", "") (filename, src, regions) } val data = dataWithRegions.map { case (filename, src, _) ⇒ (filename, src) } indexData(origin, data: _*) val regionOrdering: Region ⇒ (Int, Int, String) = { case Range(start, end, text) ⇒ (start, end, text) case Offset(offset, text) ⇒ (offset, offset, text) } val expectedRegions = dataWithRegions.flatMap { case (_, _, region) ⇒ region }.sortBy(regionOrdering) testReq(post("http://amora.center/sparql", query, header = Accept(CustomContentTypes.`application/sparql-results+json`))) { checkStatus() val foundRegions = respAsResultSet.map { r ⇒ val start = r.int("start") val end = r.int("end") val name = r.string("name") if (start == end) Offset(start, name) else Range(start, end, name) }.sortBy(regionOrdering) foundRegions === expectedRegions } } def nlqRequest(query: String): NlqResponse = { testReq(post("http://amora.center/nlq", HttpEntity(ContentTypes.`text/plain(UTF-8)`, query), header = Accept(CustomContentTypes.`text/turtle`))) { checkStatus() NlqResponse(respAsModel) } } def sparqlRequest(query: SparqlQuery): Seq[Seq[Data]] = sparqlRequest(query.query) def sparqlRequest(query: String): Seq[Seq[Data]] = { testReq(post("http://amora.center/sparql", query, header = Accept(CustomContentTypes.`application/sparql-results+json`))) { checkStatus() resultSetAsData(respAsResultSet) } } def serviceRequest(query: String): SparqlModel = { testReq(post("http://amora.center/service", HttpEntity(CustomContentTypes.`text/turtle(UTF-8)`, query), header = Accept(CustomContentTypes.`text/turtle`))) { checkStatus() respAsModel } } def turtleRequest(req: String): Unit = { testReq(post("http://amora.center/turtle-update", HttpEntity(CustomContentTypes.`text/turtle(UTF-8)`, req))) { checkStatus() } } def headCommit(): String = { testReq(get("http://amora.center/commit/head")) { checkStatus() respAsString } } def listCommits(): String = { testReq(get("http://amora.center/commit/list")) { checkStatus() respAsString } } def showCommit(commit: String): SparqlModel = { testReq(get(s"http://amora.center/commit/show?commit=$commit")) { checkStatus() respAsModel } } def modelAsData(model: SparqlModel, query: SparqlQuery): Seq[Seq[Data]] = { val rs = query.runOnModel(model) if (debugTests) log.info("response as query result:\\n" + rs.asStringTable) resultSetAsData(rs) } case class CursorData(cursorPos: Int, src: String) /** * Takes a string as input that contains the '^' character, which donates the * position of a cursor. 
The index of the cursor is returned together with the * string that no longer contains the cursor. */ def cursorData(rawSrc: String): CursorData = { val i = rawSrc.indexOf('^') require(i >= 0, "No cursor marker found.") CursorData(i, rawSrc.substring(0, i) + rawSrc.substring(i+1)) } def checkStatus() = { if (status != StatusCodes.OK) throw new ComparisonFailure("", StatusCodes.OK.toString(), s"$status\\n\\n$respAsString\\n\\nRawResponse: $response") } @After def waitForTermination(): Unit = { Await.ready(system.terminate(), Duration.Inf) } } case class NlqResponse(model: SparqlModel) { def nodes: Seq[Node] = { sparqlQuery""" prefix VResponse:<http://amora.center/kb/amora/Schema/VisualizationResponse/> prefix VGraph:<http://amora.center/kb/amora/Schema/VisualizationGraph/> select ?node ?value where { [a VResponse:] VResponse:graph ?node . ?node VGraph:value ?value . } """.runOnModel(model).map { r ⇒ Node(r.literal("value").stringOpt.getOrElse(???), r.uri("node")) }.toList } def renderAsString: String = { val sb = new StringBuilder def renderNode(indent: Int)(n: Node): Unit = { sb append " "*indent append "- " append n.value append "\\n" n.edges.sortBy(_.value) foreach renderNode(indent + 2) } val ns = nodes.sortBy(_.value) ns foreach renderNode(0) sb.toString() } def sortedAsList: Seq[String] = { def loop(n: Node): Seq[String] = n.value +: n.edges.flatMap(loop) nodes.flatMap(loop).sorted } case class Node(value: String, uri: String) { def edges: Seq[Node] = { sparqlQuery""" prefix VGraph:<http://amora.center/kb/amora/Schema/VisualizationGraph/> select * where { $uri VGraph:edges ?edge . ?edge VGraph:value ?value . } """.runOnModel(model).map { r ⇒ Node(r.literal("value").stringOpt.getOrElse(???), r.uri("edge")) }.toList } } }
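// A hypothetical sketch of the [[..]] / [[!..]] region markers documented for
// `indexRegionData` above. It is meant to live inside the surrounding test class; the
// SPARQL query shape and the `origin: Schema` parameter are illustrative assumptions only,
// the real Schema DSL and vocabulary may differ.
def regionMarkerExample(origin: Schema): Unit = {
  indexRegionData(
    // assumed query: it must bind ?name, ?start and ?end, as the harness expects
    """
      prefix decl:<http://amora.center/kb/amora/Schema/Decl/>
      select ?name ?start ?end where {
        [a decl:] decl:name ?name ; decl:posStart ?start ; decl:posEnd ?end .
      }
    """,
    origin,
    "f1.scala" -> "package a.b\nclass [[C]]",                // [[..]]  marks an expected range
    "f2.scala" -> "package a.b\nclass D { def f[[!Int]] = 0 }" // [[!..]] marks an offset region, e.g. an implicit return type
  )
}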
sschaef/scalajs-test
backend/src/test/scala/amora/backend/indexer/RestApiTest.scala
Scala
mit
16,846
package org.improving.scalify import Scalify._ import org.eclipse.jdt.core._ import org.eclipse.jdt.core.dom import scala.collection.mutable.{ HashMap, HashSet } import scala.collection.immutable // import scalaz.OptionW._ object Global { private var members: Members = _ var javaProject: IJavaProject = _ def setMembers(m: Members) = members = m def types = members.types def methods = members.methods def vars = members.vars def packages = members.packages def creations = members.creations class Members( val packages: immutable.Map[String, dom.PackageDeclaration], val types: immutable.Map[String, dom.AbstractTypeDeclaration], val methods: immutable.Map[String, dom.MethodDeclaration], val vars: immutable.Map[String, dom.VariableDeclaration], val creations: immutable.Set[String] ) def pkgExists(pkg: String, s: String) = packages.contains(pkg + "." + s) def pkgExists(s: String) = packages contains s def pkgExists(imp: dom.ImportDeclaration, s: String) = { val ImportDeclaration(static, name, onDemand) = imp onDemand && packages.contains(name.getFullyQualifiedName + "." + s) } def typeExists(fqname: String) = { log.trace("typeExists? %s", fqname) types.values.exists { case std: dom.TypeDeclaration => std.itype.map(_.getFullyQualifiedName == fqname) getOrElse false case _ => false } } def lookup(key: String): Option[ASTNode] = types.get(key) orElse methods.get(key) orElse vars.get(key) def lookupType(key: String): Option[dom.TypeDeclaration] = types.get(key) match { case Some(x: dom.TypeDeclaration) => Some(x) case _ => None } def lookupMethod(key: String): Option[dom.MethodDeclaration] = methods.get(key) def lookupVar(key: String): Option[dom.VariableDeclaration] = vars.get(key) def lookupPackage(key: String): Option[dom.PackageDeclaration] = packages.get(key) def lookupCreation(key: String): Boolean = creations contains key def logMembers = { def log = Scalify.log log.trace("Logging %d packages: ", packages.toList.size) for ((k, v) <- packages) log.trace("%s => %s", k, v.getName) log.trace("Logging %d types: ", types.keys.toList.size) for ((k, v) <- types) { log.trace("%s => %s", k, v.getName) log.trace("%s is in package: %s", v.getName, v.resolveBinding.getPackage.getName) } log.trace("Logging %d methods: ", methods.keys.toList.size) for ((k, v) <- methods) log.trace(k + " => " + v.getName) log.trace("Logging %d variables: ", vars.keys.toList.size) for ((k, v) <- vars) log.trace(k + " => " + v.getName) } def recordMembers(asts: Map[ICU, dom.CompilationUnit]) = { val packages = new HashMap[String, dom.PackageDeclaration] val types = new HashMap[String, dom.AbstractTypeDeclaration] val methods = new HashMap[String, dom.MethodDeclaration] val vars = new HashMap[String, dom.VariableDeclaration] val creations = new HashSet[String] for ((icu, cu) <- asts) { cu.descendants.foreach { case x: dom.PackageDeclaration => packages(x.getName.getFullyQualifiedName) = x case x: dom.AbstractTypeDeclaration => types(x.tb.getKey) = x case x: dom.MethodDeclaration => methods(x.mb.getKey) = x case x: dom.VariableDeclaration => vars(x.vb.getKey) = x case x: dom.ClassInstanceCreation => creations += x.getType.resolveBinding.getKey case _ => } } Global.setMembers(new Global.Members(packages, types, methods, vars, creations)) } }
mbana/scalify
src/main/core/Global.scala
Scala
isc
3,433
package com.trafficland.augmentsbt.versionmanagement import sbt.SettingKey trait VersionManagementKeys { val versionSettingRegexes: SettingKey[Seq[String]] = SettingKey[Seq[String]]( "version-setting-regexes", "a list of regexes to use to replace versions" ) }
trafficland/augment-sbt
src/main/scala/com/trafficland/augmentsbt/versionmanagement/VersionManagementKeys.scala
Scala
apache-2.0
275
/* Copyright 2012 Anton Kraievoy [email protected] This file is part of Holonet. Holonet is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. Holonet is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with Holonet. If not, see <http://www.gnu.org/licenses/>. */ package org.akraievoy.holonet.exp trait Named { def name: String def desc: String }
akraievoy/holonet
src/main/scala/org/akraievoy/holonet/exp/Named.scala
Scala
gpl-3.0
783
/* * Copyright 2013 Twitter Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.twitter.storehaus.mysql import com.twitter.bijection.Injection import com.twitter.finagle.mysql.{ EmptyValue, IntValue, LongValue, NullValue, RawValue, ShortValue, StringValue, Value, Charset, Type } import org.jboss.netty.buffer.ChannelBuffer import org.jboss.netty.buffer.ChannelBuffers import org.jboss.netty.util.CharsetUtil.UTF_8 import scala.util.Try /** Helper class for mapping finagle-mysql Values to types we care about. */ object ValueMapper { // for finagle Value mappings, see: // https://github.com/twitter/finagle/blob/master/finagle-mysql/src/main/scala/com/twitter/ // finagle/mysql/Value.scala // currently supported types and corresponding finagle types: // INTEGER, INT, MEDIUMINT => IntValue // BIGINT => LongValue // SMALLINT => ShortValue // BLOB => RawValue // TEXT => RawValue // CHAR/VARCHAR => StringValue def toChannelBuffer(v: Value): Option[ChannelBuffer] = { v match { case IntValue(d) => Some(ChannelBuffers.copiedBuffer(d.toString, UTF_8)) case LongValue(d) => Some(ChannelBuffers.copiedBuffer(d.toString, UTF_8)) case RawValue(_, _, true, d) => Some(ChannelBuffers.copiedBuffer(d)) // from byte array case RawValue(_, _, false, d) => Some(ChannelBuffers.copiedBuffer(new String(d), UTF_8)) case ShortValue(d) => Some(ChannelBuffers.copiedBuffer(d.toString, UTF_8)) case StringValue(d) => Some(ChannelBuffers.copiedBuffer(d, UTF_8)) case EmptyValue => Some(ChannelBuffers.EMPTY_BUFFER) case NullValue => None // all other types are currently unsupported case other => throw new UnsupportedOperationException( s"${v.getClass.getName} with value $other is currently not supported.") } } def toString(value: Value): Option[String] = { value match { case IntValue(v) => Some(v.toString) case LongValue(v) => Some(v.toString) case RawValue(_, _, _, v) => Some(new String(v)) // from byte array case ShortValue(v) => Some(v.toString) case StringValue(v) => Some(v) // finagle-mysql text protocol wraps null strings as NullValue // and empty strings as EmptyValue case EmptyValue => Some("") case NullValue => None // all other types are currently unsupported case other => throw new UnsupportedOperationException( s"${value.getClass.getName} with value $other is currently not supported.") } } def toLong(v: Value): Option[Long] = { toString(v).map { _.toLong } } } /** Factory for [[com.twitter.storehaus.mysql.MySqlValue]] instances. 
*/ object MySqlValue { def apply(v: Any): MySqlValue = v match { case v: Value => new MySqlValue(v) case v: String => new MySqlValue(RawValue(Type.String, Charset.Utf8_general_ci, isBinary = false, v.getBytes)) case v: Int => new MySqlValue(IntValue(v)) case v: Long => new MySqlValue(LongValue(v)) case v: Short => new MySqlValue(ShortValue(v)) case v: ChannelBuffer => val bytes = Array.ofDim[Byte](v.readableBytes) v.markReaderIndex() v.readBytes(bytes) v.resetReaderIndex() new MySqlValue(RawValue(Type.Blob, Charset.Binary, isBinary = true, bytes)) case other => throw new UnsupportedOperationException( s"${v.getClass.getName} with value $other is currently not supported.") } } /** * Wraps finagle-mysql Value ADT. * * Since finagle maps MySQL column types to specific Value types, * we use this type class as an abstraction. * MySqlValue objects can then be converted to string, channelbuffer or any other type * without having to worry about the underlying finagle type. */ class MySqlValue(val v: Value) { override def equals(o: Any): Boolean = o match { // we consider two values to be equal if their underlying string representation are equal case o: MySqlValue => ValueMapper.toString(o.v) == ValueMapper.toString(this.v) case _ => false } override def hashCode: Int = { ValueMapper.toString(this.v).hashCode } } /** * Injection from MySqlValue to String. * Returns string representation of the finagle-mysql Value wrapped by MySqlValue * Both null values and empty values map to empty string. */ @deprecated("Use String2MySqlValueInjection", "0.10.0") object MySqlStringInjection extends Injection[MySqlValue, String] { // should this be null: String instead? def apply(a: MySqlValue): String = ValueMapper.toString(a.v).getOrElse("") override def invert(b: String): Try[MySqlValue] = Try(MySqlValue(RawValue(Type.String, Charset.Utf8_general_ci, isBinary = false, b.getBytes))) } object String2MySqlValueInjection extends Injection[String, MySqlValue] { def apply(s: String): MySqlValue = MySqlValue(s) override def invert(m: MySqlValue): Try[String] = Try { ValueMapper.toString(m.v).get } } /** * Injection from MySqlValue to ChannelBuffer. * Returns a channel buffer containing the Value wrapped by MySqlValue. * Both null values and empty values map to empty channel buffer. */ @deprecated("Use ChannelBuffer2MySqlValueInjection", "0.10.0") object MySqlCbInjection extends Injection[MySqlValue, ChannelBuffer] { def apply(a: MySqlValue): ChannelBuffer = ValueMapper.toChannelBuffer(a.v).getOrElse(ChannelBuffers.EMPTY_BUFFER) override def invert(b: ChannelBuffer): Try[MySqlValue] = Try(MySqlValue((Type.String, Charset.Utf8_general_ci, false, b.toString(UTF_8)))) } object ChannelBuffer2MySqlValueInjection extends Injection[ChannelBuffer, MySqlValue] { def apply(c: ChannelBuffer): MySqlValue = MySqlValue(c) override def invert(m: MySqlValue): Try[ChannelBuffer] = Try { ValueMapper.toChannelBuffer(m.v).get } } object LongMySqlInjection extends Injection[Long, MySqlValue] { def apply(a: Long): MySqlValue = MySqlValue(a) override def invert(b: MySqlValue): Try[Long] = Try(ValueMapper.toLong(b.v).get) }
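// A hypothetical round-trip sketch for the MySqlValue wrapper and the injections defined
// above; the sample values are placeholders.
import com.twitter.storehaus.mysql.{LongMySqlInjection, MySqlValue, String2MySqlValueInjection}

object MySqlValueRoundTripSketch {
  def main(args: Array[String]): Unit = {
    // supported primitives are boxed into the matching finagle-mysql Value
    val s: MySqlValue = MySqlValue("hello")
    val n: MySqlValue = MySqlValue(42L)

    // the injections convert back to plain Scala types, wrapped in scala.util.Try
    println(String2MySqlValueInjection.invert(s))  // Success(hello)
    println(LongMySqlInjection.invert(n))          // Success(42)

    // equality is defined on the string representation of the wrapped value
    println(MySqlValue(1) == MySqlValue("1"))      // true
  }
}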
twitter/storehaus
storehaus-mysql/src/main/scala/com/twitter/storehaus/mysql/ValueMapper.scala
Scala
apache-2.0
6,481
package querious import hedgehog._ import hedgehog.runner._ import StringInterpolation._ import fastparse.core.Parsed.{Failure, Success} import eu.timepit.refined.auto._ import fastparse.core.Parsed /** * @author Kevin Lee * @since 2017-07-22 */ object StringCharsParserSpec extends Properties { override def tests: List[Test] = List( example( raw"""Parsers.stringChars.parse("''") return Failure(_, 0, _)""", testParsersStringCharsParseTwoSingleQuotes ), example( raw"""Parsers.stringChars.parse("\\\\") return Failure(_, 0, _)""", testParsersStringCharsParseBackslashStr ), property( raw"""Parsers.stringChars.parse(alphaNumStr) return Success(alphaNumStr, alphaNumStr.length)""", testParsersStringCharsParseAlphaNumStr ), property( raw"""Parsers.stringChars.parse(one of ${TestData.escapingCharsToString}) return Failure(_, 0, _)""", testParsersStringCharsParseOneOfEscapingChars ) ) def testParsersStringCharsParseTwoSingleQuotes: Result = { val actual = Parsers.stringChars.parse("''") actual matchPattern { case Failure(_, 0, _) => } } def testParsersStringCharsParseBackslashStr: Result = { val actual = Parsers.stringChars.parse("\\\\") actual matchPattern { case Failure(_, 0, _) => } } def testParsersStringCharsParseAlphaNumStr: Property = for { value <- Gens.genNonEmptyAlphaNumString(10).log("value") } yield { val expectedValue = value.value val expected: Success[String, Char, String] = Success(expectedValue, expectedValue.length) val actual: Parsed[String, Char, String] = Parsers.stringChars.parse(value.value) actual ==== expected } def testParsersStringCharsParseOneOfEscapingChars: Property = for { value <- Gen.elementUnsafe(TestData.escapingChars).log("value") } yield { val actual = Parsers.stringChars.parse(value) actual matchPattern { case Failure(_, 0, _) => } } }
Kevin-Lee/sql-parser-scala
src/test/scala/querious/StringCharsParserSpec.scala
Scala
mit
1,953
/** * Copyright 2017 WorldWide Conferencing, LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package net.liftmodules.kafkaactors import java.util.Properties import org.apache.kafka.clients.consumer._ import net.liftweb.actor._ import scala.collection.JavaConverters._ /** * A kind of LiftActor capable of consuming messages from a Kafka topic. * * This actor imposes a few restrictions that normal LiftActors do not. Specifically: * * - You must define your message handling in `userMessageHandler` instead of `messageHandler`. * - You cannot override the processing of any `InternalKafkaActorMessage`. * * Other than the above, this Actor behaves very similarly to a normal `LiftActor`. * You can send messages directly to it, thus bypassing Kafka, by using its `!` or `send` methods. * * == Configuration == * * For this actor to work correctly with Kafka, you'll ahve to provide it with some basic * configuration. The required overrides are: * * - `bootstrapServers`: This needs to be the broker list for your Kafka cluster * - `groupId`: This is the groupId the actor should consume under. See Kafka docs for more details. * - `kafkaTopic`: The topic the actor should subscribe to * * The Kafka consumer works by polling for a certain number of milliseconds, and then returning if * no messages are retrieved. We abstract away that event loop behavior, but sometimes applications * need to tweak how long the consumer will sleep in order to optimize performance. To change that * you can also override the following: * * - `pollTime`: The amount of time, in milliseconds, the consumer should wait for recrods. Defaults to 500ms. * * If you need to tune more specific settings, you can provide a `consumerPropsCustomizer` that * will get to alter the `Properties` object before we pass it into the `KafkaConsumer` constructor. * This is what you'll need to implement if you want to provide custom settings for things like * authentication, encryption, etc. By default, we provide the bootstrap servers, the group ID, * we disable auto committing, and provide a key and value serializer implementation. * * Please be careful when overriding settings that were set by the `KafkaActor` itself. * * == Starting consumption == * * Once the actor is created, it'll behave like a normal `LiftActor` until its told to connect * up to Kafka and start consuming messages. To do that your code will need to transmit the * `[[StartConsumer]]` message to it like so: * * {{{ * actor ! StartConsumer * }}} * * You can also stop consuming anytime you like by transmitting `[[StopConsumer]]` or you can * force committing offsets by transmitting `[[CommitOffsets]]` to the actor if you need to do * so for some reason, though as mentioned below those cases should be rare. * * == Processing messages == * * When messages come from the topic, they will be parsed and extracted to case class objects * using lift-json. The messages will then be put in the actor's normal mailbox using `!` and be * subjet to normal actor processing rules. 
Every time the actor consumes messages it'll also * add a `[[CommitOffsets]]` message onto the end of the message batch. * * Because of the way the actor mailbox works, `CommitOffsets` won't be processed until all of * the messages in that batch have been processed. Thus, if you have a class of errors that may * cause you to want to avoid checkpointing offsets to Kafka, you sould throw an exception of * some sort in your `userMessageHandler` so things blow up. * * == Sending messages == * * `KafkaActor` works like a normal actor if you use the regular sending methods. However, you may * find it useful to send messages to this actor _through_ Kafka even when the actor is running * in the local process. To facilitate this, you can send messages through the included `ref` * like so: * * {{{ * myKafkaActor.ref ! MyMessage() * }}} * * This will cause the message to be routed through a Kafka producer and then consumed by the * actor using the normal consumption means. * */ abstract class KafkaActor extends LiftActor { def bootstrapServers: String def groupId: String def kafkaTopic: String def pollTime: Long = 500L /** * The ref for this actor that permits sending messages through the Kafka broker instead of * directly to the actor itself. Override this implementation if you need to include custom * producing behavior in the `KafkaActorRef`. */ lazy val ref = new KafkaActorRef(bootstrapServers, kafkaTopic) /** * Override this method in the implementing class to customize the consumer settings * to your liking. */ def consumerPropsCustomizer(props: Properties): Properties = props private[this] def consumerFn(): KafkaConsumer[Array[Byte], KafkaMessageEnvelope] = { val props = new Properties() props.put("bootstrap.servers", bootstrapServers) props.put("group.id", groupId) props.put("enable.auto.commit", "false") props.put("key.deserializer", "org.apache.kafka.common.serialization.ByteArrayDeserializer") props.put("value.deserializer", "net.liftmodules.kafkaactors.KafkaMessageEnvelopeDeserializer") val customizedProps = consumerPropsCustomizer(props) val consumer = new KafkaConsumer[Array[Byte], KafkaMessageEnvelope](customizedProps) consumer.subscribe(List(kafkaTopic).asJava) consumer } protected lazy val consumingThread: KafkaActorConsumingThread = new KafkaActorConsumingThread( groupId + "-consuming-thread", consumerFn _, this, pollTime ) final override def messageHandler = { case internalMessage: InternalKafkaActorMessage => internalMessage match { case StartConsumer => consumingThread.start() case StopConsumer => consumingThread.shutdown() case CommitOffsets(offsetInfo) => consumingThread.addPendingOffsets(offsetInfo) } case userMessage => userMessageHandler(userMessage) } def userMessageHandler: PartialFunction[Any, Any] }
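// A minimal hypothetical subclass wiring up the abstract members documented above; the
// broker list, group id, topic and message type are placeholder values.
import net.liftmodules.kafkaactors._

case class PageView(url: String)   // example message type, not part of the module

object PageViewActor extends KafkaActor {
  override def bootstrapServers = "localhost:9092"   // placeholder broker list
  override def groupId = "page-view-group"
  override def kafkaTopic = "page-views"

  override def userMessageHandler = {
    case PageView(url) => println(s"saw page view for $url")
    case other => println(s"ignoring unexpected message: $other")
  }
}

// Typical lifecycle, following the documentation above:
//   PageViewActor ! StartConsumer       // connect to Kafka and start polling
//   PageViewActor.ref ! PageView("/")   // route a message through Kafka instead of directly
//   PageViewActor ! StopConsumer        // shut the consuming thread down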
liftmodules/kafka-actors
src/main/scala/net/liftmodules/kafkaactors/KafkaActor.scala
Scala
apache-2.0
6,593
package com.chinthaka.imagesimilarity.util import org.bytedeco.javacpp.opencv_core._ import org.bytedeco.javacpp.opencv_imgproc._ import org.bytedeco.javacpp.{FloatPointer, IntPointer, PointerPointer} /** * Helper class that simplifies usage of OpenCV `calcHist` function for color images. * * See OpenCV [[http://opencv.itseez.com/modules/imgproc/doc/histograms.html?highlight=histogram]] * documentation to learn backend details. */ class ColorHistogram(var numberOfBins: Int = 256) { private val _minRange = 0.0f private val _maxRange = 255.0f /** * Computes histogram of an image. * * @param image input image * @return OpenCV histogram object */ def getHistogram(image: Mat): Mat = { require(image != null) require(image.channels == 3, "Expecting 3 channel (color) image") // Compute histogram val hist = new Mat() // Since C++ `calcHist` is using arrays of arrays we need wrap to do some wrapping // in `IntPointer` and `PointerPointer` objects. val intPtrChannels = new IntPointer(0, 1, 2) val intPtrHistSize = new IntPointer(numberOfBins, numberOfBins, numberOfBins) val histRange = Array(_minRange, _maxRange) val ptrPtrHistRange: PointerPointer[FloatPointer] = new PointerPointer[FloatPointer](histRange, histRange, histRange) calcHist(image, 1, // histogram of 1 image only intPtrChannels, // the channel used new Mat(), // no mask is used hist, // the resulting histogram 3, // it is a 3D histogram intPtrHistSize, // number of bins ptrPtrHistRange, // pixel value range true, // uniform false) // no accumulation hist } /** * Convert input image from RGB ro HSV color space and compute histogram of the hue channel. * @param image RGB image * @param minSaturation minimum saturation of pixels that are used for histogram calculations. * Pixels with saturation larger than minimum will be used in histogram computation * @return histogram of the hue channel, its range is from 0 to 180. */ def getHueHistogram(image: Mat, minSaturation: Int = 0): Mat = { require(image != null) require(image.channels == 3, "Expecting 3 channel (color) image") // Convert RGB to HSV color space val hsvImage = new Mat() cvtColor(image, hsvImage, CV_BGR2HSV) val saturationMask = new Mat() if (minSaturation > 0) { // Split the 3 channels into 3 images val hsvChannels = new MatVector() split(hsvImage, hsvChannels) threshold(hsvChannels.get(1), saturationMask, minSaturation, 255, CV_THRESH_BINARY) } // Prepare arguments for a 1D hue histogram val hist = new Mat() // range is from 0 to 180 val histRanges = Array(0f, 180f) // the hue channel val channels = Array(0) // Compute histogram calcHist(hsvImage, 1, // histogram of 1 image only channels, // the channel used saturationMask, // binary mask hist, // the resulting histogram 1, // it is a 1D histogram Array(numberOfBins), // number of bins histRanges // pixel value range ) hist } /** * Computes the 2D ab histogram. 
BGR source image is converted to Lab */ def getabHistogram(image: Mat): Mat = { val hist = new Mat() // Convert to Lab color space val lab = new Mat() cvtColor(image, lab, CV_BGR2Lab) // Prepare arguments for a 2D color histogram val histRange = Array(0f, 255f) val ptrPtrHistRange: PointerPointer[FloatPointer] = new PointerPointer[FloatPointer](histRange, histRange, histRange) // the two channels used are ab val intPtrChannels = new IntPointer(1, 2) val intPtrHistSize = new IntPointer(numberOfBins, numberOfBins) // Compute histogram calcHist(lab, 1, // histogram of 1 image only intPtrChannels, // the channel used new Mat(), // no mask is used hist, // the resulting histogram 2, // it is a 2D histogram intPtrHistSize, // number of bins ptrPtrHistRange, // pixel value range true, // Uniform false // do not accumulate ) hist } }
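// A hypothetical usage sketch for the ColorHistogram helper above; the synthetic input
// image and the bin count are placeholders for a real 3-channel BGR photo.
import com.chinthaka.imagesimilarity.util.ColorHistogram
import org.bytedeco.javacpp.opencv_core._

object ColorHistogramSketch {
  def main(args: Array[String]): Unit = {
    // a flat 3-channel BGR image stands in for real input here
    val image = new Mat(100, 100, CV_8UC3, new Scalar(10, 20, 30, 0))
    val histograms = new ColorHistogram(numberOfBins = 32)

    val bgrHist = histograms.getHistogram(image)        // 3D BGR histogram
    val hueHist = histograms.getHueHistogram(image, 30) // 1D hue histogram, saturation > 30 only
    val abHist  = histograms.getabHistogram(image)      // 2D ab histogram in Lab space

    println(s"dims: bgr=${bgrHist.dims}, hue=${hueHist.dims}, ab=${abHist.dims}")
  }
}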
echinthaka/ImageSimilarity
src/main/scala/com/chinthaka/imagesimilarity/util/ColorHistogram.scala
Scala
apache-2.0
4,358
package dispatch.spec import org.scalacheck._ import net.liftweb.json._ import JsonDSL._ import dispatch._ import org.asynchttpclient._ import org.mockito.Mockito._ object BasicSpecification extends Properties("Lift Json") { import Prop.forAll property("parse json") = forAll(Gen.alphaStr) { sample => val mockedResponse = mock(classOf[Response]) when(mockedResponse.getResponseBody).thenReturn(compactRender( ("out" -> sample) )) val result = as.lift.Json(mockedResponse) result == JObject(JField("out", JString(sample)) :: Nil) } }
dispatch/reboot
liftjson/src/test/scala/json.scala
Scala
lgpl-3.0
571
///* //active-learning-scala: Active Learning library for Scala //Copyright (c) 2014 Davi Pereira dos Santos // // This program is free software: you can redistribute it and/or modify // it under the terms of the GNU General Public License as published by // the Free Software Foundation, either version 3 of the License, or // (at your option) any later version. // // This program is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the // GNU General Public License for more details. // // You should have received a copy of the GNU General Public License // along with this program. If not, see <http://www.gnu.org/licenses/>. //*/ // //package exp.tex // //import app.ArgParser //import app.db.entities.Dataset //import exp.result.ResOld //import ml.classifiers.{VFDT, KNNBatch, NB} //import util.{StatTests, Stat, FriedmanNemenyiTest} // //import scala.collection.mutable // //object ALCAccAllClassif extends ALCAccTrait { // val desc = s"Version ${ArgParser.version} \\nPega ALCs da tabela 'res' e imprime tabela latex. Learner will be ignored!" // val learners = Seq(NB(), VFDT(), NB(), VFDT(), KNNBatch(5, "eucl", Seq(), weighted = true)) // // val learners = Seq(NB(), VFDT(), KNNBatch(5, "eucl", Seq(), "", weighted = true)) // // val learners = Seq(NB(), KNNBatch(5, "eucl", Seq(), "", weighted = true)) // // run() //}
active-learning/active-learning-scala
src/main/scala/exp/tex/ALCAccAllClassif.scala
Scala
gpl-2.0
1,484
package com.datawizards.dqm.repository import com.datawizards.dqm.alert.dto.SlackMessage import org.json4s.{DefaultFormats, Formats} import scalaj.http._ import org.json4s.jackson.Serialization class SlackWebHookRepositoryImpl(webHookUrl: String) extends SlackWebHookRepository { implicit val formats: Formats = DefaultFormats override def sendMessage(message: SlackMessage): Unit = { val request = Http(webHookUrl).postData(Serialization.write(message)) val response: HttpResponse[String] = request.asString if(response.code != 200) { throw new Exception(response.body) } } }
piotr-kalanski/data-quality-monitoring
src/main/scala/com/datawizards/dqm/repository/SlackWebHookRepositoryImpl.scala
Scala
apache-2.0
610
import stainless.lang._ object ObjectParamMutation1 { case class A(var y: Int) def update(a: A): Int = { a.y = 12 a.y } ensuring(res => res == 12) def f(): Int = { val a = A(10) update(a) a.y } ensuring(res => res == 12) }
epfl-lara/stainless
frontends/benchmarks/imperative/valid/ObjectParamMutation1.scala
Scala
apache-2.0
258
package com.datastax.spark.connector.util

object SpanningIteratorBenchmark extends App {

  val iterator = Iterator.from(0)
  val groupsOf1 = new SpanningIterator(iterator, (i: Int) => i)
  val groupsOf10 = new SpanningIterator(iterator, (i: Int) => i / 10)
  val groupsOf1000 = new SpanningIterator(iterator, (i: Int) => i / 1000)

  println("1,000,000 groups of size 1 per each iteration:")
  BenchmarkUtil.timeIt { groupsOf1.drop(1000000) }

  println("100,000 groups of size 10 per each iteration:")
  BenchmarkUtil.timeIt { groupsOf10.drop(100000) }

  println("1000 groups of size 1000 per each iteration:")
  BenchmarkUtil.timeIt { groupsOf1000.drop(1000) }
}
viirya/spark-cassandra-connector
spark-cassandra-connector/src/perf/scala/com/datastax/spark/connector/util/SpanningIteratorBenchmark.scala
Scala
apache-2.0
689
/* * Copyright 2010 Twitter, Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may * not use this file except in compliance with the License. You may obtain * a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.twitter.ostrich package admin package config import scala.collection.Map import scala.util.matching.Regex import com.twitter.conversions.time._ import com.twitter.logging.Logger import com.twitter.util.{Config, Duration} import stats._ @deprecated("use StatsReporterFactory") abstract class StatsReporterConfig extends Config[(StatsCollection, AdminHttpService) => Service] @deprecated("use JsonStatsLoggerFactory") class JsonStatsLoggerConfig extends StatsReporterConfig { var loggerName: String = "stats" var period: Duration = 1.minute var serviceName: Option[String] = None var separator = "_" def apply() = { (collection: StatsCollection, admin: AdminHttpService) => new JsonStatsLogger(Logger.get(loggerName), period, serviceName, collection, separator) } } @deprecated("use W3CStatsLoggerFactory") class W3CStatsLoggerConfig extends StatsReporterConfig { var loggerName: String = "w3c" var period: Duration = 1.minute def apply() = { (collection: StatsCollection, admin: AdminHttpService) => new W3CStatsLogger(Logger.get(loggerName), period, collection) } } @deprecated("use TimeSeriesCollectorFactory") class TimeSeriesCollectorConfig extends StatsReporterConfig { def apply() = { (collection: StatsCollection, admin: AdminHttpService) => val service = new TimeSeriesCollector(collection) service.registerWith(admin) service } } @deprecated("use StatsFactory") class StatsConfig extends Config[AdminHttpService => StatsCollection] { var name: String = "" var reporters: List[StatsReporterConfig] = Nil def apply() = { (admin: AdminHttpService) => val collection = Stats.make(name) reporters.foreach { reporter => val process = reporter()(collection, admin) ServiceTracker.register(process) process.start() } collection } } @deprecated("use AdminServiceFactory") class AdminServiceConfig extends Config[RuntimeEnvironment => Option[AdminHttpService]] { /** * (optional) HTTP port. */ var httpPort: Option[Int] = None /** * Listen backlog for the HTTP port. */ var httpBacklog: Int = 20 /** * List of configurations for stats nodes. * This is where you would define alternate stats collectors, or attach a json or w3c logger. */ var statsNodes: List[StatsConfig] = Nil /** * The name of the stats collection to use. The default is "" which is the name for Stats. */ var statsCollectionName: Option[String] = None /** * A list of regex patterns to filter out of reported stats when the "filtered" option is given. * This is useful if you know a bunch of stats are being reported that aren't interesting to * graph right now. */ var statsFilters: List[Regex] = Nil /** * Extra handlers for the admin web interface. * Each key is a path prefix, and each value is the handler to invoke for that path. You can use * this to setup extra functionality for the admin web interface. 
*/ var extraHandlers: Map[String, CustomHttpHandler] = Map() /** * Default LatchedStatsListener intervals */ var defaultLatchIntervals: List[Duration] = 1.minute :: Nil def configureStatsListeners(collection: StatsCollection) = { defaultLatchIntervals.map { StatsListener(_, collection, statsFilters) } } def apply() = { (runtime: RuntimeEnvironment) => configureStatsListeners(Stats) // allow the adminPort to be overridden on the command line: httpPort = runtime.arguments.get("adminPort").map { _.toInt }.orElse(httpPort) httpPort.map { port => val statsCollection = statsCollectionName.map { Stats.make(_) }.getOrElse(Stats) val admin = new AdminHttpService(port, httpBacklog, statsCollection, runtime) statsNodes.map { config => config()(admin) }.foreach(configureStatsListeners) admin.start() // handlers can't be added until the admin server is started. extraHandlers.foreach { case (path, handler) => admin.addContext(path, handler) } admin } } }
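// A hypothetical configuration sketch using the (deprecated) config classes documented
// above; the port, logger name and reporting period are placeholder values.
import com.twitter.conversions.time._
import com.twitter.ostrich.admin.config._

object OstrichAdminConfigSketch {
  val adminConfig = new AdminServiceConfig {
    httpPort = Some(9990)                       // serve the admin pages on :9990
    statsNodes = new StatsConfig {
      reporters = new JsonStatsLoggerConfig {
        loggerName = "stats"
        period = 1.minute
      } :: new TimeSeriesCollectorConfig :: Nil
    } :: Nil
  }

  // adminConfig()(runtime) would then yield an Option[AdminHttpService] for the given
  // RuntimeEnvironment.
}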
hydro2k/ostrich
src/main/scala/com/twitter/ostrich/admin/config/AdminServiceConfig.scala
Scala
apache-2.0
4,641
/* * This file is part of the ToolXiT project. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package toolxit.bibtex package renderer import scala.xml.Node /** * @author Lucas Satabin * */ //class XmlRenderer(db: BibTeXDatabase) extends BibTeXRenderer[Node](db) { // // def render = <toto/> // // def groupedBy(fieldName: String) = this // //}
gnieh/toolxit-bibtex
src/main/scala/toolxit/bibtex/renderer/XmlRenderer.scala
Scala
apache-2.0
848
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.mllib.util import scala.language.postfixOps import scala.util.Random import org.jblas.DoubleMatrix import org.apache.spark.annotation.DeveloperApi import org.apache.spark.SparkContext import org.apache.spark.rdd.RDD /** * :: DeveloperApi :: * Generate RDD(s) containing data for Matrix Factorization. * * This method samples training entries according to the oversampling factor * 'trainSampFact', which is a multiplicative factor of the number of * degrees of freedom of the matrix: rank*(m+n-rank). * * It optionally samples entries for a testing matrix using * 'testSampFact', the percentage of the number of training entries * to use for testing. * * This method takes the following inputs: * sparkMaster (String) The master URL. * outputPath (String) Directory to save output. * m (Int) Number of rows in data matrix. * n (Int) Number of columns in data matrix. * rank (Int) Underlying rank of data matrix. * trainSampFact (Double) Oversampling factor. * noise (Boolean) Whether to add gaussian noise to training data. * sigma (Double) Standard deviation of added gaussian noise. * test (Boolean) Whether to create testing RDD. * testSampFact (Double) Percentage of training data to use as test data. 
*/ @DeveloperApi object MFDataGenerator { def main(args: Array[String]) { if (args.length < 2) { println("Usage: MFDataGenerator " + "<master> <outputDir> [m] [n] [rank] [trainSampFact] [noise] [sigma] [test] [testSampFact]") System.exit(1) } val sparkMaster: String = args(0) val outputPath: String = args(1) val m: Int = if (args.length > 2) args(2).toInt else 100 val n: Int = if (args.length > 3) args(3).toInt else 100 val rank: Int = if (args.length > 4) args(4).toInt else 10 val trainSampFact: Double = if (args.length > 5) args(5).toDouble else 1.0 val noise: Boolean = if (args.length > 6) args(6).toBoolean else false val sigma: Double = if (args.length > 7) args(7).toDouble else 0.1 val test: Boolean = if (args.length > 8) args(8).toBoolean else false val testSampFact: Double = if (args.length > 9) args(9).toDouble else 0.1 val sc = new SparkContext(sparkMaster, "MFDataGenerator") val A = DoubleMatrix.randn(m, rank) val B = DoubleMatrix.randn(rank, n) val z = 1 / scala.math.sqrt(scala.math.sqrt(rank)) A.mmuli(z) B.mmuli(z) val fullData = A.mmul(B) val df = rank * (m + n - rank) val sampSize = scala.math.min(scala.math.round(trainSampFact * df), scala.math.round(.99 * m * n)).toInt val rand = new Random() val mn = m * n val shuffled = rand.shuffle(1 to mn toList) val omega = shuffled.slice(0, sampSize) val ordered = omega.sortWith(_ < _).toArray val trainData: RDD[(Int, Int, Double)] = sc.parallelize(ordered) .map(x => (fullData.indexRows(x - 1), fullData.indexColumns(x - 1), fullData.get(x - 1))) // optionally add gaussian noise if (noise) { trainData.map(x => (x._1, x._2, x._3 + rand.nextGaussian * sigma)) } trainData.map(x => x._1 + "," + x._2 + "," + x._3).saveAsTextFile(outputPath) // optionally generate testing data if (test) { val testSampSize = scala.math .min(scala.math.round(sampSize * testSampFact),scala.math.round(mn - sampSize)).toInt val testOmega = shuffled.slice(sampSize, sampSize + testSampSize) val testOrdered = testOmega.sortWith(_ < _).toArray val testData: RDD[(Int, Int, Double)] = sc.parallelize(testOrdered) .map(x => (fullData.indexRows(x - 1), fullData.indexColumns(x - 1), fullData.get(x - 1))) testData.map(x => x._1 + "," + x._2 + "," + x._3).saveAsTextFile(outputPath) } sc.stop() } }
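// A hypothetical programmatic invocation of the generator above; the master URL and output
// path are placeholders, and the remaining optional arguments simply restate the documented
// defaults.
object MFDataGeneratorSketch {
  def main(args: Array[String]): Unit = {
    org.apache.spark.mllib.util.MFDataGenerator.main(Array(
      "local[4]",        // sparkMaster
      "/tmp/mf-data",    // outputPath
      "100", "100",      // m, n
      "10",              // rank
      "1.0",             // trainSampFact
      "false", "0.1",    // noise, sigma
      "true", "0.1"      // test, testSampFact
    ))
  }
}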
trueyao/spark-lever
mllib/src/main/scala/org/apache/spark/mllib/util/MFDataGenerator.scala
Scala
apache-2.0
4,644
package com.wavesplatform.transaction import com.wavesplatform.account.PublicKey trait Authorized { val sender: PublicKey }
wavesplatform/Waves
node/src/main/scala/com/wavesplatform/transaction/Authorized.scala
Scala
mit
127
package org.jetbrains.plugins.scala package util import com.intellij.notification._ import com.intellij.openapi.project.Project import org.jetbrains.annotations.{Nls, NonNls} import java.net.URL import javax.swing.event.HyperlinkEvent import scala.collection.mutable /** * User: Dmitry Naydanov * Date: 11/27/13 */ // TODO Why do we need a mutable builder when we have named / default arguments? object NotificationUtil { def builder(project: Project, @Nls message: String) = new NotificationBuilder(project, message) class NotificationBuilder protected[NotificationUtil] (project: Project, @Nls message: String) { private var group: String = "Scala" @Nls private var title: Option[String] = None private var notificationType: NotificationType = NotificationType.WARNING private var displayType: NotificationDisplayType = NotificationDisplayType.BALLOON // TODO Why it's present but not applied? private val actions: mutable.Buffer[NotificationAction] = mutable.Buffer() def setGroup(@NonNls group: String): this.type = {this.group = group; this} def setTitle(@Nls title: String): this.type = {this.title = Some(title); this} def removeTitle(): this.type = {this.title = None; this} def setNotificationType(notificationType: NotificationType): this.type = {this.notificationType = notificationType; this} // @deprecated TODO: yeah! but why? and replace with what? def setDisplayType(displayType: NotificationDisplayType): this.type = {this.displayType = displayType; this} def addAction(action: NotificationAction): this.type = {actions += action; this} def show(): Unit = { val notification = build() Notifications.Bus.notify(notification, project) } def build(): Notification = { val notification = title match { case Some(t) => new Notification(group, t, message, notificationType) case None => new Notification(group, message, notificationType) } actions.foreach(notification.addAction) notification } } def showMessage( project: Project, @Nls message: String, group: String = "scala", @Nls title: String = ScalaBundle.message("default.notification.title"), notificationType: NotificationType = NotificationType.WARNING, displayType: NotificationDisplayType = NotificationDisplayType.BALLOON, ): Unit = { val notificationBuilder = builder(project, message) .setGroup(group) .setTitle(title) .setNotificationType(notificationType) .setDisplayType(displayType) notificationBuilder.show() } protected[NotificationUtil] object Link { def unapply(event: HyperlinkEvent): Option[URL] = Option(event.getURL) map (_.getProtocol) collect { case "http" | "https" => event.getURL } } protected[NotificationUtil] object Action { def unapply(event: HyperlinkEvent): Option[String] = Option(event.getURL) map (_.getProtocol) collect { case "ftp" => event.getURL.getHost } } }
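// A hypothetical usage sketch of the NotificationUtil builder above; the message, title
// and the surrounding Project value are placeholders.
import com.intellij.notification.NotificationType
import com.intellij.openapi.project.Project
import org.jetbrains.plugins.scala.util.NotificationUtil

object NotificationSketch {
  def warnAboutMissingSdk(project: Project): Unit = {
    NotificationUtil.builder(project, "No Scala SDK configured for this module")
      .setTitle("Scala plugin")
      .setNotificationType(NotificationType.WARNING)
      .show()
  }
}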
JetBrains/intellij-scala
scala/scala-impl/src/org/jetbrains/plugins/scala/util/NotificationUtil.scala
Scala
apache-2.0
3,007
/** * Copyright 2014-2015 Martin Cooper * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.github.martincooper.datatable.DataSetSpecs import com.github.martincooper.datatable.{ DataSet, DataTable } import org.scalatest.{ FlatSpec, Matchers } class DataSetInsertTableSpec extends FlatSpec with Matchers { "A DataSet" should "allow a table to be inserted by name" in { val tableOne = DataTable("TableOne").get val tableThree = DataTable("TableThree").get val dataSet = DataSet("TestDataSet", Seq(tableOne, tableThree)).get val newDataSet = dataSet.insert("TableThree", DataTable("TableTwo").get) newDataSet.isSuccess should be(true) newDataSet.get.tables.length should be(3) newDataSet.get.tables.map(_.name) should be(Seq("TableOne", "TableTwo", "TableThree")) } it should "disallow a table to be inserted by name when its name is not found" in { val tableOne = DataTable("TableOne").get val tableTwo = DataTable("TableTwo").get val dataSet = DataSet("TestDataSet", Seq(tableOne, tableTwo)).get val newDataSet = dataSet.insert("TableOneHundred", DataTable("TableTwo").get) newDataSet.isSuccess should be(false) newDataSet.failed.get.getMessage should be("Table TableOneHundred not found.") } it should "allow a table to be inserted by index" in { val tableOne = DataTable("TableOne").get val tableThree = DataTable("TableThree").get val dataSet = DataSet("TestDataSet", Seq(tableOne, tableThree)).get val newDataSet = dataSet.insert(1, DataTable("TableTwo").get) newDataSet.isSuccess should be(true) newDataSet.get.tables.length should be(3) newDataSet.get.tables.map(_.name) should be(Seq("TableOne", "TableTwo", "TableThree")) } it should "disallow a table to be inserted by index when it is not found" in { val tableOne = DataTable("TableOne").get val tableTwo = DataTable("TableTwo").get val dataSet = DataSet("TestDataSet", Seq(tableOne, tableTwo)).get val newDataSet = dataSet.insert(9, DataTable("TableThree").get) newDataSet.isSuccess should be(false) newDataSet.failed.get.getMessage should be("Item index out of bounds for insert.") } it should "allow a table to be inserted by reference" in { val tableOne = DataTable("TableOne").get val tableThree = DataTable("TableThree").get val dataSet = DataSet("TestDataSet", Seq(tableOne, tableThree)).get val newDataSet = dataSet.insert(tableThree, DataTable("TableTwo").get) newDataSet.isSuccess should be(true) newDataSet.get.tables.length should be(3) newDataSet.get.tables.map(_.name) should be(Seq("TableOne", "TableTwo", "TableThree")) } it should "disallow a table to be inserted by reference when it is not found" in { val tableOne = DataTable("TableOne").get val tableThree = DataTable("TableThree").get val dataSet = DataSet("TestDataSet", Seq(tableOne, tableThree)).get val unknownTable = DataTable("NewTable").get val newDataSet = dataSet.insert(unknownTable, DataTable("NewTable").get) newDataSet.isSuccess should be(false) newDataSet.failed.get.getMessage should be("Item index out of bounds for insert.") } }
martincooper/scala-datatable
src/test/scala/com/github/martincooper/datatable/DataSetSpecs/DataSetInsertTableSpec.scala
Scala
apache-2.0
3,713
package code.service import code.model.{Role, User, UserRoles} import net.liftweb.mapper.By object UserService { def nonAdmin(u: User): Boolean = { val admin = Role.find(By(Role.name, "admin")) val client = Role.find(By(Role.name, "client")) UserRoles.findAll(By(UserRoles.role, admin), By(UserRoles.user, u)).isEmpty && UserRoles.findAll(By(UserRoles.role, client), By(UserRoles.user, u)).nonEmpty } }
dodie/time-admin
src/main/scala/code/service/UserService.scala
Scala
apache-2.0
427
package org.jetbrains.plugins.scala.debugger.evaluateExpression import org.jetbrains.plugins.scala.debugger.{ScalaDebuggerTestCase, ScalaVersion_2_11, ScalaVersion_2_12} /** * User: Alefas * Date: 19.10.11 */ class ScalaExpressionsEvaluator extends ScalaExpressionsEvaluatorBase with ScalaVersion_2_11 class ScalaExpressionsEvaluator_212 extends ScalaExpressionsEvaluatorBase with ScalaVersion_2_12 abstract class ScalaExpressionsEvaluatorBase extends ScalaDebuggerTestCase { addFileWithBreakpoints("PrefixUnary.scala", s""" |object PrefixUnary { | class U { | def unary_!(): Boolean = false | } | def main(args: Array[String]) { | val u = new U | ""$bp | } |} """.stripMargin.trim() ) def testPrefixUnary() { runDebugger() { waitForBreakpoint() evalEquals("!u", "false") evalEquals("!true", "false") } } addFileWithBreakpoints("VariousExprs.scala", s""" |object VariousExprs { | def main(args: Array[String]) { | ""$bp | } |} """.stripMargin.trim() ) def testVariousExprs() { runDebugger() { waitForBreakpoint() evalEquals("(1, 2, 3)", "(1,2,3)") evalEquals("if (true) \\"text\\"", "undefined") evalEquals("if (true) \\"text\\" else \\"next\\"", "text") evalEquals("if (false) \\"text\\" else \\"next\\"", "next") evalEquals("\\"text\\" != null", "true") } } addFileWithBreakpoints("SmartBoxing.scala", s""" |object SmartBoxing { | def foo[T](x: T)(y: T) = x | def main(args: Array[String]) { | val tup = (1, 2) | ""$bp | } | def test(tup: (Int, Int)) = tup._1 | def test2(tup: Tuple2[Int, Int]) = tup._2 |} """.stripMargin.trim() ) def testSmartBoxing() { runDebugger() { waitForBreakpoint() evalEquals("test(tup)", "1") evalEquals("test((1, 2))", "1") evalEquals("test(Tuple2(1, 2))", "1") evalEquals("test2(tup)", "2") evalEquals("test2((1, 2))", "2") evalEquals("test2(Tuple2(1, 2))", "2") evalEquals("foo(1)(2)", "1") evalEquals("scala.collection.immutable.HashSet.empty + 1 + 2", "Set(1, 2)") } } addFileWithBreakpoints("Assignment.scala", s""" |object Assignment { | var m = 0 | def main(args: Array[String]) { | var z = 1 | val y = 0 | val x: Array[Array[Int]] = Array(Array(1, 2), Array(2, 3)) | ""$bp | } |} """.stripMargin.trim() ) def testAssignment() { runDebugger() { waitForBreakpoint() evalEquals("x(0)(0)", "1") evalEquals("x(0)(0) = 2", "2") evalEquals("x(0)(0)", "2") evalEquals("z", "1") evalEquals("z = 2", "2") evalEquals("z", "2") evalEquals("m", "0") evalEquals("m = 2", "undefined") evalEquals("m", "2") evalEquals("y = 1", "1") //local vals may be reassigned in debugger evalEquals("y", "1") } } addFileWithBreakpoints("This.scala", s""" |object This { | def main(args: Array[String]) { | class This { | val x = 1 | def foo() { | ""$bp | } | } | new This().foo() | } |} """.stripMargin.trim() ) def testThis() { runDebugger() { waitForBreakpoint() evalEquals("this.x", "1") } } addFileWithBreakpoints("PrefixedThis.scala", s""" |object PrefixedThis { | def main(args: Array[String]) { | class This { | val x = 1 | def foo() { | val runnable = new Runnable { | def run() { | val x = () => { | This.this.x //to have This.this in scope | ""$bp | } | x() | } | } | runnable.run() | } | } | new This().foo() | } |} """.stripMargin.trim() ) def testPrefixedThis() { runDebugger() { waitForBreakpoint() evalEquals("This.this.x", "1") } } addFileWithBreakpoints("Postfix.scala", s""" |object Postfix { | def main(args: Array[String]) { | object x {val x = 1} | x | ""$bp | } |} """.stripMargin.trim() ) def testPostfix() { runDebugger() { waitForBreakpoint() evalEquals("x x", "1") evalEquals("1 toString ()", "1") } } 
addFileWithBreakpoints("Backticks.scala", s""" |object Backticks { | def main(args: Array[String]) { | val `val` = 100 | ""$bp | } |} """.stripMargin.trim() ) def testBackticks() { runDebugger() { waitForBreakpoint() evalEquals("`val`", "100") } } addFileWithBreakpoints("Literal.scala", s""" |object Literal { | implicit def intToString(x: Int) = x.toString + x.toString | def main(args: Array[String]) { | val n = 1 | ""$bp | } |} """.stripMargin.trim() ) def testLiteral() { runDebugger() { waitForBreakpoint() evalEquals("\\"x\\".length", "1") evalEquals("s\\"n = $n\\"", "n = 1") evalEquals("1L", "1") evalEquals("'c'", "c") evalEquals("true", "true") evalEquals("null", "null") evalEquals("1", "1") evalEquals("1F", "1.0") evalEquals("Array(1F, 2.0F)", "[1.0,2.0]") evalEquals("123.charAt(3)", "1") evalEquals("\\"a\\".concat(123)", "a123123") evalEquals("'aaa", "'aaa") evalEquals("'aaa.name", "aaa") } } addFileWithBreakpoints("JavaLib.scala", s""" |object JavaLib { | def main(args: Array[String]) { | ""$bp | } |} """.stripMargin.trim() ) def testJavaLib() { runDebugger() { waitForBreakpoint() evalEquals("new StringBuilder(\\"test\\").append(23)", "test23") evalEquals("new Array[Int](2)", "[0,0]") } } addFileWithBreakpoints("InnerClass.scala", s""" |object InnerClass { | class Expr {} | def main(args: Array[String]) { | ""$bp | } |} """.stripMargin.trim() ) def testInnerClass() { runDebugger() { waitForBreakpoint() evalStartsWith("new Expr", "InnerClass$Expr") } } addFileWithBreakpoints("OverloadingClass.scala", s""" |object OverloadingClass { | class Expr(s: String) { | def this(t: Int) { | this("test") | } | } | def main(args: Array[String]) { | ""$bp | } |} """.stripMargin.trim() ) def testOverloadingClass() { runDebugger() { waitForBreakpoint() evalStartsWith("new Expr(\\"\\")", "OverloadingClass$Expr") evalStartsWith("new Expr(2)", "OverloadingClass$Expr") } } addFileWithBreakpoints("IsInstanceOf.scala", s""" |object IsInstanceOf { | class A | class B | def main(args: Array[String]) { | val x = new A | val y = new B | ""$bp | } |} """.stripMargin.trim() ) def testIsInstanceOf() { runDebugger() { waitForBreakpoint() evalEquals("x.isInstanceOf[A]", "true") evalEquals("x.isInstanceOf[B]", "false") evalEquals("y.isInstanceOf[B]", "true") evalEquals("y.isInstanceOf[String]", "false") evalEquals("\\"\\".isInstanceOf[String]", "true") } } addFileWithBreakpoints("SyntheticOperators.scala", s""" |object SyntheticOperators { | def fail: Boolean = throw new Exception("fail!") | def main(args: Array[String]) { | val tr = true | val fls = false | ""$bp | } |} """.stripMargin.trim() ) def testSyntheticOperators(): Unit = { runDebugger() { waitForBreakpoint() evalEquals("tr || fail", "true") evalEquals("fls && fail", "false") evalEquals("fls || tr", "true") evalEquals("tr && fls", "false") evalEquals("1 < 1", "false") evalEquals("1 <= 1", "true") evalEquals("1 + 2", "3") evalEquals("3 - 1.5.toInt", "2") evalEquals("false ^ true", "true") evalEquals("!false", "true") evalEquals("false | false", "false") evalEquals("1 / 2", "0") evalEquals("1 / 2.", "0.5") evalEquals("5 % 2", "1") evalEquals("1 << 2", "4") evalEquals("\\"1\\" + 1", "11") } } }
double-y/translation-idea-plugin
test/org/jetbrains/plugins/scala/debugger/evaluateExpression/ScalaExpressionsEvaluator.scala
Scala
apache-2.0
8,587
package com.twitter.scalding.reducer_estimation import org.apache.hadoop.mapred.JobConf import org.slf4j.LoggerFactory import scala.util.{ Failure, Success } object RatioBasedEstimator { /** * RatioBasedEstimator optionally ignores history items whose input size is * drastically different than the current job. This parameter specifies the * lower bound on allowable input size ratio. Defaults to 0.10 (10%), which * sets the upper bound to 10x. */ val inputRatioThresholdKey = "scalding.reducer.estimator.input.ratio.threshold" def getInputRatioThreshold(conf: JobConf) = conf.getFloat(inputRatioThresholdKey, 0.10f) } abstract class RatioBasedEstimator extends ReducerEstimator { def historyService: HistoryService private val LOG = LoggerFactory.getLogger(this.getClass) /** * Determines if this input and the previous input are close enough. * If they're drastically different, we have no business trying to * make an estimate based on the past job. * * @param threshold Specify lower bound on ratio (e.g. 0.10 for 10%) */ private def acceptableInputRatio(current: Long, past: Long, threshold: Double): Boolean = { val ratio = current / past.toDouble if (threshold > 0 && (ratio < threshold || ratio > 1 / threshold)) { LOG.warn("Input sizes differ too much to use for estimation: " + "current: " + current + ", past: " + past) false } else true } /** * Compute the average ratio of mapper bytes to reducer bytes and use that to * scale the estimate produced by InputSizeReducerEstimator. */ override def estimateReducers(info: FlowStrategyInfo): Option[Int] = { val conf = info.step.getConfig val maxHistory = EstimatorConfig.getMaxHistory(conf) val threshold = RatioBasedEstimator.getInputRatioThreshold(conf) historyService.fetchHistory(info, maxHistory) match { case Success(h) if h.isEmpty => LOG.warn("No matching history found.") None case Success(history) => val inputBytes = Common.totalInputSize(info.step) if (inputBytes == 0) { LOG.warn("No input detected.") None } else { val ratios = for { h <- history if acceptableInputRatio(inputBytes, h.hdfsBytesRead, threshold) } yield h.reduceFileBytesRead / h.hdfsBytesRead.toDouble if (ratios.isEmpty) { LOG.warn(s"No matching history found within input ratio threshold: $threshold") None } else { val reducerRatio = ratios.sum / ratios.length LOG.info("Getting base estimate from InputSizeReducerEstimator") val inputSizeBasedEstimate = new InputSizeReducerEstimator().estimateReducers(info) inputSizeBasedEstimate.map { baseEstimate => // scale reducer estimate based on the historical input ratio val e = (baseEstimate * reducerRatio).ceil.toInt max 1 LOG.info("\\nRatioBasedEstimator" + "\\n - past reducer ratio: " + reducerRatio + "\\n - reducer estimate: " + e) e } } } case Failure(e) => LOG.warn("Unable to fetch history. Disabling RatioBasedEstimator.", e) None } } }
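// A hypothetical sketch showing how the input-ratio threshold documented above could be
// tightened and how a concrete estimator would plug in a HistoryService; the history
// service implementation itself is left abstract here.
import org.apache.hadoop.mapred.JobConf

class MyRatioBasedEstimator extends RatioBasedEstimator {
  // a concrete history store (e.g. one backed by hRaven) would be supplied here
  override def historyService: HistoryService = ???
}

object RatioEstimatorConfigSketch {
  def configure(conf: JobConf): Unit = {
    // accept only history items whose input size is within a factor of 20 of the current job
    conf.setFloat(RatioBasedEstimator.inputRatioThresholdKey, 0.05f)
  }
}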
chrismoulton/scalding
scalding-core/src/main/scala/com/twitter/scalding/reducer_estimation/RatioBasedEstimator.scala
Scala
apache-2.0
3,317
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.spark.sql.catalyst.util import java.sql.{Date, Timestamp} import java.text.SimpleDateFormat import java.util.{Calendar, Locale, TimeZone} import org.apache.spark.SparkFunSuite import org.apache.spark.sql.catalyst.util.DateTimeUtils._ import org.apache.spark.unsafe.types.UTF8String class DateTimeUtilsSuite extends SparkFunSuite { val TimeZonePST = TimeZone.getTimeZone("PST") private[this] def getInUTCDays(timestamp: Long): Int = { val tz = TimeZone.getDefault ((timestamp + tz.getOffset(timestamp)) / MILLIS_PER_DAY).toInt } test("nanoseconds truncation") { def checkStringToTimestamp(originalTime: String, expectedParsedTime: String) { val parsedTimestampOp = DateTimeUtils.stringToTimestamp(UTF8String.fromString(originalTime)) assert(parsedTimestampOp.isDefined, "timestamp with nanoseconds was not parsed correctly") assert(DateTimeUtils.timestampToString(parsedTimestampOp.get) === expectedParsedTime) } checkStringToTimestamp("2015-01-02 00:00:00.123456789", "2015-01-02 00:00:00.123456") checkStringToTimestamp("2015-01-02 00:00:00.100000009", "2015-01-02 00:00:00.1") checkStringToTimestamp("2015-01-02 00:00:00.000050000", "2015-01-02 00:00:00.00005") checkStringToTimestamp("2015-01-02 00:00:00.12005", "2015-01-02 00:00:00.12005") checkStringToTimestamp("2015-01-02 00:00:00.100", "2015-01-02 00:00:00.1") checkStringToTimestamp("2015-01-02 00:00:00.000456789", "2015-01-02 00:00:00.000456") checkStringToTimestamp("1950-01-02 00:00:00.000456789", "1950-01-02 00:00:00.000456") } test("timestamp and us") { val now = new Timestamp(System.currentTimeMillis()) now.setNanos(1000) val ns = fromJavaTimestamp(now) assert(ns % 1000000L === 1) assert(toJavaTimestamp(ns) === now) List(-111111111111L, -1L, 0, 1L, 111111111111L).foreach { t => val ts = toJavaTimestamp(t) assert(fromJavaTimestamp(ts) === t) assert(toJavaTimestamp(fromJavaTimestamp(ts)) === ts) } } test("us and julian day") { val (d, ns) = toJulianDay(0) assert(d === JULIAN_DAY_OF_EPOCH) assert(ns === 0) assert(fromJulianDay(d, ns) == 0L) Seq(Timestamp.valueOf("2015-06-11 10:10:10.100"), Timestamp.valueOf("2015-06-11 20:10:10.100"), Timestamp.valueOf("1900-06-11 20:10:10.100")).foreach { t => val (d, ns) = toJulianDay(fromJavaTimestamp(t)) assert(ns > 0) val t1 = toJavaTimestamp(fromJulianDay(d, ns)) assert(t.equals(t1)) } } test("SPARK-6785: java date conversion before and after epoch") { def checkFromToJavaDate(d1: Date): Unit = { val d2 = toJavaDate(fromJavaDate(d1)) assert(d2.toString === d1.toString) } val df1 = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss", Locale.US) val df2 = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss z", Locale.US) checkFromToJavaDate(new Date(100)) checkFromToJavaDate(Date.valueOf("1970-01-01")) checkFromToJavaDate(new 
Date(df1.parse("1970-01-01 00:00:00").getTime)) checkFromToJavaDate(new Date(df2.parse("1970-01-01 00:00:00 UTC").getTime)) checkFromToJavaDate(new Date(df1.parse("1970-01-01 00:00:01").getTime)) checkFromToJavaDate(new Date(df2.parse("1970-01-01 00:00:01 UTC").getTime)) checkFromToJavaDate(new Date(df1.parse("1969-12-31 23:59:59").getTime)) checkFromToJavaDate(new Date(df2.parse("1969-12-31 23:59:59 UTC").getTime)) checkFromToJavaDate(Date.valueOf("1969-01-01")) checkFromToJavaDate(new Date(df1.parse("1969-01-01 00:00:00").getTime)) checkFromToJavaDate(new Date(df2.parse("1969-01-01 00:00:00 UTC").getTime)) checkFromToJavaDate(new Date(df1.parse("1969-01-01 00:00:01").getTime)) checkFromToJavaDate(new Date(df2.parse("1969-01-01 00:00:01 UTC").getTime)) checkFromToJavaDate(new Date(df1.parse("1989-11-09 11:59:59").getTime)) checkFromToJavaDate(new Date(df2.parse("1989-11-09 19:59:59 UTC").getTime)) checkFromToJavaDate(new Date(df1.parse("1776-07-04 10:30:00").getTime)) checkFromToJavaDate(new Date(df2.parse("1776-07-04 18:30:00 UTC").getTime)) } test("string to date") { var c = Calendar.getInstance() c.set(2015, 0, 28, 0, 0, 0) c.set(Calendar.MILLISECOND, 0) assert(stringToDate(UTF8String.fromString("2015-01-28")).get === millisToDays(c.getTimeInMillis)) c.set(2015, 0, 1, 0, 0, 0) c.set(Calendar.MILLISECOND, 0) assert(stringToDate(UTF8String.fromString("2015")).get === millisToDays(c.getTimeInMillis)) c.set(1, 0, 1, 0, 0, 0) c.set(Calendar.MILLISECOND, 0) assert(stringToDate(UTF8String.fromString("0001")).get === millisToDays(c.getTimeInMillis)) c = Calendar.getInstance() c.set(2015, 2, 1, 0, 0, 0) c.set(Calendar.MILLISECOND, 0) assert(stringToDate(UTF8String.fromString("2015-03")).get === millisToDays(c.getTimeInMillis)) c = Calendar.getInstance() c.set(2015, 2, 18, 0, 0, 0) c.set(Calendar.MILLISECOND, 0) Seq("2015-03-18", "2015-03-18 ", " 2015-03-18", " 2015-03-18 ", "2015-03-18 123142", "2015-03-18T123123", "2015-03-18T").foreach { s => assert(stringToDate(UTF8String.fromString(s)).get === millisToDays(c.getTimeInMillis)) } assert(stringToDate(UTF8String.fromString("2015-03-18X")).isEmpty) assert(stringToDate(UTF8String.fromString("2015/03/18")).isEmpty) assert(stringToDate(UTF8String.fromString("2015.03.18")).isEmpty) assert(stringToDate(UTF8String.fromString("20150318")).isEmpty) assert(stringToDate(UTF8String.fromString("2015-031-8")).isEmpty) assert(stringToDate(UTF8String.fromString("02015-03-18")).isEmpty) assert(stringToDate(UTF8String.fromString("015-03-18")).isEmpty) assert(stringToDate(UTF8String.fromString("015")).isEmpty) assert(stringToDate(UTF8String.fromString("02015")).isEmpty) } test("string to time") { // Tests with UTC. val c = Calendar.getInstance(TimeZone.getTimeZone("UTC")) c.set(Calendar.MILLISECOND, 0) c.set(1900, 0, 1, 0, 0, 0) assert(stringToTime("1900-01-01T00:00:00GMT-00:00") === c.getTime()) c.set(2000, 11, 30, 10, 0, 0) assert(stringToTime("2000-12-30T10:00:00Z") === c.getTime()) // Tests with set time zone. c.setTimeZone(TimeZone.getTimeZone("GMT-04:00")) c.set(Calendar.MILLISECOND, 0) c.set(1900, 0, 1, 0, 0, 0) assert(stringToTime("1900-01-01T00:00:00-04:00") === c.getTime()) c.set(1900, 0, 1, 0, 0, 0) assert(stringToTime("1900-01-01T00:00:00GMT-04:00") === c.getTime()) // Tests with local time zone. 
c.setTimeZone(TimeZone.getDefault()) c.set(Calendar.MILLISECOND, 0) c.set(2000, 11, 30, 0, 0, 0) assert(stringToTime("2000-12-30") === new Date(c.getTimeInMillis())) c.set(2000, 11, 30, 10, 0, 0) assert(stringToTime("2000-12-30 10:00:00") === new Timestamp(c.getTimeInMillis())) } test("string to timestamp") { for (tz <- DateTimeTestUtils.ALL_TIMEZONES) { def checkStringToTimestamp(str: String, expected: Option[Long]): Unit = { assert(stringToTimestamp(UTF8String.fromString(str), tz) === expected) } var c = Calendar.getInstance(tz) c.set(1969, 11, 31, 16, 0, 0) c.set(Calendar.MILLISECOND, 0) checkStringToTimestamp("1969-12-31 16:00:00", Option(c.getTimeInMillis * 1000)) c.set(1, 0, 1, 0, 0, 0) c.set(Calendar.MILLISECOND, 0) checkStringToTimestamp("0001", Option(c.getTimeInMillis * 1000)) c = Calendar.getInstance(tz) c.set(2015, 2, 1, 0, 0, 0) c.set(Calendar.MILLISECOND, 0) checkStringToTimestamp("2015-03", Option(c.getTimeInMillis * 1000)) c = Calendar.getInstance(tz) c.set(2015, 2, 18, 0, 0, 0) c.set(Calendar.MILLISECOND, 0) Seq("2015-03-18", "2015-03-18 ", " 2015-03-18", " 2015-03-18 ", "2015-03-18T").foreach { s => checkStringToTimestamp(s, Option(c.getTimeInMillis * 1000)) } c = Calendar.getInstance(tz) c.set(2015, 2, 18, 12, 3, 17) c.set(Calendar.MILLISECOND, 0) checkStringToTimestamp("2015-03-18 12:03:17", Option(c.getTimeInMillis * 1000)) checkStringToTimestamp("2015-03-18T12:03:17", Option(c.getTimeInMillis * 1000)) // If the string value includes timezone string, it represents the timestamp string // in the timezone regardless of the tz parameter. c = Calendar.getInstance(TimeZone.getTimeZone("GMT-13:53")) c.set(2015, 2, 18, 12, 3, 17) c.set(Calendar.MILLISECOND, 0) checkStringToTimestamp("2015-03-18T12:03:17-13:53", Option(c.getTimeInMillis * 1000)) c = Calendar.getInstance(TimeZone.getTimeZone("UTC")) c.set(2015, 2, 18, 12, 3, 17) c.set(Calendar.MILLISECOND, 0) checkStringToTimestamp("2015-03-18T12:03:17Z", Option(c.getTimeInMillis * 1000)) checkStringToTimestamp("2015-03-18 12:03:17Z", Option(c.getTimeInMillis * 1000)) c = Calendar.getInstance(TimeZone.getTimeZone("GMT-01:00")) c.set(2015, 2, 18, 12, 3, 17) c.set(Calendar.MILLISECOND, 0) checkStringToTimestamp("2015-03-18T12:03:17-1:0", Option(c.getTimeInMillis * 1000)) checkStringToTimestamp("2015-03-18T12:03:17-01:00", Option(c.getTimeInMillis * 1000)) c = Calendar.getInstance(TimeZone.getTimeZone("GMT+07:30")) c.set(2015, 2, 18, 12, 3, 17) c.set(Calendar.MILLISECOND, 0) checkStringToTimestamp("2015-03-18T12:03:17+07:30", Option(c.getTimeInMillis * 1000)) c = Calendar.getInstance(TimeZone.getTimeZone("GMT+07:03")) c.set(2015, 2, 18, 12, 3, 17) c.set(Calendar.MILLISECOND, 0) checkStringToTimestamp("2015-03-18T12:03:17+07:03", Option(c.getTimeInMillis * 1000)) // tests for the string including milliseconds. c = Calendar.getInstance(tz) c.set(2015, 2, 18, 12, 3, 17) c.set(Calendar.MILLISECOND, 123) checkStringToTimestamp("2015-03-18 12:03:17.123", Option(c.getTimeInMillis * 1000)) checkStringToTimestamp("2015-03-18T12:03:17.123", Option(c.getTimeInMillis * 1000)) // If the string value includes timezone string, it represents the timestamp string // in the timezone regardless of the tz parameter. 
c = Calendar.getInstance(TimeZone.getTimeZone("UTC")) c.set(2015, 2, 18, 12, 3, 17) c.set(Calendar.MILLISECOND, 456) checkStringToTimestamp("2015-03-18T12:03:17.456Z", Option(c.getTimeInMillis * 1000)) checkStringToTimestamp("2015-03-18 12:03:17.456Z", Option(c.getTimeInMillis * 1000)) c = Calendar.getInstance(TimeZone.getTimeZone("GMT-01:00")) c.set(2015, 2, 18, 12, 3, 17) c.set(Calendar.MILLISECOND, 123) checkStringToTimestamp("2015-03-18T12:03:17.123-1:0", Option(c.getTimeInMillis * 1000)) checkStringToTimestamp("2015-03-18T12:03:17.123-01:00", Option(c.getTimeInMillis * 1000)) c = Calendar.getInstance(TimeZone.getTimeZone("GMT+07:30")) c.set(2015, 2, 18, 12, 3, 17) c.set(Calendar.MILLISECOND, 123) checkStringToTimestamp("2015-03-18T12:03:17.123+07:30", Option(c.getTimeInMillis * 1000)) c = Calendar.getInstance(TimeZone.getTimeZone("GMT+07:30")) c.set(2015, 2, 18, 12, 3, 17) c.set(Calendar.MILLISECOND, 123) checkStringToTimestamp("2015-03-18T12:03:17.123+07:30", Option(c.getTimeInMillis * 1000)) c = Calendar.getInstance(TimeZone.getTimeZone("GMT+07:30")) c.set(2015, 2, 18, 12, 3, 17) c.set(Calendar.MILLISECOND, 123) checkStringToTimestamp( "2015-03-18T12:03:17.123121+7:30", Option(c.getTimeInMillis * 1000 + 121)) c = Calendar.getInstance(TimeZone.getTimeZone("GMT+07:30")) c.set(2015, 2, 18, 12, 3, 17) c.set(Calendar.MILLISECOND, 123) checkStringToTimestamp( "2015-03-18T12:03:17.12312+7:30", Option(c.getTimeInMillis * 1000 + 120)) c = Calendar.getInstance(tz) c.set(Calendar.HOUR_OF_DAY, 18) c.set(Calendar.MINUTE, 12) c.set(Calendar.SECOND, 15) c.set(Calendar.MILLISECOND, 0) checkStringToTimestamp("18:12:15", Option(c.getTimeInMillis * 1000)) c = Calendar.getInstance(TimeZone.getTimeZone("GMT+07:30")) c.set(Calendar.HOUR_OF_DAY, 18) c.set(Calendar.MINUTE, 12) c.set(Calendar.SECOND, 15) c.set(Calendar.MILLISECOND, 123) checkStringToTimestamp("T18:12:15.12312+7:30", Option(c.getTimeInMillis * 1000 + 120)) c = Calendar.getInstance(TimeZone.getTimeZone("GMT+07:30")) c.set(Calendar.HOUR_OF_DAY, 18) c.set(Calendar.MINUTE, 12) c.set(Calendar.SECOND, 15) c.set(Calendar.MILLISECOND, 123) checkStringToTimestamp("18:12:15.12312+7:30", Option(c.getTimeInMillis * 1000 + 120)) c = Calendar.getInstance(tz) c.set(2011, 4, 6, 7, 8, 9) c.set(Calendar.MILLISECOND, 100) checkStringToTimestamp("2011-05-06 07:08:09.1000", Option(c.getTimeInMillis * 1000)) checkStringToTimestamp("238", None) checkStringToTimestamp("00238", None) checkStringToTimestamp("2015-03-18 123142", None) checkStringToTimestamp("2015-03-18T123123", None) checkStringToTimestamp("2015-03-18X", None) checkStringToTimestamp("2015/03/18", None) checkStringToTimestamp("2015.03.18", None) checkStringToTimestamp("20150318", None) checkStringToTimestamp("2015-031-8", None) checkStringToTimestamp("02015-01-18", None) checkStringToTimestamp("015-01-18", None) checkStringToTimestamp("2015-03-18T12:03.17-20:0", None) checkStringToTimestamp("2015-03-18T12:03.17-0:70", None) checkStringToTimestamp("2015-03-18T12:03.17-1:0:0", None) // Truncating the fractional seconds c = Calendar.getInstance(TimeZone.getTimeZone("GMT+00:00")) c.set(2015, 2, 18, 12, 3, 17) c.set(Calendar.MILLISECOND, 0) checkStringToTimestamp( "2015-03-18T12:03:17.123456789+0:00", Option(c.getTimeInMillis * 1000 + 123456)) } } test("SPARK-15379: special invalid date string") { // Test stringToDate assert(stringToDate( UTF8String.fromString("2015-02-29 00:00:00")).isEmpty) assert(stringToDate( UTF8String.fromString("2015-04-31 00:00:00")).isEmpty) 
assert(stringToDate(UTF8String.fromString("2015-02-29")).isEmpty) assert(stringToDate(UTF8String.fromString("2015-04-31")).isEmpty) // Test stringToTimestamp assert(stringToTimestamp( UTF8String.fromString("2015-02-29 00:00:00")).isEmpty) assert(stringToTimestamp( UTF8String.fromString("2015-04-31 00:00:00")).isEmpty) assert(stringToTimestamp(UTF8String.fromString("2015-02-29")).isEmpty) assert(stringToTimestamp(UTF8String.fromString("2015-04-31")).isEmpty) } test("hours") { val c = Calendar.getInstance(TimeZonePST) c.set(2015, 2, 18, 13, 2, 11) assert(getHours(c.getTimeInMillis * 1000, TimeZonePST) === 13) assert(getHours(c.getTimeInMillis * 1000, TimeZoneGMT) === 20) c.set(2015, 12, 8, 2, 7, 9) assert(getHours(c.getTimeInMillis * 1000, TimeZonePST) === 2) assert(getHours(c.getTimeInMillis * 1000, TimeZoneGMT) === 10) } test("minutes") { val c = Calendar.getInstance(TimeZonePST) c.set(2015, 2, 18, 13, 2, 11) assert(getMinutes(c.getTimeInMillis * 1000, TimeZonePST) === 2) assert(getMinutes(c.getTimeInMillis * 1000, TimeZoneGMT) === 2) assert(getMinutes(c.getTimeInMillis * 1000, TimeZone.getTimeZone("Australia/North")) === 32) c.set(2015, 2, 8, 2, 7, 9) assert(getMinutes(c.getTimeInMillis * 1000, TimeZonePST) === 7) assert(getMinutes(c.getTimeInMillis * 1000, TimeZoneGMT) === 7) assert(getMinutes(c.getTimeInMillis * 1000, TimeZone.getTimeZone("Australia/North")) === 37) } test("seconds") { val c = Calendar.getInstance(TimeZonePST) c.set(2015, 2, 18, 13, 2, 11) assert(getSeconds(c.getTimeInMillis * 1000, TimeZonePST) === 11) assert(getSeconds(c.getTimeInMillis * 1000, TimeZoneGMT) === 11) c.set(2015, 2, 8, 2, 7, 9) assert(getSeconds(c.getTimeInMillis * 1000, TimeZonePST) === 9) assert(getSeconds(c.getTimeInMillis * 1000, TimeZoneGMT) === 9) } test("hours / minutes / seconds") { Seq(Timestamp.valueOf("2015-06-11 10:12:35.789"), Timestamp.valueOf("2015-06-11 20:13:40.789"), Timestamp.valueOf("1900-06-11 12:14:50.789"), Timestamp.valueOf("1700-02-28 12:14:50.123456")).foreach { t => val us = fromJavaTimestamp(t) assert(toJavaTimestamp(us) === t) } } test("get day in year") { val c = Calendar.getInstance() c.set(2015, 2, 18, 0, 0, 0) assert(getDayInYear(getInUTCDays(c.getTimeInMillis)) === 77) c.set(2012, 2, 18, 0, 0, 0) assert(getDayInYear(getInUTCDays(c.getTimeInMillis)) === 78) } test("get year") { val c = Calendar.getInstance() c.set(2015, 2, 18, 0, 0, 0) assert(getYear(getInUTCDays(c.getTimeInMillis)) === 2015) c.set(2012, 2, 18, 0, 0, 0) assert(getYear(getInUTCDays(c.getTimeInMillis)) === 2012) } test("get quarter") { val c = Calendar.getInstance() c.set(2015, 2, 18, 0, 0, 0) assert(getQuarter(getInUTCDays(c.getTimeInMillis)) === 1) c.set(2012, 11, 18, 0, 0, 0) assert(getQuarter(getInUTCDays(c.getTimeInMillis)) === 4) } test("get month") { val c = Calendar.getInstance() c.set(2015, 2, 18, 0, 0, 0) assert(getMonth(getInUTCDays(c.getTimeInMillis)) === 3) c.set(2012, 11, 18, 0, 0, 0) assert(getMonth(getInUTCDays(c.getTimeInMillis)) === 12) } test("get day of month") { val c = Calendar.getInstance() c.set(2015, 2, 18, 0, 0, 0) assert(getDayOfMonth(getInUTCDays(c.getTimeInMillis)) === 18) c.set(2012, 11, 24, 0, 0, 0) assert(getDayOfMonth(getInUTCDays(c.getTimeInMillis)) === 24) } test("date add months") { val c1 = Calendar.getInstance() c1.set(1997, 1, 28, 10, 30, 0) val days1 = millisToDays(c1.getTimeInMillis) val c2 = Calendar.getInstance() c2.set(2000, 1, 29) assert(dateAddMonths(days1, 36) === millisToDays(c2.getTimeInMillis)) c2.set(1996, 0, 31) assert(dateAddMonths(days1, -13) === 
millisToDays(c2.getTimeInMillis)) } test("timestamp add months") { val c1 = Calendar.getInstance() c1.set(1997, 1, 28, 10, 30, 0) c1.set(Calendar.MILLISECOND, 0) val ts1 = c1.getTimeInMillis * 1000L val c2 = Calendar.getInstance() c2.set(2000, 1, 29, 10, 30, 0) c2.set(Calendar.MILLISECOND, 123) val ts2 = c2.getTimeInMillis * 1000L assert(timestampAddInterval(ts1, 36, 123000) === ts2) val c3 = Calendar.getInstance(TimeZonePST) c3.set(1997, 1, 27, 16, 0, 0) c3.set(Calendar.MILLISECOND, 0) val ts3 = c3.getTimeInMillis * 1000L val c4 = Calendar.getInstance(TimeZonePST) c4.set(2000, 1, 27, 16, 0, 0) c4.set(Calendar.MILLISECOND, 123) val ts4 = c4.getTimeInMillis * 1000L val c5 = Calendar.getInstance(TimeZoneGMT) c5.set(2000, 1, 29, 0, 0, 0) c5.set(Calendar.MILLISECOND, 123) val ts5 = c5.getTimeInMillis * 1000L assert(timestampAddInterval(ts3, 36, 123000, TimeZonePST) === ts4) assert(timestampAddInterval(ts3, 36, 123000, TimeZoneGMT) === ts5) } test("monthsBetween") { val c1 = Calendar.getInstance() c1.set(1997, 1, 28, 10, 30, 0) val c2 = Calendar.getInstance() c2.set(1996, 9, 30, 0, 0, 0) assert(monthsBetween( c1.getTimeInMillis * 1000L, c2.getTimeInMillis * 1000L, true, c1.getTimeZone) === 3.94959677) assert(monthsBetween( c1.getTimeInMillis * 1000L, c2.getTimeInMillis * 1000L, false, c1.getTimeZone) === 3.9495967741935485) Seq(true, false).foreach { roundOff => c2.set(2000, 1, 28, 0, 0, 0) assert(monthsBetween( c1.getTimeInMillis * 1000L, c2.getTimeInMillis * 1000L, roundOff, c1.getTimeZone) === -36) c2.set(2000, 1, 29, 0, 0, 0) assert(monthsBetween( c1.getTimeInMillis * 1000L, c2.getTimeInMillis * 1000L, roundOff, c1.getTimeZone) === -36) c2.set(1996, 2, 31, 0, 0, 0) assert(monthsBetween( c1.getTimeInMillis * 1000L, c2.getTimeInMillis * 1000L, roundOff, c1.getTimeZone) === 11) } val c3 = Calendar.getInstance(TimeZonePST) c3.set(2000, 1, 28, 16, 0, 0) val c4 = Calendar.getInstance(TimeZonePST) c4.set(1997, 1, 28, 16, 0, 0) assert( monthsBetween(c3.getTimeInMillis * 1000L, c4.getTimeInMillis * 1000L, true, TimeZonePST) === 36.0) assert( monthsBetween(c3.getTimeInMillis * 1000L, c4.getTimeInMillis * 1000L, true, TimeZoneGMT) === 35.90322581) assert( monthsBetween(c3.getTimeInMillis * 1000L, c4.getTimeInMillis * 1000L, false, TimeZoneGMT) === 35.903225806451616) } test("from UTC timestamp") { def test(utc: String, tz: String, expected: String): Unit = { assert(toJavaTimestamp(fromUTCTime(fromJavaTimestamp(Timestamp.valueOf(utc)), tz)).toString === expected) } for (tz <- DateTimeTestUtils.ALL_TIMEZONES) { DateTimeTestUtils.withDefaultTimeZone(tz) { test("2011-12-25 09:00:00.123456", "UTC", "2011-12-25 09:00:00.123456") test("2011-12-25 09:00:00.123456", "JST", "2011-12-25 18:00:00.123456") test("2011-12-25 09:00:00.123456", "PST", "2011-12-25 01:00:00.123456") test("2011-12-25 09:00:00.123456", "Asia/Shanghai", "2011-12-25 17:00:00.123456") } } DateTimeTestUtils.withDefaultTimeZone(TimeZone.getTimeZone("PST")) { // Daylight Saving Time test("2016-03-13 09:59:59.0", "PST", "2016-03-13 01:59:59.0") test("2016-03-13 10:00:00.0", "PST", "2016-03-13 03:00:00.0") test("2016-11-06 08:59:59.0", "PST", "2016-11-06 01:59:59.0") test("2016-11-06 09:00:00.0", "PST", "2016-11-06 01:00:00.0") test("2016-11-06 10:00:00.0", "PST", "2016-11-06 02:00:00.0") } } test("to UTC timestamp") { def test(utc: String, tz: String, expected: String): Unit = { assert(toJavaTimestamp(toUTCTime(fromJavaTimestamp(Timestamp.valueOf(utc)), tz)).toString === expected) } for (tz <- DateTimeTestUtils.ALL_TIMEZONES) { 
DateTimeTestUtils.withDefaultTimeZone(tz) { test("2011-12-25 09:00:00.123456", "UTC", "2011-12-25 09:00:00.123456") test("2011-12-25 18:00:00.123456", "JST", "2011-12-25 09:00:00.123456") test("2011-12-25 01:00:00.123456", "PST", "2011-12-25 09:00:00.123456") test("2011-12-25 17:00:00.123456", "Asia/Shanghai", "2011-12-25 09:00:00.123456") } } DateTimeTestUtils.withDefaultTimeZone(TimeZone.getTimeZone("PST")) { // Daylight Saving Time test("2016-03-13 01:59:59", "PST", "2016-03-13 09:59:59.0") // 2016-03-13 02:00:00 PST does not exists test("2016-03-13 02:00:00", "PST", "2016-03-13 10:00:00.0") test("2016-03-13 03:00:00", "PST", "2016-03-13 10:00:00.0") test("2016-11-06 00:59:59", "PST", "2016-11-06 07:59:59.0") // 2016-11-06 01:00:00 PST could be 2016-11-06 08:00:00 UTC or 2016-11-06 09:00:00 UTC test("2016-11-06 01:00:00", "PST", "2016-11-06 09:00:00.0") test("2016-11-06 01:59:59", "PST", "2016-11-06 09:59:59.0") test("2016-11-06 02:00:00", "PST", "2016-11-06 10:00:00.0") } } test("truncTimestamp") { def testTrunc( level: Int, expected: String, inputTS: SQLTimestamp, timezone: TimeZone = DateTimeUtils.defaultTimeZone()): Unit = { val truncated = DateTimeUtils.truncTimestamp(inputTS, level, timezone) val expectedTS = DateTimeUtils.stringToTimestamp(UTF8String.fromString(expected)) assert(truncated === expectedTS.get) } val defaultInputTS = DateTimeUtils.stringToTimestamp(UTF8String.fromString("2015-03-05T09:32:05.359")) val defaultInputTS1 = DateTimeUtils.stringToTimestamp(UTF8String.fromString("2015-03-31T20:32:05.359")) val defaultInputTS2 = DateTimeUtils.stringToTimestamp(UTF8String.fromString("2015-04-01T02:32:05.359")) val defaultInputTS3 = DateTimeUtils.stringToTimestamp(UTF8String.fromString("2015-03-30T02:32:05.359")) val defaultInputTS4 = DateTimeUtils.stringToTimestamp(UTF8String.fromString("2015-03-29T02:32:05.359")) testTrunc(DateTimeUtils.TRUNC_TO_YEAR, "2015-01-01T00:00:00", defaultInputTS.get) testTrunc(DateTimeUtils.TRUNC_TO_MONTH, "2015-03-01T00:00:00", defaultInputTS.get) testTrunc(DateTimeUtils.TRUNC_TO_DAY, "2015-03-05T00:00:00", defaultInputTS.get) testTrunc(DateTimeUtils.TRUNC_TO_HOUR, "2015-03-05T09:00:00", defaultInputTS.get) testTrunc(DateTimeUtils.TRUNC_TO_MINUTE, "2015-03-05T09:32:00", defaultInputTS.get) testTrunc(DateTimeUtils.TRUNC_TO_SECOND, "2015-03-05T09:32:05", defaultInputTS.get) testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-02T00:00:00", defaultInputTS.get) testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-30T00:00:00", defaultInputTS1.get) testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-30T00:00:00", defaultInputTS2.get) testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-30T00:00:00", defaultInputTS3.get) testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-23T00:00:00", defaultInputTS4.get) testTrunc(DateTimeUtils.TRUNC_TO_QUARTER, "2015-01-01T00:00:00", defaultInputTS.get) testTrunc(DateTimeUtils.TRUNC_TO_QUARTER, "2015-01-01T00:00:00", defaultInputTS1.get) testTrunc(DateTimeUtils.TRUNC_TO_QUARTER, "2015-04-01T00:00:00", defaultInputTS2.get) for (tz <- DateTimeTestUtils.ALL_TIMEZONES) { DateTimeTestUtils.withDefaultTimeZone(tz) { val inputTS = DateTimeUtils.stringToTimestamp(UTF8String.fromString("2015-03-05T09:32:05.359")) val inputTS1 = DateTimeUtils.stringToTimestamp(UTF8String.fromString("2015-03-31T20:32:05.359")) val inputTS2 = DateTimeUtils.stringToTimestamp(UTF8String.fromString("2015-04-01T02:32:05.359")) val inputTS3 = DateTimeUtils.stringToTimestamp(UTF8String.fromString("2015-03-30T02:32:05.359")) val inputTS4 = 
DateTimeUtils.stringToTimestamp(UTF8String.fromString("2015-03-29T02:32:05.359")) testTrunc(DateTimeUtils.TRUNC_TO_YEAR, "2015-01-01T00:00:00", inputTS.get, tz) testTrunc(DateTimeUtils.TRUNC_TO_MONTH, "2015-03-01T00:00:00", inputTS.get, tz) testTrunc(DateTimeUtils.TRUNC_TO_DAY, "2015-03-05T00:00:00", inputTS.get, tz) testTrunc(DateTimeUtils.TRUNC_TO_HOUR, "2015-03-05T09:00:00", inputTS.get, tz) testTrunc(DateTimeUtils.TRUNC_TO_MINUTE, "2015-03-05T09:32:00", inputTS.get, tz) testTrunc(DateTimeUtils.TRUNC_TO_SECOND, "2015-03-05T09:32:05", inputTS.get, tz) testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-02T00:00:00", inputTS.get, tz) testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-30T00:00:00", inputTS1.get, tz) testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-30T00:00:00", inputTS2.get, tz) testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-30T00:00:00", inputTS3.get, tz) testTrunc(DateTimeUtils.TRUNC_TO_WEEK, "2015-03-23T00:00:00", inputTS4.get, tz) testTrunc(DateTimeUtils.TRUNC_TO_QUARTER, "2015-01-01T00:00:00", inputTS.get, tz) testTrunc(DateTimeUtils.TRUNC_TO_QUARTER, "2015-01-01T00:00:00", inputTS1.get, tz) testTrunc(DateTimeUtils.TRUNC_TO_QUARTER, "2015-04-01T00:00:00", inputTS2.get, tz) } } } test("daysToMillis and millisToDays") { val c = Calendar.getInstance(TimeZonePST) c.set(2015, 11, 31, 16, 0, 0) assert(millisToDays(c.getTimeInMillis, TimeZonePST) === 16800) assert(millisToDays(c.getTimeInMillis, TimeZoneGMT) === 16801) c.set(2015, 11, 31, 0, 0, 0) c.set(Calendar.MILLISECOND, 0) assert(daysToMillis(16800, TimeZonePST) === c.getTimeInMillis) c.setTimeZone(TimeZoneGMT) c.set(2015, 11, 31, 0, 0, 0) c.set(Calendar.MILLISECOND, 0) assert(daysToMillis(16800, TimeZoneGMT) === c.getTimeInMillis) // There are some days are skipped entirely in some timezone, skip them here. val skipped_days = Map[String, Set[Int]]( "Kwajalein" -> Set(8632), "Pacific/Apia" -> Set(15338), "Pacific/Enderbury" -> Set(9130, 9131), "Pacific/Fakaofo" -> Set(15338), "Pacific/Kiritimati" -> Set(9130, 9131), "Pacific/Kwajalein" -> Set(8632), "MIT" -> Set(15338)) for (tz <- DateTimeTestUtils.ALL_TIMEZONES) { val skipped = skipped_days.getOrElse(tz.getID, Set.empty) (-20000 to 20000).foreach { d => if (!skipped.contains(d)) { assert(millisToDays(daysToMillis(d, tz), tz) === d, s"Round trip of ${d} did not work in tz ${tz}") } } } } }
guoxiaolongzte/spark
sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/util/DateTimeUtilsSuite.scala
Scala
apache-2.0
29,406
/*
 * Copyright 2020 Precog Data
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package pathy

import slamdata.Predef._
import pathy.Path._

import org.specs2.mutable.Spec
import scalaz._, Scalaz._

abstract class PlaceholderSpecs(pathCodec: PathCodec) extends Spec with ValidateCodec {
  val c: Char = pathCodec.separator

  "placeholder codec" >> {
    "printPath" >> {
      "replaces separator in segments with placeholder" in {
        PathCodec.placeholder(c)
          .unsafePrintPath(dir(s"foo${c}bar") </> dir("baz") </> file(s"qu${c}ux.txt")) must_==
          s".${c}foo$$sep$$bar${c}baz${c}qu$$sep$$ux.txt"
      }

      "replaces single dot dir name with placeholder" in {
        PathCodec.placeholder(c)
          .unsafePrintPath(dir(".") </> file("config")) must_==
          s".${c}$$dot$$${c}config"
      }

      "replaces double dot dir name with placeholder" in {
        PathCodec.placeholder(c)
          .unsafePrintPath(dir("foo") </> dir("..") </> file("config")) must_==
          s".${c}foo${c}$$dotdot$$${c}config"
      }
    }

    "parsePath" >> {
      /* Weirdly, in these examples must_=== compiles under scala 2.11 but not 2.10. */

      "reads separator ph in segments" in {
        PathCodec.placeholder(c)
          .parseRelDir(s"foo${c}$$sep$$${c}bar${c}") must_==
          Some(dir("foo") </> dir(c.shows) </> dir("bar"))
      }

      "reads single dot ph in segments" in {
        PathCodec.placeholder(c)
          .parseRelFile(s"foo${c}$$dot$$${c}bar") must_==
          Some(dir("foo") </> dir(".") </> file("bar"))
      }

      "reads double dot separator in segments" in {
        PathCodec.placeholder(c)
          .parseRelFile(s"foo${c}bar${c}$$dotdot$$") must_==
          Some(dir("foo") </> dir("bar") </> file(".."))
      }
    }

    "PathCodec is lossless" >> validateIsLossless(pathCodec)
  }
}

class PosixPlaceholderSpecs extends PlaceholderSpecs(posixCodec)
class WindowsPlaceholderSpecs extends PlaceholderSpecs(windowsCodec)
mossprescott/slamengine-pathy
tests/src/test/scala/pathy/PlaceholderSpecs.scala
Scala
agpl-3.0
2,468
package info.fotm.clustering.implementations

import info.fotm.clustering.Clusterer
import info.fotm.clustering.Clusterer.Cluster
import info.fotm.util.MathVector

class ClosestPlusPlusClusterer extends Clusterer {

  def init(input: Cluster, groupSize: Int): (Seq[Cluster], Cluster) = {
    val nClusters: Int = input.size / groupSize

    // take the point furthest from all other clusters
    (1 to nClusters).foldLeft((Seq.empty[Seq[MathVector]], input)) { (acc, i) =>
      val (clusters, left: Cluster) = acc

      if (clusters.isEmpty)
        (clusters :+ Seq(left.head), left diff Seq(left.head))
      else {
        val furthest: MathVector = left.maxBy(v => clusters.flatten.map(_.distTo(v)).sum)
        (clusters :+ Seq(furthest), left diff Seq(furthest))
      }
    }
  }

  override def clusterize(input: Cluster, groupSize: Int): Set[Cluster] = {
    val (seedClusters: Seq[Cluster], vectorsLeft) = init(input, groupSize)

    // add points to nearest clusters that are not yet full
    vectorsLeft.foldLeft(seedClusters) { (clusters, v) =>
      val nonFullClusters = clusters.filter(_.size < groupSize)
      val closestCluster = nonFullClusters.minBy { cluster => cluster.map(_.distTo(v)).sum }
      val newCluster = closestCluster :+ v
      (clusters diff Seq(closestCluster)) :+ newCluster
    }.toSet
  }
}
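// --- Illustrative usage sketch (not part of the original file) ---
// Rough idea of how clusterize could be driven. The Clusterer.Cluster alias is
// assumed to be Seq[MathVector] (as the folds above suggest), and the
// MathVector(...) varargs constructor is an assumption about info.fotm.util,
// not something this file confirms.
object ClosestPlusPlusClustererSketch {
  def main(args: Array[String]): Unit = {
    val points: Seq[MathVector] = Seq(
      MathVector(0.0, 0.0), MathVector(0.1, 0.1), // two nearby points
      MathVector(5.0, 5.0), MathVector(5.1, 4.9)  // two points far from the first pair
    )
    val clusterer = new ClosestPlusPlusClusterer
    // With groupSize = 2 the seeding picks 2 mutually distant points,
    // then fills each cluster with its closest remaining vectors.
    val groups = clusterer.clusterize(points, groupSize = 2)
    groups.foreach(println)
  }
}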
Groz/fotm-info
core/src/main/scala/info/fotm/clustering/implementations/ClosestPlusPlusClusterer.scala
Scala
mit
1,351
package tapechart.dtc

/**
 * Created with IntelliJ IDEA.
 * User: guiga
 * Date: 26/06/14
 * Time: 19:33
 */
object TradeModeEnum extends Enumeration {
  type TradeModeEnum = Value
  val TRADE_MODE_DEMO = Value(1)
  val TRADE_MODE_SIMULATED = Value(2)
  val TRADE_MODE_LIVE = Value(3)
}

class s_LogonRequest {

  import TradeModeEnum._
  import Consts._

  var Size: Short = _
  var Type: Short = _
  var ProtocolVersion: Int = _
  val Username = new Array[Char](32)
  val Password = new Array[Char](32)
  val GeneralTextData = new Array[Char](64)
  var Integer_1: Int = _
  var Integer_2: Int = _
  var HeartbeatIntervalInSeconds: Int = _
  var TradeMode: TradeModeEnum = _
  val TradeAccount = new Array[Char](TRADE_ACCOUNT_LENGTH)
  val HardwareIdentifier = new Array[Char](64)
  val ClientName = new Array[Char](32)

  // Constructor: s_LogonRequest()
  //   memset(this, 0,sizeof(s_LogonRequest));
  //   Type=LOGON_REQUEST;
  //   Size=sizeof(s_LogonRequest);
  ProtocolVersion = CURRENT_VERSION

  def GetMessageSize = Size

  // void CopyFrom(void * p_SourceData);

  def GetProtocolVersion = ProtocolVersion

  def GetUsername = Username

  def SetUsername(u: Array[Char]) = u.copyToArray(Username)
}
6qat/dtc-scala
src/main/scala/tapechart/dtc/s_LogonRequest.scala
Scala
gpl-2.0
1,204
package com.twitter.algebird

import org.scalacheck.Prop._
import org.scalacheck.{Arbitrary, Gen}

class SGDLaws extends CheckProperties {
  import com.twitter.algebird.BaseProperties._

  implicit val sgdMonoid = new SGDMonoid(SGD.constantStep(0.001), SGD.linearGradient)
  val zeroStepMonoid = new SGDMonoid(SGD.constantStep(0.0), SGD.linearGradient)

  val (m, b) = (2.0, 4.0)
  val eps = 1e-3

  val sgdPosGen = for {
    x <- Gen.choose(0.0, 1.0)
    n <- Gen.choose(0.0, 0.001)
  } yield SGDPos((m * x + b + n, IndexedSeq(x)))

  val sgdWGen = for {
    cnt <- Gen.choose(0L, 100000L)
    m <- Gen.choose(-10.0, 10.0)
    b <- Gen.choose(-10.0, 10.0)
  } yield SGDWeights(cnt, Vector(m, b))

  val zeroGen = Gen.const(SGDZero)

  implicit val sgdPos = Arbitrary(sgdPosGen)
  implicit val sgdWArb = Arbitrary(sgdWGen)

  implicit val sgdArb: Arbitrary[SGD[(Double, IndexedSeq[Double])]] = Arbitrary {
    Gen.oneOf(sgdWGen, sgdPosGen, zeroGen)
  }

  property("is a Monoid") {
    monoidLaws[SGD[(Double, IndexedSeq[Double])]]
  }

  property("Gradient is zero on the line") {
    forAll { (w: SGDWeights, x: Double) =>
      val m = w.weights(0)
      val b = w.weights(1)
      val y = m * x + b

      (y.isInfinity || {
        val pos = (y, IndexedSeq(x))
        val grad = SGD.linearGradient(w.weights, pos)
        (scala.math.abs(grad(0)) < eps) && (scala.math.abs(grad(1)) < eps)
      })
    }
  }

  property("Gradient at x=0 has zero first component") {
    forAll { (w: SGDWeights, y: Double) =>
      (SGD.linearGradient(w.weights, (y, IndexedSeq(0.0)))(0) == 0.0)
    }
  }

  property("Zero-step leaves Weights unchanged") {
    forAll { (w: SGDWeights, pos: SGDPos[(Double, IndexedSeq[Double])]) =>
      val next = zeroStepMonoid.newWeights(w, pos.pos.head)
      (next.weights == w.weights && next.count == (w.count + 1L))
    }
  }

  def minus(x: IndexedSeq[Double], y: IndexedSeq[Double]): IndexedSeq[Double] =
    x.zip(y).map { case (x: Double, y: Double) => x - y }

  val oneStepMonoid = new SGDMonoid(SGD.constantStep(1.0), SGD.linearGradient)

  property("unit step can be undone by adding gradient") {
    forAll { (w: SGDWeights, pos: SGDPos[(Double, IndexedSeq[Double])]) =>
      val next = oneStepMonoid.newWeights(w, pos.pos.head)
      next.weights == minus(w.weights, SGD.linearGradient(w.weights, pos.pos.head))
    }
  }
}
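// --- Illustrative usage sketch (not part of the original file) ---
// A minimal example of driving the monoid tested above by hand: start from
// some weights and apply one observed point with newWeights, the same call the
// "Zero-step" and "unit step" properties rely on. The constructors and the
// newWeights signature are taken from the test itself; the concrete values and
// the object name are invented for illustration.
object SGDUsageSketch {
  def main(args: Array[String]): Unit = {
    val monoid = new SGDMonoid(SGD.constantStep(0.001), SGD.linearGradient)
    // weights (m, b) with an observation count of 1
    val w0 = SGDWeights(1L, Vector(0.0, 0.0))
    // one observed point: target y = 6.0 for feature x = 1.0
    val point = (6.0, IndexedSeq(1.0))
    val w1 = monoid.newWeights(w0, point)
    println(s"count=${w1.count} weights=${w1.weights}")
  }
}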
nevillelyh/algebird
algebird-test/src/test/scala/com/twitter/algebird/SGDTest.scala
Scala
apache-2.0
2,378
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License. You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package io.github.sclearn.dataset.spark.sql.catalyst.util

import io.github.sclearn.dataset.spark.sql.types._

/**
 * Helper functions to check for valid data types.
 */
object TypeUtils {

  def getNumeric(t: DataType): Numeric[Any] =
    t.asInstanceOf[NumericType].numeric.asInstanceOf[Numeric[Any]]

  def compareBinary(x: Array[Byte], y: Array[Byte]): Int = {
    for (i <- 0 until x.length; if i < y.length) {
      val v1 = x(i) & 0xff
      val v2 = y(i) & 0xff
      val res = v1 - v2
      if (res != 0) return res
    }
    x.length - y.length
  }
}
sclearn/sclearn
sc/src/main/scala/io/github/sclearn/dataset/spark/sql/catalyst/util/TypeUtils.scala
Scala
apache-2.0
1,363
/*
 * Copyright 2012 Eric Olander
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.googlecode.scala.sound.midi.message

/**
 * Factory object for MonoOn messages
 */
object MonoOn {
  def apply() = {
    ShortMessage(0xB0, 0x7E, 0x00)
  }
}
betandr/scala-midi
src/main/scala/com/googlecode/scala/sound/midi/message/MonoOn.scala
Scala
apache-2.0
766
package com.github.distributed_stumps.stumps.message.subscriber

import com.github.distributed_stumps.stumps.message.common.Resource

/**
 * Represents an abstract resource that is / can-be requested from a subscriber. The specific details
 * of the resource can only be defined by the resource-provider.
 *
 * @param name The name of the resource
 * @param zone The zone that the resource resides in
 */
case class ResourceIdentifier(name: String, zone: Option[String])

object ResourceIdentifier {
  def apply(name: String): ResourceIdentifier = ResourceIdentifier(name, None)

  def apply(name: String, zone: String) = ResourceIdentifier(name, Some(zone))
}
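// --- Illustrative usage sketch (not part of the original file) ---
// The two companion-object constructors above in action; the resource name and
// zone values are invented for illustration.
object ResourceIdentifierSketch extends App {
  val unzoned = ResourceIdentifier("user-db")               // zone = None
  val zoned   = ResourceIdentifier("user-db", "eu-west-1")  // zone = Some("eu-west-1")
  println(unzoned)
  println(zoned)
}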
distributed-stumps/stumps-messages
src/main/scala/com/github/distributed_stumps/stumps/message/subscriber/ResourceIdentifier.scala
Scala
apache-2.0
662
import sbt._
import Keys._

object PomTest extends Build {
  lazy val custom = config("custom")

  lazy val root = Project("root", file("root")) configs(custom) settings(
    TaskKey[Unit]("check-pom") <<= checkPom,
    libraryDependencies ++= Seq(
      "a" % "a" % "1.0",
      "b" % "b" % "1.0" % "runtime,optional",
      "c" % "c" % "1.0" % "optional",
      "d" % "d" % "1.0" % "test",
      "e" % "e" % "1.0" % "custom",
      "f" % "f" % "1.0" % "custom,optional,runtime",
      "g" % "g" % "1.0" % "custom,runtime" classifier "foo",
      "h" % "h" % "1.0" % "custom,optional,runtime" classifier "foo"
    )
  )

  def checkPom = makePom map { pom =>
    val expected = Seq(
      ("a", Some("compile"), false, None),
      ("b", Some("runtime"), true, None),
      ("c", None, true, None),
      ("d", Some("test"), false, None),
      ("e", Some("custom"), false, None),
      ("f", Some("runtime"), true, None),
      ("g", Some("runtime"), false, Some("foo")),
      ("h", Some("runtime"), true, Some("foo"))
    )
    val loaded = xml.XML.loadFile(pom)
    val deps = loaded \\ "dependency"
    expected foreach { case (id, scope, opt, classifier) =>
      val dep = deps.find(d => (d \ "artifactId").text == id).getOrElse(
        error("Dependency '" + id + "' not written to pom:\n" + loaded))
      val actualOpt = java.lang.Boolean.parseBoolean( (dep \\ "optional").text )
      assert(opt == actualOpt, "Invalid 'optional' section '" + (dep \\ "optional") + "' for " + id + ", expected optional=" + opt)
      val actualScope = (dep \\ "scope") match { case Seq() => None; case x => Some(x.text) }
      val actualClassifier = (dep \\ "classifier") match { case Seq() => None; case x => Some(x.text) }
      assert(actualScope == scope, "Invalid 'scope' section '" + (dep \\ "scope") + "' for " + id + ", expected scope=" + scope)
      assert(actualClassifier == classifier, "Invalid 'classifier' section '" + (dep \\ "classifier") + "' for " + id + ", expected classifier=" + classifier)
    }
  }
}
jroper/sbt
sbt/src/sbt-test/dependency-management/pom-scope/project/PomTest.scala
Scala
bsd-3-clause
1,926
package com.hitsoft.scala.android

/**
 * User: smeagol
 * Date: 24.05.15
 * Time: 9:25
 */
trait WidgetImplicits {

}
hitsoft/scala-android
src/main/scala/com/hitsoft/scala/android/WidgetImplicits.scala
Scala
mit
119
/*
 * Copyright 2020 HM Revenue & Customs
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package controllers.registration.attachments

import itutil.ControllerISpec
import play.api.libs.ws.WSResponse
import play.api.test.Helpers.OK

import scala.concurrent.Future

class Vat5LRequiredControllerISpec extends ControllerISpec {

  val url: String = controllers.registration.attachments.routes.Vat5LRequiredController.show.url

  s"GET $url" must {
    "return an OK" in new Setup {
      given()
        .user.isAuthorised()
        .audit.writesAudit()
        .audit.writesAuditMerged()

      insertCurrentProfileIntoDb(currentProfile, sessionId)

      val response: Future[WSResponse] = buildClient(url).get()

      whenReady(response) { res =>
        res.status mustBe OK
      }
    }
  }
}
hmrc/vat-registration-frontend
it/controllers/registration/attachments/Vat5LRequiredControllerISpec.scala
Scala
apache-2.0
1,314
import stainless.lang._

object ObjectParamMutation7 {

  case class A(a: Int, var x: BigInt, var y: BigInt, var z: BigInt)

  def inc(a: A): Unit = {
    require(a.x >= 0 && a.y >= 0 && a.z >= 0)
    a.x += 1
    a.y += 1
    a.z += 1
  } ensuring(_ => a.x == old(a).x + 1 && a.y == old(a).y + 1 && a.z == old(a).z + 1)

  def f(): A = {
    val a = A(0, 0, 0, 0)
    inc(a); inc(a); inc(a)
    a
  } ensuring(res => res.x == res.y && res.y == res.z && res.z == 3)
}
epfl-lara/stainless
frontends/benchmarks/imperative/valid/ObjectParamMutation7.scala
Scala
apache-2.0
469
object Bar:
  type Fuzzy[W <: Int] = Int

  opaque type BlaBla[W <: Int] <: Foo.BlaBla[Fuzzy[W], Int] = Foo.BlaBla[Fuzzy[W], Int]
dotty-staging/dotty
sbt-test/opaques/i12927/src/main/scala/Bar.scala
Scala
apache-2.0
133