code | repo_name | path | language | license | size
---|---|---|---|---|---|
package scala.generator
import com.bryzek.apidoc.generator.v0.models.InvocationForm
import scala.models.Play23ClientGenerator
import scala.models.ning.Ning18ClientGenerator
import models.TestHelper
import org.scalatest.{FunSpec, Matchers}
class ReferenceWithImportsSpec extends FunSpec with Matchers {
lazy val ssd = new ScalaService(models.TestHelper.referenceWithImportsApiService)
it("user case classes") {
val model = ssd.models.find(_.name == "User").get
val code = ScalaCaseClasses.generateCaseClass(model, Seq.empty)
models.TestHelper.assertEqualsFile("/generators/reference-spec-user-case-class.txt", code)
}
it("member case classes") {
val model = ssd.models.find(_.name == "Member").get
val code = ScalaCaseClasses.generateCaseClass(model, Seq.empty)
models.TestHelper.assertEqualsFile("/generators/reference-spec-member-case-class.txt", code)
}
it("generates expected code for play 2.3 client") {
Play23ClientGenerator.invoke(InvocationForm(service = models.TestHelper.referenceWithImportsApiService)) match {
case Left(errors) => fail(errors.mkString(", "))
case Right(sourceFiles) => {
sourceFiles.size shouldBe 1
models.TestHelper.assertEqualsFile("/generators/reference-with-imports-spec-play-23.txt", sourceFiles.head.contents)
}
}
}
it("generates expected code for ning client") {
Ning18ClientGenerator.invoke(InvocationForm(service = models.TestHelper.referenceWithImportsApiService)) match {
case Left(errors) => fail(errors.mkString(", "))
case Right(sourceFiles) => {
sourceFiles.size shouldBe 1
models.TestHelper.assertEqualsFile("/generators/reference-with-imports-spec-ning-client.txt", sourceFiles.head.contents)
}
}
}
}
| krschultz/apidoc-generator | scala-generator/src/test/scala/models/generator/ReferenceWithImportsSpec.scala | Scala | mit | 1,783 |
package org.kududb.spark.demo.gamer.cdc
import java.text.SimpleDateFormat
import java.util.Random
import java.util.concurrent.atomic.AtomicInteger
import java.util.concurrent.{TimeUnit, Executors}
import org.kududb.client.{Operation, PartialRow, KuduClient}
import org.kududb.spark.demo.gamer.aggregates.GamerDataGenerator
object DirectDataMultiThreadedInjector {
val simpleDateFormat = new SimpleDateFormat("MM,dd,yyyy")
val random = new Random
def main(args:Array[String]): Unit = {
if (args.length < 6) {
println("<kuduMaster> <tableName> <numberOfRecords> <numberOfThreads> <numberOfGamers> <sleepTime>")
return
}
val kuduMaster = args(0)
val tableName = args(1)
val numberOfRecords = args(2).toInt
val executor = Executors.newFixedThreadPool(args(3).toInt)
val numberOfGamers = args(4).toInt
val sleepTime = args(5).toInt
val kuduClient = new KuduClient.KuduClientBuilder(kuduMaster).build()
val leftToRun = new AtomicInteger()
for (i <- 0 until numberOfRecords) {
leftToRun.incrementAndGet()
executor.execute(new ApplyNewRecordRunnable(GamerDataGenerator.makeNewGamerRecord(numberOfGamers),
kuduClient, tableName, leftToRun))
println("Submitted: " + i)
Thread.sleep(sleepTime)
}
val startTime = System.currentTimeMillis()
executor.shutdown() // stop accepting new tasks so awaitTermination can eventually return true
while (!executor.awaitTermination(10000, TimeUnit.SECONDS)) {
val newTime = System.currentTimeMillis()
println("> Still Waiting: {Time:" + (newTime - startTime) + ", LeftToRun:" + leftToRun + "}" )
}
kuduClient.close()
}
}
| tmalaska/SparkOnKudu | src/main/scala/org/kududb/spark/demo/gamer/cdc/DirectDataMultiThreadedInjector.scala | Scala | apache-2.0 | 1,558 |
/*
* Copyright (C) 2014 - 2020 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.dfasdl.utils
import org.dfasdl.utils.DataElementType.DataElementType
import org.dfasdl.utils.ElementNames._
import org.dfasdl.utils.ElementType.ElementType
import org.dfasdl.utils.StructureElementType.StructureElementType
import org.w3c.dom.traversal.NodeFilter
import org.w3c.dom.{ Element, Node }
import scala.annotation.tailrec
/**
* Contains several useful functions for handling elements and their types.
*/
trait ElementHelpers {
val binaryDataElements = List(
BINARY,
BINARY_64,
BINARY_HEX
)
val stringDataElements = List(
DATE,
DATETIME,
FORMATTED_NUMBER,
FORMATTED_STRING,
FORMATTED_TIME,
NUMBER,
STRING,
TIME
)
val dataElements: List[String] = binaryDataElements ::: stringDataElements
val expressionElements = List(
CONSTANT,
SCALA_EXPRESSION
)
val structElements = List(
CHOICE,
CUSTOM_ID,
CHOICE_ELEMENT,
ELEMENT,
FIXED_SEQUENCE,
REFERENCE,
SEQUENCE
)
/**
* This is a simple implementation of a `NodeFilter` that can be used to traverse only
* data elements in a dfasdl xml tree.
*/
class DataElementFilter extends NodeFilter {
override def acceptNode(n: Node): Short =
if (n.getNodeType == Node.ELEMENT_NODE && getElementType(n.getNodeName) == ElementType.DataElement)
NodeFilter.FILTER_ACCEPT
else
NodeFilter.FILTER_REJECT
}
/**
* Analyze the given tag name and return the DFASDL element type.
* If the type is not known, an `UnknownElement` type is returned.
*
* @param tagName The tag name of the element.
* @return The element type or `UnknownElement`.
*/
def getElementType(tagName: String): ElementType =
if (isDataElement(tagName))
ElementType.DataElement
else if (isExpressionElement(tagName))
ElementType.ExpressionElement
else if (isStructuralElement(tagName))
ElementType.StructuralElement
else if (tagName == ROOT)
ElementType.RootElement
else
ElementType.UnknownElement
/**
* Analyze the given data element tag name and return its type.
* If the type is not known, an `UnknownElement` type is returned.
*
* @param tagName The tag name of the element.
* @return The data element type or `UnknownElement`
*/
def getDataElementType(tagName: String): DataElementType =
if (isBinaryDataElement(tagName))
DataElementType.BinaryDataElement
else if (isStringDataElement(tagName))
DataElementType.StringDataElement
else
DataElementType.UnknownElement
/**
* Analyze the given structural element name and return its type.
* If the type is not known, an `UnknownElement` type is returned.
*
* @param tagName The tag name of the element.
* @return The structural element type or `UnknownElement`.
*/
def getStructureElementType(tagName: String): StructureElementType =
if (isStructuralElement(tagName)) {
tagName match {
case ElementNames.CHOICE => StructureElementType.Choice
case ElementNames.CHOICE_ELEMENT => StructureElementType.ChoiceElement
case ElementNames.CUSTOM_ID => StructureElementType.CustomId
case ElementNames.ELEMENT => StructureElementType.Element
case ElementNames.FIXED_SEQUENCE => StructureElementType.FixedSequence
case ElementNames.REFERENCE => StructureElementType.Reference
case ElementNames.SEQUENCE => StructureElementType.Sequence
}
} else
StructureElementType.Unknown
def isBinaryDataElement(tagName: String): Boolean = binaryDataElements.contains(tagName)
def isDataElement(tagName: String): Boolean = dataElements.contains(tagName)
def isStringDataElement(tagName: String): Boolean = stringDataElements.contains(tagName)
def isExpressionElement(tagName: String): Boolean = expressionElements.contains(tagName)
def isStructuralElement(tagName: String): Boolean = structElements.contains(tagName)
def isUniqueDataElement(e: Element): Boolean =
isDataElement(e.getNodeName) && e.hasAttribute(AttributeNames.UNIQUE) && e.getAttribute(
AttributeNames.UNIQUE
) == "true"
/**
* Walk up the tree until we find the parent choice of the given node.
*
* @param n The start node.
* @return An option to the parent choice element if it exists.
*/
@SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf", "org.wartremover.warts.Null"))
@tailrec
final def getParentChoice(n: Node): Option[Element] =
if (n == null)
None
else {
val parent = n.getParentNode
if (parent == null)
None
else {
if (getStructureElementType(parent.getNodeName) == StructureElementType.Choice)
Option(parent.asInstanceOf[Element])
else
getParentChoice(parent)
}
}
/**
* Walk up the tree until we find the parent sequence of the given node.
*
* @param n The start node.
* @return An option to the parent sequence element if it exists.
*/
@SuppressWarnings(Array("org.wartremover.warts.AsInstanceOf", "org.wartremover.warts.Null"))
@tailrec
final def getParentSequence(n: Node): Option[Element] =
if (n == null)
None
else {
val parent = n.getParentNode
if (parent == null)
None
else {
if (StructureElementType.isSequence(getStructureElementType(parent.getNodeName)))
Option(parent.asInstanceOf[Element])
else
getParentSequence(parent)
}
}
}
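/*
 * Usage sketch (not part of the original file): mixing the trait into a small object
 * and classifying a few element names. Only constants from the `ElementNames` import
 * above are used; the object name is made up for illustration.
 */
object ElementHelpersSketch extends ElementHelpers {
  def classify(): Unit = {
    println(getElementType(STRING))            // ElementType.DataElement
    println(getDataElementType(BINARY))        // DataElementType.BinaryDataElement
    println(getStructureElementType(SEQUENCE)) // StructureElementType.Sequence
  }
}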
| DFASDL/dfasdl-utils | src/main/scala/org/dfasdl/utils/ElementHelpers.scala | Scala | agpl-3.0 | 6,304 |
package examples.demo
import java.awt.Dimension
import examples.demo.ui.{Circle, Shape, ShapesPanel}
import rescala.default._
import scala.swing.{MainFrame, SimpleSwingApplication, UIElement}
/** So far, we demonstrated the Signal abstraction, which allows modular,
* yet declarative implementation of interactive applications through
* deriving state with constraints. The persistent values of Signals
* are complemented by briefly occurring, transient values of Events,
* the second fundamental abstraction of RP for event-based programming.
*
* To introduce Events, we refactor our animation into a numeric
* simulation. So far, we described the moving circle's trajectory
* through arithmetic formulae. In the final application, though, the
* ball should bounce off walls and rackets, making it much more
* cumbersome to describe its trajectory arithmetically. Therefore, we
* migrate its trajectory to be simulated through numeric integration,
* using the Euler method as the simplest approach. This means the
* application has to be driven in discrete time increments, which we
* can easily model as Evt ticks. Just as Vars are Signals that can be
* assigned manually, Evts are Events that can be fired manually. Thus,
* we can add a corresponding trigger in our main method.
*
* Events and Signals in RP are tightly integrated. We already showed
* how new Signals can be derived from existing ones, and new Events
* can be derived in the same way. We will show Event expressions and several
* Event combinators later. In addition, though, Signals and Events
* can also be derived from each other. To implement our numeric
* integration, we do in fact need such a transformation. First,
* we reuse the previous definitions of posX and posY to define
* the moving circle's velocityX and velocityY, adding a "per
* nanosecond" factor to each. We then redefine Signals posX and
* posY as the result of folding over the Event ticks. Just like
* fold can compute an aggregate over all elements of a List, it
* here computes an aggregate Signal over all Event values that
* occurred so far, updated whenever the event emits a value.
*
* The Euler method defines x for the next step as x of the previous
* step plus the product of the velocity of the previous step times
* the length of the step. X of the previous step and the length of
* the step are given as parameters to the fold closure. To access
* the velocity of the previous step, Signals offer the before method.
* Usually, this method is necessary to break data dependency cycles.
* For instance, for the Pong game, the ball's velocity is dependent
* on the ball colliding with the field boundary and player rackets.
* The ball's collisions depend on the ball's position. If the
* ball's position in turn depended on the velocity, these dependencies
* would form an endless loop. But, by using velocity.before, this
* loop is broken, and the program is well-defined.
*/
object EInaccurateNumericCircle extends SimpleSwingApplication {
val NanoSecond = 1000000000L
val nsTime = Var(System.nanoTime())
def tick() = nsTime.set(System.nanoTime())
val ticks = Evt[Long]()
val shapes = Var[List[Shape]](List.empty)
val panel = new ShapesPanel(shapes)
val angle = nsTime.map(_.toDouble / NanoSecond * math.Pi)
val velocity = Signal {
Pos(
x = (panel.width() / 2 - 50).toDouble * math.sin(angle()) / NanoSecond,
y = (panel.height() / 2 - 50).toDouble * math.cos(angle()) / NanoSecond
)
}
val inc = ticks.map(tick => velocity.value * tick.toDouble)
val pos = inc.fold(Pos(0, 0)) { (cur, inc) => cur + inc }
shapes.transform(new Circle(pos, Var(50)) :: _)
override lazy val top = {
panel.preferredSize = new Dimension(400, 300)
new MainFrame {
title = "REScala Demo"
contents = panel
setLocationRelativeTo(new UIElement { override def peer = null })
}
}
override def main(args: Array[String]): Unit = {
super.main(args)
while (!top.visible) Thread.sleep(5)
while (top.visible) {
Thread.sleep(1)
tick()
ticks.fire(1 * NanoSecond / 1000L)
}
}
}
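/*
 * Sketch (not part of the original example): a minimal, self-contained use of the
 * fold combinator discussed in the comment above -- an Evt that is folded into a
 * Signal counting how often it fired. The names are made up for illustration and
 * only the rescala.default API imported in this file is assumed.
 */
object EventFoldSketch {
  val clicks = Evt[Unit]()
  val clickCount: Signal[Int] = clicks.fold(0) { (count, _) => count + 1 }
  // Each clicks.fire() increments clickCount by one, just like ticks drives pos above.
}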
| guidosalva/REScala | Code/Examples/examples/src/main/scala/examples/demo/EInaccurateNumericCircle.scala | Scala | apache-2.0 | 4,220 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.runtime.testingUtils
import org.apache.flink.runtime.clusterframework.types.ResourceID
import org.apache.flink.runtime.highavailability.HighAvailabilityServices
import org.apache.flink.runtime.io.disk.iomanager.IOManager
import org.apache.flink.runtime.io.network.NetworkEnvironment
import org.apache.flink.runtime.memory.MemoryManager
import org.apache.flink.runtime.metrics.groups.TaskManagerMetricGroup
import org.apache.flink.runtime.taskexecutor.TaskManagerConfiguration
import org.apache.flink.runtime.taskmanager.{TaskManager, TaskManagerLocation}
import scala.language.postfixOps
/** Subclass of the [[TaskManager]] to support testing messages
*/
class TestingTaskManager(
config: TaskManagerConfiguration,
resourceID: ResourceID,
connectionInfo: TaskManagerLocation,
memoryManager: MemoryManager,
ioManager: IOManager,
network: NetworkEnvironment,
numberOfSlots: Int,
highAvailabilityServices: HighAvailabilityServices,
taskManagerMetricGroup : TaskManagerMetricGroup)
extends TaskManager(
config,
resourceID,
connectionInfo,
memoryManager,
ioManager,
network,
numberOfSlots,
highAvailabilityServices,
taskManagerMetricGroup)
with TestingTaskManagerLike {
def this(
config: TaskManagerConfiguration,
connectionInfo: TaskManagerLocation,
memoryManager: MemoryManager,
ioManager: IOManager,
network: NetworkEnvironment,
numberOfSlots: Int,
highAvailabilityServices: HighAvailabilityServices,
taskManagerMetricGroup : TaskManagerMetricGroup) {
this(
config,
ResourceID.generate(),
connectionInfo,
memoryManager,
ioManager,
network,
numberOfSlots,
highAvailabilityServices,
taskManagerMetricGroup)
}
}
| zimmermatt/flink | flink-runtime/src/test/scala/org/apache/flink/runtime/testingUtils/TestingTaskManager.scala | Scala | apache-2.0 | 2,614 |
package gameover.fwk.libgdx.gfx
import com.badlogic.gdx.math.{Polygon, Vector2}
import gameover.fwk.libgdx.utils.LibGDXHelper
import gameover.fwk.pool.Vector2Pool
object Polygons extends LibGDXHelper {
def createArcAsPolygon(x: Float, y: Float, radius: Float, start: Float, angle: Float, segments: Int): Polygon = {
if (segments < 1) throw new IllegalArgumentException("arc need at least 1 segment")
val theta: Float = (2 * 3.1415926f * (angle / 360.0f)) / segments
val cos: Float = com.badlogic.gdx.math.MathUtils.cos(theta)
val sin: Float = com.badlogic.gdx.math.MathUtils.sin(theta)
var cx: Float = radius * com.badlogic.gdx.math.MathUtils.cos(start * com.badlogic.gdx.math.MathUtils.degreesToRadians)
var cy: Float = radius * com.badlogic.gdx.math.MathUtils.sin(start * com.badlogic.gdx.math.MathUtils.degreesToRadians)
val vertices: Array[Float] = new Array[Float](segments * 2 + 2)
vertices(vertices.length - 2) = x
vertices(vertices.length - 1) = y
for (i <- 0 to segments - 1) {
val temp: Float = cx
cx = cos * cx - sin * cy
cy = sin * temp + cos * cy
vertices(i * 2) = x + cx
vertices(i * 2 + 1) = y + cy
}
new Polygon(vertices)
}
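/**
 * Point-in-polygon test using the even-odd (crossing number) rule: for each edge that
 * straddles the point's y coordinate, toggle `oddNodes` when the edge's intersection
 * with the horizontal through the point lies left of it. An odd number of crossings
 * means the point is inside the polygon.
 */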
def isPointInPolygon(polygon: GdxArray[Vector2], pointX: Float, pointY: Float): Boolean = {
val lastVertice: Vector2 = polygon.get(polygon.size - 1)
var lastVerticeX = lastVertice.x
var lastVerticeY = lastVertice.y
var oddNodes: Boolean = false
for (i <- polygon.indices) {
val v: Vector2 = polygon.get(i)
val x = v.x
val y = v.y
if (y < pointY && lastVerticeY >= pointY || lastVerticeY < pointY && y >= pointY) {
if (x + (pointY - y) / (lastVerticeY - y) * (lastVerticeX - x) < pointX) {
oddNodes = !oddNodes
}
}
lastVerticeX = x
lastVerticeY = y
}
oddNodes
}
def createArcAsListOfVertices(x: Float, y: Float, radius: Float, start: Float, angle: Float, segments: Int, fromPool: Boolean): GdxArray[Vector2] = {
if (segments < 1) throw new IllegalArgumentException("arc need at least 1 segment")
val theta: Float = (2 * 3.1415926f * (angle / 360.0f)) / segments
val cos: Float = com.badlogic.gdx.math.MathUtils.cos(theta)
val sin: Float = com.badlogic.gdx.math.MathUtils.sin(theta)
var cx: Float = radius * com.badlogic.gdx.math.MathUtils.cos(start * com.badlogic.gdx.math.MathUtils.degreesToRadians)
var cy: Float = radius * com.badlogic.gdx.math.MathUtils.sin(start * com.badlogic.gdx.math.MathUtils.degreesToRadians)
var ret: GdxArray[Vector2] = null
if (fromPool) {
ret = Vector2Pool.obtainAsGdxArray(segments + 1)
}
else {
def f = () => new Vector2
ret = new GdxArray[Vector2]().fill(segments + 1)(f)
}
for (i <- 0 to segments) {
val temp: Float = cx
cx = cos * cx - sin * cy
cy = sin * temp + cos * cy
ret.get(i).set(x + cx, y + cy)
}
ret.get(segments).set(x, y)
ret
}
}
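/*
 * Usage sketch (not part of the original file): building a quarter-circle "pie slice"
 * polygon around the origin with 16 segments. The concrete values are arbitrary
 * illustration data.
 */
object PolygonsSketch {
  val quarterCircle: Polygon =
    Polygons.createArcAsPolygon(x = 0f, y = 0f, radius = 5f, start = 0f, angle = 90f, segments = 16)
}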
| PixelDuck/gameover-game-framework | src/main/scala/gameover/fwk/libgdx/gfx/Polygons.scala | Scala | mit | 3,009 |
/*
* Copyright (C) 2014 - 2017 Contributors as noted in the AUTHORS.md file
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as
* published by the Free Software Foundation, either version 3 of the
* License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.wegtam.tensei.agent.processor
import akka.actor.SupervisorStrategy.Escalate
import akka.actor._
import akka.event.{ DiagnosticLoggingAdapter, Logging }
import com.wegtam.tensei.adt.{ ConnectionInformation, Cookbook, DFASDL, Recipe }
import com.wegtam.tensei.agent.DataTreeDocument.DataTreeDocumentMessages
import com.wegtam.tensei.agent.helpers.LoggingHelpers
import com.wegtam.tensei.agent.processor.RecipeWorker.RecipeWorkerMessages
import com.wegtam.tensei.agent.processor.RecipesWorker.{
RecipesWorkerMessages,
RecipesWorkerState,
RecipesWorkerStateData
}
import org.dfasdl.utils.DocumentHelpers
import org.w3c.dom.traversal.{ DocumentTraversal, NodeFilter, TreeWalker }
import scalaz._
object RecipesWorker {
/**
* A sealed trait for the messages that are sent and received by this actor.
*/
sealed trait RecipesWorkerMessages
/**
* A companion object for the trait to keep the namespace clean.
*/
object RecipesWorkerMessages {
/**
* A message to report that a cookbook has been processed.
*
* @param cookbookId The ID of the cookbook.
* @param lastWriterMessageNumber The number of the last writer message that was sent out.
*/
case class FinishedProcessing(
cookbookId: String,
lastWriterMessageNumber: Long
)
/**
* Start processing the next recipe in the pipe.
*/
case object ProcessNextRecipe extends RecipesWorkerMessages
/**
* A message to report that a recipe has been processed.
*
* @param lastWriterMessageNumber The number of the last writer message that was sent out.
* @param currentLoopCounter The number of times the recipe has been processed.
*/
case class RecipeProcessed(
lastWriterMessageNumber: Long,
currentLoopCounter: Long = 0L
) extends RecipesWorkerMessages
/**
* Start processing the recipes within the given cookbook.
*
* @param writer An actor ref to a writer.
* @param sources A list of connection informations for the data sources.
* @param target The connection information for the target.
* @param cookbook The actual cookbook that contains the recipes.
* @param dataTreeDocs A list of actor refs to data trees that hold the source data.
*/
case class StartProcessing(
writer: ActorRef,
sources: List[ConnectionInformation],
target: ConnectionInformation,
cookbook: Cookbook,
dataTreeDocs: List[ActorRef]
) extends RecipesWorkerMessages
/**
* Instruct the actor to stop itself.
*/
case object Stop extends RecipesWorkerMessages
}
/**
* A sealed trait for the state of the recipes worker.
*/
sealed trait RecipesWorkerState
/**
* A companion object for the trait to keep the namespace clean.
*/
object RecipesWorkerState {
/**
* The actor is available for work.
*/
case object Idle extends RecipesWorkerState
/**
* The actor is preparing to process the recipes.
*/
case object Preparing extends RecipesWorkerState
/**
* The actor is currently processing recipes.
*/
case object Processing extends RecipesWorkerState
}
/**
* The state data for the process recipes actor.
*
* @param cookbook An option to the cookbook that includes the recipes to process.
* @param lastWriterMessageNumber The number of the last writer message that was sent out.
* @param recipeQueue A list of recipes that is bound to be processed.
* @param sourceDataTrees A list of source data trees paired with their dfasdl.
* @param targetDfasdl An option to the target DFASDL.
* @param targetTreeWalker A tree walker used to traverse the target dfasdl tree.
* @param writer An option to an actor ref of the data writer actor.
*/
case class RecipesWorkerStateData(
cookbook: Option[Cookbook] = None,
lastWriterMessageNumber: Long = 0L,
recipeQueue: List[Recipe] = List.empty[Recipe],
sourceDataTrees: List[SourceDataTreeListEntry] = List.empty[SourceDataTreeListEntry],
targetDfasdl: Option[DFASDL] = None,
targetTreeWalker: Option[TreeWalker] = None,
writer: Option[ActorRef] = None
)
/**
* A factory method to create the actor.
*
* @param agentRunIdentifier An optional agent run identifier which is usually an uuid.
* @return The props to create the actor.
*/
def props(agentRunIdentifier: Option[String]): Props =
Props(classOf[RecipesWorker], agentRunIdentifier)
}
/**
* This actor processes a list of recipes.
*
* @param agentRunIdentifier An optional agent run identifier which is usually an uuid.
*/
class RecipesWorker(agentRunIdentifier: Option[String])
extends Actor
with FSM[RecipesWorkerState, RecipesWorkerStateData]
with ActorLogging
with DocumentHelpers {
override val log: DiagnosticLoggingAdapter = Logging(this) // Override the standard logger to be able to add stuff via MDC.
log.mdc(LoggingHelpers.generateMdcEntryForRunIdentifier(agentRunIdentifier))
override val supervisorStrategy: SupervisorStrategy =
AllForOneStrategy() {
case _: Exception => Escalate
}
startWith(RecipesWorkerState.Idle, RecipesWorkerStateData())
when(RecipesWorkerState.Idle) {
case Event(msg: RecipesWorkerMessages.StartProcessing, data) =>
log.debug("Received start processing message.")
if (msg.target.dfasdlRef.isEmpty) {
log.error("No DFASDL-Reference defined in target connection!")
stay() using data
} else {
val dfasdl = msg.cookbook.findDFASDL(msg.target.dfasdlRef.get)
if (dfasdl.isEmpty) {
log.error("DFASDL referenced by {} not found in cookbook {}!",
msg.target.dfasdlRef.get,
msg.cookbook.id)
stay() using data
} else {
// Try to create a normalised xml document from the target DFASDL.
\\/.fromTryCatch(createNormalizedDocument(dfasdl.get.content)) match {
case -\\/(e) =>
log.error(e, "An error occurred while trying to create the target document tree!")
stay() using data
case \\/-(targetTree) =>
// Create treewalker from a traversal for calculating some target element stuff later on.
\\/.fromTryCatch(targetTree.asInstanceOf[DocumentTraversal]) match {
case -\\/(e) =>
log.error(e, "Couldn't create traversal instance!")
stay() using data
case \\/-(traversal) =>
// We restrict the treewalker to data elements only!
\\/.fromTryCatch(
traversal.createTreeWalker(targetTree.getDocumentElement,
NodeFilter.SHOW_ELEMENT,
new DataElementFilter(),
true)
) match {
case -\\/(e) =>
log.error(e, "Couldn't create tree walker!")
stay() using data
case \\/-(treeWalker) =>
val newState = RecipesWorkerStateData(
cookbook = Option(msg.cookbook),
sourceDataTrees = msg.dataTreeDocs.map(
ref =>
SourceDataTreeListEntry(dfasdlId = dfasdl.get.id,
document = None,
actorRef = ref)
),
targetDfasdl = dfasdl,
targetTreeWalker = Option(treeWalker),
writer = Option(msg.writer)
)
goto(RecipesWorkerState.Preparing) using newState
}
}
}
}
}
}
when(RecipesWorkerState.Preparing) {
case Event(msg: DataTreeDocumentMessages.XmlStructure, data) =>
log.debug("Received xml structure.")
val index = data.sourceDataTrees.indexWhere(_.actorRef.path == sender().path)
if (index < 0) {
log.warning("Received xml structure from unknown data tree document!")
stay() using data
} else {
val e = data
.sourceDataTrees(index)
.copy(dfasdlId = msg.dfasdlId, document = Option(msg.document))
val newData = data.copy(
recipeQueue = data.cookbook.get.recipes,
sourceDataTrees = data.sourceDataTrees.updated(index, e)
)
if (newData.sourceDataTrees.exists(_.document.isEmpty))
stay() using newData // There are still document structures missing.
else
goto(RecipesWorkerState.Processing) using newData // We have all structures and can move on.
}
}
when(RecipesWorkerState.Processing) {
case Event(RecipesWorkerMessages.ProcessNextRecipe, data) =>
log.debug("Received process next recipe message.")
if (data.recipeQueue.isEmpty) {
// Tell our parent that we're done and stop.
context.parent ! RecipesWorkerMessages.FinishedProcessing(
cookbookId = data.cookbook.get.id,
lastWriterMessageNumber = data.lastWriterMessageNumber
)
stop()
} else {
// Create a worker for the recipe and instruct it to start processing.
val worker = context.actorOf(
RecipeWorker.props(agentRunIdentifier = agentRunIdentifier,
recipe = data.recipeQueue.head)
)
worker ! RecipeWorkerMessages.Start(
lastWriterMessageNumber = data.lastWriterMessageNumber,
sourceDataTrees = data.sourceDataTrees,
targetDfasdl = data.targetDfasdl.get,
targetTreeWalker = data.targetTreeWalker,
writer = data.writer
)
stay() using data // Stay and wait for the `RecipeProcessed` message.
}
case Event(msg: RecipesWorkerMessages.RecipeProcessed, data) =>
log.debug("Received recipe processed message.")
// Trigger the next recipe.
self ! RecipesWorkerMessages.ProcessNextRecipe
// Stay in processing mode
stay() using data.copy(
lastWriterMessageNumber = msg.lastWriterMessageNumber,
recipeQueue = data.recipeQueue.tail
)
}
whenUnhandled {
case Event(RecipesWorkerMessages.Stop, data) =>
log.debug("Received stop message.")
stop()
}
onTransition {
case RecipesWorkerState.Idle -> RecipesWorkerState.Preparing =>
// Instruct all actors without document to send us their xml structure.
nextStateData.sourceDataTrees
.filter(_.document.isEmpty)
.foreach(e => e.actorRef ! DataTreeDocumentMessages.ReturnXmlStructure)
case RecipesWorkerState.Preparing -> RecipesWorkerState.Processing =>
// Send initial processing message.
self ! RecipesWorkerMessages.ProcessNextRecipe
}
initialize()
}
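/*
 * Usage sketch (not part of the original sources): how a supervising actor might create
 * a RecipesWorker via its props factory and stop it once it reports back. A real caller
 * would first send RecipesWorkerMessages.StartProcessing with its writer, connection
 * information, cookbook and data tree refs; those values are omitted here.
 */
class RecipesWorkerSupervisorSketch extends Actor {
  private val worker = context.actorOf(RecipesWorker.props(agentRunIdentifier = Option("example-run")))
  override def receive: Receive = {
    case RecipesWorkerMessages.FinishedProcessing(_, _) =>
      // The worker reports back once all recipes of its cookbook have been processed.
      worker ! RecipesWorkerMessages.Stop
  }
}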
| Tensei-Data/tensei-agent | src/main/scala/com/wegtam/tensei/agent/processor/RecipesWorker.scala | Scala | agpl-3.0 | 11,889 |
package com.olvind
package sui
object SuiTypeMemberMethodMapper extends MemberMapper {
override def apply(compName: CompName)(memberMethod: MemberMethod): ParsedMethod =
ParsedMethod(
apply(compName, memberMethod.paramNames, memberMethod.name),
None
)
private def apply(c: CompName, args: Seq[String], m: String) =
(c.value, args.size, m) match {
case ("DatePicker", 0, "getDate") => "getDate(): js.Date"
case other ⇒
println("missing types for method: " + other)
m + args.map(sanitize(_) + ": js.Any").mkString("(", ", ", ")") + ": js.Dynamic"
}
def sanitize(s: String) =
if (s == "val") "`val`" else s
}
| chandu0101/scalajs-react-components | gen/src/main/scala/com/olvind/sui/SuiTypeMemberMethodMapper.scala | Scala | apache-2.0 | 678 |
package org.jetbrains.plugins.scala.lang.dfa.analysis.tests
import org.jetbrains.plugins.scala.lang.dfa.Messages._
import org.jetbrains.plugins.scala.lang.dfa.analysis.ScalaDfaTestBase
class IfStatementsDfaTest extends ScalaDfaTestBase {
def testRegularIfs(): Unit = test(codeFromMethodBody(returnType = "Int") {
"""
|val x = if (3 < 2 && 5 <= 7) {
| 5 + 2 - 20
|} else if (12 == 13 || 13 != 5 || false) {
| 7 + 3 * 6 % 5
|} else {
| 9 * 3
|}
|
|x == 10
|x >= 11
|""".stripMargin
})(
"3 < 2" -> ConditionAlwaysFalse,
"3 < 2 && 5 <= 7" -> ConditionAlwaysFalse,
"12 == 13" -> ConditionAlwaysFalse,
"13 != 5" -> ConditionAlwaysTrue,
"12 == 13 || 13 != 5 || false" -> ConditionAlwaysTrue,
"x == 10" -> ConditionAlwaysTrue,
"x >= 11" -> ConditionAlwaysFalse
)
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/dfa/analysis/tests/IfStatementsDfaTest.scala | Scala | apache-2.0 | 867 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileStatus, Path, RawLocalFileSystem}
import org.apache.spark.sql.AnalysisException
import org.apache.spark.sql.test.SharedSparkSession
class DataSourceSuite extends SharedSparkSession {
import TestPaths._
test("test glob and non glob paths") {
val resultPaths = DataSource.checkAndGlobPathIfNecessary(
Seq(
path1.toString,
path2.toString,
globPath1.toString,
globPath2.toString
),
hadoopConf,
checkEmptyGlobPath = true,
checkFilesExist = true
)
assert(resultPaths.toSet === allPathsInFs.toSet)
}
test("test glob paths") {
val resultPaths = DataSource.checkAndGlobPathIfNecessary(
Seq(
globPath1.toString,
globPath2.toString
),
hadoopConf,
checkEmptyGlobPath = true,
checkFilesExist = true
)
assert(
resultPaths.toSet === Set(
globPath1Result1,
globPath1Result2,
globPath2Result1,
globPath2Result2
)
)
}
test("test non glob paths") {
val resultPaths = DataSource.checkAndGlobPathIfNecessary(
Seq(
path1.toString,
path2.toString
),
hadoopConf,
checkEmptyGlobPath = true,
checkFilesExist = true
)
assert(
resultPaths.toSet === Set(
path1,
path2
)
)
}
test("test non glob paths checkFilesExist=false") {
val resultPaths = DataSource.checkAndGlobPathIfNecessary(
Seq(
path1.toString,
path2.toString,
nonExistentPath.toString
),
hadoopConf,
checkEmptyGlobPath = true,
checkFilesExist = false
)
assert(
resultPaths.toSet === Set(
path1,
path2,
nonExistentPath
)
)
}
test("test non existent paths") {
assertThrows[AnalysisException](
DataSource.checkAndGlobPathIfNecessary(
Seq(
path1.toString,
path2.toString,
nonExistentPath.toString
),
hadoopConf,
checkEmptyGlobPath = true,
checkFilesExist = true
)
)
}
test("test non existent glob paths") {
assertThrows[AnalysisException](
DataSource.checkAndGlobPathIfNecessary(
Seq(
globPath1.toString,
globPath2.toString,
nonExistentGlobPath.toString
),
hadoopConf,
checkEmptyGlobPath = true,
checkFilesExist = true
)
)
}
}
object TestPaths {
val hadoopConf = new Configuration()
hadoopConf.set("fs.mockFs.impl", classOf[MockFileSystem].getName)
val path1 = new Path("mockFs://mockFs/somepath1")
val path2 = new Path("mockFs://mockFs/somepath2")
val globPath1 = new Path("mockFs://mockFs/globpath1*")
val globPath2 = new Path("mockFs://mockFs/globpath2*")
val nonExistentPath = new Path("mockFs://mockFs/nonexistentpath")
val nonExistentGlobPath = new Path("mockFs://mockFs/nonexistentpath*")
val globPath1Result1 = new Path("mockFs://mockFs/globpath1/path1")
val globPath1Result2 = new Path("mockFs://mockFs/globpath1/path2")
val globPath2Result1 = new Path("mockFs://mockFs/globpath2/path1")
val globPath2Result2 = new Path("mockFs://mockFs/globpath2/path2")
val allPathsInFs = Seq(
path1,
path2,
globPath1Result1,
globPath1Result2,
globPath2Result1,
globPath2Result2
)
val mockGlobResults: Map[Path, Array[FileStatus]] = Map(
globPath1 ->
Array(
createMockFileStatus(globPath1Result1.toString),
createMockFileStatus(globPath1Result2.toString)
),
globPath2 ->
Array(
createMockFileStatus(globPath2Result1.toString),
createMockFileStatus(globPath2Result2.toString)
)
)
def createMockFileStatus(path: String): FileStatus = {
val fileStatus = new FileStatus()
fileStatus.setPath(new Path(path))
fileStatus
}
}
class MockFileSystem extends RawLocalFileSystem {
import TestPaths._
override def exists(f: Path): Boolean = {
allPathsInFs.contains(f)
}
override def globStatus(pathPattern: Path): Array[FileStatus] = {
mockGlobResults.getOrElse(pathPattern, Array())
}
}
| kevinyu98/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/datasources/DataSourceSuite.scala | Scala | apache-2.0 | 5,085 |
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.flink.table.planner.plan.stream.sql.join
import org.apache.flink.api.scala._
import org.apache.flink.table.api.TableException
import org.apache.flink.table.api.scala._
import org.apache.flink.table.planner.utils.{StreamTableTestUtil, TableTestBase}
import org.hamcrest.Matchers.containsString
import org.junit.Test
import java.sql.Timestamp
class TemporalJoinTest extends TableTestBase {
val util: StreamTableTestUtil = streamTestUtil()
private val orders = util.addDataStream[(Long, String)](
"Orders", 'o_amount, 'o_currency, 'o_rowtime.rowtime)
private val ratesHistory = util.addDataStream[(String, Int, Timestamp)](
"RatesHistory", 'currency, 'rate, 'rowtime.rowtime)
util.addFunction(
"Rates",
ratesHistory.createTemporalTableFunction("rowtime", "currency"))
private val proctimeOrders = util.addDataStream[(Long, String)](
"ProctimeOrders", 'o_amount, 'o_currency, 'o_proctime.proctime)
private val proctimeRatesHistory = util.addDataStream[(String, Int)](
"ProctimeRatesHistory", 'currency, 'rate, 'proctime.proctime)
util.addFunction(
"ProctimeRates",
proctimeRatesHistory.createTemporalTableFunction("proctime", "currency"))
@Test
def testSimpleJoin(): Unit = {
val sqlQuery = "SELECT " +
"o_amount * rate as rate " +
"FROM Orders AS o, " +
"LATERAL TABLE (Rates(o.o_rowtime)) AS r " +
"WHERE currency = o_currency"
util.verifyPlan(sqlQuery)
}
@Test
def testSimpleProctimeJoin(): Unit = {
val sqlQuery = "SELECT " +
"o_amount * rate as rate " +
"FROM ProctimeOrders AS o, " +
"LATERAL TABLE (ProctimeRates(o.o_proctime)) AS r " +
"WHERE currency = o_currency"
util.verifyPlan(sqlQuery)
}
@Test
def testJoinOnQueryLeft(): Unit = {
val orders = util.tableEnv.sqlQuery("SELECT * FROM Orders WHERE o_amount > 1000")
util.tableEnv.createTemporaryView("Orders2", orders)
val sqlQuery = "SELECT " +
"o_amount * rate as rate " +
"FROM Orders2 AS o, " +
"LATERAL TABLE (Rates(o.o_rowtime)) AS r " +
"WHERE currency = o_currency"
util.verifyPlan(sqlQuery)
}
/**
* Test versioned joins with a more complicated query.
* The important thing here is that we have a complex OR join condition
* and there are some columns that are not being used (are being pruned).
*/
@Test
def testComplexJoin(): Unit = {
val util = streamTestUtil()
util.addDataStream[(String, Int)]("Table3", 't3_comment, 't3_secondary_key)
util.addDataStream[(Timestamp, String, Long, String, Int)](
"Orders", 'o_rowtime.rowtime, 'o_comment, 'o_amount, 'o_currency, 'o_secondary_key)
util.addDataStream[(Timestamp, String, String, Int, Int)](
"RatesHistory", 'rowtime.rowtime, 'comment, 'currency, 'rate, 'secondary_key)
val rates = util.tableEnv
.sqlQuery("SELECT * FROM RatesHistory WHERE rate > 110")
.createTemporalTableFunction("rowtime", "currency")
util.addFunction("Rates", rates)
val sqlQuery =
"SELECT * FROM " +
"(SELECT " +
"o_amount * rate as rate, " +
"secondary_key as secondary_key " +
"FROM Orders AS o, " +
"LATERAL TABLE (Rates(o_rowtime)) AS r " +
"WHERE currency = o_currency OR secondary_key = o_secondary_key), " +
"Table3 " +
"WHERE t3_secondary_key = secondary_key"
util.verifyPlan(sqlQuery)
}
@Test
def testUncorrelatedJoin(): Unit = {
expectedException.expect(classOf[TableException])
expectedException.expectMessage(containsString("Cannot generate a valid execution plan"))
val sqlQuery = "SELECT " +
"o_amount * rate as rate " +
"FROM Orders AS o, " +
"LATERAL TABLE (Rates(TIMESTAMP '2016-06-27 10:10:42.123')) AS r " +
"WHERE currency = o_currency"
util.verifyExplain(sqlQuery)
}
@Test
def testTemporalTableFunctionScan(): Unit = {
expectedException.expect(classOf[TableException])
expectedException.expectMessage(containsString("Cannot generate a valid execution plan"))
val sqlQuery = "SELECT * FROM LATERAL TABLE (Rates(TIMESTAMP '2016-06-27 10:10:42.123'))"
util.verifyExplain(sqlQuery)
}
}
| gyfora/flink | flink-table/flink-table-planner-blink/src/test/scala/org/apache/flink/table/planner/plan/stream/sql/join/TemporalJoinTest.scala | Scala | apache-2.0 | 5,022 |
package org.skycastle.entity.tilemap
import content.composite.CompositeEntity
import javax.swing.JComponent
import ui.Ui
import util.Parameters
/**
*
*
* @author Hans Haggstrom
*/
class Tilemap2dUi(tilemap : TilemapEntity) extends Ui {
type ViewType = JComponent
def createOwnView(composite: CompositeEntity) = null
protected def updateViewProperties(view: ViewType, changedParameters: Parameters) = null
}
| weimingtom/skycastle | src/main/scala/org/skycastle/content/tilemap/Tilemap2dUi.scala | Scala | gpl-2.0 | 427 |
/*
* Copyright 2013 Akiyoshi Sugiki, University of Tsukuba
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kumoi.impl.cache
import kumoi.shell._
import kumoi.shell.cache._
import kumoi.core._
import kumoi.core.log._
import kumoi.impl.rmi.hook._
import scala.actors._
import scala.actors.Actor._
import scala.actors.remote._
import scala.actors.remote.RemoteActor._
import java.lang.reflect.{InvocationHandler, Method, Proxy, InvocationTargetException}
import java.net._
import java.rmi.server._
import sun.rmi.server._
import sun.rmi.transport.tcp._
import kumoi.impl.rmi.hook.cache.InvalidateSub
import kumoi.impl.rmi.hook.cache.InvalidateObject
import kumoi.impl.rmi.hook.cache.Connect
/**
*
* @author Akiyoshi Sugiki
*/
object CachedObject extends CacheCommon {
private val logging = Logging("Cache")
private val lock = new AnyRef
// (InetAddress, Object, String, List[Class[_]], List[Object]) -> (Object, Long)
private var cache = Map[(InetAddress, Object, String, List[Class[_]], List[Object]), (Object, Long)]()
private var conn = Map[InetAddress, OutputChannel[Any]]()
logging.config("ClientCache: timeout=" + timeout)
logging.config("ClientCache: port=" + portObj)
logging.config("ClientCache: name=" + nameObj)
private def connect(dest: InetAddress) {
logging.debug("connect")
lock.synchronized {
conn.get(dest) match {
case Some(ch) => // already have a connection!
case None =>
actor {
logging.debug("host=" + dest)
if (classLoader == null) classLoader = getClass.getClassLoader
loop {
val lo = InetAddress.getLocalHost
select(Node(dest.getHostName, portObj), name) !? Connect(lo.getHostName, portObj, Shell.clientSide) match {
case InvalidateObject(obj, ttl) => invalidateAll(obj)
case InvalidateSub(obj, relm, ttl) => invalidate(obj, relm)
case msg =>
logging.warn("ClientCache: unknown message - " + msg)
}
}
}
conn += (dest -> null)
}
}
}
private def invalidateHost(host: InetAddress) {
logging.debug("ClientCache: invalidate host() " + host)
//logging.debug("before: " + cache)
lock.synchronized { cache = cache.filter(p => p._1._1 != host) }
logging.debug("after: " + cache)
}
private def invalidateAll(obj: Object) { // cl -> obj
if (invalidateObj) {
logging.debug("ClientCache: invalidate all() " + obj)
logging.debug("before: " + cache)
lock.synchronized { cache = cache.filter(p => p._1._2 != obj) }
logging.debug("after: " + cache)
}
}
private def invalidate(obj: Object, realm: List[String]) { // cl -> obj
if (invalidateObj) {
logging.debug("ClientCache: invalidate() " + obj + ", realm=" + realm)
logging.debug("before: " + cache)
lock.synchronized { cache = cache.filter(p => p._1._2 != obj || !realm.contains(p._1._2)) }
logging.debug("after: " + cache)
}
}
def cache[T](obj: T) = {
val cl = obj.getClass
if (Proxy.isProxyClass(cl)) {
Proxy.getInvocationHandler(obj) match {
case rh: RemoteObjectInvocationHandler =>
val ep = rh.getRef.asInstanceOf[UnicastRef].getLiveRef.getChannel.getEndpoint.asInstanceOf[TCPEndpoint]
val host = InetAddress.getByName(ep.getHost)
connect(host)
Proxy.newProxyInstance(cl.getClassLoader, cl.getInterfaces,
new CachedInvocationHandler(host, obj.asInstanceOf[Object])).asInstanceOf[T]
case _ => obj
}
} else obj
}
class CachedInvocationHandler(host: InetAddress, obj: Object) extends InvocationHandler with Serializable {
def invoke(proxy: Object, method: Method, args: Array[Object]) = {
val name = method.getName
logging.debug("cache invoke: " + name)
if (name.equals("toString")) "Cached(" + invokeRemote(proxy, method, args) + ")"
else invokeRemote(proxy, method, args)
}
private def invokeRemote(proxy: Object, method: Method, args: Array[Object]) = {
val now = System.currentTimeMillis
//proxy.getClass.getAnnotation(classOf[nocache])
//proxy.getClass.getAnnotation(classOf[cache])
val params = method.getParameterTypes.toList
val wargs = wrap(args)
get(method, params, wargs, now) match {
case Some(res) => res
case None =>
val m = obj.getClass.getMethod(method.getName, params: _*)
val res = try {
m.invoke(obj, args: _*)
} catch {
case e: InvocationTargetException =>
e.getCause match {
case ce: java.rmi.ConnectException =>
invalidateHost(host)
case _ =>
}
throw e
case e: Throwable => throw e
}
try {
val cl = proxy.getClass.getInterfaces()(0)
val m2 = cl.getMethod(method.getName, params: _*)
// invalidation
m2.getAnnotation(classOf[invalidate]) match {
case inv: invalidate =>
val relm = inv.value.toList
logging.debug("@invalidate(value=" + relm + ")")
if (relm != null && relm.size > 0) invalidate(obj, relm)
else invalidateAll(obj)
case _ =>
}
// cache control
m2.getAnnotation(classOf[nocache]) match {
case an: nocache => logging.debug("@nocache")
case _ =>
m2.getAnnotation(classOf[cache]) match {
case ac: cache =>
logging.debug("@cache(value=" + ac.value + ")")
put(method, params, wargs, res, now + ac.value)
case _ =>
// TODO: must handle @persistcache
put(method, params, wargs, res, now + timeout)
}
}
} catch {
case ne: NoSuchMethodException =>
put(method, params, wargs, res, now + timeout)
}
res
}
}
private def get(method: Method, params: List[Class[_]], args: List[Object], now: Long) = {
cache.synchronized {
cache.get((host, obj, method.getName, params, args)) match {
case Some((res, expire)) if now < expire => // TODO: overflow
logging.debug("hit")
Some(res) // hit
case Some((res, _)) =>
logging.debug("hit, but expired")
None // hit, but expired
case None =>
logging.debug("miss")
None // miss
}
}
}
private def put(method: Method, params: List[Class[_]], args: List[Object], result: Object, now: Long) {
cache.synchronized {
cache += (host, obj, method.getName, params, args) -> (result, now)
}
}
private def wrap(a: Array[Object]) = a match {
case null => List()
case _ => a.toList
}
}
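/*
 * Usage sketch (not part of the original file): `RemoteClock` is a hypothetical
 * RMI-exported interface standing in for any real remote object. Passing its stub
 * through cache() yields a proxy whose method results may be served from the
 * client-side cache until they expire or the server invalidates them; non-proxy
 * arguments are returned unchanged.
 */
object UsageSketch {
  trait RemoteClock { def now(): java.lang.Long }
  def wrap(stub: RemoteClock): RemoteClock = cache(stub)
}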
}
| axi-sugiki/kumoi | src/kumoi/impl/cache/CachedObject.scala | Scala | apache-2.0 | 7,094 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.benchmark
import java.io.File
import scala.util.Random
import org.apache.spark.SparkConf
import org.apache.spark.benchmark.Benchmark
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.sql.functions.{monotonically_increasing_id, timestamp_seconds}
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.SQLConf.ParquetOutputTimestampType
import org.apache.spark.sql.types.{ByteType, Decimal, DecimalType}
/**
* Benchmark to measure read performance with Filter pushdown.
* To run this benchmark:
* {{{
* 1. without sbt: bin/spark-submit --class <this class> <spark sql test jar>
* 2. build/sbt "sql/test:runMain <this class>"
* 3. generate result: SPARK_GENERATE_BENCHMARK_FILES=1 build/sbt "sql/test:runMain <this class>"
* Results will be written to "benchmarks/FilterPushdownBenchmark-results.txt".
* }}}
*/
object FilterPushdownBenchmark extends SqlBasedBenchmark {
override def getSparkSession: SparkSession = {
val conf = new SparkConf()
.setAppName(this.getClass.getSimpleName)
// Since `spark.master` always exists, overrides this value
.set("spark.master", "local[1]")
.setIfMissing("spark.driver.memory", "3g")
.setIfMissing("spark.executor.memory", "3g")
.setIfMissing("orc.compression", "snappy")
.setIfMissing("spark.sql.parquet.compression.codec", "snappy")
SparkSession.builder().config(conf).getOrCreate()
}
private val numRows = 1024 * 1024 * 15
private val width = 5
private val mid = numRows / 2
// For Parquet/ORC, we will use the same value for block size and compression size
private val blockSize = org.apache.parquet.hadoop.ParquetWriter.DEFAULT_PAGE_SIZE
def withTempTable(tableNames: String*)(f: => Unit): Unit = {
try f finally tableNames.foreach(spark.catalog.dropTempView)
}
private def prepareTable(
dir: File, numRows: Int, width: Int, useStringForValue: Boolean): Unit = {
import spark.implicits._
val selectExpr = (1 to width).map(i => s"CAST(value AS STRING) c$i")
val valueCol = if (useStringForValue) {
monotonically_increasing_id().cast("string")
} else {
monotonically_increasing_id()
}
val df = spark.range(numRows).map(_ => Random.nextLong).selectExpr(selectExpr: _*)
.withColumn("value", valueCol)
.sort("value")
saveAsTable(df, dir)
}
private def prepareStringDictTable(
dir: File, numRows: Int, numDistinctValues: Int, width: Int): Unit = {
val selectExpr = (0 to width).map {
case 0 => s"CAST(id % $numDistinctValues AS STRING) AS value"
case i => s"CAST(rand() AS STRING) c$i"
}
val df = spark.range(numRows).selectExpr(selectExpr: _*).sort("value")
saveAsTable(df, dir, true)
}
private def saveAsTable(df: DataFrame, dir: File, useDictionary: Boolean = false): Unit = {
val orcPath = dir.getCanonicalPath + "/orc"
val parquetPath = dir.getCanonicalPath + "/parquet"
df.write.mode("overwrite")
.option("orc.dictionary.key.threshold", if (useDictionary) 1.0 else 0.8)
.option("orc.compress.size", blockSize)
.option("orc.stripe.size", blockSize).orc(orcPath)
spark.read.orc(orcPath).createOrReplaceTempView("orcTable")
df.write.mode("overwrite")
.option("parquet.block.size", blockSize).parquet(parquetPath)
spark.read.parquet(parquetPath).createOrReplaceTempView("parquetTable")
}
def filterPushDownBenchmark(
values: Int,
title: String,
whereExpr: String,
selectExpr: String = "*"): Unit = {
val benchmark = new Benchmark(title, values, minNumIters = 5, output = output)
Seq(false, true).foreach { pushDownEnabled =>
val name = s"Parquet Vectorized ${if (pushDownEnabled) s"(Pushdown)" else ""}"
benchmark.addCase(name) { _ =>
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_ENABLED.key -> s"$pushDownEnabled") {
spark.sql(s"SELECT $selectExpr FROM parquetTable WHERE $whereExpr").noop()
}
}
}
Seq(false, true).foreach { pushDownEnabled =>
val name = s"Native ORC Vectorized ${if (pushDownEnabled) s"(Pushdown)" else ""}"
benchmark.addCase(name) { _ =>
withSQLConf(SQLConf.ORC_FILTER_PUSHDOWN_ENABLED.key -> s"$pushDownEnabled") {
spark.sql(s"SELECT $selectExpr FROM orcTable WHERE $whereExpr").noop()
}
}
}
benchmark.run()
}
private def runIntBenchmark(numRows: Int, width: Int, mid: Int): Unit = {
Seq("value IS NULL", s"$mid < value AND value < $mid").foreach { whereExpr =>
val title = s"Select 0 int row ($whereExpr)".replace("value AND value", "value")
filterPushDownBenchmark(numRows, title, whereExpr)
}
Seq(
s"value = $mid",
s"value <=> $mid",
s"$mid <= value AND value <= $mid",
s"${mid - 1} < value AND value < ${mid + 1}"
).foreach { whereExpr =>
val title = s"Select 1 int row ($whereExpr)".replace("value AND value", "value")
filterPushDownBenchmark(numRows, title, whereExpr)
}
val selectExpr = (1 to width).map(i => s"MAX(c$i)").mkString("", ",", ", MAX(value)")
Seq(10, 50, 90).foreach { percent =>
filterPushDownBenchmark(
numRows,
s"Select $percent% int rows (value < ${numRows * percent / 100})",
s"value < ${numRows * percent / 100}",
selectExpr
)
}
Seq("value IS NOT NULL", "value > -1", "value != -1").foreach { whereExpr =>
filterPushDownBenchmark(
numRows,
s"Select all int rows ($whereExpr)",
whereExpr,
selectExpr)
}
}
private def runStringBenchmark(
numRows: Int, width: Int, searchValue: Int, colType: String): Unit = {
Seq("value IS NULL", s"'$searchValue' < value AND value < '$searchValue'")
.foreach { whereExpr =>
val title = s"Select 0 $colType row ($whereExpr)".replace("value AND value", "value")
filterPushDownBenchmark(numRows, title, whereExpr)
}
Seq(
s"value = '$searchValue'",
s"value <=> '$searchValue'",
s"'$searchValue' <= value AND value <= '$searchValue'"
).foreach { whereExpr =>
val title = s"Select 1 $colType row ($whereExpr)".replace("value AND value", "value")
filterPushDownBenchmark(numRows, title, whereExpr)
}
val selectExpr = (1 to width).map(i => s"MAX(c$i)").mkString("", ",", ", MAX(value)")
Seq("value IS NOT NULL").foreach { whereExpr =>
filterPushDownBenchmark(
numRows,
s"Select all $colType rows ($whereExpr)",
whereExpr,
selectExpr)
}
}
override def runBenchmarkSuite(mainArgs: Array[String]): Unit = {
runBenchmark("Pushdown for many distinct value case") {
withTempPath { dir =>
withTempTable("orcTable", "parquetTable") {
Seq(true, false).foreach { useStringForValue =>
prepareTable(dir, numRows, width, useStringForValue)
if (useStringForValue) {
runStringBenchmark(numRows, width, mid, "string")
} else {
runIntBenchmark(numRows, width, mid)
}
}
}
}
}
runBenchmark("Pushdown for few distinct value case (use dictionary encoding)") {
withTempPath { dir =>
val numDistinctValues = 200
withTempTable("orcTable", "parquetTable") {
prepareStringDictTable(dir, numRows, numDistinctValues, width)
runStringBenchmark(numRows, width, numDistinctValues / 2, "distinct string")
}
}
}
runBenchmark("Pushdown benchmark for StringStartsWith") {
withTempPath { dir =>
withTempTable("orcTable", "parquetTable") {
prepareTable(dir, numRows, width, true)
Seq(
"value like '10%'",
"value like '1000%'",
s"value like '${mid.toString.substring(0, mid.toString.length - 1)}%'"
).foreach { whereExpr =>
val title = s"StringStartsWith filter: ($whereExpr)"
filterPushDownBenchmark(numRows, title, whereExpr)
}
}
}
}
runBenchmark(s"Pushdown benchmark for ${DecimalType.simpleString}") {
withTempPath { dir =>
Seq(
s"decimal(${Decimal.MAX_INT_DIGITS}, 2)",
s"decimal(${Decimal.MAX_LONG_DIGITS}, 2)",
s"decimal(${DecimalType.MAX_PRECISION}, 2)"
).foreach { dt =>
val columns = (1 to width).map(i => s"CAST(id AS string) c$i")
val valueCol = if (dt.equalsIgnoreCase(s"decimal(${Decimal.MAX_INT_DIGITS}, 2)")) {
monotonically_increasing_id() % 9999999
} else {
monotonically_increasing_id()
}
val df = spark.range(numRows)
.selectExpr(columns: _*).withColumn("value", valueCol.cast(dt))
withTempTable("orcTable", "parquetTable") {
saveAsTable(df, dir)
Seq(s"value = $mid").foreach { whereExpr =>
val title = s"Select 1 $dt row ($whereExpr)".replace("value AND value", "value")
filterPushDownBenchmark(numRows, title, whereExpr)
}
val selectExpr = (1 to width).map(i => s"MAX(c$i)").mkString("", ",", ", MAX(value)")
Seq(10, 50, 90).foreach { percent =>
filterPushDownBenchmark(
numRows,
s"Select $percent% $dt rows (value < ${numRows * percent / 100})",
s"value < ${numRows * percent / 100}",
selectExpr
)
}
}
}
}
}
runBenchmark("Pushdown benchmark for InSet -> InFilters") {
withTempPath { dir =>
withTempTable("orcTable", "parquetTable") {
prepareTable(dir, numRows, width, false)
Seq(5, 10, 50, 100).foreach { count =>
Seq(10, 50, 90).foreach { distribution =>
val filter =
Range(0, count).map(r => scala.util.Random.nextInt(numRows * distribution / 100))
val whereExpr = s"value in(${filter.mkString(",")})"
val title = s"InSet -> InFilters (values count: $count, distribution: $distribution)"
filterPushDownBenchmark(numRows, title, whereExpr)
}
}
}
}
}
runBenchmark(s"Pushdown benchmark for ${ByteType.simpleString}") {
withTempPath { dir =>
val columns = (1 to width).map(i => s"CAST(id AS string) c$i")
val df = spark.range(numRows).selectExpr(columns: _*)
.withColumn("value", (monotonically_increasing_id() % Byte.MaxValue).cast(ByteType))
.orderBy("value")
withTempTable("orcTable", "parquetTable") {
saveAsTable(df, dir)
Seq(s"value = CAST(${Byte.MaxValue / 2} AS ${ByteType.simpleString})")
.foreach { whereExpr =>
val title = s"Select 1 ${ByteType.simpleString} row ($whereExpr)"
.replace("value AND value", "value")
filterPushDownBenchmark(numRows, title, whereExpr)
}
val selectExpr = (1 to width).map(i => s"MAX(c$i)").mkString("", ",", ", MAX(value)")
Seq(10, 50, 90).foreach { percent =>
filterPushDownBenchmark(
numRows,
s"Select $percent% ${ByteType.simpleString} rows " +
s"(value < CAST(${Byte.MaxValue * percent / 100} AS ${ByteType.simpleString}))",
s"value < CAST(${Byte.MaxValue * percent / 100} AS ${ByteType.simpleString})",
selectExpr
)
}
}
}
}
runBenchmark(s"Pushdown benchmark for Timestamp") {
withTempPath { dir =>
withSQLConf(SQLConf.PARQUET_FILTER_PUSHDOWN_TIMESTAMP_ENABLED.key -> true.toString) {
ParquetOutputTimestampType.values.toSeq.map(_.toString).foreach { fileType =>
withSQLConf(SQLConf.PARQUET_OUTPUT_TIMESTAMP_TYPE.key -> fileType) {
val columns = (1 to width).map(i => s"CAST(id AS string) c$i")
val df = spark.range(numRows).selectExpr(columns: _*)
.withColumn("value", timestamp_seconds(monotonically_increasing_id()))
withTempTable("orcTable", "parquetTable") {
saveAsTable(df, dir)
Seq(s"value = timestamp_seconds($mid)").foreach { whereExpr =>
val title = s"Select 1 timestamp stored as $fileType row ($whereExpr)"
.replace("value AND value", "value")
filterPushDownBenchmark(numRows, title, whereExpr)
}
val selectExpr = (1 to width)
.map(i => s"MAX(c$i)").mkString("", ",", ", MAX(value)")
Seq(10, 50, 90).foreach { percent =>
filterPushDownBenchmark(
numRows,
s"Select $percent% timestamp stored as $fileType rows " +
s"(value < timestamp_seconds(${numRows * percent / 100}))",
s"value < timestamp_seconds(${numRows * percent / 100})",
selectExpr
)
}
}
}
}
}
}
}
runBenchmark(s"Pushdown benchmark with many filters") {
val numRows = 1
val width = 500
withTempPath { dir =>
val columns = (1 to width).map(i => s"id c$i")
val df = spark.range(1).selectExpr(columns: _*)
withTempTable("orcTable", "parquetTable") {
saveAsTable(df, dir)
Seq(1, 250, 500).foreach { numFilter =>
val whereExpr = (1 to numFilter).map(i => s"c$i = 0").mkString(" and ")
          // Note: InferFiltersFromConstraints will add more filters to the given filters
filterPushDownBenchmark(numRows, s"Select 1 row with $numFilter filters", whereExpr)
}
}
}
}
}
}
| witgo/spark | sql/core/src/test/scala/org/apache/spark/sql/execution/benchmark/FilterPushdownBenchmark.scala | Scala | apache-2.0 | 14,737 |
/*
* Copyright 2020 Precog Data
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.ejson
import slamdata.Predef.{Int => SInt, Char => SChar, _}
import quasar.contrib.argonaut._
import quasar.contrib.iota.copkTraverse
import argonaut.EncodeJson
import matryoshka._
import matryoshka.implicits._
import scalaz._
import simulacrum.typeclass
/** Typeclass for types that can be encoded as EJson. */
@typeclass
trait EncodeEJson[A] {
def encode[J](a: A)(implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson]): J
def contramap[B](f: B => A): EncodeEJson[B] = {
val orig = this
new EncodeEJson[B] {
def encode[J](b: B)(implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson]): J =
orig.encode[J](f(b))
}
}
}
object EncodeEJson extends EncodeEJsonInstances {
def apply[A](implicit ev: EncodeEJson[A]): EncodeEJson[A] = ev
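  /** Encode any recursive structure `T` by folding it with the `EncodeEJsonK` algebra of its base functor. */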
def encodeEJsonR[T, F[_]: Functor](
implicit T: Recursive.Aux[T, F], F: EncodeEJsonK[F]
): EncodeEJson[T] =
new EncodeEJson[T] {
def encode[J](t: T)(implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson]): J =
t.cata[J](F.encodeK[J])
}
}
sealed abstract class EncodeEJsonInstances extends EncodeEJsonInstances0 {
implicit val bigIntEncodeEJson: EncodeEJson[BigInt] =
new EncodeEJson[BigInt] {
def encode[J](i: BigInt)(implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson]): J =
Fixed[J].int(i)
}
implicit val intEncodeEJson: EncodeEJson[SInt] =
bigIntEncodeEJson.contramap(BigInt(_))
implicit val longEncodeEJson: EncodeEJson[Long] =
bigIntEncodeEJson.contramap(BigInt(_))
implicit val shortEncodeEJson: EncodeEJson[Short] =
intEncodeEJson.contramap(_.toInt)
implicit val charEncodeEJson: EncodeEJson[SChar] =
new EncodeEJson[SChar] {
def encode[J](c: SChar)(implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson]): J =
Fixed[J].char(c)
}
implicit def optionEncodeEJson[A](implicit A: EncodeEJson[A]): EncodeEJson[Option[A]] =
new EncodeEJson[Option[A]] {
def encode[J](oa: Option[A])(implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson]): J =
oa.fold(Fixed[J].nul())(A.encode[J](_))
}
implicit def encodeJsonT[T[_[_]]: RecursiveT, F[_]: Functor: EncodeEJsonK]: EncodeEJson[T[F]] =
EncodeEJson.encodeEJsonR[T[F], F]
}
sealed abstract class EncodeEJsonInstances0 {
implicit def encodeJsonEJson[A: EncodeJson]: EncodeEJson[A] =
new EncodeEJson[A] {
def encode[J](a: A)(implicit JC: Corecursive.Aux[J, EJson], JR: Recursive.Aux[J, EJson]): J =
EncodeJson.of[A].encode(a).transCata[J](EJson.fromJson(Fixed[J].str(_)))
}
}
| quasar-analytics/quasar | ejson/src/main/scala/quasar/ejson/EncodeEJson.scala | Scala | apache-2.0 | 3,239 |
/*
* Copyright 2014–2018 SlamData Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package quasar.niflheim
import quasar.blueeyes.json._
import scala.collection.mutable
import java.io.{
BufferedReader,
BufferedOutputStream,
File,
FileInputStream,
FileOutputStream,
InputStreamReader,
OutputStream
}
import java.time.LocalDateTime
import java.time.format.DateTimeFormatter
private[niflheim] object RawLoader {
private val fmt = DateTimeFormatter.ofPattern("yyyyMMddHHmmssSSS")
private val utf8 = java.nio.charset.Charset.forName("UTF-8")
/**
* Write the rawlog header to 'os'. Currently this is:
*
* "##rawlog <id> 1\n"
*/
def writeHeader(os: OutputStream, id: Long): Unit = {
val s = "##rawlog " + id.toString + " 1\n"
os.write(s.getBytes(utf8))
os.flush()
}
/**
* Write the given event to 'os'. Each event consists of an
* 'eventid' and a sequence of Jvalue instances.
*/
def writeEvents(os: OutputStream, eventid: Long, values: Seq[JValue]) {
val e = eventid.toString
os.write(("##start " + e + "\n").getBytes(utf8))
values.foreach { j =>
os.write(j.renderCompact.getBytes(utf8))
os.write('\n')
}
os.write(("##end " + e + "\n").getBytes(utf8))
os.flush()
}
  /**
   * Load the rawlog (using the version 1 format).
   *
   * This method assumes the header line has already been parsed, and
   * expects to see zero-or-more of the following groups:
   *
   *   "##start <eventid>\n"
   *   <one rendered JSON value per line>
   *   "##end <eventid>\n"
   */
def load1(id: Long, f: File, reader: BufferedReader): (Seq[JValue], Seq[Long], Boolean) = {
val rows = mutable.ArrayBuffer.empty[JValue]
val events = mutable.ArrayBuffer.empty[(Long, Int)]
var line = reader.readLine()
var ok = true
while (ok && line != null) {
if (line.startsWith("##start ")) {
try {
val eventid = line.substring(8).toLong
val count = loadEvents1(reader, eventid, rows)
if (count < 0) {
ok = false
} else {
events.append((eventid, count))
line = reader.readLine()
}
} catch {
case _: Exception =>
ok = false
}
} else {
ok = false
}
}
if (!ok) {
reader.close() // release the file lock on Windows
recover1(id, f, rows, events)
}
(rows, events.map(_._1), ok)
}
/**
* Generate a "corrupted" rawlog file name.
*
   * From "/foo/bar" we'll return "/foo/bar-corrupted-20130213155306768"
*/
def getCorruptFile(f: File): File =
new File(f.getPath + "-corrupted-" + fmt.format(LocalDateTime.now()))
  /**
   * Recovery: salvage the events that parsed successfully from a corrupted
   * rawlog, write them to a fresh rawlog, move the corrupt file aside, and
   * install the recovered file under the original name.
   */
def recover1(id: Long, f: File, rows: mutable.ArrayBuffer[JValue], events: mutable.ArrayBuffer[(Long, Int)]) {
// open a tempfile to write a "corrected" rawlog to, and write the header
val tmp = File.createTempFile("nilfheim", "recovery")
val os = new BufferedOutputStream(new FileOutputStream(tmp, true))
try {
writeHeader(os, id)
// for each event, write its rows to the rawlog
var row = 0
val values = mutable.ArrayBuffer.empty[JValue]
events.foreach { case (eventid, count) =>
var i = 0
while (i < count) {
values.append(rows(row))
row += 1
i += 1
}
writeEvents(os, eventid, values)
values.clear()
}
} finally {
os.close()
}
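    // move the corrupt rawlog aside (keeping it for inspection) before installing the recovered copy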
f.renameTo(getCorruptFile(f))
// rename the tempfile to the rawlog file
tmp.renameTo(f)
}
def isValidEnd1(line: String, eventid: Long): Boolean = try {
line.substring(6).toLong == eventid
} catch {
case _: Exception => false
}
def loadEvents1(reader: BufferedReader, eventid: Long, rows: mutable.ArrayBuffer[JValue]): Int = {
val sofar = mutable.ArrayBuffer.empty[JValue]
var line = reader.readLine()
var going = true
var ok = false
var count = 0
while (going && line != null) {
if (line.startsWith("##end ")) {
going = false
ok = isValidEnd1(line, eventid)
} else {
try {
sofar.append(JParser.parseUnsafe(line))
count += 1
line = reader.readLine()
} catch {
case _: Exception =>
going = false
}
}
}
if (ok) {
rows ++= sofar
count
} else {
-1
}
}
def load(id: Long, f: File): (Seq[JValue], Seq[Long], Boolean) = {
val reader = new BufferedReader(new InputStreamReader(new FileInputStream(f), utf8))
try {
val header = reader.readLine()
if (header == null)
sys.error("missing header")
else if (header == ("##rawlog " + id.toString + " 1"))
load1(id, f, reader)
else
sys.error("unsupported header: %s" format header)
} finally {
reader.close()
}
}
}
| jedesah/Quasar | niflheim/src/main/scala/quasar/niflheim/RawLoader.scala | Scala | apache-2.0 | 5,350 |
package top.myetl.lucenerdd.util
import java.io.{Closeable, File}
import java.util
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs._
import org.apache.hadoop.fs.permission.FsPermission
import org.apache.hadoop.hdfs.DistributedFileSystem
import org.apache.hadoop.hdfs.protocol.HdfsConstants.SafeModeAction
import org.apache.spark.{Logging, SparkConf}
/**
 * Utility methods for working with HDFS.
 */
object FsUtils extends Logging {
val FileSeparator: String = File.separator
val HdfsFileSeparator: String = "/"
def dirName(path: String*): String ={
val paths = path.map( t => {
if(t.endsWith(HdfsFileSeparator))
t.substring(0, t.length-1)
else
t
})
paths.mkString(HdfsFileSeparator)
}
/**
   * List all files under the given directory
* @param fileSystem
* @param path
* @return
*/
def listAll(fileSystem: FileSystem, path: Path): Array[String] = {
val listStatus = fileSystem.listStatus(path)
listStatus.map{ file => file.getPath.getName}
}
/**
   * List the Lucene directories under the given directory
* @param fileSystem
* @param path
* @return
*/
def listLuceneDir(fileSystem: FileSystem, path: Path): Array[String] = {
val listStatus = fileSystem.listStatus(path)
val paths = listStatus.filter(_.isDirectory).map(p => {
p.getPath.getName
})
if(paths.length < 1 && listStatus.exists(_.getPath.getName.endsWith(".cfe"))) {
return Array(path.getName)
}
paths
}
/**
   * HDFS base directory to store data; this directory must have write permission
* @param conf
* @return
*/
def getHdfsBaseDir(conf: SparkConf): String = {
val dirOption = conf.getOption(Constants.HdfsBaseDirKey)
val sep = File.separator
val dir = dirOption match {
case d: Some[String] => if(d.get.endsWith(sep)) d.get else d.get+sep
case _ => throw new IllegalArgumentException("hdfs base directory not set")
}
dir
}
/**
* Get Hdfs file system
* @param path
* @param conf
* @return
*/
def get(path: Path, conf: Configuration): FileSystem = {
val dir = path.toUri.getPath
FileSystem.get(path.toUri, conf)
}
def close(fs: FileSystem): Unit = {
closeQuietly(fs)
}
/**
* Get Hdfs file context
* @param path
* @param conf
* @return
*/
def getContext(path: Path, conf: Configuration): FileContext = {
FileContext.getFileContext(path.toUri, conf)
}
/**
* Make sure Hdfs is not in safe mode
* @param fileSystem
*/
def untilUnSafe(fileSystem: FileSystem): Unit = {
if(fileSystem.isInstanceOf[DistributedFileSystem]){
val fs = fileSystem.asInstanceOf[DistributedFileSystem]
while(fs.setSafeMode(SafeModeAction.SAFEMODE_GET, true)){
logWarning("The NameNode is in SafeMode wait 5 seconds and try again")
try{
Thread.sleep(5000)
}catch {
case e: InterruptedException => Thread.interrupted()
}
}
}
}
/**
   * Create the directory if it does not exist
* @param path
* @return
*/
def mkDirIfNotExist(path: Path, fileSystem: FileSystem): Unit = {
try{
if(!fileSystem.exists(path)){
val success = fileSystem.mkdirs(path)
if(!success) throw new RuntimeException("Could not create directory: "+path)
logDebug("create directory success "+path)
}else{
logDebug("directory already exists "+path)
}
}catch {
case e: Exception => {
close(fileSystem)
throw new RuntimeException("Problem creating directory: " + path, e)
}
}
}
/**
* Close the FileSystem
* @param closeable
*/
private def closeQuietly(closeable: Closeable ): Unit = {
try {
if (closeable != null) {
closeable.close()
}
} catch {
case e: Exception => logError("Error while closing", e)
}
}
/**
* Get hdfs OutputStream
* @param fileSystem
* @param path
* @return
*/
def getOutputStream(fileSystem: FileSystem, path: Path): FSDataOutputStream = {
val conf: Configuration = fileSystem.getConf
val fsDefaults: FsServerDefaults = fileSystem.getServerDefaults(path)
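    // create the file (overwriting any existing one) and sync each block to disk as it is written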
val flags = util.EnumSet.of(CreateFlag.CREATE,
CreateFlag.OVERWRITE, CreateFlag.SYNC_BLOCK)
val permission = FsPermission.getDefault.applyUMask(FsPermission.getUMask(conf))
fileSystem.create(path, permission, flags, fsDefaults.getFileBufferSize,
fsDefaults.getReplication, fsDefaults.getBlockSize, null)
}
}
| myetl/sparkLu | src/main/scala/top/myetl/lucenerdd/util/FsUtils.scala | Scala | apache-2.0 | 4,570 |
import com.thesamet.proto.e2e.extensions._
import org.scalatest._
class ExtensionsSpec extends FlatSpec with MustMatchers with OptionValues {
"BaseMessage.parseFrom" should "parse unknown fields" in {
val helper = Helper(optInt = Some(37), optString = Some("foo"))
val extended = BaseMessage.parseFrom(helper.toByteArray)
extended.extension(Extension.optInt) must be (Some(37))
extended.extension(Extension.optString) must be (Some("foo"))
}
"BaseMessage.parseFrom" should "parse unknown fields with duplication" in {
val repeatedHelper = RepeatedHelper(optInt = Seq(37, 12), optString = Seq("foo", "bar"))
val extended = BaseMessage.parseFrom(repeatedHelper.toByteArray)
extended.extension(Extension.optInt) must be (Some(12))
extended.extension(Extension.optString) must be (Some("bar"))
}
}
| dotty-staging/ScalaPB | e2e/src/test/scala/ExtensionsSpec.scala | Scala | apache-2.0 | 836 |
package com.temportalist.chalked.common.tile
import java.util
import com.temportalist.origin.library.common.lib.ItemMeta
import com.temportalist.origin.wrapper.common.tile.TEWrapper
import net.minecraft.block.Block
import net.minecraft.item.ItemStack
import net.minecraft.nbt.NBTTagCompound
/**
*
*
* @author TheTemportalist
*/
class TEColored() extends TEWrapper("Smeared Block") with ICamouflage {
private var colorTag: NBTTagCompound = new NBTTagCompound
def setBlockAndColor(block: Block, meta: Int, tagCom: NBTTagCompound): Unit = {
this.setCamouflage(new ItemMeta(block, meta))
this.colorTag = tagCom
}
def getColor(): Int = {
this.colorTag.getInteger("hex")
}
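	// drop a single stack that carries the camouflage block/meta and the colour tag in NBT,
	// so the smeared appearance is preserved when the block is harvested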
override def getDrops(drops: util.ArrayList[ItemStack], block: Block, metadata: Int): Unit = {
drops.clear()
val thisStack: ItemStack = new ItemStack(block, 1, metadata)
val tagCom: NBTTagCompound = new NBTTagCompound
this.saveCamouflageNBT(tagCom)
tagCom.setTag("colorTag", this.colorTag)
thisStack.setTagCompound(tagCom)
drops.add(thisStack)
}
override def writeToNBT(tagCom: NBTTagCompound): Unit = {
super.writeToNBT(tagCom)
val blockMetaTag: NBTTagCompound = new NBTTagCompound
this.saveCamouflageNBT(blockMetaTag)
tagCom.setTag("camouflage", blockMetaTag)
tagCom.setTag("colorTag", this.colorTag)
}
override def readFromNBT(tagCom: NBTTagCompound): Unit = {
super.readFromNBT(tagCom)
this.readCamouflageNBT(tagCom.getCompoundTag("camouflage"))
this.colorTag = tagCom.getCompoundTag("colorTag")
}
}
| TheTemportalist/Chalked | src/main/scala/com/temportalist/chalked/common/tile/TEColored.scala | Scala | apache-2.0 | 1,537 |
import sbt._
object Conf {
lazy val dbConf = settingKey[(String, String, String)]("Typesafe config file with slick settings")
}
| 101dev/play-slick-codegen-flyway-seed | project/Conf.scala | Scala | cc0-1.0 | 131 |
object SCL6854 {
trait Foo[T] {
def value: T
}
// LEVEL 1
case class FooAny[T](value: T) extends Foo[T]
// LEVEL 2
case class FooNumberAny[T: Numeric](value: T) extends Foo[T]
// Constructor
object Foo {
def apply[T, R <: Foo[T]](value: T)(implicit builder: Builder[T, R]): R =
builder.buildInstance(value)
// Builder
trait Builder[T, R <: Foo[T]] {
def buildInstance(value: T): R
}
// defining the FooAny builder, that has a lower priority
trait Level1 {
implicit def FooAnyBuilder[T] = new Builder[T, FooAny[T]] {
def buildInstance(value: T) =
FooAny(value)
}
}
// removing the FooNumberAny builder also fixes the error highlighting
object Builder extends Level1 {
implicit def FooNumberAnyBuilder[T](implicit p: Numeric[T]) = new Builder[T, FooNumberAny[T]] {
def buildInstance(value: T) =
FooNumberAny(value)
}
}
}
object Main extends App {
def log[T](name: String, ref: Foo[T], value: T): Unit =
println(f"val $name%-12s: ${ref.getClass.getName}%-19s = $value")
println()
// Implicits guided type inference does not work in IntelliJ IDEA:
val anyRef = Foo("hello, world!") // <-- ERROR HERE (View -> Type Info shows "Nothing", when it should be FooAny)
log("anyRef", anyRef, anyRef.value)
// <-- manifested here (syntax highlighting error)
val anyRefExp: FooAny[String] = Foo("hello, world! (explicit)") // <-- specifying the type explicitly works
log("anyRefExp", anyRefExp, anyRefExp.value)
val someBoolean = Foo(true) // <-- ERROR here too
log("someBoolean", someBoolean, someBoolean.value)
// <-- manifested here
val anyNumber = Foo(Long.MaxValue)
log("anyNumber", anyNumber, anyNumber.value)
println()
/*start*/(anyRef, anyRefExp, someBoolean, anyNumber)/*end*/
}
}
//(SCL6854.FooAny[String], SCL6854.FooAny[String], SCL6854.FooAny[Boolean], SCL6854.FooNumberAny[Long]) | LPTK/intellij-scala | testdata/typeInference/bugs5/SCL6854.scala | Scala | apache-2.0 | 2,001 |
package parameter
trait Given {
trait T1
trait T2
trait T3
trait T4
trait T5
trait T6
trait T7
trait T8
trait T9
trait T10
trait T11
trait T12
given aliasTypeParameter[A]: Int = ???
given aliasTypeParameters[A, B]: Int = ???
given aliasValueParameter(using x: Int): Int = ???
given aliasValueParameters(using x: Int, y: Int): Int = ???
given aliasMultipleClauses(using x: Int)(using y: Int): Int = ???
given aliasTypeAndValueParameter[A](using x: Int): Int = ???
given [A]: T1 = ???
given [A, B]: T2 = ???
given (using x: Int): T3 = ???
given (using x: Int, y: Int): T4 = ???
given (using x: Int)(using y: Int): T5 = ???
given [A](using x: Int): T6 = ???
given instanceTypeParameter[A]: T1 with {}
given instanceTypeParameters[A, B]: T2 with {}
given instanceValueParameter(using x: Int): T3 with {}
given instanceValueParameters(using x: Int, y: Int): T4 with {}
given instanceMultipleClauses(using x: Int)(using y: Int): T5 with {}
given instanceTypeAndValueParameter[A](using x: Int): T6 with {}
given [A]: T7 with {}
given [A, B]: T8 with {}
given (using x: Int): T9 with {}
given (using x: Int, y: Int): T10 with {}
given (using x: Int)(using y: Int): T11 with {}
given [A](using x: Int): T12 with {}
} | JetBrains/intellij-scala | tasty/runtime/data/parameter/Given.scala | Scala | apache-2.0 | 1,315 |
package edu.oregonstate.mutation.statementHistory
/**
* Created by caius on 12/5/15.
*/
class MethodChangeDetectorTest extends GitTest with NodeChangeDetectorTest {
it should "track a method across refactorings" in {
val first = add("A.java", "public class A{\\npublic void m(){\\nint x=3;\\n}}")
val second = add("A.java", "public class A{\\npublic void n(){\\nint x=3;\\n}}")
val expected = Seq(ci(first.getName, "ADD"), ci(second.getName, "UPDATE"))
val actual = nd(git, MethodFinder).findCommits("A.java", 3, second.getName)
actual should equal (expected)
}
it should "track a method across multiple refactorings" in {
val first = add("A.java", "public class A{\\npublic void m(){\\nint x=3;\\n}}")
val second = add("A.java", "public class A{\\npublic void n(int a){\\nint x=3;\\n}}")
val expected = Seq(ci(first.getName, "ADD"), ci(second.getName, "UPDATE"))
val actual = nd(git, MethodFinder).findCommits("A.java", 3, second.getName)
actual should equal (expected)
}
}
| caiusb/statement-history | src/test/scala/edu/oregonstate/mutation/statementHistory/MethodChangeDetectorTest.scala | Scala | mit | 1,022 |
package poker.core.handanalyzer
import poker.core.{Hand, HandStatus, HandType}
import poker.Utils
final class RoyalFlushAnalyzer extends HandAnalyzer {
/** Detect any T-J-Q-K-A, all of the same suit */
override def doAnalyze(hand: Hand): HandStatus = {
if (hand.hasSameSuit && hand.hasConsecutiveCardsStartingWith(Utils.rankAsInt('T'))) {
HandStatus(HandType.RoyalFlush)
} else {
HandStatus.none
}
}
}
| kyuksel/poker | src/main/scala/poker/core/handanalyzer/RoyalFlushAnalyzer.scala | Scala | mit | 434 |
package controllers
import play.api.mvc.Controller
import org.scribe.oauth.OAuthService
import play.api.Play
import play.api.mvc.Action
import org.scribe.builder.ServiceBuilder
import org.scribe.builder.api.GoogleApi
import org.scribe.model.Token
import play.api.Logger
import org.scribe.model.Verifier
import org.scribe.model.OAuthRequest
import org.scribe.model.Verb
import org.scribe.model.Response
import play.api.libs.json.Json
import models.UserModel
import utils.EncryptionUtility
import org.bson.types.ObjectId
import play.api.i18n.Messages
object Google extends Controller {
val apiKey: String = "86006814210-qte4kti9s3m251fepttl2nhvdjs0.apps.googleusercontent.com"
val apiSecret: String = "gry9NiXEkxlR7nYw8OFoS"
var requestToken: Token = null
val authorizationUrlGoogle: String = "https://www.google.com/accounts/OAuthAuthorizeToken?oauth_token="
val protectedResourceUrl: String = "https://www.googleapis.com/oauth2/v1/userinfo?alt=json"
val scope: String = "https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email"
val currentUserId = "userId"
/**
   * Build the OAuthService used to make OAuth requests to Google
*/
def getOAuthService: OAuthService = {
    val service: OAuthService = new ServiceBuilder()
.provider(classOf[GoogleApi])
.apiKey(apiKey)
.apiSecret(apiSecret)
.scope(scope)
.callback("http://" + getContextUrl + "/google/callback")
.build();
service
}
/**
   * Get the root context URL from application.conf
*/
def getContextUrl: String = {
Play.current.configuration.getString("contextUrl").get
}
def googleLogin: Action[play.api.mvc.AnyContent] = Action {
try {
requestToken = getOAuthService.getRequestToken();
val authorizationUrl: String = authorizationUrlGoogle + requestToken.getToken()
Redirect(authorizationUrl)
} catch {
case ex : Throwable => {
Logger.error("Error During Login Through Google - " + ex)
Ok(views.html.RedirectMain("", "failure"))
}
}
}
def googleCallback: Action[play.api.mvc.AnyContent] = Action { implicit request =>
try {
getVerifier(request.queryString) match {
case None => Ok("")
case Some(oauth_verifier) =>
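          // exchange the request token and verifier for an access token, then fetch the
          // user's profile from the Google userinfo endpoint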
val verifier: Verifier = new Verifier(oauth_verifier)
val accessToken: Token = getOAuthService.getAccessToken(requestToken, verifier)
val oAuthRequest: OAuthRequest = new OAuthRequest(Verb.GET, protectedResourceUrl)
getOAuthService.signRequest(accessToken, oAuthRequest)
oAuthRequest.addHeader("GData-Version", "3.0")
val response: Response = oAuthRequest.send
response.getCode match {
case 200 =>
val json = Json.parse(response.getBody)
            val userEmail = (json \ "email").asOpt[String]
UserModel.findUserByEmail(userEmail.get) match {
case None =>
val password = EncryptionUtility.generateRandomPassword
val user = UserModel(new ObjectId, userEmail.get, password)
val userOpt = UserModel.createUser(user)
userOpt match {
case None => Redirect("/").flashing("error" -> Messages("error"))
case Some(userId) =>
val userSession = request.session + ("userId" -> user.id.toString)
Ok(views.html.RedirectMain(user.id.toString, "success")).withSession(userSession)
}
case Some(alreadyExistingUser) =>
val userSession = request.session + ("userId" -> alreadyExistingUser.id.toString)
Ok(views.html.RedirectMain(alreadyExistingUser.id.toString, "success")).withSession(userSession)
}
case 400 =>
Logger.error("Error 400 : During Login Through Google- " + response.getBody)
Ok(views.html.RedirectMain("", "failure"))
case _ =>
Logger.error("Error " + response.getCode + " : During Login Through Google - " + response.getBody)
Ok(views.html.RedirectMain("", "failure"))
}
}
} catch {
case ex:Throwable => {
Logger.error("Error During Login Through Google - " + ex)
Ok(views.html.RedirectMain("", "failure"))
}
}
}
def getVerifier(queryString: Map[String, Seq[String]]): Option[String] = {
val seq = queryString.get("oauth_verifier").getOrElse(Seq())
seq.isEmpty match {
case true => None
case false => seq.headOption
}
}
} | knoldus/Play-Starter-Template | app/controllers/Google.scala | Scala | apache-2.0 | 4,605 |
/*
Find the Kth element of a list.
By convention, the first element in the list is element 0.
Example:
scala> nth(2, List(1, 1, 2, 3, 5, 8))
res0: Int = 2
*/
package S99
object P03 {
@annotation.tailrec
def nth[A](n: Int, xs: List[A]): A = {
xs match {
case _ :: tail if n > 0 => nth(n - 1, tail)
case h :: _ => h
case _ => throw new NoSuchElementException
}
}
}
| gcanti/S-99 | src/main/scala/P03.scala | Scala | mit | 439 |
package scalanlp.stats
package distributions
import org.scalatest.junit.JUnitRunner
import org.scalatest.FunSuite
import org.junit.Test
import org.junit.runner.RunWith
import org.junit.Before
import org.scalatest.prop._
import org.scalacheck._
import java.util.zip.DeflaterInputStream
/**
* @author dlwh
*/
trait MomentsTestBase[T] extends FunSuite with Checkers {
implicit def arbDistr: Arbitrary[Measure[T] with Rand[T] with Moments[Double]];
val numSamples = 10000;
def asDouble(x: T):Double
def fromDouble(x: Double):T
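  // These checks are statistical: sampled estimates only need to land within a loose
  // tolerance of the distribution's analytic mean, variance and mode.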
test("mean") {
check(Prop.forAll { (distr: Measure[T] with Rand[T] with Moments[Double])=>
val sample = distr.sample(numSamples).map(asDouble _)
val mean = DescriptiveStats.mean(sample)
(mean - distr.mean).abs/(mean.abs max 1) < 1E-1
})
}
test("variance") {
check(Prop.forAll { (distr: Measure[T] with Rand[T] with Moments[Double])=>
val sample = distr.sample(numSamples).map(asDouble _)
val variance = DescriptiveStats.variance(sample)
(variance - distr.variance).abs/(variance max 1) < 1E-1
})
}
test("mode") {
check(Prop.forAll { (distr: Rand[T] with Measure[T] with Moments[Double])=>
val sample = distr.sample(40)
val probMode = distr(fromDouble(distr.mode))
// if(distr.isInstanceOf[Poisson])
// println(distr,probMode,sample.map{ distr },sample)
sample.forall(x => probMode >= distr(x) - 1E-4);
})
}
} | MLnick/scalanlp-core | learn/src/test/scala/scalanlp/stats/distributions/MomentsTestBase.scala | Scala | apache-2.0 | 1,456 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* @author Vamsi Thummala {[email protected]}, Copyright (C) 2013-2015
*
*/
package safe.safelog
package parser
import scala.collection.mutable.{Set => MutableSet}
trait ParserImpl
extends scala.util.parsing.combinator.JavaTokenParsers
with scala.util.parsing.combinator.PackratParsers
with com.typesafe.scalalogging.LazyLogging {
parserImpl: ParserService =>
// override whiteSpace to support C-style comments or (* comments *)
// space | single-line-comment | multi-line-comments-c-style | multi-line-comments-with-braces
  //protected override val whiteSpace = """(\s|(?m)/\*(\*(?!/)|[^*])*\*/|(?m)\(\*(\*(?!\))|[^*])*\*\))+""".r // ?m for multiline mode
  protected override val whiteSpace = """(\s|//.*|(?m)/\*(\*(?!/)|[^*])*\*/|(?m)\(\*(\*(?!\))|[^*])*\*\))+""".r
/*
private var _saysOperator: Boolean = Config.config.saysOperator
private var _self: String = Config.config.self
// _statementCache is accessed from Repl
private[safelog] val _statementCache: MutableCache[Index, MutableSet[Statement]] =
new MutableCache[Index, MutableSet[Statement]]()
*/
  lazy val symbol = """[a-zA-Z][a-zA-Z_\d]*""".r
lazy val identifier: PackratParser[String] = (
"+"
| "-"
| "*"
| "/"
| "%" // modulo operator
| "!"
| "not"
| "<:<" // subset
| "<=" // Note: order important here for parsing, i.e., <= and >= should come before < and > respectively
| "<"
| ">="
| ">"
| "=:=" // compare opeartor; right side eval + left side eval + unify
| ":=" // is opeartor; right side eval + unify
| "=" // unify opeartor
| "compare"
| "is"
| "unify"
| "max"
| "min"
| "range"
| "subset"
| "in"
| "@" // at operator
)
  lazy val globalVariablePattern = """\$([a-zA-Z_\d]*)""".r
  lazy val variablePattern = """(^[\?_][a-zA-Z_\d]*)""".r
  lazy val anyVariablePattern = """([\$\?])(?:\()?([a-zA-Z_\d]*)(?:\))?""".r
lazy val typeDelimiter: PackratParser[String] = ("#" | "type") // dom for domain
lazy val logicalIf: PackratParser[String] = (":-" | "if")
lazy val logicalAnd: PackratParser[String] = ("," | "and")
lazy val logicalOr: PackratParser[String] = (";" | "or")
lazy val logicalEnd: PackratParser[String] = ("." | "end")
lazy val logicalNegation: PackratParser[String] = ("!" | "not")
lazy val attrMapDelimiter: PackratParser[String] = ("->" | "as") ^^ {case v => "as"}
lazy val attrMapIndexDelimiter: PackratParser[String] = ("->>" | "keyAs") ^^ {case v => "keyAs"}
def addStatement(index: Index, s: Statement): MutableSet[Statement] = {
val stmts: MutableSet[Statement] = _statementCache.get(index) map {
v => v +=s
} getOrElse {
val _newSet = MutableSet.empty[Statement]
_newSet += s
}
_statementCache.put(index, stmts)
stmts
}
def addStatementSeq(index: Index, seq: Seq[Statement]): MutableSet[Statement] = {
val stmts: MutableSet[Statement] = _statementCache.get(index) map {
v => v ++= seq
} getOrElse {
val _newSet = MutableSet.empty[Statement]
_newSet ++= seq
}
_statementCache.put(index, stmts)
stmts
}
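  // A program is a sequence of statements and multi-rules; as they are parsed they are
  // accumulated into _statementCache (keyed by statement index), which is then returned.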
lazy val program: PackratParser[MutableCache[Index, MutableSet[Statement]]] = rep(multiRule | statement) ^^ {
case multiStatements =>
_statementCache
}
lazy val statement: PackratParser[(Index, MutableSet[Statement])] = (query | assertion | retraction) ^^ {
case s @ Assertion(terms) => terms.head match {
case Structure(id, trs, _, _, _) if Config.config.reserved.contains(id) =>
val expectedArity: Int = Config.config.reserved(id)
val termsLength = if(saysOperator == true) {trs.length - 1} else {trs.length}
//println(s"TERMS LENGTH: ${trs.length}; $terms; saysOp: ${saysOperator}")
val res = if(termsLength != expectedArity) {
logger.warn(s"For metadata, the expected arity of $id is $expectedArity but ${trs.length} found")
addStatement(StrLit(s"_${id.name}"), s)
} else if(termsLength == expectedArity) addStatement(StrLit(s"_${id.name}"), s)
else if(id.name == "name" && termsLength == 1) addStatement(StrLit(s"_${id.name}"), s)
else addStatement(s.primaryIndex, s)
(id, res)
case _ =>
val res = addStatement(s.primaryIndex, s)
(s.primaryIndex, res)
}
case s @ Retraction(x) =>
val res = addStatement(StrLit("_retraction"), s)
(StrLit("_retraction"), res)
case s @ Query(x) =>
val res = addStatement(StrLit("_query"), s)
(StrLit("_query"), res)
case s @ QueryAll(x) =>
val res = addStatement(StrLit("_query"), s)
(StrLit("_query"), res) // this is on purpose; for indexing we only care whether the statement is a query
case _ => throw new UnSafeException(s"Statement type not detected")
}
lazy val assertion: PackratParser[Statement] =
(("assert" ~> clause <~ "end") | (clause <~ logicalEnd)) ^^ {
case trs => Assertion(trs)
}
lazy val retraction: PackratParser[Retraction] =
(("retract" ~> (predicateWithArity | clause) <~ logicalEnd) | ((predicateWithArity | clause) <~ "~")) ^^ {
case trs => Retraction(trs)
}
lazy val predicateWithArity: PackratParser[Seq[Term]] = (constantString ~ "/" ~ integer) ^^ {
case sym ~ slash ~ arity => Constant(StrLit("_withArity")) +: sym +: arity +: Nil
}
lazy val clause: PackratParser[Seq[Term]] = (rule | groundFact)
lazy val rule: PackratParser[Seq[Term]] = headLiteral ~ logicalIf ~ literals ^^ { // head :- body1; body2.
// TODO
// 1. check for safety: range restriction
// 2. check for stratified logicalNegation and/or other valid rules (for e.g., guarded safelog)
case head ~ lIf ~ body =>
val (isSafe, unSafeVar) = rangeRestrictionCheck(head, body)
if(!isSafe) {
throw ParserException(s"""Unsound rule dectected. Check range restriction failed for ${unSafeVar.mkString(",")}""")
}
head +: body
}
lazy val multiRule: PackratParser[(Index, MutableSet[Statement])] = (multiRuleAssertion | multiRuleRetraction) ^^ {
case s @ Assertion(terms) +: other => terms.head match {
case Structure(id, trs, _, _, _) if Config.config.reserved.contains(id) =>
val expectedArity: Int = Config.config.reserved(id)
val termsLength = if(saysOperator == true) {trs.length - 1} else {trs.length}
//println(s"TERMS LENGTH: ${trs.length}; $terms; saysOp: ${saysOperator}")
val out = if(termsLength != expectedArity) {
logger.warn(s"For metadata, the expected arity of $id is $expectedArity but ${trs.length} found")
val res: MutableSet[Statement] = addStatementSeq(StrLit(s"_${id.name}"), s)
(StrLit(s"_${id.name}"), res)
} else if(termsLength == expectedArity) {
val res: MutableSet[Statement] = addStatementSeq(StrLit(s"_${id.name}"), s)
(StrLit(s"_${id.name}"), res)
} else if(id.name == "name" && termsLength == 1) {
val res: MutableSet[Statement] = addStatementSeq(StrLit(s"_${id.name}"), s)
(StrLit(s"_${id.name}"), res)
} else {
val res: MutableSet[Statement] = addStatementSeq(s.head.primaryIndex, s)
(s.head.primaryIndex, res)
}
out
case _ =>
val res = addStatementSeq(s.head.primaryIndex, s)
(s.head.primaryIndex, res)
}
case s @ Retraction(x) +: other =>
val res: MutableSet[Statement] = addStatementSeq(StrLit("_retraction"), s)
(StrLit("_retraction"), res)
}
lazy val multiRuleAssertion: PackratParser[Seq[Statement]] =
(("assert" ~> headLiteral ~ logicalIf ~ repsep(literals, logicalOr) <~ "end") | (headLiteral ~ logicalIf ~ repsep(literals, logicalOr) <~ logicalEnd)) ^^ {
case head ~ lIf ~ clauses => clauses.map{clause =>
val (isSafe, unSafeVar) = rangeRestrictionCheck(head, clause)
if(!isSafe) {
throw ParserException(s"""Unsound rule dectected. Check range restriction failed for ${unSafeVar.mkString(",")}""")
}
Assertion(head +: clause)
}
}
lazy val multiRuleRetraction: PackratParser[Seq[Statement]] = (("retract" ~> headLiteral ~ logicalIf ~ repsep(literals, logicalOr) <~ "end") | (headLiteral ~ logicalIf ~ repsep(literals, logicalOr) <~ "~")) ^^ {
case head ~ lIf ~ clauses => clauses.map{clause => Retraction(head +: clause)}
}
lazy val groundFact: PackratParser[Seq[Term]] = headLiteral ^^ {
case head =>
val (isSafe, unSafeVar) = rangeRestrictionCheck(head, Nil)
if(!isSafe) {
throw ParserException(s"""Unsound rule dectected. Check range restriction failed for ${unSafeVar.mkString(",")}""")
}
Seq(head)
}
lazy val query: PackratParser[Statement] = (queryAll | queryOne)
lazy val queryAll: PackratParser[QueryAll] =
(("queryAll" ~> literals <~ "end") | (opt(logicalIf) ~> literals <~ "??")) ^^ {case q => QueryAll(q)}
lazy val queryOne: PackratParser[Query] =
(("query" ~> literals <~ "end") | (opt(logicalIf) ~> literals <~ "?")) ^^ {case q => Query(q)}
lazy val literals: PackratParser[Seq[Term]] = repsep(literal, logicalAnd)
lazy val headLiteral: PackratParser[Term] = (infixTerm | negatedAtom | structureTerm | atom)
lazy val literal: PackratParser[Term] = (headLiteral | nilAtom)
lazy val atoms: PackratParser[Seq[Term]] = repsep(atom, logicalAnd)
lazy val nilAtom: PackratParser[Term] = opt(constant) ^^ {
case None => Constant(StrLit("true"))
case Some(c: Constant) if(c.id == StrLit("end")) => c // the constant match should never occur; it is only for pattern matching purposes
case other => throw ParserException(s"Statement not terminated properly: $other")
}
lazy val atom: PackratParser[Term] = // Note: opt(typeDelimiter) is a hack to make #type -> value work
opt((singleQuotedString | symbol | "?" | "_") <~ typeDelimiter) ~ opt(opt(typeDelimiter) ~ (symbol | singleQuotedString) ~ (attrMapIndexDelimiter | attrMapDelimiter)) ~ (variable | constant) ^^ {
case None ~ None ~ Constant(x, cattrName, ctpe, ckey) =>
val (cctpe: StrLit, indexAndEncode: Encoding) = typeWithEncoding(ctpe, ckey, Encoding.Attr)
Constant(x, cattrName, cctpe, indexAndEncode)
case None ~ None ~ Variable(x, cattrName, ctpe, ckey) =>
val (cctpe: StrLit, indexAndEncode: Encoding) = typeWithEncoding(ctpe, ckey, Encoding.Attr)
Variable(x, cattrName, cctpe, indexAndEncode)
case None ~ None ~ Structure(x, xterms, cattrName, ctpe, ckey) => // interpolation case
val (cctpe: StrLit, indexAndEncode: Encoding) = typeWithEncoding(ctpe, ckey, Encoding.Attr)
Structure(x, xterms, cattrName, cctpe, indexAndEncode)
case None ~ Some(None ~ attrName ~ keyAttr) ~ Constant(x, cattrName, ctpe, ckey) =>
val (cctpe: StrLit, indexAndEncode: Encoding) =
if (keyAttr == "as") typeWithEncoding(ctpe, ckey, Encoding.Attr)
else typeWithEncoding(ctpe, ckey, Encoding.IndexAttr)
Constant(x, StrLit(attrName.toString), cctpe, indexAndEncode)
case None ~ Some(None ~ attrName ~ keyAttr) ~ Variable(x, cattrName, ctpe, ckey) =>
val (cctpe: StrLit, indexAndEncode: Encoding) =
if (keyAttr == "as") typeWithEncoding(ctpe, ckey, Encoding.Attr)
else typeWithEncoding(ctpe, ckey, Encoding.IndexAttr)
Variable(x, StrLit(attrName.toString), cctpe, indexAndEncode)
case None ~ Some(dlm ~ tpe ~ keyAttr) ~ Constant(x, cattrName, ctpe, ckey) =>
val (cctpe: StrLit, indexAndEncode: Encoding) =
if (keyAttr == "as") typeWithEncoding(ctpe, ckey, Encoding.Attr)
else typeWithEncoding(ctpe, ckey, Encoding.IndexAttr)
Constant(x, cattrName, StrLit(tpe.toString), indexAndEncode)
case None ~ Some(dlm ~ tpe ~ keyAttr) ~ Variable(x, cattrName, ctpe, ckey) =>
val (cctpe: StrLit, indexAndEncode: Encoding) =
if (keyAttr == "as") typeWithEncoding(ctpe, ckey, Encoding.Attr)
else typeWithEncoding(ctpe, ckey, Encoding.IndexAttr)
Variable(x, cattrName, StrLit(tpe.toString), indexAndEncode)
case Some(attrName) ~ None ~ Constant(x, cattrName, ctpe, ckey) =>
val (cctpe: StrLit, indexAndEncode: Encoding) = typeWithEncoding(ctpe, ckey, Encoding.Attr)
Constant(x, StrLit(attrName.toString), cctpe, indexAndEncode)
case Some(attrName) ~ None ~ Variable(x, cattrName, ctpe, ckey) =>
val (cctpe: StrLit, indexAndEncode: Encoding) = typeWithEncoding(ctpe, ckey, Encoding.Attr)
Variable(x, StrLit(attrName.toString), cctpe, indexAndEncode)
case Some(attrName) ~ Some(dlm ~ tpe ~ keyAttr) ~ Constant(x, cattrName, ctpe, ckey) =>
val (cctpe: StrLit, indexAndEncode: Encoding) =
if (keyAttr == "as") typeWithEncoding(ctpe, ckey, Encoding.Attr)
else typeWithEncoding(ctpe, ckey, Encoding.IndexAttr)
Constant(x, StrLit(attrName.toString), StrLit(tpe.toString), indexAndEncode)
case Some(attrName) ~ Some(dlm ~ tpe ~ keyAttr) ~ Variable(x, cattrName, ctpe, ckey) =>
val (cctpe: StrLit, indexAndEncode: Encoding) =
if (keyAttr == "as") typeWithEncoding(ctpe, ckey, Encoding.Attr)
else typeWithEncoding(ctpe, ckey, Encoding.IndexAttr)
Variable(x, StrLit(attrName.toString), StrLit(tpe.toString), indexAndEncode)
}
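  // Merge the declared type with the attribute encoding: literal/hex/base64 constants are
  // typed as StrLit and their encoding bits are OR-ed into the index/attr encoding.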
protected def typeWithEncoding(tpe: StrLit, enc: Encoding, indexAndEncode: Encoding): (StrLit, Encoding) = {
if(enc == Encoding.AttrLiteral) (StrLit("StrLit"), Encoding(indexAndEncode.id | 2))
else if(enc == Encoding.AttrHex) (StrLit("StrLit"), Encoding(indexAndEncode.id | 4))
else if(enc == Encoding.AttrBase64) (StrLit("StrLit"), Encoding(indexAndEncode.id | 6))
else (tpe, indexAndEncode) // enc == StrLit
}
// A rule is safe (range restricted) iff:
//
// 1. Each distinguished variable, // Unsafe: s(X) :- r(Y)
// 2. Each variable in an arithmetic subgoal, // Unsafe: s(X) :- r(Y), X < Y
// or contains an equality or is goal where X = Y, where Y is safe
// 3. Each variable in a negated subgoal, // Unsafe: s(X) :- r(Y), NOT r(X)
//
// also appears in a nonnegated relational subgoal.
//@annotation.tailrec
private def rangeRestrictionCheck(head: Term, body: Seq[Term]): Tuple2[Boolean, Set[Term]] = {
// filter arithmetic literals and negated literals from body
def filterVariablesInNegatedArithemeticLiterals(
body: Seq[Term]
, stdLiteralVariables: Set[Term] = Set.empty
, negatedArithmeticLiteralVariables: Set[Term] = Set.empty
): (Set[Term], Set[Term]) = body match {
case NegatedTerm(id, term, _, _, _) +: other =>
filterVariablesInNegatedArithemeticLiterals(
other
, stdLiteralVariables
, negatedArithmeticLiteralVariables ++ term.unboundVariables
)
case term @ Structure(equality, terms, _, _, _) +: other if Set(StrLit("_unify"), StrLit("_is")).contains(equality) =>
val (isSubGoalSafe: Boolean, varSeq: Set[Term]) = terms.last match {
case Constant(_, _, _, _) => (true, Set.empty)
case v @ Variable(_, _, _, _) if v.isEnvVariable() => (true, Set.empty) // ignore env variables
case Variable(_, _, _, _) => rangeRestrictionCheck(head, terms)
case Structure(_, xterms, _, _, _) => rangeRestrictionCheck(head, terms.head +: xterms)
}
if(isSubGoalSafe) {
filterVariablesInNegatedArithemeticLiterals(
other
, stdLiteralVariables ++ term.head.unboundVariables
, negatedArithmeticLiteralVariables
)
}
else {
filterVariablesInNegatedArithemeticLiterals(
other
, stdLiteralVariables
, negatedArithmeticLiteralVariables ++ varSeq
)
}
case term @ Structure(arithmetic, terms, _, _, _) +: other if arithmeticLiterals.contains(arithmetic) =>
filterVariablesInNegatedArithemeticLiterals(
other
, stdLiteralVariables
, negatedArithmeticLiteralVariables ++ term.head.unboundVariables
)
case stdLiteral +: other =>
filterVariablesInNegatedArithemeticLiterals(
other
, stdLiteralVariables ++ stdLiteral.unboundVariables
, negatedArithmeticLiteralVariables
)
case Nil => (stdLiteralVariables, negatedArithmeticLiteralVariables)
}
val (stdLiteralVariables, negatedArithmeticLiteralVariables) = filterVariablesInNegatedArithemeticLiterals(body)
val headVariables = head.unboundVariables
val variablesToCheck: Set[Term] = headVariables ++ negatedArithmeticLiteralVariables
val diffVariables: Set[Term] = variablesToCheck.diff(stdLiteralVariables)
if(diffVariables.isEmpty) (true, Set.empty) else (false, diffVariables)
}
lazy val negatedAtom: PackratParser[Term] = logicalNegation ~ opt("(") ~ (structureTerm | atom) ~ opt(")") ^^ {
case neg ~ Some(_) ~ atm ~ Some(_) => NegatedTerm(StrLit("_not"), atm)
case neg ~ None ~ atm ~ None => NegatedTerm(StrLit("_not"), atm)
}
lazy val infixTerm: PackratParser[Term] = (
expr ~ "compare" ~ expr // eval both leftExpr and rightExpr and unify
| expr ~ "notcompare" ~ expr // eval both leftExpr and rightExpr and unify
| expr ~ "=:=" ~ expr // eval both leftExpr and rightExpr and unify
| expr ~ "!=:=" ~ expr // eval both leftExpr and rightExpr and unify
| expr ~ "is" ~ expr // eval rightExpr and unfiy
| expr ~ ":=" ~ expr // eval rightExpr and unfiy
| expr ~ "unify" ~ expr // unify
| expr ~ "notunify" ~ expr // unify
| expr ~ "=" ~ expr // unify
| expr ~ "!<:<" ~ expr // subset
| expr ~ "<:<" ~ expr // subset
| expr ~ "subset" ~ expr // subset
| expr ~ "notsubset" ~ expr // subset
| expr ~ "<:" ~ expr // in
| expr ~ "in" ~ expr // in
| expr ~ "!<:" ~ expr // in
| expr ~ "notin" ~ expr // in
| expr ~ "!=" ~ expr // not unify
| expr ~ "<=" ~ expr
| expr ~ "!<=" ~ expr
| expr ~ "<" ~ expr
| expr ~ "!<" ~ expr
| expr ~ ">=" ~ expr
| expr ~ "!>=" ~ expr
| expr ~ ">" ~ expr
| expr ~ "!>" ~ expr
| expr ~ "@" ~ expr // at operator
) ^^ {
case leftTerm ~ operator ~ rightTerm => operator match {
case "=:=" | "compare" => Structure(StrLit("_compare"), Seq(leftTerm) ++: Seq(rightTerm))
case "!=:=" | "notcompare" => NegatedTerm(StrLit("_not"), Structure(StrLit("_compare"), Seq(leftTerm) ++: Seq(rightTerm)))
case ":=" | "is" => Structure(StrLit("_is"), Seq(leftTerm) ++: Seq(rightTerm))
case "=" | "unify" => Structure(StrLit("_unify"), Seq(leftTerm) ++: Seq(rightTerm))
case "!=" | "notunify" => NegatedTerm(StrLit("_not"), Structure(StrLit("_unify"), Seq(leftTerm) ++: Seq(rightTerm)))
case "<:<" | "subset" => Structure(StrLit("_subset"), Seq(leftTerm) ++: Seq(rightTerm))
case "!<:<" | "notsubset" => NegatedTerm(StrLit("_not"), Structure(StrLit("_subset"), Seq(leftTerm) ++: Seq(rightTerm)))
case "<:" | "in" => Structure(StrLit("_in"), Seq(leftTerm) ++: Seq(rightTerm))
case "!<:<" | "notin" => NegatedTerm(StrLit("_not"), Structure(StrLit("_in"), Seq(leftTerm) ++: Seq(rightTerm)))
case "<" => Structure(StrLit("_lt"), Seq(leftTerm) ++: Seq(rightTerm))
case "!<" => NegatedTerm(StrLit("_not"), Structure(StrLit("_lt"), Seq(leftTerm) ++: Seq(rightTerm)))
case "<=" => Structure(StrLit("_lteq"), Seq(leftTerm) ++: Seq(rightTerm))
case "!<=" => NegatedTerm(StrLit("_not"), Structure(StrLit("_lteq"), Seq(leftTerm) ++: Seq(rightTerm)))
case ">" => Structure(StrLit("_gt"), Seq(leftTerm) ++: Seq(rightTerm))
case "!>" => NegatedTerm(StrLit("_not"), Structure(StrLit("_gt"), Seq(leftTerm) ++: Seq(rightTerm)))
case ">=" => Structure(StrLit("_gteq"), Seq(leftTerm) ++: Seq(rightTerm))
case "!>=" => NegatedTerm(StrLit("_not"), Structure(StrLit("_gteq"), Seq(leftTerm) ++: Seq(rightTerm)))
case "@" => Structure(StrLit("_at"), Seq(leftTerm) ++: Seq(rightTerm))
case _ => Structure(operator, Seq(leftTerm) ++: Seq(rightTerm))
}
}
lazy val expr: PackratParser[Term] = (structureTerm | variable | constant)
lazy val arithmeticLiterals: Set[StrLit] = Set(
StrLit("_plus")
, StrLit("_minus")
, StrLit("_times")
, StrLit("_div")
, StrLit("_rem")
, StrLit("_lteq")
, StrLit("_lt")
, StrLit("_gteq")
, StrLit("_gt")
, StrLit("_max")
, StrLit("_min")
)
lazy val functor: PackratParser[StrLit] = (identifier) ^^ {
case "+" => StrLit("_plus")
case "-" => StrLit("_minus")
case "*" => StrLit("_times")
case "/" => StrLit("_div")
case "%" => StrLit("_rem") // modulo operator
case "<:<" | "subset" => StrLit("_subset") // subset operator
case "<:" | "in" => StrLit("_in") // in operator
case "<=" => StrLit("_lteq")
case "<" => StrLit("_lt")
case ">=" => StrLit("_gteq")
case ">" => StrLit("_gt")
case "=:=" | "compare" => StrLit("_compare") // compare opeartor; right side eval + left side eval + unify
case ":=" | "is" => StrLit("_is") // is opeartor; right side eval + unify
case "=" | "unify" => StrLit("_unify")
case "!" | "not" => StrLit("_not")
case "max" => StrLit("_max")
case "min" => StrLit("_min")
case "range" => StrLit("_range")
case "@" => StrLit("_at") // at operator
}
lazy val operatorTerm: PackratParser[Term] = opt(atom <~ ("says" | ":")) ~ functor ~ "(" ~ atoms <~ ")" ^^ {
case Some(subject) ~ funct ~ lParen ~ trs => Structure(funct, trs) // No signing needed for functional operators
case None ~ funct ~ lParen ~ trs => Structure(funct, trs)
}
lazy val symbolTerm: PackratParser[Term] = opt(atom <~ ("says" | ":")) ~ (constantString | singleQuotedString) ~ "(" ~ atoms <~ ")" ^^ {
case Some(speaker) ~ funct ~ lParen ~ trs => Structure(funct.id, speaker +: trs)
case None ~ funct ~ lParen ~ trs =>
if(saysOperator == true && self != "Self") Structure(funct.id, Constant(StrLit(self), StrLit("nil"), StrLit("StrLit"), Encoding.AttrBase64) +: trs)
else if(saysOperator) Structure(funct.id, Variable("$" + s"${self}") +: trs)
else Structure(funct.id, trs)
}
lazy val overrideOperatorTerm: PackratParser[Term] = opt(atom <~ ("says" | ":")) ~ identifier ~ "(" ~ atoms <~ ")" ^^ {
case Some(subject) ~ funct ~ lParen ~ trs => Structure(funct, trs) // No signing needed for functional operators
case None ~ funct ~ lParen ~ trs => Structure(funct, trs)
}
// override may be allowed indirectly from a higher layer (slang)
lazy val overrideDefaultTerm = ".." ~> (overrideOperatorTerm | symbolTerm) ^^ {case x => x}
lazy val structureTerm: PackratParser[Term] = opt((singleQuotedString | symbol | "?" | "_") <~ typeDelimiter) ~ opt(opt(typeDelimiter) ~ (symbol | singleQuotedString) <~ (attrMapIndexDelimiter | attrMapDelimiter)) ~ (overrideDefaultTerm | operatorTerm | symbolTerm) ^^ {
// For Structure, attrMapIndexDelimiter does not matter since all predicates are indexed by default
case None ~ None ~ Structure(funct, trs, _, _, _) =>
Structure(funct, trs, StrLit("nil"), termType)
case None ~ Some(None ~ attrName) ~ Structure(funct, trs, _, _, _) =>
Structure(funct, trs, StrLit(attrName.toString), termType)
case None ~ Some(dlm ~ tpe) ~ Structure(funct, trs, _, _, _) =>
Structure(funct, trs, StrLit("nil"), StrLit(tpe.toString))
case Some(attrName) ~ None ~ Structure(funct, trs, _, _, _) =>
Structure(funct, trs, StrLit(attrName.toString), termType)
case Some(attrName) ~ Some(dlm ~ tpe) ~ Structure(funct, trs, _, _, _) =>
Structure(funct, trs, StrLit(attrName.toString), StrLit(tpe.toString))
}
lazy val variable: PackratParser[Term] = (localVariable | globalVariable)
lazy val localVariable: PackratParser[Term] = variablePattern ^^ {v => Variable(v)}
lazy val globalVariable: PackratParser[Term] = "$" ~> symbol <~ opt("(" ~ ")") ^^ {v => Variable("$" + v)}
lazy val constant: PackratParser[Term] = (doubleQuotedString | numeric | singleQuotedString | constantString)
lazy val constantString: PackratParser[Term] = not("""end$""".r) ~> symbol ^^ {
case sym => Constant(sym)
}
lazy val doubleQuotedString: PackratParser[Term] = tripleDoubleQuotedString | doubleQuotedStringWithEscapeDelimitedMayBe
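  // A double-quoted string may interpolate $Var / ?Var references; when any are present the
  // literal is turned into an _interpolate structure, otherwise it stays a plain constant.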
private def parseDoubleQuotedString(str: String): Term = {
var _mutableStr = str
anyVariablePattern.findAllIn(str).matchData.map {
case m if !m.group(2).isEmpty =>
val enclosedVarMayBe = s"${m.group(1)}${m.group(2)}"
_mutableStr = _mutableStr.replace(s"${m.group(1)}(${m.group(2)})", enclosedVarMayBe) // NOTE: m.group(0) would not work. e.g., pattern hello($World) will match the right most paren
Variable(enclosedVarMayBe)
}.toSeq match {
case Nil => Constant(StrLit(Term.stripQuotes(_mutableStr.toString)), StrLit("nil"), StrLit("StrLit"), Encoding.AttrLiteral)
case varSeq =>
Structure(StrLit("_interpolate")
, Constant(Term.stripQuotes(_mutableStr.toString))
+: Constant(varSeq.mkString(","))
+: varSeq
)
}
}
lazy val domainTerm: PackratParser[Term] = ("dn" ~ "|") ~> repsep(numeric | singleQuotedString | constantString, ".") ~ opt(".") ^^ {
case cTerms ~ Some(root) => Structure(StrLit("_seq"), Constant(".") +: cTerms.reverse, termIndex, StrLit("Dn"))
case cTerms ~ None => Structure(StrLit("_seq"), Constant(".") +: cTerms.reverse, termIndex, StrLit("Dn"))
}
  lazy val doubleQuotedStringWithEscapeDelimitedMayBe: PackratParser[Term] = opt(symbol) ~ "\"" ~ """([^"\\]*(?:\\.[^"\\]*)*)""".r <~ "\"" ^^ {
case Some(tpe) ~ lquote ~ str if (tpe == "r" | tpe == "regex") =>
Variable(s"^$str")
case Some(tpe) ~ lquote ~ str => parseDomainTerm(s"$tpe|$str")
case None ~ lquote ~ str => parseDoubleQuotedString(str)
}
  lazy val tripleDoubleQuotedString: PackratParser[Term] = opt(symbol) ~ "\"\"\"" ~ """(((?s)(?!\"\"\").)*)""".r <~ "\"\"\"" ^^ {
case Some(tpe) ~ lquote ~ str if (tpe == "r" | tpe == "regex") =>
Variable(s"^$str")
case Some(tpe) ~ lquote ~ str =>
throw ParserException(s"Prefix type not recognized: $tpe")
case None ~ lquote ~ str => parseDoubleQuotedString(str)
}
lazy val singleQuotedString: PackratParser[Term] = tripleQuotedString | singleQuotedStringWithEscapeDelimitedMayBe
  // ((?!''')(.|\n))* --- negative lookahead is very expensive, resulting in stack overflow
  lazy val tripleQuotedString: PackratParser[Term] = opt(symbol) ~ "'''" ~ """(((?s)(?!''').)*)""".r <~ "'''" ^^ {
    case None ~ lquote ~ str => Constant(StrLit(str.replaceAll("""\\'""", "'")), StrLit("nil"), StrLit("StrLit"), Encoding.AttrLiteral)
    case Some(tpe) ~ lquote ~ str => tpe match {
      case "u" => Constant(StrLit(str.replaceAll("""\\'""", "'")), StrLit("nil"), StrLit("StrLit"), Encoding.AttrBase64)
case "h" =>
val hex = try {java.lang.Long.parseLong(str, 16)} catch {
case ex: NumberFormatException => throw NumericException(s"Invalid input for hex: $str")
}
Constant(StrLit(hex.toString), StrLit("nil"), StrLit("StrLit"), Encoding.AttrHex)
case _ => throw ParserException(s"Unknown encoding detected: $tpe")
}
}
  // ([^'\\]*(?:\\.[^'\\]*)*) match anything other than ' or \; followed by \anything and then not ' or \
  lazy val singleQuotedStringWithEscapeDelimitedMayBe: PackratParser[Term] = opt(symbol) ~ "'" ~ """([^'\\]*(?:\\.[^'\\]*)*)""".r <~ "'" ^^ {
    case None ~ lquote ~ str => Constant(StrLit(str.replaceAll("""\\'""", "'")), StrLit("nil"), StrLit("StrLit"), Encoding.AttrLiteral) // """[^']""".r good enough?
    case Some(tpe) ~ lquote ~ str => tpe match {
      case "u" => Constant(StrLit(str.replaceAll("""\\'""", "'")), StrLit("nil"), StrLit("StrLit"), Encoding.AttrBase64)
case "h" =>
val hex = try {java.lang.Long.parseLong(str, 16)} catch {
case ex: NumberFormatException => throw NumericException(s"Invalid input for hex: $str")
}
Constant(StrLit(hex.toString), StrLit("nil"), StrLit("StrLit"), Encoding.AttrHex)
case _ => throw ParserException(s"Unknown encoding detected: $tpe")
}
}
lazy val numeric = (
float // 32 bit
| double // 64 bit
| hexInteger // 32 bit (same as long)
| bigInteger // string
| long // 64 bit
| doubleInteger // 64 bit
| floatInteger // 32 bit
| short // 16 bit (same as char)
| byte // 8 bit
| integer // 32 bit
)
  lazy val float: PackratParser[Term] = """-?(\d+\.\d+)([eE][+-]?\d+)?""".r <~ """[fF]+""".r ^^ { c => Constant(c, StrLit("nil"), StrLit("Float")) }
  lazy val double: PackratParser[Term] = """-?(\d+\.\d+)""".r <~ opt("""[dD]?""".r) ^^ { c => Constant(c, StrLit("nil"), StrLit("Double")) } // restriction: .1 and 3. are not valid
  lazy val hexInteger: PackratParser[Term] = """\-?0x[\da-fA-f]+""".r ^^ { c =>
val hex = try {java.lang.Long.parseLong(c.substring(2), 16)} catch {
case ex: NumberFormatException => throw NumericException(s"Invalid input for hex: $c")
}
Constant(StrLit(hex.toString), StrLit("nil"), StrLit("StrLit"), Encoding.AttrHex)
}
lazy val bigInteger: PackratParser[Term] = wholeNumber <~ """[zZ]""".r ^^ { c => Constant(c, StrLit("nil"), StrLit("BigInt")) }
lazy val long: PackratParser[Term] = wholeNumber <~ """[lL]""".r ^^ { c => Constant(c, StrLit("nil"), StrLit("Long")) }
lazy val doubleInteger: PackratParser[Term] = wholeNumber <~ """[dD]""".r ^^ { c => Constant(c, StrLit("nil"), StrLit("Double")) }
lazy val floatInteger: PackratParser[Term] = wholeNumber <~ """[fF]""".r ^^ { c => Constant(c, StrLit("nil"), StrLit("Float")) }
lazy val short: PackratParser[Term] = wholeNumber <~ """[sS]""".r ^^ { c =>
try { c.toShort } catch {
case ex: NumberFormatException => throw NumericException(s"Invalid input for short: $c: ")
}
Constant(c, StrLit("nil"), StrLit("Short"))
}
lazy val byte: PackratParser[Term] = wholeNumber <~ """[bB]""".r ^^ { c =>
try { c.toByte } catch {
case ex: NumberFormatException => throw NumericException(s"Invalid input for byte: $c")
}
Constant(c, StrLit("nil"), StrLit("Byte"))
}
lazy val integer: PackratParser[Term] = wholeNumber ^^ {c => Constant(c, StrLit("nil"), StrLit("Int"))}
/**
* source is provided by slang program
*/
def parseSlog(source: String): ParseResult[MutableCache[Index, MutableSet[Statement]]] = {
parseAll(program, source)
}
private[safelog] def parseDomainTerm(source: String): Term = {
val res: Term = parseAll(domainTerm, source) match {
case Success(_result, _) => _result
case failure: NoSuccess => throw ParserException(s"Parse error: ${failure}")
}
res
}
override def parse(source: String): Map[Index, Set[Statement]] = {
val res: Map[Index, Set[Statement]] = parseAll(program, source) match {
case Success(_result, _) =>
_result.map { kv => (kv._1, kv._2.toSet)}.toMap
case failure: NoSuccess => throw ParserException(s"${failure.msg}")
}
res
}
override def parse(source: java.io.Reader): Map[Index, Set[Statement]] = {
val res: Map[Index, Set[Statement]] = parseAll(program, source) match {
case Success(_result, _) =>
_result.map { kv => (kv._1, kv._2.toSet)}.toMap
case failure: NoSuccess => throw ParserException(s"${failure.msg}")
}
res
}
override def parseFile(fileName: String): Map[Index, Set[Statement]] = {
val source = new java.io.BufferedReader(new java.io.FileReader(fileName))
val res = parse(source)
source.close()
res
}
/*
def setSays(value: Boolean): Unit = {
_saysOperator = value
}
def setSpeaker(speaker: String): Unit = {
_self = speaker
}
def clearContextCached(): Unit = { // scala bug? seems clearContext() is defined somewhere in predef
_statementCache.clear()
}
*/
override def parseFileFresh(speaker: String, fileName: String): Map[Index, Set[Statement]] = {
//setSays(true)
//setSpeaker(speaker)
//clearContextCached()
val source = new java.io.BufferedReader(new java.io.FileReader(fileName))
val res = parse(source)
source.close()
res
}
private def parseAsSegmentsHelper(
result: ParseResult[MutableCache[Index, MutableSet[Statement]]]
): Tuple4[Map[Index, Set[Statement]], Seq[Statement], Seq[Statement], Seq[Statement]] = result match {
case Success(_result, _) =>
val importSeq = _result.get(StrLit("_import")).getOrElse(Nil).toSeq
_result -= StrLit("_import")
val querySeq = _result.get(StrLit("_query")).getOrElse(Nil).toSeq
_result -= StrLit("_query")
val retractionSeq = _result.get(StrLit("_retraction")).getOrElse(Nil).toSeq
_result -= StrLit("_retraction")
Tuple4(_result.map {kv => (kv._1, kv._2.toSet)}.toMap, querySeq, importSeq, retractionSeq)
case failure: NoSuccess => throw ParserException(s"${failure.msg}")
}
override def parseAsSegments(source: String): Tuple4[Map[Index, Set[Statement]], Seq[Statement], Seq[Statement], Seq[Statement]] = {
parseAsSegmentsHelper(parseAll(program, source))
}
override def parseAsSegments(source: java.io.Reader): Tuple4[Map[Index, Set[Statement]], Seq[Statement], Seq[Statement], Seq[Statement]] = {
parseAsSegmentsHelper(parseAll(program, source))
}
override def parseFileAsSegments(fileName: String): Tuple4[Map[Index, Set[Statement]], Seq[Statement], Seq[Statement], Seq[Statement]] = {
val source = new java.io.BufferedReader(new java.io.FileReader(fileName))
val res = parseAsSegments(source)
source.close()
res
}
  lazy val startsWithComment = """(\s*//.*)+$""".r   // line starts with a comment
  lazy val pasteMode = """(\s*p(aste)?(\(\))?\.)\s*$""".r   // enter paste mode: p. | paste. | p(). | paste().
  lazy val endOfSource = """(.*)([.?~]|end)\s*(//.*)?$""".r
  lazy val quit = """(\s*q(uit)?\s*(\(\))?\s*[.?]+\s*$)""".r   // q. | quit. | q(). | quit(). | q? | quit?
  lazy val pasteQuit = """(.*)(\.q(uit)?\s*(\(\))?\s*[.?]+\s*$)""".r   // <expr>.q. | <expr>.quit. etc. -- quit (and flush) paste mode
var _isPasteMode = false
private def handleCmdLine(source: String): Tuple2[Option[MutableCache[Index, MutableSet[Statement]]], Symbol] = {
endOfSource.findFirstIn(source) match {
case Some(_) => try {
parseAll(program, source) match {
case Success(result, _) =>
(Some(result), 'success)
case Failure(msg, _) =>
logger.error("Parse error: " + msg)
(None, 'failure)
case Error(msg, _) =>
logger.error("Parse error: " + msg)
(None, 'error)
}
} catch {
case ex: ParserException =>
logger.error("Parse error: " + ex)
(None, 'error)
case ex: NumericException =>
logger.error("Parse error: " + ex)
(None, 'error)
}
case None => (None, 'continuation)
}
}
override def parseCmdLine(source: String): Tuple2[Option[MutableCache[Index, MutableSet[Statement]]], Symbol] = source match {
case startsWithComment(_) => (None, 'comment)
case pasteMode(_, _, _) =>
_isPasteMode = true
(None, 'paste)
case quit(_, _, _) => (None, 'quit)
case pasteQuit(_, _, _, _) if(_isPasteMode == false) => (None, 'quit)
case pasteQuit(src, _, _, _) if(_isPasteMode == true) =>
_isPasteMode = false
handleCmdLine(s"$src.")
case _ if(_isPasteMode) => (None, 'continuation)
case _ => handleCmdLine(source)
}
}
| wowmsi/safe | safe-logic/src/main/scala/safe/safelog/parser/ParserImpl.scala | Scala | apache-2.0 | 38,105 |
/*
* Wire
* Copyright (C) 2016 Wire Swiss GmbH
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package com.waz.model2.transport
import com.waz.model._
import com.waz.utils.JsonDecoder
import org.json.JSONObject
case class Team(
id: TeamId,
name: Name,
creator: UserId,
icon: Option[RAssetId] = None,
iconKey: Option[AESKey] = None,
binding: Boolean
)
object Team {
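  // Shape of the payload the decoder below expects (illustration only; field names taken from the
  // decoder): {"id": ..., "name": ..., "creator": ..., "icon": ..., "icon_key": ..., "binding": true}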
implicit val TeamBDecoder: JsonDecoder[Team] = new JsonDecoder[Team] {
override def apply(implicit js: JSONObject): Team = {
import JsonDecoder._
Team(
'id,
'name,
'creator,
'icon,
decodeOptString('icon_key).map(AESKey),
decodeBool('binding)
)
}
}
}
| wireapp/wire-android-sync-engine | zmessaging/src/main/scala/com/waz/model2/transport/Team.scala | Scala | gpl-3.0 | 1,327 |
import com.google.inject.{Guice, AbstractModule}
import play.api.GlobalSettings
/**
* Set up the Guice injector and provide the mechanism for return objects from the dependency graph.
*/
object Global extends GlobalSettings {
/**
* Bind types such that whenever UUIDGenerator is required, an instance of SimpleUUIDGenerator will be used.
*/
val injector = Guice.createInjector(new AbstractModule {
protected def configure() {
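      // Bindings would be declared here, e.g. (illustrative, not part of the original file):
      // bind(classOf[UUIDGenerator]).to(classOf[SimpleUUIDGenerator])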
}
})
/**
* Controllers must be resolved through the application context. There is a special method of GlobalSettings
* that we can override to resolve a given controller. This resolution is required by the Play router.
*/
override def getControllerInstance[A](controllerClass: Class[A]): A = injector.getInstance(controllerClass)
}
| franciscodr/what-are-we-talking-about | app/Global.scala | Scala | apache-2.0 | 796 |
/*
* Copyright (c) 2015 Snowplow Analytics Ltd. All rights reserved.
*
* This program is licensed to you under the Apache License Version 2.0,
* and you may not use this file except in compliance with the Apache License Version 2.0.
* You may obtain a copy of the Apache License Version 2.0 at http://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the Apache License Version 2.0 is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the Apache License Version 2.0 for the specific language governing permissions and limitations there under.
*/
package com.snowplowanalytics.schemaguru
package generators
// json4s
import org.json4s._
// Testing
import org.specs2.Specification
// This library
import schema.types._
import schema.Helpers.SchemaContext
class MergeSpec extends Specification { def is = s2"""
Check integer merge
maintain all types in array $maintainTypesInArray
merge two instances $mergeMinimumValuesForInt32
merge integer with number must result in number $mergeIntegerWithNumber
merge two distinct string formats $mergeDistinctFormats
merge strings with and without format $mergeStringWithFormatAndWithout
merge two different types produce product $mergeTwoDifferentTypes
reduce properties for product types $reducePropertiesForProductType
merge strings with maxLengths $mergeStringsWithMaxLengths
merge strings with minLengths $mergeStringsWithMinLengths
merge product types with maxLengths $mergeProductTypeWithMaxLengths
"""
implicit val formats = DefaultFormats
implicit val ctx = SchemaContext(0)
val StringS = StringSchema()
val IntegerS = IntegerSchema()
val StringWithLengths = StringSchema(minLength = Some(3), maxLength = Some(10))
val StringWithLengths2 = StringSchema(minLength = Some(5), maxLength = Some(8))
val schemaWithInt16 = ObjectSchema(Map("test_key" -> IntegerSchema(Some(-2), Some(3))))
val schemaWithInt32 = ObjectSchema(Map("test_key" -> IntegerSchema(Some(-34000), Some(3))))
val schemaWithNumber = ObjectSchema(Map("test_key" -> NumberSchema(Some(-34000), Some(3.3))))
val schemaWithUuid = ObjectSchema(Map("test_key" -> StringSchema(format = Some("uuid"))))
val schemaWithDateTime = ObjectSchema(Map("test_key" -> StringSchema(format = Some("date-time"))))
val schemaWithoutFormat = ObjectSchema(Map("test_key" -> StringSchema()))
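  // Each fixture above wraps a single "test_key" property in an ObjectSchema, so every merge below
  // exercises exactly one rule (widening int16 to int32, dropping conflicting string formats, etc.).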
def maintainTypesInArray =
StringS.merge(IntegerS) must beEqualTo(ProductSchema(stringSchema = Some(StringS), integerSchema = Some(IntegerS)))
def mergeMinimumValuesForInt32 = {
val merged = schemaWithInt16.merge(schemaWithInt32).toJson
(merged \\ "properties" \\ "test_key" \\ "minimum").extract[BigInt] must beEqualTo(-34000)
}
def mergeIntegerWithNumber = {
val merged = schemaWithInt32.merge(schemaWithNumber).toJson
(merged \\ "properties" \\ "test_key" \\ "type").extract[String] must beEqualTo("number")
}
def mergeDistinctFormats = {
val merged = schemaWithUuid.merge(schemaWithDateTime).toJson
(merged \\ "properties" \\ "test_key" \\ "format").extract[Option[String]] must beNone
}
def mergeStringWithFormatAndWithout = {
val merged = schemaWithoutFormat.merge(schemaWithDateTime).toJson
(merged \\ "properties" \\ "test_key" \\ "format").extract[Option[String]] must beNone
}
def mergeTwoDifferentTypes = {
val merged = schemaWithDateTime.merge(schemaWithInt16).toJson
(merged \\ "properties" \\ "test_key" \\ "type").extract[List[String]].sorted must beEqualTo(List("integer", "string"))
}
def reducePropertiesForProductType = {
val merged = schemaWithDateTime.merge(schemaWithInt16).toJson
(merged \\ "properties" \\ "test_key" \\ "format").extract[String] mustEqual("date-time")
}
def mergeStringsWithMaxLengths = {
val merged = StringWithLengths.merge(StringWithLengths2).toJson
(merged \\ "maxLength").extract[Int] mustEqual(10)
}
def mergeStringsWithMinLengths = {
val merged = StringWithLengths2.merge(StringWithLengths).toJson
(merged \\ "minLength").extract[Int] mustEqual(3)
}
def mergeProductTypeWithMaxLengths = {
val merged = IntegerS.merge(StringWithLengths2.merge(StringWithLengths)).toJson
(merged \\ "maxLength").extract[Int] mustEqual(10)
}
}
| snowplow/schema-guru | src/test/scala/MergeSpec.scala | Scala | apache-2.0 | 4,595 |
/**
* Copyright 2013, 2014 Gianluca Amato <[email protected]>
*
* This file is part of JANDOM: JVM-based Analyzer for Numerical DOMains
* JANDOM is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* JANDOM is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with JANDOM. If not, see <http://www.gnu.org/licenses/>.
*/
package it.unich.jandom.targets.lts
import it.unich.jandom.targets.NumericCondition
/**
* This is a region in an LTS. A region is a named pair made of an optional location and a numeric condition.
 * It may be used to keep various kinds of information, such as starting conditions, bad nodes, etc.
* At the moment, it is not used for the analysis.
* @author Gianluca Amato <[email protected]>
*/
case class Region(val name: String, val state: Option[Location], val condition: NumericCondition) {
def mkString(vars: Seq[String]) = if (state.isEmpty)
s"region ${name} with ( ${condition.mkString(vars)} );"
else
s"region ${name} on state = ${state.get.name} with ( ${condition.mkString(vars)} );"
override def toString = mkString(Stream.from(0).map { "v" + _ })
}
| francescaScozzari/Jandom | core/src/main/scala/it/unich/jandom/targets/lts/Region.scala | Scala | lgpl-3.0 | 1,545 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.consumer
import java.util.concurrent._
import java.util.concurrent.atomic._
import locks.ReentrantLock
import collection._
import kafka.cluster._
import kafka.utils._
import org.I0Itec.zkclient.exception.ZkNodeExistsException
import java.net.InetAddress
import org.I0Itec.zkclient.{IZkStateListener, IZkChildListener, ZkClient}
import org.apache.zookeeper.Watcher.Event.KeeperState
import java.util.UUID
import kafka.serializer._
import kafka.utils.ZkUtils._
import kafka.common._
import kafka.client.ClientUtils
import com.yammer.metrics.core.Gauge
import kafka.metrics._
import scala.Some
/**
 * This class handles the consumer's interaction with zookeeper
*
* Directories:
* 1. Consumer id registry:
 *   /consumers/[group_id]/ids/[consumer_id] -> topic1,...topicN
* A consumer has a unique consumer id within a consumer group. A consumer registers its id as an ephemeral znode
* and puts all topics that it subscribes to as the value of the znode. The znode is deleted when the client is gone.
* A consumer subscribes to event changes of the consumer id registry within its group.
*
* The consumer id is picked up from configuration, instead of the sequential id assigned by ZK. Generated sequential
* ids are hard to recover during temporary connection loss to ZK, since it's difficult for the client to figure out
* whether the creation of a sequential znode has succeeded or not. More details can be found at
* (http://wiki.apache.org/hadoop/ZooKeeper/ErrorHandling)
*
* 2. Broker node registry:
* /brokers/[0...N] --> { "host" : "host:port",
* "topics" : {"topic1": ["partition1" ... "partitionN"], ...,
* "topicN": ["partition1" ... "partitionN"] } }
 * This is a list of all present brokers. A unique logical node id is configured on each broker node. A broker
* node registers itself on start-up and creates a znode with the logical node id under /brokers. The value of the znode
* is a JSON String that contains (1) the host name and the port the broker is listening to, (2) a list of topics that
* the broker serves, (3) a list of logical partitions assigned to each topic on the broker.
* A consumer subscribes to event changes of the broker node registry.
*
* 3. Partition owner registry:
* /consumers/[group_id]/owner/[topic]/[broker_id-partition_id] --> consumer_node_id
 * This stores the mapping between broker partitions and consumers. Each partition is owned by a unique consumer
* within a consumer group. The mapping is reestablished after each rebalancing.
*
* 4. Consumer offset tracking:
* /consumers/[group_id]/offsets/[topic]/[broker_id-partition_id] --> offset_counter_value
* Each consumer tracks the offset of the latest message consumed for each partition.
*
*/
private[kafka] object ZookeeperConsumerConnector {
val shutdownCommand: FetchedDataChunk = new FetchedDataChunk(null, null, -1L)
}
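// Usage sketch (added illustration, not from the original source): clients typically obtain this
// connector via kafka.consumer.Consumer.create(new ConsumerConfig(props)), call
// createMessageStreams(Map("my-topic" -> numThreads)) to get KafkaStream iterators, and finally
// call shutdown(), which commits offsets (when auto-commit is enabled), releases ZK ownership
// znodes and stops the fetcher threads.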
private[kafka] class ZookeeperConsumerConnector(val config: ConsumerConfig,
val enableFetcher: Boolean) // for testing only
extends ConsumerConnector with Logging with KafkaMetricsGroup {
private val isShuttingDown = new AtomicBoolean(false)
private val rebalanceLock = new Object
private var fetcher: Option[ConsumerFetcherManager] = None
private var zkClient: ZkClient = null
private var topicRegistry = new Pool[String, Pool[Int, PartitionTopicInfo]]
private val topicThreadIdAndQueues = new Pool[(String,String), BlockingQueue[FetchedDataChunk]]
private val scheduler = new KafkaScheduler(1)
private val messageStreamCreated = new AtomicBoolean(false)
private var sessionExpirationListener: ZKSessionExpireListener = null
private var loadBalancerListener: ZKRebalancerListener = null
private var wildcardTopicWatcher: ZookeeperTopicEventWatcher = null
val consumerIdString = {
var consumerUuid : String = null
config.consumerId match {
case Some(consumerId) // for testing only
=> consumerUuid = consumerId
case None // generate unique consumerId automatically
=> val uuid = UUID.randomUUID()
consumerUuid = "%s-%d-%s".format(
InetAddress.getLocalHost.getHostName, System.currentTimeMillis,
uuid.getMostSignificantBits().toHexString.substring(0,8))
}
config.groupId + "_" + consumerUuid
}
this.logIdent = "[" + consumerIdString + "], "
connectZk()
createFetcher()
if (config.autoCommitEnable) {
scheduler.startup
info("starting auto committer every " + config.autoCommitIntervalMs + " ms")
scheduler.scheduleWithRate(autoCommit, "Kafka-consumer-autocommit-", config.autoCommitIntervalMs,
config.autoCommitIntervalMs, false)
}
KafkaMetricsReporter.startReporters(config.props)
def this(config: ConsumerConfig) = this(config, true)
def createMessageStreams(topicCountMap: Map[String,Int]): Map[String, List[KafkaStream[Array[Byte],Array[Byte]]]] =
createMessageStreams(topicCountMap, new DefaultDecoder(), new DefaultDecoder())
def createMessageStreams[K,V](topicCountMap: Map[String,Int], keyDecoder: Decoder[K], valueDecoder: Decoder[V])
: Map[String, List[KafkaStream[K,V]]] = {
if (messageStreamCreated.getAndSet(true))
throw new RuntimeException(this.getClass.getSimpleName +
" can create message streams at most once")
consume(topicCountMap, keyDecoder, valueDecoder)
}
def createMessageStreamsByFilter[K,V](topicFilter: TopicFilter,
numStreams: Int,
keyDecoder: Decoder[K] = new DefaultDecoder(),
valueDecoder: Decoder[V] = new DefaultDecoder()) = {
val wildcardStreamsHandler = new WildcardStreamsHandler[K,V](topicFilter, numStreams, keyDecoder, valueDecoder)
wildcardStreamsHandler.streams
}
private def createFetcher() {
if (enableFetcher)
fetcher = Some(new ConsumerFetcherManager(consumerIdString, config, zkClient))
}
private def connectZk() {
info("Connecting to zookeeper instance at " + config.zkConnect)
zkClient = new ZkClient(config.zkConnect, config.zkSessionTimeoutMs, config.zkConnectionTimeoutMs, ZKStringSerializer)
}
def shutdown() {
val canShutdown = isShuttingDown.compareAndSet(false, true);
if (canShutdown) {
info("ZKConsumerConnector shutting down")
if (wildcardTopicWatcher != null)
wildcardTopicWatcher.shutdown()
try {
if (config.autoCommitEnable)
scheduler.shutdownNow()
fetcher match {
case Some(f) => f.stopConnections
case None =>
}
sendShutdownToAllQueues()
if (config.autoCommitEnable)
commitOffsets()
if (zkClient != null) {
zkClient.close()
zkClient = null
}
} catch {
case e =>
fatal("error during consumer connector shutdown", e)
}
info("ZKConsumerConnector shut down completed")
}
}
def consume[K, V](topicCountMap: scala.collection.Map[String,Int], keyDecoder: Decoder[K], valueDecoder: Decoder[V])
: Map[String,List[KafkaStream[K,V]]] = {
debug("entering consume ")
if (topicCountMap == null)
throw new RuntimeException("topicCountMap is null")
val topicCount = TopicCount.constructTopicCount(consumerIdString, topicCountMap)
val topicThreadIds = topicCount.getConsumerThreadIdsPerTopic
// make a list of (queue,stream) pairs, one pair for each threadId
val queuesAndStreams = topicThreadIds.values.map(threadIdSet =>
threadIdSet.map(_ => {
val queue = new LinkedBlockingQueue[FetchedDataChunk](config.queuedMaxMessages)
val stream = new KafkaStream[K,V](
queue, config.consumerTimeoutMs, keyDecoder, valueDecoder, config.clientId)
(queue, stream)
})
).flatten.toList
val dirs = new ZKGroupDirs(config.groupId)
registerConsumerInZK(dirs, consumerIdString, topicCount)
reinitializeConsumer(topicCount, queuesAndStreams)
loadBalancerListener.kafkaMessageAndMetadataStreams.asInstanceOf[Map[String, List[KafkaStream[K,V]]]]
}
// this API is used by unit tests only
def getTopicRegistry: Pool[String, Pool[Int, PartitionTopicInfo]] = topicRegistry
private def registerConsumerInZK(dirs: ZKGroupDirs, consumerIdString: String, topicCount: TopicCount) = {
info("begin registering consumer " + consumerIdString + " in ZK")
val consumerRegistrationInfo =
Utils.mergeJsonFields(Utils.mapToJsonFields(Map("version" -> 1.toString, "subscription" -> topicCount.dbString), valueInQuotes = false)
++ Utils.mapToJsonFields(Map("pattern" -> topicCount.pattern), valueInQuotes = true))
createEphemeralPathExpectConflict(zkClient, dirs.consumerRegistryDir + "/" + consumerIdString, consumerRegistrationInfo)
info("end registering consumer " + consumerIdString + " in ZK")
}
private def sendShutdownToAllQueues() = {
for (queue <- topicThreadIdAndQueues.values) {
debug("Clearing up queue")
queue.clear()
queue.put(ZookeeperConsumerConnector.shutdownCommand)
debug("Cleared queue and sent shutdown command")
}
}
def autoCommit() {
trace("auto committing")
try {
commitOffsets()
}
catch {
case t: Throwable =>
// log it and let it go
error("exception during autoCommit: ", t)
}
}
def commitOffsets() {
if (zkClient == null) {
error("zk client is null. Cannot commit offsets")
return
}
for ((topic, infos) <- topicRegistry) {
val topicDirs = new ZKGroupTopicDirs(config.groupId, topic)
for (info <- infos.values) {
val newOffset = info.getConsumeOffset
try {
updatePersistentPath(zkClient, topicDirs.consumerOffsetDir + "/" + info.partitionId,
newOffset.toString)
} catch {
case t: Throwable =>
// log it and let it go
warn("exception during commitOffsets", t)
}
debug("Committed offset " + newOffset + " for topic " + info)
}
}
}
class ZKSessionExpireListener(val dirs: ZKGroupDirs,
val consumerIdString: String,
val topicCount: TopicCount,
val loadBalancerListener: ZKRebalancerListener)
extends IZkStateListener {
@throws(classOf[Exception])
def handleStateChanged(state: KeeperState) {
// do nothing, since zkclient will do reconnect for us.
}
/**
* Called after the zookeeper session has expired and a new session has been created. You would have to re-create
* any ephemeral nodes here.
*
* @throws Exception
* On any error.
*/
@throws(classOf[Exception])
def handleNewSession() {
/**
* When we get a SessionExpired event, we lost all ephemeral nodes and zkclient has reestablished a
* connection for us. We need to release the ownership of the current consumer and re-register this
* consumer in the consumer registry and trigger a rebalance.
*/
info("ZK expired; release old broker parition ownership; re-register consumer " + consumerIdString)
loadBalancerListener.resetState()
registerConsumerInZK(dirs, consumerIdString, topicCount)
// explicitly trigger load balancing for this consumer
loadBalancerListener.syncedRebalance()
// There is no need to resubscribe to child and state changes.
// The child change watchers will be set inside rebalance when we read the children list.
}
}
class ZKRebalancerListener(val group: String, val consumerIdString: String,
val kafkaMessageAndMetadataStreams: mutable.Map[String,List[KafkaStream[_,_]]])
extends IZkChildListener {
private val correlationId = new AtomicInteger(0)
private var isWatcherTriggered = false
private val lock = new ReentrantLock
private val cond = lock.newCondition()
private val watcherExecutorThread = new Thread(consumerIdString + "_watcher_executor") {
override def run() {
info("starting watcher executor thread for consumer " + consumerIdString)
var doRebalance = false
while (!isShuttingDown.get) {
try {
lock.lock()
try {
if (!isWatcherTriggered)
cond.await(1000, TimeUnit.MILLISECONDS) // wake up periodically so that it can check the shutdown flag
} finally {
doRebalance = isWatcherTriggered
isWatcherTriggered = false
lock.unlock()
}
if (doRebalance)
syncedRebalance
} catch {
case t => error("error during syncedRebalance", t)
}
}
info("stopping watcher executor thread for consumer " + consumerIdString)
}
}
watcherExecutorThread.start()
@throws(classOf[Exception])
def handleChildChange(parentPath : String, curChilds : java.util.List[String]) {
lock.lock()
try {
isWatcherTriggered = true
cond.signalAll()
} finally {
lock.unlock()
}
}
private def deletePartitionOwnershipFromZK(topic: String, partition: Int) {
val topicDirs = new ZKGroupTopicDirs(group, topic)
val znode = topicDirs.consumerOwnerDir + "/" + partition
deletePath(zkClient, znode)
debug("Consumer " + consumerIdString + " releasing " + znode)
}
private def releasePartitionOwnership(localTopicRegistry: Pool[String, Pool[Int, PartitionTopicInfo]])= {
info("Releasing partition ownership")
for ((topic, infos) <- localTopicRegistry) {
for(partition <- infos.keys)
deletePartitionOwnershipFromZK(topic, partition)
localTopicRegistry.remove(topic)
}
}
def resetState() {
topicRegistry.clear
}
def syncedRebalance() {
rebalanceLock synchronized {
for (i <- 0 until config.rebalanceMaxRetries) {
info("begin rebalancing consumer " + consumerIdString + " try #" + i)
var done = false
val cluster = getCluster(zkClient)
try {
done = rebalance(cluster)
} catch {
case e =>
/** occasionally, we may hit a ZK exception because the ZK state is changing while we are iterating.
* For example, a ZK node can disappear between the time we get all children and the time we try to get
* the value of a child. Just let this go since another rebalance will be triggered.
**/
info("exception during rebalance ", e)
}
info("end rebalancing consumer " + consumerIdString + " try #" + i)
if (done) {
return
} else {
/* Here the cache is at a risk of being stale. To take future rebalancing decisions correctly, we should
* clear the cache */
info("Rebalancing attempt failed. Clearing the cache before the next rebalancing operation is triggered")
}
// stop all fetchers and clear all the queues to avoid data duplication
closeFetchersForQueues(cluster, kafkaMessageAndMetadataStreams, topicThreadIdAndQueues.map(q => q._2))
Thread.sleep(config.rebalanceBackoffMs)
}
}
throw new ConsumerRebalanceFailedException(consumerIdString + " can't rebalance after " + config.rebalanceMaxRetries +" retries")
}
private def rebalance(cluster: Cluster): Boolean = {
val myTopicThreadIdsMap = TopicCount.constructTopicCount(group, consumerIdString, zkClient).getConsumerThreadIdsPerTopic
val consumersPerTopicMap = getConsumersPerTopic(zkClient, group)
val brokers = getAllBrokersInCluster(zkClient)
val topicsMetadata = ClientUtils.fetchTopicMetadata(myTopicThreadIdsMap.keySet,
brokers,
config.clientId,
config.socketTimeoutMs,
correlationId.getAndIncrement).topicsMetadata
val partitionsPerTopicMap = new mutable.HashMap[String, Seq[Int]]
topicsMetadata.foreach(m => {
val topic = m.topic
val partitions = m.partitionsMetadata.map(m1 => m1.partitionId)
partitionsPerTopicMap.put(topic, partitions)
})
/**
* fetchers must be stopped to avoid data duplication, since if the current
* rebalancing attempt fails, the partitions that are released could be owned by another consumer.
* But if we don't stop the fetchers first, this consumer would continue returning data for released
* partitions in parallel. So, not stopping the fetchers leads to duplicate data.
*/
closeFetchers(cluster, kafkaMessageAndMetadataStreams, myTopicThreadIdsMap)
releasePartitionOwnership(topicRegistry)
var partitionOwnershipDecision = new collection.mutable.HashMap[(String, Int), String]()
val currentTopicRegistry = new Pool[String, Pool[Int, PartitionTopicInfo]]
for ((topic, consumerThreadIdSet) <- myTopicThreadIdsMap) {
currentTopicRegistry.put(topic, new Pool[Int, PartitionTopicInfo])
val topicDirs = new ZKGroupTopicDirs(group, topic)
val curConsumers = consumersPerTopicMap.get(topic).get
val curPartitions: Seq[Int] = partitionsPerTopicMap.get(topic).get
val nPartsPerConsumer = curPartitions.size / curConsumers.size
val nConsumersWithExtraPart = curPartitions.size % curConsumers.size
info("Consumer " + consumerIdString + " rebalancing the following partitions: " + curPartitions +
" for topic " + topic + " with consumers: " + curConsumers)
for (consumerThreadId <- consumerThreadIdSet) {
val myConsumerPosition = curConsumers.findIndexOf(_ == consumerThreadId)
assert(myConsumerPosition >= 0)
val startPart = nPartsPerConsumer*myConsumerPosition + myConsumerPosition.min(nConsumersWithExtraPart)
val nParts = nPartsPerConsumer + (if (myConsumerPosition + 1 > nConsumersWithExtraPart) 0 else 1)
/**
* Range-partition the sorted partitions to consumers for better locality.
* The first few consumers pick up an extra partition, if any.
*/
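          // Worked example (added note): with 5 partitions and 2 consumer threads,
          // nPartsPerConsumer = 2 and nConsumersWithExtraPart = 1, so thread 0 claims
          // partitions 0-2 and thread 1 claims partitions 3-4.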
if (nParts <= 0)
warn("No broker partitions consumed by consumer thread " + consumerThreadId + " for topic " + topic)
else {
for (i <- startPart until startPart + nParts) {
val partition = curPartitions(i)
info(consumerThreadId + " attempting to claim partition " + partition)
addPartitionTopicInfo(currentTopicRegistry, topicDirs, partition, topic, consumerThreadId)
// record the partition ownership decision
partitionOwnershipDecision += ((topic, partition) -> consumerThreadId)
}
}
}
}
/**
* move the partition ownership here, since that can be used to indicate a truly successful rebalancing attempt
* A rebalancing attempt is completed successfully only after the fetchers have been started correctly
*/
if(reflectPartitionOwnershipDecision(partitionOwnershipDecision.toMap)) {
info("Updating the cache")
debug("Partitions per topic cache " + partitionsPerTopicMap)
debug("Consumers per topic cache " + consumersPerTopicMap)
topicRegistry = currentTopicRegistry
updateFetcher(cluster)
true
} else {
false
}
}
private def closeFetchersForQueues(cluster: Cluster,
messageStreams: Map[String,List[KafkaStream[_,_]]],
queuesToBeCleared: Iterable[BlockingQueue[FetchedDataChunk]]) {
val allPartitionInfos = topicRegistry.values.map(p => p.values).flatten
fetcher match {
case Some(f) =>
f.stopConnections
clearFetcherQueues(allPartitionInfos, cluster, queuesToBeCleared, messageStreams)
info("Committing all offsets after clearing the fetcher queues")
/**
* here, we need to commit offsets before stopping the consumer from returning any more messages
* from the current data chunk. Since partition ownership is not yet released, this commit offsets
* call will ensure that the offsets committed now will be used by the next consumer thread owning the partition
* for the current data chunk. Since the fetchers are already shutdown and this is the last chunk to be iterated
* by the consumer, there will be no more messages returned by this iterator until the rebalancing finishes
* successfully and the fetchers restart to fetch more data chunks
**/
if (config.autoCommitEnable)
commitOffsets
case None =>
}
}
private def clearFetcherQueues(topicInfos: Iterable[PartitionTopicInfo], cluster: Cluster,
queuesTobeCleared: Iterable[BlockingQueue[FetchedDataChunk]],
messageStreams: Map[String,List[KafkaStream[_,_]]]) {
// Clear all but the currently iterated upon chunk in the consumer thread's queue
queuesTobeCleared.foreach(_.clear)
info("Cleared all relevant queues for this fetcher")
// Also clear the currently iterated upon chunk in the consumer threads
if(messageStreams != null)
messageStreams.foreach(_._2.foreach(s => s.clear()))
info("Cleared the data chunks in all the consumer message iterators")
}
private def closeFetchers(cluster: Cluster, messageStreams: Map[String,List[KafkaStream[_,_]]],
relevantTopicThreadIdsMap: Map[String, Set[String]]) {
// only clear the fetcher queues for certain topic partitions that *might* no longer be served by this consumer
// after this rebalancing attempt
val queuesTobeCleared = topicThreadIdAndQueues.filter(q => relevantTopicThreadIdsMap.contains(q._1._1)).map(q => q._2)
closeFetchersForQueues(cluster, messageStreams, queuesTobeCleared)
}
private def updateFetcher(cluster: Cluster) {
// update partitions for fetcher
var allPartitionInfos : List[PartitionTopicInfo] = Nil
for (partitionInfos <- topicRegistry.values)
for (partition <- partitionInfos.values)
allPartitionInfos ::= partition
info("Consumer " + consumerIdString + " selected partitions : " +
allPartitionInfos.sortWith((s,t) => s.partitionId < t.partitionId).map(_.toString).mkString(","))
fetcher match {
case Some(f) =>
f.startConnections(allPartitionInfos, cluster)
case None =>
}
}
private def reflectPartitionOwnershipDecision(partitionOwnershipDecision: Map[(String, Int), String]): Boolean = {
var successfullyOwnedPartitions : List[(String, Int)] = Nil
val partitionOwnershipSuccessful = partitionOwnershipDecision.map { partitionOwner =>
val topic = partitionOwner._1._1
val partition = partitionOwner._1._2
val consumerThreadId = partitionOwner._2
val partitionOwnerPath = getConsumerPartitionOwnerPath(group, topic, partition)
try {
createEphemeralPathExpectConflict(zkClient, partitionOwnerPath, consumerThreadId)
info(consumerThreadId + " successfully owned partition " + partition + " for topic " + topic)
successfullyOwnedPartitions ::= (topic, partition)
true
} catch {
case e: ZkNodeExistsException =>
// The node hasn't been deleted by the original owner. So wait a bit and retry.
info("waiting for the partition ownership to be deleted: " + partition)
false
case e2 => throw e2
}
}
val hasPartitionOwnershipFailed = partitionOwnershipSuccessful.foldLeft(0)((sum, decision) => sum + (if(decision) 0 else 1))
/* even if one of the partition ownership attempt has failed, return false */
if(hasPartitionOwnershipFailed > 0) {
// remove all paths that we have owned in ZK
successfullyOwnedPartitions.foreach(topicAndPartition => deletePartitionOwnershipFromZK(topicAndPartition._1, topicAndPartition._2))
false
}
else true
}
private def addPartitionTopicInfo(currentTopicRegistry: Pool[String, Pool[Int, PartitionTopicInfo]],
topicDirs: ZKGroupTopicDirs, partition: Int,
topic: String, consumerThreadId: String) {
val partTopicInfoMap = currentTopicRegistry.get(topic)
val znode = topicDirs.consumerOffsetDir + "/" + partition
val offsetString = readDataMaybeNull(zkClient, znode)._1
// If first time starting a consumer, set the initial offset to -1
val offset =
offsetString match {
case Some(offsetStr) => offsetStr.toLong
case None => PartitionTopicInfo.InvalidOffset
}
val queue = topicThreadIdAndQueues.get((topic, consumerThreadId))
val consumedOffset = new AtomicLong(offset)
val fetchedOffset = new AtomicLong(offset)
val partTopicInfo = new PartitionTopicInfo(topic,
partition,
queue,
consumedOffset,
fetchedOffset,
new AtomicInteger(config.fetchMessageMaxBytes),
config.clientId)
partTopicInfoMap.put(partition, partTopicInfo)
debug(partTopicInfo + " selected new offset " + offset)
}
}
private def reinitializeConsumer[K,V](
topicCount: TopicCount,
queuesAndStreams: List[(LinkedBlockingQueue[FetchedDataChunk],KafkaStream[K,V])]) {
val dirs = new ZKGroupDirs(config.groupId)
// listener to consumer and partition changes
if (loadBalancerListener == null) {
val topicStreamsMap = new mutable.HashMap[String,List[KafkaStream[K,V]]]
loadBalancerListener = new ZKRebalancerListener(
config.groupId, consumerIdString, topicStreamsMap.asInstanceOf[scala.collection.mutable.Map[String, List[KafkaStream[_,_]]]])
}
// register listener for session expired event
if (sessionExpirationListener == null)
sessionExpirationListener = new ZKSessionExpireListener(
dirs, consumerIdString, topicCount, loadBalancerListener)
val topicStreamsMap = loadBalancerListener.kafkaMessageAndMetadataStreams
// map of {topic -> Set(thread-1, thread-2, ...)}
val consumerThreadIdsPerTopic: Map[String, Set[String]] =
topicCount.getConsumerThreadIdsPerTopic
val allQueuesAndStreams = topicCount match {
case wildTopicCount: WildcardTopicCount =>
/*
* Wild-card consumption streams share the same queues, so we need to
* duplicate the list for the subsequent zip operation.
*/
(1 to consumerThreadIdsPerTopic.keySet.size).flatMap(_ => queuesAndStreams).toList
case statTopicCount: StaticTopicCount =>
queuesAndStreams
}
val topicThreadIds = consumerThreadIdsPerTopic.map {
case(topic, threadIds) =>
threadIds.map((topic, _))
}.flatten
require(topicThreadIds.size == allQueuesAndStreams.size,
"Mismatch between thread ID count (%d) and queue count (%d)"
.format(topicThreadIds.size, allQueuesAndStreams.size))
val threadQueueStreamPairs = topicThreadIds.zip(allQueuesAndStreams)
threadQueueStreamPairs.foreach(e => {
val topicThreadId = e._1
val q = e._2._1
topicThreadIdAndQueues.put(topicThreadId, q)
debug("Adding topicThreadId %s and queue %s to topicThreadIdAndQueues data structure".format(topicThreadId, q.toString))
/*newGauge(
config.clientId + "-" + config.groupId + "-" + topicThreadId._1 + "-" + topicThreadId._2 + "-FetchQueueSize",
new Gauge[Int] {
def value = q.size
}
)*/
})
val groupedByTopic = threadQueueStreamPairs.groupBy(_._1._1)
groupedByTopic.foreach(e => {
val topic = e._1
val streams = e._2.map(_._2._2).toList
topicStreamsMap += (topic -> streams)
debug("adding topic %s and %d streams to map.".format(topic, streams.size))
})
// listener to consumer and partition changes
zkClient.subscribeStateChanges(sessionExpirationListener)
zkClient.subscribeChildChanges(dirs.consumerRegistryDir, loadBalancerListener)
topicStreamsMap.foreach { topicAndStreams =>
// register on broker partition path changes
val partitionPath = BrokerTopicsPath + "/" + topicAndStreams._1
zkClient.subscribeChildChanges(partitionPath, loadBalancerListener)
}
// explicitly trigger load balancing for this consumer
loadBalancerListener.syncedRebalance()
}
class WildcardStreamsHandler[K,V](topicFilter: TopicFilter,
numStreams: Int,
keyDecoder: Decoder[K],
valueDecoder: Decoder[V])
extends TopicEventHandler[String] {
if (messageStreamCreated.getAndSet(true))
throw new RuntimeException("Each consumer connector can create " +
"message streams by filter at most once.")
private val wildcardQueuesAndStreams = (1 to numStreams)
.map(e => {
val queue = new LinkedBlockingQueue[FetchedDataChunk](config.queuedMaxMessages)
val stream = new KafkaStream[K,V](queue,
config.consumerTimeoutMs,
keyDecoder,
valueDecoder,
config.clientId)
(queue, stream)
}).toList
// bootstrap with existing topics
private var wildcardTopics =
getChildrenParentMayNotExist(zkClient, BrokerTopicsPath)
.filter(topicFilter.isTopicAllowed)
private val wildcardTopicCount = TopicCount.constructTopicCount(
consumerIdString, topicFilter, numStreams, zkClient)
val dirs = new ZKGroupDirs(config.groupId)
registerConsumerInZK(dirs, consumerIdString, wildcardTopicCount)
reinitializeConsumer(wildcardTopicCount, wildcardQueuesAndStreams)
if (!topicFilter.requiresTopicEventWatcher) {
info("Not creating event watcher for trivial whitelist " + topicFilter)
}
else {
info("Creating topic event watcher for whitelist " + topicFilter)
wildcardTopicWatcher = new ZookeeperTopicEventWatcher(config, this)
/*
* Topic events will trigger subsequent synced rebalances. Also, the
* consumer will get registered only after an allowed topic becomes
* available.
*/
}
def handleTopicEvent(allTopics: Seq[String]) {
debug("Handling topic event")
val updatedTopics = allTopics.filter(topicFilter.isTopicAllowed)
val addedTopics = updatedTopics filterNot (wildcardTopics contains)
if (addedTopics.nonEmpty)
info("Topic event: added topics = %s"
.format(addedTopics))
/*
* TODO: Deleted topics are interesting (and will not be a concern until
* 0.8 release). We may need to remove these topics from the rebalance
* listener's map in reinitializeConsumer.
*/
val deletedTopics = wildcardTopics filterNot (updatedTopics contains)
if (deletedTopics.nonEmpty)
info("Topic event: deleted topics = %s"
.format(deletedTopics))
wildcardTopics = updatedTopics
info("Topics to consume = %s".format(wildcardTopics))
if (addedTopics.nonEmpty || deletedTopics.nonEmpty)
reinitializeConsumer(wildcardTopicCount, wildcardQueuesAndStreams)
}
def streams: Seq[KafkaStream[K,V]] =
wildcardQueuesAndStreams.map(_._2)
}
}
| kavink92/kafka-0.8.0-beta1-src | core/src/main/scala/kafka/consumer/ZookeeperConsumerConnector.scala | Scala | apache-2.0 | 33,296 |
package fpinscala.datastructures
/** Reimplement Scala's List type */
sealed trait List[+A]
case object Nil extends List[Nothing]
case class Cons[+A](head: A, tail: List[A]) extends List[A]
object List {
def apply[A](as: A*): List[A] =
if (as.isEmpty) Nil
else Cons(as.head, apply(as.tail: _*))
/** A total function version where tail(Nil) == Nil */
def tailT[A](l: List[A]): List[A] =
l match {
case Cons(h, t) => t
case _ => l
}
/** Return first element of List - partial function
*
* @note Well known that head of an empty list fails.
* @note Unchecked match used to gag compiler warning.
*
*/
def head[A](l: List[A]): A =
(l: @unchecked) match {
case Cons(h, t) => h
}
/** Return a list with a replaced first element */
def setHead[A](l: List[A], a: A): List[A] =
l match {
case Cons(h, t) => Cons(a, t)
case _ => l
}
/** Drop n elements from beginning of list */
def drop[A](l: List[A], n: Int): List[A] =
if (n < 1)
l
else
l match {
case Cons(h, t) => drop(t, n-1)
case _ => l
}
/** Drop elements from beginning of list while condition true */
def dropWhile[A](l: List[A], f: A => Boolean): List[A] =
l match {
case Cons(h, t) =>
if (f(h))
dropWhile(t, f)
else
l
case _ => l
}
/** Version of dropWhile using a guard */
def dropWhile1[A](l: List[A], f: A => Boolean): List[A] =
l match {
case Cons(h, t) if (f(h)) => dropWhile1(t, f)
case _ => l
}
/** Curried version of dropWhile1 - gives improved type inference */
def dropWhile2[A](l: List[A])(f: A => Boolean): List[A] =
l match {
case Cons(h, t) if (f(h)) => dropWhile2(t)(f)
case _ => l
}
// Exercise 3.6
/** Remove last element in list - not a total function since
* the empty list has no last element to remove.
*
* @note Well known that init is not a total function.
* @note Unchecked match used to gag compiler warning.
   * @note Sensitive to stack overflow.
*/
def init[A](l: List[A]): List[A] =
(l: @unchecked) match {
case Cons(h1, Cons(h2, Nil)) => Cons(h1, Nil)
case Cons(h1, Cons(h2, rest)) => Cons(h1, Cons(h2, init(rest)))
case Cons(h, Nil) => Nil
}
/** Product of a list of doubles */
def productL1(xs: List[Double]) = foldLeft(xs, 1.0)(_ * _)
/** Product of a list of doubles */
def productR1(xs: List[Double]) = foldRightUnsafe(xs, 1.0)(_ * _)
// Exercise 3.7
/** Right fold with short circuit logic
*
* @note Names zero and one based on analogy to a multiplicative group.
*
*/
def foldRightSC[A,B](xs: List[A], zero: A, one: B)(f: (A, B) => B): B =
xs match {
case Nil => one
case Cons(x, rest) if x == zero => f(zero, one)
case Cons(x, rest) => f(x, foldRightSC(rest, zero, one)(f))
}
/** Product of a list of doubles, does not bother processing
* the rest of list if 0.0 is encountered
*/
def productSC(xs: List[Double]) = foldRightSC(xs, 0.0, 1.0)(_ * _)
/** Concatenate strings in List until an empty String is encountered */
def catSC(xs: List[String]) = foldRightSC(xs, "", "")(_ ++ _)
// Exercise 3.9
/** Find length of List */
def length[A](as: List[A]): Int = {
def lenAcc(n: Int)(l: List[A]): Int =
l match {
case Cons(h, rest) => lenAcc(n+1)(rest)
case _ => n
}
lenAcc(0)(as)
}
/** Implement foldRight - not tail recursive, not stack safe */
def foldRightUnsafe[A, B](as: List[A], z: B)(f: (A, B) => B): B =
as match {
case Nil => z
case Cons(x, xs) => f(x, foldRightUnsafe(xs, z)(f))
}
// Exercise 3.10 - Implement tail recursive foldLeft
/** Tail recursive foldLeft - stack safe */
def foldLeft[A, B](as: List[A], z: B)(f: (B, A) => B): B = {
def acc(lt: B, rt: List[A]): B =
rt match {
case Nil => lt
case Cons(x, xs) => acc(f(lt, x), xs)
}
acc(z, as)
}
/** Tail recursive foldLeft - with Short Circuit */
def foldLeftSC[A, B](as: List[A], zero: A, one: B)(f: (B, A) => B): B = {
def acc(lt: B, rt: List[A]): B =
rt match {
case Nil => lt
case Cons(x, xs) if x == zero => f(one, zero)
case Cons(x, xs) => acc(f(lt, x), xs)
}
acc(one, as)
}
// Exercise 3.11 (Partial)
/** Length of List via foldLeft */
def lengthL[A](as: List[A]): Int =
foldLeft(as, 0)( (n, _) => n + 1 )
// Exercise 3.12
/** Reverse the elements of a List */
def reverse[A](as: List[A]): List[A] =
foldLeft(as, Nil: List[A])((as, a) => Cons(a, as))
// Exercise 3.13 - Implement foldRight with foldLefts
/** Tail recursive foldRight - stack safe */
def foldRight[A, B](as: List[A], z: B)(f: (A, B) => B): B =
foldLeft(foldLeft(as, Nil: List[A])((as, a) => Cons(a, as)), z)((b, a) => f(a, b))
  // Exercise 3.14 - Implement append via either foldLeft or foldRight
/** Append 2nd List to First */
def append[A](as: List[A], bs: List[A]): List[A] =
foldRight(as, bs)(Cons(_, _))
// Exercise 3.15 - Implement flatten linearly in length of the list
/** Flatten a List of Lists */
def flatten[A](ass: List[List[A]]): List[A] =
foldRight(ass, Nil: List[A])(append)
// Exercise 3.16
/** Transform a List[Int] by incrementing each element by 1 */
def bump1(l: List[Int]): List[Int] =
foldRight(l, Nil: List[Int])((a, as) => Cons(a + 1, as))
// Exercise 3.17
/** Transform a List[Double] to a List[String] */
def doublesToStrings(l: List[Double]): List[String] =
foldRight(l, Nil: List[String])((d, ds) => Cons(d.toString, ds))
// Exercise 3.18
  /** Modify each element of a list while maintaining
   * the structure of the List
*/
def map[A, B](l: List[A])(f: A => B): List[B] =
foldRight(l, Nil: List[B])((a, bs) => Cons(f(a), bs))
// Exercise 3.19
/** Filter elements from a list based on a predicate */
def filter[A](l: List[A])(f: A => Boolean): List[A] =
foldRight(l, Nil: List[A])(
(a, as) =>
if (f(a))
Cons(a, as)
else
as
)
// Exercise 3.20
/** Implement flatMap for List */
def flatMap[A, B](as: List[A])(f: A => List[B]): List[B] =
flatten(map(as)(f))
// Exercise 3.21
  /** Reimplementation of filter using flatMap */
def filter2[A](as: List[A])(f: A => Boolean): List[A] =
flatMap(as)(
a =>
if (f(a))
List(a)
else
Nil
)
// Exercise 3.22
/** Add corressponding elements of 2 lists of Int's - match up from head */
def addLists(as: List[Int], bs: List[Int]): List[Int] =
(as, bs) match {
case (Cons(a, rest_as), Cons(b, rest_bs)) =>
Cons(a+b, addLists(rest_as, rest_bs))
case _ =>
Nil
}
// Exercise 3.23
/**
* Implements standard zipWith function
*
* @note Not stack safe
*/
def zipWith[A,B,C](as: List[A], bs: List[B])(f: (A,B) => C): List[C] =
(as, bs) match {
case (Cons(a, rest_as), Cons(b, rest_bs)) =>
Cons(f(a, b), zipWith(rest_as, rest_bs)(f))
case _ =>
Nil
}
/*
* Implements standard zipWith function
*
* Todo: when I know how to make things lazy
*/
//def zipWith[A,B,C](as: List[A], bs: List[B])(f: (A,B) => C): List[C] =
// foldLeft(as, Nil: List[C])
// Exercise 3.24
/** Determine if a List contains another List as a subsequence */
def hasSubsequence[A](sup: List[A], sub: List[A]): Boolean = {
def hasHead(h: A, l: List[A]): Boolean =
l match {
case Cons(h2, rest) if h == h2 => true
case _ => false
}
def sameInit(l1: List[A], l2: List[A]): Boolean =
foldLeftSC(zipWith(l1, l2)(_ == _), false, true)(_ && _)
def foundIt(l: List[A]): Boolean =
if (hasHead(head(sub), l))
if (sameInit(l, sub))
true
else
foundIt(tailT(l))
else
if (l == Nil)
false
else
foundIt(tailT(l))
if (sub != Nil)
foundIt(sup)
else
true
}
}
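
/** A small usage sketch (added illustration, not part of the original exercises): it exercises a
  * few of the combinators defined in `List` above.
  */
object ListExamples {
  import List._

  def main(args: Array[String]): Unit = {
    val xs = List(1, 2, 3, 4)
    assert(lengthL(xs) == 4)                       // foldLeft-based length
    assert(reverse(xs) == List(4, 3, 2, 1))        // reversal via foldLeft
    assert(append(List(1, 2), List(3, 4)) == xs)   // append via foldRight
    assert(flatMap(xs)(a => List(a, a)) == List(1, 1, 2, 2, 3, 3, 4, 4))
    assert(hasSubsequence(xs, List(2, 3)))         // subsequence search
    println("List examples passed")
  }
}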
| grscheller/scheller-linux-archive | grok/Scala2/fpinscala/src/main/scala/fpinscala/datastructures/List.scala | Scala | bsd-3-clause | 8,150 |
/* *\\
** Squants **
** **
** Scala Quantities and Units of Measure Library and DSL **
** (c) 2013-2014, Gary Keorkunian **
** **
\\* */
package squants.market
import org.scalatest.{ Matchers, FlatSpec }
/**
* @author garyKeorkunian
* @since 0.1
*
*/
class MoneyContextSpec extends FlatSpec with Matchers {
val defCur = USD
val rates = List(
CurrencyExchangeRate(EUR(1), USD(1.25)),
CurrencyExchangeRate(USD(1), JPY(100)),
CurrencyExchangeRate(GBP(1), USD(1.6686)),
CurrencyExchangeRate(USD(1), CAD(1.1126)),
CurrencyExchangeRate(USD(1), HKD(7.7565)),
CurrencyExchangeRate(USD(1), CNY(6.0915)),
CurrencyExchangeRate(AUD(1), USD(0.8961)),
CurrencyExchangeRate(XAU(1), USD(1200.00)),
CurrencyExchangeRate(XAG(1), USD(20.00)),
CurrencyExchangeRate(CHF(1), NOK(40)))
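  // Example of an indirect rate implied by the list above: EUR(1) = USD(1.25) and USD(1) = JPY(100)
  // give EUR(1) = JPY(125), i.e. the EUR(0.80) -> JPY(100) rate asserted in the indirectRateFor tests.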
val moneyContext = MoneyContext(defCur, defaultCurrencySet, rates)
behavior of "MoneyContext"
it should "initialize with appropriate values" in {
moneyContext.defaultCurrency should be(defCur)
moneyContext.rates should be(rates)
}
it should "properly create Money using the default currency with an implicit MoneyContext in scope" in {
implicit val moneyContext = MoneyContext(defCur, defaultCurrencySet, rates)
Money(10.22) should be(defCur(10.22))
}
it should "return Some(rate) for a given currency pair for which there is a rate" in {
moneyContext.directRateFor(USD, JPY) should be(Some(CurrencyExchangeRate(USD(1), JPY(100))))
moneyContext.directRateFor(JPY, USD) should be(Some(CurrencyExchangeRate(USD(1), JPY(100))))
}
it should "return None for a given currency pair for which there is no rate" in {
moneyContext.directRateFor(CAD, AUD) should be(None)
moneyContext.directRateFor(EUR, JPY) should be(None)
}
it should "return Some(rate) for a given currency pair for which there is a direct rate" in {
moneyContext.indirectRateFor(USD, JPY) should be(Some(CurrencyExchangeRate(USD(1), JPY(100))))
moneyContext.indirectRateFor(JPY, USD) should be(Some(CurrencyExchangeRate(USD(1), JPY(100))))
}
it should "return Some(rate) for a given currency pair for which there is no direct, but an indirect rate" in {
moneyContext.indirectRateFor(XAG, XAU).get.rate should be(CurrencyExchangeRate(XAG(60), XAU(1)).rate)
moneyContext.indirectRateFor(EUR, JPY).get.rate should be(CurrencyExchangeRate(EUR(0.80), JPY(100)).rate)
}
it should "return None for a given currency pair for which there is no direct or indirect rate" in {
moneyContext.indirectRateFor(CHF, USD) should be(None)
moneyContext.indirectRateFor(NOK, XAG) should be(None)
}
it should "properly convert Money values between currencies for which there is a direct exchange rate" in {
moneyContext.convert(USD(10), JPY) should be(JPY(1000.00))
moneyContext.convert(JPY(200), USD) should be(USD(2))
moneyContext.convert(XAG(30), USD) should be(USD(600))
}
it should "properly convert Money values between currencies for which there is only an indirect exchange rate" in {
moneyContext.convert(XAU(1), XAG) should be(XAG(60))
moneyContext.convert(JPY(100), EUR) should be(EUR(0.8))
}
it should "properly return the same Money when converting to the same currency" in {
moneyContext.convert(USD(12.28), USD) should be(USD(12.28))
moneyContext.convert(XAG(12.28), XAG) should be(XAG(12.28))
}
it should "properly return the same Money when converting to the same currency with an empty context" in {
val context = MoneyContext(defCur, defaultCurrencySet, Nil)
context.convert(USD(12.28), USD) should be(USD(12.28))
context.convert(XAG(12.28), XAG) should be(XAG(12.28))
}
it should "throw a NoExchangeRateException when converting between Currencies for which no indirect rate can be determined" in {
intercept[NoSuchExchangeRateException] {
moneyContext.convert(CHF(100), USD)
}
intercept[NoSuchExchangeRateException] {
moneyContext.convert(NOK(100), XAG)
}
}
it should "throw a NoExchangeRateException when converting where an indirect rate exists but is not allowed by the context" in {
val context = MoneyContext(defCur, defaultCurrencySet, rates, allowIndirectConversions = false)
intercept[NoSuchExchangeRateException] {
context.convert(XAU(1), XAG) should be(XAG(60))
}
intercept[NoSuchExchangeRateException] {
context.convert(JPY(100), EUR) should be(EUR(0.8))
}
}
it should "return Money in the first currency when adding two Moneys in different currencies" in {
moneyContext.add(USD(1), JPY(100)) should be(USD(2))
}
it should "return Money in the first currency when subtracting two Moneys in different currencies" in {
moneyContext.subtract(USD(2), JPY(100)) should be(USD(1))
}
it should "return BigDecimal when dividing two Moneys in different currencies" in {
moneyContext.divide(USD(2), JPY(100)) should be(2)
}
it should "return Int based on a standard comparison of two Moneys in different currencies" in {
moneyContext.compare(USD(2), JPY(100)) should be(1)
moneyContext.compare(USD(1), JPY(200)) should be(-1)
moneyContext.compare(USD(1), JPY(100)) should be(0)
}
it should "return a copy with a new set of rates" in {
val newRates = List(
CurrencyExchangeRate(EUR(1), USD(1.25)),
CurrencyExchangeRate(USD(1), JPY(100)),
CurrencyExchangeRate(GBP(1), USD(1.6686)),
CurrencyExchangeRate(USD(1), CAD(1.1126)))
val newContext = moneyContext.withExchangeRates(newRates)
newContext.defaultCurrency should be(moneyContext.defaultCurrency)
newContext.currencies should be(moneyContext.currencies)
newContext.rates should be(newRates)
}
}
| non/squants | src/test/scala/squants/market/MoneyContextSpec.scala | Scala | apache-2.0 | 6,133 |
package models.module
import play.api.mvc.{PathBindable, QueryStringBindable}
object ModelBindables {
/* Start model bindables */
/* Projectile export section [boilerplay] */
private[this] def boilerplayMpaaRatingTypeExtractor(v: Either[String, String]) = v match {
case Right(s) => Right(models.film.MpaaRatingType.withValue(s))
case Left(x) => throw new IllegalStateException(x)
}
implicit def boilerplayMpaaRatingTypePathBindable(implicit binder: play.api.mvc.PathBindable[String]): play.api.mvc.PathBindable[models.film.MpaaRatingType] = new play.api.mvc.PathBindable[models.film.MpaaRatingType] {
override def bind(key: String, value: String) = boilerplayMpaaRatingTypeExtractor(binder.bind(key, value))
override def unbind(key: String, e: models.film.MpaaRatingType) = e.value
}
implicit def boilerplayMpaaRatingTypeQueryStringBindable(implicit binder: play.api.mvc.QueryStringBindable[String]): play.api.mvc.QueryStringBindable[models.film.MpaaRatingType] = new play.api.mvc.QueryStringBindable[models.film.MpaaRatingType] {
override def bind(key: String, params: Map[String, Seq[String]]) = binder.bind(key, params).map(boilerplayMpaaRatingTypeExtractor)
override def unbind(key: String, e: models.film.MpaaRatingType) = e.value
}
/* End model bindables */
}
| KyleU/boilerplay | app/models/module/ModelBindables.scala | Scala | cc0-1.0 | 1,311 |
package special.sigma
import org.ergoplatform.SigmaConstants.ScriptCostLimit
import org.scalatest.BeforeAndAfterAll
import scalan.RType
import scalan.util.BenchmarkUtil
import sigmastate.{DataValueComparer, JitCost, TrivialProp}
import sigmastate.Values.ErgoTree
import sigmastate.eval.{CSigmaProp, Profiler, SigmaDsl}
import sigmastate.helpers.SigmaPPrint
import sigmastate.interpreter.{CostAccumulator, ErgoTreeEvaluator, EvalSettings, TracedCost}
import special.collection.Coll
import scala.util.{Success, Try}
class DataValueComparerSpecification extends SigmaDslTesting
with BeforeAndAfterAll { suite =>
implicit override val generatorDrivenConfig = PropertyCheckConfiguration(minSuccessful = 30)
implicit override val evalSettings: EvalSettings =
ErgoTreeEvaluator.DefaultEvalSettings.copy(
isMeasureOperationTime = true,
isMeasureScriptTime = true,
isLogEnabled = false, // don't commit the `true` value (CI log is too high)
costTracingEnabled = true // should always be enabled in tests (and false by default)
)
override val nBenchmarkIters = 10
val nWarmUpIterations = 100
implicit val suiteProfiler = new Profiler
import TestData._
def createEvaluator(settings: EvalSettings, profiler: Profiler): ErgoTreeEvaluator = {
val accumulator = new CostAccumulator(
initialCost = JitCost(0),
costLimit = Some(JitCost.fromBlockCost(ScriptCostLimit.value)))
val evaluator = new ErgoTreeEvaluator(
context = null,
constants = ErgoTree.EmptyConstants,
coster = accumulator, profiler, settings)
evaluator
}
/** Checks (on positive cases) that EQ.equalDataValues used in v5.0 is equivalent to
* `==` used in v4.0
* NOTE: the computations `x` and `y` are expected to be stable (i.e. always producing
* equal values)
* @param x computation which produced first argument
* @param y computation which produced second argument
*/
def check(x: => Any, y: => Any, expected: Boolean)(implicit settings: EvalSettings, profiler: Profiler) = {
val _x = x // force computation and obtain value
val _y = y
withClue(s"EQ.equalDataValues(${_x}, ${_y})") {
val res = sameResultOrError(
repeatAndReturnLast(nBenchmarkIters + 1) {
val evaluator = createEvaluator(settings, profiler)
// it's important to use fresh values to neutralize memory cache to some extent
val fresh_x = x
val fresh_y = y
DataValueComparer.equalDataValues(fresh_x, fresh_y)(evaluator)
},
_x == _y)
res match {
case Success(res) => res shouldBe expected
case _ =>
}
}
if (settings.isMeasureScriptTime) {
val evaluator = createEvaluator(settings, profiler)
val fresh_x = x
val fresh_y = y
val (res, actualTime) = BenchmarkUtil.measureTimeNano {
Try(DataValueComparer.equalDataValues(fresh_x, fresh_y)(evaluator))
}
if (res.isSuccess) {
val costDetails = TracedCost(evaluator.getCostTrace(), Some(actualTime))
val xStr = SigmaPPrint(fresh_x).plainText
val yStr = SigmaPPrint(fresh_y).plainText
val script = s"$xStr == $yStr"
evaluator.profiler.addJitEstimation(script, costDetails.cost, actualTime)
}
}
}
/** It is important for profiling to return a new array on every method call.
* This is to avoid reusing the same memory location during numerous iterations
* which doesn't reflect the real world scenario. Thus, creating a new array on every
* request neutralizes the effects of cache and makes profiling more accurate. */
def zeros = Array[Any](0.toByte, 0.toShort, 0, 0.toLong)
def ones = Array[Any](1.toByte, 1.toShort, 1, 1.toLong)
override protected def beforeAll(): Unit = {
// this method warms up the code in DataValueComparer
val warmUpProfiler = new Profiler
warmUpBeforeAllTest(nTotalIters = nWarmUpIterations) {
runBaseCases(warmUpProfiler)(evalSettings = evalSettings.copy(isLogEnabled = false))
}
}
/** Runs a number of equality checks for a value produced by the given computation.
* @param x computation which produces value to be exercised. */
def checkIsEqual(x: => Any) = {
check(x, x, true)
check(Some(x), Some(x), true)
check((x, x), (x, x), true)
}
  /** This is NOT a comprehensive list of possible checks.
* See also DataSerializerSpecification.roundtrip where comprehensive
* checking of positive cases is done.
* This method is used:
* 1) to warm up DataValueComparer in the beforeAll method
* 2) to profile DataValueComparer operations */
def runBaseCases(profiler: Profiler)(implicit evalSettings: EvalSettings) = {
    implicit val suiteProfiler = profiler // shadow the suite's profiler and use the explicitly passed one
ones.foreach { x =>
ones.foreach { y =>
check(x, y, true) // numeric values are equal regardless of their type
check(Some(x), Some(y), true) // numeric values in Option
check(Some(x), y, false)
check(x, Some(y), false)
check(Some(x), None, false)
check(None, Some(x), false)
check((x, 1), (y, 1), true) // and in Tuple
check((1, x), (1, y), true)
check((1, x), y, false)
check(x, (1, y), false)
}
}
val sizes = Array(0, 1, 4, 8, 16, 32, 64, 128, 256, 512)
def coll[T: RType](s: Int, v: => T): Coll[T] = {
val arr = Array.fill(s)(v)(RType[T].classTag)
builder.Colls.fromArray(arr)
}
sizes.foreach { s =>
checkIsEqual(coll(s, 1.toByte))
checkIsEqual(coll(s, 1.toShort))
checkIsEqual(coll(s, 1))
checkIsEqual(coll(s, 1L))
checkIsEqual(coll(s, createBigIntMaxValue()))
checkIsEqual(coll(s, create_ge1()))
checkIsEqual(coll(s, create_t1()))
checkIsEqual(coll(s, create_b1()))
checkIsEqual(coll(s, create_preH1()))
checkIsEqual(coll(s, create_h1()))
// collections of complex types
checkIsEqual(coll(s, (1.toByte, 1)))
checkIsEqual(coll(s, Option((1.toByte, 1))))
checkIsEqual(coll(s, (create_ge1(), Option((1.toByte, 1)))))
checkIsEqual(coll(s, (create_ge1(), (Option((1.toByte, 1)), coll(32, 7.toByte)))))
checkIsEqual(coll(s, SigmaDsl.SigmaProp(create_dlog())))
checkIsEqual(coll(s, SigmaDsl.SigmaProp(create_dht())))
checkIsEqual(coll(s, SigmaDsl.SigmaProp(create_and())))
checkIsEqual(coll(s, SigmaDsl.SigmaProp(create_or())))
checkIsEqual(coll(s, SigmaDsl.SigmaProp(TrivialProp.TrueProp)))
checkIsEqual(coll(s, SigmaDsl.SigmaProp(TrivialProp.FalseProp)))
}
checkIsEqual(createBigIntMaxValue())
checkIsEqual(create_ge1())
    checkIsEqual(create_t1())
checkIsEqual(create_b1())
checkIsEqual(create_preH1())
checkIsEqual(create_h1())
checkIsEqual(CSigmaProp(create_dlog()))
checkIsEqual(CSigmaProp(create_dht()))
checkIsEqual(CSigmaProp(create_and()))
checkIsEqual(CSigmaProp(create_or()))
checkIsEqual(CSigmaProp(TrivialProp.TrueProp))
checkIsEqual(CSigmaProp(TrivialProp.FalseProp))
}
/** Run this property alone for profiling and see the report generated in afterAll. */
property("equalDataValues base cases (use for profiling)") {
runBaseCases(suiteProfiler)(evalSettings)
}
property("equalDataValues positive cases (Coll)") {
checkIsEqual(Coll[Int]())
checkIsEqual(Coll[Int](1))
}
property("equalDataValues negative cases") {
check(BigIntZero, BigIntOne, false)
check(ge1, ge2, false)
check(t1, t2, false)
check(b1, b2, false)
check(preH1, preH2, false)
check(h1, h2, false)
ones.foreach { x =>
zeros.foreach { y =>
check(x, y, false)
}
}
ones.foreach { x =>
check(BigIntOne, x, false)
}
val values = Array[AnyRef](
1.asInstanceOf[AnyRef], BigIntOne, ge1, t1, b1, preH1, h1,
Coll[Int](1), None, Option(1), (1, 1))
values.foreach { x =>
values.foreach { y =>
if (!(x eq y)) {
check(x, y, false)
check(y, x, false)
}
}
}
}
property("equalDataValues negative cases (in Coll)") {
val onesInColl = Array[AnyRef](Coll(1.toByte), Coll(1.toShort), Coll(1), Coll(1.toLong))
onesInColl.foreach { x =>
onesInColl.foreach { y =>
if (!(x eq y)) {
check(x, y, false)
check(y, x, false)
}
}
}
check(Coll(1), Coll(1, 2), false)
}
override protected def afterAll(): Unit = {
println(suiteProfiler.generateReport)
}
}
| ScorexFoundation/sigmastate-interpreter | sigmastate/src/test/scala/special/sigma/DataValueComparerSpecification.scala | Scala | mit | 8,591 |
package com.github.aselab.activerecord
import java.sql.Connection
import org.squeryl.internals.DatabaseAdapter
import play.api.Play.current
import java.util.{Locale, TimeZone}
import play.api.i18n.{Messages, I18nSupport}
class PlayConfig(
val schema: ActiveRecordTables,
overrideSettings: Map[String, Any] = Map()
) extends ActiveRecordConfig {
lazy val schemaName = schema.getClass.getName.dropRight(1)
lazy val _prefix = current.configuration.getString("schema." + schemaName).getOrElse("activerecord")
def prefix(key: String) =
"db." + _prefix + "." + key
def classLoader = play.api.Play.application.classloader
private def debug(key: String): Unit =
debug(key, current.configuration.getString(key))
private def _getString(key: String): Option[String] =
overrideSettings.get(key).map(_.toString).orElse(
current.configuration.getString(key)
)
def getString(key: String): Option[String] = _getString("activerecord." + key)
def getBoolean(key: String, default: Boolean): Boolean =
overrideSettings.get(key).map(_.asInstanceOf[Boolean]).orElse(
current.configuration.getBoolean(key)
).getOrElse(default)
def autoCreate: Boolean = getBoolean("activerecord.autoCreate", true)
def autoDrop: Boolean = getBoolean("activerecord.autoDrop", false)
def schemaClass: String =
getString("schema").getOrElse("models.Tables")
def connection: Connection =
play.api.db.DB.getConnection(_prefix)
override def log = {
logger.debug("----- Database setting: %s (mode: %s) -----".format(_prefix, play.api.Play.application.mode))
logger.debug("\\tSchema class: " + schemaName)
List(prefix("url"), prefix("driver"), prefix("user")).foreach(debug)
}
lazy val adapter: DatabaseAdapter = {
adapter(_getString(prefix("driver")).orElse(getString("driver")).getOrElse("org.h2.Driver"))
}
def translator: i18n.Translator = PlayTranslator
}
object PlayTranslator extends i18n.Translator {
import play.api.i18n._
import play.api.i18n.Messages.Implicits._
  def get(key: String, args: Any*)(implicit locale: Locale): Option[String] = {
implicit val lang = Lang(locale.getLanguage, locale.getCountry)
val messages = implicitly[Messages]
if (messages.isDefinedAt(key)) {
Some(messages(key))
} else {
implicit val locale = messages.lang.toLocale
i18n.DefaultTranslator.get(key, args:_*)
}
}
}
trait PlaySupport { self: ActiveRecordTables =>
override def loadConfig(c: Map[String, Any]): ActiveRecordConfig =
new PlayConfig(self, c)
}
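// Illustrative sketch (not part of the original file): an application schema opts into this
// Play-backed configuration by mixing in PlaySupport, e.g.
//
//   object Tables extends ActiveRecordTables with PlaySupport
//
// PlayConfig then resolves the datasource name from the `schema.<schema class>` key (falling
// back to "activerecord") and, as the `prefix` helper above shows, reads the connection
// settings from `db.<name>.driver`, `db.<name>.url` and `db.<name>.user`. These key names are
// derived from that code and are otherwise assumptions about the application.conf layout.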
object PlayConfig {
def loadSchemas = current.configuration.getConfig("schema")
.map(_.keys).getOrElse(List("models.Tables")).map(ActiveRecordTables.find).toSeq
}
| xdougx/scala-activerecord | play2/src/main/scala/PlaySupport.scala | Scala | mit | 2,738 |
/*
* Copyright © 2015-2019 the contributors (see Contributors.md).
*
* This file is part of Knora.
*
* Knora is free software: you can redistribute it and/or modify
* it under the terms of the GNU Affero General Public License as published
* by the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* Knora is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Affero General Public License for more details.
*
* You should have received a copy of the GNU Affero General Public
* License along with Knora. If not, see <http://www.gnu.org/licenses/>.
*/
package org.knora.webapi.e2e.v1
import java.io.{File, FileInputStream, FileOutputStream}
import akka.http.scaladsl.model.headers._
import akka.http.scaladsl.model.{HttpEntity, _}
import com.typesafe.config.{Config, ConfigFactory}
import org.knora.webapi.ITKnoraFakeSpec
import org.knora.webapi.messages.store.triplestoremessages.TriplestoreJsonProtocol
import org.knora.webapi.util.{FileUtil, MutableTestIri}
import spray.json._
object KnoraSipiScriptsV1ITSpec {
val config: Config = ConfigFactory.parseString(
"""
|akka.loglevel = "DEBUG"
|akka.stdout-loglevel = "DEBUG"
""".stripMargin)
}
/**
* End-to-End (E2E) test specification for testing Knora-Sipi scripts. Sipi must be running with the config file
* `sipi.knora-config.lua`. This spec uses the KnoraFakeService to start a faked `webapi` server that always allows
* access to files.
*/
class KnoraSipiScriptsV1ITSpec extends ITKnoraFakeSpec(KnoraSipiScriptsV1ITSpec.config) with TriplestoreJsonProtocol {
implicit override val log = akka.event.Logging(system, this.getClass)
private val username = "[email protected]"
private val password = "test"
private val pathToChlaus = "_test_data/test_route/images/Chlaus.jpg"
private val pathToMarbles = "_test_data/test_route/images/marbles.tif"
private val firstPageIri = new MutableTestIri
private val secondPageIri = new MutableTestIri
"Calling Knora Sipi Scripts" should {
"successfully call C++ functions from Lua scripts" in {
val request = Get(baseSipiUrl + "/test_functions" )
getResponseString(request)
}
"successfully call Lua functions for mediatype handling" in {
val request = Get(baseSipiUrl + "/test_file_type" )
getResponseString(request)
}
"successfully call Lua function that gets the Knora session id from the cookie header sent to Sipi" in {
val request = Get(baseSipiUrl + "/test_knora_session_cookie" )
getResponseString(request)
}
"successfully call make_thumbnail.lua sipi script" in {
// The image to be uploaded.
val fileToSend = new File(pathToChlaus)
assert(fileToSend.exists(), s"File $pathToChlaus does not exist")
// A multipart/form-data request containing the image.
val sipiFormData = Multipart.FormData(
Multipart.FormData.BodyPart(
"file",
HttpEntity.fromPath(MediaTypes.`image/jpeg`, fileToSend.toPath),
Map("filename" -> fileToSend.getName)
)
)
// Send a POST request to Sipi, asking it to make a thumbnail of the image.
val sipiPostRequest = Post(baseSipiUrl + "/make_thumbnail", sipiFormData) ~> addCredentials(BasicHttpCredentials(username, password))
val sipiPostResponseJson = getResponseJson(sipiPostRequest)
/* sipiResponseJson will be something like this
{
"mimetype_thumb":"image/jpeg",
"original_mimetype":"image/jpeg",
"nx_thumb":93,
"preview_path":"http://localhost:1024/thumbs/CjwDMhlrctI-BG7gms08BJ4.jpg/full/full/0/default.jpg",
"filename":"CjwDMhlrctI-BG7gms08BJ4",
"file_type":"IMAGE",
"original_filename":"Chlaus.jpg",
"ny_thumb":128
}
*/
// get the preview_path
val previewPath = sipiPostResponseJson.fields("preview_path").asInstanceOf[JsString].value
// get the filename
val filename = sipiPostResponseJson.fields("filename").asInstanceOf[JsString].value
// Send a GET request to Sipi, asking for the preview image
val sipiGetRequest01 = Get(previewPath)
val sipiGetResponseJson01 = getResponseString(sipiGetRequest01)
// Send a GET request to Sipi, asking for the info.json of the image
val sipiGetRequest02 = Get(baseSipiUrl + "/thumbs/" + filename + ".jpg/info.json" )
val sipiGetResponseJson = getResponseJson(sipiGetRequest02)
}
"successfully call convert_from_file.lua sipi script" in {
/* This is the case where the file is already stored on the sipi server as part of make_thumbnail*/
// The image to be uploaded.
val fileToSend = new File(pathToChlaus)
assert(fileToSend.exists(), s"File $pathToChlaus does not exist")
// A multipart/form-data request containing the image.
val sipiFormData = Multipart.FormData(
Multipart.FormData.BodyPart(
"file",
HttpEntity.fromPath(MediaTypes.`image/jpeg`, fileToSend.toPath),
Map("filename" -> fileToSend.getName)
)
)
// Send a POST request to Sipi, asking it to make a thumbnail of the image.
val sipiMakeThumbnailRequest = Post(baseSipiUrl + "/make_thumbnail", sipiFormData)
val sipiMakeThumbnailResponseJson = getResponseJson(sipiMakeThumbnailRequest)
val originalFilename = sipiMakeThumbnailResponseJson.fields("original_filename").asInstanceOf[JsString].value
val originalMimetype = sipiMakeThumbnailResponseJson.fields("original_mimetype").asInstanceOf[JsString].value
val filename = sipiMakeThumbnailResponseJson.fields("filename").asInstanceOf[JsString].value
// A form-data request containing the payload for convert_from_file.
val sipiFormData02 = FormData(
Map(
"originalFilename" -> originalFilename,
"originalMimeType" -> originalMimetype,
"prefix" -> "0001",
"filename" -> filename
)
)
val convertFromFileRequest = Post(baseSipiUrl + "/convert_from_file", sipiFormData02)
val convertFromFileResponseJson = getResponseJson(convertFromFileRequest)
val filenameFull = convertFromFileResponseJson.fields("filename_full").asInstanceOf[JsString].value
// Running with KnoraFakeService which always allows access to files.
// Send a GET request to Sipi, asking for full image
// not possible as authentication is required and file needs to be known by knora to be able to authenticate the request
val sipiGetImageRequest = Get(baseSipiUrl + "/0001/" + filenameFull + "/full/full/0/default.jpg") ~> addCredentials(BasicHttpCredentials(username, password))
checkResponseOK(sipiGetImageRequest)
// Send a GET request to Sipi, asking for the info.json of the image
val sipiGetInfoRequest = Get(baseSipiUrl + "/0001/" + filenameFull + "/info.json" ) ~> addCredentials(BasicHttpCredentials(username, password))
val sipiGetInfoResponseJson = getResponseJson(sipiGetInfoRequest)
log.debug("sipiGetInfoResponseJson: {}", sipiGetInfoResponseJson)
}
"successfully call convert_from_path.lua sipi script" in {
// The image to be uploaded.
val fileToSend = new File(pathToChlaus)
assert(fileToSend.exists(), s"File $pathToChlaus does not exist")
// To be able to run packaged tests inside Docker, we need to copy
// the file first to a place which is shared with sipi
val dest = FileUtil.createTempFile(settings)
new FileOutputStream(dest)
.getChannel
.transferFrom(
new FileInputStream(fileToSend).getChannel,
0,
Long.MaxValue
)
// A multipart/form-data request containing the image.
val sipiFormData = FormData(
Map(
"originalFilename" -> fileToSend.getName,
"originalMimeType" -> "image/jpeg",
"prefix" -> "0001",
"source" -> dest.getAbsolutePath
)
)
// Send a POST request to Sipi, asking it to make a thumbnail of the image.
val sipiConvertFromPathPostRequest = Post(baseSipiUrl + "/convert_from_path", sipiFormData)
val sipiConvertFromPathPostResponseJson = getResponseJson(sipiConvertFromPathPostRequest)
val filenameFull = sipiConvertFromPathPostResponseJson.fields("filename_full").asInstanceOf[JsString].value
//log.debug("sipiConvertFromPathPostResponseJson: {}", sipiConvertFromPathPostResponseJson)
// Running with KnoraFakeService which always allows access to files.
val sipiGetImageRequest = Get(baseSipiUrl + "/0001/" + filenameFull + "/full/full/0/default.jpg") ~> addCredentials(BasicHttpCredentials(username, password))
checkResponseOK(sipiGetImageRequest)
// Send a GET request to Sipi, asking for the info.json of the image
val sipiGetInfoRequest = Get(baseSipiUrl + "/0001/" + filenameFull + "/info.json" ) ~> addCredentials(BasicHttpCredentials(username, password))
val sipiGetInfoResponseJson = getResponseJson(sipiGetInfoRequest)
log.debug("sipiGetInfoResponseJson: {}", sipiGetInfoResponseJson)
}
}
}
| musicEnfanthen/Knora | webapi/src/it/scala/org/knora/webapi/e2e/v1/KnoraSipiScriptsV1ITSpec.scala | Scala | agpl-3.0 | 10,200 |
package com.sammyrulez.ocs.api
import org.scalatra.test.specs2._
import org.specs2.mock.Mockito
import org.mockito.Matchers._
import org.specs2.mutable._
import javax.servlet.http._
import javax.servlet._
import org.scalatra.auth.Scentry
import net.iharder.Base64
import org.scalatra.ScalatraFilter
class OperationsServletTests extends MutableScalatraSpec with Mockito {
addServlet(classOf[Operations], "/*")
/*val basicCredentials = "Basic " + Base64.encodeBytes("scalatra:scalatra")
val authHeader = Map("Authorization" -> basicCredentials) */
var invalidateCalled = false
val context = new ScalatraFilter {
private[this] val sessionMap = scala.collection.mutable.HashMap[String, Any](Scentry.scentryAuthKey -> "6789")
val mockSession = smartMock[HttpSession]
override def session(implicit request: HttpServletRequest) = mockSession
mockSession.getAttribute(anyString) answers { k => sessionMap.getOrElse(k.asInstanceOf[String], null).asInstanceOf[AnyRef] }
mockSession.setAttribute(anyString, anyObject) answers { (kv, wtfIsThis) =>
val Array(k: String, v: Any) = kv
sessionMap(k) = v
}
mockSession.invalidate() answers { k =>
invalidateCalled = true
sessionMap.clear()
}
}
implicit val req = mock[HttpServletRequest]
implicit val res = mock[HttpServletResponse].smart
val mockedScentry = mock[Scentry[User]]
mockedScentry.isAuthenticated returns true
mockedScentry.authenticate returns new User("scalatra")
"POST / on deposit" should {
"return status 200" in {
post("/deposit") {
status must_== 200
}
}
}
}
| sammyrulez/octo-crypto-samurai | src/test/scala/com/sammyrulez/ocs/api/OperationsTest.scala | Scala | mit | 2,042 |
/** Computes factorials using a BigInt fold, so large results do not overflow. */
class Factorial {
  /** Returns num! as a decimal string; for num < 2 the empty fold range yields "1". */
  def computeFactorial(num: Int): String = {
(2 to num).foldLeft(BigInt(1))(_*_).toString()
}
}
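// Illustrative usage sketch (not part of the original snippet): exercises computeFactorial from
// a small entry point. The result comes back as a String because the BigInt product quickly
// exceeds the range of Int and Long.
object FactorialDemo {
  def main(args: Array[String]): Unit = {
    val factorial = new Factorial
    println(factorial.computeFactorial(5))  // 120
    println(factorial.computeFactorial(30)) // 265252859812191058636308480000000
  }
}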
| aayushKumarJarvis/Code-Snippets-JAVA-SCALA | src/main/scala/Factorial.scala | Scala | mit | 122 |
package org.vaslabs.granger.repo
import java.time.ZonedDateTime
import java.util.UUID
import akka.http.scaladsl.model.StatusCode
import cats.effect.IO
import org.vaslabs.granger.LoadDataOutcome
import org.vaslabs.granger.comms.api.model.{ Activity, AddToothInformationRequest }
import org.vaslabs.granger.modeltreatments.TreatmentCategory
import org.vaslabs.granger.modelv2._
/**
* Created by vnicolaou on 28/05/17.
*/
case class PatientEntry(patientId: PatientId, patient: Patient)
sealed trait RepoErrorState
case class SchemaFailure(error: String) extends RepoErrorState
case object EmptyRepo extends RepoErrorState
case class UnkownState(error: String) extends RepoErrorState
case class UnparseableSchema(error: String) extends RepoErrorState
trait GrangerRepo[State, F[_]] {
def loadData(): F[LoadDataOutcome]
def setUpRepo(): F[Unit]
def getLatestActivity(patientId: PatientId): Map[Int, List[Activity]]
def addToothInfo(rq: AddToothInformationRequest): Either[InvalidData, F[Patient]]
def addPatient(patient: Patient): F[Patient]
def retrieveAllPatients(): IO[Either[RepoErrorState, List[Patient]]]
def pushChanges(): F[Unit]
def startTreatment(
patientId: PatientId,
toothId: Int,
treatmentCategory: TreatmentCategory): Either[InvalidData, F[Patient]]
def finishTreatment(patientId: PatientId, toothId: Int, finishTime: ZonedDateTime): Either[InvalidData, F[Patient]]
def deleteTreatment(patientId: PatientId, toothId: Int, timestamp: ZonedDateTime): Either[InvalidData, F[Patient]]
def deletePatient(patientId: PatientId): F[Either[IOError, Unit]]
}
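// Illustrative sketch (not part of the original file): callers are expected to fix the effect
// type F to something like cats.effect.IO and sequence the operations through it. The
// `grangerRepo` and `patient` values below are hypothetical:
//
//   val program: IO[Patient] = for {
//     _       <- grangerRepo.loadData()          // with F = IO
//     created <- grangerRepo.addPatient(patient)
//   } yield created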
| vaslabs/granger | src/main/scala/org/vaslabs/granger/repo/GrangerRepo.scala | Scala | lgpl-3.0 | 1,619 |
package kessler
import ksp._
import scala.actors._
/**
* A daemon for merging save files and replying with the merged files.
*
* Operations are:
* - update <content> - merge this content into the master save file
* - get - get a copy of the merged file
* - get <list> - get a copy of the merged file, excluding rockets that require parts not in <list>
*/
class KesslerDaemon(configfile: String) extends Actor {
import scala.actors.remote.RemoteActor._
import Protocol._
import java.util.Properties
import java.io.{File,FileInputStream}
  val VERSION = 12040700
println("Loading configuration from " + configfile)
private object config extends Properties(new Properties()) {
load(new FileInputStream(configfile))
defaults put ("port", "8988")
defaults put ("save", "kessler/merged.sfs")
defaults put ("load", "kessler/merged.sfs,saves/default/persistent.sfs,kessler/empty.sfs")
defaults put ("filter", "nan,ghost,launchpad")
def apply(key: String) = getProperty(key)
def apply(key: Symbol) = getProperty(key name)
def port = this('port).toInt
def load = this('load).split(",")
def filter = this('filter).split(",")
}
var game = loadFile(config.load)
def log(message: String) { println(message) }
def loadFile(names: Seq[String]): Game = {
if (names.isEmpty) {
log("Error: couldn't find any games to load. Exiting.")
System.exit(1)
} else {
log("Looking for " + names.head + ":")
}
try {
val game = Game.fromFile(names.head)
log(" loaded " + names.head)
game
} catch {
case e: Exception => log(" failed (" + e.getMessage + ")"); loadFile(names.tail)
case other => log(" failed - unknown result - " + other); loadFile(names.tail)
}
}
override def exceptionHandler = {
case e: Exception => e.printStackTrace(); sender ! Error(e.getMessage)
}
def act() {
alive(config.port)
register('kesslerd, this)
log("Kessler daemon running on port " + config.port + " (protocol version " + VERSION + ")")
loop {
react {
case ConnectCommand(pass, version) => doAuth(pass) && checkVersion(version)
case put: PutCommand => if (doAuth(put.pass)) doPut(put.payload);
case get: GetCommand => if (doAuth(get.pass)) doGet();
case other => sender ! Error("Invalid command: " + other)
}
}
}
def checkVersion(version: Int) = {
println("Performing version check: " + version)
if (version != VERSION) {
println("Rejecting connection (client is using mismatched version " + version + ")")
sender ! Error("Version mismatch: server " + VERSION + ", client " + version)
false
} else {
sender ! Success("Server ready.")
true
}
}
def doAuth(pass: String) = {
if (config('pass) == null || config('pass) == pass) {
log("Command received from " + sender)
true
} else {
log("Rejecting command from " + sender + ": invalid password")
reply(Error("invalid password"))
false
}
}
def doPut(save: String) {
var count = game.asObject.children.values.foldLeft(0)((total, buf) => total + buf.length)
val newGame = if (config.filter != null) {
filterGame(Game.fromString(save), config.filter)
} else {
Game.fromString(save)
}
game = game.merge(newGame)
count = game.asObject.children.values.foldLeft(0)((total, buf) => total + buf.length) - count
log(count + " objects received from client.")
reply(Success(count + " objects successfully uploaded."))
safeSave(game)
}
def filterGame(game: Game, filters: Seq[String]) = {
import kessler.GameEditor
val editor = new GameEditor(game)
filters foreach { filter =>
try {
log("Applying filter '" + filter + "'")
editor.runCommand("clean " + filter)
} catch {
case e: Exception => log("Error applying filter '" + filter + "': " + e.getMessage)
}
}
editor.game
}
def safeSave(game: Game) {
game.save(config('save) + ".tmp")
new File(config('save)).delete()
new File(config('save) + ".tmp").renameTo(new File(config('save)))
}
def doGet() {
val (crew,vessels) = (game.asObject.getChildren("CREW").length,game.asObject.getChildren("VESSEL").length)
log("Sending merged save file containing "
+ crew
+ " crew and "
+ vessels
+ " vessels."
)
val msg = Success("Save file contains " + crew + " crew and " + vessels + " vessels.")
msg.payload = game.mkString
reply(msg)
}
}
object KesslerDaemon {
def main(args: Array[String]) {
val configfile = if (args.length > 0) args(0) else "kessler/server_config.txt"
new KesslerDaemon(configfile).start()
}
}
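// Illustrative client sketch (not part of the original sources): talks to a running daemon over
// scala.actors remoting, mirroring the handlers in act() above. ConnectCommand(pass, version)
// follows the daemon's pattern match; the exact shapes of the Protocol reply messages are not
// shown in this file, so the response is simply printed as-is.
object KesslerClientSketch {
  import Protocol._
  import scala.actors.remote.Node
  import scala.actors.remote.RemoteActor.select

  def main(args: Array[String]) {
    // Connect to the daemon registered as 'kesslerd on the default port from server_config.txt.
    val daemon = select(Node("localhost", 8988), 'kesslerd)
    val reply = daemon !? ConnectCommand("password", 12040700)
    println("Daemon replied: " + reply)
  }
}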
| ToxicFrog/kessler | kesslerd/src/KesslerDaemon.scala | Scala | mit | 4,809 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.scheduler
import java.util.Properties
import javax.annotation.Nullable
import scala.collection.Map
import com.fasterxml.jackson.annotation.JsonTypeInfo
import org.apache.spark.{SparkConf, TaskEndReason}
import org.apache.spark.annotation.DeveloperApi
import org.apache.spark.executor.TaskMetrics
import org.apache.spark.scheduler.cluster.ExecutorInfo
import org.apache.spark.storage.{BlockManagerId, BlockUpdatedInfo}
import org.apache.spark.ui.SparkUI
@DeveloperApi
@JsonTypeInfo(use = JsonTypeInfo.Id.CLASS, include = JsonTypeInfo.As.PROPERTY, property = "Event")
trait SparkListenerEvent {
/* Whether output this event to the event log */
protected[spark] def logEvent: Boolean = true
}
@DeveloperApi
case class SparkListenerStageSubmitted(stageInfo: StageInfo, properties: Properties = null)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerStageCompleted(stageInfo: StageInfo) extends SparkListenerEvent
@DeveloperApi
case class SparkListenerTaskStart(stageId: Int, stageAttemptId: Int, taskInfo: TaskInfo)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerTaskGettingResult(taskInfo: TaskInfo) extends SparkListenerEvent
@DeveloperApi
case class SparkListenerTaskEnd(
stageId: Int,
stageAttemptId: Int,
taskType: String,
reason: TaskEndReason,
taskInfo: TaskInfo,
// may be null if the task has failed
@Nullable taskMetrics: TaskMetrics)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerJobStart(
jobId: Int,
time: Long,
stageInfos: Seq[StageInfo],
properties: Properties = null)
extends SparkListenerEvent {
// Note: this is here for backwards-compatibility with older versions of this event which
// only stored stageIds and not StageInfos:
val stageIds: Seq[Int] = stageInfos.map(_.stageId)
}
@DeveloperApi
case class SparkListenerJobEnd(
jobId: Int,
time: Long,
jobResult: JobResult)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerEnvironmentUpdate(environmentDetails: Map[String, Seq[(String, String)]])
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerBlockManagerAdded(
time: Long,
blockManagerId: BlockManagerId,
maxMem: Long,
maxOnHeapMem: Option[Long] = None,
maxOffHeapMem: Option[Long] = None) extends SparkListenerEvent {
}
@DeveloperApi
case class SparkListenerBlockManagerRemoved(time: Long, blockManagerId: BlockManagerId)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerUnpersistRDD(rddId: Int) extends SparkListenerEvent
@DeveloperApi
case class SparkListenerExecutorAdded(time: Long, executorId: String, executorInfo: ExecutorInfo)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerExecutorRemoved(time: Long, executorId: String, reason: String)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerExecutorBlacklisted(
time: Long,
executorId: String,
taskFailures: Int)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerExecutorUnblacklisted(time: Long, executorId: String)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerNodeBlacklisted(
time: Long,
hostId: String,
executorFailures: Int)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerNodeUnblacklisted(time: Long, hostId: String)
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerBlockUpdated(blockUpdatedInfo: BlockUpdatedInfo) extends SparkListenerEvent
/**
* Periodic updates from executors.
* @param execId executor id
* @param accumUpdates sequence of (taskId, stageId, stageAttemptId, accumUpdates)
*/
@DeveloperApi
case class SparkListenerExecutorMetricsUpdate(
execId: String,
accumUpdates: Seq[(Long, Int, Int, Seq[AccumulableInfo])])
extends SparkListenerEvent
@DeveloperApi
case class SparkListenerApplicationStart(
appName: String,
appId: Option[String],
time: Long,
sparkUser: String,
appAttemptId: Option[String],
driverLogs: Option[Map[String, String]] = None) extends SparkListenerEvent
@DeveloperApi
case class SparkListenerApplicationEnd(time: Long) extends SparkListenerEvent
/**
* An internal class that describes the metadata of an event log.
*/
@DeveloperApi
case class SparkListenerLogStart(sparkVersion: String) extends SparkListenerEvent
/**
* Interface for creating history listeners defined in other modules like SQL, which are used to
* rebuild the history UI.
*/
private[spark] trait SparkHistoryListenerFactory {
/**
* Create listeners used to rebuild the history UI.
*/
def createListeners(conf: SparkConf, sparkUI: SparkUI): Seq[SparkListener]
}
/**
* Interface for listening to events from the Spark scheduler. Most applications should probably
* extend SparkListener or SparkFirehoseListener directly, rather than implementing this class.
*
* Note that this is an internal interface which might change in different Spark releases.
*/
private[spark] trait SparkListenerInterface {
/**
* Called when a stage completes successfully or fails, with information on the completed stage.
*/
def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit
/**
* Called when a stage is submitted
*/
def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit
/**
* Called when a task starts
*/
def onTaskStart(taskStart: SparkListenerTaskStart): Unit
/**
* Called when a task begins remotely fetching its result (will not be called for tasks that do
* not need to fetch the result remotely).
*/
def onTaskGettingResult(taskGettingResult: SparkListenerTaskGettingResult): Unit
/**
* Called when a task ends
*/
def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit
/**
* Called when a job starts
*/
def onJobStart(jobStart: SparkListenerJobStart): Unit
/**
* Called when a job ends
*/
def onJobEnd(jobEnd: SparkListenerJobEnd): Unit
/**
* Called when environment properties have been updated
*/
def onEnvironmentUpdate(environmentUpdate: SparkListenerEnvironmentUpdate): Unit
/**
* Called when a new block manager has joined
*/
def onBlockManagerAdded(blockManagerAdded: SparkListenerBlockManagerAdded): Unit
/**
* Called when an existing block manager has been removed
*/
def onBlockManagerRemoved(blockManagerRemoved: SparkListenerBlockManagerRemoved): Unit
/**
* Called when an RDD is manually unpersisted by the application
*/
def onUnpersistRDD(unpersistRDD: SparkListenerUnpersistRDD): Unit
/**
* Called when the application starts
*/
def onApplicationStart(applicationStart: SparkListenerApplicationStart): Unit
/**
* Called when the application ends
*/
def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit
/**
* Called when the driver receives task metrics from an executor in a heartbeat.
*/
def onExecutorMetricsUpdate(executorMetricsUpdate: SparkListenerExecutorMetricsUpdate): Unit
/**
* Called when the driver registers a new executor.
*/
def onExecutorAdded(executorAdded: SparkListenerExecutorAdded): Unit
/**
* Called when the driver removes an executor.
*/
def onExecutorRemoved(executorRemoved: SparkListenerExecutorRemoved): Unit
/**
* Called when the driver blacklists an executor for a Spark application.
*/
def onExecutorBlacklisted(executorBlacklisted: SparkListenerExecutorBlacklisted): Unit
/**
* Called when the driver re-enables a previously blacklisted executor.
*/
def onExecutorUnblacklisted(executorUnblacklisted: SparkListenerExecutorUnblacklisted): Unit
/**
* Called when the driver blacklists a node for a Spark application.
*/
def onNodeBlacklisted(nodeBlacklisted: SparkListenerNodeBlacklisted): Unit
/**
* Called when the driver re-enables a previously blacklisted node.
*/
def onNodeUnblacklisted(nodeUnblacklisted: SparkListenerNodeUnblacklisted): Unit
/**
* Called when the driver receives a block update info.
*/
def onBlockUpdated(blockUpdated: SparkListenerBlockUpdated): Unit
/**
* Called when other events like SQL-specific events are posted.
*/
def onOtherEvent(event: SparkListenerEvent): Unit
}
/**
* :: DeveloperApi ::
* A default implementation for `SparkListenerInterface` that has no-op implementations for
* all callbacks.
*
* Note that this is an internal interface which might change in different Spark releases.
*/
@DeveloperApi
abstract class SparkListener extends SparkListenerInterface {
override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = { }
override def onStageSubmitted(stageSubmitted: SparkListenerStageSubmitted): Unit = { }
override def onTaskStart(taskStart: SparkListenerTaskStart): Unit = { }
override def onTaskGettingResult(taskGettingResult: SparkListenerTaskGettingResult): Unit = { }
override def onTaskEnd(taskEnd: SparkListenerTaskEnd): Unit = { }
override def onJobStart(jobStart: SparkListenerJobStart): Unit = { }
override def onJobEnd(jobEnd: SparkListenerJobEnd): Unit = { }
override def onEnvironmentUpdate(environmentUpdate: SparkListenerEnvironmentUpdate): Unit = { }
override def onBlockManagerAdded(blockManagerAdded: SparkListenerBlockManagerAdded): Unit = { }
override def onBlockManagerRemoved(
blockManagerRemoved: SparkListenerBlockManagerRemoved): Unit = { }
override def onUnpersistRDD(unpersistRDD: SparkListenerUnpersistRDD): Unit = { }
override def onApplicationStart(applicationStart: SparkListenerApplicationStart): Unit = { }
override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = { }
override def onExecutorMetricsUpdate(
executorMetricsUpdate: SparkListenerExecutorMetricsUpdate): Unit = { }
override def onExecutorAdded(executorAdded: SparkListenerExecutorAdded): Unit = { }
override def onExecutorRemoved(executorRemoved: SparkListenerExecutorRemoved): Unit = { }
override def onExecutorBlacklisted(
executorBlacklisted: SparkListenerExecutorBlacklisted): Unit = { }
override def onExecutorUnblacklisted(
executorUnblacklisted: SparkListenerExecutorUnblacklisted): Unit = { }
override def onNodeBlacklisted(
nodeBlacklisted: SparkListenerNodeBlacklisted): Unit = { }
override def onNodeUnblacklisted(
nodeUnblacklisted: SparkListenerNodeUnblacklisted): Unit = { }
override def onBlockUpdated(blockUpdated: SparkListenerBlockUpdated): Unit = { }
override def onOtherEvent(event: SparkListenerEvent): Unit = { }
}
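// Illustrative sketch (not part of Spark itself): a minimal listener built on the no-op
// SparkListener above, overriding only the callback it cares about. Such a listener is
// typically registered through SparkContext#addSparkListener or the "spark.extraListeners"
// configuration.
private[spark] class StageLoggingListener extends SparkListener {
  override def onStageCompleted(stageCompleted: SparkListenerStageCompleted): Unit = {
    val info = stageCompleted.stageInfo
    // Print the completed stage together with the number of tasks it ran.
    println(s"Stage ${info.stageId} completed with ${info.numTasks} tasks")
  }
}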
| aokolnychyi/spark | core/src/main/scala/org/apache/spark/scheduler/SparkListener.scala | Scala | apache-2.0 | 11,406 |
/* - Coeus web framework -------------------------
*
* Licensed under the Apache License, Version 2.0.
*
* Author: Spiros Tzavellas
*/
package com.tzavellas.coeus.i18n.locale
import java.util.Locale
import javax.servlet.http.{ HttpServletRequest, HttpServletResponse }
/**
* Resolves the user's locale based on an attribute in <code>HttpSession</code>.
*
* @param default the <code>Locale</code> to use if one is not found in the
* <code>HttpSession</code>.
*/
class SessionLocaleResolver(default: Option[Locale] = None) extends LocaleResolver {
private[this] val ATTRIBUTE_NAME = classOf[SessionLocaleResolver].getName + ".Locale"
/**
* Get the user's locale from <code>HttpSession</code>.
*
* <p>If the attribute with the locale is not found in the <code>HttpSession</code> the
* default locale is returned. If the default locale is not specified then the locale from
* the <code>HttpServletRequest</code> is returned.</p>
*
* @param request the request to use for resolving the Locale
*/
def resolve(request: HttpServletRequest) = {
def defaultLocale = default match {
case None => request.getLocale
case Some(locale) => locale
}
val session = request.getSession(false)
if (session eq null) {
defaultLocale
} else {
val locale = session.getAttribute(ATTRIBUTE_NAME)
if (locale eq null) defaultLocale
else locale.asInstanceOf[Locale]
}
}
/**
* Set the specified locale in the <code>HttpSession</code>.
*
* <p>If the specified <code>Locale</code> is <code>null</code> then the locale's attribute
* will get removed from <code>HttpSession</code> and this resolver will return the default
   * locale in subsequent calls of the {@link #resolve(request)} method.</p>
*
* @param request the current Servlet request
* @param response the current Servlet response
* @param locale the user's locale
*/
def setLocale(request: HttpServletRequest, response: HttpServletResponse, locale: Locale) {
if (locale eq null) {
request.getSession.removeAttribute(ATTRIBUTE_NAME)
} else {
request.getSession.setAttribute(ATTRIBUTE_NAME, locale)
}
}
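  // Illustrative usage sketch (not part of the original class): a controller that lets the
  // user switch languages could store the choice once and have it picked up on subsequent
  // requests of the same session. `request` and `response` stand for the current Servlet
  // objects:
  //
  //   val resolver = new SessionLocaleResolver(default = Some(Locale.ENGLISH))
  //   resolver.setLocale(request, response, Locale.GERMAN)
  //   val locale = resolver.resolve(request) // Locale.GERMAN from now on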
} | sptz45/coeus | src/main/scala/com/tzavellas/coeus/i18n/locale/SessionLocaleResolver.scala | Scala | apache-2.0 | 2,226 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.partition
import scala.collection.JavaConverters._
import scala.collection.mutable.ListBuffer
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.datastore.filesystem.{CarbonFile, CarbonFileFilter}
import org.apache.carbondata.core.datastore.impl.FileFactory
import org.apache.carbondata.core.metadata.CarbonMetadata
import org.apache.carbondata.core.metadata.schema.table.CarbonTable
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.core.util.path.CarbonTablePath
class TestAlterPartitionTable extends QueryTest with BeforeAndAfterAll {
override def beforeAll {
dropTable
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
/**
* list_table_area_origin
* list_table_area
*/
sql("""
| CREATE TABLE IF NOT EXISTS list_table_area_origin
| (
| id Int,
| vin string,
| logdate Timestamp,
| phonenumber Long,
| country string,
| salary Int
| )
| PARTITIONED BY (area string)
| STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='LIST',
| 'LIST_INFO'='Asia, America, Europe')
""".stripMargin)
sql("""
| CREATE TABLE IF NOT EXISTS list_table_area
| (
| id Int,
| vin string,
| logdate Timestamp,
| phonenumber Long,
| country string,
| salary Int
| )
| PARTITIONED BY (area string)
| STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='LIST',
| 'LIST_INFO'='Asia, America, Europe')
""".stripMargin)
/**
* range_table_logdate_origin
* range_table_logdate
*/
sql(
"""
| CREATE TABLE IF NOT EXISTS range_table_logdate_origin
| (
| id Int,
| vin string,
| phonenumber Long,
| country string,
| area string,
| salary Int
| )
| PARTITIONED BY (logdate Timestamp)
| STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE',
| 'RANGE_INFO'='2014/01/01, 2015/01/01, 2016/01/01')
""".stripMargin)
sql(
"""
| CREATE TABLE IF NOT EXISTS range_table_logdate
| (
| id Int,
| vin string,
| phonenumber Long,
| country string,
| area string,
| salary Int
| )
| PARTITIONED BY (logdate Timestamp)
| STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE',
| 'RANGE_INFO'='2014/01/01, 2015/01/01, 2016/01/01', 'DICTIONARY_INCLUDE'='logdate')
""".stripMargin)
/**
* list_table_country_origin
* list_table_country
*/
sql(
"""
| CREATE TABLE IF NOT EXISTS list_table_country_origin
| (
| id Int,
| vin string,
| logdate Timestamp,
| phonenumber Long,
| area string,
| salary Int
| )
| PARTITIONED BY (country string)
| STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='LIST',
| 'LIST_INFO'='(China, US),UK ,Japan,(Canada,Russia, Good, NotGood), Korea ')
""".stripMargin)
sql(
"""
| CREATE TABLE IF NOT EXISTS list_table_country
| (
| id Int,
| vin string,
| logdate Timestamp,
| phonenumber Long,
| area string,
| salary Int
| )
| PARTITIONED BY (country string)
| STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='LIST',
| 'LIST_INFO'='(China, US),UK ,Japan,(Canada,Russia, Good, NotGood), Korea ')
""".stripMargin)
/**
* range_table_logdate_split_origin
* range_table_logdate_split
*/
sql(
"""
| CREATE TABLE IF NOT EXISTS range_table_logdate_split_origin
| (
| id Int,
| vin string,
| phonenumber Long,
| country string,
| area string,
| salary Int
| )
| PARTITIONED BY (logdate Timestamp)
| STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE',
| 'RANGE_INFO'='2014/01/01, 2015/01/01, 2016/01/01, 2018/01/01')
""".stripMargin)
sql(
"""
| CREATE TABLE IF NOT EXISTS range_table_logdate_split
| (
| id Int,
| vin string,
| phonenumber Long,
| country string,
| area string,
| salary Int
| )
| PARTITIONED BY (logdate Timestamp)
| STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE',
| 'RANGE_INFO'='2014/01/01, 2015/01/01, 2016/01/01, 2018/01/01',
| 'DICTIONARY_INCLUDE'='logdate')
""".stripMargin)
/**
* range_table_bucket_origin
* range_table_bucket
*/
sql(
"""
| CREATE TABLE IF NOT EXISTS range_table_bucket_origin
| (
| id Int,
| vin string,
| phonenumber Long,
| country string,
| area string,
| salary Int
| )
| PARTITIONED BY (logdate Timestamp)
| STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE',
| 'RANGE_INFO'='2014/01/01, 2015/01/01, 2016/01/01, 2018/01/01',
| 'BUCKETNUMBER'='3',
| 'BUCKETCOLUMNS'='country')
""".stripMargin)
sql(
"""
| CREATE TABLE IF NOT EXISTS range_table_bucket
| (
| id Int,
| vin string,
| phonenumber Long,
| country string,
| area string,
| salary Int
| )
| PARTITIONED BY (logdate Timestamp)
| STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE',
| 'RANGE_INFO'='2014/01/01, 2015/01/01, 2016/01/01, 2018/01/01',
| 'DICTIONARY_INCLUDE'='logdate',
| 'BUCKETNUMBER'='3',
| 'BUCKETCOLUMNS'='country')
""".stripMargin)
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/partition_data.csv' INTO TABLE list_table_area_origin OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/partition_data.csv' INTO TABLE range_table_logdate_origin OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/partition_data.csv' INTO TABLE list_table_country_origin OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/partition_data.csv' INTO TABLE range_table_logdate_split_origin OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/partition_data.csv' INTO TABLE range_table_bucket_origin OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/partition_data.csv' INTO TABLE list_table_area OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/partition_data.csv' INTO TABLE range_table_logdate OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/partition_data.csv' INTO TABLE list_table_country OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/partition_data.csv' INTO TABLE range_table_logdate_split OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
sql(s"""LOAD DATA LOCAL INPATH '$resourcesPath/partition_data.csv' INTO TABLE range_table_bucket OPTIONS('DELIMITER'= ',', 'QUOTECHAR'= '"')""")
}
test("Alter table add partition: List Partition") {
sql("""ALTER TABLE list_table_area ADD PARTITION ('OutSpace', 'Hi')""".stripMargin)
val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default_list_table_area")
val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getFactTableName)
val partitionIds = partitionInfo.getPartitionIds
val list_info = partitionInfo.getListInfo
assert(partitionIds == List(0, 1, 2, 3, 4, 5).map(Integer.valueOf(_)).asJava)
assert(partitionInfo.getMaxPartitionId == 5)
assert(partitionInfo.getNumPartitions == 6)
assert(list_info.get(0).get(0) == "Asia")
assert(list_info.get(1).get(0) == "America")
assert(list_info.get(2).get(0) == "Europe")
assert(list_info.get(3).get(0) == "OutSpace")
assert(list_info.get(4).get(0) == "Hi")
validateDataFiles("default_list_table_area", "0", Seq(1, 2, 4))
val result_after = sql("select id, vin, logdate, phonenumber, country, area, salary from list_table_area")
val result_origin = sql("select id, vin, logdate, phonenumber, country, area, salary from list_table_area_origin")
checkAnswer(result_after, result_origin)
val result_after1 = sql(s"select id, vin, logdate, phonenumber, country, area, salary from list_table_area where area < 'OutSpace' ")
val result_origin1 = sql(s"select id, vin, logdate, phonenumber, country, area, salary from list_table_area_origin where area < 'OutSpace' ")
checkAnswer(result_after1, result_origin1)
val result_after2 = sql("select id, vin, logdate, phonenumber, country, area, salary from list_table_area where area <= 'OutSpace' ")
val result_origin2 = sql("select id, vin, logdate, phonenumber, country, area, salary from list_table_area_origin where area <= 'OutSpace' ")
checkAnswer(result_after2, result_origin2)
val result_after3 = sql("select id, vin, logdate, phonenumber, country, area, salary from list_table_area where area = 'OutSpace' ")
val result_origin3 = sql("select id, vin, logdate, phonenumber, country, area, salary from list_table_area_origin where area = 'OutSpace' ")
checkAnswer(result_after3, result_origin3)
val result_after4 = sql("select id, vin, logdate, phonenumber, country, area, salary from list_table_area where area > 'OutSpace' ")
val result_origin4 = sql("select id, vin, logdate, phonenumber, country, area, salary from list_table_area_origin where area > 'OutSpace' ")
checkAnswer(result_after4, result_origin4)
val result_after5 = sql("select id, vin, logdate, phonenumber, country, area, salary from list_table_area where area >= 'OutSpace' ")
val result_origin5 = sql("select id, vin, logdate, phonenumber, country, area, salary from list_table_area_origin where area >= 'OutSpace' ")
checkAnswer(result_after5, result_origin5)
intercept[Exception] { sql("""ALTER TABLE DROP PARTITION(0)""")}
intercept[Exception] { sql("""ALTER TABLE DROP PARTITION(0) WITH DATA""")}
sql("""ALTER TABLE list_table_area DROP PARTITION(2) WITH DATA""")
val carbonTable2 = CarbonMetadata.getInstance().getCarbonTable("default_list_table_area")
val partitionInfo2 = carbonTable2.getPartitionInfo(carbonTable.getFactTableName)
val partitionIds2 = partitionInfo2.getPartitionIds
val list_info2 = partitionInfo2.getListInfo
assert(partitionIds2 == List(0, 1, 3, 4, 5).map(Integer.valueOf(_)).asJava)
assert(partitionInfo2.getMaxPartitionId == 5)
assert(partitionInfo2.getNumPartitions == 5)
assert(list_info2.get(0).get(0) == "Asia")
assert(list_info2.get(1).get(0) == "Europe")
assert(list_info2.get(2).get(0) == "OutSpace")
assert(list_info2.get(3).get(0) == "Hi")
validateDataFiles("default_list_table_area", "0", Seq(1, 4))
checkAnswer(sql("select id, vin, logdate, phonenumber, country, area, salary from list_table_area"),
sql("select id, vin, logdate, phonenumber, country, area, salary from list_table_area_origin where area <> 'America' "))
}
test("Alter table add partition: Range Partition") {
sql("""ALTER TABLE range_table_logdate ADD PARTITION ('2017/01/01', '2018/01/01')""")
val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default_range_table_logdate")
val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getFactTableName)
val partitionIds = partitionInfo.getPartitionIds
val range_info = partitionInfo.getRangeInfo
assert(partitionIds == List(0, 1, 2, 3, 4, 5).map(Integer.valueOf(_)).asJava)
assert(partitionInfo.getMaxPartitionId == 5)
assert(partitionInfo.getNumPartitions == 6)
assert(range_info.get(0) == "2014/01/01")
assert(range_info.get(1) == "2015/01/01")
assert(range_info.get(2) == "2016/01/01")
assert(range_info.get(3) == "2017/01/01")
assert(range_info.get(4) == "2018/01/01")
validateDataFiles("default_range_table_logdate", "0", Seq(1, 2, 3, 4, 5))
val result_after = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate""")
val result_origin = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_origin""")
checkAnswer(result_after, result_origin)
val result_after1 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate where logdate < cast('2017/01/12 00:00:00' as timestamp) """)
val result_origin1 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_origin where logdate < cast('2017/01/12 00:00:00' as timestamp) """)
checkAnswer(result_after1, result_origin1)
val result_after2 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate where logdate <= cast('2017/01/12 00:00:00' as timestamp) """)
val result_origin2 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_origin where logdate <= cast('2017/01/12 00:00:00' as timestamp) """)
checkAnswer(result_after2, result_origin2)
val result_after3 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate where logdate = cast('2017/01/12 00:00:00' as timestamp) """)
val result_origin3 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_origin where logdate = cast('2017/01/12 00:00:00' as timestamp) """)
checkAnswer(result_after3, result_origin3)
val result_after4 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate where logdate >= cast('2017/01/12 00:00:00' as timestamp) """)
val result_origin4 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_origin where logdate >= cast('2017/01/12 00:00:00' as timestamp) """)
checkAnswer(result_after4, result_origin4)
val result_after5 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate where logdate > cast('2017/01/12 00:00:00' as timestamp) """)
val result_origin5 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_origin where logdate > cast('2017/01/12 00:00:00' as timestamp) """)
checkAnswer(result_after5, result_origin5)
sql("""ALTER TABLE range_table_logdate DROP PARTITION(3) WITH DATA;""")
val carbonTable1 = CarbonMetadata.getInstance().getCarbonTable("default_range_table_logdate")
val partitionInfo1 = carbonTable1.getPartitionInfo(carbonTable.getFactTableName)
val partitionIds1 = partitionInfo1.getPartitionIds
val range_info1 = partitionInfo1.getRangeInfo
assert(partitionIds1 == List(0, 1, 2, 4, 5).map(Integer.valueOf(_)).asJava)
assert(partitionInfo1.getMaxPartitionId == 5)
assert(partitionInfo1.getNumPartitions == 5)
assert(range_info1.get(0) == "2014/01/01")
assert(range_info1.get(1) == "2015/01/01")
assert(range_info1.get(2) == "2017/01/01")
assert(range_info1.get(3) == "2018/01/01")
assert(range_info1.size() == 4)
validateDataFiles("default_range_table_logdate", "0", Seq(1, 2, 4, 5))
val result_after6 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate""")
val result_origin6 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_origin where logdate < '2015/01/01 00:00:00' or logdate >= '2016/01/01 00:00:00' """)
checkAnswer(result_after6, result_origin6)
}
test("test exception if invalid partition id is provided in alter command") {
sql("drop table if exists test_invalid_partition_id")
sql("CREATE TABLE test_invalid_partition_id (CUST_NAME String,ACTIVE_EMUI_VERSION string,DOB Timestamp,DOJ timestamp, " +
"BIGINT_COLUMN1 bigint,BIGINT_COLUMN2 bigint,DECIMAL_COLUMN1 decimal(30,10), DECIMAL_COLUMN2 decimal(36,10)," +
"Double_COLUMN1 double, Double_COLUMN2 double,INTEGER_COLUMN1 int) PARTITIONED BY (CUST_ID int)" +
" STORED BY 'org.apache.carbondata.format' " +
"TBLPROPERTIES ('PARTITION_TYPE'='RANGE','RANGE_INFO'='9090,9500,9800',\"TABLE_BLOCKSIZE\"= \"256 MB\")")
intercept[IllegalArgumentException] { sql("ALTER TABLE test_invalid_partition_id SPLIT PARTITION(6) INTO ('9800','9900')") }
}
test("Alter table split partition: List Partition") {
sql("""ALTER TABLE list_table_country SPLIT PARTITION(4) INTO ('Canada', 'Russia', '(Good, NotGood)')""".stripMargin)
val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default_list_table_country")
val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getFactTableName)
val partitionIds = partitionInfo.getPartitionIds
val list_info = partitionInfo.getListInfo
assert(partitionIds == List(0, 1, 2, 3, 6, 7, 8, 5).map(Integer.valueOf(_)).asJava)
assert(partitionInfo.getMaxPartitionId == 8)
assert(partitionInfo.getNumPartitions == 8)
assert(list_info.get(0).get(0) == "China")
assert(list_info.get(0).get(1) == "US")
assert(list_info.get(1).get(0) == "UK")
assert(list_info.get(2).get(0) == "Japan")
assert(list_info.get(3).get(0) == "Canada")
assert(list_info.get(4).get(0) == "Russia")
assert(list_info.get(5).get(0) == "Good")
assert(list_info.get(5).get(1) == "NotGood")
assert(list_info.get(6).get(0) == "Korea")
validateDataFiles("default_list_table_country", "0", Seq(1, 2, 3, 8))
val result_after = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country""")
val result_origin = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country_origin""")
checkAnswer(result_after, result_origin)
val result_after1 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country where country < 'NotGood' """)
val result_origin1 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country_origin where country < 'NotGood' """)
checkAnswer(result_after1, result_origin1)
val result_after2 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country where country <= 'NotGood' """)
val result_origin2 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country_origin where country <= 'NotGood' """)
checkAnswer(result_after2, result_origin2)
val result_after3 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country where country = 'NotGood' """)
val result_origin3 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country_origin where country = 'NotGood' """)
checkAnswer(result_after3, result_origin3)
val result_after4 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country where country >= 'NotGood' """)
val result_origin4 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country_origin where country >= 'NotGood' """)
checkAnswer(result_after4, result_origin4)
val result_after5 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country where country > 'NotGood' """)
val result_origin5 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country_origin where country > 'NotGood' """)
checkAnswer(result_after5, result_origin5)
sql("""ALTER TABLE list_table_country DROP PARTITION(8)""")
val carbonTable1 = CarbonMetadata.getInstance().getCarbonTable("default_list_table_country")
val partitionInfo1 = carbonTable1.getPartitionInfo(carbonTable.getFactTableName)
val partitionIds1 = partitionInfo1.getPartitionIds
val list_info1 = partitionInfo1.getListInfo
assert(partitionIds1 == List(0, 1, 2, 3, 6, 7, 5).map(Integer.valueOf(_)).asJava)
assert(partitionInfo1.getMaxPartitionId == 8)
assert(partitionInfo1.getNumPartitions == 7)
assert(list_info1.get(0).get(0) == "China")
assert(list_info1.get(0).get(1) == "US")
assert(list_info1.get(1).get(0) == "UK")
assert(list_info1.get(2).get(0) == "Japan")
assert(list_info1.get(3).get(0) == "Canada")
assert(list_info1.get(4).get(0) == "Russia")
assert(list_info1.get(5).get(0) == "Korea")
validateDataFiles("default_list_table_country", "0", Seq(0, 1, 2, 3))
val result_origin6 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country""")
val result_after6 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country_origin""")
checkAnswer(result_origin6, result_after6)
}
test("Alter table split partition with different List Sequence: List Partition") {
sql("""ALTER TABLE list_table_country ADD PARTITION ('(Part1, Part2, Part3, Part4)')""".stripMargin)
sql("""ALTER TABLE list_table_country SPLIT PARTITION(9) INTO ('Part4', 'Part2', '(Part1, Part3)')""".stripMargin)
val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default_list_table_country")
val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getFactTableName)
val partitionIds = partitionInfo.getPartitionIds
val list_info = partitionInfo.getListInfo
assert(partitionIds == List(0, 1, 2, 3, 6, 7, 5, 10, 11, 12).map(Integer.valueOf(_)).asJava)
assert(partitionInfo.getMaxPartitionId == 12)
assert(partitionInfo.getNumPartitions == 10)
assert(list_info.get(0).get(0) == "China")
assert(list_info.get(0).get(1) == "US")
assert(list_info.get(1).get(0) == "UK")
assert(list_info.get(2).get(0) == "Japan")
assert(list_info.get(3).get(0) == "Canada")
assert(list_info.get(4).get(0) == "Russia")
assert(list_info.get(5).get(0) == "Korea")
assert(list_info.get(6).get(0) == "Part4")
assert(list_info.get(7).get(0) == "Part2")
assert(list_info.get(8).get(0) == "Part1")
assert(list_info.get(8).get(1) == "Part3")
validateDataFiles("default_list_table_country", "0", Seq(0, 1, 2, 3))
val result_after = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country""")
val result_origin = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country_origin""")
checkAnswer(result_after, result_origin)
val result_after1 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country where country < 'NotGood' """)
val result_origin1 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country_origin where country < 'NotGood' """)
checkAnswer(result_after1, result_origin1)
val result_after2 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country where country <= 'NotGood' """)
val result_origin2 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country_origin where country <= 'NotGood' """)
checkAnswer(result_after2, result_origin2)
val result_after3 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country where country = 'NotGood' """)
val result_origin3 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country_origin where country = 'NotGood' """)
checkAnswer(result_after3, result_origin3)
val result_after4 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country where country >= 'NotGood' """)
val result_origin4 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country_origin where country >= 'NotGood' """)
checkAnswer(result_after4, result_origin4)
val result_after5 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country where country > 'NotGood' """)
val result_origin5 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_country_origin where country > 'NotGood' """)
checkAnswer(result_after5, result_origin5)
}
test("Alter table split partition with extra space in New SubList: List Partition") {
sql("""ALTER TABLE list_table_area ADD PARTITION ('(One,Two, Three, Four)')""".stripMargin)
sql("""ALTER TABLE list_table_area SPLIT PARTITION(6) INTO ('One', '(Two, Three )', 'Four')""".stripMargin)
val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default_list_table_area")
val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getFactTableName)
val partitionIds = partitionInfo.getPartitionIds
val list_info = partitionInfo.getListInfo
assert(partitionIds == List(0, 1, 3, 4, 5, 7, 8, 9).map(Integer.valueOf(_)).asJava)
assert(partitionInfo.getMaxPartitionId == 9)
assert(partitionInfo.getNumPartitions == 8)
assert(list_info.get(0).get(0) == "Asia")
assert(list_info.get(1).get(0) == "Europe")
assert(list_info.get(2).get(0) == "OutSpace")
assert(list_info.get(3).get(0) == "Hi")
assert(list_info.get(4).get(0) == "One")
assert(list_info.get(5).get(0) == "Two")
assert(list_info.get(5).get(1) == "Three")
assert(list_info.get(6).get(0) == "Four")
validateDataFiles("default_list_table_area", "0", Seq(1, 4))
val result_after = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_area""")
val result_origin = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_area_origin where area <> 'America' """)
checkAnswer(result_after, result_origin)
val result_after1 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_area where area < 'Four' """)
val result_origin1 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_area_origin where area < 'Four' and area <> 'America' """)
checkAnswer(result_after1, result_origin1)
val result_after2 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_area where area <= 'Four' """)
val result_origin2 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_area_origin where area <= 'Four' and area <> 'America' """)
checkAnswer(result_after2, result_origin2)
val result_after3 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_area where area = 'Four' """)
val result_origin3 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_area_origin where area = 'Four' and area <> 'America' """)
checkAnswer(result_after3, result_origin3)
val result_after4 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_area where area >= 'Four' """)
val result_origin4 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_area_origin where area >= 'Four' and area <> 'America' """)
checkAnswer(result_after4, result_origin4)
val result_after5 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_area where area > 'Four' """)
val result_origin5 = sql("""select id, vin, logdate, phonenumber, country, area, salary from list_table_area_origin where area > 'Four' and area <> 'America' """)
checkAnswer(result_after5, result_origin5)
}
test("Alter table split partition: Range Partition") {
sql("""ALTER TABLE range_table_logdate_split SPLIT PARTITION(4) INTO ('2017/01/01', '2018/01/01')""")
val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default_range_table_logdate_split")
val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getFactTableName)
val partitionIds = partitionInfo.getPartitionIds
val rangeInfo = partitionInfo.getRangeInfo
assert(partitionIds == List(0, 1, 2, 3, 5, 6).map(Integer.valueOf(_)).asJava)
assert(partitionInfo.getMaxPartitionId == 6)
assert(partitionInfo.getNumPartitions == 6)
assert(rangeInfo.get(0) == "2014/01/01")
assert(rangeInfo.get(1) == "2015/01/01")
assert(rangeInfo.get(2) == "2016/01/01")
assert(rangeInfo.get(3) == "2017/01/01")
assert(rangeInfo.get(4) == "2018/01/01")
validateDataFiles("default_range_table_logdate_split", "0", Seq(1, 2, 3, 5, 6))
val result_after = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_split""")
val result_origin = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_split_origin""")
checkAnswer(result_after, result_origin)
val result_after1 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_split where logdate < cast('2017/01/12 00:00:00' as timestamp) """)
val result_origin1 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_split_origin where logdate < cast('2017/01/12 00:00:00' as timestamp) """)
checkAnswer(result_after1, result_origin1)
val result_after2 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_split where logdate <= cast('2017/01/12 00:00:00' as timestamp) """)
val result_origin2 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_split_origin where logdate <= cast('2017/01/12 00:00:00' as timestamp) """)
checkAnswer(result_after2, result_origin2)
val result_after3 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_split where logdate = cast('2017/01/12 00:00:00' as timestamp) """)
val result_origin3 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_split_origin where logdate = cast('2017/01/12 00:00:00' as timestamp) """)
checkAnswer(result_after3, result_origin3)
val result_after4 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_split where logdate >= cast('2017/01/12 00:00:00' as timestamp) """)
val result_origin4 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_split_origin where logdate >= cast('2017/01/12 00:00:00' as timestamp) """)
checkAnswer(result_after4, result_origin4)
val result_after5 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_split where logdate > cast('2017/01/12 00:00:00' as timestamp) """)
val result_origin5 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_split_origin where logdate > cast('2017/01/12 00:00:00' as timestamp) """)
checkAnswer(result_after5, result_origin5)
sql("""ALTER TABLE range_table_logdate_split DROP PARTITION(6)""")
val carbonTable1 = CarbonMetadata.getInstance().getCarbonTable("default_range_table_logdate_split")
val partitionInfo1 = carbonTable1.getPartitionInfo(carbonTable.getFactTableName)
val partitionIds1 = partitionInfo1.getPartitionIds
val rangeInfo1 = partitionInfo1.getRangeInfo
assert(partitionIds1 == List(0, 1, 2, 3, 5).map(Integer.valueOf(_)).asJava)
assert(partitionInfo1.getMaxPartitionId == 6)
assert(partitionInfo1.getNumPartitions == 5)
assert(rangeInfo1.get(0) == "2014/01/01")
assert(rangeInfo1.get(1) == "2015/01/01")
assert(rangeInfo1.get(2) == "2016/01/01")
assert(rangeInfo1.get(3) == "2017/01/01")
assert(rangeInfo1.size() == 4)
validateDataFiles("default_range_table_logdate_split", "0", Seq(0, 1, 2, 3, 5))
val result_after6 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_split""")
val result_origin6 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_logdate_split_origin""")
checkAnswer(result_after6, result_origin6)
}
test("Alter table split partition: Range Partition + Bucket") {
sql("""ALTER TABLE range_table_bucket SPLIT PARTITION(4) INTO ('2017/01/01', '2018/01/01')""")
val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default_range_table_bucket")
val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getFactTableName)
val partitionIds = partitionInfo.getPartitionIds
val rangeInfo = partitionInfo.getRangeInfo
assert(partitionIds == List(0, 1, 2, 3, 5, 6).map(Integer.valueOf(_)).asJava)
assert(partitionInfo.getMaxPartitionId == 6)
assert(partitionInfo.getNumPartitions == 6)
assert(rangeInfo.get(0) == "2014/01/01")
assert(rangeInfo.get(1) == "2015/01/01")
assert(rangeInfo.get(2) == "2016/01/01")
assert(rangeInfo.get(3) == "2017/01/01")
assert(rangeInfo.get(4) == "2018/01/01")
validateDataFiles("default_range_table_bucket", "0", Seq(1, 2, 3, 5, 6))
val result_after = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket""")
val result_origin = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket_origin""")
checkAnswer(result_after, result_origin)
val result_after1 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket where logdate < cast('2017/01/12 00:00:00' as timestamp) """)
val result_origin1 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket_origin where logdate < cast('2017/01/12 00:00:00' as timestamp) """)
checkAnswer(result_after1, result_origin1)
val result_after2 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket where logdate <= cast('2017/01/12 00:00:00' as timestamp) """)
val result_origin2 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket_origin where logdate <= cast('2017/01/12 00:00:00' as timestamp) """)
checkAnswer(result_after2, result_origin2)
val result_origin3 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket where logdate = cast('2017/01/12 00:00:00' as timestamp) """)
val result_after3 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket_origin where logdate = cast('2017/01/12 00:00:00' as timestamp) """)
checkAnswer(result_origin3, result_after3)
val result_after4 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket where logdate >= cast('2017/01/12 00:00:00' as timestamp) """)
val result_origin4 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket_origin where logdate >= cast('2017/01/12 00:00:00' as timestamp) """)
checkAnswer(result_after4, result_origin4)
val result_after5 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket where logdate > cast('2017/01/12 00:00:00' as timestamp) """)
val result_origin5 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket_origin where logdate > cast('2017/01/12 00:00:00' as timestamp) """)
checkAnswer(result_after5, result_origin5)
sql("""ALTER TABLE range_table_bucket DROP PARTITION(6) WITH DATA""")
val carbonTable1 = CarbonMetadata.getInstance().getCarbonTable("default_range_table_bucket")
val partitionInfo1 = carbonTable1.getPartitionInfo(carbonTable.getFactTableName)
val partitionIds1 = partitionInfo1.getPartitionIds
val rangeInfo1 = partitionInfo1.getRangeInfo
assert(partitionIds1 == List(0, 1, 2, 3, 5).map(Integer.valueOf(_)).asJava)
assert(partitionInfo1.getMaxPartitionId == 6)
assert(partitionInfo1.getNumPartitions == 5)
assert(rangeInfo1.get(0) == "2014/01/01")
assert(rangeInfo1.get(1) == "2015/01/01")
assert(rangeInfo1.get(2) == "2016/01/01")
assert(rangeInfo1.get(3) == "2017/01/01")
assert(rangeInfo1.size() == 4)
validateDataFiles("default_range_table_bucket", "0", Seq(1, 2, 3, 5))
val result_after6 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket""")
    val result_origin6 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket_origin where logdate < '2017/01/01 00:00:00' or logdate >= '2018/01/01 00:00:00'""")
checkAnswer(result_after6, result_origin6)
sql("""ALTER TABLE range_table_bucket DROP PARTITION(3)""")
val carbonTable2 = CarbonMetadata.getInstance().getCarbonTable("default_range_table_bucket")
val partitionInfo2 = carbonTable2.getPartitionInfo(carbonTable.getFactTableName)
val partitionIds2 = partitionInfo2.getPartitionIds
val rangeInfo2 = partitionInfo2.getRangeInfo
assert(partitionIds2 == List(0, 1, 2, 5).map(Integer.valueOf(_)).asJava)
assert(partitionInfo2.getMaxPartitionId == 6)
assert(partitionInfo2.getNumPartitions == 4)
assert(rangeInfo2.get(0) == "2014/01/01")
assert(rangeInfo2.get(1) == "2015/01/01")
assert(rangeInfo2.get(2) == "2017/01/01")
assert(rangeInfo2.size() == 3)
validateDataFiles("default_range_table_bucket", "0", Seq(1, 2, 5))
val result_origin7 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket""")
val result_after7 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket_origin where logdate < '2017/01/01 00:00:00' or logdate >= '2018/01/01 00:00:00'""")
checkAnswer(result_origin7, result_after7)
sql("""ALTER TABLE range_table_bucket DROP PARTITION(5)""")
val carbonTable3 = CarbonMetadata.getInstance().getCarbonTable("default_range_table_bucket")
val partitionInfo3 = carbonTable3.getPartitionInfo(carbonTable.getFactTableName)
val partitionIds3 = partitionInfo3.getPartitionIds
val rangeInfo3 = partitionInfo3.getRangeInfo
assert(partitionIds3 == List(0, 1, 2).map(Integer.valueOf(_)).asJava)
assert(partitionInfo3.getMaxPartitionId == 6)
assert(partitionInfo3.getNumPartitions == 3)
assert(rangeInfo3.get(0) == "2014/01/01")
assert(rangeInfo3.get(1) == "2015/01/01")
assert(rangeInfo3.size() == 2)
validateDataFiles("default_range_table_bucket", "0", Seq(0, 1, 2))
val result_after8 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket""")
val result_origin8 = sql("""select id, vin, logdate, phonenumber, country, area, salary from range_table_bucket_origin where logdate < '2017/01/01 00:00:00' or logdate >= '2018/01/01 00:00:00'""")
checkAnswer(result_after8, result_origin8)
}
test("test exception when alter partition and the values"
+ "in range_info can not match partition column type") {
val exception_test_range_int: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_int(col1 INT, col2 STRING)
| PARTITIONED BY (col3 INT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='11,12')
""".stripMargin)
sql("ALTER TABLE test_range_int ADD PARTITION ('abc')")
}
assert(exception_test_range_int.getMessage
.contains("Data in range info must be the same type with the partition field's type"))
sql("DROP TABLE IF EXISTS test_range_smallint")
val exception_test_range_smallint: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_smallint(col1 INT, col2 STRING)
| PARTITIONED BY (col3 SMALLINT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='11,12')
""".stripMargin)
sql("ALTER TABLE test_range_smallint ADD PARTITION ('abc')")
}
assert(exception_test_range_smallint.getMessage
.contains("Data in range info must be the same type with the partition field's type"))
sql("DROP TABLE IF EXISTS test_range_float")
val exception_test_range_float: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_float(col1 INT, col2 STRING)
| PARTITIONED BY (col3 FLOAT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='1.1,2.1')
""".stripMargin)
sql("ALTER TABLE test_range_float ADD PARTITION ('abc')")
}
assert(exception_test_range_float.getMessage
.contains("Data in range info must be the same type with the partition field's type"))
sql("DROP TABLE IF EXISTS test_range_double")
val exception_test_range_double: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_double(col1 INT, col2 STRING)
| PARTITIONED BY (col3 DOUBLE) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='1000.005,2000.005')
""".stripMargin)
sql("ALTER TABLE test_range_double ADD PARTITION ('abc')")
}
assert(exception_test_range_double.getMessage
.contains("Data in range info must be the same type with the partition field's type"))
sql("DROP TABLE IF EXISTS test_range_bigint")
val exception_test_range_bigint: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_bigint(col1 INT, col2 STRING)
| PARTITIONED BY (col3 BIGINT) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='123456789,223456789')
""".stripMargin)
sql("ALTER TABLE test_range_bigint ADD PARTITION ('abc')")
}
assert(exception_test_range_bigint.getMessage
.contains("Data in range info must be the same type with the partition field's type"))
sql("DROP TABLE IF EXISTS test_range_date")
val exception_test_range_date: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_date(col1 INT, col2 STRING)
| PARTITIONED BY (col3 DATE) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='2017-06-11, 2017-06-13')
""".stripMargin)
sql("ALTER TABLE test_range_date ADD PARTITION ('abc')")
}
assert(exception_test_range_date.getMessage
.contains("Data in range info must be the same type with the partition field's type"))
sql("DROP TABLE IF EXISTS test_range_timestamp")
val exception_test_range_timestamp: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_timestamp(col1 INT, col2 STRING)
| PARTITIONED BY (col3 TIMESTAMP) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='2017/06/11, 2017/06/13')
""".stripMargin)
sql("ALTER TABLE test_range_timestamp ADD PARTITION ('abc')")
}
assert(exception_test_range_timestamp.getMessage
.contains("Data in range info must be the same type with the partition field's type"))
sql("DROP TABLE IF EXISTS test_range_decimal")
val exception_test_range_decimal: Exception = intercept[Exception] {
sql(
"""
| CREATE TABLE test_range_decimal(col1 INT, col2 STRING)
| PARTITIONED BY (col3 DECIMAL(25, 4)) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='22.22,33.33')
""".stripMargin)
sql("ALTER TABLE test_range_decimal ADD PARTITION ('abc')")
}
assert(exception_test_range_decimal.getMessage
.contains("Data in range info must be the same type with the partition field's type"))
}
test("Add partition to table in or not in default database") {
sql("DROP TABLE IF EXISTS carbon_table_default_db")
sql(
"""
| CREATE TABLE carbon_table_default_db(id INT, name STRING) PARTITIONED BY (dt STRING)
| STORED BY 'carbondata' TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='2015,2016')
""".stripMargin)
sql("ALTER TABLE carbon_table_default_db ADD PARTITION ('2017')")
val carbonTable = CarbonMetadata.getInstance().getCarbonTable("default_carbon_table_default_db")
val partitionInfo = carbonTable.getPartitionInfo(carbonTable.getFactTableName)
val partitionIds = partitionInfo.getPartitionIds
val range_info = partitionInfo.getRangeInfo
assert(partitionIds == List(0, 1, 2, 3).map(Integer.valueOf(_)).asJava)
assert(partitionInfo.getMaxPartitionId == 3)
assert(partitionInfo.getNumPartitions == 4)
assert(range_info.get(0) == "2015")
assert(range_info.get(1) == "2016")
assert(range_info.get(2) == "2017")
sql("CREATE DATABASE IF NOT EXISTS carbondb")
sql("DROP TABLE IF EXISTS carbondb.carbontable")
sql(
"""
| CREATE TABLE carbondb.carbontable(id INT, name STRING) PARTITIONED BY (dt STRING)
| STORED BY 'carbondata' TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='2015,2016')
""".stripMargin)
sql("ALTER TABLE carbondb.carbontable ADD PARTITION ('2017')")
val carbonTable1 = CarbonMetadata.getInstance().getCarbonTable("carbondb_carbontable")
val partitionInfo1 = carbonTable1.getPartitionInfo(carbonTable1.getFactTableName)
val partitionIds1 = partitionInfo1.getPartitionIds
val range_info1 = partitionInfo1.getRangeInfo
assert(partitionIds1 == List(0, 1, 2, 3).map(Integer.valueOf(_)).asJava)
assert(partitionInfo1.getMaxPartitionId == 3)
assert(partitionInfo1.getNumPartitions == 4)
assert(range_info1.get(0) == "2015")
assert(range_info1.get(1) == "2016")
assert(range_info1.get(2) == "2017")
}
test("test exception when alter partition's table doesn't exist in a perticular database") {
val exception_test_add_partition: Exception = intercept[Exception] {
sql("CREATE DATABASE IF NOT EXISTS carbondb")
sql("USE default")
sql(
"""
| CREATE TABLE carbon_table_in_default_db(id INT, name STRING)
| PARTITIONED BY (dt STRING) STORED BY 'carbondata'
| TBLPROPERTIES('PARTITION_TYPE'='RANGE', 'RANGE_INFO'='2015,2016')
""".stripMargin)
sql("ALTER TABLE carbondb.carbon_table_in_default_db ADD PARTITION ('2017')")
}
assert(exception_test_add_partition.getMessage
.contains("Table or view 'carbon_table_in_default_db' not found in database 'carbondb'"))
}
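  // Verifies that the carbondata files written for the given segment belong to exactly the expected partition ids.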
def validateDataFiles(tableUniqueName: String, segmentId: String, partitions: Seq[Int]): Unit = {
val carbonTable = CarbonMetadata.getInstance().getCarbonTable(tableUniqueName)
val dataFiles = getDataFiles(carbonTable, segmentId)
validatePartitionTableFiles(partitions, dataFiles)
}
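  // Lists the .carbondata files under the segment directory of the given table.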
def getDataFiles(carbonTable: CarbonTable, segmentId: String): Array[CarbonFile] = {
val tablePath = new CarbonTablePath(carbonTable.getStorePath, carbonTable.getDatabaseName,
carbonTable.getFactTableName)
val segmentDir = tablePath.getCarbonDataDirectoryPath("0", segmentId)
val carbonFile = FileFactory.getCarbonFile(segmentDir, FileFactory.getFileType(segmentDir))
val dataFiles = carbonFile.listFiles(new CarbonFileFilter() {
override def accept(file: CarbonFile): Boolean = {
return file.getName.endsWith(".carbondata")
}
})
dataFiles
}
  /**
   * Ensures the data files cover exactly the expected partitions: every file's partition id
   * must be expected, and every expected partition id must appear in some file.
   *
   * @param partitions expected partition ids
   * @param dataFiles  carbondata files found in the segment directory
   */
def validatePartitionTableFiles(partitions: Seq[Int], dataFiles: Array[CarbonFile]): Unit = {
val partitionIds: ListBuffer[Int] = new ListBuffer[Int]()
dataFiles.foreach { dataFile =>
val partitionId = CarbonTablePath.DataFileUtil.getTaskNo(dataFile.getName).split("_")(0).toInt
partitionIds += partitionId
assert(partitions.contains(partitionId))
}
partitions.foreach(id => assert(partitionIds.contains(id)))
}
override def afterAll = {
dropTable
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_DATE_FORMAT, "yyyy-MM-dd")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
}
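  // Drops the tables used by this suite, including the *_origin reference copies.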
def dropTable {
sql("DROP TABLE IF EXISTS list_table_area_origin")
sql("DROP TABLE IF EXISTS range_table_logdate_origin")
sql("DROP TABLE IF EXISTS list_table_country_origin")
sql("DROP TABLE IF EXISTS range_table_logdate_split_origin")
sql("DROP TABLE IF EXISTS range_table_bucket_origin")
sql("DROP TABLE IF EXISTS list_table_area")
sql("DROP TABLE IF EXISTS range_table_logdate")
sql("DROP TABLE IF EXISTS list_table_country")
sql("DROP TABLE IF EXISTS range_table_logdate_split")
sql("DROP TABLE IF EXISTS range_table_bucket")
sql("DROP TABLE IF EXISTS test_range_int")
sql("DROP TABLE IF EXISTS test_range_smallint")
sql("DROP TABLE IF EXISTS test_range_bigint")
sql("DROP TABLE IF EXISTS test_range_float")
sql("DROP TABLE IF EXISTS test_range_double")
sql("DROP TABLE IF EXISTS test_range_date")
sql("DROP TABLE IF EXISTS test_range_timestamp")
sql("DROP TABLE IF EXISTS test_range_decimal")
sql("drop table if exists test_invalid_partition_id")
}
}
| HuaweiBigData/carbondata | integration/spark2/src/test/scala/org/apache/carbondata/spark/testsuite/partition/TestAlterPartitionTable.scala | Scala | apache-2.0 | 50,672 |
/**
* Copyright 2015 Mohiva Organisation (license at mohiva dot com)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.mohiva.play.silhouette.impl.providers
import com.mohiva.play.silhouette.impl.providers.oauth1.TwitterProvider
import com.mohiva.play.silhouette.impl.providers.oauth2.{ GoogleProvider, FacebookProvider }
import com.mohiva.play.silhouette.impl.providers.openid.YahooProvider
import org.specs2.mock.Mockito
import org.specs2.specification.Scope
import play.api.test.PlaySpecification
/**
* Test case for the [[com.mohiva.play.silhouette.impl.providers.SocialProviderRegistry]] class.
*/
class SocialProviderRegistrySpec extends PlaySpecification with Mockito {
"The `get` method" should {
"return a provider by its type" in new Context {
registry.get[GoogleProvider] must beSome(providers(1))
}
"return None if no provider for the given type exists" in new Context {
registry.get[YahooProvider] must beNone
}
"return a provider by its ID as SocialProvider" in new Context {
val provider = registry.get[SocialProvider](GoogleProvider.ID)
provider must beSome.like {
case value =>
value.id must be equalTo providers(1).id
value must beAnInstanceOf[SocialProvider]
}
}
"return a provider by its ID as OAuth2Provider" in new Context {
val provider = registry.get[OAuth2Provider](GoogleProvider.ID)
provider must beSome.like {
case value =>
value.id must be equalTo providers(1).id
value must beAnInstanceOf[OAuth2Provider]
}
}
"return None if no provider for the given ID exists" in new Context {
registry.get[SocialProvider](YahooProvider.ID) must beNone
}
}
"The `getSeq` method" should {
"return a list of providers by it's sub type" in new Context {
val list = registry.getSeq[OAuth2Provider]
list(0).id must be equalTo providers(0).id
list(1).id must be equalTo providers(1).id
}
}
/**
* The context.
*/
trait Context extends Scope {
/**
* Some social providers.
*/
val providers = {
val facebook = mock[FacebookProvider]
facebook.id returns FacebookProvider.ID
val google = mock[GoogleProvider]
google.id returns GoogleProvider.ID
val twitter = mock[TwitterProvider]
twitter.id returns TwitterProvider.ID
Seq(
facebook,
google,
twitter
)
}
/**
* The registry to test.
*/
val registry = SocialProviderRegistry(providers)
}
}
| cemcatik/play-silhouette | silhouette/test/com/mohiva/play/silhouette/impl/providers/SocialProviderRegistrySpec.scala | Scala | apache-2.0 | 3,091 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.livy.test
import java.io.File
import java.net.URI
import java.util.concurrent.{TimeUnit, Future => JFuture}
import javax.servlet.http.HttpServletResponse
import scala.util.Properties
import com.fasterxml.jackson.databind.ObjectMapper
import com.fasterxml.jackson.module.scala.DefaultScalaModule
import org.apache.http.client.methods.HttpGet
import org.scalatest.BeforeAndAfterAll
import org.apache.livy._
import org.apache.livy.client.common.HttpMessages._
import org.apache.livy.sessions.SessionKindModule
import org.apache.livy.test.framework.BaseIntegrationTestSuite
import org.apache.livy.test.jobs.spark2._
import org.apache.livy.utils.LivySparkUtils
class Spark2JobApiIT extends BaseIntegrationTestSuite with BeforeAndAfterAll with Logging {
private var client: LivyClient = _
private var sessionId: Int = _
private val mapper = new ObjectMapper()
.registerModule(DefaultScalaModule)
.registerModule(new SessionKindModule())
override def afterAll(): Unit = {
super.afterAll()
if (client != null) {
client.stop(true)
}
livyClient.connectSession(sessionId).stop()
}
scalaTest("create a new session and upload test jar") {
val prevSessionCount = sessionList().total
val tempClient = createClient(livyEndpoint)
try {
// Figure out the session ID by poking at the REST endpoint. We should probably expose this
// in the Java API.
val list = sessionList()
assert(list.total === prevSessionCount + 1)
val tempSessionId = list.sessions(0).id
livyClient.connectSession(tempSessionId).verifySessionIdle()
waitFor(tempClient.uploadJar(new File(testLib)))
client = tempClient
sessionId = tempSessionId
} finally {
if (client == null) {
try {
if (tempClient != null) {
tempClient.stop(true)
}
} catch {
case e: Exception => warn("Error stopping client.", e)
}
}
}
}
scalaTest("run spark2 job") {
assume(client != null, "Client not active.")
val result = waitFor(client.submit(new SparkSessionTest()))
assert(result === 3)
}
scalaTest("run spark2 dataset job") {
assume(client != null, "Client not active.")
val result = waitFor(client.submit(new DatasetTest()))
assert(result === 2)
}
private def waitFor[T](future: JFuture[T]): T = {
future.get(60, TimeUnit.SECONDS)
}
private def sessionList(): SessionList = {
val httpGet = new HttpGet(s"$livyEndpoint/sessions/")
val r = livyClient.httpClient.execute(httpGet)
val statusCode = r.getStatusLine().getStatusCode()
val responseBody = r.getEntity().getContent
val sessionList = mapper.readValue(responseBody, classOf[SessionList])
r.close()
assert(statusCode == HttpServletResponse.SC_OK)
sessionList
}
private def createClient(uri: String): LivyClient = {
new LivyClientBuilder().setURI(new URI(uri)).build()
}
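  // Wraps `test` so the body runs only when the Scala version of the configured Spark matches the
  // Scala version this suite is compiled with; otherwise the test is skipped via `assume`.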
protected def scalaTest(desc: String)(testFn: => Unit): Unit = {
test(desc) {
val livyConf = new LivyConf()
val (sparkVersion, scalaVersion) = LivySparkUtils.sparkSubmitVersion(livyConf)
val formattedSparkVersion = LivySparkUtils.formatSparkVersion(sparkVersion)
val versionString =
LivySparkUtils.sparkScalaVersion(formattedSparkVersion, scalaVersion, livyConf)
assume(versionString == LivySparkUtils.formatScalaVersion(Properties.versionNumberString),
s"Scala test can only be run with ${Properties.versionString}")
testFn
}
}
}
| ajbozarth/incubator-livy | integration-test/src/test/spark2/scala/Spark2JobApiIT.scala | Scala | apache-2.0 | 4,372 |
package domain
object Test extends App {
Array("[email protected]", "[email protected]", "not_valid") foreach {
case Email("test", Domain("com", "test")) => println("first case")
case Email(_, Domain(_, "blah", _*)) => println("second case")
case _ => println("not valid")
}
}
| mhotchen/programming-in-scala | src/domain/Test.scala | Scala | apache-2.0 | 288 |
package com.github.simy4.xpath
package scala
import java.io.StringReader
import java.util.stream.Stream
import fixtures.FixtureAccessor
import helpers.SimpleNamespaceContext
import javax.xml.namespace.NamespaceContext
import javax.xml.xpath.{ XPathConstants, XPathExpression, XPathFactory }
import org.assertj.core.api.{ Assertions, Condition }
import org.junit.jupiter.api.extension.ExtensionContext
import org.junit.jupiter.params.ParameterizedTest
import org.junit.jupiter.params.provider.{ Arguments, ArgumentsProvider, ArgumentsSource }
import org.xml.sax.InputSource
import collection.{ mutable, Map }
import xml.{ Elem, NamespaceBinding, Node, Null, PrettyPrinter, TopScope, XML }
class DataProvider extends ArgumentsProvider {
private val namespaceContext = new SimpleNamespaceContext
private val namespaceBinding = NamespaceBinding("my", namespaceContext.getNamespaceURI("my"), TopScope)
override def provideArguments(context: ExtensionContext): Stream[_ <: Arguments] =
java.util.Arrays.stream(
Array[Arguments](
(new FixtureAccessor("simple"), null, <breakfast_menu/>),
(new FixtureAccessor("simple"), namespaceContext, <breakfast_menu/>),
(
new FixtureAccessor("ns-simple"),
namespaceContext,
Elem("my", "breakfast_menu", Null, namespaceBinding, minimizeEmpty = true)
),
(new FixtureAccessor("attr"), null, <breakfast_menu/>),
(new FixtureAccessor("attr"), namespaceContext, <breakfast_menu/>),
(new FixtureAccessor("special"), null, <records/>),
(new FixtureAccessor("special"), namespaceContext, <records/>)
)
)
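  // Implicitly turns a tuple into JUnit Arguments by spreading its product elements.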
implicit private def arguments(p: Product): Arguments =
new Arguments {
@SuppressWarnings(Array("org.wartremover.warts.Any", "org.wartremover.warts.AsInstanceOf"))
override def get(): Array[AnyRef] = p.productIterator.toArray.asInstanceOf[Array[AnyRef]]
}
}
class XmlBuilderTest {
import Assertions._
import XmlBuilderTest._
@ParameterizedTest
@ArgumentsSource(classOf[DataProvider])
def shouldBuildDocumentFromSetOfXPaths(
fixtureAccessor: FixtureAccessor,
namespaceContext: NamespaceContext,
root: Elem
): Unit = {
implicit val ns: NamespaceContext = namespaceContext
val xmlProperties = fixtureAccessor.getXmlProperties.asScala
val builtDocument = xmlProperties.keys
.foldRight(Right(Nil): Either[Throwable, List[Effect]]) { (xpath, acc) =>
acc >>= { xs =>
Effect.put(xpath).fmap(_ :: xs)
}
}
.>>=(XmlBuilder(_)(root))
.unsafeGet
val builtDocumentString = xmlToString(builtDocument)
xmlProperties.keys.foreach { xpath =>
val documentSource: InputSource = builtDocumentString
assertThat(xpath.evaluate(documentSource, XPathConstants.NODE)).isNotNull
}
    // although these cases work fine, the attribute order is not preserved
assertThat(builtDocumentString).is(
new Condition(
(xml: String) => fixtureAccessor.toString.startsWith("attr") || xml == fixtureAccessor.getPutXml,
"\\"%s\\" matches exactly",
fixtureAccessor.getPutXml
)
)
}
@ParameterizedTest
@ArgumentsSource(classOf[DataProvider])
def shouldBuildDocumentFromSetOfXPathsAndSetValues(
fixtureAccessor: FixtureAccessor,
namespaceContext: NamespaceContext,
root: Elem
): Unit = {
implicit val ns: NamespaceContext = namespaceContext
val xmlProperties = fixtureAccessor.getXmlProperties.asScala
val builtDocument = xmlProperties.toSeq
.foldRight(Right(Nil): Either[Throwable, List[Effect]]) { (pair, acc) =>
acc >>= { xs =>
Effect.putValue(pair._1, pair._2).fmap(_ :: xs)
}
}
.>>=(XmlBuilder(_)(root))
.unsafeGet
val builtDocumentString = xmlToString(builtDocument)
xmlProperties.foreach { case (xpath, value) =>
val documentSource: InputSource = builtDocumentString
assertThat(xpath.evaluate(documentSource, XPathConstants.STRING))
.as("Should evaluate XPath %s to %s", xpath, value)
.isEqualTo(value)
}
    // although these cases work fine, the attribute order is not preserved
assertThat(xmlToString(builtDocument)).is(
new Condition(
(xml: String) => fixtureAccessor.toString.startsWith("attr") || xml == fixtureAccessor.getPutValueXml,
"\\"%s\\" matches exactly",
fixtureAccessor.getPutValueXml
)
)
}
@ParameterizedTest
@ArgumentsSource(classOf[DataProvider])
def shouldModifyDocumentWhenXPathsAreNotTraversable(
fixtureAccessor: FixtureAccessor,
namespaceContext: NamespaceContext,
root: Elem
): Unit = {
implicit val ns: NamespaceContext = namespaceContext
val xmlProperties = fixtureAccessor.getXmlProperties.asScala
val xml = fixtureAccessor.getPutXml
val oldDocument = XML.loadString(xml)
val builtDocument = xmlProperties.toSeq
.foldRight(Right(Nil): Either[Throwable, List[Effect]]) { (pair, acc) =>
acc >>= { xs =>
Effect.putValue(pair._1, pair._2).fmap(_ :: xs)
}
}
.>>=(XmlBuilder(_)(oldDocument))
.unsafeGet
val builtDocumentString = xmlToString(builtDocument)
xmlProperties.foreach { case (xpath, value) =>
val documentSource: InputSource = builtDocumentString
assertThat(xpath.evaluate(documentSource, XPathConstants.STRING))
.as("Should evaluate XPath %s to %s", xpath, value)
.isEqualTo(value)
}
    // although these cases work fine, the attribute order is not preserved
assertThat(builtDocumentString).is(
new Condition(
(xml: String) => fixtureAccessor.toString.startsWith("attr") || xml == fixtureAccessor.getPutValueXml,
"\\"%s\\" matches exactly",
fixtureAccessor.getPutValueXml
)
)
}
@ParameterizedTest
@ArgumentsSource(classOf[DataProvider])
def shouldNotModifyDocumentWhenAllXPathsTraversable(
fixtureAccessor: FixtureAccessor,
namespaceContext: NamespaceContext,
root: Elem
): Unit = {
implicit val ns: NamespaceContext = namespaceContext
val xmlProperties = fixtureAccessor.getXmlProperties.asScala
val xml = fixtureAccessor.getPutValueXml
val oldDocument = XML.loadString(xml)
var builtDocument = xmlProperties.toSeq
.foldRight(Right(Nil): Either[Throwable, List[Effect]]) { (pair, acc) =>
acc >>= { xs =>
Effect.putValue(pair._1, pair._2).fmap(_ :: xs)
}
}
.>>=(XmlBuilder(_)(oldDocument))
.unsafeGet
var builtDocumentString = xmlToString(builtDocument)
xmlProperties.foreach { case (xpath, value) =>
val documentSource: InputSource = builtDocumentString
assertThat(xpath.evaluate(documentSource, XPathConstants.STRING))
.as("Should evaluate XPath %s to %s", xpath, value)
.isEqualTo(value)
}
    // although these cases work fine, the attribute order is not preserved
assertThat(builtDocumentString).is(
new Condition(
(xml: String) => fixtureAccessor.toString.startsWith("attr") || xml == fixtureAccessor.getPutValueXml,
"\\"%s\\" matches exactly",
fixtureAccessor.getPutValueXml
)
)
builtDocument = xmlProperties.keys
.foldRight(Right(Nil): Either[Throwable, List[Effect]]) { (xpath, acc) =>
acc >>= { xs =>
Effect.put(xpath).fmap(_ :: xs)
}
}
.>>=(XmlBuilder(_)(oldDocument))
.unsafeGet
builtDocumentString = xmlToString(builtDocument)
xmlProperties.foreach { case (xpath, value) =>
val documentSource: InputSource = builtDocumentString
assertThat(xpath.evaluate(documentSource, XPathConstants.STRING))
.as("Should evaluate XPath %s to %s", xpath, value)
.isEqualTo(value)
}
    // although these cases work fine, the attribute order is not preserved
assertThat(builtDocumentString).is(
new Condition(
(xml: String) => fixtureAccessor.toString.startsWith("attr") || xml == fixtureAccessor.getPutValueXml,
"\\"%s\\" matches exactly",
fixtureAccessor.getPutValueXml
)
)
}
@ParameterizedTest
@ArgumentsSource(classOf[DataProvider])
def shouldRemovePathsFromExistingXml(
fixtureAccessor: FixtureAccessor,
namespaceContext: NamespaceContext,
root: Elem
): Unit = {
implicit val ns: NamespaceContext = namespaceContext
val xmlProperties = fixtureAccessor.getXmlProperties.asScala
val xml = fixtureAccessor.getPutValueXml
val oldDocument = XML.loadString(xml)
val builtDocument = xmlProperties.keys
.foldRight(Right(Nil): Either[Throwable, List[Effect]]) { (xpath, acc) =>
acc >>= { xs =>
Effect.remove(xpath).fmap(_ :: xs)
}
}
.>>=(XmlBuilder(_)(oldDocument))
.unsafeGet
val builtDocumentString = xmlToString(builtDocument)
xmlProperties.keySet.foreach { xpath =>
val documentSource: InputSource = builtDocumentString
assertThat(xpath.evaluate(documentSource, XPathConstants.NODE))
.as("Should not evaluate XPath %s", xpath)
.isNull()
}
assertThat(builtDocumentString).isNotEqualTo(fixtureAccessor.getPutValueXml)
}
private def xmlToString(xml: Node) = {
val lineSeparator = System.lineSeparator()
val printer = new PrettyPrinter(255, 4)
val string = printer.format(xml).replaceAll(s">\\n\\\\s*(\\\\w.+?)\\n\\\\s*</", ">$1</") + "\\n"
string.replaceAll("\\n", lineSeparator)
}
implicit private def toXPathExpression(xpathString: String)(implicit nc: NamespaceContext): XPathExpression = {
val xpath = XPathFactory.newInstance().newXPath()
Option(nc).foreach(xpath.setNamespaceContext)
xpath.compile(xpathString)
}
implicit private def toInputSource(xmlString: String): InputSource = new InputSource(new StringReader(xmlString))
}
//noinspection ConvertExpressionToSAM
object XmlBuilderTest {
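  // Converts a java.util.Map into a Scala Map while preserving the original insertion order.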
implicit private[scala] class JUMapOps[K, V](private val map: java.util.Map[K, V]) extends AnyVal {
def asScala: Map[K, V] = {
val linkedHashMap = new mutable.LinkedHashMap[K, V]
val iterator = map.entrySet().iterator()
while (iterator.hasNext) {
val entry = iterator.next()
linkedHashMap += entry.getKey -> entry.getValue
}
linkedHashMap
}
}
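  // Minimal right-biased combinators (fmap, >>=, unsafeGet) for Either, used by the tests above.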
implicit private[scala] class EitherOps[+L, +R](private val either: Either[L, R]) extends AnyVal {
def fmap[RR](f: R => RR): Either[L, RR] =
>>= { r =>
Right(f(r))
}
def >>=[LL >: L, RR](f: R => Either[LL, RR]): Either[LL, RR] = either.fold(Left(_), f)
def unsafeGet(implicit ev: L <:< Throwable): R = either.fold(ex => throw ev(ex), identity)
}
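  // SAM adapter so a plain Scala function can be passed where java.util.function.Predicate is expected.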
implicit private[scala] def asJavaPredicate[A](p: A => Boolean): java.util.function.Predicate[A] =
new java.util.function.Predicate[A] {
override def test(a: A): Boolean = p(a)
}
}
| SimY4/xpath-to-xml | xpath-to-xml-scala/src/test/scala/com/github/simy4/xpath/scala/XmlBuilderTest.scala | Scala | apache-2.0 | 11,196 |
package org.pico.fp
import scala.language.higherKinds
trait Applicative[F[_]] extends Apply[F] {
def point[A](a: => A): F[A]
}
| pico-works/pico-fp | pico-fp/src/main/scala/org/pico/fp/Applicative.scala | Scala | mit | 131 |
package org.pdfextractor.algorithm
import java.awt._
import java.io.{File, InputStream}
import java.nio.charset.StandardCharsets
import java.time.LocalDate
import java.util.Locale
import net.liftweb.json.JsonAST.JValue
import net.liftweb.json._
import org.apache.commons.io.{FileUtils, IOUtils}
import org.pdfextractor.db.domain.dictionary.PaymentFieldType
import scala.collection.immutable.Map.Map2
import scala.collection.{Map, mutable}
package object io {
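  // Aliases for the raw JSON shape (field name -> sequence of x/y maps) and its parsed form (field type -> AWT points).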
type rawField2Pts = (String, Seq[Map2[String, BigInt]])
type field2Pts = (PaymentFieldType, Seq[Point])
type field2PtsMap = Map[PaymentFieldType, Seq[Point]]
def getMapFromFile(fileName: String): field2PtsMap = {
getAsMap(fileName).map(jsonAsMapEntry => {
extractEntry(jsonAsMapEntry)
})
}
private def extractEntry(jsonAsMapEntry: (Any, Any)): field2Pts = {
val field2PointsTuple: rawField2Pts =
jsonAsMapEntry.asInstanceOf[rawField2Pts]
val fieldType: PaymentFieldType =
PaymentFieldType.valueOf(field2PointsTuple._1)
val points: Seq[Point] = extractPoints(field2PointsTuple)
(fieldType, points)
}
private def extractPoints(field2PointsTuple: rawField2Pts): Seq[Point] = {
field2PointsTuple._2.map(pt => {
val x = pt.get("x").get.intValue()
val y = pt.get("y").get.intValue()
new Point(x, y)
})
}
private def getAsMap(fileName: String): Map[Any, Any] = {
val jsonAsString = getStringFromFile(fileName)
val jsonAsJValue: JValue = JsonParser.parse(jsonAsString)
val jsonAsMap: Map[Any, Any] =
jsonAsJValue.values.asInstanceOf[Map[Any, Any]]
jsonAsMap
}
def getStringFromFile(fileName: String): String = {
IOUtils.toString(getInputStreamFromFile(fileName))
}
def getInputStreamFromFile(fileName: String): InputStream = {
Thread.currentThread.getContextClassLoader.getResourceAsStream(fileName)
}
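  // Resolves a classpath folder to a java.io.File and validates that it exists, is readable and is a directory.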
def getFolderAsFile(location2: String): File = {
val url: java.net.URL =
Thread.currentThread.getContextClassLoader.getResource(location2)
val location: String = url.getFile
val file: File = new File(location)
checkFile(file, true)
file
}
def checkFile(file: File, checkIsDirectory: Boolean): Unit = {
require(file.exists, "Location '" + file + "' does not exist")
require(file.canRead,
"Location '" + file + "' is not readable for application")
require(!checkIsDirectory || file.isDirectory,
"Location '" + file + "' is not a folder")
}
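  // Writes each field's candidate points to "<FIELD_TYPE>.txt", one "x,y" pair per line.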
def writeToFiles(map: Map[PaymentFieldType, Seq[Point]]) = {
map.foreach {
case (paymentFieldType, locations) => {
val fileName = paymentFieldType.toString + ".txt"
val locationsString =
locations
.map(point => point.x + "," + point.y + System.lineSeparator)
.reduce(_ + _)
FileUtils.writeStringToFile(new File(fileName), locationsString)
}
}
}
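  // Dumps the per-field training candidates to a dated CSV: a header row taken from the first
  // candidate's keys, followed by one comma-separated row of values per candidate.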
def writeStatisticsToFiles(locale: Locale,
trainingData: mutable.Map[PaymentFieldType, mutable.Buffer[Map[String, Any]]]
): Unit = {
trainingData.foreach {
case (paymentFieldType: PaymentFieldType, candidates: mutable.Buffer[Map[String, Any]]) => {
val sb = new StringBuilder
sb.append(candidates(0).keys.reduce(_ + "," + _))
sb.append(System.lineSeparator)
        sb.append(candidates.map(_.values.reduce(_ + "," + _) + System.lineSeparator).mkString)
val fileName = "statistics-" + locale.getLanguage + "-" + paymentFieldType.toString.toLowerCase + "-" + LocalDate.now().getMonthValue + "-" + LocalDate.now().getDayOfMonth + ".csv"
FileUtils.writeStringToFile(new File(fileName), sb.toString, StandardCharsets.UTF_8)
}
}
}
}
| kveskimae/pdfalg | src/main/scala/org/pdfextractor/algorithm/io/IOHelper.scala | Scala | mit | 3,767
package forimpatient.chapter16
import scala.io.Source
import scala.xml.parsing.XhtmlParser
/**
* Created by Iryna Kharaborkina on 8/19/16.
*
* Solution to the Chapter 16 Exercise 05 'Scala for the Impatient' by Horstmann C.S.
*
* Print the names of all images in an XHTML file. That is, print all src attribute values inside img elements.
*/
object Exercise05 extends App {
println("Chapter 16 Exercise 05")
val xhtml = XhtmlParser(Source.fromURL("https://www.w3.org"))
(xhtml \\\\ "img" \\\\ "@src").foreach(println)
}
| Kiryna/Scala-for-the-Impatient | src/forimpatient/chapter16/Exercise05.scala | Scala | apache-2.0 | 538 |
/**
* Copyright (C) 2015 DANS - Data Archiving and Networked Services ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.knaw.dans.easy.solr.command
import java.io.File
import java.net.URL
import java.nio.charset.StandardCharsets
import java.nio.file.Paths
import com.yourmediashelf.fedora.client.FedoraCredentials
import resource.Using
import nl.knaw.dans.easy.solr.{Settings, SolrProviderImpl, EasyUpdateSolrIndex, FedoraProviderImpl}
object Command extends App {
val configuration = Configuration(Paths.get(System.getProperty("app.home")))
val clo = new CommandLineOptions(args, configuration)
implicit val settings: Settings = new Settings(
batchSize = clo.batchSize(),
timeout = clo.timeout(),
testMode = clo.debug(),
output = clo.output(),
datasets = clo.datasets(),
solr = SolrProviderImpl(
new URL(configuration.properties.getString("default.solr-update-url")),
s"easy-update-solr-index/${ configuration.version }",
),
fedora = FedoraProviderImpl(
new FedoraCredentials(
configuration.properties.getString("default.fcrepo-server"),
configuration.properties.getString("default.fcrepo-user"),
configuration.properties.getString("default.fcrepo-password"))))
val files = settings.datasets.filter(new File(_).exists())
val queries = settings.datasets.filter(_ startsWith "pid~")
val ids = settings.datasets.toSet -- files -- queries
EasyUpdateSolrIndex.executeBatches(ids.toSeq)
for (file <- files) {
EasyUpdateSolrIndex.executeBatches(Using.fileLines(StandardCharsets.UTF_8)(new File(file)).toSeq)
}
for (query <- queries) {
EasyUpdateSolrIndex.datasetsFromQuery(query)
}
}
| DANS-KNAW/easy-update-solr-index | command/src/main/scala/nl.knaw.dans.easy.solr.command/Command.scala | Scala | apache-2.0 | 2,229 |
/*
* Copyright 2009-2011 WorldWide Conferencing, LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.json4s
import java.util.Date
import org.json4s
import org.specs2.mutable.Specification
import text.Document
object NativeExtractionExamples extends ExtractionExamples[Document]("Native", native.Serialization) with native.JsonMethods
object JacksonExtractionExamples extends ExtractionExamples[JValue]("Jackson", jackson.Serialization) with jackson.JsonMethods
abstract class ExtractionExamples[T](mod: String, ser : json4s.Serialization) extends Specification with JsonMethods[T] {
implicit lazy val formats = DefaultFormats
val notNullFormats = new DefaultFormats {
override val allowNull = false
}
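  // Formats with short type hints so the polymorphic LeafTree hierarchy (Node, Leaf, EmptyLeaf) can be extracted.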
def treeFormats[T] = ser.formats(ShortTypeHints(List(classOf[Node[T]], classOf[Leaf[T]], EmptyLeaf.getClass)))
(mod+" Extraction Examples Specification") should {
"Extraction example" in {
val json = parse(testJson)
json.extract[Person] must_== Person("joe", Address("Bulevard", "Helsinki"), List(Child("Mary", 5, Some(date("2004-09-04T18:06:22Z"))), Child("Mazy", 3, None)))
}
"Extraction with path expression example" in {
val json = parse(testJson)
(json \\ "address").extract[Address] must_== Address("Bulevard", "Helsinki")
}
"Partial extraction example" in {
val json = parse(testJson)
json.extract[SimplePerson] must_== SimplePerson("joe", Address("Bulevard", "Helsinki"))
}
"Extract with a default value" in {
val json = parse(testJson)
(json \\ "address2").extractOrElse(Address("Tie", "Helsinki")) must_== Address("Tie", "Helsinki")
}
"Map with primitive values extraction example" in {
val json = parse(testJson)
json.extract[PersonWithMap] must_==
PersonWithMap("joe", Map("street" -> "Bulevard", "city" -> "Helsinki"))
}
"Map with object values extraction example" in {
val json = parse(twoAddresses)
json.extract[PersonWithAddresses] must_==
PersonWithAddresses("joe", Map("address1" -> Address("Bulevard", "Helsinki"),
"address2" -> Address("Soho", "London")))
}
"Mutable map extraction example" in {
val json = parse("""{ "name": "Joe" }""")
json.extract[scala.collection.Map[String, String]] must_== scala.collection.Map("name" -> "Joe")
}
"Simple value extraction example" in {
val json = parse(testJson)
json.extract[Name] must_== Name("joe")
(json \\ "children")(0).extract[Name] must_== Name("Mary")
(json \\ "children")(1).extract[Name] must_== Name("Mazy")
}
"Primitive value extraction example" in {
val json = parse(testJson)
(json \\ "name").extract[String] must_== "joe"
(json \\ "name").extractOpt[String] must_== Some("joe")
(json \\ "name").extractOpt[Int] must_== None
((json \\ "children")(0) \\ "birthdate").extract[Date] must_== date("2004-09-04T18:06:22Z")
JInt(1).extract[Int] must_== 1
JInt(1).extract[String] must_== "1"
}
"Primitive extraction example" in {
val json = parse(primitives)
json.extract[Primitives] must_== Primitives(124, 123L, 126.5, 127.5.floatValue, "128", 'symb, 125, 129.byteValue, true)
}
"Null extraction example" in {
val json = parse("""{ "name": null, "age": 5, "birthdate": null }""")
json.extract[Child] must_== Child(null, 5, None)
}
"Date extraction example" in {
val json = parse("""{"name":"e1","timestamp":"2009-09-04T18:06:22Z"}""")
json.extract[Event] must_== Event("e1", date("2009-09-04T18:06:22Z"))
}
"Timestamp extraction example" in {
val json = parse("""{"timestamp":"2009-09-04T18:06:22Z"}""")
new Date((json \\ "timestamp").extract[java.sql.Timestamp].getTime) must_== date("2009-09-04T18:06:22Z")
}
"Option extraction example" in {
val json = parse("""{ "name": null, "age": 5, "mother":{"name":"Marilyn"}}""")
json.extract[OChild] must_== OChild(None, 5, Some(Parent("Marilyn")), None)
}
"Missing JSON array can be extracted as an empty List" in {
parse(missingChildren).extract[Person] must_== Person("joe", Address("Bulevard", "Helsinki"), Nil)
}
"Multidimensional array extraction example" in {
parse(multiDimensionalArrays).extract[MultiDim] must_== MultiDim(
List(List(List(1, 2), List(3)), List(List(4), List(5, 6))),
List(List(Name("joe"), Name("mary")), List(Name("mazy"))))
}
"Flatten example with simple case class" in {
val f = Extraction.flatten(Extraction.decompose(SimplePerson("joe", Address("Bulevard", "Helsinki"))))
val e = Map(".name" -> "\\"joe\\"", ".address.street" -> "\\"Bulevard\\"", ".address.city" -> "\\"Helsinki\\"")
f must_== e
}
"Unflatten example with top level string and int" in {
val m = Map(".name" -> "\\"joe\\"", ".age" -> "32")
Extraction.unflatten(m) must_== JObject(List(JField("name",JString("joe")), JField("age",JInt(32))))
}
"Unflatten example with top level string and double" in {
val m = Map(".name" -> "\\"joe\\"", ".age" -> "32.2")
Extraction.unflatten(m) must_== JObject(List(JField("name",JString("joe")), JField("age",JDouble(32.2))))
}
"Unflatten example with two-level string properties" in {
val m = Map(".name" -> "\\"joe\\"", ".address.street" -> "\\"Bulevard\\"", ".address.city" -> "\\"Helsinki\\"")
Extraction.unflatten(m) must_== JObject(List(JField("name", JString("joe")), JField("address", JObject(List(JField("street", JString("Bulevard")), JField("city", JString("Helsinki")))))))
}
"Unflatten example with top level array" in {
val m = Map(".foo[2]" -> "2", ".foo[0]" -> "0", ".foo[1]" -> "1")
Extraction.unflatten(m) must_== JObject(List(JField("foo", JArray(List(JInt(0), JInt(1), JInt(2))))))
}
"Flatten and unflatten are symmetric" in {
val parsed = parse(testJson)
Extraction.unflatten(Extraction.flatten(parsed)) must_== parsed
}
"Flatten preserves empty sets" in {
val s = SetWrapper(Set())
Extraction.flatten(Extraction.decompose(s)).get(".set") must_== Some("[]")
}
"Flatten and unflatten are symmetric with empty sets" in {
val s = SetWrapper(Set())
Extraction.unflatten(Extraction.flatten(Extraction.decompose(s))).extract[SetWrapper] must_== s
}
"List extraction example" in {
val json = parse(testJson) \\ "children"
json.extract[List[Name]] must_== List(Name("Mary"), Name("Mazy"))
}
"Map extraction example" in {
val json = parse(testJson) \\ "address"
json.extract[Map[String, String]] must_== Map("street" -> "Bulevard", "city" -> "Helsinki")
}
"Set extraction example" in {
val json = parse(testJson) \\ "children"
json.extract[Set[Name]] must_== Set(Name("Mary"), Name("Mazy"))
}
"Seq extraction example" in {
val json = parse(testJson) \\ "children"
json.extract[Seq[Name]] must_== Seq(Name("Mary"), Name("Mazy"))
}
"Mutable set extraction example" in {
val json = parse(testJson) \\ "children"
json.extract[scala.collection.mutable.Set[Name]] must_==
scala.collection.mutable.Set(Name("Mary"), Name("Mazy"))
}
"Mutable seq extraction example" in {
val json = parse(testJson) \\ "children"
json.extract[scala.collection.mutable.Seq[Name]] must_==
scala.collection.mutable.Seq(Name("Mary"), Name("Mazy"))
}
"Extraction and decomposition are symmetric" in {
val person = parse(testJson).extract[Person]
Extraction.decompose(person).extract[Person] must_== person
}
"Extraction failure message example" in {
val json = parse("""{"city":"San Francisco"}""")
json.extract[Address] must throwA(MappingException("No usable value for street\\nDid not find value which can be converted into java.lang.String", null))
}
"Best matching constructor selection example" in {
parse("""{"name":"john","age":32,"size":"M"}""").extract[MultipleConstructors] must_==
MultipleConstructors("john", 32, Some("M"))
parse("""{"name":"john","age":32}""").extract[MultipleConstructors] must_==
MultipleConstructors("john", 32, Some("S"))
parse("""{"name":"john","foo":"xxx"}""").extract[MultipleConstructors] must_==
MultipleConstructors("john", 30, None)
parse("""{"name":"john","age":32,"size":null}""").extract[MultipleConstructors] must_==
MultipleConstructors("john", 32, None)
parse("""{"birthYear":1990,"name":"john","foo":2}""").extract[MultipleConstructors] must_==
MultipleConstructors("john", 20, None)
parse("""{"foo":2,"age":12,"size":"XS"}""").extract[MultipleConstructors] must_==
MultipleConstructors("unknown", 12, Some("XS"))
}
"Partial JSON extraction" in {
parse(stringField).extract[ClassWithJSON] must_== ClassWithJSON("one", JString("msg"))
parse(objField).extract[ClassWithJSON] must_== ClassWithJSON("one", JObject(List(JField("yes", JString("woo")))))
}
"Double can be coerced to Int or Long" in {
JDouble(2.1).extract[Int] must_== 2
JDouble(2.1).extract[Long] must_== 2L
}
"Map with nested non-polymorphic list extraction example" in {
parse("""{"a":["b"]}""").extract[Map[String, List[String]]] must_== Map("a" -> List("b"))
}
"List with nested non-polymorphic list extraction example" in {
parse("""[["a"]]""").extract[List[List[String]]] must_== List(List("a"))
}
"Complex nested non-polymorphic collections extraction example" in {
parse("""{"a":[{"b":"c"}]}""").extract[Map[String, List[Map[String, String]]]] must_== Map("a" -> List(Map("b" -> "c")))
}
"allowNull format set to false should disallow null values in extraction for class types" in {
parse("""{"name":"foobar","address":null}""").extract[SimplePerson](notNullFormats, Manifest.classType(classOf[SimplePerson])) must throwA(MappingException("No usable value for address\\nDid not find value which can be converted into org.json4s.Address", null))
}
"allowNull format set to false should disallow null values in extraction for primitive types" in {
parse("""{"name":null}""").extract[Name](notNullFormats, Manifest.classType(classOf[Name])) must throwA(MappingException("No usable value for name\\nDid not find value which can be converted into java.lang.String", null))
}
"allowNull format set to false should extract a null Option[T] as None" in {
parse("""{"name":null,"age":22}""").extract[OChild](notNullFormats, Manifest.classType(classOf[OChild])) must_== new OChild(None, 22, None, None)
}
"simple case objects should be sucessfully extracted as a singleton instance" in {
parse(emptyTree).extract[LeafTree[Int]](treeFormats, Manifest.classType(classOf[LeafTree[Int]])) must_== LeafTree.empty
}
"case objects in a complex structure should be sucessfully extracted as a singleton instance" in {
parse(tree).extract[LeafTree[Int]](treeFormats[Int], Manifest.classType(classOf[LeafTree[Int]])) must_== Node(List[LeafTree[Int]](EmptyLeaf, Node(List.empty), Leaf(1), Leaf(2)))
}
"#274 Examples with default value should be parsed" in {
val res = WithDefaultValueHolder(Seq(WithDefaultValue("Bob")))
parse("""{"values":[{"name":"Bob","gender":"male"}]}""").extract[WithDefaultValueHolder](
DefaultFormats, Manifest.classType(classOf[WithDefaultValueHolder])) must_== (res)
}
}
val testJson =
"""
{ "name": "joe",
"address": {
"street": "Bulevard",
"city": "Helsinki"
},
"children": [
{
"name": "Mary",
"age": 5,
"birthdate": "2004-09-04T18:06:22Z"
},
{
"name": "Mazy",
"age": 3
}
]
}
"""
val maryChildJson =
"""
|{
| "name": "Mary",
| "age": 5,
| "birthdate": "2004-09-04T18:06:22Z"
|}
""".stripMargin
val missingChildren =
"""
{
"name": "joe",
"address": {
"street": "Bulevard",
"city": "Helsinki"
}
}
"""
val twoAddresses =
"""
{
"name": "joe",
"addresses": {
"address1": {
"street": "Bulevard",
"city": "Helsinki"
},
"address2": {
"street": "Soho",
"city": "London"
}
}
}
"""
val primitives =
"""
{
"l": 123,
"i": 124,
"sh": 125,
"d": 126.5,
"f": 127.5,
"s": "128",
"b": 129,
"bool": true,
"sym":"symb"
}
"""
val multiDimensionalArrays =
"""
{
"ints": [[[1, 2], [3]], [[4], [5, 6]]],
"names": [[{"name": "joe"}, {"name": "mary"}], [{"name": "mazy"}]]
}
"""
val stringField =
"""
{
"name": "one",
"message": "msg"
}
"""
val objField =
"""
{
"name": "one",
"message": {
"yes": "woo"
}
}
"""
val emptyTree =
"""
|{
| "jsonClass":"EmptyLeaf$"
|}
""".stripMargin
val tree =
"""
|{
| "jsonClass":"Node",
| "children":[
| {
| "jsonClass":"EmptyLeaf$"
| },
| {
| "jsonClass":"Node",
| "children":[]
| },
| {
| "jsonClass":"Leaf",
| "value":1
| },
| {
| "jsonClass":"Leaf",
| "value":2
| }
| ]
|}
""".stripMargin
def date(s: String) = DefaultFormats.dateFormat.parse(s).get
}
case class SetWrapper(set: Set[String])
case class Person(name: String, address: Address, children: List[Child])
case class Address(street: String, city: String)
case class Child(name: String, age: Int, birthdate: Option[java.util.Date])
case class SimplePerson(name: String, address: Address)
case class PersonWithMap(name: String, address: Map[String, String])
case class PersonWithAddresses(name: String, addresses: Map[String, Address])
case class Name(name: String)
case class Primitives(i: Int, l: Long, d: Double, f: Float, s: String, sym: Symbol, sh: Short, b: Byte, bool: Boolean)
case class OChild(name: Option[String], age: Int, mother: Option[Parent], father: Option[Parent])
case class Parent(name: String)
case class Event(name: String, timestamp: Date)
case class MultiDim(ints: List[List[List[Int]]], names: List[List[Name]])
case class MultipleConstructors(name: String, age: Int, size: Option[String]) {
def this(name: String) = this(name, 30, None)
def this(age: Int, name: String) = this(name, age, Some("S"))
def this(name: String, birthYear: Int) = this(name, 2010 - birthYear, None)
def this(size: Option[String], age: Int) = this("unknown", age, size)
}
case class ClassWithJSON(name: String, message: JValue)
sealed trait LeafTree[+T]
object LeafTree {
def empty[T] : LeafTree[T] = EmptyLeaf
}
case class Node[T](children : List[LeafTree[T]]) extends LeafTree[T]
case class Leaf[T](value : T) extends LeafTree[T]
case object EmptyLeaf extends LeafTree[Nothing]
case class WithDefaultValueHolder(values: Seq[WithDefaultValue])
case class WithDefaultValue(name: String, gender: String = "male")
| karolx/json4s | tests/src/test/scala/org/json4s/ExtractionExamplesSpec.scala | Scala | apache-2.0 | 15,602 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.expressions
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.codegen._
import org.apache.spark.sql.types._
/**
 * A literal value that is not foldable. Used in expression codegen testing to test code paths
* that behave differently based on foldable values.
*/
case class NonFoldableLiteral(value: Any, dataType: DataType)
extends LeafExpression with CodegenFallback {
override def foldable: Boolean = false
override def nullable: Boolean = true
override def toString: String = if (value != null) value.toString else "null"
override def eval(input: InternalRow): Any = value
override def genCode(ctx: CodeGenContext, ev: GeneratedExpressionCode): String = {
Literal.create(value, dataType).genCode(ctx, ev)
}
}
object NonFoldableLiteral {
def apply(value: Any): NonFoldableLiteral = {
val lit = Literal(value)
NonFoldableLiteral(lit.value, lit.dataType)
}
def create(value: Any, dataType: DataType): NonFoldableLiteral = {
val lit = Literal.create(value, dataType)
NonFoldableLiteral(lit.value, lit.dataType)
}
}
| ArvinDevel/onlineAggregationOnSparkV2 | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/expressions/NonFoldableLiteral.scala | Scala | apache-2.0 | 1,954 |
package models
package object db {
object AccountRole extends Enumeration {
val normal, admin = Value
}
}
| asciiu/halo | arbiter/app/models/db/package.scala | Scala | mit | 116 |
package models.daos
import java.util.UUID
import com.mohiva.play.silhouette.api.LoginInfo
import models.User
import scala.concurrent.Future
/**
* Give access to the user object.
*/
trait UserDao {
/**
* Finds a user by its login info.
*
* @param loginInfo The login info of the user to find.
* @return The found user or None if no user for the given login info could be found.
*/
def find(loginInfo: LoginInfo): Future[Option[User]]
/**
* Finds a user by its user ID.
*
* @param userID The ID of the user to find.
* @return The found user or None if no user for the given ID could be found.
*/
def find(userID: UUID): Future[Option[User]]
def findByApiKey(apiKey: UUID): Option[User]
/**
* Saves a user.
*
* @param user The user to save.
* @return The saved user.
*/
def save(user: User): Future[User]
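  // Illustrative usage (an added sketch, not part of the original source; the provider id and
  // email are placeholder values): userDao.find(LoginInfo("credentials", "user@example.com"))
  // yields a Future[Option[User]] that is None when no such user exists.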
}
| sne11ius/geotracker-service | app/models/daos/UserDao.scala | Scala | gpl-3.0 | 873 |
/*
* Copyright 2012 Twitter Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.twitter.zipkin.config
trait StaticResourceConfig {
val pathPrefix: String = "/public"
val resourceType: String
val remoteResources: Seq[String]
val localResources: Seq[String]
lazy val resources = remoteResources ++
localResources.map { r =>
"%s/%s/%s".format(pathPrefix, resourceType, r)
}
}
| tangyang/zipkin | zipkin-web/src/main/scala/com/twitter/zipkin/config/StaticResourceConfig.scala | Scala | apache-2.0 | 917 |
package pl.szymonmatejczyk.myerson
import com.twitter.cassovary.graph.{DirectedGraph, GraphDir}
import pl.szymonmatejczyk.subgraphsampling.{SubgraphsStructure, Subgraph}
import scala.collection
import scala.collection.immutable.BitSet
import scala.collection.mutable
import scala.util.Random
/**
* Created by szymonmatejczyk on 21.04.15.
*/
trait ValuationFunction {
def apply(s: collection.Set[Int]): Double
def name: String
}
trait MyersonValuation extends ValuationFunction {
def graph: DirectedGraph
val cc = new SubgraphsStructure(graph)
def forCC(s: collection.Set[Int]): Double
}
class SumOfOverConnectedComponentsValuation(val graph: DirectedGraph, v: ValuationFunction) extends MyersonValuation {
def name = "SOCC_" + v.name
override def apply(s: collection.Set[Int]): Double = {
cc.components(s, GraphDir.OutDir).iterator.map(v.apply).sum
}
override def forCC(s: collection.Set[Int]): Double = {
v(s)
}
}
class SizeValuation extends ValuationFunction {
def name = "sizeValuation"
override def apply(s: collection.Set[Int]): Double = s.size
}
trait Squared extends ValuationFunction {
abstract override def apply(s: collection.Set[Int]): Double = {
val v = super.apply(s)
v * v
}
}
class NeighbourhoodSize(graph: DirectedGraph) extends ValuationFunction {
def name = getClass.getCanonicalName
override def apply(s: collection.Set[Int]): Double = {
val sg = Subgraph(graph, s)
sg.neighbors().size
}
}
class BigConnectedPremium(graph: DirectedGraph) extends ValuationFunction {
def name = getClass.getCanonicalName
override def apply(s: collection.Set[Int]): Double = {
val cc = new SubgraphsStructure(graph)
if (cc.isConnected(s, GraphDir.OutDir)) {
1.0 / (graph.nodeCount - s.size + 1)
} else {
0.0
}
}
}
class CompletelyDefinedValuation(val v: collection.Map[collection.Set[Int], Double], n: String) extends ValuationFunction {
def name = n
override def apply(s: collection.Set[Int]): Double = v(s)
}
object RandomValuationsGenerator {
val rand = new Random()
def gaussian(mi: Double, stdDev: Double) = mi + (rand.nextGaussian() * stdDev)
def binomial(successProb: Double): Double = if (rand.nextDouble() > successProb) 1 else 0
def unif(a: Double, b: Double) = a + rand.nextDouble() * (b - a)
private def independentValuation(name: String, gen: (collection.Set[Int] => Double))(n: Int): ValuationFunction = {
val m = mutable.Map[collection.Set[Int], Double]()
(0 until n).toSet.subsets().foreach {
s => m += ((s, gen(s)))
}
new CompletelyDefinedValuation(m, name)
}
def uniform(n: Int): ValuationFunction = independentValuation("uniform", x => rand.nextDouble() * x.size)(n)
def normal(n: Int): ValuationFunction = independentValuation("normal", x => gaussian(10 * x.size, 0.1))(n)
def modifiedUniform(n: Int): ValuationFunction = independentValuation("uniform+", x => rand.nextDouble() * 10 * x.size +
binomial(0.2) * unif(0, 50))(n)
def modifiedNormal(n: Int) = independentValuation("normal+", x => gaussian(10 * x.size, 0.01) + binomial(0.2) * unif(0, 50))(n)
def superadditiveUniform(n: Int, maxSynergy: Double): CompletelyDefinedValuation = {
val players = BitSet() ++ (0 until n)
val v = mutable.Map[collection.Set[Int], Double]()
v(BitSet()) = 0.0
(1 to n).foreach {
size => players.subsets(size).foreach {
subset =>
var min = 0.0
subset.subsets().filter(x => x.size != 0 && x.size != subset.size).foreach {
s =>
min = math.max(min, v(s) + v(subset -- s))
}
v(subset) = unif(min, min + maxSynergy)
}
}
new CompletelyDefinedValuation(v, "superadditiveUniform")
}
/** Monotone submodular **/
def submodularUniform(n: Int, maxSingleton: Double): CompletelyDefinedValuation = {
val players = BitSet() ++ (0 until n)
val v = mutable.Map[collection.Set[Int], Double]()
v(BitSet()) = 0.0
(1 to n).foreach {
size => players.subsets(size).foreach {
subset =>
// min for monotonicity
var min = 0.0
subset.subsets().filter(x => x.size != 0 && x.size != subset.size).foreach {
s =>
min = math.max(min, v(s))
}
// max for submodularity
var max = maxSingleton * subset.size
subset.foreach {
deleted =>
subset.filter(_ != deleted).foreach {
other =>
max = math.min(max, v(subset - deleted) + v(subset - other) - v (subset - other - deleted))
}
}
v(subset) = unif(min, max)
}
}
new CompletelyDefinedValuation(v, "submodularUniform")
}
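  // Note on the bounds used above (an added explanatory sketch, not from the original source):
  // monotonicity requires v(S) >= v(T) for every subset T of S, giving the lower bound `min`;
  // submodularity requires v(S - a) + v(S - b) >= v(S) + v(S - a - b) for all a, b in S,
  // i.e. v(S) <= v(S - a) + v(S - b) - v(S - a - b), which is exactly the upper bound `max`.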
/** Supermodular **/
def supermodularUniform(n: Int, maxSingle: Double): CompletelyDefinedValuation = {
val players = BitSet() ++ (0 until n)
val v = mutable.Map[collection.Set[Int], Double]()
v(BitSet()) = 0.0
(1 to n).foreach {
size => players.subsets(size).foreach {
subset =>
var min = 0.0
          // min for supermodularity
subset.foreach {
deleted =>
subset.filter(_ != deleted).foreach {
other =>
min = math.max(min, v(subset - deleted) + v(subset - other) - v (subset - other - deleted))
}
}
v(subset) = unif(min, min + maxSingle)
}
}
new CompletelyDefinedValuation(v, "supermodularUniform")
}
}
| szymonm/subgraphSampling | src/main/scala/pl/szymonmatejczyk/myerson/ValuationFunction.scala | Scala | apache-2.0 | 5,532 |
package xyz.hyperreal.bvm
import scala.collection.immutable
object VMLazyListClass extends VMClass with VMBuilder {
val parent: VMClass = VMObjectClass
val name: String = "LazyList"
val extending: List[VMType] = List(parent)
val members: Map[Symbol, VMMember] = Map()
override def build(iterator: Iterator[VMObject]): VMObject =
new VMSeq(immutable.ArraySeq.from(iterator))
val clas: VMClass = VMClassClass
}
class VMLazyList(list: LazyList[VMObject])
extends VMObject
with VMNonResizableSequence
with VMNonMap
with VMNonUpdatable
with VMUnordered
with VMNonSet {
val clas: VMClass = VMSeqClass
def apply(idx: VMObject): VMObject = list(idx.asInstanceOf[VMNumber].value.intValue)
def size: Int = list.length
def iterator: Iterator[VMObject] = list.iterator
def append(elem: VMObject): VMObject = new VMLazyList(list :+ elem)
def concat(iterable: VMObject): VMObject = new VMLazyList(list :++ iterable.iterator)
def head: VMObject = list.head
def tail: VMObject = new VMLazyList(list.tail)
override def toString = s"LazyList(${list take 100 mkString ", "}${if (list isDefinedAt 101) ", ..." else ""})"
}
| edadma/funl | bvm/src/main/scala/xyz/hyperreal/bvm/VMLazyListClass.scala | Scala | mit | 1,175 |
package com.akkademy
import akka.actor.ActorSystem
import akka.pattern.ask
import akka.util.Timeout
import com.akkademy.messages.{GetRequest, SetRequest}
import scala.concurrent.duration._
class SClient(remoteAddress: String){
private implicit val timeout = Timeout(2 seconds)
private implicit val system = ActorSystem("LocalSystem")
private val remoteDb = system.actorSelection(s"akka.tcp://akkademy@$remoteAddress/user/akkademy-db")
def set(key: String, value: Object) = {
remoteDb ? SetRequest(key, value)
}
def get(key: String) = {
remoteDb ? GetRequest(key)
}
}
| jasongoodwin/learning-akka | ch2/akkademy-db-client-scala/src/main/scala/com/akkademy/SClient.scala | Scala | apache-2.0 | 593 |
object Colors {
import scala.Console._
lazy val isANSISupported = {
Option(System.getProperty("sbt.log.noformat")).map(_ != "true").orElse {
Option(System.getProperty("os.name"))
.map(_.toLowerCase)
.filter(_.contains("windows"))
.map(_ => false)
}.getOrElse(true)
}
def red(str: String): String = if (isANSISupported) (RED + str + RESET) else str
def blue(str: String): String = if (isANSISupported) (BLUE + str + RESET) else str
def cyan(str: String): String = if (isANSISupported) (CYAN + str + RESET) else str
def green(str: String): String = if (isANSISupported) (GREEN + str + RESET) else str
def magenta(str: String): String = if (isANSISupported) (MAGENTA + str + RESET) else str
def white(str: String): String = if (isANSISupported) (WHITE + str + RESET) else str
def black(str: String): String = if (isANSISupported) (BLACK + str + RESET) else str
def yellow(str: String): String = if (isANSISupported) (YELLOW + str + RESET) else str
}
| fehmicansaglam/elastic-streams | project/Colors.scala | Scala | apache-2.0 | 1,011 |
import scala.annotation.tailrec
import scala.{Stream => _, _}
/**
* Created by philwill on 18/01/15.
*/
object Chapter5 {
sealed trait Stream[+A] {
final def headOption: Option[A] = this match {
case Empty => None
case Cons(h, t) => Some(h())
}
// 5.1
final def toList: List[A] = {
@tailrec
def _toList(sofar: List[A], s: () => Stream[A]): List[A] =
s() match {
case Empty => sofar
case Cons(h, t) => _toList(sofar :+ h(), t)
}
_toList(Nil: List[A], () => this)
}
// 5.2 - Not tail recursive
final def take(n: Int): Stream[A] = {
def _take(s: () => Stream[A], n: Int): Stream[A] = if (n < 0) ???
else if (n == 0) Stream.empty[A]
else
s() match {
case Empty => ???
case Cons(h, t) => Cons(h, () => _take(t, n - 1))
}
_take(() => this, n)
}
final def drop(n: Int): Stream[A] = {
@tailrec
def _drop(s: () => Stream[A], n: Int): Stream[A] = if (n < 0) ???
else if (n == 0) s()
else
s() match {
case Empty => ???
case Cons(h, t) => _drop(t, n - 1)
}
_drop(() => this, n)
}
// 5.3
final def takeWhile(p: A => Boolean): Stream[A] = {
def _take(s: () => Stream[A], p: A => Boolean): Stream[A] =
s() match {
case Cons(h, t) if (p(h())) => Cons(h, () => _take(t, p))
case _ => Stream.empty[A]
}
_take(() => this, p)
}
final def exists(p: A => Boolean): Boolean = {
@tailrec
def _exists(s: Stream[A], p: (A) => Boolean): Boolean = s match {
case Cons(h, t) if (p(h())) => true
case Cons(h, t) => _exists(t(), p)
case Empty => false
}
_exists(this, p)
}
final def foldLeft[B](z: => B)(f: (=> B, A) => B): B = {
@tailrec
def _foldLeft(s: () => Stream[A], z: => B)(f: (=> B, A) => B): B = s() match {
case Empty => z
case Cons(h, t) => _foldLeft(t, f(z, h()))(f)
}
_foldLeft(() => this, z)(f)
}
final def foldRight[B](z: => B)(f: (A, => B) => B): B = this match {
case Cons(h, t) => f(h(), t().foldRight(z)(f))
case Empty => z
}
// 5.4
final def forAll(p: (A) => Boolean): Boolean = {
@tailrec
def _forAll(s: Stream[A], p: (A) => Boolean): Boolean = s match {
case Cons(h, t) if (p(h())) => _forAll(t(), p)
case Empty => true
case _ => false
}
_forAll(this, p)
}
// 5.5
final def takeWhileRight(p: A => Boolean): Stream[A] = this.foldRight[Stream[A]](Stream.empty[A])((a, b) => if (p(a)) Cons(() => a, () => b) else Stream.empty[A])
// 5.6
final def headOptionRight: Option[A] = this.foldRight[Option[A]](None)((a, b) => Some(a))
// 5.7
final def map[B](f: A => B): Stream[B] = this.foldRight(Stream.empty[B])((a, b) => Cons(() => f(a), () => b))
final def filter(f: A => Boolean): Stream[A] = this.foldRight(Stream.empty[A])((a, b) => if (f(a)) Cons(() => a, () => b) else b)
final def append[B >: A](others: => Stream[B]): Stream[B] = this.foldRight[Stream[B]](others)((a, b) => Cons(() => a, () => b))
final def flatMap[B](f: A => Stream[B]): Stream[B] = this.foldRight[Stream[B]](Stream.empty[B])((a, b) => f(a).append(b))
// 5.13
final def mapUnfold[B](f: A => B): Stream[B] = Stream.unfold[B, () => Stream[A]](() => this)((a) => a() match {
case Cons(h, t) => Some(f(h()), t)
case Empty => None
})
final def takeUnfold(n: Int): Stream[A] = Stream.unfold[A, (Int, () => Stream[A])]((n, () => this))((a) => {
val n = a._1
val s = a._2
if (n < 0) ???
else if (n == 0) None
else
s() match {
case Empty => ???
case Cons(h, t) => Some(h(), (n - 1, () => t()))
}
})
final def takeWhileUnfold(p: A => Boolean): Stream[A] = Stream.unfold(() => this)((a) => {
a() match {
case Cons(h, t) if (p(h())) => Some(h(), () => t())
case _ => None
}
})
final def zipWith[B, C](bs: Stream[B])(f: (A, B) => C): Stream[C] = Stream.unfold[C, (() => Stream[A], () => Stream[B])](() => this, () => bs)((a) => {
val as = a._1
val bs = a._2
as() match {
case Cons(ah, at) => bs() match {
case Cons(bh, bt) => Some(f(ah(), bh()), (at, bt))
case _ => None
}
case _ => None
}
})
final def zipAll[B](bs: Stream[B]): Stream[(Option[A], Option[B])] = Stream.unfold[(Option[A], Option[B]), (() => Stream[A], () => Stream[B])](() => this, () => bs)((a) => {
val as = a._1
val bs = a._2
as() match {
case Cons(ah, at) => bs() match {
case Cons(bh, bt) => Some((Some(ah()), Some(bh())), (at, bt))
case _ => Some((Some(ah()), None), (at, bs))
}
case _ => bs() match {
case Cons(bh, bt) => Some((None, Some(bh())), (as, bt))
case _ => None
}
}
})
// 5.14
final def startsWith[A](s: Stream[A]): Boolean = zipWith[A, Boolean](s)((a, b) => a == b).forAll(_ == true)
// 5.15
final def tails: Stream[Stream[A]] = Stream.unfold[Stream[A], Option[() => Stream[A]]](Some(() => this))((as) => {
as match {
case Some(a) => a() match {
case Cons(ah, at) => Some(a(), Some(at))
case _ => Some(a(), None)
}
case None => None
}
})
// 5.16
final def scanRight[B](z: => B)(f: (A, => B) => B): Stream[B] = this match {
case Cons(h, t) => {
lazy val tail = t().scanRight(z)(f)
Stream.cons(f(h(), tail.headOption match { case Some(b) => b case None => z}), tail)
}
case Empty => Stream(z)
}
// Can't use Stream.unfold to implement scanRight with the same performance as above because it inherently traverses
// left-to-right and provides nowhere to stash the in-progress tail. It is possible to use the tail-recursive
// implementation of foldLeft as follows:
final def scanRightByFoldingLeft[B](z: => B)(f: (A, => B) => B): Stream[B] = {
this.foldLeft[Stream[B] => Stream[B]]((a) => a)((bf, a) => (b: Stream[B]) => {
lazy val bbf = bf
bbf(b match {
case Cons(h, t) => {
println("a")
lazy val zz = f(a, h())
Cons(() => zz, () => b)
}
case Empty => ???
})
})(Stream(z))
}
// I notice that foldLeft wasn't in the book or in any exercise so here it is with foldRight:
final def scanRightByFoldingRight[B](z: => B)(f: (A, => B) => B): Stream[B] = {
// Makes foldRight behave like foldLeft; we saw this in Chapter3
def fl[A, B](as: Stream[A], z: B)(f: (B, A) => B): B = {
as.foldRight((b: B) => b)((a, g) => (b) => {
lazy val gf = g; gf(f(b, a))
})(z)
}
fl[A, Stream[B] => Stream[B]](this, (a) => a)((bf, a) => (b: Stream[B]) => {
lazy val bbf = bf
bbf(b match {
case Cons(h, t) => {
println("a")
lazy val zz = f(a, h())
Cons(() => zz, () => b)
}
case Empty => ???
})
})(Stream(z))
}
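    // Illustrative check of the scanRight semantics above (an added example, not from the original text):
    //   Stream(1, 2, 3).scanRight(0)(_ + _).toList == List(6, 5, 3, 0)
    // i.e. each element is the fold of the corresponding suffix of the stream.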
}
case object Empty extends Stream[Nothing]
case class Cons[+A](h: () => A, t: () => Stream[A]) extends Stream[A]
object Stream {
def cons[A](h: => A, t: => Stream[A]): Stream[A] = {
lazy val head = h
lazy val tail = t
Cons(() => head, () => tail)
}
def empty[A]: Stream[A] = Empty
def apply[A](as: A*): Stream[A] = if (as.isEmpty) empty else cons(as.head, apply(as.tail: _*))
// 5.8
def constant[A](c: A): Stream[A] = {
lazy val co: Stream[A] = Stream.cons(c, co)
co
}
// 5.9
def from(n: Int): Stream[Int] = Stream.cons(n, from(n + 1))
// 5.10
def fibs: Stream[Int] = {
def _fibs(n1: Int, n2: Int): Stream[Int] = {
Stream.cons(n1, _fibs(n2, n1 + n2))
}
_fibs(0, 1)
}
// 5.11
def unfold[A, S](z: S)(f: S => Option[(A, S)]): Stream[A] = {
val hOption = f(z)
hOption match {
case None => Stream.empty[A]
case Some(t) => cons(t._1, unfold(t._2)(f))
}
}
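    // Illustrative usage (an added example, not part of the original exercises):
    //   unfold(1)(s => Some((s, s + 1))) produces the infinite stream 1, 2, 3, ...
    // where the state `s` is the next value to emit.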
// 5.12
def ones: Stream[Int] = unfold(1)((a) => Some(a, a))
def fibsUnfold: Stream[Int] = unfold[Int, (Int, Int)]((0, 1))((ns) => Some(ns._1, (ns._2, ns._1 + ns._2)))
def fromUnfold(n: Int): Stream[Int] = unfold[Int, Int](n)((n) => Some(n, n + 1))
def constantUnfold[A](n: A): Stream[A] = unfold[A, A](n)((n) => Some(n, n))
}
}
| WillerZ/functional-programming-in-scala | src/main/scala/Chapter5.scala | Scala | unlicense | 8,644 |
package com.twitter.finagle.offload
import com.twitter.app.GlobalFlag
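// Added note (an assumption based on GlobalFlag's usual package-plus-object naming convention,
// not stated in the original source): this flag would typically be set on the command line as
// -com.twitter.finagle.offload.queueSize=<n>.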
object queueSize
extends GlobalFlag[Int](
Int.MaxValue,
"Experimental flag. When offload filter is enabled, its queue is bounded by this value (default is" +
"Int.MaxValue or unbounded). Any excess work that can't be offloaded due to the queue overflow is run" +
"on IO (Netty) threads instead. Thus, when set, this flag enforces the backpressure on the link between" +
"Netty (producer) and your application (consumer)."
)
| twitter/finagle | finagle-core/src/main/scala/com/twitter/finagle/offload/queueSize.scala | Scala | apache-2.0 | 540 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.columnar
import org.apache.commons.lang3.StringUtils
import org.apache.spark.network.util.JavaUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.analysis.MultiInstanceRelation
import org.apache.spark.sql.catalyst.expressions._
import org.apache.spark.sql.catalyst.plans.QueryPlan
import org.apache.spark.sql.catalyst.plans.logical
import org.apache.spark.sql.catalyst.plans.logical.{HintInfo, Statistics}
import org.apache.spark.sql.execution.SparkPlan
import org.apache.spark.storage.StorageLevel
import org.apache.spark.util.LongAccumulator
object InMemoryRelation {
def apply(
useCompression: Boolean,
batchSize: Int,
storageLevel: StorageLevel,
child: SparkPlan,
tableName: Option[String],
statsOfPlanToCache: Statistics): InMemoryRelation =
new InMemoryRelation(child.output, useCompression, batchSize, storageLevel, child, tableName)(
statsOfPlanToCache = statsOfPlanToCache)
}
/**
* CachedBatch is a cached batch of rows.
*
* @param numRows The total number of rows in this batch
* @param buffers The buffers for serialized columns
 * @param stats The statistics of the columns
*/
private[columnar]
case class CachedBatch(numRows: Int, buffers: Array[Array[Byte]], stats: InternalRow)
case class InMemoryRelation(
output: Seq[Attribute],
useCompression: Boolean,
batchSize: Int,
storageLevel: StorageLevel,
@transient child: SparkPlan,
tableName: Option[String])(
@transient var _cachedColumnBuffers: RDD[CachedBatch] = null,
val sizeInBytesStats: LongAccumulator = child.sqlContext.sparkContext.longAccumulator,
statsOfPlanToCache: Statistics)
extends logical.LeafNode with MultiInstanceRelation {
override protected def innerChildren: Seq[SparkPlan] = Seq(child)
override def doCanonicalize(): logical.LogicalPlan =
copy(output = output.map(QueryPlan.normalizeExprId(_, child.output)),
storageLevel = StorageLevel.NONE,
child = child.canonicalized,
tableName = None)(
_cachedColumnBuffers,
sizeInBytesStats,
statsOfPlanToCache)
override def producedAttributes: AttributeSet = outputSet
@transient val partitionStatistics = new PartitionStatistics(output)
override def computeStats(): Statistics = {
if (sizeInBytesStats.value == 0L) {
// Underlying columnar RDD hasn't been materialized, use the stats from the plan to cache.
// Note that we should drop the hint info here. We may cache a plan whose root node is a hint
// node. When we lookup the cache with a semantically same plan without hint info, the plan
// returned by cache lookup should not have hint info. If we lookup the cache with a
// semantically same plan with a different hint info, `CacheManager.useCachedData` will take
// care of it and retain the hint info in the lookup input plan.
statsOfPlanToCache.copy(hints = HintInfo())
} else {
Statistics(sizeInBytes = sizeInBytesStats.value.longValue)
}
}
// If the cached column buffers were not passed in, we calculate them in the constructor.
// As in Spark, the actual work of caching is lazy.
if (_cachedColumnBuffers == null) {
buildBuffers()
}
private def buildBuffers(): Unit = {
val output = child.output
val cached = child.execute().mapPartitionsInternal { rowIterator =>
new Iterator[CachedBatch] {
def next(): CachedBatch = {
val columnBuilders = output.map { attribute =>
ColumnBuilder(attribute.dataType, batchSize, attribute.name, useCompression)
}.toArray
var rowCount = 0
var totalSize = 0L
while (rowIterator.hasNext && rowCount < batchSize
&& totalSize < ColumnBuilder.MAX_BATCH_SIZE_IN_BYTE) {
val row = rowIterator.next()
// Added for SPARK-6082. This assertion can be useful for scenarios when something
// like Hive TRANSFORM is used. The external data generation script used in TRANSFORM
          // may result in malformed rows, causing ArrayIndexOutOfBoundsException, which is somewhat
// hard to decipher.
assert(
row.numFields == columnBuilders.length,
s"Row column number mismatch, expected ${output.size} columns, " +
s"but got ${row.numFields}." +
s"\\nRow content: $row")
var i = 0
totalSize = 0
while (i < row.numFields) {
columnBuilders(i).appendFrom(row, i)
totalSize += columnBuilders(i).columnStats.sizeInBytes
i += 1
}
rowCount += 1
}
sizeInBytesStats.add(totalSize)
val stats = InternalRow.fromSeq(
columnBuilders.flatMap(_.columnStats.collectedStatistics))
CachedBatch(rowCount, columnBuilders.map { builder =>
JavaUtils.bufferToArray(builder.build())
}, stats)
}
def hasNext: Boolean = rowIterator.hasNext
}
}.persist(storageLevel)
cached.setName(
tableName.map(n => s"In-memory table $n")
.getOrElse(StringUtils.abbreviate(child.toString, 1024)))
_cachedColumnBuffers = cached
}
def withOutput(newOutput: Seq[Attribute]): InMemoryRelation = {
InMemoryRelation(
newOutput, useCompression, batchSize, storageLevel, child, tableName)(
_cachedColumnBuffers, sizeInBytesStats, statsOfPlanToCache)
}
override def newInstance(): this.type = {
new InMemoryRelation(
output.map(_.newInstance()),
useCompression,
batchSize,
storageLevel,
child,
tableName)(
_cachedColumnBuffers,
sizeInBytesStats,
statsOfPlanToCache).asInstanceOf[this.type]
}
def cachedColumnBuffers: RDD[CachedBatch] = _cachedColumnBuffers
override protected def otherCopyArgs: Seq[AnyRef] =
Seq(_cachedColumnBuffers, sizeInBytesStats, statsOfPlanToCache)
}
| brad-kaiser/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/columnar/InMemoryRelation.scala | Scala | apache-2.0 | 6,867 |
package euler
package til70
import euler._
object Euler61 extends EulerProblem {
override def result = {
val polys = (1000 to 9999) filter isPoly
val cyclics = for {
p1 <- polys
p2 <- polys if p1 % 100 == p2 / 100
p3 <- polys if p2 % 100 == p3 / 100
p4 <- polys if p3 % 100 == p4 / 100
p5 <- polys if p4 % 100 == p5 / 100
p6 <- polys if p5 % 100 == p6 / 100 && p6 % 100 == p1 / 100
} yield Seq(p1, p2, p3, p4, p5, p6)
val res = cyclics find { xs =>
      // each number could be of more than one polygonal kind
val ss = xs map sides
val sidesCombinations = for {
s1 <- ss(0)
s2 <- ss(1)
s3 <- ss(2)
s4 <- ss(3)
s5 <- ss(4)
s6 <- ss(5)
} yield Seq(s1, s2, s3, s4, s5, s6)
sidesCombinations.exists(_.toSet.size == 6)
}
res.get.sum
}
def isPoly(n: Int) = {
(3 to 8).find(s => isNgonal(s, n)).isDefined
}
def sides(n: Int) = {
(3 to 8) collect { case s: Int if isNgonal(s, n) => s }
}
}
| TrustNoOne/Euler | scala/src/main/scala/euler/til70/Euler61.scala | Scala | mit | 1,047 |
object overloaded {
def f(x: String): String = x
def f[T >: Null](x: T): Int = 1
val x1 = f("abc")
val x2 = f(new Integer(1))
val x3 = f(null)
val x4: String => String = f
val x5: String => Any = f
val x6: Any = f _
def g(): Int = 1
def g(x: Int): Int = 2
val y1: Int => Int = g
val y2: Any = g _
println(g)
val xs = List("a", "b")
xs.mkString
}
| DarkDimius/dotty | tests/pos/overloaded.scala | Scala | bsd-3-clause | 396 |
/*
* Copyright 2013 Maurício Linhares
*
* Maurício Linhares licenses this file to you under the Apache License,
* version 2.0 (the "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at:
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*/
package com.github.mauricio.async.db.mysql.binary
import _root_.io.netty.buffer.ByteBuf
import com.github.mauricio.async.db.exceptions.BufferNotFullyConsumedException
import com.github.mauricio.async.db.mysql.message.server.ColumnDefinitionMessage
import com.github.mauricio.async.db.util._
import scala.collection.mutable.ArrayBuffer
import scala.collection.Seq
object BinaryRowDecoder {
final val log = Log.get[BinaryRowDecoder]
final val BitMapOffset = 9
}
class BinaryRowDecoder {
//import BinaryRowDecoder._
def decode(
buffer: ByteBuf,
columns: Seq[ColumnDefinitionMessage]
): Array[Any] = {
//log.debug("columns are {} - {}", buffer.readableBytes(), columns)
//log.debug( "decoding row\\n{}", MySQLHelper.dumpAsHex(buffer))
//PrintUtils.printArray("bitmap", buffer)
val nullCount = (columns.size + 9) / 8
val nullBitMask = new Array[Byte](nullCount)
buffer.readBytes(nullBitMask)
var nullMaskPos = 0
var bit = 4
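    // Added note for clarity (our understanding of the MySQL binary protocol, not from the original
    // source): the null bitmap is (columnCount + 7 + 2) / 8 bytes long and its first two bits are
    // reserved, hence the (columns.size + 9) / 8 byte count above and the mask starting at bit 4 (1 << 2).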
val row = new ArrayBuffer[Any](columns.size)
var index = 0
while (index < columns.size) {
if ((nullBitMask(nullMaskPos) & bit) != 0) {
row += null
} else {
val column = columns(index)
//log.debug(s"${decoder.getClass.getSimpleName} - ${buffer.readableBytes()}")
//log.debug("Column value [{}] - {}", value, column.name)
row += column.binaryDecoder.decode(buffer)
}
bit <<= 1
if ((bit & 255) == 0) {
bit = 1
nullMaskPos += 1
}
index += 1
}
//log.debug("values are {}", row)
if (buffer.readableBytes() != 0) {
throw new BufferNotFullyConsumedException(buffer)
}
row.toArray
}
}
| dripower/postgresql-async | mysql-async/src/main/scala/com/github/mauricio/async/db/mysql/binary/BinaryRowDecoder.scala | Scala | apache-2.0 | 2,360 |
// Copyright: 2010 - 2016 https://github.com/ensime/ensime-server/graphs
// Licence: http://www.gnu.org/licenses/gpl-3.0.en.html
package org.ensime.fixture
import akka.actor.ActorSystem
import org.ensime.api._
import org.ensime.indexer.{ EnsimeVFS, SearchService }
import scala.concurrent._
import scala.concurrent.duration._
trait IsolatedSearchServiceFixture extends IsolatedSourceResolverFixture {
def withSearchService(testCode: (EnsimeConfig, SearchService) => Any)(implicit actorSystem: ActorSystem, vfs: EnsimeVFS): Any = withSourceResolver { (config, resolver) =>
val searchService = new SearchService(config, resolver)
try {
testCode(config, searchService)
} finally {
Await.ready(searchService.shutdown(), Duration.Inf)
actorSystem.shutdown()
actorSystem.awaitTermination(10.seconds)
}
}
}
trait SharedSearchServiceFixture
extends SharedEnsimeVFSFixture
with SharedSourceResolverFixture {
this: SharedTestKitFixture =>
private[fixture] var _search: SearchService = _
override def beforeAll(): Unit = {
super.beforeAll()
implicit val system = _testkit.system
_search = new SearchService(_config, _resolver)
}
override def afterAll(): Unit = {
Await.ready(_search.shutdown(), Duration.Inf)
super.afterAll()
}
def withSearchService(
testCode: (EnsimeConfig, SearchService) => Any
): Unit = testCode(_config, _search)
def withSearchService(testCode: SearchService => Any): Unit = testCode(_search)
}
| j-mckitrick/ensime-sbt | src/sbt-test/ensime-sbt/ensime-server/core/src/it/scala/org/ensime/fixture/SearchServiceFixture.scala | Scala | apache-2.0 | 1,509 |
package filodb.downsampler.chunk
import java.time.Instant
import java.time.format.DateTimeFormatter
import kamon.Kamon
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import filodb.coordinator.KamonShutdownHook
import filodb.downsampler.DownsamplerContext
/**
*
* Goal: Downsample all real-time data.
* Goal: Align chunks when this job is run in multiple DCs so that cross-dc repairs can be done.
* Non-Goal: Downsampling of non-real time data or data with different epoch.
*
* Strategy is to run this spark job every 6 hours at 8am, 2pm, 8pm, 2am UTC each day.
*
* Run at 8am: We query data with ingestionTime from 10pm to 8am.
* Then query and downsample data with userTime between 12am to 6am.
* Downsampled chunk would have an ingestionTime of 12am.
* Run at 2pm: We query data with ingestionTime from 4am to 2pm.
* Then query and downsample data with userTime between 6am to 12pm.
* Downsampled chunk would have an ingestionTime of 6am.
* Run at 8pm: We query data with ingestionTime from 10am to 8pm.
* Then query and downsample data with userTime between 12pm to 6pm.
* Downsampled chunk would have an ingestionTime of 12pm.
* Run at 2am: We query data with ingestionTime from 4pm to 2am.
* Then query and downsample data with userTime between 6pm to 12am.
* Downsampled chunk would have an ingestionTime of 6pm.
*
* This will cover all data with userTime 12am to 12am.
* Since we query for a broader ingestionTime, it will include data arriving early/late by 2 hours.
*
* Important Note: The reason non-real-time data is not included in goals is because we
* want chunk alignment between DCs in downsampled data to enable cross-dc repair without chunk surgery.
* Without chunk-alignment in raw data and consistency in behavior across DCs, it would be difficult
* to achieve chunk alignment in downsampled data. Once we solve that (deferred problem), we will
* lift the constraint.
*/
object DownsamplerMain extends App {
Kamon.init() // kamon init should be first thing in driver jvm
val settings = new DownsamplerSettings()
val batchDownsampler = new BatchDownsampler(settings)
val d = new Downsampler(settings, batchDownsampler)
val sparkConf = new SparkConf(loadDefaults = true)
d.run(sparkConf)
}
class Downsampler(settings: DownsamplerSettings, batchDownsampler: BatchDownsampler) extends Serializable {
// Gotcha!! Need separate function (Cannot be within body of a class)
// to create a closure for spark to serialize and move to executors.
// Otherwise, config values below were not being sent over.
// See https://medium.com/onzo-tech/serialization-challenges-with-spark-and-scala-a2287cd51c54
// scalastyle:off method.length
def run(sparkConf: SparkConf): SparkSession = {
val spark = SparkSession.builder()
.appName("FiloDBDownsampler")
.config(sparkConf)
.getOrCreate()
DownsamplerContext.dsLogger.info(s"Spark Job Properties: ${spark.sparkContext.getConf.toDebugString}")
// Use the spark property spark.filodb.downsampler.user-time-override to override the
// userTime period for which downsampling should occur.
// Generally disabled, defaults the period that just ended prior to now.
// Specified during reruns for downsampling old data
val userTimeInPeriod: Long = spark.sparkContext.getConf
.getOption("spark.filodb.downsampler.userTimeOverride") match {
// by default assume a time in the previous downsample period
case None => System.currentTimeMillis() - settings.downsampleChunkDuration
// examples: 2019-10-20T12:34:56Z or 2019-10-20T12:34:56-08:00
case Some(str) => Instant.from(DateTimeFormatter.ISO_OFFSET_DATE_TIME.parse(str)).toEpochMilli()
}
val userTimeStart: Long = (userTimeInPeriod / settings.downsampleChunkDuration) * settings.downsampleChunkDuration
val userTimeEndExclusive: Long = userTimeStart + settings.downsampleChunkDuration
val ingestionTimeStart: Long = userTimeStart - settings.widenIngestionTimeRangeBy.toMillis
val ingestionTimeEnd: Long = userTimeEndExclusive + settings.widenIngestionTimeRangeBy.toMillis
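    // Worked example (illustrative, assuming the default 6-hour downsampleChunkDuration and 2-hour widening):
    // userTimeInPeriod = 2019-10-20T14:35Z floors to userTimeStart = 2019-10-20T12:00Z,
    // userTimeEndExclusive = 2019-10-20T18:00Z, and the ingestion-time scan covers 10:00Z to 20:00Z.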
val downsamplePeriodStr = java.time.Instant.ofEpochMilli(userTimeStart).toString
DownsamplerContext.dsLogger.info(s"This is the Downsampling driver. Starting downsampling job " +
s"rawDataset=${settings.rawDatasetName} for " +
s"userTimeInPeriod=${java.time.Instant.ofEpochMilli(userTimeInPeriod)} " +
s"ingestionTimeStart=${java.time.Instant.ofEpochMilli(ingestionTimeStart)} " +
s"ingestionTimeEnd=${java.time.Instant.ofEpochMilli(ingestionTimeEnd)} " +
s"userTimeStart=$downsamplePeriodStr " +
s"userTimeEndExclusive=${java.time.Instant.ofEpochMilli(userTimeEndExclusive)}")
DownsamplerContext.dsLogger.info(s"To rerun this job add the following spark config: " +
s""""spark.filodb.downsampler.userTimeOverride": "${java.time.Instant.ofEpochMilli(userTimeInPeriod)}"""")
val splits = batchDownsampler.rawCassandraColStore.getScanSplits(batchDownsampler.rawDatasetRef)
DownsamplerContext.dsLogger.info(s"Cassandra split size: ${splits.size}. We will have this many spark " +
s"partitions. Tune num-token-range-splits-for-scans if parallelism is low or latency is high")
KamonShutdownHook.registerShutdownHook()
spark.sparkContext
.makeRDD(splits)
.mapPartitions { splitIter =>
Kamon.init()
KamonShutdownHook.registerShutdownHook()
val rawDataSource = batchDownsampler.rawCassandraColStore
val batchIter = rawDataSource.getChunksByIngestionTimeRangeNoAsync(
datasetRef = batchDownsampler.rawDatasetRef,
splits = splitIter, ingestionTimeStart = ingestionTimeStart,
ingestionTimeEnd = ingestionTimeEnd,
userTimeStart = userTimeStart, endTimeExclusive = userTimeEndExclusive,
maxChunkTime = settings.rawDatasetIngestionConfig.storeConfig.maxChunkTime.toMillis,
batchSize = settings.batchSize,
cassFetchSize = settings.cassFetchSize)
batchIter
}
.foreach { rawPartsBatch =>
Kamon.init()
KamonShutdownHook.registerShutdownHook()
batchDownsampler.downsampleBatch(rawPartsBatch, userTimeStart, userTimeEndExclusive)
}
DownsamplerContext.dsLogger.info(s"Chunk Downsampling Driver completed successfully for downsample period " +
s"$downsamplePeriodStr")
val jobCompleted = Kamon.counter("chunk-migration-completed")
.withTag("downsamplePeriod", downsamplePeriodStr)
jobCompleted.increment()
val downsampleHourStartGauge = Kamon.gauge("chunk-downsampler-period-start-hour")
.withTag("downsamplePeriod", downsamplePeriodStr)
downsampleHourStartGauge.update(userTimeStart / 1000 / 60 / 60)
Thread.sleep(62000) // quick & dirty hack to ensure that the completed metric gets published
spark
}
}
| filodb/FiloDB | spark-jobs/src/main/scala/filodb/downsampler/chunk/DownsamplerMain.scala | Scala | apache-2.0 | 7,068 |
// -*- mode: Scala;-*-
// Filename: State.scala
// Authors: lgm
// Creation: Wed Mar 12 14:09:06 2014
// Copyright: Not supplied
// Description:
// ------------------------------------------------------------------------
package com.biosimilarity.lift.lib.monad
object StateMonad {
import MonadicEvidence._
case class State[S, +V]( f : ( S ) => ( S, V ) ) {
def apply( s : S ) : ( S, V ) = f( s )
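    // Illustrative example (an added sketch, not from the original source): a counter action can be
    // written as State[Int, Int]( s => ( s + 1, s ) ) -- it returns the current counter value and
    // threads the incremented state to the next computation.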
}
implicit def stateFunctor[S]() : Functor[({type L[A] = State[S,A]})#L] =
new Functor[({type L[A] = State[S,A]})#L] {
def fmap[V, P >: V, U]( f : P => U ) : State[S,P] => State[S,U] = {
( s : State[S,P] ) => {
new State[S,U](
( ns : S ) => {
val ( state, value ) = s( ns )
( state, f( value ) )
}
)
}
}
}
implicit def stateMonad[S]() = new Monad[({type L[A] = State[S,A]})#L]{
def apply[V]( data : V ) = new State(( s : S ) => ( s, data ) )
def flatten[V]( m : State[S,State[S, V]] ) : State[S,V] =
new State[S, V](
( s : S ) => {
val ( sp, mp ) = m( s )
mp( sp )
}
)
}
}
| leithaus/strategies | src/main/scala/com/biosimilarity/lib/monad/State.scala | Scala | cc0-1.0 | 1,155 |
package hbase
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hbase.HBaseConfiguration
import org.apache.hadoop.hbase.HConstants._
import Config._
import scala.concurrent.duration._
case class Config private[hbase](props: Map[String, String] = Map.empty) {
def put[A](key: Key[A], value: A)(implicit conv: ValueConverter[A]) = copy(props.updated(key.name, conv.to(value)))
def get[A](key: Key[A])(implicit conv: ValueConverter[A]): Option[A] = props.get(key.name).map(conv.from(_))
def toConfiguration(): Configuration = {
val conf = HBaseConfiguration.create()
props.foreach{case (k, v) => conf.set(k, v)}
conf
}
}
object Config {
def create() = apply()
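  // Illustrative usage (an added sketch, not from the original source):
  //   val conf = Config.create()
  //     .put(ZookeeperQuorum, List("zk1", "zk2"))
  //     .put(ZookeeperClientPort, 2181)
  //     .toConfiguration()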
sealed abstract class Key[A](val name: String)
object ZookeeperQuorum extends Key[List[String]](ZOOKEEPER_QUORUM)
object ZookeeperClientPort extends Key[Int](ZOOKEEPER_CLIENT_PORT)
object ZookeeperTimeout extends Key[FiniteDuration](ZK_SESSION_TIMEOUT)
object ZookeeperMaxConnection extends Key[Int](ZOOKEEPER_MAX_CLIENT_CNXNS)
object ClientInstanceId extends Key[Int](HBASE_CLIENT_INSTANCE_ID)
object ClientIpcPoolSize extends Key[Int](HBASE_CLIENT_IPC_POOL_SIZE)
object ClientIpcPoolType extends Key[String](HBASE_CLIENT_IPC_POOL_TYPE)
/*
TODO: 0.96 and higher.. Use string constants instead?
object ClientMaxTasksPerRegion extends Key[Int](HBASE_CLIENT_MAX_PERREGION_TASKS)
object ClientMaxTasksPerServer extends Key[Int](HBASE_CLIENT_MAX_PERSERVER_TASKS)
object ClientMaxTasks extends Key[Int](HBASE_CLIENT_MAX_TOTAL_TASKS)
object ClientMetaOperationTimeout extends Key[FiniteDuration](HBASE_CLIENT_META_OPERATION_TIMEOUT)
object ClientScannerCaching extends Key[Int](HBASE_CLIENT_SCANNER_CACHING)
object ClientScannerTimeout extends Key[FiniteDuration](HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD)
*/
object ClientOperationTimeout extends Key[FiniteDuration](HBASE_CLIENT_OPERATION_TIMEOUT)
object ClientPause extends Key[Int](HBASE_CLIENT_PAUSE)
object ClientPrefetchLimit extends Key[Int](HBASE_CLIENT_PREFETCH_LIMIT)
object ClientRetriesNumber extends Key[Int](HBASE_CLIENT_RETRIES_NUMBER)
object ClientScannerMaxResultSize extends Key[Int](HBASE_CLIENT_SCANNER_MAX_RESULT_SIZE_KEY)
trait ValueConverter[A] {
def to(input: A): String
def from(string: String): A
}
object ValueConverter {
implicit object StringValueConverter extends ValueConverter[String] {
def to(input: String) = input
def from(string: String) = string
}
implicit object IntValueConverter extends ValueConverter[Int] {
def to(input: Int) = input.toString
def from(string: String) = string.toInt
}
implicit object FiniteDurationValueConverter extends ValueConverter[FiniteDuration] {
def to(input: FiniteDuration) = input.toMillis.toString
def from(string: String) = new FiniteDuration(string.toLong, java.util.concurrent.TimeUnit.MILLISECONDS)
}
class ListValueConverter[A](conv: ValueConverter[A]) extends ValueConverter[List[A]] {
def to(input: List[A]): String = input.map(conv.to(_)).mkString("",",","")
def from(string: String) = string.split(",").map(conv.from(_)).toList
}
implicit object StringListValueConverter extends ListValueConverter[String](StringValueConverter)
}
}
| hamnis/hbase-scala | src/main/scala/hbase/Config.scala | Scala | apache-2.0 | 3,313 |
package gitbucket.core.model
trait SshKeyComponent { self: Profile =>
import profile.api._
lazy val SshKeys = TableQuery[SshKeys]
class SshKeys(tag: Tag) extends Table[SshKey](tag, "SSH_KEY") {
val userName = column[String]("USER_NAME")
val sshKeyId = column[Int]("SSH_KEY_ID", O AutoInc)
val title = column[String]("TITLE")
val publicKey = column[String]("PUBLIC_KEY")
def * = (userName, sshKeyId, title, publicKey) <> (SshKey.tupled, SshKey.unapply)
def byPrimaryKey(userName: String, sshKeyId: Int) = (this.userName === userName.bind) && (this.sshKeyId === sshKeyId.bind)
}
}
case class SshKey(
userName: String,
sshKeyId: Int = 0,
title: String,
publicKey: String
)
| gencer/gitbucket | src/main/scala/gitbucket/core/model/SshKey.scala | Scala | apache-2.0 | 715 |
/*
* Copyright (c) 2017. Yuriy Stul
*/
package com.stulsoft.ysps.ptuple
/**
* @author Yuriy Stul
*/
private object CompareTuple extends App {
private def t1(): Unit = {
println("==>t1")
var t1 = (1.0, 2.0)
var t2 = (1.0, 2.0)
println(s"t1 == t2 is ${t1 == t2}")
println(s"t1.equals(t2) is ${t1.equals(t2)}")
t1 = (1.0, 2.0)
t2 = (1.001, 2.0)
println(s"t1 == t2 is ${t1 == t2}")
println(s"t1.equals(t2) is ${t1.equals(t2)}")
println("<==t1")
}
println("==>main")
t1()
println("<==main")
}
| ysden123/ysps | src/main/scala/com/stulsoft/ysps/ptuple/CompareTuple.scala | Scala | mit | 548 |
package cookbook.finagle
import com.twitter.finagle.Http
import com.twitter.finagle.http.Request
import com.twitter.util.Await.result
import io.fintrospect.filters.RequestFilters.AddUserAgent
object SSL_Client_Example extends App {
val client = Http.client.withTls("api.github.com").newService("api.github.com:443")
val request = Request("/users/daviddenton/repos")
println(result(AddUserAgent("Fintrospect client").andThen(client)(request)).contentString)
} | daviddenton/fintrospect | src/test/scala/cookbook/finagle/SSL_Client_Example.scala | Scala | apache-2.0 | 469 |
package eventstore
package akka
class StreamMetadataITest extends TestConnection {
"StreamMetadata" should {
"return empty content for empty stream" in new TestScope {
val content = connection.getStreamMetadata(streamId).await_
content mustEqual Content.Empty
}
"return empty content for non empty stream" in new TestScope {
appendEventToCreateStream()
val content = connection.getStreamMetadata(streamId).await_
content mustEqual Content.Empty
}
"return empty metadata when stream deleted" in new TestScope {
appendEventToCreateStream()
deleteStream()
val content = connection.getStreamMetadata(streamId).await_
content mustEqual Content.Empty
}
"set stream metadata" in new TestScope {
appendEventToCreateStream()
val result = connection.setStreamMetadata(streamId, metadata).await_
result map { _.nextExpectedVersion } must beSome(ExpectedVersion.Exact(0))
}
"return stream metadata" in new TestScope {
appendEventToCreateStream()
connection.setStreamMetadata(streamId, metadata).await_
val content = connection.getStreamMetadata(streamId).await_
content shouldEqual metadata
}
}
private trait TestScope extends TestConnectionScope {
val connection = new EsConnection(actor, system)
val metadata = Content.Json(""" { "test": "test" } """)
}
}
| EventStore/EventStore.JVM | client/src/test/scala/eventstore/akka/StreamMetadataITest.scala | Scala | bsd-3-clause | 1,406 |
package ru.pavkin.todoist.api.utils
import org.scalatest.prop.Checkers
import org.scalatest.{FunSuite, FlatSpec, Matchers}
import shapeless.{::, HNil}
import shapeless.test.illTyped
class NotContainsSpec extends FunSuite with Checkers {
test("NotContains") {
NotContainsConstraint[HNil, Int]
NotContainsConstraint[String :: Int :: HNil, Boolean]
illTyped("""NotContains[Int::HNil, Int]""")
illTyped("""NotContains[Boolean :: Int :: HNil, Int]""")
illTyped("""NotContains[String :: Boolean :: Int :: HNil, String]""")
}
}
| vpavkin/todoist-api-scala | tests/src/test/scala/ru/pavkin/todoist/api/utils/NotContainsSpec.scala | Scala | mit | 550 |
package org.codeswarm.polymap
import org.scalatest._
class Examples extends FreeSpec {
"Simple person example" in {
case class Person(id: Int, name: String)
val people = new PolyMap[Person] {
val byId = index(_.id)
val byName = index(_.name)
}
people += (Person(1, "Alice"), Person(2, "Bob"), Person(3, "Alice"))
info(people.byName("Alice").map(_.id).toSeq.sorted.mkString(", "))
//
// Result:
//
// 1, 3
//
}
"Extended person example" in {
// Person class with an identifier, name, and age.
case class Person(id: Int, name: String, age: Int) {
override def toString: String = "%s (age %d)".format(name, age)
}
// Order people by id (just to make the output more readable).
implicit object PersonOrdering extends Ordering[Person] {
def compare(x: Person, y: Person): Int = x.id compare y.id
}
// This PolyMap is a collection of people.
val people = new PolyMap[Person] {
val byId = index(_.id) // We might want to look up people by id number,
val byName = index(_.name) // or we also might want to look up by name.
// Print the collection as a list of id and name.
override def toString(): String =
toSeq.sorted.map(p => "%d-%s".format(p.id, p.name)).mkString(", ")
}
// Add some people to the collection.
people += (
Person(1, "Alice", 24),
Person(2, "Bob", 47),
Person(3, "Alice", 32),
Person(4, "Eve", 12)
)
// Print the inital state of the collection.
info("All people: " + people.toString())
//
// Result:
//
// All people: 1-Alice, 2-Bob, 3-Alice, 4-Eve
//
// Find people named Alice.
val alices = people.byName("Alice")
info("People named Alice: " + alices.toSeq.sorted.map(_.id).mkString(", "))
//
// Result:
//
// People named Alice: 1, 3
//
// Find the person with id 4.
people.byId(4) foreach { person4 => info("Person 4: " + person4) }
//
// Result:
//
// Person 4: Eve (age 12)
//
// Remove people named Alice from the collection.
people.byName.remove("Alice")
// Print the new state of the collection with Alices removed.
info("All people: " + people.toString())
//
// Result:
//
// All people: 2-Bob, 4-Eve
//
}
} | chris-martin/polymap | src/test/scala/Examples.scala | Scala | apache-2.0 | 2,376 |
package BIDMach.mixins
import BIDMat.{Mat,SBMat,CMat,DMat,FMat,IMat,HMat,GMat,GIMat,GSMat,SMat,SDMat}
import BIDMat.MatFunctions._
import BIDMat.SciFunctions._
import BIDMach.models._
@SerialVersionUID(100L)
abstract class Mixin(val opts:Mixin.Opts = new Mixin.Options) extends Serializable {
val options = opts
var modelmats:Array[Mat] = null
var updatemats:Array[Mat] = null
def compute(mats:Array[Mat], step:Float)
def score(mats:Array[Mat], step:Float):FMat
def init(model:Model) = {
modelmats = model.modelmats
updatemats = model.updatemats
}
}
object Mixin {
trait Opts extends BIDMat.Opts {}
class Options extends Opts {}
}
| jamesjia94/BIDMach | src/main/scala/BIDMach/mixins/Mixin.scala | Scala | bsd-3-clause | 696 |
package org.jetbrains.sbt
package annotator
import com.intellij.lang.annotation.{AnnotationHolder, Annotator}
import com.intellij.psi.PsiElement
import org.jetbrains.plugins.scala.lang.psi.ScalaPsiUtil
import org.jetbrains.plugins.scala.lang.psi.api.base.ScLiteral
import org.jetbrains.plugins.scala.lang.psi.api.expr.{ScInfixExpr, ScReferenceExpression}
import org.jetbrains.plugins.scala.lang.psi.impl.base.ScLiteralImpl
import org.jetbrains.plugins.scala.project.ProjectPsiElementExt
import org.jetbrains.plugins.scala.util.NotificationUtil
import org.jetbrains.sbt.annotator.quickfix.{SbtRefreshProjectQuickFix, SbtUpdateResolverIndexesQuickFix}
import org.jetbrains.sbt.resolvers.{ResolverException, SbtResolverIndexesManager, SbtResolverUtils}
/**
* @author Nikolay Obedin
* @since 8/4/14.
*/
class SbtDependencyAnnotator extends Annotator {
private case class ArtifactInfo(group: String, artifact: String, version: String)
override def annotate(element: PsiElement, holder: AnnotationHolder): Unit =
try {
doAnnotate(element, holder)
} catch {
case exc: ResolverException =>
// TODO: find another way to notify user instead of spamming with notifications
// NotificationUtil.showMessage(null, exc.getMessage)
}
private def doAnnotate(element: PsiElement, holder: AnnotationHolder): Unit = {
if (ScalaPsiUtil.fileContext(element).getFileType.getName != Sbt.Name) return
def findDependencyOrAnnotate(info: ArtifactInfo): Unit = {
val resolversToUse = SbtResolverUtils.getProjectResolvers(Option(ScalaPsiUtil.fileContext(element)))
val indexManager = SbtResolverIndexesManager()
val indexes = resolversToUse.flatMap(indexManager.find).toSet
if (indexes.isEmpty) return
val isInRepo = {
if (isDynamicVersion(info.version))
indexes.exists(_.versions(info.group, info.artifact).nonEmpty)
else
indexes.exists(_.versions(info.group, info.artifact).contains(info.version))
}
if (!isInRepo) {
val annotation = holder.createErrorAnnotation(element, SbtBundle("sbt.annotation.unresolvedDependency"))
annotation.registerFix(new SbtUpdateResolverIndexesQuickFix)
annotation.registerFix(new SbtRefreshProjectQuickFix)
}
}
for {
literal@ScLiteral(_) <- Option(element)
parentExpr@ScInfixExpr(leftPart, operation, _) <- Option(literal.getParent)
if isOneOrTwoPercents(operation)
} yield leftPart match {
case _: ScLiteral =>
extractArtifactInfo(parentExpr.getParent).foreach(findDependencyOrAnnotate)
case leftExp: ScInfixExpr if isOneOrTwoPercents(leftExp.operation) =>
extractArtifactInfo(parentExpr).foreach(findDependencyOrAnnotate)
case _ => // do nothing
}
}
private def isOneOrTwoPercents(op: ScReferenceExpression) =
op.getText == "%" || op.getText == "%%"
private def extractArtifactInfo(from: PsiElement): Option[ArtifactInfo] = {
val scalaVersion = from.scalaLanguageLevel.map(_.version)
for {
ScInfixExpr(leftPart, _, maybeVersion) <- Option(from)
ScInfixExpr(maybeGroup, maybePercents, maybeArtifact) <- Option(leftPart)
ScLiteralImpl.string(version) <- Option(maybeVersion)
ScLiteralImpl.string(group) <- Option(maybeGroup)
ScLiteralImpl.string(artifact) <- Option(maybeArtifact)
shouldAppendScalaVersion = maybePercents.getText == "%%"
} yield {
if (shouldAppendScalaVersion && scalaVersion.isDefined)
ArtifactInfo(group, artifact + "_" + scalaVersion.get, version)
else
ArtifactInfo(group, artifact, version)
}
}
private def isDynamicVersion(version: String): Boolean =
version.startsWith("latest") || version.endsWith("+") || "[]()".exists(version.contains(_))
}
| double-y/translation-idea-plugin | src/org/jetbrains/sbt/annotator/SbtDependencyAnnotator.scala | Scala | apache-2.0 | 3,816 |
package de.unihamburg.vsis.sddf.preprocessing
import de.unihamburg.vsis.sddf.reading.Tuple
import de.unihamburg.vsis.sddf.visualisation.model.BasicAnalysable
class PipePreprocessorTrim(featureId: Int*) extends TraitPipePreprocessor with Serializable {
def clean(tuple: Tuple): Tuple = {
featureId.foreach(fId => {
// transform the given features
tuple.applyOnFeature(fId, _.trim())
})
tuple
}
}
object PipePreprocessorTrim {
def apply(featureId: Int*) = new PipePreprocessorTrim(featureId: _*)
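  // Usage sketch: PipePreprocessorTrim(0, 2) builds a preprocessor whose clean()
  // trims the string values of features 0 and 2 of each incoming Tuple.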
} | numbnut/sddf | src/main/scala/de/unihamburg/vsis/sddf/preprocessing/PipePreprocessorTrim.scala | Scala | gpl-3.0 | 534 |
package org.scala_tools.time
import akka.util.{ FiniteDuration, Duration ⇒ AkkaDuration }
import java.util.concurrent.TimeUnit
import org.joda.time.Duration
trait AkkaImplicits {
implicit def forceAkkaDuration(builder: DurationBuilder): AkkaDuration =
durationForceAkkaDuration(builder.underlying.toStandardDuration)
implicit def durationForceAkkaDuration(builder: Duration): AkkaDuration = {
builder.getMillis match {
case Long.MaxValue ⇒ AkkaDuration.Inf
case Long.MinValue ⇒ AkkaDuration.MinusInf
case 0 ⇒ new FiniteDuration(0, TimeUnit.NANOSECONDS)
case v ⇒ AkkaDuration(v, TimeUnit.MILLISECONDS)
}
}
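
  // Usage sketch (hypothetical): with these implicits in scope, a joda-time Duration
  // (or a DurationBuilder such as `2.hours + 30.minutes`) can be used where an Akka
  // duration is expected, e.g.
  //   val timeout: AkkaDuration = org.joda.time.Duration.standardSeconds(30)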
} | scalatra/oauth2-server | src/main/scala/org/scala_tools/time/AkkaImplicits.scala | Scala | mit | 687 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.datasources.jdbc
import java.sql.{Connection, Date, PreparedStatement, ResultSet, SQLException, Timestamp}
import scala.util.control.NonFatal
import org.apache.commons.lang3.StringUtils
import org.apache.spark.{InterruptibleIterator, Partition, SparkContext, TaskContext}
import org.apache.spark.internal.Logging
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.jdbc.{JdbcDialect, JdbcDialects}
import org.apache.spark.sql.sources._
import org.apache.spark.sql.types._
import org.apache.spark.util.CompletionIterator
/**
* Data corresponding to one partition of a JDBCRDD.
*/
case class JDBCPartition(whereClause: String, idx: Int) extends Partition {
override def index: Int = idx
}
object JDBCRDD extends Logging {
/**
* Takes a (schema, table) specification and returns the table's Catalyst
* schema.
*
* @param options - JDBC options that contains url, table and other information.
*
* @return A StructType giving the table's Catalyst schema.
* @throws SQLException if the table specification is garbage.
* @throws SQLException if the table contains an unsupported type.
*/
def resolveTable(options: JDBCOptions): StructType = {
val url = options.url
val table = options.table
val dialect = JdbcDialects.get(url)
val conn: Connection = JdbcUtils.createConnectionFactory(options)()
try {
val statement = conn.prepareStatement(dialect.getSchemaQuery(table))
try {
val rs = statement.executeQuery()
try {
JdbcUtils.getSchema(rs, dialect)
} finally {
rs.close()
}
} finally {
statement.close()
}
} finally {
conn.close()
}
}
/**
* Prune all but the specified columns from the specified Catalyst schema.
*
* @param schema - The Catalyst schema of the master table
* @param columns - The list of desired columns
*
* @return A Catalyst schema corresponding to columns in the given order.
*/
private def pruneSchema(schema: StructType, columns: Array[String]): StructType = {
val fieldMap = Map(schema.fields.map(x => x.metadata.getString("name") -> x): _*)
new StructType(columns.map(name => fieldMap(name)))
}
/**
* Converts value to SQL expression.
*/
private def compileValue(value: Any): Any = value match {
case stringValue: String => s"'${escapeSql(stringValue)}'"
case timestampValue: Timestamp => "'" + timestampValue + "'"
case dateValue: Date => "'" + dateValue + "'"
case arrayValue: Array[Any] => arrayValue.map(compileValue).mkString(", ")
case _ => value
}
private def escapeSql(value: String): String =
if (value == null) null else StringUtils.replace(value, "'", "''")
/**
* Turns a single Filter into a String representing a SQL expression.
* Returns None for an unhandled filter.
*/
def compileFilter(f: Filter, dialect: JdbcDialect): Option[String] = {
def quote(colName: String): String = dialect.quoteIdentifier(colName)
Option(f match {
case EqualTo(attr, value) => s"${quote(attr)} = ${compileValue(value)}"
case EqualNullSafe(attr, value) =>
val col = quote(attr)
s"(NOT ($col != ${compileValue(value)} OR $col IS NULL OR " +
s"${compileValue(value)} IS NULL) OR ($col IS NULL AND ${compileValue(value)} IS NULL))"
case LessThan(attr, value) => s"${quote(attr)} < ${compileValue(value)}"
case GreaterThan(attr, value) => s"${quote(attr)} > ${compileValue(value)}"
case LessThanOrEqual(attr, value) => s"${quote(attr)} <= ${compileValue(value)}"
case GreaterThanOrEqual(attr, value) => s"${quote(attr)} >= ${compileValue(value)}"
case IsNull(attr) => s"${quote(attr)} IS NULL"
case IsNotNull(attr) => s"${quote(attr)} IS NOT NULL"
case StringStartsWith(attr, value) => s"${quote(attr)} LIKE '${value}%'"
case StringEndsWith(attr, value) => s"${quote(attr)} LIKE '%${value}'"
case StringContains(attr, value) => s"${quote(attr)} LIKE '%${value}%'"
case In(attr, value) if value.isEmpty =>
s"CASE WHEN ${quote(attr)} IS NULL THEN NULL ELSE FALSE END"
case In(attr, value) => s"${quote(attr)} IN (${compileValue(value)})"
case Not(f) => compileFilter(f, dialect).map(p => s"(NOT ($p))").getOrElse(null)
case Or(f1, f2) =>
// We can't compile Or filter unless both sub-filters are compiled successfully.
// It applies too for the following And filter.
// If we can make sure compileFilter supports all filters, we can remove this check.
val or = Seq(f1, f2).flatMap(compileFilter(_, dialect))
if (or.size == 2) {
or.map(p => s"($p)").mkString(" OR ")
} else {
null
}
case And(f1, f2) =>
val and = Seq(f1, f2).flatMap(compileFilter(_, dialect))
if (and.size == 2) {
and.map(p => s"($p)").mkString(" AND ")
} else {
null
}
case _ => null
})
}
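
  // Informal examples of the compilation rules above (column quoting comes from the
  // dialect's quoteIdentifier, shown here as double quotes):
  //   compileValue("O'Brien") yields 'O''Brien' (escapeSql doubles single quotes)
  //   EqualTo("name", "fred") compiles to "name" = 'fred'
  //   And(GreaterThan("age", 21), IsNotNull("name")) compiles to ("age" > 21) AND ("name" IS NOT NULL)
  //   An Or (or And) filter is compiled only if both of its sub-filters compile.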
/**
* Build and return JDBCRDD from the given information.
*
* @param sc - Your SparkContext.
* @param schema - The Catalyst schema of the underlying database table.
* @param requiredColumns - The names of the columns to SELECT.
* @param filters - The filters to include in all WHERE clauses.
* @param parts - An array of JDBCPartitions specifying partition ids and
* per-partition WHERE clauses.
* @param options - JDBC options that contains url, table and other information.
*
* @return An RDD representing "SELECT requiredColumns FROM fqTable".
*/
def scanTable(
sc: SparkContext,
schema: StructType,
requiredColumns: Array[String],
filters: Array[Filter],
parts: Array[Partition],
options: JDBCOptions): RDD[InternalRow] = {
val url = options.url
val dialect = JdbcDialects.get(url)
val quotedColumns = requiredColumns.map(colName => dialect.quoteIdentifier(colName))
new JDBCRDD(
sc,
JdbcUtils.createConnectionFactory(options),
pruneSchema(schema, requiredColumns),
quotedColumns,
filters,
parts,
url,
options)
}
}
/**
* An RDD representing a table in a database accessed via JDBC. Both the
* driver code and the workers must be able to access the database; the driver
* needs to fetch the schema while the workers need to fetch the data.
*/
private[jdbc] class JDBCRDD(
sc: SparkContext,
getConnection: () => Connection,
schema: StructType,
columns: Array[String],
filters: Array[Filter],
partitions: Array[Partition],
url: String,
options: JDBCOptions)
extends RDD[InternalRow](sc, Nil) {
/**
* Retrieve the list of partitions corresponding to this RDD.
*/
override def getPartitions: Array[Partition] = partitions
/**
* `columns`, but as a String suitable for injection into a SQL query.
*/
private val columnList: String = {
val sb = new StringBuilder()
columns.foreach(x => sb.append(",").append(x))
if (sb.isEmpty) "1" else sb.substring(1)
}
/**
* `filters`, but as a WHERE clause suitable for injection into a SQL query.
*/
private val filterWhereClause: String =
filters
.flatMap(JDBCRDD.compileFilter(_, JdbcDialects.get(url)))
.map(p => s"($p)").mkString(" AND ")
/**
* A WHERE clause representing both `filters`, if any, and the current partition.
*/
private def getWhereClause(part: JDBCPartition): String = {
if (part.whereClause != null && filterWhereClause.length > 0) {
"WHERE " + s"($filterWhereClause)" + " AND " + s"(${part.whereClause})"
} else if (part.whereClause != null) {
"WHERE " + part.whereClause
} else if (filterWhereClause.length > 0) {
"WHERE " + filterWhereClause
} else {
""
}
}
/**
* Runs the SQL query against the JDBC driver.
*
*/
override def compute(thePart: Partition, context: TaskContext): Iterator[InternalRow] = {
var closed = false
var rs: ResultSet = null
var stmt: PreparedStatement = null
var conn: Connection = null
def close() {
if (closed) return
try {
if (null != rs) {
rs.close()
}
} catch {
case e: Exception => logWarning("Exception closing resultset", e)
}
try {
if (null != stmt) {
stmt.close()
}
} catch {
case e: Exception => logWarning("Exception closing statement", e)
}
try {
if (null != conn) {
if (!conn.isClosed && !conn.getAutoCommit) {
try {
conn.commit()
} catch {
case NonFatal(e) => logWarning("Exception committing transaction", e)
}
}
conn.close()
}
logInfo("closed connection")
} catch {
case e: Exception => logWarning("Exception closing connection", e)
}
closed = true
}
context.addTaskCompletionListener{ context => close() }
val inputMetrics = context.taskMetrics().inputMetrics
val part = thePart.asInstanceOf[JDBCPartition]
conn = getConnection()
val dialect = JdbcDialects.get(url)
import scala.collection.JavaConverters._
dialect.beforeFetch(conn, options.asProperties.asScala.toMap)
// H2's JDBC driver does not support the setSchema() method. We pass a
// fully-qualified table name in the SELECT statement. I don't know how to
// talk about a table in a completely portable way.
val myWhereClause = getWhereClause(part)
val sqlText = s"SELECT $columnList FROM ${options.table} $myWhereClause"
stmt = conn.prepareStatement(sqlText,
ResultSet.TYPE_FORWARD_ONLY, ResultSet.CONCUR_READ_ONLY)
stmt.setFetchSize(options.fetchSize)
rs = stmt.executeQuery()
val rowsIterator = JdbcUtils.resultSetToSparkInternalRows(rs, schema, inputMetrics)
CompletionIterator[InternalRow, Iterator[InternalRow]](
new InterruptibleIterator(context, rowsIterator), close())
}
}
| spark0001/spark2.1.1 | sql/core/src/main/scala/org/apache/spark/sql/execution/datasources/jdbc/JDBCRDD.scala | Scala | apache-2.0 | 10,928 |
package com.twitter.scalding.reducer_estimation
import scala.collection.JavaConverters._
import cascading.flow.FlowStep
import cascading.tap.{ Tap, CompositeTap }
import cascading.tap.hadoop.Hfs
import org.apache.hadoop.mapred.JobConf
import org.slf4j.LoggerFactory
object InputSizeReducerEstimator {
val BytesPerReducer = "scalding.reducer.estimator.bytes.per.reducer"
val defaultBytesPerReducer = 1L << 32 // 4 GB
/** Get the target bytes/reducer from the JobConf */
def getBytesPerReducer(conf: JobConf): Long = conf.getLong(BytesPerReducer, defaultBytesPerReducer)
}
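
// Configuration sketch (hypothetical values): the target bytes/reducer is read from the
// JobConf, so a job may override the 4 GB default, e.g.
//   conf.setLong(InputSizeReducerEstimator.BytesPerReducer, 1L << 33)  // 8 GB per reducer
// With a 10 GB input and the 4 GB default, the estimate below is ceil(10 / 4) = 3 reducers.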
/**
* Estimator that uses the input size and a fixed "bytesPerReducer" target.
*
 * Bytes per reducer can be configured via the `scalding.reducer.estimator.bytes.per.reducer`
 * configuration parameter; it defaults to 4 GB.
*/
class InputSizeReducerEstimator extends ReducerEstimator {
private val LOG = LoggerFactory.getLogger(this.getClass)
private def unrollTaps(taps: Seq[Tap[_, _, _]]): Seq[Tap[_, _, _]] =
taps.flatMap {
case multi: CompositeTap[_] =>
unrollTaps(multi.getChildTaps.asScala.toSeq)
case t => Seq(t)
}
private def unrolledSources(step: FlowStep[JobConf]): Seq[Tap[_, _, _]] =
unrollTaps(step.getSources.asScala.toSeq)
/**
* Get the total size of the file(s) specified by the Hfs, which may contain a glob
* pattern in its path, so we must be ready to handle that case.
*/
protected def size(f: Hfs, conf: JobConf): Long = {
val fs = f.getPath.getFileSystem(conf)
fs.globStatus(f.getPath)
.map{ s => fs.getContentSummary(s.getPath).getLength }
.sum
}
private def inputSizes(taps: Seq[Tap[_, _, _]], conf: JobConf): Option[Seq[(String, Long)]] = {
val sizes = taps.map {
case tap: Hfs => Some(tap.toString -> size(tap, conf))
case tap => {
LOG.warn("Cannot compute size in bytes of tap: {}", tap)
None
}
}.flatten
if (sizes.nonEmpty) Some(sizes) else None
}
protected def inputSizes(step: FlowStep[JobConf]): Option[Seq[(String, Long)]] =
inputSizes(unrolledSources(step), step.getConfig)
/**
* Figure out the total size of the input to the current step and set the number
* of reducers using the "bytesPerReducer" configuration parameter.
*/
override def estimateReducers(info: FlowStrategyInfo): Option[Int] =
inputSizes(info.step) match {
case Some(inputSizes) =>
val bytesPerReducer =
InputSizeReducerEstimator.getBytesPerReducer(info.step.getConfig)
val totalBytes = inputSizes.map(_._2).sum
val nReducers = (totalBytes.toDouble / bytesPerReducer).ceil.toInt max 1
lazy val logStr = inputSizes.map {
case (name, bytes) => " - %s\t%d\n".format(name, bytes)
}.mkString("")
LOG.info("\nInputSizeReducerEstimator" +
"\n - input size (bytes): " + totalBytes +
"\n - reducer estimate: " + nReducers +
"\n - Breakdown:\n" +
logStr)
Some(nReducers)
case None =>
LOG.warn("InputSizeReducerEstimator unable to estimate reducers; " +
"cannot compute size of:\n - " +
unrolledSources(info.step).filterNot(_.isInstanceOf[Hfs]).mkString("\n - "))
None
}
}
| JiJiTang/scalding | scalding-core/src/main/scala/com/twitter/scalding/reducer_estimation/InputSizeReducerEstimator.scala | Scala | apache-2.0 | 3,225 |
/*
* Copyright (c) 2014-2020 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.execution
import scala.collection.{BuildFrom => ScalaBuildFrom}
import scala.collection.mutable
object compat {
type BuildFrom[-From, -A, +C] = ScalaBuildFrom[From, A, C]
private[monix] object internal {
type IterableOnce[+X] = scala.collection.IterableOnce[X]
def toIterator[X](i: IterableOnce[X]): Iterator[X] = i.iterator
def hasDefiniteSize[X](i: IterableOnce[X]): Boolean = i.knownSize >= 0
def newBuilder[From, A, C](bf: BuildFrom[From, A, C], from: From): mutable.Builder[A, C] = bf.newBuilder(from)
@inline def toSeq[A](array: Array[AnyRef]): Seq[A] =
new scala.collection.immutable.ArraySeq.ofRef(array).asInstanceOf[Seq[A]]
}
}
| alexandru/monifu | monix-execution/shared/src/main/scala_2.13+/monix/execution/compat.scala | Scala | apache-2.0 | 1,366 |
/*
* Copyright (c) 2014-2015 by its authors. Some rights reserved.
* See the project homepage at: http://www.monifu.org
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monifu.concurrent
import monifu.concurrent.Scheduler.Environment
import monifu.concurrent.schedulers._
import monifu.util.math.roundToPowerOf2
import scala.annotation.implicitNotFound
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._
/**
* A Scheduler is an `scala.concurrent.ExecutionContext` that additionally can
* schedule the execution of units of work to run with a delay or periodically.
*/
@implicitNotFound(
"Cannot find an implicit Scheduler, either " +
"import monifu.concurrent.Implicits.globalScheduler or use a custom one")
trait Scheduler extends ExecutionContext with UncaughtExceptionReporter {
/**
* Runs a block of code in this `ExecutionContext`.
*/
def execute(runnable: Runnable): Unit
/**
* Reports that an asynchronous computation failed.
*/
def reportFailure(t: Throwable): Unit
/**
* Schedules the given `action` for immediate execution.
*/
def execute(action: => Unit): Unit
/**
* Schedules a task to run in the future, after `initialDelay`.
*
* For example the following schedules a message to be printed to standard
* output after 5 minutes:
* {{{
* val task = scheduler.scheduleOnce(5.minutes) {
* println("Hello, world!")
* }
*
* // later if you change your mind ...
* task.cancel()
* }}}
*
* @param initialDelay is the time to wait until the execution happens
* @param action is the callback to be executed
*
* @return a `Cancelable` that can be used to cancel the created task
* before execution.
*/
def scheduleOnce(initialDelay: FiniteDuration)(action: => Unit): Cancelable
/**
* Schedules a task to run in the future, after `initialDelay`.
*
* For example the following schedules a message to be printed to standard
* output after 5 minutes:
* {{{
* val task = scheduler.scheduleOnce(5.minutes) {
* println("Hello, world!")
* }
*
* // later if you change your mind ...
* task.cancel()
* }}}
*
* @param initialDelay is the time to wait until the execution happens
* @param unit is the time unit used for `initialDelay`
* @param action is the callback to be executed
*
* @return a `Cancelable` that can be used to cancel the created task
* before execution.
*/
def scheduleOnce(initialDelay: Long, unit: TimeUnit)(action: => Unit): Cancelable
/**
* Schedules a task to run in the future, after `initialDelay`.
*
* For example the following schedules a message to be printed to
* standard output after 5 minutes:
* {{{
* val task = scheduler.scheduleOnce(5.minutes, new Runnable {
* def run() = println("Hello, world!")
* })
*
* // later if you change your mind ...
* task.cancel()
* }}}
*
* @param initialDelay is the time to wait until the execution happens
* @param r is the callback to be executed
*
* @return a `Cancelable` that can be used to cancel the created task
* before execution.
*/
def scheduleOnce(initialDelay: FiniteDuration, r: Runnable): Cancelable
/**
* Schedules a task to run in the future, after `initialDelay`.
*
* For example the following schedules a message to be printed to
* standard output after 5 minutes:
* {{{
* val task = scheduler.scheduleOnce(5.minutes, new Runnable {
* def run() = println("Hello, world!")
* })
*
* // later if you change your mind ...
* task.cancel()
* }}}
*
* @param initialDelay is the time to wait until the execution happens
* @param unit is the time unit used for `initialDelay`
* @param r is the callback to be executed
*
* @return a `Cancelable` that can be used to cancel the created task
* before execution.
*/
def scheduleOnce(initialDelay: Long, unit: TimeUnit, r: Runnable): Cancelable
/**
* Schedules for execution a periodic task that is first executed
* after the given initial delay and subsequently with the given
* delay between the termination of one execution and the
* commencement of the next.
*
* For example the following schedules a message to be printed to
* standard output every 10 seconds with an initial delay of 5
* seconds:
* {{{
* val task = s.scheduleWithFixedDelay(5.seconds, 10.seconds, new Runnable {
* def run() = println("Hello, world!")
* })
*
* // later if you change your mind ...
* task.cancel()
* }}}
*
* @param initialDelay is the time to wait until the first execution happens
* @param delay is the time to wait between 2 successive executions of the task
* @param r is the callback to be executed
*
* @return a cancelable that can be used to cancel the execution of
* this repeated task at any time.
*/
def scheduleWithFixedDelay(initialDelay: FiniteDuration, delay: FiniteDuration, r: Runnable): Cancelable
/**
* Schedules for execution a periodic task that is first executed
* after the given initial delay and subsequently with the given
* delay between the termination of one execution and the
* commencement of the next.
*
* For example the following schedules a message to be printed to
* standard output every 10 seconds with an initial delay of 5
* seconds:
* {{{
* val task = s.scheduleWithFixedDelay(5.seconds, 10.seconds, new Runnable {
* def run() = println("Hello, world!")
* })
*
* // later if you change your mind ...
* task.cancel()
* }}}
*
* @param initialDelay is the time to wait until the first execution happens
* @param delay is the time to wait between 2 successive executions of the task
* @param unit is the time unit used for the `initialDelay` and the `delay` parameters
* @param r is the callback to be executed
*
* @return a cancelable that can be used to cancel the execution of
* this repeated task at any time.
*/
def scheduleWithFixedDelay(initialDelay: Long, delay: Long, unit: TimeUnit, r: Runnable): Cancelable
/**
* Schedules for execution a periodic task that is first executed after the
* given initial delay and subsequently with the given delay between the
* termination of one execution and the commencement of the next.
*
* For example the following schedules a message to be printed to standard
* output every 10 seconds with an initial delay of 5 seconds:
* {{{
* val task = s.scheduleWithFixedDelay(5.seconds, 10.seconds) {
* println("Hello, world!")
* }
*
* // later if you change your mind ...
* task.cancel()
* }}}
*
* @param initialDelay is the time to wait until the first execution happens
* @param delay is the time to wait between 2 successive executions of the task
* @param action is the callback to be executed
*
* @return a cancelable that can be used to cancel the execution of
* this repeated task at any time.
*/
def scheduleWithFixedDelay(initialDelay: FiniteDuration, delay: FiniteDuration)(action: => Unit): Cancelable
/**
* Schedules for execution a periodic task that is first executed after the
* given initial delay and subsequently with the given delay between the
* termination of one execution and the commencement of the next.
*
* For example the following schedules a message to be printed to standard
* output every 10 seconds with an initial delay of 5 seconds:
* {{{
* val task = s.scheduleWithFixedDelay(5.seconds, 10.seconds) {
* println("Hello, world!")
* }
*
* // later if you change your mind ...
* task.cancel()
* }}}
*
* @param initialDelay is the time to wait until the first execution happens
* @param delay is the time to wait between 2 successive executions of the task
* @param unit is the time unit used for the `initialDelay` and the `delay` parameters
* @param action is the callback to be executed
*
* @return a cancelable that can be used to cancel the execution of
* this repeated task at any time.
*/
def scheduleWithFixedDelay(initialDelay: Long, delay: Long, unit: TimeUnit)(action: => Unit): Cancelable
/**
* Schedules a periodic task that becomes enabled first after the given
* initial delay, and subsequently with the given period. Executions will
* commence after `initialDelay` then `initialDelay + period`, then
* `initialDelay + 2 * period` and so on.
*
* If any execution of the task encounters an exception, subsequent executions
* are suppressed. Otherwise, the task will only terminate via cancellation or
* termination of the scheduler. If any execution of this task takes longer
* than its period, then subsequent executions may start late, but will not
* concurrently execute.
*
* For example the following schedules a message to be printed to standard
* output approximately every 10 seconds with an initial delay of 5 seconds:
* {{{
* val task = scheduler.scheduleAtFixedRate(5.seconds, 10.seconds , new Runnable {
* def run() = println("Hello, world!")
* })
*
* // later if you change your mind ...
* task.cancel()
* }}}
*
* @param initialDelay is the time to wait until the first execution happens
* @param period is the time to wait between 2 successive executions of the task
* @param r is the callback to be executed
*
* @return a cancelable that can be used to cancel the execution of
* this repeated task at any time.
*/
def scheduleAtFixedRate(initialDelay: FiniteDuration, period: FiniteDuration, r: Runnable): Cancelable
/**
* Schedules a periodic task that becomes enabled first after the given
* initial delay, and subsequently with the given period. Executions will
* commence after `initialDelay` then `initialDelay + period`, then
* `initialDelay + 2 * period` and so on.
*
* If any execution of the task encounters an exception, subsequent executions
* are suppressed. Otherwise, the task will only terminate via cancellation or
* termination of the scheduler. If any execution of this task takes longer
* than its period, then subsequent executions may start late, but will not
* concurrently execute.
*
* For example the following schedules a message to be printed to standard
* output approximately every 10 seconds with an initial delay of 5 seconds:
* {{{
* val task = scheduler.scheduleAtFixedRate(5.seconds, 10.seconds , new Runnable {
* def run() = println("Hello, world!")
* })
*
* // later if you change your mind ...
* task.cancel()
* }}}
*
* @param initialDelay is the time to wait until the first execution happens
* @param period is the time to wait between 2 successive executions of the task
* @param unit is the time unit used for the `initialDelay` and the `period` parameters
* @param r is the callback to be executed
*
* @return a cancelable that can be used to cancel the execution of
* this repeated task at any time.
*/
def scheduleAtFixedRate(initialDelay: Long, period: Long, unit: TimeUnit, r: Runnable): Cancelable
/**
* Schedules a periodic task that becomes enabled first after the given
* initial delay, and subsequently with the given period. Executions will
* commence after `initialDelay` then `initialDelay + period`, then
* `initialDelay + 2 * period` and so on.
*
* If any execution of the task encounters an exception, subsequent executions
* are suppressed. Otherwise, the task will only terminate via cancellation or
* termination of the scheduler. If any execution of this task takes longer
* than its period, then subsequent executions may start late, but will not
* concurrently execute.
*
* For example the following schedules a message to be printed to standard
* output approximately every 10 seconds with an initial delay of 5 seconds:
* {{{
* val task = scheduler.scheduleAtFixedRate(5.seconds, 10.seconds) {
* println("Hello, world!")
* }
*
* // later if you change your mind ...
* task.cancel()
* }}}
*
* @param initialDelay is the time to wait until the first execution happens
* @param period is the time to wait between 2 successive executions of the task
* @param action is the callback to be executed
*
* @return a cancelable that can be used to cancel the execution of
* this repeated task at any time.
*/
def scheduleAtFixedRate(initialDelay: FiniteDuration, period: FiniteDuration)(action: => Unit): Cancelable
/**
* Schedules a periodic task that becomes enabled first after the given
* initial delay, and subsequently with the given period. Executions will
* commence after `initialDelay` then `initialDelay + period`, then
* `initialDelay + 2 * period` and so on.
*
* If any execution of the task encounters an exception, subsequent executions
* are suppressed. Otherwise, the task will only terminate via cancellation or
* termination of the scheduler. If any execution of this task takes longer
* than its period, then subsequent executions may start late, but will not
* concurrently execute.
*
* For example the following schedules a message to be printed to standard
* output approximately every 10 seconds with an initial delay of 5 seconds:
* {{{
* val task = scheduler.scheduleAtFixedRate(5.seconds, 10.seconds) {
* println("Hello, world!")
* }
*
* // later if you change your mind ...
* task.cancel()
* }}}
*
* @param initialDelay is the time to wait until the first execution happens
* @param period is the time to wait between 2 successive executions of the task
* @param unit is the time unit used for the `initialDelay` and the `period` parameters
* @param action is the callback to be executed
*
* @return a cancelable that can be used to cancel the execution of
* this repeated task at any time.
*/
def scheduleAtFixedRate(initialDelay: Long, period: Long, unit: TimeUnit)(action: => Unit): Cancelable
/**
* Returns the current time in milliseconds. Note that while the
* unit of time of the return value is a millisecond, the
* granularity of the value depends on the underlying operating
* system and may be larger. For example, many operating systems
* measure time in units of tens of milliseconds.
*
* It's the equivalent of `System.currentTimeMillis()`. When wanting
* to measure time, do not use `System.currentTimeMillis()`
* directly, prefer this method instead, because then it can be
* mocked for testing purposes (see for example
* [[monifu.concurrent.schedulers.TestScheduler TestScheduler]])
*/
def currentTimeMillis(): Long
/**
* Information about the environment on top of
* which our scheduler runs on.
*/
def env: Environment
}
object Scheduler extends SchedulerCompanion {
/**
* Information about the environment on top of which
* our scheduler runs on.
*/
final class Environment private
(_batchSize: Int, _platform: Platform.Value) {
/**
* Recommended batch size used for breaking synchronous loops in
* asynchronous batches. When streaming value from a producer to
* a synchronous consumer it's recommended to break the streaming
* in batches as to not hold the current thread or run-loop
* indefinitely.
*
* Working with power of 2, because then for applying the modulo
* operation we can just do:
* {{{
* val modulus = scheduler.env.batchSize - 1
* // ...
* nr = (nr + 1) & modulus
* }}}
*/
val batchSize: Int =
roundToPowerOf2(_batchSize)
/**
* Represents the platform our scheduler runs on.
*/
val platform: Platform.Value =
_platform
override def equals(other: Any): Boolean = other match {
case that: Environment =>
batchSize == that.batchSize &&
platform == that.platform
case _ => false
}
override val hashCode: Int = {
val state = Seq(batchSize.hashCode(), platform.hashCode())
state.foldLeft(0)((a, b) => 31 * a + b)
}
}
object Environment {
/** Builder for [[Environment]] */
def apply(batchSize: Int, platform: Platform.Value): Environment = {
new Environment(batchSize, platform)
}
}
/**
* Represents the platform our scheduler runs on.
*/
object Platform extends Enumeration {
val JVM = Value("JVM")
val JS = Value("JS")
val Fake = Value("Fake")
}
}
| virtualirfan/monifu | core/shared/src/main/scala/monifu/concurrent/Scheduler.scala | Scala | apache-2.0 | 17,476 |
/*
* @author Philip Stutz
*
* Copyright 2011 University of Zurich
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package com.signalcollect.logging
import com.signalcollect.interfaces.LogMessage
import akka.actor.Actor
object DefaultLogger {
def log(logMessage: LogMessage) {
logMessage.msg match {
case e: Exception =>
println(logMessage.from+": "+e.getMessage)
e.printStackTrace
case other =>
println(logMessage.from+": "+logMessage.msg)
}
}
}
class DefaultLogger(loggingFunction: LogMessage => Unit = DefaultLogger.log) extends Actor {
def receive = {
case logMessage: LogMessage =>
loggingFunction(logMessage)
}
} | Tjoene/thesis | Case_Programs/signal-collect/src/main/scala/com/signalcollect/logging/DefaultLogger.scala | Scala | gpl-2.0 | 1,295 |
/*
* Copyright 2011-2022 GatlingCorp (https://gatling.io)
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.gatling.core.controller.inject.open
import scala.concurrent.duration._
import io.gatling.BaseSpec
import io.gatling.commons.util.Collections._
class OpenInjectionStepSpec extends BaseSpec {
private def scheduling(steps: OpenInjectionStep*): List[FiniteDuration] =
steps.reverse
.foldLeft[Iterator[FiniteDuration]](Iterator.empty) { (it, step) =>
step.chain(it)
}
.toList
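
  // Informal example of the helper above: scheduling(RampOpenInjection(2, 1.second),
  // AtOnceOpenInjection(1)) should yield offsets of roughly 0, 500 and 1000 milliseconds,
  // since the ramp spreads its 2 users over 1 second and the chained at-once user is
  // shifted past the ramp's duration (the chaining test below relies on this behaviour).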
"RampInjection" should "return the correct number of users" in {
RampOpenInjection(5, 1.second).users shouldBe 5
}
it should "return the correct injection duration" in {
RampOpenInjection(5, 1.second).duration shouldBe (1.second)
}
it should "schedule with a correct interval" in {
val ramp = RampOpenInjection(5, 1.second)
val rampScheduling = scheduling(ramp)
val interval0 = rampScheduling(1) - rampScheduling.head
val interval1 = rampScheduling(2) - rampScheduling(1)
rampScheduling.length shouldBe ramp.users
interval0 shouldBe interval1
interval0 shouldBe (200.milliseconds)
}
it should "schedule the correct number of users" in {
val step = RampOpenInjection(3, 8.seconds)
step.users shouldBe 3
scheduling(step).size shouldBe 3
}
it should "the first and the last users should be correctly scheduled" in {
val rampScheduling = scheduling(RampOpenInjection(5, 1.second))
val first = rampScheduling.head
val last = rampScheduling.last
first shouldBe Duration.Zero
last shouldBe <(1.second)
rampScheduling shouldBe sorted
}
"ConstantRateInjection" should "return the correct number of users" in {
ConstantRateOpenInjection(1.0, 5.seconds).users shouldBe 5
ConstantRateOpenInjection(0.4978, 100.seconds).users shouldBe 50
}
"NothingForInjection" should "return the correct number of users" in {
NothingForOpenInjection(1.second).users shouldBe 0
}
it should "return the correct injection duration" in {
NothingForOpenInjection(1.second).duration shouldBe (1.second)
}
it should "return the correct injection scheduling" in {
NothingForOpenInjection(1.second).chain(Iterator.empty) shouldBe empty
}
"AtOnceInjection" should "return the correct number of users" in {
AtOnceOpenInjection(4).users shouldBe 4
}
it should "return the correct injection duration" in {
scheduling(AtOnceOpenInjection(4)).max shouldBe Duration.Zero
}
it should "return the correct injection scheduling" in {
val peak = AtOnceOpenInjection(4)
val atOnceScheduling = scheduling(peak)
val uniqueScheduling = atOnceScheduling.toSet
uniqueScheduling should contain(Duration.Zero)
atOnceScheduling should have length peak.users
}
"RampRateInjection" should "return the correct injection duration" in {
RampRateOpenInjection(2, 4, 10.seconds).duration shouldBe (10.seconds)
}
it should "return the correct number of users" in {
RampRateOpenInjection(2, 4, 10.seconds).users shouldBe 30
}
it should "provides an injection scheduling with the correct number of elements" in {
val rampRate = RampRateOpenInjection(2, 4, 10.seconds)
val rampRateScheduling = scheduling(rampRate)
rampRateScheduling.length shouldBe rampRate.users
}
it should "provides an injection scheduling with the correct values" in {
val rampRateScheduling = scheduling(RampRateOpenInjection(2, 4, 10.seconds))
rampRateScheduling.head shouldBe Duration.Zero
rampRateScheduling(1) shouldBe (500.milliseconds)
}
it should "return the correct injection duration when the acceleration is null" in {
RampRateOpenInjection(1.0, 1.0, 10.seconds).duration shouldBe (10.seconds)
}
it should "return the correct number of users when the acceleration is null" in {
RampRateOpenInjection(1.0, 1.0, 10.seconds).users shouldBe 10
}
it should "return a scheduling of constant step when the acceleration is null" in {
val constantRampScheduling = scheduling(RampRateOpenInjection(1.0, 1.0, 10.seconds))
val steps = constantRampScheduling
.zip(constantRampScheduling.drop(1))
.map { case (i1, i2) =>
i2 - i1
}
.toSet[FiniteDuration]
constantRampScheduling shouldBe sorted
steps.size shouldBe 1
constantRampScheduling.last shouldBe <(10.seconds)
}
private val heavisideScheduling = HeavisideOpenInjection(100, 5.seconds).chain(Iterator.empty).toList
"HeavisideInjection" should "provide an appropriate number of users" in {
heavisideScheduling.length shouldBe 100
}
it should "provide correct values" in {
heavisideScheduling(1) shouldBe (291.milliseconds)
heavisideScheduling shouldBe sorted
heavisideScheduling.last shouldBe <(5.seconds)
}
it should "have most of the scheduling values close to half of the duration" in {
val l = heavisideScheduling.count(t => (t > (1.5.seconds)) && (t < (3.5.seconds)))
l shouldBe 67 // two thirds
}
"Injection chaining" should "provide a monotonically increasing series of durations" in {
val scheduling = RampOpenInjection(3, 2.seconds).chain(RampOpenInjection(3, 2.seconds).chain(Iterator.empty)).toVector
scheduling shouldBe sorted
}
"Poisson injection" should "inject constant users at approximately the right rate" in {
// Inject 1000 users per second for 60 seconds
val inject = PoissonOpenInjection(60.seconds, 1000.0, 1000.0, seed = 0L) // Seed with 0, to ensure tests are deterministic
val scheduling = inject.chain(Iterator(0.seconds)).toVector // Chain to an injector with a zero timer
scheduling.size shouldBe (inject.users + 1)
scheduling.size shouldBe 60001 +- 200 // 60000 for the users injected by PoissonInjection, plus the 0 second one
scheduling.last shouldBe (60.seconds)
scheduling(scheduling.size - 2).toMillis shouldBe 60000L +- 5L
scheduling.head.toMillis shouldBe 0L +- 5L
scheduling(30000).toMillis shouldBe 30000L +- 1000L // Half-way through we should have injected half of the users
}
it should "inject ramped users at approximately the right rate" in {
// ramp from 0 to 1000 users per second over 60 seconds
val inject = PoissonOpenInjection(60.seconds, 0.0, 1000.0, seed = 0L) // Seed with 0, to ensure tests are deterministic
val scheduling = inject.chain(Iterator(0.seconds)).toVector // Chain to an injector with a zero timer
scheduling.size shouldBe (inject.users + 1)
scheduling.size shouldBe 30001 +- 500 // 30000 for the users injected by PoissonInjection, plus the 0 second one
scheduling.last shouldBe (60.seconds)
scheduling(scheduling.size - 2).toMillis shouldBe 60000L +- 5L
scheduling.head.toMillis shouldBe 0L +- 200L
scheduling(7500).toMillis shouldBe 30000L +- 1000L // Half-way through ramp-up we should have run a quarter of users
}
"Chain steps" should "inject the expected number of users" in {
val steps = Vector(
RampOpenInjection(50, 9.minutes),
NothingForOpenInjection(1.minute),
RampOpenInjection(50, 1.minute),
NothingForOpenInjection(9.minutes),
RampOpenInjection(50, 1.minute),
NothingForOpenInjection(9.minutes),
RampOpenInjection(50, 1.minute),
NothingForOpenInjection(9.minutes),
RampOpenInjection(50, 1.minute),
NothingForOpenInjection(9.minutes)
)
scheduling(steps: _*).size shouldBe steps.sumBy(_.users)
}
}
| gatling/gatling | gatling-core/src/test/scala/io/gatling/core/controller/inject/open/OpenInjectionStepSpec.scala | Scala | apache-2.0 | 7,996 |
package com.eevolution.context.dictionary.infrastructure.service.impl
import java.util.UUID
import com.eevolution.context.dictionary.infrastructure.repository.SchedulerRecipientRepository
import com.eevolution.context.dictionary.infrastructure.service.SchedulerRecipientService
import com.lightbend.lagom.scaladsl.api.ServiceCall
import com.lightbend.lagom.scaladsl.persistence.PersistentEntityRegistry
/**
* Copyright (C) 2003-2017, e-Evolution Consultants S.A. , http://www.e-evolution.com
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
* Email: [email protected], http://www.e-evolution.com , http://github.com/EmerisScala
* Created by [email protected] , www.e-evolution.com on 21/11/17.
*/
/**
* Scheduler Recipient Service Implementation
* @param registry
* @param schedulerRecipientRepository
*/
class SchedulerRecipientServiceImpl (registry: PersistentEntityRegistry, schedulerRecipientRepository: SchedulerRecipientRepository) extends SchedulerRecipientService {
private val DefaultPageSize = 10
override def getAll() = ServiceCall {_ => schedulerRecipientRepository.getAll()}
override def getAllByPage(page : Option[Int], pageSize : Option[Int]) = ServiceCall{_ => schedulerRecipientRepository.getAllByPage(page.getOrElse(0) , pageSize.getOrElse(DefaultPageSize))}
override def getById(id: Int) = ServiceCall { _ => schedulerRecipientRepository.getById(id)}
override def getByUUID(uuid: UUID) = ServiceCall { _ => schedulerRecipientRepository.getByUUID(uuid)}
}
| adempiere/ADReactiveSystem | dictionary-impl/src/main/scala/com/eevolution/context/dictionary/infrastructure/service/impl/SchedulerRecipientServiceImpl.scala | Scala | gpl-3.0 | 2,142 |
/*
* Copyright (c) 2013 Scott Abernethy.
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package model
import org.specs2.mutable.Specification
import org.specs2.mock.Mockito
import model.Mythos._
import gate.T
import java.sql.Timestamp
import db.TestDb
import test.WithTestApplication
class ClonedSnapshotFactoryTest extends Specification with Mockito {
// step {
// inTransaction{
// val time1 = T.at(2011, 3, 20, 1, 2, 3)
// val time2 = T.at(2011, 3, 22, 1, 2, 3)
// artifacts.delete(from(artifacts)(a => select(a)))
// artifacts.insert(Artifact.create(1L, "a/b/c", time1, T.now))
// artifacts.insert(Artifact.create(2L, "fudge", time1, T.now))
// artifacts.insert(Artifact.create(1L, "d/e/f", time2, T.now))
// artifacts.insert(Artifact.create(2L, "sugar", time2, T.now))
// artifacts.insert(Artifact.create(2L, "chocolate", time2, T.now))
// }
// }
"ClonedSnapshotFactory" should {
"be empty for no clones" in new WithTestApplication {
import org.squeryl.PrimitiveTypeMode._
inTransaction(clones.delete(from(clones)(c => select(c))))
val x = new ClonedSnapshotFactory
val xx = x.create(2)
xx.cloned must beEmpty
}
"only show artifacts cloned by cultist" in new WithTestApplication {
import org.squeryl.PrimitiveTypeMode._
inTransaction(clones.delete(from(clones)(c => select(c))))
val i = inTransaction(from(artifacts)(a => select(a.id) orderBy(a.id asc)).headOption) getOrElse -1L
val myClone = inTransaction(clones.insert(Clone.create(i, 2L, CloneState.awaiting)))
val theirClone = inTransaction(clones.insert(Clone.create(i, 1L, CloneState.cloning)))
val x = new ClonedSnapshotFactory
val xx = x.create(2)
xx.cloned must haveSize(0)
}
"only show artifacts cloned by cultist, in the last 7 days" in new WithTestApplication {
import org.squeryl.PrimitiveTypeMode._
inTransaction(clones.delete(from(clones)(c => select(c))))
val i = inTransaction(from(artifacts)(a => select(a.id) orderBy(a.id asc)).headOption) getOrElse -1L
val myClone = Clone.create(i, 2L, CloneState.cloned)
myClone.attempted = T.ago(6 * 24 * 60 * 60 * 1000)
val myOldClone = Clone.create(i+1, 2L, CloneState.cloned)
myOldClone.attempted = T.ago((7 * 24 * 60 * 60 * 1000) + 1)
inTransaction {
clones.insert(myClone)
clones.insert(myOldClone)
}
val x = new ClonedSnapshotFactory
val xx = x.create(2)
xx.cloned must haveSize(1)
}
"ordered by non-completes by request date, then completes by completion date" in new WithTestApplication {
import org.squeryl.PrimitiveTypeMode._
inTransaction {
// artifacts.delete(from(artifacts)(a => select(a)))
val a1 = artifacts.insert(Artifact.create(1L, "aa", T.now, T.now))
val a2 = artifacts.insert(Artifact.create(1L, "bb", T.now, T.now))
val a3 = artifacts.insert(Artifact.create(1L, "cc", T.now, T.now))
val a4 = artifacts.insert(Artifact.create(1L, "dd", T.now, T.now))
val a5 = artifacts.insert(Artifact.create(1L, "ee", T.now, T.now))
clones.delete(from(clones)(c => select(c)))
clones.insert(Clone.fake(a3.id, 2L, CloneState.awaiting, new Timestamp(600000), T.yesterday))
clones.insert(Clone.fake(a2.id, 2L, CloneState.cloning, new Timestamp(700000), T.yesterday))
clones.insert(Clone.fake(a5.id, 2L, CloneState.cloned, new Timestamp(700000), T.ago(3 * 24 * 60 * 60 * 1000)))
clones.insert(Clone.fake(a4.id, 2L, CloneState.awaiting, new Timestamp(610000), T.yesterday))
clones.insert(Clone.fake(a1.id, 2L, CloneState.cloned, new Timestamp(800000), T.ago(4 * 24 * 60 * 60 * 1000)))
val x = new ClonedSnapshotFactory
val xx = x.create(2)
xx.cloned must haveSize(2)
xx.cloned(0)._1 must be_==(a5)
xx.cloned(1)._1 must be_==(a1)
}
}
}
}
| scott-abernethy/opener-of-the-way | test/model/ClonedSnapshotFactoryTest.scala | Scala | gpl-3.0 | 4,575 |
package wow.common.config
/**
* Makes an enumeration config convertible
*/
trait ConfigurationSerializableEnumeration {
this: Enumeration =>
/**
* Config converter for Value type
*/
implicit val valueConfigConverter = wow.common.config.deriveEnumValue(this)
/**
* Config converter for ValueSet type
*/
implicit val valueSetConfigConverter = wow.common.config.deriveEnumValueSet(this)
}
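
// Usage sketch (hypothetical enumeration; assumes the wow.common.config derivation
// helpers used above are in scope):
//   object LogLevel extends Enumeration with ConfigurationSerializableEnumeration {
//     val Debug, Info, Warn, Error = Value
//   }
// Mixing in the trait provides the implicit converters needed to read LogLevel.Value
// and LogLevel.ValueSet fields directly from configuration.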
| SKNZ/SpinaciCore | wow/core/src/main/scala/wow/common/config/ConfigurationSerializableEnumeration.scala | Scala | mit | 422 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.accounts.frsse2008
import uk.gov.hmrc.ct.accounts.retriever.AccountsBoxRetriever
import uk.gov.hmrc.ct.box._
case class AC22(value: Option[Int]) extends CtBoxIdentifier(name = "Current Other operating income")
with CtOptionalInteger with Input
with SelfValidatableBox[AccountsBoxRetriever, Option[Int]] {
override def validate(boxRetriever: AccountsBoxRetriever): Set[CtValidation] = {
validateMoney(value)
}
} | liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/accounts/frsse2008/AC22.scala | Scala | apache-2.0 | 1,120 |
package application
import java.awt.image.BufferedImage
import org.joda.time.DateTime
class Image(var imageId: Int, var loadTime: Option[DateTime], var image: BufferedImage)
| abtv/RemoteViewer | src/main/scala/application/Image.scala | Scala | mit | 177 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package spark.scheduler
import java.io._
import java.util.{HashMap => JHashMap}
import java.util.zip.{GZIPInputStream, GZIPOutputStream}
import scala.collection.mutable.{ArrayBuffer, HashMap}
import scala.collection.JavaConversions._
import it.unimi.dsi.fastutil.io.FastBufferedOutputStream
import com.ning.compress.lzf.LZFInputStream
import com.ning.compress.lzf.LZFOutputStream
import spark._
import spark.executor.ShuffleWriteMetrics
import spark.storage._
import spark.util.{TimeStampedHashMap, MetadataCleaner}
private[spark] object ShuffleMapTask {
// A simple map between the stage id to the serialized byte array of a task.
// Served as a cache for task serialization because serialization can be
// expensive on the master node if it needs to launch thousands of tasks.
val serializedInfoCache = new TimeStampedHashMap[Int, Array[Byte]]
val metadataCleaner = new MetadataCleaner("ShuffleMapTask", serializedInfoCache.clearOldValues)
def serializeInfo(stageId: Int, rdd: RDD[_], dep: ShuffleDependency[_,_]): Array[Byte] = {
synchronized {
val old = serializedInfoCache.get(stageId).orNull
if (old != null) {
return old
} else {
val out = new ByteArrayOutputStream
val ser = SparkEnv.get.closureSerializer.newInstance()
val objOut = ser.serializeStream(new GZIPOutputStream(out))
objOut.writeObject(rdd)
objOut.writeObject(dep)
objOut.close()
val bytes = out.toByteArray
serializedInfoCache.put(stageId, bytes)
return bytes
}
}
}
def deserializeInfo(stageId: Int, bytes: Array[Byte]): (RDD[_], ShuffleDependency[_,_]) = {
synchronized {
val loader = Thread.currentThread.getContextClassLoader
val in = new GZIPInputStream(new ByteArrayInputStream(bytes))
val ser = SparkEnv.get.closureSerializer.newInstance()
val objIn = ser.deserializeStream(in)
val rdd = objIn.readObject().asInstanceOf[RDD[_]]
val dep = objIn.readObject().asInstanceOf[ShuffleDependency[_,_]]
return (rdd, dep)
}
}
// Since both the JarSet and FileSet have the same format this is used for both.
def deserializeFileSet(bytes: Array[Byte]) : HashMap[String, Long] = {
val in = new GZIPInputStream(new ByteArrayInputStream(bytes))
val objIn = new ObjectInputStream(in)
val set = objIn.readObject().asInstanceOf[Array[(String, Long)]].toMap
return (HashMap(set.toSeq: _*))
}
def clearCache() {
synchronized {
serializedInfoCache.clear()
}
}
}
private[spark] class ShuffleMapTask(
stageId: Int,
var rdd: RDD[_],
var dep: ShuffleDependency[_,_],
var partition: Int,
@transient private var locs: Seq[String])
extends Task[MapStatus](stageId)
with Externalizable
with Logging {
protected def this() = this(0, null, null, 0, null)
@transient private val preferredLocs: Seq[String] = if (locs == null) Nil else locs.toSet.toSeq
{
// DEBUG code
preferredLocs.foreach (hostPort => Utils.checkHost(Utils.parseHostPort(hostPort)._1, "preferredLocs : " + preferredLocs))
}
var split = if (rdd == null) {
null
} else {
rdd.partitions(partition)
}
override def writeExternal(out: ObjectOutput) {
RDDCheckpointData.synchronized {
split = rdd.partitions(partition)
out.writeInt(stageId)
val bytes = ShuffleMapTask.serializeInfo(stageId, rdd, dep)
out.writeInt(bytes.length)
out.write(bytes)
out.writeInt(partition)
out.writeLong(generation)
out.writeObject(split)
}
}
override def readExternal(in: ObjectInput) {
val stageId = in.readInt()
val numBytes = in.readInt()
val bytes = new Array[Byte](numBytes)
in.readFully(bytes)
val (rdd_, dep_) = ShuffleMapTask.deserializeInfo(stageId, bytes)
rdd = rdd_
dep = dep_
partition = in.readInt()
generation = in.readLong()
split = in.readObject().asInstanceOf[Partition]
}
override def run(attemptId: Long): MapStatus = {
val numOutputSplits = dep.partitioner.numPartitions
val taskContext = new TaskContext(stageId, partition, attemptId)
metrics = Some(taskContext.taskMetrics)
val blockManager = SparkEnv.get.blockManager
var shuffle: ShuffleBlocks = null
var buckets: ShuffleWriterGroup = null
try {
// Obtain all the block writers for shuffle blocks.
val ser = SparkEnv.get.serializerManager.get(dep.serializerClass)
shuffle = blockManager.shuffleBlockManager.forShuffle(dep.shuffleId, numOutputSplits, ser)
buckets = shuffle.acquireWriters(partition)
// Write the map output to its associated buckets.
for (elem <- rdd.iterator(split, taskContext)) {
val pair = elem.asInstanceOf[(Any, Any)]
val bucketId = dep.partitioner.getPartition(pair._1)
buckets.writers(bucketId).write(pair)
}
// Commit the writes. Get the size of each bucket block (total block size).
var totalBytes = 0L
val compressedSizes: Array[Byte] = buckets.writers.map { writer: BlockObjectWriter =>
writer.commit()
writer.close()
val size = writer.size()
totalBytes += size
MapOutputTracker.compressSize(size)
}
// Update shuffle metrics.
val shuffleMetrics = new ShuffleWriteMetrics
shuffleMetrics.shuffleBytesWritten = totalBytes
metrics.get.shuffleWriteMetrics = Some(shuffleMetrics)
return new MapStatus(blockManager.blockManagerId, compressedSizes)
} catch { case e: Exception =>
// If there is an exception from running the task, revert the partial writes
// and throw the exception upstream to Spark.
if (buckets != null) {
buckets.writers.foreach(_.revertPartialWrites())
}
throw e
} finally {
// Release the writers back to the shuffle block manager.
if (shuffle != null && buckets != null) {
shuffle.releaseWriters(buckets)
}
// Execute the callbacks on task completion.
taskContext.executeOnCompleteCallbacks()
}
}
override def preferredLocations: Seq[String] = preferredLocs
override def toString = "ShuffleMapTask(%d, %d)".format(stageId, partition)
}
| wgpshashank/spark | core/src/main/scala/spark/scheduler/ShuffleMapTask.scala | Scala | apache-2.0 | 7,038 |
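A minimal, Spark-free sketch of the bucketing flow in ShuffleMapTask.run above: every record is routed to a bucket by partitioning on its key, and one size per bucket is reported back, much as the returned MapStatus carries one compressed size per block. The modulo partitioner and the plain element counts below are illustrative assumptions, not the actual Spark HashPartitioner or MapOutputTracker.compressSize logic.

import scala.collection.mutable.ArrayBuffer

object ShuffleBucketingSketch {
  // Stand-in for dep.partitioner.getPartition: a non-negative bucket id per key.
  private def bucketFor(key: Any, numBuckets: Int): Int = {
    val raw = key.hashCode % numBuckets
    if (raw < 0) raw + numBuckets else raw
  }

  def main(args: Array[String]): Unit = {
    val numOutputSplits = 4
    val records = Seq("a" -> 1, "b" -> 2, "c" -> 3, "d" -> 4, "e" -> 5)

    // Write each map-side record into its bucket, like buckets.writers(bucketId).write(pair).
    val buckets = Array.fill(numOutputSplits)(ArrayBuffer.empty[(String, Int)])
    for (pair <- records) buckets(bucketFor(pair._1, numOutputSplits)) += pair

    // Report one size per bucket; the real task compresses these sizes into the MapStatus.
    buckets.zipWithIndex.foreach { case (bucket, id) =>
      println(s"bucket $id: ${bucket.toList} (size=${bucket.size})")
    }
  }
}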
package uppsat.solver;
import java.io.{BufferedReader, FileNotFoundException, InputStreamReader};
import uppsat.globalOptions
import uppsat.Timer
import uppsat.Timer.TimeoutException
class Z3Solver(val name : String = "Z3",
val checkSatCmd : String = "(check-sat)")
extends SMTSolver {
case class Z3Exception(msg : String) extends Exception("Z3 error: " + msg)
def evaluate(formula : String) = Timer.measure("Z3Solver.runSolver") {
import sys.process._
globalOptions.verbose("Using Z3 with seed: " + globalOptions.RANDOM_SEED)
val z3Binary = "z3 sat.random_seed=" + globalOptions.RANDOM_SEED
val cmd =
if (globalOptions.DEADLINE.isDefined) {
val dlf = ((globalOptions.remainingTime.get) / 1000.0).ceil.toInt
z3Binary + " -T:" + dlf + " -in -smt2"
} else {
z3Binary + " -in -smt2"
}
try {
val process = Runtime.getRuntime().exec(cmd)
print("Started process: " + process)
val stdin = process.getOutputStream ()
val stderr = process.getErrorStream ()
val stdout = process.getInputStream ()
stdin.write((formula + "\\n(exit)\\n").getBytes("UTF-8"));
stdin.close();
val outReader = new BufferedReader(new InputStreamReader (stdout))
var result = List() : List[String]
val toPattern = ".*timeout.*".r
// TODO: Maybe restore the errorPattern but excluding model calls
var line = outReader.readLine()
while (line != null) {
line match {
case toPattern() => throw new TimeoutException("Z3Solver.evaluate")
case other => result = result ++ List(other)
}
line = outReader.readLine()
}
process.waitFor();
val exitValue = process.exitValue()
result.mkString("\\n")
} catch {
case e : java.io.IOException => {
        // Most times we get a "broken pipe" because of a timeout, so if the deadline is
        // violated, let's throw a timeout exception instead.
globalOptions.checkTimeout()
// If it wasn't timeout, probably its because z3 binary is not found
val msg = "(probably) z3 binary not found"
throw new Z3Exception(msg)
}
}
}
def parseOutput(output : String,
extractSymbols : List[String])
: Option[Map[String, String]] = {
val lines = output.split("\\n")
lines.head.trim() match {
case "timeout" => throw new TimeoutException("Z3solver")
case "sat" => Some((extractSymbols zip lines.tail).toMap)
case "unsat" => None
case result => {
// Make sure this is not timeout related.
globalOptions.checkTimeout()
val msg = "Trying to get model from non-sat result (" + output + ")"
throw new Z3Exception(msg)
}
}
}
def getStringModel(formula : String, extractSymbols : List[String]) = {
val extendedFormula = formula + "\\n" + checkSatCmd +
(extractSymbols.map("(eval " + _ + ")").mkString("\\n", "\\n", ""))
val result = evaluate(extendedFormula)
parseOutput(result, extractSymbols)
}
def checkSat(formula : String) : Boolean = {
val result = evaluate(formula + "\\n" + checkSatCmd)
val retVal = result.split("\\n").head.trim()
retVal match {
case "sat" => true
case "unsat" => false
case str => {
val msg = "Unexpected result: " + str
throw new Z3Exception(msg)
}
}
}
def getAnswer(formula : String) : String = {
val result = evaluate(formula + "\\n" + checkSatCmd)
val retVal = result.split("\\n")
retVal.head.trim() match {
case "sat" => retVal(1).trim()
case str => {
val msg = "Unexpected result: " + str
throw new Z3Exception(msg)
}
}
}
}
| uuverifiers/uppsat | src/main/scala/uppsat/solver/Z3Solver.scala | Scala | gpl-3.0 | 3,727 |
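A usage sketch for the Z3Solver API above, assuming a z3 binary on the PATH (the class shells out to "z3 ... -in -smt2") and the uppsat classes on the classpath; the formula and the x/y symbol names are made up for illustration.

import uppsat.solver.Z3Solver

object Z3SolverUsageSketch {
  def main(args: Array[String]): Unit = {
    val solver = new Z3Solver()
    // Declarations and assertions only: the solver itself appends "(check-sat)" and,
    // for getStringModel, one "(eval ...)" line per requested symbol.
    val formula =
      """(declare-fun x () Int)
        |(declare-fun y () Int)
        |(assert (= (+ x y) 10))
        |(assert (> x 3))""".stripMargin

    if (solver.checkSat(formula)) {
      // For a sat formula, getStringModel returns Some(symbol -> value), zipped in order.
      solver.getStringModel(formula, List("x", "y")) match {
        case Some(model) => model.foreach { case (sym, value) => println(sym + " = " + value) }
        case None        => println("unexpectedly unsat")
      }
    } else {
      println("unsat")
    }
  }
}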
package org.bluescale.util
import org.apache.log4j.Logger;
trait LogHelper {
val loggerName = this.getClass.getName
val logger = Logger.getLogger(loggerName)
def log(msg: => String) {
logger.info(msg)
}
def debug(msg: => String) {
println(msg)
}
def error(msg: =>String) {
logger.error(msg)
}
def error(ex:Exception, msg: =>String) {
logger.error(msg)
}
}
| BlueScale/BlueScale | src/main/scala/org/bluescale/util/LogHelper.scala | Scala | agpl-3.0 | 391 |
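A minimal usage sketch for the LogHelper trait above: mixing it in gives the class a log4j logger named after the concrete class. PaymentService is a made-up example and assumes log4j is configured on the classpath.

import org.bluescale.util.LogHelper

class PaymentService extends LogHelper {
  def charge(amount: BigDecimal): Unit = {
    log("charging " + amount)                       // INFO via the logger named after this class
    if (amount <= BigDecimal(0)) error("amount must be positive")
  }
}

object PaymentServiceDemo {
  def main(args: Array[String]): Unit = new PaymentService().charge(BigDecimal(42))
}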
/**
* Copyright 2015, deepsense.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.deepsense.deeplang.doperations
import scala.collection.JavaConverters._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.Row
import org.apache.spark.sql.types.{IntegerType, StructField, StructType}
import org.scalatest.Matchers
import org.scalatest.prop.GeneratorDrivenPropertyChecks
import io.deepsense.deeplang._
import io.deepsense.deeplang.doperables.dataframe.DataFrame
class DataFrameSplitterIntegSpec
extends DeeplangIntegTestSupport
with GeneratorDrivenPropertyChecks
with Matchers {
"SplitDataFrame" should {
"split randomly one df into two df in given range" in {
val input = Range(1, 100)
val parameterPairs = List(
(0.0, 0),
(0.3, 1),
(0.5, 2),
(0.8, 3),
(1.0, 4))
for((splitRatio, seed) <- parameterPairs) {
val rdd = createData(input)
val df = executionContext.dataFrameBuilder.buildDataFrame(createSchema, rdd)
val (df1, df2) = executeOperation(
executionContext,
new Split()
.setSplitMode(
SplitModeChoice.Random()
.setSplitRatio(splitRatio)
.setSeed(seed / 2)))(df)
validateSplitProperties(df, df1, df2)
}
}
"split conditionally one df into two df in given range" in {
val input = Range(1, 100)
val condition = "value > 20"
val predicate: Int => Boolean = _ > 20
val (expectedDF1, expectedDF2) =
(input.filter(predicate), input.filter(!predicate(_)))
val rdd = createData(input)
val df = executionContext.dataFrameBuilder.buildDataFrame(createSchema, rdd)
val (df1, df2) = executeOperation(
executionContext,
new Split()
.setSplitMode(
SplitModeChoice.Conditional()
.setCondition(condition)))(df)
df1.sparkDataFrame.collect().map(_.get(0)) should contain theSameElementsAs expectedDF1
df2.sparkDataFrame.collect().map(_.get(0)) should contain theSameElementsAs expectedDF2
validateSplitProperties(df, df1, df2)
}
}
private def createSchema: StructType = {
StructType(List(
StructField("value", IntegerType, nullable = false)
))
}
private def createData(data: Seq[Int]): RDD[Row] = {
sparkContext.parallelize(data.map(Row(_)))
}
private def executeOperation(context: ExecutionContext, operation: DOperation)
(dataFrame: DataFrame): (DataFrame, DataFrame) = {
val operationResult = operation.executeUntyped(Vector[DOperable](dataFrame))(context)
val df1 = operationResult.head.asInstanceOf[DataFrame]
val df2 = operationResult.last.asInstanceOf[DataFrame]
(df1, df2)
}
def validateSplitProperties(inputDF: DataFrame, outputDF1: DataFrame, outputDF2: DataFrame)
: Unit = {
val dfCount = inputDF.sparkDataFrame.count()
val df1Count = outputDF1.sparkDataFrame.count()
val df2Count = outputDF2.sparkDataFrame.count()
val rowsDf = inputDF.sparkDataFrame.collectAsList().asScala
val rowsDf1 = outputDF1.sparkDataFrame.collectAsList().asScala
val rowsDf2 = outputDF2.sparkDataFrame.collectAsList().asScala
val intersect = rowsDf1.intersect(rowsDf2)
intersect.size shouldBe 0
(df1Count + df2Count) shouldBe dfCount
rowsDf.toSet shouldBe rowsDf1.toSet.union(rowsDf2.toSet)
}
}
| deepsense-io/seahorse-workflow-executor | deeplang/src/it/scala/io/deepsense/deeplang/doperations/DataFrameSplitterIntegSpec.scala | Scala | apache-2.0 | 3,967 |
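A Spark-free sketch of the invariants validateSplitProperties checks above, with plain Scala collections standing in for DataFrames and partition standing in for the conditional split; the predicate mirrors the spec's "value > 20" condition.

object SplitInvariantsSketch {
  def main(args: Array[String]): Unit = {
    val input = (1 until 100).toList
    val (kept, rest) = input.partition(_ > 20)

    assert(kept.toSet.intersect(rest.toSet).isEmpty)    // the two outputs are disjoint
    assert(kept.size + rest.size == input.size)         // their sizes add up to the input size
    assert(kept.toSet.union(rest.toSet) == input.toSet) // their union recovers the input rows
    println(s"kept=${kept.size}, rest=${rest.size}, total=${input.size}")
  }
}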
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.log
/**
* Configuration parameters for the log cleaner
*
* @param numThreads The number of cleaner threads to run
* @param dedupeBufferSize The total memory used for log deduplication
 * @param dedupeBufferLoadFactor The maximum percent full for the deduplication buffer
 * @param ioBufferSize The size of the I/O buffers used for log segment reads and writes
 * @param maxMessageSize The maximum size of a message that can appear in the log
* @param maxIoBytesPerSecond The maximum read and write I/O that all cleaner threads are allowed to do
* @param backOffMs The amount of time to wait before rechecking if no logs are eligible for cleaning
* @param enableCleaner Allows completely disabling the log cleaner
* @param hashAlgorithm The hash algorithm to use in key comparison.
*/
case class CleanerConfig(val numThreads: Int = 1,
val dedupeBufferSize: Long = 4*1024*1024L,
val dedupeBufferLoadFactor: Double = 0.9d,
val ioBufferSize: Int = 1024*1024,
val maxMessageSize: Int = 32*1024*1024,
val maxIoBytesPerSecond: Double = Double.MaxValue,
val backOffMs: Long = 60 * 1000,
val enableCleaner: Boolean = true,
val hashAlgorithm: String = "MD5") {
}
| akosiaris/kafka | core/src/main/scala/kafka/log/CleanerConfig.scala | Scala | apache-2.0 | 2,092 |
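A construction sketch for CleanerConfig: callers typically keep the defaults and override a few settings by name. The values below are arbitrary examples for illustration, not recommended production settings.

import kafka.log.CleanerConfig

object CleanerConfigExample {
  def main(args: Array[String]): Unit = {
    val config = CleanerConfig(
      numThreads = 2,
      dedupeBufferSize = 128 * 1024 * 1024L,  // 128 MB shared across cleaner threads
      maxIoBytesPerSecond = 50d * 1024 * 1024 // throttle cleaner I/O to about 50 MB/s
    )
    println("cleaner enabled=" + config.enableCleaner + ", threads=" + config.numThreads)
  }
}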
/*
Copyright 2014 Janek Bogucki
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.scalacraft.domain.v2.net.unconstrained
import org.scalatest.FlatSpec
import org.scalatest.Matchers
import org.scalatest.OptionValues._
import com.scalacraft.domain.v2.net.{Port => Other}
/**
* Specification for an unconstrained `Port`
*/
class PortSpec extends FlatSpec with Matchers {
/* Greater than 65535 */
private val PortNumber = 551140
private val ValidPortNumber = 4580
private val InvalidPortNumber = -1
behavior of "An unconstrained Port"
/* Pattern Matching */
it should "be usable in pattern matching" in {
def m(x: Int) = x match {
case Port(p) => p
case _ => None
}
m(5) should equal(5)
}
it should "be usable in string pattern matching" in {
def m(x: String) = x match {
case Port(p) => p
case _ => None
}
m(PortNumber.toString) should equal(PortNumber)
}
/* Implicit Conversions */
it should "implicitly convert to an int" in {
val port = Port(PortNumber)
val i: Int = port
i should equal(PortNumber)
}
it should "implicitly convert to a string" in {
val port = Port(PortNumber)
val s: String = port
s should equal(PortNumber.toString)
}
it should "implicitly convert to a constrained Port when the port is valid" in {
val port = Port(ValidPortNumber)
val otherOpt: Option[Other] = port
otherOpt.value.portNumber should equal(ValidPortNumber)
}
it should "implicitly convert to None when port is invalid" in {
val port = Port(InvalidPortNumber)
val otherOpt: Option[Other] = port
otherOpt should be(None)
}
}
| janekdb/scalacraft-domain | src/test/scala/com/scalacraft/domain/v2/net/unconstrained/PortSpec.scala | Scala | apache-2.0 | 2,261 |