code | repo_name | path | language | license | size
---|---|---|---|---|---
stringlengths 5-1M | stringlengths 5-109 | stringlengths 6-208 | stringclasses 1 value | stringclasses 15 values | int64 5-1M
/*
* Copyright 2021 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package views.tradingpremises
import forms.{Form2, InvalidForm, ValidForm}
import jto.validation.{Path, ValidationError}
import models.tradingpremises._
import play.api.i18n.Messages
import utils.AmlsViewSpec
import views.Fixture
import views.html.tradingpremises.remove_agent_premises_reasons
class remove_agent_premises_reasonsSpec extends AmlsViewSpec {
import models.tradingpremises.RemovalReasonConstants._
trait ViewFixture extends Fixture {
lazy val remove_agent_premises_reasons = app.injector.instanceOf[remove_agent_premises_reasons]
implicit val requestWithToken = addTokenForView()
}
"remove_agent_premises_reasons view" must {
"have correct title" in new ViewFixture {
val form2: ValidForm[AgentRemovalReason] = Form2(AgentRemovalReason(Schema.MAJOR_COMPLIANCE_ISSUES))
def view = remove_agent_premises_reasons(form2, 1, false)
doc.title must startWith(Messages("tradingpremises.remove_reasons.agent.premises.title"))
}
"have correct headings" in new ViewFixture {
val form2: ValidForm[AgentRemovalReason] = Form2(AgentRemovalReason(Schema.MAJOR_COMPLIANCE_ISSUES))
def view = remove_agent_premises_reasons(form2, 1, true)
heading.html must be(Messages("tradingpremises.remove_reasons.agent.premises.title"))
subHeading.html must include(Messages("summary.tradingpremises"))
}
"have a back link" in new ViewFixture {
val form2: ValidForm[AgentRemovalReason] = Form2(AgentRemovalReason(Schema.MAJOR_COMPLIANCE_ISSUES))
def view = remove_agent_premises_reasons(form2, 1, true)
doc.getElementsByAttributeValue("class", "link-back") must not be empty
}
"show errors in the correct locations" when {
"nothing is selected" in new ViewFixture {
val field = "removalReason"
val errorKey = "error.missing"
val invalidForm: InvalidForm = InvalidForm(Map.empty,
Seq((Path \ field, Seq(ValidationError(errorKey)))))
def view = remove_agent_premises_reasons(invalidForm, 0, false)
errorSummary.html() must include(errorKey)
doc.getElementById(field).parent().getElementsByClass("error-notification").first().html() must include(errorKey)
}
"Other is selected but no other reason is given" in new ViewFixture {
import models.tradingpremises.RemovalReasonConstants._
val field = "removalReasonOther"
val errorKey = "tradingpremises.remove_reasons.agent.other.missing"
val invalidForm: InvalidForm = InvalidForm(Map("removalReason" -> Seq(Form.OTHER)),
Seq((Path \ field, Seq(ValidationError(errorKey)))))
def view = remove_agent_premises_reasons(invalidForm, 0, false)
errorSummary.html() must include(Messages(errorKey))
doc.getElementById(field).parent().getElementsByClass("error-notification").first().html() must include(Messages(errorKey))
}
}
}
}
| hmrc/amls-frontend | test/views/tradingpremises/agent_removal_reasonSpec.scala | Scala | apache-2.0 | 3,543 |
/**
* Provide security features
*/
trait Secured {
private def check(request: RequestHeader) = {
if(request.session.get("id").isDefined){
Some(
User(
request.session.get("id").get.toLong
,request.session.get("email").get
,request.session.get("name").get
)
)
}
else{
None
}
}
/**
* Redirect to login if the user in not authorized.
*/
private def onUnauthorized(request: RequestHeader) = Results.Redirect(routes.Application.index)
// --
/**
* Action for authenticated users.
*/
def IsAuthenticated(f: => User => Request[AnyContent] => Result):EssentialAction = Security.Authenticated(check, onUnauthorized) { user =>
Action(request => f(user)(request))
}
/*def IsAuthenticated(b: BodyParser[Any])(f: User => Request[Any] => Result) = Security.Authenticated(check, onUnauthorized) { user =>
Action(b)(request => f(user)(request))
}*/
// overriding for file upload
def IsAuthenticated(b: BodyParser[play.api.mvc.MultipartFormData[play.api.libs.Files.TemporaryFile]])(f: User => Request[play.api.mvc.MultipartFormData[play.api.libs.Files.TemporaryFile]] => Result) = Security.Authenticated(check, onUnauthorized) { user =>
Action(b)(request => f(user)(request))
}
}
| Nexysweb/play-helpers | traits/tSecured.scala | Scala | gpl-2.0 | 1,304 |
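// Hedged usage sketch (not part of the repository above): a classic Play 2.x controller
// mixing in the Secured trait. The controller and action names are hypothetical, and the
// sketch assumes the same in-scope dependencies (play.api.mvc._, the User model, routes)
// that the trait itself relies on.
object Dashboard extends Controller with Secured {

  // check() only yields a User when the session carries "id", "email" and "name";
  // otherwise onUnauthorized redirects the browser to routes.Application.index.
  def index: EssentialAction = IsAuthenticated { user => request =>
    Ok("Signed in as " + user)
  }
}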
package edu.mit.csail.sdg.ormolu.rel
import edu.mit.csail.sdg.ormolu.form.Formula
import edu.mit.csail.sdg.hsqldb.data.access.Subquery
case class Comprehension(sub: Formula, args: Variable*) extends Relation {
require(args forall {_.arity == 1}, "The arguments of a comprehension must have arity 1")
override def arity: Int = args.size
override def toString: String = args.map { v => v.name + ": " + v.relation }.mkString("{", ",", "\n\t| %s}").format(sub)
override def query= querySpec
override def projection = args flatMap (_.projection)
override def filter = Vector(sub.boolExpr)
override def tables = for (arg <- args) yield Subquery(arg.relation.query) as arg.name
}
| dlreeves/ormolu | src/edu/mit/csail/sdg/ormolu/rel/Comprehension.scala | Scala | mit | 707 |
/*
* Ported from https://github.com/hamcrest/JavaHamcrest/
*/
package org.hamcrest.core
import org.hamcrest.BaseMatcher
import org.hamcrest.Description
import org.hamcrest.Matcher
class IsEqual[T](expectedValue: AnyRef) extends BaseMatcher[T] {
override def matches(actualValue: AnyRef): Boolean =
IsEqual.areEqual(actualValue, expectedValue)
override def describeTo(description: Description): Unit =
description.appendValue(expectedValue)
}
object IsEqual {
private[IsEqual] def areEqual(actual: AnyRef, expected: AnyRef): Boolean = {
(actual, expected) match {
case (null, _) => expected == null
case (actual: Array[_], expected: Array[_]) => actual.toList == expected.toList
case _ => actual.equals(expected)
}
}
def equalTo[T](operand: T): Matcher[T] =
new IsEqual[T](operand.asInstanceOf[AnyRef])
def equalToObject(operand: AnyRef): Matcher[AnyRef] =
new IsEqual[AnyRef](operand)
}
| lrytz/scala-js | junit-runtime/src/main/scala/org/hamcrest/core/IsEqual.scala | Scala | bsd-3-clause | 1,016 |
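// Hedged usage sketch (not part of the ported sources above): exercising the IsEqual
// factory methods through plain Scala assertions. The object name is hypothetical, and it
// assumes Matcher exposes matches(AnyRef) as in Hamcrest.
object IsEqualExample {
  import org.hamcrest.core.IsEqual.{equalTo, equalToObject}

  def main(args: Array[String]): Unit = {
    assert(equalTo("abc").matches("abc"))               // delegates to areEqual -> equals
    assert(!equalTo("abc").matches("abd"))
    assert(equalTo(Array(1, 2)).matches(Array(1, 2)))   // arrays are compared element-wise
    assert(!equalToObject("x").matches(null))           // null only matches an expected null
  }
}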
package mesosphere.mesos
import mesosphere.marathon.core.instance.Instance
import mesosphere.marathon.core.launcher.impl.TaskLabels
import mesosphere.marathon.state.{ PersistentVolume, ResourceRole, RunSpec, DiskType, DiskSource }
import mesosphere.marathon.stream.Implicits._
import mesosphere.marathon.tasks.{ PortsMatch, PortsMatcher, ResourceUtil }
import mesosphere.mesos.protos.Resource
import org.apache.mesos.Protos
import org.apache.mesos.Protos.Offer
import org.apache.mesos.Protos.Resource.DiskInfo.Source
import org.slf4j.LoggerFactory
import scala.annotation.tailrec
import scala.collection.immutable.Seq
object ResourceMatcher {
import ResourceUtil.RichResource
type Role = String
private[this] val log = LoggerFactory.getLogger(getClass)
/**
* A successful match result of the [[ResourceMatcher]].matchResources method.
*/
case class ResourceMatch(scalarMatches: Seq[ScalarMatch], portsMatch: PortsMatch) {
lazy val hostPorts: Seq[Option[Int]] = portsMatch.hostPorts
def scalarMatch(name: String): Option[ScalarMatch] = scalarMatches.find(_.resourceName == name)
def resources: Seq[Protos.Resource] =
scalarMatches.flatMap(_.consumedResources)(collection.breakOut) ++
portsMatch.resources
// TODO - this assumes that volume matches are one resource to one volume, which should be correct, but may not be.
val localVolumes: Seq[(DiskSource, PersistentVolume)] =
scalarMatches.collect { case r: DiskResourceMatch => r.volumes }.flatten
}
/**
* Restricts which resources are considered for matching.
*
* Disk resources are always discarded, since we do not want to match them by
* accident.
*
* @param acceptedRoles contains all Mesos resource roles that are accepted
* @param needToReserve if true, only unreserved resources will considered
* @param labelMatcher a matcher that checks if the given resource labels
* are compliant with the expected or not expected labels
*/
case class ResourceSelector(
acceptedRoles: Set[String],
needToReserve: Boolean,
labelMatcher: LabelMatcher) {
def apply(resource: Protos.Resource): Boolean = {
import ResourceSelector._
// resources with disks are matched by the VolumeMatcher or not at all
val noAssociatedVolume = !(resource.hasDisk && resource.getDisk.hasVolume)
def matchesLabels: Boolean = labelMatcher.matches(reservationLabels(resource))
noAssociatedVolume && acceptedRoles(resource.getRole) && matchesLabels
}
override def toString: String = {
val reserveString = if (needToReserve) " to reserve" else ""
val rolesString = acceptedRoles.mkString(", ")
s"Considering resources$reserveString with roles {$rolesString} $labelMatcher"
}
}
object ResourceSelector {
/** The reservation labels if the resource is reserved, or an empty Map */
private def reservationLabels(resource: Protos.Resource): Map[String, String] =
if (!resource.hasReservation || !resource.getReservation.hasLabels)
Map.empty
else {
resource.getReservation.getLabels.getLabelsList.map { label =>
label.getKey -> label.getValue
}(collection.breakOut)
}
/** Match resources with given roles that have at least the given labels */
def reservedWithLabels(acceptedRoles: Set[String], labels: Map[String, String]): ResourceSelector = {
ResourceSelector(acceptedRoles, needToReserve = false, LabelMatcher.WithReservationLabels(labels))
}
/** Match resources with given roles that do not have known reservation labels */
def reservable: ResourceSelector = {
ResourceSelector(Set(ResourceRole.Unreserved), needToReserve = true, LabelMatcher.WithoutReservationLabels)
}
/** Match any resources with given roles that do not have known reservation labels */
def any(acceptedRoles: Set[String]): ResourceSelector = {
ResourceSelector(acceptedRoles, needToReserve = false, LabelMatcher.WithoutReservationLabels)
}
}
private[mesos] sealed trait LabelMatcher {
def matches(resourceLabels: Map[String, String]): Boolean
}
private[this] object LabelMatcher {
case class WithReservationLabels(labels: Map[String, String]) extends LabelMatcher {
override def matches(resourceLabels: Map[String, String]): Boolean =
labels.forall { case (k, v) => resourceLabels.get(k).contains(v) }
override def toString: Role = {
val labelsStr = labels.map { case (k, v) => s"$k: $v" }.mkString(", ")
s"and labels {$labelsStr}"
}
}
case object WithoutReservationLabels extends LabelMatcher {
override def matches(resourceLabels: Map[String, String]): Boolean =
resourceLabels.keys.toSet.intersect(TaskLabels.labelKeysForTaskReservations).isEmpty
override def toString: Role = "without resident reservation labels"
}
}
/**
* Checks whether the given offer contains enough resources to launch a task of the given run spec
* or to make a reservation for a task.
*
* If a task uses local volumes, this method is typically called twice for every launch. Once
* for the reservation on UNRESERVED resources and once for every (re-)launch on RESERVED resources.
*
* If matching on RESERVED resources as specified by the ResourceSelector, resources for volumes
* have to be matched separately (e.g. by the [[PersistentVolumeMatcher]]). If matching on UNRESERVED
* resources, the disk resources for the local volumes are included since they must become part of
* the reservation.
*/
def matchResources(offer: Offer, runSpec: RunSpec, runningInstances: => Seq[Instance],
selector: ResourceSelector): ResourceMatchResponse = {
val groupedResources: Map[Role, Seq[Protos.Resource]] = offer.getResourcesList.groupBy(_.getName).map { case (k, v) => k -> v.to[Seq] }
val scalarResourceMatch = matchScalarResource(groupedResources, selector) _
val diskResourceMatch = matchDiskResource(groupedResources, selector) _
// Local volumes only need to be matched if we are making a reservation for resident tasks --
// that means if the resources that are matched are still unreserved.
def needToReserveDisk = selector.needToReserve && runSpec.diskForPersistentVolumes > 0
val diskMatch = if (needToReserveDisk)
diskResourceMatch(
runSpec.resources.disk,
runSpec.persistentVolumes,
ScalarMatchResult.Scope.IncludingLocalVolumes)
else
diskResourceMatch(runSpec.resources.disk, Nil, ScalarMatchResult.Scope.ExcludingLocalVolumes)
val scalarMatchResults = (
Seq(
scalarResourceMatch(Resource.CPUS, runSpec.resources.cpus, ScalarMatchResult.Scope.NoneDisk),
scalarResourceMatch(Resource.MEM, runSpec.resources.mem, ScalarMatchResult.Scope.NoneDisk),
scalarResourceMatch(Resource.GPUS, runSpec.resources.gpus.toDouble, ScalarMatchResult.Scope.NoneDisk)) ++
diskMatch
).filter(_.requiredValue != 0)
// add scalar resources to noOfferMatchReasons
val noOfferMatchReasons = scalarMatchResults
.filter(scalar => !scalar.matches)
.map(scalar => NoOfferMatchReason.fromResourceType(scalar.resourceName)).toBuffer
// Current mesos implementation will only send resources with one distinct role assigned.
// If not a single resource (matching the resource selector) was found, a NoOfferMatchReason.UnfulfilledRole
// will be added to noOfferMatchReasons
if (!offer.getResourcesList.exists(resource => selector.apply(resource))) {
noOfferMatchReasons += NoOfferMatchReason.UnfulfilledRole
}
logUnsatisfiedResources(offer, selector, scalarMatchResults)
def portsMatchOpt: Option[PortsMatch] = PortsMatcher(runSpec, offer, selector).portsMatch
val meetsAllConstraints: Boolean = {
lazy val instances = runningInstances.filter { inst =>
inst.isLaunched && inst.runSpecVersion >= runSpec.versionInfo.lastConfigChangeVersion
}
val badConstraints = runSpec.constraints.filterNot { constraint =>
Constraints.meetsConstraint(instances, offer, constraint)
}
if (badConstraints.nonEmpty) {
// Add constraints to noOfferMatchReasons
noOfferMatchReasons += NoOfferMatchReason.UnfulfilledConstraint
if (log.isInfoEnabled) {
log.info(
s"Offer [${offer.getId.getValue}]. Constraints for run spec [${runSpec.id}] not satisfied.\\n" +
s"The conflicting constraints are: [${badConstraints.mkString(", ")}]"
)
}
}
badConstraints.isEmpty
}
val resourceMatchOpt = if (scalarMatchResults.forall(_.matches) && meetsAllConstraints) {
portsMatchOpt match {
case Some(portsMatch) =>
Some(ResourceMatch(scalarMatchResults.collect { case m: ScalarMatch => m }, portsMatch))
case None =>
// Add ports to noOfferMatchReasons
noOfferMatchReasons += NoOfferMatchReason.InsufficientPorts
None
}
} else {
None
}
resourceMatchOpt match {
case Some(resourceMatch) => ResourceMatchResponse.Match(resourceMatch)
case None => ResourceMatchResponse.NoMatch(noOfferMatchReasons.to[Seq])
}
}
private[mesos] case class SourceResources(source: Option[Source], resources: List[Protos.Resource]) {
lazy val size = resources.foldLeft(0.0)(_ + _.getScalar.getValue)
}
private[mesos] object SourceResources extends ((Option[Source], List[Protos.Resource]) => SourceResources) {
def listFromResources(l: List[Protos.Resource]): List[SourceResources] = {
l.groupBy(_.getDiskSourceOption).map(SourceResources.tupled).toList
}
}
/*
Prioritize resources to make the most sensible allocation.
- If requesting full disk, allocate the smallest disk volume that meets constraints
- If requesting root, just return it because there can only be one.
- If requesting path disk, allocate the largest volume possible to spread allocation evenly.
This may not be ideal if you'd prefer to leave room for larger allocations instead.
TODO - test. Also, parameterize?
*/
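// Illustrative ordering (sizes are hypothetical, not from the original sources): given
// SourceResources groups of 100, 500 and 250 units,
//   DiskType.Mount -> [100, 250, 500]  (smallest volume that still fits is tried first)
//   DiskType.Path  -> [500, 250, 100]  (largest first, to spread allocations evenly)
//   DiskType.Root  -> order unchanged  (there is only one root disk)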
private[this] def prioritizeDiskResources(
diskType: DiskType,
resources: List[SourceResources]): List[SourceResources] = {
diskType match {
case DiskType.Root =>
resources
case DiskType.Path =>
resources.sortBy(_.size)(implicitly[Ordering[Double]].reverse)
case DiskType.Mount =>
resources.sortBy(_.size)
}
}
// format: OFF
@tailrec
private[this] def findDiskGroupMatches(
requiredValue: Double,
resourcesRemaining: List[SourceResources],
allSourceResources: List[SourceResources],
matcher: Protos.Resource => Boolean):
Option[(Option[Source], List[GeneralScalarMatch.Consumption], List[SourceResources])] =
// format: ON
resourcesRemaining match {
case Nil =>
None
case next :: rest =>
consumeResources(requiredValue, next.resources, matcher = matcher) match {
case Left(_) =>
findDiskGroupMatches(requiredValue, rest, allSourceResources, matcher)
case Right((resourcesConsumed, remainingAfterConsumption)) =>
val sourceResourcesAfterConsumption = if (remainingAfterConsumption.isEmpty)
None
else
Some(next.copy(resources = remainingAfterConsumption))
Some((
next.source,
resourcesConsumed,
(sourceResourcesAfterConsumption ++ allSourceResources.filterNot(_ == next)).toList))
}
}
/**
* Match volumes against disk resources and return results which keep disk sources and persistentVolumes associated.
*
* TODO - handle matches for a single volume across multiple resource offers for the same disk
*/
private[this] def matchDiskResource(
groupedResources: Map[Role, Seq[Protos.Resource]], selector: ResourceSelector)(
scratchDisk: Double,
volumes: Seq[PersistentVolume],
scope: ScalarMatchResult.Scope = ScalarMatchResult.Scope.NoneDisk): Seq[ScalarMatchResult] = {
@tailrec
def findMatches(
diskType: DiskType,
pendingAllocations: List[Either[Double, PersistentVolume]],
resourcesRemaining: List[SourceResources],
resourcesConsumed: List[DiskResourceMatch.Consumption] = Nil): Either[DiskResourceNoMatch, DiskResourceMatch] = {
val orderedResources = prioritizeDiskResources(diskType, resourcesRemaining)
pendingAllocations match {
case Nil =>
Right(DiskResourceMatch(diskType, resourcesConsumed, scope))
case nextAllocation :: restAllocations =>
val (matcher, nextAllocationSize) = nextAllocation match {
case Left(size) => ({ _: Protos.Resource => true }, size)
case Right(v) => (
VolumeConstraints.meetsAllConstraints(_: Protos.Resource, v.persistent.constraints),
v.persistent.size.toDouble
)
}
findDiskGroupMatches(nextAllocationSize, orderedResources, orderedResources, matcher) match {
case None =>
Left(
DiskResourceNoMatch(resourcesConsumed, resourcesRemaining.flatMap(_.resources), nextAllocation, scope))
case Some((source, generalConsumptions, decrementedResources)) =>
val consumptions = generalConsumptions.map { c =>
DiskResourceMatch.Consumption(c, source, nextAllocation.right.toOption)
}
findMatches(
diskType,
restAllocations,
decrementedResources,
consumptions ++ resourcesConsumed)
}
}
}
/*
* The implementation for finding mount matches differs from disk matches because:
* - A mount volume cannot be partially allocated. The resource allocation request must be sized up to match the
* actual resource size
* - The mount volume can't be split amongst reserved / non-reserved.
* - The mount volume has an extra maxSize concern
*
* If this method can be generalized to work with the above code, then so be it.
*/
@tailrec def findMountMatches(
pendingAllocations: List[PersistentVolume],
resources: List[Protos.Resource],
resourcesConsumed: List[DiskResourceMatch.Consumption] = Nil): Either[DiskResourceNoMatch, DiskResourceMatch] = {
pendingAllocations match {
case Nil =>
Right(DiskResourceMatch(DiskType.Mount, resourcesConsumed, scope))
case nextAllocation :: restAllocations =>
resources.find { resource =>
val resourceSize = resource.getScalar.getValue
VolumeConstraints.meetsAllConstraints(resource, nextAllocation.persistent.constraints) &&
(resourceSize >= nextAllocation.persistent.size) &&
(resourceSize <= nextAllocation.persistent.maxSize.getOrElse(Long.MaxValue))
} match {
case Some(matchedResource) =>
val consumedAmount = matchedResource.getScalar.getValue
val grownVolume =
nextAllocation.copy(
persistent = nextAllocation.persistent.copy(
size = consumedAmount.toLong))
val consumption =
DiskResourceMatch.Consumption(
consumedAmount,
role = matchedResource.getRole,
reservation = if (matchedResource.hasReservation) Option(matchedResource.getReservation) else None,
source = DiskSource.fromMesos(matchedResource.getDiskSourceOption),
Some(grownVolume))
findMountMatches(
restAllocations,
resources.filterNot(_ == matchedResource),
consumption :: resourcesConsumed)
case None =>
Left(DiskResourceNoMatch(resourcesConsumed, resources, Right(nextAllocation), scope))
}
}
}
val diskResources = groupedResources.getOrElse(Resource.DISK, Seq.empty)
val resourcesByType: Map[DiskType, Seq[Protos.Resource]] = diskResources.groupBy { r =>
DiskSource.fromMesos(r.getDiskSourceOption).diskType
}.withDefault(_ => Nil)
val scratchDiskRequest = if (scratchDisk > 0.0) Some(Left(scratchDisk)) else None
val requestedResourcesByType: Map[DiskType, Seq[Either[Double, PersistentVolume]]] =
(scratchDiskRequest ++ volumes.map(Right(_)).toList).groupBy {
case Left(_) => DiskType.Root
case Right(p) => p.persistent.`type`
}.map { case (k, v) => k -> v.to[Seq] }
requestedResourcesByType.keys.map { diskType =>
val withBiggestRequestsFirst =
requestedResourcesByType(diskType).
toList.
sortBy({ r => r.right.map(_.persistent.size.toDouble).merge })(implicitly[Ordering[Double]].reverse)
val resources: List[Protos.Resource] = resourcesByType(diskType).filterAs(selector(_))(collection.breakOut)
if (diskType == DiskType.Mount) {
findMountMatches(
withBiggestRequestsFirst.flatMap(_.right.toOption),
resources)
} else
findMatches(
diskType,
withBiggestRequestsFirst,
SourceResources.listFromResources(resources))
}.toList.map(_.merge)
}
private[this] def matchScalarResource(
groupedResources: Map[Role, Seq[Protos.Resource]], selector: ResourceSelector)(
name: String, requiredValue: Double,
scope: ScalarMatchResult.Scope = ScalarMatchResult.Scope.NoneDisk): ScalarMatchResult = {
require(scope == ScalarMatchResult.Scope.NoneDisk || name == Resource.DISK)
val resourcesForName = groupedResources.getOrElse(name, Seq.empty)
val matchingScalarResources = resourcesForName.filter(selector(_))
consumeResources(requiredValue, matchingScalarResources.toList) match {
case Left(valueLeft) =>
NoMatch(name, requiredValue, requiredValue - valueLeft, scope = scope)
case Right((resourcesConsumed, remaining)) =>
GeneralScalarMatch(name, requiredValue, resourcesConsumed, scope = scope)
}
}
/**
* Given a list of resources, allocates the specified size.
*
* Returns an either:
*
* - Left: indicates failure; contains the amount failed to be matched.
* - Right: indicates success; contains a list of consumptions and a list of resources remaining after the
* allocation.
*/
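// Illustrative walk-through (resource values are hypothetical, not from the original sources):
//   consumeResources(2.5, List(cpus(2.0), cpus(1.0)))
//     drains 2.0 from the first resource and 0.5 from the second, returning
//     Right((List(Consumption(0.5, ...), Consumption(2.0, ...)),   // prepended, hence reverse order
//            List(cpus(0.5))))                                      // the undrained remainder
//   consumeResources(4.0, List(cpus(2.0), cpus(1.0)))
//     returns Left(1.0), the amount that could not be matched.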
@tailrec
private[this] def consumeResources(
valueLeft: Double,
resourcesLeft: List[Protos.Resource],
resourcesNotConsumed: List[Protos.Resource] = Nil,
resourcesConsumed: List[GeneralScalarMatch.Consumption] = Nil,
matcher: Protos.Resource => Boolean = { _ => true }):
// format: OFF
Either[(Double), (List[GeneralScalarMatch.Consumption], List[Protos.Resource])] = {
// format: ON
if (valueLeft <= 0) {
Right((resourcesConsumed, resourcesLeft ++ resourcesNotConsumed))
} else {
resourcesLeft match {
case Nil => Left(valueLeft)
case nextResource :: restResources =>
if (matcher(nextResource)) {
val consume = Math.min(valueLeft, nextResource.getScalar.getValue)
val decrementedResource = ResourceUtil.consumeScalarResource(nextResource, consume)
val newValueLeft = valueLeft - consume
val reservation = if (nextResource.hasReservation) Option(nextResource.getReservation) else None
val consumedValue = GeneralScalarMatch.Consumption(consume, nextResource.getRole, reservation)
consumeResources(newValueLeft, restResources, (decrementedResource ++ resourcesNotConsumed).toList,
consumedValue :: resourcesConsumed, matcher)
} else {
consumeResources(valueLeft, restResources, nextResource :: resourcesNotConsumed, resourcesConsumed, matcher)
}
}
}
}
private[this] def logUnsatisfiedResources(
offer: Offer,
selector: ResourceSelector,
scalarMatchResults: Seq[ScalarMatchResult]): Unit = {
if (log.isInfoEnabled && scalarMatchResults.exists(!_.matches)) {
val basicResourceString = scalarMatchResults.mkString(", ")
log.info(
s"Offer [${offer.getId.getValue}]. " +
s"$selector. " +
s"Not all basic resources satisfied: $basicResourceString")
}
}
}
| natemurthy/marathon | src/main/scala/mesosphere/mesos/ResourceMatcher.scala | Scala | apache-2.0 | 20,531 |
package io.reactors.common
import scala.reflect.ClassTag
class ConqueueBuffer[@specialized(Byte, Char, Int, Long, Float, Double) T: ClassTag](
val k: Int, val isLazy: Boolean, private var conqueue: Conqueue[T]
) {
import Conc._
import Conqueue._
require(k > 0)
private var leftChunk: Array[T] = _
private var leftIndex: Int = k - 1
private var leftStart: Int = k - 1
private var rightChunk: Array[T] = _
private var rightIndex: Int = 0
private var rightStart: Int = 0
private def init(dummy: ConqueueBuffer[T]) {
leftChunk = new Array[T](k)
leftIndex = k - 1
leftStart = k - 1
rightChunk = new Array[T](k)
rightIndex = 0
rightStart = 0
}
init(this)
def this(k: Int) = this(k, true, Conqueue.Lazy(Nil, Conqueue.empty, Nil))
def this(k: Int, lazyConqueue: Boolean) =
this(k, lazyConqueue,
if (lazyConqueue) Conqueue.Lazy(Nil, Conqueue.empty, Nil) else Conqueue.empty)
def size = conqueue.size
def isEmpty = {
leftIndex == leftStart && ConcUtils.isEmptyConqueue(conqueue) &&
rightIndex == rightStart
}
def nonEmpty = !isEmpty
private def leftEnsureSize(n: Int) {
if (leftChunk.length < n) leftChunk = new Array[T](n)
}
private def rightEnsureSize(n: Int) {
if (rightChunk.length < n) rightChunk = new Array[T](n)
}
private def pullLeft() {
if (conqueue.nonEmpty) {
val head = ConcUtils.head(conqueue)
conqueue = ConcUtils.popHeadTop(conqueue)
(head: @unchecked) match {
case head: Chunk[T] =>
leftChunk = head.array
leftStart = head.size - 1
leftIndex = -1
case head: Single[T] =>
leftChunk = new Array[T](k)
leftChunk(k - 1) = head.x
leftStart = k - 1
leftIndex = k - 2
}
} else if (rightIndex > rightStart) {
val rightMid = (rightStart + rightIndex + 1) / 2
val n = rightMid - rightStart
leftEnsureSize(n)
System.arraycopy(rightChunk, rightStart, leftChunk, leftChunk.length - n, n)
rightStart = rightMid
leftStart = leftChunk.length - 1
leftIndex = leftChunk.length - n - 1
} else unsupported("empty")
}
def head: T = {
if (leftIndex < leftStart) leftChunk(leftIndex + 1)
else {
pullLeft()
head
}
}
private def pullRight() = {
if (conqueue.nonEmpty) {
val last = ConcUtils.last(conqueue)
conqueue = ConcUtils.popLastTop(conqueue)
(last: @unchecked) match {
case last: Chunk[T] =>
rightChunk = last.array
rightStart = 0
rightIndex = last.size
case last: Single[T] =>
rightChunk = new Array[T](k)
rightChunk(0) = last.x
rightStart = 0
rightIndex = 1
}
} else if (leftIndex < leftStart) {
val leftMid = (leftIndex + 1 + leftStart) / 2
val n = leftStart - leftMid + 1
rightEnsureSize(n)
System.arraycopy(leftChunk, leftMid, rightChunk, 0, n)
leftStart = leftMid - 1
rightStart = 0
rightIndex = n
} else unsupported("empty")
}
def last: T = {
if (rightIndex > rightStart) rightChunk(rightIndex - 1)
else {
pullRight()
last
}
}
private def packLeft(): Unit = if (leftIndex < leftStart) {
val sz = leftStart - leftIndex
val chunk = {
if (leftIndex == -1) leftChunk
else ConcUtils.copiedArray(leftChunk, leftIndex + 1, sz)
}
conqueue = ConcUtils.pushHeadTop(conqueue, new Chunk(chunk, sz, k))
}
private def expandLeft() {
packLeft()
leftChunk = new Array[T](k)
leftIndex = k - 1
leftStart = k - 1
}
private def packRight(): Unit = if (rightIndex > rightStart) {
val sz = rightIndex - rightStart
val chunk = {
if (rightStart == 0) rightChunk
else ConcUtils.copiedArray(rightChunk, rightStart, sz)
}
conqueue = ConcUtils.pushLastTop(conqueue, new Chunk(chunk, sz, k))
}
private def expandRight() {
packRight()
rightChunk = new Array[T](k)
rightIndex = 0
rightStart = 0
}
def pushHead(elem: T): this.type = {
if (leftIndex < 0) expandLeft()
leftChunk(leftIndex) = elem
leftIndex -= 1
this
}
def +=:(elem: T): this.type = pushHead(elem)
def popHead(): T = {
if (leftIndex < leftStart) {
leftIndex += 1
val result = leftChunk(leftIndex)
leftChunk(leftIndex) = null.asInstanceOf[T]
result
} else {
pullLeft()
popHead()
}
}
def pushLast(elem: T): this.type = {
if (rightIndex > rightChunk.size - 1) expandRight()
rightChunk(rightIndex) = elem
rightIndex += 1
this
}
def +=(elem: T): this.type = pushLast(elem)
def popLast(): T = {
if (rightIndex > rightStart) {
rightIndex -= 1
val result = rightChunk(rightIndex)
rightChunk(rightIndex) = null.asInstanceOf[T]
result
} else {
pullRight()
popLast()
}
}
def extractConqueue() = {
packLeft()
packRight()
var result = conqueue
conqueue = if (isLazy) Lazy(Nil, Conqueue.empty, Nil) else Conqueue.empty
result
}
def clear() {
init(this)
}
override def toString = {
val buffer = collection.mutable.Buffer[T]()
for (i <- (leftIndex + 1) to leftStart) buffer += leftChunk(i)
for (x <- conqueue) buffer += x
for (i <- rightStart until rightIndex) buffer += rightChunk(i)
buffer.mkString("ConqueueBuffer(", ", ", ")")
}
private[common] def diagnosticString = {
println(s"-----------")
println(s"leftIndex/leftStart: $leftIndex/$leftStart")
println(s"leftChunk: ${leftChunk.mkString(", ")}")
println(s"rightStart/rightIndex: $rightStart/$rightIndex")
println(s"rightChunk: ${rightChunk.mkString(", ")}")
println(s"mid: ${ConcUtils.toSeq(conqueue).mkString(", ")}")
}
}
| storm-enroute/reactors | reactors-common/shared/src/main/scala/io/reactors/common/ConqueueBuffer.scala | Scala | bsd-3-clause | 5,862 |
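// Hedged usage sketch (not part of the library sources above): exercising the public
// deque-style API of ConqueueBuffer. The chunk size 4 and the object name are arbitrary.
object ConqueueBufferExample {
  import io.reactors.common.ConqueueBuffer

  def main(args: Array[String]): Unit = {
    val buf = new ConqueueBuffer[Int](4)    // k = 4, lazy conqueue variant
    buf += 1                                // pushLast
    buf += 2
    0 +=: buf                               // pushHead
    assert(buf.head == 0 && buf.last == 2)
    assert(buf.popHead() == 0)
    assert(buf.popLast() == 2)
    assert(buf.nonEmpty)                    // the element 1 is still buffered
    val remaining = buf.extractConqueue()   // hands the buffered elements over as a Conqueue
  }
}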
/*
* This file is part of Apparat.
*
* Copyright (C) 2010 Joa Ebert
* http://www.joa-ebert.com/
*
* This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with this library; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
package apparat.graph.analysis
import apparat.graph.{ControlFlow, GraphLike}
import annotation.tailrec
/**
* @author Joa Ebert
*/
class Dominance[V](val graph: GraphLike[V]) {
private lazy val entry: V = graph match {
case controlFlow: ControlFlow[_] => controlFlow.entryVertex.asInstanceOf[V]
case _ => graph.verticesIterator find (vertex => (graph indegreeOf vertex) == 0) match {
case Some(vertex) => vertex
case None => error("No vertex with indegree(v) == 0 found.")
}
}
private lazy val postorder = graph dft entry toList
private lazy val reversePostorder = postorder.reverse
@tailrec private def advanceIntersection(map: Map[V, V], a: V, b: V): V = {
if ((postorder indexOf a) < (postorder indexOf b)) advanceIntersection(map, map(a), b)
else a
}
@tailrec private def intersect(map: Map[V, V], b1: V, b2: V): V = {
if (b1 != b2) {
val f = advanceIntersection(map, b1, b2)
intersect(map, f, advanceIntersection(map, b2, f))
} else {
b1
}
}
private def pickPredecessor(map: Map[V, V], predecessors: Iterable[V]): V = {
predecessors.find(map contains _) match {
case Some(vertex) => vertex
case None => error("Unreachable by definition.")
}
}
private lazy val doms = {
//
// "A Simple, Fast Dominance Algorithm"
//
// Keith D. Cooper et al.
// Rice University, Houston, TX
//
// http://www.cs.rice.edu/~keith/EMBED/dom.pdf
// Page 13
//
var result = Map(entry -> entry)
val rp = reversePostorder filterNot (_ == entry)
def loop(): Unit = {
var changed = false
for (b <- rp) {
val predecessorsTmp = graph predecessorsOf b
var newIDom = pickPredecessor(result, predecessorsTmp)
val predecessors = predecessorsTmp filterNot (_ == newIDom)
for (p <- predecessors) {
result get p match {
case Some(vertex) => {
newIDom = intersect(result, vertex, newIDom)
}
case None =>
}
}
result get b match {
case Some(old) => if (old != newIDom) {
result = result updated (b, newIDom)
changed = true
}
case None => {
result = result + (b -> newIDom)
changed = true
}
}
}
if (changed) {
loop()
}
}
loop()
result
}
private lazy val frontiers = {
//
// "A Simple, Fast Dominance Algorithm"
//
// Keith D. Cooper et al.
// Rice University, Houston, TX
//
// http://www.cs.rice.edu/~keith/EMBED/dom.pdf
// Page 18
//
var result = graph vertexMap (v => List.empty[V])
for (b <- graph.verticesIterator) {
val predecessors = graph predecessorsOf b
if (predecessors.size > 1) {
for (p <- predecessors) {
var runner = p
while (runner != doms(b)) {
// result is normally a set so no duplicate into it
val resultRunner = result(runner)
if (!resultRunner.contains(b))
result = result updated (runner, b :: resultRunner)
runner = doms(runner)
}
}
}
}
result
}
def apply(vertex: V) = frontiersOf(vertex)
def frontiersOf(vertex: V) = frontiers get vertex
}
| joa/apparat | apparat-core/src/main/scala/apparat/graph/analysis/Dominance.scala | Scala | lgpl-2.1 | 3,904 |
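// Illustrative walk-through of the Dominance class above (not part of the original sources):
// for a diamond-shaped control-flow graph entry -> {a, b} -> exit,
//   doms:      idom(a) = idom(b) = idom(exit) = entry
//   frontiers: frontiersOf(a) = frontiersOf(b) = Some(List(exit));
//              entry and exit both map to an empty list.
// exit is where the disjoint a/b paths re-join, which is exactly where SSA phi nodes
// would be inserted.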
package skinny.engine.scalate
import java.io.File
import org.fusesource.scalate.TemplateEngine
class SkinnyEngineTemplateEngine(
sourceDirectories: Traversable[File] = None,
mode: String = sys.props.getOrElse("scalate.mode", "production"))
extends TemplateEngine(sourceDirectories, mode)
| holycattle/skinny-framework | engine-scalate/src/main/scala/skinny/engine/scalate/SkinnyEngineTemplateEngine.scala | Scala | mit | 298 |
package waldap.core.controller
import waldap.core.util.Implicits._
import waldap.core.util.Keys
class PreprocessController extends ControllerBase {
get(context.currentPath.startsWith("/user") && context.loginAccount.map { _.isAdmin }.getOrElse(false)) {
org.scalatra.Forbidden("Access Denied")
}
get(context.currentPath.startsWith("/admin") && context.loginAccount.map { !_.isAdmin }.getOrElse(false)) {
org.scalatra.Forbidden("Access Denied")
}
get(
context.loginAccount.isEmpty && context.currentPath != "/" && !context.currentPath.startsWith("/assets")
&& !context.currentPath.startsWith("/webjars")
&& !context.currentPath.startsWith("/admin/signin") && !context.currentPath.startsWith("/user/signin")
) {
Unauthorized()
}
protected def Unauthorized()(implicit context: Context): Unit = {
if (request.hasAttribute(Keys.Request.Ajax)) {
org.scalatra.Unauthorized()
} else {
if (context.loginAccount.isDefined) {
org.scalatra.Unauthorized(redirect("/"))
} else {
if (context.currentPath.startsWith("/admin")) {
org.scalatra.Unauthorized(redirect("/admin/signin"))
} else {
org.scalatra.Unauthorized(redirect("/user/signin"))
}
}
}
}
}
| kounoike/waldap | src/main/scala/waldap/core/controller/PreprocessController.scala | Scala | apache-2.0 | 1,277 |
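// Illustrative request walk-through for the guards in PreprocessController above
// (paths and accounts are hypothetical examples):
//   GET /admin/users   with a non-admin session -> 403 "Access Denied"
//   GET /user/profile  with an admin session    -> 403 "Access Denied"
//   GET /user/profile  with no session          -> 401, browser redirected to /user/signin
//   GET /admin/users   with no session          -> 401, browser redirected to /admin/signin
//   GET / or GET /assets/app.js                 -> never intercepted, whatever the session holds
// Ajax requests (Keys.Request.Ajax set) receive a plain 401 instead of a redirect.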
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2013 Alexey Aksenov [email protected]
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Global License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS»,
* Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Global License for more details.
* You should have received a copy of the GNU Affero General Global License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Global License.
*
* In accordance with Section 7(b) of the GNU Affero General Global License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: [email protected]
*/
package org.digimead.tabuddy.desktop.core.ui.definition
/**
* Base application wizard with init(arg) method
*/
trait IWizard extends org.eclipse.jface.wizard.IWizard {
/** Wizard result. Int by default. */
@volatile var result: Option[AnyRef] = None
/** This method is invoked before wizard opening. */
def init(argument: AnyRef)
}
| digimead/digi-TABuddy-desktop | part-core-ui/src/main/scala/org/digimead/tabuddy/desktop/core/ui/definition/IWizard.scala | Scala | agpl-3.0 | 2,585 |
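// Hedged sketch (not part of the original sources above): a minimal concrete wizard built on
// the IWizard trait. It extends JFace's org.eclipse.jface.wizard.Wizard, whose only abstract
// member is performFinish(); the wizard name and the meaning of `argument` are hypothetical.
import org.digimead.tabuddy.desktop.core.ui.definition.IWizard
import org.eclipse.jface.wizard.Wizard

class ExportWizard extends Wizard with IWizard {
  /** Invoked by the opener before the wizard dialog is shown. */
  def init(argument: AnyRef): Unit = { result = None }

  override def performFinish(): Boolean = {
    result = Some(Int.box(1)) // Int result, boxed so it fits Option[AnyRef]
    true
  }
}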
/*
* Scala (https://www.scala-lang.org)
*
* Copyright EPFL and Lightbend, Inc.
*
* Licensed under Apache License 2.0
* (http://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package scala.tools.nsc.interpreter.jline
import java.io.IOException
import java.nio.charset.Charset
import java.nio.file.{FileSystems, Files, Path}
import _root_.jline.console.history.PersistentHistory
import scala.collection.JavaConverters._
import scala.io.Codec
import scala.reflect.internal.util.OwnerOnlyChmod
import scala.util.control.NonFatal
/** TODO: file locking.
*/
trait FileBackedHistory extends JLineHistory with PersistentHistory {
def maxSize: Int
import java.nio.file.StandardOpenOption.{APPEND, TRUNCATE_EXISTING}
val charSet: Charset = implicitly[Codec].charSet
// For a history file in the standard location, always try to restrict permission,
// creating an empty file if none exists.
// For a user-specified location, only lock down permissions if we're the ones
// creating it, otherwise responsibility for permissions is up to the caller.
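// Illustrative resolution order for historyPath below (concrete paths are hypothetical):
//   -Dscala.shell.histfile=/tmp/repl_history  -> /tmp/repl_history (secured only if it did not exist yet)
//   otherwise, with user.home=/home/alice     -> /home/alice/.scala_history (always secured to owner-only)
//   neither property available                -> IllegalStateException("Cannot determine path for history file.")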
private lazy val historyPath = {
val fs = FileSystems.getDefault
// This would really have been sufficient for our property getting infrastructure
def prop(p: String) = Option(System.getProperty(p))
(prop("scala.shell.histfile").map(fs.getPath(_)).map{ p => if (!Files.exists(p)) secure(p); p } orElse
prop("user.home").map(n => fs.getPath(n + s"${fs.getSeparator}${FileBackedHistory.defaultFileName}")).map(secure)
).getOrElse(throw new IllegalStateException("Cannot determine path for history file."))
}
private def secure(p: Path): Path = {
try OwnerOnlyChmod.chmodFileOrCreateEmpty(p)
catch { case NonFatal(e) =>
e.printStackTrace(Console.err)
Console.err.println(s"Warning: history file ${p}'s permissions could not be restricted to owner-only.")
}
p
}
protected lazy val lines: List[String] = {
try Files.readAllLines(historyPath, charSet).asScala.toList
catch {
// It seems that control characters in the history file combined
// with the default codec can lead to nio spewing exceptions. Rather
// than abandon hope we'll try to read it as ISO-8859-1
case _: IOException =>
try Files.readAllLines(historyPath, Codec.ISO8859.charSet).asScala.toList
catch {
case _: IOException => Nil
}
}
}
private var isPersistent = true
locally {
load()
}
def withoutSaving[T](op: => T): T = {
val saved = isPersistent
isPersistent = false
try op
finally isPersistent = saved
}
def addLineToFile(item: CharSequence): Unit = {
if (isPersistent)
append(s"$item\\n")
}
/** Overwrites the history file with the current memory. */
protected def sync(): Unit =
Files.write(historyPath, asStrings.asJava, charSet, TRUNCATE_EXISTING)
/** Append one or more lines to the history file. */
protected def append(newLines: String*): Unit =
Files.write(historyPath, newLines.asJava, charSet, APPEND)
def load(): Unit = try {
// avoid writing to the history file
withoutSaving(lines takeRight maxSize foreach add)
// truncate the history file if it's too big.
if (lines.size > maxSize) {
sync()
}
moveToEnd()
} catch {
case _: IOException | _: IllegalStateException =>
Console.err.println("Could not load history.")
isPersistent = false
}
def flush(): Unit = ()
def purge(): Unit = Files.write(historyPath, Array.emptyByteArray)
}
object FileBackedHistory {
// val ContinuationChar = '\003'
// val ContinuationNL: String = Array('\003', '\n').mkString
final val defaultFileName = ".scala_history"
}
| martijnhoekstra/scala | src/repl-frontend/scala/tools/nsc/interpreter/jline/FileBackedHistory.scala | Scala | apache-2.0 | 3,836 |
import scala.annotation.tailrec
/*
Reference: http://aperiodic.net/phil/scala/s-99
P02 (*) Find the last but one element of a list.
Example:
scala> penultimate(List(1, 1, 2, 3, 5, 8))
res0: Int = 5
*/
@tailrec
def penultimate[T](l: List[T]): T = {
l match {
case Nil | _ :: Nil => throw new NoSuchElementException
case x :: y :: Nil => x
case h :: t => penultimate(t)
}
}
println(penultimate(List(1, 1, 2, 3, 5, 8)))
| mcamou/s-99 | src/P02.scala | Scala | apache-2.0 | 448
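// Hedged side note (not part of the original solution file above): the same behaviour using
// only the standard library, without explicit recursion.
def penultimateViaTakeRight[T](l: List[T]): T = l.takeRight(2) match {
  case x :: _ :: Nil => x
  case _             => throw new NoSuchElementException
}

println(penultimateViaTakeRight(List(1, 1, 2, 3, 5, 8))) // 5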
package freecli
package object argument {
type ArgumentDsl[A] = dsl.ArgumentDsl[A]
}
| pavlosgi/freecli | core/src/main/scala/freecli/argument/package.scala | Scala | apache-2.0 | 88 |
package mimir.ctables;
import java.io.{StringReader,FileReader}
import org.specs2.mutable._
import mimir._
import mimir.ctables._
import mimir.ctables.vgterm._
import mimir.parser._
import mimir.algebra._
import mimir.sql._
import mimir.optimizer._
import mimir.optimizer.operator._
import mimir.exec._
import mimir.provenance._
import mimir.models._
import mimir.test._
object OperatorDeterminismSpec
extends Specification
with RAParsers
{
val schema = Map[String,Seq[(ID,Type)]](
("R", Seq(
ID("A") -> TInt(),
ID("B") -> TInt()
)),
("S", Seq(
ID("C") -> TInt(),
ID("D") -> TFloat()
))
)
def table(name: String): Operator =
Table(ID(name), ID(name), schema(name), Seq())
def modelLookup(model: ID) = UniformDistribution
def schemaLookup(table: String) = schema(table).toList
def ack(
idx: Int = 1,
args: Seq[Expression] = Seq(RowIdVar())
): Expression = IsAcknowledged(UniformDistribution, idx, args)
def project(cols: List[(String,String)], src: Operator): Operator =
Project(cols.map( { case (name,e) => ProjectArg(ID(name), expr(e))}), src)
def percolite(x:Operator): Operator =
PullUpConstants(
SimpleOptimizeExpressions(
InlineProjections(
PullUpConstants(
OperatorDeterminism.compile(x, modelLookup(_))
)
)
)
)
def ucol(x:String) =
OperatorDeterminism.mimirColDeterministicColumn(ID(x))
def urow =
OperatorDeterminism.mimirRowDeterministicColumnName
val TRUE = BoolPrimitive(true)
val FALSE = BoolPrimitive(false)
"The Percolator (Lite)" should {
"Handle Base Relations" in {
percolite(
table("R")
) must be equalTo (
table("R").mapByID(
ID("A") -> Var("A"),
ID("B") -> Var("B"),
ucol("A") -> TRUE,
ucol("B") -> TRUE,
urow -> TRUE
)
)
}
"Handle Deterministic Projection" in {
percolite(
table("R")
.project("A")
) must be equalTo (
table("R").
mapByID(
ID("A") -> Var("A"),
ucol("A") -> TRUE,
urow -> TRUE
)
)
}
"Handle Data-Independent Non-Deterministic Projection 1" in {
percolite(
table("R")
.map(
"A" -> Var("A"),
"B" -> VGTerm(ID("X"), 1, Seq(RowIdVar()), Seq())
)
) must be equalTo (
table("R")
.mapByID(
ID("A") -> Var("A"),
ID("B") -> VGTerm(ID("X"), 1, Seq(RowIdVar()), Seq()),
ucol("A") -> TRUE,
ucol("B") -> ack(),
urow -> TRUE
)
)
}
"Handle Data-Dependent Non-Deterministic Projection 2" in {
percolite(
table("R")
.map(
"A" -> Var("A"),
"B" -> Conditional(IsNullExpression(Var("B")), VGTerm(ID("X"), 1, Seq(RowIdVar()), Seq()), Var("B"))
)
) must be equalTo (
table("R")
.mapByID(
ID("A") -> Var("A"),
ID("B") -> Conditional(IsNullExpression(Var("B")), VGTerm(ID("X"), 1, Seq(RowIdVar()), Seq()), Var("B")),
ucol("A") -> TRUE,
ucol("B") -> Conditional(IsNullExpression(Var("B")), ack(), BoolPrimitive(true)),
urow -> TRUE
)
)
}
"Handle Data-Independent Non-Deterministic Inline Selection" in {
percolite(
table("R")
.filter(VGTerm(ID("X"), 1, Seq(RowIdVar()), Seq()).eq(IntPrimitive(3)))
) must be equalTo (
table("R")
.filter(VGTerm(ID("X"), 1, Seq(RowIdVar()), Seq()).eq(IntPrimitive(3)))
.mapByID(
ID("A") -> Var("A"),
ID("B") -> Var("B"),
ucol("A") -> TRUE,
ucol("B") -> TRUE,
urow -> ack()
)
)
}
"Handle Data-Dependent Non-Deterministic Projection 3" in {
percolite(
table("R")
.map(
"A" -> expr("A"),
"B" -> Var("B").isNull
.thenElse { VGTerm(ID("X"), 1, Seq(RowIdVar()), Seq()) }
{ Var("B") }
)
.filterParsed("B = 3")
) must be equalTo (
table("R")
.mapByID(
ID("A") -> expr("A"),
ID("B") -> Var("B").isNull
.thenElse { VGTerm(ID("X"), 1, Seq(RowIdVar()), Seq()) }
{ Var("B") },
ucol("B") -> Var("B").isNull.thenElse(ack()) (BoolPrimitive(true))
)
.filterParsed("B = 3")
.mapByID(
ID("A") -> Var("A"),
ID("B") -> Var("B"),
ucol("A") -> TRUE,
ucol("B") -> Var(ucol("B")),
urow -> Var(ucol("B"))
)
)
}
"Handle Deterministic Joins" in {
percolite(
table("R").join(table("S"))
) must be equalTo (
table("R")
.join(table("S"))
.mapByID(
ID("A") -> Var("A"),
ID("B") -> Var("B"),
ucol("A") -> TRUE,
ucol("B") -> TRUE,
ID("C") -> Var("C"),
ID("D") -> Var("D"),
ucol("C") -> TRUE,
ucol("D") -> TRUE,
urow -> TRUE
)
)
}
"Handle Non-Deterministic Joins" in {
percolite(
table("R")
.map( "A" -> VGTerm(ID("X"), 1, Seq(RowIdVar(), Var("A")), Seq()) )
.join(table("S"))
) must be equalTo (
table("R")
.join(table("S"))
.mapByID(
ID("A") -> VGTerm(ID("X"), 1, Seq(RowIdVar(), Var("A")), Seq()),
ucol("A") -> ack( args = Seq(RowIdVar(), Var("A")) ),
ID("C") -> Var("C"),
ID("D") -> Var("D"),
ucol("C") -> TRUE,
ucol("D") -> TRUE,
urow -> TRUE
)
)
}
"Handle Non-Deterministic Joins With Row Non-Determinism" in {
percolite(
table("R")
.filter(
Var("B").lt {
Var("A").lt(3)
.thenElse { VGTerm(ID("X"), 1, Seq(Var("A")), Seq())}
{ IntPrimitive(3) }
}
)
.join(
table("S")
.filter(
Var("C").lt {
Var("D").gt(5)
.thenElse { VGTerm(ID("X"), 2, Seq(Var("D")), Seq())}
{ IntPrimitive(5) }
}
)
)
) must be equalTo (
table("R")
.filter(
Var("A").lt(3)
.thenElse {
Var("B").lt {
VGTerm(ID("X"), 1, Seq(Var("A")), Seq())
}
} { Var("B").lt(3) }
)
.join(
table("S")
.filter(
Var("D").gt(5)
.thenElse {
Var("C").lt {
VGTerm(ID("X"), 2, Seq(Var("D")), Seq())
}
} { Var("C").lt(5) }
)
)
.mapByID(
ID("A") -> Var("A"),
ID("B") -> Var("B"),
ucol("A") -> TRUE,
ucol("B") -> TRUE,
ID("C") -> Var("C"),
ID("D") -> Var("D"),
ucol("C") -> TRUE,
ucol("D") -> TRUE,
urow ->
ExpressionUtils.makeAnd(
Var("A").lt(3).thenElse(
ack( idx = 1, args = Seq(Var("A")) )
) (
BoolPrimitive(true)
),
Var("D").gt(5).thenElse(
ack( idx = 2, args = Seq(Var("D")) )
) (
BoolPrimitive(true)
)
)
)
)
}
"Percolate projections over non-deterministic rows" >> {
percolite(
table("R")
.filter(
Var("A").lt(5)
.thenElse { VGTerm(ID("X"), 1, Seq(Var("A")), Seq()) }
{ Var("A") }
.gt(5)
)
.project("A", "B")
) must be equalTo (
table("R")
.filter(
Var("A").lt(5)
.thenElse { VGTerm(ID("X"), 1, Seq(Var("A")), Seq()).gt(5) }
{ Var("A").gt(5) }
)
.mapByID(
ID("A") -> Var("A"),
ID("B") -> Var("B"),
ucol("A") -> TRUE,
ucol("B") -> TRUE,
urow ->
Var("A")
.lt(5)
.thenElse(
ack( args = Seq(Var("A")) )
)(
BoolPrimitive(true)
)
)
)
}
"Handle Deterministic Aggregates" in {
percolite(
Project(
Seq(
ProjectArg(ID("COMPANY"), Var("PRODUCT_INVENTORY_COMPANY")),
ProjectArg(ID("SUM_2"), Var("MIMIR_AGG_SUM_2"))
),
Aggregate(
Seq(Var("PRODUCT_INVENTORY_COMPANY")),
Seq(AggFunction(ID("sum"), false, Seq(Var("PRODUCT_INVENTORY_QUANTITY")), ID("MIMIR_AGG_SUM_2"))),
Table(ID("PRODUCT_INVENTORY"),ID("PRODUCT_INVENTORY"), Seq(
(ID("PRODUCT_INVENTORY_ID"), TString()),
(ID("PRODUCT_INVENTORY_COMPANY"), TString()),
(ID("PRODUCT_INVENTORY_QUANTITY"), TInt()),
(ID("PRODUCT_INVENTORY_PRICE"), TFloat())
), Seq())
))
) must be equalTo(
Project(
Seq(
ProjectArg(ID("COMPANY"), Var("PRODUCT_INVENTORY_COMPANY")),
ProjectArg(ID("SUM_2"), Var("MIMIR_AGG_SUM_2")),
ProjectArg(ucol("COMPANY"), TRUE),
ProjectArg(ucol("SUM_2"), TRUE),
ProjectArg(urow, TRUE)
),
Aggregate(
Seq(Var("PRODUCT_INVENTORY_COMPANY")),
Seq(AggFunction(ID("sum"), false, Seq(Var("PRODUCT_INVENTORY_QUANTITY")), ID("MIMIR_AGG_SUM_2"))),
Table(ID("PRODUCT_INVENTORY"),ID("PRODUCT_INVENTORY"), Seq(
(ID("PRODUCT_INVENTORY_ID"), TString()),
(ID("PRODUCT_INVENTORY_COMPANY"), TString()),
(ID("PRODUCT_INVENTORY_QUANTITY"), TInt()),
(ID("PRODUCT_INVENTORY_PRICE"), TFloat())
), Seq())
))
)
}
}
}
| UBOdin/mimir | src/test/scala/mimir/ctables/OperatorDeterminismSpec.scala | Scala | apache-2.0 | 10,724 |
package org.scalaide.core.internal.launching
import org.eclipse.core.runtime.CoreException
import org.eclipse.core.runtime.NullProgressMonitor
import org.eclipse.debug.core.DebugPlugin
import org.eclipse.debug.core.ILaunchConfiguration
import org.eclipse.debug.core.ILaunchConfigurationWorkingCopy
import org.eclipse.debug.core.ILaunchManager
import org.eclipse.debug.ui.DebugUITools
import org.eclipse.jdt.core.IJavaElement
import org.eclipse.jdt.core.IType
import org.eclipse.jdt.internal.junit.launcher.JUnitLaunchConfigurationConstants
import org.eclipse.jdt.internal.junit.ui.JUnitMessages
import org.eclipse.jdt.internal.junit.ui.JUnitPlugin
import org.eclipse.jdt.junit.launcher.JUnitLaunchShortcut
import org.eclipse.jdt.ui.JavaUI
import org.eclipse.jface.viewers.ISelection
import org.eclipse.jface.viewers.IStructuredSelection
import org.eclipse.jface.window.Window
import org.eclipse.ui.IEditorPart
import org.eclipse.ui.dialogs.ElementListSelectionDialog
import org.scalaide.core.internal.jdt.model.ScalaSourceFile
/** A `Run As Scala JUnit Test` shortcut. The only thing that we need to change compared to
* the plain Java JUnit shortcut is the test runner kind. We introduced a new test kind,
* similar to the JDT 'JUnit4' and 'JUnit3' test kinds, whose sole responsibility is to
* locate tests.<p>
* Implementation Note: code of `JUnitLaunchShortcut` has been rewritten here because some
* of functionality (mainly selected element selection) is hidden in its private methods.
*
* @see the `internal_testkinds` extension point.
*
*/
class ScalaJUnitLaunchShortcut extends JUnitLaunchShortcut {
/** Add the Scala JUnit test kind to the configuration.. */
override def createLaunchConfiguration(element: IJavaElement): ILaunchConfigurationWorkingCopy = {
val conf = super.createLaunchConfiguration(element)
conf.setAttribute(JUnitLaunchConfigurationConstants.ATTR_TEST_RUNNER_KIND, ScalaJUnitLaunchShortcut.SCALA_JUNIT_TEST_KIND)
conf
}
/** We need to force the creation of a new launch configuration if the test kind is different, otherwise
* the plain JDT test finder would be run, and possibly miss tests.
*/
override def getAttributeNamesToCompare(): Array[String] = {
super.getAttributeNamesToCompare() :+ JUnitLaunchConfigurationConstants.ATTR_TEST_RUNNER_KIND
}
/** Launch Scala Test Finder for compilation units only. In other cases drop to `super.launch(...)`. */
override def launch(selection: ISelection, mode: String) = selection match {
case struct: IStructuredSelection if isCompilationUnit(struct) =>
launch(element(struct), mode).getOrElse(super.launch(struct, mode))
case _ => super.launch(selection, mode)
}
/** Launch Scala Test Finder for compilation units only. In other cases drop to `super.launch(...)`. */
override def launch(editor: IEditorPart, mode: String): Unit = {
JavaUI.getEditorInputTypeRoot(editor.getEditorInput()) match {
case element: ScalaSourceFile =>
launch(Option(element), mode).getOrElse(super.launch(editor, mode))
case _ => super.launch(editor, mode)
}
}
private def element(struct: IStructuredSelection) = struct.toArray.headOption
private def whenCompilationUnit[T, R](f: IJavaElement => R): PartialFunction[T, R] = {
case selected: IJavaElement if IJavaElement.COMPILATION_UNIT == selected.getElementType =>
f(selected)
}
private def isCompilationUnit(struct: IStructuredSelection): Boolean =
element(struct) collect whenCompilationUnit { _ => true } getOrElse (false)
private val testsInContainer: IJavaElement => Option[IType] = {
import scala.collection.mutable
val testFinder = new JUnit4TestFinder
val progressMonitor = new NullProgressMonitor
(selected: IJavaElement) => {
val found = mutable.Set.empty[IType]
testFinder.findTestsInContainer(selected, found, progressMonitor)
found.headOption
}
}
private def launch[T](element: Option[T], mode: String): Option[Unit] =
(element collect whenCompilationUnit { testsInContainer } flatten) map {
performLaunch(_, mode)
} orElse {
None
}
private def performLaunch(element: IJavaElement, mode: String) = {
val temporary = createLaunchConfiguration(element)
val config = findExistingLaunchConfiguration(temporary, mode)
DebugUITools.launch(config.getOrElse(temporary.doSave()), mode)
}
private def findExistingLaunchConfiguration(temporary: ILaunchConfigurationWorkingCopy, mode: String): Option[ILaunchConfiguration] =
findExistingLaunchConfigurations(temporary) match {
case Nil => None
case conf :: Nil => Option(conf)
case configs @ _ :: _ => chooseConfiguration(configs, mode)
}
private def findExistingLaunchConfigurations(temporary: ILaunchConfigurationWorkingCopy): List[ILaunchConfiguration] = {
val configType = temporary.getType()
val configs = getLaunchManager.getLaunchConfigurations(configType).toList
val attributeToCompare = getAttributeNamesToCompare
configs.filter { config =>
hasSameAttributes(config, temporary, attributeToCompare)
}
}
private def hasSameAttributes(config1: ILaunchConfiguration, config2: ILaunchConfiguration, attributeToCompare: Array[String]) = {
val EMPTY_STRING = ""
try {
attributeToCompare.forall { element =>
config1.getAttribute(element, EMPTY_STRING) == config2.getAttribute(element, EMPTY_STRING)
}
} catch {
case _: CoreException =>
// ignore access problems here, return false
false
}
}
private def chooseConfiguration(configList: List[ILaunchConfiguration], mode: String) = {
val labelProvider = DebugUITools.newDebugModelPresentation()
val dialog = new ElementListSelectionDialog(JUnitPlugin.getActiveWorkbenchShell, labelProvider)
dialog.setElements(configList.toArray)
dialog.setTitle(JUnitMessages.JUnitLaunchShortcut_message_selectConfiguration)
if (mode.equals(ILaunchManager.DEBUG_MODE)) {
dialog.setMessage(JUnitMessages.JUnitLaunchShortcut_message_selectDebugConfiguration)
} else {
dialog.setMessage(JUnitMessages.JUnitLaunchShortcut_message_selectRunConfiguration)
}
dialog.setMultipleSelection(false)
dialog.open() match {
case Window.OK =>
Option(dialog.getFirstResult.asInstanceOf[ILaunchConfiguration])
case _ =>
None
}
}
private def getLaunchManager = {
DebugPlugin.getDefault.getLaunchManager
}
}
object ScalaJUnitLaunchShortcut {
final val SCALA_JUNIT_TEST_KIND = "org.scala-ide.sdt.core.junit"
}
| scala-ide/scala-ide | org.scala-ide.sdt.core/src/org/scalaide/core/internal/launching/ScalaJUnitLaunchShortcut.scala | Scala | bsd-3-clause | 6,619 |
package systems.opalia.commons.scripting.ejs
import java.io.IOException
import java.nio.file.{Path, Paths}
import org.scalatest._
import play.api.libs.json._
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.io.Source
import systems.opalia.commons.scripting.JavaScript
import systems.opalia.commons.scripting.ejs.exceptions._
class EjsTest
extends FlatSpec
with Matchers {
val files = Map(
Paths.get("/utils/escape.ejs") ->
"""
|<%
|utils.escape = function(html) {
| return String(html)
|    .replace(/&(?!#?[a-zA-Z0-9]+;)/g, '&amp;')
|    .replace(/</g, '&lt;')
|    .replace(/>/g, '&gt;')
|    .replace(/"/g, '&quot;')
|    .replace(/'/g, '&#39;')
|};
|-%>
""".stripMargin.trim,
Paths.get("/html/inc/main.html.ejs") ->
"""
|<% include ../../utils/escape.ejs -%>
|<% include header.html.ejs -%>
|<% include footer.html.ejs -%>
|<% macros.main = function(title, contentBody) { -%>
|<!DOCTYPE html>
|<html>
|<head>
| <meta charset="UTF-8">
| <title><%= title %></title>
|</head>
|<body>
|<% macros.header(); -%>
| <div id="content">
|<% contentBody(); -%>
| </div>
|<% macros.footer(); -%>
|</body>
|<!-- <%- JSON.stringify(locals) %> -->
|</html>
|<% } -%>
""".stripMargin.trim,
Paths.get("/html/inc/header.html.ejs") ->
"""
|<% macros.header = function() { -%>
| <header></header>
|<% } -%>
""".stripMargin.trim,
Paths.get("/html/inc/footer.html.ejs") ->
"""
|<% macros.footer = function() { -%>
| <footer></footer>
|<% } -%>
""".stripMargin.trim,
Paths.get("/html/content/success.html.ejs") ->
"""
|<% include ../inc/main.html.ejs -%>
|<% macros.main(locals.title, content); -%>
|<% function content() { -%>
|<%# this is a comment -%>
| <h1><%= locals.title %></h1>
| <p><%%= <%= locals.text %> %></p>
|<% } -%>
""".stripMargin.trim,
Paths.get("/html/content/failure1.html.ejs") ->
"""
|<% include ../inc/main.html.ejs -%>
|<% macros.main(locals.title, content); -%>
|<% function content() { -%>
|<%# this is a comment -%>
|<%- }} -%>
|<% } -%>
""".stripMargin.trim,
Paths.get("/html/content/failure2.html.ejs") ->
"""
|<% include ../inc/not_existing_file.html.ejs -%>
|<% macros.main(locals.title, content); -%>
|<% function content() { -%>
|<% } -%>
""".stripMargin.trim,
Paths.get("/html/content/failure3.html.ejs") ->
"""
|<% include ../inc/main.html.ejs -%>
|<% macros.main(locals.title, content); -%>
|<% function content() { -%>
|<%# this is a comment -%>
|<%
|
|
| locals.blubb.bla = 42;
|
|-%>
|<% } -%>
""".stripMargin.trim)
val data = Json.obj("title" -> "Page Title", "text" -> "Hello from EJS!")
val js = JavaScript()
val ejs =
Ejs(js, new EjsConfiguration {
val openWith = EjsDefaultConfiguration.openWith
val closeWith = EjsDefaultConfiguration.closeWith
def resolve(path: Path)
(implicit executor: ExecutionContext): Future[Source] =
Future.successful(files.get(path).map(Source.fromString)
.getOrElse(throw new IOException(s"Cannot resolve path $path.")))
})
it should "generate the same result for compiling and direct rendering" in {
Await.result(ejs.render(Paths.get("/html/content/success.html.ejs"), data), Duration.Inf) should be(
Await.result(ejs.compile(Paths.get("/html/content/success.html.ejs")).flatMap(_.render(data)), Duration.Inf))
}
it should "generate a valid document" in {
Await.result(ejs.render(Paths.get("/html/content/success.html.ejs"), data), Duration.Inf) should be(
"""
|<!DOCTYPE html>
|<html>
|<head>
| <meta charset="UTF-8">
| <title>Page Title</title>
|</head>
|<body>
| <header></header>
| <div id="content">
| <h1>Page Title</h1>
| <p><%= Hello from EJS! %></p>
| </div>
| <footer></footer>
|</body>
|<!-- {"title":"Page Title","text":"Hello from EJS!"} -->
|</html>
""".stripMargin.trim + "\\n")
}
it should "throw a running exception for errors occurred while execution" in {
val thrownRendering = intercept[EjsRunningException] {
Await.result(ejs.render(Paths.get("/html/content/failure3.html.ejs"), data), Duration.Inf)
}
val thrownCompiling = intercept[EjsRunningException] {
Await.result(ejs.compile(Paths.get("/html/content/failure3.html.ejs")).flatMap(_.render(data)), Duration.Inf)
}
val message = "EJS TypeError in /html/content/failure3.html.ejs" +
" on line 8: Cannot set property \\"bla\\" of undefined"
thrownRendering.getMessage should be(message)
thrownCompiling.getMessage should be(message)
}
it should "throw a parsing exception while rendering a syntax error" in {
val thrownRendering = intercept[EjsParsingException] {
Await.result(ejs.render(Paths.get("/html/content/failure1.html.ejs"), JsNull), Duration.Inf)
}
val thrownCompiling = intercept[EjsParsingException] {
Await.result(ejs.compile(Paths.get("/html/content/failure1.html.ejs")), Duration.Inf)
}
val message = "EJS Error: Expected an operand but found }"
thrownRendering.getMessage should be(message)
thrownCompiling.getMessage should be(message)
}
it should "throw an io exception while resolving a not existing file" in {
val thrownRendering = intercept[IOException] {
Await.result(ejs.render(Paths.get("/html/content/failure2.html.ejs"), JsNull), Duration.Inf)
}
val thrownCompiling = intercept[IOException] {
Await.result(ejs.compile(Paths.get("/html/content/failure2.html.ejs")), Duration.Inf)
}
val message = "Cannot resolve path /html/inc/not_existing_file.html.ejs."
thrownRendering.getMessage should be(message)
thrownCompiling.getMessage should be(message)
}
}
| OpaliaSystems/commons | src/test/scala/systems/opalia/commons/scripting/ejs/EjsTest.scala | Scala | apache-2.0 | 6,564 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.hive.thriftserver.ui
import java.util.concurrent.ConcurrentHashMap
import scala.collection.JavaConverters._
import scala.collection.mutable.ArrayBuffer
import org.apache.hive.service.server.HiveServer2
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.internal.Logging
import org.apache.spark.internal.config.Status.LIVE_ENTITY_UPDATE_PERIOD
import org.apache.spark.scheduler._
import org.apache.spark.sql.hive.thriftserver.HiveThriftServer2.ExecutionState
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.status.{ElementTrackingStore, KVUtils, LiveEntity}
/**
 * An inner SparkListener called in sc.stop to clean up the HiveThriftServer2.
*/
private[thriftserver] class HiveThriftServer2Listener(
kvstore: ElementTrackingStore,
sparkConf: SparkConf,
server: Option[HiveServer2],
live: Boolean = true) extends SparkListener with Logging {
private val sessionList = new ConcurrentHashMap[String, LiveSessionData]()
private val executionList = new ConcurrentHashMap[String, LiveExecutionData]()
private val (retainedStatements: Int, retainedSessions: Int) = {
(sparkConf.get(SQLConf.THRIFTSERVER_UI_STATEMENT_LIMIT),
sparkConf.get(SQLConf.THRIFTSERVER_UI_SESSION_LIMIT))
}
// How often to update live entities. -1 means "never update" when replaying applications,
// meaning only the last write will happen. For live applications, this avoids a few
// operations that we can live without when rapidly processing incoming events.
private val liveUpdatePeriodNs = if (live) sparkConf.get(LIVE_ENTITY_UPDATE_PERIOD) else -1L
// Returns true if this listener has no live data. Exposed for tests only.
private[thriftserver] def noLiveData(): Boolean = {
sessionList.isEmpty && executionList.isEmpty
}
kvstore.addTrigger(classOf[SessionInfo], retainedSessions) { count =>
cleanupSession(count)
}
kvstore.addTrigger(classOf[ExecutionInfo], retainedStatements) { count =>
cleanupExecutions(count)
}
kvstore.onFlush {
if (!live) {
flush((entity: LiveEntity) => updateStoreWithTriggerEnabled(entity))
}
}
override def onApplicationEnd(applicationEnd: SparkListenerApplicationEnd): Unit = {
if (live) {
server.foreach(_.stop())
}
}
override def onJobStart(jobStart: SparkListenerJobStart): Unit = {
val properties = jobStart.properties
if (properties != null) {
val groupId = properties.getProperty(SparkContext.SPARK_JOB_GROUP_ID)
if (groupId != null) {
updateJobDetails(jobStart.jobId.toString, groupId)
}
}
}
private def updateJobDetails(jobId: String, groupId: String): Unit = {
val execList = executionList.values().asScala.filter(_.groupId == groupId).toSeq
if (execList.nonEmpty) {
execList.foreach { exec =>
exec.jobId += jobId.toString
updateLiveStore(exec)
}
} else {
      // It is possible that event reordering happens, in such a way that the JobStart event comes
      // after the Execution end event (refer to SPARK-27019). To handle that situation in the
      // Thrift server, the following code takes care of it. This branch is reached only if the
      // JobStart event comes after the Execution End event.
val storeExecInfo = kvstore.view(classOf[ExecutionInfo]).asScala.filter(_.groupId == groupId)
storeExecInfo.foreach { exec =>
val liveExec = getOrCreateExecution(exec.execId, exec.statement, exec.sessionId,
exec.startTimestamp, exec.userName)
liveExec.jobId += jobId.toString
updateStoreWithTriggerEnabled(liveExec)
executionList.remove(liveExec.execId)
}
}
}
override def onOtherEvent(event: SparkListenerEvent): Unit = {
event match {
case e: SparkListenerThriftServerSessionCreated => onSessionCreated(e)
case e: SparkListenerThriftServerSessionClosed => onSessionClosed(e)
case e: SparkListenerThriftServerOperationStart => onOperationStart(e)
case e: SparkListenerThriftServerOperationParsed => onOperationParsed(e)
case e: SparkListenerThriftServerOperationCanceled => onOperationCanceled(e)
case e: SparkListenerThriftServerOperationTimeout => onOperationTimeout(e)
case e: SparkListenerThriftServerOperationError => onOperationError(e)
case e: SparkListenerThriftServerOperationFinish => onOperationFinished(e)
case e: SparkListenerThriftServerOperationClosed => onOperationClosed(e)
case _ => // Ignore
}
}
private def onSessionCreated(e: SparkListenerThriftServerSessionCreated): Unit = {
val session = getOrCreateSession(e.sessionId, e.startTime, e.ip, e.userName)
sessionList.put(e.sessionId, session)
updateLiveStore(session)
}
private def onSessionClosed(e: SparkListenerThriftServerSessionClosed): Unit =
Option(sessionList.get(e.sessionId)) match {
case Some(sessionData) =>
sessionData.finishTimestamp = e.finishTime
updateStoreWithTriggerEnabled(sessionData)
sessionList.remove(e.sessionId)
case None => logWarning(s"onSessionClosed called with unknown session id: ${e.sessionId}")
}
private def onOperationStart(e: SparkListenerThriftServerOperationStart): Unit = {
val executionData = getOrCreateExecution(
e.id,
e.statement,
e.sessionId,
e.startTime,
e.userName)
executionData.state = ExecutionState.STARTED
executionList.put(e.id, executionData)
executionData.groupId = e.groupId
updateLiveStore(executionData)
Option(sessionList.get(e.sessionId)) match {
case Some(sessionData) =>
sessionData.totalExecution += 1
updateLiveStore(sessionData)
case None => logWarning(s"onOperationStart called with unknown session id: ${e.sessionId}." +
s"Regardless, the operation has been registered.")
}
}
private def onOperationParsed(e: SparkListenerThriftServerOperationParsed): Unit =
Option(executionList.get(e.id)) match {
case Some(executionData) =>
executionData.executePlan = e.executionPlan
executionData.state = ExecutionState.COMPILED
updateLiveStore(executionData)
case None => logWarning(s"onOperationParsed called with unknown operation id: ${e.id}")
}
private def onOperationCanceled(e: SparkListenerThriftServerOperationCanceled): Unit =
Option(executionList.get(e.id)) match {
case Some(executionData) =>
executionData.finishTimestamp = e.finishTime
executionData.state = ExecutionState.CANCELED
updateLiveStore(executionData)
case None => logWarning(s"onOperationCanceled called with unknown operation id: ${e.id}")
}
private def onOperationTimeout(e: SparkListenerThriftServerOperationTimeout): Unit =
Option(executionList.get(e.id)) match {
case Some(executionData) =>
executionData.finishTimestamp = e.finishTime
executionData.state = ExecutionState.TIMEDOUT
updateLiveStore(executionData)
case None => logWarning(s"onOperationCanceled called with unknown operation id: ${e.id}")
}
private def onOperationError(e: SparkListenerThriftServerOperationError): Unit =
Option(executionList.get(e.id)) match {
case Some(executionData) =>
executionData.finishTimestamp = e.finishTime
executionData.detail = e.errorMsg
executionData.state = ExecutionState.FAILED
updateLiveStore(executionData)
case None => logWarning(s"onOperationError called with unknown operation id: ${e.id}")
}
private def onOperationFinished(e: SparkListenerThriftServerOperationFinish): Unit =
Option(executionList.get(e.id)) match {
case Some(executionData) =>
executionData.finishTimestamp = e.finishTime
executionData.state = ExecutionState.FINISHED
updateLiveStore(executionData)
case None => logWarning(s"onOperationFinished called with unknown operation id: ${e.id}")
}
private def onOperationClosed(e: SparkListenerThriftServerOperationClosed): Unit =
Option(executionList.get(e.id)) match {
case Some(executionData) =>
executionData.closeTimestamp = e.closeTime
executionData.state = ExecutionState.CLOSED
updateStoreWithTriggerEnabled(executionData)
executionList.remove(e.id)
case None => logWarning(s"onOperationClosed called with unknown operation id: ${e.id}")
}
  // Update both live and history stores. The trigger is enabled by default, hence
  // it will clean up entities that exceed the threshold.
def updateStoreWithTriggerEnabled(entity: LiveEntity): Unit = {
entity.write(kvstore, System.nanoTime(), checkTriggers = true)
}
  // Update only live stores. If the trigger is enabled, it will clean up entities
  // that exceed the threshold.
def updateLiveStore(entity: LiveEntity, trigger: Boolean = false): Unit = {
val now = System.nanoTime()
if (live && liveUpdatePeriodNs >= 0 && now - entity.lastWriteTime > liveUpdatePeriodNs) {
entity.write(kvstore, now, checkTriggers = trigger)
}
}
/** Go through all `LiveEntity`s and use `entityFlushFunc(entity)` to flush them. */
private def flush(entityFlushFunc: LiveEntity => Unit): Unit = {
sessionList.values.asScala.foreach(entityFlushFunc)
executionList.values.asScala.foreach(entityFlushFunc)
}
private def getOrCreateSession(
sessionId: String,
startTime: Long,
ip: String,
username: String): LiveSessionData = {
sessionList.computeIfAbsent(sessionId,
(_: String) => new LiveSessionData(sessionId, startTime, ip, username))
}
private def getOrCreateExecution(
execId: String, statement: String,
sessionId: String, startTimestamp: Long,
userName: String): LiveExecutionData = {
executionList.computeIfAbsent(execId,
(_: String) => new LiveExecutionData(execId, statement, sessionId, startTimestamp, userName))
}
private def cleanupExecutions(count: Long): Unit = {
val countToDelete = calculateNumberToRemove(count, retainedStatements)
if (countToDelete <= 0L) {
return
}
val view = kvstore.view(classOf[ExecutionInfo]).index("finishTime").first(0L)
val toDelete = KVUtils.viewToSeq(view, countToDelete.toInt) { j =>
j.finishTimestamp != 0
}
toDelete.foreach { j => kvstore.delete(j.getClass, j.execId) }
}
private def cleanupSession(count: Long): Unit = {
val countToDelete = calculateNumberToRemove(count, retainedSessions)
if (countToDelete <= 0L) {
return
}
val view = kvstore.view(classOf[SessionInfo]).index("finishTime").first(0L)
val toDelete = KVUtils.viewToSeq(view, countToDelete.toInt) { j =>
j.finishTimestamp != 0L
}
toDelete.foreach { j => kvstore.delete(j.getClass, j.sessionId) }
}
/**
* Remove at least (retainedSize / 10) items to reduce friction. Because tracking may be done
* asynchronously, this method may return 0 in case enough items have been deleted already.
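   *
   * For example, with dataSize = 120 and retainedSize = 100 this returns
   * max(100 / 10, 120 - 100) = 20; if dataSize does not exceed retainedSize it returns 0.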
*/
private def calculateNumberToRemove(dataSize: Long, retainedSize: Long): Long = {
if (dataSize > retainedSize) {
math.max(retainedSize / 10L, dataSize - retainedSize)
} else {
0L
}
}
}
private[thriftserver] class LiveExecutionData(
val execId: String,
val statement: String,
val sessionId: String,
val startTimestamp: Long,
val userName: String) extends LiveEntity {
var finishTimestamp: Long = 0L
var closeTimestamp: Long = 0L
var executePlan: String = ""
var detail: String = ""
var state: ExecutionState.Value = ExecutionState.STARTED
val jobId: ArrayBuffer[String] = ArrayBuffer[String]()
var groupId: String = ""
override protected def doUpdate(): Any = {
new ExecutionInfo(
execId,
statement,
sessionId,
startTimestamp,
userName,
finishTimestamp,
closeTimestamp,
executePlan,
detail,
state,
jobId,
groupId)
}
}
private[thriftserver] class LiveSessionData(
val sessionId: String,
val startTimeStamp: Long,
val ip: String,
val username: String) extends LiveEntity {
var finishTimestamp: Long = 0L
var totalExecution: Int = 0
override protected def doUpdate(): Any = {
new SessionInfo(
sessionId,
startTimeStamp,
ip,
username,
finishTimestamp,
totalExecution)
}
}
| shaneknapp/spark | sql/hive-thriftserver/src/main/scala/org/apache/spark/sql/hive/thriftserver/ui/HiveThriftServer2Listener.scala | Scala | apache-2.0 | 13,230 |
/*
* Copyright 2014 Databricks
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.databricks.spark.xml.util
import java.io.ByteArrayInputStream
import javax.xml.stream.events._
import javax.xml.stream._
import com.databricks.spark.xml.parsers.StaxXmlParserUtils
import org.slf4j.LoggerFactory
import scala.collection.Seq
import scala.collection.mutable.ArrayBuffer
import scala.collection.JavaConversions._
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.types._
import com.databricks.spark.xml.util.TypeCast._
import com.databricks.spark.xml.XmlOptions
private[xml] object InferSchema {
private val logger = LoggerFactory.getLogger(InferSchema.getClass)
/**
* Copied from internal Spark api
* [[org.apache.spark.sql.catalyst.analysis.HiveTypeCoercion]]
*/
private val numericPrecedence: IndexedSeq[DataType] =
IndexedSeq[DataType](
ByteType,
ShortType,
IntegerType,
LongType,
FloatType,
DoubleType,
TimestampType,
DecimalType.Unlimited)
val findTightestCommonTypeOfTwo: (DataType, DataType) => Option[DataType] = {
case (t1, t2) if t1 == t2 => Some(t1)
// Promote numeric types to the highest of the two
case (t1, t2) if Seq(t1, t2).forall(numericPrecedence.contains) =>
val index = numericPrecedence.lastIndexWhere(t => t == t1 || t == t2)
Some(numericPrecedence(index))
case _ => None
}
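  // Illustrative examples, given the precedence above:
  //   findTightestCommonTypeOfTwo(IntegerType, LongType)  == Some(LongType)
  //   findTightestCommonTypeOfTwo(LongType, DoubleType)   == Some(DoubleType)
  //   findTightestCommonTypeOfTwo(StringType, LongType)   == None  (falls back to compatibleType)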
/**
* Infer the type of a collection of XML records in three stages:
* 1. Infer the type of each record
* 2. Merge types by choosing the lowest type necessary to cover equal keys
* 3. Replace any remaining null fields with string, the top type
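   *
   * A rough usage sketch (illustrative; `rows` and `options` are assumed to already exist):
   * {{{
   *   // rows: RDD[String] with one XML record per element; options: XmlOptions
   *   val schema: StructType = InferSchema.infer(rows, options)
   * }}}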
*/
def infer(xml: RDD[String], options: XmlOptions): StructType = {
    require(options.samplingRatio > 0,
      s"samplingRatio (${options.samplingRatio}) should be greater than 0")
val schemaData = if (options.samplingRatio > 0.99) {
xml
} else {
xml.sample(withReplacement = false, options.samplingRatio, 1)
}
val failFast = options.failFastFlag
// perform schema inference on each row and merge afterwards
val rootType = schemaData.mapPartitions { iter =>
val factory = XMLInputFactory.newInstance()
factory.setProperty(XMLInputFactory.IS_NAMESPACE_AWARE, false)
factory.setProperty(XMLInputFactory.IS_COALESCING, true)
val filter = new EventFilter {
override def accept(event: XMLEvent): Boolean =
          // Ignore comments. This library does not process comments.
event.getEventType != XMLStreamConstants.COMMENT
}
iter.flatMap { xml =>
        // There is no need to skip whitespace here, since [[XmlInputFormat]]
        // always finds the root tag without a leading space.
val reader = new ByteArrayInputStream(xml.getBytes)
val eventReader = factory.createXMLEventReader(reader)
val parser = factory.createFilteredReader(eventReader, filter)
try {
val rootEvent =
StaxXmlParserUtils.skipUntil(parser, XMLStreamConstants.START_ELEMENT)
val rootAttributes =
rootEvent.asStartElement.getAttributes.map(_.asInstanceOf[Attribute]).toArray
Some(inferObject(parser, options, rootAttributes))
} catch {
case _: XMLStreamException if !failFast =>
logger.warn(s"Dropping malformed row: ${xml.replaceAll("\\n", "")}")
None
case _: XMLStreamException if failFast =>
throw new RuntimeException(s"Malformed row (failing fast): ${xml.replaceAll("\\n", "")}")
}
}
}.treeAggregate[DataType](StructType(Seq()))(
compatibleType(options), compatibleType(options))
canonicalizeType(rootType) match {
case Some(st: StructType) => st
case _ =>
// canonicalizeType erases all empty structs, including the only one we want to keep
StructType(Seq())
}
}
private def inferFrom: String => DataType = {
case null => NullType
case v if v.isEmpty => NullType
case v if isLong(v) => LongType
case v if isInteger(v) => IntegerType
case v if isDouble(v) => DoubleType
case v if isBoolean(v) => BooleanType
case v if isTimestamp(v) => TimestampType
case v => StringType
}
private def inferField(parser: XMLEventReader, options: XmlOptions): DataType = {
parser.peek match {
case _: EndElement => NullType
case _: StartElement => inferObject(parser, options)
case c: Characters if c.isWhiteSpace =>
// When `Characters` is found, we need to look further to decide
// if this is really data or space between other elements.
val data = c.getData
parser.nextEvent()
parser.peek match {
case _: StartElement => inferObject(parser, options)
case _: EndElement if data.isEmpty => NullType
case _: EndElement if options.treatEmptyValuesAsNulls => NullType
case _: EndElement => StringType
case _ => inferField(parser, options)
}
case c: Characters if !c.isWhiteSpace =>
// This means data exists
inferFrom(c.getData)
case e: XMLEvent =>
sys.error(s"Failed to parse data with unexpected event ${e.toString}")
}
}
/**
* Infer the type of a xml document from the parser's token stream
*/
private def inferObject(
parser: XMLEventReader,
options: XmlOptions,
rootAttributes: Array[Attribute] = Array.empty): DataType = {
val builder = Seq.newBuilder[StructField]
val nameToDataType = collection.mutable.Map.empty[String, ArrayBuffer[DataType]]
var shouldStop = false
while (!shouldStop) {
parser.nextEvent match {
case e: StartElement =>
// If there are attributes, then we should process them first.
val rootValuesMap =
StaxXmlParserUtils.convertAttributesToValuesMap(rootAttributes, options)
rootValuesMap.foreach {
case (f, v) =>
nameToDataType += (f -> ArrayBuffer(inferFrom(v)))
}
val attributes = e.getAttributes.map(_.asInstanceOf[Attribute]).toArray
val valuesMap = StaxXmlParserUtils.convertAttributesToValuesMap(attributes, options)
val inferredType = inferField(parser, options) match {
case st: StructType if valuesMap.nonEmpty =>
// Merge attributes to the field
val nestedBuilder = Seq.newBuilder[StructField]
nestedBuilder ++= st.fields
valuesMap.foreach {
case (f, v) =>
nestedBuilder += StructField(f, inferFrom(v), nullable = true)
}
StructType(nestedBuilder.result().sortBy(_.name))
case dt: DataType if valuesMap.nonEmpty =>
// We need to manually add the field for value.
val nestedBuilder = Seq.newBuilder[StructField]
nestedBuilder += StructField(options.valueTag, dt, nullable = true)
valuesMap.foreach {
case (f, v) =>
nestedBuilder += StructField(f, inferFrom(v), nullable = true)
}
StructType(nestedBuilder.result().sortBy(_.name))
case dt: DataType => dt
}
// Add the field and datatypes so that we can check if this is ArrayType.
val field = e.asStartElement.getName.getLocalPart
val dataTypes = nameToDataType.getOrElse(field, ArrayBuffer.empty[DataType])
dataTypes += inferredType
nameToDataType += (field -> dataTypes)
case _: EndElement =>
shouldStop = StaxXmlParserUtils.checkEndElement(parser)
case _ =>
shouldStop = shouldStop && parser.hasNext
}
}
    // We need to manually merge the fields having the same names so that
    // they can be inferred as ArrayType.
    nameToDataType.foreach {
case (field, dataTypes) if dataTypes.length > 1 =>
val elementType = dataTypes.reduceLeft(InferSchema.compatibleType(options))
builder += StructField(field, ArrayType(elementType), nullable = true)
case (field, dataTypes) =>
builder += StructField(field, dataTypes.head, nullable = true)
}
StructType(builder.result().sortBy(_.name))
}
/**
* Convert NullType to StringType and remove StructTypes with no fields
*/
private def canonicalizeType: DataType => Option[DataType] = {
case at @ ArrayType(elementType, _) =>
for {
canonicalType <- canonicalizeType(elementType)
} yield {
at.copy(canonicalType)
}
case StructType(fields) =>
val canonicalFields = for {
field <- fields
if field.name.nonEmpty
canonicalType <- canonicalizeType(field.dataType)
} yield {
field.copy(dataType = canonicalType)
}
if (canonicalFields.nonEmpty) {
Some(StructType(canonicalFields))
} else {
// per SPARK-8093: empty structs should be deleted
None
}
case NullType => Some(StringType)
case other => Some(other)
}
/**
* Returns the most general data type for two given data types.
*/
private[xml] def compatibleType(options: XmlOptions)(t1: DataType, t2: DataType): DataType = {
// TODO: Optimise this logic.
findTightestCommonTypeOfTwo(t1, t2).getOrElse {
// t1 or t2 is a StructType, ArrayType, or an unexpected type.
(t1, t2) match {
        // Double supports a larger range than fixed decimal; DecimalType.Maximum should be enough
        // in most cases and also has better precision.
case (DoubleType, t: DecimalType) =>
DoubleType
case (t: DecimalType, DoubleType) =>
DoubleType
case (t1: DecimalType, t2: DecimalType) =>
val scale = math.max(t1.scale, t2.scale)
val range = math.max(t1.precision - t1.scale, t2.precision - t2.scale)
if (range + scale > 38) {
// DecimalType can't support precision > 38
DoubleType
} else {
DecimalType(range + scale, scale)
}
case (StructType(fields1), StructType(fields2)) =>
val newFields = (fields1 ++ fields2).groupBy(field => field.name).map {
case (name, fieldTypes) =>
val dataType = fieldTypes.view.map(_.dataType).reduce(compatibleType(options))
StructField(name, dataType, nullable = true)
}
StructType(newFields.toSeq.sortBy(_.name))
case (ArrayType(elementType1, containsNull1), ArrayType(elementType2, containsNull2)) =>
ArrayType(
compatibleType(options)(elementType1, elementType2), containsNull1 || containsNull2)
        // In the XML data source, a StructType can be compared with an ArrayType.
        // In this case, the ArrayType wraps the StructType.
case (ArrayType(ty1, _), ty2) =>
ArrayType(compatibleType(options)(ty1, ty2))
case (ty1, ArrayType(ty2, _)) =>
ArrayType(compatibleType(options)(ty1, ty2))
        // As this library can infer an element with attributes as StructType whereas
        // others can be inferred as non-structural data types, this case should be
        // handled as well.
case (st: StructType, dt: DataType) if st.fieldNames.contains(options.valueTag) =>
val valueIndex = st.fieldNames.indexOf(options.valueTag)
val valueField = st.fields(valueIndex)
val valueDataType = compatibleType(options)(valueField.dataType, dt)
st.fields(valueIndex) = StructField(options.valueTag, valueDataType, nullable = true)
st
case (dt: DataType, st: StructType) if st.fieldNames.contains(options.valueTag) =>
val valueIndex = st.fieldNames.indexOf(options.valueTag)
val valueField = st.fields(valueIndex)
val valueDataType = compatibleType(options)(dt, valueField.dataType)
st.fields(valueIndex) = StructField(options.valueTag, valueDataType, nullable = true)
st
// TODO: These null type checks should be in `findTightestCommonTypeOfTwo`.
case (_, NullType) => t1
case (NullType, _) => t2
        // Everything else falls back to strings, and every string is an XML object.
case (_, _) => StringType
}
}
}
}
| mattroberts297/spark-xml | src/main/scala/com/databricks/spark/xml/util/InferSchema.scala | Scala | apache-2.0 | 12,847 |
package be.stijnvermeeren.my2048ai.move
case object Undo extends InGameMove
| stijnvermeeren/2048-ai | src/main/scala/be/stijnvermeeren/my2048ai/move/Undo.scala | Scala | cc0-1.0 | 77 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.ml.regression
import org.json4s.{DefaultFormats, JObject}
import org.json4s.JsonDSL._
import org.apache.spark.annotation.Since
import org.apache.spark.ml.{PredictionModel, Predictor}
import org.apache.spark.ml.linalg.Vector
import org.apache.spark.ml.param.ParamMap
import org.apache.spark.ml.tree._
import org.apache.spark.ml.tree.impl.RandomForest
import org.apache.spark.ml.util._
import org.apache.spark.ml.util.DefaultParamsReader.Metadata
import org.apache.spark.ml.util.Instrumentation.instrumented
import org.apache.spark.mllib.tree.configuration.{Algo => OldAlgo}
import org.apache.spark.mllib.tree.model.{RandomForestModel => OldRandomForestModel}
import org.apache.spark.sql.{Column, DataFrame, Dataset}
import org.apache.spark.sql.functions.{col, udf}
/**
* <a href="http://en.wikipedia.org/wiki/Random_forest">Random Forest</a>
* learning algorithm for regression.
* It supports both continuous and categorical features.
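 *
 * A minimal usage sketch (illustrative only; column names and parameter values are assumptions):
 * {{{
 *   val rf = new RandomForestRegressor()
 *     .setNumTrees(50)
 *     .setFeaturesCol("features")
 *     .setLabelCol("label")
 *   val model = rf.fit(trainingData)        // trainingData: DataFrame with "features" and "label"
 *   val predictions = model.transform(testData)
 * }}}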
*/
@Since("1.4.0")
class RandomForestRegressor @Since("1.4.0") (@Since("1.4.0") override val uid: String)
extends Predictor[Vector, RandomForestRegressor, RandomForestRegressionModel]
with RandomForestRegressorParams with DefaultParamsWritable {
@Since("1.4.0")
def this() = this(Identifiable.randomUID("rfr"))
// Override parameter setters from parent trait for Java API compatibility.
// Parameters from TreeRegressorParams:
/** @group setParam */
@Since("1.4.0")
def setMaxDepth(value: Int): this.type = set(maxDepth, value)
/** @group setParam */
@Since("1.4.0")
def setMaxBins(value: Int): this.type = set(maxBins, value)
/** @group setParam */
@Since("1.4.0")
def setMinInstancesPerNode(value: Int): this.type = set(minInstancesPerNode, value)
/** @group setParam */
@Since("1.4.0")
def setMinInfoGain(value: Double): this.type = set(minInfoGain, value)
/** @group expertSetParam */
@Since("1.4.0")
def setMaxMemoryInMB(value: Int): this.type = set(maxMemoryInMB, value)
/** @group expertSetParam */
@Since("1.4.0")
def setCacheNodeIds(value: Boolean): this.type = set(cacheNodeIds, value)
/**
* Specifies how often to checkpoint the cached node IDs.
* E.g. 10 means that the cache will get checkpointed every 10 iterations.
* This is only used if cacheNodeIds is true and if the checkpoint directory is set in
* [[org.apache.spark.SparkContext]].
* Must be at least 1.
* (default = 10)
* @group setParam
*/
@Since("1.4.0")
def setCheckpointInterval(value: Int): this.type = set(checkpointInterval, value)
/** @group setParam */
@Since("1.4.0")
def setImpurity(value: String): this.type = set(impurity, value)
// Parameters from TreeEnsembleParams:
/** @group setParam */
@Since("1.4.0")
def setSubsamplingRate(value: Double): this.type = set(subsamplingRate, value)
/** @group setParam */
@Since("1.4.0")
def setSeed(value: Long): this.type = set(seed, value)
// Parameters from RandomForestParams:
/** @group setParam */
@Since("1.4.0")
def setNumTrees(value: Int): this.type = set(numTrees, value)
/** @group setParam */
@Since("1.4.0")
def setFeatureSubsetStrategy(value: String): this.type =
set(featureSubsetStrategy, value)
override protected def train(
dataset: Dataset[_]): RandomForestRegressionModel = instrumented { instr =>
val categoricalFeatures: Map[Int, Int] =
MetadataUtils.getCategoricalFeatures(dataset.schema($(featuresCol)))
val instances = extractLabeledPoints(dataset).map(_.toInstance)
val strategy =
super.getOldStrategy(categoricalFeatures, numClasses = 0, OldAlgo.Regression, getOldImpurity)
instr.logPipelineStage(this)
instr.logDataset(instances)
instr.logParams(this, labelCol, featuresCol, predictionCol, leafCol, impurity, numTrees,
featureSubsetStrategy, maxDepth, maxBins, maxMemoryInMB, minInfoGain,
minInstancesPerNode, seed, subsamplingRate, cacheNodeIds, checkpointInterval)
val trees = RandomForest
.run(instances, strategy, getNumTrees, getFeatureSubsetStrategy, getSeed, Some(instr))
.map(_.asInstanceOf[DecisionTreeRegressionModel])
val numFeatures = trees.head.numFeatures
instr.logNamedValue(Instrumentation.loggerTags.numFeatures, numFeatures)
new RandomForestRegressionModel(uid, trees, numFeatures)
}
@Since("1.4.0")
override def copy(extra: ParamMap): RandomForestRegressor = defaultCopy(extra)
}
@Since("1.4.0")
object RandomForestRegressor extends DefaultParamsReadable[RandomForestRegressor]{
/** Accessor for supported impurity settings: variance */
@Since("1.4.0")
final val supportedImpurities: Array[String] = HasVarianceImpurity.supportedImpurities
/** Accessor for supported featureSubsetStrategy settings: auto, all, onethird, sqrt, log2 */
@Since("1.4.0")
final val supportedFeatureSubsetStrategies: Array[String] =
TreeEnsembleParams.supportedFeatureSubsetStrategies
@Since("2.0.0")
override def load(path: String): RandomForestRegressor = super.load(path)
}
/**
* <a href="http://en.wikipedia.org/wiki/Random_forest">Random Forest</a> model for regression.
* It supports both continuous and categorical features.
*
* @param _trees Decision trees in the ensemble.
* @param numFeatures Number of features used by this model
*/
@Since("1.4.0")
class RandomForestRegressionModel private[ml] (
override val uid: String,
private val _trees: Array[DecisionTreeRegressionModel],
override val numFeatures: Int)
extends PredictionModel[Vector, RandomForestRegressionModel]
with RandomForestRegressorParams with TreeEnsembleModel[DecisionTreeRegressionModel]
with MLWritable with Serializable {
require(_trees.nonEmpty, "RandomForestRegressionModel requires at least 1 tree.")
/**
* Construct a random forest regression model, with all trees weighted equally.
*
* @param trees Component trees
*/
private[ml] def this(trees: Array[DecisionTreeRegressionModel], numFeatures: Int) =
this(Identifiable.randomUID("rfr"), trees, numFeatures)
@Since("1.4.0")
override def trees: Array[DecisionTreeRegressionModel] = _trees
// Note: We may add support for weights (based on tree performance) later on.
private lazy val _treeWeights: Array[Double] = Array.fill[Double](_trees.length)(1.0)
@Since("1.4.0")
override def treeWeights: Array[Double] = _treeWeights
override def transform(dataset: Dataset[_]): DataFrame = {
transformSchema(dataset.schema, logging = true)
var predictionColNames = Seq.empty[String]
var predictionColumns = Seq.empty[Column]
val bcastModel = dataset.sparkSession.sparkContext.broadcast(this)
if ($(predictionCol).nonEmpty) {
val predictUDF = udf { features: Vector => bcastModel.value.predict(features) }
predictionColNames :+= $(predictionCol)
predictionColumns :+= predictUDF(col($(featuresCol)))
}
if ($(leafCol).nonEmpty) {
val leafUDF = udf { features: Vector => bcastModel.value.predictLeaf(features) }
predictionColNames :+= $(leafCol)
predictionColumns :+= leafUDF(col($(featuresCol)))
}
if (predictionColNames.nonEmpty) {
dataset.withColumns(predictionColNames, predictionColumns)
} else {
this.logWarning(s"$uid: RandomForestRegressionModel.transform() does nothing" +
" because no output columns were set.")
dataset.toDF()
}
}
override def predict(features: Vector): Double = {
// TODO: When we add a generic Bagging class, handle transform there. SPARK-7128
// Predict average of tree predictions.
// Ignore the weights since all are 1.0 for now.
_trees.map(_.rootNode.predictImpl(features).prediction).sum / getNumTrees
}
@Since("1.4.0")
override def copy(extra: ParamMap): RandomForestRegressionModel = {
copyValues(new RandomForestRegressionModel(uid, _trees, numFeatures), extra).setParent(parent)
}
@Since("1.4.0")
override def toString: String = {
s"RandomForestRegressionModel (uid=$uid) with $getNumTrees trees"
}
/**
* Estimate of the importance of each feature.
*
   * Each feature's importance is the average of its importance across all trees in the ensemble.
* The importance vector is normalized to sum to 1. This method is suggested by Hastie et al.
* (Hastie, Tibshirani, Friedman. "The Elements of Statistical Learning, 2nd Edition." 2001.)
* and follows the implementation from scikit-learn.
*
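   * A quick way to inspect the result (illustrative only):
   * {{{
   *   model.featureImportances.toArray.zipWithIndex
   *     .sortBy { case (importance, _) => -importance }
   *     .take(10)   // top (importance, featureIndex) pairs
   * }}}
   *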
* @see `DecisionTreeRegressionModel.featureImportances`
*/
@Since("1.5.0")
lazy val featureImportances: Vector = TreeEnsembleModel.featureImportances(trees, numFeatures)
/** (private[ml]) Convert to a model in the old API */
private[ml] def toOld: OldRandomForestModel = {
new OldRandomForestModel(OldAlgo.Regression, _trees.map(_.toOld))
}
@Since("2.0.0")
override def write: MLWriter =
new RandomForestRegressionModel.RandomForestRegressionModelWriter(this)
}
@Since("2.0.0")
object RandomForestRegressionModel extends MLReadable[RandomForestRegressionModel] {
@Since("2.0.0")
override def read: MLReader[RandomForestRegressionModel] = new RandomForestRegressionModelReader
@Since("2.0.0")
override def load(path: String): RandomForestRegressionModel = super.load(path)
private[RandomForestRegressionModel]
class RandomForestRegressionModelWriter(instance: RandomForestRegressionModel)
extends MLWriter {
override protected def saveImpl(path: String): Unit = {
val extraMetadata: JObject = Map(
"numFeatures" -> instance.numFeatures,
"numTrees" -> instance.getNumTrees)
EnsembleModelReadWrite.saveImpl(instance, path, sparkSession, extraMetadata)
}
}
private class RandomForestRegressionModelReader extends MLReader[RandomForestRegressionModel] {
/** Checked against metadata when loading model */
private val className = classOf[RandomForestRegressionModel].getName
private val treeClassName = classOf[DecisionTreeRegressionModel].getName
override def load(path: String): RandomForestRegressionModel = {
implicit val format = DefaultFormats
val (metadata: Metadata, treesData: Array[(Metadata, Node)], treeWeights: Array[Double]) =
EnsembleModelReadWrite.loadImpl(path, sparkSession, className, treeClassName)
val numFeatures = (metadata.metadata \\ "numFeatures").extract[Int]
val numTrees = (metadata.metadata \\ "numTrees").extract[Int]
val trees: Array[DecisionTreeRegressionModel] = treesData.map { case (treeMetadata, root) =>
val tree =
new DecisionTreeRegressionModel(treeMetadata.uid, root, numFeatures)
treeMetadata.getAndSetParams(tree)
tree
}
require(numTrees == trees.length, s"RandomForestRegressionModel.load expected $numTrees" +
s" trees based on metadata but found ${trees.length} trees.")
val model = new RandomForestRegressionModel(metadata.uid, trees, numFeatures)
metadata.getAndSetParams(model)
model
}
}
/** Convert a model from the old API */
private[ml] def fromOld(
oldModel: OldRandomForestModel,
parent: RandomForestRegressor,
categoricalFeatures: Map[Int, Int],
numFeatures: Int = -1): RandomForestRegressionModel = {
require(oldModel.algo == OldAlgo.Regression, "Cannot convert RandomForestModel" +
s" with algo=${oldModel.algo} (old API) to RandomForestRegressionModel (new API).")
val newTrees = oldModel.trees.map { tree =>
// parent for each tree is null since there is no good way to set this.
DecisionTreeRegressionModel.fromOld(tree, null, categoricalFeatures)
}
val uid = if (parent != null) parent.uid else Identifiable.randomUID("rfr")
new RandomForestRegressionModel(uid, newTrees, numFeatures)
}
}
| techaddict/spark | mllib/src/main/scala/org/apache/spark/ml/regression/RandomForestRegressor.scala | Scala | apache-2.0 | 12,606 |
package piecewise.intervaltree
import cats.kernel.Order
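/**
 * Type class describing how a value of type `V` relates to an interval `I[V]`: containment,
 * bound membership, and ordering relative to the interval. `relation` is expected to return
 * the `LT`/`GT` markers matched on below (presumably defined elsewhere in this package).
 */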
trait IntervalOps[I[_]]{
def contain[V](interval: I[V], v: V)(implicit ord: Order[V]): Boolean
def relation[V](interval: I[V], v: V)(implicit ord: Order[V]): Int
def isLowerBound[V](interval: I[V], v: V)(implicit ord: Order[V]): Boolean
def isUpperBound[V](interval: I[V], v: V)(implicit ord: Order[V]): Boolean
def isUpperThan[V](interval: I[V], v: V)(implicit ord: Order[V]): Boolean =
relation(interval, v) match {
case GT => true
case _ => false
}
def isLowerThan[V](interval: I[V], v: V)(implicit ord: Order[V]): Boolean =
relation(interval, v) match {
case LT => true
case _ => false
}
def isEmptyWith[V](l: V, u: V)(implicit ord: Order[V]): Boolean = {
ord.lt(l, u) || (ord.eqv(l, u) && haveClosedBound)
}
def haveClosedBound: Boolean
def inside[V](interval: I[V])(l: V, u: V)(implicit ord: Order[V]): Boolean =
isUpperThan[V](interval, l) && isLowerThan[V](interval, u)
def outside[V](interval: I[V])(l: V, u: V)(implicit ord: Order[V]): Boolean =
!inside(interval)(l, u)
}
| daniil-timofeev/gridsplines | piecewise/src/main/scala/piecewise/intervaltree/IntervalOps.scala | Scala | apache-2.0 | 1,123 |
/*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package wvlet.airframe.http.recorder
import java.time.Instant
import java.util.concurrent.atomic.AtomicInteger
import java.util.Base64
import com.twitter.finagle.http.{MediaType, Request, Response}
import com.twitter.io.Buf
import org.yaml.snakeyaml.{DumperOptions, Yaml}
import wvlet.airframe.codec.MessageCodec
import wvlet.airframe.jdbc.{DbConfig, SQLiteConnectionPool}
import wvlet.airframe.metrics.TimeWindow
import wvlet.log.LogSupport
import java.nio.charset.StandardCharsets
import java.sql.ResultSet
import scala.jdk.CollectionConverters._
/**
* Recorder for HTTP server responses
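 *
 * A rough usage sketch (illustrative only; the config value is an assumption):
 * {{{
 *   val store = new HttpRecordStore(recorderConfig, inMemory = true)
 *   store.record(request, response)          // persist a request/response pair
 *   val recorded = store.findNext(request)   // later, replay the matching record (if any)
 * }}}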
*/
class HttpRecordStore(val recorderConfig: HttpRecorderConfig, dropSession: Boolean = false, inMemory: Boolean = false)
extends AutoCloseable
with LogSupport {
private val requestCounter = scala.collection.mutable.Map.empty[Int, AtomicInteger]
private val connectionPool = {
val dbFilePath = if (inMemory) ":memory:" else recorderConfig.sqliteFilePath
new SQLiteConnectionPool(DbConfig.ofSQLite(dbFilePath))
}
private def recordTableName = recorderConfig.recordTableName
// Increment this value if we need to change the table format.
private val recordFormatVersion = 4
init
private def createRecorderInfoTable: Unit = {
connectionPool.executeUpdate("create table if not exists recorder_info(format_version integer primary key)")
}
protected def init: Unit = {
// Support recorder version migration for persistent records
createRecorderInfoTable
val lastVersion: Option[Int] = connectionPool.executeQuery("select format_version from recorder_info limit 1") {
handler =>
if (handler.next()) {
Some(handler.getInt(1))
} else {
None
}
}
def setVersion: Unit = {
clearAllRecords
// Record the current record format version
connectionPool.executeUpdate(
s"insert into recorder_info(format_version) values(${recordFormatVersion}) ON CONFLICT(format_version) DO UPDATE SET format_version=${recordFormatVersion}"
)
}
lastVersion match {
case None => setVersion
case Some(last) if last != recordFormatVersion =>
warn(s"Record format version has been changed from ${last} to ${recordFormatVersion}")
connectionPool.executeUpdate("drop table if exists recorder_info") // Support schema migration
createRecorderInfoTable
setVersion
case _ => // do nothing
}
// Prepare a database table for recording HttpRecord
connectionPool.executeUpdate(HttpRecord.createTableSQL(recordTableName))
connectionPool.executeUpdate(
s"create index if not exists ${recordTableName}_index on ${recordTableName} (session, requestHash)"
)
// TODO: Detect schema change
if (dropSession) {
clearSession
}
cleanupExpiredRecords
}
private def clearAllRecords: Unit = {
if (!inMemory) {
warn(s"Deleting all records in ${recorderConfig.sqliteFilePath}")
connectionPool.executeUpdate(s"drop table if exists ${recordTableName}")
}
}
def clearSession: Unit = {
if (!inMemory) {
warn(s"Deleting old session records for session:${recorderConfig.sessionName}")
connectionPool.executeUpdate(s"delete from ${recordTableName} where session = '${recorderConfig.sessionName}'")
}
}
private def cleanupExpiredRecords: Unit = {
recorderConfig.expirationTime match {
case None =>
// Do nothing
case Some(expire) =>
val duration = TimeWindow.withUTC.parse(expire)
val diffSec = duration.endUnixTime - duration.startUnixTime
val deletedRows = connectionPool.executeUpdate(
s"delete from ${recordTableName} where session = '${recorderConfig.sessionName}' and strftime('%s', 'now') - strftime('%s', createdAt) >= ${diffSec}"
)
if (deletedRows > 0) {
warn(
s"Deleted ${deletedRows} expired records from session:${recorderConfig.sessionName}, db:${recorderConfig.sqliteFilePath}"
)
}
}
}
def resetCounter: Unit = {
requestCounter.clear()
}
def numRecordsInSession: Long = {
connectionPool.executeQuery(
s"select count(1) cnt from ${recordTableName} where session = '${recorderConfig.sessionName}'"
) { rs =>
if (rs.next()) {
rs.getLong(1)
} else {
0L
}
}
}
def findNext(request: Request, incrementHitCount: Boolean = true): Option[HttpRecord] = {
val rh = recorderConfig.requestMatcher.computeHash(request)
// If there are multiple records for the same request, use the counter to find
// n-th request, where n is the access count to the same path
val counter = requestCounter.getOrElseUpdate(rh, new AtomicInteger())
val hitCount = if (incrementHitCount) counter.getAndIncrement() else counter.get()
trace(s"findNext: request hash: ${rh} for ${request}, hitCount: ${hitCount}")
connectionPool.queryWith(
// Get the next request matching the requestHash
s"select * from ${recordTableName} where session = ? and requestHash = ? order by createdAt limit 1 offset ?"
) { prepare =>
prepare.setString(1, recorderConfig.sessionName)
prepare.setInt(2, rh)
prepare.setInt(3, hitCount)
} { rs => HttpRecord.read(rs).headOption }
}
def record(request: Request, response: Response): Unit = {
val rh = recorderConfig.requestMatcher.computeHash(request)
val httpHeadersForRecording: Seq[(String, String)] =
request.headerMap.toSeq.filterNot { x => recorderConfig.excludeHeaderFilterForRecording(x._1, x._2) }
val entry = HttpRecord(
recorderConfig.sessionName,
requestHash = rh,
method = request.method.toString(),
destHost = recorderConfig.destAddress.hostAndPort,
path = request.uri,
requestHeader = httpHeadersForRecording,
requestBody = HttpRecordStore.encodeToBase64(request.content),
responseCode = response.statusCode,
responseHeader = response.headerMap.toSeq,
responseBody = HttpRecordStore.encodeToBase64(response.content),
createdAt = Instant.now()
)
trace(s"record: request hash ${rh} for ${request} -> ${entry.summary}")
connectionPool.withConnection { conn => entry.insertInto(recordTableName, conn) }
}
override def close(): Unit = {
connectionPool.stop
}
def dumpSessionAsJson: String = {
dumpSession(dumpRecordAsJson _)
}
def dumpAllSessionsAsJson: String = {
dumpAllSessions(dumpRecordAsJson _)
}
def dumpSessionAsYaml: String = {
dumpSession(dumpRecordAsYaml _)
}
def dumpAllSessionsAsYaml: String = {
dumpAllSessions(dumpRecordAsYaml _)
}
private def dumpSession(dumper: ResultSet => String): String = {
connectionPool.queryWith(
s"select * from ${recordTableName} where session = ? order by createdAt"
) { prepare =>
prepare.setString(1, recorderConfig.sessionName)
}(dumper)
}
private def dumpAllSessions(dumper: ResultSet => String): String = {
connectionPool.executeQuery(
      // Dump all records across sessions, ordered by creation time
s"select * from ${recordTableName} order by createdAt"
)(dumper)
}
private def dumpRecordAsJson(rs: ResultSet): String = {
val records = HttpRecord.read(rs).map { record =>
record.copy(
requestBody = decodeBody(record.requestBody, record.requestHeader),
responseBody = decodeBody(record.responseBody, record.responseHeader)
)
}
val recordCodec = MessageCodec.of[HttpRecord]
records
.map { record =>
recordCodec.toJson(record)
}.mkString("\n")
}
private def dumpRecordAsYaml(rs: ResultSet): String = {
val records = HttpRecord
.read(rs).map { record =>
Map(
"session" -> record.session,
"requestHash" -> record.requestHash,
"method" -> record.method,
"destHost" -> record.destHost,
"path" -> record.path,
"requestHeader" -> record.requestHeader.toMap.asJava,
"requestBody" -> decodeBody(record.requestBody, record.requestHeader),
"responseCode" -> record.responseCode,
"responseHeader" -> record.responseHeader.toMap.asJava,
"responseBody" -> decodeBody(record.responseBody, record.responseHeader),
"createdAt" -> record.createdAt.toString
).asJava
}.asJava
val options = new DumperOptions()
options.setDefaultFlowStyle(DumperOptions.FlowStyle.BLOCK)
options.setDefaultScalarStyle(DumperOptions.ScalarStyle.PLAIN)
new Yaml(options).dump(records)
}
private def decodeBody(base64Encoded: String, headers: Seq[(String, String)]): String = {
val contentType = headers.collectFirst {
case (name, value) if name == "Content-Type" =>
value
}
if (contentType.contains(MediaType.OctetStream)) {
base64Encoded
} else {
new String(HttpRecordStore.decodeFromBase64(base64Encoded), StandardCharsets.UTF_8)
}
}
}
object HttpRecordStore {
def encodeToBase64(content: Buf): String = {
val buf = new Array[Byte](content.length)
content.write(buf, 0)
val encoder = Base64.getEncoder
encoder.encodeToString(buf)
}
def decodeFromBase64(base64String: String): Array[Byte] = {
val decoder = Base64.getDecoder
decoder.decode(base64String)
}
}
| wvlet/airframe | airframe-http-recorder/src/main/scala/wvlet/airframe/http/recorder/HttpRecordStore.scala | Scala | apache-2.0 | 9,991 |
/*
* Copyright (c) 2014-2018 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.reactive.subjects
import minitest.TestSuite
import monix.eval.Task
import monix.execution.exceptions.DummyException
import monix.execution.Ack.Continue
import monix.execution.Scheduler
import monix.execution.schedulers.TestScheduler
import monix.reactive.{Observable, Observer}
import scala.util.Random
trait BaseConcurrentSubjectSuite extends TestSuite[TestScheduler] {
case class Sample(channel: ConcurrentSubject[Long,Long] with Observable[Long], expectedSum: Long)
def setup() = TestScheduler()
def tearDown(s: TestScheduler) = {
assert(s.state.tasks.isEmpty,
"TestScheduler should have no pending tasks")
}
/**
   * Returns a sample channel to be tested, along with the sum its subscribers are expected to observe.
*/
def alreadyTerminatedTest(expectedElems: Seq[Long])(implicit s: Scheduler): Sample
/**
   * Returns a sample channel for the continuous streaming test,
   * or `None` if the subject under test does not support it.
*/
def continuousStreamingTest(expectedElems: Seq[Long])(implicit s: Scheduler): Option[Sample]
test("already completed and empty channel terminates observers") { implicit s =>
var wereCompleted = 0
var sum = 0L
def createObserver = new Observer[Long] {
def onNext(elem: Long) = {
sum += elem
Continue
}
def onError(ex: Throwable) = ()
def onComplete() = {
wereCompleted += 1
}
}
val Sample(channel, expectedSum) = alreadyTerminatedTest(Seq.empty)
channel.onComplete()
channel.unsafeSubscribeFn(createObserver)
channel.unsafeSubscribeFn(createObserver)
channel.unsafeSubscribeFn(createObserver)
s.tick()
assertEquals(sum, expectedSum * 3)
assertEquals(wereCompleted, 3)
}
test("failed empty channel terminates observers with an error") { implicit s =>
var wereCompleted = 0
var sum = 0L
def createObserver = new Observer[Long] {
def onNext(elem: Long) = {
sum += elem
Continue
}
def onComplete() = ()
def onError(ex: Throwable) = ex match {
case DummyException("dummy") =>
wereCompleted += 1
case _ =>
()
}
}
val Sample(channel, _) = alreadyTerminatedTest(Seq.empty)
channel.onError(DummyException("dummy"))
s.tick()
channel.unsafeSubscribeFn(createObserver)
channel.unsafeSubscribeFn(createObserver)
channel.unsafeSubscribeFn(createObserver)
s.tick()
assertEquals(sum, 0)
assertEquals(wereCompleted, 3)
}
test("already completed but non-empty channel terminates new observers") { implicit s =>
val elems = (0 until 20).map(_ => Random.nextLong())
var wereCompleted = 0
var sum = 0L
def createObserver = new Observer[Long] {
def onNext(elem: Long) = {
sum += elem
Continue
}
def onError(ex: Throwable) = ()
def onComplete() = {
wereCompleted += 1
}
}
val Sample(channel, expectedSum) = alreadyTerminatedTest(elems)
for (e <- elems) channel.onNext(e); channel.onComplete()
s.tick()
channel.unsafeSubscribeFn(createObserver)
channel.unsafeSubscribeFn(createObserver)
channel.unsafeSubscribeFn(createObserver)
s.tick()
assertEquals(sum, expectedSum * 3)
assertEquals(wereCompleted, 3)
}
test("already failed but non-empty channel terminates new observers") { implicit s =>
val elems = (0 until 20).map(_ => Random.nextLong())
var wereCompleted = 0
def createObserver = new Observer[Long] {
def onNext(elem: Long) = Continue
def onComplete() = ()
def onError(ex: Throwable) = ex match {
case DummyException("dummy") =>
wereCompleted += 1
case _ =>
()
}
}
val Sample(channel, _) = alreadyTerminatedTest(elems)
for (e <- elems) channel.onNext(e)
channel.onError(DummyException("dummy"))
s.tick()
channel.unsafeSubscribeFn(createObserver)
channel.unsafeSubscribeFn(createObserver)
channel.unsafeSubscribeFn(createObserver)
s.tick()
assertEquals(wereCompleted, 3)
}
test("should remove subscribers that triggered errors") { implicit s =>
val elems = (0 until Random.nextInt(300) + 100).map(_.toLong)
var wereCompleted = 0
var totalOnNext = 0L
def createObserver =
new Observer[Long] {
def onNext(elem: Long) = {
if (elem > 10)
throw DummyException("dummy")
else {
totalOnNext += elem
Continue
}
}
def onComplete() = ()
def onError(ex: Throwable) = ex match {
case DummyException("dummy") =>
wereCompleted += 1
case _ =>
()
}
}
continuousStreamingTest(elems) match {
case None => ignore()
case Some(Sample(channel, expectedSum)) =>
var totalEmitted = 0L
channel.doOnNext(x => Task { totalEmitted += x }).subscribe()
channel.subscribe(createObserver)
channel.subscribe(createObserver)
channel.subscribe(createObserver)
s.tick()
for (e <- elems) channel.onNext(e); channel.onComplete()
s.tick()
assertEquals(wereCompleted, 3)
assertEquals(totalEmitted, expectedSum)
assertEquals(totalOnNext, 11 * 5 * 3)
}
}
}
| Wogan/monix | monix-reactive/shared/src/test/scala/monix/reactive/subjects/BaseConcurrentSubjectSuite.scala | Scala | apache-2.0 | 5,953 |
package org.dbpedia.spotlight.db.io.util
import org.codehaus.jackson.{JsonToken, JsonFactory}
/**
* TokenOccurrenceParser based on the Jackson Streaming API.
*
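 * Expects the token counts as a JSON array of `[token, count]` pairs, e.g. (illustrative):
 * {{{
 *   parse("""[["the", 10], ["rare", 1]]""", minimumCount = 2)
 *   // -> (Array("the"), Array(10))
 * }}}
 *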
* @author Joachim Daiber
*/
class JacksonTokenOccurrenceParser extends TokenOccurrenceParser {
val jFactory = new JsonFactory()
def parse(tokens: String, minimumCount: Int): Pair[Array[String], Array[Int]] = {
var tokensA = Array[String]()
var countsA = Array[Int]()
val jParser = jFactory.createJsonParser(tokens)
jParser.nextToken()
while (jParser.nextToken() != JsonToken.END_ARRAY) {
jParser.nextToken()
val token = jParser.getText
jParser.nextToken()
val count = jParser.getIntValue
if (count >= minimumCount) {
countsA :+= count
tokensA :+= token
}
jParser.nextToken()
}
Pair(tokensA, countsA)
}
} | Skunnyk/dbpedia-spotlight-model | index/src/main/scala/org/dbpedia/spotlight/db/io/util/JacksonTokenOccurrenceParser.scala | Scala | apache-2.0 | 872 |
package com.nummulus.amqp.driver.test
import org.scalatest._
import com.nummulus.amqp.driver.api.provider.AmqpProviderRequest
import com.nummulus.amqp.driver.fixture.BoundProviderFixture
class ProviderIntegrationTest extends FlatSpec with Matchers {
behavior of "AmqpProvider"
val SomeDeliveryTag = 1
it should "forward a message to the provided actor" in new BoundProviderFixture(true) {
sendMessage(SomeDeliveryTag, "Hi")
probe.expectMsg(AmqpProviderRequest("Hi", SomeDeliveryTag))
}
} | nummulus/amqp-driver | amqp-driver-test/src/test/scala/com/nummulus/amqp/driver/test/ProviderIntegrationTest.scala | Scala | apache-2.0 | 515 |
package macros
class Foo {
def bar(y: Int) = 10
} | xeno-by/old-scalameta-sbt | sbt/src/sbt-test/source-dependencies/macro-scalahost-3/changes/Foo.scala | Scala | bsd-3-clause | 52 |
package com.rumblesan.scalaglitch.types
import java.awt.image.BufferedImage
trait GlitchTypes {
type Coord = (Int, Int)
type PixelShifter = Coord => Coord
}
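// Result of a glitch operation: a single JPEG frame or the frame sequence of an animated GIF.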
sealed trait GlitchedImage
case class GlitchedJpeg(image: BufferedImage) extends GlitchedImage
case class GlitchedGif(images: List[BufferedImage]) extends GlitchedImage
sealed trait GlitchSource
case class ImageCanvas(image: BufferedImage, glitchType: String) extends GlitchSource
case class GlitchedImageData(data: Array[Byte], extension: String)
| rumblesan/cuttr | scala-glitch/src/main/scala/types/GlitchTypes.scala | Scala | bsd-2-clause | 538 |
package java.util
object RandomSuite extends tests.Suite {
/** Helper class to access next */
class HackRandom(seed: Long) extends Random(seed) {
override def next(bits: Int): Int = super.next(bits)
}
test("seed 10") {
val random = new HackRandom(10)
assert(random.next(10) == 747)
assert(random.next(1) == 0)
assert(random.next(6) == 16)
assert(random.next(20) == 432970)
assert(random.next(32) == 254270492)
}
test("seed -5") {
val random = new HackRandom(-5)
assert(random.next(10) == 275)
assert(random.next(1) == 0)
assert(random.next(6) == 21)
assert(random.next(20) == 360349)
assert(random.next(32) == 1635930704)
}
test("seed max long") {
val random = new HackRandom(Long.MaxValue)
assert(random.next(10) == 275)
assert(random.next(1) == 0)
assert(random.next(6) == 0)
assert(random.next(20) == 574655)
assert(random.next(32) == -1451336087)
}
test("seed max int") {
val random = new HackRandom(Int.MinValue)
assert(random.next(10) == 388)
assert(random.next(1) == 0)
assert(random.next(6) == 25)
assert(random.next(20) == 352095)
assert(random.next(32) == -2140124682)
}
test("seed reset") {
val random = new HackRandom(11)
assert(random.next(10) == 747)
assert(random.next(1) == 1)
assert(random.next(6) == 27)
random.setSeed(11)
assert(random.next(10) == 747)
assert(random.next(1) == 1)
assert(random.next(6) == 27)
}
test("reset nextGaussian") {
val random = new Random(-1)
assert(random.nextGaussian() == 1.7853314409882288)
random.setSeed(-1)
assert(random.nextGaussian() == 1.7853314409882288)
}
test("nextDouble") {
val random = new Random(-45)
assert(random.nextDouble() == 0.27288421395636253)
assert(random.nextDouble() == 0.5523165360074201)
assert(random.nextDouble() == 0.5689979434708298)
assert(random.nextDouble() == 0.9961166166874871)
assert(random.nextDouble() == 0.5368984665202684)
assert(random.nextDouble() == 0.19849067496547423)
assert(random.nextDouble() == 0.6021019223595357)
assert(random.nextDouble() == 0.06132131151816378)
assert(random.nextDouble() == 0.7303867762743866)
assert(random.nextDouble() == 0.7426529384056163)
}
test("nextBoolean") {
val random = new Random(4782934)
assert(random.nextBoolean() == false)
assert(random.nextBoolean() == true)
assert(random.nextBoolean() == true)
assert(random.nextBoolean() == false)
assert(random.nextBoolean() == false)
assert(random.nextBoolean() == false)
assert(random.nextBoolean() == true)
assert(random.nextBoolean() == false)
}
test("nextInt") {
val random = new Random(-84638)
assert(random.nextInt() == -1217585344)
assert(random.nextInt() == 1665699216)
assert(random.nextInt() == 382013296)
assert(random.nextInt() == 1604432482)
assert(random.nextInt() == -1689010196)
assert(random.nextInt() == 1743354032)
assert(random.nextInt() == 454046816)
assert(random.nextInt() == 922172344)
assert(random.nextInt() == -1890515287)
assert(random.nextInt() == 1397525728)
}
test("nextIntN") {
val random = new Random(7)
assert(random.nextInt(76543) == 32736)
assert {
try {
random.nextInt(0)
false
} catch {
case _: Throwable => true
}
}
assert(random.nextInt(45) == 29)
assert(random.nextInt(945) == 60)
assert(random.nextInt(35694839) == 20678044)
assert(random.nextInt(35699) == 23932)
assert(random.nextInt(3699) == 2278)
assert(random.nextInt(10) == 8)
}
test("nextInt2Pow") {
val random = new Random(-56938)
assert(random.nextInt(32) == 8)
assert(random.nextInt(8) == 3)
assert(random.nextInt(128) == 3)
assert(random.nextInt(4096) == 1950)
assert(random.nextInt(8192) == 3706)
assert(random.nextInt(8192) == 4308)
assert(random.nextInt(8192) == 3235)
assert(random.nextInt(8192) == 7077)
assert(random.nextInt(8192) == 2392)
assert(random.nextInt(32) == 31)
}
test("nextLong") {
val random = new Random(205620432625028L)
assert(random.nextLong() == 3710537363280377478L)
assert(random.nextLong() == 4121778334981170700L)
assert(random.nextLong() == 289540773990891960L)
assert(random.nextLong() == 307008980197674441L)
assert(random.nextLong() == 7527069864796025013L)
assert(random.nextLong() == -4563192874520002144L)
assert(random.nextLong() == 7619507045427546529L)
assert(random.nextLong() == -7888117030898487184L)
assert(random.nextLong() == -3499168703537933266L)
assert(random.nextLong() == -1998975913933474L)
}
test("nextFloat") {
val random = new Random(-3920005825473L)
def closeTo(num: Float, exp: Double): Boolean =
((num < (exp + 0.0000001)) && (num > (exp - 0.0000001)))
assert(closeTo(random.nextFloat(), 0.059591234))
assert(closeTo(random.nextFloat(), 0.7007871))
assert(closeTo(random.nextFloat(), 0.39173192))
assert(closeTo(random.nextFloat(), 0.0647918))
assert(closeTo(random.nextFloat(), 0.9029677))
assert(closeTo(random.nextFloat(), 0.18226051))
assert(closeTo(random.nextFloat(), 0.94444054))
assert(closeTo(random.nextFloat(), 0.008844078))
assert(closeTo(random.nextFloat(), 0.08891684))
assert(closeTo(random.nextFloat(), 0.06482434))
}
test("nextBytes") {
val random = new Random(7399572013373333L)
def test(exps: Array[Int]) = {
val exp = exps.map(_.toByte)
val buf = new Array[Byte](exp.length)
random.nextBytes(buf)
var i = 0
var res = true
assert {
while (i < buf.size && res == true) {
res = (buf(i) == exp(i))
i += 1
}
res
}
}
test(Array[Int](62, 89, 68, -91, 10, 0, 85))
test(
Array[Int](-89,
-76,
88,
121,
-25,
47,
58,
-8,
78,
20,
-77,
84,
-3,
-33,
58,
-9,
11,
57,
-118,
40,
-74,
-86,
78,
123,
58))
test(Array[Int](-77, 112, -116))
test(Array[Int]())
test(Array[Int](-84, -96, 108))
test(Array[Int](57, -106, 42, -100, -47, -84, 67, -48, 45))
}
test("nextGaussian") {
val random = new Random(2446004)
assert(random.nextGaussian() == -0.5043346938630431)
assert(random.nextGaussian() == -0.3250983270156675)
assert(random.nextGaussian() == -0.23799457294994966)
assert(random.nextGaussian() == 0.4164610631507695)
assert(random.nextGaussian() == 0.22086348814760687)
assert(random.nextGaussian() == -0.706833209972521)
assert(random.nextGaussian() == 0.6730758289772553)
assert(random.nextGaussian() == 0.2797393696191283)
assert(random.nextGaussian() == -0.2979099632667685)
assert(random.nextGaussian() == 0.37443415981434314)
assert(random.nextGaussian() == 0.9584801742918951)
assert(random.nextGaussian() == 1.1762179112229345)
assert(random.nextGaussian() == 0.8736960092848826)
assert(random.nextGaussian() == 0.12301554931271008)
assert(random.nextGaussian() == -0.6052081187207353)
assert(random.nextGaussian() == -0.2015925608755316)
assert(random.nextGaussian() == -1.0071216119742104)
assert(random.nextGaussian() == 0.6734222041441913)
assert(random.nextGaussian() == 0.3990565555091522)
assert(random.nextGaussian() == 2.0051627385915154)
}
}
| cedricviaccoz/scala-native | unit-tests/src/main/scala/java/util/RandomSuite.scala | Scala | bsd-3-clause | 7,809 |
package org.apache.spark.sql.types
import magellan._
import org.apache.spark.sql.catalyst.InternalRow
import org.apache.spark.sql.catalyst.expressions.GenericInternalRow
class PointUDT extends UserDefinedType[Point] with GeometricUDT {
override val sqlType: StructType = StructType(
Seq(
StructField("type", IntegerType, nullable = false),
StructField("xmin", DoubleType, nullable = false),
StructField("ymin", DoubleType, nullable = false),
StructField("xmax", DoubleType, nullable = false),
StructField("ymax", DoubleType, nullable = false),
StructField("x", DoubleType, nullable = false),
StructField("y", DoubleType, nullable = false)
))
override def serialize(point: Point): InternalRow = {
val row = new GenericInternalRow(7)
row.setInt(0, point.getType())
row.setDouble(1, point.getX())
row.setDouble(2, point.getY())
row.setDouble(3, point.getX())
row.setDouble(4, point.getY())
row.setDouble(5, point.getX())
row.setDouble(6, point.getY())
row
}
override def serialize(shape: Shape) = serialize(shape.asInstanceOf[Point])
override def userClass: Class[Point] = classOf[Point]
override def deserialize(datum: Any): Point = {
val row = datum.asInstanceOf[InternalRow]
require(row.numFields == 7)
Point(row.getDouble(5), row.getDouble(6))
}
override def pyUDT: String = "magellan.types.PointUDT"
def serialize(x: Double, y: Double): InternalRow = {
val row = new GenericInternalRow(7)
row.setInt(0, 1)
row.setDouble(1, x)
row.setDouble(2, y)
row.setDouble(3, x)
row.setDouble(4, y)
row.setDouble(5, x)
row.setDouble(6, y)
row
}
override val geometryType = new Point().getType()
}
| harsha2010/magellan | src/main/scala/org/apache/spark/sql/types/PointUDT.scala | Scala | apache-2.0 | 1,754 |
import sbt._
import Keys._
import play.Project._
object ApplicationBuild extends Build {
val appName = "eswcss-project"
val appVersion = "1.0-SNAPSHOT"
val appDependencies = Seq(
// Select Play modules
//jdbc, // The JDBC connection pool and the play.api.db API
//anorm, // Scala RDBMS Library
//javaJdbc, // Java database API
//javaEbean, // Java Ebean plugin
//javaJpa, // Java JPA plugin
//filters, // A set of built-in filters
javaCore, // The core Java API
// WebJars pull in client-side web libraries
"org.webjars" % "webjars-play" % "2.1.0",
"org.webjars" % "bootstrap" % "2.3.1"
// "commons-httpclient" % "commons-httpclient" % "3.1"
// Add your own project dependencies in the form:
// "group" % "artifact" % "version"
)
val main = play.Project(appName, appVersion, appDependencies).settings(
// Add your own project settings here
scalaVersion := "2.10.2"
)
}
| laurensdv/buzzword_detector | project/Build.scala | Scala | gpl-2.0 | 984 |
package com.lucidchart.open.cashy.utils
import org.apache.commons.mail.HtmlEmail
import play.api.Play.configuration
import play.api.Play.current
import play.api.Logger
case class MailerSMTPConfiguration(
host: String,
port: Int,
user: String,
pass: String
)
case class MailerAddress(
email: String,
name: String = ""
)
case class MailerMessage(
from: MailerAddress,
reply: Option[MailerAddress] = None,
to: Iterable[MailerAddress],
cc: Iterable[MailerAddress] = Nil,
bcc: Iterable[MailerAddress] = Nil,
subject: String,
text: String
)
object Mailer {
private val enabled = configuration.getBoolean("mailer.enabled").get
private val smtpConfig = MailerSMTPConfiguration(
configuration.getString("mailer.smtp.host").get,
configuration.getInt("mailer.smtp.port").get,
configuration.getString("mailer.smtp.user").get,
configuration.getString("mailer.smtp.pass").get
)
/**
* Send an email message
*
* Throws any and all exceptions
*
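   * A usage sketch (the addresses and text below are made up):
   * {{{
   *   Mailer.send(MailerMessage(
   *     from = MailerAddress("[email protected]", "Cashy"),
   *     to = Seq(MailerAddress("[email protected]")),
   *     subject = "Upload complete",
   *     text = "Your asset is now available."
   *   ))
   * }}}
   *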
* @param message Details about the message to send
*/
def send(message: MailerMessage) {
if (!enabled) {
Logger.info("Not sending email to " + message.to + " with subject '" + message.subject + "' because the mailer is disabled.")
}
else {
val email = new HtmlEmail()
email.setSmtpPort(smtpConfig.port)
email.setHostName(smtpConfig.host)
email.setAuthentication(smtpConfig.user, smtpConfig.pass)
email.setTextMsg(message.text)
email.setSubject(message.subject)
email.setFrom(message.from.email, message.from.name)
message.reply.map { reply =>
email.addReplyTo(reply.email, reply.name)
}
message.to.foreach { to =>
email.addTo(to.email, to.name)
}
message.cc.foreach { cc =>
email.addCc(cc.email, cc.name)
}
message.bcc.foreach { bcc =>
email.addBcc(bcc.email, bcc.name)
}
email.send()
}
}
}
| lucidsoftware/cashy | app/com/lucidchart/open/cashy/utils/Mailer.scala | Scala | apache-2.0 | 1,954 |
package dnd5_dm_db.lang.eng
import dnd5_dm_db.lang.Lang
import dnd5_dm_db.model._
trait SkillAndLanguageText {
self : Lang =>
val skills : String = "Skills"
val skill : Skill => String = {
case Athletics => "Athletics"
case Acrobatics => "Acrobatics"
case SleightOfHand => "Sleight of hand"
case Stealth => "Stealth"
case Arcana => "Arcana"
case History => "History"
case Investigation => "Investigation"
case Nature => "Nature"
case Religion => "Religion"
case AnimalHandling => "Animal handling"
case Insight => "Insight"
case Medicine => "Medicine"
case Perception => "Perception"
case Survival => "Survival"
case Deception => "Deception"
case Intimidation => "Intimidation"
case Performance => "Performance"
case Persuasion => "Persuasion"
}
val languages : String = "Languages"
val language : Language => String = {
case AnyLanguage(x, default) => s"any $x language"+
default.map(d => s" (usually ${language(d)})").getOrElse("")
case LanguageSpecial(str) => str.value(self)
case UnderstandOnly(l) =>
s"understands ${language(l)} but can't speak"
case Common => "Common"
case Dwarvish => "Dwarvish"
case Elvish => "Elvish"
case GiantLang => "Giant"
case Gnomish => "Gnomish"
case Goblin => "Goblin"
case Halfling => "Halfling"
case OrcLang => "Orc"
case Abyssal => "Abyssal"
case CelestialLang => "Celestial"
case Draconic => "Draconic"
case DeepSpeech => "Deep speech"
case Infernal => "Infernal"
case Primordial => "Primordial"
case Sylvan => "Sylvan"
case Undercommon => "Undercommon"
case TroglodyteLang => "Troglodyte"
}
}
| lorilan/dnd5_dm_db | src/main/scala/dnd5_dm_db/lang/eng/SkillAndLanguageText.scala | Scala | gpl-3.0 | 1,717 |
package breeze.optimize
/*
Copyright 2015 David Hall, Daniel Ramage, Debasish Das
Licensed under the Apache License, Version 2.0 (the "License")
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
import breeze.math.{InnerProductModule, MutableVectorField}
import breeze.linalg.norm
import breeze.util.SerializableLogging
/**
* SPG is a Spectral Projected Gradient minimizer; it minimizes a differentiable
 * function subject to the optimum lying in some feasible set, given by the projection operator `projection`
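 *
 * A usage sketch (illustrative only; assumes `breeze.linalg.DenseVector` and a simple
 * non-negativity projection, neither of which is prescribed by this class):
 * {{{
 *   val spg = new SpectralProjectedGradient[DenseVector[Double]](
 *     projection = (v: DenseVector[Double]) => v.map(math.max(_, 0.0)))
 *   val f = new DiffFunction[DenseVector[Double]] {
 *     def calculate(x: DenseVector[Double]) = ((x dot x) / 2.0, x)
 *   }
 *   val xMin = spg.minimize(f, DenseVector(1.0, -2.0, 3.0))
 * }}}
 *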
* @tparam T vector type
* @param tolerance termination criterion: tolerance for norm of projected gradient
* @param suffDec sufficient decrease parameter
* @param bbMemory number of history entries for linesearch
* @param alphaMax longest step
* @param alphaMin shortest step
* @param maxIter maximum number of iterations
* @param maxSrcht maximum number of iterations inside line search
* @param initFeas is the initial guess feasible, or should it be projected?
 * @param projection the projection operator mapping a point onto the feasible set
 * @param curvilinear if true, do the projection inside the line search instead of in chooseDescentDirection
*/
class SpectralProjectedGradient[T](val projection: T => T = { (t: T) =>
t
}, tolerance: Double = 1e-6, suffDec: Double = 1e-4, fvalMemory: Int = 30, alphaMax: Double = 1e10, alphaMin: Double = 1e-10, bbMemory: Int = 10, maxIter: Int = -1, val initFeas: Boolean = false, val curvilinear: Boolean = false, val bbType: Int = 1, val maxSrcht: Int = 30)(
implicit space: MutableVectorField[T, Double])
extends FirstOrderMinimizer[T, DiffFunction[T]](fvalMemory = fvalMemory, maxIter = maxIter, tolerance = tolerance)
with Projecting[T]
with SerializableLogging {
import space._
case class History(alphaBB: Double, fvals: IndexedSeq[Double])
override protected def initialHistory(f: DiffFunction[T], init: T): History = {
History(0.1, IndexedSeq.empty)
}
/**
* From Mark Schmidt's Matlab code
* if bbType == 1
* alpha = (s'*s)/(s'*y);
* else
* alpha = (s'*y)/(y'*y);
*/
protected def bbAlpha(s: T, y: T): Double = {
var alpha =
if (bbType == 1)(s.dot(s)) / (s.dot(y))
else (s.dot(y)) / (y.dot(y))
if (alpha <= alphaMin || alpha > alphaMax) alpha = 1.0
if (alpha.isNaN) alpha = 1.0
alpha
}
override protected def updateHistory(
newX: T,
newGrad: T,
newVal: Double,
f: DiffFunction[T],
oldState: State): History = {
val s = newX - oldState.x
val y = newGrad - oldState.grad
History(bbAlpha(s, y), (newVal +: oldState.history.fvals).take(bbMemory))
}
override protected def takeStep(state: State, dir: T, stepSize: Double): T = {
val qq = projection(state.x + dir * stepSize)
assert(projection(qq) == qq)
qq
}
override protected def chooseDescentDirection(state: State, f: DiffFunction[T]): T = {
if (curvilinear) state.x - state.grad * state.history.alphaBB
else projection(state.x - state.grad * state.history.alphaBB) - state.x
}
override protected def determineStepSize(state: State, f: DiffFunction[T], direction: T): Double = {
val fb = if (state.history.fvals.isEmpty) state.value else state.value.max(state.history.fvals.max)
val normGradInDir = state.grad.dot(direction)
var gamma =
if (state.iter == 0) scala.math.min(1.0, 1.0 / norm(state.grad))
else 1.0
val searchFun =
if (curvilinear) functionFromSearchDirection(f, state.x, direction, projection)
else LineSearch.functionFromSearchDirection(f, state.x, direction)
    //TODO:
    // 1. Add cubic interpolation and see its performance. Bisection did not work for L1 projection
val search = new BacktrackingLineSearch(fb, maxIterations = maxSrcht)
gamma = search.minimize(searchFun, gamma)
if (gamma < 1e-10) {
throw new LineSearchFailed(normGradInDir, norm(direction))
}
gamma
}
  // because of the projection, we have to do our own version
private def functionFromSearchDirection[T, I](f: DiffFunction[T], x: T, direction: T, project: T => T)(
implicit prod: InnerProductModule[T, Double]): DiffFunction[Double] = new DiffFunction[Double] {
import prod._
/** calculates the value at a point */
override def valueAt(alpha: Double): Double = f.valueAt(project(x + direction * alpha))
/** calculates the gradient at a point */
override def gradientAt(alpha: Double): Double = f.gradientAt(project(x + direction * alpha)).dot(direction)
/** Calculates both the value and the gradient at a point */
def calculate(alpha: Double): (Double, Double) = {
val (ff, grad) = f.calculate(x + direction * alpha)
ff -> (grad.dot(direction))
}
}
}
| scalanlp/breeze | math/src/main/scala/breeze/optimize/SpectralProjectedGradient.scala | Scala | apache-2.0 | 5,163 |
/*
* Copyright (c) 2014-2021 by The Monix Project Developers.
* See the project homepage at: https://monix.io
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package monix.catnap
import cats.effect.{Async, CancelToken, Concurrent, ContextShift}
import monix.catnap.internal.AsyncUtils
import monix.execution.Callback
import monix.execution.annotations.{UnsafeBecauseImpure, UnsafeProtocol}
import monix.execution.atomic.PaddingStrategy
import monix.execution.atomic.PaddingStrategy.NoPadding
import monix.execution.internal.GenericSemaphore
import monix.execution.internal.GenericSemaphore.Listener
import scala.concurrent.Promise
/** The `Semaphore` is an asynchronous semaphore implementation that
* limits the parallelism on task execution.
*
* The following example instantiates a semaphore with a
* maximum parallelism of 10:
*
* {{{
* import cats.implicits._
* import cats.effect.IO
*
* // Needed for ContextShift[IO]
* import monix.execution.Scheduler
* implicit val cs = IO.contextShift(Scheduler.global)
*
* // Dummies for didactic purposes
* case class HttpRequest()
* case class HttpResponse()
* def makeRequest(r: HttpRequest): IO[HttpResponse] = IO(???)
*
* for {
* semaphore <- Semaphore[IO](provisioned = 10)
* tasks = for (_ <- 0 until 1000) yield {
* semaphore.withPermit(makeRequest(HttpRequest()))
* }
* // Execute in parallel; note that due to the `semaphore`
* // no more than 10 tasks will be allowed to execute in parallel
* _ <- tasks.toList.parSequence
* } yield ()
* }}}
*
* ==Credits==
*
* `Semaphore` is now implementing `cats.effect.Semaphore`, deprecating
* the old Monix `TaskSemaphore`.
*
* The changes to the interface and some implementation details are
* inspired by the implementation in Cats-Effect, which was ported
* from FS2.
*/
final class Semaphore[F[_]] private (provisioned: Long, ps: PaddingStrategy)(
implicit F: Concurrent[F] OrElse Async[F],
cs: ContextShift[F])
extends cats.effect.concurrent.Semaphore[F] {
private[this] implicit val F0: Async[F] = F.unify
/** Returns the number of permits currently available. Always non-negative.
*
   * The protocol is unsafe: the semaphore is used in concurrent settings
* and thus the value returned isn't stable or reliable. Use with care.
*/
@UnsafeProtocol
def available: F[Long] = underlying.available
/** Obtains a snapshot of the current count. Can be negative.
*
* Like [[available]] when permits are available but returns the
* number of permits callers are waiting for when there are no permits
* available.
*/
@UnsafeProtocol
def count: F[Long] = underlying.count
/** Acquires `n` permits.
*
* The returned effect semantically blocks until all requested permits are
* available. Note that acquires are satisfied in strict FIFO order, so given
* a `Semaphore[F]` with 2 permits available, an `acquireN(3)` will
* always be satisfied before a later call to `acquireN(1)`.
*
* @see [[withPermit]], the preferred way to acquire and release
* @see [[acquire]] for a version acquires a single permit
*
* @param n number of permits to acquire; must be >= 0
*/
def acquireN(n: Long): F[Unit] =
underlying.acquireN(n)
/** Acquires a single permit. Alias for `[[acquireN]](1)`.
*
* @see [[withPermit]], the preferred way to acquire and release
* @see [[acquireN]] for a version that can acquire multiple permits
*/
override def acquire: F[Unit] = acquireN(1)
  /** Tries to acquire `n` permits without waiting, returning `true` if all of them were acquired.
*
* The protocol is unsafe, because with the "try*" methods the user needs a
* firm grasp of what race conditions are and how they manifest and usage of
* such methods can lead to very fragile logic.
*
   * @see [[tryAcquire]] for the version that acquires a single permit
* @see [[acquire]] for the version that can wait for acquisition
* @see [[withPermit]] the preferred way to acquire and release
*/
@UnsafeProtocol
def tryAcquireN(n: Long): F[Boolean] =
underlying.tryAcquireN(n)
/** Alias for `[[tryAcquireN]](1)`.
*
* The protocol is unsafe, because with the "try*" methods the user needs a
* firm grasp of what race conditions are and how they manifest and usage of
* such methods can lead to very fragile logic.
*
* @see [[tryAcquireN]] for the version that can acquire multiple permits
* @see [[acquire]] for the version that can wait for acquisition
* @see [[withPermit]] the preferred way to acquire and release
*/
@UnsafeProtocol
override def tryAcquire: F[Boolean] = tryAcquireN(1)
/** Releases `n` permits, potentially unblocking up to `n`
* outstanding acquires.
*
* @see [[withPermit]], the preferred way to acquire and release
*
* @param n number of permits to release - must be >= 0
*/
def releaseN(n: Long): F[Unit] =
underlying.releaseN(n)
/** Releases a permit, returning it to the pool.
*
* If there are consumers waiting on permits being available,
* then the first in the queue will be selected and given
* a permit immediately.
*
* @see [[withPermit]], the preferred way to acquire and release
*/
override def release: F[Unit] = releaseN(1)
/** Returns a new task, ensuring that the given source
* acquires an available permit from the semaphore before
* it is executed.
*
* The returned task also takes care of resource handling,
* releasing its permit after being complete.
*
* @param fa is an effect to execute once the permit has been
* acquired; regardless of its result, the permit is
* released to the pool afterwards
*/
def withPermit[A](fa: F[A]): F[A] =
withPermitN(1)(fa)
/** Returns a new task, ensuring that the given source
* acquires `n` available permits from the semaphore before
* it is executed.
*
* The returned task also takes care of resource handling,
* releasing its permits after being complete.
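   *
   * Example sketch (reusing the `semaphore` and `makeRequest` names from the class
   * documentation above):
   * {{{
   *   // a request that needs 2 permits at once
   *   semaphore.withPermitN(2)(makeRequest(HttpRequest()))
   * }}}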
*
* @param n is the number of permits required for the given
* function to be executed
*
* @param fa is an effect to execute once the permits have been
* acquired; regardless of its result, the permits are
* released to the pool afterwards
*/
def withPermitN[A](n: Long)(fa: F[A]): F[A] =
F0.bracket(underlying.acquireAsyncN(n)) {
case (acquire, _) => F0.flatMap(acquire)(_ => fa)
} {
case (_, release) => release
}
/** Returns a task that will be complete when the specified
* number of permits are available.
*
* The protocol is unsafe because by the time the returned
* task completes, some other process might have already
* acquired the available permits and thus usage of `awaitAvailable`
* can lead to fragile concurrent logic. Use with care.
*
* Can be useful for termination logic, for example to execute
* a piece of logic once all available permits have been released.
*
* @param n is the number of permits waited on
*/
@UnsafeProtocol
def awaitAvailable(n: Long): F[Unit] =
underlying.awaitAvailable(n)
private[this] val underlying =
new Semaphore.Impl[F](provisioned, ps)
}
object Semaphore {
/**
* Builds a [[Semaphore]] instance.
*
* @param provisioned is the number of permits initially available
*
* @param ps is an optional padding strategy for avoiding the
* "false sharing problem", a common JVM effect when multiple threads
* read and write in shared variables
*
* @param F is the type class instance required to make `Semaphore` work,
* can be either `Concurrent` or `Async` for extra flexibility
*
* @param cs is a `ContextShift` instance required in order to introduce
* async boundaries after successful `acquire` operations, for safety
*/
def apply[F[_]](provisioned: Long, ps: PaddingStrategy = NoPadding)(
implicit F: Concurrent[F] OrElse Async[F],
cs: ContextShift[F]): F[Semaphore[F]] = {
F.unify.delay(new Semaphore[F](provisioned, ps))
}
/** Builds a [[Semaphore]] instance.
*
* '''Unsafe warning:''' this violates referential transparency.
* Use with care, prefer the pure [[Semaphore.apply]].
*
* @param provisioned is the number of permits initially available
*
* @param ps is an optional padding strategy for avoiding the
* "false sharing problem", a common JVM effect when multiple threads
* read and write in shared variables
*
* @param F is the type class instance required to make `Semaphore` work,
* can be either `Concurrent` or `Async` for extra flexibility
*
* @param cs is a `ContextShift` instance required in order to introduce
* async boundaries after successful `acquire` operations, for safety
*/
@UnsafeBecauseImpure
def unsafe[F[_]](provisioned: Long, ps: PaddingStrategy = NoPadding)(
implicit F: Concurrent[F] OrElse Async[F],
cs: ContextShift[F]): Semaphore[F] =
new Semaphore[F](provisioned, ps)
implicit final class DeprecatedExtensions[F[_]](val source: Semaphore[F]) extends AnyVal {
/**
* DEPRECATED — renamed to [[Semaphore.withPermit withPermit]].
*
* Please switch to `withPermit`, as deprecated symbols will be
* dropped in the future.
*/
@deprecated("Renamed to: withPermit", "3.0.0")
def greenLight[A](fa: F[A]): F[A] = source.withPermit(fa)
}
private final class Impl[F[_]](provisioned: Long, ps: PaddingStrategy)(
implicit F: Concurrent[F] OrElse Async[F],
F0: Async[F],
cs: ContextShift[F])
extends GenericSemaphore[F[Unit]](provisioned, ps) {
val available: F[Long] = F0.delay(unsafeAvailable())
val count: F[Long] = F0.delay(unsafeCount())
def acquireN(n: Long): F[Unit] =
F0.suspend {
if (unsafeTryAcquireN(n))
F0.unit
else
F0.flatMap(make[Unit](unsafeAcquireN(n, _)))(bindFork)
}
def acquireAsyncN(n: Long): F[(F[Unit], CancelToken[F])] =
F0.delay {
// Happy path
if (unsafeTryAcquireN(n)) {
// This cannot be canceled in the context of `bracket`
(F0.unit, releaseN(n))
} else {
val p = Promise[Unit]()
val cancelToken = unsafeAsyncAcquireN(n, Callback.fromPromise(p))
val acquire = FutureLift.scalaToAsync(F0.pure(p.future))
// Extra async boundary needed for fairness
(F0.flatMap(acquire)(bindFork), cancelToken)
}
}
def tryAcquireN(n: Long): F[Boolean] =
F0.delay(unsafeTryAcquireN(n))
def releaseN(n: Long): F[Unit] =
F0.delay(unsafeReleaseN(n))
def awaitAvailable(n: Long): F[Unit] =
F0.flatMap(make[Unit](unsafeAwaitAvailable(n, _)))(bindFork)
protected def emptyCancelable: F[Unit] =
F0.unit
protected def makeCancelable(f: (Listener[Unit]) => Unit, p: Listener[Unit]): F[Unit] =
F0.delay(f(p))
private def make[A](k: (Either[Throwable, A] => Unit) => F[Unit]): F[A] =
F.fold(
F => F.cancelable(k),
F => AsyncUtils.cancelable(k)(F)
)
private[this] val bindFork: (Unit => F[Unit]) =
_ => cs.shift
}
}
| monifu/monix | monix-catnap/shared/src/main/scala/monix/catnap/Semaphore.scala | Scala | apache-2.0 | 11,943 |
/*
* Copyright 2015 cookie.ai
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package ai.cookie.spark.sql.sources.mnist
import java.io.{IOException, Closeable}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FSDataInputStream, Path}
private[mnist] class MnistLabelReader(path: Path)(implicit conf: Configuration)
extends Closeable with Iterator[Int]
{
private val stream: FSDataInputStream = {
val fs = path.getFileSystem(conf)
fs.open(path)
}
if(stream.readInt() != MnistLabelReader.HEADER_MAGIC)
throw new IOException("labels database file is unreadable")
val numLabels: Int = stream.readInt()
def seek(n: Int): Unit = stream.seek(MnistLabelReader.HEADER_SIZE + n)
override def hasNext: Boolean = stream.getPos < numLabels - MnistLabelReader.HEADER_SIZE
override def next(): Int = stream.readUnsignedByte()
def skip(): Unit = {
stream.skip(1)
}
def close(): Unit = {
stream.close()
}
def recordAt(pos: Long): Int = {
(pos - MnistImageReader.HEADER_SIZE).toInt
}
}
private object MnistLabelReader {
val HEADER_SIZE = 8
val HEADER_MAGIC = 0x00000801
}
private[mnist] class MnistImageReader(path: Path)(implicit conf: Configuration)
extends Closeable with Iterator[Array[Byte]]
{
private[mnist] val stream: FSDataInputStream = {
val fs = path.getFileSystem(conf)
fs.open(path)
}
if(stream.readInt() != MnistImageReader.HEADER_MAGIC)
throw new IOException("images database file is unreadable")
val numImages: Int = stream.readInt()
val numRows: Int = stream.readInt()
val numColumns: Int = stream.readInt()
private val buffer = new Array[Byte](numRows * numColumns)
def seek(n: Int): Unit = stream.seek(MnistImageReader.HEADER_SIZE + n * buffer.length)
def hasNext: Boolean = stream.getPos < numImages * buffer.length - MnistImageReader.HEADER_SIZE
def next(): Array[Byte] = {
stream.readFully(buffer)
buffer
}
def skip(): Unit = {
stream.skip(buffer.length)
}
def close(): Unit = {
stream.close()
}
def recordAt(pos: Long): Int = {
Math.ceil((pos - MnistImageReader.HEADER_SIZE) / buffer.length.toFloat).toInt
}
}
private object MnistImageReader {
val HEADER_SIZE = 16
val HEADER_MAGIC = 0x00000803
} | cookieai/cookie-datasets | src/main/scala/ai/cookie/spark/sql/sources/mnist/mnist.scala | Scala | apache-2.0 | 2,798 |
/*
* Copyright 2015 Webtrends (http://www.webtrends.com)
*
* See the LICENCE.txt file distributed with this work for additional
* information regarding copyright ownership.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.webtrends.harness.health
import akka.actor.{Actor, ActorRef}
import akka.pattern._
import akka.util.Timeout
import com.webtrends.harness.HarnessConstants
import com.webtrends.harness.logging.Logger
import com.webtrends.harness.service.messages.CheckHealth
import com.webtrends.harness.utils.ConfigUtil
import scala.concurrent.duration._
import scala.concurrent.{Future, Promise}
import scala.util.{Failure, Success, Try}
trait ActorHealth {
this: Actor =>
private val _log = Logger(this, context.system)
import context.dispatcher
implicit val checkTimeout:Timeout =
ConfigUtil.getDefaultTimeout(context.system.settings.config, HarnessConstants.KeyDefaultTimeout, Timeout(15 seconds))
def health:Receive = {
case CheckHealth =>
pipe(Try(checkHealth)
.recover({
case e: Exception =>
_log.error("Error fetching health", e)
Future.successful(HealthComponent(getClass.getSimpleName, ComponentState.CRITICAL,
"Exception when trying to check the health: %s".format(e.getMessage)))
}).get
) to sender()
}
/**
   * This is the health of the current object; by default it will be NORMAL.
   * In general this should be overridden to define the health of the current object.
   * For objects that simply manage other objects you shouldn't need to do anything
   * else, as the health of the child components will be handled by their own
   * CheckHealth function.
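   *
   * An override sketch (illustrative only; the component name and message are made up):
   * {{{
   *   override protected def getHealth: Future[HealthComponent] = Future.successful(
   *     HealthComponent("cache-manager", ComponentState.CRITICAL, "lost connection to the cache")
   *   )
   * }}}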
*
* @return
*/
protected def getHealth: Future[HealthComponent] = {
Future {
HealthComponent(self.path.toString, ComponentState.NORMAL, "Healthy")
}
}
/**
* This is the list of child actors that should be iterated and checked for health
* This can be overridden in cases where one does not want to check all children for
* health, or some children may not support health checks, or a child is using a push
   * based model of health reporting.
*
* @return
*/
protected def getHealthChildren: Iterable[ActorRef] = {
if (context != null) context.children else Iterable()
}
/**
* The actor has been asked to respond with some health information. It needs
* to implement this function and provide a list of components used in this service
* and their current state. By default the health check will simply run through all the
* children for the actor and get their health. Should be overridden for any custom
* behavior
* @return An instance of a health component
*/
def checkHealth: Future[HealthComponent] = {
val p = Promise[HealthComponent]()
getHealth.onComplete {
case Success(s) =>
val healthFutures = getHealthChildren map { ref =>
(ref ? CheckHealth).mapTo[HealthComponent] recover {
case _: AskTimeoutException =>
_log.warn(s"Health Check time out on child actor ${ref.path.toStringWithoutAddress}")
HealthComponent(getClass.getSimpleName, ComponentState.CRITICAL,
"Time out on child: %s".format(ref.path.toStringWithoutAddress))
case ex: Exception =>
HealthComponent(ref.path.name, ComponentState.CRITICAL, s"Failure to get health of child component. ${ex.getMessage}")
}
}
Future.sequence(healthFutures) onComplete {
case Failure(f) =>
_log.debug(f, "Failed to retrieve health of children objects")
p success HealthComponent(s.name, ComponentState.CRITICAL, s"Failure to get health of child components. ${f.getMessage}")
case Success(healths) =>
healths foreach { it => s.addComponent(it) }
p success s
}
case Failure(f) =>
_log.debug(f, "Failed to get health from component")
p success HealthComponent(self.path.toString, ComponentState.CRITICAL, f.getMessage)
}
p.future
}
}
| Webtrends/wookiee | wookiee-core/src/main/scala/com/webtrends/harness/health/ActorHealth.scala | Scala | apache-2.0 | 4,641 |
package io.iohk.ethereum.consensus.ethash
package validators
import io.iohk.ethereum.consensus.difficulty.DifficultyCalculator
import io.iohk.ethereum.consensus.validators.{BlockHeaderError, BlockHeaderValid, BlockHeaderValidatorSkeleton}
import io.iohk.ethereum.domain.BlockHeader
import io.iohk.ethereum.utils.BlockchainConfig
class MockedPowBlockHeaderValidator(blockchainConfig: BlockchainConfig)
extends BlockHeaderValidatorSkeleton(blockchainConfig) {
protected def difficulty: DifficultyCalculator = DifficultyCalculator(blockchainConfig)
def validateEvenMore(
blockHeader: BlockHeader,
parentHeader: BlockHeader
): Either[BlockHeaderError, BlockHeaderValid] =
Right(BlockHeaderValid)
}
| input-output-hk/etc-client | src/main/scala/io/iohk/ethereum/consensus/ethash/validators/MockedPowBlockHeaderValidator.scala | Scala | mit | 725 |
/**
* Copyright (C) 2012 Orbeon, Inc.
*
* This program is free software; you can redistribute it and/or modify it under the terms of the
* GNU Lesser General Public License as published by the Free Software Foundation; either version
* 2.1 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Lesser General Public License for more details.
*
* The full text of the license is available at http://www.gnu.org/copyleft/lesser.html
*/
package org.orbeon.oxf.xforms.function.xxforms
import collection.JavaConverters._
import org.orbeon.oxf.xforms.control.controls.XFormsSwitchControl
import org.orbeon.oxf.xforms.function.{FunctionSupport, XFormsFunction}
import org.orbeon.saxon.expr.XPathContext
import org.orbeon.saxon.om.{SequenceIterator, EmptyIterator}
import org.orbeon.oxf.util.ScalaUtils._
/**
* Extension xxf:cases($switch-id as xs:string) as xs:string* function.
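 *
 * Usage sketch (the control ids below are made up): `xxf:cases('document-switch')` returns
 * the ids of the xf:case elements of that switch, for example ("edit-case", "view-case").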
*/
class XXFormsCases extends XFormsFunction with FunctionSupport {
override def iterate(xpathContext: XPathContext): SequenceIterator =
relevantControl(0)(xpathContext) flatMap
collectByErasedType[XFormsSwitchControl] map
(control ⇒ asIterator(control.getChildrenCases.asScala map (_.getId))) getOrElse EmptyIterator.getInstance
}
| evlist/orbeon-forms | src/main/scala/org/orbeon/oxf/xforms/function/xxforms/XXFormsCases.scala | Scala | lgpl-2.1 | 1,467 |
/*
Copyright 2012 Twitter, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package com.twitter.parrot.processor
import collection.JavaConverters._
import com.twitter.ostrich.stats.Stats
import com.twitter.parrot.server.ParrotRequest
import com.twitter.parrot.util.Uri
import java.util.{List => JList}
trait Record {
def uri: Uri
def rawLine: String
def timestamp: Long
}
/**
* parses lines into records, which are mapped to results
*/
trait RecordParser[Res] {
def apply(lines: JList[String]): JList[Res] = this(lines.asScala).asJava
def apply(lines: Seq[String]): Seq[Res]
  def splitWords(l: String): Array[String] = l.toString.trim.split("[ \\t]+")
}
| twitter/iago | src/main/scala/com/twitter/parrot/processor/RecordParser.scala | Scala | apache-2.0 | 1,152 |
/*
* Copyright 2019 Spotify AB.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package com.spotify.scio.jmh
import java.io.{InputStream, OutputStream}
import java.util.concurrent.TimeUnit
import com.esotericsoftware.kryo.io.{Input, Output}
import com.esotericsoftware.kryo.{Kryo, Serializer}
import com.spotify.scio.coders._
import com.spotify.scio.schemas._
import com.twitter.chill.IKryoRegistrar
import org.apache.beam.sdk.coders.{
AtomicCoder,
ByteArrayCoder,
Coder => BCoder,
SerializableCoder,
StringUtf8Coder
}
import org.apache.beam.sdk.util.CoderUtils
import org.apache.beam.sdk.schemas.SchemaCoder
import org.apache.beam.sdk.values.TypeDescriptor
import org.openjdk.jmh.annotations._
final case class UserId(bytes: Array[Byte])
object UserId {
implicit def coderUserId: Coder[UserId] = Coder.gen[UserId]
}
final case class User(id: UserId, username: String, email: String)
final case class SpecializedUser(id: UserId, username: String, email: String)
final case class SpecializedUserForDerived(id: UserId, username: String, email: String)
@BenchmarkMode(Array(Mode.AverageTime))
@OutputTimeUnit(TimeUnit.NANOSECONDS)
@State(Scope.Thread)
class CoderBenchmark {
// please don't use arrays outside of benchmarks
val userId: UserId = UserId(Array[Byte](1, 2, 3, 4))
// use standard coders
val user: User = User(userId, "johndoe", "[email protected]")
// use hand-optimized coders
val specializedUser: SpecializedUser =
SpecializedUser(userId, "johndoe", "[email protected]")
val specializedUserForDerived: SpecializedUserForDerived =
SpecializedUserForDerived(userId, "johndoe", "[email protected]")
val javaUser =
new j.User(
new j.UserId(Array[Byte](1, 2, 3, 4).map(x => x: java.lang.Byte)),
"johndoe",
"[email protected]"
)
val tenTimes: List[SpecializedUserForDerived] = List.fill(10)(specializedUserForDerived)
val kryoCoder = new KryoAtomicCoder[User](KryoOptions())
val kryoJavaCoder = new KryoAtomicCoder[j.User](KryoOptions())
val javaCoder: SerializableCoder[User] = SerializableCoder.of(classOf[User])
val specializedCoder = new SpecializedCoder
val specializedKryoCoder = new KryoAtomicCoder[SpecializedUser](KryoOptions())
val derivedCoder: BCoder[SpecializedUserForDerived] =
CoderMaterializer.beamWithDefault(Coder[SpecializedUserForDerived])
val derivedListCoder: BCoder[List[SpecializedUserForDerived]] =
CoderMaterializer.beamWithDefault(Coder[List[SpecializedUserForDerived]])
val specializedMapKryoCoder = new KryoAtomicCoder[Map[String, Long]](KryoOptions())
val derivedMapCoder: BCoder[Map[String, Long]] =
CoderMaterializer.beamWithDefault(Coder[Map[String, Long]])
val mapExample: Map[String, Long] = (1 to 1000).map(x => (s"stringvalue$x", x.toLong)).toMap
val specializedStringListKryoCoder = new KryoAtomicCoder[List[String]](KryoOptions())
val derivedStringListCoder: BCoder[List[String]] =
CoderMaterializer.beamWithDefault(Coder[List[String]])
val stringListExample: List[String] = (1 to 1000).map(x => s"stringvalue$x").toList
val derivedTuple3Coder: BCoder[(Int, Int, Int)] =
CoderMaterializer.beamWithDefault(Coder[(Int, Int, Int)])
val tuple3Example: (Int, Int, Int) = (1, 10, 100)
val derivedTuple4Coder: BCoder[(Int, Int, Int, Int)] =
CoderMaterializer.beamWithDefault(Coder[(Int, Int, Int, Int)])
val tuple4Example: (Int, Int, Int, Int) = (1, 10, 100, 1000)
@Benchmark
def tuple3Encode(o: SerializedOutputSize): Array[Byte] =
Counter.track(o) {
CoderUtils.encodeToByteArray(derivedTuple3Coder, tuple3Example)
}
@Benchmark
def tuple4Encode(o: SerializedOutputSize): Array[Byte] =
Counter.track(o) {
CoderUtils.encodeToByteArray(derivedTuple4Coder, tuple4Example)
}
@Benchmark
def kryoEncode(o: SerializedOutputSize): Array[Byte] =
Counter.track(o) {
CoderUtils.encodeToByteArray(kryoCoder, user)
}
@Benchmark
def javaEncode(o: SerializedOutputSize): Array[Byte] =
Counter.track(o) {
CoderUtils.encodeToByteArray(javaCoder, user)
}
@Benchmark
def customEncode(o: SerializedOutputSize): Array[Byte] =
Counter.track(o) {
CoderUtils.encodeToByteArray(specializedCoder, specializedUser)
}
@Benchmark
def customKryoEncode(o: SerializedOutputSize): Array[Byte] =
Counter.track(o) {
CoderUtils.encodeToByteArray(specializedKryoCoder, specializedUser)
}
@Benchmark
def derivedEncode(o: SerializedOutputSize): Array[Byte] =
Counter.track(o) {
CoderUtils.encodeToByteArray(derivedCoder, specializedUserForDerived)
}
@Benchmark
def derivedListEncode(o: SerializedOutputSize): Array[Byte] =
Counter.track(o) {
CoderUtils.encodeToByteArray(derivedListCoder, tenTimes)
}
@Benchmark
def kryoMapEncode(o: SerializedOutputSize): Array[Byte] =
Counter.track(o) {
CoderUtils.encodeToByteArray(specializedMapKryoCoder, mapExample)
}
@Benchmark
def derivedMapEncode(o: SerializedOutputSize): Array[Byte] =
Counter.track(o) {
CoderUtils.encodeToByteArray(derivedMapCoder, mapExample)
}
@Benchmark
def kryoStringListEncode(o: SerializedOutputSize): Array[Byte] =
Counter.track(o) {
CoderUtils.encodeToByteArray(specializedStringListKryoCoder, stringListExample)
}
@Benchmark
def derivedStringListEncode(o: SerializedOutputSize): Array[Byte] =
Counter.track(o) {
CoderUtils.encodeToByteArray(derivedStringListCoder, stringListExample)
}
val kryoEncoded: Array[Byte] = kryoEncode(new SerializedOutputSize)
val javaEncoded: Array[Byte] = javaEncode(new SerializedOutputSize)
val customEncoded: Array[Byte] = customEncode(new SerializedOutputSize)
val customKryoEncoded: Array[Byte] = customKryoEncode(new SerializedOutputSize)
val derivedEncoded: Array[Byte] = derivedEncode(new SerializedOutputSize)
val derivedListEncoded: Array[Byte] = derivedListEncode(new SerializedOutputSize)
val kryoMapEncoded: Array[Byte] = kryoMapEncode(new SerializedOutputSize)
val derivedMapEncoded: Array[Byte] = derivedMapEncode(new SerializedOutputSize)
val kryoStringListEncoded: Array[Byte] = kryoStringListEncode(new SerializedOutputSize)
val derivedStringListEncoded: Array[Byte] = derivedStringListEncode(new SerializedOutputSize)
val tuple3Encoded: Array[Byte] = tuple3Encode(new SerializedOutputSize)
val tuple4Encoded: Array[Byte] = tuple4Encode(new SerializedOutputSize)
@Benchmark
def tuple3Decode: (Int, Int, Int) =
CoderUtils.decodeFromByteArray(derivedTuple3Coder, tuple3Encoded)
@Benchmark
def tuple4Decode: (Int, Int, Int, Int) =
CoderUtils.decodeFromByteArray(derivedTuple4Coder, tuple4Encoded)
@Benchmark
def kryoDecode: User =
CoderUtils.decodeFromByteArray(kryoCoder, kryoEncoded)
@Benchmark
def javaDecode: User =
CoderUtils.decodeFromByteArray(javaCoder, javaEncoded)
@Benchmark
def customDecode: SpecializedUser =
CoderUtils.decodeFromByteArray(specializedCoder, customEncoded)
@Benchmark
def customKryoDecode: SpecializedUser =
CoderUtils.decodeFromByteArray(specializedKryoCoder, customKryoEncoded)
@Benchmark
def derivedDecode: SpecializedUserForDerived =
CoderUtils.decodeFromByteArray(derivedCoder, derivedEncoded)
@Benchmark
def derivedListDecode: List[SpecializedUserForDerived] =
CoderUtils.decodeFromByteArray(derivedListCoder, derivedListEncoded)
@Benchmark
def kryoMapDecode: Map[String, Long] =
CoderUtils.decodeFromByteArray(specializedMapKryoCoder, kryoMapEncoded)
@Benchmark
def derivedMapDecode: Map[String, Long] =
CoderUtils.decodeFromByteArray(derivedMapCoder, derivedMapEncoded)
@Benchmark
def kryoStringListDecode: Seq[String] =
CoderUtils.decodeFromByteArray(specializedStringListKryoCoder, kryoStringListEncoded)
@Benchmark
def derivedStringListDecode: Seq[String] =
CoderUtils.decodeFromByteArray(derivedStringListCoder, derivedStringListEncoded)
// Compare the performance of Schema Coders vs compile time derived Coder. Run with:
// jmh:run -f1 -wi 10 -i 20 com.spotify.scio.jmh.CoderBenchmark.(derived|schemaCoder)(De|En)code
val (specializedUserSchema, specializedTo, specializedFrom) =
SchemaMaterializer.materialize(
Schema[SpecializedUserForDerived]
)
val specializedSchemaCoder: BCoder[SpecializedUserForDerived] =
SchemaCoder.of(
specializedUserSchema,
TypeDescriptor.of(classOf[SpecializedUserForDerived]),
specializedTo,
specializedFrom
)
@Benchmark
def schemaCoderEncode(o: SerializedOutputSize): Array[Byte] =
Counter.track(o) {
CoderUtils.encodeToByteArray(specializedSchemaCoder, specializedUserForDerived)
}
val shemaEncoded: Array[Byte] = schemaCoderEncode(new SerializedOutputSize)
@Benchmark
def schemaCoderDecode: SpecializedUserForDerived =
CoderUtils.decodeFromByteArray(specializedSchemaCoder, shemaEncoded)
// Compare the performance of Schema Coders vs Kryo coder for java class run with:
// jmh:run -f1 -wi 10 -i 20 com.spotify.scio.jmh.CoderBenchmark.java(Kryo|Schema)CoderEncode
val (javaUserSchema, javaTo, javaFrom) =
SchemaMaterializer.materialize(
Schema[j.User]
)
val javaSchemaCoder: BCoder[j.User] =
SchemaCoder.of(javaUserSchema, TypeDescriptor.of(classOf[j.User]), javaTo, javaFrom)
@Benchmark
def javaSchemaCoderEncode(o: SerializedOutputSize): Array[Byte] =
Counter.track(o) {
CoderUtils.encodeToByteArray(javaSchemaCoder, javaUser)
}
val javaShemaEncoded: Array[Byte] = javaSchemaCoderEncode(new SerializedOutputSize)
@Benchmark
def javaSchemaCoderDecode: j.User =
CoderUtils.decodeFromByteArray(javaSchemaCoder, javaShemaEncoded)
@Benchmark
def javaKryoCoderEncode(o: SerializedOutputSize): Array[Byte] =
Counter.track(o) {
CoderUtils.encodeToByteArray(kryoJavaCoder, javaUser)
}
val javaKryoEncoded: Array[Byte] = javaKryoCoderEncode(new SerializedOutputSize)
@Benchmark
def javaKryoCoderDecode: j.User =
CoderUtils.decodeFromByteArray(kryoJavaCoder, javaKryoEncoded)
}
/** Counter to track the size of the serialized output */
@State(Scope.Thread)
@AuxCounters(AuxCounters.Type.EVENTS)
class SerializedOutputSize(var outputSize: Int) {
def this() = this(0)
}
object Counter {
def track[A](o: SerializedOutputSize)(f: => Array[Byte]): Array[Byte] = {
val out = f
if (o.outputSize == 0)
o.outputSize = out.length
out
}
}
final class SpecializedCoder extends AtomicCoder[SpecializedUser] {
def encode(value: SpecializedUser, os: OutputStream): Unit = {
ByteArrayCoder.of().encode(value.id.bytes, os)
StringUtf8Coder.of().encode(value.username, os)
StringUtf8Coder.of().encode(value.email, os)
}
def decode(is: InputStream): SpecializedUser =
SpecializedUser(
UserId(ByteArrayCoder.of().decode(is)),
StringUtf8Coder.of().decode(is),
StringUtf8Coder.of().decode(is)
)
}
final class SpecializedKryoSerializer extends Serializer[SpecializedUser] {
def read(kryo: Kryo, input: Input, tpe: Class[SpecializedUser]): SpecializedUser = {
val len = input.readInt()
val array = new Array[Byte](len)
input.readBytes(array)
val username = input.readString()
val email = input.readString()
SpecializedUser(UserId(array), username, email)
}
def write(kryo: Kryo, output: Output, obj: SpecializedUser): Unit = {
output.writeInt(obj.id.bytes.length)
output.writeBytes(obj.id.bytes)
output.writeString(obj.username)
output.writeString(obj.email)
}
}
@KryoRegistrar
class KryoRegistrar extends IKryoRegistrar {
def apply(k: Kryo): Unit = {
k.register(classOf[User])
k.register(classOf[SpecializedUser], new SpecializedKryoSerializer)
k.register(classOf[UserId])
k.register(classOf[Array[Byte]])
k.register(classOf[Array[java.lang.Byte]])
k.register(classOf[j.UserId])
k.register(classOf[j.User])
k.setRegistrationRequired(true)
}
}
| spotify/scio | scio-jmh/src/test/scala/com/spotify/scio/jmh/CoderBenchmark.scala | Scala | apache-2.0 | 12,561 |
package com.geeksville.andropilot.gui
import android.os.Bundle
import android.widget.ArrayAdapter
import scala.collection.JavaConverters._
import com.geeksville.util.ThreadTools._
import android.support.v4.app.Fragment
import android.view.LayoutInflater
import android.view.ViewGroup
import com.ridemission.scandroid.AndroidUtil._
import com.geeksville.andropilot.TypedResource._
import com.geeksville.andropilot.TR
import android.widget.ArrayAdapter
import com.geeksville.flight._
import java.util.LinkedList
import com.geeksville.andropilot.R
import android.view.View
import com.ridemission.scandroid.ObservableAdapter
import com.geeksville.flight.StatusText
import android.widget.BaseAdapter
import com.geeksville.andropilot.AndropilotPrefs
import com.geeksville.akka.InstrumentedActor
/**
* Common behavior for both the overview and floating instruments
*/
class VehicleInfoFragment(layoutId: Int) extends LayoutFragment(layoutId) with AndroServiceFragment with AndropilotPrefs {
protected final def altView = getView.findView(TR.altitude)
protected final def airspeedView = getView.findView(TR.airspeed)
protected final def batteryView = getView.findView(TR.battery_volt)
protected final def numSatView = getView.findView(TR.gps_numsats)
protected final def rssiLocalView = getView.findView(TR.rssi_local)
override def onVehicleReceive: InstrumentedActor.Receiver = {
case l: Location =>
//debug("Handling location: " + l)
handler.post { () =>
if (getView != null) {
myVehicle.foreach { v =>
onLocationUpdate(v, l)
}
}
}
case MsgSysStatusChanged =>
//info("Handling status changed")
handler.post { () =>
if (getView != null) {
myVehicle.foreach { v =>
onStatusUpdate(v)
}
}
}
}
protected def showRssi(v: VehicleModel) {
v.radio.foreach { n =>
val local = n.rssi - n.noise
val rem = n.remrssi - n.remnoise
rssiLocalView.setText(local.toString + "/" + rem.toString)
}
}
protected def showGps(v: VehicleModel) {
val numSats = v.numSats.getOrElse("?")
val hdop = v.hdop.getOrElse("?")
numSatView.setText("%s / %s".format(numSats, hdop))
}
/**
* called in gui thread
*/
protected def onStatusUpdate(v: VehicleModel) {
showRssi(v)
//info("showing battery voltage: " + v.batteryVoltage)
v.batteryVoltage.foreach { n =>
val socStr = v.batteryPercent.map { pct => " %d%%".format((pct * 100).toInt) }.getOrElse("")
batteryView.setText("%.1f".format(n) + "V " + socStr)
}
}
protected def onLocationUpdate(v: VehicleModel, l: Location) {
altView.setText("%.1f".format(v.bestAltitude) + " m")
v.vfrHud.foreach { hud =>
airspeedView.setText("%.1f".format(hud.airspeed) + " m/s")
}
showGps(v)
}
}
class MiniOverviewFragment extends VehicleInfoFragment(R.layout.mini_overview) {
override def onResume() {
super.onResume()
// Always show the panel while developing
if (developerMode && !isVehicleConnected) {
showMe()
altView.setText("213 m")
batteryView.setText("11.1V (87%)")
airspeedView.setText("7.8 m/s")
numSatView.setText("1.9")
rssiLocalView.setText("103")
}
}
/// We show ourselves once we get our first vehicle update
private def showMe() {
getView.findView(TR.mini_overview).setVisibility(View.VISIBLE)
}
override protected def showRssi(v: VehicleModel) {
showMe()
v.radio.foreach { n =>
val local = n.rssi - n.noise
val rem = n.remrssi - n.remnoise
val m = math.min(local, rem)
rssiLocalView.setText(rem.toString)
}
}
override protected def showGps(v: VehicleModel) {
showMe()
val hdop = v.hdop.map("%.1f".format(_)).getOrElse("?")
numSatView.setText(hdop)
}
}
class OverviewFragment extends VehicleInfoFragment(R.layout.vehicle_overview) {
private def latView = getView.findView(TR.latitude)
private def lonView = getView.findView(TR.longitude)
private def groundspeedView = getView.findView(TR.groundspeed)
private def devRowView = getView.findView(TR.dev_row)
private def devInfoView = getView.findView(TR.dev_info)
override def onResume() = {
super.onResume()
devRowView.setVisibility(if (developerMode) View.VISIBLE else View.GONE)
}
private def showDevInfo() {
// Show current state
myVehicle.foreach { v =>
val stateName = v.fsm.getLastState.getName.split('.')(1)
val status = v.systemStatus.getOrElse(-1)
Option(devInfoView).foreach(_.setText(s"$stateName/$status"))
}
}
/**
* called in gui thread
*/
override def onStatusUpdate(v: VehicleModel) {
super.onStatusUpdate(v)
// Show current state
showDevInfo()
}
override def onLocationUpdate(v: VehicleModel, l: Location) {
super.onLocationUpdate(v, l)
    val degSymbol = "\u00B0"
latView.setText("%.4f".format(l.lat) + degSymbol)
lonView.setText("%.4f".format(l.lon) + degSymbol)
v.vfrHud.foreach { hud =>
groundspeedView.setText("%.1f".format(hud.groundspeed) + " m/s")
}
}
override def onVehicleReceive = ({
case MsgFSMChanged(_) =>
handler.post(showDevInfo _)
}: PartialFunction[Any, Unit]).orElse(super.onVehicleReceive)
}
| geeksville/arduleader | andropilot/src/main/scala/com/geeksville/andropilot/gui/OverviewFragment.scala | Scala | gpl-3.0 | 5,328 |
/*
* Copyright 2016 Dennis Vriend
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.github.dnvriend.streams.flow
import akka.stream.scaladsl._
import akka.{ Done, NotUsed }
import com.github.dnvriend.streams.TestSpec
import scala.concurrent.Future
class RunnableFlowTest extends TestSpec {
/**
*
* It is possible to attach a Flow to a Source resulting in a composite source,
* and it is also possible to prepend a Flow to a Sink to get a new sink.
*
* After a stream is properly terminated by having both a source and a sink, it will be
* represented by the RunnableFlow type, indicating that it is ready to be executed.
*
* It is important to remember that even after constructing the RunnableFlow by connecting
* all the source, sink and different processing stages, no data will flow through it until
* it is 'materialized'.
*
* Materialization is the process of allocating all resources needed to run the computation
* described by a Flow (in Akka Streams this will often involve starting up Actors).
*
* Thanks to Flows being simply a description of the processing pipeline they are immutable, thread-safe,
* and freely shareable, which means that it is for example safe to share and send them between actors,
* to have one actor prepare the work, and then have it be materialized at some completely different place in the code.
*/
"RunnableFlow" should "be defined" in {
val source: Source[Int, NotUsed] = Source(1 to 10)
val sink: Sink[Int, Future[Int]] = Sink.fold[Int, Int](0)(_ + _)
    // connect the Source to the Sink, obtaining a RunnableGraph (called RunnableFlow in
    // older API versions), which is a model of the processing pipeline
val runnable: RunnableGraph[Future[Int]] = source.toMat(sink)(Keep.right)
    // materialize the flow (convert the RunnableGraph model to a runtime representation
    // using the materializer, which creates a network of actors exhibiting the
    // behavior defined by the model) and get the materialized value of the fold sink
val sum: Future[Int] = runnable.run()
// create a new processing pipeline, run the network with the result from the future
Source.fromFuture(sum).map(_ * 2).runWith(Sink.foreach(println))
sum.futureValue shouldBe 55
}
/**
* After running (materializing) the RunnableFlow[T] we get back the materialized value of type T.
*
* Every stream processing stage can produce a materialized value, and it is the responsibility of
* the user to combine them to a new type.
*
* In the above example we used 'toMat' to indicate that we want to 'transform the materialized
* value of the source and sink', and we used the convenience function Keep.right to say that we are
* only interested in the materialized value of the sink.
*
* In our example the FoldSink materializes a value of type Future which will represent the result
* of the folding process over the stream.
*
* In general, a stream can expose multiple materialized values, but it is quite common to be interested
* in only the value of the Source or the Sink in the stream.
*
* For this reason there is a convenience method called runWith() available for Sink, Source or Flow requiring,
* respectively, a supplied Source (in order to run a Sink), a Sink (in order to run a Source) or both a Source
* and a Sink (in order to run a Flow, since it has neither attached yet).
*/
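  /**
   * Editorial sketch (not part of the original suite): the same fold expressed with the
   * runWith convenience method, which keeps only the materialized value of the supplied sink.
   */
  "runWith" should "materialize the value of the supplied sink" in {
    val sum: Future[Int] = Source(1 to 10).runWith(Sink.fold[Int, Int](0)(_ + _))
    sum.futureValue shouldBe 55
  }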
/**
* Defining sources, sinks and flows
*
* The objects Source and Sink define various ways to create sources and sinks of elements.
*
* The following examples show some of the most useful constructs (refer to the API documentation for more details):
*/
"Sources" should "be created" in {
// Create a source from an Iterable
val s1: Source[Int, NotUsed] = Source(List(1, 2, 3))
// Create a source from a Future
val s2: Source[String, NotUsed] = Source.fromFuture(Future.successful("Hello Streams!"))
// Create a source from a single element
val s3: Source[String, NotUsed] = Source.single("only one element")
// an empty source
val s4: Source[String, NotUsed] = Source.empty[String]
}
"Sinks" should "be created" in {
// Sink that folds over the stream and returns a Future
// of the final result as its materialized value
val s1: Sink[Int, Future[Int]] = Sink.fold[Int, Int](0)(_ + _)
// Sink that returns a Future as its materialized value,
// containing the first element of the stream
val s2: Sink[Int, Future[Int]] = Sink.head[Int]
// A Sink that consumes a stream without doing anything with the elements
val s3: Sink[Any, Future[Done]] = Sink.ignore
// A Sink that executes a side-effecting call for every element of the stream
val s4: Sink[String, Future[Done]] = Sink.foreach[String](println(_))
}
/**
* There are various ways to wire up different parts of a stream, the following examples
* show some of the available options:
*/
"Streams" should "be wired up from different parts" in {
// Explicitly creating and wiring up a Source, Sink and Flow
    // the Sink is of type Sink[Int, Future[Done]]
val runnable: RunnableGraph[NotUsed] =
Source(1 to 6)
.via(
Flow[Int].map(_ * 2)
)
.to(
Sink.foreach(println(_))
)
// Starting from a Source
val source = Source(1 to 6).map(_ * 2)
val runnable2: RunnableGraph[NotUsed] =
source
.to(Sink.foreach(println(_)))
// Starting from a Sink
val sink: Sink[Int, NotUsed] = Flow[Int].map(_ * 2).to(Sink.foreach(println(_)))
val runnable3: RunnableGraph[NotUsed] =
Source(1 to 6)
.to(sink)
}
}
| dnvriend/intro-to-akka-streams | src/test/scala/com/github/dnvriend/streams/flow/RunnableFlowTest.scala | Scala | apache-2.0 | 6,230 |
import org.scalatra.LifeCycle
import javax.servlet.ServletContext
import com.typesafe.scalalogging.LazyLogging
import slick.driver.H2Driver.api._
import akka.actor.ActorSystem
import com.github.mtodo.auth.{Signup, Signin, Users}
class ScalatraBootstrap extends LifeCycle with LazyLogging {
val users = TableQuery[Users]
val system = ActorSystem("Auth")
override def init(context: ServletContext) = {
val db = Database.forConfig("h2mem1")
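    // Note: db.run returns a Future, so the schema is created asynchronously and is not awaited here.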
db.run(users.schema.create)
context mount (new Signup(db, system), "/auth/signup/*")
context mount (new Signin(db), "/auth/signin/*")
}
}
| mtodo/auth | src/main/scala/com/github/mtodo/auth/ScalatraBootstrap.scala | Scala | mit | 608 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.catalyst.optimizer
import org.apache.spark.sql.catalyst.SimpleCatalystConf
import org.apache.spark.sql.catalyst.analysis.{Analyzer, EmptyFunctionRegistry}
import org.apache.spark.sql.catalyst.catalog.{InMemoryCatalog, SessionCatalog}
import org.apache.spark.sql.catalyst.dsl.expressions._
import org.apache.spark.sql.catalyst.dsl.plans._
import org.apache.spark.sql.catalyst.expressions.{If, Literal}
import org.apache.spark.sql.catalyst.expressions.aggregate.{CollectSet, Count}
import org.apache.spark.sql.catalyst.plans.PlanTest
import org.apache.spark.sql.catalyst.plans.logical.{Aggregate, Expand, LocalRelation, LogicalPlan}
import org.apache.spark.sql.types.{IntegerType, StringType}
class RewriteDistinctAggregatesSuite extends PlanTest {
val conf = SimpleCatalystConf(caseSensitiveAnalysis = false, groupByOrdinal = false)
val catalog = new SessionCatalog(new InMemoryCatalog, EmptyFunctionRegistry, conf)
val analyzer = new Analyzer(catalog, conf)
val nullInt = Literal(null, IntegerType)
val nullString = Literal(null, StringType)
val testRelation = LocalRelation('a.string, 'b.string, 'c.string, 'd.string, 'e.int)
private def checkRewrite(rewrite: LogicalPlan): Unit = rewrite match {
case Aggregate(_, _, Aggregate(_, _, _: Expand)) =>
case _ => fail(s"Plan is not rewritten:\\n$rewrite")
}
test("single distinct group") {
val input = testRelation
.groupBy('a)(countDistinct('e))
.analyze
val rewrite = RewriteDistinctAggregates(input)
comparePlans(input, rewrite)
}
test("single distinct group with partial aggregates") {
val input = testRelation
.groupBy('a, 'd)(
countDistinct('e, 'c).as('agg1),
max('b).as('agg2))
.analyze
val rewrite = RewriteDistinctAggregates(input)
comparePlans(input, rewrite)
}
test("single distinct group with non-partial aggregates") {
val input = testRelation
.groupBy('a, 'd)(
countDistinct('e, 'c).as('agg1),
CollectSet('b).toAggregateExpression().as('agg2))
.analyze
checkRewrite(RewriteDistinctAggregates(input))
}
test("multiple distinct groups") {
val input = testRelation
.groupBy('a)(countDistinct('b, 'c), countDistinct('d))
.analyze
checkRewrite(RewriteDistinctAggregates(input))
}
test("multiple distinct groups with partial aggregates") {
val input = testRelation
.groupBy('a)(countDistinct('b, 'c), countDistinct('d), sum('e))
.analyze
checkRewrite(RewriteDistinctAggregates(input))
}
test("multiple distinct groups with non-partial aggregates") {
val input = testRelation
.groupBy('a)(
countDistinct('b, 'c),
countDistinct('d),
CollectSet('b).toAggregateExpression())
.analyze
checkRewrite(RewriteDistinctAggregates(input))
}
}
| Panos-Bletsos/spark-cost-model-optimizer | sql/catalyst/src/test/scala/org/apache/spark/sql/catalyst/optimizer/RewriteDistinctAggregatesSuite.scala | Scala | apache-2.0 | 3,657 |
/**
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package kafka.server.checkpoints
import java.io.IOException
import kafka.utils.{Logging, TestUtils}
import org.apache.kafka.common.TopicPartition
import org.junit.Assert._
import org.junit.Test
import org.scalatest.junit.JUnitSuite
import scala.collection.Map
class OffsetCheckpointFileTest extends JUnitSuite with Logging {
@Test
def shouldPersistAndOverwriteAndReloadFile(): Unit = {
val checkpoint = new OffsetCheckpointFile(TestUtils.tempFile())
//Given
val offsets = Map(new TopicPartition("foo", 1) -> 5L, new TopicPartition("bar", 2) -> 10L)
//When
checkpoint.write(offsets)
//Then
assertEquals(offsets, checkpoint.read())
//Given overwrite
val offsets2 = Map(new TopicPartition("foo", 2) -> 15L, new TopicPartition("bar", 3) -> 20L)
//When
checkpoint.write(offsets2)
//Then
assertEquals(offsets2, checkpoint.read())
}
@Test
def shouldHandleMultipleLines(): Unit = {
val checkpoint = new OffsetCheckpointFile(TestUtils.tempFile())
//Given
val offsets = Map(
new TopicPartition("foo", 1) -> 5L, new TopicPartition("bar", 6) -> 10L,
new TopicPartition("foo", 2) -> 5L, new TopicPartition("bar", 7) -> 10L,
new TopicPartition("foo", 3) -> 5L, new TopicPartition("bar", 8) -> 10L,
new TopicPartition("foo", 4) -> 5L, new TopicPartition("bar", 9) -> 10L,
new TopicPartition("foo", 5) -> 5L, new TopicPartition("bar", 10) -> 10L
)
//When
checkpoint.write(offsets)
//Then
assertEquals(offsets, checkpoint.read())
}
@Test
def shouldReturnEmptyMapForEmptyFile(): Unit = {
//When
val checkpoint = new OffsetCheckpointFile(TestUtils.tempFile())
//Then
assertEquals(Map(), checkpoint.read())
//When
checkpoint.write(Map())
//Then
assertEquals(Map(), checkpoint.read())
}
@Test(expected = classOf[IOException])
def shouldThrowIfVersionIsNotRecognised(): Unit = {
val checkpointFile = new CheckpointFile(TestUtils.tempFile(), OffsetCheckpointFile.CurrentVersion + 1,
OffsetCheckpointFile.Formatter)
checkpointFile.write(Seq(new TopicPartition("foo", 5) -> 10L))
new OffsetCheckpointFile(checkpointFile.file).read()
}
}
| wangcy6/storm_app | frame/kafka-0.11.0/kafka-0.11.0.1-src/core/src/test/scala/unit/kafka/server/checkpoints/OffsetCheckpointFileTest.scala | Scala | apache-2.0 | 3,038 |
package hr.element.beepo.client
package xml
import sms._
import hr.element.etb.Pimps._
trait SmsXMLConverter extends XMLConverter { this: Sms =>
def toXml =
<SmsIptRequest>
<phone>{ phones.map{ p =>
<string>{ p }</string>
}}</phone>
<messageText>{ this.body }</messageText>
</SmsIptRequest>
}
| element-doo/beepo | code/scala/client/src/main/scala/hr/element/beepo/client/xml/SMSXMLConverter.scala | Scala | bsd-3-clause | 308 |
package models.gift
import enumeratum.{Enum, EnumEntry, PlayJsonEnum}
import models.user._
import org.joda.time.DateTime
case class Comment(
id: Option[Long] = None,
objectid: Long,
user: User,
creationDate: DateTime = DateTime.now,
category: Comment.Category,
content: String)
object Comment {
sealed trait Category extends EnumEntry
object Category extends Enum[Category] with PlayJsonEnum[Category] {
val values = findValues
case object Gift extends Category
def id(category: Category): Int = {
category match {
case Gift => 1
}
}
def fromId(id: Int) = {
id match {
case 1 => Gift
}
}
}
} | epot/Gifter | app/models/gift/Comment.scala | Scala | mit | 708 |
package build
import org.scalajs.linker.interface._
object TestSuiteLinkerOptions {
def semantics(s: Semantics): Semantics = {
import Semantics.RuntimeClassNameMapper
s.withRuntimeClassNameMapper(
RuntimeClassNameMapper.keepAll().andThen(
RuntimeClassNameMapper.regexReplace(
raw"""^org\\.scalajs\\.testsuite\\.compiler\\.ReflectionTest\\$$RenamedTestClass$$""".r,
"renamed.test.Class")
).andThen(
RuntimeClassNameMapper.regexReplace(
raw"""^org\\.scalajs\\.testsuite\\.compiler\\.ReflectionTest\\$$Prefix""".r,
"renamed.test.byprefix.")
).andThen(
RuntimeClassNameMapper.regexReplace(
raw"""^org\\.scalajs\\.testsuite\\.compiler\\.ReflectionTest\\$$OtherPrefix""".r,
"renamed.test.byotherprefix.")
)
)
}
def moduleInitializers: List[ModuleInitializer] = {
val module = "org.scalajs.testsuite.compiler.ModuleInitializers"
List(
ModuleInitializer.mainMethod(module, "mainNoArgs"),
ModuleInitializer.mainMethodWithArgs(module, "mainWithArgs"),
ModuleInitializer.mainMethodWithArgs(module, "mainWithArgs", List("foo", "bar")),
ModuleInitializer.mainMethod(module + "$NoLinkedClass", "main"),
ModuleInitializer.mainMethod(module + "$WithLinkedClass", "main")
)
}
}
| scala-js/scala-js | project/TestSuiteLinkerOptions.scala | Scala | apache-2.0 | 1,387 |
// Databricks notebook source exported at Sun, 19 Jun 2016 02:59:22 UTC
// MAGIC %md
// MAGIC
// MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
// MAGIC
// MAGIC
// MAGIC ### prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand)
// MAGIC
// MAGIC *supported by* [](https://databricks.com/)
// MAGIC and
// MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome)
// COMMAND ----------
// MAGIC %md
// MAGIC This is an elaboration of the [Apache Spark 1.6 mllib-progamming-guide on mllib-data-types](http://spark.apache.org/docs/latest/mllib-data-types.html).
// MAGIC
// MAGIC # [Overview](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/000_MLlibProgGuide)
// MAGIC
// MAGIC ## [Data Types - MLlib Programming Guide](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/000_dataTypesProgGuide)
// MAGIC
// MAGIC - [Local vector](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/001_LocalVector) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#local-vector)
// MAGIC - [Labeled point](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/002_LabeledPoint) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#labeled-point)
// MAGIC - [Local matrix](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/003_LocalMatrix) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#local-matrix)
// MAGIC - [Distributed matrix](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/004_DistributedMatrix) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#distributed-matrix)
// MAGIC - [RowMatrix](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/005_RowMatrix) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#rowmatrix)
// MAGIC - [IndexedRowMatrix](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/006_IndexedRowMatrix) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#indexedrowmatrix)
// MAGIC - [CoordinateMatrix](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/007_CoordinateMatrix) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#coordinatematrix)
// MAGIC - [BlockMatrix](/#workspace/scalable-data-science/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/008_BlockMatrix) and [URL](http://spark.apache.org/docs/latest/mllib-data-types.html#blockmatrix)
// MAGIC
// MAGIC MLlib supports local vectors and matrices stored on a single machine, as
// MAGIC well as distributed matrices backed by one or more RDDs. Local vectors
// MAGIC and local matrices are simple data models that serve as public
// MAGIC interfaces. The underlying linear algebra operations are provided by
// MAGIC [Breeze](http://www.scalanlp.org/) and [jblas](http://jblas.org/). A
// MAGIC training example used in supervised learning is called a “labeled point”
// MAGIC in MLlib.
// COMMAND ----------
// MAGIC %md
// MAGIC Labeled point in Scala
// MAGIC -------------
// MAGIC
// MAGIC A labeled point is a local vector, either dense or sparse, associated
// MAGIC with a label/response. In MLlib, labeled points are used in supervised
// MAGIC learning algorithms.
// MAGIC
// MAGIC We use a double to store a label, so we can use
// MAGIC labeled points in both regression and classification.
// MAGIC
// MAGIC For binary classification, a label should be either `0` (negative) or `1`
// MAGIC (positive). For multiclass classification, labels should be class
// MAGIC indices starting from zero: `0, 1, 2, ...`.
// MAGIC
// MAGIC A labeled point is represented by the case class
// MAGIC [`LabeledPoint`](http://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.mllib.regression.LabeledPoint).
// MAGIC
// MAGIC Refer to the [`LabeledPoint` Scala docs](http://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.mllib.regression.LabeledPoint)
// MAGIC for details on the API.
// COMMAND ----------
//import first
import org.apache.spark.mllib.linalg.Vectors
import org.apache.spark.mllib.regression.LabeledPoint
// COMMAND ----------
// Create a labeled point with a "positive" label and a dense feature vector.
val pos = LabeledPoint(1.0, Vectors.dense(1.0, 0.0, 3.0))
// COMMAND ----------
// Create a labeled point with a "negative" label and a sparse feature vector.
val neg = LabeledPoint(0.0, Vectors.sparse(3, Array(0, 2), Array(1.0, 3.0)))
// COMMAND ----------
// MAGIC %md
// MAGIC ***Sparse data in Scala***
// MAGIC
// MAGIC It is very common in practice to have sparse training data. MLlib
// MAGIC supports reading training examples stored in `LIBSVM` format, which is
// MAGIC the default format used by
// MAGIC [`LIBSVM`](http://www.csie.ntu.edu.tw/~cjlin/libsvm/) and
// MAGIC [`LIBLINEAR`](http://www.csie.ntu.edu.tw/~cjlin/liblinear/). It is a
// MAGIC text format in which each line represents a labeled sparse feature
// MAGIC vector using the following format:
// MAGIC
// MAGIC label index1:value1 index2:value2 ...
// MAGIC
// MAGIC where the indices are one-based and in ascending order. After loading,
// MAGIC the feature indices are converted to zero-based.
// MAGIC
// MAGIC [`MLUtils.loadLibSVMFile`](http://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.mllib.util.MLUtils$)
// MAGIC reads training examples stored in LIBSVM format.
// MAGIC
// MAGIC Refer to the [`MLUtils` Scala docs](http://spark.apache.org/docs/latest/api/scala/index.html#org.apache.spark.mllib.util.MLUtils)
// MAGIC for details on the API.
// COMMAND ----------
import org.apache.spark.mllib.regression.LabeledPoint
import org.apache.spark.mllib.util.MLUtils
import org.apache.spark.rdd.RDD
//val examples: RDD[LabeledPoint] = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt") // from prog guide but no such data here - can wget from github
// COMMAND ----------
// MAGIC %md
// MAGIC ## Load MNIST training and test datasets
// MAGIC
// MAGIC Our datasets are vectors of pixels representing images of handwritten digits. For example:
// MAGIC
// MAGIC 
// MAGIC 
// COMMAND ----------
display(dbutils.fs.ls("/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt"))
// COMMAND ----------
val examples: RDD[LabeledPoint] = MLUtils.loadLibSVMFile(sc, "/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt")
// COMMAND ----------
examples.take(1)
// COMMAND ----------
// MAGIC %md
// MAGIC Display our data. Each image has the true label (the `label` column) and a vector of `features` which represents pixel intensities (see below for details of what is in `examples`).
// COMMAND ----------
display(examples.toDF) // covert to DataFrame and display for convenient db visualization
// COMMAND ----------
// MAGIC %md
// MAGIC The pixel intensities are represented in `features` as a sparse vector. For example the first observation, as seen in row 1 of the output of `display(examples.toDF)` above, has `label` `5`, i.e. the hand-written image is of the number 5. This hand-written image is the following sparse vector (just click the triangle to the left of `features` in the first row to see the following):
// MAGIC ```
// MAGIC type: 0
// MAGIC size: 780
// MAGIC indices: [152,153,155,...,682,683]
// MAGIC values: [3, 18, 18,18,126,...,132,16]
// MAGIC ```
// MAGIC Here
// MAGIC * `type: 0` says we have a sparse vector.
// MAGIC * `size: 780` says the vector has 780 elements (pixels) in total
// MAGIC * these indices from 0,...,779 are a unidimensional indexing of the two-dimensional array of pixels in the image
// MAGIC * `indices: [152,153,155,...,682,683]` are the indices from the `[0,1,...,779]` possible indices with non-zero values
// MAGIC * a value is an integer encoding the gray-level at the pixel index
// MAGIC * `values: [3, 18, 18,18,126,...,132,16]` are the actual gray level values, for example:
// MAGIC     * at pixel index `152` the gray-level value is `3`,
// MAGIC * at index `153` the gray-level value is `18`,
// MAGIC * ..., and finally at
// MAGIC     * at index `683` the gray-level value is `16` (a tiny reconstruction of such a sparse vector follows in the next cell)
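// COMMAND ----------
// Illustrative cell (not from the original notebook): hand-building a tiny sparse vector in the
// same spirit, using only the first few indices/values listed above (the real image has many
// more non-zero pixels).
val sparseDigitSketch = LabeledPoint(5.0, Vectors.sparse(780, Array(152, 153, 155), Array(3.0, 18.0, 18.0)))
sparseDigitSketch.features.toArray.count(_ != 0)  // 3 non-zero pixels in this sketch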
// COMMAND ----------
// MAGIC %md
// MAGIC ***
// MAGIC ***
// MAGIC Labeled point in Python
// MAGIC -------------
// MAGIC A labeled point is represented by
// MAGIC [`LabeledPoint`](http://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.regression.LabeledPoint).
// MAGIC
// MAGIC Refer to the [`LabeledPoint` Python
// MAGIC docs](http://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.regression.LabeledPoint)
// MAGIC for more details on the API.
// COMMAND ----------
// MAGIC %py
// MAGIC # import first
// MAGIC from pyspark.mllib.linalg import SparseVector
// MAGIC from pyspark.mllib.regression import LabeledPoint
// MAGIC
// MAGIC # Create a labeled point with a positive label and a dense feature vector.
// MAGIC pos = LabeledPoint(1.0, [1.0, 0.0, 3.0])
// MAGIC
// MAGIC # Create a labeled point with a negative label and a sparse feature vector.
// MAGIC neg = LabeledPoint(0.0, SparseVector(3, [0, 2], [1.0, 3.0]))
// COMMAND ----------
// MAGIC %md
// MAGIC ***Sparse data in Python***
// MAGIC
// MAGIC
// MAGIC [`MLUtils.loadLibSVMFile`](http://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.util.MLUtils)
// MAGIC reads training examples stored in LIBSVM format.
// MAGIC
// MAGIC Refer to the [`MLUtils` Python docs](http://spark.apache.org/docs/latest/api/python/pyspark.mllib.html#pyspark.mllib.util.MLUtils)
// MAGIC for more details on the API.
// COMMAND ----------
// MAGIC %py
// MAGIC from pyspark.mllib.util import MLUtils
// MAGIC
// MAGIC # examples = MLUtils.loadLibSVMFile(sc, "data/mllib/sample_libsvm_data.txt") #from prog guide but no such data here - can wget from github
// MAGIC examples = MLUtils.loadLibSVMFile(sc, "/databricks-datasets/mnist-digits/data-001/mnist-digits-train.txt")
// COMMAND ----------
examples.take(1)
// COMMAND ----------
// MAGIC %md
// MAGIC
// MAGIC # [Scalable Data Science](http://www.math.canterbury.ac.nz/~r.sainudiin/courses/ScalableDataScience/)
// MAGIC
// MAGIC
// MAGIC ### prepared by [Raazesh Sainudiin](https://nz.linkedin.com/in/raazesh-sainudiin-45955845) and [Sivanand Sivaram](https://www.linkedin.com/in/sivanand)
// MAGIC
// MAGIC *supported by* [](https://databricks.com/)
// MAGIC and
// MAGIC [](https://www.awseducate.com/microsite/CommunitiesEngageHome) | lamastex/scalable-data-science | db/xtraResources/ProgGuides1_6/MLlibProgrammingGuide/dataTypes/002_LabeledPoint.scala | Scala | unlicense | 11,570 |
package org.jetbrains.plugins.scala.lang.resolve
import org.jetbrains.plugins.scala.{LatestScalaVersions, ScalaVersion}
import org.jetbrains.plugins.scala.base.ScalaLightCodeInsightFixtureTestAdapter
class ParenlessMethodCallOverloadingResolutionTest
extends ScalaLightCodeInsightFixtureTestAdapter
with SimpleResolveTestBase {
import SimpleResolveTestBase._
override protected def supportedIn(version: ScalaVersion): Boolean =
version >= LatestScalaVersions.Scala_2_12
def testSCL16802(): Unit = doResolveTest(
s"""
|trait Foo {
| def foo(i: Int): String
|}
|
|def ge${REFTGT}tFoo(): Foo = ???
|def getFoo(s: String): Foo = ???
|
|def takesFoo(foo: Foo): Unit = ()
|takesFoo(getF${REFSRC}oo)
|
|""".stripMargin)
}
| JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/resolve/ParenlessMethodCallOverloadingResolutionTest.scala | Scala | apache-2.0 | 820 |
package support
import io.circe.generic.auto._
import org.joda.time.DateTime
import support.Helpers._
import scala.io.Source
import scala.tools.nsc.io.File
import scala.util.{Failure, Success, Try}
case class Log(
file: String,
suite: String,
test: String,
time: DateTime,
status: String
)
object Logger {
val config = Config.Logger
def log(suiteName: String, testName: String, errOpt: Option[MyException]): Unit = {
if (config.enabled) {
val log = Log(
file = errOpt.flatMap(_.fileName).getOrElse("unknown"),
suite = suiteName,
test = testName,
time = new DateTime(),
status = errOpt match {
case Some(err: MyTestPauseException) => "pending"
case Some(err: MyTestPendingException) => "pending"
case Some(err: MyNotImplementedException) => "pending"
case Some(err: MyTestFailedException) => "error"
case Some(err: MyException) => "error"
case Some(err) => "error"
case _ => "unknown"
}
)
new java.io.File(config.file).getParentFile.mkdirs()
File(config.file).appendAll(asJson(log) + "\\n")
}
}
def read(): List[Log] = Try {
Source.fromFile(config.file).getLines().toList.zipWithIndex.flatMap { case (json, index) =>
parseJson[Log](json) match {
case Success(log) => Some(log)
case Failure(err) => println(s"WARN at line ${index + 1}: " + err.getMessage); None
}
}
}.getOrElse(List())
def analyse(logs: List[Log]): String = {
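    // Pair each log entry with the elapsed time until the next one, dropping gaps longer than
    // 15 minutes (presumably idle time rather than time spent on a test).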
def durations(logs: List[Log]): List[(Log, Long)] =
logs.sliding(2).flatMap {
case before :: after :: Nil => Some((before, after.time.getMillis - before.time.getMillis))
case _ => None
}.filter(_._2 < 1000 * 60 * 15).toList
def duration(logs: List[(Log, Long)]): String =
(logs.map(_._2).sum / 1000 / 60) + " min"
def errors(logs: List[(Log, Long)]): String =
      logs.count(_._1.status == "error") + " errors"
durations(logs).groupBy(_._1.suite).map { case (suite, suiteLogs) =>
s"$suite: ${duration(suiteLogs)}, ${errors(suiteLogs)}\\n" +
suiteLogs.groupBy(_._1.test).map { case (test, testLogs) =>
s" $test: ${duration(testLogs)}, ${errors(testLogs)}"
}.toList.mkString("\\n")
}.toList.sorted.mkString("\\n")
}
def main(args: Array[String]): Unit = {
println("Analyse des logs:\\n" + analyse(read()))
}
  // because circe does pretty printing, the single-line JSON is built by hand...
private def asJson(log: Log): String =
s"""{"file":"${log.file}","suite":"${log.suite}","test":"${log.test}","time":"${log.time}","status":"${log.status}"}"""
}
| loicknuchel/scala-class | src/main/scala/support/Logger.scala | Scala | unlicense | 2,735 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.carbondata.spark.testsuite.dataretention
import java.text.SimpleDateFormat
import org.apache.commons.lang3.time.DateUtils
import org.apache.spark.sql.{CarbonEnv, Row}
import org.apache.spark.sql.test.util.QueryTest
import org.scalatest.BeforeAndAfterAll
import org.apache.carbondata.common.exceptions.sql.MalformedCarbonCommandException
import org.apache.carbondata.core.constants.CarbonCommonConstants
import org.apache.carbondata.core.locks.{CarbonLockFactory, ICarbonLock, LockUsage}
import org.apache.carbondata.core.metadata.AbsoluteTableIdentifier
import org.apache.carbondata.core.statusmanager.{LoadMetadataDetails, SegmentStatusManager}
import org.apache.carbondata.core.util.CarbonProperties
import org.apache.carbondata.core.util.path.CarbonTablePath
/**
* This class contains data retention feature test cases
*/
class DataRetentionTestCase extends QueryTest with BeforeAndAfterAll {
var absoluteTableIdentifierForLock: AbsoluteTableIdentifier = null
var absoluteTableIdentifierForRetention: AbsoluteTableIdentifier = null
var carbonTablePath : String = null
var carbonDateFormat = new SimpleDateFormat(CarbonCommonConstants.CARBON_TIMESTAMP)
var defaultDateFormat = new SimpleDateFormat(CarbonCommonConstants
.CARBON_TIMESTAMP_DEFAULT_FORMAT)
var carbonTableStatusLock: ICarbonLock = null
var carbonDeleteSegmentLock: ICarbonLock = null
var carbonCleanFilesLock: ICarbonLock = null
var carbonMetadataLock: ICarbonLock = null
override def beforeAll {
sql("drop table if exists DataRetentionTable")
sql("drop table if exists retentionlock")
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.MAX_TIMEOUT_FOR_CARBON_LOCK, "1")
.addProperty(CarbonCommonConstants.MAX_QUERY_EXECUTION_TIME, "1")
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT, "yyyy/MM/dd")
sql(
"CREATE table DataRetentionTable (ID int, date String, country String, name " +
"String," +
"phonetype String, serialname String, salary int) STORED AS carbondata"
)
sql(
"CREATE table retentionlock (ID int, date String, country String, name " +
"String," +
"phonetype String, serialname String, salary int) STORED AS carbondata"
)
val carbonTable =
CarbonEnv.getCarbonTable(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
"retentionlock")(sqlContext.sparkSession)
absoluteTableIdentifierForLock = carbonTable.getAbsoluteTableIdentifier
val carbonTable2 =
CarbonEnv.getCarbonTable(Option(CarbonCommonConstants.DATABASE_DEFAULT_NAME),
"dataRetentionTable")(sqlContext.sparkSession)
absoluteTableIdentifierForRetention = carbonTable2.getAbsoluteTableIdentifier
carbonTablePath = CarbonTablePath
.getMetadataPath(absoluteTableIdentifierForRetention.getTablePath)
carbonTableStatusLock = CarbonLockFactory
.getCarbonLockObj(absoluteTableIdentifierForLock, LockUsage.TABLE_STATUS_LOCK)
carbonDeleteSegmentLock = CarbonLockFactory
.getCarbonLockObj(absoluteTableIdentifierForLock, LockUsage.DELETE_SEGMENT_LOCK)
carbonCleanFilesLock = CarbonLockFactory
.getCarbonLockObj(absoluteTableIdentifierForLock, LockUsage.CLEAN_FILES_LOCK)
carbonMetadataLock = CarbonLockFactory
.getCarbonLockObj(absoluteTableIdentifierForLock, LockUsage.METADATA_LOCK)
sql(
s"LOAD DATA LOCAL INPATH '$resourcesPath/dataretention1.csv' INTO TABLE retentionlock " +
"OPTIONS('DELIMITER' = ',')")
sql(
s"LOAD DATA LOCAL INPATH '$resourcesPath/dataretention1.csv' INTO TABLE DataRetentionTable " +
"OPTIONS('DELIMITER' = ',')")
sql(
s"LOAD DATA LOCAL INPATH '$resourcesPath/dataretention2.csv' INTO TABLE DataRetentionTable " +
"OPTIONS('DELIMITER' = ',')")
}
override def afterAll {
CarbonProperties.getInstance()
.addProperty(CarbonCommonConstants.CARBON_TIMESTAMP_FORMAT,
CarbonCommonConstants.CARBON_TIMESTAMP_DEFAULT_FORMAT)
sql("drop table if exists DataRetentionTable")
sql("drop table if exists retentionlock")
}
private def getSegmentStartTime(segments: Array[LoadMetadataDetails],
segmentId: Integer): String = {
val segmentLoadTimeString = segments(segmentId).getLoadStartTime()
var loadTime = carbonDateFormat.parse(carbonDateFormat.format(segmentLoadTimeString))
    // add one minute so that the "delete ... where segment.starttime before" command covers this segment's load time
loadTime = DateUtils.addMinutes(loadTime, 1)
defaultDateFormat.format(loadTime)
}
test("RetentionTest_withoutDelete") {
checkAnswer(
sql("SELECT country, count(salary) AS amount FROM DataRetentionTable WHERE country" +
" IN ('china','ind','aus','eng') GROUP BY country"
),
Seq(Row("aus", 9), Row("ind", 9))
)
}
test("RetentionTest_DeleteSegmentsByLoadTime") {
val segments: Array[LoadMetadataDetails] =
SegmentStatusManager.readLoadMetadata(carbonTablePath)
// check segment length, it should be 3 (loads)
if (segments.length != 2) {
assert(false)
}
val actualValue: String = getSegmentStartTime(segments, 1)
// delete segments (0,1) which contains ind, aus
sql(
"delete from table DataRetentionTable where segment.starttime before '" + actualValue + "'")
// load segment 2 which contains eng
sql(
s"LOAD DATA LOCAL INPATH '$resourcesPath/dataretention3.csv' INTO TABLE DataRetentionTable " +
"OPTIONS('DELIMITER' = ',')")
checkAnswer(
sql("SELECT country, count(salary) AS amount FROM DataRetentionTable WHERE country" +
" IN ('china','ind','aus','eng') GROUP BY country"
),
Seq(Row("eng", 9))
)
}
test("RetentionTest3_DeleteByLoadId") {
// delete segment 2 and load ind segment
sql("delete from table DataRetentionTable where segment.id in (2)")
sql(
s"LOAD DATA LOCAL INPATH '$resourcesPath/dataretention1.csv' INTO TABLE DataRetentionTable " +
"OPTIONS('DELIMITER' = ',')")
checkAnswer(
sql("SELECT country, count(salary) AS amount FROM DataRetentionTable WHERE country" +
" IN ('china','ind','aus','eng') GROUP BY country"
),
Seq(Row("ind", 9))
)
// these queries should execute without any error.
sql("show segments for table DataRetentionTable")
sql("clean files for table DataRetentionTable")
}
test("RetentionTest4_DeleteByInvalidLoadId") {
val e = intercept[MalformedCarbonCommandException] {
// delete segment with no id
sql("delete from table DataRetentionTable where segment.id in ()")
}
assert(e.getMessage.contains("should not be empty"))
}
test("test delete segments by load date with case-insensitive table name") {
sql(
"""
CREATE TABLE IF NOT EXISTS carbon_table_1
(ID Int, date Timestamp, country String,
name String, phonetype String, serialname String, salary Int)
STORED AS carbondata
""")
sql(s"LOAD DATA LOCAL INPATH '$resourcesPath/emptyDimensionData.csv' into table carbon_table_1")
checkAnswer(
sql("select count(*) from carbon_table_1"), Seq(Row(20)))
sql("delete from table carbon_table_1 where segment.starttime " +
" before '2099-07-28 11:00:00'")
checkAnswer(
sql("select count(*) from carbon_table_1"), Seq(Row(0)))
sql("DROP TABLE carbon_table_1")
}
test("RetentionTest_DeleteSegmentsByLoadTimeValiadtion") {
val e = intercept[MalformedCarbonCommandException] {
sql(
"delete from table DataRetentionTable where segment.starttime before" +
" 'abcd-01-01 00:00:00'")
}
assert(e.getMessage.contains("Invalid load start time format"))
val ex = intercept[MalformedCarbonCommandException] {
sql(
"delete from table DataRetentionTable where segment.starttime before" +
" '2099:01:01 00:00:00'")
}
assert(ex.getMessage.contains("Invalid load start time format"))
checkAnswer(
sql("SELECT country, count(salary) AS amount FROM DataRetentionTable WHERE country" +
" IN ('china','ind','aus','eng') GROUP BY country"
),
Seq(Row("ind", 9))
)
sql("delete from table DataRetentionTable where segment.starttime before '2099-01-01'")
checkAnswer(
sql("SELECT country, count(salary) AS amount FROM DataRetentionTable WHERE country" +
" IN ('china','ind','aus','eng') GROUP BY country"), Seq())
}
test("RetentionTest_InvalidDeleteCommands") {
// All these queries should fail.
intercept[Exception] {
sql("DELETE LOADS FROM TABLE DataRetentionTable where STARTTIME before '2099-01-01'")
}
intercept[Exception] {
sql("DELETE LOAD 2 FROM TABLE DataRetentionTable")
}
intercept[Exception] {
sql("show loads for table DataRetentionTable")
}
}
test("RetentionTest_Locks") {
sql(
s"LOAD DATA LOCAL INPATH '$resourcesPath/dataretention1.csv' INTO TABLE retentionlock " +
"OPTIONS('DELIMITER' = ',')")
carbonDeleteSegmentLock.lockWithRetries()
carbonTableStatusLock.lockWithRetries()
carbonCleanFilesLock.lockWithRetries()
// delete segment 0 it should fail
intercept[Exception] {
sql("delete from table retentionlock where segment.id in (0)")
}
// it should fail
intercept[Exception] {
sql("delete from table retentionlock where segment.starttime before " +
"'2099-01-01 00:00:00.0'")
}
// it should fail
intercept[Exception] {
sql("clean files for table retentionlock")
}
sql("SHOW SEGMENTS FOR TABLE retentionlock").collect()
carbonTableStatusLock.unlock()
carbonCleanFilesLock.unlock()
carbonDeleteSegmentLock.unlock()
sql("delete from table retentionlock where segment.id in (0)")
// load and delete should execute parallely
carbonMetadataLock.lockWithRetries()
sql("delete from table retentionlock where segment.id in (1)")
carbonMetadataLock.unlock()
}
}
| zzcclp/carbondata | integration/spark/src/test/scala/org/apache/carbondata/spark/testsuite/dataretention/DataRetentionTestCase.scala | Scala | apache-2.0 | 10,806 |
/*
* Copyright 2014 porter <https://github.com/eikek/porter>
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package porter.auth
import porter.model._
import porter.model.Account
import porter.model.Realm
@SerialVersionUID(20131122)
final case class AuthToken(realm: Realm,
account: Account,
credentials: Set[Credentials],
votes: Map[Ident, Vote] = Map.empty) extends Serializable {
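  /** Returns a copy with the given vote recorded, overwriting any earlier vote for the same ident. */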
def vote(v: (Ident, Vote)) = copy(votes = votes + (v._1 -> v._2))
def toResult = AuthResult(realm, account.name, votes, account.props)
}
| eikek/porter | api/src/main/scala/porter/auth/AuthToken.scala | Scala | apache-2.0 | 1,125 |
/*
* La Trobe University - Distributed Deep Learning System
* Copyright 2016 Matthias Langer ([email protected])
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
*/
package edu.latrobe.blaze.modules.generic
import edu.latrobe._
import edu.latrobe.blaze._
import edu.latrobe.blaze.modules._
final class Dropout_Generic_Baseline(override val builder: DropoutBuilder,
override val inputHints: BuildHints,
override val seed: InstanceSeed,
override val weightBufferBuilder: ValueTensorBufferBuilder)
extends Dropout_Generic {
// ---------------------------------------------------------------------------
// Forward propagation related.
// ---------------------------------------------------------------------------
override protected def doPredictForTraining(output: Tensor,
rng: PseudoRNG)
: PredictContext = {
// Build a dropout mask.
val bernoulli = rng.bernoulliDistribution(
probability,
Real.zero,
if (useOriginalAlgorithm) Real.one else boostFactor
)
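    // Dropped units are zeroed. With the original algorithm, kept units stay at 1 and the output
    // is rescaled at inference time instead (see doPredictForInference); otherwise kept units are
    // scaled by boostFactor here during training.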
val mask = output.createSibling()
mask.fill(bernoulli.sample, threadSafe = false)
// Apply it.
output :*= mask
Dropout_Generic_Baseline_Context(mask)
}
override protected def doPredictForInference(output: Tensor)
: Unit = {
if (useOriginalAlgorithm) {
output *= probabilityInv
}
else {
// Do nothing.
}
}
// ---------------------------------------------------------------------------
// Back propagation related.
// ---------------------------------------------------------------------------
override protected def doDeriveInputError(context: PredictContext,
error: Tensor)
: Tensor = context match {
case Dropout_Generic_Baseline_Context(mask) =>
error :*= mask
error
case _ =>
throw new MatchError(context)
}
/*
override protected def doDeriveInputErrorForInference(error: Tensor)
: Tensor = {
if (useOriginalAlgorithm) {
error *= probabilityInv
}
else {
// Do nothing.
}
error
}
*/
}
final case class Dropout_Generic_Baseline_Context(mask: Tensor)
extends PredictContext {
override protected def doClose()
: Unit = {
mask.close()
super.doClose()
}
}
object Dropout_Generic_Baseline_Description
extends GenericModuleVariantDescription[DropoutBuilder] {
override def build(builder: DropoutBuilder,
hints: BuildHints,
seed: InstanceSeed,
weightsBuilder: ValueTensorBufferBuilder)
: Dropout_Generic_Baseline = new Dropout_Generic_Baseline(
builder, hints, seed, weightsBuilder
)
}
| bashimao/ltudl | blaze/src/main/scala/edu/latrobe/blaze/modules/generic/Dropout_Generic_Baseline.scala | Scala | apache-2.0 | 3,394 |
object Test {
def main(args: Array[String]): Unit = {
println(rewrite("foo"))
println(rewrite("foo" + "foo"))
rewrite {
println("apply")
}
}
}
| dotty-staging/dotty | tests/run-macros/expr-map-3/Test_2.scala | Scala | apache-2.0 | 172 |
/* __ *\\
** ________ ___ / / ___ Scala API **
** / __/ __// _ | / / / _ | (c) 2003-2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ | http://scala-lang.org/ **
** /____/\\___/_/ |_/____/_/ | | **
** |/ **
\\* */
package scala
package collection
package generic
import scala.language.higherKinds
/** A template for companion objects of Seq and subclasses thereof.
*
* @since 2.8
*/
abstract class SeqFactory[CC[X] <: Seq[X] with GenericTraversableTemplate[X, CC]]
extends GenSeqFactory[CC] with TraversableFactory[CC] {
/** This method is called in a pattern match { case Seq(...) => }.
*
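   *  A minimal illustration (editorial addition):
   *  {{{
   *  List(1, 2, 3) match { case Seq(x, _*) => x }   // == 1
   *  }}}
   *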
* @param x the selector value
   *  @return the sequence wrapped in a `Some`
*/
def unapplySeq[A](x: CC[A]): Some[CC[A]] = Some(x)
}
| felixmulder/scala | src/library/scala/collection/generic/SeqFactory.scala | Scala | bsd-3-clause | 1,091 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.sql.execution.command
import org.apache.spark.internal.Logging
import org.apache.spark.sql.{Row, SparkSession}
import org.apache.spark.sql.catalyst.expressions.Attribute
import org.apache.spark.sql.internal.SQLConf
import org.apache.spark.sql.internal.StaticSQLConf.CATALOG_IMPLEMENTATION
import org.apache.spark.sql.types.{StringType, StructField, StructType}
/**
* Command that runs
* {{{
* set key = value;
* set -v;
* set;
* }}}
*/
case class SetCommand(kv: Option[(String, Option[String])]) extends RunnableCommand with Logging {
private def keyValueOutput: Seq[Attribute] = {
val schema = StructType(
StructField("key", StringType, nullable = false) ::
StructField("value", StringType, nullable = false) :: Nil)
schema.toAttributes
}
private val (_output, runFunc): (Seq[Attribute], SparkSession => Seq[Row]) = kv match {
// Configures the deprecated "mapred.reduce.tasks" property.
case Some((SQLConf.Deprecated.MAPRED_REDUCE_TASKS, Some(value))) =>
val runFunc = (sparkSession: SparkSession) => {
logWarning(
s"Property ${SQLConf.Deprecated.MAPRED_REDUCE_TASKS} is deprecated, " +
s"automatically converted to ${SQLConf.SHUFFLE_PARTITIONS.key} instead.")
if (value.toInt < 1) {
val msg =
s"Setting negative ${SQLConf.Deprecated.MAPRED_REDUCE_TASKS} for automatically " +
"determining the number of reducers is not supported."
throw new IllegalArgumentException(msg)
} else {
sparkSession.conf.set(SQLConf.SHUFFLE_PARTITIONS.key, value)
Seq(Row(SQLConf.SHUFFLE_PARTITIONS.key, value))
}
}
(keyValueOutput, runFunc)
case Some((SQLConf.Replaced.MAPREDUCE_JOB_REDUCES, Some(value))) =>
val runFunc = (sparkSession: SparkSession) => {
logWarning(
s"Property ${SQLConf.Replaced.MAPREDUCE_JOB_REDUCES} is Hadoop's property, " +
s"automatically converted to ${SQLConf.SHUFFLE_PARTITIONS.key} instead.")
if (value.toInt < 1) {
val msg =
s"Setting negative ${SQLConf.Replaced.MAPREDUCE_JOB_REDUCES} for automatically " +
"determining the number of reducers is not supported."
throw new IllegalArgumentException(msg)
} else {
sparkSession.conf.set(SQLConf.SHUFFLE_PARTITIONS.key, value)
Seq(Row(SQLConf.SHUFFLE_PARTITIONS.key, value))
}
}
(keyValueOutput, runFunc)
case Some((key @ SetCommand.VariableName(name), Some(value))) =>
val runFunc = (sparkSession: SparkSession) => {
sparkSession.conf.set(name, value)
Seq(Row(key, value))
}
(keyValueOutput, runFunc)
// Configures a single property.
case Some((key, Some(value))) =>
val runFunc = (sparkSession: SparkSession) => {
if (sparkSession.conf.get(CATALOG_IMPLEMENTATION.key).equals("hive") &&
key.startsWith("hive.")) {
logWarning(s"'SET $key=$value' might not work, since Spark doesn't support changing " +
"the Hive config dynamically. Please passing the Hive-specific config by adding the " +
s"prefix spark.hadoop (e.g., spark.hadoop.$key) when starting a Spark application. " +
"For details, see the link: https://spark.apache.org/docs/latest/configuration.html#" +
"dynamically-loading-spark-properties.")
}
sparkSession.conf.set(key, value)
Seq(Row(key, value))
}
(keyValueOutput, runFunc)
// (In Hive, "SET" returns all changed properties while "SET -v" returns all properties.)
// Queries all key-value pairs that are set in the SQLConf of the sparkSession.
case None =>
val runFunc = (sparkSession: SparkSession) => {
sparkSession.conf.getAll.toSeq.sorted.map { case (k, v) => Row(k, v) }
}
(keyValueOutput, runFunc)
// Queries all properties along with their default values and docs that are defined in the
// SQLConf of the sparkSession.
case Some(("-v", None)) =>
val runFunc = (sparkSession: SparkSession) => {
sparkSession.sessionState.conf.getAllDefinedConfs.sorted.map {
case (key, defaultValue, doc) =>
Row(key, Option(defaultValue).getOrElse("<undefined>"), doc)
}
}
val schema = StructType(
StructField("key", StringType, nullable = false) ::
StructField("value", StringType, nullable = false) ::
StructField("meaning", StringType, nullable = false) :: Nil)
(schema.toAttributes, runFunc)
// Queries the deprecated "mapred.reduce.tasks" property.
case Some((SQLConf.Deprecated.MAPRED_REDUCE_TASKS, None)) =>
val runFunc = (sparkSession: SparkSession) => {
logWarning(
s"Property ${SQLConf.Deprecated.MAPRED_REDUCE_TASKS} is deprecated, " +
s"showing ${SQLConf.SHUFFLE_PARTITIONS.key} instead.")
Seq(Row(
SQLConf.SHUFFLE_PARTITIONS.key,
sparkSession.sessionState.conf.numShufflePartitions.toString))
}
(keyValueOutput, runFunc)
// Queries a single property.
case Some((key, None)) =>
val runFunc = (sparkSession: SparkSession) => {
val value = sparkSession.conf.getOption(key).getOrElse("<undefined>")
Seq(Row(key, value))
}
(keyValueOutput, runFunc)
}
override val output: Seq[Attribute] = _output
override def run(sparkSession: SparkSession): Seq[Row] = runFunc(sparkSession)
}
object SetCommand {
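  // Matches keys of the form "hivevar:<name>", i.e. Hive-style variable assignments.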
val VariableName = """hivevar:([^=]+)""".r
}
/**
* This command is for resetting SQLConf to the default values. Command that runs
* {{{
* reset;
* }}}
*/
case object ResetCommand extends RunnableCommand with Logging {
override def run(sparkSession: SparkSession): Seq[Row] = {
sparkSession.sessionState.conf.clear()
Seq.empty[Row]
}
}
| mike0sv/spark | sql/core/src/main/scala/org/apache/spark/sql/execution/command/SetCommand.scala | Scala | apache-2.0 | 6,739 |
/*
* Copyright (c) 2016 Fred Cecilia, Valentin Kasas, Olivier Girardot
*
* Permission is hereby granted, free of charge, to any person obtaining a copy of
* this software and associated documentation files (the "Software"), to deal in
* the Software without restriction, including without limitation the rights to
* use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
* the Software, and to permit persons to whom the Software is furnished to do so,
* subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
* FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
* COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
* IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
package fr.psug.kafka.streams
import java.lang
import org.apache.kafka.common.serialization.Serde
import org.apache.kafka.streams.KeyValue
import org.apache.kafka.streams.kstream.{KeyValueMapper, _}
import org.apache.kafka.streams.processor.{Processor, ProcessorSupplier, StreamPartitioner}
import scala.language.implicitConversions
/**
* Typesafe wrapper for kafka's org.apache.kafka.streams.kstream.KStream
*
* @param source - wrapped stream
* @tparam K - key
* @tparam V - value
*/
class TKStream[K, V](val source: KStream[K, V]) {
import TKStream._
/**
* Create a new {@code KStream} that consists of all records of this stream which satisfy the given predicate.
* All records that do not satisfy the predicate are dropped.
* This is a stateless record-by-record operation.
*
* @param predicate a filter { @link Predicate} that is applied to each record
* @return a { @code KStream} that contains only those records that satisfy the given predicate
* @see #filterNot(Predicate)
*/
def filter(predicate: (K, V) => Boolean): TKStream[K, V] =
source.filter(new Predicate[K, V] {
override def test(key: K, value: V) = predicate(key, value)
})
/**
* Create a new {@code KStream} that consists all records of this stream which do <em>not</em> satisfy the given
* predicate.
* All records that <em>do</em> satisfy the predicate are dropped.
* This is a stateless record-by-record operation.
*
* @param predicate a filter { @link Predicate} that is applied to each record
* @return a { @code KStream} that contains only those records that do <em>not</em> satisfy the given predicate
* @see #filter(Predicate)
*/
def filterNot(predicate: (K, V) => Boolean): TKStream[K, V] =
source.filterNot(new Predicate[K, V] {
override def test(key: K, value: V) = predicate(key, value)
})
/**
* Set a new key (with possibly new type) for each input record.
* The provided {@link KeyValueMapper} is applied to each input record and computes a new key for it.
* Thus, an input record {@code <K,V>} can be transformed into an output record {@code <K':V>}.
* This is a stateless record-by-record operation.
* <p>
* For example, you can use this transformation to set a key for a key-less input record {@code <null,V>} by
* extracting a key from the value within your {@link KeyValueMapper}. The example below computes the new key as the
* length of the value string.
* <pre>{@code
* KStream<Byte[], String> keyLessStream = builder.stream("key-less-topic");
* KStream<Integer, String> keyedStream = keyLessStream.selectKey(new KeyValueMapper<Byte[], String, Integer> {
* Integer apply(Byte[] key, String value) {
* return value.length();
* }
* });
* }</pre>
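   * <p>
   * With this Scala wrapper the same key selection can be written as a plain function
   * (illustrative sketch; identifiers assumed):
   * <pre>{@code
   * keyLessStream.selectKey((_: Array[Byte], value: String) => value.length)
   * }</pre>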
* <p>
* Setting a new key might result in an internal data redistribution if a key based operator (like an aggregation or
* join) is applied to the result {@code KStream}.
*
* @param mapper a { @link KeyValueMapper} that computes a new key for each record
* @tparam K1 the new key type of the result stream
* @return a { @code KStream} that contains records with new key (possibly of different type) and unmodified value
* @see #map(KeyValueMapper)
* @see #flatMap(KeyValueMapper)
* @see #mapValues(ValueMapper)
* @see #flatMapValues(ValueMapper)
*/
def selectKey[KK >: K, VV >: V, K1](mapper: (KK, VV) => K1): KStream[K1, V] =
source.selectKey(new KeyValueMapper[KK, VV, K1] {
override def apply(key: KK, value: VV) = mapper(key, value)
})
def map[KR, VR](mapper: (K, V) => (KR, VR)): TKStream[KR, VR] =
streamToTypesafe(source.map(new KeyValueMapper[K, V, KeyValue[KR, VR]] {
override def apply(key: K, value: V): KeyValue[KR, VR] = {
val (outK, outV) = mapper(key, value)
new KeyValue(outK, outV)
}
}))
def mapValues[VR](mapper: V => VR): TKStream[K, VR] =
new TKStream(source.mapValues(new ValueMapper[V, VR] {
override def apply(value: V): VR = mapper(value)
}))
def print(implicit keySerde: Serde[K], valSerde: Serde[V]): Unit = source.print(keySerde, valSerde)
def writeAsText(filePath: String)(implicit keySerde: Serde[K], valSerde: Serde[V]): Unit =
source.writeAsText(filePath, keySerde, valSerde)
def flatMap[KR, VR](mapper: (K, V) => Iterable[(KR, VR)]): TKStream[KR, VR] =
streamToTypesafe(source.flatMap(new KeyValueMapper[K, V, lang.Iterable[KeyValue[KR, VR]]] {
override def apply(key: K, value: V): lang.Iterable[KeyValue[KR, VR]] = {
import scala.collection.JavaConverters._
mapper(key, value).map { case (k, v) => new KeyValue(k, v) }.asJava
}
}))
def flatMapValues[V1](mapper: V => Iterable[V1]): TKStream[K, V1] =
source.flatMapValues(new ValueMapper[V, java.lang.Iterable[V1]] {
override def apply(value: V): java.lang.Iterable[V1] = {
import scala.collection.JavaConverters._
mapper(value).asJava
}
})
def branch(predicates: ((K, V) => Boolean)*): Array[TKStream[K, V]] = {
source
.branch(predicates.map(p =>
new Predicate[K, V]() {
override def test(key: K, value: V): Boolean = p(key, value)
}): _*)
.map(x => x: TKStream[K, V])
}
  /**
   * Convenience helper with no counterpart in the underlying Kafka Streams API:
   * splits the stream into the records that satisfy the predicate and those that do not.
   *
   * @param predicate used to segregate the data
   * @return a pair (matching stream, non-matching stream)
   */
def partition(predicate: (K, V) => Boolean): (TKStream[K, V], TKStream[K, V]) = {
val in = source.filter(predicate)
val out = source.filterNot(predicate)
(in, out)
}
def foreach(func: (K, V) => Unit): Unit =
source.foreach(new ForeachAction[K, V] {
override def apply(key: K, value: V): Unit = func(key, value)
})
def through(topic: String)(implicit keySerde: Serde[K], valSerde: Serde[V]): TKStream[K, V] =
source.through(keySerde, valSerde, topic)
def through(partitioner: StreamPartitioner[K, V], topic: String)(implicit keySerde: Serde[K],
valSerde: Serde[V]): TKStream[K, V] =
source.through(keySerde, valSerde, partitioner, topic)
def to(topic: String)(implicit keySerde: Serde[K], valSerde: Serde[V]): Unit = {
source.to(keySerde, valSerde, topic)
}
def to(partitioner: StreamPartitioner[K, V], topic: String)(implicit keySerde: Serde[K], valSerde: Serde[V]): Unit =
source.to(keySerde, valSerde, partitioner, topic)
def transform[K1, V1](transformerSupplier: () => Transformer[K, V, KeyValue[K1, V1]],
stateStoreNames: String*): TKStream[K1, V1] =
source.transform(new TransformerSupplier[K, V, KeyValue[K1, V1]] {
override def get(): Transformer[K, V, KeyValue[K1, V1]] = transformerSupplier()
}, stateStoreNames: _*)
def transformValues[R](valueTransformerSupplier: => ValueTransformer[V, R],
stateStoreNames: String*): TKStream[K, R] =
streamToTypesafe(source.transformValues(new ValueTransformerSupplier[V, R] {
override def get(): ValueTransformer[V, R] = valueTransformerSupplier
}, stateStoreNames: _*))
def process(processorSupplier: () => Processor[K, V], stateStoreNames: String*): Unit = {
source.process(new ProcessorSupplier[K, V] {
override def get(): Processor[K, V] = processorSupplier()
}, stateStoreNames: _*)
}
/**
* Group the records by their current key into a {@link KGroupedStream} while preserving the original values.
* Grouping a stream on the record key is required before an aggregation operator can be applied to the data
* (cf. {@link KGroupedStream}).
* If a record key is {@code null} the record will not be included in the resulting {@link KGroupedStream}.
* <p>
* If a key changing operator was used before this operation (e.g., {@link #selectKey(KeyValueMapper)},
* {@link #map(KeyValueMapper)}, {@link #flatMap(KeyValueMapper)}, or
* {@link #transform(TransformerSupplier, String...)}), and no data redistribution happened afterwards (e.g., via
* {@link #through(String)}) an internal repartitioning topic will be created in Kafka.
* This topic will be named "${applicationId}-XXX-repartition", where "applicationId" is user-specified in
* {@link StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is
* an internally generated name, and "-repartition" is a fixed suffix.
* You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
* <p>
* For this case, all data of this stream will be redistributed through the repartitioning topic by writing all
* records to it, and rereading all records from it, such that the resulting {@link KGroupedStream} is partitioned
* correctly on its key.
*
* @param keySerde key serdes for materializing this stream,
* if not specified the default serdes defined in the configs will be used
* @param valSerde value serdes for materializing this stream,
* if not specified the default serdes defined in the configs will be used
* @return a { @link KGroupedStream} that contains the grouped records of the original { @code KStream}
*/
def groupByKey(implicit keySerde: Serde[K], valSerde: Serde[V]): TKGroupedStream[K, V] = source.groupByKey(keySerde, valSerde)
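  // Hedged usage sketch (added illustration, not part of the original source): grouping a
  // stream on its existing key before an aggregation. Assumes a TKStream[String, Long] named
  // `clicks` and implicit Serde[String] / Serde[Long] instances in scope.
  //
  //   val groupedClicks: TKGroupedStream[String, Long] = clicks.groupByKey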
/**
* Group the records of this {@code KStream} on a new key that is selected using the provided {@link KeyValueMapper}
* and default serializers and deserializers.
* Grouping a stream on the record key is required before an aggregation operator can be applied to the data
* (cf. {@link KGroupedStream}).
   * The {@link KeyValueMapper} selects a new key (which should be of the same type) while preserving the original values.
   * If the new record key is {@code null} the record will not be included in the resulting {@link KGroupedStream}.
* <p>
* Because a new key is selected, an internal repartitioning topic will be created in Kafka.
* This topic will be named "${applicationId}-XXX-repartition", where "applicationId" is user-specified in
* {@link StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is
* an internally generated name, and "-repartition" is a fixed suffix.
* You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
* <p>
* All data of this stream will be redistributed through the repartitioning topic by writing all records to it,
* and rereading all records from it, such that the resulting {@link KGroupedStream} is partitioned on the new key.
* <p>
* This operation is equivalent to calling {@link #selectKey(KeyValueMapper)} followed by {@link #groupByKey()}.
* If the key type is changed, it is recommended to use {@link #groupBy(KeyValueMapper, Serde, Serde)} instead.
*
* @param keySelector a { @link KeyValueMapper} that computes a new key for grouping
* @tparam K1 the key type of the result { @link KGroupedStream}
* @return a { @link KGroupedStream} that contains the grouped records of the original { @code KStream}
*/
def groupBy[K1](keySelector: (K, V) => K1)(implicit keySerde: Serde[K1], valSerde: Serde[V]): TKGroupedStream[K1, V] = source.groupBy(new KeyValueMapper[K, V, K1] {
override def apply(key: K, value: V) = keySelector(key, value)
}, keySerde, valSerde)
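  // Hedged usage sketch (added illustration, not part of the original source): re-keying by a
  // field of the value before grouping, which triggers the repartitioning described above.
  // Assumes a hypothetical case class Purchase(country: String, amount: Long), a
  // TKStream[String, Purchase] named `purchases`, and implicit Serde[String] / Serde[Purchase]
  // instances in scope.
  //
  //   val byCountry: TKGroupedStream[String, Purchase] =
  //     purchases.groupBy((_, purchase) => purchase.country)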
/**
* Join records of this stream with another {@code KStream}'s records using windowed inner equi join.
* The join is computed on the records' key with join attribute {@code thisKStream.key == otherKStream.key}.
* Furthermore, two records are only joined if their timestamps are close to each other as defined by the given
* {@link JoinWindows}, i.e., the window defines an additional join predicate on the record timestamps.
* <p>
* For each pair of records meeting both join predicates the provided {@link ValueJoiner} will be called to compute
* a value (with arbitrary type) for the result record.
* The key of the result record is the same as for both joining input records.
* If an input record key or value is {@code null} the record will not be included in the join operation and thus no
* output record will be added to the resulting {@code KStream}.
* <p>
* Example (assuming all input records belong to the correct windows):
* <table border='1'>
* <tr>
* <th>this</th>
* <th>other</th>
* <th>result</th>
* </tr>
* <tr>
* <td><K1:A></td>
* <td></td>
* <td></td>
* </tr>
* <tr>
* <td><K2:B></td>
* <td><K2:b></td>
* <td><K2:ValueJoiner(B,b)></td>
* </tr>
* <tr>
* <td></td>
* <td><K3:c></td>
* <td></td>
* </tr>
* </table>
* Both input streams need to be co-partitioned on the join key.
* If this requirement is not met, Kafka Streams will automatically repartition the data, i.e., it will create an
* internal repartitioning topic in Kafka and write and re-read the data via this topic before the actual join.
* The repartitioning topic will be named "${applicationId}-XXX-repartition", where "applicationId" is
* user-specified in {@link StreamsConfig} via parameter
* {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
* "-repartition" is a fixed suffix.
* You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
* <p>
* Repartitioning can happen for one or both of the joining {@code KStream}s.
* For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
* records to it, and rereading all records from it, such that the join input {@code KStream} is partitioned
* correctly on its key.
* <p>
* Both of the joining {@code KStream}s will be materialized in local state stores with auto-generated store names.
* For failure and recovery each store will be backed by an internal changelog topic that will be created in Kafka.
* The changelog topic will be named "${applicationId}-storeName-changelog", where "applicationId" is user-specified
* in {@link StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG},
* "storeName" is an internally generated name, and "-changelog" is a fixed suffix.
* You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
*
* @param otherStream the { @code KStream} to be joined with this stream
* @param joiner a { @link ValueJoiner} that computes the join result for a pair of matching records
* @param windows the specification of the { @link JoinWindows}
* @param keySerde key serdes for materializing both streams,
* if not specified the default serdes defined in the configs will be used
* @param thisValueSerde value serdes for materializing this stream,
* if not specified the default serdes defined in the configs will be used
* @param otherValueSerde value serdes for materializing the other stream,
* if not specified the default serdes defined in the configs will be used
* @tparam VO the value type of the other stream
* @tparam VR the value type of the result stream
* @return a { @code KStream} that contains join-records for each key and values computed by the given
* { @link ValueJoiner}, one for each matched record-pair with the same key and within the joining window intervals
* @see #leftJoin(KStream, ValueJoiner, JoinWindows, Serde, Serde, Serde)
* @see #outerJoin(KStream, ValueJoiner, JoinWindows, Serde, Serde, Serde)
*/
def join[VO, VR](otherStream: TKStream[K, VO],
joiner: (V, VO) => VR,
windows: JoinWindows)(
implicit keySerde: Serde[K],
thisValueSerde: Serde[V],
otherValueSerde: Serde[VO]): TKStream[K, VR] = streamToTypesafe(source.join(otherStream.source, new ValueJoiner[V, VO, VR] {
override def apply(value1: V, value2: VO) = joiner(value1, value2)
}, windows, keySerde, thisValueSerde, otherValueSerde))
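  // Hedged usage sketch (added illustration, not part of the original source): correlating
  // clicks with impressions that share a key within a five-minute window. Assumes
  // TKStream[String, String] instances `clicks` and `impressions`, an implicit Serde[String]
  // in scope, and the JoinWindows.of(millis) factory of the Kafka Streams generation this
  // wrapper targets.
  //
  //   val correlated: TKStream[String, String] =
  //     clicks.join(impressions, (click, impression) => s"$click/$impression",
  //       JoinWindows.of(5 * 60 * 1000L))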
/**
* Join records of this stream with {@link KTable}'s records using non-windowed inner equi join with default
* serializers and deserializers.
* The join is a primary key table lookup join with join attribute {@code stream.key == table.key}.
* "Table lookup join" means, that results are only computed if {@code KStream} records are processed.
* This is done by performing a lookup for matching records in the <em>current</em> (i.e., processing time) internal
* {@link KTable} state.
* In contrast, processing {@link KTable} input records will only update the internal {@link KTable} state and
* will not produce any result records.
* <p>
* For each {@code KStream} record that finds a corresponding record in {@link KTable} the provided
* {@link ValueJoiner} will be called to compute a value (with arbitrary type) for the result record.
* The key of the result record is the same as for both joining input records.
   * If a {@code KStream} input record key or value is {@code null} the record will not be included in the join
* operation and thus no output record will be added to the resulting {@code KStream}.
* <p>
* Example:
* <table border='1'>
* <tr>
* <th>KStream</th>
* <th>KTable</th>
* <th>state</th>
* <th>result</th>
* </tr>
* <tr>
* <td><K1:A></td>
* <td></td>
* <td></td>
* <td></td>
* </tr>
* <tr>
* <td></td>
* <td><K1:b></td>
* <td><K1:b></td>
* <td></td>
* </tr>
* <tr>
* <td><K1:C></td>
* <td></td>
* <td><K1:b></td>
* <td><K1:ValueJoiner(C,b)></td>
* </tr>
* </table>
* Both input streams need to be co-partitioned on the join key (cf.
* {@link #join(GlobalKTable, KeyValueMapper, ValueJoiner)}).
* If this requirement is not met, Kafka Streams will automatically repartition the data, i.e., it will create an
* internal repartitioning topic in Kafka and write and re-read the data via this topic before the actual join.
* The repartitioning topic will be named "${applicationId}-XXX-repartition", where "applicationId" is
* user-specified in {@link StreamsConfig} via parameter
* {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
* "-repartition" is a fixed suffix.
* You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
* <p>
   * Repartitioning can happen only for this {@code KStream}.
* For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
* records to it, and rereading all records from it, such that the join input {@code KStream} is partitioned
* correctly on its key.
*
* @param table the { @link KTable} to be joined with this stream
* @param joiner a { @link ValueJoiner} that computes the join result for a pair of matching records
* @tparam VT the value type of the table
* @tparam VR the value type of the result stream
* @return a { @code KStream} that contains join-records for each key and values computed by the given
* { @link ValueJoiner}, one for each matched record-pair with the same key
* @see #leftJoin(KTable, ValueJoiner)
* @see #join(GlobalKTable, KeyValueMapper, ValueJoiner)
*/
def join[VT, VR](table: TKTable[K, VT], joiner: (V, VT) => VR): TKStream[K, VR] = {
streamToTypesafe(source.join(table.source, new ValueJoiner[V, VT, VR] {
override def apply(value1: V, value2: VT) = joiner(value1, value2)
}))
}
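  // Hedged usage sketch (added illustration, not part of the original source): enriching an
  // order stream with the latest customer record for the same key. Assumes a
  // TKStream[String, String] named `orders` and a TKTable[String, String] named `customers`.
  //
  //   val enriched: TKStream[String, String] =
  //     orders.join(customers, (order, customer) => s"$order -> $customer")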
/**
* Join records of this stream with {@link GlobalKTable}'s records using non-windowed inner equi join.
* The join is a primary key table lookup join with join attribute
* {@code keyValueMapper.map(stream.keyValue) == table.key}.
* "Table lookup join" means, that results are only computed if {@code KStream} records are processed.
* This is done by performing a lookup for matching records in the <em>current</em> internal {@link GlobalKTable}
* state.
* In contrast, processing {@link GlobalKTable} input records will only update the internal {@link GlobalKTable}
* state and will not produce any result records.
* <p>
* For each {@code KStream} record that finds a corresponding record in {@link GlobalKTable} the provided
* {@link ValueJoiner} will be called to compute a value (with arbitrary type) for the result record.
* The key of the result record is the same as the key of this {@code KStream}.
   * If a {@code KStream} input record key or value is {@code null} the record will not be included in the join
* operation and thus no output record will be added to the resulting {@code KStream}.
*
* @param globalKTable the { @link GlobalKTable} to be joined with this stream
* @param keyValueMapper instance of { @link KeyValueMapper} used to map from the (key, value) of this stream
* to the key of the { @link GlobalKTable}
* @param joiner a { @link ValueJoiner} that computes the join result for a pair of matching records
* @tparam GK the key type of { @link GlobalKTable}
* @tparam GV the value type of the { @link GlobalKTable}
* @tparam RV the value type of the resulting { @code KStream}
* @return a { @code KStream} that contains join-records for each key and values computed by the given
* { @link ValueJoiner}, one output for each input { @code KStream} record
* @see #leftJoin(GlobalKTable, KeyValueMapper, ValueJoiner)
*/
def join[GK, GV, RV](globalKTable: GlobalKTable[GK, GV],
keyValueMapper: (K, V) => GK,
joiner: (V, GV) => RV): TKStream[K, RV] = streamToTypesafe(source.join(globalKTable,
new KeyValueMapper[K, V, GK] {
override def apply(key: K, value: V) = keyValueMapper(key, value)
},
new ValueJoiner[V, GV, RV] {
override def apply(value1: V, value2: GV) = joiner(value1, value2)
}))
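  // Hedged usage sketch (added illustration, not part of the original source): joining on a
  // key derived from the stream record rather than the record key. Assumes a
  // TKStream[String, String] named `orders` whose values are product ids and a
  // GlobalKTable[String, String] named `products`.
  //
  //   val withProduct: TKStream[String, String] =
  //     orders.join(products, (_, productId) => productId,
  //       (productId, product) => s"$productId:$product")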
def leftJoin[GK, GV, RV](globalKTable: GlobalKTable[GK, GV],
keyValueMapper: (K, V) => GK,
joiner: (V, GV) => RV): TKStream[K, RV] = streamToTypesafe(source.leftJoin(globalKTable,
new KeyValueMapper[K, V, GK] {
override def apply(key: K, value: V) = keyValueMapper(key, value)
},
new ValueJoiner[V, GV, RV] {
override def apply(value1: V, value2: GV) = joiner(value1, value2)
}))
/**
* Join records of this stream with another {@code KStream}'s records using windowed left equi join with default
* serializers and deserializers.
* In contrast to {@link #join(KStream, ValueJoiner, JoinWindows) inner-join}, all records from this stream will
* produce at least one output record (cf. below).
* The join is computed on the records' key with join attribute {@code thisKStream.key == otherKStream.key}.
* Furthermore, two records are only joined if their timestamps are close to each other as defined by the given
* {@link JoinWindows}, i.e., the window defines an additional join predicate on the record timestamps.
* <p>
* For each pair of records meeting both join predicates the provided {@link ValueJoiner} will be called to compute
* a value (with arbitrary type) for the result record.
* The key of the result record is the same as for both joining input records.
* Furthermore, for each input record of this {@code KStream} that does not satisfy the join predicate the provided
* {@link ValueJoiner} will be called with a {@code null} value for the other stream.
* If an input record key or value is {@code null} the record will not be included in the join operation and thus no
* output record will be added to the resulting {@code KStream}.
* <p>
* Example (assuming all input records belong to the correct windows):
* <table border='1'>
* <tr>
* <th>this</th>
* <th>other</th>
* <th>result</th>
* </tr>
* <tr>
* <td><K1:A></td>
* <td></td>
* <td><K1:ValueJoiner(A,null)></td>
* </tr>
* <tr>
* <td><K2:B></td>
* <td><K2:b></td>
* <td><K2:ValueJoiner(B,b)></td>
* </tr>
* <tr>
* <td></td>
* <td><K3:c></td>
* <td></td>
* </tr>
* </table>
* Both input streams need to be co-partitioned on the join key.
* If this requirement is not met, Kafka Streams will automatically repartition the data, i.e., it will create an
* internal repartitioning topic in Kafka and write and re-read the data via this topic before the actual join.
* The repartitioning topic will be named "${applicationId}-XXX-repartition", where "applicationId" is
* user-specified in {@link StreamsConfig} via parameter
* {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
* "-repartition" is a fixed suffix.
* You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
* <p>
* Repartitioning can happen for one or both of the joining {@code KStream}s.
* For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
* records to it, and rereading all records from it, such that the join input {@code KStream} is partitioned
* correctly on its key.
* <p>
* Both of the joining {@code KStream}s will be materialized in local state stores with auto-generated store names.
* For failure and recovery each store will be backed by an internal changelog topic that will be created in Kafka.
* The changelog topic will be named "${applicationId}-storeName-changelog", where "applicationId" is user-specified
* in {@link StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG},
* "storeName" is an internally generated name, and "-changelog" is a fixed suffix.
* You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
*
* @param otherStream the { @code KStream} to be joined with this stream
* @param joiner a { @link ValueJoiner} that computes the join result for a pair of matching records
* @param windows the specification of the { @link JoinWindows}
* @tparam V1 the value type of the other stream
* @tparam R the value type of the result stream
* @return a { @code KStream} that contains join-records for each key and values computed by the given
* { @link ValueJoiner}, one for each matched record-pair with the same key plus one for each non-matching record of
* this { @code KStream} and within the joining window intervals
* @see #join(KStream, ValueJoiner, JoinWindows)
* @see #outerJoin(KStream, ValueJoiner, JoinWindows)
*/
def leftJoin[V1, R](otherStream: TKStream[K, V1],
joiner: (V, V1) => R,
windows: JoinWindows)(implicit keySerde: Serde[K], thisValueSerde: Serde[V],
otherValueSerde: Serde[V1]): TKStream[K, R] =
streamToTypesafe(source.leftJoin(otherStream.source, new ValueJoiner[V, V1, R] {
override def apply(value1: V, value2: V1) = joiner(value1, value2)
}, windows, keySerde, thisValueSerde, otherValueSerde))
def leftJoin[V1, V2](table: KTable[K, V1],
joiner: (V, V1) => V2)
(implicit keySerde: Serde[K], valSerde: Serde[V]): TKStream[K, V2] =
streamToTypesafe(source.leftJoin(table, new ValueJoiner[V, V1, V2] {
override def apply(value1: V, value2: V1): V2 = joiner(value1, value2)
}, keySerde, valSerde))
/**
   * Join records of this stream with another {@code KStream}'s records using windowed outer equi join with default
* serializers and deserializers.
* In contrast to {@link #join(KStream, ValueJoiner, JoinWindows) inner-join} or
* {@link #leftJoin(KStream, ValueJoiner, JoinWindows) left-join}, all records from both streams will produce at
* least one output record (cf. below).
* The join is computed on the records' key with join attribute {@code thisKStream.key == otherKStream.key}.
* Furthermore, two records are only joined if their timestamps are close to each other as defined by the given
* {@link JoinWindows}, i.e., the window defines an additional join predicate on the record timestamps.
* <p>
* For each pair of records meeting both join predicates the provided {@link ValueJoiner} will be called to compute
* a value (with arbitrary type) for the result record.
* The key of the result record is the same as for both joining input records.
* Furthermore, for each input record of both {@code KStream}s that does not satisfy the join predicate the provided
   * {@link ValueJoiner} will be called with a {@code null} value for this/other stream, respectively.
* If an input record key or value is {@code null} the record will not be included in the join operation and thus no
* output record will be added to the resulting {@code KStream}.
* <p>
* Example (assuming all input records belong to the correct windows):
* <table border='1'>
* <tr>
* <th>this</th>
* <th>other</th>
* <th>result</th>
* </tr>
* <tr>
* <td><K1:A></td>
* <td></td>
* <td><K1:ValueJoiner(A,null)></td>
* </tr>
* <tr>
* <td><K2:B></td>
* <td><K2:b></td>
* <td><K2:ValueJoiner(null,b)><br /><K2:ValueJoiner(B,b)></td>
* </tr>
* <tr>
* <td></td>
* <td><K3:c></td>
* <td><K3:ValueJoiner(null,c)></td>
* </tr>
* </table>
* Both input streams need to be co-partitioned on the join key.
* If this requirement is not met, Kafka Streams will automatically repartition the data, i.e., it will create an
* internal repartitioning topic in Kafka and write and re-read the data via this topic before the actual join.
* The repartitioning topic will be named "${applicationId}-XXX-repartition", where "applicationId" is
* user-specified in {@link StreamsConfig} via parameter
* {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG}, "XXX" is an internally generated name, and
* "-repartition" is a fixed suffix.
* You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
* <p>
* Repartitioning can happen for one or both of the joining {@code KStream}s.
* For this case, all data of the stream will be redistributed through the repartitioning topic by writing all
* records to it, and rereading all records from it, such that the join input {@code KStream} is partitioned
* correctly on its key.
* <p>
* Both of the joining {@code KStream}s will be materialized in local state stores with auto-generated store names.
* For failure and recovery each store will be backed by an internal changelog topic that will be created in Kafka.
* The changelog topic will be named "${applicationId}-storeName-changelog", where "applicationId" is user-specified
* in {@link StreamsConfig} via parameter {@link StreamsConfig#APPLICATION_ID_CONFIG APPLICATION_ID_CONFIG},
* "storeName" is an internally generated name, and "-changelog" is a fixed suffix.
* You can retrieve all generated internal topic names via {@link KafkaStreams#toString()}.
*
* @param otherStream the { @code KStream} to be joined with this stream
* @param joiner a { @link ValueJoiner} that computes the join result for a pair of matching records
* @param windows the specification of the { @link JoinWindows}
* @tparam V1 the value type of the other stream
* @tparam R the value type of the result stream
* @return a { @code KStream} that contains join-records for each key and values computed by the given
* { @link ValueJoiner}, one for each matched record-pair with the same key plus one for each non-matching record of
   * both { @code KStream}s and within the joining window intervals
* @see #join(KStream, ValueJoiner, JoinWindows)
* @see #leftJoin(KStream, ValueJoiner, JoinWindows)
*/
def outerJoin[V1, R](otherStream: TKStream[K, V1], joiner: (V, V1) => R, windows: JoinWindows)(
implicit keySerde: Serde[K],
thisValueSerde: Serde[V],
otherValueSerde: Serde[V1]): TKStream[K, R] = streamToTypesafe(source.outerJoin(otherStream.source, new ValueJoiner[V, V1, R] {
override def apply(value1: V, value2: V1) = joiner(value1, value2)
}, windows, keySerde, thisValueSerde, otherValueSerde))
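  // Hedged usage sketch (added illustration, not part of the original source): reconciling
  // payments and shipments while keeping unmatched records from both sides, with null standing
  // in for the missing side as described above. Assumes TKStream[String, String] instances
  // `payments` and `shipments`, an implicit Serde[String] in scope, and the
  // JoinWindows.of(millis) factory of the targeted Kafka Streams generation.
  //
  //   val reconciled: TKStream[String, String] =
  //     payments.outerJoin(shipments,
  //       (payment: String, shipment: String) => s"$payment | $shipment",
  //       JoinWindows.of(10 * 60 * 1000L))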
}
object TKStream {
private implicit def streamToTypesafe[I, J](source: KStream[I, J]): TKStream[I, J] = new TKStream[I, J](source)
private implicit def groupedStreamToTypesafe[I, J](source: KGroupedStream[I, J]): TKGroupedStream[I, J] = new TKGroupedStream(source)
} | ogirardot/typesafe-kafka-streams | src/main/scala/fr/psug/kafka/streams/TKStream.scala | Scala | mit | 34,627 |
package io.mpjsons.impl.deserializer.mutables
import io.mpjsons.impl.deserializer.jsontypes.AbstractJsonArrayDeserializer
import io.mpjsons.impl.util.{Context, ObjectConstructionUtil, TypesUtil}
import io.mpjsons.impl.{DeserializerFactory, StringIterator}
import scala.collection.mutable.ArrayBuffer
import scala.reflect.runtime.universe._
/**
* @author Marcin Pieciukiewicz
*/
class ArrayDeserializer[E](deserializerFactory: DeserializerFactory, tpe: Type, context: Context)
extends AbstractJsonArrayDeserializer[E, Array[E]](deserializerFactory, tpe, context) {
override def deserialize(jsonIterator: StringIterator): Array[E] = {
toArray(deserializeArray(jsonIterator, tpe))
}
private def toArray(buffer: ArrayBuffer[E]): Array[E] = {
if (buffer.isEmpty) {
ObjectConstructionUtil.createArrayInstance[E](TypesUtil.getClassFromType[E](elementsType), 0)
} else {
val arrayt: Any = ObjectConstructionUtil.createArrayInstance[Any](TypesUtil.getClassFromType[Any](elementsType), buffer.size)
val array = arrayt.asInstanceOf[Array[E]]
var list = buffer.toList
var p = 0
while (list.nonEmpty) {
java.lang.reflect.Array.set(array, p, list.head)
list = list.tail
p = p + 1
}
array
}
}
}
| marpiec/mpjsons | src/main/scala/io/mpjsons/impl/deserializer/mutables/ArrayDeserializer.scala | Scala | apache-2.0 | 1,290 |
package org.jetbrains.plugins.scala
package lang
package autoImport
import com.intellij.openapi.project.Project
import com.intellij.openapi.util.io.FileUtil
import com.intellij.openapi.util.text.StringUtil
import com.intellij.openapi.vfs.{CharsetToolkit, LocalFileSystem}
import com.intellij.psi.util.PsiTreeUtil.getParentOfType
import com.intellij.psi.{PsiClass, PsiPackage, SmartPointerManager}
import org.jetbrains.plugins.scala.autoImport.quickFix.ScalaImportTypeFix
import org.jetbrains.plugins.scala.base.ScalaLightPlatformCodeInsightTestCaseAdapter
import org.jetbrains.plugins.scala.lang.lexer.ScalaTokenTypes
import org.jetbrains.plugins.scala.lang.psi.api.ScalaFile
import org.jetbrains.plugins.scala.lang.psi.api.base.ScReference
import org.jetbrains.plugins.scala.lang.psi.api.statements.ScTypeAlias
import org.junit.Assert._
import java.io.File
import scala.annotation.nowarn
/**
* User: Alexander Podkhalyuzin
* Date: 15.03.2009
*/
@nowarn("msg=ScalaLightPlatformCodeInsightTestCaseAdapter")
abstract class AutoImportTestBase extends ScalaLightPlatformCodeInsightTestCaseAdapter with ScalaFiles {
private val refMarker = "/*ref*/" // todo to be replaced with <caret>
protected def folderPath = baseRootPath + "autoImport/"
protected override def sourceRootPath: String = folderPath
// todo configureBy* should be called instead
protected def doTest(): Unit = {
val filePath = folderPath + getTestName(false) + ".scala"
val file = LocalFileSystem.getInstance.refreshAndFindFileByPath(filePath.replace(File.separatorChar, '/'))
assertNotNull("file " + filePath + " not found", file)
var fileText = StringUtil.convertLineSeparators(FileUtil.loadFile(new File(file.getCanonicalPath), CharsetToolkit.UTF8))
val offset = fileText.indexOf(refMarker)
fileText = fileText.replace(refMarker, "")
configureFromFileTextAdapter(getTestName(false) + "." + fileType.getDefaultExtension, fileText)
val scalaFile = getFileAdapter.asInstanceOf[ScalaFile]
assertNotEquals(s"Not specified ref marker in test case. Use $refMarker in scala file for this.", offset, -1)
val ref = getParentOfType(scalaFile.findElementAt(offset), classOf[ScReference])
assertNotNull("Not specified reference at marker.", ref)
ref.resolve() match {
case null =>
case _ => fail("Reference must be unresolved.")
}
implicit val project: Project = getProjectAdapter
val refPointer = SmartPointerManager.getInstance(project).createSmartPsiElementPointer(ref)
val fix = ScalaImportTypeFix(ref)
val classes = fix.elements
assertFalse("Element to import not found", classes.isEmpty)
checkNoResultsIfExcluded(ref, classes.map(_.qualifiedName))
classes.map(_.element).foreach {
case _: PsiClass | _: ScTypeAlias | _: PsiPackage =>
case element => fail(s"Class, alias or package is expected, found: $element")
}
var res: String = null
val lastPsi = scalaFile.findElementAt(scalaFile.getTextLength - 1)
try {
val action = fix.createAddImportAction(getEditorAdapter)
action.addImportTestOnly(classes.head)
      res = scalaFile.getText.substring(0, lastPsi.getTextOffset).trim // getImportStatements.map(_.getText()).mkString("\n")
assertNotNull("reference is unresolved after import action", refPointer.getElement.resolve)
}
catch {
case e: Exception =>
println(e)
        fail(e.getMessage + "\n" + e.getStackTrace.mkString("Array(", ", ", ")"))
}
val text = lastPsi.getText
val output = lastPsi.getNode.getElementType match {
case ScalaTokenTypes.tLINE_COMMENT => text.substring(2).trim
case ScalaTokenTypes.tBLOCK_COMMENT | ScalaTokenTypes.tDOC_COMMENT =>
text.substring(2, text.length - 2).trim
case _ =>
assertTrue("Test result must be in last comment statement.", false)
""
}
assertEquals(output, res)
}
private def checkNoResultsIfExcluded(ref: ScReference, excludedNames: Seq[String]): Unit = {
ImportElementFixTestBase.withExcluded(getProject, excludedNames) {
val newFix = ScalaImportTypeFix(ref)
val foundElements = newFix.elements
assertTrue(
s"$excludedNames excluded, but something was found",
foundElements.isEmpty
)
}
}
} | JetBrains/intellij-scala | scala/scala-impl/test/org/jetbrains/plugins/scala/lang/autoImport/AutoImportTestBase.scala | Scala | apache-2.0 | 4,299 |
/**
* This file is part of the TA Buddy project.
* Copyright (c) 2014 Alexey Aksenov [email protected]
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU Affero General Global License version 3
* as published by the Free Software Foundation with the addition of the
* following permission added to Section 15 as permitted in Section 7(a):
* FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED
* BY Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS»,
* Limited Liability Company «MEZHGALAKTICHESKIJ TORGOVYJ ALIANS» DISCLAIMS
* THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
* or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU Affero General Global License for more details.
* You should have received a copy of the GNU Affero General Global License
* along with this program; if not, see http://www.gnu.org/licenses or write to
* the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
* Boston, MA, 02110-1301 USA, or download the license from the following URL:
* http://www.gnu.org/licenses/agpl.html
*
* The interactive user interfaces in modified source and object code versions
* of this program must display Appropriate Legal Notices, as required under
* Section 5 of the GNU Affero General Global License.
*
* In accordance with Section 7(b) of the GNU Affero General Global License,
* you must retain the producer line in every report, form or document
* that is created or manipulated using TA Buddy.
*
* You can be released from the requirements of the license by purchasing
* a commercial license. Buying such a license is mandatory as soon as you
* develop commercial activities involving the TA Buddy software without
* disclosing the source code of your own applications.
* These activities include: offering paid services to customers,
* serving files in a web or/and network application,
* shipping TA Buddy with a closed source product.
*
* For more information, please contact Digimead Team at this
* address: [email protected]
*/
package org.digimead.tabuddy.desktop.model.editor.ui.view.editor.bar.element
import javax.inject.Inject
import org.digimead.digi.lib.aop.log
import org.digimead.digi.lib.log.api.XLoggable
import org.digimead.tabuddy.desktop.core.definition.Context
import org.digimead.tabuddy.desktop.core.support.App
import org.digimead.tabuddy.desktop.core.ui.definition.widget.VComposite
import org.digimead.tabuddy.desktop.core.{ Messages ⇒ CMessages }
import org.digimead.tabuddy.desktop.logic.payload.marker.GraphMarker
import org.eclipse.e4.core.di.annotations.Optional
import org.eclipse.jface.action.{ Action, IAction }
import scala.language.implicitConversions
/**
* 'New' action for an element bar.
*/
class New @Inject() (context: Context) extends Action(CMessages.new_text) with XLoggable {
/** Akka execution context. */
implicit lazy val ec = App.system.dispatcher
if (context.getParent().getLocal(classOf[VComposite]) == null)
throw new IllegalArgumentException(s"Parent of ${context} does not contain VComposite.")
override def isEnabled(): Boolean = super.isEnabled &&
context.get(classOf[GraphMarker]) != null
/** Runs this action, passing the triggering SWT event. */
@log
override def run = for {
composite ← Option(context.get(classOf[VComposite]))
marker ← Option(context.get(classOf[GraphMarker]))
} {
// invoke new
}
/** Update enabled action state. */
protected def updateEnabled() = if (isEnabled)
firePropertyChange(IAction.ENABLED, java.lang.Boolean.FALSE, java.lang.Boolean.TRUE)
else
firePropertyChange(IAction.ENABLED, java.lang.Boolean.TRUE, java.lang.Boolean.FALSE)
/** Invoked on marker modification. */
@Inject @Optional
protected def onMarkerChanged(@Optional marker: GraphMarker): Unit = App.exec { updateEnabled() }
}
| digimead/digi-TABuddy-desktop | part-model-editor/src/main/scala/org/digimead/tabuddy/desktop/model/editor/ui/view/editor/bar/element/New.scala | Scala | agpl-3.0 | 4,053 |
/*
* Copyright 2015 Heiko Seeberger
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package de.heikoseeberger.reactiveflows
import akka.actor.{ Address, RootActorPath, Terminated, Actor, ActorIdentity, ActorLogging, ActorRef, Identify, Props, ReceiveTimeout }
import akka.cluster.ClusterEvent.{ InitialStateAsEvents, MemberUp }
import akka.cluster.{ Cluster, Member }
import akka.persistence.journal.leveldb.{ SharedLeveldbStore, SharedLeveldbJournal }
import java.nio.file.Paths
import scala.concurrent.duration.{ Duration, DurationInt }
object SharedJournalManager {
final val Name = "shared-journal-manager"
final val SharedJournal = "shared-journal"
def props: Props = Props(new SharedJournalManager)
}
class SharedJournalManager extends Actor with ActorLogging {
import SharedJournalManager._
Cluster(context.system).state.members.toList.sortWith(_.isOlderThan(_)).headOption.map(_.address) match {
case Some(address) if address == Cluster(context.system).selfAddress => startSharedJournal()
case Some(address) => identifySharedJournal(address)
case None => onInvalidClusterState()
}
override def receive = {
case ActorIdentity(_, Some(sharedJournal)) => onSharedJournalIdentified(sharedJournal)
case ActorIdentity(_, None) => onSharedJournalNotIdentified()
case ReceiveTimeout => onSharedJournalReceiveTimeout()
}
private def watching: Receive = {
case Terminated(_) => onSharedJournalTerminated()
}
private def startSharedJournal() = {
deleteDir(Paths.get(
context.system.settings.config.getString("akka.persistence.journal.leveldb-shared.store.dir")
))
val sharedJournal = context.watch(context.actorOf(Props(new SharedLeveldbStore), SharedJournal))
SharedLeveldbJournal.setStore(sharedJournal, context.system)
log.debug("Started shared journal {}", sharedJournal)
}
private def identifySharedJournal(address: Address) = {
val sharedJournal = context.actorSelection(RootActorPath(address) / "user" / ReactiveFlows.Name / Name / SharedJournal)
sharedJournal ! Identify(None)
context.setReceiveTimeout(10 seconds)
}
private def onInvalidClusterState() = {
log.error("Invalid cluster state: There must at least be one member!")
context.stop(self)
}
private def onSharedJournalIdentified(sharedJournal: ActorRef) = {
SharedLeveldbJournal.setStore(sharedJournal, context.system)
log.debug("Succssfully set shared journal {}", sharedJournal)
context.watch(sharedJournal)
context.setReceiveTimeout(Duration.Undefined)
context.become(watching)
}
private def onSharedJournalNotIdentified() = {
log.error("Can't identify shared journal!")
context.stop(self)
}
private def onSharedJournalReceiveTimeout() = {
log.error("Timeout identifying shared journal!")
context.stop(self)
}
private def onSharedJournalTerminated() = {
log.error("Shared journal terminated!")
context.stop(self)
}
}
| jnickels/reactive-flows | src/main/scala/de/heikoseeberger/reactiveflows/SharedJournalManager.scala | Scala | apache-2.0 | 3,629 |
package skinny.engine.test
import scala.language.implicitConversions
/**
* Contains implicit conversions for making test DSL easier
* to use. This is included by all `Client` implementations.
*/
trait ImplicitConversions {
implicit def stringToByteArray(str: String): Array[Byte] = str.getBytes("UTF-8")
}
| holycattle/skinny-framework | engine-test/src/main/scala/skinny/engine/test/ImplicitConversions.scala | Scala | mit | 315 |
/*
* Copyright (c) 2012, 2013, 2014, 2015, 2016 SURFnet BV
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted provided that the
* following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this list of conditions and the following
* disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided with the distribution.
* * Neither the name of the SURFnet BV nor the names of its contributors may be used to endorse or promote products
* derived from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
package controllers
import java.net.URI
import javax.inject._
import akka.actor._
import nl.surfnet.nsiv2.messages.CorrelationId
import nl.surfnet.nsiv2.utils._
import nl.surfnet.safnari._
import java.time.Instant
import play.api.Logger
import play.api.http.ContentTypes._
import play.api.http.HeaderNames._
import play.api.http.Status._
import play.api.libs.json._
import play.api.libs.ws.WSClient
import play.api.mvc._
import scala.concurrent.{ ExecutionContext, Future }
import scala.concurrent.duration._
import scala.concurrent.stm.Ref
import scala.util.{Failure, Success}
@Singleton
class PathComputationEngineController @Inject()(configuration: Configuration, pce: PathComputationEngine)(implicit ec: ExecutionContext) extends InjectedController {
def pceReply = Action(parse.json) { implicit request =>
Json.fromJson[PceResponse](request.body) match {
case JsSuccess(response, _) =>
Logger.info(s"Pce reply: $response")
pce.pceContinuations.replyReceived(response.correlationId, response)
Ok
case JsError(error) =>
Logger.info(s"Pce error: $error body: ${request.body}")
BadRequest
}
}
}
@Singleton
class PathComputationEngine @Inject()(actorSystem: ActorSystem, ws: WSClient)(implicit ec: ExecutionContext) {
private[controllers] val pceContinuations = new Continuations[PceResponse](actorSystem.scheduler)
def createPceRequesterActor(configuration: Configuration): ActorRef =
configuration.PceActor match {
case None | Some("dummy") => actorSystem.actorOf(Props(new DummyPceRequesterActor), "pceRequester")
case _ => actorSystem.actorOf(Props(new PceRequesterActor(configuration)), "pceRequester")
}
class PceRequesterActor(configuration: Configuration) extends Actor {
private val uuidGenerator = Uuid.randomUuidGenerator()
private def newCorrelationId() = CorrelationId.fromUuid(uuidGenerator())
private val latestReachabilityEntries: LastModifiedCache[Seq[ReachabilityTopologyEntry]] = new LastModifiedCache()
private val endPoint = configuration.PceEndpoint
def receive = {
case 'healthCheck =>
val topologyHealth = ws.url(s"$endPoint/management/status/topology").addHttpHeaders(ACCEPT -> JSON).get()
topologyHealth onComplete {
case Success(_) => // nothing
case Failure(e) => Logger.warn(s"Failed to access PCE topology service: $e")
}
val lastModified = topologyHealth map { _.header("Last-Modified").getOrElse("unknown") }
val healthy = topologyHealth.map(_.status == 200).recover { case t => false }
sender ! healthy.flatMap(h => lastModified.map(d => s"PCE (Real; $d)" -> h))
case 'reachability =>
val reachabilityResponse = ws.url(s"$endPoint/reachability").withRequestTimeout(Duration(20000, MILLISECONDS)).addHttpHeaders(ACCEPT -> JSON).get()
val senderRef = sender
reachabilityResponse.onComplete {
case Success(response) =>
            val result = (response.json \ "reachability").validate[Seq[ReachabilityTopologyEntry]] match {
case JsSuccess(reachability, _) =>
Success(latestReachabilityEntries.updateAndGet(reachability))
case JsError(e) =>
Logger.error(s"Failed to parse reachability from the pce: $e")
latestReachabilityEntries.get.toTry(new RuntimeException("Could not parse reachability"))
}
senderRef ! result
case Failure(e) =>
Logger.error("Failed to retrieve reachability from the pce", e)
senderRef ! latestReachabilityEntries.get.toTry(e)
}
case ToPce(request) =>
val findPathEndPoint = s"$endPoint/paths/find"
Logger.info(s"Sending request to pce ($findPathEndPoint): ${Json.toJson(request)}")
val connection = Connection(sender)
pceContinuations.register(request.correlationId, configuration.AsyncReplyTimeout).onComplete {
case Success(reply) =>
connection ! Connection.Command(Instant.now, FromPce(reply))
case Failure(e) =>
connection ! Connection.Command(Instant.now, MessageDeliveryFailure(newCorrelationId(), None, request.correlationId, URI.create(findPathEndPoint), Instant.now(), e.toString))
}
val response = ws.url(findPathEndPoint).post(Json.toJson(request))
response onComplete {
case Failure(e) =>
Logger.error(s"Could not reach the pce ($endPoint): $e")
pceContinuations.unregister(request.correlationId)
connection ! Connection.Command(Instant.now, MessageDeliveryFailure(newCorrelationId(), None, request.correlationId, URI.create(findPathEndPoint), Instant.now(), e.toString))
case Success(response) if response.status == ACCEPTED =>
connection ! Connection.Command(Instant.now, AckFromPce(PceAccepted(request.correlationId)))
case Success(response) =>
Logger.error(s"Got back a ${response.status} response from the PCE: ${response.body}")
pceContinuations.unregister(request.correlationId)
connection ! Connection.Command(Instant.now, AckFromPce(PceFailed(request.correlationId, response.status, response.statusText, response.body)))
}
}
class LastModifiedCache[T] {
private val value = Ref(None: Option[(T, Instant)])
def get: Option[(T, Instant)] = value.single()
def updateAndGet(newSubject: T): (T, Instant) =
value.single.transformAndGet {
case Some(unchanged@(old, _)) if old == newSubject => Some(unchanged)
case _ => Some(newSubject -> Instant.now)
}.get
}
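    // Hedged usage note (added illustration, not part of the original source): updateAndGet
    // only refreshes the timestamp when the cached value actually changes, so repeated
    // identical reachability responses keep their original Last-Modified instant.
    //
    //   val cache = new LastModifiedCache[Int]
    //   val (v1, t1) = cache.updateAndGet(42)
    //   val (v2, t2) = cache.updateAndGet(42) // same value => t2 == t1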
}
class DummyPceRequesterActor extends Actor {
private val Reachability = (
List(
ReachabilityTopologyEntry("urn:ogf:network:surfnet.nl:1990:nsa:bod-dev", 0),
ReachabilityTopologyEntry("urn:ogf:network:es.net:2013:nsa:oscars", 3)),
Instant.now())
def receive = {
case 'healthCheck =>
sender ! Future.successful("PCE (Dummy)" -> true)
case 'reachability =>
sender ! Success(Reachability)
case ToPce(pce: PathComputationRequest) =>
val serviceType = Json.fromJson[ServiceType](Json.toJson(pce.serviceType)(PceMessage.ServiceTypeFormat))(PceMessage.ServiceTypeFormat).get
Connection(sender) ! Connection.Command(Instant.now,
FromPce(PathComputationConfirmed(
pce.correlationId,
Seq(ComputedSegment(
ProviderEndPoint("urn:ogf:network:surfnet.nl:1990:nsa:bod-dev", URI.create("http://localhost:8082/bod/nsi/v2/provider")),
serviceType)))))
}
}
}
| BandwidthOnDemand/nsi-safnari | app/controllers/PathComputationEngine.scala | Scala | bsd-3-clause | 8,305 |
package fr.hmil.roshttp.body
import java.nio.ByteBuffer
import monix.reactive.Observable
trait BodyPart {
def contentType: String
def content: Observable[ByteBuffer]
}
| hmil/RosHTTP | shared/src/main/scala/fr/hmil/roshttp/body/BodyPart.scala | Scala | mit | 175 |
package jp.ijufumi.openreports
/**
* Job Workers
*
* @see http://skinny-framework.org/documentation/worker_jobs.html
*/
package object worker {}
| ijufumi/openreports_scala | src/main/scala/jp/ijufumi/openreports/worker/package.scala | Scala | mit | 154 |
// Copyright (c) Microsoft Corporation. All rights reserved.
// Licensed under the MIT License.
package com.azure.cosmos.spark.diagnostics
import java.util.UUID
private[spark] case class DiagnosticsContext(correlationActivityId: UUID, details: String)
| Azure/azure-sdk-for-java | sdk/cosmos/azure-cosmos-spark_3_2-12/src/main/scala/com/azure/cosmos/spark/diagnostics/DiagnosticsContext.scala | Scala | mit | 255 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.aliyun.emr.examples.sql.streaming
import java.util.UUID
import org.apache.spark.sql.{Dataset, SaveMode, SparkSession}
import org.apache.spark.sql.types._
object StructuredLoghubSinkHive {
def main(args: Array[String]) {
if (args.length < 9) {
// scalastyle:off
System.err.println("Usage: StructuredLoghubSinkHive <logService-project> " +
"<logService-store> <access-key-id> <access-key-secret> <endpoint> " +
"<starting-offsets> <database> <table> <max-offsets-per-trigger> [<checkpoint-location>]")
// scalastyle:on
System.exit(1)
}
val Array(project, logStore, accessKeyId, accessKeySecret, endpoint, startingOffsets,
db, table, maxOffsetsPerTrigger, _*) = args
val checkpointLocation =
if (args.length > 9) args(9) else "/tmp/temporary-" + UUID.randomUUID.toString
val spark = SparkSession
.builder
.enableHiveSupport()
.appName("StructuredLoghubSinkHive")
.getOrCreate()
spark.sparkContext.setLogLevel("WARN")
import spark.implicits._
// Create DataSet representing the stream of input lines from loghub
val schema = new StructType(
Array(
new StructField("__shard__", IntegerType),
new StructField("__time__", TimestampType),
new StructField("content", StringType)))
val lines = spark
.readStream
.format("loghub")
.schema(schema)
.option("sls.project", project)
.option("sls.store", logStore)
.option("access.key.id", accessKeyId)
.option("access.key.secret", accessKeySecret)
.option("endpoint", endpoint)
.option("startingoffsets", startingOffsets)
.option("maxOffsetsPerTrigger", maxOffsetsPerTrigger)
.load()
.select("content")
.as[String]
val query = lines.writeStream
.outputMode("append")
.foreachBatch { (ds: Dataset[String], epochId: Long) =>
ds.limit(10).write.mode(SaveMode.Append).insertInto(s"$db.$table")
}
.option("checkpointLocation", checkpointLocation)
.start()
query.awaitTermination()
}
}
| aliyun/aliyun-emapreduce-sdk | examples/src/main/scala/com/aliyun/emr/examples/sql/streaming/StructuredLoghubSinkHive.scala | Scala | artistic-2.0 | 2,915 |
package com.linecorp.armeria.server.scalapb
import armeria.scalapb.hello.HelloServiceGrpc.{HelloServiceBlockingStub, HelloServiceStub}
import armeria.scalapb.hello._
import com.google.common.base.Stopwatch
import com.linecorp.armeria.client.grpc.GrpcClients
import com.linecorp.armeria.common.SerializationFormat
import com.linecorp.armeria.common.grpc.GrpcSerializationFormats
import com.linecorp.armeria.common.scalapb.ScalaPbJsonMarshaller
import com.linecorp.armeria.server.ServerBuilder
import com.linecorp.armeria.server.grpc.GrpcService
import com.linecorp.armeria.server.scalapb.HelloServiceImpl.toMessage
import com.linecorp.armeria.server.scalapb.HelloServiceTest.{GrpcSerializationProvider, newClient}
import com.linecorp.armeria.testing.junit5.server.ServerExtension
import io.grpc.stub.StreamObserver
import java.util.concurrent.TimeUnit
import java.util.stream
import org.assertj.core.api.Assertions.assertThat
import org.awaitility.Awaitility.await
import org.junit.jupiter.api.BeforeAll
import org.junit.jupiter.api.extension.ExtensionContext
import org.junit.jupiter.params.ParameterizedTest
import org.junit.jupiter.params.provider.{Arguments, ArgumentsProvider, ArgumentsSource}
import scala.concurrent.duration.Duration
import scala.concurrent.{Await, ExecutionContext}
import scala.reflect.ClassTag
class HelloServiceTest {
@ArgumentsSource(classOf[GrpcSerializationProvider])
@ParameterizedTest
def getReply(serializationFormat: SerializationFormat): Unit = {
val helloService = newClient[HelloServiceBlockingStub](serializationFormat)
assertThat(helloService.hello(HelloRequest("Armeria")).message).isEqualTo("Hello, Armeria!")
}
@ArgumentsSource(classOf[GrpcSerializationProvider])
@ParameterizedTest
def replyWithDelay(serializationFormat: SerializationFormat): Unit = {
val helloService = newClient[HelloServiceStub](serializationFormat)
val reply: HelloReply = Await.result(helloService.lazyHello(HelloRequest("Armeria")), Duration.Inf)
assertThat(reply.message).isEqualTo("Hello, Armeria!")
}
@ArgumentsSource(classOf[GrpcSerializationProvider])
@ParameterizedTest
def replyFromServerSideBlockingCall(serializationFormat: SerializationFormat): Unit = {
val helloService = newClient[HelloServiceStub](serializationFormat)
val watch = Stopwatch.createStarted()
val reply: HelloReply = Await.result(helloService.blockingHello(HelloRequest("Armeria")), Duration.Inf)
assertThat(reply.message).isEqualTo("Hello, Armeria!")
assertThat(watch.elapsed(TimeUnit.SECONDS)).isGreaterThanOrEqualTo(3)
}
@ArgumentsSource(classOf[GrpcSerializationProvider])
@ParameterizedTest
def lotsOfReplies(serializationFormat: SerializationFormat): Unit = {
var completed = false
val helloService = newClient[HelloServiceStub](serializationFormat)
helloService.lotsOfReplies(
HelloRequest("Armeria"),
new StreamObserver[HelloReply]() {
private var sequence = 0
override def onNext(value: HelloReply): Unit = {
sequence += 1
assertThat(value.message).isEqualTo(s"Hello, Armeria! (sequence: $sequence)")
}
override def onError(t: Throwable): Unit =
// Should never reach here.
throw new Error(t)
override def onCompleted(): Unit = {
assertThat(sequence).isEqualTo(5)
completed = true
}
}
)
await().untilAsserted(() => assertThat(completed).isTrue())
}
@ArgumentsSource(classOf[GrpcSerializationProvider])
@ParameterizedTest
def sendLotsOfGreetings(serializationFormat: SerializationFormat): Unit = {
val names = List("Armeria", "Grpc", "Streaming")
var completed = false
val helloService = newClient[HelloServiceStub](serializationFormat)
val request = helloService.lotsOfGreetings(new StreamObserver[HelloReply]() {
private var received = false
override def onNext(value: HelloReply): Unit = {
assertThat(received).isFalse()
received = true
assertThat(value.message).isEqualTo(toMessage(names.mkString(", ")))
}
override def onError(t: Throwable): Unit =
// Should never reach here.
throw new Error(t)
override def onCompleted(): Unit = {
assertThat(received).isTrue()
completed = true
}
})
for (name <- names)
request.onNext(HelloRequest(name))
request.onCompleted()
await().untilAsserted(() => assertThat(completed).isTrue())
}
@ArgumentsSource(classOf[GrpcSerializationProvider])
@ParameterizedTest
def bidirectionalHello(serializationFormat: SerializationFormat): Unit = {
val names = List("Armeria", "Grpc", "Streaming")
var completed = false
val helloService = newClient[HelloServiceStub](serializationFormat)
val request = helloService.bidiHello(new StreamObserver[HelloReply]() {
private var received = 0
override def onNext(value: HelloReply): Unit = {
assertThat(value.message).isEqualTo(toMessage(names(received)))
received += 1
}
override def onError(t: Throwable): Unit =
// Should never reach here.
throw new Error(t)
override def onCompleted(): Unit = {
assertThat(received).isEqualTo(names.length)
completed = true
}
})
for (name <- names)
request.onNext(HelloRequest(name))
request.onCompleted()
await().untilAsserted(() => assertThat(completed).isTrue())
}
@ArgumentsSource(classOf[GrpcSerializationProvider])
@ParameterizedTest
def oneof(serializationFormat: SerializationFormat): Unit = {
val oneof: Add = Add(Literal(1), Literal(2))
val helloService = newClient[HelloServiceStub](serializationFormat)
val actual = helloService.oneof(oneof)
val res = Await.result(actual, Duration.Inf)
assertThat(res).isEqualTo(oneof)
}
}
object HelloServiceTest {
var server: ServerExtension = new ServerExtension() {
override protected def configure(sb: ServerBuilder): Unit =
sb.service(
GrpcService
.builder()
.addService(HelloServiceGrpc.bindService(new HelloServiceImpl, ExecutionContext.global))
.supportedSerializationFormats(GrpcSerializationFormats.values)
.jsonMarshallerFactory(_ => ScalaPbJsonMarshaller())
.enableUnframedRequests(true)
.build()
)
}
private def newClient[A](serializationFormat: SerializationFormat = GrpcSerializationFormats.PROTO)(implicit
tag: ClassTag[A]): A = {
GrpcClients
.builder(server.httpUri(serializationFormat))
.jsonMarshallerFactory(_ => ScalaPbJsonMarshaller())
.build(tag.runtimeClass)
.asInstanceOf[A]
}
@BeforeAll
def beforeClass(): Unit =
server.start()
private class GrpcSerializationProvider extends ArgumentsProvider {
override def provideArguments(context: ExtensionContext): stream.Stream[_ <: Arguments] =
GrpcSerializationFormats
.values()
.stream()
.map(Arguments.of(_))
}
}
| line/armeria | scalapb/scalapb_2.13/src/test/scala/com/linecorp/armeria/server/scalapb/HelloServiceTest.scala | Scala | apache-2.0 | 7,053 |
package hydrograph.engine.spark.components
import hydrograph.engine.core.component.entity.UniqueSequenceEntity
import hydrograph.engine.core.component.utils.OperationUtils
import hydrograph.engine.spark.components.base.OperationComponentBase
import hydrograph.engine.spark.components.handler.OperationHelper
import hydrograph.engine.spark.components.platform.BaseComponentParams
import hydrograph.engine.transformation.userfunctions.base.TransformBase
import org.apache.spark.sql.functions._
import org.apache.spark.sql.{Column, DataFrame}
import org.slf4j.LoggerFactory
import scala.collection.JavaConversions._
import scala.collection.JavaConverters._
/**
* The Class UniqueSequenceComponent.
*
* @author Bitwise
*
*/
class UniqueSequenceComponent(uniqueSequenceEntity: UniqueSequenceEntity, baseComponentParams: BaseComponentParams) extends OperationComponentBase with OperationHelper[TransformBase] with Serializable {
val LOG = LoggerFactory.getLogger(classOf[UniqueSequenceComponent])
/**
* These method creates spark component for generating unique sequence
*
* @return Map[String, DataFrame]
*/
override def createComponent(): Map[String, DataFrame] = {
LOG.trace(uniqueSequenceEntity.toString)
try {
val passThroughFields = OperationUtils.getPassThrougFields(uniqueSequenceEntity.getOutSocketList.get(0).getPassThroughFieldsList, baseComponentParams.getDataFrame().schema.map(_.name)).asScala.toArray[String]
val inputColumn = new Array[Column](passThroughFields.size)
passThroughFields.zipWithIndex.foreach(
passThroughField => {
inputColumn(passThroughField._2) = column(passThroughField._1)
})
val operationField = uniqueSequenceEntity.getOperation.getOperationOutputFields.get(0)
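      // Note: monotonically_increasing_id() yields IDs that are unique and increasing,
      // but not necessarily consecutive across partitions.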
val df = baseComponentParams.getDataFrame().select(inputColumn: _*).withColumn(operationField, monotonically_increasing_id())
val outSocketId = uniqueSequenceEntity.getOutSocketList.get(0).getSocketId
      LOG.info("Created Unique Sequence component " + uniqueSequenceEntity.getComponentId
        + " in batch " + uniqueSequenceEntity.getBatch)
Map(outSocketId -> df)
} catch {
case ex: Exception =>
LOG.error("Error in Unique Sequence component " + uniqueSequenceEntity.getComponentId, ex)
        throw new RuntimeException("Error in Unique Sequence component", ex)
}
}
}
| capitalone/Hydrograph | hydrograph.engine/hydrograph.engine.spark/src/main/scala/hydrograph/engine/spark/components/UniqueSequenceComponent.scala | Scala | apache-2.0 | 2,413 |
/*
* Scala.js (https://www.scala-js.org/)
*
* Copyright EPFL.
*
* Licensed under Apache License 2.0
* (https://www.apache.org/licenses/LICENSE-2.0).
*
* See the NOTICE file distributed with this work for
* additional information regarding copyright ownership.
*/
package org.scalajs.testsuite.javalib.util
import java.{util => ju}
import java.util.Map.Entry
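/**
 * Minimal immutable `java.util.Map` test fixture: only `entrySet()` is implemented,
 * and `ju.AbstractMap` derives the remaining read operations from it.
 * Built via the companion, e.g. `TrivialImmutableMap("a" -> 1, "b" -> 2)`.
 */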
final class TrivialImmutableMap[K, V] private (contents: List[Entry[K, V]])
extends ju.AbstractMap[K, V] {
def entrySet(): ju.Set[Entry[K,V]] = {
new ju.AbstractSet[Entry[K, V]] {
def size(): Int = contents.size
def iterator(): ju.Iterator[Entry[K,V]] = {
new ju.Iterator[Entry[K, V]] {
private var remaining: List[Entry[K, V]] = contents
def hasNext(): Boolean = remaining.nonEmpty
def next(): Entry[K,V] = {
val head = remaining.head
remaining = remaining.tail
head
}
}
}
}
}
}
object TrivialImmutableMap {
def apply[K, V](contents: List[Entry[K, V]]): TrivialImmutableMap[K, V] =
new TrivialImmutableMap(contents)
def apply[K, V](contents: (K, V)*): TrivialImmutableMap[K, V] =
apply(contents.toList.map(kv => new ju.AbstractMap.SimpleImmutableEntry(kv._1, kv._2)))
}
| scala-js/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/javalib/util/TrivialImmutableMap.scala | Scala | apache-2.0 | 1,286 |
package sodium
import java.util.ArrayList
import java.util.Arrays
import org.junit.After
import org.junit.Assert.assertEquals
import org.junit.Test
class CellTester {
import CellTester._
@After def tearDown() {
System.gc()
Thread.sleep(100)
}
@Test
def testHold() {
val e = new StreamSink[Int]()
val out = new ArrayList[Int]()
val l = e.hold(0).updates().listen(out.add(_))
List(2, 9).foreach(e.send(_))
l.unlisten()
assertEquals(Arrays.asList(2, 9), out)
}
@Test
def testSnapshot() {
val b = new CellSink(0)
val trigger = new StreamSink[Long]()
val out = new ArrayList[String]()
val l = trigger.snapshot[Int, String](b, (x, y) => x + " " + y).listen(out.add(_))
trigger.send(100L)
b.send(2)
trigger.send(200L)
b.send(9)
b.send(1)
trigger.send(300L)
l.unlisten()
assertEquals(Arrays.asList("100 0", "200 2", "300 1"), out)
}
@Test
def testValues() {
val b = new CellSink(9)
val out = new ArrayList[Int]()
val l = b.value().listen(out.add(_))
List(2, 7).foreach(b.send(_))
l.unlisten()
assertEquals(Arrays.asList(9, 2, 7), out)
}
@Test
def testConstantBehavior() {
val b = new Cell(12)
val out = new ArrayList[Int]()
val l = b.value().listen(out.add(_))
l.unlisten()
assertEquals(Arrays.asList(12), out)
}
@Test
def testValuesThenMap() {
val b = new CellSink(9)
val out = new ArrayList[Int]()
val l = b.value().map(x => x + 100).listen(out.add(_))
List(2, 7).foreach(b.send(_))
l.unlisten()
assertEquals(Arrays.asList(109, 102, 107), out)
}
@Test
def testValuesTwiceThenMap() {
val b = new CellSink(9)
val out = new ArrayList[Int]()
val l = doubleUp(b.value()).map(x => x + 100).listen(out.add(_))
List(2, 7).foreach(b.send(_))
l.unlisten()
assertEquals(Arrays.asList(109, 109, 102, 102, 107, 107), out)
}
@Test
def testValuesThenCoalesce() {
val b = new CellSink(9)
val out = new ArrayList[Int]()
val l = b.value().coalesce((fst, snd) => snd).listen(out.add(_))
List(2, 7).foreach(b.send(_))
l.unlisten()
assertEquals(Arrays.asList(9, 2, 7), out)
}
@Test
def testValuesTwiceThenCoalesce() {
val b = new CellSink(9)
val out = new ArrayList[Int]()
val l = doubleUp(b.value()).coalesce((fst, snd) => fst + snd).listen(out.add(_))
List(2, 7).foreach(b.send(_))
l.unlisten()
assertEquals(Arrays.asList(18, 4, 14), out)
}
@Test
def testValuesThenSnapshot() {
val bi = new CellSink(9)
val bc = new CellSink('a')
val out = new ArrayList[Character]()
val l = bi.value().snapshot(bc).listen(out.add(_))
bc.send('b')
bi.send(2)
bc.send('c')
bi.send(7)
l.unlisten()
assertEquals(Arrays.asList('a', 'b', 'c'), out)
}
@Test
def testValuesTwiceThenSnapshot() {
val bi = new CellSink(9)
val bc = new CellSink('a')
val out = new ArrayList[Character]()
val l = doubleUp(bi.value()).snapshot(bc).listen(out.add(_))
bc.send('b')
bi.send(2)
bc.send('c')
bi.send(7)
l.unlisten()
assertEquals(Arrays.asList('a', 'a', 'b', 'b', 'c', 'c'), out)
}
@Test
def testValuesThenMerge() {
val bi = new CellSink(9)
val bj = new CellSink(2)
val out = new ArrayList[Int]()
val l = bi.value().merge(bj.value(), (x, y) => x + y).listen(out.add(_))
bi.send(1)
bj.send(4)
l.unlisten()
assertEquals(Arrays.asList(11, 1, 4), out)
}
@Test
def testValuesThenFilter() {
val b = new CellSink(9)
val out = new ArrayList[Int]()
val l = b.value().filter(a => true).listen(out.add(_))
List(2, 7).foreach(b.send(_))
l.unlisten()
assertEquals(Arrays.asList(9, 2, 7), out)
}
@Test
def testValuesTwiceThenFilter() {
val b = new CellSink(9)
val out = new ArrayList[Int]()
val l = doubleUp(b.value()).filter(a => true).listen(out.add(_))
List(2, 7).foreach(b.send(_))
l.unlisten()
assertEquals(Arrays.asList(9, 9, 2, 2, 7, 7), out)
}
@Test
def testValuesThenOnce() {
val b = new CellSink(9)
val out = new ArrayList[Int]()
val l = b.value().once().listen(out.add(_))
List(2, 7).foreach(b.send(_))
l.unlisten()
assertEquals(Arrays.asList(9), out)
}
@Test
def testValuesTwiceThenOnce() {
val b = new CellSink(9)
val out = new ArrayList[Int]()
val l = doubleUp(b.value()).once().listen(out.add(_))
List(2, 7).foreach(b.send(_))
l.unlisten()
assertEquals(Arrays.asList(9), out)
}
@Test
def testValuesLateListen() {
val b = new CellSink(9)
val out = new ArrayList[Int]()
val value = b.value()
b.send(8)
val l = value.listen(out.add(_))
b.send(2)
l.unlisten()
assertEquals(Arrays.asList(8, 2), out)
}
@Test
def testMapB() {
val b = new CellSink(6)
val out = new ArrayList[String]()
val l = b.map(x => x.toString()).value().listen(out.add(_))
b.send(8)
l.unlisten()
assertEquals(Arrays.asList("6", "8"), out)
}
  @Test
  def testMapBLateListen() {
val b = new CellSink(6)
val out = new ArrayList[String]()
val bm = b.map(x => x.toString())
b.send(2)
val l = bm.value().listen(out.add(_))
b.send(8)
l.unlisten()
assertEquals(Arrays.asList("2", "8"), out)
}
@Test
def testTransaction() {
var calledBack = false
Transaction.run(trans => trans.prioritized(Node.NullNode, trans2 => calledBack = true))
assertEquals(true, calledBack)
}
@Test
def testApply() {
val bf = new CellSink[Long => String](b => "1 " + b)
val ba = new CellSink(5L)
val out = new ArrayList[String]()
val l = Cell(bf, ba).value().listen(x => out.add(x))
bf.send(b => "12 " + b)
ba.send(6L)
l.unlisten()
assertEquals(Arrays.asList("1 5", "12 5", "12 6"), out)
}
@Test
def testLift() {
val a = new CellSink(1)
val b = new CellSink(5L)
val out = new ArrayList[String]()
val l = Cell.lift[Int, Long, String]((x, y) => x + " " + y, a, b).value().listen(out.add(_))
a.send(12)
b.send(6L)
l.unlisten()
assertEquals(Arrays.asList("1 5", "12 5", "12 6"), out)
}
@Test
def testLiftGlitch() {
val a = new CellSink(1)
val a3 = a.map(x => x * 3)
val a5 = a.map(x => x * 5)
val out = new ArrayList[String]()
val l = Cell.lift[Int, Int, String]((x, y) => x + " " + y, a3, a5).value().listen(out.add(_))
a.send(2)
l.unlisten()
assertEquals(Arrays.asList("3 5", "6 10"), out)
}
@Test
def testHoldIsDelayed() {
val e = new StreamSink[Int]()
val h = e.hold(0)
val out = new ArrayList[String]()
val l = e.snapshot[Int, String](h, (a, b) => a + " " + b).listen(out.add(_))
List(2, 3).foreach(e.send(_))
l.unlisten()
assertEquals(Arrays.asList("2 0", "3 2"), out)
}
@Test
def testSwitchB() {
val esb = new StreamSink[SB]()
// Split each field out of SB so we can update multiple behaviours in a
// single transaction.
val ba = esb.map(s => s.a).filterNotNull().hold('A')
val bb = esb.map(s => s.b).filterNotNull().hold('a')
val bsw = esb.map(s => s.sw).filterNotNull().hold(ba)
val bo = Cell.switchC(bsw)
val out = new ArrayList[Character]()
val l = bo.value().listen(out.add(_))
List(
new SB('B', 'b', null),
new SB('C', 'c', bb),
new SB('D', 'd', null),
new SB('E', 'e', ba),
new SB('F', 'f', null),
new SB(null, null, bb),
new SB(null, null, ba),
new SB('G', 'g', bb),
new SB('H', 'h', ba),
new SB('I', 'i', ba)).foreach(esb.send(_))
l.unlisten()
assertEquals(Arrays.asList('A', 'B', 'c', 'd', 'E', 'F', 'f', 'F', 'g', 'H', 'I'), out)
}
@Test
def testSwitchE() {
val ese = new StreamSink[SE]()
val ea = ese.map(s => s.a).filterNotNull()
val eb = ese.map(s => s.b).filterNotNull()
val bsw = ese.map(s => s.sw).filterNotNull().hold(ea)
val out = new ArrayList[Char]()
val eo = Cell.switchS(bsw)
val l = eo.listen(out.add(_))
List(
new SE('A', 'a', null),
new SE('B', 'b', null),
new SE('C', 'c', eb),
new SE('D', 'd', null),
new SE('E', 'e', ea),
new SE('F', 'f', null),
new SE('G', 'g', eb),
new SE('H', 'h', ea),
new SE('I', 'i', ea)).foreach(ese.send(_))
l.unlisten()
assertEquals(Arrays.asList('A', 'B', 'C', 'd', 'e', 'F', 'G', 'h', 'I'), out)
}
@Test
def testLoopBehavior() {
val ea = new StreamSink[Int]()
val sum_out = Transaction(_ => {
val sum = new CellLoop[Int]()
val sum_out_ = ea.snapshot[Int, Int](sum, (x, y) => x + y).hold(0)
sum.loop(sum_out_)
sum_out_
})
val out = new ArrayList[Int]()
val l = sum_out.value().listen(out.add(_))
List(2, 3, 1).foreach(ea.send(_))
l.unlisten()
assertEquals(Arrays.asList(0, 2, 5, 6), out)
assertEquals(6, sum_out.sample())
}
@Test
def testCollect() {
val ea = new StreamSink[Int]()
val out = new ArrayList[Int]()
val sum = ea.hold(100).collect[Int, Int](0, (a, s) => (a + s, a + s))
val l = sum.value().listen(out.add(_))
List(5, 7, 1, 2, 3).foreach(ea.send(_))
l.unlisten()
assertEquals(Arrays.asList(100, 105, 112, 113, 115, 118), out)
}
@Test
def testAccum() {
val ea = new StreamSink[Int]()
val out = new ArrayList[Int]()
val sum = ea.accum[Int](100, (a, s) => a + s)
val l = sum.value().listen(out.add(_))
List(5, 7, 1, 2, 3).foreach(ea.send(_))
l.unlisten()
assertEquals(Arrays.asList(100, 105, 112, 113, 115, 118), out)
}
@Test
def testLoopValueSnapshot() {
val out = new ArrayList[String]()
val eSnap = Transaction(_ => {
val a = new Cell("lettuce")
val b = new CellLoop[String]()
val eSnap_ = a.value().snapshot[String, String](b, (aa, bb) => aa + " " + bb)
b.loop(new Cell[String]("cheese"))
eSnap_
})
val l = eSnap.listen(out.add(_))
l.unlisten()
assertEquals(Arrays.asList("lettuce cheese"), out)
}
@Test
def testLoopValueHold() {
val out = new ArrayList[String]()
val value = Transaction(_ => {
val a = new CellLoop[String]()
val value_ = a.value().hold("onion")
a.loop(new Cell[String]("cheese"))
value_
})
val eTick = new StreamSink[Int]()
val l = eTick.snapshot(value).listen(out.add(_))
eTick.send(0)
l.unlisten()
assertEquals(Arrays.asList("cheese"), out)
}
@Test
def testLiftLoop() {
val out = new ArrayList[String]()
val b = new CellSink("kettle")
val c = Transaction(_ => {
val a = new CellLoop[String]()
val c_ = Cell.lift[String, String, String]((aa, bb) => aa + " " + bb, a, b)
a.loop(new Cell[String]("tea"))
c_
})
val l = c.value().listen(out.add(_))
b.send("caddy")
l.unlisten()
assertEquals(Arrays.asList("tea kettle", "tea caddy"), out)
}
}
object CellTester {
/**
* This is used for tests where value() produces a single initial value on listen,
* and then we double that up by causing that single initial event to be repeated.
* This needs testing separately, because the code must be done carefully to achieve
* this.
*/
private def doubleUp(ev: Stream[Int]): Stream[Int] = ev.merge(ev)
}
case class SB(val a: Character, val b: Character, val sw: Cell[Character])
case class SE(val a: Character, val b: Character, val sw: Stream[Character])
| kevintvh/sodium | scala/src/test/scala/sodium/CellTester.scala | Scala | bsd-3-clause | 11,480 |
//
// OrcDesktopEventAction.scala -- Scala class OrcDesktopEventAction and trait OrcDesktopActions
// Project OrcScala
//
// Created by dkitchin on Jan 24, 2011.
//
// Copyright (c) 2016 The University of Texas at Austin. All rights reserved.
//
// Use and redistribution of this file is governed by the license terms in
// the LICENSE file found in the project's top-level directory and also found at
// URL: http://orc.csres.utexas.edu/license.shtml .
//
package orc.run
import orc.OrcEventAction
import orc.OrcEvent
import orc.script.SwingBasedPrompt
import orc.lib.util.PromptEvent
import orc.lib.str.PrintEvent
import orc.lib.web.BrowseEvent
/** @author dkitchin
*/
class OrcDesktopEventAction extends OrcEventAction with OrcDesktopActions
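/** Desktop-specific handling of OrcEvents: prints to the console, prompts via a Swing
  * dialog, and opens URLs in the default desktop browser; any other event is delegated
  * to the superclass.
  */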
trait OrcDesktopActions extends OrcEventAction {
@throws(classOf[Exception])
override def other(event: OrcEvent) {
event match {
case PrintEvent(text) => {
Console.out.print(text)
Console.out.flush()
}
case PromptEvent(prompt, callback) => {
val response = SwingBasedPrompt.runPromptDialog("Orc", prompt)
if (response != null) {
callback.respondToPrompt(response)
} else {
callback.cancelPrompt()
}
}
case BrowseEvent(url) => {
java.awt.Desktop.getDesktop().browse(url.toURI())
}
case e => super.other(e)
}
}
}
| orc-lang/orc | OrcScala/src/orc/run/OrcDesktopEventAction.scala | Scala | bsd-3-clause | 1,395 |
/*
* Copyright 2017 HM Revenue & Customs
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package uk.gov.hmrc.ct.ct600j.v3
import uk.gov.hmrc.ct.box._
import uk.gov.hmrc.ct.ct600j.v3.retriever.CT600JBoxRetriever
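/** Box J20 of the CT600J (v3) return: a scheme reference number whose validation depends on boxes J15, J15A and J20A. */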
case class J20(value: Option[String]) extends SchemeReferenceNumberBox {
override def validate(boxRetriever: CT600JBoxRetriever): Set[CtValidation] =
validateSchemeReferenceNumber(boxRetriever.j15(), boxRetriever.j15A(), boxRetriever.j20A())
}
| liquidarmour/ct-calculations | src/main/scala/uk/gov/hmrc/ct/ct600j/v3/J20.scala | Scala | apache-2.0 | 980 |
package fr.hurrycane
import java.util.concurrent.TimeUnit
import akka.actor.{ ActorRef, ActorSystem }
import akka.http.scaladsl.Http
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.settings.RoutingSettings
import akka.stream.ActorMaterializer
import com.newmotion.akka.rabbitmq._
import fr.hurrycane.db.DatabaseCreator
import fr.hurrycane.entity.PerformedMessage
import fr.hurrycane.registry.{ ClusterMemberShipRegistry, ConversationRegistryActor, MessageRegistryActor, OfferRegistryActor }
import fr.hurrycane.routes.{ ClusterRoutes, ConversationRoutes, MessageRoutes, OfferRoutes }
import fr.hurrycane.tools.RabbitFactory
import play.api.libs.ws.DefaultBodyReadables._
import play.api.libs.ws.DefaultBodyWritables._
import play.api.libs.ws.ahc.StandaloneAhcWSClient
import spray.json._
import scala.concurrent.duration._
object Server extends App with ConversationRoutes with MessageRoutes with OfferRoutes with ClusterRoutes {
DatabaseCreator.checkAll()
val actorSystemName = sys.env("AKKA_ACTOR_SYSTEM_NAME")
implicit val actorSystem = ActorSystem(actorSystemName)
implicit val mat = ActorMaterializer()
import actorSystem.dispatcher
implicit val http = Http(actorSystem)
implicit val routingSettings = RoutingSettings(actorSystem)
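  // RabbitMQ connection actor; channels are created asynchronously via CreateChannel below,
  // and the connection is retried every 10 seconds if it drops.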
private val rabbit: ActorRef = actorSystem.actorOf(ConnectionActor.props(RabbitFactory.getFactory, reconnectionDelay = 10.seconds), "rabbit-connexion")
rabbit ! CreateChannel(ChannelActor.props(RabbitFactory.setupChannel))
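  // Binds the message queue to the exchange and consumes each delivery as a PerformedMessage,
  // POSTing the JSON payload back to the conversation's callback endpoint.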
def setupIntentSubscriber(channel: Channel, self: ActorRef) {
channel.queueBind(RabbitFactory.messageQueue, RabbitFactory.exchange, RabbitFactory.messageBinding)
val consumer = new DefaultConsumer(channel) {
override def handleDelivery(consumerTag: String, envelope: Envelope, properties: BasicProperties, body: Array[Byte]) {
val response = RabbitFactory.fromBytes(body).parseJson.convertTo[PerformedMessage]
        println("RECEIVE RESPONSE " + sys.env("RESPONSE_URL") + "conversation/" + response.conversationId + "/callback")
println(response.toJson.toString)
StandaloneAhcWSClient()
.url(sys.env("RESPONSE_URL") + "conversation/" + response.conversationId + "/callback")
.addHttpHeaders("Content-Type" -> "application/json")
.post(response.toJson.toString)
}
}
channel.basicConsume(RabbitFactory.messageQueue, true, consumer)
}
rabbit ! CreateChannel(ChannelActor.props(setupIntentSubscriber), Some("bot-intent-api"))
val clusterMembershipAskTimeout = FiniteDuration(sys.env("CLUSTER_MEMBERSHIP_ASK_TIMEOUT").toLong, TimeUnit.MILLISECONDS)
val conversationRegistryActor: ActorRef = actorSystem.actorOf(ConversationRegistryActor.props, "userRegistryActor")
val messageRegistryActor: ActorRef = actorSystem.actorOf(MessageRegistryActor.props, "messageRegistryActor")
val offerRegistryActor: ActorRef = actorSystem.actorOf(OfferRegistryActor.props, "offerRegistryActor")
val clusterMemberShipRegistryActor: ActorRef = actorSystem.actorOf(ClusterMemberShipRegistry.props, "clusterRegistryActor")
val offerRoute: Route = offerRoutes(offerRegistryActor, clusterMembershipAskTimeout)
val conversationRoute: Route = conversationRoutes(conversationRegistryActor, clusterMembershipAskTimeout)
val messageRoute: Route = messageRoutes(messageRegistryActor, clusterMembershipAskTimeout)
val membersRoute: Route = clusterRoutes(clusterMemberShipRegistryActor, clusterMembershipAskTimeout)
lazy val routes: Route = conversationRoute ~ messageRoute ~ offerRoute ~ membersRoute
http.bindAndHandle(routes, sys.env("HTTP_HOST"), sys.env("HTTP_PORT").toInt)
  println(s"Server online at http://${sys.env("HTTP_HOST")}:${sys.env("HTTP_PORT")}/")
}
| haris44/Bot-API | src/main/scala/fr/hurrycane/Server.scala | Scala | bsd-3-clause | 3,818 |
/*
* Copyright 2001-2013 Artima, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.scalatest.funspec
import org.scalatest.SharedHelpers._
import org.scalatest.GivenWhenThen
import org.scalatest.Args
import org.scalatest.Outcome
import org.scalatest.Filter
import org.scalatest.Tracker
import org.scalatest.mytags
import org.scalatest.ConfigMap
import org.scalatest.Stopper
import org.scalatest.Exceptional
import org.scalatest.FailureMessages
import org.scalatest.UnquotedString
import org.scalatest.Suites
import org.scalatest.expectations
import org.scalatest.events._
import org.scalactic.Prettifier
import java.awt.AWTError
import java.lang.annotation.AnnotationFormatError
import java.nio.charset.CoderMalfunctionError
import javax.xml.parsers.FactoryConfigurationError
import javax.xml.transform.TransformerFactoryConfigurationError
import org.scalactic.exceptions.NullArgumentException
import org.scalatest.exceptions.DuplicateTestNameException
import org.scalatest.exceptions.NotAllowedException
import org.scalatest.exceptions.TestCanceledException
import org.scalatest.exceptions.TestFailedException
import org.scalatest.exceptions.TestRegistrationClosedException
import org.scalatest.freespec.AnyFreeSpec
import org.scalatest.funspec.AnyFunSpec
class FunSpecSpec extends AnyFreeSpec with GivenWhenThen {
private val prettifier = Prettifier.default
"A FunSpec" - {
"should return the test names in registration order from testNames" in {
val a = new AnyFunSpec {
it("should test this") {/* ASSERTION_SUCCEED */}
it("should test that") {/* ASSERTION_SUCCEED */}
}
assertResult(List("should test this", "should test that")) {
a.testNames.iterator.toList
}
val b = new AnyFunSpec {}
assertResult(List[String]()) {
b.testNames.iterator.toList
}
val c = new AnyFunSpec {
it("should test that") {/* ASSERTION_SUCCEED */}
it("should test this") {/* ASSERTION_SUCCEED */}
}
assertResult(List("should test that", "should test this")) {
c.testNames.iterator.toList
}
val d = new AnyFunSpec {
describe("A Tester") {
it("should test that") {/* ASSERTION_SUCCEED */}
it("should test this") {/* ASSERTION_SUCCEED */}
}
}
assertResult(List("A Tester should test that", "A Tester should test this")) {
d.testNames.iterator.toList
}
val e = new AnyFunSpec {
describe("A Tester") {
it("should test this") {/* ASSERTION_SUCCEED */}
it("should test that") {/* ASSERTION_SUCCEED */}
}
}
assertResult(List("A Tester should test this", "A Tester should test that")) {
e.testNames.iterator.toList
}
}
"should throw DuplicateTestNameException if a duplicate test name registration is attempted" in {
intercept[DuplicateTestNameException] {
new AnyFunSpec {
it("should test this") {/* ASSERTION_SUCCEED */}
it("should test this") {/* ASSERTION_SUCCEED */}
}
}
intercept[DuplicateTestNameException] {
new AnyFunSpec {
it("should test this") {/* ASSERTION_SUCCEED */}
ignore("should test this") {/* ASSERTION_SUCCEED */}
}
}
intercept[DuplicateTestNameException] {
new AnyFunSpec {
ignore("should test this") {/* ASSERTION_SUCCEED */}
ignore("should test this") {/* ASSERTION_SUCCEED */}
}
}
intercept[DuplicateTestNameException] {
new AnyFunSpec {
ignore("should test this") {/* ASSERTION_SUCCEED */}
it("should test this") {/* ASSERTION_SUCCEED */}
}
}
}
trait InvokeChecking {
var withFixtureWasInvoked = false
var testWasInvoked = false
}
"should invoke withFixture from runTest" in {
val a = new AnyFunSpec with InvokeChecking {
override def withFixture(test: NoArgTest): Outcome = {
withFixtureWasInvoked = true
super.withFixture(test)
}
it("should do something") {
testWasInvoked = true
/* ASSERTION_SUCCEED */
}
}
import scala.language.reflectiveCalls
a.run(None, Args(SilentReporter))
assert(a.withFixtureWasInvoked)
assert(a.testWasInvoked)
}
"should pass the correct test name in the NoArgTest passed to withFixture" in {
trait TestNameChecking {
var correctTestNameWasPassed = false
}
val a = new AnyFunSpec with TestNameChecking {
override def withFixture(test: NoArgTest): Outcome = {
correctTestNameWasPassed = test.name == "should do something"
super.withFixture(test)
}
it("should do something") {/* ASSERTION_SUCCEED */}
}
import scala.language.reflectiveCalls
a.run(None, Args(SilentReporter))
assert(a.correctTestNameWasPassed)
}
"should pass the correct config map in the NoArgTest passed to withFixture" in {
trait ConfigMapChecking {
var correctConfigMapWasPassed = false
}
val a = new AnyFunSpec with ConfigMapChecking {
override def withFixture(test: NoArgTest): Outcome = {
correctConfigMapWasPassed = (test.configMap == ConfigMap("hi" -> 7))
super.withFixture(test)
}
it("should do something") {/* ASSERTION_SUCCEED */}
}
import scala.language.reflectiveCalls
a.run(None, Args(SilentReporter, Stopper.default, Filter(), ConfigMap("hi" -> 7), None, new Tracker(), Set.empty))
assert(a.correctConfigMapWasPassed)
}
"(with info calls)" - {
class InfoInsideTestSpec extends AnyFunSpec {
val msg = "hi there, dude"
val testName = "test name"
it(testName) {
info(msg)
/* ASSERTION_SUCCEED */
}
}
      // In a Spec, any InfoProvided's fired during the test should be cached and sent out together with the
      // test completed event. This makes the report look nicer, because the info is tucked under the "specifier"
// text for that test.
"should, when the info appears in the code of a successful test, report the info in the TestSucceeded" in {
val spec = new InfoInsideTestSpec
val (testStartingIndex, testSucceededIndex) =
getIndexesForTestInformerEventOrderTests(spec, spec.testName, spec.msg)
assert(testStartingIndex < testSucceededIndex)
}
class InfoBeforeTestSpec extends AnyFunSpec {
val msg = "hi there, dude"
val testName = "test name"
info(msg)
it(testName) {/* ASSERTION_SUCCEED */}
}
"should, when the info appears in the body before a test, report the info before the test" in {
val spec = new InfoBeforeTestSpec
val (infoProvidedIndex, testStartingIndex, testSucceededIndex) =
getIndexesForInformerEventOrderTests(spec, spec.testName, spec.msg)
assert(infoProvidedIndex < testStartingIndex)
assert(testStartingIndex < testSucceededIndex)
}
"should, when the info appears in the body after a test, report the info after the test runs" in {
val msg = "hi there, dude"
val testName = "test name"
class MySpec extends AnyFunSpec {
it(testName) {/* ASSERTION_SUCCEED */}
info(msg)
}
val (infoProvidedIndex, testStartingIndex, testSucceededIndex) =
getIndexesForInformerEventOrderTests(new MySpec, testName, msg)
assert(testStartingIndex < testSucceededIndex)
assert(testSucceededIndex < infoProvidedIndex)
}
"should print to stdout when info is called by a method invoked after the suite has been executed" in {
class MySpec extends AnyFunSpec {
callInfo() // This should work fine
def callInfo(): Unit = {
info("howdy")
}
it("howdy also") {
callInfo() // This should work fine
/* ASSERTION_SUCCEED */
}
}
val spec = new MySpec
val myRep = new EventRecordingReporter
spec.run(None, Args(myRep))
      spec.callInfo() // TODO: Actually test that this prints to stdout
}
"should send an InfoProvided with an IndentedText formatter with level 1 when called outside a test" in {
val spec = new InfoBeforeTestSpec
val indentedText = getIndentedTextFromInfoProvided(spec)
assert(indentedText == IndentedText("+ " + spec.msg, spec.msg, 0))
}
"should send an InfoProvided with an IndentedText formatter with level 2 when called within a test" in {
val spec = new InfoInsideTestSpec
val indentedText = getIndentedTextFromTestInfoProvided(spec)
assert(indentedText == IndentedText(" + " + spec.msg, spec.msg, 1))
}
}
"(when a nesting rule has been violated)" - {
"should, if they call a describe from within an it clause, result in a TestFailedException when running the test" in {
class MySpec extends AnyFunSpec {
it("should blow up") {
describe("in the wrong place, at the wrong time") {
}
/* ASSERTION_SUCCEED */
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
"should, if they call a describe with a nested it from within an it clause, result in a TestFailedException when running the test" in {
class MySpec extends AnyFunSpec {
it("should blow up") {
describe("in the wrong place, at the wrong time") {
it("should never run") {
assert(1 == 1)
}
}
/* ASSERTION_SUCCEED */
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
"should, if they call a nested it from within an it clause, result in a TestFailedException when running the test" in {
class MySpec extends AnyFunSpec {
it("should blow up") {
it("should never run") {
assert(1 == 1)
}
/* ASSERTION_SUCCEED */
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
"should, if they call a nested it with tags from within an it clause, result in a TestFailedException when running the test" in {
class MySpec extends AnyFunSpec {
it("should blow up") {
it("should never run", mytags.SlowAsMolasses) {
assert(1 == 1)
}
/* ASSERTION_SUCCEED */
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
"should, if they call a nested registerTest with tags from within an registerTest clause, result in a TestFailedException when running the test" in {
class MySpec extends AnyFunSpec {
registerTest("should blow up") {
registerTest("should never run", mytags.SlowAsMolasses) {
assert(1 == 1)
}
/* ASSERTION_SUCCEED */
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
"should, if they call a describe with a nested ignore from within an it clause, result in a TestFailedException when running the test" in {
class MySpec extends AnyFunSpec {
it("should blow up") {
describe("in the wrong place, at the wrong time") {
ignore("should never run") {
assert(1 == 1)
}
}
/* ASSERTION_SUCCEED */
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
"should, if they call a nested ignore from within an it clause, result in a TestFailedException when running the test" in {
class MySpec extends AnyFunSpec {
it("should blow up") {
ignore("should never run") {
assert(1 == 1)
}
/* ASSERTION_SUCCEED */
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
"should, if they call a nested ignore with tags from within an it clause, result in a TestFailedException when running the test" in {
class MySpec extends AnyFunSpec {
it("should blow up") {
ignore("should never run", mytags.SlowAsMolasses) {
assert(1 == 1)
}
/* ASSERTION_SUCCEED */
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
"should, if they call a nested registerIgnoredTest with tags from within an registerTest clause, result in a TestFailedException when running the test" in {
class MySpec extends AnyFunSpec {
registerTest("should blow up") {
registerIgnoredTest("should never run", mytags.SlowAsMolasses) {
assert(1 == 1)
}
/* ASSERTION_SUCCEED */
}
}
val spec = new MySpec
ensureTestFailedEventReceived(spec, "should blow up")
}
}
"should run tests registered via the 'it should behave like' syntax" in {
trait SharedSpecTests { this: AnyFunSpec =>
def nonEmptyStack(s: String)(i: Int): Unit = {
it("should be that I am shared") {/* ASSERTION_SUCCEED */}
}
}
class MySpec extends AnyFunSpec with SharedSpecTests {
it should behave like nonEmptyStack("hi")(1)
}
val suite = new MySpec
val reporter = new EventRecordingReporter
suite.run(None, Args(reporter))
val indexedList = reporter.eventsReceived
val testStartingOption = indexedList.find(_.isInstanceOf[TestStarting])
assert(testStartingOption.isDefined)
assert(testStartingOption.get.asInstanceOf[TestStarting].testName == "should be that I am shared")
}
"should throw NullArgumentException if a null test tag is provided" in {
// it
intercept[NullArgumentException] {
new AnyFunSpec {
it("hi", null) {/* ASSERTION_SUCCEED */}
}
}
val caught = intercept[NullArgumentException] {
new AnyFunSpec {
it("hi", mytags.SlowAsMolasses, null) {/* ASSERTION_SUCCEED */}
}
}
assert(caught.getMessage == "a test tag was null")
intercept[NullArgumentException] {
new AnyFunSpec {
it("hi", mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) {/* ASSERTION_SUCCEED */}
}
}
// ignore
intercept[NullArgumentException] {
new AnyFunSpec {
ignore("hi", null) {/* ASSERTION_SUCCEED */}
}
}
val caught2 = intercept[NullArgumentException] {
new AnyFunSpec {
ignore("hi", mytags.SlowAsMolasses, null) {/* ASSERTION_SUCCEED */}
}
}
assert(caught2.getMessage == "a test tag was null")
intercept[NullArgumentException] {
new AnyFunSpec {
ignore("hi", mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) {/* ASSERTION_SUCCEED */}
}
}
// registerTest
intercept[NullArgumentException] {
new AnyFunSpec {
registerTest("hi", null) {/* ASSERTION_SUCCEED */}
}
}
val caught3 = intercept[NullArgumentException] {
new AnyFunSpec {
registerTest("hi", mytags.SlowAsMolasses, null) {/* ASSERTION_SUCCEED */}
}
}
assert(caught3.getMessage == "a test tag was null")
intercept[NullArgumentException] {
new AnyFunSpec {
registerTest("hi", mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) {/* ASSERTION_SUCCEED */}
}
}
// registerIgnoredTest
intercept[NullArgumentException] {
new AnyFunSpec {
registerIgnoredTest("hi", null) {/* ASSERTION_SUCCEED */}
}
}
val caught4 = intercept[NullArgumentException] {
new AnyFunSpec {
registerIgnoredTest("hi", mytags.SlowAsMolasses, null) {/* ASSERTION_SUCCEED */}
}
}
assert(caught4.getMessage == "a test tag was null")
intercept[NullArgumentException] {
new AnyFunSpec {
registerIgnoredTest("hi", mytags.SlowAsMolasses, null, mytags.WeakAsAKitten) {/* ASSERTION_SUCCEED */}
}
}
}
class TestWasCalledSuite extends AnyFunSpec {
var theTestThisCalled = false
var theTestThatCalled = false
it("should run this") { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
it("should run that, maybe") { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
}
"should execute all tests when run is called with testName None" in {
val b = new TestWasCalledSuite
b.run(None, Args(SilentReporter))
assert(b.theTestThisCalled)
assert(b.theTestThatCalled)
}
"should execute one test when run is called with a defined testName" in {
val a = new TestWasCalledSuite
a.run(Some("should run this"), Args(SilentReporter))
assert(a.theTestThisCalled)
assert(!a.theTestThatCalled)
}
trait CallChecking {
var theTestThisCalled = false
var theTestThatCalled = false
var theTestTheOtherCalled = false
}
"should report as ignored, and not run, tests marked ignored" in {
val a = new AnyFunSpec with CallChecking {
it("test this") { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
it("test that") { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
}
import scala.language.reflectiveCalls
val repA = new TestIgnoredTrackingReporter
a.run(None, Args(repA))
assert(!repA.testIgnoredReceived)
assert(a.theTestThisCalled)
assert(a.theTestThatCalled)
val b = new AnyFunSpec with CallChecking {
ignore("test this") { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
it("test that") { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
}
val repB = new TestIgnoredTrackingReporter
b.run(None, Args(repB))
assert(repB.testIgnoredReceived)
assert(repB.lastEvent.isDefined)
assert(repB.lastEvent.get.testName endsWith "test this")
assert(!b.theTestThisCalled)
assert(b.theTestThatCalled)
val c = new AnyFunSpec with CallChecking {
it("test this") { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
ignore("test that") { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
}
val repC = new TestIgnoredTrackingReporter
c.run(None, Args(repC))
assert(repC.testIgnoredReceived)
assert(repC.lastEvent.isDefined)
assert(repC.lastEvent.get.testName endsWith "test that", repC.lastEvent.get.testName)
assert(c.theTestThisCalled)
assert(!c.theTestThatCalled)
// The order I want is order of appearance in the file.
// Will try and implement that tomorrow. Subtypes will be able to change the order.
val d = new AnyFunSpec with CallChecking {
ignore("test this") { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
ignore("test that") { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
}
val repD = new TestIgnoredTrackingReporter
d.run(None, Args(repD))
assert(repD.testIgnoredReceived)
assert(repD.lastEvent.isDefined)
assert(repD.lastEvent.get.testName endsWith "test that") // last because should be in order of appearance
assert(!d.theTestThisCalled)
assert(!d.theTestThatCalled)
}
"should ignore a test marked as ignored if run is invoked with that testName" in {
// If I provide a specific testName to run, then it should ignore an Ignore on that test
// method and actually invoke it.
val e = new AnyFunSpec with CallChecking {
ignore("test this") { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
it("test that") { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
}
import scala.language.reflectiveCalls
val repE = new TestIgnoredTrackingReporter
e.run(Some("test this"), Args(repE))
assert(repE.testIgnoredReceived)
assert(!e.theTestThisCalled)
assert(!e.theTestThatCalled)
}
"should run only those tests selected by the tags to include and exclude sets" in {
// Nothing is excluded
val a = new AnyFunSpec with CallChecking {
it("test this", mytags.SlowAsMolasses) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
it("test that") { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
}
import scala.language.reflectiveCalls
val repA = new TestIgnoredTrackingReporter
a.run(None, Args(repA))
assert(!repA.testIgnoredReceived)
assert(a.theTestThisCalled)
assert(a.theTestThatCalled)
// SlowAsMolasses is included, one test should be excluded
val b = new AnyFunSpec with CallChecking {
it("test this", mytags.SlowAsMolasses) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
it("test that") { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
}
val repB = new TestIgnoredTrackingReporter
b.run(None, Args(repB, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repB.testIgnoredReceived)
assert(b.theTestThisCalled)
assert(!b.theTestThatCalled)
// SlowAsMolasses is included, and both tests should be included
val c = new AnyFunSpec with CallChecking {
it("test this", mytags.SlowAsMolasses) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
it("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
}
val repC = new TestIgnoredTrackingReporter
c.run(None, Args(repB, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repC.testIgnoredReceived)
assert(c.theTestThisCalled)
assert(c.theTestThatCalled)
// SlowAsMolasses is included. both tests should be included but one ignored
val d = new AnyFunSpec with CallChecking {
ignore("test this", mytags.SlowAsMolasses) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
it("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
}
val repD = new TestIgnoredTrackingReporter
d.run(None, Args(repD, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.Ignore")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(repD.testIgnoredReceived)
assert(!d.theTestThisCalled)
assert(d.theTestThatCalled)
// SlowAsMolasses included, FastAsLight excluded
val e = new AnyFunSpec with CallChecking {
it("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
it("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
it("test the other") { theTestTheOtherCalled = true; /* ASSERTION_SUCCEED */ }
}
val repE = new TestIgnoredTrackingReporter
e.run(None, Args(repE, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repE.testIgnoredReceived)
assert(!e.theTestThisCalled)
assert(e.theTestThatCalled)
assert(!e.theTestTheOtherCalled)
// An Ignored test that was both included and excluded should not generate a TestIgnored event
val f = new AnyFunSpec with CallChecking {
ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
it("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
it("test the other") { theTestTheOtherCalled = true; /* ASSERTION_SUCCEED */ }
}
val repF = new TestIgnoredTrackingReporter
f.run(None, Args(repF, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repF.testIgnoredReceived)
assert(!f.theTestThisCalled)
assert(f.theTestThatCalled)
assert(!f.theTestTheOtherCalled)
// An Ignored test that was not included should not generate a TestIgnored event
val g = new AnyFunSpec with CallChecking {
it("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
it("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
ignore("test the other") { theTestTheOtherCalled = true; /* ASSERTION_SUCCEED */ }
}
val repG = new TestIgnoredTrackingReporter
g.run(None, Args(repG, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repG.testIgnoredReceived)
assert(!g.theTestThisCalled)
assert(g.theTestThatCalled)
assert(!g.theTestTheOtherCalled)
// No tagsToInclude set, FastAsLight excluded
val h = new AnyFunSpec with CallChecking {
it("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
it("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
it("test the other") { theTestTheOtherCalled = true; /* ASSERTION_SUCCEED */ }
}
val repH = new TestIgnoredTrackingReporter
h.run(None, Args(repH, Stopper.default, Filter(None, Set("org.scalatest.FastAsLight")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repH.testIgnoredReceived)
assert(!h.theTestThisCalled)
assert(h.theTestThatCalled)
assert(h.theTestTheOtherCalled)
// No tagsToInclude set, mytags.SlowAsMolasses excluded
val i = new AnyFunSpec with CallChecking {
it("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
it("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
it("test the other") { theTestTheOtherCalled = true; /* ASSERTION_SUCCEED */ }
}
val repI = new TestIgnoredTrackingReporter
i.run(None, Args(repI, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repI.testIgnoredReceived)
assert(!i.theTestThisCalled)
assert(!i.theTestThatCalled)
assert(i.theTestTheOtherCalled)
// No tagsToInclude set, mytags.SlowAsMolasses excluded, TestIgnored should not be received on excluded ones
val j = new AnyFunSpec with CallChecking {
ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
ignore("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
it("test the other") { theTestTheOtherCalled = true; /* ASSERTION_SUCCEED */ }
}
val repJ = new TestIgnoredTrackingReporter
j.run(None, Args(repJ, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repI.testIgnoredReceived)
assert(!j.theTestThisCalled)
assert(!j.theTestThatCalled)
assert(j.theTestTheOtherCalled)
// Same as previous, except Ignore specifically mentioned in excludes set
val k = new AnyFunSpec with CallChecking {
ignore("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
ignore("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
ignore("test the other") { theTestTheOtherCalled = true; /* ASSERTION_SUCCEED */ }
}
val repK = new TestIgnoredTrackingReporter
k.run(None, Args(repK, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses", "org.scalatest.Ignore")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(repK.testIgnoredReceived)
assert(!k.theTestThisCalled)
assert(!k.theTestThatCalled)
assert(!k.theTestTheOtherCalled)
}
"should run only those registered tests selected by the tags to include and exclude sets" in {
// Nothing is excluded
val a = new AnyFunSpec with CallChecking {
registerTest("test this", mytags.SlowAsMolasses) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
registerTest("test that") { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
}
import scala.language.reflectiveCalls
val repA = new TestIgnoredTrackingReporter
a.run(None, Args(repA))
assert(!repA.testIgnoredReceived)
assert(a.theTestThisCalled)
assert(a.theTestThatCalled)
// SlowAsMolasses is included, one test should be excluded
val b = new AnyFunSpec with CallChecking {
registerTest("test this", mytags.SlowAsMolasses) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
registerTest("test that") { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
}
val repB = new TestIgnoredTrackingReporter
b.run(None, Args(repB, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repB.testIgnoredReceived)
assert(b.theTestThisCalled)
assert(!b.theTestThatCalled)
// SlowAsMolasses is included, and both tests should be included
val c = new AnyFunSpec with CallChecking {
registerTest("test this", mytags.SlowAsMolasses) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
registerTest("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
}
val repC = new TestIgnoredTrackingReporter
c.run(None, Args(repB, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set()), ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repC.testIgnoredReceived)
assert(c.theTestThisCalled)
assert(c.theTestThatCalled)
// SlowAsMolasses is included. both tests should be included but one ignored
val d = new AnyFunSpec with CallChecking {
registerIgnoredTest("test this", mytags.SlowAsMolasses) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
registerTest("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
}
val repD = new TestIgnoredTrackingReporter
d.run(None, Args(repD, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.Ignore")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(repD.testIgnoredReceived)
assert(!d.theTestThisCalled)
assert(d.theTestThatCalled)
// SlowAsMolasses included, FastAsLight excluded
val e = new AnyFunSpec with CallChecking {
registerTest("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
registerTest("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
registerTest("test the other") { theTestTheOtherCalled = true; /* ASSERTION_SUCCEED */ }
}
val repE = new TestIgnoredTrackingReporter
e.run(None, Args(repE, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repE.testIgnoredReceived)
assert(!e.theTestThisCalled)
assert(e.theTestThatCalled)
assert(!e.theTestTheOtherCalled)
// An Ignored test that was both included and excluded should not generate a TestIgnored event
val f = new AnyFunSpec with CallChecking {
registerIgnoredTest("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
registerTest("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
registerTest("test the other") { theTestTheOtherCalled = true; /* ASSERTION_SUCCEED */ }
}
val repF = new TestIgnoredTrackingReporter
f.run(None, Args(repF, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repF.testIgnoredReceived)
assert(!f.theTestThisCalled)
assert(f.theTestThatCalled)
assert(!f.theTestTheOtherCalled)
// An Ignored test that was not included should not generate a TestIgnored event
val g = new AnyFunSpec with CallChecking {
registerTest("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
registerTest("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
registerIgnoredTest("test the other") { theTestTheOtherCalled = true; /* ASSERTION_SUCCEED */ }
}
val repG = new TestIgnoredTrackingReporter
g.run(None, Args(repG, Stopper.default, Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight")),
ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repG.testIgnoredReceived)
assert(!g.theTestThisCalled)
assert(g.theTestThatCalled)
assert(!g.theTestTheOtherCalled)
// No tagsToInclude set, FastAsLight excluded
val h = new AnyFunSpec with CallChecking {
registerTest("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
registerTest("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
registerTest("test the other") { theTestTheOtherCalled = true; /* ASSERTION_SUCCEED */ }
}
val repH = new TestIgnoredTrackingReporter
h.run(None, Args(repH, Stopper.default, Filter(None, Set("org.scalatest.FastAsLight")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repH.testIgnoredReceived)
assert(!h.theTestThisCalled)
assert(h.theTestThatCalled)
assert(h.theTestTheOtherCalled)
// No tagsToInclude set, mytags.SlowAsMolasses excluded
val i = new AnyFunSpec with CallChecking {
registerTest("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
registerTest("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
registerTest("test the other") { theTestTheOtherCalled = true; /* ASSERTION_SUCCEED */ }
}
val repI = new TestIgnoredTrackingReporter
i.run(None, Args(repI, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repI.testIgnoredReceived)
assert(!i.theTestThisCalled)
assert(!i.theTestThatCalled)
assert(i.theTestTheOtherCalled)
// No tagsToInclude set, mytags.SlowAsMolasses excluded, TestIgnored should not be received on excluded ones
val j = new AnyFunSpec with CallChecking {
registerIgnoredTest("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
registerIgnoredTest("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
registerTest("test the other") { theTestTheOtherCalled = true; /* ASSERTION_SUCCEED */ }
}
val repJ = new TestIgnoredTrackingReporter
j.run(None, Args(repJ, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(!repI.testIgnoredReceived)
assert(!j.theTestThisCalled)
assert(!j.theTestThatCalled)
assert(j.theTestTheOtherCalled)
// Same as previous, except Ignore specifically mentioned in excludes set
val k = new AnyFunSpec with CallChecking {
registerIgnoredTest("test this", mytags.SlowAsMolasses, mytags.FastAsLight) { theTestThisCalled = true; /* ASSERTION_SUCCEED */ }
registerIgnoredTest("test that", mytags.SlowAsMolasses) { theTestThatCalled = true; /* ASSERTION_SUCCEED */ }
registerIgnoredTest("test the other") { theTestTheOtherCalled = true; /* ASSERTION_SUCCEED */ }
}
val repK = new TestIgnoredTrackingReporter
k.run(None, Args(repK, Stopper.default, Filter(None, Set("org.scalatest.SlowAsMolasses", "org.scalatest.Ignore")), ConfigMap.empty, None, new Tracker, Set.empty))
assert(repK.testIgnoredReceived)
assert(!k.theTestThisCalled)
assert(!k.theTestThatCalled)
assert(!k.theTestTheOtherCalled)
}
"should return the correct test count from its expectedTestCount method" in {
val a = new AnyFunSpec {
it("test this") {/* ASSERTION_SUCCEED */}
it("test that") {/* ASSERTION_SUCCEED */}
}
assert(a.expectedTestCount(Filter()) == 2)
val b = new AnyFunSpec {
ignore("test this") {/* ASSERTION_SUCCEED */}
it("test that") {/* ASSERTION_SUCCEED */}
}
assert(b.expectedTestCount(Filter()) == 1)
val c = new AnyFunSpec {
it("test this", mytags.FastAsLight) {/* ASSERTION_SUCCEED */}
it("test that") {/* ASSERTION_SUCCEED */}
}
assert(c.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) == 1)
assert(c.expectedTestCount(Filter(None, Set("org.scalatest.FastAsLight"))) == 1)
val d = new AnyFunSpec {
it("test this", mytags.FastAsLight, mytags.SlowAsMolasses) {/* ASSERTION_SUCCEED */}
it("test that", mytags.SlowAsMolasses) {/* ASSERTION_SUCCEED */}
it("test the other thing") {/* ASSERTION_SUCCEED */}
}
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) == 1)
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) == 1)
assert(d.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) == 1)
assert(d.expectedTestCount(Filter()) == 3)
val e = new AnyFunSpec {
it("test this", mytags.FastAsLight, mytags.SlowAsMolasses) {/* ASSERTION_SUCCEED */}
it("test that", mytags.SlowAsMolasses) {/* ASSERTION_SUCCEED */}
ignore("test the other thing") {/* ASSERTION_SUCCEED */}
}
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) == 1)
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) == 1)
assert(e.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) == 0)
assert(e.expectedTestCount(Filter()) == 2)
val f = new Suites(a, b, c, d, e)
assert(f.expectedTestCount(Filter()) == 10)
}
"should return the correct test count from its expectedTestCount method when register tests with registerTest and registerIgnoredTest" in {
val a = new AnyFunSpec {
registerTest("test this") {/* ASSERTION_SUCCEED */}
registerTest("test that") {/* ASSERTION_SUCCEED */}
}
assert(a.expectedTestCount(Filter()) == 2)
val b = new AnyFunSpec {
registerIgnoredTest("test this") {/* ASSERTION_SUCCEED */}
registerTest("test that") {/* ASSERTION_SUCCEED */}
}
assert(b.expectedTestCount(Filter()) == 1)
val c = new AnyFunSpec {
registerTest("test this", mytags.FastAsLight) {/* ASSERTION_SUCCEED */}
registerTest("test that") {/* ASSERTION_SUCCEED */}
}
assert(c.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) == 1)
assert(c.expectedTestCount(Filter(None, Set("org.scalatest.FastAsLight"))) == 1)
val d = new AnyFunSpec {
registerTest("test this", mytags.FastAsLight, mytags.SlowAsMolasses) {/* ASSERTION_SUCCEED */}
registerTest("test that", mytags.SlowAsMolasses) {/* ASSERTION_SUCCEED */}
registerTest("test the other thing") {/* ASSERTION_SUCCEED */}
}
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) == 1)
assert(d.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) == 1)
assert(d.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) == 1)
assert(d.expectedTestCount(Filter()) == 3)
val e = new AnyFunSpec {
registerTest("test this", mytags.FastAsLight, mytags.SlowAsMolasses) {/* ASSERTION_SUCCEED */}
registerTest("test that", mytags.SlowAsMolasses) {/* ASSERTION_SUCCEED */}
registerIgnoredTest("test the other thing") {/* ASSERTION_SUCCEED */}
}
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.FastAsLight")), Set())) == 1)
assert(e.expectedTestCount(Filter(Some(Set("org.scalatest.SlowAsMolasses")), Set("org.scalatest.FastAsLight"))) == 1)
assert(e.expectedTestCount(Filter(None, Set("org.scalatest.SlowAsMolasses"))) == 0)
assert(e.expectedTestCount(Filter()) == 2)
val f = new Suites(a, b, c, d, e)
assert(f.expectedTestCount(Filter()) == 10)
}
"should generate a TestPending message when the test body is (pending)" in {
val a = new AnyFunSpec {
it("should do this") (pending)
it("should do that") {
assert(2 + 2 == 4)
}
it("should do something else") {
assert(2 + 2 == 4)
pending
}
}
val rep = new EventRecordingReporter
a.run(None, Args(rep))
val tp = rep.testPendingEventsReceived
assert(tp.size == 2)
}
"should generate a test failure if a Throwable, or an Error other than direct Error subtypes " +
"known in JDK 1.5, excluding AssertionError" in {
val a = new AnyFunSpec {
it("throws AssertionError") { throw new AssertionError }
it("throws plain old Error") { throw new Error }
it("throws Throwable") { throw new Throwable }
}
val rep = new EventRecordingReporter
a.run(None, Args(rep))
val tf = rep.testFailedEventsReceived
assert(tf.size == 3)
}
// SKIP-SCALATESTJS,NATIVE-START
"should propagate out Errors that are direct subtypes of Error in JDK 1.5, other than " +
"AssertionError, causing Suites and Runs to abort." in {
val a = new AnyFunSpec {
it("throws AssertionError") { throw new OutOfMemoryError }
}
intercept[OutOfMemoryError] {
a.run(None, Args(SilentReporter))
}
}
// SKIP-SCALATESTJS,NATIVE-END
/*
it("should send InfoProvided events with aboutAPendingTest set to true for info " +
"calls made from a test that is pending") {
val a = new FunSpec with GivenWhenThen {
it("should do something else") {
given("two integers")
when("one is subracted from the other")
then("the result is the difference between the two numbers")
pending
}
}
val rep = new EventRecordingReporter
a.run(None, Args(rep))
val testPending = rep.testPendingEventsReceived
assert(testPending.size == 1)
val recordedEvents = testPending(0).recordedEvents
assert(recordedEvents.size == 3)
for (event <- recordedEvents) {
val ip = event.asInstanceOf[InfoProvided]
assert(ip.aboutAPendingTest.isDefined && ip.aboutAPendingTest.get)
}
}
it("should send InfoProvided events with aboutAPendingTest set to false for info " +
"calls made from a test that is not pending") {
val a = new FunSpec with GivenWhenThen {
it("should do something else") {
given("two integers")
when("one is subracted from the other")
then("the result is the difference between the two numbers")
assert(1 + 1 == 2)
}
}
val rep = new EventRecordingReporter
a.run(None, Args(rep))
val testSucceeded = rep.testSucceededEventsReceived
assert(testSucceeded.size == 1)
val recordedEvents = testSucceeded(0).recordedEvents
assert(recordedEvents.size == 3)
for (event <- recordedEvents) {
val ip = event.asInstanceOf[InfoProvided]
assert(ip.aboutAPendingTest.isDefined && !ip.aboutAPendingTest.get)
}
}
*/
"should support expectations" ignore { // Unignore after we uncomment the expectation implicits in RegistrationPolicy
class TestSpec extends AnyFunSpec with expectations.Expectations {
it("fail scenario") {
expect(1 == 2); /* ASSERTION_SUCCEED */
}
describe("a feature") {
it("nested fail scenario") {
expect(1 == 2); /* ASSERTION_SUCCEED */
}
}
}
val rep = new EventRecordingReporter
val s1 = new TestSpec
s1.run(None, Args(rep))
assert(rep.testFailedEventsReceived.size == 2)
assert(rep.testFailedEventsReceived(0).throwable.get.asInstanceOf[TestFailedException].failedCodeFileName.get == "FunSpecSpec.scala")
assert(rep.testFailedEventsReceived(0).throwable.get.asInstanceOf[TestFailedException].failedCodeLineNumber.get == thisLineNumber - 13)
assert(rep.testFailedEventsReceived(1).throwable.get.asInstanceOf[TestFailedException].failedCodeFileName.get == "FunSpecSpec.scala")
assert(rep.testFailedEventsReceived(1).throwable.get.asInstanceOf[TestFailedException].failedCodeLineNumber.get == thisLineNumber - 11)
}
}
"when failure happens" - {
"should fire TestFailed event with correct stack depth info when test failed" in {
class TestSpec extends AnyFunSpec {
it("fail scenario") {
assert(1 == 2)
}
describe("a feature") {
it("nested fail scenario") {
assert(1 == 2)
}
}
}
val rep = new EventRecordingReporter
val s1 = new TestSpec
s1.run(None, Args(rep))
assert(rep.testFailedEventsReceived.size == 2)
assert(rep.testFailedEventsReceived(0).throwable.get.asInstanceOf[TestFailedException].failedCodeFileName.get == "FunSpecSpec.scala")
assert(rep.testFailedEventsReceived(0).throwable.get.asInstanceOf[TestFailedException].failedCodeLineNumber.get == thisLineNumber - 13)
assert(rep.testFailedEventsReceived(1).throwable.get.asInstanceOf[TestFailedException].failedCodeFileName.get == "FunSpecSpec.scala")
assert(rep.testFailedEventsReceived(1).throwable.get.asInstanceOf[TestFailedException].failedCodeLineNumber.get == thisLineNumber - 11)
}
// Line checking not working yet.
"should generate TestRegistrationClosedException with correct stack depth info when has a it nested inside a it" in {
class TestSpec extends AnyFunSpec {
var registrationClosedThrown = false
describe("a feature") {
it("a scenario") {
it("nested scenario") {
assert(1 == 2)
}; /* ASSERTION_SUCCEED */
}
}
override def withFixture(test: NoArgTest): Outcome = {
val outcome = test.apply()
outcome match {
case Exceptional(ex: TestRegistrationClosedException) =>
registrationClosedThrown = true
case _ =>
}
outcome
}
}
val rep = new EventRecordingReporter
val s = new TestSpec
s.run(None, Args(rep))
assert(s.registrationClosedThrown == true)
val testFailedEvents = rep.testFailedEventsReceived
assert(testFailedEvents.size == 1)
assert(testFailedEvents(0).throwable.get.getClass() == classOf[TestRegistrationClosedException])
val trce = testFailedEvents(0).throwable.get.asInstanceOf[TestRegistrationClosedException]
assert("FunSpecSpec.scala" == trce.failedCodeFileName.get)
assert(trce.failedCodeLineNumber.get == thisLineNumber - 24)
assert(trce.message == Some("An it clause may not appear inside another it or they clause."))
}
"should generate TestRegistrationClosedException with correct stack depth info when has a ignore nested inside a it" in {
class TestSpec extends AnyFunSpec {
var registrationClosedThrown = false
describe("a feature") {
it("a scenario") {
ignore("nested scenario") {
assert(1 == 2)
}; /* ASSERTION_SUCCEED */
}
}
override def withFixture(test: NoArgTest): Outcome = {
val outcome = test.apply()
outcome match {
case Exceptional(ex: TestRegistrationClosedException) =>
registrationClosedThrown = true
case _ =>
}
outcome
}
}
val rep = new EventRecordingReporter
val s = new TestSpec
s.run(None, Args(rep))
assert(s.registrationClosedThrown == true)
val testFailedEvents = rep.testFailedEventsReceived
assert(testFailedEvents.size == 1)
assert(testFailedEvents(0).throwable.get.getClass() == classOf[TestRegistrationClosedException])
val trce = testFailedEvents(0).throwable.get.asInstanceOf[TestRegistrationClosedException]
assert("FunSpecSpec.scala" == trce.failedCodeFileName.get)
assert(trce.failedCodeLineNumber.get == thisLineNumber - 24)
assert(trce.message == Some("An ignore clause may not appear inside an it or a they clause."))
}
"should generate TestRegistrationClosedException with correct stack depth info when has a they nested inside a they" in {
class TestSpec extends AnyFunSpec {
var registrationClosedThrown = false
describe("a feature") {
they("a scenario") {
they("nested scenario") {
assert(1 == 2)
}; /* ASSERTION_SUCCEED */
}
}
override def withFixture(test: NoArgTest): Outcome = {
val outcome = test.apply()
outcome match {
case Exceptional(ex: TestRegistrationClosedException) =>
registrationClosedThrown = true
case _ =>
}
outcome
}
}
val rep = new EventRecordingReporter
val s = new TestSpec
s.run(None, Args(rep))
assert(s.registrationClosedThrown == true)
val testFailedEvents = rep.testFailedEventsReceived
assert(testFailedEvents.size == 1)
assert(testFailedEvents(0).throwable.get.getClass() == classOf[TestRegistrationClosedException])
val trce = testFailedEvents(0).throwable.get.asInstanceOf[TestRegistrationClosedException]
assert("FunSpecSpec.scala" == trce.failedCodeFileName.get)
assert(trce.failedCodeLineNumber.get == thisLineNumber - 24)
assert(trce.message == Some("A they clause may not appear inside another it or they clause."))
}
"should generate TestRegistrationClosedException with correct stack depth info when has a ignore nested inside a they" in {
class TestSpec extends AnyFunSpec {
var registrationClosedThrown = false
describe("a feature") {
they("a scenario") {
ignore("nested scenario") {
assert(1 == 2)
}; /* ASSERTION_SUCCEED */
}
}
override def withFixture(test: NoArgTest): Outcome = {
val outcome = test.apply()
outcome match {
case Exceptional(ex: TestRegistrationClosedException) =>
registrationClosedThrown = true
case _ =>
}
outcome
}
}
val rep = new EventRecordingReporter
val s = new TestSpec
s.run(None, Args(rep))
assert(s.registrationClosedThrown == true)
val testFailedEvents = rep.testFailedEventsReceived
assert(testFailedEvents.size == 1)
assert(testFailedEvents(0).throwable.get.getClass() == classOf[TestRegistrationClosedException])
val trce = testFailedEvents(0).throwable.get.asInstanceOf[TestRegistrationClosedException]
assert("FunSpecSpec.scala" == trce.failedCodeFileName.get)
assert(trce.failedCodeLineNumber.get == thisLineNumber - 24)
assert(trce.message == Some("An ignore clause may not appear inside an it or a they clause."))
}
"should allow test registration with registerTest and registerIgnoredTest" in {
class TestSpec extends AnyFunSpec {
val a = 1
registerTest("test 1") {
val e = intercept[TestFailedException] {
assert(a == 2)
}
assert(e.message == Some("1 did not equal 2"))
assert(e.failedCodeFileName == Some("FunSpecSpec.scala"))
assert(e.failedCodeLineNumber == Some(thisLineNumber - 4))
}
registerTest("test 2") {
assert(a == 2)
}
registerTest("test 3") {
pending
}
registerTest("test 4") {
cancel()
}
registerIgnoredTest("test 5") {
assert(a == 2)
}
}
val rep = new EventRecordingReporter
val s = new TestSpec
s.run(None, Args(rep))
assert(rep.testStartingEventsReceived.length == 4)
assert(rep.testSucceededEventsReceived.length == 1)
assert(rep.testSucceededEventsReceived(0).testName == "test 1")
assert(rep.testFailedEventsReceived.length == 1)
assert(rep.testFailedEventsReceived(0).testName == "test 2")
assert(rep.testPendingEventsReceived.length == 1)
assert(rep.testPendingEventsReceived(0).testName == "test 3")
assert(rep.testCanceledEventsReceived.length == 1)
assert(rep.testCanceledEventsReceived(0).testName == "test 4")
assert(rep.testIgnoredEventsReceived.length == 1)
assert(rep.testIgnoredEventsReceived(0).testName == "test 5")
}
"should generate TestRegistrationClosedException with correct stack depth info when has a registerTest nested inside a registerTest" in {
class TestSpec extends AnyFunSpec {
var registrationClosedThrown = false
describe("a feature") {
registerTest("a scenario") {
registerTest("nested scenario") {
assert(1 == 2)
}; /* ASSERTION_SUCCEED */
}
}
override def withFixture(test: NoArgTest): Outcome = {
val outcome = test.apply()
outcome match {
case Exceptional(ex: TestRegistrationClosedException) =>
registrationClosedThrown = true
case _ =>
}
outcome
}
}
val rep = new EventRecordingReporter
val s = new TestSpec
s.run(None, Args(rep))
assert(s.registrationClosedThrown == true)
val testFailedEvents = rep.testFailedEventsReceived
assert(testFailedEvents.size == 1)
assert(testFailedEvents(0).throwable.get.getClass() == classOf[TestRegistrationClosedException])
val trce = testFailedEvents(0).throwable.get.asInstanceOf[TestRegistrationClosedException]
assert("FunSpecSpec.scala" == trce.failedCodeFileName.get)
assert(trce.failedCodeLineNumber.get == thisLineNumber - 24)
assert(trce.message == Some("Test cannot be nested inside another test."))
}
"should generate TestRegistrationClosedException with correct stack depth info when has a registerIgnoredTest nested inside a registerTest" in {
class TestSpec extends AnyFunSpec {
var registrationClosedThrown = false
describe("a feature") {
registerTest("a scenario") {
registerIgnoredTest("nested scenario") {
assert(1 == 2)
}; /* ASSERTION_SUCCEED */
}
}
override def withFixture(test: NoArgTest): Outcome = {
val outcome = test.apply()
outcome match {
case Exceptional(ex: TestRegistrationClosedException) =>
registrationClosedThrown = true
case _ =>
}
outcome
}
}
val rep = new EventRecordingReporter
val s = new TestSpec
s.run(None, Args(rep))
assert(s.registrationClosedThrown == true)
val testFailedEvents = rep.testFailedEventsReceived
assert(testFailedEvents.size == 1)
assert(testFailedEvents(0).throwable.get.getClass() == classOf[TestRegistrationClosedException])
val trce = testFailedEvents(0).throwable.get.asInstanceOf[TestRegistrationClosedException]
assert("FunSpecSpec.scala" == trce.failedCodeFileName.get)
assert(trce.failedCodeLineNumber.get == thisLineNumber - 24)
assert(trce.message == Some("Test cannot be nested inside another test."))
}
"should generate NotAllowedException wrapping a TestFailedException when assert fails in scope" in {
class TestSpec extends AnyFunSpec {
describe("a feature") {
val a = 1
assert(a == 2)
}
}
val e = intercept[NotAllowedException] {
new TestSpec
}
assert("FunSpecSpec.scala" == e.failedCodeFileName.get)
assert(e.failedCodeLineNumber.get == thisLineNumber - 7)
assert(e.message == Some(FailureMessages.assertionShouldBePutInsideItOrTheyClauseNotDescribeClause))
assert(e.cause.isDefined)
val causeThrowable = e.cause.get
assert(causeThrowable.isInstanceOf[TestFailedException])
val cause = causeThrowable.asInstanceOf[TestFailedException]
assert("FunSpecSpec.scala" == cause.failedCodeFileName.get)
assert(cause.failedCodeLineNumber.get == thisLineNumber - 15)
assert(cause.message == Some(FailureMessages.didNotEqual(prettifier, 1, 2)))
}
"should generate NotAllowedException wrapping a TestCanceledException when assume fails in scope" in {
class TestSpec extends AnyFunSpec {
describe("a feature") {
val a = 1
assume(a == 2)
}
}
val e = intercept[NotAllowedException] {
new TestSpec
}
assert("FunSpecSpec.scala" == e.failedCodeFileName.get)
assert(e.failedCodeLineNumber.get == thisLineNumber - 7)
assert(e.message == Some(FailureMessages.assertionShouldBePutInsideItOrTheyClauseNotDescribeClause))
assert(e.cause.isDefined)
val causeThrowable = e.cause.get
assert(causeThrowable.isInstanceOf[TestCanceledException])
val cause = causeThrowable.asInstanceOf[TestCanceledException]
assert("FunSpecSpec.scala" == cause.failedCodeFileName.get)
assert(cause.failedCodeLineNumber.get == thisLineNumber - 15)
assert(cause.message == Some(FailureMessages.didNotEqual(prettifier, 1, 2)))
}
"should generate NotAllowedException wrapping a non-fatal RuntimeException is thrown inside scope" in {
class TestSpec extends AnyFunSpec {
describe("a feature") {
throw new RuntimeException("on purpose")
}
}
val e = intercept[NotAllowedException] {
new TestSpec
}
assert("FunSpecSpec.scala" == e.failedCodeFileName.get)
assert(e.failedCodeLineNumber.get == thisLineNumber - 8)
assert(e.cause.isDefined)
val causeThrowable = e.cause.get
assert(e.message == Some(FailureMessages.exceptionWasThrownInDescribeClause(prettifier, UnquotedString(causeThrowable.getClass.getName), "a feature", "on purpose")))
assert(causeThrowable.isInstanceOf[RuntimeException])
val cause = causeThrowable.asInstanceOf[RuntimeException]
assert(cause.getMessage == "on purpose")
}
"should generate NotAllowedException wrapping a DuplicateTestNameException is thrown inside scope" in {
class TestSpec extends AnyFunSpec {
describe("a feature") {
it("test 1") {}
it("test 1") {}
}
}
val e = intercept[NotAllowedException] {
new TestSpec
}
assert("FunSpecSpec.scala" == e.failedCodeFileName.get)
assert(e.failedCodeLineNumber.get == thisLineNumber - 7)
assert(e.cause.isDefined)
val causeThrowable = e.cause.get
assert(e.message == Some(FailureMessages.exceptionWasThrownInDescribeClause(prettifier, UnquotedString(causeThrowable.getClass.getName), "a feature", FailureMessages.duplicateTestName(prettifier, UnquotedString("a feature test 1")))))
assert(causeThrowable.isInstanceOf[DuplicateTestNameException])
val cause = causeThrowable.asInstanceOf[DuplicateTestNameException]
assert(cause.getMessage == FailureMessages.duplicateTestName(prettifier, UnquotedString("a feature test 1")))
}
// SKIP-SCALATESTJS,NATIVE-START
"should propagate AnnotationFormatError when it is thrown inside scope" in {
class TestSpec extends AnyFunSpec {
describe("a feature") {
throw new AnnotationFormatError("on purpose")
}
}
val e = intercept[AnnotationFormatError] {
new TestSpec
}
assert(e.getMessage == "on purpose")
}
"should propagate AWTError when it is thrown inside scope" in {
class TestSpec extends AnyFunSpec {
describe("a feature") {
throw new AWTError("on purpose")
}
}
val e = intercept[AWTError] {
new TestSpec
}
assert(e.getMessage == "on purpose")
}
"should propagate CoderMalfunctionError when it is thrown inside scope" in {
class TestSpec extends AnyFunSpec {
describe("a feature") {
throw new CoderMalfunctionError(new RuntimeException("on purpose"))
}
}
val e = intercept[CoderMalfunctionError] {
new TestSpec
}
assert(e.getMessage == "java.lang.RuntimeException: on purpose")
}
"should propagate FactoryConfigurationError when it is thrown inside scope" in {
class TestSpec extends AnyFunSpec {
describe("a feature") {
throw new FactoryConfigurationError("on purpose")
}
}
val e = intercept[FactoryConfigurationError] {
new TestSpec
}
assert(e.getMessage == "on purpose")
}
"should propagate LinkageError when it is thrown inside scope" in {
class TestSpec extends AnyFunSpec {
describe("a feature") {
throw new LinkageError("on purpose")
}
}
val e = intercept[LinkageError] {
new TestSpec
}
assert(e.getMessage == "on purpose")
}
"should propagate ThreadDeath when it is thrown inside scope" in {
class TestSpec extends AnyFunSpec {
describe("a feature") {
throw new ThreadDeath
}
}
val e = intercept[ThreadDeath] {
new TestSpec
}
assert(e.getMessage == null)
}
"should propagate TransformerFactoryConfigurationError when it is thrown inside scope" in {
class TestSpec extends AnyFunSpec {
describe("a feature") {
throw new TransformerFactoryConfigurationError("on purpose")
}
}
val e = intercept[TransformerFactoryConfigurationError] {
new TestSpec
}
assert(e.getMessage == "on purpose")
}
"should propagate VirtualMachineError when it is thrown inside scope" in {
class TestSpec extends AnyFunSpec {
describe("a feature") {
throw new VirtualMachineError("on purpose") {}
}
}
val e = intercept[VirtualMachineError] {
new TestSpec
}
assert(e.getMessage == "on purpose")
}
// SKIP-SCALATESTJS,NATIVE-END
}
}
| scalatest/scalatest | jvm/funspec-test/src/test/scala/org/scalatest/funspec/FunSpecSpec.scala | Scala | apache-2.0 | 64,537 |
/* __ *\\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\\ \\/ /__/ __ |/ /__/ __ |/_// /_\\ \\ http://scala-js.org/ **
** /____/\\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\\* */
package org.scalajs.testsuite.compiler
import scala.annotation.switch
import org.junit.Test
import org.junit.Assert._
class MatchTest {
import MatchTest._
@Test def switchWithGuardsStat(): Unit = {
def test(x: Int, y: Int): String = {
var result = ""
(x: @switch) match {
case 1 => result = "one"
case 2 if y < 10 => result = "two special"
case 2 => result = "two"
case 3 if y < 10 => result = "three special"
case 3 if y > 100 => result = "three big special"
case z if y > 100 => result = "big " + z
case _ => result = "None of those"
}
result
}
assertEquals("one", test(1, 0))
assertEquals("two special", test(2, 0))
assertEquals("two", test(2, 50))
assertEquals("three special", test(3, 5))
assertEquals("three big special", test(3, 200))
assertEquals("None of those", test(3, 50))
assertEquals("big 5", test(5, 300))
assertEquals("None of those", test(5, 20))
}
@Test def switchWithGuardsExpr(): Unit = {
def test(x: Int, y: Int): String = {
(x: @switch) match {
case 1 => "one"
case 2 if y < 10 => "two special"
case 2 => "two"
case 3 if y < 10 => "three special"
case 3 if y > 100 => "three big special"
case z if y > 100 => "big " + z
case _ => "None of those"
}
}
assertEquals("one", test(1, 0))
assertEquals("two special", test(2, 0))
assertEquals("two", test(2, 50))
assertEquals("three special", test(3, 5))
assertEquals("three big special", test(3, 200))
assertEquals("None of those", test(3, 50))
assertEquals("big 5", test(5, 300))
assertEquals("None of those", test(5, 20))
}
// #2554
@Test def matchWithNonIdentityMatchEndScalaLib(): Unit = {
val foo: Option[Int] = Some(42)
/* This match generates a value class boxing operation in the matchEnd (in
* 2.10 and 2.11).
*/
val result =
"foo = " ++ (foo match { case Some(0) => "zero" case _ => "unknown" })
assertEquals("foo = unknown", result)
}
// #2554
@Test def matchWithNonIdentityMatchEndIndependent(): Unit = {
import scala.language.implicitConversions
implicit def toValueClass(x: Int): ValueClass = new ValueClass(x)
def show[T](x: ValueClassBase[T]): String = x.f().toString
val foo: Option[Int] = Some(42)
assertEquals("4", show(foo match { case Some(0) => 1 case _ => 2 }))
}
}
object MatchTest {
trait ValueClassBase[T] extends Any {
def f(): T
}
class ValueClass(val x: Int) extends AnyVal with ValueClassBase[Int] {
def f() = x * 2
}
}
| lrytz/scala-js | test-suite/shared/src/test/scala/org/scalajs/testsuite/compiler/MatchTest.scala | Scala | bsd-3-clause | 3,245 |
import org.garage.guru.domain._
import scalaz._
package object domain {
import scala.util.Try
import scalaz.Bind
type Repo = Repository[FreeParkingLot, TakenParkingLot, Vehicle, VehicleId]
type ParkingAction[A] = ReaderT[Try, Repo, A]
type DomainService = ParkingService[FreeParkingLot, TakenParkingLot, Vehicle, VehicleId]
type DomainAction[A] = ReaderT[ParkingAction, DomainService, A]
implicit val tryBind = TryBind
object TryBind extends Bind[Try] {
override def bind[A, B](fa: Try[A])(f: (A) => Try[B]): Try[B] = fa.flatMap(f)
override def map[A, B](fa: Try[A])(f: (A) => B): Try[B] = fa.map(f)
}
object DomainAction extends KleisliInstances with KleisliFunctions {
def apply[A](f: DomainService => ParkingAction[A]): DomainAction[A] = kleisli(f)
}
object ParkingAction extends KleisliInstances with KleisliFunctions {
def apply[A](f: Repo => Try[A]): ParkingAction[A] = kleisli(f)
}
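  // Illustrative sketch (not part of the original file): how the two Kleisli layers compose.
  // `findVehicle` on the repository and `park` on the service are assumed names used only to
  // show the wiring pattern.
  //
  //   def lookupVehicle(id: VehicleId): ParkingAction[Vehicle] =
  //     ParkingAction(repo => repo.findVehicle(id))
  //
  //   def parkVehicle(id: VehicleId): DomainAction[TakenParkingLot] =
  //     DomainAction(service => ParkingAction(repo => service.park(repo, id)))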
}
| ddd-fun/garage-guru-fun | src/main/scala/org/garage/guru/domain/domain.scala | Scala | apache-2.0 | 946 |
package com.twitter.server.handler
import com.twitter.finagle.http
import com.twitter.io.Charsets
import com.twitter.logging.{Level, Logger}
import com.twitter.server.util.HttpUtils._
import com.twitter.util.Await
import org.junit.runner.RunWith
import org.scalatest.FunSuite
import org.scalatest.junit.JUnitRunner
@RunWith(classOf[JUnitRunner])
class LoggingHandlerTest extends FunSuite {
test("query all loggers") {
val handler = new LoggingHandler
val loggers = Logger.iterator
val res = Await.result(handler(http.Request("/")))
assert(res.getStatus === http.Status.Ok)
val text = res.getContent.toString(Charsets.Utf8)
for (logger <- loggers) {
assert(text.contains(logger.name))
}
val browserReq = http.Request("/")
browserReq.headers().set("User-Agent", "Mozilla")
    val browserRes = Await.result(handler(browserReq))
assert(browserRes.getStatus === http.Status.Ok)
val html = browserRes.getContent.toString(Charsets.Utf8)
for (logger <- loggers) {
assert(html.contains(logger.name))
}
}
test("change log level") {
val handler = new LoggingHandler
Logger("").setLevel(Level.INFO)
assert(Logger("").getLevel == Level.INFO)
Await.result(handler(http.Request("/?logger=root&level=DEBUG")))
assert(Logger("").getLevel == Level.DEBUG)
}
test("text output") {
val handler = new LoggingHandler
val l0 = () => {
val logger = Logger("l0")
logger.setLevel(Level.ALL)
logger
}
val l1 = () => {
val logger = Logger("l1")
logger.setLevel(Level.DEBUG)
logger
}
Logger.withLoggers(List(l0, l1)) {
val req = http.Request("/")
val res = Await.result(handler(req))
assert(res.getStatus === http.Status.Ok)
val text = res.getContent.toString(Charsets.Utf8)
assert(text === "root OFF\nl0 ALL\nl1 DEBUG")
}
}
}
| takei-shg/twitter-server | src/test/scala/com/twitter/server/handler/LoggingHandlerTest.scala | Scala | apache-2.0 | 1,905 |
package com.theseventhsense.datetime
import com.theseventhsense.utils.types.SSDateTime
import com.theseventhsense.utils.types.SSDateTime.Instant
import moment.Moment
/**
* Created by erik on 2/18/16.
*/
object SSDateTimeZoneOps extends TSSDateTimeZoneOps {
override def isValid(s: String): Boolean = true
override def offsetSeconds(zone: SSDateTime.TimeZone,
instant: SSDateTime.Instant): Integer = 0
override def parse(s: String): Option[SSDateTime.TimeZone] = None
override def instantAsIsoString(instant: Instant): String = {
Moment(instant.millis.toDouble).utc().format()
}
}
| 7thsense/utils-datetime | js/src/main/scala/com/theseventhsense/datetime/SSDateTimeZoneOps.scala | Scala | mit | 632 |
package spark.streaming.api.java
import java.util.{List => JList}
import java.lang.{Long => JLong}
import scala.collection.JavaConversions._
import spark.streaming._
import spark.streaming.StreamingContext._
import spark.api.java.function.{Function => JFunction, Function2 => JFunction2}
import spark.{RDD, Partitioner}
import org.apache.hadoop.mapred.{JobConf, OutputFormat}
import org.apache.hadoop.mapreduce.{OutputFormat => NewOutputFormat}
import org.apache.hadoop.conf.Configuration
import spark.api.java.{JavaRDD, JavaPairRDD}
import spark.storage.StorageLevel
import com.google.common.base.Optional
import spark.RDD
class JavaPairDStream[K, V](val dstream: DStream[(K, V)])(
implicit val kManifiest: ClassManifest[K],
implicit val vManifest: ClassManifest[V])
extends JavaDStreamLike[(K, V), JavaPairDStream[K, V], JavaPairRDD[K, V]] {
override def wrapRDD(rdd: RDD[(K, V)]): JavaPairRDD[K, V] = JavaPairRDD.fromRDD(rdd)
// =======================================================================
// Methods common to all DStream's
// =======================================================================
/** Return a new DStream containing only the elements that satisfy a predicate. */
def filter(f: JFunction[(K, V), java.lang.Boolean]): JavaPairDStream[K, V] =
dstream.filter((x => f(x).booleanValue()))
/** Persist RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */
def cache(): JavaPairDStream[K, V] = dstream.cache()
/** Persist RDDs of this DStream with the default storage level (MEMORY_ONLY_SER) */
def persist(): JavaPairDStream[K, V] = dstream.persist()
/** Persist the RDDs of this DStream with the given storage level */
def persist(storageLevel: StorageLevel): JavaPairDStream[K, V] = dstream.persist(storageLevel)
  /** Method that generates an RDD for the given time */
def compute(validTime: Time): JavaPairRDD[K, V] = {
dstream.compute(validTime) match {
case Some(rdd) => new JavaPairRDD(rdd)
case None => null
}
}
/**
* Return a new DStream which is computed based on windowed batches of this DStream.
* The new DStream generates RDDs with the same interval as this DStream.
* @param windowDuration width of the window; must be a multiple of this DStream's interval.
*/
def window(windowDuration: Duration): JavaPairDStream[K, V] =
dstream.window(windowDuration)
/**
* Return a new DStream which is computed based on windowed batches of this DStream.
* @param windowDuration duration (i.e., width) of the window;
* must be a multiple of this DStream's interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's interval
*/
def window(windowDuration: Duration, slideDuration: Duration): JavaPairDStream[K, V] =
dstream.window(windowDuration, slideDuration)
/**
* Return a new DStream by unifying data of another DStream with this DStream.
* @param that Another DStream having the same interval (i.e., slideDuration) as this DStream.
*/
def union(that: JavaPairDStream[K, V]): JavaPairDStream[K, V] =
dstream.union(that.dstream)
// =======================================================================
// Methods only for PairDStream's
// =======================================================================
/**
* Return a new DStream by applying `groupByKey` to each RDD. Hash partitioning is used to
* generate the RDDs with Spark's default number of partitions.
*/
def groupByKey(): JavaPairDStream[K, JList[V]] =
dstream.groupByKey().mapValues(seqAsJavaList _)
/**
* Return a new DStream by applying `groupByKey` to each RDD. Hash partitioning is used to
* generate the RDDs with `numPartitions` partitions.
*/
def groupByKey(numPartitions: Int): JavaPairDStream[K, JList[V]] =
dstream.groupByKey(numPartitions).mapValues(seqAsJavaList _)
/**
* Return a new DStream by applying `groupByKey` on each RDD of `this` DStream.
* Therefore, the values for each key in `this` DStream's RDDs are grouped into a
* single sequence to generate the RDDs of the new DStream. [[spark.Partitioner]]
* is used to control the partitioning of each RDD.
*/
def groupByKey(partitioner: Partitioner): JavaPairDStream[K, JList[V]] =
dstream.groupByKey(partitioner).mapValues(seqAsJavaList _)
/**
* Return a new DStream by applying `reduceByKey` to each RDD. The values for each key are
* merged using the associative reduce function. Hash partitioning is used to generate the RDDs
* with Spark's default number of partitions.
*/
def reduceByKey(func: JFunction2[V, V, V]): JavaPairDStream[K, V] =
dstream.reduceByKey(func)
/**
* Return a new DStream by applying `reduceByKey` to each RDD. The values for each key are
* merged using the supplied reduce function. Hash partitioning is used to generate the RDDs
* with `numPartitions` partitions.
*/
def reduceByKey(func: JFunction2[V, V, V], numPartitions: Int): JavaPairDStream[K, V] =
dstream.reduceByKey(func, numPartitions)
/**
* Return a new DStream by applying `reduceByKey` to each RDD. The values for each key are
* merged using the supplied reduce function. [[spark.Partitioner]] is used to control the
* partitioning of each RDD.
*/
def reduceByKey(func: JFunction2[V, V, V], partitioner: Partitioner): JavaPairDStream[K, V] = {
dstream.reduceByKey(func, partitioner)
}
/**
   * Combine elements of each key in this DStream's RDDs using a custom function. This is similar
   * to `combineByKey` for RDDs. Please refer to `combineByKey` in [[spark.PairRDDFunctions]] for
   * more information.
*/
def combineByKey[C](createCombiner: JFunction[V, C],
mergeValue: JFunction2[C, V, C],
mergeCombiners: JFunction2[C, C, C],
partitioner: Partitioner
): JavaPairDStream[K, C] = {
implicit val cm: ClassManifest[C] =
implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[C]]
dstream.combineByKey(createCombiner, mergeValue, mergeCombiners, partitioner)
}
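  // Illustrative call shape (not from the original source; written with Scala closures for
  // brevity — from Java one would pass Function/Function2 instances). `pairs` is assumed to be
  // a JavaPairDStream[String, Int]:
  //
  //   pairs.combineByKey(
  //     (v: Int) => List(v),                      // createCombiner: start a per-key list
  //     (acc: List[Int], v: Int) => v :: acc,     // mergeValue: fold one value into the list
  //     (a: List[Int], b: List[Int]) => a ::: b,  // mergeCombiners: merge partial lists
  //     new spark.HashPartitioner(4))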
/**
* Return a new DStream by applying `groupByKey` over a sliding window. This is similar to
* `DStream.groupByKey()` but applies it over a sliding window. The new DStream generates RDDs
* with the same interval as this DStream. Hash partitioning is used to generate the RDDs with
* Spark's default number of partitions.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
*/
def groupByKeyAndWindow(windowDuration: Duration): JavaPairDStream[K, JList[V]] = {
dstream.groupByKeyAndWindow(windowDuration).mapValues(seqAsJavaList _)
}
/**
* Return a new DStream by applying `groupByKey` over a sliding window. Similar to
* `DStream.groupByKey()`, but applies it over a sliding window. Hash partitioning is used to
* generate the RDDs with Spark's default number of partitions.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
*/
def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration)
: JavaPairDStream[K, JList[V]] = {
dstream.groupByKeyAndWindow(windowDuration, slideDuration).mapValues(seqAsJavaList _)
}
/**
* Return a new DStream by applying `groupByKey` over a sliding window on `this` DStream.
* Similar to `DStream.groupByKey()`, but applies it over a sliding window.
* Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param numPartitions Number of partitions of each RDD in the new DStream.
*/
def groupByKeyAndWindow(windowDuration: Duration, slideDuration: Duration, numPartitions: Int)
:JavaPairDStream[K, JList[V]] = {
dstream.groupByKeyAndWindow(windowDuration, slideDuration, numPartitions)
.mapValues(seqAsJavaList _)
}
/**
* Return a new DStream by applying `groupByKey` over a sliding window on `this` DStream.
* Similar to `DStream.groupByKey()`, but applies it over a sliding window.
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream.
*/
def groupByKeyAndWindow(
windowDuration: Duration,
slideDuration: Duration,
partitioner: Partitioner
):JavaPairDStream[K, JList[V]] = {
dstream.groupByKeyAndWindow(windowDuration, slideDuration, partitioner)
.mapValues(seqAsJavaList _)
}
/**
* Create a new DStream by applying `reduceByKey` over a sliding window on `this` DStream.
* Similar to `DStream.reduceByKey()`, but applies it over a sliding window. The new DStream
* generates RDDs with the same interval as this DStream. Hash partitioning is used to generate
* the RDDs with Spark's default number of partitions.
* @param reduceFunc associative reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
*/
def reduceByKeyAndWindow(reduceFunc: Function2[V, V, V], windowDuration: Duration)
:JavaPairDStream[K, V] = {
dstream.reduceByKeyAndWindow(reduceFunc, windowDuration)
}
/**
* Return a new DStream by applying `reduceByKey` over a sliding window. This is similar to
* `DStream.reduceByKey()` but applies it over a sliding window. Hash partitioning is used to
* generate the RDDs with Spark's default number of partitions.
* @param reduceFunc associative reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
*/
def reduceByKeyAndWindow(
reduceFunc: Function2[V, V, V],
windowDuration: Duration,
slideDuration: Duration
):JavaPairDStream[K, V] = {
dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration)
}
/**
* Return a new DStream by applying `reduceByKey` over a sliding window. This is similar to
* `DStream.reduceByKey()` but applies it over a sliding window. Hash partitioning is used to
* generate the RDDs with `numPartitions` partitions.
* @param reduceFunc associative reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param numPartitions Number of partitions of each RDD in the new DStream.
*/
def reduceByKeyAndWindow(
reduceFunc: Function2[V, V, V],
windowDuration: Duration,
slideDuration: Duration,
numPartitions: Int
): JavaPairDStream[K, V] = {
dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, numPartitions)
}
/**
* Return a new DStream by applying `reduceByKey` over a sliding window. Similar to
* `DStream.reduceByKey()`, but applies it over a sliding window.
* @param reduceFunc associative reduce function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream.
*/
def reduceByKeyAndWindow(
reduceFunc: Function2[V, V, V],
windowDuration: Duration,
slideDuration: Duration,
partitioner: Partitioner
): JavaPairDStream[K, V] = {
dstream.reduceByKeyAndWindow(reduceFunc, windowDuration, slideDuration, partitioner)
}
/**
   * Return a new DStream by applying incremental `reduceByKey` over a sliding window.
   * The reduced value over a new window is calculated using the old window's reduced value :
   *  1. reduce the new values that entered the window (e.g., adding new counts)
   *  2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
   * This is more efficient than reduceByKeyAndWindow without an "inverse reduce" function.
   * However, it is applicable only to "invertible reduce functions".
* Hash partitioning is used to generate the RDDs with Spark's default number of partitions.
* @param reduceFunc associative reduce function
* @param invReduceFunc inverse function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
*/
def reduceByKeyAndWindow(
reduceFunc: Function2[V, V, V],
invReduceFunc: Function2[V, V, V],
windowDuration: Duration,
slideDuration: Duration
): JavaPairDStream[K, V] = {
dstream.reduceByKeyAndWindow(reduceFunc, invReduceFunc, windowDuration, slideDuration)
}
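  // Worked example (illustrative, not from the original source): a 30-second windowed word
  // count that slides every 10 seconds. Only the deltas are recomputed: counts entering the
  // window are added and counts leaving it are subtracted. `pairs` is assumed to be a
  // JavaPairDStream[String, Int]; from Java the closures would be Function2 instances.
  //
  //   pairs.reduceByKeyAndWindow(
  //     (a: Int, b: Int) => a + b,  // reduce values entering the window
  //     (a: Int, b: Int) => a - b,  // "inverse reduce" values leaving the window
  //     Seconds(30), Seconds(10))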
/**
* Return a new DStream by applying incremental `reduceByKey` over a sliding window.
   * The reduced value over a new window is calculated using the old window's reduced value :
   *  1. reduce the new values that entered the window (e.g., adding new counts)
   *  2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
   * This is more efficient than reduceByKeyAndWindow without an "inverse reduce" function.
   * However, it is applicable only to "invertible reduce functions".
* Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
* @param reduceFunc associative reduce function
* @param invReduceFunc inverse function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param numPartitions number of partitions of each RDD in the new DStream.
* @param filterFunc function to filter expired key-value pairs;
   *                       only pairs that satisfy the function are retained;
* set this to null if you do not want to filter
*/
def reduceByKeyAndWindow(
reduceFunc: Function2[V, V, V],
invReduceFunc: Function2[V, V, V],
windowDuration: Duration,
slideDuration: Duration,
numPartitions: Int,
filterFunc: JFunction[(K, V), java.lang.Boolean]
): JavaPairDStream[K, V] = {
dstream.reduceByKeyAndWindow(
reduceFunc,
invReduceFunc,
windowDuration,
slideDuration,
numPartitions,
(p: (K, V)) => filterFunc(p).booleanValue()
)
}
/**
* Return a new DStream by applying incremental `reduceByKey` over a sliding window.
   * The reduced value over a new window is calculated using the old window's reduced value :
   *  1. reduce the new values that entered the window (e.g., adding new counts)
   *  2. "inverse reduce" the old values that left the window (e.g., subtracting old counts)
   * This is more efficient than reduceByKeyAndWindow without an "inverse reduce" function.
   * However, it is applicable only to "invertible reduce functions".
* @param reduceFunc associative reduce function
* @param invReduceFunc inverse function
* @param windowDuration width of the window; must be a multiple of this DStream's
* batching interval
* @param slideDuration sliding interval of the window (i.e., the interval after which
* the new DStream will generate RDDs); must be a multiple of this
* DStream's batching interval
* @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream.
* @param filterFunc function to filter expired key-value pairs;
   *                       only pairs that satisfy the function are retained;
* set this to null if you do not want to filter
*/
def reduceByKeyAndWindow(
reduceFunc: Function2[V, V, V],
invReduceFunc: Function2[V, V, V],
windowDuration: Duration,
slideDuration: Duration,
partitioner: Partitioner,
filterFunc: JFunction[(K, V), java.lang.Boolean]
): JavaPairDStream[K, V] = {
dstream.reduceByKeyAndWindow(
reduceFunc,
invReduceFunc,
windowDuration,
slideDuration,
partitioner,
(p: (K, V)) => filterFunc(p).booleanValue()
)
}
private def convertUpdateStateFunction[S](in: JFunction2[JList[V], Optional[S], Optional[S]]):
(Seq[V], Option[S]) => Option[S] = {
val scalaFunc: (Seq[V], Option[S]) => Option[S] = (values, state) => {
val list: JList[V] = values
val scalaState: Optional[S] = state match {
case Some(s) => Optional.of(s)
case _ => Optional.absent()
}
val result: Optional[S] = in.apply(list, scalaState)
result.isPresent match {
case true => Some(result.get())
case _ => None
}
}
scalaFunc
}
/**
* Create a new "state" DStream where the state for each key is updated by applying
* the given function on the previous state of the key and the new values of each key.
* Hash partitioning is used to generate the RDDs with Spark's default number of partitions.
   * @param updateFunc State update function. If this function returns None, then the
   *                   corresponding state key-value pair will be eliminated.
* @tparam S State type
*/
def updateStateByKey[S](updateFunc: JFunction2[JList[V], Optional[S], Optional[S]])
: JavaPairDStream[K, S] = {
implicit val cm: ClassManifest[S] =
implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[S]]
dstream.updateStateByKey(convertUpdateStateFunction(updateFunc))
}
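  // Illustrative sketch (not from the original source; written as a Scala closure for brevity,
  // from Java one would pass a Function2 instance): keeping a running count per key, where the
  // previous state arrives as a Guava Optional and returning Optional.absent() drops the key.
  //
  //   val runningCounts = pairs.updateStateByKey[Int](
  //     (values: JList[Int], state: Optional[Int]) =>
  //       Optional.of(state.or(0) + values.size))  // each new value counts as one occurrence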
/**
* Create a new "state" DStream where the state for each key is updated by applying
* the given function on the previous state of the key and the new values of each key.
* Hash partitioning is used to generate the RDDs with `numPartitions` partitions.
   * @param updateFunc State update function. If this function returns None, then the
   *                   corresponding state key-value pair will be eliminated.
* @param numPartitions Number of partitions of each RDD in the new DStream.
* @tparam S State type
*/
def updateStateByKey[S: ClassManifest](
updateFunc: JFunction2[JList[V], Optional[S], Optional[S]],
numPartitions: Int)
: JavaPairDStream[K, S] = {
dstream.updateStateByKey(convertUpdateStateFunction(updateFunc), numPartitions)
}
/**
* Create a new "state" DStream where the state for each key is updated by applying
* the given function on the previous state of the key and the new values of the key.
* [[spark.Partitioner]] is used to control the partitioning of each RDD.
   * @param updateFunc State update function. If this function returns None, then the
   *                   corresponding state key-value pair will be eliminated.
* @param partitioner Partitioner for controlling the partitioning of each RDD in the new DStream.
* @tparam S State type
*/
def updateStateByKey[S: ClassManifest](
updateFunc: JFunction2[JList[V], Optional[S], Optional[S]],
partitioner: Partitioner
): JavaPairDStream[K, S] = {
dstream.updateStateByKey(convertUpdateStateFunction(updateFunc), partitioner)
}
def mapValues[U](f: JFunction[V, U]): JavaPairDStream[K, U] = {
implicit val cm: ClassManifest[U] =
implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[U]]
dstream.mapValues(f)
}
def flatMapValues[U](f: JFunction[V, java.lang.Iterable[U]]): JavaPairDStream[K, U] = {
import scala.collection.JavaConverters._
def fn = (x: V) => f.apply(x).asScala
implicit val cm: ClassManifest[U] =
implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[U]]
dstream.flatMapValues(fn)
}
/**
* Cogroup `this` DStream with `other` DStream. For each key k in corresponding RDDs of `this`
   * or `other` DStreams, the generated RDD will contain a tuple with the list of values for that
   * key in both RDDs. HashPartitioner is used to partition each generated RDD into the default
   * number of partitions.
*/
def cogroup[W](other: JavaPairDStream[K, W]): JavaPairDStream[K, (JList[V], JList[W])] = {
implicit val cm: ClassManifest[W] =
implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[W]]
dstream.cogroup(other.dstream).mapValues(t => (seqAsJavaList(t._1), seqAsJavaList((t._2))))
}
/**
* Cogroup `this` DStream with `other` DStream. For each key k in corresponding RDDs of `this`
   * or `other` DStreams, the generated RDD will contain a tuple with the list of values for that
* key in both RDDs. Partitioner is used to partition each generated RDD.
*/
def cogroup[W](other: JavaPairDStream[K, W], partitioner: Partitioner)
: JavaPairDStream[K, (JList[V], JList[W])] = {
implicit val cm: ClassManifest[W] =
implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[W]]
dstream.cogroup(other.dstream, partitioner)
.mapValues(t => (seqAsJavaList(t._1), seqAsJavaList((t._2))))
}
/**
* Join `this` DStream with `other` DStream. HashPartitioner is used
   * to partition each generated RDD into the default number of partitions.
*/
def join[W](other: JavaPairDStream[K, W]): JavaPairDStream[K, (V, W)] = {
implicit val cm: ClassManifest[W] =
implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[W]]
dstream.join(other.dstream)
}
/**
* Join `this` DStream with `other` DStream, that is, each RDD of the new DStream will
* be generated by joining RDDs from `this` and other DStream. Uses the given
* Partitioner to partition each generated RDD.
*/
def join[W](other: JavaPairDStream[K, W], partitioner: Partitioner)
: JavaPairDStream[K, (V, W)] = {
implicit val cm: ClassManifest[W] =
implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[W]]
dstream.join(other.dstream, partitioner)
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is
* generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix".
*/
def saveAsHadoopFiles[F <: OutputFormat[K, V]](prefix: String, suffix: String) {
dstream.saveAsHadoopFiles(prefix, suffix)
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is
* generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix".
*/
def saveAsHadoopFiles(
prefix: String,
suffix: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[_ <: OutputFormat[_, _]]) {
dstream.saveAsHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass)
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is
* generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix".
*/
def saveAsHadoopFiles(
prefix: String,
suffix: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[_ <: OutputFormat[_, _]],
conf: JobConf) {
dstream.saveAsHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass, conf)
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is
* generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix".
*/
def saveAsNewAPIHadoopFiles[F <: NewOutputFormat[K, V]](prefix: String, suffix: String) {
dstream.saveAsNewAPIHadoopFiles(prefix, suffix)
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is
* generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix".
*/
def saveAsNewAPIHadoopFiles(
prefix: String,
suffix: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[_ <: NewOutputFormat[_, _]]) {
dstream.saveAsNewAPIHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass)
}
/**
* Save each RDD in `this` DStream as a Hadoop file. The file name at each batch interval is
* generated based on `prefix` and `suffix`: "prefix-TIME_IN_MS.suffix".
*/
def saveAsNewAPIHadoopFiles(
prefix: String,
suffix: String,
keyClass: Class[_],
valueClass: Class[_],
outputFormatClass: Class[_ <: NewOutputFormat[_, _]],
conf: Configuration = new Configuration) {
dstream.saveAsNewAPIHadoopFiles(prefix, suffix, keyClass, valueClass, outputFormatClass, conf)
}
override val classManifest: ClassManifest[(K, V)] =
implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[Tuple2[K, V]]]
}
object JavaPairDStream {
implicit def fromPairDStream[K: ClassManifest, V: ClassManifest](dstream: DStream[(K, V)])
:JavaPairDStream[K, V] =
new JavaPairDStream[K, V](dstream)
def fromJavaDStream[K, V](dstream: JavaDStream[(K, V)]): JavaPairDStream[K, V] = {
implicit val cmk: ClassManifest[K] =
implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[K]]
implicit val cmv: ClassManifest[V] =
implicitly[ClassManifest[AnyRef]].asInstanceOf[ClassManifest[V]]
new JavaPairDStream[K, V](dstream.dstream)
}
def scalaToJavaLong[K: ClassManifest](dstream: JavaPairDStream[K, Long])
: JavaPairDStream[K, JLong] = {
StreamingContext.toPairDStreamFunctions(dstream.dstream).mapValues(new JLong(_))
}
}
| koeninger/spark | streaming/src/main/scala/spark/streaming/api/java/JavaPairDStream.scala | Scala | bsd-3-clause | 27,381 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.predictionio.examples.stock
import org.apache.predictionio.controller.LAlgorithm
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.rdd.RDD
import org.apache.spark.broadcast.Broadcast
import org.apache.predictionio.controller.EmptyParams
import org.saddle._
import scala.reflect._
import scala.reflect.runtime.universe._
import scala.collection.immutable.HashMap
abstract class StockStrategy[M: ClassTag]
extends LAlgorithm[
TrainingData,
(TrainingData, M),
QueryDate,
Prediction] {
def train(trainingData: TrainingData): (TrainingData, M) = {
(trainingData, createModel(trainingData.view))
}
def createModel(dataView: DataView): M
def predict(dataModel: (TrainingData, M), queryDate: QueryDate)
: Prediction = {
val (trainingData, model) = dataModel
val rawData = trainingData.rawDataB.value
val dataView: DataView =
rawData.view(queryDate.idx, trainingData.maxWindowSize)
val active = rawData._activeFrame
val activeTickers = dataView
.activeFrame()
.rowAt(0)
.filter(identity)
.index.toVec.contents
val query = Query(
idx = queryDate.idx,
dataView = dataView,
tickers = activeTickers,
mktTicker = rawData.mktTicker)
    onClose(model, query)
}
def onClose(model: M, query: Query): Prediction
}
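// Hypothetical sketch (not part of the original example): a concrete strategy only has to
// supply createModel and onClose; train and predict above do the windowing and query set-up.
// Everything below except StockStrategy, DataView, Query and Prediction is invented here.
//
//   class ConstantScoreStrategy extends StockStrategy[Double] {
//     def createModel(dataView: DataView): Double = 0.0
//     def onClose(model: Double, query: Query): Prediction =
//       Prediction(HashMap(query.tickers.map(t => t -> model): _*))
//   }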
class EmptyStrategy extends StockStrategy[AnyRef] {
def createModel(dataView: DataView): AnyRef = None
def onClose(model: AnyRef, query: Query): Prediction =
Prediction(HashMap[String, Double]())
}
| himanshudhami/PredictionIO | examples/experimental/scala-stock/src/main/scala/Algorithm.scala | Scala | apache-2.0 | 2,470 |
package com.ajjpj.adiagram_.render.base
import com.ajjpj.adiagram_.geometry.{ARect, APoint}
import javafx.scene.image.Image
import javafx.scene.canvas.{Canvas, GraphicsContext}
import com.ajjpj.adiagram_.geometry.transform.Translation
import com.ajjpj.adiagram_.render.RenderHelper
import com.ajjpj.adiagram_.ui.fw.Digest
import com.ajjpj.adiagram_.ui.{Zoom, AScreenPos}
/**
* This is the result of a rendering operation of a shape. To save memory (and improve cacheability), it does
* not contain the entire canvas but rather an arbitrary part of it plus a coordinate offset for rendering.
*
* @param renderOffset are the coordinates at which the upper left corner of the image should be rendered
*
* @author arno
*/
class PartialImage (val renderOffset: APoint, val img: Image)
class PartialImageWithShadow(val shape: PartialImage, val shadow: Option[PartialImage])
object PartialImage {
type PartialImageRenderCallback = (GraphicsContext, Translation) => Unit
/**
   * @param bounds bounds of the actual image (excluding the shadow, if any)
   * @param shadowStyle None if there is no shadow
*/
def fromGc(pos: APoint, bounds: ARect, zoom: Zoom, shadowStyle: Option[ShadowStyle], callback: PartialImageRenderCallback): PartialImageWithShadow = {
// one pixel on each side for sub-pixel bleeding
val offset = bounds.topLeft + ((-1/zoom.factor, -1/zoom.factor))
val canvas = new Canvas(bounds.width*zoom.factor+2, bounds.height*zoom.factor+2)
val t = Translation(offset.inverse)
callback(canvas.getGraphicsContext2D, t)
val img = RenderHelper.snapshot(canvas)
shadowStyle match {
case Some(s) => new PartialImageWithShadow(new PartialImage(offset - pos, img), Some(new PartialImage(s.withShadowOffset(offset) - pos, s.shadow(img, zoom))))
case None => new PartialImageWithShadow(new PartialImage(offset - pos, img), None)
}
}
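  // Illustrative call shape (an assumption, not taken from the original sources): a shape
  // renderer passes its model-space bounds and draws through the supplied Translation so that
  // model coordinates land inside the partial canvas. `drawShape` is an invented placeholder.
  //
  //   val rendered = PartialImage.fromGc(pos, bounds, zoom, Some(shadowStyle), (gc, t) =>
  //     drawShape(gc, t)  // e.g. fill operations on gc at coordinates produced via t
  //   )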
}
| arnohaase/a-diagram | src/main/scala-old/com/ajjpj/adiagram_/render/base/PartialImage.scala | Scala | apache-2.0 | 1,893 |
//
// Game Gardens - a platform for hosting simple multiplayer Java games
// Copyright (c) 2005-2013, Three Rings Design, Inc. - All rights reserved.
// https://github.com/threerings/game-gardens/blob/master/LICENSE
package com.threerings.gardens.web.logic
import com.samskivert.servlet.RedirectException
import com.samskivert.servlet.user.{Password, User}
import com.samskivert.servlet.util.{FriendlyException, ParameterUtil}
import com.samskivert.velocity.InvocationContext
import com.threerings.gardens.web.GardensApp
class login extends AuthLogic {
override def invoke (ctx :InvocationContext, app :GardensApp, user :User) {
val (req, rsp) = (ctx.getRequest, ctx.getResponse)
ParameterUtil.getParameter(req, "action", true) match {
case "login" =>
val username = ParameterUtil.requireParameter(req, "username", "error.missing_username")
val password = ParameterUtil.requireParameter(req, "password", "error.missing_password")
val user = app.userManager.getRepository.loadUser(username)
if (user == null) throw new FriendlyException("login.no_such_user")
if (!user.passwordsMatch(Password.makeFromClear(password)))
throw new FriendlyException("login.invalid_password")
effectLogin(ctx, app, user)
throw new RedirectException("/")
case "logout" =>
app.userManager.logout(req, rsp)
ctx.put("status", "login.logged_out")
case _ => // just display the login page
}
}
}
| threerings/game-gardens | server/src/main/scala/com/threerings/gardens/web/logic/login.scala | Scala | bsd-3-clause | 1,491 |
package org.rebeam.tree.demo
import java.io.File
import java.util.concurrent.atomic.AtomicLong
import org.rebeam.tree.{DeltaIOContext, DeltaIOContextSource, Moment}
import org.rebeam.tree.server.{ServerStore, ServerStoreValueExchange}
import org.rebeam.tree.Delta._
import org.rebeam.tree.sync.Sync._
import org.rebeam.tree.sync.DeltaIORun._
import org.rebeam.tree.demo.DemoData.Address
import org.rebeam.tree.demo.RefData.DataItemList
import org.rebeam.tree.ref.{Mirror, MirrorAndId}
import org.rebeam.tree.sync.{Ref, RefAdder}
import org.http4s._
import org.http4s.dsl._
import org.http4s.server.blaze.BlazeBuilder
import org.http4s.server.staticcontent._
import cats.effect.{Effect, IO}
import fs2.StreamApp.ExitCode
import fs2.{Stream, StreamApp}
import org.rebeam.tree.demo.RefFailureData.DataLinkPair
import scala.concurrent.ExecutionContext.Implicits.global
object ServerDemoAppIO extends ServerDemoApp[IO]
class ServerDemoApp[F[_]](implicit F: Effect[F]) extends StreamApp[F] with Http4sDsl[F] {
val address: ServerStore[Address] = {
import DemoData._
new ServerStore(Address(Street("OLD STREET", 1, 22.3)))
}
private val todoProjectMirrorStore = {
import DemoData._
//FIXME update to use put
val todoProjectMirrorIO: DeltaIO[Mirror] = for {
todoProject <- TodoExample.todoProjectIO
revision <- getGuid
} yield {
Mirror.empty.updated(todoProject.id, todoProject, revision)
}
val todoProjectMirror = todoProjectMirrorIO.runWith(
DeltaIOContext(Moment(0)),
DeltaId(ClientId(0), ClientDeltaId(0))
).data
new ServerStore(todoProjectMirror)
}
private val todoListStore = {
import DemoData._
val todoProject = TodoExample.todoProjectIO.runWith(
DeltaIOContext(Moment(0)),
DeltaId(ClientId(0), ClientDeltaId(0))
).data
new ServerStore(todoProject.lists.head)
}
private val todoProjectStore = {
import DemoData._
val todoProject = TodoExample.todoProjectIO.runWith(
DeltaIOContext(Moment(0)),
DeltaId(ClientId(0), ClientDeltaId(0))
).data
new ServerStore(todoProject)
}
private val taskStore = {
import TaskData._
val taskIO: DeltaIO[Task] = for {
user <- User.create("A", "User", "user", Email("[email protected]"))
task <- Task.example(Ref(user.id))
} yield task
val task = taskIO.runWith(
DeltaIOContext(Moment(0)),
DeltaId(ClientId(0), ClientDeltaId(0))
).data
new ServerStore(task)
}
private val refDemoStore: ServerStore[MirrorAndId[DataItemList]] = {
import RefData._
val result = RefData.exampleDataMirrorIO.runWith(
DeltaIOContext(Moment(0)),
DeltaId(ClientId(0), ClientDeltaId(0))
)
val mirrorAndId = RefAdder.mirrorAndIdRefAdder.addRefs(result)
new ServerStore(mirrorAndId)
}
private val refFailureDemoStore: ServerStore[MirrorAndId[DataLinkPair]] = {
import RefFailureData._
val result = RefFailureData.exampleDataMirrorIO.runWith(
DeltaIOContext(Moment(0)),
DeltaId(ClientId(0), ClientDeltaId(0))
)
val mirrorAndId = RefAdder.mirrorAndIdRefAdder.addRefs(result)
new ServerStore(mirrorAndId)
}
// TODO better way of doing this - start from 1 since we use 0 to generate example data
// Can we make a stream and use this to produce incrementing values?
private val nextClientId = new AtomicLong(1)
private val contextSource = DeltaIOContextSource.default
val apiService: HttpService[F] = HttpService[F] {
case GET -> Root / "hello" =>
Ok("Hello world!")
case GET -> Root / "pwd" =>
Ok(System.getProperty("user.dir"))
case GET -> Root / "todolist" =>
ServerStoreValueExchange(
todoListStore,
ClientId(nextClientId.getAndIncrement()),
contextSource
)
case GET -> Root / "todoproject" =>
ServerStoreValueExchange(
todoProjectStore,
ClientId(nextClientId.getAndIncrement()),
contextSource
)
case GET -> Root / "todoprojectmirror" =>
import DemoData._
ServerStoreValueExchange(
todoProjectMirrorStore,
ClientId(nextClientId.getAndIncrement()),
contextSource
)
case GET -> Root / "refs" =>
import RefData._
ServerStoreValueExchange(
refDemoStore,
ClientId(nextClientId.getAndIncrement()),
contextSource
)
case GET -> Root / "reffailure" =>
import RefFailureData._
ServerStoreValueExchange(
refFailureDemoStore,
ClientId(nextClientId.getAndIncrement()),
contextSource
)
case GET -> Root / "task" =>
ServerStoreValueExchange(
taskStore,
ClientId(nextClientId.getAndIncrement()),
contextSource
)
case GET -> Root / "address" =>
ServerStoreValueExchange(
address,
ClientId(nextClientId.getAndIncrement()),
contextSource
)
}
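  // Illustrative sketch (not part of the original app): with `apiService` mounted under "/api"
  // and the server bound to port 8080 in `stream` below, plain routes are reachable as e.g.
  // GET http://localhost:8080/api/hello, while the exchange routes upgrade to WebSockets, e.g.
  // ws://localhost:8080/api/todolist. The host/port here are assumptions copied from that builder.
  private def apiUrl(route: String, websocket: Boolean = false): String =
    (if (websocket) "ws" else "http") + "://localhost:8080/api/" + route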
  //Serves our assets from the ../assets directory (resolved relative to the user directory - kind
  //of messy), so changes to the original asset files show up after a browser refresh
val assets: HttpService[F] = fileService(FileService.Config(new File(System.getProperty("user.dir")).getParent + "/assets", "/assets"))
//This serves directly from development resources directory, so will update
//when we change original resources files and refresh browser
val resources: HttpService[F] = fileService(FileService.Config("src/main/resources", "/"))
val indexService: HttpService[F] = HttpService[F] {
case request @ GET -> Root =>
StaticFile.fromFile(new File("src/main/resources/index.html"), Some(request)).getOrElseF(NotFound())
}
// val apiCORS = CORS(apiService)
def stream(args: List[String], requestShutdown: F[Unit]): Stream[F, ExitCode] =
for {
exitCode <- BlazeBuilder[F]
.bindHttp(8080, "0.0.0.0")
.withWebSockets(true)
.mountService(apiService, "/api")
.mountService(indexService, "/")
.mountService(resources, "/")
.mountService(assets, "/") //Note that the "/assets" path is already built into the fileService
.serve
} yield exitCode
}
| trepidacious/tree-material-ui | jvm/src/main/scala/org/rebeam/tree/demo/ServerDemoApp.scala | Scala | gpl-3.0 | 6,224 |
package nsmc.mongo
class MetadataException(message: String = null, cause: Throwable = null) extends RuntimeException(message, cause) | shotishu/spark-mongodb-connector | src/main/scala/nsmc/mongo/MetadataException.scala | Scala | apache-2.0 | 133 |
package spire
package std
import scala.annotation.tailrec
import scala.collection.SeqLike
import scala.collection.mutable.Builder
import scala.collection.generic.CanBuildFrom
import spire.algebra._
import spire.NoImplicit
@SerialVersionUID(0L)
class SeqCModule[A, SA <: SeqLike[A, SA]](implicit val scalar: CRing[A], cbf: CanBuildFrom[SA,A,SA])
extends CModule[SA, A] with Serializable {
def zero: SA = cbf().result
def negate(sa: SA): SA = sa map (scalar.negate)
def plus(x: SA, y: SA): SA = {
@tailrec
def add1(it: Iterator[A], b: Builder[A, SA]): SA = if (it.hasNext) {
b += it.next()
add1(it, b)
} else {
b.result
}
@tailrec
def add2(xi: Iterator[A], yi: Iterator[A], b: Builder[A, SA]): SA = {
if (!xi.hasNext) {
add1(yi, b)
} else if (!yi.hasNext) {
add1(xi, b)
} else {
b += scalar.plus(xi.next(), yi.next())
add2(xi, yi, b)
}
}
add2(x.toIterator, y.toIterator, cbf(x))
}
override def minus(x: SA, y: SA): SA = {
@tailrec
def subl(it: Iterator[A], b: Builder[A, SA]): SA = if (it.hasNext) {
b += it.next()
subl(it, b)
} else {
b.result
}
@tailrec
def subr(it: Iterator[A], b: Builder[A, SA]): SA = if (it.hasNext) {
b += scalar.negate(it.next())
subr(it, b)
} else {
b.result
}
@tailrec
def sub2(xi: Iterator[A], yi: Iterator[A], b: Builder[A, SA]): SA = {
if (!xi.hasNext) {
subr(yi, b)
} else if (!yi.hasNext) {
subl(xi, b)
} else {
b += scalar.minus(xi.next(), yi.next())
sub2(xi, yi, b)
}
}
sub2(x.toIterator, y.toIterator, cbf(x))
}
def timesl(r: A, sa: SA): SA = sa map (scalar.times(r, _))
}
@SerialVersionUID(0L)
class SeqVectorSpace[A, SA <: SeqLike[A, SA]](implicit override val scalar: Field[A], cbf: CanBuildFrom[SA,A,SA])
extends SeqCModule[A, SA] with VectorSpace[SA, A] with Serializable
@SerialVersionUID(0L)
class SeqInnerProductSpace[A: Field, SA <: SeqLike[A, SA]](implicit cbf: CanBuildFrom[SA,A,SA])
extends SeqVectorSpace[A, SA] with InnerProductSpace[SA, A] with Serializable {
def dot(x: SA, y: SA): A = {
@tailrec
def loop(xi: Iterator[A], yi: Iterator[A], acc: A): A = {
if (xi.hasNext && yi.hasNext) {
loop(xi, yi, scalar.plus(acc, scalar.times(xi.next(), yi.next())))
} else {
acc
}
}
loop(x.toIterator, y.toIterator, scalar.zero)
}
}
@SerialVersionUID(0L)
class SeqCoordinateSpace[A: Field, SA <: SeqLike[A, SA]](val dimensions: Int)(implicit cbf: CanBuildFrom[SA,A,SA])
extends SeqInnerProductSpace[A, SA] with CoordinateSpace[SA, A] with Serializable {
def coord(v: SA, i: Int): A = v(i)
override def dot(v: SA, w: SA): A = super[SeqInnerProductSpace].dot(v, w)
def axis(i: Int): SA = {
val b = cbf()
    @tailrec def loop(j: Int): SA = if (j < dimensions) {
b += (if (i == j) scalar.one else scalar.zero)
loop(j + 1)
} else b.result
loop(0)
}
}
/**
* The L_p norm is equal to the `p`-th root of the sum of each element to the
* power `p`. For instance, if `p = 1` we have the Manhattan distance. If you'd
 * like the Euclidean norm (`p = 2`), then you're probably better off using a
 * `RealInnerProductSpace` instead.
*/
@SerialVersionUID(0L)
class SeqLpNormedVectorSpace[A: Field: NRoot: Signed, SA <: SeqLike[A, SA]](val p: Int)(implicit cbf: CanBuildFrom[SA,A,SA])
extends SeqVectorSpace[A, SA] with NormedVectorSpace[SA, A] with Serializable {
require(p > 0, "p must be > 0")
def norm(v: SA): A = {
@tailrec
def loop(xi: Iterator[A], acc: A): A = {
if (xi.hasNext) {
loop(xi, scalar.plus(acc, Signed[A].abs(scalar.pow(xi.next(), p))))
} else {
NRoot[A].nroot(acc, p)
}
}
loop(v.toIterator, scalar.zero)
}
}
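/**
 * Illustrative sketch (not part of the original file): with `p = 1` the class above computes the
 * Manhattan norm, e.g. |1| + |-2| + |3| = 6 for Vector(1.0, -2.0, 3.0). The Field/NRoot/Signed
 * instances are taken implicitly from whatever standard instances the caller has in scope.
 */
private[std] object SeqLpNormExample {
  def manhattan[A: Field: NRoot: Signed](xs: Vector[A]): A =
    new SeqLpNormedVectorSpace[A, Vector[A]](1).norm(xs)
}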
/**
* The norm here uses the absolute maximum of the coordinates (ie. the L_inf
* norm).
*/
@SerialVersionUID(0L)
class SeqMaxNormedVectorSpace[A: Field: Order: Signed, SA <: SeqLike[A, SA]](implicit cbf: CanBuildFrom[SA,A,SA])
extends SeqVectorSpace[A, SA] with NormedVectorSpace[SA, A] with Serializable {
def norm(v: SA): A = {
@tailrec
def loop(xi: Iterator[A], acc: A): A = {
if (xi.hasNext) {
val x = Signed[A].abs(xi.next())
loop(xi, if (Order[A].gt(x, acc)) x else acc)
} else {
acc
}
}
loop(v.toIterator, scalar.zero)
}
}
private object SeqSupport {
@tailrec
final def forall[A](x: Iterator[A], y: Iterator[A])(f: (A, A) => Boolean, g: A => Boolean): Boolean = {
if (x.hasNext && y.hasNext) {
f(x.next(), y.next()) && forall(x, y)(f, g)
} else if (x.hasNext) {
g(x.next()) && forall(x, y)(f, g)
} else if (y.hasNext) {
g(y.next()) && forall(x, y)(f, g)
} else {
true
}
}
private val falsef: Any => Boolean = _ => false
@inline final def forall[A, SA <: SeqLike[A, SA]](x: SA, y: SA)(
f: (A, A) => Boolean, g: A => Boolean = falsef): Boolean = {
forall(x.toIterator, y.toIterator)(f, g)
}
}
import SeqSupport._
@SerialVersionUID(0L)
class SeqEq[A: Eq, SA <: SeqLike[A, SA]] extends Eq[SA] with Serializable {
def eqv(x: SA, y: SA): Boolean = forall[A, SA](x, y)(Eq[A].eqv(_, _))
}
@SerialVersionUID(0L)
class SeqOrder[A: Order, SA <: SeqLike[A, SA]] extends SeqEq[A, SA] with Order[SA] with Serializable {
override def eqv(x: SA, y: SA): Boolean = super[SeqEq].eqv(x, y)
def compare(x: SA, y: SA): Int = {
@tailrec
def loop(xi: Iterator[A], yi: Iterator[A]): Int = {
if (xi.hasNext && yi.hasNext) {
val cmp = Order[A].compare(xi.next(), yi.next())
if (cmp == 0) loop(xi, yi) else cmp
} else if (xi.hasNext) {
1
} else if (yi.hasNext) {
-1
} else {
0
}
}
loop(x.toIterator, y.toIterator)
}
}
@SerialVersionUID(0L)
class SeqVectorEq[A: Eq, SA <: SeqLike[A, SA]](implicit scalar: AdditiveMonoid[A])
extends Eq[SA] with Serializable {
def eqv(x: SA, y: SA): Boolean =
forall[A, SA](x, y)(Eq[A].eqv(_, _), Eq[A].eqv(_, scalar.zero))
}
@SerialVersionUID(0L)
class SeqVectorOrder[A: Order, SA <: SeqLike[A, SA]](implicit scalar: AdditiveMonoid[A])
extends SeqVectorEq[A, SA] with Order[SA] with Serializable {
override def eqv(x: SA, y: SA): Boolean = super[SeqVectorEq].eqv(x, y)
def compare(x: SA, y: SA): Int = {
@tailrec
def loop(xi: Iterator[A], yi: Iterator[A]): Int = {
if (xi.hasNext && yi.hasNext) {
val cmp = Order[A].compare(xi.next(), yi.next())
if (cmp == 0) loop(xi, yi) else cmp
} else if (xi.hasNext) {
if (Order[A].eqv(xi.next(), scalar.zero)) loop(xi, yi) else 1
} else if (yi.hasNext) {
if (Order[A].eqv(yi.next(), scalar.zero)) loop(xi, yi) else -1
} else {
0
}
}
loop(x.toIterator, y.toIterator)
}
}
trait SeqInstances0 {
implicit def SeqCModule[A, CC[A] <: SeqLike[A, CC[A]]](implicit
ring0: CRing[A], cbf0: CanBuildFrom[CC[A], A, CC[A]],
ev: NoImplicit[VectorSpace[CC[A], A]]): SeqCModule[A, CC[A]] = new SeqCModule[A, CC[A]]
}
trait SeqInstances1 extends SeqInstances0 {
implicit def SeqVectorSpace[A, CC[A] <: SeqLike[A, CC[A]]](implicit field0: Field[A],
cbf0: CanBuildFrom[CC[A], A, CC[A]],
ev: NoImplicit[NormedVectorSpace[CC[A], A]]): SeqVectorSpace[A, CC[A]] = new SeqVectorSpace[A, CC[A]]
implicit def SeqEq[A, CC[A] <: SeqLike[A, CC[A]]](implicit A0: Eq[A]): SeqEq[A, CC[A]] =
new SeqEq[A, CC[A]]
}
trait SeqInstances2 extends SeqInstances1 {
implicit def SeqInnerProductSpace[A, CC[A] <: SeqLike[A, CC[A]]](implicit field0: Field[A],
cbf0: CanBuildFrom[CC[A], A, CC[A]]): SeqInnerProductSpace[A, CC[A]] = new SeqInnerProductSpace[A, CC[A]]
implicit def SeqOrder[A, CC[A] <: SeqLike[A, CC[A]]](implicit A0: Order[A]): SeqOrder[A, CC[A]] =
new SeqOrder[A, CC[A]]
}
trait SeqInstances3 extends SeqInstances2 {
implicit def SeqNormedVectorSpace[A, CC[A] <: SeqLike[A, CC[A]]](implicit field0: Field[A],
nroot0: NRoot[A], cbf0: CanBuildFrom[CC[A], A, CC[A]]): NormedVectorSpace[CC[A], A] = SeqInnerProductSpace[A, CC].normed
}
trait SeqInstances extends SeqInstances3
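/**
 * Illustrative sketch (not part of the original file): mixing in SeqInstances gives layered
 * implicit priorities - a Field lets the compiler pick the inner-product/normed instances from
 * SeqInstances2/3, while a bare CRing falls back to SeqCModule via the NoImplicit guard in
 * SeqInstances0. The direct call below reuses the same constructor call SeqInstances3 makes.
 */
private[std] object SeqInstancesExample extends SeqInstances {
  def dotProduct[A: Field](xs: Vector[A], ys: Vector[A]): A =
    SeqInnerProductSpace[A, Vector].dot(xs, ys)
}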
| adampingel/spire | core/src/main/scala/spire/std/seq.scala | Scala | mit | 8,598 |
package com.hyenawarrior
import java.lang.String._
import com.hyenawarrior.OldNorseGrammar.grammar.enums.Case._
import com.hyenawarrior.OldNorseGrammar.grammar.enums.GNumber._
import com.hyenawarrior.OldNorseGrammar.grammar.nouns.stemclasses.NounStemClass
import com.hyenawarrior.OldNorseGrammar.grammar.nouns.{Noun, _}
import com.hyenawarrior.OldNorseGrammar.grammar.enums.{Case, GNumber}
import com.hyenawarrior.auxiliary.getCauses
import org.junit.Assert._
import scala.language.implicitConversions
/**
* Created by HyenaWarrior on 2018.02.03..
*/
object NounTestAux {
case class Form(str: String, reversible: Boolean)
implicit def regular(str: String): Form = Form(str, true)
def nonReversible(str: String) = Form(str, false)
private def abbrevationOf(decl: NounType, isDef: Boolean): String = (if(isDef) "DEF " else "INDEF ") +
abbrevationOf(decl._1) + " " + abbrevationOf(decl._2)
private def abbrevationOf(caze: Case): String = caze match {
case NOMINATIVE => "NOM"
case ACCUSATIVE => "ACC"
case DATIVE => "DAT"
case GENITIVE => "GEN"
}
private def abbrevationOf(number: GNumber): String = number match {
case SINGULAR => "SG"
case PLURAL => "PL"
}
def diff(stemClass: NounStemClass, forms: Map[NounType, Form], definiteForms: Map[NounType, Form]): Unit = {
val allForms = forms.map { case (k, v) => (k, false) -> v } ++ definiteForms.map { case (k, v) => (k, true) -> v }
diff(stemClass, allForms)
}
def diff(stemClass: NounStemClass, forms: Map[NounFormType, Form]): Unit = {
val countOfTests = forms.size
val differences = forms
.zipWithIndex
.collect { case ((formType @ (decl, isDef), Form(str, true)), idx) =>
val tableName = s"\nGenerated forms from $str [${abbrevationOf(decl, isDef)}] (${idx+1} of $countOfTests):"
(tableName, generateTheseFrom(stemClass, formType -> str, forms - formType))
}.filter {
case (_, Left(exception)) => true
case (_, Right(result)) => result.nonEmpty
}
if(differences.nonEmpty) {
val diffText = differences
.map {
case (tableName, Left(exception)) => getCauses(exception).mkString(s"$tableName \n","\n caused by: ", "")
case (tableName, Right(records)) =>
val abbrevatedRecords = records.map { case ((decl, isDef), e2, e3) => (abbrevationOf(decl, isDef), e2, e3) }
val w0 = (abbrevatedRecords.map(_._1.length).toSeq :+ "Declension".length).max
val w1 = (abbrevatedRecords.map(_._2.length).toSeq :+ "Expected".length).max
val w2 = (abbrevatedRecords.map(_._3.length).toSeq :+ "Generated".length).max
val header = format(s" %-${w0}s | %-${w1}s | %-${w2}s", "Declension", "Expected", "Generated")
abbrevatedRecords
.map { case (ntype, e2, e3) => format(s" %-${w0}s | %-${w1}s ~ %-${w2}s", ntype, e2, e3)}
.mkString(s"$tableName\n$header\n", "\n", "")
}
.mkString("\n")
fail(diffText)
}
}
private def generateTheseFrom(stemClass: NounStemClass, givenForm: (NounFormType, String)
, expectedForms: Map[NounFormType, Form]): Either[Exception, Iterable[(NounFormType, String, String)]] = try {
val generatedForms = Noun(stemClass, Map(givenForm)).nounForms
val result = expectedForms.map {
case (nt, Form(expStr, _)) => generatedForms.get(nt)
.map(nf => (expStr == nf.strRepr, nt, expStr, nf.strRepr))
.getOrElse((false, nt, expStr, "<missing>"))
}.collect {
case (false, nt, expStr, gvnStr) => (nt, expStr, gvnStr)
}
Right(result)
} catch {
case e: RuntimeException => Left(e)
}
}
| HyenaSoftware/IG-Dictionary | OldNorseGrammarEngine/src/test/scala/com/hyenawarrior/NounTestAux.scala | Scala | lgpl-3.0 | 3,706 |
/* __ *\
** ________ ___ / / ___ __ ____ Scala.js Test Suite **
** / __/ __// _ | / / / _ | __ / // __/ (c) 2013, LAMP/EPFL **
** __\ \/ /__/ __ |/ /__/ __ |/_// /_\ \ http://scala-js.org/ **
** /____/\___/_/ |_/____/_/ | |__/ /____/ **
** |/____/ **
\* */
package org.scalajs.testsuite.javalib
import scala.scalajs.js
import org.scalajs.jasminetest.JasmineTest
object StackTraceElementTest extends JasmineTest {
describe("java.lang.StackTraceElement") {
it("should use the magic columnNumber field in its toString") {
val st = new StackTraceElement("MyClass", "myMethod", "myFile.scala", 1)
st.asInstanceOf[js.Dynamic].columnNumber = 5
expect(st.toString).toEqual("MyClass.myMethod(myFile.scala:1:5)")
}
it("should leave toString unmodified without magic columnNumber") {
val st = new StackTraceElement("MyClass", "myMethod", "myFile.scala", 1)
expect(st.toString).toEqual("MyClass.myMethod(myFile.scala:1)")
}
}
}
| colinrgodsey/scala-js | test-suite/src/test/scala/org/scalajs/testsuite/javalib/StackTraceElementTest.scala | Scala | bsd-3-clause | 1,240 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.internal.cypher.acceptance
import org.neo4j.cypher.{CypherExecutionException, ExecutionEngineFunSuite, QueryStatisticsTestSupport, SyntaxException}
import org.neo4j.graphdb.{Path, Relationship}
class MergeRelationshipAcceptanceTest extends ExecutionEngineFunSuite with QueryStatisticsTestSupport {
test("should_be_able_to_create_relationship") {
// given
val a = createNode("A")
val b = createNode("B")
// when
val r = executeScalar("MATCH (a {name:'A'}), (b {name:'B'}) MERGE (a)-[r:TYPE]->(b) RETURN r").asInstanceOf[Relationship]
// then
graph.inTx {
r.getStartNode should equal(a)
r.getEndNode should equal(b)
r.getType.name() should equal("TYPE")
}
}
test("should_be_able_to_find_a_relationship") {
// given
val a = createNode("A")
val b = createNode("B")
val r1 = relate(a, b, "TYPE")
// when
val result = executeScalar("MATCH (a {name:'A'}), (b {name:'B'}) MERGE (a)-[r:TYPE]->(b) RETURN r").asInstanceOf[Relationship]
// then
result should equal(r1)
}
test("should_be_able_to_find_two_existing_relationships") {
// given
val a = createNode("A")
val b = createNode("B")
val r1 = relate(a, b, "TYPE")
val r2 = relate(a, b, "TYPE")
// when
val result = execute("MATCH (a {name:'A'}), (b {name:'B'}) MERGE (a)-[r:TYPE]->(b) RETURN r").columnAs[Relationship]("r").toList
// then
result should equal(List(r2, r1))
}
test("should_be_able_to_find_two_relationships") {
// given
val a = createNode("A")
val b = createNode("B")
val r1 = relate(a, b, "TYPE")
val r2 = relate(a, b, "TYPE")
// when
val result = execute("MATCH (a {name:'A'}), (b {name:'B'}) MERGE (a)-[r:TYPE]->(b) RETURN r").columnAs[Relationship]("r")
// then
result.toSet should equal(Set(r1, r2))
}
test("should_be_able_to_filter_out_relationships") {
// given
val a = createNode("A")
val b = createNode("B")
relate(a, b, "TYPE", "r1")
val r = relate(a, b, "TYPE", "r2")
// when
val result = executeScalar("MATCH (a {name:'A'}), (b {name:'B'}) MERGE (a)-[r:TYPE {name:'r2'}]->(b) RETURN r").asInstanceOf[Relationship]
// then
result should equal(r)
}
test("should_be_able_to_create_when_nothing_matches") {
// given
val a = createNode("A")
val b = createNode("B")
relate(a, b, "TYPE", "r1")
// when
val r = executeScalar("MATCH (a {name:'A'}), (b {name:'B'}) MERGE (a)-[r:TYPE {name:'r2'}]->(b) RETURN r").asInstanceOf[Relationship]
// then
graph.inTx {
r.getStartNode should equal(a)
r.getEndNode should equal(b)
r.getType.name() should equal("TYPE")
}
}
test("should_not_be_fooled_by_direction") {
// given
val a = createNode("A")
val b = createNode("B")
val r = relate(b, a, "TYPE")
val r2 = relate(a, b, "TYPE")
// when
val result = execute("MATCH (a {name:'A'}), (b {name:'B'}) MERGE (a)<-[r:TYPE]-(b) RETURN r")
// then
assertStats(result, relationshipsCreated = 0)
result.toList should equal(List(Map("r" -> r)))
}
test("should_create_relationship_with_property") {
// given
val a = createNode("A")
val b = createNode("B")
// when
val result = execute("MATCH (a {name:'A'}), (b {name:'B'}) MERGE (a)-[r:TYPE {name:'Lola'}]->(b) RETURN r")
// then
assertStats(result, relationshipsCreated = 1, propertiesSet = 1)
graph.inTx {
val r = result.toList.head("r").asInstanceOf[Relationship]
r.getProperty("name") should equal("Lola")
r.getType.name() should equal("TYPE")
r.getStartNode should equal(a)
r.getEndNode should equal(b)
}
}
test("should_handle_on_create") {
// given
val a = createNode("A")
val b = createNode("B")
// when
val result = execute("MATCH (a {name:'A'}), (b {name:'B'}) MERGE (a)-[r:TYPE]->(b) ON CREATE SET r.name = 'Lola' RETURN r")
// then
assertStats(result, relationshipsCreated = 1, propertiesSet = 1)
graph.inTx {
val r = result.toList.head("r").asInstanceOf[Relationship]
r.getProperty("name") should equal("Lola")
r.getType.name() should equal("TYPE")
r.getStartNode should equal(a)
r.getEndNode should equal(b)
}
}
test("should_handle_on_match") {
// given
val a = createNode("A")
val b = createNode("B")
relate(a, b, "TYPE")
// when
val result = execute("MATCH (a {name:'A'}), (b {name:'B'}) MERGE (a)-[r:TYPE]->(b) ON MATCH SET r.name = 'Lola' RETURN r")
// then
assertStats(result, relationshipsCreated = 0, propertiesSet = 1)
graph.inTx {
val r = result.toList.head("r").asInstanceOf[Relationship]
r.getProperty("name") should equal("Lola")
r.getType.name() should equal("TYPE")
r.getStartNode should equal(a)
r.getEndNode should equal(b)
}
}
test("should_work_with_single_bound_node") {
// given
val a = createNode("A")
// when
val result = execute("MATCH (a {name:'A'}) MERGE (a)-[r:TYPE]->() RETURN r")
// then
assertStats(result, relationshipsCreated = 1, nodesCreated = 1)
graph.inTx {
val r = result.toList.head("r").asInstanceOf[Relationship]
r.getType.name() should equal("TYPE")
r.getStartNode should equal(a)
}
}
test("should_handle_longer_patterns") {
// given
val a = createNode("A")
// when
val result = execute("MATCH (a {name:'A'}) MERGE (a)-[r:TYPE]->()<-[:TYPE]-(b) RETURN r")
// then
assertStats(result, relationshipsCreated = 2, nodesCreated = 2)
graph.inTx {
val r = result.toList.head("r").asInstanceOf[Relationship]
r.getType.name() should equal("TYPE")
r.getStartNode should equal(a)
}
}
test("should_handle_nodes_bound_in_the_middle") {
// given
val b = createNode("B")
// when
val result = execute("MATCH (b {name:'B'}) MERGE (a)-[r1:TYPE]->(b)<-[r2:TYPE]-(c) RETURN r1, r2")
// then
assertStats(result, relationshipsCreated = 2, nodesCreated = 2)
val resultMap = result.toList.head
graph.inTx {
val r1 = resultMap("r1").asInstanceOf[Relationship]
r1.getType.name() should equal("TYPE")
r1.getEndNode should equal(b)
val r2 = resultMap("r2").asInstanceOf[Relationship]
r2.getType.name() should equal("TYPE")
r2.getEndNode should equal(b)
}
}
test("should_handle_nodes_bound_in_the_middle_when_half_pattern_is_matching") {
// given
val a = createLabeledNode("A")
val b = createLabeledNode("B")
relate(a, b, "TYPE")
// when
val result = execute("MATCH (b:B) MERGE (a:A)-[r1:TYPE]->(b)<-[r2:TYPE]-(c:C) RETURN r1, r2")
// then
assertStats(result, relationshipsCreated = 2, nodesCreated = 2, labelsAdded = 2)
val resultMap = result.toList.head
graph.inTx {
val r1 = resultMap("r1").asInstanceOf[Relationship]
r1.getType.name() should equal("TYPE")
r1.getEndNode should equal(b)
val r2 = resultMap("r2").asInstanceOf[Relationship]
r2.getType.name() should equal("TYPE")
r2.getEndNode should equal(b)
}
}
test("should_handle_first_declaring_nodes_and_then_creating_relationships_between_them") {
// given
val a = createLabeledNode("A")
val b = createLabeledNode("B")
// when
val result = execute("MERGE (a:A) MERGE (b:B) MERGE (a)-[:FOO]->(b)")
// then
assertStats(result, relationshipsCreated = 1)
}
test("should_handle_building_links_mixing_create_with_merge_pattern") {
// given
// when
val result = execute("CREATE (a:A) MERGE (a)-[:KNOWS]->(b:B) CREATE (b)-[:KNOWS]->(c:C) RETURN a, b, c")
// then
assertStats(result, relationshipsCreated = 2, nodesCreated = 3, labelsAdded = 3)
}
test("when_merging_a_pattern_that_includes_a_unique_node_constraint_violation_fail") {
// given
graph.createConstraint("Person", "id")
createLabeledNode(Map("id"->666), "Person")
// when then fails
intercept[CypherExecutionException](execute("CREATE (a:A) MERGE (a)-[:KNOWS]->(b:Person {id:666})"))
}
test("should_work_well_inside_foreach") {
val a = createLabeledNode("Start")
relate(a, createNode("prop" -> 2), "FOO")
val result = execute("match (a:Start) foreach(x in [1,2,3] | merge (a)-[:FOO]->({prop: x}) )")
assertStats(result, nodesCreated = 2, propertiesSet = 2, relationshipsCreated = 2)
}
test("should_handle_two_merges_inside_foreach") {
val a = createLabeledNode("Start")
val b = createLabeledNode(Map("prop" -> 42), "End")
val result = execute("match (a:Start) foreach(x in [42] | merge (b:End {prop: x}) merge (a)-[:FOO]->(b) )")
assertStats(result, nodesCreated = 0, propertiesSet = 0, relationshipsCreated = 1)
graph.inTx {
val rel = a.getRelationships.iterator().next()
rel.getStartNode should equal(a)
rel.getEndNode should equal(b)
}
}
test("should_handle_two_merges_inside_bare_foreach") {
createNode("x" -> 1)
val result = execute("foreach(v in [1, 2] | merge (a {x: v}) merge (b {y: v}) merge (a)-[:FOO]->(b))")
assertStats(result, nodesCreated = 3, propertiesSet = 3, relationshipsCreated = 2)
}
test("should_handle_two_merges_inside_foreach_after_with") {
val result = execute("with 3 as y " +
"foreach(x in [1, 2] | " +
"merge (a {x: x, y: y}) " +
"merge (b {x: x+1, y: y}) " +
"merge (a)-[:FOO]->(b))")
assertStats(result, nodesCreated = 3, propertiesSet = 6, relationshipsCreated = 2)
}
test("should_introduce_named_paths1") {
val result = execute("merge (a) merge p = (a)-[:R]->() return p")
assertStats(result, relationshipsCreated = 1, nodesCreated = 2)
val resultList = result.toList
result should have size 1
resultList.head.head._2.isInstanceOf[Path] should be(true)
}
test("should_introduce_named_paths2") {
val result = execute("merge (a { x:1 }) merge (b { x:2 }) merge p = (a)-[:R]->(b) return p")
assertStats(result, relationshipsCreated = 1, nodesCreated = 2, propertiesSet = 2)
val resultList = result.toList
result should have size 1
resultList.head.head._2.isInstanceOf[Path] should be(true)
}
test("should_introduce_named_paths3") {
val result = execute("merge p = (a { x:1 }) return p")
assertStats(result, nodesCreated = 1, propertiesSet = 1)
val resultList = result.toList
result should have size 1
resultList.head.head._2.isInstanceOf[Path] should be(true)
}
test("should_handle_foreach_in_foreach_game_of_life_ftw") {
/* creates a grid 4 nodes wide and 4 nodes deep.
o-o-o-o
| | | |
o-o-o-o
| | | |
o-o-o-o
| | | |
o-o-o-o
*/
val result = execute(
"foreach(x in [0,1,2] |" +
"foreach(y in [0,1,2] |" +
" merge (a {x:x, y:y})" +
" merge (b {x:x+1, y:y})" +
" merge (c {x:x, y:y+1})" +
" merge (d {x:x+1, y:y+1})" +
" merge (a)-[:R]->(b)" +
" merge (a)-[:R]->(c)" +
" merge (b)-[:R]->(d)" +
" merge (c)-[:R]->(d)))")
assertStats(result, nodesCreated = 16, relationshipsCreated = 24, propertiesSet = 16 * 2)
}
test("should_handle_merge_with_no_known_points") {
val result = execute("merge ({name:'Andres'})-[:R]->({name:'Emil'})")
assertStats(result, nodesCreated = 2, relationshipsCreated = 1, propertiesSet = 2)
}
test("should_handle_foreach_in_foreach_game_without_known_points") {
    /* creates three disconnected 2-node-wide ladders, each 4 nodes deep (24 nodes in total).
o-o o-o o-o
| | | | | |
o-o o-o o-o
| | | | | |
o-o o-o o-o
| | | | | |
o-o o-o o-o
*/
val result = execute(
"foreach(x in [0,1,2] |" +
"foreach(y in [0,1,2] |" +
" merge (a {x:x, y:y})-[:R]->(b {x:x+1, y:y})" +
" merge (c {x:x, y:y+1})-[:R]->(d {x:x+1, y:y+1})" +
" merge (a)-[:R]->(c)" +
" merge (b)-[:R]->(d)))")
assertStats(result, nodesCreated = 6*4, relationshipsCreated = 3*4+6*3, propertiesSet = 6*4*2)
}
test("should_handle_on_create_on_created_nodes") {
val result = execute("merge (a)-[:KNOWS]->(b) ON CREATE SET b.created = timestamp()")
assertStats(result, nodesCreated = 2, relationshipsCreated = 1, propertiesSet = 1)
}
test("should_handle_on_match_on_created_nodes") {
val result = execute("merge (a)-[:KNOWS]->(b) ON MATCH SET b.created = timestamp()")
assertStats(result, nodesCreated = 2, relationshipsCreated = 1, propertiesSet = 0)
}
test("should_handle_on_create_on_created_rels") {
val result = execute("merge (a)-[r:KNOWS]->(b) ON CREATE SET r.created = timestamp()")
assertStats(result, nodesCreated = 2, relationshipsCreated = 1, propertiesSet = 1)
}
test("should_handle_on_match_on_created_rels") {
val result = execute("merge (a)-[r:KNOWS]->(b) ON MATCH SET r.created = timestamp()")
assertStats(result, nodesCreated = 2, relationshipsCreated = 1, propertiesSet = 0)
}
test("should_use_left_to_right_direction_when_creating_based_on_pattern_with_undirected_relationship") {
val result = executeScalar[Relationship]("merge (a {id: 2})-[r:KNOWS]-(b {id: 1}) RETURN r")
graph.inTx {
result.getEndNode.getProperty("id") should equal(1)
result.getStartNode.getProperty("id") should equal(2)
}
}
test("should_find_existing_right_to_left_relationship_when_matching_with_undirected_relationship") {
val r = relate(createNode("id" -> 1), createNode("id" -> 2), "KNOWS")
val result = executeScalar[Relationship]("merge (a {id: 2})-[r:KNOWS]-(b {id: 1}) RETURN r")
result should equal(r)
}
test("should_find_existing_left_to_right_relationship_when_matching_with_undirected_relationship") {
val r = relate(createNode("id" -> 2), createNode("id" -> 1), "KNOWS")
val result = executeScalar[Relationship]("merge (a {id: 2})-[r:KNOWS]-(b {id: 1}) RETURN r")
result should equal(r)
}
test("should_find_existing_relationships_when_matching_with_undirected_relationship") {
val r1 = relate(createNode("id" -> 2), createNode("id" -> 1), "KNOWS")
val r2 = relate(createNode("id" -> 1), createNode("id" -> 2), "KNOWS")
val result = execute("merge (a {id: 2})-[r:KNOWS]-(b {id: 1}) RETURN r").columnAs[Relationship]("r").toSet
result should equal(Set(r1, r2))
}
test("should_reject_merging_nodes_having_the_same_id_but_different_labels") {
intercept[SyntaxException]{
execute("merge (a: Foo)-[r:KNOWS]->(a: Bar)")
}
}
test("merge should handle array properties properly from identifier") {
val query =
"""
|CREATE (a:Foo),(b:Bar) WITH a,b
|UNWIND ["a,b","a,b"] AS str WITH a,b,split(str,",") AS roles
|MERGE (a)-[r:FB {foobar:roles}]->(b)
|RETURN a,b,r""".stripMargin
val result = execute(query)
assertStats(result, nodesCreated = 2, relationshipsCreated = 1, propertiesSet = 1, labelsAdded = 2)
}
test("merge should handle array properties properly") {
relate(createLabeledNode("A"), createLabeledNode("B"), "T", Map("prop" -> Array(42, 43)))
val result = execute("MATCH (a:A),(b:B) MERGE (a)-[r:T {prop: [42,43]}]->(b) RETURN count(*)")
assertStats(result, nodesCreated = 0, relationshipsCreated = 0, propertiesSet = 0)
}
}
| HuangLS/neo4j | community/cypher/acceptance/src/test/scala/org/neo4j/internal/cypher/acceptance/MergeRelationshipAcceptanceTest.scala | Scala | apache-2.0 | 16,170 |
package monocle.syntax
import monocle.function.{At, Each, FilterIndex, Index}
import monocle._
trait AppliedPSetter[S, T, A, B] {
def value: S
def optic: PSetter[S, T, A, B]
def replace(b: B): T = optic.replace(b)(value)
def modify(f: A => B): T = optic.modify(f)(value)
/** alias to replace */
@deprecated("use replace instead", since = "3.0.0-M1")
def set(b: B): T = replace(b)
def some[A1, B1](implicit ev1: A =:= Option[A1], ev2: B =:= Option[B1]): AppliedPSetter[S, T, A1, B1] =
adapt[Option[A1], Option[B1]].andThen(std.option.pSome[A1, B1])
private[monocle] def adapt[A1, B1](implicit evA: A =:= A1, evB: B =:= B1): AppliedPSetter[S, T, A1, B1] =
evB.substituteCo[AppliedPSetter[S, T, A1, *]](evA.substituteCo[AppliedPSetter[S, T, *, B]](this))
def andThen[C, D](other: PSetter[A, B, C, D]): AppliedPSetter[S, T, C, D] =
AppliedPSetter(value, optic.andThen(other))
}
object AppliedPSetter {
def apply[S, T, A, B](_value: S, _optic: PSetter[S, T, A, B]): AppliedPSetter[S, T, A, B] =
new AppliedPSetter[S, T, A, B] {
val value: S = _value
val optic: PSetter[S, T, A, B] = _optic
}
implicit def appliedPSetterSyntax[S, T, A, B](self: AppliedPSetter[S, T, A, B]): AppliedPSetterSyntax[S, T, A, B] =
new AppliedPSetterSyntax(self)
implicit def appliedSetterSyntax[S, A](self: AppliedSetter[S, A]): AppliedSetterSyntax[S, A] =
new AppliedSetterSyntax(self)
}
object AppliedSetter {
def apply[S, A](_value: S, _optic: Setter[S, A]): AppliedSetter[S, A] =
AppliedPSetter(_value, _optic)
}
final case class AppliedPSetterSyntax[S, T, A, B](private val self: AppliedPSetter[S, T, A, B]) extends AnyVal {
/** compose a [[PSetter]] with a [[PSetter]] */
@deprecated("use andThen", since = "3.0.0-M1")
def composeSetter[C, D](other: PSetter[A, B, C, D]): AppliedPSetter[S, T, C, D] =
self.andThen(other)
/** compose a [[PSetter]] with a [[PTraversal]] */
@deprecated("use andThen", since = "3.0.0-M1")
def composeTraversal[C, D](other: PTraversal[A, B, C, D]): AppliedPSetter[S, T, C, D] =
self.andThen(other)
/** compose a [[PSetter]] with a [[POptional]] */
@deprecated("use andThen", since = "3.0.0-M1")
def composeOptional[C, D](other: POptional[A, B, C, D]): AppliedPSetter[S, T, C, D] =
self.andThen(other)
/** compose a [[PSetter]] with a [[PPrism]] */
@deprecated("use andThen", since = "3.0.0-M1")
def composePrism[C, D](other: PPrism[A, B, C, D]): AppliedPSetter[S, T, C, D] =
self.andThen(other)
/** compose a [[PSetter]] with a [[PLens]] */
@deprecated("use andThen", since = "3.0.0-M1")
def composeLens[C, D](other: PLens[A, B, C, D]): AppliedPSetter[S, T, C, D] =
self.andThen(other)
/** compose a [[PSetter]] with a [[PIso]] */
@deprecated("use andThen", since = "3.0.0-M1")
def composeIso[C, D](other: PIso[A, B, C, D]): AppliedPSetter[S, T, C, D] =
self.andThen(other)
/** alias to composeTraversal */
@deprecated("use andThen", since = "3.0.0-M1")
def ^|->>[C, D](other: PTraversal[A, B, C, D]): AppliedPSetter[S, T, C, D] =
self.andThen(other)
/** alias to composeOptional */
@deprecated("use andThen", since = "3.0.0-M1")
def ^|-?[C, D](other: POptional[A, B, C, D]): AppliedPSetter[S, T, C, D] =
self.andThen(other)
/** alias to composePrism */
@deprecated("use andThen", since = "3.0.0-M1")
def ^<-?[C, D](other: PPrism[A, B, C, D]): AppliedPSetter[S, T, C, D] =
self.andThen(other)
/** alias to composeLens */
@deprecated("use andThen", since = "3.0.0-M1")
def ^|->[C, D](other: PLens[A, B, C, D]): AppliedPSetter[S, T, C, D] =
self.andThen(other)
/** alias to composeIso */
@deprecated("use andThen", since = "3.0.0-M1")
def ^<->[C, D](other: PIso[A, B, C, D]): AppliedPSetter[S, T, C, D] =
self.andThen(other)
}
/** Extension methods for monomorphic Setter
*/
final case class AppliedSetterSyntax[S, A](private val self: AppliedSetter[S, A]) extends AnyVal {
def each[C](implicit evEach: Each[A, C]): AppliedSetter[S, C] =
self.andThen(evEach.each)
  /** Select all the elements which satisfy the predicate. This combinator can break the fusion property; see
    * Optional.filter for more details.
*/
def filter(predicate: A => Boolean): AppliedSetter[S, A] =
self.andThen(Optional.filter(predicate))
def filterIndex[I, A1](predicate: I => Boolean)(implicit ev: FilterIndex[A, I, A1]): AppliedSetter[S, A1] =
self.andThen(ev.filterIndex(predicate))
def withDefault[A1](defaultValue: A1)(implicit evOpt: A =:= Option[A1]): AppliedSetter[S, A1] =
self.adapt[Option[A1], Option[A1]].andThen(std.option.withDefault(defaultValue))
def at[I, A1](i: I)(implicit evAt: At[A, I, A1]): AppliedSetter[S, A1] =
self.andThen(evAt.at(i))
def index[I, A1](i: I)(implicit evIndex: Index[A, I, A1]): AppliedSetter[S, A1] =
self.andThen(evIndex.index(i))
}
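/**
 * Illustrative sketch (not part of the original file): the applied syntax above pairs a value with
 * an optic so updates read left to right. `Scores` and `eachValue` are made-up names, and the
 * setter is built with the usual Setter.apply constructor (assumed available via monocle._).
 */
private[syntax] object AppliedSetterExample {
  final case class Scores(values: List[Int])
  private val eachValue: Setter[Scores, Int] =
    Setter[Scores, Int](f => s => s.copy(values = s.values.map(f)))
  // AppliedSetter(Scores(List(1, 2, 3)), eachValue).modify(_ + 1) == Scores(List(2, 3, 4))
  def bumpAll(s: Scores): Scores = AppliedSetter(s, eachValue).modify(_ + 1)
}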
| julien-truffaut/Monocle | core/shared/src/main/scala/monocle/syntax/AppliedPSetter.scala | Scala | mit | 4,936 |
import scala.math.BigInt
//import BigInt.*
object test {
1 * BigInt(0)
}
| dotty-staging/dotty | tests/pos/bigint.scala | Scala | apache-2.0 | 77 |
package org.sofi.deadman.http.api
import akka.actor._
import akka.pattern.ask
import akka.stream._
import akka.stream.scaladsl._
import akka.util.Timeout
import cats.data.Validated._
import org.sofi.deadman.http.request._
import org.sofi.deadman.messages.command._, ResponseType._
import org.sofi.deadman.messages.validation._
import scala.concurrent.Future
import scala.concurrent.duration._
final class CommandApi(commandManager: ActorRef)(implicit val system: ActorSystem, timeout: Timeout) {
// Execution context
private implicit val executionContext = system.dispatcher
// Buffer size
private val BUFFER_SIZE = 1000
// Stream is chunked into groups of elements received within a time window
private val GROUP_SIZE = 100
// Stream group time window
private val TIME_WINDOW = 1.second
// Stream parallelism
private val PARALLELISM = 10
// Make sure the command timestamp is set
private def setTimestamp(req: ScheduleRequest) =
if (req.ts.isDefined) req else req.copy(ts = Some(System.currentTimeMillis()))
// Send a command to the command manager
private def sendCommand(command: Any) =
(commandManager ? command).mapTo[CommandResponse]
// Validate, create and send a batch of ScheduleTask commands to the command manager
private def scheduleTasks(requests: Seq[ScheduleRequest]) = Future.sequence {
requests.map { r ⇒
validate(r.key, r.aggregate, r.entity, r.ttl, r.ttw, r.tags, r.ts) match {
case Invalid(nel) ⇒ Future.successful(CommandResponse(ERROR, nel.map(_.error).toList))
case Valid(command) ⇒ sendCommand(command)
}
}
}
// Validate, create and send a batch of CompleteTask commands to the command manager
private def completeTasks(requests: Seq[CompleteRequest]) = Future.sequence {
requests.map { r ⇒
validateCompletion(r.key, r.aggregate, r.entity) match {
case Invalid(nel) ⇒ Future.successful(CommandResponse(ERROR, nel.map(_.error).toList))
case Valid(command) ⇒ sendCommand(command)
}
}
}
// Log all command errors
private def logErrors(reps: Seq[CommandResponse]) = {
reps.foreach { rep ⇒
if (rep.responseType == ERROR) {
system.log.error(rep.errors.mkString(","))
}
}
reps
}
  // The following Akka Streams flows batch writes to the command manager: incoming requests are
  // buffered for backpressure and flushed downstream once a group fills up or its time window elapses.
// Task scheduling flow
val scheduleTaskFlow =
Flow[ScheduleRequest]
.buffer(BUFFER_SIZE, OverflowStrategy.backpressure)
.map(setTimestamp)
.groupedWithin(GROUP_SIZE, TIME_WINDOW)
.mapAsync(PARALLELISM)(scheduleTasks)
.map(logErrors)
// Task completion flow
val completeTaskFlow =
Flow[CompleteRequest]
.buffer(BUFFER_SIZE, OverflowStrategy.backpressure)
.groupedWithin(GROUP_SIZE, TIME_WINDOW)
.mapAsync(PARALLELISM)(completeTasks)
.map(logErrors)
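  // Illustrative sketch (not part of the original API): the flows above are ordinary Flow stages,
  // so a caller can push an in-memory batch through them given a Materializer; that materializer
  // (and discarding the responses via Sink.ignore) is an assumption about the calling code.
  def scheduleAll(requests: Seq[ScheduleRequest])(implicit mat: Materializer): Future[akka.Done] =
    Source(requests.toList).via(scheduleTaskFlow).runWith(Sink.ignore)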
}
| SocialFinance/deadman-switch | service/src/main/scala/org/sofi/deadman/http/api/CommandApi.scala | Scala | bsd-3-clause | 2,982 |
/************************************************************************
Tinaviz
*************************************************************************
This application is part of the Tinasoft project: http://tinasoft.eu
Tinaviz main developer: julian.bilcke @ iscpif.fr (twitter.com/flngr)
Copyright (C) 2009-2011 CREA Lab, CNRS/Ecole Polytechnique UMR 7656 (Fr)
This program is free software: you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation, either version 3 of the License, or (at your
option) any later version.
This program is distributed in the hope that it will be useful, but
WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see <http://www.gnu.org/licenses/>.
************************************************************************/
package eu.tinasoft.tinaviz.graph
import eu.tinasoft._
import tinaviz.graph.Metrics._
import tinaviz.util._
import tinaviz.util.Color._
import tinaviz.util.Vector
import tinaviz.io.json.Base64
import collection.mutable.LinkedList
object Graph {
def get[T](elements: Map[String, Any], key: String): T = elements.get(key).get.asInstanceOf[T]
// TODO be an optimized factory
def makeDiff(newElements: Map[String, Array[Any]],
oldElements: Map[String, Array[Any]]) = {
}
/**
* Default, dumb factory
*/
def make(elements: Map[String, Any]) = new Graph(elements)
/**
* Default settings
*/
val defaults: Map[String, Any] = Map(
"layout" -> "tinaforce", //"tinaforce", // phyloforce
"layoutSpeed" -> 24.0,
"activity" -> 100.0,
"entropy" -> 0.95,
"maxDrawedNodes" -> 10,
"debug" -> false,
"selectionRadius" -> 10.0,
"antiAliasingThreshold" -> 1500,
"pause" -> false,
// "freeze" -> false,
//"logo" -> new PImage(),
"uuid" -> Array.empty[String],
"label" -> Array.empty[String],
"shortLabel" -> Array.empty[String],
"renderedLabel" -> Array.empty[String],
"showLabel" -> Array.empty[Boolean],
"labelColor" -> Array.empty[Color],
"color" -> Array.empty[Color],
"originalColor" -> Array.empty[Color],
"selected" -> Array.empty[Boolean],
"highlighted" -> Array.empty[Boolean],
"updateStatus" -> Array.empty[Symbol], // outdated, updating, updated
"saveStatus" -> Array.empty[Symbol], // saving, saved
"density" -> Array.empty[Double],
"rate" -> Array.empty[Int],
"size" -> Array.empty[Double],
"weight" -> Array.empty[Double],
"category" -> Array.empty[String],
"content" -> Array.empty[String],
"position" -> Array.empty[(Double, Double)],
"links" -> Array.empty[Map[Int, Double]],
"camera.zoom" -> 1.0,
"camera.position" -> (0.0, 0.0),
"camera.target" -> "all", //'all, 'none, or 'selection
"window" -> (800,600),
//"edge.type" -> "line",
"filter.node.category" -> "Document",
"filter.view" -> "macro",
"filter.a.node.size" -> 0.2,
"filter.a.node.weight" -> (0.0, 1.0),
"filter.a.edge.weight" -> (0.0, 1.0),
"filter.b.node.size" -> 0.2,
"filter.b.node.weight" -> (0.0, 1.0),
"filter.b.edge.weight" -> (0.0, 1.0),
"filter.map.node.color.hue" -> "category",
"filter.map.node.color.saturation" -> "weight",
"filter.map.node.color.brightness" -> "weight",
"filter.map.node.size" -> "weight",
"filter.map.node.shape" -> "category",
"nodeAWeightRange" -> List.empty[Double],
"edgeAWeightRange" -> List.empty[Double],
"nodeBWeightRange" -> List.empty[Double],
"edgeBWeightRange" -> List.empty[Double],
"outDegree" -> Array.empty[Int],
"inDegree" -> Array.empty[Int],
"degree" -> Array.empty[Int],
// metrics & properties
"nbNodes" -> 0,
"nbEdges" -> 0,
"nbSingles" -> 0,
"baryCenter" -> (0.0, 0.0),
"selectionCenter" -> (0.0, 0.0),
"selectionNeighbourhood" -> Array.empty[((Double, Double), Int)],
"selectionNeighbourhoodCenter" -> (0.0, 0.0),
"outDegreeExtremums" -> (0, 1),
"inDegreeExtremums" -> (0, 1),
"nodeWeightExtremums" -> (0.0, 1.0, 0.0, 1.0), // minx, maxx, miny, maxy
"edgeWeightExtremums" -> (0.0, 1.0, 0.0, 1.0), // same
"extremums" -> (1.0, 0.0, 1.0, 0.0), // maxx, minx, maxy, miny (yes I know, not the same pattern.. sorry)
"extremumsSelection" -> (1.0, 0.0, 1.0, 0.0), // same
"extremumsSelectionNeighbourhood" -> (1.0, 0.0, 1.0, 0.0), // same
"selectionNeighbourhoodCenter" -> (0.0, 0.0),
"notSinglesCenter" -> (0.0, 0.0),
"connectedComponents" -> Array.empty[Int],
"nodeShape" -> Array.empty[Symbol],
"nodeColor" -> Array.empty[Color],
"nodeBorderColor" -> Array.empty[Color],
"nodeSize" -> Array.empty[Double],
"edgeIndex" -> Array.empty[(Int, Int)],
"edgeWeight" -> Array.empty[Double],
"edgeSize" -> Array.empty[Double],
"edgeColor" -> Array.empty[Color]
)
}
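/**
 * Illustrative sketch (not part of the original file): Graph.make keeps every default above and
 * only overrides the keys the caller supplies, so a partial settings map is enough. The values
 * below are arbitrary examples ("phyloforce" is the alternative layout mentioned in the defaults).
 */
private object GraphDefaultsExample {
  val g: Graph = Graph.make(Map("layout" -> "phyloforce", "debug" -> true))
}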
class Graph(val _elements: Map[String, Any] = Map[String, Any]()) {
val elements = Graph.defaults ++ _elements
/**
* Used for export to GEXF
*/
def id(_uuid: String): Int = uuid.indexOf(_uuid)
def getUuid(i: Int) = uuid(i)
def has(_uuid: String): Boolean = uuid.contains(_uuid)
def get[T](key: String): T = elements(key).asInstanceOf[T]
def getArray[T](key: String): Array[T] = get[Array[T]](key)
// some built-in functions
lazy val links = getArray[Map[Int, Double]]("links")
lazy val position = getArray[(Double, Double)]("position")
lazy val color = getArray[Color]("color")
lazy val originalColor = getArray[Color]("color")
lazy val weight = getArray[Double]("weight")
lazy val size = getArray[Double]("size")
lazy val category = getArray[String]("category")
lazy val content = getArray[String]("content")
lazy val selected = getArray[Boolean]("selected")
lazy val highlighted = getArray[Boolean]("highlighted")
lazy val updateStatus = getArray[Symbol]("updateStatus")
lazy val antiAliasingThreshold = get[Int]("antiAliasingThreshold")
// outdated, updating, updated
lazy val saveStatus = getArray[Symbol]("saveStatus")
// saving, saved
lazy val label = getArray[String]("label")
lazy val labelColor = getArray[Color]("labelColor")
lazy val shortLabel = getArray[String]("shortLabel")
lazy val renderedLabel = getArray[String]("renderedLabel")
lazy val showLabel = getArray[Boolean]("showLabel")
lazy val rate = getArray[Int]("rate")
lazy val uuid = getArray[String]("uuid")
lazy val layoutSpeed = get[Double]("layoutSpeed")
//lazy val logo = get[PImage]("logo")
lazy val entropy = get[Double]("entropy")
lazy val activity = get[Double]("activity")
lazy val colorScheme = Rio
// camera settings
lazy val cameraZoom = get[Double]("camera.zoom")
lazy val cameraPosition = get[(Double, Double)]("camera.position")
lazy val cameraTarget = get[String]("camera.target")
// filters and view settings
lazy val currentCategory = get[String]("filter.node.category")
lazy val currentView = get[String]("filter.view")
lazy val layout = get[String]("layout")
lazy val pause = get[Boolean]("pause")
// lazy val freeze = get[Boolean]("freeze")
lazy val debug = get[Boolean]("debug")
lazy val selectionRadius = get[Double]("selectionRadius")
lazy val edgeType = get[String]("edge.type")
// TODO should be precomputed!! OPTIMIZATION
lazy val totalDegree = inDegree zip outDegree map {
case (a, b) => a + b
}
lazy val density = getArray[Double]("density")
lazy val ids = 0 until nbNodes
lazy val outDegree = getArray[Int]("outDegree")
lazy val inDegree = getArray[Int]("inDegree")
lazy val degree = getArray[Int]("degree")
// metrics & properties
lazy val nbNodes = get[Int]("nbNodes")
lazy val nbEdges = get[Int]("nbEdges")
lazy val nbSingles = get[Int]("nbSingles")
lazy val baryCenter = get[(Double, Double)]("baryCenter")
lazy val selectionCenter = get[(Double, Double)]("selectionCenter")
// a list of positions + ID
lazy val selectionNeighbourhood = getArray[((Double, Double), Int)]("selectionNeighbourhood")
lazy val selectionNeighbourhoodCenter = get[(Double, Double)]("selectionNeighbourhoodCenter")
lazy val selectionValid = (selection.size > 0)
lazy val outDegreeExtremums = get[(Double, Double)]("outDegreeExtremums")
lazy val inDegreeExtremums = get[(Double, Double)]("inDegreeExtremums")
lazy val extremums = get[(Double, Double, Double, Double)]("extremums")
lazy val xMax = extremums._1
lazy val xMin = extremums._2
lazy val yMax = extremums._3
lazy val yMin = extremums._4
lazy val nodeWeightExtremums = get[(Double, Double, Double, Double)]("nodeWeightExtremums")
lazy val minANodeWeight = nodeWeightExtremums._1
lazy val maxANodeWeight = nodeWeightExtremums._2
lazy val minBNodeWeight = nodeWeightExtremums._3
lazy val maxBNodeWeight = nodeWeightExtremums._4
lazy val edgeWeightExtremums = get[(Double, Double, Double, Double)]("edgeWeightExtremums")
lazy val minAEdgeWeight = edgeWeightExtremums._1
lazy val maxAEdgeWeight = edgeWeightExtremums._2
lazy val minBEdgeWeight = edgeWeightExtremums._3
lazy val maxBEdgeWeight = edgeWeightExtremums._4
lazy val extremumsSelection = get[(Double, Double, Double, Double)]("extremumsSelection")
lazy val xMaxSelection = extremumsSelection._1
lazy val xMinSelection = extremumsSelection._2
lazy val yMaxSelection = extremumsSelection._3
lazy val yMinSelection = extremumsSelection._4
lazy val extremumsSelectionNeighbourhood = get[(Double, Double, Double, Double)]("extremumsSelectionNeighbourhood")
lazy val xMaxSelectionNeighbourhood = extremumsSelectionNeighbourhood._1
lazy val xMinSelectionNeighbourhood = extremumsSelectionNeighbourhood._2
lazy val yMaxSelectionNeighbourhood = extremumsSelectionNeighbourhood._3
lazy val yMinSelectionNeighbourhood = extremumsSelectionNeighbourhood._4
lazy val singlesCenter = get[(Double, Double)]("selectionNeighbourhoodCenter")
lazy val notSinglesCenter = get[(Double, Double)]("notSinglesCenter")
lazy val connectedComponents = getArray[Int]("connectedComponents")
lazy val nodeAWeightRange = get[List[Double]]("nodeAWeightRange")
lazy val edgeAWeightRange = get[List[Double]]("edgeAWeightRange")
lazy val nodeBWeightRange = get[List[Double]]("nodeBWeightRange")
lazy val edgeBWeightRange = get[List[Double]]("edgeBWeightRange")
/**
* compute the edge position to screen
*/
lazy val nodeShape = getArray[Symbol]("nodeShape")
lazy val nodeColor = getArray[Color]("nodeColor")
lazy val nodeBorderColor = getArray[Color]("nodeBorderColor")
lazy val nodeSize = getArray[Double]("nodeSize")
lazy val edgeIndex = getArray[(Int, Int)]("edgeIndex")
lazy val edgeWeight = getArray[Double]("edgeWeight")
lazy val edgeSize = getArray[Double]("edgeSize")
lazy val edgeColor = getArray[Color]("edgeColor")
lazy val window = get[(Int, Int)]("window")
def callbackNodeCountChanged = {
var g = this
g = g + ("nbNodes" -> (Metrics nbNodes g))
g = g.callbackNodeAttributesChanged
// we can't call callback position, because not everything is computed already
g = g + ("extremums" -> (Metrics extremums g))
g = g + ("extremumsSelection" -> (Metrics extremumsSelection g))
g = g + ("baryCenter" -> Metrics.baryCenter(g))
g = g + ("selectionCenter" -> Metrics.selectionCenter(g))
g = g + ("nodeAWeightRange" -> Metrics.nodeWeightRange(g, "Document"))
g = g + ("nodeBWeightRange" -> Metrics.nodeWeightRange(g, "NGram"))
//println("Result of nodeWeightRange: "+g.nodeWeightRange)
g = g.callbackEdgeCountChanged
g
}
def callbackPositionsChanged = {
var g = this
g = g + ("extremums" -> (Metrics extremums g))
g = g + ("extremumsSelection" -> (Metrics extremumsSelection g))
g = g + ("baryCenter" -> Metrics.baryCenter(g))
g = g + ("selectionCenter" -> Metrics.selectionCenter(g))
// in some case, this is overwritten by a following call to "update edges"
g = g + ("selectionNeighbourhoodCenter" -> Metrics.selectionNeighbourhoodCenter(g)) // need selectionNeighbourhood
g = g + ("singlesCenter" -> Metrics.singlesCenter(g)) // need isSingle
g = g + ("notSinglesCenter" -> Metrics.notSinglesCenter(g))
g
}
def callbackNodeAttributesChanged = {
//println(" callbackNodeAttributesChanged")
var g = this
g = g + ("nodeWeightExtremums" -> (Metrics nodeWeightExtremums g))
g = g + ("nodeColor" -> Drawing.nodeColor(g))
g = g + ("nodeBorderColor" -> Drawing.nodeBorderColor(g))
g = g + ("nodeShape" -> Drawing.nodeShape(g))
// depend on the selection
//println("Updating rendered label and label color")
g = g + ("renderedLabel" -> Drawing.renderedLabel(g))
g = g + ("labelColor" -> Drawing.labelColor(g))
g
}
def callbackEdgeCountChanged = {
println("executing callback EDGE COUNT changed")
var g = this
g = g + ("nbEdges" -> Metrics.nbEdges(g))
g = g + ("outDegree" -> Metrics.outDegree(g))
g = g + ("inDegree" -> Metrics.inDegree(g))
g = g + ("degree" -> Metrics.degree(g)) // do not count twice a link
g = g + ("nbSingles" -> Metrics.nbSingles(g))
g = g + ("outDegreeExtremums" -> (Metrics outDegreeExtremums g))
g = g + ("inDegreeExtremums" -> (Metrics inDegreeExtremums g))
g = g + ("edgeWeightExtremums" -> (Metrics edgeWeightExtremums g))
    // these settings vary with edge count
g = g + ("selectionNeighbourhood" -> Metrics.selectionNeighbourhood(g))
g = g + ("extremumsSelectionNeighbourhood" -> (Metrics extremumsSelectionNeighbourhood g))
g = g + ("selectionNeighbourhoodCenter" -> Metrics.selectionNeighbourhoodCenter(g)) // need selectionNeighbourhood
g = g + ("singlesCenter" -> Metrics.singlesCenter(g)) // need isSingle
g = g + ("notSinglesCenter" -> Metrics.notSinglesCenter(g))
g = g + ("edgeIndex" -> Functions.edgeIndex(g))
g = g + ("edgeWeight" -> Functions.edgeWeight(g))
g = g + ("edgeAWeightRange" -> Metrics.edgeWeightRange(g, "Document"))
g = g + ("edgeBWeightRange" -> Metrics.edgeWeightRange(g, "NGram"))
//println("Result of edgeWeightRange: "+g.edgeWeightRange)
//g = g + ("connectedComponents" -> Metrics.connectedComponents(g))
g = g + ("edgeSize" -> Drawing.edgeSize(g))
g = g + ("edgeColor" -> Drawing.edgeColor(g))
g
}
def callbackSelectionChanged = {
//println(" callbackSelectionChanged")
var g = this
g = g + ("selectionCenter" -> Metrics.selectionCenter(g))
g = g + ("selectionNeighbourhood" -> Metrics.selectionNeighbourhood(g))
g = g + ("extremumsSelectionNeighbourhood" -> (Metrics extremumsSelectionNeighbourhood g))
g = g + ("selectionNeighbourhoodCenter" -> Metrics.selectionNeighbourhoodCenter(g))
g = g + ("nodeColor" -> Drawing.nodeColor(g))
g = g + ("nodeBorderColor" -> Drawing.nodeBorderColor(g))
g = g + ("edgeColor" -> Drawing.edgeColor(g))
// depend on the selection
//println("Updating rendered label and label color")
g = g + ("labelColor" -> Drawing.labelColor(g))
g
}
lazy val warmCache: Graph = {
    // TODO I think we don't need to warm the cache anymore with the layout, since topology and stats don't change often
position
nodeColor
nodeBorderColor
edgeSize
edgeColor
edgeIndex
edgeWeight
nodeShape
selectionNeighbourhood
selectionNeighbourhoodCenter
singlesCenter
notSinglesCenter
outDegreeExtremums
inDegreeExtremums
extremums
extremumsSelection
extremumsSelectionNeighbourhood
nodeWeightExtremums
edgeWeightExtremums
this
}
// hashcode will change if nodes/links are added/deleted
lazy val hashed = (uuid.toList.mkString("") + size.toList.mkString("") + links.map {
case mapID => mapID.hashCode
}.toList.mkString("")).hashCode
lazy val debugStats = {
"**DEBUG**\\nlinks.size: " + links.size + "\\nposition.size: " + position.size + "\\ncolor.size: " + color.size + "\\nuuid.size: " + uuid.size + "\\ncategory.size: " + category.size + "\\nselected.size: " + selected.size + "\\nselection.size: " + selection.size + "\\n**END DEBUG**"
}
/**
   * Check if a graph has any link between i and j (directed or undirected)
*/
def hasAnyLink(i: Int, j: Int) = hasThisLink(i, j) | hasThisLink(j, i)
/**
* Check if a graph has a directed link (from i to j)
*/
def hasThisLink(i: Int, j: Int) = if (links.size > i) links(i).contains(j) else false
def isSingle(i: Int) = (inDegree(i) == 0 && outDegree(i) == 0)
/**
* Create a new Graph with an updated column
*/
def +(kv: (String, Any)) = set(kv)
def +(id: Int, k: String, v: Any) = set(id, k, v)
def ++(kv: Map[String, Any]) = new Graph(elements ++ kv)
/**
* Set a column and create a new Graph
*/
def set(kv: (String, Any)) = new Graph(elements + kv)
def set(id: Int, k: String, value: Any) = {
//println("id: "+id+" kv: "+kv)
var newElements = elements
newElements += k -> {
if (!elements.contains(k)) {
value match {
case v: Boolean => List[Boolean](v).toArray
case v: Int => List[Int](v).toArray
case v: Double => List[Double](v).toArray
case v: Float => List[Float](v).toArray
case v: String => List[String](v).toArray
case v: Color => List[Color](v).toArray
case v: Symbol => List[Symbol](v).toArray
case v: (Int, Int) => List[(Int, Int)](v).toArray
case v: (Double, Double) => List[(Double, Double)](v).toArray
case v: ((Double, Double), Int) => List[((Double, Double), Int)](v).toArray
case v: (Double, Double, Double, Double) => List[(Double, Double, Double, Double)](v).toArray
case v: Array[String] => List[Array[String]](v).toArray
case v: Array[Symbol] => List[Array[Symbol]](v).toArray
case v: Array[Double] => List[Array[Double]](v).toArray
case v: Array[(Double,Double)] => List[Array[(Double,Double)]](v).toArray
case v: Array[Int] => List[Array[Int]](v).toArray
case v: List[Double] => List[List[Double]](v).toArray
case v: List[String] => List[List[String]](v).toArray
case v: List[Int] => List[List[Int]](v).toArray
case v: Set[Int] => List[Set[Int]](v).toArray
case v: Map[Int, Double] => List[Map[Int, Double]](v).toArray
case v =>
throw new Exception("UNRECOGNIZED TYPE")
// List(v).toArray
}
} else {
//println("key "+k+" already match!")
val t = elements(k)
//println("elements gave "+t+" ")
value match {
case v: Boolean =>
var m = getArray[Boolean](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[Boolean](v)).toArray
m
case v: Int =>
var m = getArray[Int](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[Int](v)).toArray
m
case v: Double =>
var m = getArray[Double](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[Double](v)).toArray
m
case v: Float =>
var m = getArray[Float](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[Float](v)).toArray
m
case v: String =>
var m = getArray[String](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[String](v)).toArray
m
case v: Color =>
var m = getArray[Color](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[Color](v)).toArray
m
case v: Symbol =>
var m = getArray[Symbol](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[Symbol](v)).toArray
m
case v: (Int, Int) =>
var m = getArray[(Int, Int)](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[(Int, Int)](v)).toArray
m
case v: (Double, Double) =>
var m = getArray[(Double, Double)](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[(Double, Double)](v)).toArray
m
case v: ((Double, Double),Int) =>
var m = getArray[((Double, Double),Int)](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[((Double, Double),Int)](v)).toArray
m
case v: (Double, Double, Double, Double) =>
var m = getArray[(Double, Double, Double, Double)](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[(Double, Double, Double, Double)](v)).toArray
m
case v: Map[Int, Double] =>
var m = getArray[Map[Int, Double]](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[Map[Int, Double]](v)).toArray
m
case v: List[Double] =>
var m = getArray[List[Double]](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[List[Double]](v)).toArray
m
case v: Array[Double] =>
var m = getArray[Array[Double]](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[Array[Double]](v)).toArray
m
case v: List[String] =>
var m = getArray[List[String]](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[List[String]](v)).toArray
m
case v: Array[String] =>
var m = getArray[Array[String]](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[Array[String]](v)).toArray
m
case v: List[Int] =>
var m = getArray[List[Int]](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[List[Int]](v)).toArray
m
case v: Array[Int] =>
var m = getArray[Array[Int]](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[Array[Int]](v)).toArray
m
case v: Set[Int] =>
var m = getArray[Set[Int]](k)
if (id < m.size) m(id) = v else m = (m.toList ::: List[Set[Int]](v)).toArray
m
case v: Any =>
// Actually, this is the only case called
throw new Exception("FATAL ERROR, GOT ANY FOR " + v)
}
}
}
new Graph(newElements)
}
/**
* List of selected nodes' IDs
*/
lazy val selection: List[Int] = selected.zipWithIndex.filter {
case (selected, i) => selected
}.map {
case (s, i) => i
}.toList
/**
* List of selected nodes' attributes
*/
lazy val selectionAttributes = {
//println("mapping selection attributes: "+selection)
selection.map {
case i => lessAttributes(i)
}.toList
}
/**
* Return the current selection as a list of UUID:String
*/
lazy val selectionUUID = selection.map {
case i => getUuid(i)
}.toList
  // { UUID : {neighbours}, UUID2 : {neighbours}, ... }
lazy val selectionNeighbours = {
Map(selectionUUID.zipWithIndex: _*).map {
case (uuid, i) => (uuid, neighbours(i))
}
}
/**
   * Get attributes of a node from its UUID (unique ID, an arbitrary-length String)
*/
def attributes(uuid: String): Map[String, Any] = {
attributes(id(uuid))
}
/**
   * Get attributes of a node from its index in the graph
*/
def attributes(i: Int): Map[String, Any] = {
Map[String, Any](
"links" -> (if (links.size > i) links(i) else Map.empty[Int, Double]),
"position" -> (if (position.size > i) position(i) else (0.0, 0.0)),
"color" -> (if (color.size > i) color(i) else new Color(0.0, 0.0, 0.0)),
"weight" -> (if (weight.size > i) weight(i) else 1.0),
"size" -> (if (size.size > i) size(i) else 1.0),
"category" -> (if (category.size > i) category(i) else ""),
"content" -> (if (content.size > i) content(i) else ""), //Base64.encode(content(i)),
"selected" -> (if (selected.size > i) selected(i) else false),
"label" -> (if (label.size > i) label(i) else ""), // Base64.encode(label(i)),
"rate" -> (if (rate.size > i) rate(i) else 0),
"id" -> (if (uuid.size > i) uuid(i) else 0),
"inDegree" -> (if (inDegree.size > i) inDegree(i) else 0),
"outDegree" -> (if (outDegree.size > i) outDegree(i) else 0),
"density" -> (if (density.size > i) density(i) else 0)
)
}
/**
* Return neighbours of a node ID - todo refactor to make it more efficient: lazy val for array of IDs, and another function for the minimal attributes
*
*/
/*
lazy val neighboursIDs(i: Int): List[Int] = {
ids.map {
case id =>
// println(" - mapping neighbours of node "+i+"..")
uuid.zipWithIndex.filter{
case (uj, ij) => hasAnyLink(ij,i) && (ij != i)
} map { case (uuid,id) => id }
}
}
}*/
/**
* Return neighbours of a node ID - todo refactor to make it more efficient: lazy val for array of IDs, and another function for the minimal attributes
*
*/
def neighbours(i: Int): Map[String, Map[String, Any]] = {
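    // Collect every other node that shares at least one link with node i, keyed by its uuid and
    // mapped to a minimal attribute set suitable for neighbourhood data.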
(if (links.size > i) {
// println(" - mapping neighbours of node "+i+"..")
Map.empty[String, Map[String, Any]] ++ Map((uuid.zipWithIndex.filter {
case (uj, ij) => hasAnyLink(ij, i) && (ij != i)
} collect {
case (uj, ij) =>
(uj, minimalAttributes(ij))
}): _*)
}
else
Map.empty[String, Map[String, Any]])
}
/**
* Get "less" attributes (only the most important, for data transfert and talking with the visualization client)
* of a node from it's UUID (Unique ID, arbitrary-length String)
*/
def lessAttributes(uuid: String): Map[String, Any] = {
lessAttributes(id(uuid))
}
/**
* Get "less" attributes (only the most important, for data transfert and talking with the visualization client)
* of a node from it's index in the graph
*/
def lessAttributes(i: Int): Map[String, Any] = {
/*
println("***category: "+category.size)
println("***selected: "+selected.size)
println("***content: "+content.size)
*/
Map[String, Any](
//"links" -> links(i),
//"position" -> position(i),
//"color" -> color(i),
"weight" -> (if (weight.size > i) weight(i) else 0),
//"size" -> size(i),
"category" -> (if (category.size > i) category(i) else ""),
"content" -> (if (content.size > i) content(i) else ""), //Base64.encode(content(i)),
"selected" -> (if (selected.size > i) selected(i) else false),
"label" -> (if (label.size > i) label(i) else ""), // Base64.encode(label(i)),
"rate" -> (if (rate.size > i) rate(i) else 0),
"id" -> (if (uuid.size > i) uuid(i) else 0),
//"partition" -> (if (partition.size > i) partition(i) else 0),
"degree" -> ((if (inDegree.size > i) inDegree(i) else 0) + (if (outDegree.size > i) outDegree(i) else 0))
//"density" -> density(i)
)
}
/**
* Get "mininal" attributes (only the most important, for neighbourhood data)
* of a node from it's index in the graph
*/
def minimalAttributes(i: Int): Map[String, Any] = {
/*
println("***category: "+category.size)
println("***selected: "+selected.size)
println("***content: "+content.size)
*/
Map[String, Any](
//"links" -> links(i),
//"position" -> position(i),
//"color" -> color(i),
"weight" -> (if (weight.size > i) weight(i) else 0),
//"size" -> size(i),
"category" -> (if (category.size > i) category(i) else ""),
//"content" -> (if (content.size > i) content(i) else ""),//Base64.encode(content(i)),
// "selected" -> (if (selected.size > i) selected(i) else false),
"label" -> (if (label.size > i) label(i) else ""), // Base64.encode(label(i)),
//"rate" -> (if (rate.size > i) rate(i) else 0),
"id" -> (if (uuid.size > i) uuid(i) else 0),
//"partition" -> (if (partition.size > i) partition(i) else 0),
"degree" -> ((if (inDegree.size > i) inDegree(i) else 0) + (if (outDegree.size > i) outDegree(i) else 0))
//"density" -> density(i)
)
}
/**
* Get the map of all nodes
*/
def allNodes: Map[String, Map[String, Any]] = {
var nodeData = Map.empty[String, Map[String, Any]]
for (i <- ids) nodeData += getUuid(i) -> lessAttributes(i)
nodeData
}
def clearSelection = {
this + ("selected" -> this.selected.map(c => false))
}
/**
var nodeData = Map(uuid.map {case uuid => (uuid,Map.empty[String,Any])}:_*)
for (i <- ids) {
//arrays(i).map{case (k,v) (k,v(i))}
//val u = getUuid(i)
//for ((k,v) <- arrays(i)) {
// (k,v(i))
//
//nodeData(u) += k -> v(i)
}
// nodeData(getUuid(i)) += (k,v(i))
}
arrays.foreach{
case (k,v) =>
nodeData += (k,v)
}
nodeData
}*/
def map[T](id: Int, column: String, filter: T => T): Graph = {
set(id, column, filter(getArray[T](column)(id)))
}
def map[T](column: String, filter: T => T): Graph = {
this + (column -> (getArray[T](column).map {
f => filter(f)
}))
}
def filterNodeVisible[T](column: String, filter: T => Boolean) = {
this + ("visible" -> getArray[T](column).map {
x => filter(x)
})
}
def _filterNodeVisible[T](column: String, filter: T => Boolean) = {
this + ("visible" -> getArray[T](column).map {
x => filter(x)
})
}
def converter(removed: Set[Int]): Array[Int] = {
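    // Build an old-index -> new-index mapping: indices listed in `removed` map to -1, and the
    // remaining indices are shifted down by the number of removed indices preceding them.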
val _removed = removed.toList.sort {
(a, b) => a < b
}
var j = 0
(for (i <- ids) yield {
(if (_removed.size > j && i == _removed(j)) {
j += 1
-1
} else {
i - j
})
}).toArray
}
def remove(set: Set[Int]): Graph = {
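    // Drop the nodes whose indices are in `set`: every per-node array is filtered through the
    // converter and link targets are re-indexed; edge* attributes are deliberately left untouched.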
val conv = converter(set)
val newElements = elements.map {
      // HACK: we do not remove edge attributes (TODO: use more complex pattern matching to do that, e.g. "edge*")
case ("edgeNode", entries) =>
("edgeNode", entries)
case ("edgeIndex", entries) =>
("edgeIndex", entries)
case ("edgeWeight", entries) =>
("edgeWeight", entries)
case ("edgeSize", entries) =>
("edgeSize", entries)
case ("edgeColor", entries) =>
("edgeColor", entries)
case ("links", entries: Array[Map[Int, Double]]) =>
val newEntries = entries.zipWithIndex.filter {
case (e, i) => conv(i) >= 0
}.map {
case tpl =>
tpl._1.filter {
case (id, weight) => conv(id) >= 0
}.map {
case (id, weight) => (conv(id), weight)
}
}
("links", newEntries.toArray)
case (key: String, entries: Array[Symbol]) =>
(key, entries.zipWithIndex.filter {
case (e, i) => conv(i) >= 0
}.map(_._1).toArray)
case (key: String, entries: Array[Boolean]) =>
(key, entries.zipWithIndex.filter {
case (e, i) => conv(i) >= 0
}.map(_._1).toArray)
case (key: String, entries: Array[Double]) =>
(key, entries.zipWithIndex.filter {
case (e, i) => conv(i) >= 0
}.map(_._1).toArray)
case (key: String, entries: Array[Int]) =>
(key, entries.zipWithIndex.filter {
case (e, i) => conv(i) >= 0
}.map(_._1).toArray)
case (key: String, entries: Array[Color]) =>
(key, entries.zipWithIndex.filter {
case (e, i) => conv(i) >= 0
}.map(_._1).toArray)
case (key: String, entries: Array[String]) =>
(key, entries.zipWithIndex.filter {
case (e, i) => conv(i) >= 0
}.map(_._1).toArray)
case (key: String, entries: Array[(Int, Int)]) =>
(key, entries.zipWithIndex.filter {
case (e, i) => conv(i) >= 0
}.map(_._1).toArray)
case (key: String, entries: Array[(Double, Double)]) =>
(key, entries.zipWithIndex.filter {
case (e, i) => conv(i) >= 0
}.map(_._1).toArray)
case (key: String, entries: Array[((Double, Double),Int)]) =>
(key, entries.zipWithIndex.filter {
case (e, i) => conv(i) >= 0
}.map(_._1).toArray)
case (key: String, entries: Array[(Double, Double, Double, Double)]) =>
(key, entries.zipWithIndex.filter {
case (e, i) => conv(i) >= 0
}.map(_._1).toArray)
case (key: String, entries: Array[Any]) =>
(key, entries.zipWithIndex.filter {
case (e, i) => conv(i) >= 0
}.map(_._1).toArray)
case (key, entry) => (key, entry)
}
Graph.make(newElements)
}
/**
* TODO refactor to use a generic field update function
*/
def updatePosition(g: Graph): Graph = {
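    // Copy position and selection state from `g` for every node whose uuid is known to `g`;
    // nodes that `g` does not contain keep their current values.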
val tmp1: Array[(Double, Double)] = position.zipWithIndex.map {
case (elem, i) =>
val id = g.id(uuid(i))
if (id == -1) elem else g.position(id)
}.toArray
val tmp2: Array[Boolean] = selected.zipWithIndex.map {
case (s, i) =>
val id = g.id(uuid(i))
if (id == -1) s else g.selected(id)
}.toArray
Graph.make(elements ++ Map[String, Any](
"position" -> tmp1,
"selected" -> tmp2) // need to recompute things
)
}
def normalizePositions: Graph = this + ("visible" -> position.map {
case (x, y) => (x - baryCenter._1, y - baryCenter._2)
})
/**
* TODO refactor to use a generic field update function
*/
def updatePositionWithCategory(g: Graph): Graph = {
val tmp1: Array[(Double, Double)] = position.zipWithIndex.map {
case (elem, i) =>
val id = g.id(uuid(i))
        if (id == -1) { // the invader does not belong to us
elem
} else if (g.category(id).equalsIgnoreCase(category(i))) {
g.position(id)
} else {
elem
}
}.toArray
Graph.make(elements ++ Map[String, Any](
"position" -> tmp1) // need to recompute things
)
}
/**
* TODO refactor to use a generic field update function
*/
def updateSizeWithCategory(g: Graph): Graph = {
val tmp1: Array[Double] = size.zipWithIndex.map {
case (elem, i) =>
val id = g.id(uuid(i))
if (id == -1) {
elem
} else if (g.category(id).equalsIgnoreCase(category(i))) {
g.size(id)
} else {
elem
}
}.toArray
Graph.make(elements ++ Map[String, Any](
"size" -> tmp1) // need to recompute things
)
}
/**
* TODO refactor to use a generic field update function
*/
def updateLinksWithCategory(g: Graph): Graph = {
val tmp1: Array[Map[Int, Double]] = links.zipWithIndex.map {
case (elem, i) =>
val id = g.id(uuid(i))
if (id == -1) {
elem
} else if (g.category(id).equalsIgnoreCase(category(i))) {
g.links(id)
} else {
elem
}
}.toArray
Graph.make(elements ++ Map[String, Any](
"links" -> tmp1) // need to recompute things
)
}
/**
* TODO refactor to use a generic field update function
*/
def updateSelectedWithCategory(g: Graph): Graph = {
val tmp2: Array[Boolean] = selected.zipWithIndex.map {
case (s, i) =>
val id = g.id(uuid(i))
if (id == -1) {
s
} else if (g.category(id).equalsIgnoreCase(category(i))) {
g.selected(id)
} else {
s
}
}.toArray
Graph.make(elements ++ Map[String, Any](
"selected" -> tmp2) // need to recompute things
)
}
}
| moma/tinaviz | src/main/scala/eu/tinasoft/tinaviz/graph/Graph.scala | Scala | gpl-3.0 | 35,823 |
/*
* Copyright (c) 2002-2018 "Neo Technology,"
* Network Engine for Objects in Lund AB [http://neotechnology.com]
*
* This file is part of Neo4j.
*
* Neo4j is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
package org.neo4j.cypher.internal.compiler.v2_3.commands.expressions
import org.neo4j.cypher.internal.compiler.v2_3._
import org.neo4j.cypher.internal.compiler.v2_3.helpers.CollectionSupport
import org.neo4j.cypher.internal.compiler.v2_3.pipes.QueryState
import org.neo4j.cypher.internal.compiler.v2_3.symbols.SymbolTable
import org.neo4j.cypher.internal.frontend.v2_3.symbols._
case class ExtractFunction(collection: Expression, id: String, expression: Expression)
extends NullInNullOutExpression(collection)
with CollectionSupport
with Closure {
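  // Evaluates the inner expression once per element of the traversable collection value, binding
  // the element to `id` in a child execution context, and collects the results into a list.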
def compute(value: Any, m: ExecutionContext)(implicit state: QueryState) = makeTraversable(value).map {
case iterValue =>
val innerMap = m.newWith(id -> iterValue)
expression(innerMap)
}.toList
def rewrite(f: (Expression) => Expression) = f(ExtractFunction(collection.rewrite(f), id, expression.rewrite(f)))
override def children = Seq(collection, expression)
def arguments: Seq[Expression] = Seq(collection)
def calculateType(symbols: SymbolTable): CypherType = {
val iteratorType = collection.evaluateType(CTCollection(CTAny), symbols).legacyIteratedType
val innerSymbols = symbols.add(id, iteratorType)
CTCollection(expression.evaluateType(CTAny, innerSymbols))
}
def symbolTableDependencies = symbolTableDependencies(collection, expression, id)
}
| HuangLS/neo4j | community/cypher/cypher-compiler-2.3/src/main/scala/org/neo4j/cypher/internal/compiler/v2_3/commands/expressions/ExtractFunction.scala | Scala | apache-2.0 | 2,154 |
package com.ecfront.ez.framework.service.message
import com.ecfront.common.Resp
import com.ecfront.ez.framework.core.EZServiceAdapter
import com.ecfront.ez.framework.core.rpc.AutoBuildingProcessor
import com.fasterxml.jackson.databind.JsonNode
import scala.collection.mutable
object ServiceAdapter extends EZServiceAdapter[JsonNode] {
override def init(parameter: JsonNode): Resp[String] = {
Resp.success("")
}
override def initPost(): Unit = {
AutoBuildingProcessor.autoBuilding("com.ecfront.ez.framework.service.message")
super.initPost()
}
override def destroy(parameter: JsonNode): Resp[String] = {
Resp.success("")
}
override lazy val dependents: mutable.Set[String] =
mutable.Set(com.ecfront.ez.framework.service.jdbc.ServiceAdapter.serviceName)
override var serviceName: String = "message"
}
| gudaoxuri/ez-framework | services/message/src/main/scala/com/ecfront/ez/framework/service/message/ServiceAdapter.scala | Scala | apache-2.0 | 846 |
package varys.framework.master.scheduler
import scala.collection.mutable.{ArrayBuffer, HashMap, Map}
import varys.framework.master.{CoflowInfo, CoflowState, FlowInfo, SlaveInfo}
import varys.Logging
/**
* Implementation of a generalized coflow scheduler that works using the
* following steps:
* 1. Order coflows by some criteria.
* 2. Allocate rates to individual flows of each admitted coflow in that order.
*/
abstract class OrderingBasedScheduler extends CoflowScheduler with Logging {
val NIC_BitPS = System.getProperty("varys.network.nicMbps", "1024").toDouble * 1048576.0
override def schedule(schedulerInput: SchedulerInput): SchedulerOutput = {
val markedForRejection = new ArrayBuffer[CoflowInfo]()
    // STEP 1: Order READY or RUNNING coflows according to the scheduling policy
var sortedCoflows = getOrderedCoflows(schedulerInput.activeCoflows)
// STEP 2: Perform WSS + Backfilling
val sBpsFree = new HashMap[String, Double]().withDefaultValue(NIC_BitPS)
val rBpsFree = new HashMap[String, Double]().withDefaultValue(NIC_BitPS)
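    // Remaining free sender-side and receiver-side bandwidth per host, initialised to the full
    // NIC capacity and decremented as flows are allocated.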
for (cf <- sortedCoflows) {
logInfo("Scheduling " + cf)
if (markForRejection(cf, sBpsFree, rBpsFree)) {
markedForRejection += cf
} else {
val sUsed = new HashMap[String, Double]().withDefaultValue(0.0)
val rUsed = new HashMap[String, Double]().withDefaultValue(0.0)
for (flowInfo <- cf.getFlows) {
val src = flowInfo.source
val dst = flowInfo.destClient.host
val minFree = math.min(sBpsFree(src), rBpsFree(dst))
if (minFree > 0.0) {
flowInfo.currentBps = calcFlowRate(flowInfo, cf, minFree)
if (math.abs(flowInfo.currentBps) < 1e-6) {
flowInfo.currentBps = 0.0
}
flowInfo.lastScheduled = System.currentTimeMillis
// Remember how much capacity was allocated
sUsed(src) = sUsed(src) + flowInfo.currentBps
rUsed(dst) = rUsed(dst) + flowInfo.currentBps
// Set the coflow as running
cf.changeState(CoflowState.RUNNING)
} else {
flowInfo.currentBps = 0.0
}
}
// Remove capacity from ALL sources and destination for this coflow
for (sl <- schedulerInput.activeSlaves) {
val host = sl.host
sBpsFree(host) = sBpsFree(host) - sUsed(host)
rBpsFree(host) = rBpsFree(host) - rUsed(host)
}
}
}
// STEP2A: Work conservation
sortedCoflows = sortedCoflows.filter(_.curState == CoflowState.RUNNING)
for (cf <- sortedCoflows) {
var totalBps = 0.0
for (flowInfo <- cf.getFlows) {
val src = flowInfo.source
val dst = flowInfo.destClient.host
val minFree = math.min(sBpsFree(src), rBpsFree(dst))
if (minFree > 0.0) {
flowInfo.currentBps += minFree
sBpsFree(src) = sBpsFree(src) - minFree
rBpsFree(dst) = rBpsFree(dst) - minFree
}
totalBps += flowInfo.currentBps
}
// Update current allocation of the coflow
cf.setCurrentAllocation(totalBps)
}
SchedulerOutput(sortedCoflows, markedForRejection)
}
/**
* Returns an ordered list of coflows based on the scheduling policy
*/
def getOrderedCoflows(
activeCoflows: ArrayBuffer[CoflowInfo]): ArrayBuffer[CoflowInfo]
/**
* Mark a coflow as non-admissible based on some criteria.
* Overriden for schedulers with admission control (e.g., DeadlineScheduler)
*/
def markForRejection(
cf: CoflowInfo,
sBpsFree: Map[String, Double],
rBpsFree: Map[String, Double]): Boolean = false
/**
* Calculate rate of an individual flow based on the scheduling policy
*/
def calcFlowRate(
flowInfo: FlowInfo,
cf: CoflowInfo,
minFree: Double): Double
  /** Returns the current time in milliseconds. */
def now() = System.currentTimeMillis
}
| mosharaf/varys | core/src/main/scala/varys/framework/master/scheduler/OrderingBasedScheduler.scala | Scala | apache-2.0 | 3,933 |
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.spark.deploy.k8s
import org.apache.spark.SparkConf
import org.apache.spark.deploy.k8s.Config._
private[spark] object KubernetesVolumeUtils {
/**
* Extract Spark volume configuration properties with a given name prefix.
*
* @param sparkConf Spark configuration
* @param prefix the given property name prefix
* @return a Map storing with volume name as key and spec as value
*/
def parseVolumesWithPrefix(sparkConf: SparkConf, prefix: String): Seq[KubernetesVolumeSpec] = {
val properties = sparkConf.getAllWithPrefix(prefix).toMap
getVolumeTypesAndNames(properties).map { case (volumeType, volumeName) =>
val pathKey = s"$volumeType.$volumeName.$KUBERNETES_VOLUMES_MOUNT_PATH_KEY"
val readOnlyKey = s"$volumeType.$volumeName.$KUBERNETES_VOLUMES_MOUNT_READONLY_KEY"
val subPathKey = s"$volumeType.$volumeName.$KUBERNETES_VOLUMES_MOUNT_SUBPATH_KEY"
KubernetesVolumeSpec(
volumeName = volumeName,
mountPath = properties(pathKey),
mountSubPath = properties.get(subPathKey).getOrElse(""),
mountReadOnly = properties.get(readOnlyKey).exists(_.toBoolean),
volumeConf = parseVolumeSpecificConf(properties, volumeType, volumeName))
}.toSeq
}
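  // Illustrative sketch (the concrete key strings below are assumptions inferred from the constant
  // names above, not taken verbatim from this file): with the driver volume prefix, properties such as
  //   hostPath.data.mount.path   -> /checkpoints
  //   hostPath.data.options.path -> /mnt/host-checkpoints
  // would yield a single KubernetesVolumeSpec named "data" backed by a KubernetesHostPathVolumeConf.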
/**
* Get unique pairs of volumeType and volumeName,
* assuming options are formatted in this way:
* `volumeType`.`volumeName`.`property` = `value`
* @param properties flat mapping of property names to values
* @return Set[(volumeType, volumeName)]
*/
private def getVolumeTypesAndNames(properties: Map[String, String]): Set[(String, String)] = {
properties.keys.flatMap { k =>
k.split('.').toList match {
case tpe :: name :: _ => Some((tpe, name))
case _ => None
}
}.toSet
}
private def parseVolumeSpecificConf(
options: Map[String, String],
volumeType: String,
volumeName: String): KubernetesVolumeSpecificConf = {
volumeType match {
case KUBERNETES_VOLUMES_HOSTPATH_TYPE =>
val pathKey = s"$volumeType.$volumeName.$KUBERNETES_VOLUMES_OPTIONS_PATH_KEY"
KubernetesHostPathVolumeConf(options(pathKey))
case KUBERNETES_VOLUMES_PVC_TYPE =>
val claimNameKey = s"$volumeType.$volumeName.$KUBERNETES_VOLUMES_OPTIONS_CLAIM_NAME_KEY"
KubernetesPVCVolumeConf(options(claimNameKey))
case KUBERNETES_VOLUMES_EMPTYDIR_TYPE =>
val mediumKey = s"$volumeType.$volumeName.$KUBERNETES_VOLUMES_OPTIONS_MEDIUM_KEY"
val sizeLimitKey = s"$volumeType.$volumeName.$KUBERNETES_VOLUMES_OPTIONS_SIZE_LIMIT_KEY"
KubernetesEmptyDirVolumeConf(options.get(mediumKey), options.get(sizeLimitKey))
case KUBERNETES_VOLUMES_NFS_TYPE =>
val pathKey = s"$volumeType.$volumeName.$KUBERNETES_VOLUMES_OPTIONS_PATH_KEY"
val serverKey = s"$volumeType.$volumeName.$KUBERNETES_VOLUMES_OPTIONS_SERVER_KEY"
KubernetesNFSVolumeConf(
options(pathKey),
options(serverKey))
case _ =>
throw new IllegalArgumentException(s"Kubernetes Volume type `$volumeType` is not supported")
}
}
}
| dbtsai/spark | resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/KubernetesVolumeUtils.scala | Scala | apache-2.0 | 3,924 |
package actors.flights
import actors.DateRange
import actors.routing.FlightsRouterActor
import actors.routing.FlightsRouterActor._
import controllers.ArrivalGenerator
import drt.shared.ApiFlightWithSplits
import drt.shared.CrunchApi.MillisSinceEpoch
import drt.shared.FlightsApi.FlightsWithSplits
import drt.shared.Terminals.{T1, Terminal}
import drt.shared.dates.UtcDate
import services.SDate
import services.crunch.CrunchTestLike
import scala.concurrent.duration.DurationInt
import scala.concurrent.{Await, Future}
class StreamingFlightsByDaySpec extends CrunchTestLike {
"When I ask for a Source of query dates" >> {
"Given a start date of 2020-09-10 and end date of 2020-09-11" >> {
"I should get 2 days before (2020-09-08) to 1 day after (2020-09-12)" >> {
val result = DateRange.utcDateRangeWithBuffer(2, 1)(SDate(2020, 9, 10), SDate(2020, 9, 11))
val expected = Seq(
UtcDate(2020, 9, 8),
UtcDate(2020, 9, 9),
UtcDate(2020, 9, 10),
UtcDate(2020, 9, 11),
UtcDate(2020, 9, 12)
)
result === expected
}
}
}
"Given a flight scheduled on 2020-09-28" >> {
"When I ask if it's scheduled on 2020-09-28" >> {
"Then I should get a true response" >> {
val flight = ApiFlightWithSplits(ArrivalGenerator.arrival(schDt = "2020-09-28T12:00Z", actPax = Option(100)), Set())
val result = scheduledInRange(SDate(2020, 9, 28), SDate(2020, 9, 28, 23, 59), flight.apiFlight.Scheduled)
result === true
}
}
}
"Given a flight scheduled on 2020-09-29 with pax at the pcp a day earlier" >> {
"When I ask if its pcp range falls within 2020-09-28" >> {
"Then I should get a true response" >> {
val flight = ApiFlightWithSplits(ArrivalGenerator.arrival(schDt = "2020-09-29T12:00Z", pcpDt = "2020-09-28T12:00Z", actPax = Option(100)), Set())
val result = FlightsRouterActor.pcpFallsInRange(SDate(2020, 9, 28), SDate(2020, 9, 28, 23, 59), flight.apiFlight.pcpRange)
result === true
}
}
}
"Given a flight scheduled on 2020-09-27 with pax at the pcp a day later" >> {
"When I ask if its pcp range falls within 2020-09-28" >> {
"Then I should get a true response" >> {
val flight = ApiFlightWithSplits(ArrivalGenerator.arrival(schDt = "2020-09-27T12:00Z", pcpDt = "2020-09-28T12:00Z", actPax = Option(100)), Set())
val result = FlightsRouterActor.pcpFallsInRange(SDate(2020, 9, 28), SDate(2020, 9, 28, 23, 59), flight.apiFlight.pcpRange)
result === true
}
}
}
"Given map of UtcDate to flights spanning several days, each containing flights that have pcp times the day before, after or on time" >> {
val flight0109 = ApiFlightWithSplits(ArrivalGenerator.arrival(schDt = "2020-09-01T12:00Z", pcpDt = "2020-09-01T12:00Z", actPax = Option(100)), Set())
val flight0209onTime = ApiFlightWithSplits(ArrivalGenerator.arrival(schDt = "2020-09-02T23:00Z", pcpDt = "2020-09-02T23:00Z", actPax = Option(100)), Set())
val flight0209Late = ApiFlightWithSplits(ArrivalGenerator.arrival(schDt = "2020-09-02T23:10Z", pcpDt = "2020-09-03T01:00Z", actPax = Option(100)), Set())
val flight0309 = ApiFlightWithSplits(ArrivalGenerator.arrival(schDt = "2020-09-03T12:00Z", pcpDt = "2020-09-03T12:00Z", actPax = Option(100)), Set())
val flight0409 = ApiFlightWithSplits(ArrivalGenerator.arrival(schDt = "2020-09-04T12:00Z", pcpDt = "2020-09-04T12:00Z", actPax = Option(100)), Set())
val flight0509OnTime = ApiFlightWithSplits(ArrivalGenerator.arrival(schDt = "2020-09-05T01:00Z", pcpDt = "2020-09-05T01:00Z", actPax = Option(100)), Set())
val flight0509Early = ApiFlightWithSplits(ArrivalGenerator.arrival(schDt = "2020-09-05T01:10Z", pcpDt = "2020-09-04T23:50Z", actPax = Option(100)), Set())
val flight0609 = ApiFlightWithSplits(ArrivalGenerator.arrival(schDt = "2020-09-06T12:00Z", pcpDt = "2020-09-06T12:00Z", actPax = Option(100)), Set())
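    // Stub flights provider keyed by UTC date; the requested point-in-time and terminal arguments
    // are ignored, and unknown dates resolve to an empty FlightsWithSplits.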
val earlyOnTimeAndLateFlights = (_: Option[MillisSinceEpoch]) => (utcDate: UtcDate) => (_: Terminal) =>
Future(Map(
UtcDate(2020, 9, 1) -> FlightsWithSplits(Seq(flight0109)),
UtcDate(2020, 9, 2) -> FlightsWithSplits(Seq(flight0209onTime, flight0209Late)),
UtcDate(2020, 9, 3) -> FlightsWithSplits(Seq(flight0309)),
UtcDate(2020, 9, 4) -> FlightsWithSplits(Seq(flight0409)),
UtcDate(2020, 9, 5) -> FlightsWithSplits(Seq(flight0509OnTime, flight0509Early)),
UtcDate(2020, 9, 6) -> FlightsWithSplits(Seq(flight0609))
).getOrElse(utcDate, FlightsWithSplits.empty))
"When asking for flights for dates 3rd to 4th" >> {
"I should see the late pcp from 2nd, all 3 flights from the 3rd, 4th, and the early flight from the 5th" >> {
val startDate = SDate(2020, 9, 3)
val endDate = SDate(2020, 9, 4, 23, 59)
val flights = FlightsRouterActor.multiTerminalFlightsByDaySource(earlyOnTimeAndLateFlights)(startDate, endDate, Seq(T1), None)
val result = Await.result(FlightsRouterActor.runAndCombine(Future(flights)), 1 second)
val expected = FlightsWithSplits(Seq(flight0209Late, flight0309, flight0409, flight0509Early))
result === expected
}
}
}
}
| UKHomeOffice/drt-scalajs-spa-exploration | server/src/test/scala/actors/flights/StreamingFlightsByDaySpec.scala | Scala | apache-2.0 | 5,254 |
package commitlog
import java.io.{File, IOException}
import java.nio.file._
import java.nio.file.attribute.BasicFileAttributes
import java.util.concurrent.atomic.AtomicLong
import com.bwsw.commitlog.{CommitLog, CommitLogRecord}
import com.bwsw.commitlog.CommitLogFlushPolicy.{OnCountInterval, OnRotation, OnTimeInterval}
import org.scalatest.{BeforeAndAfterAll, FlatSpec, Matchers}
class CommitLogTest extends FlatSpec with Matchers with BeforeAndAfterAll {
val dir = new StringBuffer().append("target").append(File.separatorChar).append("clt").toString
val rec = "sample record".map(_.toByte).toArray
val recordSize = rec.length + CommitLogRecord.headerSize
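  // Each logical record occupies its payload plus a fixed header when written to the commit log file.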
private val fileIDGen = new AtomicLong(0L)
override def beforeAll() = {
new File(dir).mkdirs()
}
it should "write correctly (OnRotation policy)" in {
val cl = new CommitLog(1, dir, OnRotation, fileIDGen.getAndIncrement)
val f1 = cl.putRec(rec, 0)
val fileF1 = new File(f1)
fileF1.exists() shouldBe true
fileF1.length() == 0 shouldBe true
Thread.sleep(1100)
val f21 = cl.putRec(rec, 0)
fileF1.length() == recordSize*1 shouldBe true
val fileF21 = new File(f21)
fileF21.exists() shouldBe true
fileF21.length() == recordSize*0 shouldBe true
val f22 = cl.putRec(rec, 0)
fileF21.length() == recordSize*0 shouldBe true
val f3 = cl.putRec(rec, 0, true)
fileF21.length() == recordSize*2 shouldBe true
val fileF3 = new File(f3)
fileF3.exists() shouldBe true
fileF3.length() == recordSize*0 shouldBe true
cl.close()
fileF3.length() == recordSize*1 shouldBe true
f1 == f21 shouldBe false
f21 == f22 shouldBe true
f21 == f3 shouldBe false
f1 == f3 shouldBe false
}
it should "write correctly (OnTimeInterval policy) when startNewFileSeconds > policy seconds" in {
val cl = new CommitLog(4, dir, OnTimeInterval(2), fileIDGen.getAndIncrement)
val f11 = cl.putRec(rec, 0)
val fileF1 = new File(f11)
fileF1.exists() shouldBe true
fileF1.length() == recordSize*0 shouldBe true
Thread.sleep(2100)
fileF1.length() == recordSize*0 shouldBe true
val f12 = cl.putRec(rec, 0)
fileF1.length() == recordSize*1 shouldBe true
Thread.sleep(2100)
fileF1.length() == recordSize*1 shouldBe true
val f2 = cl.putRec(rec, 0)
fileF1.length() == recordSize*2 shouldBe true
val fileF2 = new File(f2)
fileF2.exists() shouldBe true
fileF2.length() == recordSize*0 shouldBe true
cl.close()
fileF2.length() == recordSize*1 shouldBe true
val f3 = cl.putRec(rec, 0)
fileF2.length() == recordSize*1 shouldBe true
val fileF3 = new File(f3)
fileF3.exists() shouldBe true
fileF3.length() == recordSize*0 shouldBe true
Thread.sleep(2100)
fileF3.length() == recordSize*0 shouldBe true
Thread.sleep(2100)
val f4 = cl.putRec(rec, 0)
fileF3.length() == recordSize*1 shouldBe true
val fileF4 = new File(f4)
fileF4.exists() shouldBe true
fileF4.length() == recordSize*0 shouldBe true
val f5 = cl.putRec(rec, 0, true)
fileF4.length() == recordSize*1 shouldBe true
val fileF5 = new File(f5)
fileF5.exists() shouldBe true
fileF5.length() == recordSize*0 shouldBe true
cl.close()
fileF5.length() == recordSize*1 shouldBe true
f11 == f12 shouldBe true
f11 == f2 shouldBe false
f2 == f3 shouldBe false
f3 == f4 shouldBe false
f4 == f5 shouldBe false
}
it should "write correctly (OnTimeInterval policy) when startNewFileSeconds < policy seconds" in {
val cl = new CommitLog(2, dir, OnTimeInterval(4), fileIDGen.getAndIncrement)
val f11 = cl.putRec(rec, 0)
val fileF1 = new File(f11)
fileF1.exists() shouldBe true
fileF1.length() == recordSize*0 shouldBe true
Thread.sleep(2100)
val f2 = cl.putRec(rec, 0)
fileF1.length() == recordSize*1 shouldBe true
f11 == f2 shouldBe false
val fileF2 = new File(f2)
fileF2.exists() shouldBe true
fileF2.length() == recordSize*0 shouldBe true
cl.close()
fileF2.length() == recordSize*1 shouldBe true
}
it should "write correctly (OnCountInterval policy)" in {
val cl = new CommitLog(2, dir, OnCountInterval(2), fileIDGen.getAndIncrement)
val f11 = cl.putRec(rec, 0)
val f12 = cl.putRec(rec, 0)
f11 == f12 shouldBe true
val fileF1 = new File(f11)
fileF1.exists() shouldBe true
fileF1.length() == 0 shouldBe true
val f13 = cl.putRec(rec, 0)
f11 == f13 shouldBe true
fileF1.exists() shouldBe true
fileF1.length() == recordSize*2 shouldBe true
Thread.sleep(2100)
fileF1.length() == recordSize*2 shouldBe true
val f2 = cl.putRec(rec, 0)
fileF1.length() == recordSize*3 shouldBe true
f11 == f2 shouldBe false
val fileF2 = new File(f2)
fileF2.exists() shouldBe true
fileF2.length() == recordSize*0 shouldBe true
cl.close()
fileF2.length() == recordSize*1 shouldBe true
}
override def afterAll = {
List(dir).foreach(dir =>
Files.walkFileTree(Paths.get(dir), new SimpleFileVisitor[Path]() {
override def visitFile(file: Path, attrs: BasicFileAttributes): FileVisitResult = {
Files.delete(file)
FileVisitResult.CONTINUE
}
override def postVisitDirectory(dir: Path, exc: IOException): FileVisitResult = {
Files.delete(dir)
FileVisitResult.CONTINUE
}
}))
}
}
| bwsw/tstreams-transaction-server | src/test/scala/commitlog/CommitLogTest.scala | Scala | apache-2.0 | 5,429 |